MediaRecorder: How to stop video recording and play back in the same video element?

I am using a <video> element with MediaRecorder:
function getUserMediaSuccess(stream) {
$videoElement[0].srcObject = stream;
$videoElement[0].autoplay = true;
$videoElement[0].muted = true;
$videoElement[0].controls = false;
mediaRecorder = new MediaRecorder(stream, settings.recorderOptions);
}
Once recording is finished, I want to play the recorded chunks.
I tried with:
const blob = new Blob(chunks, { 'type' : settings.recorderOptions.mimeType});
$videoElement[0].src = window.URL.createObjectURL(blob);
and also with
$videoElement[0].pause();
$videoElement[0].removeAttribute('src');
$videoElement[0].load();
$videoElement[0].src = settings.filename;
$videoElement[0].controls = true;
I cannot stop the video element from showing the real-time webcam feed.
I can play back the recorded video in ANOTHER video element, but I want to use the SAME video element that is used to display the webcam.
I also tried:
localStream.getTracks().forEach(function(track) {
track.stop();
});
Which gives a black screen, but I am unable then to play back again the recorded video.

I think you have done most things correctly.
Based on the Mozilla MediaRecorder example and some more research, I think the magic is to switch between using srcObject and src:
srcObject for the MediaStream (the getUserMedia live preview)
src with window.URL.createObjectURL for the recorded Blob
This snippet works if run on localhost or over https
(maybe the embedding here does not fulfill all the security requirements needed to allow access to getUserMedia).
const videoEl = document.getElementById('theVideoElement');
let mediaRecorder = null;
let mediaChunks = [];
function recordStart() {
console.log('recordStart..');
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices.getUserMedia (
{
audio: true,
video: true,
}
)
.then( stream => {
videoEl.srcObject = stream;
mediaRecorder = new MediaRecorder(stream);
mediaRecorder.addEventListener('dataavailable', event => {
mediaChunks.push(event.data);
});
mediaRecorder.addEventListener('stop', event => {
console.log("recorder stopped");
const blob = new Blob(mediaChunks, { 'type' : 'video/webm' });
const mediaBlobURL = window.URL.createObjectURL(blob);
mediaChunks = [];
videoEl.src = mediaBlobURL;
});
mediaRecorder.start();
console.log("recorder started", mediaRecorder.state);
window.setTimeout(event => {
console.log("time is over.");
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
// stop getUserMedia stream - this way the cam and mic gets released.
for (const track of stream.getTracks()) {
track.stop();
}
videoEl.srcObject = null;
console.log("stream stopped.");
}, 3000);
})
.catch( err => {
console.error(`The following getUserMedia error occurred:\n ${err}`);
});
} else {
console.error('getUserMedia not supported on your browser!');
}
}
console.info('******************************************');
window.addEventListener('load', (event) => {
console.info('All resources finished loading.');
const buttonEl = document.getElementById('button_start');
buttonEl.addEventListener('click', (event) => {
console.info('click!');
recordStart();
});
});
button {
display: block;
}
video {
display: block;
border: solid 1px black;
}
<button id="button_start">
start
</button>
<video
id="theVideoElement"
autoplay
controls
>
</video>
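To switch the same element back to the live preview afterwards, a minimal sketch (assuming a fresh getUserMedia call, since the original tracks were stopped) could look like this:
// hypothetical helper: return the same element to a live preview
function showLivePreview() {
  return navigator.mediaDevices.getUserMedia({ audio: true, video: true })
    .then(stream => {
      // release the blob URL that was assigned for playback
      if (videoEl.src) {
        window.URL.revokeObjectURL(videoEl.src);
        videoEl.removeAttribute('src');
      }
      // assign the live stream again; srcObject takes precedence over src
      videoEl.srcObject = stream;
      videoEl.muted = true; // avoid feedback from the live microphone
      return stream;
    });
}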

Related

Live-streaming video by connecting ArrayBuffers client-side

I'm trying to make a simple live webcam stream (one streamer to many clients) and I've run into a problem with showing the video on the client side.
My basic setup is that the webcam is recorded into 1000 ms chunks using MediaRecorder, each chunk is sent to the server over a WebSocket, the server broadcasts that recording to all the other users, and those ArrayBuffers are converted and played consecutively on the user's webpage. This is my code:
//receive video chunks from server
socket.on('video-stream', (stream) => {
console.log(stream);
createVideo(URL.createObjectURL(new Blob(stream)), 1);
});
//record video in chunks, send over websocket
navigator.mediaDevices.getUserMedia({ video: true, audio: true }).then((stream) => {
setInterval(function () {
record(stream, 1000).then((recording) => {
socket.emit('video-stream', {
stream: recording,
room: window.location.pathname.split('/')[0] || '/',
cam: 1,
});
});
}, 1000);
});
var record = (stream, ms) => {
var rec = new MediaRecorder(stream),
data = [];
rec.ondataavailable = (e) => data.push(e.data);
rec.start();
var stopped = new Promise(
(y, n) => ((rec.onstop = y), (rec.onerror = (e) => n(e.error || e.name)))
);
return Promise.all([stopped, wait(ms).then(() => rec.stop())]).then(() => data);
};
var wait = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
function createVideo(stream, cam) {
var video = document.getElementById('cam-' + cam + '-embed');
video.src = stream;
//video.addEventListener('click', () => {
// if (video.volume != 0) video.volume = 0;
// else video.volume = 1;
//});
}
The problem is that this requires changing the src of the video element on the page every 1000 ms, which makes the video blink constantly instead of playing smoothly. I need some way to merge the incoming video buffers on the client side instead of constantly changing the video element. I've been trying to figure out how to do this with no luck. Can someone please help me merge the incoming data into one video?
I've also tried:
- WebRTC - doesn't work because it requires way too much bandwidth from the streaming user.
- Encoding and concatenating the videos on the server side, then piping the result to a response as a ReadableStream. This also didn't work.
You need MediaSource and SourceBuffer. Something like this (note that the MediaSource must be created before it is passed to URL.createObjectURL):
var video = document.querySelector("#video");
var mediaSource = new MediaSource();
var sourceBuffer = null;
mediaSource.addEventListener("sourceopen", function()
{
sourceBuffer = mediaSource.addSourceBuffer("video/webm;codecs=vp8,opus");
});
video.src = URL.createObjectURL(mediaSource);
socket.on('onChunk', (d) => {
if(mediaSource.readyState == 'open' && sourceBuffer) {
sourceBuffer.appendBuffer(d);
}
})
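One caveat: SourceBuffer.appendBuffer() throws if it is called while a previous append is still being processed, so incoming chunks are usually queued and flushed on the 'updateend' event. A minimal sketch of that pattern, reusing the mediaSource and sourceBuffer variables from the snippet above (this socket handler would replace the simpler one there):
const queue = [];

socket.on('onChunk', (d) => {
  queue.push(d);
  appendNextChunk();
});

function appendNextChunk() {
  // appendBuffer must not run while a previous append is still in flight
  if (!sourceBuffer || sourceBuffer.updating || queue.length === 0) return;
  if (mediaSource.readyState !== 'open') return;
  sourceBuffer.appendBuffer(queue.shift());
}

// inside the existing "sourceopen" handler, after addSourceBuffer():
// sourceBuffer.addEventListener('updateend', appendNextChunk);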
More info: HTML5 Video: Streaming Video with Blob URLs

new MediaRecorder(stream[, options]): can the stream be modified while it is live?

new MediaRecorder(stream[, options]);
I want to record the user's camera and audio.
I need to mix song.mp3 into the audio track of the recording,
and export the result as a video file that can be downloaded via a link.
But can the stream passed as MediaRecorder's first parameter be modified while it is live?
When I call recorder.stop(),
it throws the error: Failed to execute 'stop' on 'MediaRecorder': The MediaRecorder's state is 'inactive'.
My code:
function getFileBuffer(filepath) {
return fetch(filepath, {method: 'GET'}).then(response => response.arrayBuffer())
}
function mp3play() {
getFileBuffer('song.mp3')
.then(buffer => context.decodeAudioData(buffer))
.then(buffer => {
console.log(buffer)
const source = context.createBufferSource()
source.buffer = buffer
let volume = context.createGain()
volume.gain.value = 1
source.connect(volume)
dest = context.createMediaStreamDestination()
volume.connect(dest)
// volume.connect(context.destination)
source.start(0)
const _audioTrack = stream.getAudioTracks();
if (_audioTrack.length > 0) {
_audioTrack[0].stop();
stream.removeTrack(_audioTrack[0]);
}
//
// console.log(dest.stream)
// console.log(dest.stream.getAudioTracks()[0])
// stream.addTrack(dest.stream.getAudioTracks()[0])
})
}
function startRecording() {
recorder = new MediaRecorder(stream, {
mimeType: 'video/webm'
})
recorder.start()
stopBtn.removeAttribute('disabled')
startBtn.disabled = true
}
No, we still can't record a MediaStream whose tracks are changed after the recording began; doing so will stop() the MediaRecorder. Here is a very related Q/A which was more about recording video.
What can be done, though, is to create a kind of merger MediaStream.
It's way easier with audio, especially since you are already using the Web Audio API: all you need to do is create another MediaStreamDestination node and connect / disconnect the different sources.
const base = "https://upload.wikimedia.org/wikipedia/en/d/";
const urls = [
"d3/Beach_Boys_-_Good_Vibrations.ogg",
"dc/Strawberry_Fields_Forever_%28Beatles_song_-_sample%29.ogg"
].map( url => base + url );
const context = new AudioContext();
const button = document.querySelector( 'button' );
button.onclick = async () => {
button.disabled = true;
context.resume();
const audiobuffers = await Promise.all( urls.map( fetchAsAudioBuffer ) );
button.remove();
const streamNode = context.createMediaStreamDestination();
const stream = streamNode.stream;
const recorder = new MediaRecorder( stream );
const chunks = [];
recorder.ondataavailable = evt => chunks.push( evt.data );
recorder.onstop = evt => exportAudio( new Blob( chunks ) );
document.getElementById( 'record-stopper' ).onclick = evt => {
recorder.stop();
current_source.stop( 0 );
};
let current_index = 0;
let current_source = null;
document.getElementById( 'switcher' ).onclick = switchAudioSource;
switchAudioSource();
recorder.start();
function switchAudioSource() {
if( current_source ) {
current_source.stop( 0 );
}
current_index = (current_index + 1) % audiobuffers.length;
current_source = context.createBufferSource();
current_source.buffer = audiobuffers[ current_index ];
current_source.loop = true;
current_source.connect( streamNode );
current_source.connect( context.destination );
current_source.start( 0 );
}
};
function exportAudio( blob ) {
const aud = new Audio( URL.createObjectURL( blob ) );
aud.controls = true;
document.body.prepend( aud );
}
async function fetchAsAudioBuffer( url ) {
const buf = await fetchAsBuffer( url );
return context.decodeAudioData( buf );
}
async function fetchAsBuffer( url ) {
const resp = await fetch( url );
return resp.arrayBuffer();
}
button+.recording-controls,
audio+.recording-controls {
display: none;
}
<button>begin</button>
<div class="recording-controls">
<label>Recording...</label>
<button id="switcher">Switch Audio Sources</button>
<button id="record-stopper">Stop Recording</button>
</div>
For video that would imply recording a CanvasCaptureMediaStreamTrack and drawing the different video streams onto the source <canvas>, but we generally lose a lot of quality doing so. A rough sketch of that approach is below.
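A minimal sketch of that canvas-based approach, assuming two hypothetical <video> elements (video1, video2) that are already playing the source streams:
const canvas = document.createElement('canvas');
canvas.width = 640;
canvas.height = 480;
const ctx = canvas.getContext('2d');

let currentVideo = video1; // hypothetical <video> element currently being recorded

function draw() {
  // keep copying frames of the selected source onto the canvas
  ctx.drawImage(currentVideo, 0, 0, canvas.width, canvas.height);
  requestAnimationFrame(draw);
}
draw();

// record the canvas instead of the camera streams; switching sources
// only changes what gets drawn, so the recorder never sees a track change
const canvasStream = canvas.captureStream(30); // 30 fps
const recorder = new MediaRecorder(canvasStream);

function switchVideoSource(nextVideo) {
  currentVideo = nextVideo;
}

// audio from the MediaStreamDestination node above could be merged in, e.g.:
// new MediaStream([ ...canvasStream.getVideoTracks(), ...streamNode.stream.getAudioTracks() ]);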

Using fake-video in plugins I can't use two videos in a test flow in Cypress

I have two cameras in a test flow, and I must go through both of them with different videos.
Using fake-video in plugins I can fake the camera, but I can't change which video is played.
I need to go through both cameras with different videos in a single spec.js.
Could you help me use two videos in a single spec.js, so that both videos can be played and I can choose which video plays on which camera?
Plugins/index.js
module.exports = (on, config) => {
on('before:browser:launch', (browser = {}, args) => {
// args.push('--use-fake-device-for-media-stream')
if (browser.name === 'chrome') {
args.push('--use-fake-ui-for-media-stream')
args.push('--use-fake-device-for-media-stream')
args.push('--use-file-for-fake-video-capture=C:\\NOEMI\\EjemploWebcam\\webcam-tests\\cypress\\fixtures\\akiyo_cif.y4m')
//args.push('--use-file-for-fake-video-capture=C:\\NOEMI\\onboardingRepos\\onboarding-web\\cypress\\fixtures\\prueba.y4m')
}
return args
})
}
I was able to change the video source at runtime by using the task command and replacing the content of the file passed through --use-file-for-fake-video-capture. The tricky part is that, as described here, you will need to call getUserMedia() again after replacing the file.
// cypress.config.js (fs and path need to be required at the top of the file)
const fs = require("fs");
const path = require("path");
// ...
setupNodeEvents(on, config) {
on("before:browser:launch", (browser, launchOptions) => {
if (browser.family === "chromium" && browser.name !== "electron") {
const videoPath = path.resolve(__dirname, "cypress", "fixtures", "webcam.y4m");
launchOptions.args.push(`--use-file-for-fake-video-capture=${videoPath}`);
}
return launchOptions;
});
on("task", {
changeVideoSource(videoSource) {
console.log("TASK - Changing video source to", videoSource);
const webcamPath = path.join("cypress", "fixtures", "webcam.y4m");
const sourceVideoPath = path.join("cypress", "fixtures", videoSource);
const video = fs.readFileSync(sourceVideoPath);
fs.writeFileSync(webcamPath, video);
return null;
},
resetVideoSource() {
console.log("TASK - Resetting video source");
const webcamPath = path.join("cypress", "fixtures", "webcam.y4m");
const defaultVideoPath = path.join("cypress", "fixtures", "default.y4m");
const video = fs.readFileSync(defaultVideoPath);
fs.writeFileSync(webcamPath, video);
return null;
},
});
},
Then every time you want to change the video, just call cy.task("changeVideoSource", "video2.y4m");
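In a spec this could be used roughly as follows; the URL, selectors, and fixture names are placeholders, and the key point is that the app must call getUserMedia() again after the file has been swapped:
// spec.js - hypothetical flow
it("uses a different video for each camera", () => {
  cy.task("changeVideoSource", "video1.y4m");
  cy.visit("/first-camera-step");
  // the app requests the camera here, so Chrome serves video1.y4m
  cy.get("[data-cy=start-camera]").click();

  cy.task("changeVideoSource", "video2.y4m");
  // trigger another getUserMedia() call so Chrome re-reads the fake capture file
  cy.get("[data-cy=next-camera]").click();

  cy.task("resetVideoSource");
});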

Is there a way to increase recording quality with the Web Audio API in Safari?

I'm using WebRTC along with WebAudioRecorder.js and the Web Audio API to record microphone input from the user for audio recognition with the audD API (similar to Shazam). This is working fine in Chrome and Firefox and it seems the quality of the recording is fairly solid. However, audD is not able to recognize the blob/file being sent from my recording in Safari (11.1.2) because of what I'm guessing is low audio quality (the playback is almost inaudible). The only audio format that both Safari and audD are compatible with is mp3, so that's how I've been encoding the file.
Javascript:
// to be set to a WebAudioRecorder.js recorder instance
let recorder;
// to be set to the stream resulting from getUserMedia()
let gumStream;
function beginRecording() {
if (navigator.mediaDevices.getUserMedia) {
console.log('starting the recording');
navigator.mediaDevices.getUserMedia({ 'audio': true })
.then(function(stream) {
let AudioContext = window.AudioContext // Default
|| window.webkitAudioContext // Safari and old versions of Chrome
|| false;
if (AudioContext) {
let audioCtx = new AudioContext;
gumStream = stream;
let source = audioCtx.createMediaStreamSource(stream);
recorder = new WebAudioRecorder(source, {
workerDir: 'web-audio-recorder-js/lib/',
encoding: 'mp3'
});
} else {
alert('The Web Audio API is not supported.');
}
recorder.setOptions({
timeLimit: 120,
encodeAfterRecord: true,
ogg: {quality: 0.9},
mp3: {bitRate: 320},
});
recorder.startRecording();
recorder.onComplete = function(recorder, blob) {
createAudioPlayback(blob);
POSTreq(blob);
}
recorder.onError = function(recorder, err) {
console.error(err);
}
})
.catch(function(err) {
console.error(err);
})
}
}
function stopRecording() {
console.log('stopping the recording');
let recordingTime = recorder.recordingTime();
console.log(recordingTime);
let audioTrack = gumStream.getAudioTracks()[0];
console.log(audioTrack);
audioTrack.stop();
recorder.finishRecording();
$('#msg_box').text(`Recorded for ${Math.round(recordingTime)} seconds`);
console.log('recording stopped');
}
function createAudioPlayback(blobData) {
let url = URL.createObjectURL(blobData);
$('body').append(`<audio controls src="${url}"></audio>`);
}
function POSTreq (blobData) {
let xhr = new XMLHttpRequest();
let fd = new FormData();
fd.append('api_token', '');
fd.append('file', blobData);
fd.append('method', 'recognize');
fd.append('return_itunes_audios', true);
fd.append('itunes_country', 'us');
xhr.onreadystatechange = function() {
if (xhr.readyState === 4) {
parseRetrievedData(xhr.response);
}
}
xhr.open('POST', 'https://api.audd.io/');
xhr.responseType = 'json';
xhr.send(fd);
}
function parseRetrievedData(parseData) {
console.log('the data from the audD api is: ', parseData);
}
$(function() {
$('#start-button').click(function(e) {
beginRecording();
$('#stop-button').prop('hidden', false);
});
$('#stop-button').click(function(e) {
stopRecording();
});
});
HTML:
<div class="recorder_wrapper">
<div class="recorder">
<button id="start-button">Start</button>
<button id="stop-button">Stop</button>
<p id="msg_box"></p>
<section class="auth-links-region" role="region">
Signup
Login
</section>
<section class="authentication-region" role="region" hidden>
<p class="authentication-text"></p>
My Searches
Logout
</section>
</div>
</div>

Chrome extension to capture video of last few minutes of active tab

I am trying to write a Chrome plugin to capture a video of the active tab. My code is based on this post.
When my page action is invoked, I start the recording:
var recordedChunks = null;
var captureOptions = { audio : false, video : true };
chrome.tabCapture.capture(captureOptions,
function(stream) {
if (stream) {
recordedChunks = [];
var options = {mimeType: "video/webm"};
mediaRecorder = new MediaRecorder(stream, options);
mediaRecorder.start();
mediaRecorder.ondataavailable = function(event) {
if (event.data.size > 0) {
recordedChunks.push(event.data);
}
}
}
}
);
When the page action is invoked again, I stop the recording and download a file as:
mediaRecorder.stop();
var blob = new Blob(recordedChunks, {
type: 'video/webm'
});
var url = URL.createObjectURL(blob);
var a = document.createElement('a');
document.body.appendChild(a);
a.style = 'display: none';
a.href = url;
a.download = 'test.webm';
a.click();
window.URL.revokeObjectURL(url);
stream.getVideoTracks()[0].stop();
This works great - I am able to play the downloaded test.webm video.
But I only want to record the last few minutes of video of the active tab. I do not want the recordedChunks array to grow unbounded. So, I tried something like this in the start recording action:
chrome.tabCapture.capture(captureOptions,
function(stream) {
// ...
mediaRecorder.ondataavailable = function(event) {
if (event.data.size > 0) {
recordedChunks.push(event.data);
// CHANGE HERE: keep only the last 1000 blobs
while (recordedChunks.length > 1000) {
recordedChunks.shift();
}
}
}
}
);
But with this modification, the downloaded test.webm video is not playable. How do I capture just the tail of the blob output from MediaRecorder?
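A note on why this likely breaks, plus a hedged workaround sketch: with MediaRecorder the very first chunk contains the WebM header / initialization data, so once it is shifted out of recordedChunks the remaining chunks no longer form a playable file. One common pattern is to restart the recorder on a fixed interval so that every segment is self-contained, and to keep only the most recent segments (the segment length and count below are arbitrary):
// hedged sketch: each segment is an independently playable recording
const SEGMENT_MS = 60 * 1000;  // length of one segment
const MAX_SEGMENTS = 3;        // keep roughly the last 3 minutes
let segments = [];

function startSegment(stream) {
  const chunks = [];
  const recorder = new MediaRecorder(stream, { mimeType: 'video/webm' });
  recorder.ondataavailable = (event) => {
    if (event.data.size > 0) chunks.push(event.data);
  };
  recorder.onstop = () => {
    // each Blob has its own header, so it plays on its own;
    // stitching segments into one continuous file would require re-muxing
    segments.push(new Blob(chunks, { type: 'video/webm' }));
    if (segments.length > MAX_SEGMENTS) segments.shift();
    startSegment(stream); // immediately begin the next segment (a tiny gap is possible)
  };
  recorder.start();
  setTimeout(() => recorder.stop(), SEGMENT_MS);
}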
