Unable to play .aac audio file using the Media Source Extensions API

I want to play a .aac audio file using the Media Source Extensions API. I replaced the sample code's MIME type with mime = 'audio/aac' and used a .aac file. Is there any other parameter that needs to be specified?
var vidElement = document.querySelector('audio');

if (window.MediaSource) {
  var mediaSource = new MediaSource();
  vidElement.src = URL.createObjectURL(mediaSource);
  mediaSource.addEventListener('sourceopen', sourceOpen);
} else {
  console.log("The Media Source Extensions API is not supported.");
}

function sourceOpen(e) {
  URL.revokeObjectURL(vidElement.src);
  var mime = 'audio/aac';
  var mediaSource = e.target;
  var sourceBuffer = mediaSource.addSourceBuffer(mime);
  var videoUrl = 'example.aac';
  fetch(videoUrl)
    .then(function(response) {
      return response.arrayBuffer();
    })
    .then(function(arrayBuffer) {
      sourceBuffer.addEventListener('updateend', function(e) {
        if (!sourceBuffer.updating && mediaSource.readyState === 'open') {
          mediaSource.endOfStream();
        }
      });
      sourceBuffer.appendBuffer(arrayBuffer);
    });
}

Converting to .mp4 and setting the MIME type to audio/mp4; codecs="mp4a.40.2" worked for me. I couldn't find a way to do it without converting.
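For reference, a minimal sketch of that working setup, assuming the .aac file has been converted to a fragmented MP4 (MSE requires the fragmented ISO BMFF layout, e.g. via ffmpeg's -movflags frag_keyframe+empty_moov; the file name example.mp4 is hypothetical):

// Inside sourceOpen, as in the question's code above.
// Probe codec support before creating the SourceBuffer.
var mime = 'audio/mp4; codecs="mp4a.40.2"';
if (MediaSource.isTypeSupported(mime)) {
  var sourceBuffer = mediaSource.addSourceBuffer(mime);
  // Fetch the converted, fragmented MP4 instead of the raw .aac file.
  fetch('example.mp4')
    .then(function(response) { return response.arrayBuffer(); })
    .then(function(arrayBuffer) { sourceBuffer.appendBuffer(arrayBuffer); });
} else {
  console.log(mime + ' is not supported by this browser.');
}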

Related

No audio in videos above 360p downloaded from ytdl-core (Express)?

I have made an API that downloads videos from a YouTube link, but I'm not able to download the video with its audio above the 360p format. It downloads only video, with no audio.
Is there any solution to this?
Typically 1080p or better video does not have audio encoded with it. The audio must be downloaded separately and merged via an appropriate encoding library. ffmpeg is the most widely used tool, with many Node.js modules available. Use the format objects returned from ytdl.getInfo to download the specific streams you need and combine them. Look at https://github.com/fent/node-ytdl-core/blob/master/example/ffmpeg.js for an example of doing this.
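As a rough illustration of that approach, here is a sketch that pipes a video-only and an audio-only stream into ffmpeg over extra stdio pipes. It assumes ffmpeg is on your PATH and that itags 137 (1080p video-only) and 140 (audio-only) exist for the video; check info.formats first.

const ytdl = require('ytdl-core');
const { spawn } = require('child_process');

const url = 'http://www.youtube.com/watch?v=e_RsG3HPpA0';
const video = ytdl(url, { quality: '137' }); // 1080p, video only
const audio = ytdl(url, { quality: '140' }); // audio only (m4a)

// ffmpeg reads video from fd 3 and audio from fd 4, then remuxes
// the two streams into one file without re-encoding.
const ffmpeg = spawn('ffmpeg', [
  '-i', 'pipe:3',
  '-i', 'pipe:4',
  '-c', 'copy',
  'output.mp4'
], { stdio: ['inherit', 'inherit', 'inherit', 'pipe', 'pipe'] });

video.pipe(ffmpeg.stdio[3]);
audio.pipe(ffmpeg.stdio[4]);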
You can specify the video quality:
const fs = require('fs');
const ytdl = require('ytdl-core');

// itag 18 is 360p MP4 with audio already muxed in
const video = ytdl('http://www.youtube.com/watch?v=e_RsG3HPpA0', { quality: 18 });
video.on('progress', function(info) {
  console.log('Download progress');
});
video.on('end', function(info) {
  console.log('Download finished');
});
video.pipe(fs.createWriteStream('video.mp4'));
Please check the available options for the quality value at this link.
If you are using ytdl-core, the problem is with the itags and the availability of the itag you need. Only three itags support both video and audio together; the rest carry either audio only or video only. For video with audio in ytdl-core you need to explicitly check whether the URL supports 720p or 1080p. I created two functions that can help you a lot. You simply send an XMLHttpRequest from index.html and wait for the link in the response, so that you or your user can download from that link. "fup90" means a false URL was provided and "inc90" denotes an incorrect URL, so you can handle the error if the URL is not a YouTube URL. The code is shown below. Note that you send the XMLHttpRequest using the POST method, with the data as a JSON string in this format: var data = { downloadType: "audio"/"video", quality: "required itag", url: "youtube video url" }.
const ytdl = require('ytdl-core');
const express = require('express');
const parser = require('body-parser');
const app = express();

app.use(parser.text());
app.use(express.static(__dirname + "\\static"));

// video formats: 18 - 360p and 22 - 720p

// audio
async function getAudioData(videoURL) {
  let videoid = await ytdl.getURLVideoID(videoURL);
  let info = await ytdl.getInfo(videoid);
  // let format = ytdl.chooseFormat(info.formats, { quality: '134' }); // for video
  // var format = ytdl.filterFormats(info.formats, 'videoandaudio');
  let format = ytdl.filterFormats(info.formats, 'audioonly');
  let required_url = 0;
  format.forEach(element => {
    if (element.mimeType == `audio/mp4; codecs="mp4a.40.2"`) {
      required_url = element.url;
    }
  });
  return required_url;
}

async function getVideoData(videoURL, qualityCode) {
  try {
    let videoid = ytdl.getURLVideoID(videoURL);
    let info = await ytdl.getInfo(videoid);
    var ifExists = true;
    // 720p (itag 22) is not available for every video; fall back to 360p (itag 18).
    if (qualityCode == 22) {
      ifExists = info.formats.some(element => element.itag == 22);
      if (!ifExists) {
        qualityCode = 18;
      }
    }
    let format = ytdl.chooseFormat(info.formats, { quality: qualityCode });
    var answers = {
      url: format.url,
      exists: ifExists
    };
  } catch (e) {
    var answers = {
      url: "fup90",
      exists: false
    };
  }
  return answers;
}

app.get("/", (req, res) => {
  res.sendFile(__dirname + "\\index.html");
});

app.post("/getdownload", async (req, res) => {
  let data = JSON.parse(req.body);
  if (data.downloadType === "video") {
    var answer = await getVideoData(data.url, data.quality);
    if (answer.url === "fup90") {
      res.send("inc90");
    } else {
      res.send(answer.exists ? answer.url : "xformat");
    }
  } else if (data.downloadType === "audio") {
    var audioLink = await getAudioData(data.url);
    res.send(audioLink);
  } else {
    res.send("error from server");
  }
});

app.listen(8000, () => {
  console.log("server started at http://localhost:8000");
});

Using fake-video in plugins, I can't use two videos in a test flow in Cypress

I have two cameras in a test flow, and I must go through both with different videos.
Using fake-video in plugins I can fake the camera, but I can't change the video; I must go through both cameras with different videos in a single spec.js.
Could you help me use two videos in a single spec.js, so that both videos can be played and I can choose which video plays on which camera?
plugins/index.js
module.exports = (on, config) => {
  on('before:browser:launch', (browser = {}, args) => {
    // args.push('--use-fake-device-for-media-stream')
    if (browser.name === 'chrome') {
      args.push('--use-fake-ui-for-media-stream')
      args.push('--use-fake-device-for-media-stream')
      args.push('--use-file-for-fake-video-capture=C:\\NOEMI\\EjemploWebcam\\webcam-tests\\cypress\\fixtures\\akiyo_cif.y4m')
      // args.push('--use-file-for-fake-video-capture=C:\\NOEMI\\onboardingRepos\\onboarding-web\\cypress\\fixtures\\prueba.y4m')
    }
    return args
  })
}
I was able to change the video source at runtime by using the task command and replacing the content of the file passed through --use-file-for-fake-video-capture. The tricky part is that, as described here, you will need to call getUserMedia() again after replacing the file.
// cypress.config.js
const path = require("path");
const fs = require("fs");

setupNodeEvents(on, config) {
  on("before:browser:launch", (browser, launchOptions) => {
    if (browser.family === "chromium" && browser.name !== "electron") {
      const videoPath = path.resolve(__dirname, "cypress", "fixtures", "webcam.y4m");
      launchOptions.args.push(`--use-file-for-fake-video-capture=${videoPath}`);
    }
    return launchOptions;
  });
  on("task", {
    changeVideoSource(videoSource) {
      console.log("TASK - Changing video source to", videoSource);
      // Overwrite the fake-capture file with the requested fixture.
      const webcamPath = path.join("cypress", "fixtures", "webcam.y4m");
      const sourceVideoPath = path.join("cypress", "fixtures", videoSource);
      const video = fs.readFileSync(sourceVideoPath);
      fs.writeFileSync(webcamPath, video);
      return null;
    },
    resetVideoSource() {
      console.log("TASK - Resetting video source");
      const webcamPath = path.join("cypress", "fixtures", "webcam.y4m");
      const defaultVideoPath = path.join("cypress", "fixtures", "default.y4m");
      const video = fs.readFileSync(defaultVideoPath);
      fs.writeFileSync(webcamPath, video);
      return null;
    },
  });
},
Then every time you want to change the video, just call cy.task("changeVideoSource", "video2.y4m");
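For illustration, a sketch of how a spec might drive this (the route and the reload-to-retrigger-getUserMedia() step are assumptions about your app):

// spec.js
it("plays a different video on each camera", () => {
  cy.task("changeVideoSource", "video1.y4m");
  cy.visit("/capture"); // page calls getUserMedia() on load
  // ... assertions for the first camera ...

  cy.task("changeVideoSource", "video2.y4m");
  cy.reload(); // forces getUserMedia() to run again with the new file
  // ... assertions for the second camera ...

  cy.task("resetVideoSource");
});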

Stream audio to the Azure Speech API with Node.js in the browser

I'm making a speech-to-text demo in the browser with Node.js, using the Azure Speech API. The API document here specifies that it needs .wav or .ogg files, but the example further down makes the API call by sending byte data.
I've already got my data from the microphone as a byte array. Is converting it to bytes and sending it to the API the right path, or is it better to save it as a .wav file and then send that to the API?
So below is my code.
This is the stream-from-microphone part:
// rec, audioChunks and recordedAudio are declared elsewhere
navigator.mediaDevices.getUserMedia({ audio: true })
  .then(stream => { handlerFunction(stream) })

function handlerFunction(stream) {
  rec = new MediaRecorder(stream);
  rec.ondataavailable = e => {
    audioChunks.push(e.data);
    if (rec.state == "inactive") {
      let blob = new Blob(audioChunks, { type: 'audio/wav; codec=audio/pcm; samplerate=16000' });
      recordedAudio.src = URL.createObjectURL(blob);
      recordedAudio.controls = true;
      recordedAudio.autoplay = true;
      console.log(blob);
      var reader = new FileReader();
      reader.readAsArrayBuffer(blob);
      reader.onloadend = function () {
        var byteArray = new Uint8Array(reader.result);
        console.log("reader result" + reader.result)
        setTimeout(() => getText(byteArray), 1000);
      }
    }
  }
}
This is the API call part:
function getText(audio, callback) {
  console.log("in function audio " + audio);
  console.log("how many bytes?: " + audio.byteLength)
  // `time` is a global used to drop out-of-order responses
  const sendTime = Date.now();
  fetch('https://westus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?language=en-US', {
    method: "POST",
    headers: {
      'Accept': 'application/json',
      'Ocp-Apim-Subscription-Key': YOUR_API_KEY,
      // 'Transfer-Encoding': 'chunked',
      // 'Expect': '100-continue',
      'Content-type': 'audio/wav; codec=audio/pcm; samplerate=16000'
    },
    body: audio
  })
    .then(function (r) {
      return r.json();
    })
    .then(function (response) {
      if (sendTime < time) {
        return
      }
      time = sendTime
      // callback(response)
    }).catch(e => {
      console.log("Error", e)
    })
}
It returns 400 (Bad Request) and says:
{Message: "Unsupported audio format"}
Reason:
Note that you're not creating a MediaRecorder with an audio/wav mimeType via
new Blob(audioChunks, { type: 'audio/wav; codec=audio/pcm; samplerate=16000' })
This statement is only a description of the Blob; it does not change the recorded format. I tested my Chrome (v71) with isTypeSupported:
MediaRecorder.isTypeSupported("audio/wav") // return false
MediaRecorder.isTypeSupported("audio/ogg") // return false
MediaRecorder.isTypeSupported("audio/webm") // return true
It seems that MediaRecorder will only record audio as audio/webm. Also, when I run the following code on Chrome, the default rec.mimeType is audio/webm;codecs=opus:
rec = new MediaRecorder(stream);
According to the audio format requirements, audio/webm is not supported yet.
Approach:
Before calling getText() we need to convert the webm to wav first. There are quite a lot of libraries that can help us do that. I just copied Jam3's audiobuffer-to-wav script in before your code to convert webm to wav:
// add Jam3's script between Line 2 and Line 94, or import that module as you like

// create an AudioContext that helps us decode the webm audio
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
rec = new MediaRecorder(stream, {
  mimeType: 'audio/webm;codecs=opus',
});

// ...

rec.ondataavailable = e => {
  audioChunks.push(e.data);
  if (rec.state == "inactive") {
    var blob = new Blob(audioChunks, { 'type': 'audio/webm;codecs=opus' });
    var fileReader = new FileReader();
    fileReader.readAsArrayBuffer(blob);
    fileReader.onloadend = function () {
      audioCtx.decodeAudioData(
        fileReader.result,
        function (buffer) {
          // audioBufferToWav comes from Jam3's script
          var wav = audioBufferToWav(buffer);
          setTimeout(() => getText(wav), 1000);
        },
        function (e) { console.log(e); }
      );
    };
  }
}
And it works fine for me.
As a side note, I suggest you invoke the speech-to-text service from your backend rather than from the browser. Exposing your subscription key to the front end is really dangerous: anyone could inspect the network traffic and steal your key.
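To make that concrete, a minimal proxy sketch (assuming Express and Node 18+ for the global fetch; the /stt route name is hypothetical, and the key stays in an environment variable on the server):

const express = require('express');
const app = express();
// Accept the raw wav body posted by the browser
app.use(express.raw({ type: 'audio/wav', limit: '10mb' }));

app.post('/stt', async (req, res) => {
  const r = await fetch(
    'https://westus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?language=en-US',
    {
      method: 'POST',
      headers: {
        'Ocp-Apim-Subscription-Key': process.env.SPEECH_KEY, // never shipped to the browser
        'Content-Type': 'audio/wav; codec=audio/pcm; samplerate=16000',
      },
      body: req.body,
    }
  );
  res.json(await r.json());
});

app.listen(3000);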

Saving blobs as a single webm file

I'm recording the user's screen via WebRTC, and then posting video blobs every x seconds using MediaStreamRecorder. On the server side I have an action set up in Sails which saves the blob as a webm file.
The problem is that I can't get it to append the data and create one large webm file. When it appends, the file size increases as expected, so the data is being appended, but when I go to play the file it'll either play the first second, not play at all, or play without showing the video.
It would be possible to merge the files with ffmpeg, but I'd rather avoid this if at all possible.
Here's the code on the client:
'use strict';

// Polyfill in Firefox.
// See https://blog.mozilla.org/webrtc/getdisplaymedia-now-available-in-adapter-js/
if (typeof adapter != 'undefined' && adapter.browserDetails.browser == 'firefox') {
  adapter.browserShim.shimGetDisplayMedia(window, 'screen');
}

io.socket.post('/processvideo', function(resData) {
  console.log("Response: " + resData);
});

function handleSuccess(stream) {
  const video = document.querySelector('video');
  video.srcObject = stream;
  var mediaRecorder = new MediaStreamRecorder(stream);
  mediaRecorder.mimeType = 'video/webm';
  mediaRecorder.ondataavailable = function (blob) {
    console.log("Sending Data");
    //var rawIO = io.socket._raw;
    //rawIO.emit('some:event', "using native socket.io");
    io.socket.post('/processvideo', {"vidblob": blob}, function(resData) {
      console.log("Response: " + resData);
    });
  };
  mediaRecorder.start(3000);
}

function handleError(error) {
  errorMsg(`getDisplayMedia error: ${error.name}`, error);
}

function errorMsg(msg, error) {
  const errorElement = document.querySelector('#errorMsg');
  errorElement.innerHTML += `<p>${msg}</p>`;
  if (typeof error !== 'undefined') {
    console.error(error);
  }
}

if ('getDisplayMedia' in navigator) {
  navigator.getDisplayMedia({video: true})
    .then(handleSuccess)
    .catch(handleError);
} else {
  errorMsg('getDisplayMedia is not supported');
}
Code on the server:
module.exports = async function processVideo(req, res) {
  var fs = require('fs'),
      path = require('path'),
      upload_dir = './assets/media/uploads',
      output_dir = './assets/media/outputs',
      temp_dir = './assets/media/temp';

  var params = req.allParams();

  if (req.isSocket && req.method === 'POST') {
    _upload(params.vidblob, "test.webm");
    return res.send("Hi There");
  } else {
    return res.send("Unknown Error");
  }

  function _upload(file_content, file_name) {
    var fileRootName = file_name.split('.').shift(),
        fileExtension = file_name.split('.').pop(),
        filePathBase = upload_dir + '/',
        fileRootNameWithBase = filePathBase + fileRootName,
        filePath = fileRootNameWithBase + '.' + fileExtension,
        fileID = 2;

    /* Save all of the files as different files. */
    /*
    while (fs.existsSync(filePath)) {
      filePath = fileRootNameWithBase + fileID + '.' + fileExtension;
      fileID += 1;
    }
    fs.writeFileSync(filePath, file_content);
    */

    /* Appends the binary data like you'd expect, but it's not playable. */
    fs.appendFileSync(upload_dir + '/' + 'test.file', file_content);
  }
}
Any help would be greatly appreciated!
I decided this would be difficult to develop and wouldn't really fit the project's requirements, so I decided to build an Electron app instead. Just posting this so I can resolve the question.

Chrome extension to capture video of last few minutes of active tab

I am trying to write a Chrome plugin to capture a video of the active tab. My code is based on this post.
When my page action is invoked, I start the recording:
var recordedChunks = null;
var captureOptions = { audio: false, video: true };

chrome.tabCapture.capture(captureOptions,
  function(stream) {
    if (stream) {
      recordedChunks = [];
      var options = { mimeType: "video/webm" };
      mediaRecorder = new MediaRecorder(stream, options);
      mediaRecorder.start();
      mediaRecorder.ondataavailable = function(event) {
        if (event.data.size > 0) {
          recordedChunks.push(event.data);
        }
      }
    }
  }
);
When the page action is invoked again, I stop the recording and download a file as:
mediaRecorder.stop();
var blob = new Blob(recordedChunks, {
  type: 'video/webm'
});
var url = URL.createObjectURL(blob);
var a = document.createElement('a');
document.body.appendChild(a);
a.style = 'display: none';
a.href = url;
a.download = 'test.webm';
a.click();
window.URL.revokeObjectURL(url);
stream.getVideoTracks()[0].stop();
This works great - I am able to play the downloaded test.webm video.
But I only want to record the last few minutes of video of the active tab. I do not want the recordedChunks array to grow unbounded. So, I tried something like this in the start recording action:
chrome.tabCapture.capture(captureOptions,
  function(stream) {
    // ...
    mediaRecorder.ondataavailable = function(event) {
      if (event.data.size > 0) {
        recordedChunks.push(event.data);
        // CHANGE HERE: keep only the last 1000 blobs
        while (recordedChunks.length > 1000) {
          recordedChunks.shift();
        }
      }
    }
  }
);
But with this modification, the downloaded test.webm video is not playable. How do I capture just the tail of the blob output from MediaRecorder?
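A note on why this happens, plus a hedged sketch: only the first chunk MediaRecorder emits contains the WebM header and initialization segment, so once recordedChunks.shift() discards it, the remaining chunks cannot be decoded. One workaround under that constraint is to restart the recorder on an interval so every saved Blob is a complete, independently playable file, and keep only the most recent segments (the segment length and count below are arbitrary assumptions):

// Restart the recorder periodically so each Blob is self-contained.
var segments = [];
var MAX_SEGMENTS = 40; // ~2 minutes at 3-second segments

function recordSegment(stream) {
  var recorder = new MediaRecorder(stream, { mimeType: "video/webm" });
  recorder.ondataavailable = function(event) {
    if (event.data.size > 0) {
      segments.push(event.data);
      if (segments.length > MAX_SEGMENTS) {
        segments.shift(); // safe now: every segment has its own header
      }
    }
  };
  recorder.onstop = function() { recordSegment(stream); }; // begin the next segment
  recorder.start();
  setTimeout(function() { recorder.stop(); }, 3000);
}

Joining the kept segments back into one continuous video still requires a remux step (e.g. with ffmpeg), since concatenated WebM files are not a single valid stream.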
