How to get microphone permission when a button is clicked in an extension popup - google-chrome-extension

navigator.mediaDevices.getUserMedia({
  audio: {
    echoCancellation: true,
    channelCount: 1,
    sampleRate: {
      ideal: e
    },
    sampleSize: i
  }
})
It returns a promise that is rejected, and the rejection reason is DOM:Shutdown.
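The rejection likely happens because the popup's document is torn down as soon as it loses focus, which matches the DOM:Shutdown reason. A common workaround is to request the permission from a longer-lived extension page instead of the popup. Below is a minimal sketch of that idea; the permission.html page and the requestMicrophone helper are illustrative names, not part of the original question.

// popup.js - on click, open a dedicated extension page that asks for the permission
document.getElementById('mic-button').addEventListener('click', () => {
  chrome.tabs.create({ url: chrome.runtime.getURL('permission.html') });
});

// permission.js - loaded by permission.html
async function requestMicrophone() {
  try {
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    // Permission is now granted for the extension origin; stop the tracks and close the tab.
    stream.getTracks().forEach((track) => track.stop());
    window.close();
  } catch (err) {
    console.error('Microphone permission was not granted:', err);
  }
}
requestMicrophone();

Once the permission has been granted on the extension's origin this way, subsequent getUserMedia calls made from the extension should not prompt again.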

Related

Record Google Meet audio using Puppeteer

I am trying to record a Google Meet call using Puppeteer. So far I am able to join the meeting as a bot and record the video, but I am not able to record the audio. I tried a few packages, such as puppeteer-screen-recorder, but they create an empty file for the audio.
Here is my script code.
Please change the meeting URL to replicate and test.
import puppeteer from 'puppeteer-extra';
import StealthPlugin from 'puppeteer-extra-plugin-stealth';
import { executablePath } from 'puppeteer';
import fs from 'fs';
import AudioRecorder from 'node-audiorecorder';
import * as PuppeteerScreenRecorder from 'puppeteer-screen-recorder';
import RecordRTC from 'recordrtc';
import { getStream } from "puppeteer-stream";

const file = fs.createWriteStream("./test.webm");
puppeteer.use(StealthPlugin());

(async () => {
  const browser = await puppeteer.launch({
    headless: false,
    defaultViewport: null,
    devtools: false,
    args: [
      "--window-size=1920,1080",
      "--window-position=1921,0",
      "--autoplay-policy=no-user-gesture-required",
    ],
    ignoreDefaultArgs: ["--mute-audio"],
    executablePath: executablePath(),
  });
  const page = await browser.newPage();
  const navigationPromise = page.waitForNavigation();
  const context = browser.defaultBrowserContext();
  await context.overridePermissions(
    "https://meet.google.com/", ["microphone", "camera", "notifications"]
  );
  // going to Meet after signing in
  await page.waitForTimeout(2500);
  await page.goto('https://meet.google.com/cmp-zzwo-adb' + '?hl=en', {
    waitUntil: 'networkidle0',
    timeout: 10000,
  });
  await navigationPromise;
  await page.waitForSelector('input[aria-label="Your name"]', {
    visible: true,
    timeout: 50000,
    hidden: false,
  });
  // turn off cam using Ctrl+E
  await page.waitForTimeout(1000);
  await page.keyboard.down('ControlLeft');
  await page.keyboard.press('KeyE');
  await page.keyboard.up('ControlLeft');
  await page.waitForTimeout(1000);
  // turn off mic using Ctrl+D
  await page.waitForTimeout(1000);
  await page.keyboard.down('ControlLeft');
  await page.keyboard.press('KeyD');
  await page.keyboard.up('ControlLeft');
  await page.waitForTimeout(1000);
  // click on input field to enter name
  await page.click(`input[aria-label="Your name"]`);
  // enter name
  await page.type(`input[aria-label="Your name"]`, 'Bot');
  // click on ask to join button
  await page.click(
    `button[class="VfPpkd-LgbsSe VfPpkd-LgbsSe-OWXEXe-k8QpJ VfPpkd-LgbsSe-OWXEXe-dgl2Hf nCP5yc AjY5Oe DuMIQc LQeN7 jEvJdc QJgqC"]`
  );
  const stream = await getStream(page, { audio: true, mimeType: "audio/mp3" });
  console.log("recording");
  stream.pipe(file);
  // setTimeout(async () => {
  //   await stream.destroy();
  //   file.close();
  //   console.log("finished");
  // }, 1000 * 30);
  const recorder = new PuppeteerScreenRecorder.PuppeteerScreenRecorder(page);
  await recorder.start('./report/video/simple.webm'); // supports extensions - mp4, avi, webm and mov
  // const devices = await page.evaluate(() =>
  //   navigator.mediaDevices.getUserMedia(
  //     { audio: true }
  //   )
  // )
  // let x = await navigator.mediaDevices.getUserMedia({audio: true});
  // console.log(x, "Available devices");
  // navigator.mediaDevices.getUserMedia({
  //   video: false,
  //   audio: true
  // }).then(async function (stream) {
  //   let recorder = RecordRTC(stream, {
  //     type: 'audio'
  //   });
  //   recorder.startRecording();
  //   const sleep = m => new Promise(r => setTimeout(r, m));
  //   await sleep(3000);
  //   recorder.stopRecording(function () {
  //     let blob = recorder.getBlob();
  //     invokeSaveAsDialog(blob);
  //   });
  // });
  setTimeout(async () => {
    await recorder.stop();
    await stream.destroy();
    file.close();
    console.log("finished");
    await browser.close();
  }, 15000);
})();
Here is my package.json file
{
  "name": "own",
  "version": "1.0.0",
  "description": "",
  "main": "index.js",
  "type": "module",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "dependencies": {
    "node-audiorecorder": "^3.0.0",
    "puppeteer": "^19.6.3",
    "puppeteer-extra": "^3.3.4",
    "puppeteer-extra-plugin-stealth": "^2.11.1",
    "puppeteer-screen-recorder": "^2.1.2",
    "puppeteer-stream": "^2.1.4",
    "recordrtc": "^5.6.2",
    "screencap": "^1.0.0",
    "ws": "^8.12.1"
  }
}
I have tried several of the available Puppeteer recording packages, but each one produces an empty file for the audio. Video recording works with different packages, but the audio is never captured. I want to record the audio of the meeting attendees.
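One likely cause is that puppeteer-stream needs the browser to be launched through its own launch helper, which loads the tab-capture extension, rather than through puppeteer.launch directly, and it needs a MIME type the tab's MediaRecorder can actually produce (audio/webm rather than audio/mp3). A minimal sketch along those lines, assuming the puppeteer-stream 2.x API and the same Meet URL as above:

import { launch, getStream } from "puppeteer-stream";
import fs from "fs";

const file = fs.createWriteStream("./meet-audio.webm");

(async () => {
  // launch() comes from puppeteer-stream and wires up the capture extension for us.
  const browser = await launch({
    headless: false,
    defaultViewport: null,
    args: ["--autoplay-policy=no-user-gesture-required"],
  });
  const page = await browser.newPage();
  await page.goto("https://meet.google.com/cmp-zzwo-adb?hl=en");

  // Capture only the tab's audio; webm is a container MediaRecorder can produce.
  const stream = await getStream(page, { audio: true, video: false, mimeType: "audio/webm" });
  stream.pipe(file);

  setTimeout(async () => {
    await stream.destroy();
    file.close();
    await browser.close();
  }, 30000);
})();

The join/keyboard automation from the original script would still be needed before starting the capture; it is omitted here to keep the sketch focused on the audio path.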

desktopCapturer example... how to make it work for a specific application

I'm trying to follow this tutorial:
https://www.tutorialspoint.com/electron/electron_audio_and_video_capturing.htm
The first part of the tutorial worked fine: I can stream A/V from my PC camera and mic into my Electron app. But what I'm trying to do now is stream audio and video from a specific application running on my Windows desktop via the desktopCapturer object.
Problem
I'm not getting any errors, but the Electron app's <video> tag is not showing the stream from myappthatstreamsAV.
Code
I changed my index.html code to look like this (I only changed the contents of the <script> tag):
<!DOCTYPE html>
<html>
  <head>
    <meta charset="UTF-8">
    <title>Audio and Video</title>
  </head>
  <body>
    <video autoplay></video>
    <script type="text/javascript">
      var desktopCapturer = require('electron').desktopCapturer;
      desktopCapturer.getSources({ types: ['window', 'screen'] }, (error, sources) => {
        if (error) throw error
        for (let i = 0; i < sources.length; ++i) {
          if (sources[i].name === 'myappthatstreamsAV') {
            navigator.webkitGetUserMedia({
              audio: true,
              video: {
                mandatory: {
                  chromeMediaSource: 'desktop',
                  chromeMediaSourceId: sources[i].id,
                  minWidth: 1280,
                  maxWidth: 1280,
                  minHeight: 720,
                  maxHeight: 720
                }
              }
            }, handleStream, handleError)
            return
          }
        }
      })

      function handleStream (stream) {
        document.querySelector('video').src = URL.createObjectURL(stream)
      }

      function handleError (e) {
        console.log(e)
      }
    </script>
  </body>
</html>
and the index.js looks like this:
const { app, BrowserWindow } = require('electron')
const url = require('url')
const path = require('path')

let win

// Set the path where recordings will be saved
app.setPath("userData", __dirname + "/saved_recordings")

function createWindow() {
  win = new BrowserWindow({
    width: 800,
    height: 600,
    webPreferences: {
      nodeIntegration: true
    }
  })
  win.loadURL(url.format({
    pathname: path.join(__dirname, 'index.html'),
    protocol: 'file:',
    slashes: true
  }))
}

app.on('ready', createWindow)
app.on('ready', createWindow)
What I've tried so far:
I added some debug statements like this:
<script type="text/javascript">
  var desktopCapturer = require('electron').desktopCapturer;
  console.log("1")
  console.log(desktopCapturer)
  desktopCapturer.getSources({ types: ['window', 'screen'] }, (error, sources) => {
    console.log("2")
    if (error) throw error
    for (let i = 0; i < sources.length; ++i) {
      console.log(sources[i].name);
      console.log("3")
and basically, it executes only the first two console.logs:
console.log("1")
console.log(desktopCapturer)
It never gets to 2 or 3.
Then I changed my code to use the promise-based API (in current Electron versions desktopCapturer.getSources returns a Promise rather than taking a callback, which would explain why the callback version never ran):
var desktopCapturer = require('electron').desktopCapturer;
console.log("are you here?")
console.log(desktopCapturer)

desktopCapturer.getSources({ types: ['window', 'screen'] }).then(async sources => {
  for (const source of sources) {
    if (source.name === 'mystreamApp') {
      try {
        const stream = await navigator.mediaDevices.getUserMedia({
          audio: true,
          video: {
            mandatory: {
              chromeMediaSource: 'desktop',
              chromeMediaSourceId: source.id,
              minWidth: 1280,
              maxWidth: 1280,
              minHeight: 720,
              maxHeight: 720
            }
          }
        })
        handleStream(stream)
      } catch (e) {
        handleError(e)
      }
      return
    }
  }
})

function handleStream (stream) {
  const video = document.querySelector('video')
  video.srcObject = stream
  video.onloadedmetadata = (e) => video.play()
}

function handleError (e) {
  console.log(e)
}
and now I see the video stream.
Audio is still not working, but I'll open another question for that.
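For the audio part: on Windows (and ChromeOS) Chromium can expose system loopback audio through the same desktop source, so it can be requested in the same getUserMedia call; this is not supported on macOS. A minimal sketch of that constraint, assuming it replaces the getUserMedia call inside the loop above:

// Inside the .then(async sources => ...) block, for the matching source:
const stream = await navigator.mediaDevices.getUserMedia({
  // System (loopback) audio; only honoured on Windows and ChromeOS.
  audio: {
    mandatory: {
      chromeMediaSource: 'desktop'
    }
  },
  video: {
    mandatory: {
      chromeMediaSource: 'desktop',
      chromeMediaSourceId: source.id
    }
  }
})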

Electron MediaRecorder record audio only from current window

I'm trying to record only the video and audio from the current application window, but I am only able to record the video together with the entire desktop's audio.
Here is what I have:
async function selectSource(source) {
  const constraints = {
    audio: {
      mandatory: {
        chromeMediaSource: 'desktop',
        chromeMediaSourceId: source.id
      }
    },
    video: {
      mandatory: {
        chromeMediaSource: 'desktop',
        chromeMediaSourceId: source.id
      }
    }
  };
  const stream = await navigator.mediaDevices.getUserMedia(constraints);
  const options = { mimeType: 'video/webm; codecs=vp9' };
  mediaRecorder = new MediaRecorder(stream, options);
}
Thanks
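As far as I know, Chromium's desktop capture cannot scope loopback audio to a single window, so passing the window's source.id in the audio constraint still yields system-wide audio. One workaround, if the sound you want is produced inside your own window, is to route it through the Web Audio API and merge that track with the captured video. A sketch of that idea; audioElement is a placeholder for whatever element in your window produces the sound:

async function selectSource(source) {
  // Video of just this window, as before.
  const videoStream = await navigator.mediaDevices.getUserMedia({
    audio: false,
    video: {
      mandatory: {
        chromeMediaSource: 'desktop',
        chromeMediaSourceId: source.id
      }
    }
  });

  // Route the window's own audio through Web Audio instead of desktop loopback.
  const audioCtx = new AudioContext();
  const sourceNode = audioCtx.createMediaElementSource(audioElement); // placeholder element
  const destNode = audioCtx.createMediaStreamDestination();
  sourceNode.connect(destNode);
  sourceNode.connect(audioCtx.destination); // keep playing the audio locally too

  // Merge the window's video track with the app-local audio track.
  const combined = new MediaStream([
    ...videoStream.getVideoTracks(),
    ...destNode.stream.getAudioTracks()
  ]);

  mediaRecorder = new MediaRecorder(combined, { mimeType: 'video/webm; codecs=vp9' });
}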

Screen Sharing and video/audio calling using WebRTC and Electron on Mac OS

I am trying to create an Electron application that can share the desktop together with the system audio using WebRTC. If I set the constraints:
const constraints = {
  audio: {
    mandatory: {
      chromeMediaSource: 'desktop'
    }
  },
  video: {
    mandatory: {
      chromeMediaSource: 'desktop'
    }
  }
}
I get this audio issue on macOS:
ERROR:adm_helpers.cc(73)] Failed to query stereo recording.
and then "NotFoundError: Requested device not found".
You need to use Electron's desktopCapturer API.
Example -
// In the renderer process.
const { desktopCapturer } = require('electron')

desktopCapturer.getSources({ types: ['window', 'screen'] }, (error, sources) => {
  if (error) throw error
  for (let i = 0; i < sources.length; ++i) {
    if (sources[i].name === 'Electron') {
      navigator.mediaDevices.getUserMedia({
        audio: false,
        video: {
          mandatory: {
            chromeMediaSource: 'desktop',
            chromeMediaSourceId: sources[i].id,
            minWidth: 1280,
            maxWidth: 1280,
            minHeight: 720,
            maxHeight: 720
          }
        }
      })
        .then((stream) => handleStream(stream))
        .catch((e) => handleError(e))
      return
    }
  }
})

function handleStream (stream) {
  const video = document.querySelector('video')
  video.srcObject = stream
  video.onloadedmetadata = (e) => video.play()
}

function handleError (e) {
  console.log(e)
}
And set the audio flag above to true to capture the audio while screen sharing.
More details here - https://electronjs.org/docs/api/desktop-capturer
For macOS users: you need to get the audio and video streams separately and then merge them, like so:
const stream = await navigator.mediaDevices.getUserMedia({
  audio: false,
  video: {
    mandatory: {
      chromeMediaSource: 'desktop',
      chromeMediaSourceId: source.id
    }
  }
});

navigator.mediaDevices.getUserMedia({
  audio: {
    mandatory: {
      chromeMediaSource: 'desktop'
    }
  },
  video: false
}).then(function (audioStream) {
  var audioTracks = audioStream.getAudioTracks();

  // merge audio and video tracks
  if (audioTracks.length > 0) {
    stream.addTrack(audioTracks[0]);
  }

  recorder = new MediaRecorder(stream, {
    mimeType: 'YOUR MIME TYPE'
  });
  recorder.ondataavailable = yourDataHandler;
  recorder.onstop = yourStopHandler;
  recorder.start();
}).catch(function (err) {
  console.error('audioTrackError', err);
});

Alexa Audio Player Directive

I'm trying to build an Alexa skill that can play an audio file. I'm trying to send an AudioPlayer.Play directive from the LaunchRequest handler, but when I use this code, I get no response back from my Alexa. Does it look correct?
const LaunchRequestHandler = {
  canHandle(handlerInput) {
    return handlerInput.requestEnvelope.request.type === 'LaunchRequest';
  },
  handle(handlerInput) {
    console.log('IN LAUNCHREQUEST');
    return handlerInput.responseBuilder
      .addDirective({
        type: 'AudioPlayer.Play',
        playBehavior: 'REPLACE_ALL',
        audioItem: {
          stream: {
            token: "0",
            url: "myurlhere",
            offsetInMilliseconds: 0
          }
        }
      })
  }
};
You must return a "built" response from the handler, so in your case the code would be:
const LaunchRequestHandler = {
  canHandle(handlerInput) {
    return handlerInput.requestEnvelope.request.type === 'LaunchRequest';
  },
  handle(handlerInput) {
    console.log('IN LAUNCHREQUEST');
    return handlerInput.responseBuilder
      .addDirective({
        type: 'AudioPlayer.Play',
        playBehavior: 'REPLACE_ALL',
        audioItem: {
          stream: {
            token: "0",
            url: "myurlhere",
            offsetInMilliseconds: 0
          }
        }
      })
      .getResponse();
    // ^^^ add this line
  }
};
If you are using the Alexa SDK v2 (https://github.com/alexa/alexa-skills-kit-sdk-for-nodejs), you can use its built-in methods to play audio. The following methods are available for playing long-form audio:
addAudioPlayerPlayDirective(playBehavior: interfaces.audioplayer.PlayBehavior, url: string, token: string, offsetInMilliseconds: number, expectedPreviousToken?: string, audioItemMetadata? : AudioItemMetadata): this;
addAudioPlayerStopDirective(): this;
addAudioPlayerClearQueueDirective(clearBehavior: interfaces.audioplayer.ClearBehavior): this;
More information can be found on https://ask-sdk-for-nodejs.readthedocs.io/en/latest/Building-Response.html
The following is a code snippet that I use in my Lambda to play audio.
// Create the image to be displayed with the song
const metadata = {
  title: 'Stopwatch Audio',
  art: {
    sources: [{
      url: imageUrl
    }]
  }
};

handlerInput.responseBuilder
  .speak(speechText)
  .addAudioPlayerPlayDirective("REPLACE_ALL", audiofile, audiofile, 0, null, metadata)
  .withShouldEndSession(true)
  .getResponse();
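Not covered in the snippet above, but worth keeping in mind: a skill that streams long-form audio also needs handlers for the AudioPlayer lifecycle requests and the built-in pause/stop intents, otherwise those requests from the device go unanswered. A minimal sketch with the ASK SDK v2 (handler names are illustrative):

// Acknowledge AudioPlayer lifecycle events (these responses may not contain speech).
const AudioPlayerEventHandler = {
  canHandle(handlerInput) {
    return handlerInput.requestEnvelope.request.type.startsWith('AudioPlayer.');
  },
  handle(handlerInput) {
    return handlerInput.responseBuilder.getResponse();
  }
};

// Built-in intents for pausing/stopping playback.
const PauseIntentHandler = {
  canHandle(handlerInput) {
    const request = handlerInput.requestEnvelope.request;
    return request.type === 'IntentRequest'
      && (request.intent.name === 'AMAZON.PauseIntent' || request.intent.name === 'AMAZON.StopIntent');
  },
  handle(handlerInput) {
    return handlerInput.responseBuilder
      .addAudioPlayerStopDirective()
      .getResponse();
  }
};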
