Electron MediaRecorder record audio only from current window - audio

I'm trying to only record the video and audio from the current application window but I am only able to record the video and the entire desktop audio.
Here is what I have:
async function selectSource(source) {
const constraints = {
audio: {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: source.id
}
},
video: {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: source.id
}
}
};
const stream = await navigator.mediaDevices
.getUserMedia(constraints);
const options = { mimeType: 'video/webm; codecs=vp9' };
mediaRecorder = new MediaRecorder(stream, options);
}
Thanks

Related

desktopCapturer example... how to make it work for specific application

I'm trying to follow this tutorial:
https://www.tutorialspoint.com/electron/electron_audio_and_video_capturing.htm
The first part of the tutorial worked fine... I can stream av from my pc camera and mic... into my electron app. But now what I'm trying to do is stream audio and video from a specific application running on my windows desktop via the desktopCapturer object.
Problem
I'm not getting any errors. But the electron app's video html tag is not showing the stream from myappthatstreamsAV.
Code
I changed my index.html code to look like this: (just changed stuff inside the tag)
<!DOCTYPE html>
<html>
<head>
<meta charset = "UTF-8">
<title>Audio and Video</title>
</head>
<body>
<video autoplay></video>
<script type = "text/javascript">
var desktopCapturer = require('electron').desktopCapturer;
desktopCapturer.getSources({types: ['window', 'screen']}, (error, sources) => {
if (error) throw error
desktopCapturer.getSources({types: ['window', 'screen']}, (error, sources) => {
if (error) throw error
for (let i = 0; i < sources.length; ++i) {
if (sources[i].name === 'myappthatstreamsAV') {
navigator.webkitGetUserMedia({
audio: true,
video: {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: sources[i].id,
minWidth: 1280,
maxWidth: 1280,
minHeight: 720,
maxHeight: 720
}
}
}, handleStream, handleError)
return
}
}
})
function handleStream (stream) {
document.querySelector('video').src = URL.createObjectURL(stream)
}
function handleError (e) {
console.log(e)
}
</script>
</body>
</html>
and the index.js looks like this:
const {app, BrowserWindow} = require('electron')
const url = require('url')
const path = require('path')
let win
// Set the path where recordings will be saved
app.setPath("userData", __dirname + "/saved_recordings")
function createWindow() {
win = new BrowserWindow({width: 800, height: 600,
webPreferences: {
nodeIntegration: true
}
})
win.loadURL(url.format({
pathname: path.join(__dirname, 'index.html'),
protocol: 'file:',
slashes: true
}))
}
app.on('ready', createWindow)
What I've tried so far:
I added some debug statements like this:
<script type = "text/javascript">
var desktopCapturer = require('electron').desktopCapturer;
console.log("1")
console.log(desktopCapturer)
desktopCapturer.getSources({types: ['window', 'screen']}, (error, sources) => {
console.log("2")
if (error) throw error
for (let i = 0; i < sources.length; ++i) {
console.log((sources[i].name));
console.log("3")
and basically, it executes only the first two console.logs:
console.log("1")
console.log(desktopCapturer)
It never gets to 2 or 3.
Changed my code to look like this:
var desktopCapturer = require('electron').desktopCapturer;
console.log("are you here?")
console.log(desktopCapturer)
desktopCapturer.getSources({ types: ['window', 'screen'] }).then(async sources => {
for (const source of sources) {
if (source.name === 'mystreamApp') {
try {
const stream = await navigator.mediaDevices.getUserMedia({
audio: true,
video: {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: source.id,
minWidth: 1280,
maxWidth: 1280,
minHeight: 720,
maxHeight: 720
}
}
})
handleStream(stream)
} catch (e) {
handleError(e)
}
return
}
}
})
function handleStream (stream) {
const video = document.querySelector('video')
video.srcObject = stream
video.onloadedmetadata = (e) => video.play()
}
function handleError (e) {
console.log(e)
}
and now I see the video stream.
Audio is still not working, but I'll open another question for that.

Getting error on video calling application build with simple-peer and react

Recently I made a video calling website using simple-peer, react.js and socket.io. It's working fine between laptop web browsers, but I am getting the below error when video calling from a mobile web browser to a laptop web browser. Can someone please advise what is causing this error and how to rectify it?
code-
function VideoComponent(props) {
//const [yourID, setYourID] = useState("");
//const [users, setUsers] = useState({});
const [stream, setStream] = useState();
const [receivingCall, setReceivingCall] = useState(props.receivingCall);
const [caller, setCaller] = useState(props.caller);
const [callerSignal, setCallerSignal] = useState(props.callerSignal);
const [callAccepted, setCallAccepted] = useState(props.callAccepted);
const [open, setOpen] = useState(false)
const [calling, setCalling] = useState(false)
const userVideo = useRef();
const partnerVideo = useRef();
const socket = props.socket
//const ENDPOINT = '/'
useEffect(() => {
if(props.useAudio && props.useVideo){
navigator.mediaDevices.getUserMedia({ video: props.useVideo, audio: props.useAudio }).then(stream => {
setStream(stream);
if (userVideo.current && props.useAudio && props.useVideo) {
userVideo.current.srcObject = stream;
}
})
}
//socket = io(ENDPOINT);
/*socket.on("hey", (data) => {
setReceivingCall(true);
setCaller(data.from);
setCallerSignal(data.signal);
})*/
}, []);
const callPeer=()=> {
setCalling(true)
if(props.selectedUser[0]['status'] !== 'online'){
setOpen(true)
}
if(props.useAudio && props.useVideo){
const peer = new Peer({
initiator: true,
trickle: false,
stream: stream,
});
peer.on("signal", data => {
socket.emit("callUser", { userToCall: props.selectedUser[0]['_id'],
signalData: data, from: props.userDetail[0]['_id']})
})
peer.on("stream", stream => {
if (partnerVideo.current) {
console.log('receiving stream from partner')
partnerVideo.current.srcObject = stream;
}
});
socket.on("callAccepted", signal => {
setCallAccepted(true);
peer.signal(signal);
})
}
}
function acceptCall() {
setCallAccepted(true);
if(props.useAudio && props.useVideo){
const peer = new Peer({
initiator: false,
trickle: false,
stream: stream,
});
peer.on("signal", data => {
socket.emit("acceptCall", { signal: data, receiverID: caller })
})
peer.on("stream", stream => {
partnerVideo.current.srcObject = stream;
});
peer.signal(callerSignal);
}
}
let UserVideo;
if (stream) {
UserVideo = (
<video className='newVideo1' playsInline muted ref={userVideo} autoPlay />
);
}
let PartnerVideo;
if (callAccepted) {
PartnerVideo = (
<video className='newVideo' playsInline ref={partnerVideo} autoPlay />
);
}
let incomingCall;
if (receivingCall && !callAccepted) {
incomingCall = (
<div className='incomingCall'>
<h1>{caller} is calling you</h1>
<Button
variant="contained"
color="secondary"
onClick={acceptCall}
className='acceptButton'
>
Accept call
</Button>
</div>
)
}
Error while calling from mobile web browser(Chrome) to laptop web browser(Chrome)
index.js:17 Uncaught Error: Connection failed.
at h (index.js:17)
at f.value (index.js:654)
at RTCPeerConnection.t._pc.onconnectionstatechange (index.js:119)
It could be related to TURN server. You need to set up one if you are planning to deploy this app.
based on your code, it looks inspired by https://github.com/coding-with-chaim/react-video-chat/blob/master/client/src/App.js
Use the same TURN settings given in his code and see if it works. It worked for me.

Screen Sharing and video/audio calling using WebRTC and Electron on Mac OS

I am trying to create an electron application which can share the desktop with the system audio using webrtc and if I set the constraints :
const constraints = {
audio: {
mandatory: {
chromeMediaSource: 'desktop'
}
},
video: {
mandatory: {
chromeMediaSource: 'desktop'
}
}
}
I got this issue Mac OS audio:
ERROR:adm_helpers.cc(73)] Failed to query stereo recording. and then " NotFoundError: Requested device not found "
You need to use electron's desktopCapturer api.
Example -
// In the renderer process.
const {desktopCapturer} = require('electron')
desktopCapturer.getSources({types: ['window', 'screen']}, (error, sources) => {
if (error) throw error
for (let i = 0; i < sources.length; ++i) {
if (sources[i].name === 'Electron') {
navigator.mediaDevices.getUserMedia({
audio: false,
video: {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: sources[i].id,
minWidth: 1280,
maxWidth: 1280,
minHeight: 720,
maxHeight: 720
}
}
})
.then((stream) => handleStream(stream))
.catch((e) => handleError(e))
return
}
}
})
function handleStream (stream) {
const video = document.querySelector('video')
video.srcObject = stream
video.onloadedmetadata = (e) => video.play()
}
function handleError (e) {
console.log(e)
}
And use the audio flag above for getting the audio while screen sharing.
More details here - https://electronjs.org/docs/api/desktop-capturer
For MacOS users you need to get audio and video streams separately, then merge the streams like so:
const stream = await navigator.mediaDevices.getUserMedia({
audio: false,
video: {
mandatory: {
chromeMediaSource: 'desktop',
chromeMediaSourceId: source.id
}
}
});
navigator.mediaDevices.getUserMedia({
audio: {
mandatory: {
chromeMediaSource: 'desktop'
}
},
video: false
}).then(function(audioStream) {
var audioTracks = audioStream.getAudioTracks();
// merge audio and video tracks
if(audioTracks.length > 0) {
stream.addTrack(audioTracks[0]);
}
recorder = new MediaRecorder(stream, {
mimeType: 'YOUR MIME TYPE'
});
recorder.ondataavailable = yourDataHandler;
recorder.onstop = yourStopHandler;
recorder.start();
}).catch(function(err) {
console.error('audioTrackError', err);
});

Alexa Audio Player Directive

I'm trying to build an Alexa skill that can play an audio file. I'm trying to send an Audio Player Play directive in the Launch Request, but when I use this code, I get no response back from my Alexa. Does it look correct?
const LaunchRequestHandler = {
canHandle(handlerInput) {
return handlerInput.requestEnvelope.request.type === 'LaunchRequest';
},
handle(handlerInput) {
console.log('IN LAUNCHREQUEST');
return handlerInput.responseBuilder
.addDirective({
type: 'AudioPlayer.Play',
playBehavior: 'REPLACE_ALL',
audioItem: {
stream: {
token: "0",
url: "myurlhere",
offsetInMilliseconds: 0
}
}
})
}
};
You must return a "built" response in the handler. So in your case the code would be:
const LaunchRequestHandler = {
canHandle(handlerInput) {
return handlerInput.requestEnvelope.request.type === 'LaunchRequest';
},
handle(handlerInput) {
console.log('IN LAUNCHREQUEST');
return handlerInput.responseBuilder
.addDirective({
type: 'AudioPlayer.Play',
playBehavior: 'REPLACE_ALL',
audioItem: {
stream: {
token: "0",
url: "myurlhere",
offsetInMilliseconds: 0
}
}
})
.getResponse();
// ^^^ add this line
}
};
If you are using Alexa SDK v2 (https://github.com/alexa/alexa-skills-kit-sdk-for-nodejs) then you can use built-in methods to play audio. The following methods are available for playing long-form audio.
addAudioPlayerPlayDirective(playBehavior: interfaces.audioplayer.PlayBehavior, url: string, token: string, offsetInMilliseconds: number, expectedPreviousToken?: string, audioItemMetadata? : AudioItemMetadata): this;
addAudioPlayerStopDirective(): this;
addAudioPlayerClearQueueDirective(clearBehavior: interfaces.audioplayer.ClearBehavior): this;
More information can be found on https://ask-sdk-for-nodejs.readthedocs.io/en/latest/Building-Response.html
Following is a code snippet that I use in my lambda to play audio.
//Create Image to be displayed with song
const metadata = {
title: 'Stopwatch Audio',
art: {
sources: [{
url: imageUrl
}]
}
};
handlerInput.responseBuilder.speak(speechText).addAudioPlayerPlayDirective("REPLACE_ALL", audiofile, audiofile, 0, null, metadata).withShouldEndSession(true).getResponse();

how to get microphone permission when button is clicked in extension popup

navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: !0,
channelCount: 1,
sampleRate: {
ideal: e
},
sampleSize: i
}
})
It returns a rejected promise with the value "Rejected" and the status DOM:Shutdown.

Resources