I have a problem connecting a peer-to-peer video call from the native Android browser to Safari 11 on desktop (and vice versa); here is the error:
Unhandled Promise Rejection: OperationError (DOM Exception 34): Failed to set remote offer sdp: Session error code: ERROR_CONTENT. Session error description: Failed to set remote video description send parameters..
I'm currently stuck on this issue.
Here is my whole client video-chat code, thanks.
import app from '../../../config';
const videoChatService = app.service('participants/video-chat');
let localVid;
let remoteVid;
let localstream;
let rtcPeerConn;
let conversationId;
let userId;
let isSdpSent = false;
let hasAddTrack;
const configuration = {
iceServers: [{
urls: 'stun:stun.l.google.com:19302',
},
{
urls: 'stun:stun.services.mozilla.com',
username: 'louis@mozilla.com',
credential: 'webrtcdemo',
},
// {
// urls: 'turn:mdn-samples.mozilla.org',
// username: 'webrtc',
// credential: 'turnserver' }
] };
function closeVideoCall() {
if (rtcPeerConn) {
rtcPeerConn.onaddstream = null;
rtcPeerConn.ontrack = null;
rtcPeerConn.onremovestream = null;
rtcPeerConn.onicecandidate = null;
if (remoteVid.srcObject) {
remoteVid.srcObject.getTracks().forEach(track => track.stop());
remoteVid.srcObject = null;
}
if (localVid.srcObject) {
localVid.srcObject.getTracks().forEach(track => track.stop());
localVid.srcObject = null;
}
rtcPeerConn.close();
rtcPeerConn = null;
}
}
// set local sdp
function setLocalSDP(desc) {
console.log('>>> setLocalSDP', rtcPeerConn);
return rtcPeerConn.setLocalDescription(desc);
}
function logError(error) {
console.log(`>>>>logError, ${error.name}: ${error.message}`);
}
function handleNegotiationNeededEvent() {
console.log('>>>>> on negotiation called');
console.log('query >>>', conversationId, userId);
if (!isSdpSent) {
rtcPeerConn.createOffer()
.then(setLocalSDP)
.then(() => {
isSdpSent = true;
videoChatService.patch(null, {
data: {
type: 'video-offer',
message: JSON.stringify(rtcPeerConn.localDescription),
},
}, {
query: {
conversation_id: conversationId,
user_id: userId,
},
}).then().catch(e => { console.log('patch error', e); });
})
.catch(logError);
}
}
function handleRemoveStreamEvent() {
closeVideoCall();
}
function handleTrackEvent (evt) {
console.log('>>>>> going to add their stream...', evt);
remoteVid = document.getElementById('remoteStream');
if (!remoteVid.srcObject) {
remoteVid.srcObject = evt.streams[0];
}
}
function handleAddStreamEvent(evt) {
console.log('>>>>> stream added');
remoteVid = document.getElementById('remoteStream');
remoteVid.srcObject = evt.stream;
}
function handleICECandidateEvent(evt) {
console.log('>>>> onicecandidate', evt);
console.log('query >>>', conversationId, userId);
if (evt.candidate) {
videoChatService.patch(null, {
data: {
type: 'new-ice-candidate',
message: JSON.stringify(evt.candidate),
},
}, {
query: {
conversation_id: conversationId,
user_id: userId,
},
});
}
}
function handleICEConnectionStateChangeEvent() {
console.log(`>>>>> ICE connection state changed to ${rtcPeerConn.iceConnectionState}`);
switch (rtcPeerConn.iceConnectionState) {
case 'closed':
case 'failed':
case 'disconnected':
console.log('>>>> disconnected');
closeVideoCall();
break;
}
}
function handleSignalingStateChangeEvent() {
console.log(`>>>>> WebRTC signaling state changed to: ${rtcPeerConn.signalingState}`);
switch (rtcPeerConn.signalingState) {
case 'closed':
console.log('>>>> closed');
closeVideoCall();
break;
}
}
function createPeerConnection() {
rtcPeerConn = new RTCPeerConnection(configuration);
console.log('>>>>> create peer connection', rtcPeerConn);
hasAddTrack = (rtcPeerConn.addTrack !== undefined);
rtcPeerConn.onicecandidate = handleICECandidateEvent;
rtcPeerConn.onnegotiationneeded = handleNegotiationNeededEvent;
rtcPeerConn.oniceconnectionstatechange = handleICEConnectionStateChangeEvent;
rtcPeerConn.onsignalingstatechange = handleSignalingStateChangeEvent;
rtcPeerConn.onremovestream = handleRemoveStreamEvent;
if (hasAddTrack) {
rtcPeerConn.ontrack = handleTrackEvent;
} else {
rtcPeerConn.onaddstream = handleAddStreamEvent;
}
}
function handleGetUserMediaError(e) {
switch (e.name) {
case 'NotFoundError':
alert('Unable to open your call because no camera and/or microphone were found.');
break;
case 'SecurityError':
case 'PermissionDeniedError':
// Do nothing; this is the same as the user canceling the call.
break;
default:
alert(`Error opening your camera and/or microphone: ${e.message}`);
break;
}
}
// add video to local and add to track
function gotStream(stream) {
console.log('>>>> gotStream', stream);
localVid.srcObject = stream;
localstream = stream;
if (hasAddTrack) {
stream.getTracks().forEach(track => rtcPeerConn.addTrack(track, localstream));
} else {
rtcPeerConn.addStream(localstream);
}
}
// start signaling
export function startSignaling(conversation_id, user_id) {
localVid = document.getElementById('localStream');
remoteVid = document.getElementById('remoteStream');
console.log('>>>>> startSignaling');
conversationId = conversation_id;
userId = user_id;
return () => {
if (!rtcPeerConn) {
createPeerConnection();
navigator.mediaDevices.getUserMedia({
audio: true,
video: {
facingMode: 'user',
},
})
.then(gotStream)
.catch(handleGetUserMediaError);
}
};
}
export function handleVideoOfferMsg(conversation_id, user_id, message) {
console.log('>>>>> handleVideoOfferMsg');
localstream = null;
conversationId = conversation_id;
userId = user_id;
localVid = document.getElementById('localStream');
remoteVid = document.getElementById('remoteStream');
return () => {
createPeerConnection();
console.log('query >>>', conversationId, userId);
const sdp = new RTCSessionDescription(message);
// sdp.sdp = sdp.replace('a=setup:active', 'a=setup:passive');
rtcPeerConn.setRemoteDescription(sdp)
.then(() => (
navigator.mediaDevices.getUserMedia({
audio: true,
video: {
facingMode: 'user',
},
})
))
.then(gotStream)
.then(() => (
rtcPeerConn.createAnswer()
))
.then(setLocalSDP)
.then(() => {
videoChatService.patch(null, {
data: {
type: 'video-answer',
message: JSON.stringify(rtcPeerConn.localDescription),
},
}, {
query: {
conversation_id: conversationId,
user_id: userId,
},
}).then().catch(e => { console.log('patch error', e); });
})
.catch(logError);
};
}
export function handleVideoAnswerMsg(message) {
console.log('>>>>> handle video answer message', message);
return () => {
const sdp = new RTCSessionDescription(message);
rtcPeerConn.setRemoteDescription(sdp)
.catch(logError);
};
}
// Adding ice candidate
export function addIceCandidate(message) {
console.log('>>>> addIceCandidate', message);
return () => {
const candidate = new RTCIceCandidate(message);
rtcPeerConn.addIceCandidate(candidate)
.then(() => {
console.log('>>> candidate added ');
})
.catch(e => {
console.log('Error candidate', e);
});
};
}
There are two different issues that can make a WebRTC connection between Chrome on Android and iOS/Safari fail:
1) No H.264 implementation on the device
Chrome for Android only has a hardware implementation of H.264; there is no software implementation. At the moment H.264 only works on devices with a Qualcomm processor (KitKat and later) or a Samsung Exynos processor (Lollipop and later). Since Apple only supports H.264, other Android devices can't connect with iOS and Safari.
2) There is a bug in Chrome for Android:
Chrome for Android does not offer/answer the H.264 Constrained Baseline Profile.
Because Apple only supports H.264, Android/Chrome cannot connect with iOS at the moment.
This will be fixed in Chrome for Android 65 (currently in Canary). See this for more information.
Your error message matches this bug exactly, so I am pretty sure this is the problem. In the end it doesn't matter which of the two hits you, but you should be aware of both.
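One way to confirm which case applies on a given device is to generate a throwaway offer and check whether H264 appears in the SDP at all; a minimal sketch (not part of the original post) that you can run in the browser console:
// Create a throwaway connection, ask for a video m-line in the offer,
// and check whether the browser lists H264 among the offered codecs.
function checkH264Support() {
  const pc = new RTCPeerConnection();
  return pc.createOffer({ offerToReceiveVideo: true }).then(offer => {
    pc.close();
    return /\bH264\b/i.test(offer.sdp);
  });
}

checkH264Support().then(ok => console.log('H.264 offered by this browser:', ok));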
This might be related to this issue:
Chrome on Android does not always support H.264, while Safari only supports H.264.
To verify that it is a video codec issue, as mentioned in the other answers, you can adjust the stream constraints:
navigator.mediaDevices.getUserMedia({
video: false,
audio: true
})
If the ICE handling succeeds after this change, it is likely that the SDP contained a codec that is not supported on the other side.
You could then extend your code to fall back to audio only, e.g. after displaying an error message in your GUI.
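For the fallback itself, a minimal sketch (not from the original code) of retrying getUserMedia with audio only when the audio+video request fails; showWarning is a placeholder for whatever your GUI uses to display the message:
// Try audio+video first; if that fails, tell the user and retry audio only.
// showWarning() is a hypothetical GUI helper, not part of the original code.
function getStreamWithFallback() {
  return navigator.mediaDevices.getUserMedia({ audio: true, video: { facingMode: 'user' } })
    .catch(err => {
      console.log('Video capture failed, falling back to audio only:', err);
      showWarning('Camera unavailable - continuing with audio only.');
      return navigator.mediaDevices.getUserMedia({ audio: true, video: false });
    });
}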
Related
I am writing a Node.js program using Google Cloud Speech-to-Text. The audio recorded by the sox recorder of node-record-lpcm16 is piped to the streamingRecognize function. At the end of each spoken sentence a message with isFinal set to true should be returned, but whether it arrives depends on the microphone: sometimes (when using the MacBook Pro internal mic) it is returned, and sometimes (when using AirPods Pro) it is not.
Also, this issue only occurs when I use ko-KR for recognition; it works well with en-US as the recognition language.
const record = require('node-record-lpcm16');
const speech = require('@google-cloud/speech');
module.exports.getGoogleCloudSpeech = (speechProviderOptions) => {
return {
startListening(dispatch) {
const client = new speech.SpeechClient();
console.log('[speech] Invoked');
const request = {
config: {
encoding: 'LINEAR16',
sampleRateHertz: 16000,
languageCode: 'ko-KR',
},
interimResults: true,
};
let stopTimeout;
let stopped = false;
let expireTime;
// Create a recognize stream
const recognizeStream = client
.streamingRecognize(request)
.on('error', () => {
dispatch({ type: 'error' });
})
.on('data', data => {
if (data.results[0]) {
console.log(data);
const result = data.results[0].alternatives[0];
if (data.results[0].alternatives[0]) {
dispatch({
type: 'result',
result: {
transcript: result.transcript,
isFinal: data.results[0].isFinal,
},
});
}
}
});
// Start recording and send the microphone input to the Speech API
let totalSize = 0;
let recording = record
.record({
sampleRate: 16000,
threshold: 0,
recordProgram: 'sox',
});
recording.stream()
.on('data', buf => {
totalSize += buf.length;
if (!expireTime) {
console.log('[speech] Ready to listen');
expireTime = Date.now() + 59000;
dispatch({ type: 'ready', expiryTime: expireTime });
stopTimeout = setTimeout(() => {
if (!stopped) {
recording.stop();
console.log('[speech] Stop (timeout)');
stopped = true;
}
}, expireTime - Date.now());
};
})
.on('error', console.error)
.pipe(
recognizeStream,
{ end: true },
)
.on('end', () => {
const time = totalSize / 16000 / 2;
console.log(
'[speech] API end, total size',
totalSize,
time.toFixed(2) + 's'
);
dispatch({ type: 'end' });
});
return {
stop() {
clearTimeout(stopTimeout);
if (!stopped) {
recording.stop();
console.log('[speech] Stop (user)');
stopped = true;
}
}
};
}
}
}
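One thing that can help when isFinal never arrives is to log the extra metadata on each streaming response, so you can see whether the service ever detects an end of utterance for the ko-KR stream; a small sketch of a more verbose data handler (field names follow the StreamingRecognizeResponse message):
// Verbose logging for the recognize stream: besides the transcript, log the
// speech event type, stability, and the isFinal flag of the first result.
function logStreamingResponse(data) {
  console.log('speechEventType:', data.speechEventType);
  const result = data.results && data.results[0];
  if (result && result.alternatives && result.alternatives[0]) {
    console.log(
      'isFinal:', result.isFinal,
      'stability:', result.stability,
      'transcript:', result.alternatives[0].transcript
    );
  }
}

// Attach it next to the existing handler:
// recognizeStream.on('data', logStreamingResponse);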
The flip-camera feature doesn't work on devices that have more than two video inputs. On first load the video appears, but when the flip-camera button is clicked the application throws an error.
Expected behavior:
The camera should be flipped (environment)
Actual behavior:
It throws the following error:
error : call to getusermedia failed domexception could not start video source
Software versions:
Browser(s): Chrome
Operating System: Android (devices I'm checking where it's not working, e.g. Samsung M31, Redmi Note 11T, OnePlus 7T)
twilio-video.js: 2.24.0
Third-party libraries (e.g., Angular, nodejs, etc.):
Code used to start the Twilio stream:
async startTwilioStream(twilioToken: string, localVideo: ElementRef, remoteVideo: ElementRef): Promise<void> {
console.log('startTwilioStream');
this.localVideoElement = localVideo;
this.remoteVideoElement = remoteVideo;
await this.startLocalVideo(this.localVideoElement);
this.connectOptions = {
video: false,
audio: false,
tracks: [this.localAudioTrack, this.localVideoTrack],
audioConstraints: {
mandatory: {
googAutoGainControl: false,
},
},
region: 'in1',
preferredAudioCodecs: ['opus'],
preferredVideoCodecs: ['H264'],
};
connect(twilioToken, this.connectOptions).then((twilioRoom: any) => {
console.log('twilioRoom.localParticipant ================== ', twilioRoom.localParticipant);
setTimeout(() => {
if (this.remoteVideoElement?.nativeElement) {
this.remoteVideoElement.nativeElement.muted = false;
}
}, 5000);
this.twilioRoom = twilioRoom;
console.log('this.twilioRoom vvvv', this.twilioRoom);
twilioRoom.localParticipant.setNetworkQualityConfiguration({
local: 2,
remote: 1,
});
// flip.addEventListener('change', this.updateVideoDevice);
twilioRoom.on('participantConnected', participant => {
console.log('participant Connected===============', participant);
participant.tracks.forEach((publication) => {
console.log('publication', publication);
if (publication.isSubscribed) {
const track = publication.track;
this.attachTracks([track]);
}
});
this.twilioRoom = twilioRoom;
});
twilioRoom.on('participantDisconnected', participant => {
console.log('participantDisconnected', participant);
console.log('SOME PARTICIPANT DISCONNECTED');
if ((participant.identity === 'agent-screen-share' && this.serviceUserType !== 'agent') || (participant.identity === 'consumer-screen-share' && this.serviceUserType !== 'consumer')) {
this.changeDetectionEmitter.emit('remoteScreenShareStopped');
this.isRemoteScreenShareOn = false;
} else if (participant.identity !== 'agent-screen-share' && participant.identity !== 'consumer-screen-share') {
console.log('real participant dced');
this.remoteMediaStream = null;
this.detachTracks(participant);
this.isRemoteVideoOn = false;
}
this.twilioRoom = twilioRoom;
});
twilioRoom.participants.forEach((participant) => {
participant.tracks.forEach((publication) => {
if (publication.track) {
const track = publication.track;
this.attachTracks([track]);
}
});
participant.on('trackSubscribed', (track) => {
console.log('trackSubscribed', track);
this.attachTracks([track]);
});
this.twilioRoom = twilioRoom;
});
twilioRoom.on('trackAdded', (track, participant) => {
console.log('trackAdded', track, participant);
this.attachTracks([track]);
this.twilioRoom = twilioRoom;
});
// When a Participant adds a Track, attach it to the DOM.
twilioRoom.on('trackSubscribed', (track, err, participant) => {
console.log('trackSubscribed', track);
this.sendLoaderStatus('ringing');
if ((participant.identity === 'agent-screen-share' && this.serviceUserType !== 'agent') || (participant.identity === 'consumer-screen-share' && this.serviceUserType !== 'consumer')) {
this.attachScreenShareTrack([track]);
} else if (participant.identity === 'agent-screen-share' || participant.identity === 'consumer-screen-share') {
} else {
this.attachTracks([track]);
}
this.twilioRoom = twilioRoom;
});
// When a Participant removes a Track, detach it from the DOM.
twilioRoom.on('trackRemoved', (track, participant) => {
console.log('trackRemoved', track);
this.detachTracks([track]);
this.twilioRoom = twilioRoom;
});
}, err => {
});
}
Starting the local video and local audio tracks:
async startLocalVideo(localVideo: ElementRef, deviceId = 'user'): Promise<void> {
this.localVideoElement = localVideo;
const localAudioTrack = await createLocalAudioTrack({
audio: true
});
const localVideoTrack = await createLocalVideoTrack({
facingMode: deviceId
});
this.localAudioTrack = localAudioTrack;
this.localVideoTrack = localVideoTrack;
if (!this.localAudioTrack) {
alert('Audio source not found, do you have a mic connected?');
}
if (!this.localVideoTrack) {
alert('Video source not found, do you have a videocam connected?');
}
console.log('this.localVideoTrack to check', this.localVideoTrack);
this.localDisplayMediaStream = new MediaStream();
console.log('this.localVideoTrack.mediaStreamTrack to check', this.localVideoTrack.mediaStreamTrack);
this.localDisplayMediaStream.addTrack(this.localVideoTrack.mediaStreamTrack);
console.log('this.localDisplayMediaStream to check', this.localDisplayMediaStream);
this.localVideoElement.nativeElement.srcObject = this.localDisplayMediaStream;
}
The flip event listener, called when the switch button is clicked:
const flip = document.querySelector('#flip');
flip.addEventListener('click', (e) => {
if (this.facingMode == "user") {
this.facingMode = "environment";
this.twilioService.switch(this.facingMode)
} else {
this.facingMode = "user";
this.twilioService.switch(this.facingMode)
}
});
The switch-camera function, called from the flip event listener:
async switch(facingMode) {
console.log(this.localDisplayMediaStream);
if (this.localDisplayMediaStream) {
this.localDisplayMediaStream.getTracks().forEach(track => {
track.stop();
});
if (this.twilioRoom) {
await this.twilioRoom.localParticipant.videoTracks.forEach((track: any) => {
console.log('track', track);
track.track.stop();
});
}
}
const localVideoTrack = await createLocalVideoTrack({
facingMode: facingMode
});
this.localVideoTrack = localVideoTrack;
this.localDisplayMediaStream = new MediaStream();
this.localDisplayMediaStream.addTrack(this.localVideoTrack.mediaStreamTrack);
this.localVideoElement.nativeElement.srcObject = this.localDisplayMediaStream;
}
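One thing worth trying on devices with more than two cameras (a workaround suggestion, not part of the original code) is to enumerate the video inputs and pass an explicit deviceId to createLocalVideoTrack instead of relying on facingMode, since 'environment' can match a camera that fails to start; a rough sketch, where the 'back'/'rear' label match is only a heuristic:
// Hypothetical alternative to the facingMode-based switch: pick a specific
// back-facing device and hand its deviceId to createLocalVideoTrack.
import { createLocalVideoTrack } from 'twilio-video';

async function createBackCameraTrack() {
  const devices = await navigator.mediaDevices.enumerateDevices();
  const videoInputs = devices.filter(d => d.kind === 'videoinput');
  // Label matching is a heuristic; labels are only populated after camera
  // permission has been granted by a successful getUserMedia call.
  const back = videoInputs.find(d => /back|rear/i.test(d.label)) || videoInputs[0];
  return createLocalVideoTrack({ deviceId: { exact: back.deviceId } });
}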
I have been trying to follow this official guide from IBM (https://developer.ibm.com/tutorials/add-a-trigger-word-to-your-watson-assistant/) to build a Node.js voice assistant that answers after it recognizes a wake-up word. That guide seems a little outdated, so I decided to use it with Assistant V2 and the ibm-watson 5.2.0 package from npm.
I am getting a WebSocket connection error with very little information in it. The issue seems to be on line 33, with the params I am sending to the recognizeUsingWebSocket method. Am I missing something in these parameters?
const AssistantV2 = require('ibm-watson/assistant/v2');
const TextToSpeechV1 = require('ibm-watson/text-to-speech/v1');
const SpeechToTextV1 = require('ibm-watson/speech-to-text/v1');
const { IamAuthenticator } = require('ibm-watson/auth');
const mic = require('mic');
const conversation = new AssistantV2({
authenticator: new IamAuthenticator({ apikey: '<api_key>' }),
url: 'https://gateway-lon.watsonplatform.net/assistant/api/',
version: '2018-09-19'
});
const speechToText = new SpeechToTextV1({
authenticator: new IamAuthenticator({ apikey: '<api_key>' }),
serviceUrl: 'https://gateway-lon.watsonplatform.net/speech-to-text/api'
});
const textToSpeech = new TextToSpeechV1({
authenticator: new IamAuthenticator({ apikey: '<api_key>' })
});
const micParams = {
rate: 44100,
channels: 2,
debug: true,
exitOnSilence: 6
};
const microphone = mic(micParams);
const micInputStream = microphone.getAudioStream();
const textStream = micInputStream
.pipe(
speechToText.recognizeUsingWebSocket({
accessToken:'<access_token>',
contentType: 'audio/l16; rate=44100; channels=2',
interimResults: true,
inactivityTimeout: -1
})
)
.setEncoding('utf8');
const speakResponse = (text) => {
var params = {
text: text,
accept: 'audio/wav',
voice: 'en-US_AllisonVoice'
};
var writeStream = fs.createWriteStream('output.wav');
textToSpeech
.synthesize(params)
.then((audio) => {
audio.pipe(writeStream);
})
.catch((err) => {
console.log('error:', err);
});
writeStream.on('finish', function() {
ffprobe('output.wav', function(err, probeData) {
if (probeData) {
pauseDuration = probeData.format.duration;
microphone.pause();
speaker.play('output.wav');
startTime = new Date();
}
});
});
writeStream.on('error', function(err) {
console.log('Text-to-speech streaming error: ' + err);
});
};
function printContext(header) {
if (debug) {
console.log(header);
if (context.system) {
if (context.system.dialog_stack) {
const util = require('util');
console.log(" dialog_stack: ['" + util.inspect(context.system.dialog_stack, false, null) + "']");
}
}
}
}
function watsonSays(response) {
if (typeof response !== 'undefined') {
console.log('Watson says:', response);
}
}
function isActive(text) {
var elapsedTime = new Date() - startTime;
if (elapsedTime > SLEEP_TIME) {
// go to sleep
startTime = new Date();
botIsActive = false;
}
if (botIsActive) {
// in active conversation, so stay awake
startTime = new Date();
return true;
} else {
// we are asleep - did we get a wake up call?
if (text.toLowerCase().indexOf(wakeWord) > -1) {
// time to wake up
console.log('App just woke up');
botIsActive = true;
} else {
// false alarm, go back to sleep
console.log('App needs the wake up command');
}
return botIsActive;
}
}
function performConversation() {
console.log('App is listening, you may speak now.');
textStream.on('data', (user_speech_text) => {
userSpeechText = user_speech_text.toLowerCase();
console.log('\n\nApp hears: ', user_speech_text);
if (isActive(user_speech_text)) {
conversation.message(
{
assistantId: process.env.ASSISTANT_ID,
sessionId: process.env.SESSION_ID,
input: { text: user_speech_text }
},
(err, response) => {
console.log(err);
context = response.context;
watson_response = response.output.text[0];
if (watson_response) {
speakResponse(watson_response);
}
watsonSays(watson_response);
}
);
}
});
}
microphone.start();
performConversation();
It seems that you are not using the right endpoint for WebSockets.
Note: the API endpoints got a new version in December 2019.
You use:
https://gateway-lon.watsonplatform.net/speech-to-text/api
and it should be something like this (I think the wss prefix is key):
wss://api.{location}.speech-to-text.watson.cloud.ibm.com/instances/{instance_id}/v1/recognize
See the API reference: https://cloud.ibm.com/apidocs/speech-to-text/speech-to-text#websocket_methods
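With the ibm-watson Node SDK used in the question, that usually means constructing SpeechToTextV1 with the new-style serviceUrl (the SDK then derives the wss:// recognize URL from it); a sketch, where {location} and {instance_id} are placeholders for your own instance:
// Sketch: SpeechToTextV1 pointed at the post-December-2019 endpoint format.
const SpeechToTextV1 = require('ibm-watson/speech-to-text/v1');
const { IamAuthenticator } = require('ibm-watson/auth');

const speechToText = new SpeechToTextV1({
  authenticator: new IamAuthenticator({ apikey: '<api_key>' }),
  serviceUrl: 'https://api.{location}.speech-to-text.watson.cloud.ibm.com/instances/{instance_id}',
});

// recognizeUsingWebSocket then connects to the matching
// wss://api.{location}.speech-to-text.watson.cloud.ibm.com/instances/{instance_id}/v1/recognize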
I tried to set up WebRTC P2P video communication with Flutter, a Node backend as the signaling server, and Kurento Media Server.
When I run the app against the Kurento demo everything works fine, but as soon as I switch to my own backend the video stream doesn't start, although the log messages indicate that everything is OK.
Please let me know if more input is required to find a solution.
Relevant code snippets
Web Frontend:
call(username) {
const wrapper = this;
const remoteVideo = this.videoOutputFactory();
if (!remoteVideo) {
console.error('videoOutput not found');
}
const options = {
'remoteVideo': document.getElementById('testVideo'),
onicecandidate(candidate) {
console.debug('onicecandidate', candidate);
wrapper.rpc.onIceCandidate(candidate);
},
mediaConstraints: wrapper.remoteMediaConstraints
};
console.debug('WebRtcWrapper.call: options', options);
return new Promise((resolve, reject) => {
console.log('Creating WebRtcPeer');
this.webRtcPeer = kurentoUtils.WebRtcPeer.WebRtcPeerSendrecv(options, (error) => {
if (error) {
console.error('Error while creating WebRtcPeer', error);
reject(error);
return;
}
console.log('Generating WebRtcPeer offer');
this.webRtcPeer.generateOffer((offerError, offerSdp) => {
if (offerError) {
console.error('Error while generating WebRtcPeer offer', offerError);
reject(offerError);
return;
}
this.rpc.call(username, offerSdp).then((res) => {
console.log("Got call answer - Generated-SDPOffer: " + offerSdp);
if (res.response === 'rejected') {
console.log('Call rejected by peer');
reject(res.rejectionMessage);
return;
}
console.log('Processing peer SDP answer', res.sdpAnswer);
this.webRtcPeer.processAnswer(res.sdpAnswer);
});
});
});
});
}
App
TestController._()
: _channel = IOWebSocketChannel.connect('wss://server.marcostephan.at:443') {
_peer = jsonrpc.Peer(_channel.cast<String>());
_peer.registerMethod(
'rtc.incomingCall', (jsonrpc.Parameters message) async => await _onIncomingCall(message));
_peer.registerMethod(
'rtc.offerIceCandidate', (jsonrpc.Parameters message) => _onOfferIceCandidate(message));
_peer.registerMethod(
'rtc.startCommunication', (jsonrpc.Parameters message) => _onStartCommunication(message));
_peer.registerMethod('conn.heartbeat', (jsonrpc.Parameters message) => "");
_peer.registerFallback((jsonrpc.Parameters params) =>
print('Unknown request [${params.method}]: ${params.value}'));
_peer.listen();
_peer.sendRequest("auth.login", {'username': 'john.doe', 'role': 'actor'});
_peer.sendNotification("disp.helpMe", {'category': 'spareParts'});
}
_onIncomingCall(jsonrpc.Parameters message) async {
try{
print('Incoming call from ${message['username'].value}');
if (this.onStateChange != null) {
this.onStateChange(SignalingState.CallStateNew);
}
await _createPeerConnection();
RTCSessionDescription s = await _peerConnection
.createOffer(_constraints);
_peerConnection.setLocalDescription(s);
return {
'from': message['username'].value,
'callResponse': 'accept',
'sdpOffer': s.sdp
};
}
catch(e){
print('TestController._onIncomingCall: ERROR: $e');
}
}
_onOfferIceCandidate(jsonrpc.Parameters message) {
try{
var candidateMap = message['candidate'].value;
print('Received IceCandidate $candidateMap');
if (_peerConnection != null) {
RTCIceCandidate candidate = new RTCIceCandidate(candidateMap['candidate'],
candidateMap['sdpMid'], candidateMap['sdpMLineIndex']);
_peerConnection.addCandidate(candidate);
}
}
catch(e){
print('TestController._onOfferIceCandidate: ERROR: $e');
}
}
_onStartCommunication(jsonrpc.Parameters message) {
try{
_peerConnection.setRemoteDescription(
RTCSessionDescription(message['sdpAnswer'].value, 'answer'));
}
catch(e){
print('TestController._onStartCommunication: ERROR: $e');
}
}
_createPeerConnection() async {
_localStream = await _createStream();
RTCPeerConnection pc = await createPeerConnection(_iceServers, _config);
_peerConnection = pc;
pc.addStream(_localStream);
pc.onAddStream = (stream) {
if (this.onRemoteStream != null) this.onRemoteStream(stream);
//_remoteStreams.add(stream);
};
pc.onIceConnectionState = (state) {
print(
'TestController._createPeerConnection: onIceConnectionState: $state');
};
pc.onIceCandidate = (candidate) {
_peer.sendNotification("rtc.onIceCandidate", {
'candidate': {
'sdpMLineIndex': candidate.sdpMlineIndex,
'sdpMid': candidate.sdpMid,
'candidate': candidate.candidate
}
});
};
}
Future<MediaStream> _createStream() async {
final Map<String, dynamic> mediaConstraints = {
'audio': true,
'video': {
'mandatory': {
'minWidth': '1980',
'minHeight': '1020',
'minFrameRate': '30',
},
'facingMode': 'environment',
'optional': [],
}
};
MediaStream stream = await navigator.getUserMedia(mediaConstraints);
if (this.onLocalStream != null) {
this.onLocalStream(stream);
}
return stream;
}
final Map<String, dynamic> _iceServers = {
'iceServers': [
{'url': 'stun:stun.l.google.com:19302'},
]
};
final Map<String, dynamic> _config = {
'mandatory': {},
'optional': [
{'DtlsSrtpKeyAgreement': true},
],
};
final Map<String, dynamic> _constraints = {
'mandatory': {
'OfferToReceiveAudio': true,
'OfferToReceiveVideo': true,
},
'optional': [],
};
Logs
Web Frontend
Pastebin
App
Pastebin
I am trying to implement a one-to-one audio call using WebRTC (signalling over WebSockets). It works when I try it on a single machine using multiple Chrome tabs (localhost), but when I hit my server from another machine it does the initial handshake and then the call doesn't happen.
However, when I change the tag to a video element and change the constraints to video constraints, it works even when accessed from another machine (i.e. the video call works).
I initially thought it was because of a firewall, but when the video call worked I was puzzled.
Here is my code:
// Constraints to get audio stream only
$scope.constraints = {
audio: {
mandatory: {
googEchoCancellation: true
},
optional: []
},
video:false
};
navigator.getUserMedia = navigator.getUserMedia ||
navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
// success Callback of getUserMedia(), stream variable is the audio stream.
$scope.successCallback = function (stream) {
if (window.URL) {
myVideo.src = window.URL.createObjectURL(stream); // converting media stream to Blob URL.
} else {
myVideo.src = stream;
}
//attachMediaStream(audioTag, stream);
localStream = stream;
if (initiator)
maybeStart();
else
doAnswer();
};
// failure Callback of getUserMedia()
$scope.failureCallback = function (error) {
console.log('navigator.getUserMedia Failed: ', error);
};
var initiator, started = false;
$("#call").click(function () {
socket.emit("message", undefined);
initiator = true;
navigator.getUserMedia($scope.constraints, $scope.successCallback, $scope.failureCallback);
});
var channelReady = false;
socket.on('message', function (data) {
channelReady = true;
if (data) {
if (data.type === 'offer') {
if (!initiator) {
$("#acceptCall").show();
$("#acceptCall").click(function(){
if (!initiator && !started) {
var pc_config = {
iceServers: [
{ url: "stun:stun.l.google.com:19302" },
{ url: "turn:numb.viagenie.ca", credential: "drfunk", username: "toadums#hotmail.com"}
]
};
pc = new webkitRTCPeerConnection(pc_config);
pc.onicecandidate = onIceCandidate;
pc.onaddstream = onRemoteStreamAdded;
}
pc.setRemoteDescription(new RTCSessionDescription(data));
$scope.acceptCall();
});
}
} else if (data.type === 'answer' && started) {
pc.onaddstream = onRemoteStreamAdded;
pc.setRemoteDescription(new RTCSessionDescription(data));
} else if (data.type === 'candidate' && started) {
var candidate = new RTCIceCandidate({
sdpMLineIndex: data.label,
candidate: data.candidate
});
pc.addIceCandidate(candidate);
} else if (data.type === 'bye' && started) {
console.log("Bye");
}
}
});
function onRemoteStreamAdded(event) {
othersVideo.src = URL.createObjectURL(event.stream);
};
var sdpConstraints = {
'mandatory': {
'OfferToReceiveAudio': true,
'OfferToReceiveVideo': false
}
};
function doAnswer() {
pc.addStream(localStream);
pc.createAnswer(gotDescription,null,sdpConstraints);
}
function gotDescription(desc) {
pc.setLocalDescription(desc);
socket.send(desc);
}
function maybeStart() {
if (!started && localStream && channelReady)
createPeerConnection();
pc.addStream(localStream);
started = true;
if (initiator)
doCall();
}
$scope.acceptCall = function () {
navigator.getUserMedia($scope.constraints, $scope.successCallback, $scope.failureCallback);
}
function createPeerConnection() {
var pc_config = {
iceServers: [
{ url: "stun:stun.l.google.com:19302" },
{ url: "turn:numb.viagenie.ca", credential: "drfunk", username: "toadums#hotmail.com"}
]
};
pc = new webkitRTCPeerConnection(pc_config);
pc.onicecandidate = onIceCandidate;
console.log("Created RTCPeerConnnection with config:\n" + " \"" +
JSON.stringify(pc_config) + "\".");
};
function doCall() {
$scope.caller = true;
pc.createOffer(setLocalAndSendMessage,null,sdpConstraints);
};
function setLocalAndSendMessage(sessionDescription) {
pc.setLocalDescription(sessionDescription);
socket.send(sessionDescription);
}
function onIceCandidate(event) {
if (event.candidate) {
socket.emit('message', {
type: 'candidate',
label: event.candidate.sdpMLineIndex,
id: event.candidate.sdpMid,
candidate: event.candidate.candidate
});
} else {
console.log("End of candidates.");
}
}
If navigator.mediaDevices is undefined, it is because getUserMedia only works in a secure context (https).
see:
https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia
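A quick guard before calling getUserMedia makes this failure mode explicit instead of a cryptic "undefined" error; a minimal sketch:
// Fail early with a clear message when the page is not a secure context
// (https or localhost), where navigator.mediaDevices is undefined.
function requireMediaDevices() {
  if (!window.isSecureContext || !navigator.mediaDevices) {
    throw new Error('getUserMedia needs a secure context (https or localhost).');
  }
  return navigator.mediaDevices;
}

requireMediaDevices()
  .getUserMedia({ audio: true })
  .then(stream => console.log('got audio stream', stream.id))
  .catch(err => console.error('getUserMedia failed:', err));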