Is there a way to increase recording quality with the Web Audio API in Safari?

I'm using WebRTC along with WebAudioRecorder.js and the Web Audio API to record microphone input for audio recognition with the audD API (similar to Shazam). This works fine in Chrome and Firefox, and the recording quality seems fairly solid. However, audD cannot recognize the blob/file sent from my recording in Safari (11.1.2), I'm guessing because of low audio quality (the playback is almost inaudible). The only audio format that both Safari and audD support is mp3, so that's how I've been encoding the file.
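Before the code, a minimal diagnostic sketch (my assumption about where the quality is lost, not a confirmed fix): Safari's AudioContext typically runs at a fixed hardware rate and may ignore requested constraints, so logging the actual context rate and track settings can show whether the mp3 encoder is being fed the sample rate it expects.

// Diagnostic sketch: log what Safari actually delivers before recording.
// The constraint names are standard, but Safari 11 may ignore them.
navigator.mediaDevices.getUserMedia({
  audio: {
    echoCancellation: false,
    noiseSuppression: false
  }
}).then(function(stream) {
  var Ctx = window.AudioContext || window.webkitAudioContext;
  var ctx = new Ctx();
  console.log('context sample rate:', ctx.sampleRate);
  var track = stream.getAudioTracks()[0];
  // getSettings() may be only partially implemented in Safari 11
  console.log('track settings:', track.getSettings ? track.getSettings() : 'n/a');
});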
Javascript:
// to be set to a WebAudioRecorder.js recorder instance
let recorder;
// to be set to the stream resulting from getUserMedia()
let gumStream;

function beginRecording() {
  if (navigator.mediaDevices.getUserMedia) {
    console.log('starting the recording');
    navigator.mediaDevices.getUserMedia({ 'audio': true })
      .then(function(stream) {
        let AudioContext = window.AudioContext // Default
          || window.webkitAudioContext // Safari and old versions of Chrome
          || false;
        if (!AudioContext) {
          alert('The Web Audio API is not supported.');
          return; // without an AudioContext there is no recorder to configure
        }
        let audioCtx = new AudioContext();
        gumStream = stream;
        let source = audioCtx.createMediaStreamSource(stream);
        recorder = new WebAudioRecorder(source, {
          workerDir: 'web-audio-recorder-js/lib/',
          encoding: 'mp3'
        });
        recorder.setOptions({
          timeLimit: 120,
          encodeAfterRecord: true,
          ogg: { quality: 0.9 },
          mp3: { bitRate: 320 }
        });
        // attach handlers before starting so no event can be missed
        recorder.onComplete = function(recorder, blob) {
          createAudioPlayback(blob);
          POSTreq(blob);
        };
        recorder.onError = function(recorder, err) {
          console.error(err);
        };
        recorder.startRecording();
      })
      .catch(function(err) {
        console.error(err);
      });
  }
}
function stopRecording() {
  console.log('stopping the recording');
  let recordingTime = recorder.recordingTime();
  console.log(recordingTime);
  let audioTrack = gumStream.getAudioTracks()[0];
  console.log(audioTrack);
  audioTrack.stop();
  recorder.finishRecording();
  $('#msg_box').text(`Recorded for ${Math.round(recordingTime)} seconds`);
  console.log('recording stopped');
}

function createAudioPlayback(blobData) {
  let url = URL.createObjectURL(blobData);
  $('body').append(`<audio controls src="${url}"></audio>`);
}

function POSTreq(blobData) {
  let xhr = new XMLHttpRequest();
  let fd = new FormData();
  fd.append('api_token', '');
  fd.append('file', blobData);
  fd.append('method', 'recognize');
  fd.append('return_itunes_audios', true);
  fd.append('itunes_country', 'us');
  xhr.onreadystatechange = function() {
    if (xhr.readyState === 4) {
      parseRetrievedData(xhr.response);
    }
  };
  xhr.open('POST', 'https://api.audd.io/');
  xhr.responseType = 'json';
  xhr.send(fd);
}

function parseRetrievedData(parseData) {
  console.log('the data from the audD api is: ', parseData);
}

$(function() {
  $('#start-button').click(function(e) {
    beginRecording();
    $('#stop-button').prop('hidden', false);
  });
  $('#stop-button').click(function(e) {
    stopRecording();
  });
});
HTML:
<div class="recorder_wrapper">
  <div class="recorder">
    <button id="start-button">Start</button>
    <button id="stop-button">Stop</button>
    <p id="msg_box"></p>
    <section class="auth-links-region" role="region">
      Signup
      Login
    </section>
    <section class="authentication-region" role="region" hidden>
      <p class="authentication-text"></p>
      My Searches
      Logout
    </section>
  </div>
</div>

Related

Unable to Stop DetectIntentStream on DialogFlow Es

I am trying to create a two-way voice bot using the detectIntentStream API of Dialogflow ES, but I am unable to stop the streaming or end the session after the conversation.
I am using example 2 of this repository: https://github.com/dialogflow/selfservicekiosk-audio-streaming/tree/master/examples
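One way to approach the stopping problem (a sketch under my own assumptions, using a custom 'stop-stream' socket event that is not in the code below): the client stops the recorder and notifies the server, and the server ends the active socket.io-stream so that pump() and streamingDetectIntent() see end-of-input and can flush their final result.

// client-side sketch: pairs with the hypothetical 'stop-stream' event
stopRecording.onclick = function() {
  recordAudio.stopRecording();  // stop producing new audio slices
  socketio.emit('stop-stream'); // ask the server to close the stream
};

// server-side sketch: keep a handle on the active stream so it can be ended
ss(client).on('stream', function(stream, data) {
  client.on('stop-stream', function() {
    // ending the stream gives pump() and the Dialogflow call their EOF;
    // whether this fully tears down the gRPC session is untested here
    stream.end();
  });
  detectIntentStream(stream, function(results) {
    client.emit('results', results);
  });
});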
Here is the server code:
const projectId = process.env.npm_config_PROJECT_ID;
const example = process.env.npm_config_EXAMPLE;
const port = process.env.npm_config_PORT || 3000;
const languageCode = "en-US";
let encoding = "AUDIO_ENCODING_LINEAR_16";
if (example > 3) {
// NOTE: ENCODING NAMING FOR SPEECH API IS DIFFERENT
encoding = "LINEAR16";
}
console.log(example);
if (example == 7) {
// NOTE: ENCODING NAMING FOR SPEECH API IS DIFFERENT
encoding = "linear16";
}
const singleUtterance = false;
const interimResults = false;
const sampleRateHertz = 16000;
const speechContexts = [
{
phrases: ["mail", "email"],
boost: 20.0,
},
];
console.log(example);
console.log(projectId);
// ----------------------
// load all the libraries for the server
const socketIo = require("socket.io");
const path = require("path");
const fs = require("fs");
const http = require("http");
const cors = require("cors");
const express = require("express");
const ss = require("socket.io-stream");
// load all the libraries for the Dialogflow part
const uuid = require("uuid");
const util = require("util");
const { Transform, pipeline } = require("stream");
const pump = util.promisify(pipeline);
const df = require("dialogflow").v2beta1;
// set some server variables
const app = express();
var server;
var sessionId, sessionClient, sessionPath, request;
var speechClient,
requestSTT,
ttsClient,
requestTTS,
mediaTranslationClient,
requestMedia;
// STT demo
const speech = require("@google-cloud/speech");
// TTS demo
const textToSpeech = require("@google-cloud/text-to-speech");
// Media Translation Demo
const mediatranslation = require("@google-cloud/media-translation");
/**
* Setup Express Server with CORS and SocketIO
*/
function setupServer() {
// setup Express
app.use(cors());
app.get("/", function (req, res) {
res.sendFile(path.join(__dirname + "/example" + example + ".html"));
});
server = http.createServer(app);
io = socketIo(server);
server.listen(port, () => {
console.log("Running server on port %s", port);
});
// Listener, once the client connect to the server socket
io.on("connect", (client) => {
console.log(`Client connected [id=${client.id}]`);
client.emit("server_setup", `Server connected [id=${client.id}]`);
ss(client).on("stream", function (stream, data) {
// get the name of the stream
const filename = path.basename(data.name);
// pipe the filename to the stream
stream.pipe(fs.createWriteStream(filename));
// make a detectIntStream call
detectIntentStream(stream, function (results) {
console.log(results);
console.log(results.outputAudio);
client.emit("results", results);
});
});
});
}
function setupDialogflow() {
// Dialogflow will need a session Id
sessionId = uuid.v4();
// Dialogflow will need a DF Session Client
// So each DF session is unique
sessionClient = new df.SessionsClient();
// Create a session path from the Session client,
// which is a combination of the projectId and sessionId.
sessionPath = sessionClient.sessionPath(projectId, sessionId);
// Create the initial request object
// When streaming, this is the first call you will
// make, a request without the audio stream
// which prepares Dialogflow in receiving audio
// with a certain sampleRateHerz, encoding and languageCode
// this needs to be in line with the audio settings
// that are set in the client
request = {
session: sessionPath,
queryInput: {
audioConfig: {
sampleRateHertz: sampleRateHertz,
encoding: encoding,
languageCode: languageCode,
speechContexts: speechContexts,
},
singleUtterance: singleUtterance,
},
outputAudioConfig: {
audioEncoding: "OUTPUT_AUDIO_ENCODING_LINEAR_16",
},
};
}
async function detectIntentStream(audio, cb) {
// execute the Dialogflow Call: streamingDetectIntent()
const stream = sessionClient
.streamingDetectIntent()
.on("data", function (data) {
// when data comes in
// log the intermediate transcripts
if (data.recognitionResult) {
console.log(
`Intermediate transcript:
${data.recognitionResult.transcript}`
);
} else {
// log the detected intent
console.log(`Detected intent:`);
cb(data);
}
})
.on("error", (e) => {
console.log(e);
})
.on("end", () => {
console.log("on end");
return null;
});
// Write request objects.
// The first message must contain StreamingDetectIntentRequest.session,
// [StreamingDetectIntentRequest.query_input] plus optionally
// [StreamingDetectIntentRequest.query_params]. If the client wants
// to receive an audio response, it should also contain
// StreamingDetectIntentRequest.output_audio_config.
// The message must not contain StreamingDetectIntentRequest.input_audio.
stream.write(request);
// pump is a small node module that pipes streams together and
// destroys all of them if one of them closes.
await pump(
audio,
// Format the audio stream into the request format.
new Transform({
objectMode: true,
transform: (obj, _, next) => {
next(null, {
inputAudio: obj,
outputAudioConfig: {
audioEncoding: `OUTPUT_AUDIO_ENCODING_LINEAR_16`,
},
});
},
}),
stream
);
}
setupDialogflow();
setupServer();
And here is the client code:
<!DOCTYPE html>
<!--
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
<meta charset="utf-8" />
<title>RecordRTC over Socket.io</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons">
<link rel="stylesheet" href="https://code.getmdl.io/1.3.0/material.indigo-pink.min.css">
<script src="https://www.WebRTC-Experiment.com/RecordRTC.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/2.3.0/socket.io.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io-stream/0.9.1/socket.io-stream.js"></script>
</head>
<body>
<div style="margin: 20px">
<h1 style="font-size: 18px;">Example 2: Dialogflow Speech Detection through streaming</h1>
<div>
<button id="start-recording" disabled>Start Streaming</button>
<button id="stop-recording" disabled>Stop Streaming</button>
</div>
<h2 style="font-size: 16px; margin-bottom: 10px;">Query Text</h2>
<code>data.queryResult.queryText</code><br/>
<input id="queryText" type="text" style="width: 400px;"/>
<h2 style="font-size: 16px; margin-bottom: 10px;">Intent</h2>
<code>data.queryResult.intent.displayName</code><br/>
<input id="intent" type="text" style="width: 400px;"/>
<h2 style="font-size: 16px;">Responses</h2>
<code>data.queryResult.fulfillmentText</code><br/>
<textarea id="results" style="width: 800px; height: 300px;"></textarea>
<code>data.queryResult.audio</code><br/>
<textarea id="result_audio" style="width: 800px; height: 300px;"></textarea>
</div>
<script type="text/javascript">
const startRecording = document.getElementById('start-recording');
const stopRecording = document.getElementById('stop-recording');
let recordAudio;
const socketio = io();
const socket = socketio.on('connect', function() {
startRecording.disabled = false;
});
startRecording.onclick = function() {
startRecording.disabled = true;
navigator.getUserMedia({
audio: true
}, function(stream) {
recordAudio = RecordRTC(stream, {
type: 'audio',
mimeType: 'audio/webm',
sampleRate: 44100,
desiredSampRate: 16000,
recorderType: StereoAudioRecorder,
numberOfAudioChannels: 1,
//1)
// get intervals based blobs
// value in milliseconds
// as you might not want to make detect calls every seconds
timeSlice: 4000,
//2)
// as soon as the stream is available
ondataavailable: function(blob) {
// 3
// making use of socket.io-stream for bi-directional
// streaming, create a stream
var stream = ss.createStream();
// stream directly to server
// it will be temp. stored locally
ss(socket).emit('stream', stream, {
name: 'stream.wav',
size: blob.size
});
// pipe the audio blob to the read stream
ss.createBlobReadStream(blob).pipe(stream);
}
});
recordAudio.startRecording();
stopRecording.disabled = false;
}, function(error) {
console.error(JSON.stringify(error));
});
};
// 4)
// on stop button handler
stopRecording.onclick = function() {
// recording stopped
recordAudio.stopRecording();
startRecording.disabled = false;
stopRecording.disabled = true;
// socketio.emit("close",function(){
// })
};
// const aduiopreview = document.getElementById('result_audio');
const resultpreview = document.getElementById('results');
const intentInput = document.getElementById('intent');
const textInput = document.getElementById('queryText');
socketio.on('results', function (data) {
console.log(data);
if(data.queryResult){
resultpreview.innerHTML += "" + data.queryResult.fulfillmentText;
intentInput.value = data.queryResult.intent.displayName;
textInput.value = "" + data.queryResult.queryText;
}
if(data.outputAudio){
console.log(data.outputAudio);
playOutput(data.outputAudio)
}
});
/*
* When working with Dialogflow and Dialogflow matched an intent,
* and returned an audio buffer. Play this output.
*/
function playOutput(arrayBuffer){
let audioContext = new AudioContext();
let outputSource;
try {
if(arrayBuffer.byteLength > 0){
console.log(arrayBuffer.byteLength);
audioContext.decodeAudioData(arrayBuffer,
function(buffer){
audioContext.resume();
outputSource = audioContext.createBufferSource();
outputSource.connect(audioContext.destination);
outputSource.buffer = buffer;
outputSource.start(0);
},
function(){
console.log(arguments);
});
}
} catch(e) {
console.log(e);
}
}
</script>
</body>
</html>

MediaRecorder: How to stop Video recording and play back in same video element?

I am using a <video> element with MediaRecorder:
function getUserMediaSuccess(stream) {
$videoElement[0].srcObject = stream;
$videoElement[0].autoplay = true;
$videoElement[0].muted = true;
$videoElement[0].controls = false;
mediaRecorder = new MediaRecorder(stream, settings.recorderOptions);
}
Once recording is finished, I want to play the recorded chunks.
I tried with:
const blob = new Blob(chunks, { 'type' : settings.recorderOptions.mimeType});
$videoElement[0].src = window.URL.createObjectURL(blob);
and also with
$videoElement[0].pause();
$videoElement[0].removeAttribute('src');
$videoElement[0].load();
$videoElement[0].src = settings.filename;
$videoElement[0].controls = true;
I cannot stop the video element from showing the real-time webcam feed.
I can play back the recorded video in ANOTHER video element, but I want to use the SAME element that displays the webcam.
I also tried:
localStream.getTracks().forEach(function(track) {
track.stop();
});
Which gives a black screen, but I am unable then to play back again the recorded video.
I think you have done most things correctly.
Based on the Mozilla MediaRecorder example and some more research, I think the magic is to switch between using srcObject and src:
srcObject for the MediaStream (the getUserMedia live preview)
src with window.URL.createObjectURL for the recorded blob
This snippet works if run on localhost or over https
(maybe the embedding here does not fulfill all the security requirements to allow access to getUserMedia):
const videoEl = document.getElementById('theVideoElement');
let mediaRecorder = null;
let mediaChunks = [];
function recordStart() {
console.log('recordStart..');
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices.getUserMedia (
{
audio: true,
video: true,
}
)
.then( stream => {
videoEl.srcObject = stream;
mediaRecorder = new MediaRecorder(stream);
mediaRecorder.addEventListener('dataavailable', event => {
mediaChunks.push(event.data);
});
mediaRecorder.addEventListener('stop', event => {
console.log("recorder stopped");
const blob = new Blob(mediaChunks, { 'type' : 'video/webm' });
const mediaBlobURL = window.URL.createObjectURL(blob);
mediaChunks = [];
videoEl.src = mediaBlobURL;
});
mediaRecorder.start();
console.log("recorder started", mediaRecorder.state);
window.setTimeout(event => {
console.log("time is over.");
mediaRecorder.stop();
console.log(mediaRecorder.state);
console.log("recorder stopped");
// stop getUserMedia stream - this way the cam and mic gets released.
for (const track of stream.getTracks()) {
track.stop();
}
videoEl.srcObject = null;
console.log("stream stopped.");
}, 3000);
})
.catch( err => {
console.error(`The following getUserMedia error occurred:\n ${err}`);
});
} else {
console.error('getUserMedia not supported on your browser!');
}
}
console.info('******************************************');
window.addEventListener('load', (event) => {
console.info('All resources finished loading.');
const buttonEl = document.getElementById('button_start');
buttonEl.addEventListener('click', (event) => {
console.info('click!');
recordStart();
});
});
button {
display: block;
}
video {
display: block;
border: solid 1px black;
}
<button id="button_start">
start
</button>
<video
id="theVideoElement"
autoplay
controls
>
</video>

How would I set up audio streaming from a Node server to React frontend?

I'm trying to create a radio broadcast app as a small project for learning. A potential implementation involves storing writeable streams (in this case, responses) in a list, and writing mp3 data from a readable stream like so
readable.on('data', (chunk) => {
for (const writable of writables) {
writable.write(chunk);
}
});
The front end would look something like this:
class Player extends React.Component{
render(){
return(
<div>
<audio key = {this.props.title} controls autoPlay muted
src='/stream' type="audio/mp3"/>
<div>Current Playing: {this.props.title}</div>
<button onClick={this.props.playNext}>Next</button>
</div>
);
}
}
For the time being, I'm using throttle with a constant value to slow the stream transfer rate. However, I can't get any sound to play. I've tried using pipe inside of the /stream call
// readable.pipe(res);
and it does play the sound, but that isn't valid for my purposes, since a radio should have all listeners on approximately the same chunks at the same time. I'm not getting any error messages, so I'm struggling to figure out how to fix this.
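For reference, a minimal sketch of the fan-out endpoint described above (Express-style; the route name and the removal-on-disconnect handling are my assumptions, not from the post):

const express = require('express');
const app = express();

// every connected listener's response object joins the shared list
const writables = [];

app.get('/stream', (req, res) => {
  res.set('Content-Type', 'audio/mpeg');
  writables.push(res);
  // drop the listener on disconnect so write() never hits a dead socket
  req.on('close', () => {
    const i = writables.indexOf(res);
    if (i !== -1) writables.splice(i, 1);
  });
});

app.listen(3001);

The throttled readable from the question then writes each chunk to every entry in writables, which is what keeps all listeners on roughly the same chunk.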
try this
ReactJS
<ReactAudioPlayer
src={`http://localhost:3001/api/report/${asteriskid}`}
autoPlay
controls
/>
NodeJS
// assumes: const path = require('path'); const fs = require('fs');
// const { getRepository } = require('typeorm'); plus a Cdr entity
exports.getAudio = async(req, res) => {
try {
const AUDIO_PATH= process.env.RECORDING_PATH;
const cdrRepository = getRepository(Cdr);
const { uniqueid } = req.params;
const call = await cdrRepository.find({
select: ['recording'],
where: {
uniqueid : uniqueid
}
});
const audio = path.join(__dirname, AUDIO_PATH, String(call[0].recording));
const stat = fs.statSync(audio);
res.writeHead(200, {
'Content-Type': 'audio/wav',
'Content-Length': stat.size
});
const readStream = fs.createReadStream(audio);
readStream.pipe(res);
} catch (e) {
console.log(`Error: ${e}`)
}}

HTML5 audio not playing with Handlebars templated src attribute

I'm trying to play mp3s that I am streaming out of a Mongo GridFSBucket. The root of my issue is that I use my Handlebars template to set the src attribute of my audio tag to the URL my stream should be headed to, but the player is unresponsive.
If I don't feed this URL to the audio tag and I remove the Handlebars code, my browser (Chrome) creates a video tag that blacks out the window, superimposes HTML media player controls in the center, and plays the track without issue.
How can I direct whatever is being streamed to that browser-created video tag to my audio player instead?
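One thing worth noting about the code below (an observation plus a hypothetical split-route sketch, not a confirmed fix): the /play handler both pipes downloadStream into res and then calls res.render() on the same response, so the browser receives a mix of MP3 bytes and HTML. A common pattern is to render the page from one route and point the audio tag's src at a second route that streams only audio:

// render the page; sourceUrl points at the dedicated streaming route
app.get('/play', function(req, res) {
  var trackId = sanitizer.escape(req.query.id);
  res.render('list', { sourceUrl: '/audio/' + trackId });
});

// this route sends nothing but audio bytes (the route name is my invention)
app.get('/audio/:id', function(req, res) {
  var bucket = new mongo.GridFSBucket(req.db, { bucketName: 'ross' });
  res.set('Content-Type', 'audio/mpeg');
  bucket.openDownloadStream(new ObjectID(req.params.id)).pipe(res);
});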
list.handlebars
<script src="../index.js" ></script>
<title>CRATE</title>
</head>
<body>
<div>tracks displayed below</div>
<div>{{userNameId}}'s Tracks</div>
{{#each tracks}}
<div class="row">
<div class="col-md-12">
<form id="trackSelection" method="GET" action="/play">
<input type="hidden" name="bytes" value="{{length}}">
<button class="playButton" type="submit" name="id" value="{{_id}}">{{filename}}</button>
</form>
</div>
</div>
{{/each}}
<audio id="playback" src="{{sourceUrl}}" type="audio/mpeg" controls>
</audio>
</body>
</html>
index.js
app.get('/play', urlEditor, function(req, res, next) {
var db = req.db // => Db object
var size = sanitizer.escape(req.query.bytes);
var sanitizedTrackId = sanitizer.escape(req.query.id);
var username = 'ross';
var protocol = req.protocol;
var originalUrl = req.originalUrl;
// var sourceUrl = protocol + '://' + hosted + originalUrl;
var sourceUrl = protocol + '://' + req.get('host') + originalUrl;
console.log("sourceUrl", sourceUrl)
console.log("type", typeof sourceUrl)
if (username) {
const collection = db.collection(username + ".files");
var userNameId = collection.s.name;
console.log(userNameId);
const tracks = new Promise(function(resolve, reject) {
resolve(collection.find({}).toArray(function(err, tracks) {
assert.equal(err, null);
// return tracks;
finishPage(tracks);
}))
})
} else {
console.log('waiting')
}
function finishPage(tracks) {
try {
console.log("SID", sanitizedTrackId);
var trackID = new ObjectID(sanitizedTrackId);
} catch (err) {
return res.status(400).json({ message: "Invalid trackID in URL parameter. Must be a single String of 12 bytes or a string of 24 hex characters" });
}
let playEngine = new mongo.GridFSBucket(db, {
bucketName: username
});
var downloadStream = playEngine.openDownloadStream(trackID);
downloadStream.pipe(res);
console.log('success');
console.log("___________________");
var head = {
'Accept-Ranges': 'bytes',
'Content-Length': size,
'Content-Type': 'audio/mp3',
}
// res.render("list");
// res.set({ 'content-type': 'audio/mp3', 'accept-ranges': 'bytes', 'content-length': size }).render("list", { tracks: tracks, userNameId: userNameId, sourceUrl: sourceUrl });
res.status(206, head).render("list", { tracks: tracks, userNameId: userNameId, sourceUrl: sourceUrl });
}
});

socket.io, webrtc, nodejs video chat app getting errors over https: ERR_SSL_PROTOCOL_ERROR, 404 (Not Found), and ERR_CONNECTION_TIMED_OUT

I have put together a video chat app using socket.io, WebRTC, and Node.js from an online tutorial on GitHub, but now I am getting errors when converting it to be used over HTTPS:
Request URL:https://telemed.caduceususa.com/socket.io/?EIO=3&transport=polling&t=1479396416422-0
Request Method:GET
Status Code:404 Not Found
Remote Address:10.9.2.169:443
Other errors I have gotten in this process are as follows:
When I try to declare a different PORT I get ERR_SSL_PROTOCOL_ERROR,
When I try to declare port 80 or 8080 I get ERR_CONNECTION_TIMED_OUT
Something is going wrong on this line inside socket.io.js:
xhr.send(this.data);
I am running a node.js server on Windows Server 2012 and I have set up IIS to serve up the server on PORT 80. I have created the subdomain https://telemed.caduceususa.com in DNS and have purchased a trusted SSL cert to run the site over HTTPS.
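One configuration mismatch worth flagging before the code (my reading of the setup, not a verified fix): the server.js below creates an HTTPS server but listens on port 80, the same port IIS is serving, while browsers expect TLS on 443. That combination matches the ERR_SSL_PROTOCOL_ERROR and timeout symptoms. Two sketches of alternatives:

// Option A: serve TLS from Node directly on the standard HTTPS port (443)
var app = https.createServer(credentials, function (req, res) {
  file.serve(req, res);
}).listen(443);

// Option B: let IIS terminate TLS and reverse-proxy to a plain HTTP
// Node server on an internal port (the port number here is arbitrary)
var http = require('http');
var app = http.createServer(function (req, res) {
  file.serve(req, res);
}).listen(process.env.PORT || 8080);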
Here is the excerpt of code from the dev tools that contains the above line that is causing the error as well as my other code:
/**
 * Creates the XHR object and sends the request.
 *
 * @api private
 */
Request.prototype.create = function(){
var opts = { agent: this.agent, xdomain: this.xd, xscheme: this.xs, enablesXDR: this.enablesXDR };
// SSL options for Node.js client
opts.pfx = this.pfx;
opts.key = this.key;
opts.passphrase = this.passphrase;
opts.cert = this.cert;
opts.ca = this.ca;
opts.ciphers = this.ciphers;
opts.rejectUnauthorized = this.rejectUnauthorized;
var xhr = this.xhr = new XMLHttpRequest(opts);
var self = this;
try {
debug('xhr open %s: %s', this.method, this.uri);
xhr.open(this.method, this.uri, this.async);
if (this.supportsBinary) {
// This has to be done after open because Firefox is stupid
// https://stackoverflow.com/questions/13216903/get-binary-data-with-xmlhttprequest-in-a-firefox-extension
xhr.responseType = 'arraybuffer';
}
if ('POST' == this.method) {
try {
if (this.isBinary) {
xhr.setRequestHeader('Content-type', 'application/octet-stream');
} else {
xhr.setRequestHeader('Content-type', 'text/plain;charset=UTF-8');
}
} catch (e) {}
}
// ie6 check
if ('withCredentials' in xhr) {
xhr.withCredentials = true;
}
if (this.hasXDR()) {
xhr.onload = function(){
self.onLoad();
};
xhr.onerror = function(){
self.onError(xhr.responseText);
};
} else {
xhr.onreadystatechange = function(){
if (4 != xhr.readyState) return;
if (200 == xhr.status || 1223 == xhr.status) {
self.onLoad();
} else {
// make sure the `error` event handler that's user-set
// does not throw in the same tick and gets caught here
setTimeout(function(){
self.onError(xhr.status);
}, 0);
}
};
}
debug('xhr data %s', this.data);
xhr.send(this.data);
}
Here is the server.js file:
var fs = require('fs');
var hskey = fs.readFileSync('ssl/telemed_internal_server.key');
var hscert = fs.readFileSync('ssl/telemed_internal_cert.pem');
var ca = fs.readFileSync('ssl/telemed_internal_key.pem');
var credentials = {
ca: ca,
key: hskey,
cert: hscert
};
var static = require('node-static');
var https = require('https');
var util = require('util');
var file = new(static.Server)();
var app = https.createServer(credentials, function (req, res) {
file.serve(req, res);
}).listen(process.env.PORT || 80);
var io = require('socket.io').listen(app);
io.sockets.on('connection', function (socket){
// convenience function to log server messages on the client
function log(){
var array = [">>> Message from server: "];
for (var i = 0; i < arguments.length; i++) {
array.push(arguments[i]);
}
socket.emit('log', array);
}
// when receive sdp, broadcast sdp to other user
socket.on('sdp', function(data){
console.log('Received SDP from ' + socket.id);
socket.to(data.room).emit('sdp received', data.sdp);
});
// when receive ice candidate, broadcast sdp to other user
socket.on('ice candidate', function(data){
console.log('Received ICE candidate from ' + socket.id + ' ' + data.candidate);
socket.to(data.room).emit('ice candidate received', data.candidate);
});
socket.on('message', function (message) {
log('Got message:', message);
// for a real app, would be room only (not broadcast)
socket.broadcast.emit('message', message);
});
socket.on('create or join', function (room) {
// join room
var existingRoom = io.sockets.adapter.rooms[room];
var clients = [];
if(existingRoom){
clients = Object.keys(existingRoom);
}
if(clients.length == 0){
socket.join(room);
io.to(room).emit('empty', room);
}
else if(clients.length == 1){
socket.join(room);
socket.to(room).emit('joined', room, clients.length + 1);
}
// only allow 2 users max per room
else{
socket.emit('full', room);
}
});
socket.on('error', function(error){
console.error(error);
})
});
Here is the main.js (config) file:
//my signalling server
var serverIP = "https://telemed.caduceususa.com/";
// RTCPeerConnection Options
var server = {
// Uses Google's STUN server
iceServers: [{
"url": "stun:stun4.l.google.com:19302"
},
{
url: 'turn:numb.viagenie.ca',
credential: 'muazkh',
username: 'webrtc@live.com'
}]
};
// various other development IPs
// var serverIP = "https://192.168.43.241:2013";
// var serverIP = "https://10.0.11.196:2013";
var localPeerConnection, signallingServer;
var btnSend = document.getElementById('btn-send');
var btnVideoStop = document.getElementById('btn-video-stop');
var btnVideoStart = document.getElementById('btn-video-start');
var btnVideoJoin = document.getElementById('btn-video-join');
var localVideo = document.getElementById('local-video');
var remoteVideo = document.getElementById('remote-video');
var inputRoomName = document.getElementById('room-name');
var localStream, localIsCaller;
btnVideoStop.onclick = function(e) {
e.preventDefault();
// stop video stream
if (localStream != null) {
localStream.stop();
}
// kill all connections
if (localPeerConnection != null) {
localPeerConnection.removeStream(localStream);
localPeerConnection.close();
signallingServer.close();
localVideo.src = "";
remoteVideo.src = "";
}
btnVideoStart.disabled = false;
btnVideoJoin.disabled = false;
btnVideoStop.disabled = true;
}
btnVideoStart.onclick = function(e) {
e.preventDefault();
// is starting the call
localIsCaller = true;
initConnection();
}
btnVideoJoin.onclick = function(e) {
e.preventDefault();
// just joining a call, not offering
localIsCaller = false;
initConnection();
}
function initConnection() {
var room = inputRoomName.value;
if (room == undefined || room.length <= 0) {
alert('Please enter room name');
return;
}
// start connection!
connect(room);
btnVideoStart.disabled = true;
btnVideoJoin.disabled = true;
btnVideoStop.disabled = false;
}
// WEBRTC STUFF STARTS HERE
// Set objects as most are currently prefixed
window.RTCPeerConnection = window.RTCPeerConnection || window.mozRTCPeerConnection ||
window.webkitRTCPeerConnection || window.msRTCPeerConnection;
window.RTCSessionDescription = window.RTCSessionDescription || window.mozRTCSessionDescription ||
window.webkitRTCSessionDescription || window.msRTCSessionDescription;
navigator.getUserMedia = navigator.getUserMedia || navigator.mozGetUserMedia ||
navigator.webkitGetUserMedia || navigator.msGetUserMedia;
window.SignallingServer = window.SignallingServer;
var sdpConstraints = {
optional: [],
mandatory: {
OfferToReceiveVideo: true,
}
}
function connect(room) {
// create peer connection
localPeerConnection = new RTCPeerConnection(server);
// create local data channel, send it to remote
navigator.getUserMedia({
video: true,
audio: true
}, function(stream) {
// get and save local stream
trace('Got stream, saving it now and starting RTC conn');
// must add before calling setRemoteDescription() because then
// it triggers 'addstream' event
localPeerConnection.addStream(stream);
localStream = stream;
// show local video
localVideo.src = window.URL.createObjectURL(stream);
// can start once have gotten local video
establishRTCConnection(room);
}, errorHandler)
}
function establishRTCConnection(room) {
// create signalling server
signallingServer = new SignallingServer(room, serverIP);
signallingServer.connect();
// a remote peer has joined room, initiate sdp exchange
signallingServer.onGuestJoined = function() {
trace('guest joined!')
// set local description and send to remote
localPeerConnection.createOffer(function(sessionDescription) {
trace('set local session desc with offer');
localPeerConnection.setLocalDescription(sessionDescription);
// send local sdp to remote
signallingServer.sendSDP(sessionDescription);
});
}
// got sdp from remote
signallingServer.onReceiveSdp = function(sdp) {
// get stream again
localPeerConnection.addStream(localStream);
trace(localStream)
// if local was the caller, set remote desc
if (localIsCaller) {
trace('is caller');
trace('set remote session desc with answer');
localPeerConnection.setRemoteDescription(new RTCSessionDescription(
sdp));
}
// if local is joining a call, set remote sdp and create answer
else {
trace('set remote session desc with offer');
localPeerConnection.setRemoteDescription(new RTCSessionDescription(
sdp), function() {
trace('make answer')
localPeerConnection.createAnswer(function(
sessionDescription) {
// set local description
trace('set local session desc with answer');
localPeerConnection.setLocalDescription(
sessionDescription);
// send local sdp to remote too
signallingServer.sendSDP(sessionDescription);
});
});
}
}
// when received ICE candidate
signallingServer.onReceiveICECandidate = function(candidate) {
trace('Set remote ice candidate');
localPeerConnection.addIceCandidate(new RTCIceCandidate(candidate));
}
// when room is full, alert user
signallingServer.onRoomFull = function(room) {
window.alert('Room "' + room +
'" is full! Please join or create another room');
}
// get ice candidates and send them over
// wont get called unless SDP has been exchanged
localPeerConnection.onicecandidate = function(event) {
if (event.candidate) {
//!!! send ice candidate over via signalling channel
trace("Sending candidate");
signallingServer.sendICECandidate(event.candidate);
}
}
// when stream is added to connection, put it in video src
localPeerConnection.onaddstream = function(data) {
remoteVideo.src = window.URL.createObjectURL(data.stream);
}
}
function errorHandler(error) {
console.error('Something went wrong!');
console.error(error);
}
function trace(text) {
console.info(text);
}
Here is the signalling server:
function trace(text){
console.info(text);
}
// Connects to signalling server with given room and IP
// has methods to exchange SDP and ICE candidates
var SignallingServer = function(room, socketServer){
this.room = room;
this.socket = io.connect(socketServer);
this.socket.on('full', function (room){
trace('Room ' + room + ' is full');
this.onRoomFull(room);
}.bind(this));
this.socket.on('empty', function (room){
this.isInitiator = true;
trace('Room ' + room + ' is empty');
});
this.socket.on('join', function (room){
trace('Making request to join room ' + room);
});
this.socket.on('joined', function (room, numClients){
trace('New user has joined ' + room);
trace('Room has ' + numClients + ' clients');
//ask host to initiate sdp transfer
this.onGuestJoined();
}.bind(this));
this.socket.on('sdp received', function(sdp){
trace('Received SDP ');
trace(sdp);
this.onReceiveSdp(sdp);
}.bind(this));
this.socket.on('ice candidate received', function(candidate){
trace('Received ICE candidate ');
trace(candidate);
this.onReceiveICECandidate(candidate);
}.bind(this));
this.socket.on('log', function (array){
console.log.apply(console, array);
});
}
SignallingServer.prototype = {
connect: function(){
if (this.room !== '') {
trace('Joining room ' + this.room);
this.socket.emit('create or join', this.room);
}
},
close: function(){
trace('Disconnecting')
this.socket.disconnect();
},
sendSDP: function(sdp){
trace('sending sdp')
this.socket.emit('sdp', {
room: this.room,
sdp: sdp
});
},
sendICECandidate: function(candidate){
trace('sending ice candidate');
this.socket.emit('ice candidate', {
room: this.room,
candidate: candidate
});
},
onReceiveSdp: function(sdp){
trace('Placeholder function: Received SDP')
},
onGuestJoined: function(){
trace('Placeholder function: Guest joined room')
},
onReceiveICECandidate: function(candidate){
trace('Placeholder function: Received ICE candidate')
},
onRoomFull: function(room){
trace('Placeholder function: Room is full!');
}
}
window.SignallingServer = SignallingServer;
And finally the HTML (can someone also explain what livereload.js is?):
<!doctype html>
<!--[if lt IE 7]>
<html class="no-js lt-ie9 lt-ie8 lt-ie7" lang="">
<![endif]-->
<!--[if IE 7]>
<html class="no-js lt-ie9 lt-ie8" lang="">
<![endif]-->
<!--[if IE 8]>
<html class="no-js lt-ie9" lang="">
<![endif]-->
<!--[if gt IE 8]>
<!-->
<html class="no-js" lang="">
<!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title></title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="css/bootstrap.min.css">
<style>
body {
padding-top: 50px;
padding-bottom: 20px;
}
</style>
<link rel="stylesheet" href="css/bootstrap-theme.min.css">
<link rel="stylesheet" href="css/main.css">
<script src="js/vendor/modernizr-2.8.3-respond-1.4.2.min.js"></script>
</head>
<body>
<!--[if lt IE 8]>
<p class="browserupgrade">
You are using an <strong>outdated</strong>
browser. Please
upgrade your browser
to improve your experience.
</p>
<![endif]-->
<nav class="navbar navbar-default navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="#">WebRTC Video Chat</a>
</div>
<div id="navbar" class="navbar-collapse collapse">
<!-- chatroom name form -->
<form class="navbar-form navbar-right form-inline">
<div class="form-group">
<input class="form-control" type="text" id="room-name" placeholder="Room name"/>
</div>
<button class="btn btn-primary" id="btn-video-start">Start</button>
<button class="btn btn-default" id="btn-video-join">Join</button>
<button class="btn btn-default" disabled id="btn-video-stop">Stop</button>
</form>
</div>
<!--/.navbar-collapse --> </div>
</nav>
<div class="container main">
<div class="row videos">
<div class="remote-video">
<video width="280" height="250" autoplay id="remote-video"></video>
</div>
<div class="local-video">
<video width="280" height="250" autoplay id="local-video" muted></video>
</div>
</div>
</div>
</div>
<!-- /container -->
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.2/jquery.min.js"></script>
<script>window.jQuery || document.write('<script src="js/vendor/jquery-1.11.2.min.js"><\/script>')</script>
<script src="js/vendor/bootstrap.min.js"></script>
<script src="js/vendor/socket.io.js"></script>
<script src="js/main.js"></script>
<script src="js/signalling.js"></script>
<script src="//localhost:9010/livereload.js"></script>
</body>
</html>
