Stop sound not working in three.js?

I'm a little frustrated that I can't stop a sound. I used the example code for playing a sound in three.js, and right after play() I call stop() to test it. No matter when I stop a sound during gameplay, I get the same error:
TypeError: Not enough arguments
I didn't think calling stop() required any arguments. I am running on Safari and Firefox.
Any help would be appreciated!
Here's my code:
// instantiate a listener
var audioListener = new THREE.AudioListener();
// add the listener to the camera
camera.add( audioListener );
// instantiate audio object
var oceanAmbientSound = new THREE.Audio( audioListener );
// add the audio object to the scene
scene.add( oceanAmbientSound );
// instantiate a loader
var loader = new THREE.AudioLoader();
// load a resource
loader.load(
// resource URL
'sounds/mySound.mp3',
// Function when resource is loaded
function ( audioBuffer ) {
// set the audio object buffer to the loaded object
oceanAmbientSound.setBuffer( audioBuffer );
// play the audio
oceanAmbientSound.play();
oceanAmbientSound.stop();
},
// Function called when download progresses
function ( xhr ) {
console.log( (xhr.loaded / xhr.total * 100) + '% loaded' );
},
// Function called when download errors
function ( xhr ) {
console.log( 'An error happened' );
}
);

Figured it out- it must be my old version of Safari (8.0.8). Nonetheless, thanks for your help!
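For anyone who hits the same "Not enough arguments" error on an old Safari: older WebKit builds implemented the underlying AudioBufferSourceNode's stop() as noteOff(when) and threw when it was called with no argument. A hedged workaround sketch, assuming your three.js version exposes that node as audio.source (it does in the versions I've seen):
// stop a THREE.Audio object even on old Safari builds that require a "when" argument
function stopSound( audio ) {
    if ( audio.source === undefined ) return; // nothing to stop if play() was never called
    try {
        audio.source.stop( 0 ); // explicit "when" argument keeps old Safari happy
    } catch ( e ) {
        // the source node was never started or has already stopped; nothing to do
    }
    audio.isPlaying = false; // keep three.js's own state flag in sync
}
stopSound( oceanAmbientSound );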

Related

How to stream consecutive DYNAMIC audio files from NodeJS to HTML5 audio tag- as one stream

From an Android device, the Node server receives chunks of a raw audio data file in ArrayBuffer format.
The server converts each chunk individually (say, 10 seconds' worth of playable audio) into WAV format in order to stream it to an HTML5 audio tag.
The idea is that as soon as the Android user presses "send", the browser can immediately begin to hear the streamed audio.
How can I stream these files consecutively to the HTML audio tag so that it sounds like one big file?
Currently I can stream one 10 second file to the tag and it plays. I also tried the audio tag's "onended" attribute to fetch the next 10 second file, but you can hear that it is not joined as one continuous file.
const audioStream = fs.createReadStream(audioPath);
audioStream.pipe(res);
Ideally I need to pipe all the files consecutively AS they come in from the android client, meaning I cannot first concatenate them into one file because the server is only receiving them as the browser is playing them (a latency of up to a few seconds is ok).
What can be done?
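For reference, the Node side on its own can pipe several files into one response by disabling end so the response stays open between files. A minimal sketch (the paths array and Express-style res are assumptions; this does not by itself fix the format/gap issues discussed in the edits below):
const fs = require('fs');

// pipe each file into the same response, closing it only after the last one
function streamFilesSequentially(paths, res) {
  if (paths.length === 0) {
    res.end(); // every chunk has been sent
    return;
  }
  const stream = fs.createReadStream(paths[0]);
  stream.pipe(res, { end: false }); // keep the response open for the next file
  stream.on('end', function () {
    streamFilesSequentially(paths.slice(1), res);
  });
}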
Edit:
Here is what I've tried:
var audio = document.getElementById('my');
var mediaSource = new MediaSource();
var SEGMENTS = 5;
var oReq = new XMLHttpRequest();

mediaSource.addEventListener('sourceopen', function() {
  var sourceBuffer = mediaSource.addSourceBuffer('audio/mpeg');

  sourceBuffer.addEventListener('updateend', function() {
    //GET('6.mp3', function(data) { onAudioLoaded(data, index); });
    oReq.open("GET", '6.mp3');
    oReq.responseType = "arraybuffer";
    oReq.onload = h;
    oReq.send();
  });

  function onAudioLoaded(data, index) {
    sourceBuffer.appendBuffer(data);
  }

  function h() {
    var arraybuffer = oReq.response; // not responseText
    onAudioLoaded(arraybuffer, 0);
    // console.log(arraybuffer);
  }

  // kick off the first request; each 'updateend' then fetches the next one
  oReq.open("GET", '6.mp3');
  oReq.responseType = "arraybuffer";
  oReq.onload = h;
  oReq.send();
});

// attach the MediaSource to the audio element so 'sourceopen' fires
audio.src = URL.createObjectURL(mediaSource);
Currently, I am fetching the same MP3 file from the server repeatedly, with the aim of stringing the copies together to see if it works.
As I mentioned below, I get a "source buffer full" error.
In the long run I'd move this to a WebSocket, so that each time the server receives a chunk it sends it here, stringing the result into one "live stream".
Any thoughts?
Thank you
EDIT:
As it turns out, my whole approach here is wrong:
Because I am converting each received chunk of raw audio into a separate MP3 file (it cannot be WAV, since WAV has no Media Source Extensions support), a gap is heard between each file even though I can string them together using MSE. I am assuming this is because of the padding in each MP3 file.
Does anyone have any alternatives so that I can seamlessly play my chunks of raw audio in the browser? Unless I am missing something in this "implementation".
Thank you.
Regarding the question "How to play consecutive dynamic audio files as one stream"...
Try a code setup like the one below (tested on Chrome only).
(Some names are changed; for example, your var arraybuffer is now input_MP3_Bytes):
<!DOCTYPE html>
<html>
<body>
<audio id="audioMSE" controls></audio>
<script>
var input_MP3_Bytes = []; var mediaSource; var sourceBuffer;
var is_FirstAppend = true; //# for setting up MSE if first append.
var mimeCodec = 'audio/mpeg;'; //# tested on Chrome browser only
//var mimeCodec = 'audio/mpeg; codecs="mp3"'; //# untested.. maybe needed by other browsers?
//# for audio tag reference
var myAudio = document.getElementById('audioMSE');
myAudio.play(); //# start conveyor belt (to receive incoming bytes)
//# load some input MP3 bytes for MSE
var fileURL = "file_01.mp3"; //"6.mp3"
load_MP3( fileURL );
function load_MP3( filename )
{
var oReq = new XMLHttpRequest();
oReq.open("GET", filename, true);
oReq.responseType = "arraybuffer";
oReq.addEventListener('loadend', on_MP3isLoaded );
oReq.send(null);
}
function on_MP3isLoaded()
{
input_MP3_Bytes = new Uint8Array( this.response ); //# fill the byte array
if (is_FirstAppend == true)
{
console.log( "MSE support : " + MediaSource.isTypeSupported(mimeCodec) );
makeMSE(); is_FirstAppend = false;
}
else { sourceBuffer.appendBuffer( input_MP3_Bytes ); }
}
function makeMSE ( )
{
if ('MediaSource' in window && MediaSource.isTypeSupported(mimeCodec) )
{
mediaSource = new MediaSource;
myAudio.src = URL.createObjectURL( mediaSource );
mediaSource.addEventListener( "sourceopen", mse_sourceOpen );
}
else { console.log("## Unsupported MIME type or codec: " + mimeCodec); }
}
function mse_sourceOpen()
{
sourceBuffer = mediaSource.addSourceBuffer( mimeCodec );
sourceBuffer.addEventListener( "updateend", mse_updateEnd );
//sourceBuffer.mode = "sequence";
sourceBuffer.appendBuffer( input_MP3_Bytes );
};
function mse_updateEnd()
{
//# what to do after feeding-in the bytes...?
//# 1) Here you could update (overwrite) same "input_MP3_Bytes" array with new GET request
//# 2) Or below is just a quick test... endless looping
//# 1) load another MP3 file
fileURL = "file_25.mp3";
load_MP3( fileURL );
//# 2) re-feed same bytes for endless looping
//sourceBuffer.appendBuffer( input_MP3_Bytes );
//mediaSource.endOfStream(); //# close stream to close audio
//console.log("mediaSource.readyState : " + mediaSource.readyState); //# is ended
}
</script>
</body>
</html>
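If the gaps between per-chunk MP3 files mentioned in the question's last edit remain a problem, one alternative (a rough sketch, not part of the answer above) is to skip MSE and schedule the raw PCM chunks back-to-back with the Web Audio API. It assumes each incoming chunk is mono 16-bit PCM at a known sample rate; adapt the decoding to your actual format:
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var nextStartTime = 0; // context time at which the next chunk should begin

function playPcmChunk(arrayBuffer, sampleRate) {
  var int16 = new Int16Array(arrayBuffer);
  var audioBuffer = audioCtx.createBuffer(1, int16.length, sampleRate);
  var channel = audioBuffer.getChannelData(0);
  for (var i = 0; i < int16.length; i++) {
    channel[i] = int16[i] / 32768; // 16-bit PCM to float [-1, 1]
  }
  var source = audioCtx.createBufferSource();
  source.buffer = audioBuffer;
  source.connect(audioCtx.destination);
  // schedule chunks back-to-back so there is no audible gap
  if (nextStartTime < audioCtx.currentTime) {
    nextStartTime = audioCtx.currentTime;
  }
  source.start(nextStartTime);
  nextStartTime += audioBuffer.duration;
}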

EventEmitter memory leak detected: Proper way to pass CSV data to multiple modules?

I am dipping my toe into using different npm modules on my own, whereas before I just executed already-written gulpfiles. The npm module penthouse loads a webpage and determines the above-the-fold CSS for that page. I am trying to take that module and use it with a site crawler so I can get the above-the-fold CSS for all pages, and store that CSS in a table.
So essentially I am:
Crawling a site to get all the URLs
Capturing the page ID from each URL
Storing the pages and their IDs in a CSV
Loading the CSV and passing each URL to penthouse
Taking the penthouse output and storing it in a table
So I am fine up until the last two steps. When I am reading the CSV, I get the error "possible EventEmitter memory leak detected. 11 exit listeners added. Use emitter.setMaxListeners() to increase limit."
The stack trace points here at line 134. After reading about the error, it makes sense because I see a bunch of event listeners being added, but I don't see penthouse ever really executing and closing the event listeners.
It works just fine standalone as expected (running penthouse against a single page and then exiting). But when I execute the code below to try to loop through all the URLs in the CSV, it spits out the memory leak error twice, and then just hangs. None of my console.log statements in the following script are executed.
However, I added a console.log to the end of the penthouse index.js file, and it is executed multiple times (where it adds event listeners), but it never times out or exits.
So it's clear I am not integrating this properly, but not sure how to proceed. What would be the best way to force it to read one line in the CSV at a time, process the URL, then take the output and store it in the DB before moving onto the next line?
const fs = require('fs');
var csv = require('fast-csv');
var penthouse = require('penthouse'),
path = require('path');
var readUrlCsv = function() {
var stream = fs.createReadStream("/home/vagrant/urls.csv");
var csvStream = csv()
//returns single line from CSV
.on("data", function(data) {
// data[0]: table id, data[1]: page type, data[2]: url
penthouse({
url : data[2],
css : './dist/styles/main.css'
}, function(err, criticalCss) {
if (err) {
console.log(err);
}
console.log('do we ever get here?'); //answer is no
if (data[1] === 'post') {
wp.posts().id( data[0] ).post({
inline_css: criticalCss
}).then(function( response ) {
console.log('saved to db');
});
} else {
wp.pages().id( data[0] ).page({
inline_css: criticalCss
}).then(function( response ) {
console.log('saved to db');
});
}
});
})
.on("end", function(){
console.log("done");
});
return stream.pipe(csvStream);
};
UPDATE
Changed my method to look like below so it processes all rows first, but still throws the same error. Writes "done" to the console, and immediately spits out the memory warning twice.
var readUrlCsv = function() {
var stream = fs.createReadStream("/home/vagrant/urls.csv");
var urls = [];
var csvStream = csv()
.on("data", function(data) {
// data[0]: table id, data[1]: page type, data[2]: url
urls.push(data);
})
.on("end", function(){
console.log("done");
buildCriticalCss(urls);
});
return stream.pipe(csvStream);
};
var buildCriticalCss = function(urls) {
//console.log(urls);
urls.forEach(function(data, idx) {
//console.log(data);
penthouse({
url : data[2],
css : './dist/styles/main.css',
// OPTIONAL params
width : 1300, // viewport width
height : 900, // viewport height
timeout: 30000, // ms; abort critical css generation after this timeout
strict: false, // set to true to throw on css errors (will run faster if no errors)
maxEmbeddedBase64Length: 1000 // characters; strip out inline base64 encoded resources larger than this
}, function(err, criticalCss) {
if (err) {
console.log(err);
}
console.log('do we ever finish one?');
if (data[1] === 'post') {
console.log('saving post ' + data[0]);
wp.posts().id( data[0] ).post({
inline_css: criticalCss
}).then(function( response ) {
console.log('saved post to db');
});
} else {
console.log('saving page ' + data[0]);
wp.pages().id( data[0] ).page({
inline_css: criticalCss
}).then(function( response ) {
console.log('saved page to db');
});
}
});
});
};
Update 2
I took a simple approach to controlling the number of concurrent processes spawned.
var readUrlCsv = function() {
var stream = fs.createReadStream("/home/vagrant/urls.csv");
var urls = [];
var csvStream = csv()
.on("data", function(data) {
// data[0]: table id, data[1]: page type, data[2]: url
urls.push(data);
})
.on("end", function(){
console.log("done");
//console.log(urls);
buildCriticalCss(urls);
});
return stream.pipe(csvStream);
};
function buildCriticalCss(data) {
var row = data.shift();
console.log(row);
penthouse({
url : row[2],
css : './dist/styles/main.css',
// OPTIONAL params
width : 1300, // viewport width
height : 900, // viewport height
timeout: 30000, // ms; abort critical css generation after this timeout
strict: false, // set to true to throw on css errors (will run faster if no errors)
maxEmbeddedBase64Length: 1000 // characters; strip out inline base64 encoded resources larger than this
}, function(err, criticalCss) {
if (err) {
console.log('err');
}
// handle your criticalCSS
console.log('finished');
console.log(row[2]);
// now start next job, if we have more urls
if (data.length !== 0) {
buildCriticalCss(data);
}
});
}
The error message you're seeing is a default warning printed to the console by Node's events library when more than the allowed number of event listeners are defined for an instance of EventEmitter. It does not indicate an actual memory leak; rather, it is displayed to make sure you're aware of the possibility of a leak.
You can see this by checking the events.EventEmitter source code at lines 20 and 244.
To stop EventEmitter from displaying this message, and since penthouse does not expose its specific EventEmitter, you'll need to set the default number of allowed event emitter listeners to something larger than its default value of 10 using:
var EventEmitter=require('events').EventEmitter;
EventEmitter.defaultMaxListeners=20;
Note that according to Node's documentation for EventEmitter.defaultMaxListeners, this will change the maximum number of listeners for all instances of EventEmitter, including those that have already been defined previous to the change.
Or you could simply ignore the message.
Further to the hanging of your code, I'd advise gathering all the results from the parsing of your CSV into an array, and then processing the array contents separately from the parsing process.
This would accomplish two things: it would allow you to
be sure the entire CSV file was valid before you started processing, and
add debugging messages while processing each element, which would give you deeper insight into how each element of the array is processed.
UPDATE
As noted below, depending on how many URLs you're processing, you're probably overwhelming Node's ability to handle all of your requests in parallel.
One easy way to proceed would be to use eventing to marshal your processing so your URLs are processed sequentially, as in:
var assert=require('assert'),
    events=require('events'),
    fs=require('fs'),
    csv=require('fast-csv'),
    penthouse=require('penthouse');
var emitter=new events.EventEmitter();
/** Container for URL records read from CSV file.
*
* @type {Array}
*/
var urls=[];
/** Reads urls from file and triggers processing
*
* @emits processUrl
*/
var readUrlCsv = function() {
var stream = fs.createReadStream("/home/vagrant/urls.csv");
stream.on('error',function(e){ // always handle errors!!
console.error('failed to createReadStream: %s',e);
process.exit(-1);
});
var csvStream = csv()
.on("data", function(data) {
// data[0]: table id, data[1]: page type, data[2]: url
urls.push(data);
})
.on("end", function(){
console.log("done reading csv");
//console.log(urls);
emitter.emit('processUrl'); // start processing URLs
})
.on('error',function(e){
console.error('failed to parse CSV: %s',e);
process.exit(-1);
});
// no return required since we don't do anything with the result
stream.pipe(csvStream);
};
/** Event handler to process a single URL
*
* @emits processUrl
*/
var onProcessUrl=function(){
// always check your assumptions
assert(Array.isArray(urls),'urls must be an array');
var urlRecord=urls.shift();
if(urlRecord){
assert(Array.isArray(urlRecord),'urlRecord must be an array');
assert(urlRecord.length>2,'urlRecord must have at least three elements');
penthouse(
{
// ...
},
function(e,criticalCss){
if(e){
console.error('failed to process record %s: %s',urlRecord,e);
return; // IMPORTANT! do not drop through to rest of func!
}
// do what you need with the result here
if(urls.length===0){ // ok, we're done
console.log('completed processing URLs');
return;
}
emitter.emit('processUrl');
}
);
}
}
/**
* processUrl event - triggers processing of next URL
*
* @event processUrl
*/
emitter.on('processUrl',onProcessUrl); // assign handler
// start everything going...
readUrlCsv();
The benefit of using events here rather than your solution is the lack of recursion which can easily overwhelm your stack.
Hint: You can use events to handle all program flow issues normally addressed by Promises or modules like async.
And since events are at the very heart of Node (the "event loop"), it's really the best, most efficient way to solve such problems.
It's both elegant and "The Node Way"!
Here is a gist that illustrates the technique, without relying on streams or penthouse, the output of which is:
url: url1
RESULT: RESULT FOR url1
url: url2
RESULT: RESULT FOR url2
url: url3
RESULT: RESULT FOR url3
completed processing URLs
Besides using console.log, which is usually enough, you can also use the built-in debugger: https://nodejs.org/api/debugger.html
Another thing you can do is go into the node_modules/penthouse directory and add your console.log or debugger statements to the code for that module. That way you can debug your program there rather than treating the module as a black box.
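For example, a quick (hypothetical) snippet: drop a debugger statement inside the penthouse callback and run the script under Node's built-in debugger so execution pauses there:
penthouse({
  url: data[2],
  css: './dist/styles/main.css'
}, function(err, criticalCss) {
  debugger; // execution pauses here when run under `node debug yourscript.js`
  console.log(err, criticalCss && criticalCss.length);
});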
Also make sure there isn't some kind of race condition where, for example, the CSV isn't always fully written before your code tries to read it back in.
I think that the memory leak issue is probably a red herring as far as making your code function.
From your comment it sounds like you want to do something like the following with async.mapSeries: http://promise-nuggets.github.io/articles/15-map-in-series.html. You could also use promises as it shows, or, once promises are set up, use async/await with a regular for loop after compiling with Babel. In the long run I recommend doing that sort of thing with async/await and Babel, but that might be overkill just to get this working.
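For illustration, a rough sketch of that async/await approach (assuming Babel or a Node version that supports it, and wrapping penthouse's callback API, as used earlier in the thread, in a Promise):
const penthouse = require('penthouse');

function generateCss(row) {
  return new Promise(function(resolve, reject) {
    penthouse({ url: row[2], css: './dist/styles/main.css' }, function(err, criticalCss) {
      if (err) return reject(err);
      resolve(criticalCss);
    });
  });
}

// process one CSV row at a time, in order
async function processAll(urls) {
  for (const row of urls) {
    try {
      const criticalCss = await generateCss(row);
      console.log('finished', row[2]);
      // store criticalCss in the DB here
    } catch (e) {
      console.error('failed', row[2], e);
    }
  }
}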

Calling new object functions between different Express functions?

I am building a time-lapse camera web application using Raspberry Pi and the Raspberry Pi Camera Module. So far I have built a web application (using NodeJS, Express, AngularJS, and BootStrap 3) that can interact with the Raspberry Camera Module using an open source NodeJS module (https://www.npmjs.org/package/raspicam).
I have a global variable called "setting" that changes whenever the user changes the camera settings:
var setting = {
mode: "timelapse",
output: "public/images/image%d.jpg", // image1, image2, image3, etc...
encoding: "jpg",
timelapse: 3000, // take a picture every 3 seconds
timeout: 12000 // take a total of 4 pictures over 12 seconds
}
I have three functions in Express that can:
set Camera settings
exports.setCamera = function(req, res) {
setting = {
mode: req.body.mode,
output: req.body.output,
encoding: req.body.encoding,
timelapse: req.body.timelapse,
timeout: req.body.timeout
}
res.json(setting, 200);
console.log('SET CAMERA - ' + JSON.stringify(setting));
}
start the Camera
exports.startCamera = function(req, res) {
camera = new RaspiCam(setting);
camera.on("start", function( err, timestamp ){
console.log("timelapse started at " + timestamp);
});
camera.on("read", function( err, timestamp, filename ){
console.log("timelapse image captured with filename: " + filename);
});
camera.on("exit", function( timestamp ){
console.log("timelapse child process has exited");
res.json(setting, 200);
});
camera.on("stop", function( err, timestamp ){
console.log("timelapse child process has been stopped at " + timestamp);
});
camera.start();
setTimeout(function(){
camera.stop();
}, setting.timeout + 1000);
console.log('START CAMERA - ' + JSON.stringify(setting));
}
stop the Camera
exports.stopCamera = function(req, res) {
camera.stop();
res.json(setting, 200);
console.log('STOP CAMERA - ' + JSON.stringify(setting));
}
As you can see in the "startCamera" function, I am creating a new RaspiCam object called "camera" that is passed the global variable "setting" (which can always change). When the camera object is created, I am also creating "start", "read", "exit", and "stop" handlers for it. The problem is that since I am not setting the camera object as a global variable, when the user decides to click Stop halfway through the session, the "stopCamera" function gets called but it does not know what camera.stop() is and says it is undefined. Is there a way I can allow the "stopCamera" function to know about the camera object (which was created in the "startCamera" function) so it can call camera.stop()?
Sorry if this is confusing, I don't know how else to describe my problem.. :(
I think you have a problem with how this is architected, but the simple solution to your question is to check whether the camera object has been initialized.
exports.stopCamera = function(req, res) {
if(camera && typeof(camera.stop) == "function") {
camera.stop();
console.log('STOP CAMERA - ' + JSON.stringify(setting));
}
res.json(setting, 200);
}
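For the other half of that, a sketch of sharing the instance by declaring camera once at module scope in the same file as the route handlers, so stopCamera can reach what startCamera created (assumes RaspiCam is already required in this module, as in the question):
var RaspiCam = require('raspicam'); // assumed import; match whatever this file already uses

var camera = null; // shared by startCamera and stopCamera

exports.startCamera = function(req, res) {
  camera = new RaspiCam(setting); // assign to the module-level variable
  // ... attach the "start"/"read"/"exit"/"stop" handlers as before ...
  camera.start();
};

exports.stopCamera = function(req, res) {
  if (camera) {
    camera.stop();
    camera = null; // forget the finished session
  }
  res.json(setting, 200);
};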

HTML5 Microphone capture stops after 5 seconds in Firefox

I'm capturing audio input from the microphone with the getUserMedia() function. It works fine in Chrome, but in Firefox the sound dies out after 5 seconds. If I request the microphone again (without reloading the page), the same thing happens. Here is the code (I used http://updates.html5rocks.com/2012/09/Live-Web-Audio-Input-Enabled as guidance):
//getting the function depending on browser
navigator.getMedia = ( navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia);
// success callback when requesting audio input stream
function gotAudioStream(stream) {
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var audioContext = new AudioContext();
// Create an AudioNode from the stream.
var mediaStreamSource = audioContext.createMediaStreamSource( stream );
// Connect it to the destination to hear yourself (or any other node for processing!)
mediaStreamSource.connect( audioContext.destination );
}
function gotError(err) {
alert("An error occured! " + err);
}
//when button clicked, browser asks a permission to access microphone
jQuery("#sound_on").click(function()
{
navigator.getMedia({audio: true},gotAudioStream,gotError);
});
Any ideas?
EDIT/UPDATE
Thank you, csch, for the reference. Workaround by Karoun Kasraie worked!
context = new AudioContext();
navigator.getUserMedia({ audio: true }, function(stream) {
// the important thing is to save a reference to the MediaStreamAudioSourceNode
// thus, *window*.source or any other object reference will do
window.source = context.createMediaStreamSource(stream);
source.connect(context.destination);
}, alert);
It's a bug in Firefox, it can be found here:
https://bugzilla.mozilla.org/show_bug.cgi?id=934512
There's also a workaround:
context = new AudioContext();
navigator.getUserMedia({ audio: true }, function(stream) {
// the important thing is to save a reference to the MediaStreamAudioSourceNode
// thus, *window*.source or any other object reference will do
window.source = context.createMediaStreamSource(stream);
source.connect(context.destination);
}, alert);

Node.js thumbnailer using Imagemagick: nondeterministic corruption

I have a Node.js server which dynamically generates and serves small (200x200) thumbnails from images (640x640) in a database (mongodb). I'm using the node-imagemagick module for thumbnailing.
My code works roughly 95% of the time; about 1 in 20 (or fewer) thumbnailed images are corrupt on the client (iOS), which reports:
JPEG Corrupt JPEG data: premature end of data segment
For the corrupt images, the client displays the top 50% - 75% of the image, and the remainder is truncated.
The behavior is non-deterministic and the specific images which are corrupt changes on a per-request basis.
I'm using the following code to resize the image and output the thumbnail:
im.resize({
srcData: image.imageData.buffer,
width: opt_width,
}, function(err, stdout) {
var responseHeaders = {};
responseHeaders['content-type'] = 'image/jpeg';
responseHeaders['content-length'] = stdout.length;
debug('Writing ', stdout.length, ' bytes.');
response.writeHead(200, responseHeaders);
response.write(stdout, 'binary');
response.end();
});
What could be wrong here?
Notes:
The problem is not an incorrect content-length header. When I omit the header, the result is the same.
When I do not resize the image, the full-size image always seems to be fine.
In researching this I found this and this StackOverflow questions, which both solved the problem by increasing the buffer size. In my case the images are very small, so this seems unlikely to be responsible.
I was originally assigning stdout to a new Buffer(stdout, 'binary') and writing that. Removing it ('binary' will be deprecated) made no difference.
The problem seems to have been due to a slightly older version of node-imagemagick (0.1.2); upgrading to 0.1.3 was the solution.
In case this is helpful to anyone, here's the code I used to make Node.js queue up and handle client requests one at a time.
// Set up your server like normal.
http.createServer(handleRequest);
// ...
var requestQueue = [];
var isHandlingRequest = false; // Prevent new requests from being handled.
// If you have any endpoints that don't always call response.end(), add them here.
var urlsToHandleConcurrently = {
'/someCometStyleThingy': true
};
function handleRequest(req, res) {
if (req.url in urlsToHandleConcurrently) {
handleQueuedRequest(req, res);
return;
}
requestQueue.push([req, res]); // Enqueue new requests.
processRequestQueue(); // Check if a request in the queue can be handled.
}
function processRequestQueue() {
// Continue if no requests are being processed and the queue is not empty.
if (isHandlingRequest) return;
if (requestQueue.length == 0) return;
var op = requestQueue.shift();
var req = op[0], res = op[1];
// Wrap .end() on the http.ServerResponse instance to
// unblock and process the next queued item.
res.oldEnd = res.end;
res.end = function(data) {
res.oldEnd(data);
isHandlingRequest = false;
processRequestQueue();
};
// Start handling the request, while blocking the queue until res.end() is called.
isHandlingRequest = true;
handleQueuedRequest(req, res);
}
function handleQueuedRequest(req, res) {
// Your regular request handling code here...
}
