Sound doesn't play twice - audio

I have a web app for Tizen OS. I need to play one sound twice and perform some action after each playback.
In the browser it works perfectly, but it behaves very strangely on the device.
Sometimes the sound plays correctly the first time but there is no sound the second time; sometimes it plays both times, but the callbacks are not called.
Code:
var callback = function() {
doAction()
};
SoundManager.playSound(soundID, callback);
setTimeout(function() {
callback = function() {
doAnotherAction();
};
SoundManager.playSound(soundID, callback);
}, 3000);
Sound Manager:
SoundManager.playSound = function(id, callback) {
if (callback != null) {
var func = function() {
callback();
this.removeEventListener('ended', func, false);
}
$(SoundManager.sounds[id])[0].addEventListener('ended', func, false);
}
$(SoundManager.sounds[id])[0].play();
};

Found a solution.
Each time, add the 'ended' callback to the audio element and also reset its playback position before playing:
this.currentTime = 0;
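A minimal sketch of how the fixed playSound could look, assuming the same SoundManager structure as in the question:
SoundManager.playSound = function(id, callback) {
    var audio = $(SoundManager.sounds[id])[0];
    if (callback != null) {
        var func = function() {
            // remove the listener first so it only fires once per play
            audio.removeEventListener('ended', func, false);
            callback();
        };
        audio.addEventListener('ended', func, false);
    }
    // rewind before every play so the second playback starts from the beginning
    audio.currentTime = 0;
    audio.play();
};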

Related

Event listener when the transformation of the stream is finished, Node.js

I am trying to register an event listener for the end of the data in a pipe transformation. I tried registering the event on all streams in the pipe:
a) my custom transform stream (StreamToBuffer)
b) the standard file read stream
c) the standard gunzip stream.
Unfortunately, none of them fires (see the code below). As far as I can tell, only the 'data' event works, but that does not help.
What I need is to continue processing the tailBuffer in the StreamToBuffer class after the transformation is finished.
Can you suggest how to achieve this?
The code (simplified for brevity):
function samplePipe() {
var streamToBuffer = new StreamToBuffer();
var readStream = fs.createReadStream(bgzFile1, { flags: 'r',
encoding: null,
fd: null,
mode: '0666',
autoClose: true
});
var gunzipTransform = zlib.createGunzip();
readStream.on('end', function() {
//not fired
console.log('end event readStream');
});
streamToBuffer.on('end', function() {
//not fired
console.log('end event streamBuffer');
});
gunzipTransform.on('end', function() {
//not fired
console.log('end event gunzipTransform');
});
readStream
.pipe(gunzipTransform)
.pipe(streamToBuffer)
;
}
StreamToBuffer:
function StreamToBuffer() {
stream.Transform.call(this);
this.tailBuffer = new Buffer(0);
}
util.inherits(StreamToBuffer, stream.Transform);
StreamToBuffer.prototype._transform = function(chunk, encoding, callback) {
this.tailBuffer = Buffer.concat([this.tailBuffer, chunk]);
console.log('streamToBuffer');
}
StreamToBuffer.prototype._flush = function(callback) {
callback();
}
module.exports = StreamToBuffer;
EDITED:
After playing around with passing a callback function to the StreamToBuffer constructor, I discovered the mistake: the missing callback(); in the _transform() method. After adding it, the 'end' event listener works, at least on the standard read stream.
StreamToBuffer.prototype._transform = function(chunk, encoding, callback) {
this.tailBuffer = Buffer.concat([this.tailBuffer, chunk]);
console.log('streamToBuffer');
callback();
}
Another way is to pass a callback function to the StreamToBuffer constructor and then call it in the _flush method. This has the advantage that we can be sure the transformation has completed.
function samplePipe() {
var streamToBuffer = new StreamToBuffer(processBuffer);
.....
}
function processBuffer(buffer) {
console.log('processBuffer');
}
StreamToBuffer:
function StreamToBuffer(callback) {
stream.Transform.call(this);
this.tailBuffer = new Buffer(0);
this.finishCallback = callback;
}
util.inherits(StreamToBuffer, stream.Transform);
StreamToBuffer.prototype._transform = function(chunk, encoding, callback) {
this.tailBuffer = Buffer.concat([this.tailBuffer, chunk]);
console.log('streamToBuffer');
callback();
}
StreamToBuffer.prototype._flush = function(callback) {
console.log('flushed');
callback();
this.finishCallback(this.tailBuffer);
}
module.exports = StreamToBuffer;
Although I have not received an answer yet (thanks for the other comments, anyway), I think this question can be useful for people like me who are learning Node. If you know a better solution, please answer. Thank you.
For a writable stream, try the 'finish' event instead of the 'end' event:
streamToBuffer.on('finish', function() {
// finish event fired
});
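Combined with the StreamToBuffer class above, a sketch of what that could look like (assuming tailBuffer is kept on the instance, as in the question):
streamToBuffer.on('finish', function() {
    // everything has been written and _flush has run, so tailBuffer is complete
    processBuffer(streamToBuffer.tailBuffer);
});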

Trying to make my own RxJs observable

I'm trying to convert an existing API to work with RxJS... I'm fairly new to Node, and very new to RxJS, so please bear with me.
I have an existing API (getNextMessage) that either blocks (asynchronously) or returns a new item or error via a node-style (err, val) callback when something becomes available.
so it looks something like:
getNextMessage(nodeStyleCompletionCallback);
You could think of getNextMessage like an HTTP request that completes in the future, when the server responds; but you do need to call getNextMessage again once a message is received, to keep getting new items from the server.
So, in order to turn it into an observable collection, I have to get RxJS to keep calling my getNextMessage function until the subscriber is disposed.
Basically, I'm trying to create my own RxJs observable collection.
The problems are:
I don't know how to make subscriber.dispose() kill the async.forever
I probably shouldn't be using async.forever in the first place
I'm not sure I should even be getting 'completed' for each message - shouldn't that come only at the end of a sequence?
I'd like to eventually remove the need for using fromNodeCallback, to have a first class RxJS observable
Clearly I'm a little confused.
Would love a bit of help, thanks!
Here is my existing code:
var Rx = require('rx');
var port = require('../lib/port');
var async = require('async');
function observableReceive(portName)
{
var observerCallback;
var listenPort = new port(portName);
var disposed = false;
var asyncReceive = function(asyncCallback)
{
listenPort.getNextMessage(
function(error, json)
{
observerCallback(error, json);
if (!disposed)
setImmediate(asyncCallback);
}
);
}
return function(outerCallback)
{
observerCallback = outerCallback;
async.forever(asyncReceive);
}
}
var receive = Rx.Observable.fromNodeCallback(observableReceive('rxtest'));
var source = receive();
var subscription = source.forEach(
function (json)
{
console.log('receive completed: ' + JSON.stringify(json));
},
function (error) {
console.log("receive failed: " + error.toString());
},
function () {
console.log('Completed');
subscription.dispose();
}
);
So here's probably what I would do.
var Rx = require('rx');
// This is just for kicks. You have your own getNextMessage to use. ;)
var getNextMessage = (function(){
var i = 1;
return function (callback) {
setTimeout(function () {
if (i > 10) {
callback("lawdy lawd it's ova' ten, ya'll.");
} else {
callback(undefined, i++);
}
}, 5);
};
}());
// This just makes an observable version of getNextMessage.
var nextMessageAsObservable = Rx.Observable.create(function (o) {
getNextMessage(function (err, val) {
if (err) {
o.onError(err);
} else {
o.onNext(val);
o.onCompleted();
}
});
});
// This repeats the call to getNextMessage as many times (11) as you want.
// "take" will cancel the subscription after receiving 11 items.
nextMessageAsObservable
.repeat()
.take(11)
.subscribe(
function (x) { console.log('next', x); },
function (err) { console.log('error', err); },
function () { console.log('done'); }
);
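If you prefer to keep the fromNodeCallback wrapper from the question, roughly the same behaviour can be sketched with defer, which re-invokes the wrapped call on every repetition (this assumes getNextMessage has the node-style signature described above):
var nextMessage = Rx.Observable.fromNodeCallback(getNextMessage);
Rx.Observable.defer(function () {
    // defer builds a fresh observable per subscription, so repeat() triggers a new getNextMessage call each time
    return nextMessage();
})
.repeat()
.take(11)
.subscribe(
    function (x) { console.log('next', x); },
    function (err) { console.log('error', err); },
    function () { console.log('done'); }
);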
I realize this is over a year old, but I think a better solution for this would be to make use of recursive scheduling instead:
Rx.Observable.forever = function(next, scheduler) {
scheduler = scheduler || Rx.Scheduler.default;
//Internally wrap the callback into an observable
next = Rx.Observable.fromNodeCallback(next);
return Rx.Observable.create(function(observer) {
var disposable = new Rx.SingleAssignmentDisposable(),
hasState = false;
disposable.setDisposable(scheduler.scheduleRecursiveWithState(null,
function(state, self) {
hasState && observer.onNext(state);
hasState = false;
next().subscribe(function(x){
hasState = true;
self(x);
}, observer.onError.bind(observer));
}));
return disposable;
});
};
The idea here is that you can schedule a new item once the previous one has completed. You call next(), which invokes the passed-in method, and when it returns a value you schedule the next item for invocation.
You can then use it like so:
Rx.Observable.forever(getNextMessage)
.take(11)
.subscribe(function(message) {
console.log(message);
});
See a working example here

Use first result in an asynchronous race/Winner takes all race

Basically, I want to pit two asynchronous calls against each other, and only use the winner.
I cannot seem to figure out how to do this, only how to prevent it. Is it remotely possible?
Lame pseudo-code:
//rigging a race
function MysqlUser()
{
setTimeout(function(){
return "mysqluser";
}, 500);
}
function ADUser()
{
setTimeout(function(){
return "aduser";
}, 1000);
}
function getUser()
{
var user = null;
user = ADUser();
user = MysqlUser();
//if user != null
return user;
//else?
}
I'd like (in this instance) for MysqlUser to win out over ADUser.
Any help would be appreciated.
You can write a simple first function that takes a list of tasks and calls back with the result of only the first one to complete:
function first(tasks, cb) {
var done = false;
tasks.forEach(function(task) {
task(function(result) {
if(done) return;
done = true;
cb(result);
});
});
}
Then:
function mysqlUser(cb) {
setTimeout(function() {
cb("mysqluser");
}, 500);
}
function adUser(cb) {
setTimeout(function() {
cb("aduser");
}, 1000);
}
first([mysqlUser, adUser], function(user) {
console.log(user);
});
This might need some more thought if you want to cope with both operations failing.
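One possible extension, sketched under the assumption that each task reports failure through a node-style (err, result) callback: take the first success, and only fail once every task has failed.
function firstSuccessful(tasks, cb) {
    var done = false;
    var failures = 0;
    tasks.forEach(function(task) {
        task(function(err, result) {
            if (done) return;
            if (err) {
                failures++;
                // give up only when every task has failed
                if (failures === tasks.length) {
                    done = true;
                    cb(err);
                }
                return;
            }
            done = true;
            cb(null, result);
        });
    });
}
The task functions would then report (err, result) instead of a bare value, e.g. cb(null, "mysqluser").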

NodeJS async queue too fast (Slowing down async queue method)

I have an HTTP GET request and I want to parse the response and save it to my database.
If I call crawl(i) independently I get good results, but I have to call crawl() for ids 1 to 2000.
I get good results, but some responses seem to get lost and some responses are duplicates. I don't think I understand how to call thousands of asynchronous functions. I am using the async module's queue function, but so far I am still missing some data and still have some duplicates. What am I doing wrong here? Thanks for your help.
What I am crawling is the magicseaweed.com RSS feed shown in getOptions below.
My node functions:
function getOptions(i) {
return {
host: 'magicseaweed.com',
path: '/syndicate/rss/index.php?id='+i+'&unit=uk',
method: 'GET'
}
};
function crawl(i){
var req = http.request(getOptions(i), function(res) {
res.on('data', function (body) {
parseLocation(body);
});
});
req.end();
}
function parseLocation(body){
parser.parseString(body, function(err, result) {
if(result && typeof result.rss != 'undefined') {
var locationTitle = result.rss.channel[0].title;
var locationString = result.rss.channel[0].item[0].link[0];
var location = new Location({
id: locationString.split('/')[2],
name: locationTitle
});
location.save();
}
});
}
var N = 2; // number of simultaneous tasks
var q = async.queue(function (task, callback) {
crawl(task.url);
callback();
}, N);
q.drain = function() {
console.log('Crawling done.');
}
for(var i = 0; i < 100; i++){
q.push({url: 'http://magicseaweed.com/syndicate/rss/index.php?id='+i+'&unit=uk'});
}
[EDIT] Well, after a lot of testing it seems that the service I am crawling cannot handle that many requests that fast, because when I make each request sequentially I get all the responses correctly.
Is there a way to slow down the async queue method?
You should have a look at this great module, async, which simplifies async tasks like this. You can use queue; a simple example:
var N = 5; // number of simultaneous tasks
var q = async.queue(function (task, callback) {
somehttprequestfunction(task.url, function(){
callback();
});
}, N);
q.drain = function() {
console.log('all items have been processed');
}
for(var i = 0; i < 2000; i++){
q.push({url:"http://somewebsite.com/"+i+"/feed/"});
}
It keeps a window of ongoing actions, and a slot becomes available for a future task only when you invoke the callback function. The difference is that your code currently opens 2000 connections immediately, and obviously the failure rate is high. Limiting the concurrency to a reasonable value such as 5, 10 or 20 (depending on the site and your connection) will result in a better success rate. If a request fails, you can always try it again, or push the task onto another async queue for another attempt. The key point is to invoke callback() in the queue worker when the task is done, so that a slot becomes free for the next one.
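A small sketch of the retry idea mentioned above, re-pushing a failed task so it gets another slot later (the retries counter is hypothetical, and this assumes the request helper reports failures through the first callback argument):
var q = async.queue(function (task, callback) {
    somehttprequestfunction(task.url, function (err) {
        if (err) {
            task.retries = (task.retries || 0) + 1;
            if (task.retries <= 3) q.push(task); // try this URL again later
        }
        callback(); // free the slot so the queue keeps moving
    });
}, 5);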
var q = async.queue(function (task, callback) {
crawl(task.url);
callback();
}, N);
You're executing the next task immediately after starting the previous one; this way, the queue is meaningless. You should modify your code like this:
// first, modify your 'crawl' function to take a callback argument, and call this callback after the job is done.
// then
var q = async.queue(function (task, next/* name this argument as 'next' is more meaningful */) {
crawl(task.url, function () {
// after this one is done, start next one.
next();
});
// or, more simple way, crawl(task.url, next);
}, N);
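A sketch of what that modified crawl could look like, collecting the whole response before parsing and signalling completion to the queue (the error handling is an assumption, not part of the original code):
function crawl(i, done) {
    var req = http.request(getOptions(i), function (res) {
        var body = '';
        res.on('data', function (chunk) {
            body += chunk; // the response can arrive in several chunks
        });
        res.on('end', function () {
            parseLocation(body);
            done(); // tell the queue this task is finished
        });
    });
    req.on('error', done); // assumed: surface request errors to the queue as well
    req.end();
}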
Another option, if you want: vanilla JS without fancy libraries.
var incrementer = 0;
var resultsArray = [];
var myInterval = setInterval(function() {
incrementer++
if(incrementer == 100){
clearInterval(myInterval)
//when done parse results array
}
//make request here
//push request result to array here
}, 500);
This invokes the function every half second. It is an easy way to throttle the requests and stop after a fixed number of them.
I know I am a little late to the question; however, here is a solution I wrote to slow down the rate of requests when testing an API endpoint, using Node 4 or Node 5:
var fs = require('fs');
var supertest = require('supertest');
var request = supertest("http://sometesturl.com/api/test/v1/")
var Helper = require('./check.helper');
var basicAuth = Helper.basicAuth;
var options = Helper.options;
fs.readFile('test.txt', function(err, data){
var parsedItems = JSON.parse(data);
var urlparts = []
// create a queue
for (let year of range(1975, 2016)) {
for (var make in parsedItems[year]){
console.log(year, make, '/models/' + year + '/' + make)
urlparts.push({urlpart:'/models/' + year + '/' + make, year: year, make: make})
}
}
// start dequeue
waitDequeue();
// This function calls itself after the makeRequest promise completes
function waitDequeue(){
var item = urlparts.pop()
if (item){
makeRequest(item)
.then(function(){
// wait this time before next dequeue
setTimeout(function() {
waitDequeue();
}, 3000);
})
} else {
write(parsedItems)
}
}
// make a request, mutate parsedItems then resolve
function makeRequest(item){
return new Promise((resolve, reject)=>{
request
.get(item.urlpart)
.set(options.auth[0], options.auth[1])
.set(options.type[0], options.type[1])
.end(function(err, res) {
if (err) return reject(err);
console.log(res.body)
res.body.forEach(function(model){
parsedItems[item.year][item.make][model] = {}
});
resolve()
})
})
}
// write the results back to the file
function write(parsedItems){
fs.writeFile('test.txt', JSON.stringify(parsedItems, null, 4), function(err){
console.log(err)
})
}
})
A little late, but I have found this works!
Using async you can slow down the queue by using whilst inside the task handler, e.g.:
var q = async.priorityQueue(function(task, callback) {
// your code to process each task goes here
// when ready to complete the task, delay it by calling
var count = 0;
async.whilst( // wait roughly 10 seconds (10 x 1 second)
function() {
return count < 10;
},
function(callback) {
count++;
setTimeout(function() {
callback(null, count);
}, 1000);
},
function (err, n) {
// n seconds have passed
callback(); //callback to q handler
}
); //whilst
} , 5);
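If all you need is a fixed pause between tasks, a simpler sketch of the same idea is to delay the callback directly:
var q = async.priorityQueue(function (task, callback) {
    // process the task here, then wait a second before freeing the slot for the next one
    setTimeout(callback, 1000);
}, 5);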

Throttling intensive IO task in node.js

I'm playing around with node.js, trying to rewrite a particularly poorly designed part of my production system at work. So far, so good: I use RabbitMQ for messaging, and the node.js part of the system runs the Ghostscript command-line tool to convert TIFF files to PDF. Obviously I need to make sure I'm not running more than some fixed number of conversions at a time. What would be the best way to do this with Node? I understand that maybe node.js isn't really about running heavy disk IO stuff, but I'm having too much fun with it to quit.
I was considering just using a blocking call to execute the command-line utilities, but the thing is that some messages don't require this conversion and there's no need to delay their processing.
[Update] node-batches seems more appropriate.
I think you need something like forEachLimit (the following snippet was extracted from the async library):
forEachLimit = function (arr, limit, iterator, callback) {
callback = callback || function () {};
if (!arr.length || limit <= 0) {
return callback();
}
var completed = 0;
var started = 0;
var running = 0;
(function replenish () {
if (completed === arr.length) {
return callback();
}
while (running < limit && started < arr.length) {
iterator(arr[started], function (err) {
if (err) {
callback(err);
callback = function () {};
}
else {
completed += 1;
running -= 1;
if (completed === arr.length) {
callback();
}
else {
replenish();
}
}
});
started += 1;
running += 1;
}
})();
};
Usage:
var fileToConvert = ['file1', 'file2', 'file3'];
var maxConcurrency = 4;
function fnIter(item, callback){
console.log('converting', item);
// Conversion happens here
require('child_process').exec("some -f "+item, function(error, stdout, stderr){
callback(stderr); // stderr should be empty if everything went well.
});
}
function fnDone(){
console.log('done !');
}
forEachLimit(fileToConvert, maxConcurrency, fnIter, fnDone);
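If you are already depending on the async module anyway, the same pattern is available directly from the library (in recent versions forEachLimit is exposed as eachLimit), so the hand-rolled copy above is optional:
var async = require('async');
// same behaviour as the snippet above, using the library's own implementation
async.eachLimit(fileToConvert, maxConcurrency, fnIter, fnDone);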
