Node process cannot find setTimeout object in subsequent requests - node.js

I am trying to clear a timeout set with setTimeout in a subsequent request (using Express). Basically, I set a timeout when our live stream event starts (we get notified by a webhook) and aim to stop the stream for guest users after one hour. The one hour is measured via setTimeout, which works fine so far. However, if the event is stopped before one hour, I need to clear the timeout. I am trying to use clearTimeout, but it just can't find the same variable.
// Event starts
var setTimeoutIds = {};
var val = req.body.eventId;
setTimeoutIds[val] = setTimeout(function() {
    req.app.io.emit('disable_for_guest', req.body);
    live_events.update({event_id: req.body.eventId}, {guest_visibility: false}, function(err, data) {
        // All ok
    });
}, disable_after_milliseconds);
console.log(setTimeoutIds);
req.app.io.emit('session_started', req.body);
When event ends:
try {
    var event_id = req.body.eventId;
    clearTimeout(setTimeoutIds[event_id]);
    delete setTimeoutIds[event_id];
} catch(e) {
    console.log('Event ID could not be removed' + e);
}
req.app.io.emit('event_ended', req.body);
Output: (screenshot)

You are defining setTimeoutIds in the scope of the request handler, so a new, empty object is created for every request. You must define it at module level.
var setTimeoutIds = {};

router.post('/webhook', function(req, res) {
    ...
That makes the variable available until the next restart of the server.
Note: this approach only works as long as you have a single server with a single Node process serving your application. Once you go multi-process and/or multi-server, you need a completely different approach.
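For illustration, here is a minimal sketch of that pattern across two webhook handlers. The route paths and the one-hour value are assumptions based on the question, not part of the original code:
var express = require('express');
var router = express.Router();

// Module level: shared by every request this process handles
var setTimeoutIds = {};
var disable_after_milliseconds = 60 * 60 * 1000; // one hour, per the question

// Event starts: schedule the guest cut-off
router.post('/webhook/start', function(req, res) {
    var id = req.body.eventId;
    setTimeoutIds[id] = setTimeout(function() {
        req.app.io.emit('disable_for_guest', req.body);
        delete setTimeoutIds[id];
    }, disable_after_milliseconds);
    res.sendStatus(200);
});

// Event ends early: cancel the pending timeout
router.post('/webhook/end', function(req, res) {
    clearTimeout(setTimeoutIds[req.body.eventId]);
    delete setTimeoutIds[req.body.eventId];
    res.sendStatus(200);
});

module.exports = router;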

Related

Clear a timer set by setInterval() in one API from another API

Clear a timer set by setInterval() in one API from another API.
I have an API which starts setInterval(), and I save its timer id in the database.
When another API is called, I want to clear the old timer using its timer id from the database. I know clearInterval() will do the job.
However, after saving the id to the DB (JSON.stringify(intervalId)) and retrieving it (JSON.parse(intervalId)) to kill the timer, it's not working. Can't I get hold of a timer once outside the function, even if I save its id in the DB? Am I doing anything wrong?
var intervalID = setInterval(function() {
    console.log(intervalID);
    // Updating in DB
    Model.updateById(Id, {timer_data: JSON.stringify(intervalID)});
}, 30000);
// Now retrieving and killing it
Model.findById(id).then(data => {
    var intervalID = JSON.parse(data.timer_data);
    clearInterval(intervalID);
});
I expected the old timer to be killed, but it still lives and executes; it keeps removing all my assignments. I need it to be killed when required.
Unlike in browsers, the Node.js implementation of setInterval() does not return a numeric ID. It returns a Timeout object, and because that object has Symbol-keyed internals and circular references, you cannot save it to a database.
As a workaround, I suggest keeping the Timeout object in memory.
Here is a code snippet to help you out:
// Module-level store for live Timeout objects, keyed by record id
const inMemoryTimers = {};

/** Setting the timer */
// Save the Timeout object as inMemoryTimers[Id] right after creating it
const timer = setInterval(() => {
    // ...periodic work...
}, 1000);
inMemoryTimers[Id] = timer;

/** Clearing the timer */
// Retrieve the Timeout object from inMemoryTimers and clear it
clearInterval(inMemoryTimers[Id]);
delete inMemoryTimers[Id];
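For illustration, a minimal sketch of the same pattern across two Express endpoints, matching the start-in-one-API, clear-in-another setup from the question (the route paths and the :id parameter are hypothetical):
const express = require('express');
const app = express();

const inMemoryTimers = {};

// One API starts the interval and keeps the Timeout object in memory;
// only a serializable key (the record id) would ever go into the DB
app.post('/timers/:id/start', (req, res) => {
    const id = req.params.id;
    inMemoryTimers[id] = setInterval(() => {
        console.log('tick for ' + id);
    }, 30000);
    res.sendStatus(200);
});

// Another API clears it by looking the Timeout object up under the same key
app.post('/timers/:id/stop', (req, res) => {
    clearInterval(inMemoryTimers[req.params.id]);
    delete inMemoryTimers[req.params.id];
    res.sendStatus(200);
});

app.listen(3000);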

socket.on event gets triggered multiple times

var express = require('express');
var app = express();
var server = app.listen(3000);
var replyFromBot;

app.use(express.static('public'));

var socket = require('socket.io');
var io = socket(server);

io.sockets.on('connection', newConnection);

function newConnection(socket) {
    console.log(socket.id);
    listen = true;
    socket.on('Quest', reply);
    function reply(data) {
        replyFromBot = bot.reply("local-user", data);
        console.log(socket.id + " " + replyFromBot);
        socket.emit('Ans', replyFromBot);
    }
}
I've created a server-based chat-bot application using Node.js, socket.io and Express. The problem is that the first time I ask a question, the socket.on handler executes once; the second time it executes twice, the third time three times, and so on. I've worked around the issue by setting a flag on my client so the answer is displayed only once. I just want to know whether my code is logically correct. If the client asks a question for the 10th time, the listeners array will have 10+9+8+...+1 listeners, and it keeps growing with the number of questions asked, which is not good.
I tried using removeListener, but it removes the listener once and the handler is never called again. What do you recommend? Should I keep my workaround, or is there a way to add the listener when socket.on is called, remove it once it has executed, and add it again the next time?
Thank you.
Client code:
function reply() {
    socket.emit('Quest', Quest);
    flag = true;
    audio.play();
    socket.on('Ans', function(replyFromBot) {
        if (flag) {
            console.log("hi");
            var para = document.createElement("p2");
            x = document.getElementById("MiddleBox");
            para.appendChild(document.createTextNode(replyFromBot));
            x.appendChild(para);
            x.scrollTop = x.scrollHeight;
            flag = false;
        }
    });
}
The problem is caused by your client code. Each time you call the reply() function on the client, you set up an additional socket.on('Ans', ...) event handler, so they accumulate. You can change that to socket.once() and the handler will remove itself after it gets each Ans message. You can then also remove your flag variable.
function reply() {
    socket.emit('Quest', Quest);
    audio.play();
    // change this to .once()
    socket.once('Ans', function(replyFromBot) {
        console.log("hi");
        var para = document.createElement("p2");
        x = document.getElementById("MiddleBox");
        para.appendChild(document.createTextNode(replyFromBot));
        x.appendChild(para);
        x.scrollTop = x.scrollHeight;
    });
}
Socket.io is not really built as a request/response system, which is what you are trying to use it as. An even better way to implement this is to use the ack capability that socket.io has, so you get a direct response back to the Quest message you send.
You also need to fix the shared variables replyFromBot and listen on your server, because those are concurrency problems waiting to happen as soon as multiple users use your server.
Better Solution
A better solution would be to use the ack capability that socket.io has to get a direct response to a message you sent. To do that, you'd change your server to this:
function newConnection(socket) {
    console.log(socket.id);
    socket.on('Quest', function(data, fn) {
        let replyFromBot = bot.reply("local-user", data);
        console.log(socket.id + " " + replyFromBot);
        // send ack response
        fn(replyFromBot);
    });
}
And, change your client code to this:
function reply() {
    audio.play();
    socket.emit('Quest', Quest, function(replyFromBot) {
        console.log("hi");
        var para = document.createElement("p2");
        x = document.getElementById("MiddleBox");
        para.appendChild(document.createTextNode(replyFromBot));
        x.appendChild(para);
        x.scrollTop = x.scrollHeight;
    });
}
Doing it this way, you're hooking into a direct reply to the message, so it works as request/response much better than your original approach.
Instead of socket.on('Quest', reply); try socket.once('Quest', reply);
The bug in your code is that each time newConnection() is called, Node registers an additional 'Quest' event listener. The first time newConnection() is called, the number of listeners for the 'Quest' event is one; the second time the function is called it increases to two, and so on.
socket.once() ensures that exactly one 'Quest' listener is registered on the socket.

How to fork a process in Node that writes an Express response

I'd like to fork a long-running Express request in Node and send the Express response from the child, allowing the parent to serve other requests. I'm already using cluster, but I'd like to fork an additional process outside the cluster for specific long-running requests. What I'd like to prevent is all the processes in the cluster being consumed by a few long-running requests while most of the other requests are fast.
Thanks
var express = require('express');
var webserver = express();

webserver.get("/test", function(request, response) {
    // long running HTTP request
    response.send(...);
});
What I'm thinking of is something like following, although I'm not sure this works:
var cp = require('child_process');
var express = require('express');
var webserver = express();

webserver.get("/test", function(request, response) {
    var child = cp.fork('do_nothing.js');
    child.on("message", function(message) {
        if (message == "start") {
            response.send(...);
            process.exit();
        }
    });
    child.send("start");
});
Let me know if anyone knows how to do this.
Edit: So, the idea is that the child could take a long time. There are a limited number of processes in the cluster serving Express responses, and I don't want them all consumed by a specific long-running request type. In the code below, the entire cluster would be consumed by the long-running requests.
while (1) {
    if (rand() % 100 == 0) {
        if (fork() == 0) {
            sleep(hour(1));
            exit(0);
        }
    } else {
        sleep(second(1));
    }
    waitpid(WAIT_ANY, &status, WNOHANG);
}
Edit: I am going to mark the self-answer as solved. I'm sure there's a way to pass a socket to a child but it's not really necessary because the cluster master can manage all child processes. Thanks for your help.
Your second code block is confusing because it appears that you're killing the parent process with process.exit() rather than the child.
In any case, if we assume the problem is this:
You have a cluster of "regular processes".
Occasionally, you want to take an incoming request that was assigned to one of the cluster processes and pass it off to a long running child that will eventually send the response.
After sending the response, the long running child process should exit.
You have a couple options.
You can have the clustered process that was assigned the request start up a child, send it some initial data, and listen for a message back from the child. When it gets the message back, it can send the response and kill the child. This appears to be what you're attempting in your second code block.
You can have the clustered process that was assigned the request start up a child and reassign the request socket to the child process; the child then owns that socket from then on. When it finally sends the response, it can exit itself.
The first is simpler because no socket reassignment between processes is required. To implement the second, you'd have to write or find the code to reassign the socket and then reconstitute it as an Express request within the child. The cluster module does something like this, so the code is out there to be found and learned from, but I'm not aware of a trivial way to do it.
Personally, I don't see any particular downside to the first. I suppose if the clustered process were to die for some reason, you'd lose the long-running request socket, but hopefully you can just code your clustered processes not to die unnecessarily.
You can read this article on sending a socket to a new node.js process:
Sending a socket to a forked process
And, this node.js doc on sending a socket:
Example: sending a socket object
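For reference, a minimal sketch of the documented socket-passing pattern from those links, using a plain net server for simplicity. The file names are hypothetical, and an Express request would still have to be reconstituted from the raw socket inside the child, as noted above:
// parent.js
const cp = require('child_process');
const net = require('net');

const child = cp.fork('child.js');

// pauseOnConnect keeps incoming data queued until the child takes over
const server = net.createServer({ pauseOnConnect: true }, (socket) => {
    child.send('socket', socket); // hand the connection to the child
});
server.listen(8000);

// child.js
process.on('message', (msg, socket) => {
    if (msg === 'socket' && socket) {
        socket.resume();
        socket.end('handled by the child process\n');
    }
});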
So, I've verified that this is not necessary for my use case, but I was able to get it working using the code below. It's not exactly what the OP asks for, but it works.
What it does is send an instruction to the cluster master, which forks an additional worker upon receipt of the slow Express request.
Since the Express request doesn't need to know the status of the newly forked worker, it just handles the slow request as normal and then exits.
The instruction to the cluster master tells the master not to replace the dying slow-request process, so the number of workers reverts to the original number after the slow request finishes.
The pool grows when there are slow requests but then reverts to normal. This prevents, say, 20 simultaneous slow requests from bringing down the cluster.
var cluster = require('cluster');

var numberOfWorkers = 10;
var workerCount = 0;
var slowRequestPids = {};

if (cluster.isMaster) {
    for (var i = 0; i < numberOfWorkers; i++) {
        workerCount++;
        cluster.fork();
    }

    cluster.on('exit', function(worker) {
        workerCount--;
        var pidString = String(worker.process.pid);
        if (pidString in slowRequestPids) {
            delete slowRequestPids[pidString];
            if (workerCount >= numberOfWorkers) {
                logger.info('not forking replacement for slow process');
                return;
            }
        }
        logger.info('forking replacement for a process that died unexpectedly');
        workerCount++;
        cluster.fork();
    });

    // Note: on Node >= 6 the 'message' handler receives (worker, msg)
    cluster.on("message", function(msg) {
        if (typeof msg.fork != "undefined" && workerCount < 100) {
            logger.info("forking additional process upon slow request");
            slowRequestPids[msg.fork] = 1;
            workerCount++;
            cluster.fork();
        }
    });

    return;
}

webserver.use("/slow", function(req, res) {
    process.send({ fork: String(process.pid) });
    sleep.sleep(300); // the long-running work
    res.send({ response_from: "virtual child" });
    res.on("finish", function() {
        logger.info('process exits, restoring cluster to original size');
        process.exit();
    });
});

How do I exit if there is no new output within a defined timeframe

I'm running a Node.js server which becomes hung due to various factors. Every time this happens, I have to restart the server to resolve the issue. Is there any workaround that exits the process automatically if there is no output within a defined timeframe? I suppose setTimeout() and the process object are needed; however, I'm unaware of how to achieve it elegantly.
Here's how you would exit a process after a timespan with setTimeout(). It's worth noting, though, that you should really resolve the underlying error in your process first.
var input; // your input
var timeout = 60000; // timeout amount (ms)

setTimeout(function() {
    if (!input)
        process.exit(1);
}, timeout);
The process object and setTimeout() are globals; they're automatically available to your module from the Node.js core API.
Edit - ChildProcess
Since your hang originates in this TCP module, take your implementing module and run it using childProcess.fork(). Then you can react to the lack of output from the module and kill the process accordingly.
Create a new file and put the following in it:
// Avoid shadowing the global `process` object
var childProcess = require('child_process');

// Spawn your hanging code; `silent: true` pipes the child's stdio
// back to the parent so tcp.stdout is available
var tcp = childProcess.fork('<pathToYourTcpCode>', [], { silent: true });
var timeout = 60000; // timeout amount (ms)
var output = '';

tcp.stdout.on('data', function(data) {
    output += data;
});

setTimeout(function() {
    if (!output)
        tcp.kill('SIGINT');
}, timeout);
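Since the question is specifically about "no new output within a defined timeframe", a variant worth sketching is a watchdog that resets its deadline whenever fresh output arrives, so the child is killed only after a full quiet minute (same assumptions and '<pathToYourTcpCode>' placeholder as above):
var childProcess = require('child_process');

var tcp = childProcess.fork('<pathToYourTcpCode>', [], { silent: true });
var timeout = 60000; // quiet period allowed before the child is killed
var watchdog;

function resetWatchdog() {
    clearTimeout(watchdog);
    watchdog = setTimeout(function() {
        tcp.kill('SIGINT');
    }, timeout);
}

// Fresh output pushes the deadline back
tcp.stdout.on('data', function(data) {
    resetWatchdog();
});

resetWatchdog(); // start the initial deadline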

Node.js: socket.io - handling multiple connections

I am a newbie at using socket.io in Node.js; however, I have coded a client (web page) / server program to render some statistics data on the client side.
The requirement is to render one box's statistics (out of many). Since the user can open multiple browser windows, we have a scenario where the same box's data can be requested many times:
http://www.tool.com?boxname=box1
As such, I want to spawn only one job for multiple requests for the same box. Below is the logic I have implemented to meet the requirement:
Client Side:
Establishes a connection to the server, creating a data channel:
socket.on(boxname.toLowerCase(), function (data) {
});
So whenever the boxname event delivers data, I receive it, parse it, and render it.
Server Side:
First, call an expect script to copy a monitoring script to the box, where it runs in an infinite loop.
On successful copying of the script to the box, invoke another expect script to start its execution:
exports.ServiceMonitor = function(boxName, res) {
Step 1.
I need to copy the script each time a request comes in; otherwise I would not be able to enter the 'exit' event of this spawned process, where I start the script execution as mentioned below:
var child = spawn('./scripts/copyscriptonbox.sh', [boxName.toLowerCase(), getURL(boxName)]);
Later in the code I keep appending box names to the boxNames variable, which is declared globally. So on a new boxname request, I count the occurrences of the box name in boxNames; if the count is 1 or more, a script is already running on that box:
var noofbox = boxNames.split(boxName.toLowerCase()).length - 1;
child.on('exit', function (code) {
    logger.info('child stream exit: ' + code);
    if (code == 0) {
        boxNames += boxName.toLowerCase();
        logger.info('Box name added: ' + boxNames);
        res.render('boxpage', {}); // render a web page
        io.sockets.on('connection', function (socket) {
            logger.info('Connected to box :' + boxName);
            if (noofbox <= 0)
                schild = spawn('./scripts/boxmon.sh', [boxName.toLowerCase(), getURL(boxName)]);
            schild.on('exit', function (code) {
                logger.info('schild stream exit.');
            });
            schild.stderr.on('data', function (data) {
                logger.info('stderr: ' + data);
            });
            schild.stdout.on('data', function (data) {
                // generate response
            });
            socket.on('disconnect', function () {
                logger.info('Disconnect Killed');
                ServiceMonitorEnd(boxName.toLowerCase());
            });
        });
    }
});
}
The issue is that if I access the URL www.tool.com?boxname=box1 in two browser windows, the first time I get the log only once (Connected to box : box1), but the second time I get the same log twice. Each request after the first prints the log that many more times: with 5 requests the log prints 1 (first) + 2 (second) + 3 (third) + 4 (fourth) + 5 (fifth) times. I understand that when the 'connection' event has been registered x times, each new connection enters the handler that many times.
How can I make the 'connection' handler run only once for each request?
Use socket.once instead of socket.on. It is another EventEmitter method like .on(), but the handler is invoked only once, for the first event. Check the docs. Remember, though, that after that the client will not receive any further such events.
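Applied to the server code above, the suggestion amounts to something like this sketch (variable names as in the question):
// Register the connection handler with .once() so it fires for the
// first connection only, instead of stacking one handler per request
io.sockets.once('connection', function (socket) {
    logger.info('Connected to box :' + boxName);
    // ...spawn boxmon.sh and wire up its output exactly as before...
    socket.on('disconnect', function () {
        ServiceMonitorEnd(boxName.toLowerCase());
    });
});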
