How can I manage multiple concurrent child processes that have been forked?
In this example, start_child() can be invoked multiple times, and each invocation can run indefinitely. When forking an arbitrary number of child processes like this, how can I communicate with / address each individual child process? Let's say I have 3 forked child processes running, and they keep running indefinitely, but I want to kill (or send a message to) child process number 2. How would I do that?
If stop_child() is invoked, it kills all of the currently running child processes. How do I refactor this code so that I can call stop_child() on an individual child process?
let app = require('express')();
let server = require('http').Server(app);
let io = require('socket.io')(server);
let fork = require('child_process').fork;
server.listen(80);
app.get('/', function(request, response) {
response.sendFile(__dirname + '/index.html');
});
io.on('connection', function(socket) {
socket.on('start_child', function () {
start_child();
});
socket.on('stop_child', function () {
child.kill();
});
});
let child;
function start_child() {
child = fork('./child_script.js');
//conditional logic to either run
//start_child() again or let it end
}
UPDATE
I tried this, based on some of the comments. But if I launch 3 processes and then call, for example, child[0].kill(), I get an error: Cannot read property 'kill' of undefined. I'm guessing my problem is that I'm not correctly passing the i variable to the io.on() call:
let app = require('express')();
let server = require('http').Server(app);
let io = require('socket.io')(server);
let fork = require('child_process').fork;
let i = 0;
let child = [];
server.listen(80);
app.get('/', function(request, response) {
response.sendFile(__dirname + '/index.html');
});
io.on('connection', function(socket) {
socket.on('start_child', function (i) {
start_child(i++);
});
//this is hard coded just for testing
socket.on('stop_child', function () {
child[0].kill();
});
});
function start_child(i) {
child[i] = fork('./child_script.js');
//conditional logic to either run
//start_child() again or let it end
}
UPDATE #2
Okay, I figured out that I need to send the incrementing variable from the client side, passed through an object coming from the emit call. Now when I call child[0].kill() there is no error. The problem is that the child process is not killed:
server.js
let app = require('express')();
let server = require('http').Server(app);
let io = require('socket.io')(server);
let fork = require('child_process').fork;
let child = [];
server.listen(80);
app.get('/', function(request, response) {
response.sendFile(__dirname + '/index.html');
});
io.on('connection', function(socket) {
socket.on('start_child', function (count) {
let num = count.count;
start_child(num);
});
//this is hard coded just for testing
socket.on('stop_child', function (count) {
let num = count.count;
child[num].kill();
});
});
function start_child(num) {
child[num] = fork('./child_script.js');
//conditional logic to either run
//start_child() again or let it end
}
index.html
$(function () {
let socket = io();
let i = 0;
$('#start').on('click', function () {
socket.emit('start_child', {"count": i++});
});
$('#stop').on('click', function () {
//the count is hard coded here just for testing purposes
socket.emit('stop_child', {"count": 0});
});
});
FINAL UPDATE - WITH RESOLUTION
Update #2 (right above this) is actually the solution. The problem I was having after that (where the child.kill() call didn't seem to do anything) was caused by a piece of code I had left out (behind the comment: 'conditional logic to either run start_child() again or let it end').
This is what was in there:
if (condition) {
setTimeout(start_child, 5000);
} else {
console.log('this child process has ended');
}
And this is what I changed it to (basically, I just had to pass the incrementing variable to the start_child() function so that it would have the same place in the child array when it restarted):
if (condition) {
setTimeout(function() {
start_child(num)
}, 5000);
} else {
console.log('this child process has ended');
}
fork() returns a ChildProcess object, which exposes methods and events for interprocess communication with that process. So you have to save each ChildProcess object from when you call start_child() and then use the appropriate object in order to communicate with the desired process.
The events and methods of the ChildProcess object are documented in the Node.js child_process documentation, along with numerous code examples.
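For example, a minimal sketch of that bookkeeping (the Map, the id counter, and the helper names here are illustrative, not from the question's code): keep every ChildProcess keyed by an id, and look it up whenever you want to address one child.
const { fork } = require('child_process');
const children = new Map(); // id -> ChildProcess
let nextId = 0;
function start_child() {
    const id = nextId++;
    const child = fork('./child_script.js');
    children.set(id, child);
    // drop the entry once the child exits so the map stays accurate
    child.on('exit', () => children.delete(id));
    return id;
}
function stop_child(id) {
    const child = children.get(id);
    if (child) child.kill(); // sends SIGTERM by default
}
function message_child(id, msg) {
    const child = children.get(id);
    if (child) child.send(msg); // uses the IPC channel created by fork()
}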
To send a message to the child process, use the following example:
parent.js
const { fork } = require('child_process');
const forked = fork('child.js');
forked.on('message', (msg) => {
console.log('Message from child', msg);
});
forked.send({ hello: 'world' });
child.js
process.on('message', (msg) => {
console.log('Message from parent:', msg);
});
let counter = 0;
setInterval(() => {
process.send({ counter: counter++ });
}, 1000);
For further information, refer to this article:
https://medium.freecodecamp.com/node-js-child-processes-everything-you-need-to-know-e69498fe970a
Related
I am making use of "socket.io-client" and "socket.io stream" to make a request and then stream some data. I have the following code that handles this logic
Client Server Logic
router.get('/writeData', function(req, res) {
var io = req.app.get('socketio');
var nameNodeSocket = io.connect(NAMENODE_ADDRESS, { reconnect: true });
var nameNodeData = {};
async.waterfall([
checkForDataNodes,
readFileFromS3
], function(err, result) {
if (err !== null) {
res.json(err);
}else{
res.json("Finished Writing to DN's");
}
});
function checkForDataNodes(cb) {
nameNodeSocket.on('nameNodeData', function(data) {
nameNodeData = data;
console.log(nameNodeData);
cb(null, nameNodeData);
});
if (nameNodeData.numDataNodes === 0) {
cb("No datanodes found");
}
}
function readFileFromS3(nameNodeData, cb) {
for (var i in nameNodeData['blockToDataNodes']) {
var IP = nameNodeData['blockToDataNodes'][i]['ipValue'];
var dataNodeSocket = io.connect('http://'+ IP +":5000");
var ss = require("socket.io-stream");
var stream = ss.createStream();
var byteStartRange = nameNodeData['blockToDataNodes'][i]['byteStart'];
var byteStopRange = nameNodeData['blockToDataNodes'][i]['byteStop'];
paramsWithRange['Range'] = "bytes=" + byteStartRange.toString() + "-" + byteStopRange.toString();
//var file = require('fs').createWriteStream('testFile' + i + '.txt');
var getFileName = nameNodeData['blockToDataNodes'][i]['key'].split('/');
var fileData = {
'mainFile': paramsWithRange['Key'].split('/')[1],
'blockName': getFileName[1]
};
ss(dataNodeSocket).emit('sendData', stream, fileData);
s3.getObject(paramsWithRange).createReadStream().pipe(stream);
//dataNodeSocket.disconnect();
}
cb(null);
}
});
Server Logic (that gets the data)
var dataNodeIO = require('socket.io')(server);
var ss = require("socket.io-stream");
dataNodeIO.on('connection', function(socket) {
console.log("Succesfully connected!");
ss(socket).on('sendData', function(stream, data) {
var IP = data['ipValue'];
var blockName = data['blockName'];
var mainFile = data['mainFile'];
dataNode.makeDir(mainFile);
dataNode.addToReport(mainFile, blockName);
stream.pipe(fs.createWriteStream(mainFile + '/' + blockName));
});
});
How can I properly disconnect the connections in the function readFileFromS3? I have noticed that using dataNodeSocket.disconnect() at the end does not work, as I cannot verify the data was received on the 2nd server. But if I comment it out, I can see the data being streamed to the second server.
My objective is to close the connections on the client server side.
It appears that the main problem with closing the socket is that you weren't waiting for the stream to be done writing before trying to close the socket. So, because the writing is all asynchronous and finishes sometime later, you were trying to close the socket before the data had been written.
Also, because you were putting asynchronous operations inside a for loop, you were running all your operations in parallel, which may not be what you want: it makes error handling more difficult and increases server load.
Here's the code I would suggest that does the following:
Create a function streamFileFromS3() that streams a single file and returns a promise that will notify when it's done.
Use await in a for loop with that streamFileFromS3() to serialize the operations. You don't have to serialize them, but then you would have to change your error handling to figure out what to do if one fails while the others are already running, and you'd have to be more careful about concurrency issues.
Use try/catch to catch any errors from streamFileFromS3().
Add error handling on the stream.
Change all occurrences of data['propertyName'] to data.propertyName. The only time you need to use brackets is if the property name contains a character that is not allowed in a JavaScript identifier, or if the property name is in a variable. Otherwise, dot notation is preferred.
Add socket.io connection error handling logic for both socket.io connections.
Set the returned status to 500 when there's an error processing the request.
So, here's the code for that:
const ss = require("socket.io-stream");
router.get('/writeData', function(req, res) {
const io = req.app.get('socketio');
function streamFileFromS3(ip, data) {
return new Promise((resolve, reject) => {
const dataNodeSocket = io.connect(`http://${ip}:5000`);
dataNodeSocket.on('connect_error', reject);
dataNodeSocket.on('connect_timeout', () => {
reject(new Error(`timeout connecting to http://${ip}:5000`));
});
dataNodeSocket.on('connect', () => {
// dataNodeSocket connected now
const stream = ss.createStream().on('error', reject);
paramsWithRange.Range = `bytes=${data.byteStart}-${data.byteStop}`;
const filename = data.key.split('/')[1];
const fileData = {
'mainFile': paramsWithRange.Key.split('/')[1],
'blockName': filename
};
ss(dataNodeSocket).emit('sendData', stream, fileData);
// get S3 data and pipe it to the socket.io stream
s3.getObject(paramsWithRange).createReadStream().on('error', reject).pipe(stream);
stream.on('close', () => {
dataNodeSocket.disconnect();
resolve();
});
});
});
}
function connectError(msg) {
res.status(500).send(`Error connecting to ${NAMENODE_ADDRESS}`);
}
const nameNodeSocket = io.connect(NAMENODE_ADDRESS, { reconnect: true });
nameNodeSocket.on('connect_error', connectError).on('connect_timeout', connectError);
nameNodeSocket.on('nameNodeData', async (nameNodeData) => {
try {
for (let item of nameNodeData.blockToDataNodes) {
await streamFileFromS3(item.ipValue, item);
}
res.json("Finished Writing to DN's");
} catch(e) {
res.status(500).json(e);
}
});
});
Other notes:
I don't know what paramsWithRange is, as it is not declared here. When you were doing everything in parallel, it was shared among all the connections, which is asking for a concurrency issue. In my serialized implementation it's probably safe to share, but the way it stands now it's a concurrency issue waiting to happen.
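If paramsWithRange is an S3 getObject params object, one way to avoid sharing it is to derive a fresh copy per call. A hypothetical sketch (baseParams and its fields are assumptions, not from the question):
function makeParams(baseParams, data) {
    // copy the shared base params and add the per-file Range instead of mutating a shared object
    return Object.assign({}, baseParams, {
        Range: `bytes=${data.byteStart}-${data.byteStop}`
    });
}
// inside streamFileFromS3():
// const params = makeParams(baseParams, data);
// s3.getObject(params).createReadStream().on('error', reject).pipe(stream);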
I have a server
var connect = require('connect');
var serveStatic = require('serve-static');
var HTMLServer = function(path){
this.path = path;
this.server = connect().use(serveStatic(this.path));
this.startServer = function(callback){
this.server = this.server.listen(8080, callback);
};
this.stopServer = function(callback){
this.server.close(callback);
}
}
And I use it as follows:
var thisServer = new HTMLServer(__dirname);
thisServer.startServer(function(){
console.log('Server running on 8080...');
setTimeout(function(){
thisServer.stopServer(function(){
console.log('Server closed');
});
}, 3000);
});
As expected, server starts and after 3000 milliseconds it stops.
But if I make a request to this server within those 3000 milliseconds, stopServer is still called, yet the server does not close.
I'm sure the line this.server.close(callback); gets executed, but it doesn't close the server as I expect.
How can I fix that?
Is a request to the server changing the server instance in a way that needs a special handling?
Later edit:
Now that I've left the code running, I'd like to add some precision: the server does eventually get closed, not instantly but after a delay I don't understand, of no longer than 5 minutes.
So the close operation seems to be delayed. Can I make it instant somehow?
While #jfriend00 was correct that node.js keeps running until all existing sockets are finished, the process.exit solution was a bit too radical for my use case and I needed a cleaner way to close the server gracefully.
Looking into getConnections only added more confusion, since it didn't behave as expected (for example, it returned 2 connections even when I hadn't made any request).
I also looked into server.listening, but it returned false even while the server was still accepting requests, perhaps connections from a client that had made requests before the server was closed.
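For reference, server.getConnections() is asynchronous and counts all open sockets, including idle keep-alive connections a browser may have opened earlier, which could explain the unexpected count of 2. A quick sketch of logging it:
server.getConnections(function (err, count) {
    if (err) return console.error(err);
    console.log('open sockets:', count); // includes idle keep-alive sockets, not just in-flight requests
});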
Anyway, the solution for me was to use the http-shutdown lib which essentially adds the following .shutdown method to your server object.
function addShutdown(server) {
var connections = {};
var isShuttingDown = false;
var connectionCounter = 0;
function destroy(socket, force) {
if (force || (socket._isIdle && isShuttingDown)) {
socket.destroy();
delete connections[socket._connectionId];
}
};
function onConnection(socket) {
var id = connectionCounter++;
socket._isIdle = true;
socket._connectionId = id;
connections[id] = socket;
socket.on('close', function() {
delete connections[id];
});
};
server.on('request', function(req, res) {
req.socket._isIdle = false;
res.on('finish', function() {
req.socket._isIdle = true;
destroy(req.socket);
});
});
server.on('connection', onConnection);
server.on('secureConnection', onConnection);
function shutdown(force, cb) {
isShuttingDown = true;
server.close(function(err) {
if (cb) {
process.nextTick(function() { cb(err) });
}
});
Object.keys(connections).forEach(function(key) {
destroy(connections[key], force);
});
};
server.shutdown = function(cb) {
shutdown(false, cb);
};
server.forceShutdown = function(cb) {
shutdown(true, cb);
};
return server;
};
With this function, I can update my server as follows, and now stopServer works as expected:
var HTMLServer = function(path){
this.path = path;
this.server = connect().use(serveStatic(this.path));
this.startServer = function(callback){
this.server = addShutdown(this.server.listen(8080, callback));
};
this.stopServer = function(callback){
console.log("I was called");
this.server.shutdown(callback);
}
}
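This works because server.close() only stops the server from accepting new connections; sockets held open by HTTP keep-alive are allowed to linger until they time out on their own, which is why the original stopServer appeared delayed. The shutdown helper above tracks which sockets are idle and destroys them explicitly.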
I am stuck here due to a simple event-related issue. Here is the issue:
I have created a cluster using cluster.js and forked server.js from cluster.js.
I have put a timer in cluster.js, and after every 1 min I trigger an event 'testTimer'. I have used an events file to do it.
I am trying to capture this event 'testTimer' from the child process, using the same events file imported into server.js and doing a .on('testTimer', callback).
However, the event is not captured in any of the processes. I have tried making the event global and assigning the event globally to a symbol, but I was unable to get it to work or capture the event either.
Here is the code:
cluster.js (child process creator)
...require > events.js...
... create cluster logic...
setInterval(function () {
evt.emit('testTimer', {tester: 'test'});
evt.tester();
}, 1000);
server.js (child process)
...require > events.js...
evt.on('testTimer', function (data) {
console.log('Starting Sync ', data);
});
events.js (common file for events)
var util = require("util");
var EventEmitter = require("events").EventEmitter;
function test () {
EventEmitter.call(this);
}
util.inherits(test, EventEmitter);
// define methods after util.inherits so they are not lost when the prototype is set up (older Node versions replace it)
test.prototype.tester = function () {
this.emit('testTimer', {missed: 'this'});
};
module.exports = test;
EventEmitter instances can't reach beyond the bounds of a process. If you want to communicate between parent and children, use worker.send():
// cluster.js
const cluster = require('cluster');
setInterval(function () {
for (const id in cluster.workers) {
cluster.workers[id].send({ type : 'testTimer', data : { tester : 'test' }});
}
}, 1000);
// server.js
process.on('message', function(message) {
if (message.type === 'testTimer') {
console.log('Starting Sync ', message.data);
}
})
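The same IPC channel works in the other direction: a worker can reply with process.send(), and the master receives it via the cluster 'message' event together with the worker that sent it. A sketch (the message shape here is an assumption):
// server.js (worker): report back to the master
process.send({ type: 'syncDone', data: { pid: process.pid } });
// cluster.js (master): 'message' fires with the sending worker
cluster.on('message', function (worker, message) {
    if (message.type === 'syncDone') {
        console.log('worker', worker.id, 'finished sync', message.data);
    }
});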
I have a requirement to run a task after booting an HTTP server, but I cannot check whether the server has booted.
Instead, I used a setTimeout to assume the server is booted after a 2-second interval.
Could anybody who knows a better solution help me?
Great thanks!
My code is as follows:
// main.js
var q = require('q');
var childProcess = require('child_process');
function exec(command) {
var defer = q.defer();
var child = childProcess.exec(command, function (err, stdout, stderr) {
if (err) {
return defer.reject(err);
}
});
child.stdout.pipe(process.stdout);
// TODO: find another graceful way to check child process ends
setTimeout(function () {
defer.resolve();
}, 2000);
return defer.promise;
}
exec('"' + process.execPath + '" app.js')
.then(function () {
// my task here;
console.log('my task')
});
// app.js
var http = require('http'),
server;
server = http.createServer(function (req, res) {
// serve html;
res.end('hello world');
});
server.listen(9999);
// run
$> node main.js
A good solution would be to try to connect to the webserver until it responds. Since you're using Q, you can take a look at the code from this answer, which enables you to retry an operation until it succeeds.
function retry(operation, delay) {
return operation().catch(function(reason) {
// Note: I replaced delay * 2 by delay, it will be better in your case
return Q.delay(delay).then(retry.bind(null, operation, delay));
});
}
Then you could do something like
startMyWebServer()
.then (function () {
return retry(connectToWebServer, 500).timeout(10000);
})
.then(function () {
console.log("Webserver ready")
})
.catch(function (err) {
console.log("Unable to contact webserver")
});
The call to retry will retry to connect to the webserver if the previous connection failed, with a delay of 500ms. If after 10s it didn't succeed, the promise will be rejected.
The connectToWebServer function should be a function that tries to connect to your webserver and returns a promise, i.e. something like
function connectToWebServer() {
var d = q.defer();
request('http://your-ip:your-port', function (err, res) {
if (err || res.statusCode != 200) {
return d.reject("Error : "+(err || "status code "+res.statusCode));
}
d.resolve();
});
return d.promise;
}
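Alternatively, if you can modify app.js, you can avoid polling entirely: fork the server with an IPC channel and have it announce readiness from the listen() callback. A sketch under that assumption (the 'ready' message is illustrative):
// app.js - tell the parent once the server is actually listening
server.listen(9999, function () {
    if (process.send) process.send('ready'); // process.send only exists when forked with an IPC channel
});
// main.js - fork instead of exec and resolve on the ready message
var fork = require('child_process').fork;
function startServer() {
    var defer = q.defer();
    var child = fork('app.js', [], { silent: true }); // silent: true exposes child.stdout
    child.stdout.pipe(process.stdout);
    child.once('message', function (msg) {
        if (msg === 'ready') defer.resolve(child);
    });
    child.once('error', defer.reject);
    return defer.promise;
}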
I have a node.js server which prints some messages to the console and then starts the server.
I am creating an automation test using tap to check the message in the console.log and to check whether the server has started, i.e. that a PID is generated.
I tried 2 different methods: child_process.exec and child_process.spawn.
1. Use child_process.exec with a callback function.
This does not work, as the server is long-running and never even reaches the callback, so I cannot check any stdout.
Then I used child_process.exec without a callback; this solves the first issue, and I can now get the message back from stdout.
The second issue is that the test hangs, since the server is long-running and will not terminate by itself.
code snippet:
var exec = require('child_process').exec;
tap.test('test server start', function(t) {
var childProcess = exec('node server'); // exec takes a single command string, not an args array
console.log('[exec] childProcess.pid: ', childProcess.pid);
t.notEqual(childProcess.pid, undefined);
childProcess.stdout.on('data', function (data) {
console.log('[exec] stdout: ', data.toString());
t.match(data.toString(), "Example app listening at http://:::3000");
t.end();
childProcess.kill('SIGTERM');
});
childProcess.stderr.on('data', function (data) {
console.log('[exec] stderr: ', data.toString());
});
childProcess.on('close', function (code) {
if (code != null)
console.log('child process exited with code '+ code);
});
});
2. Use child_process.spawn. Code snippet:
var spawn = require('child_process').spawn;
tap.test('test server start', function(t) {
var childProcess = spawn('node', ['server']);
console.log('[spawn] childProcess.pid: ', childProcess.pid);
t.notEqual(childProcess.pid, undefined);
childProcess.stdout.on('data', function (data) {
console.log('[spawn] stdout: ', data.toString());
t.match(data.toString(), "Example app listening at http://:::3000");
t.end();
childProcess.kill('SIGTERM');
});
childProcess.stderr.on('data', function (data) {
console.log('[spawn] stderr: ', data.toString());
});
childProcess.on('close', function (code) {
if (code != null)
console.log('child process exited with code '+ code);
});
});
In both 1 & 2, the test will hang since the server is long running,
I need to use child_process.kill() to terminate the test
Is there a better method to achieve this?
Thanks in advance for any improvements.
Well, I think that you can check if the server is alive in a different way (without spawning a new process).
For example, you can start your server waiting for connections:
const net = require('net');
var connections = {};
var server = net.createServer(function(conn) { });
server.listen(3333);
server.on('connection',function(conn) {
var key = conn.remoteAddress + ':' + conn.remotePort;
connections[key] = conn;
conn.on('close',function() {
delete connections[key];
});
});
Then, connect some clients (or just one) to the server:
var connected = 0;
for (var i = 0;i < 10;i++) {
var client = net.connect(3333);
client.on('connect',function() {
connected++;
console.log(connected);
});
}
So, if you are able to connect to the server, then your server is alive.
And finally, when you want to close the server, just create a new function like this one:
var destroy = function ()
{
server.close(function() {
console.log('ok');
});
for (var key in connections) {
connections[key].destroy();
}
}
Call it for example after 10 successful connections to the server. Inside the for loop:
for (var i = 0; i < 10; i++) {
var client = net.connect(3333);
client.on('connect',function() {
connected++;
if (connected === 10) {
destroy();
}
});
}
This is a very basic example, but I think that it's enough to understand another way to do what you want to do.
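Applied to the original tap test, the same liveness idea can also be an HTTP probe instead of matching a stdout line. A sketch assuming the server answers on port 3000, as in the log message:
var http = require('http');
// poll until the server answers, then let the test assert and clean up
function waitForServer(port, retries, cb) {
    http.get({ port: port, path: '/' }, function (res) {
        res.resume(); // drain the response so the socket is released
        cb(null, res.statusCode);
    }).on('error', function (err) {
        if (retries <= 0) return cb(err);
        setTimeout(function () {
            waitForServer(port, retries - 1, cb);
        }, 200);
    });
}
// usage inside the test:
// waitForServer(3000, 25, function (err) {
//     t.error(err, 'server answered');
//     childProcess.kill('SIGTERM');
//     t.end();
// });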