In my code below, the error thrown by processChildOne.js is not getting caught by Parent.js.
// Parent.js
// Fork two worker scripts and exchange messages with each over IPC.
const cp = require('child_process');

const childOne = cp.fork('./processChildOne.js');
const childTwo = cp.fork('./processChildTwo.js');

// Log whatever each child reports back.
childOne.on('message', function (m) {
console.log('received1: ' + m);
});
childTwo.on('message', function (m) {
console.log('received2: ' + m);
});

// Hand each child its unit of work. Listeners above are attached in the same
// tick, so no reply can be missed by sending afterwards.
childOne.send('First Fun');
childTwo.send('Second Fun');
// processChildOne.js
process.on('message', function(m) {
// NOTE(review): `mongoose` is never require()d in this snippet — it must be
// imported for this code to run at all.
var conn = mongoose.createConnection('mongodb://localhost:27017/DB');
// This only LOGS connection errors inside the child; nothing is thrown and
// nothing is sent over IPC, so the parent never sees an 'error' — which is
// exactly why Parent.js catches nothing.
conn.on('error', console.error.bind(console, 'connection error:'));
// Pass results back to parent process
process.send("Fun1 complete");
});
If processChildOne.js fails, how can the error be propagated to the parent so that both processChildOne.js and processChildTwo.js are killed? Also, how can we keep track of how many child processes have finished and how many are still pending?
Thanks in advance
I think what's going on is that your child process is not really throwing an error; it's writing to console.error, so there's no 'error' to catch in the parent process.
You may want to throw an error explicitly in the child, or an error will get thrown anyway by whatever library.. With this, I got the same problem you mention..
node.js
// Fork the child and log any IPC message it sends back.
const child = require('child_process').fork('./p1.js');
child.on('message', function () {
console.log('ya', arguments);
});
p1.js
// p1.js — only writes to stderr; this is NOT a thrown error, so there is
// nothing for the parent process to catch.
console.error('bad stuff man')
But this at least threw the error as expected
p1.js
// Throw a real Error object (not a bare string) so the failure carries a
// stack trace; it is still an uncaught exception that kills the child.
throw new Error("bad stuff man");
This worked for catching the error in the client and sending to parent process.
node.js
// Fork the child; whatever it process.send()s arrives here as the first argument.
const child = require('child_process').fork('./p1.js');
child.on('message', function (payload) {
console.log('error from client', payload);
});
p1.js
try {
// Throw a proper Error object rather than a bare string.
throw new Error("bad stuff man");
} catch (e) {
// Error instances serialize to {} over the IPC channel (JSON), so send the
// message text instead of the Error itself.
process.send(e.message);
}
or for catching ALL errors in the client process and sending them to parent..
p1.js
// Forward any otherwise-fatal error to the parent before the process dies.
process.on('uncaughtException', function (e) {
// Error objects do not survive JSON serialization over IPC — send a string.
process.send(e instanceof Error ? e.message : e);
});
throw new Error("bad stuff man");
For spawning multiple processes, and keeping track of the number, you should be able to do this..
node.js
// node.js
// Fork `numprocesses` workers and keep a live count of how many are running.
var numprocesses = 5;
var running = 0;

for (var i = numprocesses; i--;) {
var cp = require('child_process').fork('./p1.js');
running++;

cp.on('message', function (msg) {
// The child sends "<pid>: <error>", so the reporting worker is identifiable.
// (The original logged the same message twice: once as `pid`, once as arguments[0].)
console.log('error from client', msg);
});

cp.on('exit', function () {
console.log('done');
running--;
// BUG FIX: the original labelled `numprocesses - running` as "remaining",
// but total minus still-running is the number COMPLETED.
console.log('number running', running, ', completed', numprocesses - running);
});
}
p1.js
// p1.js
// Report any uncaught error to the parent, tagged with this worker's pid.
process.on('uncaughtException', function (e) {
process.send(process.pid + ': ' + e);
});
// simulate this to be a long running process of random length
setTimeout(function () {}, Math.floor(Math.random() * 10000));
// Throw a real Error (not a string) so the handler above gets a stack trace.
throw new Error("bad stuff man");
Related
I have a process that uses RabbitMQ and NodeJS to do image processing. Due to the intensive task, I think I have the same issue as the link here https://github.com/squaremo/amqp.node/issues/261
I am trying to figure out how to implement the last comment on that issue.
"Yep. NodeJS is single-threaded, and if you use that thread to do something for a long time, nothing else will happen. As #michaelklishin suggests, the known solution for this general problem is using a child process, or the cluster module."
EDIT:
I Updated the code below with a sample of how I think I can do this with the amqp-connection-manager module. Right now I use a global variable to hold the actual message to be able to ack. I am guessing there is a better way to do this.
//Used to be an example for how to keep the connection thread and the working thread separate
//in order to fix the issue of missing heartbeat intervals due to processing on the same thread
const cluster = require('cluster');
var amqp = require('amqp-connection-manager');
var config = require('./config.json');

var working_queue = "Test_Queue";

// NOTE(review): a single global only works because prefetch(1) guarantees at
// most one unacked message at a time; a map keyed by delivery tag would be safer.
var rabbit_msg_data;

//******* CLUSTER SETUP *******
// The master owns the RabbitMQ connection (so heartbeats are never starved);
// the worker does the CPU-heavy processing.
if (cluster.isMaster) {
    console.log("Master " + process.pid + " is running");

    // BUG FIX: `worker` was assigned without a declaration (implicit global).
    var worker = cluster.fork();

    cluster.on('exit', (worker, code, signal) => {
        if (signal) {
            console.log("worker was killed by signal: " + signal);
            console.log(worker);
        } else if (code !== 0) {
            console.log("worker exited with error code: " + code);
            console.log(worker);
        } else {
            console.log("Worker " + worker.process.pid + " exited successfully");
            console.log(worker);
        }
    });

    //******** RABBIT MQ CONNECTION **********
    // Create a connection manager to rabbitmq; a short heartbeat interval is
    // safe here because this process never blocks on message processing.
    var connection = amqp.connect(config.rabbit_connections_arr, { json: true, heartbeatIntervalInSeconds: 2 });
    connection.on('connect', function () {
        console.log('Connected to rabbitmq');
    });
    connection.on('disconnect', function (params) {
        console.log('Disconnected from rabbitmq:', params.err.stack);
    });

    // Set up a channel listening for messages in the queue.
    var channelWrapper_listening = connection.createChannel({
        setup: function (channel) {
            // `channel` here is a regular amqplib `ConfirmChannel`.
            return Promise.all([
                channel.assertQueue(working_queue, { durable: true }),
                channel.prefetch(1),
                // BUG FIX: the original passed `requeue = false` (an implicit
                // global assignment) where consume() expects an options object.
                // noAck:false keeps manual acking via channelWrapper below.
                channel.consume(working_queue, function (data) {
                    rabbit_msg_data = data;
                    worker.send(data.content.toString());
                }, { noAck: false })
            ]);
        }
    });

    // The worker echoes the message back when it has finished processing;
    // only then do we ack the original delivery.
    worker.on('message', function (msg) {
        console.log("Worker to Master (ack): ", msg);
        channelWrapper_listening.ack(rabbit_msg_data);
    });
}
else //All worker processes (MAIN LOGIC)
{
    console.log("Worker " + process.pid + " started");
    process.on('message', function (msg) {
        console.log("Master to Worker (working): ", msg);
        //send msg back when done working on it (simulated with a 5s delay).
        setTimeout(function () {
            process.send(msg);
        }, 5000);
    });
}
I fork a child process with worker module. I wanted to return the result of a map from the child process after it's done some work to the parent process but it doesn't work. The size of the map is pretty large and I am not sure if that's the reason. How do I send a map to the parent process from child process?
// Spawn the worker script, register for its completion message, then hand it
// a job over IPC. (`cp` and `self` come from the enclosing scope.)
function doWork() {
var child = cp.fork(__dirname + '/work');

// The worker replies with { msg, results } when it finishes.
child.on('message', function (reply) {
console.log('completed: ' + reply.results);
});

child.send({ msg: 'do work', name: self.myname });
}
worker.js
// Collects the output of work(); serialized back to the parent when done.
var results = {};
process.on ("message", function (m){
// work populates results map
work(m.name);
// NOTE(review): if `results` is very large, the serialized IPC payload may
// exceed what the channel can deliver — presumably why the message never
// arrives while the no-results variant works. TODO: confirm payload size;
// consider writing results to a file/stream and sending only its path.
process.send({
msg : 'DONE',
// somehow this doesn't work
results: JSON.stringify(results)
});
});
If I remove result then it works. I could get the msg in the parent process.
// Same handler with the results payload removed — this variant DOES reach the
// parent, which points at the size/serialization of `results` as the culprit.
var results = {};
process.on ("message", function (m){
// work populates results map
work(m.name);
process.send({
msg : 'DONE',
});
});
I have the following piece of code with the "npm start" argument starting a node server instance :
const childProcess = require("child_process");

// Handle to the background server so it can be killed after the tests.
let server;

// running server before tests
before(function (done) {
// BUG FIX: exec() takes a command *string*, not an array.
server = childProcess.exec("npm start", function (err, out) {
// This callback fires only when the process ends; a long-running server
// reaches it only on crash. The original threw and then tried to write
// afterwards (unreachable) and called process.exit(), killing the runner.
if (err instanceof Error)
throw err;
process.stdout.write(out);
});
// Give the server time to boot. TODO: poll the port instead of sleeping.
setTimeout(done, 5000);
});

// Kill the background server once the suite finishes so it does not linger.
after(function () {
if (server) server.kill();
});

//run tests
require("./customer-individual.js");
require("./customer-organization.js");
After the tests run, the node server instance is still running somewhere as a background process. How can I kill it?
You can use the following:
// BUG FIX: exec() expects the command as a single string, not an array.
const child = childProcess.exec("npm start", function (err, out, code) {
// ...
});
child.kill(); // same as child.kill('SIGTERM');
console.log(child.killed); // true once a kill signal has been sent
Or any other signal, please refer to the docs: https://nodejs.org/api/child_process.html#child_process_child_kill_signal
I have a node.js server, which will print out some message in the console and then start the server.
I am creating a automation test by using tap to check the message in the console.log and check if server is started, i.e. there is a PID generated.
I tried 2 different methods -child_process.exec and child_process.spawn
1. Use child_process.exec with a call back function.
This does not work as the server is long running and will not even
go to the call back, so I cannot even check for any stdout.
Then I use child_process.exec without call back, this solves the
first issue where I can now get the message back from stdout.
The second issue is that the test will hang since the server is long running and will not terminate by itself.
code snippet:
var exec = require('child_process').exec;

tap.test('test server start', function (t) {
// BUG FIX: exec() takes one command string; the original's args array was
// silently treated as the *options* object, so only bare `node` (a REPL)
// was launched. Also declare the variable — it was an implicit global.
var childProcess = exec('node server');
console.log('[exec] childProcess.pid: ', childProcess.pid);
t.notEqual(childProcess.pid, undefined);

childProcess.stdout.on('data', function (data) {
console.log('[exec] stdout: ', data.toString());
t.match(data.toString(), "Example app listening at http://:::3000");
t.end();
// The server never exits on its own — stop it so the test can finish.
childProcess.kill('SIGTERM');
});

childProcess.stderr.on('data', function (data) {
console.log('[exec] stderr: ', data.toString());
});

childProcess.on('close', function (code) {
if (code != null)
console.log('child process exited with code ' + code);
});
});
use child_process.spawn -code snippet
var spawn = require('child_process').spawn;

tap.test('test server start', function (t) {
// Declare locally — this was an implicit global in the original.
// spawn(cmd, argsArray) is the correct signature, unlike the exec version.
var childProcess = spawn('node', ['server']);
console.log('[spawn] childProcess.pid: ', childProcess.pid);
t.notEqual(childProcess.pid, undefined);

childProcess.stdout.on('data', function (data) {
console.log('[spawn] stdout: ', data.toString());
t.match(data.toString(), "Example app listening at http://:::3000");
t.end();
// A long-running server must be killed explicitly or the test hangs.
childProcess.kill('SIGTERM');
});

childProcess.stderr.on('data', function (data) {
console.log('[spawn] stderr: ', data.toString());
});

childProcess.on('close', function (code) {
if (code != null)
console.log('child process exited with code ' + code);
});
});
In both 1 & 2, the test will hang since the server is long running,
I need to use child_process.kill() to terminate the test
Is there a better method to achieve this?
Thanks in advance for any improvements.
Well, I think that you can check if the server is alive in a different way (without spawning a new process).
For example, you can start your server waiting for connections:
const net = require('net');

// Track every live socket so they can all be torn down later.
var connections = {};

var server = net.createServer(function (conn) { });
server.listen(3333);

server.on('connection', function (socket) {
// Remote address:port uniquely identifies a client socket.
var key = socket.remoteAddress + ':' + socket.remotePort;
connections[key] = socket;
socket.on('close', function () {
delete connections[key];
});
});
Then, connect some clients (or just one) to the server:
// Open ten client connections and log the running count of successes.
var connected = 0;
var launched = 0;
while (launched < 10) {
launched++;
var client = net.connect(3333);
client.on('connect', function () {
connected++;
console.log(connected);
});
}
So, if you are be able to connect to the server, then your server is alive.
And finally, when you want to close the server, just create a new function like this one:
// Stop accepting new connections, then forcibly drop every live socket
// (server.close() alone waits for existing connections to end on their own).
var destroy = function () {
server.close(function () {
console.log('ok');
});
Object.keys(connections).forEach(function (key) {
connections[key].destroy();
});
};
Call it for example after 10 successful connections to the server. Inside the for loop:
// Connect ten clients; once the tenth has connected, tear everything down.
var remaining = 10;
while (remaining-- > 0) {
var client = net.connect(3333);
client.on('connect', function () {
connected++;
if (connected === 10) {
destroy();
}
});
}
This is a very basic example, but I think that it's enough to understand another way to do what you want to do.
I am trying to spawn a child process that performs CPU-intensive calculations through a job queue with Kue. My code at the moment looks like this:
consumer.js
var kue = require('kue');
var util = require('util');
var spawn = require('child_process').spawn;

var jobs = kue.createQueue();

// Process up to two 'calc' jobs concurrently, each in its own R child process.
jobs.process('calc', 2, function (job, done) {
var work = spawn('Rscript', ['opti2.R', job.data.file]);
var stderr = '';

// Buffer stderr so the full text can be reported to Kue if the job fails,
// while still streaming it into the job log as it arrives.
work.stderr.on('data', function (data) {
stderr += data;
job.log('stderr: ' + data);
});
work.stdout.on('data', function (data) {
job.log('stdout: ' + data);
});

// 'close' (not 'exit') waits for the child's stdio streams to finish.
work.on('close', function (code, signal) {
console.log('child process exited with code ' + code + ' with signal ' + signal);
if (code != 0) {
// Non-zero exit: fail the job with the collected stderr.
done(new Error(stderr || 'Rscript exited with code ' + code));
} else {
// BUG FIX: the original called done(Error()) here, marking every
// SUCCESSFUL job as failed. Success must call done() with no error.
done();
}
});
});
The code somewhat does what I would like it to do, but is there a better way to report the job as failed (to Kue) and get the stderr from the spawned process?
You can use job.log method to send data directly to Kue.
I would also recommend you to switch from .spawn to .exec, because it returns stdout and stderr as strings in its final callback along with a good error, which suits your needs well:
var exec = require('child_process').exec;

// exec() buffers stdout/stderr into complete strings and hands back a real
// Error on failure, which is exactly what Kue's done() callback wants.
jobs.process('calc', 2, function (job, done) {
// NOTE(review): job.data.file is interpolated into a shell command — make
// sure it cannot contain shell metacharacters (command injection risk).
var command = 'Rscript opti2.R ' + job.data.file;
exec(command, function (error, stdout, stderr) {
if (stdout.length > 0) {
job.log('stdout: ' + stdout);
}
if (stderr.length > 0) {
job.log('stderr: ' + stderr);
}
done(error);
});
});
Though solution should work with .spawn as well: simply replace each console.log call in your code with job.log.
Though, you may want to bufferize your stderr in order to send it to Kue in one chunk:
jobs.process('calc', 2, function (job, done) {
var work = spawn('Rscript', ['opti2.R', job.data.file]);
var stderr = '';

// Accumulate stderr so it can be sent to Kue in one chunk on failure.
work.stderr.on('data', function (data) {
stderr += data;
});
work.stdout.on('data', function (data) {
job.log(data); // sending arriving `stdout` chunks as normal log events
});

// 'close' fires after the child's stdio streams have all ended.
work.on('close', function (code, signal) {
console.log('child process exited with code ' + code + ' with signal ' + signal);
if (code != 0) {
// BUG FIX: pass a real Error (not a bare string) to done(); Kue and
// standard Node error handling expect Error instances.
done(new Error(stderr));
} else {
done();
}
});
});
I would also recommend using close event instead of exit, because it waits for child's stdio streams.
For more information see Event: 'exit' docs:
This event is emitted after the child process ends.
Note that the child process stdio streams might still be open.
and Event: 'close' docs:
This event is emitted when the stdio streams of a child process have
all terminated.