I have an app (master) which distributes work to n workers. Inside the worker JS I have hooked the console output as follows:
console._log = console.log;
console._error = console.error;

console.log = (...args) => {
    process.send({
        cmd: 'log',
        channel: 'out',
        data: args.join(' ')
    });
};

console.error = (...args) => {
    process.send({
        cmd: 'log',
        channel: 'err',
        data: args.join(' ')
    });
};
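One thing I already suspect (sketch below, untested): process.send can throw or silently drop messages once the IPC channel to the master is gone, so a defensive version of the hook would fall back to the saved originals when the channel is closed:

// Hypothetical helper: only forward over IPC while the channel is alive,
// otherwise fall back to the original console methods.
function safeSend(channel, args, fallback) {
    if (process.connected) {
        process.send({ cmd: 'log', channel, data: args.join(' ') });
    } else {
        fallback.apply(console, args);
    }
}

console.log = (...args) => safeSend('out', args, console._log);
console.error = (...args) => safeSend('err', args, console._error);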
The master is responsible for logging all incoming messages to a file in addition to stdout/stderr. This is accomplished with the following code & module:
const fs = require('fs');
const intercept = require('intercept-stdout');
const stripAnsi = require('strip-ansi');

const unhook_intercept = intercept(function (str) {
    // stdout
    fs.appendFileSync(lib.logOutFile(), stripAnsi(str));
}, function (str) {
    // stderr
    fs.appendFileSync(lib.logErrFile(), stripAnsi(str));
});
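If the synchronous appendFileSync calls are the problem (every intercepted chunk blocks the master's event loop), I could switch to append-mode write streams; a sketch, assuming lib.logOutFile() / lib.logErrFile() return stable paths:

// Open the log files once in append mode; writes are queued internally
// instead of blocking the event loop on every chunk.
const outStream = fs.createWriteStream(lib.logOutFile(), { flags: 'a' });
const errStream = fs.createWriteStream(lib.logErrFile(), { flags: 'a' });

const unhook_intercept = intercept(
    (str) => { outStream.write(stripAnsi(str)); },
    (str) => { errStream.write(stripAnsi(str)); }
);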
I have noticed in the logs that one worker stopped sending messages after about 1.5 days. In the master I have worker exit detection:
cluster.on('exit', (worker, code, signal) => {
    if (signal) {
        console.log(`${lib.dateTimeStamp()} - ${chalk.magenta('[')}${chalk.cyan(worker.process.pid)}${chalk.magenta(']')}\tWorker: ${chalk.yellow(`was killed by signal: ${signal}`)}`);
    } else if (code !== 0) {
        console.error(`${lib.dateTimeStamp()} - ${chalk.magenta('[')}${chalk.cyan(worker.process.pid)}${chalk.magenta(']')}\tWorker: ${chalk.red(`exited with error code: ${code}`)}`);
        let newWorker = cluster.fork();
        let data = work[worker.process.pid];
        let d = new Date();
        status[worker.process.pid].status = 'dead';
        status[newWorker.process.pid] = {
            started: `${d.toLocaleDateString()} ${d.toLocaleTimeString()}`,
            status: 'alive'
        };
        delete work[worker.process.pid];
        work[newWorker.process.pid] = data;
        newWorker.send({
            options: cfg.options,
            websites: work[newWorker.process.pid]
        });
    } else {
        delete work[worker.process.pid];
        delete status[worker.process.pid];
        console.log(`${lib.dateTimeStamp()} - ${chalk.magenta('[')}${chalk.cyan(worker.process.pid)}${chalk.magenta(']')}\tWorker: ${chalk.green('exited successfully')}`);
    }
});
The exit event was never triggered, as far as I can see in the logs. At the moment I have only assumptions, and I'd like your opinions. Could it be because of:
• The synchronous file logging?
• A worker disconnecting on its own?
• A worker exiting while the exit event was missed?
Your opinion... (one way I could narrow this down is sketched below)
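To tell these cases apart, I am considering something like the following sketch (untested): listen for the disconnect event as well, and add a heartbeat so a silently dead or hung worker shows up in the logs:

// master: 'disconnect' fires when the IPC channel closes, even if
// no 'exit' event is ever observed afterwards.
cluster.on('disconnect', (worker) => {
    console.log(`${lib.dateTimeStamp()} - worker ${worker.process.pid} disconnected`);
});

// worker: hypothetical heartbeat, sent every 30 seconds.
setInterval(() => process.send({ cmd: 'heartbeat' }), 30000);

// master: flag workers that have gone quiet.
const lastSeen = {};
cluster.on('message', (worker, msg) => {
    if (msg.cmd === 'heartbeat') lastSeen[worker.process.pid] = Date.now();
});
setInterval(() => {
    for (const pid of Object.keys(lastSeen)) {
        if (Date.now() - lastSeen[pid] > 60000) {
            console.error(`${lib.dateTimeStamp()} - worker ${pid} has been silent for over a minute`);
        }
    }
}, 60000);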
I have a socket connection using the zmq.js client:
// routerSocket.ts
const zmqRouter = zmq.socket("router");
zmqRouter.bind(`tcp://*:${PORT}`);

zmqRouter.on("message", async (...frames) => {
  try {
    const { measurementData, measurementHeader } =
      await decodeL2Measurement(frames[frames.length - 1]);
    addHeaderInfo(measurementHeader);
    // Add cell id to the list
    process.send(
      { measurementData, measurementHeader, headerInfoArrays },
      (e: any) => {
        return;
      },
    );
  } catch (e: any) {
    return;
  }
});
I run this socket connection within a forked process in index.ts:
// index.ts
const zmqProcess = fork("./src/routerSocket");

zmqProcess.on("message", async (data: ZmqMessage) => {
  if (data !== undefined) {
    const { measurementData, measurementHeader, headerInfoArrays } = data;
    headerInfo = headerInfoArrays;
    emitHeaderInfo(headerInfoArrays);
    // Emit the message to subscribers of the rnti
    const a = performance.now();
    io.emit(
      measurementHeader.nrCellId,
      JSON.stringify({ measurementData, measurementHeader }),
    );
    // Emit the message to the all channel
    io.emit("all", JSON.stringify({ measurementData, measurementHeader }));
    const b = performance.now();
    console.log("time to emit: ", b - a);
  }
});
Data comes in rapidly, about one message per millisecond, to the zmqRouter object, which processes it and sends it on to the main process, where I use socket.io to distribute the data to clients. But as soon as the stream begins, Node can't do anything else; even a setInterval log stops firing.
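One idea I have (a sketch, untested, assuming clients can tolerate ~50 ms of extra latency) is to batch messages in the child and flush them on an interval, so the per-message process.send and serialization overhead drops:

// routerSocket.ts - hypothetical batching: collect decoded messages
// and forward them to the parent at most once every 50 ms.
const batch = [];
zmqRouter.on("message", async (...frames) => {
  try {
    const decoded = await decodeL2Measurement(frames[frames.length - 1]);
    batch.push(decoded);
  } catch (e) {
    return;
  }
});

setInterval(() => {
  if (batch.length > 0) {
    // splice empties the array and hands the whole batch to the parent
    process.send({ messages: batch.splice(0, batch.length) });
  }
}, 50);

The parent would then emit per batch instead of per message, which should relieve both the IPC channel and the event loop.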
Thank you for your help!
Is it possible to kill a cluster worker that is running an infinite loop? I've tried, but I am unable to kill the worker. I guess the kill command cannot get onto the worker's event loop. Any ideas on how else I can do this? When the master receives a "start" message, I want it to fork a worker. When the master receives a "stop" message, I want it to kill the worker.
const cluster = require('cluster');
const numCPUs = require('os').cpus().length;
const process = require('process');
const { nrpin, nrpout } = require("./helpers/PubSub");
const chalk = require('chalk');

//https://leanpub.com/thenodejsclustermodule/read
//https://gist.github.com/jpoehls/2232358

const arrWorkers = [];

if (cluster.isMaster) {
    masterProcess();
} else {
    childProcess();
}

function masterProcess() {
    console.log(chalk.blueBright(`Master ${process.pid} is running`));

    nrpin.on("start", async (bot) => {
        console.log("Start:", bot._id);
        if (arrWorkers.length == numCPUs) {
            console.log(chalk.yellowBright("No CPUs available to create worker!"));
            return; // don't fork past the CPU count
        }
        const worker = cluster.fork();
        arrWorkers.push({
            workerId: worker.id,
            botId: bot._id
        });
    });

    nrpin.on("stop", async (bot) => {
        console.log("Stop:", bot._id);
        const worker = arrWorkers.find(x => x.botId == bot._id);
        if (worker) {
            console.log("killing worker:", worker.workerId);
            cluster.workers[worker.workerId].kill();
        }
    });

    // Be notified when workers die
    cluster.on('exit', function(worker, code, signal) {
        if (worker.isDead()) {
            console.info(`${chalk.redBright('worker dead pid')}: ${worker.process.pid}`);
        }
    });
}

function childProcess() {
    console.log(chalk.green(`Worker ${process.pid} started...`));
    while (true) {
        console.log(Date.now());
    }
}
Never mind, I solved this using process.kill:
let process_id = cluster.workers[worker.workerId].process.pid;
process.kill(process_id);
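For context on why this works while worker.kill() did not: worker.kill() first tries to disconnect the IPC channel gracefully, and that handshake needs the worker's event loop, which the while(true) loop never yields. process.kill(pid) delivers a signal (SIGTERM by default) straight to the OS process, bypassing the event loop. If SIGTERM is ever ignored, a more forceful variant:

// SIGKILL cannot be caught or ignored by the worker process,
// so it terminates even a fully blocked event loop.
process.kill(process_id, 'SIGKILL');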
I'm working on a child process using fork, but I got totally confused on a few things:
• will it (the process) pass an app object instance (e.g. let app = express();) using IPC?
Let me explain my scenario. First, I have server.js, the starting point where I initialize the server; the other file is task.js, where I do heavy work like reading a big file and sending the data to another server. Sending requires authorization from that server, and the logic for it lives in main.js; if any error occurs, I send an email with a few details to the client. Below is the code for email and authorization in main.js:
let Task = require('./task.js');

app.sendEmail = function (message, emailinfo, attachment) {
    // my email logic
};

app.auth = function (host, port) {
    // Authorization logic
};

new Task(app).run();
In task.js (sample code)
const fs = require('fs');

class Task {
    constructor(app) {
        this.app = app;
    }

    run() {
        // Arrow function keeps `this` bound to the Task instance
        fs.readFile('myfile', (err, data) => {
            if (err) {
                let msg = err;
                let clientinf = {}; // must be initialized before setting properties
                clientinf.to = "client email";
                clientinf.cc = "other user in CC";
                this.app.sendEmail(msg, clientinf, attach);
            } else {
                let host = 'other server url';
                let port = 'port';
                this.app.auth(host, port);
            }
        });
    }
}
I want to run task.js in another thread. Note: I don't want to use cluster or worker_threads (I'm using Node 10.19, so I'm not confident worker_threads works properly). Is it possible to use fork or spawn and share data between the two processes? If not, how can I achieve my requirement using threads?
Here are two solutions. The first uses the Worker class from the worker_threads module, but since you don't want to update your Node version, the second uses the fork function from the child_process module. They do pretty much the same thing; to be honest I can't tell which is better, but the worker_threads solution is more recent. One caveat for both: you cannot pass the app object itself across the boundary, because IPC messages and workerData must be serializable, so functions like sendEmail won't survive the transfer.
Solution 1:
const path = require('path');
const { Worker } = require('worker_threads');

const task_script = path.join(__dirname, "./task.js");
const obj = { data: "data" };

const worker = new Worker(task_script, {
    workerData: JSON.stringify(obj)
});

worker.on("error", (err) => console.log(err));
worker.on("exit", () => console.log("exit"));
worker.on("message", (data) => {
    console.log(data);
    res.send(data); // assumes this runs inside an Express route handler
});
and you have to change the task.js code slightly. Here it is:
const fs = require('fs');
const { parentPort, workerData, isMainThread } = require('worker_threads');

class Task {
    constructor(app) {
        this.app = app;
    }

    run() {
        if (!isMainThread) {
            console.log("workerData: ", workerData); // you have the worker data here
            fs.readFile('myfile', (err, data) => {
                if (err) {
                    let msg = err;
                    let clientinf = {};
                    clientinf.to = "client email";
                    clientinf.cc = "other user in CC";
                    this.app.sendEmail(msg, clientinf, attach);
                    parentPort.postMessage(msg); // you can send a message to the parent like this
                } else {
                    let host = 'other server url';
                    let port = 'port';
                    this.app.auth(host, port);
                }
            });
        }
    }
}
And here is the second solution:
const { fork } = require('child_process');
const forked = fork('task.js');
forked.on('message', (msg) => {
console.log('Message from child', msg);
});
forked.send({ hello: 'world' });
and here is how task.js sends and receives data with this method:
const fs = require('fs');

class Task {
    constructor(app) {
        this.app = app;
    }

    run() {
        // receive
        process.on('message', (msg) => {
            console.log('Message from parent:', msg);
        });

        fs.readFile('myfile', (err, data) => {
            if (err) {
                let msg = err;
                let clientinf = {};
                clientinf.to = "client email";
                clientinf.cc = "other user in CC";
                this.app.sendEmail(msg, clientinf, attach);
                process.send(msg); // send method
            } else {
                let host = 'other server url';
                let port = 'port';
                this.app.auth(host, port);
            }
        });
    }
}
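Since functions can't cross the IPC boundary, a sketch of the workaround mentioned above (assuming the email/auth helpers can be factored out of main.js into their own module) is to require them inside task.js instead of receiving app:

// helpers.js - hypothetical module extracted from main.js
module.exports.sendEmail = function (message, emailinfo, attachment) {
    // my email logic
};
module.exports.auth = function (host, port) {
    // Authorization logic
};

// task.js - require the helpers directly instead of expecting `app`
const { sendEmail, auth } = require('./helpers');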
I am writing a Node.js service which receives messages using RabbitMQ, but I am facing an issue when I send concurrent requests to it.
Here is the AMQP subscriber I have written:
const amqp = require('amqplib/callback_api')

let AmqpConnection = {
    // some other methods to make connection
    // ....
    // ....
    subscribe: function () {
        this.withChannel((channel) => {
            let defaultQueueName = "my_queue";
            channel.assertQueue(defaultQueueName, { durable: true }, function (err, _ok) {
                if (err) throw err;
                channel.consume(defaultQueueName, AmqpConnection.processMessage);
                Logger.info("Waiting for requests..");
            });
        })
    },

    processMessage: function (payload) {
        debugger
        try {
            Logger.info("received"+(payload.content.toString()))
        }
        catch (error) {
            Logger.error("ERROR: "+ error.message)
            //Channel.ack(payload)
        }
    }
}
And now I am trying to publish messages to it using this publisher:
const amqp = require('amqplib/callback_api')

let Publisher = {
    // some other methods to make connection
    // ....
    // ....
    sendMessage: function (message) {
        this.withChannel((channel) => {
            let exchangeName = 'exchange';
            let exchangeType = 'fanout';
            let defaultQueueName = 'my_queue';
            channel.assertExchange(exchangeName, exchangeType)
            channel.publish(exchangeName, defaultQueueName, Buffer.from(message));
        })
    }
}

let invalidMsg = JSON.stringify({ "content": "" })
let correctMsg = JSON.stringify({ "content": "Test message" })

setTimeout(function () {
    for (let i = 0; i < 2; i++) {
        Publisher.sendMessage(correctMsg)
        Publisher.sendMessage(invalidMsg)
    }
}, 3000)
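One thing I am not sure about (my withChannel helper may already do this): with a fanout exchange, the routing key passed to channel.publish is ignored, and messages only reach my_queue if the queue has been explicitly bound to the exchange, e.g.:

// Make sure the queue exists and is bound to the fanout exchange;
// otherwise published messages are dropped silently.
channel.assertQueue(defaultQueueName, { durable: true });
channel.bindQueue(defaultQueueName, exchangeName, '');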
But when I execute both publisher and subscriber, I get the following output on the subscriber side:
2017-02-18T11:27:55.368Z - info: received{"content":""}
2017-02-18T11:27:55.378Z - info: received{"content":""}
2017-02-18T11:27:55.379Z - info: received{"content":""}
2017-02-18T11:27:55.380Z - info: received{"content":""}
It seems like concurrent requests are overriding the received messages. Can someone help here?
So I have set up a simple Node.js cluster game; I am new to Node.js. Basically, players connect to my worker using socket.io, then a Player object is created for them and added to my PlayerManager.LIST array. This causes me some issues, since each worker has its own copy of PlayerManager.LIST and the copies are not synced.
So my question is: is there a better way of doing this, so that if I connect to worker 2 I see the same player list as on worker 1?
Structure at the moment:
app.js
-> worker
->-> PlayerManager (Contains List)
->->-> Player
Git Repo: https://github.com/mrhid6/game_app_v2
Node.js cluster is built on Node.js child processes. With child processes you can send data between the parent (the master in a cluster) and the child (a worker) via messages over an IPC channel, and you can do the same with clusters using message events:
var cluster = require('cluster');
var _ = require('lodash');
var http = require('http');

var workers = [];
var workerCount = 4;

if (cluster.isMaster) {
    for (var i = 0; i < workerCount; i++) {
        var worker = cluster.fork();
        worker.on('message', function (msg) {
            if (msg.task === 'sync') {
                syncPlayerList(msg.data);
            }
        });
        workers.push(worker); // was workers.push[worker] outside the loop
    }
} else {
    var worker = new Worker();
    process.on('message', function (msg) {
        if (msg.task === 'sync') {
            worker.playerList = msg.data;
        }
    });
}

function syncPlayerList(playerList) {
    _.forEach(workers, function (worker) {
        worker.send({
            task: 'sync',
            data: playerList
        });
    });
}
// worker class
function Worker() {
    this.playerList = [];
}

Worker.prototype.sendSyncEvent = function () {
    process.send({
        task: 'sync',
        data: this.playerList
    });
};
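A usage sketch (hypothetical, building on the Worker class above): whenever a worker mutates its local list, it pushes the change up, and the master fans it back out to every worker via syncPlayerList:

// In a worker, after a player joins or leaves:
worker.playerList.push(newPlayer); // newPlayer is a placeholder object
worker.sendSyncEvent();            // master receives it and re-broadcasts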