Is it possible to kill a cluster worker that is running an infinite loop? I've tried, but I am unable to kill the worker. I guess the kill command cannot get onto the worker's JS event loop. Any ideas on how else I can do this? When the master receives a "start" message, I want it to fork a worker. When the master receives a "stop" message, I want it to kill that worker.
const cluster = require('cluster');
const numCPUs = require('os').cpus().length;
const process = require('process');
const { nrpin, nrpout } = require("./helpers/PubSub");
const chalk = require('chalk');
//https://leanpub.com/thenodejsclustermodule/read
//https://gist.github.com/jpoehls/2232358
// Registry of live workers: one { workerId, botId } entry per forked bot runner.
const arrWorkers = [];

// Route execution: the master runs the orchestration logic; every forked
// copy of this script runs the worker loop instead.
if (cluster.isMaster) masterProcess();
else childProcess();
/**
 * Master entry point: forks one worker per "start" message (bounded by the
 * CPU count) and kills the matching worker on "stop".
 */
function masterProcess() {
  console.log(chalk.blueBright(`Master ${process.pid} is running`));

  // Fork a new worker for each "start" message, one worker per bot.
  nrpin.on("start", async (bot) => {
    console.log("Start:", bot._id);
    if (arrWorkers.length == numCPUs) {
      console.log(chalk.yellowBright("No CPUs available to create worker!"));
      // Bug fix: without this return, a worker was forked anyway,
      // exceeding the intended one-worker-per-CPU limit.
      return;
    }
    const worker = cluster.fork();
    arrWorkers.push({
      workerId: worker.id,
      botId: bot._id
    });
  });

  // Kill the worker that was forked for this bot, if any.
  nrpin.on("stop", async (bot) => {
    console.log("Stop:", bot._id);
    const entry = arrWorkers.find(x => x.botId == bot._id);
    if (entry) {
      console.log("killing worker:", entry.workerId);
      // Bug fix: worker.kill() performs a graceful disconnect that needs the
      // worker's event loop to be free — a worker stuck in a tight loop never
      // processes it. Signal the OS process directly instead.
      process.kill(cluster.workers[entry.workerId].process.pid);
    }
  });

  // Be notified when workers die
  cluster.on('exit', function (worker, code, signal) {
    if (worker.isDead()) {
      console.info(`${chalk.redBright('worker dead pid')}: ${worker.process.pid}`);
    }
    // Bug fix: free the slot so the CPU-count check above does not
    // permanently block new forks once a worker has died.
    const idx = arrWorkers.findIndex(x => x.workerId === worker.id);
    if (idx !== -1) arrWorkers.splice(idx, 1);
  });
}
/**
 * Worker entry point: spins forever, logging timestamps, simulating a
 * CPU-bound task that never yields to the event loop.
 */
function childProcess() {
  console.log(chalk.green(`Worker ${process.pid} started...`));
  for (;;) {
    console.log(Date.now());
  }
}
Never mind, I solved this using process.kill
// Look up the worker's real OS pid and signal it directly — unlike
// worker.kill(), this works even when the worker's event loop is blocked.
let process_id = cluster.workers[worker.workerId].process.pid;
process.kill(process_id);
Related
Hello — I am doing some web automation and I want to run Puppeteer multithreaded; what I mean is open the same page tens of times. From what I've read, worker threads are the best solution, I guess? But I didn't understand how to use them properly, so I'll include a sample of what I did.
const { Worker, isMainThread } = require('worker_threads');
const puppeteer = require('puppeteer') ;
// Opens a headless browser, visits the target page, and logs how long the
// navigation took (in ms).
// NOTE(review): as pasted, this arrow function is never closed with `}` and
// never called from the main thread — everything below ends up inside its
// body. Confirm the braces against the real file.
let scrapt = async()=>{
/* -------------------------------------------------------------------------- */
/* Launching puppeteer */
/* -------------------------------------------------------------------------- */
try{
const browser = await puppeteer.launch({headless: true }) ;
const page = await browser.newPage();
// Spoof a desktop Chrome user agent to avoid trivial bot detection.
await page.setUserAgent(
`Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36`
);
let Browser_b = new Date()
await page.goto('https://www.supremenewyork.com/')
let browser_e = new Date()
console.log(browser_e - Browser_b)
}
catch(e){
console.log(e)
}
// NOTE(review): the browser is never closed, so each scrapt() call leaks a
// Chromium process.
let ex = [1,2,3,4]
if (isMainThread) {
// This re-loads the current file inside a Worker instance.
new Worker(__filename);
} else {
// Fires four un-awaited scrapt() calls; each launches its own browser.
for(let val of ex) {
scrapt();
}
}
this script opens 4 browsers, but if I open more the PC lags a lot — I think it is only using one thread instead of all of them?
Thank u in advance and sorry for my stupidity
Have you ever tried using Cluster? It's a good way to do multi-processing and, in my opinion, easier to use than worker_threads. Here is an example from HERE:
const cluster = require('cluster');
const http = require('http');
const numCPUs = require('os').cpus().length;

if (cluster.isMaster) {
  console.log(`Master ${process.pid} is running`);

  // Spawn one worker per available CPU core.
  let spawned = 0;
  while (spawned < numCPUs) {
    cluster.fork();
    spawned += 1;
  }

  cluster.on('exit', (worker, code, signal) => {
    console.log(`worker ${worker.process.pid} died`);
  });
} else {
  // Workers can share any TCP connection; here they all accept on the
  // same HTTP port and the OS balances connections across them.
  const server = http.createServer((req, res) => {
    res.writeHead(200);
    res.end('hello world\n');
  });
  server.listen(8000);
  console.log(`Worker ${process.pid} started`);
}
I'm working on a child process using fork, but I got totally confused about a few things:
• will the (child) process receive an app object instance (e.g. `let app = express();`) over IPC?
I am trying to explain my scenario: first I have server.js, where I initialize the server (the starting point), and the other file is task.js, where I do heavy tasks like reading a big file and sending the data to another server. For sending, I need authorization from that server, whose logic is in main.js, and if any error occurs I send an email with a few details to the client. Below is the code for email and authorization in main.js.
// main.js — wires the shared helpers onto `app` and hands it to Task.
// Fixes: `Let` -> `let`, `New` -> `new` (capitalized keywords are syntax
// errors), and app.auth was missing its opening brace.
let task = require('./task.js')

app.sendEmail = function (message, emailinfo, attachment) {
  // my email logic
}

app.auth = function (host, port) {
  // Authorization logic
}

new task(app).run()
In task.js (sample code)
// task.js — performs the heavy file read and reports the outcome through
// the helpers attached to `app` by main.js. (Fixes: `Class` -> `class`,
// fs.readfile -> fs.readFile; assumes fs is required at the top of task.js.)
class Task {
  constructor(app) {
    this.app = app
  }

  run() {
    // Arrow callback so `this` still refers to the Task instance; the
    // original regular `function` lost `this`, so this.app was undefined.
    fs.readFile('myfile', (err, data) => {
      if (err) {
        let msg = err;
        // Bug fix: clientinf was declared without a value, so assigning
        // clientinf.to threw a TypeError before the email was ever sent.
        let clientinf = {};
        clientinf.to = "client email";
        clientinf.cc = " other user in CC";
        // `attach` was never defined in the original; pass null until a
        // real attachment is wired up.
        this.app.sendEmail(msg, clientinf, null);
      } else {
        let host = 'other server url';
        let port = 'port';
        this.app.auth(host, port);
      }
    })
  }
}
I want to run task.js in one more thread. Note: I don't want to use cluster or worker_threads (because I am using Node 10.19, so I am not confident that worker_threads works properly). Is it possible to use fork or spawn and share data between each other? If not, how can I achieve my requirement using a thread?
Here are two solutions. The first is using the Worker class from the worker_threads module but since you don't want to update the node version the second solution is using fork function from child_process module. They do pretty much the same thing to be honest I can't tell which is better but the worker_threads solution is more recent.
Solution 1:
const { Worker } = require('worker_threads')
// NOTE(review): `path` is used below but not required in this snippet —
// add `const path = require('path')` in the real file.
const task_script = path.join(__dirname, "./task.js")
// Data handed to the worker; workerData must be clonable, hence the stringify.
const obj = {data:"data"}
const worker = new Worker(task_script, {
workerData: JSON.stringify(obj)
})
worker.on("error", (err) => console.log(err))
worker.on("exit", () => console.log("exit"))
// Messages posted by the worker via parentPort.postMessage arrive here.
worker.on("message", (data) => {
console.log(data)
// NOTE(review): `res` is not defined in this snippet — this line assumes
// the code lives inside an Express route handler; confirm.
res.send(data)
})
and you have to change the task.js code slightly. Here it is:
const { parentPort, workerData, isMainThread } = require('worker_threads')
// task.js, worker_threads flavour: only does work when actually running
// inside a Worker, and reports back to the parent via parentPort.
// (Fixes: fs.readfile -> fs.readFile, `this` lost in the callback,
// clientinf used uninitialized, undefined `attach`.)
class Task {
  constructor(app) {
    this.app = app
  }

  run() {
    if (!isMainThread) {
      console.log("workerData: ", workerData) // you have worker data here
      // Arrow callback keeps `this` bound to the Task instance (a plain
      // `function` here loses it, making this.app undefined).
      fs.readFile('myfile', (err, data) => {
        if (err) {
          let msg = err;
          // Bug fix: clientinf must be an object before .to/.cc are set.
          let clientinf = {};
          clientinf.to = "client email";
          clientinf.cc = " other user in CC";
          // `attach` was undefined in the original; pass null for now.
          this.app.sendEmail(msg, clientinf, null);
          parentPort.postMessage(msg) // you can send a message to the parent like this
        } else {
          let host = 'other server url';
          let port = 'port';
          this.app.auth(host, port);
        }
      })
    }
  }
}
And here is the second solution
// Parent side: fork task.js as a separate Node process and talk to it
// over the built-in IPC channel.
const { fork } = require('child_process');
const forked = fork('task.js');
// Messages sent by the child via process.send() arrive here.
forked.on('message', (msg) => {
console.log('Message from child', msg);
});
// Send an initial payload to the child.
forked.send({ hello: 'world' });
and here is the task.js way of sending and receiving data with this method:
// task.js, child_process flavour: listens for messages from the parent and
// reports results back over the IPC channel with process.send.
// (Fixes: fs.readfile -> fs.readFile, `this` lost in the callback,
// clientinf used uninitialized, undefined `attach`, unguarded process.send.)
class Task {
  constructor(app) {
    this.app = app
  }

  run() {
    // receive messages forwarded by the parent process
    process.on('message', (msg) => {
      console.log('Message from parent:', msg);
    });

    // Arrow callback keeps `this` bound to the Task instance.
    fs.readFile('myfile', (err, data) => {
      if (err) {
        let msg = err;
        // Bug fix: clientinf must be an object before .to/.cc are set.
        let clientinf = {};
        clientinf.to = "client email";
        clientinf.cc = " other user in CC";
        this.app.sendEmail(msg, clientinf, null); // `attach` was undefined; pass null
        // process.send only exists when started with an IPC channel (fork);
        // guard so a plain `node task.js` does not crash here.
        if (process.send) process.send(msg);
      } else {
        let host = 'other server url';
        let port = 'port';
        this.app.auth(host, port);
      }
    })
  }
}
I have a main process that spawns child processes. When the child process is killed, it is restarted, but when it is killed again, it will not restart.
test.js
const fork = require('child_process').fork;
const path = require('path');
const test = path.resolve('test2.js');

// Bug fix: the original attached the "close" handler only to the FIRST
// child, so after one restart the next crash was never observed. Forking
// from a named function re-arms the handler on every replacement child.
function startChild() {
  const test_child = fork(test);
  test_child.on("close", () => {
    console.log("child gone");
    setTimeout(startChild, 2500);
  });
}
startChild();
test2.js
// test2.js — heartbeat child: prints "test" every 250 ms, forever.
setInterval(function () {
  console.log("test");
}, 250);
I want the main process (test.js), to continually start up (test2.js) if it ever crashes or stops for some reason.
const fork = require('child_process').fork;
const path = require('path');
const test = path.resolve('child.js');

// Keep the child alive: every time it closes, wait briefly and fork a
// replacement, wiring the same close handler onto the new child so the
// restart cycle never stops.
function repeat() {
  fork(test).on("close", () => {
    console.log("child gone");
    setTimeout(repeat, 25);
  });
}
repeat();
You can try the above code in your main.js
If you can check the status of your child process, you can add it to a setInterval function checking if it is running or not, and if not just call it again
I have an app (master) which distributes work to n amount of workers. Inside the worker js I have hooked the console output as follows:
// Redirect the worker's console output to the master over IPC; the
// originals are kept on console._log / console._error for later restore.
console._log = console.log;
console._error = console.error;
console.log = function (...parts) {
  process.send({
    cmd: 'log',
    channel: 'out',
    data: parts.join(' ')
  });
};
console.error = function (...parts) {
  process.send({
    cmd: 'log',
    channel: 'err',
    data: parts.join(' ')
  });
};
The master now is responsible of logging all incoming messages into a file besides std. Accomplished with the following code & module:
// Mirror everything the master writes to stdout/stderr into log files,
// with ANSI colour codes stripped first.
const intercept = require('intercept-stdout');
const stripAnsi = require('strip-ansi');
// NOTE(review): appendFileSync blocks the event loop on every write; on a
// chatty master this can stall IPC message handling — consider an async
// write stream instead.
const unhook_intercept = intercept(function (str) {
// stdout
fs.appendFileSync(lib.logOutFile(), stripAnsi(str));
}, function (str) {
// stderr
fs.appendFileSync(lib.logErrFile(), stripAnsi(str));
});
I have noticed in the logs that a worker stopped sending messages after 1.5 days. In the master I have worker exit detection:
// Worker lifecycle policy:
//  - killed by a signal  -> log only (no restart)
//  - non-zero exit code  -> treat as a crash: fork a replacement and hand
//                           it the dead worker's website assignment
//  - clean exit (code 0) -> drop all bookkeeping for that worker
// NOTE(review): this fires only on 'exit'; a worker that merely stops
// sending IPC messages without exiting is never detected here — a
// heartbeat/ping mechanism would be needed to catch that case.
cluster.on('exit', (worker, code, signal) => {
if (signal) {
// NOTE(review): a signal-killed worker is NOT restarted and its
// `work`/`status` entries are left behind — confirm this is intended.
console.log(`${lib.dateTimeStamp()} - ${chalk.magenta('[')}${chalk.cyan(worker.process.pid)}${chalk.magenta(']')}\tWorker: ${chalk.yellow(`was killed by signal: ${signal}`)}`);
} else if (code !== 0) {
console.error(`${lib.dateTimeStamp()} - ${chalk.magenta('[')}${chalk.cyan(worker.process.pid)}${chalk.magenta(']')}\tWorker: ${chalk.red(`exited with error code: ${code}`)}`);
// Replacement inherits the dead worker's workload, keyed by OS pid.
let newWorker = cluster.fork();
let data = work[worker.process.pid];
let d = new Date();
// The old pid's status entry is marked dead but never deleted here —
// NOTE(review): confirm something else prunes `status`, else it grows.
status[worker.process.pid].status = 'dead';
status[newWorker.process.pid] = {
started: `${d.toLocaleDateString()} ${d.toLocaleTimeString()}`,
status: 'alive'
};
delete work[worker.process.pid];
work[newWorker.process.pid] = data;
// Re-send the configuration and the inherited website list to the
// replacement worker over IPC.
newWorker.send({
options: cfg.options,
websites: work[newWorker.process.pid]
});
} else {
delete work[worker.process.pid];
delete status[worker.process.pid]
console.log(`${lib.dateTimeStamp()} - ${chalk.magenta('[')}${chalk.cyan(worker.process.pid)}${chalk.magenta(']')}\tWorker: ${chalk.green('exited successfully')}`);
}
});
Exit was not triggered as I have seen in the logs. At the moment I have only assumptions and I'd like your opinions. Could it be because:
The synchronous file logging.
A worker disconnected on its own.
A worker exited and the exit event was missed.
Your opinion...
So I have setup a simple nodejs cluster game, I am new to nodejs. basically players connect to to my worker using socket.io then they get created to a Player Object then added to my PlayerManager.LIST array. Now this causes me some issues as the PlayerManager.LIST is on each of workers and are not sync'd.
So my question is, is there a better way of doing this so that if I connect to worker 2 I see same player list as worker 1's.
Structure at the moment:
app.js
-> worker
->-> PlayerManager (Contains List)
->->-> Player
Git Repo: https://github.com/mrhid6/game_app_v2
NodeJS Clusters are based on Nodejs Child Processes. In child processes you can send data between parent (Master in cluster) and child (worker in cluster) via messages over IPC channel. You can do the same with clusters using message events
var cluster = require('cluster');
var _ = require('lodash');
var http = require('http');

var workers = [];
var workerCount = 4;

if (cluster.isMaster) {
  for (var i = 0; i < workerCount; i++) {
    var worker = cluster.fork();

    // Relay any worker's "sync" request to every worker via the master.
    worker.on('message', function (msg) {
      if (msg.task === 'sync') {
        syncPlayerList(msg.data);
      }
    });

    // Bug fix: the original had `workers.push[worker]` OUTSIDE the loop —
    // a property access, not a call — so `workers` stayed empty and
    // syncPlayerList never reached anyone. Push each worker as it is forked.
    workers.push(worker);
  }
} else {
  var worker = new Worker();
  // Replace the local player list whenever the master broadcasts a sync.
  process.on('message', function (msg) {
    if (msg.task === 'sync') {
      worker.playerList = msg.data;
    }
  });
}

// Broadcast the authoritative player list to every worker process.
function syncPlayerList(playerList) {
  _.forEach(workers, function (worker) {
    worker.send({
      task: 'sync',
      data: playerList
    });
  });
}
// worker class
// Holds this worker's local copy of the shared player list and can ask the
// master (over IPC) to broadcast it to the other workers.
class Worker {
  constructor() {
    this.playerList = [];
  }

  sendSyncEvent() {
    process.send({
      task: 'sync',
      data: this.playerList
    });
  }
}