Resuming queued jobs after NodeJS worker restart on Heroku - node.js

So I have a fairly simple setup on Heroku. I'm using RabbitMQ for handling background jobs. My setup consists of a node script that runs daily using the Heroku Scheduler addon. The script adds jobs to the queue; the worker, in turn, consumes them and delegates them to a separate module for handling.
The problem starts after I receive a SIGTERM event that Heroku initiates randomly from time to time, before restarting the instance.
For some reason, after the instance is restarted, the worker never gets back up again. Only when I restart it manually by doing heroku ps:scale worker=0 and heroku ps:scale worker=1 does the worker continue to consume the pending jobs.
Here's my worker:
// worker.js
//
// Background worker: consumes user-sync jobs from RabbitMQ and hands each
// one to syncService. Runs under throng so a dead worker is respawned.
var throng = require('throng');
var jackrabbit = require('jackrabbit');
var logger = require('logfmt');
var syncService = require('./syncService');

var start = function () {
  // Connect to RabbitMQ (RABBITMQ_BIGWIG_RX_URL on Heroku, localhost in dev).
  var queue = jackrabbit(process.env.RABBITMQ_BIGWIG_RX_URL || 'amqp://localhost');

  logger.log({type: 'msg', msg: 'start', service: 'worker'});

  queue
    .default()
    .on('drain', onDrain)
    .queue({name: 'syncUsers'})
    .consume(onMessage);

  // Dispatch one job to the matching syncService handler. ack/nack fire only
  // once the handler's promise settles, so a message in flight during a
  // restart stays unacked and is redelivered by the broker.
  function onMessage(data, ack, nack) {
    var promise;
    switch (data.type) {
      case 'updateUser':
        promise = syncService.updateUser(data.target, data.source);
        break;
      case 'createUser':
        promise = syncService.createUser(data.source);
        break;
      case 'deleteUser':
        promise = syncService.deleteUser(data.target);
        break;
      default:
        // Unknown job type: reject it explicitly instead of crashing on
        // `promise.then` with promise still undefined.
        nack();
        return;
    }
    promise.then(ack, nack);
  }

  function onDrain() {
    queue.close();
    logger.log({type: 'info', msg: 'sync complete', service: 'worker'});
  }

  process.on('SIGTERM', shutdown);

  // queue.close() is asynchronous; exiting before its callback runs can leave
  // the AMQP connection/channel in a dirty state, which is what kept the
  // worker from resuming after Heroku's restart. Exit only once the close
  // has completed (non-zero exit code if closing failed).
  function shutdown() {
    logger.log({type: 'info', msg: 'shutting down'});
    queue.close(function (err) {
      process.exit(err ? 1 : 0);
    });
  }
};

throng({
  workers: 1,
  lifetime: Infinity,
  grace: 4000
}, start);

The close() method on the jackrabbit object takes a callback, you should avoid exiting the process until that is finished:
// Graceful shutdown: let the AMQP connection finish closing before the
// process goes away; exit non-zero if closing reported an error.
function shutdown() {
  logger.log({type: 'info', msg: 'shutting down'});
  queue.close(function (err) {
    var exitCode = err ? 1 : 0;
    process.exit(exitCode);
  });
}

Related

Can we use electron.nativeImage APIs in a NodeJS worker thread?

Is it possible to use electron APIs from within a NodeJS worker thread spawned from the main Electron process? We have attempted to use Electron's nativeImage module from within a worker thread (Node worker, not web worker) yet nativeImage is always undefined. Our worker:
const { parentPort } = require("worker_threads");
// NOTE(review): Electron's modules are only wired up inside Electron's own
// main/renderer processes; a Node worker thread presumably does not get those
// bindings, so this destructure yields undefined — verify against Electron docs.
const { nativeImage } = require("electron");
// Echo a diagnostic back to the spawning thread whenever a message arrives.
parentPort.on("message", message => {
if (nativeImage === undefined) {
parentPort.postMessage("nativeImage is undefined!");
}
let image = nativeImage.createEmpty(); // fails
});
The main thread which creates this worker receives the "nativeImage is undefined!" message.

worker thread won't respond after first message?

I'm making a server script and, to make it easier for both hosts and clients to do what they want, I made a customizable server script that runs using nw.js(with a visual interface). Said script was made using web workers since nw.js was having problems with support to worker threads.
Now that NW.js has fixed its problems with worker threads, I've been trying to move everything that was inside the web workers to worker threads, but there's a problem: when the main thread receives the answer from the second thread, the latter stops responding to any subsequent message.
For example, running the following code with either NW.js or Node.js itself will return "pong" only once
const { Worker } = require('worker_threads');
// NOTE: the inlined worker source registers its handler with `once`, so the
// listener is removed after the first message — this is exactly why only one
// "pong" ever comes back.
const worker = new Worker('const { parentPort } = require("worker_threads");parentPort.once("message",message => parentPort.postMessage({ pong: message })); ', { eval: true });
worker.on('message', message => console.log(message));
worker.postMessage('ping');
worker.postMessage('ping');
How do I configure the worker so it will keep responding to whatever message it receives after the first one?
Because you use EventEmitter.once() method. According to the documentation this method does the next:
Adds a one-time listener function for the event named eventName. The
next time eventName is triggered, this listener is removed and then
invoked.
If you need your worker to process more than one event then use EventEmitter.on()
// Inline worker source: `on` (instead of `once`) keeps the listener
// installed, so the worker answers every incoming message with a pong.
const workerSource =
  'const { parentPort } = require("worker_threads");' +
  'parentPort.on("message",message => parentPort.postMessage({ pong: message }));';
const worker = new Worker(workerSource, { eval: true });

Exit custom function with loop process on node.js shutdown signal

I have a job that is executed once per day. My app is running on Heroku, and the dyno is restarted once a day.
So what can happen is that Heroku starts restarting the dyno during job execution.
That itself is not a problem as I can start job two times per day, but what is a problem is to stop the job in the middle of task when it is not in stable status.
I would like now somehow to send this signal to job function so I can break any loops and stop function execution in safe way.
I know how to get signal:
process
// NOTE(review): shutdown(...) is invoked immediately at registration time;
// it logs right away and returns the function that actually becomes the
// signal listener.
.on('SIGTERM', shutdown('SIGTERM'))
.on('SIGINT', shutdown('SIGINT'))
.on('uncaughtException', shutdown('uncaughtException'));
// Factory: logs the signal name (at registration, see note above in this
// block) and returns the handler that delays exit by 5 seconds.
function shutdown(signal) {
console.log(`${ signal }...`);
return (err) => {
if (err) console.error(err.stack || err);
// .unref() lets the process exit earlier if the event loop drains first.
setTimeout(() => {
console.log('...waited 5s, exiting.');
process.exit(err ? 1 : 0);
}, 5000).unref();
};
}
but how to send this signal to my job function and to break from it safely?
Thank you.
So the best solution I came up with is following.
// Manage signals
// Manage signals
// Flipped to true once any shutdown signal arrives; long-running jobs poll it
// (via the exported getter) so they can break out of loops at a safe point.
let shutDownSignal = false;
process
.on('SIGTERM', shutdown('SIGTERM'))
.on('SIGINT', shutdown('SIGINT'))
.on('uncaughtException', shutdown('uncaughtException'));
// Factory: returns the listener for `signal`. The listener raises the flag,
// then force-exits after 15s if the job has not wound down on its own.
function shutdown(signal) {
return (err) => {
shutDownSignal = true;
console.log(`Received signal: ${ signal }...`);
if (err) console.error(err.stack || err);
// .unref(): the pending timer alone will not keep the process alive,
// so a clean finish can exit before the 15s deadline.
setTimeout(() => {
console.log('...waited 15s, exiting.');
process.exit(err ? 1 : 0);
}, 15000).unref();
};
}
// NOTE(review): "Singnal" is a typo, but the name is exported — renaming it
// would break existing callers.
module.exports.getShutDownSingnal = function(){ return shutDownSignal; }
then with getShutDownSingnal() anywhere I can check whether shutdown is initiated.
One more thing. It is necessary to put Procfile in app root with
web: node index.js
in it (or app.js depending what are you using).
This is necessary so that SIGTERM and SIGKILL signals are transferred correctly to node (for example if using npm, it will not transfer this signal correctly). More about this on Heroku docs
Maybe this will be useful for someone.

Shutting down nodejs microservice gracefully when using Docker Swarm

I have multiple microservices written in Node.js (Koa) running in Docker Swarm.
Since container orchestration tools like Kubernetes or Swarm can scale up and down services instantly, I have a question on gracefully shutting down Nodejs service to prevent unfinished running process.
Below is the flow I can think of:
sending a SIGINT signal to each worker process — does Docker Swarm
send SIGINT to the worker when scaling down a service?
the workers are responsible for catching the signal, cleaning up or freeing any
used resources and finishing their process. How can I stop new API
requests and wait for any running process to finish before shutting
down?
Some code below from reference:
process.on('SIGINT', () => {
const cleanUp = () => {
// How can I clean resources like DB connections using Sequelizer
}
// Stop accepting new connections; the callback fires only after the
// already-open connections have drained.
server.close(() => {
cleanUp()
process.exit()
})
// Force close server after 5secs, `Should I do this?`
// NOTE(review): this timer has no .unref(), and cleanUp can run twice if
// the close callback fires just before the 5s deadline.
setTimeout((e) => {
cleanUp()
process.exit(1)
}, 5000)
})
I created a library (https://github.com/sebhildebrandt/http-graceful-shutdown) that can handle graceful shutdowns as you described. Works well with Express and Koa.
This package also allows you to create function (should return a promise) to additionally clean up things like DB stuff, ... here some example code how to use it:
const gracefulShutdown = require('http-graceful-shutdown');
...
server = app.listen(...);
...
// Example onShutdown hook: resolves after a simulated one-second teardown,
// so the graceful-shutdown machinery waits for it before exiting.
function cleanup() {
  return new Promise((resolve) => {
    console.log('... in cleanup');
    const delayMs = 1000;
    setTimeout(() => {
      console.log('... cleanup finished');
      resolve();
    }, delayMs);
  });
}
// this enables the graceful shutdown with advanced options
gracefulShutdown(server,
{
signals: 'SIGINT SIGTERM',
timeout: 30000,
development: false,
onShutdown: cleanup,
finally: function() {
console.log('Server gracefulls shutted down.....')
}
}
);
I personally would increase the final timeout from 5 secs to higher value (10-30 secs). Hope that helps.

Asynchronous tasks when node server.close() invoked

I'm working on a node+express application and want to prepare it for a graceful shutdown. The goal is to make deployments in the "right" way, that is:
Avoid accepting requests
Finish the current ones
Terminate node process
Put new code
Restart server.
For the graceful shutdown I'm using a code similar to:
var express = require('express');
var app = express();
var server = app.listen(1337);
process.on( 'SIGTERM', function () {
// Stop accepting new connections; the callback runs once every existing
// connection has closed.
server.close(function () {
console.log( "Closed out remaining connections.");
// Close db connections, etc.
//
// How to ensure no tasks is running in the thread pool ???
});
// Escape hatch: force-exit if connections fail to drain within 30s.
// NOTE(review): the timer has no .unref(), so it keeps the process alive
// (and eventually exits with code 1) even after a clean close.
setTimeout( function () {
console.error("Could not close connections in time, forcefully shutting down");
process.exit(1);
}, 30*1000);
});
My doubt is: inside server.close(), can I be sure that no task is running or pending execution?
What happens if a job in the event loop started reading a big file asynchronously and is working in the thread pool? Does server.close guarantee that no tasks are running in the thread pool either?

Resources