Agenda npm runs only 4 times - Node.js

I need a job scheduler that runs a couple of jobs at a specific time interval, so I used the agenda module for this task. What happens is that when the server is restarted, the jobs get executed; this happens four times at regular intervals, and after that agenda stops working without throwing any error. I tried cancelling the jobs and stopping agenda whenever the server stops, so that agendaJobs creates a new collection every time the server is restarted, but I still experience the same issue. Since I have multiple Node instances running, I need the jobs to run only once in that time interval, which is why I chose agenda. Is there a different scheduler that can do the same, or am I doing something wrong?
var agenda = new Agenda({db: {address: mongoConnectionString}});

var startCron = function(server, callback) {
  agenda.define('job1', function(job, done) {
    job1.execute();
  });
  agenda.define('job2', function(job, done) {
    job2.execute();
  });
  agenda.define('job3', function(job, done) {
    job3.execute();
  });
  agenda.on('ready', function() {
    agenda.every('10 minutes', ['job1', 'job2', 'job3']);
    agenda.start();
    console.log("Cron job setup completed.");
    callback(null, server);
  });
};
// this is my kill process
var killProcess = function() {
  try {
    mongoose.disconnect();
    console.log(arguments);
    console.log("Connection got closed.");
  } catch (e) {
    console.log(e);
  }
  agenda.cancel({}, function(err, numRem) {
    console.log("Cancelling Agenda Jobs", err, numRem);
    agenda.stop(function() {
      console.log("Stopping Agenda");
      setTimeout(function() {
        process.exit(1);
      }, 2000);
    });
  });
};
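Note: with Agenda, a job handler defined with a done parameter must call done(); otherwise the job stays locked and will not be rescheduled, which can produce exactly this stop-after-a-few-runs behaviour. A minimal sketch, assuming job1.execute() is synchronous:

// Sketch: call done() so Agenda can unlock and reschedule the job.
// If job1.execute() is asynchronous, call done() from its callback instead.
agenda.define('job1', function(job, done) {
  job1.execute();
  done();
});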

Related

Do cron jobs in Node.js affect performance or create problems like memory leakage?

I have a cron job that is created on every ticket-booking request; the job runs 15 minutes later and checks whether the payment is done. If not, it makes the ticket available to other users.
new CronJob('*/15 * * * *', function () {
  const that = this;
  co(function* () {
    try {
      const order = yield mongoose.model('Orders').findById(orderId);
      if (order.orderStatus === 'pending') {
        order.orderStatus = 'released';
        yield order.save();
        yield mongoose.model('Outing').findByIdAndUpdate(outing, {
          '$set': {
            'showFlag': true
          }
        });
        console.log(`Order:${orderId} has been cancelled due to session time out - ${new Date()}`);
      } else if (order.orderStatus === 'released') {
        console.log(`Order:${orderId} is cancelled due to Client's request`);
      } else if (order.orderStatus === 'reserved') {
        console.log(`Order:${orderId} is reserved`);
      }
      console.log('Job ended:', new Date());
      that.stop();
    } catch (error) {
      console.log(error);
    }
  });
}, null, true);
So what's happening here is that a new job is created for every request; 15 minutes later the job is executed, and then the respective cron job is stopped.
Will this affect performance in any way?
If yes, is there another way to run this process in the background?
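One detail worth noting: the pattern */15 * * * * fires every 15 minutes on the clock, not once 15 minutes after booking, so an order could be checked almost immediately after it is created. For a one-shot delayed check, a plain setTimeout is enough; a sketch, assuming orderId and outing are in scope, with the caveat that the timer is lost if the process restarts (a persistent scheduler such as Agenda avoids that):

// Sketch: check the order exactly once, 15 minutes after booking.
// Caveat: this timer does not survive a process restart.
setTimeout(function () {
  mongoose.model('Orders').findById(orderId).then(function (order) {
    if (order && order.orderStatus === 'pending') {
      order.orderStatus = 'released';
      return order.save().then(function () {
        return mongoose.model('Outing').findByIdAndUpdate(outing, {
          '$set': { 'showFlag': true }
        });
      });
    }
  }).catch(console.log);
}, 15 * 60 * 1000);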

Mocha/Chai Tests using Mongoose Occasionally Fail on First Run

I have a series of Mocha/Chai tests that are set up as follows:
var mongoTest = require('../mongoTest.js');

//Connect to test DB before tests and disconnect after
before(function(done) {
  mongoTest.mongoConnect(done);
});
after(function(done) {
  mongoose.disconnect(done);
});

//Load Data Files
var testData = require('../testData.js');
var deviceAndLocationAnswers = testData.deviceAndLocationAnswers;

//Repeated Functions:
var clearCollections = function(coll, callback) {
  mongoose.connection.db.dropCollection(coll.collectionName, function(err, result) {
    callback();
  });
};
describe('Testing functions that use the Breakers collections', function() {
  //Test Setup
  var req = {query: {device: testData.powerConsumptionDevice}};
  before(function(done) {
    this.timeout(15000);
    async.forEach(mongoose.connection.collections, clearCollections, function(err) {
      if (err) { console.log(err); }
      done();
    });
  });
  before(function(done) {
    this.timeout(15000);
    Breakers.create(testData.breakersData, function(err, model) {
      done(err);
    });
  });
  after(function(done) {
    this.timeout(15000);
    async.forEach(mongoose.connection.collections, clearCollections, function(err) {
      if (err) { console.log(err); }
      done();
    });
  });

  // Tests
  describe('Testing powerConsumption Function', function() {
    it('Should produce some output', function(done) {
      this.timeout(15000);
      dbFunctions.powerConsumption(req, function(result) {
        result.should.exist;
        done();
      });
    });
    it('Should produce the same results as the mock up from testData', function(done) {
      this.timeout(15000);
      dbFunctions.powerConsumption(req, function(result) {
        result.should.be.deep.equal(testData.powerConsumptionResults);
        done();
      });
    });
  });
});
mongoTest comes from the following file I have:
var mongoose = require('mongoose');
var dBaseURL = 'mongodb://xxxx:yyyyy#ds#####.mongolab.com:zzzz/myDB'; // details removed

exports.mongoConnect = function(callback) {
  mongoose.connect(dBaseURL, function(err) {
    if (err) {
      console.log('MongoDB Connection Error', err);
    } else {
      console.log('MongoDB Connection Successful');
    }
    callback();
  });
};
I have a total of 14 tests, set up similarly to the ones I've used as examples. The first time I run these tests, a few will always fail (it's never the same few). If I run the tests again immediately after, all of them will pass.
Failure takes the form of the .should.be.deep.equal() calls failing with large diffs. I'm not sure how this could happen, as the data doesn't change between tests.
I've checked the database between tests and the collections are always deleted after the tests have run.
Does anyone have any ideas as to what could be going on here? I'm new to node.js, so it's very likely I've missed some bit of best practice that's causing all of this.
Also, I am aware that it is best practice to mock out the database. I already have a set of tests that do this. I also want a set of tests that use the database so that I can test the actual behavior.
ETA:
Turns out my problems had nothing to do with the code I had posted here.
The functions that occasionally failed their tests were the ones whose database calls didn't sort the output in any particular way. Without an explicit sort, MongoDB returns documents in an unspecified order, so Mongoose would sometimes (but not always) return the query results in the order the test expected. My question can be considered solved.
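A sketch of the fix, assuming queries shaped like the ones above (the Breakers model and the _id sort key are illustrative):

// Sketch: an explicit sort makes the result order deterministic,
// so deep-equality assertions stop failing intermittently.
Breakers.find({})
  .sort({ _id: 1 }) // any stable, indexed key works
  .exec(function(err, docs) {
    // docs now arrive in a deterministic order
  });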

Jasmine: how to run series of tests after an event has triggered

I'm trying to run tests on my npm module. The code below works if I comment out either of the two it blocks, but it times out if I leave them both in as below. How can I wait for "ready" before running my tests (I'd like to add more, but they too need to await "ready")?
describe("GA analytics", function() {
var report = new Report(private_key, SERVICE_EMAIL, 1);
it("should connect and emit <ready>", function(done) {
report.on('ready', function() {
console.log("test.js: Token: ", report.token);
expect(report.token).toBeDefined();
done();
});
});
it("should get data correctly", function(done) {
report.on('ready', function() {
report.get(query, function(err, data) {
if (err) throw err
expect(data.rows).toEqual([ [ '5140' ] ]);
done();
});
});
});
});
I guess this is happening because you create a new instance of Report only once per test file, so the ready event fires only once and only the first it block catches and processes it. The remaining it blocks never receive another ready event, so they silently wait for one until Jasmine times out. The solution is to create a new Report instance before each it block, which is easily done with Jasmine's beforeEach:
describe("GA analytics", function() {
var report;
beforeEach(function () {
report = new Report();
});
// ....
});
See the working example on Plunker (open the "script.js" file).
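Since each it block still needs report to be ready, the waiting can also live in beforeEach itself; a sketch, assuming Report is an EventEmitter that emits ready once per instance:

beforeEach(function (done) {
  report = new Report(private_key, SERVICE_EMAIL, 1);
  // Resolve the setup step only once this instance is ready,
  // so every it block can use `report` immediately.
  report.once('ready', function () {
    done();
  });
});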

How to run Cron Job in Node.js application that uses cluster module?

I'm using the node-cron module for scheduling tasks in a Node.js application. I also want to run the application in several processes using the core cluster module.
Running the application in several processes ends up with the scheduled tasks being executed in each process (e.g. if the task was to send an email, the email would be sent multiple times).
What are the best practices/possible ways of running a cron job along with the cluster module? Should I create a separate process that handles only the cron job and does not accept any requests? If yes, how can I do that the right way?
If you are using PM2, you can use an environment variable provided by PM2 itself called NODE_APP_INSTANCE, which requires PM2 2.5 or greater.
The NODE_APP_INSTANCE environment variable can be used to tell processes apart. For example, if you want to run a cron job on only one process, you can just do this:
if (process.env.NODE_APP_INSTANCE == 0) {
  // schedule your cron job here, since this part is executed by only one cluster instance
}
since two processes can never have the same number.
More info in the official PM2 docs.
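A small caveat: environment variables are strings, so a strict comparison needs '0' rather than 0 (the loose == above happens to coerce). A sketch of wiring this up with the node-cron module from the question, where sendEmails stands in for the actual work:

const cron = require('node-cron');

// Only the first PM2 cluster instance schedules the job;
// every other instance skips this block entirely.
if (process.env.NODE_APP_INSTANCE === '0') {
  cron.schedule('*/10 * * * *', sendEmails); // sendEmails is hypothetical
}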
After some research I ended up with the "distributed locks using Redis" solution.
There is a node module for that: node-redis-warlock.
Hope this answer is useful for someone else.
UPDATE. Minimal sample code:
var Warlock = require('node-redis-warlock'),
    redis = require('redis');

// Establish a redis client
redis = redis.createClient();
// and pass it to warlock
var warlock = new Warlock(redis);

function executeOnce (key, callback) {
  warlock.lock(key, 20000, function(err, unlock) {
    if (err) {
      // Something went wrong and we weren't able to set a lock
      return;
    }
    if (typeof unlock === 'function') {
      setTimeout(function() {
        callback(unlock);
      }, 1000);
    }
  });
}

// Executes the callback only once
executeOnce('every-three-hours-lock', function(unlock) {
  // Do here any stuff that should be done only once...
  unlock();
});
UPDATE 2. More detailed example:
const CronJob = require('cron').CronJob;
const Warlock = require('node-redis-warlock');
const redis = require('redis').createClient();
const warlock = new Warlock(redis);
const async = require('async');

function executeOnce (key, callback) {
  warlock.lock(key, 20000, function(err, unlock) {
    if (err) {
      // Something went wrong and we weren't able to set a lock
      return;
    }
    if (typeof unlock === 'function') {
      setTimeout(function() {
        callback(unlock);
      }, 1000);
    }
  });
}

function everyMinuteJobTasks (unlock) {
  async.parallel([
    sendEmailNotifications,
    updateSomething,
    // etc...
  ],
  (err) => {
    if (err) {
      logger.error(err);
    }
    unlock();
  });
}

let everyMinuteJob = new CronJob({
  cronTime: '*/1 * * * *',
  onTick: function () {
    executeOnce('every-minute-lock', everyMinuteJobTasks);
  },
  start: true,
  runOnInit: true
});

/* Actual tasks */
let sendEmailNotifications = function(done) {
  // Do stuff here
  // Call done() when finished or call done(err) if an error occurred
};

let updateSomething = function(done) {
  // Do stuff here
  // Call done() when finished or call done(err) if an error occurred
};
// etc...
I think you can use the Node cluster module and write your code to run in the master process only:
const cluster = require('cluster');

if (cluster.isMaster) {
  // Write the code which you want to execute in the master process only
}
This is the plain Node way to handle clustering; of course, you can use a tool like pm2 to handle this instead.
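To make that snippet concrete, a minimal sketch (the setInterval stands in for whatever scheduler is used):

const cluster = require('cluster');
const os = require('os');

if (cluster.isMaster) {
  // The master schedules the job exactly once...
  setInterval(function () {
    console.log('runs in a single process only');
  }, 10 * 60 * 1000);

  // ...and forks one worker per CPU to serve requests.
  os.cpus().forEach(function () {
    cluster.fork();
  });
} else {
  // Worker: set up the HTTP server / app here.
}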
I actually do not like the Redis approach that is also used in the cron-cluster npm plugin, because I do not want to have a Redis server running on my machine and maintain it, too.
I would like to discuss this approach with you:
Pro: we do not need to use Redis
Con: cron jobs always run on the same worker
I use message passing only for this; if you use it for other things as well, make sure the cron-start order is distinguishable from your other messages (the code below tags it with an order field).
if (cluster.isMaster) {
  // Count the machine's CPUs
  var cpuCount = require('os').cpus().length;

  // Create a worker for each CPU
  for (var i = 0; i < cpuCount; i += 1) {
    cluster.fork();
  }

  cluster.on('fork', (worker) => {
    console.log("cluster forking new worker", worker.id);
  });

  // have a mainWorker that does the cron jobs.
  var mainWorkerId = null;

  cluster.on('listening', (worker, address) => {
    console.log("cluster listening new worker", worker.id);
    if (null === mainWorkerId) {
      console.log("Making worker " + worker.id + " the main worker");
      mainWorkerId = worker.id;
      worker.send({order: "startCron"});
    }
  });

  // Listen for dying workers; if the mainWorker dies, make a new mainWorker
  cluster.on('exit', function (worker, code, signal) {
    console.log('Worker %d died :(', worker.id);
    if (worker.id === mainWorkerId) {
      console.log("Main Worker is dead...");
      mainWorkerId = null;
    }
    console.trace("I am here");
    console.log(worker);
    console.log(code);
    console.log(signal);
    cluster.fork();
  });
// Code to run if we're in a worker process
} else {
  // other code like setup app and stuff
  var doCron = function() {
    // setup cron jobs...
  };

  // Receive messages from the master process.
  process.on('message', function(msg) {
    console.log('Worker ' + process.pid + ' received message from master.', msg);
    if (msg.order == "startCron") {
      doCron();
    }
  });
}
I also had a problem with the cluster module and finally found a simple way to solve it:
let the master process execute the cron job.
My project uses Kue to manage jobs; when the cron job runs, I get a list of jobs to enqueue.
index.js
global.cluster = require('cluster');

if (cluster.isMaster) {
  const cpuCount = require('os').cpus().length;
  for (let i = 0; i < cpuCount; i += 1) {
    cluster.fork();
  }
} else {
  // start your express server here
  require('./server');
}

cluster.on('exit', worker => {
  logger.warn('Worker %d died :(', worker.id);
  cluster.fork();
});
cron.js
const cron = require('cron').CronJob;

const job = new cron('* * * * *', async () => {
  if (cluster.isMaster) {
    console.log('cron trigger');
  }
});
job.start();
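A possible variation, sketched here: require cron.js from the master branch only, which makes the isMaster check inside the tick handler unnecessary:

// index.js (sketch)
if (cluster.isMaster) {
  require('./cron'); // only the master schedules the job
  // ... fork workers as above
}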
Hope this helps.

Execute Grunt task using NodeJS cluster

I have a Grunt task and currently I am utilising AsyncJS to run it. AsyncJS works well, but I feel it could be more powerful if I could utilise the Node.js cluster module to run it. I have checked out Grunt Parallel and Grunt Concurrent, and they are not much different from what I am doing in my Grunt task. Any suggestions on utilising the Node.js cluster module to speed up task execution?
Currently I am doing it like this:
var queue = async.queue(function (task, cb) {
  // Process task with PhantomJS and then
  cb();
}, require('os').cpus().length);

async.each(htmlPages, function (val, cb) {
  queue.push(val, function () {
    cb();
  });
}, function () {
  console.log('Completed');
  done();
});
How can I make this work with NodeJS cluster?
One way to do it is to spawn the number of workers that you want using the cluster module. Then send messages to them when you want to start them working on something.
Below is code that initialises os.cpus().length workers and a queue that sends work to them. It then pushes everything in htmlPages onto that queue, waits for it to finish, and finally kills all the workers.
var os = require('os');
var async = require('async');
var cluster = require('cluster');

if (cluster.isWorker) {
  process.on('message', function(msg) {
    // Do the PhantomJS stuff
    process.send(theResult);
  });
}

if (cluster.isMaster) {
  var workers = os.cpus().map(function () {
    return cluster.fork();
  });

  var queue = async.queue(function (msg, cb) {
    var worker = workers.pop();
    worker.once('message', function (msg) {
      workers.push(worker);
      cb(null, msg);
    });
    worker.send(msg);
  }, workers.length);

  async.each(htmlPages, queue.push.bind(queue), function (err) {
    if (err) { throw err; }
    workers.forEach(function (worker) {
      worker.kill();
    });
    console.log('Completed');
  });
}
