Create a Scheduled Job on Startup With Express - node.js

I'm just getting into Express and have run into an issue. I have an app serving a REST API. That is all working fine. But I wanted to add a scheduled job using node-schedule (https://www.npmjs.com/package/node-schedule). I have implemented this module:
// scheduler module: thin wrapper around node-schedule so callers can
// register a recurring check for a monitor object.
var schedule = require('node-schedule');
var scheduler = {
// Schedules monitor.check() to run every minute ('* * * * *' cron spec)
// and returns the Job handle so the caller can cancel it later.
scheduleJob: function(monitor) {
var job = schedule.scheduleJob('* * * * *', function(){
monitor.check();
});
return job;
}
}
module.exports = scheduler;
In app.js I have added the following to the bottom since I found a single stack overflow question that was similar. This did not work for me:
// NOTE(review): an Express app does not emit a 'listening' event, so this
// handler never fires — which is why the log line below never appears.
app.on('listening', function () {
console.log("App started, gathering monitors");
var allMonitors = queries.getAllMonitorsInt();
for (var i = 0; i < allMonitors.length; i++) {
console.log("Monitor found: " + allMonitors[i].name);
shdlr.scheduleJob(allMonitors[i]);
}
});
I don't even get the "App started..." log message.
Am I doing this the right way or am I way off target?

The scheduler should be placed inside of app.listen callback, like this:
// The callback passed to app.listen() runs once the HTTP server is
// actually listening, so the jobs are scheduled only after startup.
app.listen(3000, function () {
console.log("App started, gathering monitors");
var allMonitors = queries.getAllMonitorsInt();
for (var i = 0; i < allMonitors.length; i++) {
console.log("Monitor found: " + allMonitors[i].name);
shdlr.scheduleJob(allMonitors[i]);
}
});
Express doesn't support a 'listening' event — see the related GitHub issue.

Related

child_process.fork() in Electron

Is it possible to fork a child_process from an Electron renderer process? I found some posts across the net, but none of them helped me get my code working.
I created a module that forks child processes. This code works when I run it directly under Node from the command line. But when I try to integrate it into my Electron app, I cannot communicate with the child via the child.send() method.
// create fork
const fork = require('child_process').fork;
const fs = require('fs');
const img_path = [
'path/to/an/image1.jpg',
'path/to/an/image2.jpg',
'path/to/an/image3.jpg'
];
// handles of all forked children
const cp = [];
// working copy of the path list; shifted down as children consume items
const temp_path = img_path.map((item) => item);
createAndResize(2);
// Forks `num` children, then starts feeding them image paths.
function createAndResize(num) {
return childResize(createChildProcess(num));
}
// Recursively forks child.js `num` times, accumulating the
// ChildProcess handles in the shared `cp` array.
function createChildProcess(num) {
if(num <= 0) {
return cp;
} else {
let cf = fork('./child.js');
cp.push(cf);
num -= 1;
return createChildProcess(num);
}
}
// Hands one path at a time to each child; when a child replies via
// 'message' it gets the next path until temp_path is drained.
function childResize(list) {
if(list.length <=0) {
return true;
} else {
// child_process is created
let child = list.shift();
child.on('message', function (data) {
if (!temp_path.length) {
// NOTE(review): `data` is used as a pid here — presumably the child
// reports its own pid when done; confirm, otherwise this kill may misfire.
process.kill(data);
} else {
child.send(temp_path.shift());
}
});
child.send(temp_path.shift());
setTimeout(function() {
childResize(list);
}, 1000);
}
}
//child.js
// Worker entry point: logs every message the parent sends via child.send().
process.on('message', function(msg) {
console.log(msg); //this is never reached
}); // fixed: the original `};` was missing the closing `)` of process.on(...)
EDIT: based on the comment below, I now fork the child processes from the main process. The communication seems to work, with a few exceptions. But first, my new code:
// myView.js
// Renderer-side view: pulls the fork helper out of the main process
// via Electron's `remote` module and invokes it there.
const { remote } = require('electron');
const mainProcess = remote.require('./main.js');
const { forkChildfromMain } = mainProcess;
forkChildfromMain();
// main.js
const fork = require('child_process').fork;
// handles of all forked children
let cp = [];
// Entry point exposed to the renderer: fork four resize workers.
function forkChildfromMain() {
createAndResize(4);
}
function createAndResize(num) {
return childResize(createChildProcess(num));
}
// Recursively forks resize.js `num` times, collecting handles in `cp`.
function createChildProcess(num) {
if(num <= 0) {
return cp;
} else {
let cf = fork('./resize.js');
cp.push(cf);
num -= 1;
return createChildProcess(num);
}
}
// Sends a greeting to each child in turn, one second apart, and logs
// whatever the child echoes back.
function childResize(list) {
if(list.length <=0) {
return true;
} else {
let child = list.shift();
child.on('message', function (msg) {
// logs 'Hello World' to the cmd console
console.log(msg);
});
child.send('Hello World');
setTimeout(function() {
childResize(list);
}, 1000);
}
}
exports.forkChildfromMain = forkChildfromMain;
// child.js
// Worker: echoes every message from the parent back unchanged.
process.on('message', function(msg) {
// this console statement never gets logged
// I think I must integrate an icpModule
console.log(msg);
//process send msg back to main.js
process.send(msg);
})
OUTDATED: The main problem now is that I think Electron 'spawns' new child processes and does not fork them.
Because, when I look at my task manager I see only one instance from electron. When I run the code in a node env, I see there were fork multiple node instances.
The reason I prefer to fork my child processes into multiple Node instances is that I want to do a lot of image manipulation. When I fork children, each child gets its own Node instance with its own memory and so on. I think that would be more performant than a single instance sharing memory and resources among all of the children.
The second unexpected behavior is that the console.log statement in the child is not printed to my cmd console. But that is the smaller issue :)
EDIT: After I analyzed my task manager a little more in depth, I saw that Electron spawns multiple child processes like it should.
Electron's renderer process is not the right place for forking child processes, you should think about moving this to the main process.
Nonetheless, it should work the way you describe. If you'd make a minimal example available somewhere I could take a closer look.

server.close() doesn't work in a Vow teardown

I'm trying to write some Vows-based tests for my run-of-the-mill Express app.
Here's the test source:
var vows = require('vows');
var assert = require('assert');
var startApp = require('./lib/start-app.js');
var suite = vows.describe('tournaments');
suite.addBatch({
"When we setup the app": {
// topic: start the Express server and hand the http.Server to the tests
topic: function() {
return startApp();
},
// teardown: attempt to close the server once the batch finishes
teardown: function(topic) {
if (topic && topic.close) {
topic.close();
}
},
"it works": function(topic) {
assert.isObject(topic);
}
}
});
suite.run();
And here's start-app.js:
var app = require('../../app.js');
function start() {
var server = app.listen(56971, 'localhost');
return server;
}
module.exports = start;
app.js exports a regular Express.js app, created with express().
The problem is that whenever I run the test, topic.close() doesn't work in the teardown function, and the test hangs forever after succeeding. I've tried searching the web and adding lots and lots of console.logs, all to no avail.
I'm on the Windows x64 build of Node.js 4.2.0, and I'm using assert#1.3.0 and vows#0.8.1.
Any idea how I can make my test stop hanging?
Here's what I did to solve the issue in a project I was contributing to: a final batch whose only job is to close the server.
suite.addBatch({
'terminate server': {
// Closing the server inside a topic lets vows wait for the async
// close to complete via this.callback before the process exits.
topic: function() {
server.close(this.callback); // this is a regular node require(`http`) server, reused in several batches
},
'should be listening': function() {
/* This test is necessary to ensure the topic execution.
* A topic without tests will be not executed */
assert.isTrue(true);
}
}
}).export(module);
Before adding this test, suite would never end executing. You can check the results at https://travis-ci.org/fmalk/node-static/builds/90381188

simulate multiple socket.io connection

Not a duplicate of : this question, as I'm trying to use the link posted as answer to solve my problem.
I'm creating a little dummy socket client to help testing one of my product, it looks like so :
var ee = require('events').EventEmitter;
var util = require('util'); // fix: `util` is used below but was never required
require('http').globalAgent.maxSockets = 1000;

// Dummy socket.io client: connects to the configured server and starts
// emitting "msg" every second whenever the server assigns it a task.
function Dummy(){
this.config = require('../config/credentials.js');
this.socket = require('socket.io-client')(this.config.socketIO.url);
var self = this;
this.socket.on('task', function(task) {
self.createTask(task);
});
}
util.inherits(Dummy, ee);
module.exports = Dummy;
// Emits "msg" once a second, forever; the interval handle is not kept,
// so the task cannot be stopped later.
Dummy.prototype.createTask = function(name){
var self = this;
setInterval(function sendStuff(){
self.socket.emit("msg")
}, 1000);
};
On its own, it works fine; However, when I try to launch many of them like so :
// Spin up 100 dummy clients (simplified; the real code staggers creation).
for (var i = 0; i < 100; i++) {
fakeClients.push(new Dummy());
};
It appears to pool connections, and they show up as only one client.
Based on this link, I thought that by using socket.io-client, I'd avoid the pooling behaviour, yet it doesn't work. Am I doing something wrong?
I've simplified the loop btw, I actually make sure there's a delay between creations to avoid sync heartbeats.
Ideas?
Found the answer, it goes like this :
function Dummy(){
this.config = require('../config/credentials.js');
// "force new connection" makes socket.io-client open a fresh socket per
// Dummy instead of reusing the pooled connection for the same URL.
this.socket = require('socket.io-client').connect(this.config.socketIO.url, { "force new connection": true });
var self = this;
this.socket.on('task', function(task) {
self.createTask(task);
});
}
By using the connect() function, we can set the force new connection flag to true and avoid the pooling. Simple!

Getting dynamic "it"s to work based on array defined in "before" in mochajs?

Does anyone know how to accomplish something like this:
describe('dynamic array test', function(){
var checks;
before(function(){
checks = [];
for(var i = 0; i < 50; i++){
checks[i] = Math.floor((Math.random() * 10) + 1);
}
});
// NOTE(review): this loop runs at describe time, before the `before`
// hook has populated `checks`, so no tests are generated here.
for(var check in checks){
it("check #" + check, function(){
expect(checks[check]).to.equal(1);
});
}
});
Basically I have an array I generate in the before that I want the "it" output to reflect and then execute.
You can generate tests in a loop but the code that generates the tests (i.e. that calls it) cannot depend on the result of a computation performed in a hook like before. (Note here I said that the code that generates the test cannot depend on a computation performed in a hook. The code in the tests can depend on a computation performed in a hook.) The reason is due to how Mocha discovers and executes your tests, which I've explained here.
You can do something like this:
var expect = require('chai').expect;

// Generate the 50 checks inline at describe time; no `before` hook needed,
// since test generation cannot depend on hook results.
describe('dynamic array test', function(){
var check = 0;
while(check < 50){
it("check #" + check, function(){
expect(Math.floor((Math.random() * 10) + 1)).to.equal(1);
});
check += 1;
}
});
What you have looks fine, but you might need to update this:
// Original form: every `it` callback closes over the same `check` binding.
for(var check in checks){
it("check #" + check, function(){
expect(checks[check]).to.equal(1);
})
}
to
for(var check in checks){
// Pass `check` into the IIFE so each test closes over its own copy;
// without the parameter the wrapper changes nothing, because the inner
// function would still read the shared loop variable.
(function (check) {
it("check #" + check, function(){
expect(checks[check]).to.equal(1);
})
})(check)
}
To ensure the proper value of check is used in each test.

Cluster workers not listening in debug mode

When I run this script in debug mode the workers are created but none of them are listening, the master's listening event is never triggered and I can't make any HTTP requests.
When I run without debug mode everything's OK, so the problem is not with the script.
var cluster = require('cluster'),
http = require('http');
// Master: fork two workers and log whenever one starts listening.
if(cluster.isMaster){
cluster.fork();
cluster.fork();
cluster.on('listening', function(){
console.log('New process is listening');
});
}
// Worker: plain HTTP server shared across the forks on port 8080.
else{
var server = http.createServer(function(req, res){
res.end('hello');
});
server.listen(8080);
}
My command line arguments are simply:
node --debug-brk=59062 testCluster.js (debug)
node testCluster.js (normal)
I would appreciate it if anyone can shed some light on this, and perhaps run the script themselves.
The problem is caused because all processes (workers & master) are trying to listen for debug operations on the same port. I think this causes the workers to hang, the processes are started but no code actually runs.
To solve the problem I incremented the port number for each newly forked worker...
// Give every worker its own debugger port: copy the master's exec args
// and bump --debug-brk before each fork so workers don't collide on it.
var debugPort = getDebugPort(),
inDebugMode = !!debugPort;
var workerArgv = process.execArgv.concat();
if(inDebugMode){
cluster.setupMaster({
execArgv: workerArgv
});
}
// Forks one worker, assigning it the next debug port.
function fork(){
updateArgs(workerArgv, 'debug-brk', ++ debugPort);
cluster.fork();
};
With...
// Returns the numeric port from a --debug-brk=<port> exec argument,
// null when the value is not a number, or undefined when the flag is absent.
function getDebugPort(){
var args = process.execArgv;
for(var i = 0; i < args.length; i ++){
var arg = args[i];
if(arg.indexOf('--debug-brk=') == 0){
// fix: always pass an explicit radix to parseInt so the value
// cannot be reinterpreted (e.g. leading zeros on older engines).
var port = parseInt(arg.replace('--debug-brk=', ''), 10);
return isNaN(port) ? null : port;
}
}
};
// Rewrites the first "--<name>=<value>" entry in `args` with the new value,
// mutating the array in place; does nothing when the flag is absent.
function updateArgs(args, name, value){
var search = '--' + name + '=';
var idx = args.findIndex(function(arg){
return arg.indexOf(search) === 0;
});
if(idx !== -1){
args[idx] = search + value;
}
};
I was surprised that this even works in the WebStorm debugger, where each new worker process opens in its own tab but still has its logging routed to the master's tab.

Resources