I'm trying to write a small node application that will search through and parse a large number of files on the file system.
In order to speed up the search, we are attempting to use some sort of map reduce. The plan would be the following simplified scenario:
Web request comes in with a search query
3 processes are started that each get assigned 1000 (different) files
once a process completes, it would 'return' its results back to the main thread
once all processes complete, the main thread would continue by returning the combined result as a JSON result
The questions I have with this are:
Is this doable in Node?
What is the recommended way of doing it?
I've been fiddling, but have come no further than the following example using child processes:
initiator:
var child_process = require('child_process');

function Worker() {
  return child_process.fork("myProcess.js");
}

// workItems and itemsPerProcess are assumed to be defined elsewhere
for (var i = 0; i < require('os').cpus().length; i++) {
  var worker = new Worker();
  worker.send(workItems.slice(i * itemsPerProcess, (i + 1) * itemsPerProcess));
}
myProcess.js
process.on('message', function(msg) {
  var valuesToReturn = [];
  // Do file reading here
  // How would I return valuesToReturn?
  process.exit(0);
});
Few sidenotes:
I'm aware the number of processes should be dependent on the number of CPUs on the server
I'm also aware of speed restrictions in a file system. Consider it a proof of concept before we move this to a database or Lucene instance :-)
Should be doable. As a simple example:
// parent.js
var child_process = require('child_process');
var numchild = require('os').cpus().length;
var done = 0;

for (var i = 0; i < numchild; i++) {
  var child = child_process.fork('./child');
  child.send((i + 1) * 1000);
  child.on('message', function(message) {
    console.log('[parent] received message from child:', message);
    done++;
    if (done === numchild) {
      console.log('[parent] received all results');
      // ... combine the results and return them as JSON here
    }
  });
}
// child.js
process.on('message', function(message) {
  console.log('[child] received message from server:', message);
  setTimeout(function() {
    process.send({
      child  : process.pid,
      result : message + 1
    });
    process.disconnect();
  }, (0.5 + Math.random()) * 5000);
});
So the parent process spawns a number of child processes and passes each of them a message. It also installs an event handler to listen for any messages sent back from the child (with the result, for instance).
The child process waits for messages from the parent, and starts processing (in this case, it just starts a timer with a random timeout to simulate some work being done). Once it's done, it sends the result back to the parent process and uses process.disconnect() to disconnect itself from the parent (basically stopping the child process).
The parent process keeps track of the number of child processes started, and the number of them that have sent back a result. When those numbers are equal, the parent has received all results from the child processes, so it can combine them and return the combined JSON result.
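To tie that back to the original scenario (a web request whose answer should come back as one JSON result), here's a minimal sketch of that combining step. It's only an illustration: runSearch, fileChunks and the shape of the child messages are assumptions, not part of the example above.

var child_process = require('child_process');

// Sketch only: fileChunks is an assumed, pre-split array with one array of
// file paths per child process.
function runSearch(fileChunks, callback) {
  var results = [];
  var done = 0;
  var numchild = fileChunks.length;

  for (var i = 0; i < numchild; i++) {
    var child = child_process.fork('./child');
    child.send(fileChunks[i]);
    child.on('message', function(message) {
      results.push(message.result); // collect each child's partial result
      done++;
      if (done === numchild) {
        callback(null, JSON.stringify(results)); // the combined JSON result
      }
    });
  }
}

An HTTP handler would then call runSearch() with the query's file chunks and write the resulting JSON string into the response.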
For a distributed problem like this, I've used zmq and it has worked really well. I'll describe a similar problem that I ran into, attempted to solve via child processes (and failed), and then solved with zmq.
Using bcrypt, or another expensive hashing algorithm, is wise, but it blocks the node process for around 0.5 seconds. We had to offload this to a different server, and as a quick fix I used essentially exactly what you did: run a child process, send messages to it, and get it to respond. The only issue we found was that, for whatever reason, our child process would pin an entire core when it was doing absolutely no work. (I still haven't figured out why this happened; we ran a trace and it appeared that epoll was failing on the stdout/stdin streams. It would also only happen on our Linux boxes and would work fine on OS X.)
edit:
The pinning of the core was fixed in https://github.com/joyent/libuv/commit/12210fe and was related to https://github.com/joyent/node/issues/5504, so if you run into the issue and you're using centos + kernel v2.6.32: update node, or update your kernel!
Regardless of the issues I had with child_process.fork(), here's a nifty pattern I always use
client:
var child_process = require('child_process');

function FileParser() {
  this.__callbackById = [];
  this.__callbackIdIncrement = 0;
  this.__process = child_process.fork('./child');
  this.__process.on('message', this.handleMessage.bind(this));
}

FileParser.prototype.handleMessage = function handleMessage(message) {
  var error = message.error;
  var result = message.result;
  var callbackId = message.callbackId;
  var callback = this.__callbackById[callbackId];
  if (!callback) {
    return;
  }
  callback(error, result);
  delete this.__callbackById[callbackId];
};

FileParser.prototype.parse = function parse(data, callback) {
  this.__callbackIdIncrement = (this.__callbackIdIncrement + 1) % 10000000;
  this.__callbackById[this.__callbackIdIncrement] = callback;
  this.__process.send({
    data: data, // optionally you could pass in the path of the file, and open it in the child process
    callbackId: this.__callbackIdIncrement
  });
};

module.exports = FileParser;
child process:
process.on('message', function(message) {
  var callbackId = message.callbackId;
  var data = message.data;

  function respond(error, response) {
    process.send({
      callbackId: callbackId,
      error: error,
      result: response
    });
  }

  // parse data..
  respond(undefined, "computed data");
});
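For the file-search use case from the question, the "// parse data.." step could be filled in roughly like this. This is a sketch only, and it assumes the parent sends data in the shape { files: [...paths], query: '...' }, which the pattern above doesn't prescribe:

var fs = require('fs');

// Hypothetical body for the "parse data.." step: return the paths of the
// files whose contents contain the search term.
function parseFiles(data) {
  var matches = [];
  data.files.forEach(function(file) {
    var contents = fs.readFileSync(file, 'utf8');
    if (contents.indexOf(data.query) !== -1) {
      matches.push(file);
    }
  });
  return matches;
}

// Inside the message handler above you would then call:
// respond(undefined, parseFiles(data));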
We also need a pattern to synchronize the different processes: when each process finishes its task it will respond to us, we'll increment a count for each process that finishes, and then call the Semaphore's callback when we've hit the count we want.
function Semaphore(wait, callback) {
  this.callback = callback;
  this.wait = wait;
  this.counted = 0;
}

Semaphore.prototype.signal = function signal() {
  this.counted++;
  if (this.counted >= this.wait) {
    this.callback();
  }
};

module.exports = Semaphore;
here's a use case that ties all the above patterns together:
var FileParser = require('./FileParser');
var Semaphore = require('./Semaphore');

var arrFileParsers = [];
for (var i = 0; i < require('os').cpus().length; i++) {
  var fileParser = new FileParser();
  arrFileParsers.push(fileParser);
}

function getFiles() {
  return ["file", "file"];
}

var arrResults = [];
function onAllFilesParsed() {
  console.log('all results completed', JSON.stringify(arrResults));
}

var lock = new Semaphore(arrFileParsers.length, onAllFilesParsed);

arrFileParsers.forEach(function(fileParser) {
  var arrFiles = getFiles(); // you need to decide how to split the files into 1k chunks
  fileParser.parse(arrFiles, function(error, result) {
    arrResults.push(result);
    lock.signal();
  });
});
Eventually I used http://zguide.zeromq.org/page:all#The-Load-Balancing-Pattern, where the client was the nodejs zmq client and the workers/broker were written in C. This allowed us to scale across multiple machines, instead of just a local machine with subprocesses.
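For a rough idea of what the Node side of that looks like, here's a minimal sketch using the legacy zmq npm package; the broker endpoint and the message format are assumptions, and the broker/workers from the guide are not shown:

var zmq = require('zmq');

// REQ socket talking to the broker frontend from the load-balancing pattern
var requester = zmq.socket('req');
requester.connect('tcp://broker.internal:5559'); // assumed broker address

requester.on('message', function(reply) {
  console.log('received reply:', reply.toString());
});

requester.send(JSON.stringify({ query: 'needle', files: ['a.txt', 'b.txt'] }));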
Related
I am using async.queue to ensure that certain file copies in a service happen at most n at a time, but watching the files copy, I sometimes see many more concurrent copies than the queue allows. Does anyone see something I may have missed in the implementation below?
createQueue(limit: number) {
  let self = this;
  return async.queue(function(cmdObj, callback) {
    console.log("Beginning copy");
    let cmd = cmdObj.cmd;
    let args = cmdObj.args;
    let request = cmdObj.req;
    request.state = State.IN_PROGRESS;
    self.reportStatus(request.destination);

    const proc = spawn(cmd, args); // uses an rsync command upstream
    proc.on("close", code => {
      if (code !== 0) {
        request.state = State.ERRORED;
        self.reportStatus(request.destination); // these just report to the caller
        statusMap.delete(request.destination);
      } else {
        fs.rename(request.destination + ".part", request.destination);
        request.state = State.COMPLETED;
        self.reportStatus(request.destination); // same here
        statusMap.delete(request.destination);
      }
      callback();
    });
    proc.on("error", err => {
      console.error("COPY ERR: " + err);
    });
  }, limit); // limit here, for example, may be two, but I see four copies concurrently
}
EDIT:
I now believe this is a side effect of the rest of the system...queues being cleared and reinitialized AFTER copies have started...so when new items are added to the reinitialized queues, they kick off immediately, as the system has no idea if something has been handed off to userland and is currently running.
So, this was user error...PEBCAK! Posting the solution more as a cautionary tale:
The queues above were working as designed, but I had an endpoint for the calling server to clear the queues as necessary; the problem was I was using kill() and re-initializing the queues, losing all track of any jobs in progress and their callbacks. As soon as a new item hit the fresh queue, it would think nothing was happening and spawn a new copy process. I resolved it by using remove() to clear the queues instead of re-initializing them.
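For reference, the fix looks roughly like this, assuming async v3's queue.remove(), whose test callback receives a { data, priority } node for each waiting task:

// Drop everything still waiting without touching tasks already in progress,
// instead of killing the queue and re-initializing it (which loses the
// callbacks of in-flight jobs). Assumes async v3's queue.remove().
function clearPendingCopies(queue) {
  queue.remove(function () { return true; });
}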
I'd like to fork a long running express request in node and send an express response with the child, allowing the parent to serve other requests. I'm already using cluster, but I'd like to fork another process in addition to the cluster for specific long-running requests. What I'd like to prevent is all the processes in the cluster being consumed by specific long-running requests, while most of the other requests are fast.
Thanks
var express = require('express');
var webserver = express();

webserver.get("/test", function(request, response) {
  // long running HTTP request
  response.send(...);
});
What I'm thinking of is something like the following, although I'm not sure this works:
var cp = require('child_process');
var express = require('express');
var webserver = express();

webserver.get("/test", function(request, response) {
  var child = cp.fork('do_nothing.js');
  child.on("message", function(message) {
    if (message == "start") {
      response.send(...);
      process.exit();
    }
  });
  child.send("start");
});
Let me know if anyone knows how to do this.
Edit: So, the idea is that the child could take a long time. There are a limited number of processes in the cluster serving express responses and I don't want to consume them all on a specific long-running request type. In the code below, the entire cluster would be consumed by the long running express requests.
while (1) {
  if (rand() % 100 == 0) {
    if (fork() == 0) {
      sleep(hour(1));
      exit(0);
    }
  } else {
    sleep(second(1));
  }
  waitpid(WAIT_ANY, &status, WNOHANG);
}
Edit: I am going to mark the self-answer as solved. I'm sure there's a way to pass a socket to a child but it's not really necessary because the cluster master can manage all child processes. Thanks for your help.
Your second code block is confusing because it appears that you're killing the parent process with process.exit() rather than the child.
In any case, if we assume the problem is this:
You have a cluster of "regular processes".
Occasionally, you want to take an incoming request that was assigned to one of the cluster processes and pass it off to a long running child that will eventually send the response.
After sending the response, the long running child process should exit.
You have a couple options.
You can have the clustered process that was assigned the request, start up a child, send it some initial data and listen for a message back from the child. When it gets the message back from the child, it can send the response and kill the child. This appears to be what you're attempting to do in your second code block.
You can have the clustered process that was assigned the request, start up a child and reassign the request socket to the child process and the child can then own that socket from then on. When it finally sends the response, it can then exit itself.
The first is simpler because no socket assignment from one process to another is required. To implement the second, you'd have to write or find the code to do the socket reassignment and then have the request reconstituted as an express request within the child. The cluster module does something like this, so the code is there to be found and learned from, but I'm not aware of a trivial way to do it.
Personally, I don't see any particular downside to the first. I suppose if the clustered process were to die for some reason, you'd lose the long-running request socket, but hopefully you can just code your clustered processes not to die unnecessarily.
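A bare-bones sketch of the first option, with the fix for the process.exit() confusion mentioned above (do_long_work.js and the message shape are made up for illustration; webserver is the express() app from your question):

var cp = require('child_process');

webserver.get("/test", function(request, response) {
  var child = cp.fork('do_long_work.js'); // hypothetical worker script
  child.on("message", function(message) {
    response.send(message.result); // respond once the child reports back
    child.kill();                  // stop the child, not the parent
  });
  child.send({ start: true, params: request.query });
});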
You can read this article on sending a socket to a new node.js process:
Sending a socket to a forked process
And, this node.js doc on sending a socket:
Example: sending a socket object
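The shape of that socket hand-off is roughly the following (a sketch based on those docs; the file name and messages are placeholders), which also shows why the request would need to be reconstituted inside the child:

// parent
var cp = require('child_process');
var net = require('net');

var child = cp.fork('long_request_child.js'); // hypothetical child script

var server = net.createServer(function(socket) {
  child.send('socket', socket); // hand the raw TCP connection to the child
});
server.listen(1337);

// long_request_child.js
process.on('message', function(message, socket) {
  if (message === 'socket' && socket) {
    socket.end('handled by child process\n'); // the child now owns the connection
  }
});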
So, I've verified that this is not necessary for my use case, but I was able to get it working using the code below. It's not exactly what the OP asks for, but it works.
What it's doing is sending an instruction to the cluster master, which forks the additional process upon receipt of the slow express request.
Since the express request doesn't need to know the status of the newly forked cluster worker, it just handles the slow request as normal and then exits.
The instruction to the cluster master informs the master not to replace the dying slow express request process, so the number of workers reverts to the original number after the slow request finishes.
The pool will increase in size when there are slow requests, but then revert to normal. This will prevent, say, 20 simultaneous slow requests from bringing down the cluster.
var numberOfWorkers = 10;
var workerCount = 0;
var slowRequestPids = { };

if (cluster.isMaster) {
  for (var i = 0; i < numberOfWorkers; i++) {
    workerCount++;
    cluster.fork();
  }

  cluster.on('exit', function(worker) {
    workerCount--;
    var pidString = String(worker.process.pid);
    if (pidString in slowRequestPids) {
      delete slowRequestPids[pidString];
      if (workerCount >= numberOfWorkers) {
        logger.info('not forking replacement for slow process');
        return;
      }
    }
    logger.info('forking replacement for a process that died unexpectedly');
    workerCount++;
    cluster.fork();
  });

  cluster.on("message", function(msg) {
    if (typeof msg.fork != "undefined" && workerCount < 100) {
      logger.info("forking additional process upon slow request");
      slowRequestPids[msg.fork] = 1;
      workerCount++;
      cluster.fork();
    }
  });

  return;
}

webserver.use("/slow", function(req, res) {
  process.send({ fork: String(process.pid) });
  sleep.sleep(300);
  res.send({ response_from: "virtual child" });
  res.on("finish", function() {
    logger.info('process exits, restoring cluster to original size');
    process.exit();
  });
});
My Node.js program - which is an ordinary command line program that by and large doesn't do anything operationally unusual, nothing system-specific or asynchronous or anything like that - needs to write messages to a file from time to time, and then it will be interrupted with ^C and it needs the contents of the file to still be there.
I've tried using fs.createWriteStream but that just ends up with a 0-byte file. (The file does contain text if the program ends by running off the end of the main file, but that's not the scenario I have.)
I've tried using winston but that ends up not creating the file at all. (The file does contain text if the program ends by running off the end of the main file, but that's not the scenario I have.)
And fs.writeFile works perfectly when you have all the text you want to write up front, but doesn't seem to support appending a line at a time.
What is the recommended way to do this?
Edit: specific code I've tried:
var fs = require('fs')
var log = fs.createWriteStream('test.log')

for (var i = 0; i < 1000000; i++) {
  console.log(i)
  log.write(i + '\n')
}
Run for a few seconds, hit ^C, leaves a 0-byte file.
Turns out Node provides a lower level file I/O API that seems to work fine!
var fs = require('fs')
var log = fs.openSync('test.log', 'w')

for (var i = 0; i < 100000; i++) {
  console.log(i)
  fs.writeSync(log, i + '\n')
}
NodeJS doesn't work in the traditional way. It uses a single thread, so by running a large loop and doing I/O inside it, you aren't giving it a chance (i.e. releasing the CPU) to do other async operations, e.g. flushing the memory buffer to the actual file.
The logic must be: do one write, then pass your function (which invokes the write) as a callback to process.nextTick, or as a callback for the write stream's 'drain' event (if the buffer was full during the last write).
Here's a quick and dirty version which does what you need. Notice that there are no long-running loops or other CPU blockage; instead I schedule my subsequent writes for the future and return quickly, momentarily freeing up the CPU for other things.
var fs = require('fs')
var log = fs.createWriteStream('test.log');
var i = 0;

function my_write() {
  if (i++ < 1000000) {
    var res = log.write("" + i + "\r\n");
    if (!res) {
      // buffer is full: wait once for 'drain' before continuing
      // (once() so listeners don't pile up each time the buffer fills)
      log.once('drain', my_write);
    } else {
      process.nextTick(my_write);
    }
    console.log("Done" + i + " " + res + "\r\n");
  }
}

my_write();
This function might also be helpful.
/**
 * Write `data` to a `stream`. If the buffer is full, this will block
 * until it's flushed and ready to be written again.
 * [see](https://nodejs.org/api/stream.html#stream_writable_write_chunk_encoding_callback)
 */
export function write(data, stream) {
  return new Promise((resolve, reject) => {
    if (stream.write(data)) {
      process.nextTick(resolve);
    } else {
      stream.once("drain", () => {
        stream.off("error", reject);
        resolve();
      });
      stream.once("error", reject);
    }
  });
}
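A usage sketch for that helper, rewriting the original loop so each write waits for backpressure (assuming the helper lives in write.js and ES modules are in use):

import fs from "fs";
import { write } from "./write.js"; // the helper above, assumed to live in write.js

async function main() {
  const log = fs.createWriteStream("test.log");
  for (let i = 0; i < 1000000; i++) {
    await write(i + "\n", log); // pauses on 'drain' whenever the buffer fills
  }
  log.end(); // flush and close when finished
}

main();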
You are writing into the file using a for loop, which is bad, but that's another issue. First of all, createWriteStream doesn't close the file automatically; you should call close.
If you call close immediately after the for loop, it will close without writing, because the writes are async.
For more info read here: https://nodejs.org/api/fs.html#fs_fs_createwritestream_path_options
The problem is the async function inside the for loop.
When I change test.js, the console.log("Source code change, start to restart worker one by one") runs 3 times. I have one master process, so it should run only once.
var cluster = require('cluster');
var fs = require('fs');

if (cluster.isMaster) {
  for (var i = 0; i < 2; i++) {
    cluster.fork();
  }
  fs.watch('./config/test.js', function(curr, prev) {
    console.log("Source code change, start to restart worker one by one");
    delete require.cache[require.resolve('./config/config.js')];
  });
} else {
  var config = require('./config/test.js');
}
Somehow your watchdog (fs.watch) is triggered three times, maybe because you write three blocks of data? Maybe it would trigger more often when the file becomes bigger? (Maybe WinSCP is truncating the file before writing?)
I assume you want to trigger only when the file is uploaded completely?
Unfortunately you can't catch the message IN_CLOSE_WRITE from inotify(..).
So do something like clearTimeout(to); to = setTimeout(function() { /* insert real stuff here */ }, 1000) inside your fs.watch callback. The real work then fires only once, after the file has stopped changing for 1000 ms...
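Spelled out as a runnable sketch (the restart logic itself is left as a placeholder):

var fs = require('fs');

var debounceTimer = null;
fs.watch('./config/test.js', function() {
  clearTimeout(debounceTimer); // every change pushes the pending restart further out
  debounceTimer = setTimeout(function() {
    console.log("Source code change, start to restart worker one by one");
    // ... invalidate the require cache and restart workers here ...
  }, 1000); // fires only once the file has been quiet for 1000 ms
});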
Apache Web Server has a config parameter called MaxRequestsPerChild.
http://httpd.apache.org/docs/2.0/en/mod/mpm_common.html#maxrequestsperchild
"After MaxRequestsPerChild requests, the child process will die."
To avoid crashes caused by memory leaks, too many connections, or other unexpected errors, should I do the same thing when using the node.js Cluster module?
*I'm using Nginx in front of node.js, not Apache. I mentioned it just to make the explanation easier.
I just implemented it like this:
var maxReqsPerChild = 10; // Small number for debug
var numReqs = 0;

if (cluster.isMaster) {
  var numCPUs = require('os').cpus().length;
  for (var i = 0; i < numCPUs; i++) {
    cluster.fork();
  }
  cluster.on('death', function(worker) {
    // Fork another when one died
    cluster.fork();
  });
} else {
  http.createServer(function(webReq, webRes) {
    // Count up
    numReqs++;
    // Doing something here
    // Kill myself
    if (numReqs > maxReqsPerChild) {
      process.kill(process.pid); // Or more simply, process.exit() is better?
    }
  }).listen(1338);
}
This has been working well up until now, but I'm wondering if there is a more proper way.
MaxRequestsPerChild is good for hiding memory leak troubles, but it shouldn't be relied on too much, because it just hides the real trouble. First try to avoid the memory leaks.
It shouldn't be used to avoid other issues like too many connections, nor other unexpected errors.
When you do use MaxRequestsPerChild, you shouldn't use process.kill() nor process.exit(),
because those immediately close all ongoing connections.
Instead, you should call server.close(), which will wait for all ongoing connections to finish and then fire the 'close' event.
var server = http.createServer(...);

server.on("close", function() {
  process.exit(0);
});

server.on("request", function() {
  requestCount += 1;
  if (options.max_requests_per_child && (requestCount >= options.max_requests_per_child)) {
    process.send({ cmd: "set", key: "overMaxRequests", value: 1 });
    if (!server.isClosed) {
      server.close();
      server.isClosed = 1;
    }
  }
});
see a complete working example here:
https://github.com/mash/node_angel