NodeJS on multiple processors (PM2, Cluster, Recluster, Naught)

I am investigating options for running Node in a multi-core environment.
I'm trying to determine the best approach, and so far I've seen these options:
Use the built-in cluster library to spin up workers and respond to signals
Use PM2, but its cluster mode (pm2 -i) is listed as beta
Naught
Recluster
Are there other alternatives? What are folks using in production?

I've been using the default cluster library, and it works very well. I've had over 10,000 concurrent connections (multiple clusters on multiple servers) without problems.
It is suggested to use cluster together with domain for error handling.
This is lifted straight from http://nodejs.org/api/domain.html. I've made some changes to how it spawns new workers for each core of your machine, got rid of the if/else, and added Express.
var cluster = require('cluster'),
http = require('http'),
PORT = process.env.PORT || 1337,
os = require('os'),
server;
function forkClusters () {
var cpuCount = os.cpus().length;
// Create a worker for each CPU
for (var i = 0; i < cpuCount ; i += 1) {
cluster.fork();
}
}
// Master Process
if (cluster.isMaster) {
// You can also of course get a bit fancier about logging, and
// implement whatever custom logic you need to prevent DoS
// attacks and other bad behavior.
//
// See the options in the cluster documentation.
//
// The important thing is that the master does very little,
// increasing our resilience to unexpected errors.
forkClusters();
cluster.on('disconnect', function(worker) {
console.error('disconnect!');
cluster.fork();
});
}
function handleError (d) {
d.on('error', function(er) {
console.error('error', er.stack);
// Note: we're in dangerous territory!
// By definition, something unexpected occurred,
// which we probably didn't want.
// Anything can happen now! Be very careful!
try {
// make sure we close down within 30 seconds
var killtimer = setTimeout(function() {
process.exit(1);
}, 30000);
// But don't keep the process open just for that!
killtimer.unref();
// stop taking new requests.
server.close();
// Let the master know we're dead. This will trigger a
// 'disconnect' in the cluster master, and then it will fork
// a new worker.
cluster.worker.disconnect();
} catch (er2) {
// oh well, not much we can do at this point.
console.error('Error shutting down!', er2.stack);
}
});
}
// child Process
if (cluster.isWorker) {
// the worker
//
// This is where we put our bugs!
var domain = require('domain');
var express = require('express');
var app = express();
app.set('port', PORT);
// See the cluster documentation for more details about using
// worker processes to serve requests. How it works, caveats, etc.
var d = domain.create();
handleError(d);
// Now run the handler function in the domain.
//
// Put all of your code here. Any code outside of domain.run will not have its errors handled at the domain level; an error there will crash the app.
//
d.run(function() {
// this is where we start our server
server = http.createServer(app).listen(app.get('port'), function () {
console.log('Cluster %s listening on port %s', cluster.worker.id, app.get('port'));
});
});
}

We use Supervisor to manage our Node.js processes, to start them upon boot, and to act as a watchdog in case a process crashes.
We use Nginx as a reverse proxy to load-balance traffic between the processes, which listen on different ports.
This way each process is isolated from the others.
For example: Nginx listens on port 80 and forwards traffic to ports 8000-8003.
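As a rough sketch, the Nginx side of such a setup might look like this (the upstream name and ports are just placeholders for whatever your processes use):
upstream node_app {
    server 127.0.0.1:8000;
    server 127.0.0.1:8001;
    server 127.0.0.1:8002;
    server 127.0.0.1:8003;
}
server {
    listen 80;
    location / {
        proxy_pass http://node_app;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}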

I was using PM2 for quite a while, but their pricing is expensive for my needs, since I have my own analytics environment and don't require support, so I decided to experiment with alternatives. For my case, forever did the trick, and it's actually very simple:
forever -m 5 app.js
Another useful example is:
forever start app.js -p 8080

Related

Node.js cluster for only a specific function within an Express app

I am trying to run a Node.js cluster within my Express app, but only for one specific function.
My app is a standard Express app generated with the express app generator.
My app initially scrapes an eCommerce website to get a list of categories in an array. I want to be able to then scrape each category's products, concurrently, using child processes.
I do not want to have the whole Express app inside the child processes. When the app starts up I want only one process to scrape for the initial categories. Once that is done I only want the function that scrapes the products to be run concurrently in the cluster.
I have tried the following:
delegation-controller.js
var {em} = require('./entry-controller');
const cluster = require('cluster');
const numCPUs = require('os').cpus().length;
class DelegationController {
links = [];
constructor() {
em.on('PageLinks', links => {
this.links = links;
this.startCategoryCrawl();
});
}
startCategoryCrawl() {
if (cluster.isMaster) {
console.log(`Master ${process.pid} is running`);
for (let i = 0; i < numCPUs; i++) {
cluster.fork();
}
cluster.on('exit', (worker, code, signal) => {
console.log(`worker ${worker.process.pid} died`);
});
} else {
console.log(`Worker ${process.pid} started`);
process.exit();
}
}
}
module.exports = DelegationController;
But then I got an error:
/ecommerce-scraper/bin/www:58
throw error;
^
Error: bind EADDRINUSE null:3000
Which I am guessing is because it is trying to start the Express server again, but the port is already in use.
Am I able to do what I am trying to do, or am I misunderstanding how Node.js clusters work?
I believe this is not a case where you'd use the cluster module. Instead you need the child_process module, which lets you create separate processes. Here is the documentation.
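A minimal sketch of that approach, assuming a hypothetical scrape-category.js worker script and a scrapeProducts helper:
// parent process: fork one child per category
const { fork } = require('child_process');
function crawlCategories(categories) {
  categories.forEach((category) => {
    const child = fork('./scrape-category.js'); // hypothetical worker script
    child.send({ category });
    child.on('message', (products) => {
      console.log('Got %d products for %s', products.length, category);
      child.kill();
    });
  });
}
// child process (scrape-category.js):
// process.on('message', ({ category }) => {
//   scrapeProducts(category).then((products) => process.send(products));
// });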
I typically create my own Worker bootstrap that sits on top of my application. For things that need to run once, I have a convenient runonce function that is given a name and a callback. The function asks the primary process for an open (non-busy) process and sends back its PID. If the PID matches (all processes will claim ownership), the callback executes; if not, the function returns.
Example:
https://gist.github.com/jonshipman/abe627c687a46e7f5ea4b36bb919666c
NodeJS clustering creates identical copies of your application (through cluster.fork()). It's up to your application to ensure that actions aren't run twice when they aren't expected to be.
I believe that when using Express or http(s).createServer, things are set up so that the workers don't conflict over the port; instead the primary process distributes the load among them internally.
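For example, in a bare cluster setup every worker can call listen on the same port; the primary owns the actual socket and hands connections to the workers (a minimal sketch):
const cluster = require('cluster');
const http = require('http');
const os = require('os');
if (cluster.isMaster) {
  // fork one worker per CPU; none of them conflict over port 3000
  os.cpus().forEach(() => cluster.fork());
} else {
  // each worker "listens" on 3000, but the primary actually owns the socket
  http.createServer((req, res) => res.end(`handled by ${process.pid}`))
    .listen(3000);
}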

How to restart if NodeJS API service failed?

I have NodeJS code similar to the following:
cluster.js
'use strict';
const cluster = require('cluster');
var express = require('express');
const metricsServer = express();
const AggregatorRegistry = require('prom-client').AggregatorRegistry;
const aggregatorRegistry = new AggregatorRegistry();
var os = require('os');
if (cluster.isMaster) {
for (let i = 0; i < os.cpus().length; i++) {
cluster.fork();
}
metricsServer.get('/metrics', (req, res) => {
aggregatorRegistry.clusterMetrics((err, metrics) => {
if (err) console.log(err);
res.set('Content-Type', aggregatorRegistry.contentType);
res.send(metrics);
});
});
metricsServer.listen(3013);
console.log(
'Cluster metrics server listening to 3013, metrics exposed on /metrics'
);
} else {
require('./app.js'); // Here it'll handle all of our API service and it'll run under port 3000
}
As you can see in the above code, I'm using the manual NodeJS cluster method instead of the PM2 cluster, because I need to monitor my API via Prometheus. I usually start cluster.js via pm2 start cluster.js. Recently, due to a DB connection issue, our app.js service failed but cluster.js didn't; it looks like I haven't handled the DB connection error. I want to know:
How can I make sure my app.js and cluster.js always restart if they crash?
Is there a Linux crontab that could be set up to check that certain ports are always listening (i.e. 3000 and 3013)? (If this is a good idea, I'd appreciate it if you could provide the code; I'm not very familiar with Linux.)
Or I could deploy another NodeJS API to check that the services are running, but since my API is real-time and carries a certain amount of load, I'm not too happy to do this.
Any help would be appreciated. Thanks in advance.
You can use monit (https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-monit) on your server to regularly monitor your process. If your project crashes, monit restarts it and can even notify you. This requires some configuration on the server: monit regularly polls a port, and if it doesn't get any reply from that port, it restarts the process.
Otherwise you can use the forever module, which is easy to install and easy to use: https://www.npmjs.com/package/forever
It monitors your application and restarts it within a second.
I recently found out that we can listen for the worker's exit event when it dies or closes, and restart it accordingly.
Here is the code:
'use strict';
const cluster = require('cluster');
var express = require('express');
const metricsServer = express();
var os = require('os');
if (cluster.isMaster) {
for (let i = 0; i < os.cpus().length; i++) {
cluster.fork();
}
cluster.on(
"exit",
function handleExit( worker, code, signal ) {
console.log( "Worker has died.", worker.process.pid );
console.log( "Death was suicide:", worker.exitedAfterDisconnect );
// If a Worker was terminated accidentally (such as by an uncaught
// exception), then we can try to restart it.
if ( ! worker.exitedAfterDisconnect ) {
cluster.fork();
// CAUTION: If the Worker dies immediately, perhaps due to a bug in the
// code, you can run [from what I have READ] into rapid CPU consumption
// as Master continually tries to create new Workers.
}
}
);
} else {
require('./app.js');
}
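To guard against that rapid-respawn loop, one option is to back off before forking and cap the number of rapid restarts. A rough sketch (the thresholds are arbitrary) that could replace the fork call in the exit handler above:
// respawn with a delay, and give up if workers keep dying rapidly
let recentDeaths = 0;
function respawn() {
  recentDeaths += 1;
  if (recentDeaths > 10) {
    console.error('Workers are dying too fast; not respawning.');
    return;
  }
  setTimeout(() => cluster.fork(), 1000); // back off before forking
  setTimeout(() => { recentDeaths -= 1; }, 60000); // decay the counter
}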

Node Cluster issue using Socket.io and Redis

Ok, I have an express-powered API where I also have socket.io running to receive/send realtime events...all works just dandy. I need to cluster my app. I set everything up based on the below code. I spin up workers, they get connections and everything works, except the fact that now I can't "blast" to all socket.io connections. Here is the setup (taken from this):
var express = require('express'),
cluster = require('cluster'),
net = require('net'),
sio = require('socket.io'),
sio_redis = require('socket.io-redis');
var port = 3000,
num_processes = require('os').cpus().length;
if (cluster.isMaster) {
// This stores our workers. We need to keep them to be able to reference
// them based on source IP address. It's also useful for auto-restart,
// for example.
var workers = [];
// Helper function for spawning worker at index 'i'.
var spawn = function(i) {
workers[i] = cluster.fork();
// Optional: Restart worker on exit
workers[i].on('exit', function(code, signal) {
console.log('respawning worker', i);
spawn(i);
});
};
// Spawn workers.
for (var i = 0; i < num_processes; i++) {
spawn(i);
}
// Helper function for getting a worker index based on IP address.
// This is a hot path so it should be really fast. The way it works
// is by converting the IP address to a number by removing the dots,
// then compressing it to the number of slots we have.
//
// Compared against "real" hashing (from the sticky-session code) and
// "real" IP number conversion, this function is on par in terms of
// worker index distribution, only much faster.
var workerIndex = function (ip, len) {
var _ip = ip.split(/['.'|':']/),
arr = [];
for (var el in _ip) {
if (_ip[el] == '') {
arr.push(0);
}
else {
arr.push(parseInt(_ip[el], 16));
}
}
return Number(arr.join('')) % len;
}
// Create the outside facing server listening on our port.
var server = net.createServer({ pauseOnConnect: true }, function(connection) {
// We received a connection and need to pass it to the appropriate
// worker. Get the worker for this connection's source IP and pass
// it the connection.
var worker = workers[workerIndex(connection.remoteAddress, num_processes)];
worker.send('sticky-session:connection', connection);
}).listen(port);
} else {
// Note we don't use a port here because the master listens on it for us.
var app = new express();
// Here you might use middleware, attach routes, etc.
// Don't expose our internal server to the outside.
var server = app.listen(0, 'localhost'),
io = sio(server);
// Tell Socket.IO to use the redis adapter. By default, the redis
// server is assumed to be on localhost:6379. You don't have to
// specify them explicitly unless you want to change them.
io.adapter(sio_redis({ host: 'localhost', port: 6379 }));
// Here you might use Socket.IO middleware for authorization etc.
// Listen to messages sent from the master. Ignore everything else.
process.on('message', function(message, connection) {
if (message !== 'sticky-session:connection') {
return;
}
// Emulate a connection event on the server by emitting the
// event with the connection the master sent us.
server.emit('connection', connection);
connection.resume();
});
}
So I connect from various machines to test concurrency, workers do their thing and all is good, but when I get an IO connection, I'm logging the TOTAL "connected" count and it's always 1 per instance. I need a way to say
allClusterForks.emit(stuff)
I get the connection on the correct worker pid, but "ALL CONNECTIONS" always returns 1.
io.on('connection', function(socket) {
console.log('Connected to worker %s', process.pid);
console.log("Adapter ROOMS %s ", io.sockets.adapter.rooms);
console.log("Adapter SIDS %s ", io.sockets.adapter.sids);
console.log("SOCKETS CONNECTED %s ", Object.keys(io.sockets.connected).length);
});
I can see the subscribe/unsubscribe coming in using Redis MONITOR
1454701383.188231 [0 127.0.0.1:63150] "subscribe" "socket.io#/#gXJscUUuVQGzsYJfAAAA#"
1454701419.130100 [0 127.0.0.1:63167] "subscribe" "socket.io#/#geYSvYSd5zASi7egAAAA#"
1454701433.842727 [0 127.0.0.1:63167] "unsubscribe" "socket.io#/#geYSvYSd5zASi7egAAAA#"
1454701444.630427 [0 127.0.0.1:63150] "unsubscribe" "socket.io#/#gXJscUUuVQGzsYJfAAAA#"
These are connections from two different machines. I would expect, using the socket.io redis adapter, that these subscriptions would come in on the same redis connection, but they are different.
Am I just totally missing something? There's a surprising lack of documentation/articles out there for this that aren't either completely outdated/wrong/ambiguous.
EDIT:
Node v5.3.0
Redis v3.0.6
Socket.io v1.3.7
So if anyone comes across this: I figured out that "looking" at the counts of connected sockets across processes is simply not supported, but broadcasting or emitting to them is. So I've basically just been "testing" for no reason; it all works as expected. I WILL be rewriting the socket.io-redis adapter to allow checking counts across processes.
There was a pull request a few years ago to implement support for what I was trying to do: https://github.com/socketio/socket.io-redis/pull/15. I might try cleaning that up and re-submitting.
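In other words, with the redis adapter in place, a plain emit from any one worker already reaches sockets owned by every worker; conceptually:
// runs in any worker; the redis adapter relays it to sockets on all workers
io.emit('blast', { msg: 'hello everyone' });
// rooms work across processes as well
io.to('some-room').emit('news', { update: true });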

How do I stop a Node.js HTTP server programmatically such that the process exits?

I'm writing some tests and would like to be able to start/stop my HTTP server programmatically. Once I stop the HTTP server, I would like the process that started it to exit.
My server is like:
// file: `lib/my_server.js`
var LISTEN_PORT = 3000
function MyServer() {
http.Server.call(this, this.handle)
}
util.inherits(MyServer, http.Server)
MyServer.prototype.handle = function(req, res) {
// code
}
MyServer.prototype.start = function() {
this.listen(LISTEN_PORT, function() {
console.log('Listening for HTTP requests on port %d.', LISTEN_PORT)
})
}
MyServer.prototype.stop = function() {
this.close(function() {
console.log('Stopped listening.')
})
}
The test code is like:
// file: `test.js`
var MyServer = require('./lib/my_server')
var my_server = new MyServer();
my_server.on('listening', function() {
my_server.stop()
})
my_server.start()
Now, when I run node test.js, I get the stdout output that I expect,
$ node test.js
Listening for HTTP requests on port 3000.
Stopped listening.
but I have no idea how to get the process spawned by node test.js to exit and return to the shell.
Now, I understand (abstractly) that Node keeps running as long as there are bound event handlers for events that it's listening for. In order for node test.js to exit to the shell upon my_server.stop(), do I need to unbind some event? If so, which event and from what object? I have tried modifying MyServer.prototype.stop() by removing all event listeners from it but have had no luck.
I've been looking for an answer to this question for months and I've never yet seen a good answer that doesn't use process.exit. It's quite strange to me that it is such a straightforward request but no one seems to have a good answer for it or seems to understand the use case for stopping a server without exiting the process.
I believe I might have stumbled across a solution. My disclaimer is that I discovered this by chance; it doesn't reflect a deep understanding of what's actually going on. So this solution may be incomplete or maybe not the only way of doing it, but at least it works reliably for me. In order to stop the server, you need to do two things:
Call .end() on the client side of every opened connection
Call .close() on the server
Here's an example, as part of a "tape" test suite:
var test = require('tape');
var net = require('net');
test('mytest', function (t) {
t.plan(1);
var server = net.createServer(function(c) {
console.log("Got connection");
// Do some server stuff
}).listen(function() {
// Once the server is listening, connect a client to it
var port = server.address().port;
var sock = net.connect(port);
// Do some client stuff for a while, then finish the test
setTimeout(function() {
t.pass();
sock.end();
server.close();
}, 2000);
});
});
After the two seconds, the process will exit and the test will end successfully. I've also tested this with multiple client sockets open; as long as you end all client-side connections and then call .close() on the server, you are good.
http.Server#close
https://nodejs.org/api/http.html#http_server_close_callback
module.exports = {
server: http.createServer(app) // Express App maybe ?
.on('error', (e) => {
console.log('Oops! Something happened', e);
this.stopServer(); // Optionally stop the server gracefully
process.exit(1); // Or violently
}),
// Start the server
startServer: function() {
Configs.reload();
this.server
.listen(Configs.PORT)
.once('listening', () => console.log('Server is listening on', Configs.PORT));
},
// Stop the server
stopServer: function() {
this.server
.close() // Won't accept new connection
.once('close', () => console.log('Server stopped'));
}
}
Notes:
"close" callback only triggers when all leftover connections have finished processing
Trigger process.exit in "close" callback if you want to stop the process too
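A minimal sketch combining these two notes:
server.close(() => {
  console.log('Server stopped'); // fires once all remaining connections end
  process.exit(0); // then stop the process as well
});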
To cause the node.js process to exit, use process.exit(status) as described in http://nodejs.org/api/process.html#process_process_exit_code
Update
I must have misunderstood.
You wrote: "...but I have no idea how to get the process spawned by node test.js to exit and return back to the shell."
process.exit() does this.
Unless you're using the child_process module, node.js runs in a single process. It does not "spawn" any further processes.
The fact that node.js continues to run even though there appears to be nothing for it to do is a feature of its "event loop" which continually loops, waiting for events to occur.
To halt the event loop, use process.exit().
UPDATE
After a few small modifications, such as the proper use of module.exports and the addition of semicolons, your example runs as expected on a Linux server (Fedora 11 - Leonidas) and dutifully returns to the command shell.
lib/my_server.js
// file: `lib/my_server.js`
var util=require('util'),
http=require('http');
var LISTEN_PORT=3000;
function MyServer(){
http.Server.call(this, this.handle);
}
util.inherits(MyServer, http.Server);
MyServer.prototype.handle=function(req, res){
// code
};
MyServer.prototype.start=function(){
this.listen(LISTEN_PORT, function(){
console.log('Listening for HTTP requests on port %d.', LISTEN_PORT)
});
};
MyServer.prototype.stop=function(){
this.close(function(){
console.log('Stopped listening.');
});
};
module.exports=MyServer;
test.js
// file: `test.js`
var MyServer = require('./lib/my_server');
var my_server = new MyServer();
my_server.on('listening', function() {
my_server.stop();
});
my_server.start();
Output
> node test.js
Listening for HTTP requests on port 3000.
Stopped listening.
>
Final thoughts:
I've found that the conscientious use of statement-ending semicolons has saved me from a wide variety of pernicious, difficult-to-locate bugs.
While most (if not all) JavaScript interpreters provide something called "automatic semicolon insertion" (or ASI) based upon a well-defined set of rules (see http://dailyjs.com/2012/04/19/semicolons/ for an excellent description), there are several instances where this feature can inadvertently work against the intent of the programmer.
Unless you are very well versed in the minutiae of JavaScript syntax, I would strongly recommend the use of explicit semicolons rather than relying upon ASI's implicit ones.

How do I shutdown a Node.js http(s) server immediately?

I have a Node.js application that contains an http(s) server.
In a specific case, I need to shut down this server programmatically. What I am currently doing is calling its close() function, but this does not help, as it waits for any kept-alive connections to finish first.
So, basically, this shuts down the server, but only after a minimum wait time of 120 seconds. But I want the server to shut down immediately - even if this means breaking off currently handled requests.
What I can not do is a simple
process.exit();
as the server is only part of the application, and the rest of the application should remain running. What I am looking for is conceptually something like server.destroy();
How could I achieve this?
PS: The keep-alive timeout for connections is usually required, hence it is not a viable option to decrease this time.
The trick is that you need to subscribe to the server's connection event, which gives you the socket of each new connection. You need to remember this socket and later on, directly after having called server.close(), destroy it using socket.destroy().
Additionally, you need to listen to the socket's close event so you can remove it from the set if it leaves naturally because its keep-alive timeout runs out.
I have written a small sample application you can use to demonstrate this behavior:
// Create a new server on port 4000
var http = require('http');
var server = http.createServer(function (req, res) {
res.end('Hello world!');
}).listen(4000);
// Maintain a hash of all connected sockets
var sockets = {}, nextSocketId = 0;
server.on('connection', function (socket) {
// Add a newly connected socket
var socketId = nextSocketId++;
sockets[socketId] = socket;
console.log('socket', socketId, 'opened');
// Remove the socket when it closes
socket.on('close', function () {
console.log('socket', socketId, 'closed');
delete sockets[socketId];
});
// Extend socket lifetime for demo purposes
socket.setTimeout(4000);
});
// Count down from 10 seconds
(function countDown (counter) {
console.log(counter);
if (counter > 0)
return setTimeout(countDown, 1000, counter - 1);
// Close the server
server.close(function () { console.log('Server closed!'); });
// Destroy all open sockets
for (var socketId in sockets) {
console.log('socket', socketId, 'destroyed');
sockets[socketId].destroy();
}
})(10);
Basically, what it does is start a new HTTP server, count down from 10 to 0, and close the server after 10 seconds. If no connection has been established, the server shuts down immediately.
If a connection has been established and it is still open, it is destroyed.
If it had already died naturally, only a message is printed out at that point in time.
I found a way to do this without having to keep track of the connections or having to force them closed. I'm not sure how reliable it is across Node versions or if there are any negative consequences to this but it seems to work perfectly fine for what I'm doing. The trick is to emit the "close" event using setImmediate right after calling the close method. This works like so:
server.close(callback);
setImmediate(function(){server.emit('close')});
At least for me, this ends up freeing the port so that I can start a new HTTP(S) service by the time the callback is called (which is pretty much instantly). Existing connections stay open. I'm using this to automatically restart the HTTPS service after renewing a Let's Encrypt certificate.
If you need to keep the process alive after closing the server, then Golo Roden's solution is probably the best.
But if you're closing the server as part of a graceful shutdown of the process, you just need this:
var server = require('http').createServer(myFancyServerLogic);
server.on('connection', function (socket) {socket.unref();});
server.listen(80);
function myFancyServerLogic(req, res) {
req.connection.ref();
res.end('Hello World!', function () {
req.connection.unref();
});
}
Basically, the sockets that your server uses will only keep the process alive while they're actually serving a request. While they're just sitting there idly (because of a Keep-Alive connection), a call to server.close() will close the process, as long as there's nothing else keeping the process alive. If you need to do other things after the server closes, as part of your graceful shutdown, you can hook into process.on('beforeExit', callback) to finish your graceful shutdown procedures.
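For instance, a last-minute cleanup hook might look like this (flushLogsAndCloseDb is a hypothetical helper; note that scheduling async work in beforeExit keeps the loop alive, so a guard is needed):
let cleanedUp = false;
process.on('beforeExit', () => {
  if (cleanedUp) return; // beforeExit can fire again once the loop drains
  cleanedUp = true;
  flushLogsAndCloseDb() // hypothetical async cleanup
    .then(() => console.log('Graceful shutdown complete.'));
});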
The https://github.com/isaacs/server-destroy library provides an easy way to destroy() a server with the behavior desired in the question (by tracking opened connections and destroying each of them on server destroy, as described in other answers).
As others have said, the solution is to keep track of all open sockets and close them manually. My node package killable can do this for you. An example (using Express, but you can use killable on any http.Server instance):
var killable = require('killable');
var app = require('express')();
var server;
app.get('/', function (req, res, next) {
res.send('Server is going down NOW!');
server.kill(function () {
//the server is down when this is called. That won't take long.
});
});
server = app.listen(8080);
killable(server);
Yet another Node.js package that performs a shutdown by killing connections: http-shutdown, which seems reasonably maintained at the time of writing (Sept. 2016) and worked for me on Node.js 6.x.
From the documentation
Usage
There are currently two ways to use this library. The first is explicit wrapping of the Server object:
// Create the http server
var server = require('http').createServer(function(req, res) {
res.end('Good job!');
});
// Wrap the server object with additional functionality.
// This should be done immediately after server construction, or before you start listening.
// Additional functionality needs to be added for http server events to shut down properly.
server = require('http-shutdown')(server);
// Listen on a port and start taking requests.
server.listen(3000);
// Sometime later... shutdown the server.
server.shutdown(function() {
console.log('Everything is cleanly shutdown.');
});
The second is implicitly adding prototype functionality to the Server object:
// .extend adds a .withShutdown prototype method to the Server object
require('http-shutdown').extend();
var server = require('http').createServer(function(req, res) {
res.end('Good job!');
}).withShutdown(); // <-- Easy to chain. Returns the Server object
// Sometime later, shutdown the server.
server.shutdown(function() {
console.log('Everything is cleanly shutdown.');
});
My best guess would be to kill the connections manually (i.e. to forcibly close their sockets).
Ideally, this should be done by digging into the server's internals and closing its sockets by hand. Alternatively, one could run a shell command that does the same (provided the server has the proper privileges, etc.).
I have answered a variation of "how to terminate a HTTP server" many times on different node.js support channels. Unfortunately, I couldn't recommend any of the existing libraries because they are lacking in one way or another. I have since put together a package that (I believe) handles all the cases expected of graceful HTTP server termination.
https://github.com/gajus/http-terminator
The main benefit of http-terminator is that:
it does not monkey-patch Node.js API
it immediately destroys all sockets without an attached HTTP request
it allows graceful timeout to sockets with ongoing HTTP requests
it properly handles HTTPS connections
it informs connections using keep-alive that the server is shutting down by setting a "connection: close" header
it does not terminate the Node.js process
Usage:
import http from 'http';
import {
createHttpTerminator,
} from 'http-terminator';
const server = http.createServer();
const httpTerminator = createHttpTerminator({
server,
});
await httpTerminator.terminate();
const Koa = require('koa')
const app = new Koa()
let keepAlive = true
app.use(async (ctx) => {
let url = ctx.request.url
// destroy socket
if (keepAlive === false) {
ctx.response.set('Connection', 'close')
}
switch (url) {
case '/restart':
ctx.body = 'success'
process.send('restart')
break;
default:
ctx.body = 'world-----' + Date.now()
}
})
const server = app.listen(9011)
process.on('message', (data, sendHandle) => {
if (data == 'stop') {
keepAlive = false
server.close();
}
})
process.exit(code); // code 0 for success and 1 for fail
