Unable to restart node server after making a change - node.js

Some code changes I made to our test server are not taking effect. I am trying to make sure the node server was properly restarted after the change. Following are the processes with name "node" showing as running -
[root#ip-10-30-30-4 lib]# ps -aux | grep node
root 18643 0.0 0.7 916304 26544 ? Ssl 17:22 0:00 /usr/bin/node /usr/lib/node_modules/forever/bin/monitor app.js
root 21479 0.0 1.7 980380 62528 ? Sl 19:31 0:00 /root/.nvm/v7.2.1/bin/node /usr/local/mvc/MVC2.0/app.js
root 21491 0.0 2.1 972432 78220 ? Sl 19:31 0:00 /usr/bin/node /usr/local/mvc/MVC2.0/app.js
root 21858 0.0 0.0 112652 960 pts/1 S+ 19:48 0:00 grep --color=auto node
root 22515 0.0 0.8 920548 31008 ? Ssl Nov03 0:00 /root/.nvm/v7.2.1/bin/node /usr/lib/node_modules/forever/bin/monitor app.js
I tried killing the node processes running app.js (with IDs 21479 and 21491), excluding the PIDs 18643 and 22515. It seems to be monitoring script which restarts the server as soon as it stops. Here are its contents -
var fs = require('fs'),
path = require('path'),
forever = require(path.resolve(__dirname, '..', 'lib', 'forever')),
started;
//
// ### #function (file, pid)
// #### #file {string} Location of the pid file.
// #### #pid {number} pid to write to disk.
// Write the pidFile to disk for later use
//
// Write `pid` to the pid file at `file` so the forever CLI can locate the
// monitored process later.
//
// @param {string} file - Location of the pid file.
// @param {number} pid  - pid to write to disk.
function writePid(file, pid) {
  // fs.writeFileSync only accepts string/Buffer/TypedArray data; passing the
  // raw number throws ERR_INVALID_ARG_TYPE on modern Node, so coerce first.
  fs.writeFileSync(file, String(pid), 'utf8');
}
//
// ### #function start (options)
// #### #options {Object} Options for the `forever.Monitor` instance.
// Starts the child process and disconnects from the IPC channel.
//
function start(options) {
var script = process.argv[2],
monitor = new forever.Monitor(script, options);
forever.logEvents(monitor);
monitor.start();
monitor.on('start', function () {
//
// This starts an nssocket server, which the forever CLI uses to
// communicate with this monitor process after it's detached.
// Without this, `forever list` won't show the process, even though it
// would still be running in the background unaffected.
//
forever.startServer(monitor);
//
// Disconnect the IPC channel, letting this monitor's parent process know
// that the child has started successfully.
//
process.disconnect();
//
As soon as I kill the process, with the following command -
kill <PID>
I still see the same processes with new process IDs, which confirms that the processes were restarted. However, the changes are still not reflecting. Should I kill the ones with PIDs 18643 and 22515 as well? I am not sure how it was actually started.

Related

nodejs how to find the process id

I have a nodejs application running on a live server. I start a new node process using the following command in Terminal of VSCODE by accessing the server through SSH.
nohup node filename.js &
Mostly I can see the process id using the following command in the VSCODE terminal.
netstat -lpn | grep 30001
This command gives the following output:
tcp6 0 0 :::30001 :::* LISTEN 21552/node
But sometimes it doesn't show any process id, as shown in the following output:
tcp6 0 0 :::30001 :::* LISTEN -
In case the process dies to due some technical error, it should get restarted automatically. I have executed the following code through a cron in every 5 minutes for this, which works.
// Watchdog (run from cron every 5 minutes): if no process is listening on
// port 30001, relaunch filename.js as a detached background process so it
// keeps running after this script exits.
const find = require('find-process');
const { spawn } = require('child_process');

find("port", "30001").then(
  (list) => {
    console.log("list::", list);
    if (!list.length) {
      // Detach and unref so the child survives this cron job terminating.
      const child = spawn('node', [`${__dirname}/filename.js`], {
        detached: true,
        stdio: 'ignore',
      });
      child.unref();
    }
  },
  (err) => {
    console.log(err.stack || err);
  }
);
Following is my cron
*/5 * * * * node path-to-js-file/crontab.js
My Question:
Why my node instance on port 30001 is sometimes not having a pid while the application contained inside it is still accessible?
kill -9 needs a process id, which I don't have, as shown above. How can I kill such a process from the command line so that it can be restarted?
To show the process pid you can use the process module of Node.js.
// `process` is already a Node global; requiring the 'process' module
// returns the very same object.
var proc = require('process');
console.log(`Process pid ${proc.pid}`);

Node js process stays on linux ps

I'm currently running a node js server accessible by a specific url on my apache server.
The entire code is:
// server.js
// Minimal Express front-end: accepts POSTed preview data on port 8080 and
// hands it to takeWebshot() to render a screenshot via PhantomJS.
var webshot = require('./lib/webshot');
var fs = require('fs');
var http = require('http');
var bodyParser = require('body-parser');
const used = process.memoryUsage(); // NOTE(review): captured once at startup and never read — looks unused
var express = require('express');
var app = express();
app.use( bodyParser.urlencoded() );
// your express configuration here
var httpServer = http.createServer(app);
// For http
httpServer.listen(8080);
// Single POST endpoint: pulls the preview fields out of the form body,
// sets CORS headers for citylocs.com, and kicks off the screenshot job.
app.post('/', function (req, res) {
console.log(req.body);
var firstLine = req.body.firstLine;
var secondLine = req.body.secondLine;
var previewID = req.body.previewId;
var productPlate = req.body.prodName;
res.header('Access-Control-Allow-Origin', 'https://citylocs.com');
res.header('Access-Control-Allow-Methods', 'GET, POST, PUT');
res.header('Access-Control-Allow-Headers', 'X-Requested-With, Content-Type');
// Fire-and-forget: the HTTP response ends immediately; the screenshot is
// produced asynchronously by takeWebshot, which exits the process when done.
takeWebshot(firstLine, secondLine, previewID, productPlate)
res.end()
});
// Drives PhantomJS via node-webshot: loads the preview page, injects the two
// text lines into the page's form, clicks the preview button, then captures
// the '#img_preview_fancybox' element to ../screenshot/<pID>.png.
// NOTE: `fn` and `context` are serialized and executed INSIDE PhantomJS
// (browser context, `document` available), so they must stay self-contained.
function takeWebshot(fLine, sLine, pID, prodPlate) {
var options = {
onLoadFinished: {
// Runs in the PhantomJS page once loading finishes; `this` is `context`.
fn: function() {
document.getElementById("user_input").value=this.fLineSnapshot;
document.getElementById("second_user_input").value=this.sLineSnapshot;
document.getElementById("preview_btn").click();
},
// Values made visible to `fn` as `this.*` inside PhantomJS.
context: {
fLineSnapshot: fLine,
sLineSnapshot: sLine,
}
},
// Wait for the page's own callback before taking the shot.
takeShotOnCallback: true,
captureSelector: '#img_preview_fancybox',
licensePlate: 'Guzman Plate'
};
webshot('example.com/preview/productpreview/testy.html?prod=' + prodPlate, '../screenshot/' + pID +'.png', options, function(err) {
// Intentionally exits the whole node process after each shot, on success or
// failure; an external supervisor (pm2) is expected to restart the server.
if(!err) { process.exit() }
else { console.log(err);
process.exit() }
});
};
It basically takes some data and takes a screenshot of a website using PhantomJS through the webshot() method. To save memory, since this runs multiple times, I call process.exit() after webshot() completes in its callback. My expectation is that server.js exits and is then restarted by pm2. The problem is that I'm getting a memory error after some time. After checking by running ps aux --sort -rss
I get this:
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
root 12019 0.0 2.5 224036 105992 ? Ss 04:13 0:06 /usr/local/cpanel/3rdparty/perl/528/bin/perl -T -w /usr/local/cpanel/3rdparty/bin/spamd --max-spare=
root 12237 0.0 2.4 225184 103664 ? S 04:26 0:03 spamd child
root 12238 0.0 2.4 224036 102128 ? S 04:26 0:00 spamd child
root 12239 0.0 2.4 224036 102124 ? S 04:26 0:00 spamd child
mysql 1592 0.2 1.3 1586436 57104 ? Sl Aug29 1:56 /usr/sbin/mysqld --basedir=/usr --datadir=/var/lib/mysql --plugin-dir=/usr/lib64/mysql/plugin --user
named 1056 0.0 0.9 1924092 41828 ? Ssl Aug29 0:00 /usr/sbin/named -u named
root 1380 0.0 0.8 902416 37480 ? Ssl Aug29 0:19 PM2 v3.5.1: God Daemon (/root/.pm2)
root 5032 0.0 0.8 2037540 35732 ? Sl Aug29 0:01 phantomjs /home/ogdi/public_html/preview/productpreview/node-webshot/lib/webshot.phantom.js {
root 9778 0.0 0.8 2037500 35708 ? Sl 02:57 0:01 phantomjs /home/ogdi/public_html/preview/productpreview/node-webshot/lib/webshot.phantom.js {
root 18725 0.0 0.8 2037500 35680 ? Sl 08:09 0:00 phantomjs /home/ogdi/public_html/preview/productpreview/node-webshot/lib/webshot.phantom.js {
root 7577 0.0 0.8 2037460 35676 ? Sl 01:46 0:01 phantomjs /home/ogdi/public_html/preview/productpreview/node-webshot/lib/webshot.phantom.js {
It's telling me the phantom js script is still active since yesterday. It appears several more times. Why is that if I process.exit() successfully?
EDIT:
I manually killed a bunch of those phantom js commands. I diagnosed one a little more. and it says STAT Sl which means it's Sleep, multi thread. according to what I read.
Webshot link: https://raw.githubusercontent.com/brenden/node-webshot/master/lib/webshot.js
Webshot phantom link: https://raw.githubusercontent.com/brenden/node-webshot/master/lib/webshot.phantom.js
process.exit() is not called if there is an error. In the event of an error, is there any point in the process continuing?
// Exit unconditionally after the shot, logging the error first if any.
webshot('example.com/preview/productpreview/testy.html?prod=' + prodPlate, '../screenshot/' + pID +'.png', options, function(err) {
  if (err) { console.log(err) }
  // The exit code must be an integer: 1 on failure, 0 on success.
  // (Passing the Error object itself to process.exit() is invalid — modern
  // Node rejects non-integer codes with ERR_INVALID_ARG_TYPE.)
  process.exit(err ? 1 : 0)
});

gracefully shutdown http server

I write a http + websocket(socket.io) server, and catch SIGINT to stop it.
But I can't stop it gracefully. eg:
in a konsole window, I typed in:
>
xxx ~$ npm start
info - socket.io started
server started at :80
new web socket connection !
^C ### PRESS CTRL+C HERE
xxx ~$ ^C--- SIGINT RECIVED --- # <-- WTF??!! ( I have already been dropped to shell
xxx ~$ ps -f
UID PID PPID C STIME TTY TIME CMD
GongT 18776 2281 0 17:37 pts/11 00:00:00 /bin/bash
GongT 31365 1 0 21:24 pts/11 00:00:00 node main.js
GongT 31548 18776 0 21:24 pts/11 00:00:00 ps -f
# ^ look at PPID of node process
xxx ~$ kill -SIGINT 31365
--- SIGINT RECIVED ---
xxx ~$ kill -SIGINT 31365
--- SIGINT RECIVED ---
xxx ~$ # now I disconnect from client(web browser)
server stopped. bye~
My signal handler like that:
// Graceful shutdown: on the first SIGINT stop accepting new connections,
// then poll until all in-flight work has drained before exiting.
process.on('SIGINT', function () {
  console.log(' --- SIGINT RECIVED --- ');
  if (isServerRunning) {
    // server.close() only stops NEW connections; existing sockets (e.g. open
    // websockets) must finish before the process can exit cleanly.
    server.close();
    isServerRunning = false;
    // Fix: the original setInterval had no delay (defaults to ~1 ms, spinning
    // the event loop) and was never cleared. Poll every 100 ms and clear the
    // timer once everything has finished.
    var shutdownTimer = setInterval(function () {
      if (checkEveryThingFinished()) {
        clearInterval(shutdownTimer);
        console.log('server stopped. bye~');
        process.exit();
      }
    }, 100);
  }
});
My question is :
why was I dropped back to the shell without the process ending, and how can I prevent this?
(i.e. just wait until process.exit() is called)

How to fork a child process that listens on a different debug port than the parent

I am trying to use child_process.fork to spawn a process that breaks and listens on the V8 debug protocol.
However, I can't get the forked process to listen on a port that's different from the parent process. Assuming the parent process listens on 6000, the child process also attempts to listen on port 6000:
Failed to open socket on port 6000, waiting 1000 ms before retrying
Here's my code:
// `test.js`, invoked as `node --debug-brk=6000 test.js`
// NOTE(review): this is the NON-working version the question is about.
// '--debug-brk=6001' is placed in `args`, which fork() passes to the child
// SCRIPT as process.argv — not to the node executable — so the child node
// process inherits the parent's execArgv and tries to bind port 6000 again.
var nodeModule, args, env, child
nodeModule = path.normalize(path.join(__dirname, '..', 'app.js'))
args = [
'--debug-brk=6001'
, '127.0.0.1'
, 3030
, 'api-testing'
]
env = { 'DB_URI': 'mongodb://localhost/test' }
child = require('child_process')
.fork(nodeModule, args, {env: env})
.on('message', callback)
As you can see, I'm trying to get the forked process to listen on port 6001, but the child process attempts to listen on port 6000 which is in use by the parent.
How can I get the child process to listen on port 6001, or some other free port?
There are several threads on this subject. For example:
How to debug Node.JS child forked process?
Debugging Node.js processes with cluster.fork()
However:
These threads deal with the cluster variant of forking
Refer to execArgv, which appear to have been undocumented for process and are still undocumented for cluster.
Simple enough answer, found on this comment and with some help from #Node.js on Freenode:
Just move the --debug-brk into the execArgv key of the options param to fork:
// Excerpt:
// Fixed version: script arguments stay in `args`, while the node/V8 flag
// moves into the `execArgv` option of fork(), so the child node process
// itself listens on debug port 6001 instead of inheriting the parent's 6000.
args = [
'127.0.0.1'
, 3030
, 'api-testing'
]
env = { 'DB_URI': 'mongodb://localhost/test' }
child = fork(nodeModule, args, {
env: env
// Flags for the node executable of the child, not for the script.
, execArgv: ['--debug-brk=6001']
})
.on('message', this.callback)
execArgv is the array of parameters passed to the node process. argv is the set passed to the main module. There's a dedicated parameter to child_process.fork for argv, but execArgvs have to be placed within the opts param. This works, and in the child process we have:
> process.execArgv
["--debug-brk=6001"]
> process.argv
["/usr/local/Cellar/node/0.10.13/bin/node" "/Users/dmitry/dev/linksmotif/app.js", "127.0.0.1", "3030", "api-testing"]
In summary
Node.js consistently treats execArgv and argv as separate sets of values.
Before to fork remove old debug-brk parameter :
// Strip any inherited --debug-brk flag (bare or with =port) from the exec
// arguments that forked children would otherwise inherit from this process.
process.execArgv = process.execArgv.filter(function (o) {
  // startsWith matches both "--debug-brk" and "--debug-brk=NNNN".
  return !o.startsWith("--debug-brk");
});
and add a new one:
process.execArgv.push("--debug-brk=9999");

My node.js doesn't respond without reporting any errors

I wrote http server in node.js, which is a like reverse-proxy for Amazon S3, and deployed it in production environment with Node's Cluster module, nodejitsu/forever and Nginx.
It worked very good but one day (today) it stopped responding. I checked node's console.log() outputs and processes but I found nothing strange.
Gist of my code is like:
// Reverse-proxy existence check against S3: HTTP 200 means the object exists.
http.createServer(function(webReq, webRes) {
  http.get(s3Options, function(s3Res) {
    if (s3Res.statusCode == 200) {
      // Fix: 'end' only fires once the response stream has been consumed.
      // The stream starts paused, so without resume() (or a 'data' handler)
      // 'end' never fires, webRes is never ended, and sockets pile up until
      // the server stops responding — matching the observed hang.
      s3Res.resume();
      s3Res.on('end', function() {
        webRes.end('Found data on S3');
      });
    } else {
      // Drain non-200 responses too, so their sockets are returned to the
      // agent pool instead of leaking.
      s3Res.resume();
      webRes.end('No data on S3');
    }
  }).on('error', function(e) {
    console.log('problem with s3Req: ' + e.message);
  });
}).listen(1337);
Node processes are all alive (2 child workers) without forever's restarting:
# ps x | grep node
31436 ? Ss 3:43 node /usr/bin/forever -l LOG -o OUT -e ERR -a start server.js
31437 ? Sl 0:10 node /root/server.js
31440 ? Sl 1:17 /usr/bin/nodejs /root/server.js
31441 ? Sl 1:17 /usr/bin/nodejs /root/server.js
Then I doubted too-many-connection stuffs and did "lsof -p PID | wc -l" but the counts were all in good conditions - only dozens.
My node.js experience is only a week or so. Did I miss something important?

Resources