I'm currently running a Node.js server accessible via a specific URL on my Apache server.
The entire code is:
// server.js
var webshot = require('./lib/webshot');
var fs = require('fs');
var http = require('http');
var bodyParser = require('body-parser');
const used = process.memoryUsage();
var express = require('express');
var app = express();
app.use( bodyParser.urlencoded() );
// your express configuration here
var httpServer = http.createServer(app);
// For http
httpServer.listen(8080);
app.post('/', function (req, res) {
  console.log(req.body);
  var firstLine = req.body.firstLine;
  var secondLine = req.body.secondLine;
  var previewID = req.body.previewId;
  var productPlate = req.body.prodName;
  res.header('Access-Control-Allow-Origin', 'https://citylocs.com');
  res.header('Access-Control-Allow-Methods', 'GET, POST, PUT');
  res.header('Access-Control-Allow-Headers', 'X-Requested-With, Content-Type');
  takeWebshot(firstLine, secondLine, previewID, productPlate);
  res.end();
});
function takeWebshot(fLine, sLine, pID, prodPlate) {
  var options = {
    onLoadFinished: {
      fn: function() {
        document.getElementById("user_input").value = this.fLineSnapshot;
        document.getElementById("second_user_input").value = this.sLineSnapshot;
        document.getElementById("preview_btn").click();
      },
      context: {
        fLineSnapshot: fLine,
        sLineSnapshot: sLine,
      }
    },
    takeShotOnCallback: true,
    captureSelector: '#img_preview_fancybox',
    licensePlate: 'Guzman Plate'
  };
  webshot('example.com/preview/productpreview/testy.html?prod=' + prodPlate, '../screenshot/' + pID + '.png', options, function(err) {
    if (!err) {
      process.exit();
    } else {
      console.log(err);
      process.exit();
    }
  });
}
It basically takes some data and takes a screenshot of a website using PhantomJS via webshot(). To save memory, since this runs many times, I call process.exit() in the callback once webshot() completes. My expectation is that server.js exits and is then restarted by pm2. The problem is that I'm getting a memory error after some time. After running ps aux --sort -rss
I get this:
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
root 12019 0.0 2.5 224036 105992 ? Ss 04:13 0:06 /usr/local/cpanel/3rdparty/perl/528/bin/perl -T -w /usr/local/cpanel/3rdparty/bin/spamd --max-spare=
root 12237 0.0 2.4 225184 103664 ? S 04:26 0:03 spamd child
root 12238 0.0 2.4 224036 102128 ? S 04:26 0:00 spamd child
root 12239 0.0 2.4 224036 102124 ? S 04:26 0:00 spamd child
mysql 1592 0.2 1.3 1586436 57104 ? Sl Aug29 1:56 /usr/sbin/mysqld --basedir=/usr --datadir=/var/lib/mysql --plugin-dir=/usr/lib64/mysql/plugin --user
named 1056 0.0 0.9 1924092 41828 ? Ssl Aug29 0:00 /usr/sbin/named -u named
root 1380 0.0 0.8 902416 37480 ? Ssl Aug29 0:19 PM2 v3.5.1: God Daemon (/root/.pm2)
root 5032 0.0 0.8 2037540 35732 ? Sl Aug29 0:01 phantomjs /home/ogdi/public_html/preview/productpreview/node-webshot/lib/webshot.phantom.js {
root 9778 0.0 0.8 2037500 35708 ? Sl 02:57 0:01 phantomjs /home/ogdi/public_html/preview/productpreview/node-webshot/lib/webshot.phantom.js {
root 18725 0.0 0.8 2037500 35680 ? Sl 08:09 0:00 phantomjs /home/ogdi/public_html/preview/productpreview/node-webshot/lib/webshot.phantom.js {
root 7577 0.0 0.8 2037460 35676 ? Sl 01:46 0:01 phantomjs /home/ogdi/public_html/preview/productpreview/node-webshot/lib/webshot.phantom.js {
It's telling me the PhantomJS script has been active since yesterday, and it appears several more times in the list. Why is that, if process.exit() runs successfully?
EDIT:
I manually killed a bunch of those PhantomJS processes. I looked into one of them a little more: its STAT is Sl, which according to what I read means it's sleeping and multi-threaded.
Webshot link: https://raw.githubusercontent.com/brenden/node-webshot/master/lib/webshot.js
Webshot phantom link: https://raw.githubusercontent.com/brenden/node-webshot/master/lib/webshot.phantom.js
process.exit() is not called if there is an error. In the event of an error, is there any point in the process continuing?
webshot('example.com/preview/productpreview/testy.html?prod=' + prodPlate, '../screenshot/' + pID + '.png', options, function(err) {
  if (err) { console.log(err); }
  process.exit(err ? 1 : 0);
});
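One thing worth keeping in mind: process.exit() only terminates the Node process; it does not kill a phantomjs child that is still running (for example, one stuck waiting for takeShotOnCallback), so those children are left behind as orphans. As a blunt workaround sketch (assuming a Linux host with pkill available and that no other PhantomJS work on the box needs to survive), any leftover webshot.phantom.js processes could be cleaned up before exiting:
var exec = require('child_process').exec;

function exitAfterCleanup(code) {
  // Kill any phantomjs instances still running the webshot script.
  // This is indiscriminate: it assumes nothing else on the box uses webshot.phantom.js.
  exec('pkill -f webshot.phantom.js', function () {
    process.exit(code);
  });
}

webshot('example.com/preview/productpreview/testy.html?prod=' + prodPlate, '../screenshot/' + pID + '.png', options, function(err) {
  if (err) { console.log(err); }
  exitAfterCleanup(err ? 1 : 0);
});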
Related
Some code changes I made to our test server are not taking effect. I am trying to make sure the node server was properly restarted after the change. The following processes with the name "node" show as running:
[root@ip-10-30-30-4 lib]# ps -aux | grep node
root 18643 0.0 0.7 916304 26544 ? Ssl 17:22 0:00 /usr/bin/node /usr/lib/node_modules/forever/bin/monitor app.js
root 21479 0.0 1.7 980380 62528 ? Sl 19:31 0:00 /root/.nvm/v7.2.1/bin/node /usr/local/mvc/MVC2.0/app.js
root 21491 0.0 2.1 972432 78220 ? Sl 19:31 0:00 /usr/bin/node /usr/local/mvc/MVC2.0/app.js
root 21858 0.0 0.0 112652 960 pts/1 S+ 19:48 0:00 grep --color=auto node
root 22515 0.0 0.8 920548 31008 ? Ssl Nov03 0:00 /root/.nvm/v7.2.1/bin/node /usr/lib/node_modules/forever/bin/monitor app.js
I tried killing the node processes running app.js (PIDs 21479 and 21491), excluding PIDs 18643 and 22515, which appear to be the forever monitor script that restarts the server as soon as it stops. Here are its contents:
var fs = require('fs'),
    path = require('path'),
    forever = require(path.resolve(__dirname, '..', 'lib', 'forever')),
    started;

//
// ### function (file, pid)
// #### @file {string} Location of the pid file.
// #### @pid {number} pid to write to disk.
// Write the pidFile to disk for later use
//
function writePid(file, pid) {
  fs.writeFileSync(file, pid, 'utf8');
}

//
// ### function start (options)
// #### @options {Object} Options for the `forever.Monitor` instance.
// Starts the child process and disconnects from the IPC channel.
//
function start(options) {
  var script = process.argv[2],
      monitor = new forever.Monitor(script, options);

  forever.logEvents(monitor);
  monitor.start();

  monitor.on('start', function () {
    //
    // This starts an nssocket server, which the forever CLI uses to
    // communicate with this monitor process after it's detached.
    // Without this, `forever list` won't show the process, even though it
    // would still be running in the background unaffected.
    //
    forever.startServer(monitor);

    //
    // Disconnect the IPC channel, letting this monitor's parent process know
    // that the child has started successfully.
    //
    process.disconnect();
    //
As soon as I kill a process with the following command:
kill <PID>
I still see the same processes with new process IDs, which confirms that the processes were restarted. However, the changes are still not reflected. Should I kill the ones with PIDs 18643 and 22515 as well? I am not sure how it was actually started.
I'm looking to use ngrok on port 4000; it's a command that outputs a forwarding URL, and every time it runs there's a new randomly generated URL.
I would like to pass that URL, e.g. http://2e1v870f.ngrok.io, to a Node process.env variable rather than hard-coding it every time.
For example in bash:
ngrok http 4000 | <process/define_something> | FORWARDING={something} node index.js
Also, since these are two separate running processes, I've used npm-run-all to do something like this: https://www.npmjs.com/package/npm-run-all
ngrok by @inconshreveable (Ctrl+C to quit)
Session Status online
Version 2.2.8
Region United States (us)
Web Interface http://127.0.0.1:4041
Forwarding http://2e1v870f.ngrok.io -> localhost:4000
Forwarding https://2e1v870f.ngrok.io -> localhost:4000
Connections ttl opn rt1 rt5 p50 p90
0 0 0.00 0.00 0.00 0.00
I've turned to using the node wrapper for ngrok as I couldn't access the output from bash. Here's an example start.js:
if (!process.env.ROOT_URL) {
  var ngrok = require('ngrok');
  var shell = require('shelljs');

  ngrok.connect(3000, function(err, url) {
    shell.exec('ROOT_URL=' + url + ' meteor --settings settings.json', function(code, stdout, stderr) {
      console.log('Exit code:', code);
      console.log('Program output:', stdout);
      console.log('Program stderr:', stderr);
    });
  });
}
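As a variation on that start.js (a sketch only, using the same callback-style ngrok wrapper shown above and assuming index.js reads process.env.FORWARDING), the tunnel URL can also be handed to a child Node process through its environment instead of being spliced into a shell string:
var ngrok = require('ngrok');
var spawn = require('child_process').spawn;

ngrok.connect(4000, function (err, url) {
  if (err) { throw err; }
  // Spawn index.js with FORWARDING set, keeping the rest of the environment.
  var child = spawn('node', ['index.js'], {
    env: Object.assign({}, process.env, { FORWARDING: url }),
    stdio: 'inherit'
  });
  child.on('exit', function (code) { process.exit(code); });
});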
I'm using Gulp to generate CSS from LESS-CSS on file save. I want the css file to be uploaded to the server immediately, so I'm experimenting with Vinyl-FTP. I'm a newbie at NPM/NodeJS/Gulp/JavaScript, so I need some help.
In my gulpfile.js I have included this code (hiding of course host, user and password):
// Vinyl FTP
gulp.task('deploy', function () {
  var conn = ftp.create({
    host: 'ftp-server',
    user: 'user',
    password: 'password',
    parallel: 10,
    log: gutil.log
  });

  var globs = [
    '../site/templates/templatename/css/bootstrap.min.css'
  ];

  return gulp.src(globs, { base: '.', buffer: false })
    .pipe(conn.newer('/public_html/dev2/templates/templatename/css'))
    .pipe(conn.dest('/public_html/dev2/templates/templatename/css'));
});
I want the bootstrap.min.css file uploaded each time I hit 'save'. The file is located at ../site/templates/templatename/css/bootstrap.min.css relative to my gulp directory. I want it uploaded to my development site which is located at /public_html/dev2/templates/templatename/css on the server (yes, this is Joomla).
Apparently, I'm using the wrong path, because this is what it churns out:
[14:44:21] Using gulpfile /mnt/e/Sites/successfulspeakernow.com/gulp/gulpfile.js
[14:44:21] Starting 'less'...
[14:44:21] Finished 'less' after 20 ms
[14:44:21] Starting 'watch'...
[14:44:21] Finished 'watch' after 267 ms
[14:44:21] Starting 'deploy'...
[14:44:21] CONN
[14:44:23] READY
[14:44:23] MLSD /public_html/dev2/templates/templatename/site/templates/templatename/css
[14:44:23] MLSD /public_html/dev2/templates/templatename/site/templates/templatename
[14:44:23] MLSD /public_html/dev2/templates/templatename/site/templates
[14:44:23] MLSD /public_html/dev2/templates/templatename/site
[14:44:23] MLSD /public_html/dev2/templates/templatename
[14:44:23] MLSD /public_html/dev2/templates
[14:44:23] MKDIR /public_html/dev2/templates/templatename/site
[14:44:23] MKDIR /public_html/dev2/templates/templatename/site/templates
[14:44:23] MKDIR /public_html/dev2/templates/templatename/site/templates/templatename
[14:44:23] MKDIR /public_html/dev2/templates/templatename/site/templates/templatename/css
[14:44:23] PUT /public_html/dev2/templates/templatename/site/templates/templatename/css/bootstrap.min.css
[14:44:23] UP 37% /public_html/dev2/templates/templatename/site/templates/templatename/css/bootstrap.min.css
[14:44:23] UP 74% /public_html/dev2/templates/templatename/site/templates/templatename/css/bootstrap.min.css
[14:44:23] UP 100% /public_html/dev2/templates/templatename/site/templates/templatename/css/bootstrap.min.css
[14:44:23] Finished 'deploy' after 1.86 s
[14:44:23] Starting 'default'...
[14:44:23] Finished 'default' after 8.9 μs
[14:44:23] DISC
and when I go there with my FTP program, I find this:
/public_html/dev2/templates/templatename/site/templates/templatename/css/bootstrap.min.css
Can you explain what to adjust so the bootstrap.min.css file gets uploaded to the right directory on the server?
Thanx,
Thom
I had the same problem. vinyl-ftp recreates the source folder structure inside the destination folder. To avoid that, just point the destination at the root of your webserver.
My code is a little bit different, but it shows where the problem is.
var gulp = require('gulp');
var gutil = require('gulp-util');
var ftp = require('vinyl-ftp');

/** Configuration **/
var user = 'username';
var password = 'password';
var host = 'hostname';
var port = 21;
var localFilesGlob = ['public_html/templates/protostar/css/template.css'];
var remoteFolder = ''; // <- HERE MUST BE AN EMPTY STRING TO POINT VINYL-FTP TO THE ROOT

function getFtpConnection() {
  return ftp.create({
    host: host,
    port: port,
    user: user,
    password: password,
    parallel: 5,
    log: gutil.log
  });
}

gulp.task('ftp-deploy', function() {
  var conn = getFtpConnection();

  return gulp.src(localFilesGlob, { base: '.', buffer: false })
    .pipe(conn.newer(remoteFolder)) // only upload newer files
    .pipe(conn.dest(remoteFolder));
});
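Another way to look at it (a sketch based on the paths from the question, not tested against that exact setup): keep the absolute remote folder, but set gulp.src's base option to the local css directory, so that file.relative is just the file name and vinyl-ftp appends only that to the destination:
gulp.task('deploy', function () {
  var conn = getFtpConnection();

  // With base set to the css folder, only 'bootstrap.min.css' is appended
  // to the remote path instead of the whole ../site/... structure.
  return gulp.src('../site/templates/templatename/css/bootstrap.min.css',
      { base: '../site/templates/templatename/css', buffer: false })
    .pipe(conn.newer('/public_html/dev2/templates/templatename/css'))
    .pipe(conn.dest('/public_html/dev2/templates/templatename/css'));
});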
I wrote an HTTP server in Node.js, which acts like a reverse proxy for Amazon S3, and deployed it in a production environment with Node's Cluster module, nodejitsu/forever and Nginx.
It worked very well, but one day (today) it stopped responding. I checked Node's console.log() output and the processes, but found nothing strange.
The gist of my code is:
http.createServer(function(webReq, webRes) {
  http.get(s3Options, function(s3Res) {
    if (s3Res.statusCode == 200) {
      s3Res.on('end', function() {
        webRes.end('Found data on S3');
      });
    } else {
      webRes.end('No data on S3');
    }
  }).on('error', function(e) {
    console.log('problem with s3Req: ' + e.message);
  });
}).listen(1337);
The Node processes are all alive (2 child workers), and forever has not restarted them:
# ps x | grep node
31436 ? Ss 3:43 node /usr/bin/forever -l LOG -o OUT -e ERR -a start server.js
31437 ? Sl 0:10 node /root/server.js
31440 ? Sl 1:17 /usr/bin/nodejs /root/server.js
31441 ? Sl 1:17 /usr/bin/nodejs /root/server.js
Then I suspected a too-many-connections problem and ran "lsof -p PID | wc -l", but the counts all looked fine: only a few dozen.
My Node.js experience is only a week or so. Did I miss something important?
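A note on the gist above (a hedged observation rather than a confirmed diagnosis, and certainly true for current Node versions): the S3 response body is never read, and an unconsumed HTTP response keeps its socket busy, which can eventually exhaust the HTTP agent's connection pool and make new outgoing requests hang. A minimal sketch of draining it:
http.createServer(function(webReq, webRes) {
  http.get(s3Options, function(s3Res) {
    if (s3Res.statusCode == 200) {
      s3Res.resume(); // drain the body so the socket is released back to the pool
      s3Res.on('end', function() {
        webRes.end('Found data on S3');
      });
    } else {
      s3Res.resume(); // drain non-200 responses as well
      webRes.end('No data on S3');
    }
  }).on('error', function(e) {
    console.log('problem with s3Req: ' + e.message);
    webRes.end('problem with s3Req');
  });
}).listen(1337);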
No doubt I'm doing something stupid, but I've been having problems running a simple Node.js app using the Nerve micro-framework. Testing with ApacheBench, it seems that the code within my single controller is being invoked more often than the app itself is being called.
I've created a test script like so:
'use strict';

(function () {
  var path = require('path');
  var sys = require('sys');
  var nerve = require('/var/www/libraries/nerve/nerve');

  var nerveCounter = 0;

  r_server.on("error", function (err) {
    console.log("Error " + err);
  });

  var app = [
    ["/", function(req, res) {
      console.log("nc = " + ++nerveCounter);
    }]
  ];

  nerve.create(app).listen(80);
}());
Start the server. From another box, run a load test:
/usr/sbin/ab -n 5000 -c 50 http://<snip>.com/
...
Complete requests: 5000
...
Percentage of the requests served within a certain time (ms)
...
100% 268 (longest request)
But the node script itself is printing all the way up to:
nc = 5003
rc = 5003
In other words, the server is being called 5000 times but the controller code is being called 5003 times.
Any ideas what I'm doing wrong?
Updated
I changed the tone and content of this question significantly to reflect the help Colum, Alfred and GregInYEG gave me in realising that the problem did not lie with Redis or Nerve and probably lay with ApacheBench.
Program:
const PORT = 3000;
const HOST = 'localhost';

const express = require('express');
const app = module.exports = express.createServer();

const redis = require('redis');
const client = redis.createClient();

app.get('/incr', function(req, res) {
  client.incr('counter', function(err, reply) {
    res.send('incremented counter to:' + reply.toString() + '\n');
  });
});

app.get('/reset', function(req, res) {
  client.del('counter', function(err, reply) {
    res.send('resetted counter\n');
  });
});

app.get('/count', function(req, res) {
  client.get('counter', function(err, reply) {
    res.send('counter: ' + reply.toString() + '\n');
  });
});

if (!module.parent) {
  app.listen(PORT, HOST);
  console.log("Express server listening on port %d", app.address().port);
}
Conclusion
It works without any flaws on my computer:
$ cat /etc/issue
Ubuntu 10.10 \n \l
$ uname -a
Linux alfred-laptop 2.6.35-24-generic #42-Ubuntu SMP Thu Dec 2 01:41:57 UTC 2010 i686 GNU/Linux
$ node -v
v0.2.6
$ npm install express hiredis redis
npm info build Success: redis#0.5.2
npm info build Success: express#1.0.3
npm info build Success: hiredis#0.1.6
$ ./redis-server --version
Redis server version 2.1.11 (00000000:0)
$ git clone -q git@gist.github.com:02a3f7e79220ea69c9e1.git gist-02a3f7e7; cd gist-02a3f7e7; node index.js
$ #from another tab
$ clear; curl http://localhost:3000/reset; ab -n 5000 -c 50 -q http://127.0.0.1:3000/incr > /dev/null; curl http://localhost:3000/count;
resetted counter
This is ApacheBench, Version 2.3 <$Revision: 655654 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)
Completed 500 requests
Completed 1000 requests
Completed 1500 requests
Completed 2000 requests
Completed 2500 requests
Completed 3000 requests
Completed 3500 requests
Completed 4000 requests
Completed 4500 requests
Completed 5000 requests
Finished 5000 requests
Server Software:
Server Hostname: 127.0.0.1
Server Port: 3000
Document Path: /incr
Document Length: 25 bytes
Concurrency Level: 50
Time taken for tests: 1.172 seconds
Complete requests: 5000
Failed requests: 4991
(Connect: 0, Receive: 0, Length: 4991, Exceptions: 0)
Write errors: 0
Total transferred: 743893 bytes
HTML transferred: 138893 bytes
Requests per second: 4264.61 [#/sec] (mean)
Time per request: 11.724 [ms] (mean)
Time per request: 0.234 [ms] (mean, across all concurrent requests)
Transfer rate: 619.61 [Kbytes/sec] received
Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 0 0.5 0 7
Processing: 4 11 3.3 11 30
Waiting: 4 11 3.3 11 30
Total: 5 12 3.2 11 30
Percentage of the requests served within a certain time (ms)
50% 11
66% 13
75% 14
80% 14
90% 15
95% 17
98% 19
99% 24
100% 30 (longest request)
counter: 5000