No doubt I'm doing something stupid, but I've been having problems running a simple node.js app using the Nerve micro-framework. Testing with apachebench, it seems that the code within my single controller is being invoked more frequently than the app itself is being called.
I've created a test script like so:
'use strict';
// Minimal Nerve app used to count how many times the single controller
// is invoked, for cross-checking against apachebench's request count.
(function () {
    var path = require('path');
    var sys = require('sys');
    var nerve = require('/var/www/libraries/nerve/nerve');
    // BUG FIX: r_server was referenced below but never defined, so the
    // script crashed with a ReferenceError before the server started.
    // It is the Redis client the surrounding discussion refers to.
    var redis = require('redis');
    var r_server = redis.createClient();
    var nerveCounter = 0;
    // Surface Redis connection problems instead of crashing the process.
    r_server.on("error", function (err) {
        console.log("Error " + err);
    });
    // Single route: log an incrementing counter per controller call.
    var app = [
        ["/", function(req, res) {
            console.log("nc = " + ++nerveCounter);
        }]
    ];
    nerve.create(app).listen(80);
}());
Start the server. From another box, run a load test:
/usr/sbin/ab -n 5000 -c 50 http://<snip>.com/
...
Complete requests: 5000
...
Percentage of the requests served within a certain time (ms)
...
100% 268 (longest request)
But the node script itself is printing all the way up to:
nc = 5003
rc = 5003
In other words, the server is being called 5000 times but the controller code is being called 5003 times.
Any ideas what I'm doing wrong?
Updated
I changed the tone and content of this question significantly to reflect the help Colum, Alfred and GregInYEG gave me in realising that the problem did not lie with Redis or Nerve, and probably lay with apachebench.
Program:
// Minimal Express + Redis test app used to cross-check the request
// counts reported by apachebench. Routes:
//   /incr  - atomically increments the "counter" key
//   /reset - deletes the key
//   /count - prints the current value
const PORT = 3000;
const HOST = 'localhost';
const express = require('express');
const app = module.exports = express.createServer();
const redis = require('redis');
const client = redis.createClient();

app.get('/incr', function(req, res) {
  client.incr('counter', function(err, reply) {
    // BUG FIX: the error argument was ignored; on failure `reply` is
    // undefined and reply.toString() crashed the request handler.
    if (err) {
      res.send('error incrementing counter: ' + err + '\n');
      return;
    }
    res.send('incremented counter to:' + reply.toString() + '\n');
  });
});

app.get('/reset', function(req, res) {
  client.del('counter', function(err, reply) {
    if (err) {
      res.send('error resetting counter: ' + err + '\n');
      return;
    }
    res.send('resetted counter\n');
  });
});

app.get('/count', function(req, res) {
  client.get('counter', function(err, reply) {
    if (err) {
      res.send('error reading counter: ' + err + '\n');
      return;
    }
    // BUG FIX: GET on a missing key yields null, so the original
    // reply.toString() threw. Report 0 for a fresh counter instead.
    res.send('counter: ' + (reply === null ? '0' : reply.toString()) + '\n');
  });
});

// Only bind the port when run directly (not when require()d by a test).
if (!module.parent) {
  app.listen(PORT, HOST);
  console.log("Express server listening on port %d", app.address().port);
}
Conclusion
It works without any flaws on my computer:
$ cat /etc/issue
Ubuntu 10.10 \n \l
$ uname -a
Linux alfred-laptop 2.6.35-24-generic #42-Ubuntu SMP Thu Dec 2 01:41:57 UTC 2010 i686 GNU/Linux
$ node -v
v0.2.6
$ npm install express hiredis redis
npm info build Success: redis#0.5.2
npm info build Success: express#1.0.3
npm info build Success: hiredis#0.1.6
$ ./redis-server --version
Redis server version 2.1.11 (00000000:0)
$ git clone -q git#gist.github.com:02a3f7e79220ea69c9e1.git gist-02a3f7e7; cd gist-02a3f7e7; node index.js
$ #from another tab
$ clear; curl http://localhost:3000/reset; ab -n 5000 -c 50 -q http://127.0.0.1:3000/incr > /dev/null; curl http://localhost:3000/count;
resetted counter
This is ApacheBench, Version 2.3 <$Revision: 655654 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)
Completed 500 requests
Completed 1000 requests
Completed 1500 requests
Completed 2000 requests
Completed 2500 requests
Completed 3000 requests
Completed 3500 requests
Completed 4000 requests
Completed 4500 requests
Completed 5000 requests
Finished 5000 requests
Server Software:
Server Hostname: 127.0.0.1
Server Port: 3000
Document Path: /incr
Document Length: 25 bytes
Concurrency Level: 50
Time taken for tests: 1.172 seconds
Complete requests: 5000
Failed requests: 4991
(Connect: 0, Receive: 0, Length: 4991, Exceptions: 0)
Write errors: 0
Total transferred: 743893 bytes
HTML transferred: 138893 bytes
Requests per second: 4264.61 [#/sec] (mean)
Time per request: 11.724 [ms] (mean)
Time per request: 0.234 [ms] (mean, across all concurrent requests)
Transfer rate: 619.61 [Kbytes/sec] received
Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 0 0.5 0 7
Processing: 4 11 3.3 11 30
Waiting: 4 11 3.3 11 30
Total: 5 12 3.2 11 30
Percentage of the requests served within a certain time (ms)
50% 11
66% 13
75% 14
80% 14
90% 15
95% 17
98% 19
99% 24
100% 30 (longest request)
counter: 5000
Related
I'm writing a script to scan the /config route on some ports on a host.
I have first written the script in node.js and am now porting it to bash to achieve less dependencies.
Why is the bash script more than 300 times slower when scanning localhost. What am I missing?
I guess there are some optimizations built in node-fetch. How can I achieve the same in bash?
node.js: 10 Ports -> 79ms
bash(0.0.0.0): 10 Ports -> 2149ms
bash(localhost): 10 Ports -> 25156ms
I found out that in bash when using 0.0.0.0 instead of localhost, it is only 27 times slower, but still... (In node.js, using 0.0.0.0 does not make a significant difference.)
node.js (IFFE omitted for readability)
// Sequentially probe each port in [portFrom, portTo] and print the JSON
// served at /config. Closed ports reject the fetch and are skipped.
// NOTE(review): portFrom / portTo come from the IIFE wrapper the author
// omitted "for readability" — confirm they are defined there.
import fetch from 'node-fetch';
for (let port = portFrom; port <= portTo; port++) {
  try {
    // BUG FIX: the original URL was "http://localhost" + port — missing
    // the ':' separator, producing e.g. http://localhost3000/config.
    const res = await fetch(`http://localhost:${port}/config`);
    const json = await res.json();
    console.log(json);
  } catch {/* no-op: connection refused is expected while scanning */}
}
bash
# Probe every port in the configured range and print whatever JSON the
# /config route returns (curl -s suppresses progress output).
for ((port = port_from; port <= port_to; port++)); do
  response="$(curl -s "http://localhost:${port}/config")"
  echo "${response}"
done
I have a express app which runs on local EC2 instance. When I run my express app in the localhost I am able to get CPU % utilization with the following code -
// Metric-collection dependencies: Node built-ins plus two third-party
// CPU samplers (node-os-utils and os-utils) compared side by side.
import os from "os";
import { memoryUsage } from "process";
import osu from "node-os-utils";
import { usageMetrics } from "../utils/usagemetric";
import * as osUtil from "os-utils";
// in the controller
// Hand every sampler to usageMetrics so the readings from the different
// libraries can be compared in one place.
const cpu = osu.cpu;
usageMetrics(cpu, process, os, osUtil);
// Log memory and CPU utilisation as reported by several sources so the
// readings can be compared (node-os-utils, os-utils, and the injected
// `os` object). All results go to the console; nothing is returned.
//
// NOTE(review): Node's core `os` module has no cpuUsage() function —
// that API belongs to the os-utils package. The controller above passes
// the core module, so the final call used to throw a TypeError; it is
// now guarded.
export const usageMetrics = (cpu: any, process: any, os: any, osUtil: any) => {
  const totalMemory = os.totalmem();
  const rss = process.memoryUsage().rss; // resident set size of this process
  const totalUnusedMemory = totalMemory - rss;
  const percentageMemoryUnUsed = totalUnusedMemory / totalMemory;
  console.log("system memory", totalMemory);
  console.log("node process memory usage", rss);
  console.log("Memory consumed in %:", 100 - percentageMemoryUnUsed * 100);
  // node-os-utils: promise-based CPU usage sample.
  cpu.usage().then((info) => {
    console.log("Node-OS-utils-CPU Usage(%):", info);
  });
  // os-utils: callback-based CPU usage sample.
  osUtil.cpuUsage(function (v) {
    console.log(" OS-Util-CPU-Usage(%): " + v);
  });
  // BUG FIX: only call os.cpuUsage when the injected object provides it.
  if (typeof os.cpuUsage === "function") {
    os.cpuUsage(function (v) {
      console.log("native OS-CPU Usage(%):", +v);
    });
  }
};
The above code works well on localhost, giving values such as 43%, 15%, or 20%. But when I run it on the EC2 instance it shows me 1% or 2%, and sometimes both the os-utils and node-os-utils libraries report 0% CPU utilization. How can I get the actual CPU utilization, either with these libraries or with native Node.js methods?
While Running on EC2
While running on localhost
I have encountered peculiar slowness on mac, when using node-postgres or deno-postgres. I have a very simple table with two columns, and when I execute query select * from table it happens very very slowly. I have also tried selecting directly with SQL client and it is very fast.
So to be precise - the table has 60 entries. two columns. on the remote postgres server (12.2)
I have the following three scripts.
#node v13.12.0
// Timing probe: connect to Postgres with node-postgres and measure how
// long a plain SELECT over a tiny table takes end-to-end.
const { Client } = require('pg')
const client = new Client({
  user: 'u',
  host: 'address',
  database: 'db',
  password: 'pw',
  port: 5432,
})
client.connect()
const start = Date.now();
client.query('SELECT * from unit', (err, res) => {
  const ms = Date.now() - start;
  // BUG FIX: `err` was ignored, so on failure `res` is undefined and
  // res.rows.length threw a TypeError that masked the real error.
  if (err) {
    console.error(err);
  } else {
    console.log(`db call ${ms}`);
    console.log(res.rows.length);
  }
  client.end()
})
#deno 1.1.2
#v8 8.5.216
#typescript 3.9.2
// Deno variant of the same timing probe: run one SELECT against the
// remote Postgres server and report the round-trip time in ms.
import { Client } from "https://deno.land/x/postgres#v0.4.2/mod.ts";

const client = new Client({
  user: "u",
  database: "db",
  hostname: "addr",
  password: "pw",
  port: 5432,
});

await client.connect();

const queryStartedAt = Date.now();
const dataset = await client.query("SELECT * FROM unit");
const ms = Date.now() - queryStartedAt;

console.log(`db call ${ms}`);
console.log(dataset.rowsOfObjects().length)
#python 3.7.7
# Timing probe: run "select * from unit" through psycopg2 and print the
# row count plus the elapsed wall-clock time in milliseconds.
import psycopg2
from datetime import datetime

connection = psycopg2.connect(user = "u",
                              password = "p",
                              host = "addr",
                              port = "5432",
                              database = "db")
cursor = connection.cursor()
try:
    a = datetime.now()
    cursor.execute("select * from unit")
    records = cursor.fetchall()
    b = datetime.now()
    c = b - a
    print(len(records))
    print(c.total_seconds() * 1000)
finally:
    # BUG FIX: the cursor and connection were never closed, leaking the
    # server-side session on every run.
    cursor.close()
    connection.close()
and when I execute all three scripts on my macos (10.15.5) I get the following results:
"select * from unit" (60 records)
node ~16'000ms
deno ~16'000ms
python ~240ms
when I execute "select * from unit limit 5"
node ~480ms
deno ~110ms
python ~220ms
when I execute "select * from unit" on the same ubuntu server where postgres is installed then all 3 scripts execute in around 10ms.
I have enabled timing and full logging in the postgres server, and I see that I can see that queries in all the above situations have executed in below one milisecond, around ~0.600ms
At this point, I have a feeling that the fault lies at the intersection of node/deno and my macOS — which could probably be V8, or something else that deno and node share.
So, what could it be?
p.s I also tried node profiler and I see this:
[Summary]:
ticks total nonlib name
0 0.0% 0.0% JavaScript
116 84.7% 99.1% C++
22 16.1% 18.8% GC
20 14.6% Shared libraries
1 0.7% Unaccounted
[C++ entry points]:
ticks cpp total name
45 54.9% 32.8% T __ZN2v88internal32Builtin_DatePrototypeSetUTCHoursEiPmPNS0_7IsolateE
36 43.9% 26.3% T __ZN2v88internal21Builtin_HandleApiCallEiPmPNS0_7IsolateE
1 1.2% 0.7% T __ZN2v88internal23Builtin_DateConstructorEiPmPNS0_7IsolateE
but I have no idea what that might mean.
ok, I finally figured it out.
As nothing was working I decided to move my API to the remote server instead of running it locally, started it up, and was pleased to see instant communication between API and database... only to see exactly the same slowness on the frontend running on my machine.
And this is when it dawned on me - this is some sort of traffic shaping from my internet provider. I turned on VPN and everything started working as expected immediately.
No wonder I couldn't understand why it was getting stuck. The issue was way down the stack, this will be a lesson for me - always have to think outside the box that is a computer itself.
this explains why it was sometimes working normally. However, it doesn't explain why this issue never affected python script - maybe it was communicating with the Postgres server in a little bit different manner that didn't trigger the provider's filter. Who knows.
I'm trying to start a web server and test the http request/response in one node.js file.
The server is running and I can open it in my browser with ip 'http://127.0.0.1:8080'.
Also I can run 'curl http://127.0.0.1:8080' in the terminal and get response.
However, when I tried to run my js script, the output showed the connection was denied. Why does this happen, and how can I resolve this issue?
const { exec } = require('child_process');
const testDirectory = 'testDirectory';
const demoDirectory = 'packages/my-react-component';
console.log('start server');
yarnRun = exec('yarn run start:demo', {
cwd: process.cwd().toString() + '/' + testDirectory + '/' + demoDirectory + '/',
});
process.stdin.resume(); // I want my server keep alive
const localhost = 'http://127.0.0.1:8080';
getRequestTest = exec('curl ' + localhost);
getRequestTest.stdout.on('data', function(data)){
console.log('stdout: ', data.toString())
}
getRequestTest.stderr.on('data', function(data)){
console.log('stderr: ', data.toString())
}
The output from the curl execution in the js file is:
Failed to connect to 127.0.0.1 port 8080: Connection refused
This is the output from 'curl http://127.0.0.1:8080 -v'
stderr: * Rebuilt URL to: http://127.0.0.1:8080/
stderr: % Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
stderr: * Trying 127.0.0.1...
* TCP_NODELAY set
stderr: * Connection failed
* connect to 127.0.0.1 port 8080 failed: Connection refused
* Failed to connect to 127.0.0.1 port 8080: Connection refused
* Closing connection 0
curl: (7) Failed to connect to 127.0.0.1 port 8080: Connection refused
Try with 0.0.0.0 instead of 127.0.0.1. This works for both localhost and the IP address.
I found the problem is when I started the server in my code, it immediately executed 'curl'. In the meantime the server had not been started yet. Therefore I got such connection denied error.
I tried to setTimeout on 'curl' for 5 seconds and the cmd successfully gave me the http response.
However I think the code looks ugly. Is there a better way to write?
// Start the server, then give it five seconds to boot before probing it
// with curl — avoiding the race where curl runs before the listener is up.
// BUG FIX: startServer and getRequestTest were implicit globals (a
// ReferenceError under 'use strict'); both are declared now.
const startServer = exec('node ' + process.cwd().toString() + '/script/startServer.js');
const localhost = 'http://127.0.0.1:8080';
console.log('localhost: ' + localhost);

function testRequest() {
  console.log('run curl ' + localhost + ' -v');
  const getRequestTest = exec('curl ' + localhost + ' -v');
  getRequestTest.stdout.on('data', function(data) {
    console.log('stdout: ' + data.toString());
  });
  getRequestTest.stderr.on('data', function(data) {
    console.log('stderr: ' + data.toString());
  });
}

// NOTE(review): a fixed 5s delay is fragile; polling until the port
// accepts connections would be more robust.
setTimeout(testRequest, 5000);
I'm looking to use ngrok on port 4000; it's a command that outputs a Forwarding URL. Every time it runs, there's a new randomly generated URL.
I would like to pass that URL (e.g. http://2e1v870f.ngrok.io) to a node process.env variable, rather than hard-coding it every time.
For example in bash:
ngrok http 4000 | <process/define_something> | FORWARDING={something} node index.js
Also, since these are two concurrently running processes, I've used npm-run-all to do something like this: https://www.npmjs.com/package/npm-run-all
ngrok by #inconshreveable (Ctrl+C to quit)
Session Status online
Version 2.2.8
Region United States (us)
Web Interface http://127.0.0.1:4041
Forwarding http://2e1v870f.ngrok.io -> localhost:4000
Forwarding https://2e1v870f.ngrok.io -> localhost:4000
Connections ttl opn rt1 rt5 p50 p90
0 0 0.00 0.00 0.00 0.00
I've turned to using the node wrapper for ngrok as I couldn't access the output from bash. Here's an example start.js:
// When no ROOT_URL is configured, open an ngrok tunnel to port 3000 and
// launch Meteor with the tunnel's public URL as ROOT_URL.
if (!process.env.ROOT_URL) {
  var ngrok = require('ngrok');
  var shell = require('shelljs');
  ngrok.connect(3000, function(err, url) {
    // BUG FIX: `err` was ignored; a failed tunnel left `url` undefined
    // and silently started Meteor with "ROOT_URL=undefined".
    if (err) {
      console.error('ngrok failed to connect:', err);
      return;
    }
    shell.exec('ROOT_URL=' + url + ' meteor --settings settings.json', function(code, stdout, stderr) {
      console.log('Exit code:', code);
      console.log('Program output:', stdout);
      console.log('Program stderr:', stderr);
    });
  });
}