I want to test how many connections my Redis server can hold, so I call redis.createClient() in a loop. While the Redis server keeps running fine, I get an EMFILE error; I know that means I have run out of file descriptors.
But wait: I tested my MQTT server just before this, and I did the same thing to it.
I called mqtt.createClient() in loops of 10000, of 20000... but I never got an EMFILE error.
So, does the Node.js MQTT library use a different mechanism underneath?
redis-client.js
var redis = require('redis');

function start() {
    var client = redis.createClient();
    client.on('error', function(err) {
        console.log('Error ' + err);
    });
}

exports.start = start;
redis-test.js
var redis_client = require('./redis-client');

for (var i = 0; i < 10000; ++i) {
    redis_client.start();
    console.log('redis client ' + i + ' started');
}
mqtt-subclient.js
var mqtt = require('mqtt');

function start() {
    var client = mqtt.createClient();
    client.subscribe('message');
    //client.publish('message', 'hello me!');
    client.on('message', function(topic, message) {
        console.log('received message:');
        console.log(message);
    });
    client.on('connack', function(packet) {
        console.log(packet);
        if (packet.returnCode == 0) {
            console.log('connected successfully');
        }
    });
    client.on('suback', function(packet) {
        console.log(packet.messageId);
    });
    client.on('error', function(err) {
        console.log(err);
    });
}

exports.start = start;
mqtt-test.js
var subclient = require('./mqtt-subclient.js');

for (var i = 0; i < 10000; ++i) {
    subclient.start();
    console.log('client ' + i + ' started');
}
Redis cannot accept more than x simultaneous connection attempts, where x is the backlog parameter of the listen system call.
The backlog is capped by the minimum of the somaxconn kernel parameter (128 is a common default value) and 512. So if you attempt more than min(somaxconn, 512) simultaneous connections, you can get errors. Adding a small delay between your connection attempts should fix the problem.
Then, you need to check that you have enough resources to open 10000 file descriptors (check the output of ulimit -a), and that your TCP/IP ephemeral port range is large enough to accommodate that many client connections.
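For illustration, here is a minimal sketch of that suggestion (my own, not from the original post): stagger the redis.createClient() calls with setTimeout so the number of simultaneous connection attempts stays under the backlog. The 5 ms delay is an arbitrary choice.

var redis = require('redis');

var total = 10000;
var delayMs = 5; // small pause so pending connects never exceed the listen backlog
var started = 0;

function startOne() {
    var client = redis.createClient();
    client.on('error', function(err) {
        console.log('Error ' + err);
    });
    started++;
    if (started < total) {
        setTimeout(startOne, delayMs);
    } else {
        console.log('all ' + total + ' clients started');
    }
}

startOne();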
Related
I am trying to create a socket server with the net module in Node.js. Different clients should connect to it and be able to send/receive commands which will be evaluated on the server in the future.
Clients should also be able to ask for files, which the server will send to them.
So, for example, one client sends the command "file picture.jpg", the server sends the contents of picture.jpg through the socket, and the client saves the download as download.jpg.
This works fine for files <= 2 kB, but not above. It seems the client receives larger files split into several packets. That would be fine if I only ever sent files through the socket (I would just append every "packet" to the file), but the problem is that a big file can take minutes to transfer, and in that time the server may send other commands (which cannot wait for the file transfer to complete).
Here is my simplified code:
server.js
var net = require('net');
var fs = require('fs');

var HOST = '127.0.0.1';
var PORT = 6969;

// All connected socket clients will be in this list:
var client = [];

const server = net.createServer(function(sock) {
    // We have a connection - a socket object is assigned to the connection automatically
    console.log('Client connected: ' + sock.remoteAddress + ':' + sock.remotePort);
    // Put this new client in the list
    client.push(sock);
    sock.on('data', function(data) {
        const dataStr = data.toString();
        const cmd = dataStr.split(' ', 1)[0];
        const value = dataStr.slice(cmd.length).trim();
        if (cmd == "file") {
            fs.readFile(value, 'utf8', function(err, contents) {
                if (err) {
                    if (err.code == 'ENOENT') {
                        sock.write('File does not exist: ' + value); // Send error to client
                    } else {
                        console.error(err);
                    }
                } else {
                    sock.write('file ' + contents); // Send file to client
                }
            });
            console.log("Sending file to client.");
        } else if (cmd == "quit") {
            sock.write('Bye!');
            sock.destroy();
        } else {
            sock.write('Unknown command: "' + dataStr + '"');
        }
    });
    sock.on('error', (e) => {
        if (e.code == "ECONNRESET") {
            console.log("Got ECONNRESET, continue!");
        } else {
            console.log(e);
        }
    });
    sock.on('close', function() {
        var clientId = client.indexOf(sock);
        client.splice(clientId, 1);
        console.log('Client closed connection: ' + sock.remoteAddress + ':' + sock.remotePort);
    });
});

server.on('error', function(e) {
    if (e.code == 'EADDRINUSE') {
        console.log('Creating server failed: Address in use ' + HOST + ':' + PORT);
    }
});

server.listen(PORT, HOST, () => {
    console.log('Server listening on ' + HOST + ':' + PORT);
});
client.js
var net = require('net');
var fs = require('fs');

var HOST = '127.0.0.1';
var PORT = 6969;

var client = new net.Socket();
process.stdin.setEncoding('utf8');
var stdin = process.openStdin();

client.on('error', function(e) {
    console.log("ERROR: " + e.code);
    process.exit(1);
});

client.connect(PORT, HOST, function() {
    console.log('Connected to server: ' + HOST + ':' + PORT);
    process.stdout.write('> ');
});

stdin.addListener("data", function(d) {
    var inp = d.toString().trim(); // clean input functions here
    if (inp === "exit") {
        //client.write("goodbye");
        client.destroy();
        stdin.destroy();
    } else {
        client.write(inp);
    }
});

client.on('data', function(data) {
    if (data.toString().substr(0, 5) === 'file ') { // If receiving a file from the server, data starts with "file "
        const filename = "downloaded.jpg";
        fs.writeFile(filename, data.toString().substr(5), function(err) {
            if (err) {
                return console.log(err);
            }
            console.log("Saved " + filename + ".");
            process.stdout.write('> ');
        });
    } else { // Unhandled commands will be printed on the console:
        console.log('DATA: ' + data);
        process.stdout.write('> ');
    }
});

client.on('close', function() {
    console.log('Connection closed');
    process.exit();
});
So, any suggestions on how best to solve this? Also, can I simply expand the buffer size in net.Socket somehow, to something like 32 MB?
Since TCP emulates a stream, you don't want to rely on anything about how the stream is broken into separate data events. The data given to one callback could be the first half of a message, or two messages at once.
Instead, you want to emulate your datagram protocol on top of this reliable stream by appending stream contents to the end of a buffer and removing complete messages from the front for processing.
For example, this simple server from the docs demonstrates a minimal valid TCP server:
const net = require('net');

const server = net.createServer((socket) => {
    let name = '';
    socket.setEncoding('utf8');
    socket.on('data', (chunk) => name += chunk);
    socket.on('end', () => socket.end(`hello ${name}`));
});

server.listen(8000);
It assembles a buffer with no assumptions about the number of data calls; in this simple case, the buffer holds a single message that is used at the end event.
To process messages before the end of the connection, you also want to examine the front of the buffer at the end of every data event to see whether any messages are complete and ready to process. This separation into complete messages needs to be part of your protocol.
While message separation can be done with length indicators or reserved sequences, reserved sequences require encoding the files (to avoid accidentally matching the sequence inside file data) and scanning the data continuously to find them. That makes length indicators preferable for dealing with the file data.
So, for example, the file [data] response first becomes file [#####] [data], where ##### tells you how much data to keep assembling on the buffer before a data callback removes the entire message from the front of the buffer for processing as a fileSave().
Then, to handle more granular interactivity, simply break these operations into smaller interactions: for example, replace file [wholefilecount] [data] responses with filechunk [0-maxchunk] [data] responses that require a filenext command to continue, and send a fileend to tell the client the file is done.
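To make the framing idea concrete, here is a minimal sketch (my own illustration, not the asker's protocol): each message is a 4-byte big-endian length header followed by that many payload bytes, and the receiver peels complete messages off the front of an accumulating buffer.

const net = require('net');

// Receiver: accumulate chunks, then extract every complete message from the front.
function createFramedReceiver(onMessage) {
    let buffered = Buffer.alloc(0);
    return function onData(chunk) {
        buffered = Buffer.concat([buffered, chunk]);
        while (buffered.length >= 4) {
            const length = buffered.readUInt32BE(0);  // 4-byte length header
            if (buffered.length < 4 + length) break;  // message not fully arrived yet
            onMessage(buffered.slice(4, 4 + length)); // one complete message
            buffered = buffered.slice(4 + length);    // drop it from the front
        }
    };
}

// Sender: prefix every payload with its length.
function sendFramed(sock, payload) {
    const header = Buffer.alloc(4);
    header.writeUInt32BE(payload.length, 0);
    sock.write(Buffer.concat([header, payload]));
}

// Usage: each message arrives exactly once, however TCP chunks the stream.
const server = net.createServer((sock) => {
    sock.on('data', createFramedReceiver((msg) => {
        console.log('message:', msg.toString());
    }));
});
server.listen(7000, () => {
    const c = net.connect(7000, 'localhost', () => {
        sendFramed(c, Buffer.from('file picture.jpg'));
        c.end();
    });
});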
I wanted to compare the CPU usage of RabbitMQ and Redis pub/sub for 100k messages, and I am running the RabbitMQ server on Windows. The code is attached below. My system hangs. Is this the right way to create that many channels?
var amqp = require("amqplib/callback_api");

var time = 0;
var limit = 100000;

amqp.connect("amqp://localhost", function(err, conn) {
    if (err) {
        console.log('Connection closed-----------error connect');
        return;
    }
    var timer = setInterval(() => {
        time += 1;
        if (time >= limit) {
            clearInterval(timer);
        }
        conn.createChannel(function(err, ch) {
            if (err) {
                console.log('Connection closed-----------error createChannel');
                return;
            }
            var q = "queue_name" + time.toString();
            // console.log(q);
            var msg = "this is the message string!!!";
            ch.assertQueue(q, { durable: false });
            ch.sendToQueue(q, Buffer.from(msg), { persistent: false });
            // console.log("time = " + time);
        });
    }, 10);
});
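As a point of comparison, here is a hedged sketch (my own, not from the question) that opens the connection once, creates a single channel, and reuses it for all 100k messages; the queue name is a placeholder. Creating 100,000 channels at 10 ms intervals consumes a channel's worth of broker and client resources per message, which likely explains the hang.

var amqp = require("amqplib/callback_api");

var limit = 100000;

amqp.connect("amqp://localhost", function(err, conn) {
    if (err) { console.error(err); return; }
    conn.createChannel(function(err, ch) {
        if (err) { console.error(err); return; }
        var q = "bench_queue"; // placeholder queue name
        var msg = "this is the message string!!!";
        ch.assertQueue(q, { durable: false });
        for (var i = 0; i < limit; i++) {
            // sendToQueue buffers internally and returns false when the write
            // buffer is full; a careful benchmark would pause for 'drain' then
            ch.sendToQueue(q, Buffer.from(msg), { persistent: false });
        }
        console.log("queued " + limit + " messages");
    });
});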
Trying to write a TCP client in Node v0.10.15, and I am having a little trouble getting data back from the server. I know the server is working properly, because I have 3-4 different clients, written in different languages, communicating with it.
Below is a snippet of a larger piece of code, but it should get the point across.
The problem: I'm expecting 2 packets coming back after writing to the socket (that part is not included in this example), but I only see the "data" event fire once. Is there something I need to do to get Node to resume reading from the TCP stream? I can confirm that the server sends 2 packets (the length, then the actual data). Any help would be appreciated.
var dc = require('./DataContracts.js');
var net = require('net');
require('buffertools').extend();

var client = new net.Socket();
var isConnected = false;
var serverHost = '10.2.2.21';
var dataCallback;
var receivedBuffer = [];

function InitComm(buffer) {
    if (!isConnected) {
        client.connect(4987, serverHost, function() {
            client.on('data', function(data) {
                console.log('Received server packet...');
                var buf = new Buffer(data);
                receivedBuffer.push(buf);
                client.resume();
            });
            client.on('end', function() {
                if (receivedBuffer.length > 1) {
                    if (dataCallback !== undefined)
                        dataCallback(receivedBuffer);
                }
            });
            client.on('close', function() {
                // clean up
            });
            client.on('error', function(err) {
                console.log('Error!: ' + err);
            });
            Communicate(buffer);
        });
    } else {
        Communicate(buffer);
    }
}
It turns out that Node was combining both packets into a single data event. I must have been missing a carriage return at the end of the first packet.
I have the following situation and just want to check that I am doing it right. I have a couple of devices (RS232) at my customers' sites. Each device has an RS232-to-WiFi dongle attached, so data from the device is sent to my server (it outputs a data string, for example: {12,1,etc). On my server I have a Node.js script running that opens a port and receives all incoming data.
var net = require('net');

var host = '1.1.1.1';
var servers = [];
var ports = [20000, 20001, 20002, 20003, 20004];

// Create servers
ports.forEach(function (port) {
    var s = net.createServer(function (sock) {
        // We have a connection - a socket object is assigned to the connection automatically
        console.log('CONNECTED (' + sock.localPort + '): ' + sock.remoteAddress + ':' + sock.remotePort);
        // Add a 'data' event handler to this instance of socket
        sock.on('data', function (data) {
            // post data to a server so it can be saved and stuff
            postData(data.toString(), sock);
            // close connection
            sock.end();
        });
        sock.on('error', function (error) {
            console.log('******* ERROR ' + error + ' *******');
            // close connection
            sock.end();
        });
    });
    s.listen(port, host, function () {
        console.log('Server listening on ' + host + ':' + s.address().port);
    });
    servers.push(s);
});
Okay, so this works pretty well. But I am wondering: sometimes not all of the data arrives at once. Sometimes I get {12, and the rest arrives a second later (or in even more pieces). What can I do to optimize this script further? Do I need to call sock.end() after receiving data? Does this hurt network performance for me or my customers?
If you need more info, let me know.
That depends on the protocol of your devices. If the devices open a new connection for each chunk of data, you can write the program like this (do not close the socket in the data event):
....
// the socket will close and be destroyed automatically after the device closes the connection
var s = net.createServer(function (sock) {
    sock.setEncoding('utf8');
    var body = "";
    sock.on('data', function (data) {
        body = body + data;
    });
    sock.on('end', function () {
        console.log(body);
        postData(body);
    });
    // TODO error handling here
});
....
Note: a socket is not guaranteed to deliver all the data at once; you should listen for the data event and concatenate all the chunks before using them.
If your devices don't close the socket, you will never receive 'end', and the code should instead look like this:
....
var s = net.createServer(function (sock) {
    sock.setEncoding('utf8');
    // var body = "";
    sock.on('data', function (data) {
        // body = body + data;
        postData(data);
    });
    sock.on('end', function () {
        console.log('end');
    });
    // TODO error handling here
});
....
I would like to know if there's a way to scan my local network's IP range for open ports of a specific number.
Essentially, I'm looking for Node.js to find clients of a specific type without knowing their IP addresses. In this case, RFID readers, which listen on port 14150.
I'd like this scan to be quick, so I don't want a long timeout between IP addresses. It should all happen rather quickly, perhaps within a few seconds at most for an entire local IP range of up to 255 clients, excluding my own IP.
I wrote code that does what I want, but it's painfully slow. I would like to make it faster by blazing through the connections and giving up on any IP that cannot be reached within 20 ms. I want to capture the successful connections in an array that I can use for another purpose.
var net = require('net'); // Required to create socket connections

var ip = 254; // IP address to start with on a C class network

function checkConnect() {
    ip--;
    var thisIP = '192.168.1.' + ip; // concatenate to a real IP address
    var S = new net.Socket();
    S.connect(80, thisIP);
    if (ip > 0) { checkConnect(); }
    S.on('connect', function () { console.log('port 80 found on ' + thisIP); });
    S.on('error', function () { console.log('no such port on ' + thisIP); });
    S.end();
}

checkConnect();
I've made one for you: https://github.com/eviltik/evilscan (just released v0.0.3 today).
Install:
npm install -g evilscan
Usage (port list + port range):
root@debian:~# evilscan --target=192.168.0.0/24 --port=21-446,5900 --concurrency=100 --progress
192.168.0.3:5900|open
192.168.0.26:53|open
192.168.0.26:111|open
192.168.0.26:81|open
192.168.0.26:23|open
Scanned 192.168.0.253:446 (100%)
Tips:
For very fast scanning, you can play with the "concurrency" parameter (more than 1000), but you first have to raise your Linux open-file limit, e.g.:
ulimit -n 65535
Hope this helps.
None of the previous answers worked the way I needed, but I found a much lighter-weight alternative that gets me my results quickly. My next upgrade will be to specify a range of hosts based on the current subnet. I imagine I'll want to limit this to the first 254 clients so it's not overkill. Here is the code:
// LLRP DEVICE SCANNER
var net = require('net'), Socket = net.Socket;

var checkPort = function(port, host, callback) {
    var socket = new Socket(), status = null;
    // Socket connection established, port is open
    socket.on('connect', function() {
        status = 'open';
        socket.end();
    });
    socket.setTimeout(1500); // If no response, assume the port is not listening
    socket.on('timeout', function() {
        status = 'closed';
        socket.destroy();
    });
    socket.on('error', function(exception) {
        status = 'closed';
    });
    socket.on('close', function() {
        callback(null, status, host, port);
    });
    socket.connect(port, host);
};

var LAN = '192.168.1'; // Local area network to scan (this is rough)
var LLRP = 5084; // globally recognized LLRP port for RFID readers

// Scan over a range of IP addresses and execute a function each time the LLRP port is shown to be open.
for (var i = 1; i <= 255; i++) {
    checkPort(LLRP, LAN + '.' + i, function(error, status, host, port) {
        if (status == "open") {
            console.log("Reader found: ", host, port, status);
        }
    });
}
You can use the arp command to get a list of devices that are alive first. Think outside the box ;) You don't have to scan all addresses blindly.
var child = require("child_process");
var async = require("async");
var net = require("net");
var os = require("os");

function scan(port, cb) {
    var hosts = {};
    var result = [];
    async.series([
        function scan(next, c) {
            if (c == 1) {
                next();
                return;
            }
            // scan twice because arp sometimes does not list all hosts on the first try
            child.exec("arp -n | awk '{print $1}' | tail -n+2", function(err, res) {
                if (err) {
                    cb(err);
                    return;
                }
                var list = res.split("\n").filter(function(x) { return x !== ""; });
                list.map(function(x) {
                    hosts[x] = x;
                });
                scan(next, 1);
            });
        },
        function(next) {
            // if you want to scan local addresses as well
            var ifs = os.networkInterfaces();
            Object.keys(ifs).map(function(x) {
                hosts[((ifs[x][0]) || {}).address] = true;
            });
            // do the scan
            async.each(Object.keys(hosts), function(x, next) {
                var s = new net.Socket();
                s.setTimeout(1500, function() { s.destroy(); next(); });
                s.on("error", function() {
                    s.destroy();
                    next();
                });
                s.connect(port, x, function() {
                    result.push(x);
                    s.destroy();
                    next();
                });
            }, function() {
                next();
            });
        }
    ], function() {
        cb(null, result);
    });
}

scan(80, function(err, hosts) {
    if (err) {
        console.error(err);
    } else {
        console.log("Found hosts: " + hosts);
    }
});
You can also use the arp-scan utility, which is more reliable. But arp-scan needs root access to work, so it's often easier to just use arp, which is available on pretty much every Linux box.
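If you do want to drive arp-scan from Node, here is a rough sketch (assuming arp-scan is installed and the script runs as root; the tab-separated output parsing is a guess and may need adjusting for your version):

var child = require('child_process');

// --localnet scans the subnet of the default interface; requires root.
child.exec('arp-scan --localnet', function(err, stdout) {
    if (err) { console.error(err); return; }
    // Typical lines look like "192.168.0.12<TAB>aa:bb:cc:dd:ee:ff<TAB>Vendor";
    // keep only the lines that start with an IPv4 address.
    var hosts = stdout.split('\n')
        .map(function(line) { return line.split('\t')[0]; })
        .filter(function(x) { return /^\d+\.\d+\.\d+\.\d+$/.test(x); });
    console.log(hosts);
});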
Instead of just posting the link (the link might go dead at some point), I'll post the tutorial code from the site here:
var net = require('net');

// the machine to scan
var host = 'localhost';
// starting from port number
var start = 1;
// to port number
var end = 10000;
// sockets should time out ASAP to ensure no resources are wasted,
// but too low a timeout value increases the likelihood of missing open sockets, so be careful
var timeout = 2000;

// the port scanning loop
while (start <= end) {
    // it is always good to give meaningful names to your variables;
    // since the context is changing, we use `port` to refer to the current port to scan
    var port = start;
    // we create an anonymous function, pass the current port, and operate on it;
    // we encapsulate the socket creation to preserve the value of `port` for the callbacks
    (function(port) {
        // console.log('CHECK: ' + port);
        var s = new net.Socket();
        s.setTimeout(timeout, function() { s.destroy(); });
        s.connect(port, host, function() {
            console.log('OPEN: ' + port);
            // we don't destroy the socket because we want to listen to the data event;
            // the socket will self-destruct in 2 secs because of the timeout we set, so no worries
        });
        // if any data is written to the client on connection, show it
        s.on('data', function(data) {
            console.log(port + ': ' + data);
            s.destroy();
        });
        s.on('error', function(e) {
            // silently catch all errors - assume the port is closed
            s.destroy();
        });
    })(port);
    start++;
}
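One caveat with this loop (my own observation, not from the tutorial): it opens up to 10,000 sockets at once, which can run into the same EMFILE file-descriptor limit discussed at the top of this page. Here is a minimal sketch that caps the number of in-flight sockets; batchSize is an arbitrary value to tune against your ulimit -n:

const net = require('net');

const host = 'localhost';
const timeout = 2000;
const batchSize = 200; // arbitrary cap on simultaneous sockets

function scanPort(port, done) {
    const s = new net.Socket();
    s.setTimeout(timeout, () => s.destroy());
    s.connect(port, host, () => {
        console.log('OPEN: ' + port);
        s.destroy();
    });
    s.on('error', () => s.destroy());
    s.on('close', done); // fires exactly once per socket
}

function scanRange(start, end) {
    let next = start;
    let active = 0;
    (function launch() {
        while (active < batchSize && next <= end) {
            active++;
            scanPort(next++, () => {
                active--;
                launch(); // refill the batch as sockets finish
            });
        }
    })();
}

scanRange(1, 10000);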