I am attempting to figure out how long it takes node js to send n number of packets. My ultimate goal is to find the transmission delay of the network. I did a recursive function so the next packet does not send until the previous one is complete. Here is my code so far. Am I doing it correctly?
// Measures how long Node.js takes to send a fixed number of UDP packets
// back-to-back (each send starts only after the previous one completed).
var sizes = [200, 400, 600, 800, 1000]; // candidate payload sizes (currently unused)
// NOTE: the port is different
var host = "127.0.0.1", port = 33334;
var dgram = require("dgram");
var client = dgram.createSocket("udp4");
/*
client.on( "message", function( msg, rinfo ) {
var end = new Date().getTime();
var total = end - start;
console.log( "The packet came back in "+ total+" MiliSeconds");
});
*/
// client listens on a port as well in order to receive ping
client.bind(port, host);
var number_packets = 50;
var packet_size = 100;
var count = 0;
var end;
var start = new Date().getTime();
// Buffer.alloc() replaces the deprecated `new Buffer(size)` constructor
// (DEP0005) and guarantees zero-filled memory.
var message = Buffer.alloc(packet_size);
send(message); // `send` is a hoisted function declaration defined below
/**
 * Sends `message` over UDP to 127.0.0.1:33333 and, from the completion
 * callback, recursively sends the next packet until `number_packets`
 * have gone out. After the final send, logs total elapsed milliseconds.
 */
function send(message) {
    client.send(message, 0, message.length, 33333, "127.0.0.1", function (err, bytes) {
        count++;
        console.log("pkt #: " + count);
        if (count < number_packets) {
            // Previous packet done — fire the next one.
            send(message);
        } else {
            // All packets sent: report the total wall-clock time.
            end = new Date().getTime();
            console.log("total: " + (end - start));
        }
    });
}
There is a server.js that receives the packet and writes to console that there has been a packet received.
Related
I'm trying to make just a simple BitTorrent tracker for a school project. It's totally hacked together right now but I can't find where I'm going wrong. I'm wondering if I have a misunderstanding of what the server response should be. I am using node.js and express.
The server receives GET requests with the ?info_hash data no problem, and I'm able to save that info into JSON files. The server is also able to respond to the clients using bencoding. The response is a dictionary which has an interval and a peers list. Inside the list are several dictionaries, and each dictionary holds the ip and port of a peer.
Currently though none of the peers will connect to each other. I'll have my laptop on a separate network from my desktop and it will see the desktop as a potential peer, correct ip and port (as far as I know) but after a moment it drops off the peer list. I am using deluge and qBitTorrent on each client.
Here's the code for the app:
// Tracker bootstrap: load the persisted peer table once, synchronously,
// at startup; it is kept in memory and written back on each announce.
var express = require('express');
var app = express();
var fs = require("fs");
// NOTE(review): readFileSync throws if data.json is missing or is not
// valid JSON — the file must exist before the tracker starts.
var contents = fs.readFileSync("data.json");
var data = JSON.parse(contents);
/**
 * Linear-scans `data` for the torrent whose info_hash matches `hash`.
 * Returns the first matching record, or false when none matches.
 */
var findTorrent = function(data, hash) {
    var idx;
    for (idx = 0; idx < data.length; idx++) {
        var candidate = data[idx];
        if (candidate.info_hash === hash) return candidate;
    }
    return false;
}
/**
 * Reports whether any peer in `data` matches the given port/ip pair.
 * Port comparison is deliberately loose (==) because the query value
 * arrives as a string; ip comparison is strict.
 */
var findID = function(data, qPort, qip) {
    var matched = false;
    for (var i = 0; i < data.length && !matched; i++) {
        matched = (data[i].port == qPort && data[i].ip === qip);
    }
    return matched;
}
/**
 * Finds the first peer record in `data` with the given peer_id.
 * Returns the record, or false when no peer matches.
 */
var findHash = function(data, id) {
    for (var i = 0, n = data.length; i < n; i++) {
        var peer = data[i];
        if (peer.peer_id === id) return peer;
    }
    return false;
}
/**
 * Converts a percent-encoded string into a plain hex string.
 * Percent-escaped bytes ("%AB") contribute their two hex digits verbatim
 * (original case preserved); every other character contributes its char
 * code rendered as lowercase hex.
 */
function hashy (str) {
    var hexval = '';
    var i = 0;
    while (i < str.length) {
        if (str[i] === '%') {
            // Escaped byte: copy the two hex digits and skip past them.
            hexval += str[i + 1] + str[i + 2];
            i += 3;
        } else {
            // Literal character: hex-encode its char code.
            hexval += str.charCodeAt(i).toString(16);
            i += 1;
        }
    }
    return hexval;
}
// Announce endpoint: BitTorrent clients GET / with info_hash, peer_id,
// port, left, etc.; the handler registers/updates the peer and replies
// with a bencoded peer list.
app.get('/', function(req, res) {
console.log(req.query);
// Re-encode the percent-escaped info_hash as a plain hex string so it
// can be compared against the hashes stored in data.json.
var info_hash = hashy(req.query.info_hash);
console.log(info_hash);
var peer_id = decodeURIComponent(req.query.peer_id);
var escaped = escape(req.query.peer_id);
console.log('escaped ' + escaped);
console.log('decoded ' + peer_id);
console.log('normal ' + req.query.peer_id);
var ip = req.connection.remoteAddress;
// Strip the IPv4-mapped IPv6 prefix ("::ffff:1.2.3.4" -> "1.2.3.4").
if(ip.substring(0,7) == '::ffff:') {
ip = ip.substring(7);
}
//var port = req.connection.remotePort;
// Use the port the peer reports in its announce, not the ephemeral
// source port of this HTTP request.
var port = req.query.port;
console.log(ip);
var torrent = findTorrent(data, info_hash);
var completed;
if (torrent === false){
// First announce for this info_hash: create the torrent record with
// this peer as its only member. left === '0' means the peer is a seed.
if(req.query.left === '0') {
completed = true;
} else {
completed = false;
}
var obj = { "info_hash" : info_hash, "peers" : [{ "peer_id" : peer_id, "ip" : ip, "port" : port, "completed" : completed }]};
data.push(obj);
torrent = obj;
//console.log(obj.peers);
}
else {
//figure out if completed
if(req.query.left == '0') {
completed = true;
} else {
completed = false;
}
// Known torrent: either add this peer or refresh its ip/port/state.
var peer = findHash(torrent.peers, peer_id);
if(peer === false){
var obj = { "peer_id" : peer_id, "ip" : ip, "port" : port, "completed" : completed };
torrent.peers.push(obj);
}
else {
peer.ip = ip;
peer.port = port;
peer.completed = completed;
}
}
if(torrent) {
var response = bencode(torrent);
}
else {
// Unreachable in practice: `torrent` is always an object by this point.
response = 'error';
}
//console.log(data);
// Persist the updated peer table on every announce. NOTE(review): the
// synchronous write blocks the event loop on each request.
fs.writeFileSync("data.json", JSON.stringify(data, null, 2), 'utf-8');
res.send(response);
});
/**
 * Builds the bencoded tracker response for `torrent`.
 *
 * FIX: bencoded dictionary keys MUST be sorted as raw strings (BEP 3).
 * The original emitted interval/min interval before complete/incomplete,
 * which some clients reject. Correct order:
 *   complete < incomplete < interval < min interval < peers
 * Inside each peer dict the keys are already sorted ("ip" < "port").
 *
 * @param {{peers: Array<{ip: string, port: (string|number), completed: boolean}>}} torrent
 * @returns {string} bencoded dictionary
 */
var bencode = function(torrent) {
    // Tally seeds vs. leechers for the complete/incomplete counts.
    var complete = 0;
    var incomplete = 0;
    for (var i = 0; i < torrent.peers.length; i++) {
        if (torrent.peers[i].completed === true) {
            complete++;
        } else {
            incomplete++;
        }
    }
    var response = 'd';
    response += '8:completei' + complete + 'e';
    response += '10:incompletei' + incomplete + 'e';
    response += '8:intervali600e';
    response += '12:min intervali30e';
    response += '5:peersl';
    for (var j = 0; j < torrent.peers.length; j++) {
        var peer = torrent.peers[j];
        response += 'd';
        response += '2:ip' + peer.ip.length + ':' + peer.ip;
        response += '4:port' + 'i' + peer.port + 'e';
        response += 'e';
    }
    // Close the peers list, then the outer dictionary.
    response += 'ee';
    console.log(response);
    return response;
}
// Start the tracker's HTTP listener on port 4000.
app.listen(4000, function() {
console.log('Example app listening on port 4000!');
});
I'm able to connect to the tracker hosted on Amazon AWS and qBitTorrent reports it as "working". I also can see the GET request going out and the server response coming in via wireshark. The request has the following bencoded string which I believe is all that's necessary:
d8:intervali600e12:min intervali30e8:completei2e10:incompletei3e5:peersld2:ip13:73.66.138.2174:porti8999eed2:ip13:73.66.138.2174:porti63014eed2:ip13:73.66.138.2174:porti8999eed2:ip13:73.25.106.1804:porti6881eed2:ip13:73.66.249.1414:porti8999eeee
According to www.bittorrent.org all that is necessary in the response is an interval and a peer list mapped to a list of peers. Each peer needs id, ip, and port.
I've switched the port to the one that the client is reporting in the request and made sure that my torrent client has its port forwarded, and it seems to be working now. Though I'm still going to continue working on this. Currently I don't have a way to remove peers when they stop seeding/leeching.
Careful not to have a trailing carriage return in the tracker response. That makes the bencoded response invalid and some clients don't like it.
This seems to mostly be an issue between the peers and not the tracker. If they are both NATed, at least one of them needs to have the port forwarded through the NAT for them to be able to connect to each other.
The port in the tracker response should be the one that the peer reports in the request.
The bencoded dict in the tracker response is not sorted; the keys complete, incomplete, interval, min interval, peers should be sorted as raw strings.
Some clients may have problems if they aren't.
Another thing is, the tracker response specified in BEP3, while still correct, has been obsoleted by the compact=1 response. All modern clients support 'compact'. While I'm not aware of any client that has dropped support for the legacy way, some trackers have.
Bram Cohen has said that "... non-support for the 'compact' extension is considered outright malbehavior today." post #5
A good resource about the BitTorrent protocol is https://wiki.theory.org/BitTorrentSpecification
This answer is an edited version of what was originally posted as comments.
I have a multiplayer game where my server uses nodejs and TCPSocket (net.createServer) to communicate with a client.
I have thousands of clients connecting to the server and many people are complaining that they are constantly being disconnected from the server.
Here is how my server handles the connections now:
Init:
var net = require("net");
// Multiplayer game server: one TCP socket per client; the wire protocol
// frames messages with a NUL ('\0') delimiter (see onData).
// NOTE(review): `server` is assigned without `var` — an implicit global.
server = net.createServer(function(socket) {
// Idle clients time out after 15s. "timeout" only fires the handler; it
// does NOT close the socket by itself — the handler must destroy it.
socket.setTimeout(15000);
socket.setKeepAlive(true);
// Per-socket state: partial-frame buffer and outbound message queue.
socket.myBuffer = "";
socket.msgsQue = [];
socket.on("data", onData);
socket.on("error", onError);
socket.on("end", onClientDisconnect);
socket.on("timeout", onClientDisconnect);
});
server.listen(port);
Sending to client:
// Queue-and-flush send path. NOTE(review): fragment from inside a method
// — assumes `this` wraps a net.Socket at `this.socket`; confirm caller.
var stringData = JSON.stringify({name:event, message:data});
if (!this.socket.msgsQue || typeof this.socket.msgsQue == "undefined")
this.socket.msgsQue = [];
// New messages go on the front; the loop below walks from the back so
// the oldest queued message is written first (FIFO delivery).
this.socket.msgsQue.unshift(stringData);
var i = this.socket.msgsQue.length;
while(i--) {
if (this.socket.writable) {
var elem = this.socket.msgsQue[i];
// NUL-delimited framing — must match the parser in onData.
this.socket.write(elem+"\0");
this.socket.msgsQue.splice(i, 1);
} else {
//Unable to write at index "i"
// NOTE(review): while the socket stays unwritable the queue grows
// without bound and no disconnect fires — matches the reported symptom.
break;//will send the rest on the next attempt
}
}
When disconnected
/**
 * Shared handler for a socket's "end" and "timeout" events.
 * Releases the per-socket buffers and destroys the socket ("timeout"
 * does not close the connection by itself).
 *
 * Cleanup note: `delete` alone removes the properties; the original's
 * pre-assignment of ""/[] immediately before each delete was redundant.
 */
var onClientDisconnect = function() {
    delete this.myBuffer;
    delete this.msgsQue;
    this.destroy();
}
Receiving from client
/**
 * "data" handler: appends the incoming chunk to this socket's buffer and
 * dispatches every complete NUL-delimited message to onMessage, keeping
 * any trailing partial frame buffered for the next chunk.
 */
var onData = function(data) {
    if (!data || !data.toString()) return;
    this.myBuffer += data.toString();
    // Drain all complete frames currently sitting in the buffer.
    var delim = this.myBuffer.indexOf('\0');
    while (delim !== -1) {
        var frame = this.myBuffer.substring(0, delim);
        this.myBuffer = this.myBuffer.substring(delim + 1);
        onMessage(frame, this);
        delim = this.myBuffer.indexOf('\0');
    }
}
A problem I notice is that msgsQue becomes huge because the socket is not writable, but the disconnect handler is not fired (or is fired much later).
Can you give me some advice on how to optimize this?
I noticed that sometimes I get disconnected, but I can ping the server, so it is definitely a server-related problem. Can it be because of high load on the server itself?
(I do not want to use socket.io because the last year I had many problems with it like memory leaking, freezing the server app, no support, etc..)
I use PUB/SUB ZeroMQ pattern.
The system consists of a Web Server (Publisher), clustered TCP servers (Subscribers), and external applications (clients, which connect to the TCP servers).
Huge amount of external clients connect to every TCP server. Every external client has unique peerId which I use as topic in Publisher. For some management purposes I send messages to TCP servers ( like remove peer, change, etc. ). But also I need to send messages from TCP server to Web Server ( connect, disconnect, error ). I didn't find right way how to do it. Can anybody suggest how to do it correctly?
Update 1
It looks like using ROUTER/DEALER pattern is the most convenient for that.
Some comments about scripts.
External clients connect to the TCP servers (cluster) and send a unique peerId; on the TCP server side the TCP socket is cached by that unique peerId. Then the TCP server sends a peerId message over a ZeroMQ socket to the Web Server. The Web Server caches the envelope by peerId. Every n milliseconds the Web Server sends messages to a random peer (generating a 'peerId'). The TCP server receives these messages, gets the correct TCP socket from the cache, and forwards them to the clients. The clients count the messages and every n milliseconds send the count to the TCP server, which passes it to the Web Server over the ZeroMQ socket. On the Web Server, the counts of sent and received messages are printed to the console every n milliseconds.
Test js script of server part:
// Load-test server: the master forks `count` TCP-server workers (ZeroMQ
// DEALER) plus one web-server worker (ZeroMQ ROUTER). External clients
// attach to the TCP servers; per-peer traffic flows over ZeroMQ.
var cluster = require('cluster'),
zmq = require('zmq'),
net = require('net'),
zmqport = 'tcp://127.0.0.1:12345';
var count = 10; // number of TCP-server workers
var countPeers = 10000; // size of the peer-id space the web server targets
var interval = 1; // ms between the web server's send bursts
if (cluster.isMaster) {
// Fork the workers; env vars select each worker's role below.
for (var i = 0; i < count; i++) cluster.fork({
TCP_SERVER: 1
});
cluster.fork({
WEB_SERVER: 1
});
cluster.on('death', function (worker) {
console.log('worker ' + worker.pid + ' died');
});
} else {
if (process.env.TCP_SERVER) {
// TCP-server role: cache client sockets by peerId and bridge them to
// the web server through a DEALER socket.
var sockets = Object.create(null);
var socket = zmq.socket('dealer');
// Stable identity lets the ROUTER address this worker's envelope.
socket.identity = 'process-' + process.pid;
socket.connect(zmqport);
socket.on('message', function (peerIdBuffer) {
var peerId = peerIdBuffer.toString();
// Forward the web server's message to the matching TCP client as a
// 4-byte big-endian value.
if (typeof sockets[peerId] !== 'undefined') {
var buffer = new Buffer(4);
buffer.writeUInt32BE(1, 0);
sockets[peerId].write(buffer);
}
});
var server = net.createServer(function (tcpsocket) {
tcpsocket.on('data', function (data) {
// First payload from a client is its peerId: register the socket and
// notify the web server. Subsequent payloads are received-counts.
if (!tcpsocket.peerId) {
var peerId = data.toString();
sockets[peerId] = tcpsocket;
tcpsocket.peerId = peerId;
return socket.send(['id', data]);
}
return socket.send(['count', data]);
});
});
server.listen('13333', '0.0.0.0');
} else {
// Web-server role: ROUTER socket. Remembers which DEALER envelope owns
// each peerId and pushes messages to random peers on a timer.
var countMessagesSended = 0;
var countMessagesReceived = 0;
var socket = zmq.socket('router');
var clients = Object.create(null);
socket.bind(zmqport, function (err) {
if (err) throw err;
setInterval(function () {
for (var i = 0; i < countPeers; i++) {
// Pick a random registered peer and route through its TCP server.
var topic = Math.floor(Math.random() * countPeers) + '-peer';
if (typeof clients[topic] !== 'undefined') {
countMessagesSended++;
socket.send([clients[topic], topic]);
}
}
}, interval);
});
socket.on('message', function (envelope, messageId, data) {
switch (messageId.toString()) {
case "id":
// Remember which DEALER envelope to route through for this peer.
clients[data.toString()] = envelope.toString();
break;
case "count":
// Clients report received-message counts as big-endian uint32.
countMessagesReceived += data.readUInt32BE(0);
break;
}
});
// Periodic throughput report; counters reset every 5 seconds.
setInterval(function () {
console.log('%s messages have been sended, %s - received', countMessagesSended, countMessagesReceived);
countMessagesSended = 0;
countMessagesReceived = 0;
}, 5000);
}
}
Test js script for clients:
// Load-test client: forks `count` workers, each opening 1000 TCP
// connections. Every connection registers a unique "<n>-peer" id, counts
// inbound messages, and reports the count upstream every 5 seconds.
var cluster = require('cluster'),
net = require('net');
var count = 10;
if (cluster.isMaster) {
for (var i = 0; i < count; i++) cluster.fork({
CLUSTER: i
});
cluster.on('death', function (worker) {
console.log('worker ' + worker.pid + ' died');
});
} else {
var clientspernode = 1000;
// Each worker owns a disjoint slice of the peer-id space.
var offset = parseInt(process.env.CLUSTER, 10);
for (var j = (offset) * clientspernode; j < (offset + 1) * clientspernode; j++) {
// IIFE captures the per-connection value of j.
(function (j) {
var countMessages = 0;
var client = net.connect({
port: 13333,
host: '127.0.0.1'
}, function () {
// First write registers this connection's peerId with the server.
client.write(j + '-peer');
});
client.on('data', function (buffer) {
// Chunks may coalesce; estimate message count from the byte length.
countMessages += Math.ceil(buffer.length / 8);
});
// NOTE(review): errors are deliberately swallowed here so one failed
// connection doesn't kill the whole load-test worker.
client.on('error', function () {
});
// Report and reset the received counter every 5s (big-endian uint32).
setInterval(function () {
var buf = new Buffer(4);
buf.writeUInt32BE(countMessages, 0);
client.write(buf);
countMessages = 0;
}, 5000);
})(j);
}
}
The following code generates a steady volume of MAX events and uses the sendEvent() function to send a document to a UDP server.
Currently, for some reason, the ack rate (last parameter in the single console.log call) starts at 100% and gradually reduces to about ~50%, then there's a hiccup and it goes back to 100%, then back to ~50% over and over again. I'm wondering why this happens, why doesn't it stay on 100% or at least close to it? Why does it reduce to around 50% and then goes back to 100%? My goal is to keep it as close as possible to 100% at all times.
// UDP event generator: pushes timestamped JSON documents toward a local
// UDP server while tracking send/ack counts for the rate report in run().
var dgram = require('dgram');
var client = dgram.createSocket('udp4');
var HOST = '127.0.0.1';
var PORT = 3333;
var MAX = 100; // target events-per-second cap (0 = uncapped)
var counter = 0;
var start = new Date().getTime();
var eps = 0; // measured events per second
var sent = 0; // packets handed to client.send()
var ack = 0; // send callbacks that completed without error

/**
 * Serializes a timestamped document and sends it as one UDP datagram.
 * `sent` is incremented immediately; `ack` when the send callback
 * confirms the datagram was handed off.
 */
function sendEvent() {
    var doc = {
        timestamp: new Date().getTime()
    };
    // Buffer.from() replaces the deprecated `new Buffer(string)`
    // constructor (DEP0005); the bytes produced are identical.
    var message = Buffer.from(JSON.stringify(doc));
    sent++;
    client.send(message, 0, message.length, PORT, HOST, function(err, bytes) {
        if (err) throw err;
        ack++;
    });
}
/**
 * Drives sendEvent() in a tight loop, throttled so the measured
 * events-per-second stays under MAX, logging every 100k iterations and
 * yielding to the event loop every 1000 iterations.
 *
 * BUG FIX: the original called setInterval(run, 1) on every 1000th
 * iteration. setInterval installs a NEW repeating timer each time and
 * none were ever cleared, so concurrent run() timers accumulated without
 * bound — the likely cause of the ack rate sawtoothing between ~100% and
 * ~50%. setTimeout schedules exactly one deferred continuation instead.
 */
function run() {
    counter++;
    var lapsedSec = (new Date().getTime() - start) / 1000;
    eps = sent / lapsedSec;
    // MAX === 0 means "uncapped": compare eps against a value it can
    // never reach so sendEvent() always fires.
    if (eps < (MAX === 0 ? eps + 1 : MAX))
        sendEvent();
    if (counter % 100000 === 0) {
        counter = 0;
        console.log('eps', eps, counter, sent, (ack / sent * 100).toFixed(2));
    }
    if (counter % 1000 === 0) {
        // Yield so queued send callbacks (and other I/O) can run.
        setTimeout(run, 1);
    } else {
        run();
    }
}
run();
Writing this little domain search app: it should sequentially search the .com of each item in an array, but it keeps searching for test1. Even if I do a console.log within the search function, it tells me the value of x is test2 and test3. Do I need to remove the listener or something?
I get the following output
domain test1.com
Domain Name: TEST1.COM
domain test2.com
Domain Name: TEST1.COM
domain test3.com
Domain Name: TEST1.COM
app.js
// Sequential whois lookups over a SINGLE shared socket (port 43).
var port = 43;
var net = require('net');
var host = 'whois.internic.net';
var dotCom = new net.Socket();
var c = 0; // counts 'data' events across ALL searches (never reset — see search())
var connections = 0; // in-flight connection count, gates chkconnections()
var dotComStatus; // last parsed "Domain Name:" line
dotCom.setEncoding('ascii');
var searches = ['test1', 'test2', 'test3'];
// Kick off the first query; later ones chain via chkconnections().
search(searches.shift());
/**
 * When no connection is in flight and queries remain queued, schedules
 * the next search on the following tick. (`z` is unused.)
 */
function chkconnections(z) {
    if (connections > 0) return;
    if (searches.length < 1) return;
    process.nextTick(function() {
        search(searches.shift());
    });
}
// Issues one whois query on the SHARED dotCom socket.
// NOTE(review): every call adds another 'data' listener to the same
// socket, and `c` is never reset, so only the first search ever reaches
// c == 2 — which is why every lookup prints TEST1.COM's result (the
// corrected version below uses a fresh socket per query).
function search(x) {
var q = "domain " + x + ".com\r\n";
dotCom.connect(port, host, function() {
dotCom.write(q);
console.log(q);
connections++;
});
dotCom.on('data', function(data) {
c++;
if (c == 2) {
dotComStatus = data.split('\n')[1];
// NOTE(review): registering 'close' inside 'data' means it is never
// attached if the socket closes before any data arrives.
dotCom.on('close', function() {
console.log(dotComStatus);
connections--;
chkconnections();
});
}
});
}
There are several obvious problems with this code. Firstly putting the close event inside the data event is a bad idea. If the connection closed before data was received that section of code would never be reached.
Next is there is a big problem with the section with
c++;
if (c == 2)
Since you never reset c to 0 the next line dotComStatus = data.split('\n')[1]; is never executed. But then the socket closes and the event closed is triggered. And this is executed again.
console.log(dotComStatus);
connections--;
chkconnections();
But the value of dotComStatus has not changed since c was equal to 0. There are many examples of how to do this connect/data/end flow that is common in NodeJS.
// Corrected version: whois each queued name in turn, using one fresh
// socket per query (see search() below).
var port = 43;
var net = require('net');
var host = 'whois.internic.net';
var searches = ['test1', 'test2', 'test3'];
search(searches.shift());
/**
 * Starts the next queued lookup, if any remain. (`z` is unused.)
 */
function chkconnections(z) {
    if (searches.length === 0) return;
    search(searches.shift());
}
/**
 * Looks up `x`.com against the whois server on a DEDICATED socket,
 * buffers the whole response, prints the "Domain Name:" line when the
 * server half-closes, then chains to the next queued search.
 *
 * A fresh Socket per query avoids the original bug where 'data'/'close'
 * listeners piled up on one shared socket.
 */
function search(x) {
    var dotCom = new net.Socket();
    dotCom.setEncoding('ascii');
    var q = "domain " + x + ".com\r\n";
    dotCom.connect(port, host, function() {
        dotCom.write(q);
    });
    var data = ""; // holding place until socket closes
    dotCom.on('data', function(chunk) {
        data += chunk; // add chunk to data
    });
    dotCom.on("end", function() {
        // socket closed
        // FIX: `var` keeps dotComStatus local; the original assigned an
        // implicit global (would throw under strict mode).
        var dotComStatus = data.split('\n')[7]; // Should be 'Domain Name: blah'
        console.log(dotComStatus);
        chkconnections(); // move on to next
    });
};