How can I implement a custom protocol on a Node.js net client?
The problem is:
I want to connect to a server. That server has a simple protocol.
Each packet has a length prefix, meaning the first byte says how long the packet is.
How can I implement that?
How can I, for example, read the first byte to get the packet length and then read the rest of the packet?
var net = require('net');
var client = new net.Socket();
client.connect(2204, 'myexampledomain.com', function() {
console.log('Connecting...');
});
client.on('data', function(data) {
console.log('DATA: ' + data);
});
client.on('end', function() {
console.log('end');
});
client.on('timeout', function() {
console.log('timeout');
});
client.on('drain', function() {
console.log('drain');
});
client.on('error', function(err) {
console.log('error', err);
});
client.on('close', function() {
console.log('Connection closed');
});
client.on('connect', function() {
console.log('Connect');
client.write("Hello World");
});
You'll have to maintain an internal buffer holding received data and check if it has length bytes before slicing the packet from it. The buffer must be concatenated with received data until it has length bytes and emptied on receiving a full packet. This can be better handled using a Transform stream.
This is what I used in my JSON-RPC implementation. Each JSON packet is length-prefixed. The message doesn't have to be JSON – you can replace the call to this.push(JSON.parse(json.toString()));.
var Transform = require('stream').Transform;
function JsonTransformer(options) {
if (!(this instanceof JsonTransformer)) {
return new JsonTransformer(options);
}
Transform.call(this, {
objectMode: true
});
/*Transform.call(this);
this._readableState.objectMode = false;
this._writableState.objectMode = true;*/
this.buffer = new Buffer(0);
this.lengthPrefix = options.lengthPrefix || 2;
this._readBytes = {
1: 'readUInt8',
2: 'readUInt16BE',
4: 'readUInt32BE'
}[this.lengthPrefix];
}
JsonTransformer.prototype = Object.create(Transform.prototype, {
constructor: {
value: JsonTransformer,
enumerable: false,
writable: false
}
});
function transform() {
var buffer = this.buffer,
lengthPrefix = this.lengthPrefix;
if (buffer.length > lengthPrefix) {
this.bytes = buffer[this._readBytes](0);
if (buffer.length >= this.bytes + lengthPrefix) {
var json = buffer.slice(lengthPrefix, this.bytes + lengthPrefix);
this.buffer = buffer.slice(this.bytes + lengthPrefix);
try {
this.push(JSON.parse(json.toString()));
} catch(err) {
this.emit('parse error', err);
}
transform.call(this);
}
}
}
JsonTransformer.prototype._transform = function(chunk, encoding, next) {
this.buffer = Buffer.concat([this.buffer, chunk]);
transform.call(this);
next();
}
JsonTransformer.prototype._flush = function(done) {
console.log('Flushed...');
done();
}
var server = require('net').createServer(function (socket) {
// create a separate transformer per connection so buffers are not shared between clients
socket.pipe(new JsonTransformer({lengthPrefix: 2})).on('data', console.log);
});
server.listen(3000);
module.exports = JsonTransformer;
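The sending side is just the mirror image of this: write the length prefix first, then the payload. Here is a minimal sketch of that framing, assuming the same 2-byte big-endian prefix as above and a Node version that has Buffer.alloc/Buffer.from (the helper name writeFramed is only for illustration):
// Frame a message with a 2-byte big-endian length prefix and write it to the socket.
function writeFramed(socket, obj) {
    var payload = Buffer.from(JSON.stringify(obj));
    var header = Buffer.alloc(2);
    header.writeUInt16BE(payload.length, 0);
    return socket.write(Buffer.concat([header, payload]));
}
// Usage with the client from the question: writeFramed(client, { hello: 'world' });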
I have a Node.js net TCP socket section of code that I would like to convert from using a callback to Rx.
It looks like this in the feed.js module:
var net = require('net');
var server = net.createServer(function(socket) {
...
// Handle incoming messages from clients.
socket.on('data', function (data) {
broadcast(data, socket);
});
...
}
function broadcast(message, sender)
{
...
onChangeHandler(stock.symbol, 'stock', stock);
}
}
function start(onChange) {
onChangeHandler = onChange;
}
exports.start = start;
server.listen(....);
Then the client of the above call registers a callback:
feed.start(function(room, type, message) {
//...Do something with the message
});
I would like to convert this to use an Rx Observable/Observer. I see that there is a way to make an observable stream from the web socket (although it uses a bidirectional Subject which I don't need):
function fromWebSocket(address, protocol) {
var ws = new WebSocket(address, protocol);
// Handle the data
var observable = Rx.Observable.create(function (obs) {
// Handle messages
ws.onmessage = obs.onNext.bind(obs);
ws.onerror = obs.onError.bind(obs);
ws.onclose = obs.onCompleted.bind(obs);
// Return way to unsubscribe
return ws.close.bind(ws);
});
var observer = Rx.Observer.create(function (data) {
if (ws.readyState === WebSocket.OPEN) { ws.send(data); }
});
return Rx.Subject.create(observer, observable);
}
var socketSubject = fromWebSocket('ws://localhost:9999', 'sampleProtocol');
// Receive data
socketSubject.subscribe(
function (data) {
// Do something with the data
},
function (error) {
// Do something with the error
},
function () {
// Do something on completion
});
// Send data
socketSubject.onNext(42);
What is the equivalent for a net socket? If there is a standard library to use, that is fine.
My initial attempt is below, but I don't know how to tie Rx and the socket functions together into an onNext:
var net = require('net');
function fromNetSocket(address, protocol) {
var ns = net.createServer(function(socket) {
socket.on('disconnect', function () { // This seems like it maps to onclose
console.log('User disconnected. %s. Socket id %s', socket.id);
});
// Handle incoming messages from clients.
socket.on('data', function (data) { //this should map to onnext
});
// Handle the data
var observable = Rx.Observable.create(function (obs) {
// Handle messages
ns.onmessage = obs.onNext.bind(obs);
ns.onerror = obs.onError.bind(obs);
ns.onclose = obs.onCompleted.bind(obs);
// Return way to unsubscribe
return ns.close.bind(ns);
});
});
};
Try the following:
const createSubject = () => {
return Rx.Observable.create((observer) => {
const socket = net.connect({port: 1705}, () => {
log.i('Connected to Server!');
let socketObservable = Rx.Observable.create((observer) => {
socket.on('data', (data) => observer.next(JSON.parse(data)));
socket.on('error', (err) => observer.error(err));
socket.on('close', () => observer.complete());
});
let socketObserver = {
next: (data) => {
if (!socket.destroyed) {
socket.write(`${JSON.stringify(data)}\r\n`);
}
}
};
const subject = Rx.Subject.create(socketObserver, socketObservable);
observer.next(subject);
observer.complete();
});
});
};
Then you can use the subject like this:
createSubject().subscribe((con) => {
con.subscribe((data) => console.log(data));
con.next({
id: utils.UUID(),
jsonrpc: '2.0',
method: 'Server.GetRPCVersion'
});
});
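One refinement worth noting: the function passed to Rx.Observable.create can return a teardown function, so the inner observable can also clean up the TCP socket when its subscriber unsubscribes. A minimal sketch, assuming the same RxJS 5 API and socket variable as in the answer above:
let socketObservable = Rx.Observable.create((observer) => {
    socket.on('data', (data) => observer.next(JSON.parse(data)));
    socket.on('error', (err) => observer.error(err));
    socket.on('close', () => observer.complete());
    // The returned teardown runs on unsubscribe and closes the underlying connection.
    return () => socket.destroy();
});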
I implemented a simple binary transfer with Socket.io to upload a file from the client to the server. I thought it worked, but then I realized the size of the resulting file was different. When writableStream.write returned false, I attached a drain event handler so I could wait until it was safe to write again and then continue, but each time a drain event fired, the file ended up larger by the number of drain events times the 10240-byte chunk size I use for each transmission.
Before showing the code, let me explain the flow:
Client requests a file transfer
Server creates an empty file (a writable stream) and grants the transmission
Client transfers data chunks until it is done
Server writes each chunk to the writable stream
Client ends the transmission once everything has been sent
Server closes the writable stream.
Done!
This is the server-side code:
var writeStream = null;
var fileSize = 0;
var wrote = 0;
socket.on('clientRequestFileTransfer', (fileInfo) => {
console.log(`Client request file transfer: ${fileInfo.name}(${fileInfo.size})`);
fileSize = fileInfo.size;
wrote = 0;
writeStream = fs.createWriteStream(__dirname + '/' + fileInfo.name);
writeStream.on('close', () => {
console.log('Write stream ended.');
});
console.log('File created.');
socket.emit('serverGrantFileTransfer');
});
socket.on('clientSentChunk', (chunk) => {
function write() {
let writeDone = writeStream.write(chunk);
if(!writeDone) {
console.log('Back pressure!');
return writeStream.once('drain', write);
}
else {
wrote += chunk.length;
console.log(`Wrote chunks: ${chunk.length} / ${wrote} / ${fileSize}`);
socket.emit('serverRequestContinue');
}
}
write();
});
socket.on('clientFinishTransmission', () => {
writeStream.end();
console.log('Transmission complete!');
});
And here is the client (with added code for reading the binary file):
var fileEl = document.getElementById('file');
fileEl.onchange = function() {
var file = fileEl.files[0];
if(!file) return;
var socket = io('http://localhost:3000');
socket.on('connect', function() {
var fileReader = new FileReader();
fileReader.onloadend = function() {
var bin = fileReader.result;
var chunkSize = 10240;
var sent = 0;
// let the server know the name and size of the file
socket.once('serverGrantFileTransfer', () => {
function beginTransfer() {
if(sent >= bin.byteLength) {
console.log('Transmission complete!');
socket.emit('clientFinishTransmission');
return;
}
var chunk = bin.slice(sent, sent + chunkSize);
socket.once('serverRequestContinue', beginTransfer);
socket.emit('clientSentChunk', chunk);
sent += chunk.byteLength;
console.log('Sent: ' + sent);
}
beginTransfer();
});
socket.emit('clientRequestFileTransfer', {
name: file.name,
size: file.size
});
};
fileReader.readAsArrayBuffer(file);
});
};
I tested this code with a 4,162,611-byte file and there was 1 failed write (1 back-pressure event). After the upload, I checked the size of the created file: it was 4,172,851 bytes, which is 10240 bytes bigger than the original, i.e. the size of one chunk (10240).
Sometimes the write fails 2 times, and then the size is 20480 bytes bigger than the original, which is double the size of the chunk I sent.
I double-checked my back-pressure code, but it seems fine to me. I'm using Node v6.2.2 and Socket.io v1.6.0, tested from the Chrome browser. Is there something I missed? Or did I misunderstand back pressure?
Any advice will be very appreciated.
UPDATE
Looks like when back pressure happens, the same data is written twice (as I said in the comment). So I modified the code like this:
socket.on('clientSentChunk', (chunk) => {
function write() {
var writeDone = writeStream.write(chunk);
wrote += chunk.length;
if(!writeDone) {
console.log('**************** Back pressure ****************');
// writeStream.once('drain', write);
// no rewrite, just continue transmission
writeStream.once('drain', () => socket.emit('serverRequestContinue'));
}
else {
console.log(`Wrote chunks: ${chunk.length} / ${wrote} / ${fileSize}`);
socket.emit('serverRequestContinue');
}
}
write();
});
It worked. I'm quite confused, because I thought that when a writable stream fails to write (write returns false), the data would not be written to the stream, but apparently that's not the case. Does anybody know more about this?
I guess the problem is in how you create the bin object in var bin = ...;. Could you please show that code here?
Update
Here is the backend code:
var express = require('express');
var app = express();
var server = app.listen(80);
var io = require('socket.io');
var fs = require('fs');
io = io.listen(server);
app.use(express.static(__dirname + '/public'));
app.use(function(req, res, next) {
//res.send({a:1})
res.sendFile(__dirname + '/socket_test_index.html');
});
io.on('connection', function(client) {
console.log('Client connected...', client);
client.on('join', function(data) {
//console.log(data);
});
setInterval(()=>{
client.emit('news', 'news from server');
}, 10000)
});
io.of('/upload', function(client){
console.log('upload')
logic(client);
})
function logic(socket) {
var writeStream = null;
var fileSize = 0;
var wrote = 0;
socket.on('clientRequestFileTransfer', (fileInfo) => {
console.log(`Client request file transfer: ${fileInfo.name}(${fileInfo.size})`);
fileSize = fileInfo.size;
wrote = 0;
writeStream = fs.createWriteStream(__dirname + '/' + fileInfo.name);
writeStream.on('close', () => {
console.log('Write stream ended.');
});
console.log('File created.');
socket.emit('serverGrantFileTransfer');
});
socket.on('clientSentChunk', (chunk) => {
function write() {
var writeDone = writeStream.write(chunk);
if(!writeDone) {
console.log('Back pressure!');
return writeStream.once('drain', write);
}
else {
wrote += chunk.length;
console.log(`Wrote chunks: ${chunk.length} / ${wrote} / ${fileSize}`);
socket.emit('serverRequestContinue');
}
}
write();
});
socket.on('clientFinishTransmission', () => {
writeStream.end();
console.log('Transmission complete!');
});
socket.emit('serverGrantFileTransfer', {});
}
Here is the HTML code:
<script src="/socket.io/socket.io.js"></script>
<script>
var socket = io('http://localhost');
socket.on('news', function (data) {
console.log(data);
});
socket.on('connect', function(data) {
socket.emit('join', 'Hello World from client');
});
</script>
<input type="file" id="file" />
<script>
var fileEl = document.getElementById('file');
fileEl.onchange = function() {
var file = fileEl.files[0];
if(!file) return;
var socket = io('http://localhost/upload');
socket.on('connect', function() {
var fileReader = new FileReader();
fileReader.onloadend = function() {
var bin = fileReader.result;
var chunkSize = 10240;
var sent = 0;
// let the server know the name and size of the file
socket.once('serverGrantFileTransfer', () => {
function beginTransfer() {
if(sent >= bin.byteLength) {
console.log('Transmission complete!');
socket.emit('clientFinishTransmission');
return;
}
var chunk = bin.slice(sent, sent + chunkSize);
socket.once('serverRequestContinue', beginTransfer);
socket.emit('clientSentChunk', chunk);
sent += chunk.byteLength;
console.log('Sent: ' + sent);
}
beginTransfer();
});
socket.emit('clientRequestFileTransfer', {
name: file.name,
size: file.size
});
};
fileReader.readAsArrayBuffer(file);
});
};
</script>
Copy them to the root directory of an Express project. I tested with two pictures and one .pdf file; all of them were transferred with exactly the same number of bytes.
In some demos, they close the connection in a timer function, like the following:
var amqp = require('amqplib/callback_api');
amqp.connect('amqp://localhost', function(err, conn) {
conn.createChannel(function(err, ch) {
var q = 'hello';
var msg = 'Hello World!';
ch.assertQueue(q, {durable: false});
ch.sendToQueue(q, new Buffer(msg));
console.log(" [x] Sent %s", msg);
});
setTimeout(function() { conn.close(); process.exit(0) }, 500);
});
Is there some way to close the connection once the message has been sent successfully?
I coded it something like this:
var q = 'TEST';
var open = require('amqplib').connect('amqp://user:password@localhost:5672');
var sleep = require('sleep');
//Publisher
function publish() {
let ch;
var connection;
let publisher = open.then(function(conn) {
connection=conn;
return conn.createChannel();
}).then(function(chann) {
ch = chann;
ch.purgeQueue(q);
return ch.assertQueue(q,{noCreate: true});
}).then(function(ok) {
return ch.sendToQueue(q, new Buffer('HELLO WORLD'),{noAck:true});
}).then(function(ok) {
console.log('MESSAGE_SENT', ok);
return ok; //resolve('MESSAGE_SENT')
}).then((ok)=>{
console.log('Assert queue response:', ok);
setTimeout(function() { connection.close();}, 500);
})
.catch(function(err) {
console.log(err);
console.error('Unable to connect PUBLISHER');
process.exit(1);
//reject('MESSAGE_SENT')
});
return publisher;
}
publish();
module.exports = {
publish
};
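If you want the connection to close only after the broker has actually accepted the message, rather than after a fixed timeout, a confirm channel can be used. A minimal sketch, assuming amqplib's promise API with createConfirmChannel and waitForConfirms:
var amqp = require('amqplib');

function publishAndClose(msg) {
    var connection;
    return amqp.connect('amqp://localhost').then(function (conn) {
        connection = conn;
        // A confirm channel lets us wait for broker acknowledgements.
        return conn.createConfirmChannel();
    }).then(function (ch) {
        return ch.assertQueue('hello', { durable: false }).then(function () {
            ch.sendToQueue('hello', Buffer.from(msg));
            // Resolves once every outstanding publish has been confirmed.
            return ch.waitForConfirms();
        });
    }).then(function () {
        console.log(' [x] Sent and confirmed');
        return connection.close();
    });
}

publishAndClose('Hello World!');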
I was given a task to send a JSON string from client to server and from server to client whenever there is a new record to send.
I decided to build a TCP connection between the server and client to transfer the data (suggest a better approach in Node.js if there is one).
The problem is that I was supposed to use a delimiter to separate one JSON string from the next, and I am afraid the JSON string could contain the delimiter inside the object. I am looking for a better way to separate two JSON strings.
Below is my code. Please help me.
Client
var net = require('net')
, client = new net.Socket();
var chunk = ''
, dlim_index = -1
, delimit = '~~';
client.connect(config.Port, config.IpAddress, function () {
console.log('Server Connected');
client.write('CLIENTID:' + process.argv[2]);
client.write(delimit);
});
client.on('data', function (data) {
var recvData = data.toString().trim();
chunk += recvData;
dlim_index = chunk.indexOf(delimit);
console.log(data);
while (dlim_index > -1) {
var useData = chunk.substring(0, dlim_index);
if (useData == 'SUCCESS') {
controller.listenOutQueue(function (dataToSend) {
var object = JSON.parse(dataToSend);
client.write(dataToSend);
client.write(delimit);
});
}
else {
var record = JSON.parse(useData);
controller.insertIntoQueue(record, function (status) {
});
}
chunk = chunk.substring(dlim_index + 2);
dlim_index = chunk.indexOf(delimit);
}
});
client.on('close', function () {
console.log('Connection closed');
});
client.setTimeout(50000, function () {
//client.destroy();
});
Server
var net = require('net')
, server = net.createServer()
, delimit = '~~'
, clients = [];
controller.listenOutQueue(function (dataToSend) {
clients.forEach(function (client) {
client.write(dataToSend);
client.write(delimit);
});
});
server.on('connection', function (socket) {
var chunk = '';
var dlim_index = -1;
socket.on('data', function (data) {
var recvData = data.toString().trim();
chunk += recvData;
dlim_index = chunk.indexOf(delimit);
while (dlim_index > -1) {
var useData = chunk.substring(0, dlim_index);
if (useData.substring(0, 9) == 'CLIENTID:') {
socket.clientid = useData.replace('CLIENTID:', '');
console.log('Client Id: ' + socket.clientid);
clients.push(socket);
var successMessage = "SUCCESS";
socket.write(successMessage);
socket.write(delimit);
}
else {
controller.insertIntoQueue(JSON.parse(useData), function (status) {
});
}
chunk = chunk.substring(dlim_index + 2);
dlim_index = chunk.indexOf(delimit);
}
});
socket.on('end', function () {
console.log('Connection Closed (' + socket.clientid + ')');
});
socket.on('error', function (err) {
console.log('SOCKET ERROR:', err);
});
});
server.listen(config.Port, config.IpAddress);
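Regarding the worry above that the delimiter might appear inside a JSON string: one common option is newline-delimited JSON, because JSON.stringify always escapes literal newlines inside string values (they come out as \n), so a raw newline can never occur inside a single encoded message. A minimal sketch of that framing, assuming the same net sockets as above (helper names are only for illustration):
// Sender: one JSON document per line.
function sendJson(socket, obj) {
    socket.write(JSON.stringify(obj) + '\n');
}

// Receiver: accumulate data and split on newlines.
var pending = '';
socket.on('data', function (data) {
    pending += data.toString();
    var lines = pending.split('\n');
    pending = lines.pop(); // keep the trailing partial line
    lines.forEach(function (line) {
        if (line.trim()) {
            var record = JSON.parse(line);
            // handle record here
        }
    });
});
The length-prefix framing shown in the Transform-stream answer near the top of this page avoids the delimiter question entirely and works just as well.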
I'm trying to add a STARTTLS upgrade to an existing protocol (which currently works in plaintext).
As a start, I'm using a simple line-based echoing server (it's a horrible kludge with no error handling or processing of packets into lines, but it usually just works because the console sends a line at a time to stdin).
I think my server is right, but both ends exit with identical errors when I type starttls:
events.js:72
throw er; // Unhandled 'error' event
^
Error: 139652888721216:error:140770FC:SSL routines:SSL23_GET_SERVER_HELLO:unknown protocol:../deps/openssl/openssl/ssl/s23_clnt.c:766:
at SlabBuffer.use (tls.js:232:18)
at CleartextStream.read [as _read] (tls.js:450:29)
at CleartextStream.Readable.read (_stream_readable.js:320:10)
at EncryptedStream.write [as _write] (tls.js:366:25)
at doWrite (_stream_writable.js:221:10)
at writeOrBuffer (_stream_writable.js:211:5)
at EncryptedStream.Writable.write (_stream_writable.js:180:11)
at Socket.ondata (stream.js:51:26)
at Socket.EventEmitter.emit (events.js:95:17)
at Socket.<anonymous> (_stream_readable.js:746:14)
Have I completely misunderstood how to do an upgrade on the client side?
Currently, I'm using the same method to add TLS-ness to plain streams at each end. This feels wrong, as both client and server will be trying to play the same role in the negotiation.
tlsserver.js:
var tls = require('tls');
var net = require('net');
var fs = require('fs');
var options = {
key: fs.readFileSync('server-key.pem'),
cert: fs.readFileSync('server-cert.pem'),
// This is necessary only if using the client certificate authentication.
requestCert: true,
// This is necessary only if the client uses the self-signed certificate.
ca: [ fs.readFileSync('client-cert.pem') ],
rejectUnauthorized: false
};
var server = net.createServer(function(socket) {
socket.setEncoding('utf8');
socket.on('data', function(data) {
console.log('plain data: ', data);
// FIXME: this is not robust, it should be processing the stream into lines
if (data.substr(0, 8) === 'starttls') {
console.log('server starting TLS');
//socket.write('server starting TLS');
socket.removeAllListeners('data');
options.socket = socket;
sec_socket = tls.connect(options, (function() {
sec_socket.on('data', function(data) {
console.log('secure data: ', data);
});
return callback(null, true);
}).bind(this));
} else {
console.log('plain data', data);
}
});
});
server.listen(9999, function() {
console.log('server bound');
});
client.js:
var tls = require('tls');
var fs = require('fs');
var net = require('net');
var options = {
// These are necessary only if using the client certificate authentication
key: fs.readFileSync('client-key.pem'),
cert: fs.readFileSync('client-cert.pem'),
// This is necessary only if the server uses the self-signed certificate
ca: [ fs.readFileSync('server-cert.pem') ],
rejectUnauthorized: false
};
var socket = new net.Socket();
var sec_socket = undefined;
socket.setEncoding('utf8');
socket.on('data', function(data) {
console.log('plain data:', data);
});
socket.connect(9999, function() {
process.stdin.setEncoding('utf8');
process.stdin.on('data', function(data) {
if (!sec_socket) {
console.log('sending plain:', data);
socket.write(data);
} else {
console.log('sending secure:', data);
sec_socket.write(data);
}
if (data.substr(0, 8) === 'starttls') {
console.log('client starting tls');
socket.removeAllListeners('data');
options.socket = socket;
sec_socket = tls.connect(options, (function() {
sec_socket.on('data', function(data) {
console.log('secure data: ', data);
});
return callback(null, true);
}).bind(this));
}
});
});
Got it working, thanks to Matt Seargeant's answer. My code now looks like:
server.js:
var ts = require('./tls_socket');
var fs = require('fs');
var options = {
key: fs.readFileSync('server-key.pem'),
cert: fs.readFileSync('server-cert.pem'),
// This is necessary only if using the client certificate authentication.
requestCert: false,
// This is necessary only if the client uses the self-signed certificate.
ca: [ fs.readFileSync('client-cert.pem') ],
rejectUnauthorized: false
};
var server = ts.createServer(function(socket) {
console.log('connected');
socket.on('data', function(data) {
console.log('data', data);
if (data.length === 9) {
console.log('upgrading to TLS');
socket.upgrade(options, function() {
console.log('upgraded to TLS');
});
}
});
});
server.listen(9999);
client.js:
var ts = require('./tls_socket');
var fs = require('fs');
var crypto = require('crypto');
var options = {
// These are necessary only if using the client certificate authentication
key: fs.readFileSync('client-key.pem'),
cert: fs.readFileSync('client-cert.pem'),
// This is necessary only if the server uses the self-signed certificate
ca: [ fs.readFileSync('server-cert.pem') ],
rejectUnauthorized: false
};
var socket = ts.connect(9999, 'localhost', function() {
console.log('secured');
});
process.stdin.on('data', function(data) {
console.log('sending:', data);
socket.write(data);
if (data.length === 9) {
socket.upgrade(options);
}
});
tls_socket.js:
"use strict";
/*----------------------------------------------------------------------------------------------*/
/* Obtained and modified from http://js.5sh.net/starttls.js on 8/18/2011. */
/*----------------------------------------------------------------------------------------------*/
var tls = require('tls');
var crypto = require('crypto');
var util = require('util');
var net = require('net');
var stream = require('stream');
var SSL_OP_ALL = require('constants').SSL_OP_ALL;
// provides a common socket for attaching
// and detaching from either main socket, or crypto socket
function pluggableStream(socket) {
stream.Stream.call(this);
this.readable = this.writable = true;
this._timeout = 0;
this._keepalive = false;
this._writeState = true;
this._pending = [];
this._pendingCallbacks = [];
if (socket)
this.attach(socket);
}
util.inherits(pluggableStream, stream.Stream);
pluggableStream.prototype.pause = function () {
if (this.targetsocket.pause) {
this.targetsocket.pause();
this.readable = false;
}
}
pluggableStream.prototype.resume = function () {
if (this.targetsocket.resume) {
this.readable = true;
this.targetsocket.resume();
}
}
pluggableStream.prototype.attach = function (socket) {
var self = this;
self.targetsocket = socket;
self.targetsocket.on('data', function (data) {
self.emit('data', data);
});
self.targetsocket.on('connect', function (a, b) {
self.emit('connect', a, b);
});
self.targetsocket.on('secureConnection', function (a, b) {
self.emit('secureConnection', a, b);
self.emit('secure', a, b);
});
self.targetsocket.on('secure', function (a, b) {
self.emit('secureConnection', a, b);
self.emit('secure', a, b);
});
self.targetsocket.on('end', function () {
self.writable = self.targetsocket.writable;
self.emit('end');
});
self.targetsocket.on('close', function (had_error) {
self.writable = self.targetsocket.writable;
self.emit('close', had_error);
});
self.targetsocket.on('drain', function () {
self.emit('drain');
});
self.targetsocket.on('error', function (exception) {
self.writable = self.targetsocket.writable;
self.emit('error', exception);
});
self.targetsocket.on('timeout', function () {
self.emit('timeout');
});
if (self.targetsocket.remotePort) {
self.remotePort = self.targetsocket.remotePort;
}
if (self.targetsocket.remoteAddress) {
self.remoteAddress = self.targetsocket.remoteAddress;
}
};
pluggableStream.prototype.clean = function (data) {
if (this.targetsocket && this.targetsocket.removeAllListeners) {
this.targetsocket.removeAllListeners('data');
this.targetsocket.removeAllListeners('secureConnection');
this.targetsocket.removeAllListeners('secure');
this.targetsocket.removeAllListeners('end');
this.targetsocket.removeAllListeners('close');
this.targetsocket.removeAllListeners('error');
this.targetsocket.removeAllListeners('drain');
}
this.targetsocket = {};
this.targetsocket.write = function () {};
};
pluggableStream.prototype.write = function (data, encoding, callback) {
if (this.targetsocket.write) {
return this.targetsocket.write(data, encoding, callback);
}
return false;
};
pluggableStream.prototype.end = function (data, encoding) {
if (this.targetsocket.end) {
return this.targetsocket.end(data, encoding);
}
}
pluggableStream.prototype.destroySoon = function () {
if (this.targetsocket.destroySoon) {
return this.targetsocket.destroySoon();
}
}
pluggableStream.prototype.destroy = function () {
if (this.targetsocket.destroy) {
return this.targetsocket.destroy();
}
}
pluggableStream.prototype.setKeepAlive = function (bool) {
this._keepalive = bool;
return this.targetsocket.setKeepAlive(bool);
};
pluggableStream.prototype.setNoDelay = function (/* true||false */) {
};
pluggableStream.prototype.setTimeout = function (timeout) {
this._timeout = timeout;
return this.targetsocket.setTimeout(timeout);
};
function pipe(pair, socket) {
pair.encrypted.pipe(socket);
socket.pipe(pair.encrypted);
pair.fd = socket.fd;
var cleartext = pair.cleartext;
cleartext.socket = socket;
cleartext.encrypted = pair.encrypted;
cleartext.authorized = false;
function onerror(e) {
if (cleartext._controlReleased) {
cleartext.emit('error', e);
}
}
function onclose() {
socket.removeListener('error', onerror);
socket.removeListener('close', onclose);
}
socket.on('error', onerror);
socket.on('close', onclose);
return cleartext;
}
function createServer(cb) {
var serv = net.createServer(function (cryptoSocket) {
var socket = new pluggableStream(cryptoSocket);
socket.upgrade = function (options, cb) {
console.log("Upgrading to TLS");
socket.clean();
cryptoSocket.removeAllListeners('data');
// Set SSL_OP_ALL for maximum compatibility with broken clients
// See http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
if (!options) options = {};
// TODO: bug in Node means we can't do this until it's fixed
// options.secureOptions = SSL_OP_ALL;
var sslcontext = crypto.createCredentials(options);
var pair = tls.createSecurePair(sslcontext, true, true, false);
var cleartext = pipe(pair, cryptoSocket);
pair.on('error', function(exception) {
socket.emit('error', exception);
});
pair.on('secure', function() {
var verifyError = (pair.ssl || pair._ssl).verifyError();
console.log("TLS secured.");
if (verifyError) {
cleartext.authorized = false;
cleartext.authorizationError = verifyError;
} else {
cleartext.authorized = true;
}
var cert = pair.cleartext.getPeerCertificate();
if (pair.cleartext.getCipher) {
var cipher = pair.cleartext.getCipher();
}
socket.emit('secure');
if (cb) cb(cleartext.authorized, verifyError, cert, cipher);
});
cleartext._controlReleased = true;
socket.cleartext = cleartext;
if (socket._timeout) {
cleartext.setTimeout(socket._timeout);
}
cleartext.setKeepAlive(socket._keepalive);
socket.attach(socket.cleartext);
};
cb(socket);
});
return serv;
}
if (require('semver').gt(process.version, '0.7.0')) {
var _net_connect = function (options) {
return net.connect(options);
}
}
else {
var _net_connect = function (options) {
return net.connect(options.port, options.host);
}
}
function connect(port, host, cb) {
var options = {};
if (typeof port === 'object') {
options = port;
cb = host;
}
else {
options.port = port;
options.host = host;
}
var cryptoSocket = _net_connect(options);
var socket = new pluggableStream(cryptoSocket);
socket.upgrade = function (options) {
socket.clean();
cryptoSocket.removeAllListeners('data');
// Set SSL_OP_ALL for maximum compatibility with broken servers
// See http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
if (!options) options = {};
// TODO: bug in Node means we can't do this until it's fixed
// options.secureOptions = SSL_OP_ALL;
var sslcontext = crypto.createCredentials(options);
var pair = tls.createSecurePair(sslcontext, false);
socket.pair = pair;
var cleartext = pipe(pair, cryptoSocket);
pair.on('error', function(exception) {
socket.emit('error', exception);
});
pair.on('secure', function() {
var verifyError = (pair.ssl || pair._ssl).verifyError();
console.log("client TLS secured.");
if (verifyError) {
cleartext.authorized = false;
cleartext.authorizationError = verifyError;
} else {
cleartext.authorized = true;
}
if (cb) cb();
socket.emit('secure');
});
cleartext._controlReleased = true;
socket.cleartext = cleartext;
if (socket._timeout) {
cleartext.setTimeout(socket._timeout);
}
cleartext.setKeepAlive(socket._keepalive);
socket.attach(socket.cleartext);
console.log("client TLS upgrade in progress, awaiting secured.");
};
return (socket);
}
exports.connect = connect;
exports.createConnection = connect;
exports.Server = createServer;
exports.createServer = createServer;
tls.connect() doesn't support the server doing the upgrade unfortunately.
You have to use code similar to how Haraka does it - basically creating your own shim using a SecurePair.
See here for the code we use: https://github.com/baudehlo/Haraka/blob/master/tls_socket.js#L171
STARTTLS client-server is trivial to implement in Node, but it can be a little buggy, so we need to work around that.
For the server side of STARTTLS we need to do this:
// sock come from: net.createServer(function(sock) { ... });
sock.removeAllListeners('data');
sock.removeAllListeners('error');
sock.write('220 Go ahead' + CRLF);
sock = new tls.TLSSocket(sock, { secureContext : tls.createSecureContext({ key: cfg.stls.key, cert: cfg.stls.cert }), rejectUnauthorized: false, isServer: true });
sock.setEncoding('utf8');
// 'secureConnect' event is buggy :/ we need to use 'secure' here.
sock.on('secure', function() {
// STARTTLS is done here. sock is a secure socket in server side.
sock.on('error', parseError);
sock.on('data', parseData);
});
For the client side of STARTTLS we need to do this:
// sock come from: net.connect(cfg.port, curr.exchange);
// here we already read the '220 Go ahead' from the server.
sock.removeAllListeners('data');
sock.removeAllListeners('error');
sock = tls.connect({ socket: sock, secureContext : tls.createSecureContext({ key: cfg.stls.key, cert: cfg.stls.cert }), rejectUnauthorized: false });
sock.on('secureConnect', function() {
// STARTTLS is done here. sock is a secure socket in client side.
sock.on('error', parseError);
sock.on('data', parseData);
// Resend the helo message.
sock.write(helo);
});
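For completeness, here is a minimal sketch of how the client-side upgrade above fits into a full exchange: connect in plaintext, ask for STARTTLS, wait for the '220 Go ahead' line mentioned in the comment, then hand the raw socket to tls.connect. The STARTTLS command string, host and port here are only for illustration:
var net = require('net');
var tls = require('tls');

var sock = net.connect(2525, 'example.com', function () {
    sock.write('STARTTLS\r\n');
});

sock.once('data', function (data) {
    if (data.toString().indexOf('220') === 0) {
        // Make sure no plaintext 'data' handlers remain, then upgrade the existing socket.
        sock.removeAllListeners('data');
        var secure = tls.connect({ socket: sock, rejectUnauthorized: false });
        secure.on('secureConnect', function () {
            secure.write('HELLO over TLS\r\n');
        });
        secure.on('data', function (chunk) {
            console.log('secure data:', chunk.toString());
        });
    }
});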