How to increase performance of redis sub? - node.js

I have code like this:
var subscribeNewMessages = require("redis").createClient(config.redis.port, config.redis.host);
subscribeNewMessages.subscribe('new-messages');

io.of('/new-messages').on('connection', function (client) {
  subscribeNewMessages.on("message", function(channel, message) {
    var obj = JSON.parse(message);
    if (client.userId == obj.toUserId || client.guestId == obj.toUserId) {
      client.send(message);
    }
    obj = null;
  });
});
How can I optimize it? This code parses the JSON string for every new message (and for every connected client, since the handler is registered per connection). Also, when I publish to the Redis channel, I need to JSON.stringify:
redis1.publish(channelPrefix, JSON.stringify(clientData));

There isn't going to be a way to avoid JSON.parse()/JSON.stringify() as long as you're storing JS objects. You could use a different serialization format like msgpack, but you're still calling functions to serialize/deserialize your data (and JSON.parse()/JSON.stringify() are already pretty hard to beat performance-wise in Node).
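If you do want to try a binary format, here is a minimal sketch of the msgpack route, assuming the msgpack-lite package (any msgpack binding looks similar):

var msgpack = require('msgpack-lite');

// publisher: encode to a binary Buffer instead of a JSON string
redis1.publish(channelPrefix, msgpack.encode(clientData));

// subscriber: the client must be created with return_buffers: true
// so that `message` arrives as a Buffer rather than a string
var subscribeNewMessages = require('redis')
  .createClient(config.redis.port, config.redis.host, { return_buffers: true });
subscribeNewMessages.subscribe('new-messages');
subscribeNewMessages.on('message', function(channel, message) {
  var obj = msgpack.decode(message);
  // ...
});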
I think the only real performance adjustment you could make with the code you've provided is to only parse the JSON once for all clients instead of for each client. Example:
var subscribeNewMessages = require('redis').createClient(config.redis.port, config.redis.host);
subscribeNewMessages.subscribe('new-messages');

var nsNewMsgs = io.of('/new-messages');

subscribeNewMessages.on('message', function(channel, message) {
  var obj = JSON.parse(message),
      clients = nsNewMsgs.connected,
      ids = Object.keys(clients);
  for (var i = 0, len = ids.length, client; i < len; ++i) {
    client = clients[ids[i]];
    if (client.userId == obj.toUserId || client.guestId == obj.toUserId)
      client.send(message);
  }
});
Depending on your application, you might even be able to avoid the for-loop entirely if you can store the socket.id values in your published messages; then you can simply look up clients[obj.userSockId] and clients[obj.guestSockId], because connected is keyed on socket.id.
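A minimal sketch of that lookup, assuming the publisher includes the (hypothetical) userSockId/guestSockId fields in each message:

subscribeNewMessages.on('message', function(channel, message) {
  var obj = JSON.parse(message);
  // O(1) lookups instead of scanning every connected socket
  var userSock = nsNewMsgs.connected[obj.userSockId],
      guestSock = nsNewMsgs.connected[obj.guestSockId];
  if (userSock) userSock.send(message);
  if (guestSock) guestSock.send(message);
});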

Related

room broadcast vs sockets emit - are they the same?

I'm using my own rooms implementation in Socket.io (I have my own Room/Player classes), since I need to perform several filters on a room. For now, I save all sockets inside a "players" property in a Room, and I implemented my own "emit" for the room that loops over the players and emits to their sockets.
Is it considerably slower than the traditional broadcast.to('room')? Or does it basically do what my own room implementation does? I'm aiming at having thousands of rooms with 2-4 players in each...
Thanks :)
As you can see by looking at the code for the socket.io adapter .broadcast() on GitHub, all socket.io is doing is looping over a list of sockets and sending a packet to each one (see code below).
So, if your code is doing something similar, then performance would likely be something similar.
Where you might notice a feature difference is if you are using a custom adapter, such as the redis adapter used with clustering: there, the logic of broadcasting to users connected to different servers is handled for you by the adapter, but it would be something you'd have to implement yourself when doing your own broadcast.
Here's the socket.io-adapter version of .broadcast():
Adapter.prototype.broadcast = function(packet, opts){
  var rooms = opts.rooms || [];
  var except = opts.except || [];
  var flags = opts.flags || {};
  var packetOpts = {
    preEncoded: true,
    volatile: flags.volatile,
    compress: flags.compress
  };
  var ids = {};
  var self = this;
  var socket;

  packet.nsp = this.nsp.name;
  this.encoder.encode(packet, function(encodedPackets) {
    if (rooms.length) {
      for (var i = 0; i < rooms.length; i++) {
        var room = self.rooms[rooms[i]];
        if (!room) continue;
        var sockets = room.sockets;
        for (var id in sockets) {
          if (sockets.hasOwnProperty(id)) {
            if (ids[id] || ~except.indexOf(id)) continue;
            socket = self.nsp.connected[id];
            if (socket) {
              socket.packet(encodedPackets, packetOpts);
              ids[id] = true;
            }
          }
        }
      }
    } else {
      for (var id in self.sids) {
        if (self.sids.hasOwnProperty(id)) {
          if (~except.indexOf(id)) continue;
          socket = self.nsp.connected[id];
          if (socket) socket.packet(encodedPackets, packetOpts);
        }
      }
    }
  });
};
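So a hand-rolled room emit along these lines (a sketch, assuming Room/Player classes shaped like the ones described in the question) is doing essentially the same work:

Room.prototype.emit = function(event, data) {
  // same pattern as the adapter: loop the members and send to each socket
  for (var i = 0; i < this.players.length; i++) {
    this.players[i].socket.emit(event, data);
  }
};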

Node.js Buffers for incoming data

I am wondering if it makes sense to use Node's Buffer for incoming client data to a server. My server and clients are TCP based and I am using <EOF> to determine the end of a message. The message is always stringified JSON.
eg: {"name":"Bob"}<EOF>
In case the entire message does not arrive in one chunk, should I be using a Node Buffer to build up the data, or a regular string?
If it is a buffer, I don't think I understand how to correctly build one up. This is my Client constructor, which is invoked each time a socket connects to the Node server.
constructor(socket){
  var self = this;

  // properties
  this.socket = socket;
  this.buffer = Buffer.alloc(1024);
  this.dataEnd = '<EOF>';

  // append <EOF> to every msg
  this.socket.write = function(msg){
    msg += "<EOF>";
    return Socket.prototype.write.call(this, msg);
  };

  // build up buffer
  this.socket.on('data', function(data){
    var buffLen = self.buffer.length;
    var dataBuffer = Buffer.from(data);
    if(buffLen + dataBuffer.length < 1024){
      if(data.indexOf(self.dataEnd) === -1){
        self.buffer.concat(dataBuffer);
      }
    }
  });

  return this;
}
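For reference, a common pattern for a delimiter-based protocol like this is to accumulate chunks and split on the delimiter, rather than pre-allocating a fixed-size Buffer. A minimal sketch (note that Buffer.concat is a static method that returns a new Buffer; it does not append in place, which is one problem in the code above):

var chunks = [];

socket.on('data', function(data) {
  chunks.push(data);
  // join what we have so far and look for complete messages
  var text = Buffer.concat(chunks).toString('utf8');
  var parts = text.split('<EOF>');
  // everything before the last '<EOF>' is a complete message
  for (var i = 0; i < parts.length - 1; i++) {
    var msg = JSON.parse(parts[i]);
    console.log('received:', msg);
  }
  // keep the trailing partial message (if any) for the next 'data' event
  chunks = [Buffer.from(parts[parts.length - 1])];
});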

Passing a return from one function to another function that already has set parameters?

Edit: I know JS is asynchronous, and I have looked over the "How do I return the response from an asynchronous call?" thread. The issue I'm having is that going from "foo" examples to something specific leaves me not quite sure where to re-format this.
Also here is some context: https://github.com/sharkwheels/beanballs/blob/master/bean-to-osc-two.js
I have a question about returns in node. It might be a dumb question, but here goes. I have a function that connects to a socket and gets OSC messages from Processing:
var sock = dgram.createSocket("udp4", function(msg, rinfo) {
  try {
    // get at all that info being sent out from Processing.
    //console.log(osc.fromBuffer(msg));
    var getMsg = osc.fromBuffer(msg);
    var isMsg = getMsg.args[0].value;
    var isName = getMsg.args[1].value;
    var isAdd = getMsg.address;
    var isType = getMsg.oscType;
    // make an array out of it
    var isAll = [];
    isAll.push(isName);
    isAll.push(isMsg);
    isAll.push(isAdd);
    isAll.push(isType);
    // return the array
    console.log(isAll);
    return isAll;
  } catch (error) {
    console.log(error);
  }
});
Below is the start of another function that writes some of that array to a BLE device. It needs name and characteristics from a different function. How do I get the function below to use isAll AND the two existing parameters?
var writeToChars = function (name, characteristics) { // this is passing values from the BLE setup function
  // i need to get isAll to here.
  // eventually this will write some values from isAll into a scratch bank.
}
Thanks.
The async call in this case can be written something like the following. State can be maintained in variables in the closure if required; in this particular case you can do without any state (isAll) as well.
var isAll;
var soc = dgram.createSocket('udp4', oncreatesocket);

function oncreatesocket(msg, rinfo)
{
  isAll = parseMessage(msg);
  writeData(isAll);
}

function parseMessage(msg) {
  ...
  // code to parse msg and return isAll
}

function writeData(isAll) {}
If writeData is a small enough function, it can live inside oncreatesocket without hurting the readability of the code.
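Putting the pieces together with the parsing code from the question (a sketch assuming the same dgram/osc setup; writeData is still a stub to be filled in with the BLE write):

var soc = dgram.createSocket('udp4', oncreatesocket);

function oncreatesocket(msg, rinfo) {
  try {
    writeData(parseMessage(msg));
  } catch (error) {
    console.log(error);
  }
}

function parseMessage(msg) {
  var getMsg = osc.fromBuffer(msg);
  // [name, msg, address, oscType], as in the question
  return [getMsg.args[1].value, getMsg.args[0].value, getMsg.address, getMsg.oscType];
}

function writeData(isAll) {
  // write values from isAll to the BLE characteristics here
}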
Alright, so I figured out what to do, at least in this scenario. I'm sure there is a better way to do this, but for now, this works.
I'm mapping over an existing global array of peripherals inside the write function, while passing the OSC message to it as a parameter. This solved my issue of "how do I get two pieces of information to the same place". It figures out which peripheral is which and writes a different value to each peripheral's scratch bank accordingly. Leaving this here for future reference.
var writeToBean = function(passThrough){
  console.log("in Write to bean: ", passThrough);
  _.map(beanArray, function(n){
    if(n.advertisement.localName === passThrough.name){
      //var name = n.advertisement.localName;
      n.discoverSomeServicesAndCharacteristics(['a495ff20c5b14b44b5121370f02d74de'], [scratchThr], function(error, services, characteristics){
        var service = services[0];
        var characteristic = characteristics[0];
        var toSend = passThrough.msg;
        console.log("service", service);
        console.log("characteristic", characteristic);
        if (toSend != null) {
          characteristic.write(new Buffer([toSend]), false, function(error) {
            if (error) { console.log(error); }
            console.log("wrote " + toSend + " to scratch bank 3");
          });
        }
        // not sure how to make the program resume, it stops here. No error, just stops processing.
      });
    }
  });
}

What should I be using? Socket.io rooms or Redis pub-sub?

Pretty simple question. I am building a realtime game using Node.js as my backend, and I am wondering if there is any information available on which one is more reliable and which one is more efficient?
I am heavily using both Redis and Socket.io throughout my code, so I want to know whether I should be utilizing Socket.io's rooms or whether I would be better off using Redis pub-sub.
Update:
Just realized there is a very important reason why you may want to use redis pub/sub over socket.io rooms. With Socket.io rooms, when you publish to listeners, the (browser) clients receive the message; with redis it is actually the (redis, on-server) clients who receive messages. For this reason, if you want to inform all (server) clients of information specific to each client, and maybe do some processing before passing it on to the browser clients, you are better off using redis. Using redis you can just fire off an event to generate each user's individual data, whereas with socket.io you have to actually generate all the users' unique data at once, then loop through them and send them their individual data, which almost defeats the purpose of rooms, at least for me.
Unfortunately for my purposes I am stuck with redis for now.
Update 2: Ended up developing a plugin to use only 2 redis connections but still allow for individual client processing, see answer below....
Redis pub/sub is great in case all clients have direct access to redis. If you have multiple node servers, one can push a message to the others.
But if you also have clients in the browser, you need something else to push data from a server to a client, and in this case, socket.io is great.
Now, if you use socket.io with the Redis store, socket.io will use Redis pub/sub under the hood to propagate messages between servers, and servers will propagate messages to clients.
So using socket.io rooms with socket.io configured with the Redis store is probably the simplest for you.
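For instance, a minimal sketch of that setup using the socket.io-redis adapter (package name assumed; older socket.io versions configured this via io.set('store', ...) instead):

var io = require('socket.io')(3000);
var redisAdapter = require('socket.io-redis');

// socket.io now uses Redis pub/sub under the hood to sync rooms across servers
io.adapter(redisAdapter({ host: 'localhost', port: 6379 }));

io.on('connection', function(socket){
  socket.join('room1');
  // this reaches room1 members on every server in the cluster
  io.to('room1').emit('message', 'hello');
});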
I ended up writing a node plugin to allow for many pub-sub clients while requiring only 2 redis connections, instead of a new one for every single socket.io connection. It should work in general; figured someone else might find a use for it.
This code assumes you have socket.io running and set up. In this example, any number of socket.io clients can connect, and it will always still only use 2 redis connections, but all clients can subscribe to their own channels. In this example, all clients get a message 'sweet message!' after 10 seconds.
Example with socket.io (utilizing redis pub-sub):
var redis = require('redis');
var RPubSubFactory = require('./rpss.js');

var redOne = redis.createClient(port, host),
    redTwo = redis.createClient(port, host);

var pSCFactory = new RPubSubFactory(redOne);

io.sockets.on('connection', function(socket){
  var cps = pSCFactory.createClient();
  cps.onMessage(function(channel, message){
    socket.emit('message', message);
  });
  socket.on('disconnect', function(){
    // Don't actually need to unsub, because end() will clean up all subs,
    // but if you need to sometime during the connection lifetime, you can.
    cps.unsubscribe('cool_channel');
    cps.end();
  });
  cps.subscribe('cool_channel');
});

setTimeout(function(){
  redTwo.publish('cool_channel', 'sweet message!');
}, 10000);
Actual plugin code:
var RPubSubFactory = function(){

  var len, indx, tarr;

  var dbcom = false,
      rPubSubIdCounter = 1,
      clientLookup = {},
      globalSubscriptions = {};

  // public
  this.createClient = function()
  {
    return new RPubSupClient();
  }

  // private
  var constructor = function(tdbcom)
  {
    dbcom = tdbcom;
    dbcom.on("message", incommingMessage);
  }

  var incommingMessage = function(rawchannel, strMessage)
  {
    len = globalSubscriptions[rawchannel].length;
    for(var i = 0; i < len; i++){
      //console.log(globalSubscriptions[rawchannel][i]+' incomming on channel '+rawchannel);
      clientLookup[globalSubscriptions[rawchannel][i]]._incommingMessage(rawchannel, strMessage);
    }
  }

  // class
  var RPubSupClient = function()
  {
    var id = -1,
        localSubscriptions = [];

    this.id = -1;
    this._incommingMessage = function(){};

    this.subscribe = function(channel)
    {
      //console.log('client '+id+' subscribing to '+channel);
      if(!(channel in globalSubscriptions)){
        globalSubscriptions[channel] = [id];
        dbcom.subscribe(channel);
      }
      else if(globalSubscriptions[channel].indexOf(id) == -1){
        globalSubscriptions[channel].push(id);
      }
      if(localSubscriptions.indexOf(channel) == -1){
        localSubscriptions.push(channel);
      }
    }

    this.unsubscribe = function(channel)
    {
      //console.log('client '+id+' unsubscribing to '+channel);
      if(channel in globalSubscriptions)
      {
        indx = globalSubscriptions[channel].indexOf(id);
        if(indx != -1){
          globalSubscriptions[channel].splice(indx, 1);
          if(globalSubscriptions[channel].length == 0){
            delete globalSubscriptions[channel];
            dbcom.unsubscribe(channel);
          }
        }
      }
      indx = localSubscriptions.indexOf(channel);
      if(indx != -1){
        localSubscriptions.splice(indx, 1);
      }
    }

    this.onMessage = function(msgFn)
    {
      this._incommingMessage = msgFn;
    }

    this.end = function()
    {
      //console.log('end client id = '+id+' closing subscriptions='+localSubscriptions.join(','));
      tarr = localSubscriptions.slice(0);
      len = tarr.length;
      for(var i = 0; i < len; i++){
        this.unsubscribe(tarr[i]);
      }
      localSubscriptions = [];
      delete clientLookup[id];
    }

    var constructor = function(){
      this.id = id = rPubSubIdCounter++;
      clientLookup[id] = this;
      //console.log('new client id = '+id);
    }
    constructor.apply(this, arguments);
  }

  constructor.apply(this, arguments);
};

module.exports = RPubSubFactory;
I mucked around and tried to improve the efficiency as much as I could, but after running some different speed tests, I concluded this was the fastest I could get it.
For an up-to-date version: https://github.com/Jezternz/node-redis-pubsub

Node.js sending an object with function definitions to worker thread

So I am working on a project in Node.js and I want to open up some extra threads to handle the processing load more efficiently. But I am using classes with function definitions on them, and when I try to send those objects to the worker thread, the functions defined on the object disappear and I am left with only the other fields. Is there a way to send the worker an object and preserve its functions so they can be called within the worker?
var cluster = require('cluster');

if(cluster.isMaster){
  Monster = function(species){
    this.attack = function(){
      console.log('CHOMP');
    };
    this.name = species;
  };

  var vamp = new Monster('vampire'),
      worker = cluster.fork();
  worker.send({'monster' : vamp});
}
else{
  process.on('message', function(msg) {
    console.log(msg.monster); //this logs "{ name: 'vampire' }"
    msg.monster.attack();     //TypeError: Object #<Object> has no method 'attack'
  });
}
No, there is no way to pass functions between threads. You can pass only plain JS objects (data only) and handle them with functions defined in the current thread (for example, create a new object from the received data).
Charlie, I realize you asked this question a year ago, but I wanted to do something very similar and came across your question, which you hadn't marked an answer to yet. I thought I would take a "stab" at it and show you what I have done with your code. This different way of organizing code is, for me, a very acceptable workaround in my node.js work. I am pretty sure this gives you a way to accomplish what you want, even though you can't do it in the manner you wanted.
Declare your "class" outside the cluster code, like this:
var cluster = require('cluster');

var Monster = function(species){
  this.attack = function(){
    console.log('CHOMP!');
  };
  this.die = function() {
    console.log("Oh, what did I eat? I don't feel so good....\r\n");
    process.exit(0);
  };
  this.scare = function() {
    console.log("BOO! I am a " + this.name + "!");
  };
  this.name = species;
};

if(cluster.isMaster){
  worker = cluster.fork();
  worker.send({'species' : 'Vampire'});
}
else{
  process.on('message', function(msg) {
    if(typeof msg.species !== "undefined") {
      myMonster = new Monster(msg.species);
      myMonster.scare();
      myMonster.attack();
      myMonster.die();
    }
  });
}
Give that a whirl and see if this is an answer you can accept!
Ok, I stumbled upon this question, and I found it strange that no one brought this up, but it might be a more modern feature than the question:
eval
let str = "() => { console.log('test') }"
let func = eval(str)
func()
I think it's obvious what's going on here: you can evaluate any string as JavaScript, and you can send strings to workers, so you can build an object whose functions are stored as strings:
let obj = { a: "() => { ... }" }
and send the object over. (JSON.stringify(obj) first; on the other side you will have to parse the object first, and then eval all the function substrings separately.)
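A minimal end-to-end sketch of that idea with cluster (keep in mind eval executes arbitrary code, so only do this with strings you generated yourself):

var cluster = require('cluster');

if (cluster.isMaster) {
  var monster = {
    name: 'vampire',
    // serialize the function as source text
    attack: "function(){ console.log('CHOMP'); }"
  };
  cluster.fork().send({ monster: JSON.stringify(monster) });
} else {
  process.on('message', function(msg) {
    var monster = JSON.parse(msg.monster);
    // rebuild the function from its source text
    monster.attack = eval('(' + monster.attack + ')');
    monster.attack(); // logs "CHOMP"
  });
}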