I have a Node.js application subscribed to a channel, listening for an event. When it hears that event it publishes another event to a new channel. I have been using MubSub for my publish/subscribe. My issue is that no matter what I do, every time the page is reloaded there are two listeners and two publishes, then 3, 4, 5, 6, etc.
I don't understand why they don't get disconnected.
if (curURL == '/handwash') {
    db2.handwash.find({
        "event": "handwash"
    }).sort({
        "message.time": -1
    }).limit(10).forEach(function(error, x) {
        if (error || !x) {
            console.log("error getting items");
        } else {
            socket.emit('archive', x.message);
        }
    });
    channel2.subscribe('dealer out', function(message) {
        console.log("new dealer event " + message);
        db.events.find({
            "event": "wash"
        }).sort({
            "message.time": -1
        }).limit(1).forEach(function(error, x) {
            if (error || !x) {
                console.log("error getting items");
            } else {
                console.log("found last handwash..." + x.message.type + "....publishing");
                var message = {
                    'tableid': 123,
                    'time': Date(),
                    'type': x.message.type
                };
                channel.publish('handwash', message);
                socket.emit('message', message);
            }
        });
        //socket.emit('message', message);
    });
    socket.on("disconnect", function(s) {
        console.log("Disconnected from global handler");
    });
}
The problem was with how I was using MubSub for subscribing and publishing messages. I fixed it by creating a variable for my subscription and then closing it with unsubscribe():
var subscription = channelEvents.subscribe('pocket', function(message) {});
subscription.unsubscribe();
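To keep listeners from accumulating on every reload, the subscription's lifetime can be tied to the socket's: keep the handle and unsubscribe in the disconnect handler. A minimal sketch, reusing the channel2 and socket objects from the question:
var dealerSub = channel2.subscribe('dealer out', function(message) {
    console.log("new dealer event " + message);
    // ...existing find/publish logic from above...
});

socket.on("disconnect", function() {
    // Without this, each page reload leaves the old subscription alive
    // and stacks another one on top of it.
    dealerSub.unsubscribe();
});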
I use the code shown below to set up a connection to an ActiveMQ message queue via Node.js.
var reconnectOptions = {
    'maxReconnects': 10,
    connect: {
        connectHeaders: {
            'heart-beat': '5000,5000'
        }
    }
};
var servers = [mqConfig.connectOption];
var manager = new stompit.ConnectFailover(servers, reconnectOptions);
manager.connect(function(error, client, reconnect) {
    client.subscribe(mqConfig.receiveHeaders, function(error, message) {
        client.on('error', function(error) {
            reconnect();
        });
        message.readString('utf-8', function(error, body) {
            if (error) {
                console.log('read message error ' + error.message);
                return;
            }
            ....
It works for connecting to ActiveMQ and sending out messages, but it generates thousands of consumers with a session id of -1. Is there any reason for that to happen? Any ideas?
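One pattern in the snippet that can multiply consumers (an observation, not a confirmed diagnosis): the 'error' listener is attached inside the per-message callback, so every received message registers another handler, and a single error then fires several reconnect() calls, each of which runs the connect callback and subscribes again. A sketch that registers each handler once per connection, assuming the same stompit/mqConfig setup as above:
manager.connect(function(error, client, reconnect) {
    if (error) {
        console.log('connect error ' + error.message);
        return;
    }
    // One error handler per connection, not one per received message.
    client.on('error', function(err) {
        console.log('client error ' + err.message);
        reconnect();
    });
    // One subscription per connection; this callback fires once per message.
    client.subscribe(mqConfig.receiveHeaders, function(error, message) {
        if (error) {
            console.log('subscribe error ' + error.message);
            return;
        }
        message.readString('utf-8', function(error, body) {
            if (error) {
                console.log('read message error ' + error.message);
                return;
            }
            console.log('received: ' + body);
        });
    });
});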
I'm quite new to Redis Pub/Sub, so please bear with me. I'm trying to create an IRC-style app where users can create their own chat rooms, kinda like Gitter. Below is what I've done so far. I'm subscribing users to different channels by their username, just for testing. The thing is, when I publish to channel x, a client who's subbed to channel y still gets the same message. I'm publishing using redis-cli and the PUBLISH command.
function handleIO(socket) {
    function disconnect() {
        console.log("Client disconnected");
        socket.broadcast.emit("user d/c", socket.username + " has left!");
    }
    socket.on("new user", function(username) {
        socket.username = username;
        if (socket.username == "chat") {
            redisClient.subscribe("chat");
        } else {
            redisClient.subscribe("other");
        }
        socket.userColor = '#' + (Math.random() * 0xFFFFFF << 0).toString(16);
        socket.emit("new_user", username);
        emitter.lrange("messages", 0, -1, function(err, messages) {
            // reversing to be in correct order
            messages = messages.reverse();
            messages.forEach(function(message) {
                message = JSON.parse(message);
                socket.emit("messages", message);
            });
        });
        socket.broadcast.emit("user connection", username + " has connected to the Haven!");
    });
    socket.on("disconnect", disconnect);
    socket.on("send", function(msg) {
        msg = JSON.stringify({ name: socket.username, messageText: msg, color: socket.userColor });
        emitter.lpush("messages", msg, function(err, response) {
            // keep newest 10 items
            emitter.ltrim("messages", 0, 9);
        });
        io.sockets.emit("receive", msg, socket.userColor);
    });
    redisClient.on("message", function(channel, message) {
        console.log(channel + ":" + message);
        socket.emit("message", channel, message);
    });
}
For the lost wanderer out there... What I did is implement another event on the client that checks whether that client is 'entitled' to the message (i.e. whether the message's channel belongs to the client's list of subscribed channels).
Client-side
socket.on("message", function(channel, message){
socket.emit("entitled", channel, message);
});
socket.on("entitled", function(reply, channel, message){
if(reply == 1){
$("#msgArea").append(message+"<br/>");
$("#msgArea").prop({ scrollTop: $("#msgArea").prop("scrollHeight") });
}
});
Server-side
socket.on("entitled", function(channel, message){
//check that user is subbed
emitter.sismember('channels:'+socket.username, channel, function(err, reply){
if(err) throw err;
socket.emit("entitled", reply, channel, message);
});
});
One thing I purposely left out is that I didn't keep using socket.username, but switched to sessions for persistence. My advice would be to stick with a Redis session store, since it's one of the most popular on GitHub.
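For reference, a guess at the piece the snippets above leave out (hypothetical; not from the original answer): populating the 'channels:' + username set that the sismember check relies on, at the moment the user is subscribed.
socket.on("new user", function(username) {
    socket.username = username;
    var channel = (username == "chat") ? "chat" : "other";
    redisClient.subscribe(channel);
    // Record the subscription in a per-user Redis set so the server-side
    // "entitled" handler has something to test membership against.
    emitter.sadd("channels:" + username, channel);
});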
How can I set a timer for a Redis subscription, so that when a client connects and no message is received from the publisher within 200ms, the socket emits an event?
This is what I have now:
io.on('connection', function(socket) {
    const sub = redis.createClient();
    sub.subscribe("data");
    sub.on("message", function(channel, message) {
        io.emit("data", JSON.parse(message));
    });
    sub.on("error", function(err) {
        console.log("Error " + err);
    });
    socket.on("disconnect", function() {
        sub.removeListener('message', function() {
            io.emit("disconnected");
        });
    });
});
I know it's a late answer, but hopefully it can help someone else, or you if you still haven't found one. You can use a timeout that calls a handler function in case the message isn't received (I'm guessing that's what you're asking).
I'm using Redis as well, and I make a call to other databases to check if they have the user with the requested id. To make sure the user doesn't sit and wait, I added a timeout which I set to 500ms. So it's a race condition, and whichever side finishes first, I unsubscribe the Redis client to make sure it's a one-off as well.
return User.mongoose.findAsync({ _id: params.id })
    .then(results => {
        if (sails.IS.empty(results)) {
            const request_id = Date.now() + '#' + sails.HOST_NAME + '/user/' + params.id
            sails.REDIS_SUB.subscribe(request_id)
            sails.REDIS_SUB.on('message', (channel, message) => {
                if (channel == request_id) {
                    const data = JSON.parse(message)
                    sails.dlogwarn(params.id + '\t<=\t(' + data.responder + ')')
                    sails.REDIS_SUB.unsubscribe(request_id)
                    return data.params.results
                }
            })
            sails.REDIS_PUB.publish('/user/find', JSON.stringify({
                request_id: request_id,
                params: {
                    _id: params.id
                }
            }))
            setTimeout(() => {
                sails.REDIS_SUB.unsubscribe(request_id)
                return results
            }, Number(process.env.REDIS_MAX_QUERY_TIMEOUT))
        } else return results
    })
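Applied back to the original question, a minimal sketch of the same race pattern (assuming the io and redis objects from the question; "no data" is a hypothetical event name):
io.on('connection', function(socket) {
    const sub = redis.createClient();
    let received = false;

    // If the publisher stays silent for 200ms, notify this client.
    const timer = setTimeout(function() {
        if (!received) socket.emit("no data"); // hypothetical event name
    }, 200);

    sub.subscribe("data");
    sub.on("message", function(channel, message) {
        received = true;
        clearTimeout(timer);
        io.emit("data", JSON.parse(message));
    });

    socket.on("disconnect", function() {
        clearTimeout(timer);
        sub.unsubscribe();
        sub.quit(); // close this socket's dedicated subscriber connection
    });
});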
I'm new to message queues, and I'm trying to connect to a RabbitMQ instance that was set up for me, using https://github.com/squaremo/amqp.node. I'm definitely on the struggle bus.
I took the example from here, and I'm trying to plug my values in, but I'm getting nowhere.
Here's the info I was given:
Server: myserver
Queue: uinotification
username: myuser
password: mypass
Here's my attempt at using this example, but I think instead of asserting the queue I need to bind to it (I think).
Here's the docs for bindQueue: http://www.squaremobius.net/amqp.node/doc/channel_api.html#toc_39
I think I'm confused by the exchange piece.
amqp.connect('amqp://myuser:mypass@myserver').then(function(conn) {
    process.once('SIGINT', function() { conn.close(); });
    return conn.createChannel().then(function(ch) {
        var ok = ch.assertExchange('logs', 'fanout', {durable: false});
        ok = ok.then(function() {
            //return ch.assertQueue('', {exclusive: true});
            return ch.bindQueue('uinotification', 'logs', '');
        });
        /*ok = ok.then(function(qok) {
            console.log('qok = ');
            console.log(qok);
            return ch.bindQueue(qok.queue, 'logs', '').then(function() {
                return qok.queue;
            });
        });*/
        ok = ok.then(function(queue) {
            console.log('queue = ');
            console.log(queue);
            return ch.consume(queue, logMessage, {noAck: true});
        });
        return ok.then(function() {
            console.log(' [*] Waiting for logs. To exit press CTRL+C');
        });
        function logMessage(msg) {
            console.log(" [x] '%s'", msg.content.toString());
        }
    }).catch(function(err) {
        console.error('err = ' + err);
    });
}).then(null, console.warn).catch(function(err) {
    console.error('connect err = ' + err);
});
Here's the error I get with the above code:
Channel closed by server: 404 (NOT-FOUND) with message "NOT_FOUND - no previously declared queue"
A queue has to exist before you can bind to it, so asserting it into existence is exactly what you need to do before binding.
Note that the settings for the queue have to be exactly the same as the ones the queue has been first created with.
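A minimal sketch of that order of operations, reusing the names from the question (the queue options here are an assumption; they must match however 'uinotification' was originally declared on the server):
var amqp = require('amqplib');

amqp.connect('amqp://myuser:mypass@myserver').then(function(conn) {
    return conn.createChannel().then(function(ch) {
        return ch.assertExchange('logs', 'fanout', { durable: false })
            .then(function() {
                // Assert the queue into existence before binding to it.
                return ch.assertQueue('uinotification', { durable: true });
            })
            .then(function(qok) {
                // The queue now exists, so the bind no longer 404s.
                return ch.bindQueue(qok.queue, 'logs', '').then(function() {
                    return qok.queue;
                });
            })
            .then(function(queue) {
                return ch.consume(queue, function(msg) {
                    console.log(" [x] '%s'", msg.content.toString());
                }, { noAck: true });
            });
    });
}).catch(function(err) {
    console.error('err = ' + err);
});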
I want to receive a message after a certain amount of time in one of my workers. I decided to go with Node and RabbitMQ after discovering so-called dead letter exchanges.
The message seems to get sent to the queue in DeadExchange, but the consumer never receives the message in the WorkQueue in WorkExchange after the elapsed time. Either the bindQueue is off, or the dead-letter'ing doesn't work?
I've tried a lot of different values now. Can someone please point out what I'm missing?
var amqp = require('amqplib');
var url = 'amqp://dev.rabbitmq.com';
amqp.connect(url).then(function(conn) {
    //Subscribe to the WorkQueue in WorkExchange to which the "delayed" messages get dead-letter'ed (is that a verb?) to.
    return conn.createChannel().then(function(ch) {
        return ch.assertExchange('WorkExchange', 'direct').then(function() {
            return ch.assertQueue('WorkQueue', {
                autoDelete: false,
                durable: true
            })
        }).then(function() {
            return ch.bindQueue('WorkQueue', 'WorkExchange', '');
        }).then(function() {
            console.log('Waiting for consume.');
            return ch.consume('WorkQueue', function(msg) {
                console.log('Received message.');
                console.log(msg.content.toString());
                ch.ack(msg);
            });
        });
    })
}).then(function() {
    //Now send a test message to DeadExchange to a random (unique) queue.
    return amqp.connect(url).then(function(conn) {
        return conn.createChannel();
    }).then(function(ch) {
        return ch.assertExchange('DeadExchange', 'direct').then(function() {
            return ch.assertQueue('', {
                arguments: {
                    'x-dead-letter-exchange': 'WorkExchange',
                    'x-message-ttl': 2000,
                    'x-expires': 10000
                }
            })
        }).then(function(ok) {
            console.log('Sending delayed message');
            return ch.sendToQueue(ok.queue, new Buffer(':)'));
        });
    })
}).then(null, function(error) {
    console.log('error\'ed')
    console.log(error);
    console.log(error.stack);
});
I'm using amqp.node (https://github.com/squaremo/amqp.node), which is amqplib in npm. Although node-amqp (https://github.com/postwait/node-amqp) seems to be much more popular, it doesn't implement the full protocol, and there are quite a few outstanding issues regarding reconnecting.
dev.rabbitmq.com is running RabbitMQ 3.1.3.
This is working code. When a message spends more than its TTL in DeadExchange, it is pushed to WorkExchange. The key to success is defining the right routing key: the queue to which you wish to send messages after the TTL should be bound with an explicit routing key (note: not the default one), and the 'x-dead-letter-routing-key' attribute's value should match that routing key.
var amqp = require('amqplib');
var url = 'amqp://localhost';
amqp.connect(url).then(function(conn) {
    //Subscribe to the WorkQueue in WorkExchange to which the "delayed" messages get dead-letter'ed (is that a verb?) to.
    return conn.createChannel().then(function(ch) {
        return ch.assertExchange('WorkExchange', 'direct').then(function() {
            return ch.assertQueue('WorkQueue', {
                autoDelete: false,
                durable: true
            })
        }).then(function() {
            return ch.bindQueue('WorkQueue', 'WorkExchange', 'rk1');
        }).then(function() {
            console.log('Waiting for consume.');
            return ch.consume('WorkQueue', function(msg) {
                console.log('Received message.');
                console.log(msg.content.toString());
                ch.ack(msg);
            });
        });
    })
}).then(function() {
    //Now send a test message to DeadExchange to the DEQ queue.
    return amqp.connect(url).then(function(conn) {
        return conn.createChannel();
    }).then(function(ch) {
        return ch.assertExchange('DeadExchange', 'direct').then(function() {
            return ch.assertQueue('DEQ', {
                arguments: {
                    'x-dead-letter-exchange': 'WorkExchange',
                    'x-dead-letter-routing-key': 'rk1',
                    'x-message-ttl': 15000,
                    'x-expires': 100000
                }
            })
        }).then(function() {
            return ch.bindQueue('DEQ', 'DeadExchange', '');
        }).then(function() {
            console.log('Sending delayed message');
            return ch.publish('DeadExchange', '', new Buffer("Over the Hills and Far Away!"));
        });
    })
}).then(null, function(error) {
    console.log('error\'ed')
    console.log(error);
    console.log(error.stack);
});
Here's an example using AMQP Connection Manager for Node. I noticed no examples seemed to match what we were doing in our code, so I made a repo with a simple example and one with retry counts via republishing back to the main exchange: https://github.com/PritchardAlexander/node-amqp-dead-letter-queue
Here's the simple example:
const amqp = require('amqp-connection-manager');
const username = encodeURIComponent('queue');
const password = encodeURIComponent('pass');
const port = '5672';
const host = 'localhost';
const connectionString = `amqp://${username}:${password}@${host}:${port}`;
// Ask the connection manager for a ChannelWrapper. Specify a setup function to
// run every time we reconnect to the broker.
const connection = amqp.connect([connectionString]);
// A channel is your ongoing connection to RabbitMQ.
// All commands go through your channel.
connection.createChannel({
    json: true,
    setup: function(channel) {
        channel.prefetch(100);
        // Setup EXCHANGES - which are hubs you PUBLISH to that dispatch MESSAGES to QUEUES
        return Promise.all([
            channel.assertExchange('Test_MainExchange', 'topic', {
                durable: false,
                autoDelete: true
            }),
            channel.assertExchange('Test_DeadLetterExchange', 'topic', {
                durable: false,
                autoDelete: true
            })
        ])
        // Setup QUEUES - which are delegated MESSAGES by EXCHANGES.
        // The MESSAGES then need to be CONSUMED.
        .then(() => {
            return Promise.all([
                channel.assertQueue('Test_MainQueue', {
                    durable: true,
                    autoDelete: true,
                    exclusive: false,
                    messageTtl: 1000 * 60 * 60 * 1,
                    deadLetterExchange: 'Test_DeadLetterExchange'
                }),
                channel.assertQueue('Test_DeadLetterQueue', {
                    durable: false,
                    autoDelete: true,
                    exclusive: false,
                    maxLength: 1000 // cap the dead letter queue (a queue option, not an exchange option)
                })
            ]);
        })
        // This glues the QUEUES and EXCHANGES together
        // The last parameter is a routing key. A hash/pound just means: give me all messages in the exchange.
        .then(() => {
            return Promise.all([
                channel.bindQueue('Test_MainQueue', 'Test_MainExchange', '#'),
                channel.bindQueue('Test_DeadLetterQueue', 'Test_DeadLetterExchange', '#')
            ]);
        })
        // Setup our CONSUMERS
        // They pick MESSAGES off of QUEUES and do something with them (either ack or nack them)
        .then(() => {
            return Promise.all([
                channel.consume('Test_MainQueue', (msg) => {
                    const stringifiedContent = msg.content ? msg.content.toString() : '{}';
                    console.log('Test_MainQueue::CONSUME ' + stringifiedContent);
                    const messageData = JSON.parse(stringifiedContent);
                    if (messageData.value === 0) {
                        console.log('Test_MainQueue::REJECT ' + stringifiedContent);
                        // the 'false' param at the very end means: don't requeue, dead letter this instead!
                        return channel.nack(msg, false, false);
                    }
                    return channel.ack(msg);
                }),
                channel.consume('Test_DeadLetterQueue', (msg) => {
                    const stringifiedContent = msg.content ? msg.content.toString() : '{}';
                    console.log('');
                    console.log('Test_DeadLetterQueue::CONSUME ' + stringifiedContent);
                    console.log('');
                }, {
                    noAck: true // dead letter messages don't need an explicit acknowledgement or rejection
                })
            ]);
        })
        .then(() => {
            setInterval(function() {
                const messageData = {
                    text: 'Dead letter if 0',
                    value: Math.floor(Math.random() * 5)
                };
                const stringifiedMessage = JSON.stringify(messageData);
                // Publish message to exchange; the raw channel's publish() returns
                // a boolean, like a writable stream's write().
                if (channel.publish('Test_MainExchange', '', new Buffer(stringifiedMessage))) {
                    console.log(`Sent ${stringifiedMessage}`);
                } else {
                    console.log(`Failed to send ${stringifiedMessage}`);
                }
            }, 300);
        });
    }
});
There was a bug in Channel#assertQueue in AMQP.Node which just got fixed, see https://github.com/squaremo/amqp.node/commit/3749c66b448875d2df374e6a89946c0bdd0cb918. The fix is on GitHub but not in npm just yet.