I'm using amqp.node for my RabbitMQ access in my Node code. I'm trying to use either the publish or sendToQueue methods to include some metadata with my published message (namely a timestamp and content type), using the options parameter.
But whatever I pass in options is completely ignored. I think I'm missing some formatting, or a field name, but I cannot find any reliable documentation (beyond what is provided here, which does not seem to do the job).
Below is my publish function code:
var publish = function(queueName, message) {
    let content;
    let options = {
        persistent: true,
        noAck: false,
        timestamp: Date.now(),
        contentEncoding: 'utf-8'
    };
    if (typeof message === 'object') {
        content = Buffer.from(JSON.stringify(message));
        options.contentType = 'application/json';
    }
    else if (typeof message === 'string') {
        content = Buffer.from(message);
        options.contentType = 'text/plain';
    }
    else { // message is already a buffer?
        content = message;
    }
    return Channel.sendToQueue(queueName, content, options); // Channel defined and opened elsewhere
};
What am I missing?
Update:
Turns out that if you use a ConfirmChannel, you must provide the callback function as the last parameter, or else the options object is ignored. So once I changed the code to the following, I started seeing the options correctly:
Channel.sendToQueue(queueName, content, options, (err, result) => {...});
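For reference, here's a minimal sketch of the ConfirmChannel flow (the queue name and message are illustrative): with a confirm channel, the broker acknowledges each publish, and the callback goes last, after the options object.
var amqplib = require('amqplib');

amqplib.connect('amqp://localhost')
    .then(function(conn) { return conn.createConfirmChannel(); })
    .then(function(confirmChannel) {
        confirmChannel.sendToQueue('test.q', Buffer.from('hello'),
            { persistent: true, contentType: 'text/plain', timestamp: Date.now() },
            function(err) {
                // invoked once the broker confirms (or nacks) the publish
                if (err) console.error('message was nacked', err);
                else console.log('message confirmed');
            });
    });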
Somehow, I can't seem to get your example publish to work, though I don't see anything particularly wrong with it.
But I was able to modify a version of my own amqplib intro code, and got it working with your options just fine.
Here is the complete code for my example:
// test.js file
var amqplib = require("amqplib");
var server = "amqp://test:password@localhost/test-app";

var connection, channel;

function reportError(err) {
    console.log("Error happened!! OH NOES!!!!");
    console.log(err.stack);
    process.exit(1);
}

function createChannel(conn) {
    console.log("creating channel");
    connection = conn;
    return connection.createChannel();
}

function sendMessage(ch) {
    channel = ch;
    console.log("sending message");

    var msg = process.argv[2];
    var message = Buffer.from(msg);

    var options = {
        persistent: true,
        noAck: false,
        timestamp: Date.now(),
        contentEncoding: "utf-8",
        contentType: "text/plain"
    };

    channel.sendToQueue("test.q", message, options);
    return channel.close();
}

console.log("connecting");
amqplib.connect(server)
    .then(createChannel)
    .then(sendMessage)
    .then(process.exit, reportError);
To run this, open a command line and do:
node test.js "example text message"
After running that, you'll see the message show up in your "test.q" queue (assuming you have that queue created) in your "test-app" vhost.
Here's a screenshot of the resulting message from the RMQ Management plugin:
side notes:
I recommend not using sendToQueue. As I say in my RabbitMQ Patterns email course / ebook:
It took a while for me to realize this, but I now see the "send to queue" feature of RabbitMQ as an anti-pattern.
Sure, it's built in to the library and protocol. And it's convenient, right? But that doesn't mean you should use it. It's one of those features that exists to make demos simple and to handle some specific scenarios. But generally speaking, "send to queue" is an anti-pattern.
When you're a message producer, you only care about sending the message to the right exchange with the right routing key. When you're a message consumer, you care about the message destination - the queue to which you are subscribed. A message may be sent to the same exchange, with the same routing key, every day, thousands of times per day. But, that doesn't mean it will arrive in the same queue every time.
As message consumers come online and go offline, they can create new queues and bindings and remove old queues and bindings. This perspective of message producers and consumers informs the nature of queues: postal boxes that can change when they need to.
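To make that concrete, here's a minimal sketch of the exchange-based alternative (the exchange name, queue name, and routing key are made up for illustration): the producer publishes to an exchange with a routing key, and bindings decide which queue(s) receive the message.
// assumes `channel` is an open amqplib channel, as in the example above
channel.assertExchange("app.events", "topic", { durable: true });
channel.assertQueue("test.q", { durable: true });
channel.bindQueue("test.q", "app.events", "message.created");

// the producer only knows the exchange and routing key, never the queue
channel.publish("app.events", "message.created", Buffer.from("hello"), {
    persistent: true,
    contentType: "text/plain"
});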
I also recommend not using amqplib directly. It's a great library, but its API is fairly low-level and it lacks a lot of usability features. Instead, look for a good library built on top of amqplib.
I prefer wascally, by LeanKit. It's a much easier abstraction on top of amqplib and provides a lot of great features and functionality.
Lastly, if you're struggling with other details in getting RMQ up and running with Node.js, designing your app to work with it, etc., check out my RabbitMQ For Devs course - it goes from zero to hero, fast. :)
This may help others: the key name to use for the content type is contentType in the JavaScript code, while the RabbitMQ web GUI uses content_type as the key name. The two contexts use different key names for the same option, so make sure to use the right one in the right place.
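A quick illustration of the two spellings (queue name is made up):
// in amqplib (JavaScript): camelCase
channel.sendToQueue("test.q", Buffer.from("{}"), { contentType: "application/json" });

// in the RabbitMQ management UI, the same property is entered as:
//   content_type = application/json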
Related
Okay, so I have a Node.js/Express app that has an endpoint which allows users to receive notifications by opening up a connection to said endpoint:
var practitionerStreams = [] // a list of all the streams opened by pract users to the backend

async function notificationEventsHandler(req, res) {
    const headers = {
        'Content-Type': 'text/event-stream',
        'Connection': 'keep-alive',
        'Cache-Control': 'no-cache'
    }
    const practEmail = req.headers.practemail
    console.log("PRACT EMAIL", practEmail)

    const data = await ApptNotificationData.findAll({
        where: {
            practEmail: practEmail
        }
    })
    //console.log("DATA", data)

    res.writeHead(200, headers)
    res.write(`data:${JSON.stringify(data)}\n\n`)

    // create a new stream
    const newPractStream = {
        practEmail: practEmail,
        res
    }
    // add the new stream to list of streams
    practitionerStreams.push(newPractStream)

    req.on('close', () => {
        console.log(`${practEmail} Connection closed`);
        // remove this client's stream (note: the callback parameter must not
        // shadow the outer practEmail, or the comparison is always false)
        practitionerStreams = practitionerStreams.filter(stream => stream.practEmail !== practEmail);
    });

    return res
}
async function sendApptNotification(newNotification, practEmail) {
    var updatedPractitionerStream = practitionerStreams.map((stream) => {
        // iterate through the array and find the stream that contains the pract email we want,
        // then write the new notification to that stream
        if (stream["practEmail"] == practEmail) {
            console.log("IF")
            stream.res.write(`data:${JSON.stringify(newNotification)}\n\n`)
            return stream
        }
        else {
            // if it doesn't contain the stream we want, leave it unchanged
            console.log("ELSE")
            return stream
        }
    })
    practitionerStreams = updatedPractitionerStream
}
Basically, when the user connects, the handler takes the response object (which will stay open), puts it in an object along with a unique email, and writes to it later in sendApptNotification.
But obviously this is slow for a full app. How exactly do I replace this with Redis? Would I still have a response object that I write to? Or would that be replaced with a Redis stream that I can subscribe to on the frontend? I also assume I would store all my streams in Redis as well.
Edit: from what examples I've seen, people are writing events from Redis to the response object.
Thank you in advance
If you want to use Redis Streams as a notification system, you can follow this official guide:
https://redis.com/blog/how-to-create-notification-services-with-redis-websockets-and-vue-js/
To get this data in real time you need to create a WebSocket connection. I'd rather point you to the official guide than write it all out here, because of the guide's quality: it's great for helping anyone understand how to build this, though you'll need to adapt it to your own situation, of course.
However, as I've said in the comments, I believe it's simpler to have your frontend poll an API endpoint like /api/v1/notifications with setInterval, making a request every 5 seconds for example. If you prefer a real-time notification system, I think you need to understand why you need it, so that you can change your system later if necessary. Basically, it's a trade-off you must weigh!
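A minimal sketch of that polling approach on the frontend (the endpoint path and the render function are illustrative):
setInterval(async () => {
    const response = await fetch('/api/v1/notifications');
    const notifications = await response.json();
    renderNotifications(notifications); // hypothetical function that updates the UI
}, 5000); // poll every 5 seconds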
For my example, imagine two tables in a relational database, one called Users and the second called Notifications.
The tables for this example:
UsersTable

id | name
---|-------
1  | andrew
2  | mark

NotificationTable

id | message  | userId | isRead
---|----------|--------|-------
1  | message1 | 1      | true
2  | message2 | 1      | false
3  | message3 | 2      | false
The endpoint in this example will return all cached notifications that haven't been read by the user. If the cache doesn't exist, it will fetch the data from the database, put it in the cache, and return it to the user. On the next API call, you'll get the result from the cache. There are some points left to complete in this example: the database query that fetches the notifications, and the cache expiration configuration. Another important thing: if you want the cached notifications to stay up to date, you need to create a middleware and trigger it in the parts of your code that notify the user, updating both the database and the cache. But I think you can complete these points.
const redis = require('redis');
const redisClient = redis.createClient();
redisClient.connect(); // node-redis v4+: connect before issuing commands

app.get('/notifications', async (request, response) => {
    const userId = request.user.id;

    const cacheResult = await redisClient.get(`user:${userId}:notifications`);
    if (cacheResult) return response.send(JSON.parse(cacheResult));

    const notifications = await getUserNotificationsFromDatabase(userId);
    // cache as JSON; illustrative 60-second TTL, tune expiration for your needs
    await redisClient.set(`user:${userId}:notifications`, JSON.stringify(notifications), { EX: 60 });
    response.send(notifications);
})
Besides that, there's another way: you can simply use only Redis, or only the database, to manage these notifications. Your relational database, with the correct indexes, will return results as fast as you expect. The main thing to think about is how many notifications you'll accumulate.
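A minimal sketch of the Redis-only variant (the key pattern and helper functions are made up for illustration), keeping each user's unread notifications in a Redis list:
// assumes the same connected node-redis v4 client as above
async function addNotification(userId, notification) {
    await redisClient.lPush(`user:${userId}:unread`, JSON.stringify(notification));
}

async function getUnreadNotifications(userId) {
    const entries = await redisClient.lRange(`user:${userId}:unread`, 0, -1);
    return entries.map((entry) => JSON.parse(entry));
}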
I have written a wrapper over node-imap (https://github.com/mscdex/node-imap). Currently I am using the SINCE flag to search for emails that arrive in a particular inbox. For each email notification I receive, I call the imap.search() method and then the imap.fetch() method. Is it possible to fetch the emails directly, without the imap.search() method?
Providing snippets from my current code.
self.imap.on("mail", function(id) {
    self.parseUnreadEmails(time)
});

MailListener.prototype.parseUnreadEmails = function(time) {
    var self = this;
    self.imap.search([["SINCE", time]], function(error, searchResults) {
        var fetch;
        if (error) {
            self.emit("error", error);
        } else {
            fetch = self.imap.fetch(searchResults, {
                bodies: '',
                markSeen: true
            });
        }
        // do some action
    });
};
If you are looking to monitor and fetch new mails arriving in a particular mailbox, there are several effective approaches; below are some ideas.
Using the IDLE command: using this command you can put a folder in the idle state and watch for EXISTS/EXPUNGE responses whenever new mails arrive or there are any flag changes. Do read this RFC. One constraint of the IDLE command is that you need a dedicated TCP connection per folder; multiple folders cannot be monitored over a single IDLE command.
Polling the STATUS (MESSAGES UIDNEXT) command at a reasonable interval. This will tell you the UIDNEXT value; if it has changed from the previous value, you should compute the difference and fetch those mails. Before proceeding, read the IMAP RFC carefully.
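Here's a minimal sketch of the UIDNEXT idea with node-imap, avoiding imap.search() by fetching a UID range directly (this assumes imap is an already-connected node-imap instance; the mailbox name and variable names are illustrative):
var lastUid = 0;

imap.openBox('INBOX', false, function(err, box) {
    if (err) throw err;
    lastUid = box.uidnext - 1; // every existing message has a UID at or below this

    imap.on('mail', function(numNewMsgs) {
        // fetch by UID range instead of searching; '*' means "highest existing UID"
        var f = imap.fetch((lastUid + 1) + ':*', { bodies: '', markSeen: true });
        f.on('message', function(msg) {
            msg.on('attributes', function(attrs) {
                if (attrs.uid > lastUid) lastUid = attrs.uid; // remember the newest UID seen
            });
            // handle msg.on('body', ...) here to read the message content
        });
        f.on('error', function(fetchErr) {
            console.error('fetch error', fetchErr);
        });
    });
});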
I'm using the redis-sentinel-client library to manage a connection to a Redis sentinel group. The issue I have is that upon connecting I need to process records which may or may not already be present in the Redis store.
As I have two clients (due to the fact that one is a subscriber), I am not sure of the best way to organise my event listeners so that I can guarantee both clients are ready before attempting any operations.
At the moment I have the following:
var sentinelSubscriberClient = RedisSentinel.createClient(opts);
var sentinelPublisherClient = RedisSentinel.createClient(opts);

sentinelSubscriberClient.on('ready', function redisSubscriberClientReady() {
    sentinelPublisherClient.removeAllListeners('ready');
    sentinelPublisherClient.on('ready', function () {
        supportedChannels.forEach(function (channel) {
            sentinelSubscriberClient.subscribe(channel);
        });
        // Includes reading + publishing via `sentinelPublisherClient`
        processUnprocessed();
    });
});
(there are also error listeners but I've removed them to make the code easier to read)
This current approach falls over if the publisher client emits ready before the subscriber client. My question is: how can I organise the event listeners so that I can safely call .subscribe() on the subscriber client and the various methods (.lrange(), .publish(), etc.) of the publisher client?
Thanks!
Simply move client creation into the ready callback function.
var sentinelSubscriberClient = RedisSentinel.createClient(opts);
var sentinelPublisherClient = null;

sentinelSubscriberClient.on('ready', function redisSubscriberClientReady() {
    sentinelPublisherClient = RedisSentinel.createClient(opts);
    sentinelPublisherClient.on('ready', function () {
        supportedChannels.forEach(function (channel) {
            sentinelSubscriberClient.subscribe(channel);
        });
        // Includes reading + publishing via `sentinelPublisherClient`
        processUnprocessed();
    });
});
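If you'd rather create both clients up front and let them connect in parallel, a small promise wrapper around the ready event also works. This is a sketch assuming each client emits ready once on success and error on failure:
function onceReady(client) {
    return new Promise(function (resolve, reject) {
        client.once('ready', resolve);
        client.once('error', reject);
    });
}

var subscriber = RedisSentinel.createClient(opts);
var publisher = RedisSentinel.createClient(opts);

Promise.all([onceReady(subscriber), onceReady(publisher)]).then(function () {
    supportedChannels.forEach(function (channel) {
        subscriber.subscribe(channel);
    });
    processUnprocessed(); // safe here: both clients are ready
});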
I can get a room's client list with this code in socket.io 0.9:
io.sockets.clients(roomName)
How can I do this in socket.io 1.0?
Consider this rather more complete answer linked in a comment above on the question: https://stackoverflow.com/a/24425207/1449799
The clients in a room can be found at
io.nsps[yourNamespace].adapter.rooms[roomName]
This is an associative array with keys that are socket ids. In our case, we wanted to know the number of clients in a room, so we did Object.keys(io.nsps[yourNamespace].adapter.rooms[roomName]).length
In case you haven't seen/used namespaces (like this guy [me]), you can learn about them here: http://socket.io/docs/rooms-and-namespaces/ (importantly: the default namespace is '/')
Updated (esp. for @Zettam):
Check out this repo to see this working: https://github.com/thegreatmichael/socket-io-clients
Using @ryan_Hdot's link, I made a small temporary function in my code, which avoids maintaining a patch. Here it is:
function getClient(roomId) {
    var res = [],
        room = io.sockets.adapter.rooms[roomId];
    if (room) {
        for (var id in room) {
            res.push(io.sockets.adapter.nsp.connected[id]);
        }
    }
    return res;
}
If using a namespace:
function getClient(ns, id) {
    return io.nsps[ns].adapter.rooms[id]
}
I use this as a temporary fix for io.sockets.clients(roomId), which becomes findClientsSocketByRoomId(roomId).
EDIT:
Most of the time it is worth avoiding this method if possible.
What I do now is that I usually put a client in its own room (i.e. in a room whose name is its client ID). I found the code more readable that way, and I don't have to rely on this workaround anymore.
Also, I haven't tested this with a Redis adapter.
If you have to, also see this related question if you are using namespaces.
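Regarding the room-per-client pattern above, a minimal sketch (the event name is made up; socket.io >= 1.x already joins each socket to a room named after its id automatically):
io.on('connection', function (socket) {
    // each socket is already in a room named socket.id, so no explicit join is needed;
    // later, message one specific client via its own room:
    io.to(socket.id).emit('private-message', { hello: 'world' });
});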
For those of you using namespaces, I made a function too that can handle different namespaces. It's quite similar to nha's answer.
function get_users_by_room(nsp, room) {
    var users = []
    for (var id in io.of(nsp).adapter.rooms[room]) {
        users.push(io.of(nsp).adapter.nsp.connected[id]);
    }
    return users;
}
As of at least 1.4.5, nha's method doesn't work anymore either, and there is still no public API for getting the clients in a room. Here is what works for me.
io.sockets.adapter.rooms[roomId] returns an object that has two properties, sockets and length. The first is another object that has socket IDs for keys and booleans as the values:
Room {
  sockets:
   { '/#vQh0q0gVKgtLGIQGAAAB': true,
     '/#p9Z7l6UeYwhBQkdoAAAD': true },
  length: 2 }
So my code to get clients looks like this:
var sioRoom = io.sockets.adapter.rooms[roomId];
if (sioRoom) {
    Object.keys(sioRoom.sockets).forEach(function (socketId) {
        console.log("sioRoom client socket Id: " + socketId);
    });
}
You can see this GitHub pull request for discussion on the topic; however, it seems that functionality has been stripped from the 1.0 pre-release candidate for Socket.IO.
I am writing a webapp, using express.js.
My webapp achieves the following:
User posts 100 json objects
Each json object is processed via a service call
Once the service call is completed, a session variable is incremented
On incrementation of the session variable, a server side event must be sent to the client to update the progress bar
How do I listen for a session variable change in order to trigger a server-sent event? Listening for a variable change is not the only solution I'm after; I need to send a server-sent event once each JSON object is processed. Any appropriate suggestion is welcome.
Edit (based on Alberto Zaccagni's comment)
My code looks like this:
function processRecords(cmRecords, requestObject, responseObject)
{
    for (var index = 0; index < cmRecords.length; index++)
    {
        post_options.body = cmRecords[index];
        request.post(post_options, function (err, res, body)
        {
            if (requestObject.session.processedcount)
                requestObject.session.processedcount = requestObject.session.processedcount + 1;
            else
                requestObject.session.processedcount = 1;

            if (err)
            {
                appLog.error('Error Occured %j', err);
            }
            else
            {
                appLog.debug('CMResponse: %j', body);
            }

            var percentage = (requestObject.session.processedcount / requestObject.session.totalCount) * 100;
            responseObject.set('Content-Type', 'text/event-stream');
            responseObject.json({ 'event': 'progress', 'data': percentage });
        });
    }
}
When the first record is updated, a server-sent event is triggered using the responseObject (the Express response object) and it works. When the second record is updated and I try triggering a server-sent event using the same responseObject, I get an error saying I cannot set headers on a response that has already been sent.
It's hard to know exactly what the situation is without seeing the routes/actions you have in your main application...
However, I believe the issue you are running into is that you are trying to send two sets of headers to the client (browser), which is not allowed. The reason this is not allowed is because the browser does not allow you to change the content type of a response after you have sent the initial response...as it uses that as an indicator of how to process the response you are sending it. You can't change either of these (or any other headers) after you have sent them to a client once (one request -> one response -> one set of headers back to the client). This prevents your server from appearing schizophrenic (by switching from a "200 Ok" response to a "400 Bad Request," for example).
In this case, on the initial request, you are telling the client "Hey, this was a valid request and here is my response (via the status of 200 which is either set elsewhere or being assumed by ExpressJS), and please keep the communication channel open so I can send you updates (by setting your content type to text/event-stream)".
As far as how to "fix" this, there are many options. When I've done this, I've used the pub/sub feature of Redis to act as the "pipe" that connects everything up. The flow has been like this (a minimal sketch follows the list below):
Some client sends a request to /your-event-stream-url
In this request, you set up your Redis subscriber. Anything that comes in on this subscription can be handled however you want. In your case, you want to "send some data down the pipe to the client in a JSON object with at least a data attribute." After you have set up this client, you just return a response of "200 Ok" and set the content type to "text/event-stream." Redis will take care of the rest.
Then, another request is made to another URL endpoint which accomplishes the task of "posting a JSON object" by hitting /your-endpoint-that-processes-json. (Note: obviously this request may be made by the same user/browser...but the application doesn't know/care about that)
In this action, you do the processing of their JSON data, increment your counters, or do whatever...and return a 200 response. However, one of the things you'd do in this action is "publish" a message on the Redis channel your subscribers from step #1 are listening to so the clients get the updates. Technically, this action does not need to return anything to the client, assuming the user will have some type of feedback based on the 200-status code or on the server-sent event that is sent down the pipe...
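Here is that sketch, assuming Express and node-redis v4; the URL paths come from the steps above, and the channel name 'progress' is made up:
const express = require('express');
const { createClient } = require('redis');

const app = express();
app.use(express.json());

// step 1: the event-stream endpoint; each client gets its own subscriber connection
app.get('/your-event-stream-url', async (req, res) => {
    res.writeHead(200, {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive'
    });

    const subscriber = createClient();
    await subscriber.connect();
    await subscriber.subscribe('progress', (message) => {
        res.write(`data: ${message}\n\n`); // forward each published message to the client
    });

    req.on('close', () => subscriber.quit()); // clean up when the client disconnects
});

// steps 2-3: the processing endpoint publishes an update for each JSON object handled
const publisher = createClient();
publisher.connect(); // fire-and-forget connect; awaiting it at startup is cleaner

app.post('/your-endpoint-that-processes-json', async (req, res) => {
    // ... process req.body and increment counters here ...
    await publisher.publish('progress', JSON.stringify({ data: 'one record processed' }));
    res.sendStatus(200);
});

app.listen(3000);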
A tangible example I can give you is this gist, which is part of this article. Note that the article is a couple years old at this point so some of the code may have to be tweaked a bit. Also note this is not guaranteed to be anything more than an example (ie: it has not been "load tested" or anything like that). However, it may help you get started.
I came up with a solution; please let me know if this is the right way to do it. Will this solution work across sessions?
Server-side code:
var events = require('events');
var progressEmitter = new events.EventEmitter();

exports.cleanseMatch = function(req, res)
{
    console.log('cleanseMatch Invoked');

    var progressTrigger = new events.EventEmitter();

    var id = '';
    var i = 1;
    id = setInterval(function() {
        req.session.percentage = (i / 10) * 100;
        i++;
        console.log('PCT is: ' + req.session.percentage);
        progressEmitter.emit('progress', req.session.percentage)
        if (i == 11) {
            req.session.percentage = 100;
            clearInterval(id);
            res.json({ 'data': 'test' });
        }
    }, 1000);
}

exports.progress = function(req, res)
{
    console.log('progress Invoked');
    // console.log('PCT is: ' + req.session.percentage);

    res.writeHead(200, { 'Content-Type': 'text/event-stream' });
    progressEmitter.on('progress', function(percentage) {
        console.log('progress event fired for: ' + percentage);
        res.write("event: progress\n");
        res.write("data: " + percentage + "\n\n");
    });
}
Client-side code:
var source = new EventSource('progress');
source.addEventListener('progress', function(e) {
    var percentage = JSON.parse(e.data);
    // update progress bar in client
    App.updateProgressBar(percentage);
}, false);