Here is the ws library:
https://github.com/websockets/ws/blob/master/lib/WebSocket.js
How can I use the send method so that I can detect when sending a message fails? I have tried a callback and try/catch; maybe I am missing something.
This is what I am doing now; it does send the message:
BulletinSenderHelper.prototype.sendMessage = function(bulletin, device) {
  var message = JSON.stringify({
    action: 'bulletin:add',
    data: bulletin.data
  });
  if (device.is_active) {
    logger.debug('Sending message to %s', device.id);
    // device.conn is the ws connection; even though I check whether the device
    // is active, the send sometimes fails and I need to detect that.
    device.conn.send(message);
  } else {
    logger.debug('Client %s is inactive, queuing bulletin', device.id);
    this.queueBulletin(device, bulletin);
  }
};
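For reference, ws's send() accepts an optional callback that is invoked with an error when the data could not be written out, and the socket exposes a readyState you can check before sending. A minimal sketch along those lines, reusing the queueBulletin fallback from above (the exact recovery action is up to you):

var WebSocket = require('ws');

BulletinSenderHelper.prototype.sendMessage = function(bulletin, device) {
  var self = this;
  var message = JSON.stringify({
    action: 'bulletin:add',
    data: bulletin.data
  });

  // Treat anything that is not an OPEN socket as a failure up front.
  if (!device.is_active || device.conn.readyState !== WebSocket.OPEN) {
    logger.debug('Client %s is not ready, queuing bulletin', device.id);
    return self.queueBulletin(device, bulletin);
  }

  logger.debug('Sending message to %s', device.id);
  device.conn.send(message, function(err) {
    // The callback fires once the data has been written out; err is set when it was not.
    if (err) {
      logger.debug('Send to %s failed (%s), queuing bulletin', device.id, err.message);
      self.queueBulletin(device, bulletin);
    }
  });
};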
I'm getting this error from the RabbitMQ server:
Channel closed by server: 406 (PRECONDITION-FAILED) with message "PRECONDITION_FAILED - unknown delivery tag 80"
This happens because the connection is lost during the consumer task, and at the end, when the message is acked/nacked, I get this error because I cannot ack a message on a different channel than the one it was delivered on.
Here is the code for the RabbitMQ connection:
async connect({ prefetch = 1, queueName }) {
  this.queueName = queueName;
  console.log(`[AMQP][${this.queueName}] | connecting`);
  return queue
    .connect(this.config.rabbitmq.connstring)
    .then(conn => {
      conn.once('error', err => {
        this.channel = null;
        if (err.message !== 'Connection closing') {
          console.error(
            `[AMQP][${this.queueName}] (evt:error) | ${err.message}`,
          );
        }
      });
      conn.once('close', () => {
        this.channel = null;
        console.error(
          `[AMQP][${this.queueName}] (evt:close) | reconnecting`,
        );
        this.connect({ prefetch, queueName: this.queueName });
      });
      return conn.createChannel();
    })
    .then(ch => {
      console.log(`[AMQP-channel][${this.queueName}] created`);
      ch.on('error', err => {
        console.error(
          `[AMQP-ch][${this.queueName}] (evt:error) | ${err.message}`,
        );
      });
      ch.on('close', () => {
        console.error(`[AMQP-ch][${this.queueName}] (evt:close)`);
      });
      this.channel = ch;
      return this.channel;
    })
    .then(ch => {
      return this.channel.prefetch(prefetch);
    })
    .then(ch => {
      return this.channel.assertQueue(this.queueName);
    })
    .then(async ch => {
      while (this.buffer.length > 0) {
        const request = this.buffer.pop();
        await request();
      }
      return this.channel;
    })
    .catch(error => {
      console.error(error);
      console.log(`[AMQP][${this.queueName}] reconnecting in 1s`);
      return this._delay(1000).then(() =>
        this.connect({ prefetch, queueName: this.queueName }),
      );
    });
}
async ack(msg) {
  try {
    if (this.channel) {
      console.log(`[AMQP][${this.queueName}] ack`);
      await this.channel.ack(msg);
    } else {
      console.log(`[AMQP][${this.queueName}] ack (buffer)`);
      this.buffer.push(() => {
        this.ack(msg);
      });
    }
  } catch (e) {
    console.error(`[AMQP][${this.queueName}] ack error: ${e.message}`);
  }
}
As you can see, after the connection is established a channel is created; when a connection issue occurs, the channel is set to NULL and after 1 second the connection is retried, recreating a new channel.
To manage the offline period I'm using a buffer that collects all the acks issued while the channel was NULL; after the connection is re-established I drain the buffer.
So basically I have to find a way to send an ACK after the connection is lost or the channel is closed for whatever reason.
Thanks for any help
You cannot acknowledge a message once the channel is closed (whatever the reason). The broker will automatically re-deliver the same message to another consumer.
This is well documented in the RabbitMQ message confirmation section:
When Consumers Fail or Lose Connection: Automatic Requeueing
When manual acknowledgements are used, any delivery (message) that was not acked is automatically requeued when the channel (or connection) on which the delivery happened is closed. This includes TCP connection loss by clients, consumer application (process) failures, and channel-level protocol exceptions (covered below).
...
Due to this behavior, consumers must be prepared to handle redeliveries and otherwise be implemented with idempotence in mind. Redeliveries will have a special boolean property, redeliver, set to true by RabbitMQ. For first time deliveries it will be set to false. Note that a consumer can receive a message that was previously delivered to another consumer.
As the documentation suggests, you need to handle such issues on the consumer side by implementing a message idempotency design pattern. In other words, your architecture should be ready to deal with message re-delivery due to errors.
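A minimal sketch of what idempotent consumption could look like with amqplib, assuming each message body carries a unique id (messageId here is a placeholder for whatever unique key your messages have) and that the processed-id store and doWork are yours:

const processedIds = new Set(); // in production, back this with a DB or Redis

async function onMessage(channel, msg) {
  const payload = JSON.parse(msg.content.toString());

  if (processedIds.has(payload.messageId)) {
    // A redelivered duplicate: acknowledge it and skip the work.
    channel.ack(msg);
    return;
  }

  await doWork(payload);             // your business logic
  processedIds.add(payload.messageId);
  channel.ack(msg);                  // always ack on the channel the message arrived on
}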
Alternatively, you can disable message acknowledgements and get an "at-most-once delivery" type of pattern. This implies that in case of errors you will have to deal with message loss.
Further reading on the matter:
https://bravenewgeek.com/you-cannot-have-exactly-once-delivery/
And the follow-up once Kafka introduced its new semantics:
https://bravenewgeek.com/you-cannot-have-exactly-once-delivery-redux/
There is no way to send an ACK if the connection is dropped or broken, because acknowledgements happen over that connection at the socket level, and once it is closed there is no way to recreate it with the same socket.
When the connection drops, the message remains unacknowledged, so another consumer can process it, or the disconnected consumer will process it again when it reconnects.
In my opinion you are trying to solve a problem that is caused not by RabbitMQ but by the underlying socket implementation.
You could solve this by not managing a message buffer at all and instead taking advantage of RabbitMQ's behaviour of re-delivering unprocessed messages as soon as your consumer connects again.
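A minimal sketch of that simplification, based on the ack method from the question: the buffering branch goes away, because anything that was not acked will simply be redelivered after the reconnect:

async ack(msg) {
  try {
    if (this.channel) {
      await this.channel.ack(msg);
    } else {
      // Channel is gone: do nothing. The broker requeues the unacked
      // message and redelivers it once the consumer reconnects.
      console.log(`[AMQP][${this.queueName}] channel closed, relying on redelivery`);
    }
  } catch (e) {
    console.error(`[AMQP][${this.queueName}] ack error: ${e.message}`);
  }
}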
I have millions of messages in a queue, and the first ten million or so are irrelevant. Each message has a sequential ActionId, so ideally I can just ignore, or better yet delete, anything with an ActionId < 10000000. What I have so far:
let azure = require("azure");
function processMessage(sb, message) {
  // Deserialize the JSON body into an object representing the ActionRecorded event
  var actionRecorded = JSON.parse(message.body);
  console.log(`processing id: ${actionRecorded.ActionId} from ${actionRecorded.ActionTaken.ActionTakenDate}`);
  if (actionRecorded.ActionId < 10000000) {
    // When done, delete the message from the queue
    console.log(`Deleting message: ${message.brokerProperties.MessageId} with ActionId: ${actionRecorded.ActionId}`);
    sb.deleteMessage(message, function(deleteError, response) {
      if (deleteError) {
        console.log("Error deleting message: " + message.brokerProperties.MessageId);
      }
    });
  }
  // immediately check for another message
  checkForMessages(sb);
}
function checkForMessages(sb) {
  // Checking for messages
  sb.receiveQueueMessage("my-queue-name", { isPeekLock: true }, function(receiveError, message) {
    if (receiveError && receiveError === "No messages to receive") {
      console.log("No messages left in queue");
      return;
    } else if (receiveError) {
      console.log("Receive error: " + receiveError);
    } else {
      processMessage(sb, message);
    }
  });
}
let connectionString = "Endpoint=sb://<myhub>.servicebus.windows.net/;SharedAccessKeyName=KEYNAME;SharedAccessKey=[mykey]"
let serviceBusService = azure.createServiceBusService(connectionString);
checkForMessages(serviceBusService);
I've tried looking at the docs for withFilter but it doesn't seem like that applies to queues.
I don't have access to create or modify the underlying queue aside from the operations mentioned above since the queue is provided by a client.
Can I either:
- Filter my results that I get from the queue
- speed up the queue processing somehow?
Filter my results that I get from the queue
As you found, filters as a feature are only applicable to Topics & Subscriptions.
speed up the queue processing somehow
If you were to use the #azure/service-bus package which is the newer, faster library to work with Service Bus, you could receive the messages in ReceiveAndDelete mode until you reach the message with ActionId 9999999, close that receiver and then create a new receiver in PeekLock mode. For more on these receive modes, see https://learn.microsoft.com/en-us/azure/service-bus-messaging/message-transfers-locks-settlement#settling-receive-operations
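A rough sketch of that approach with @azure/service-bus (v7-style API; the queue name, batch size and ActionId threshold come from the question, everything else is illustrative):

const { ServiceBusClient } = require("@azure/service-bus");

async function skipIrrelevantMessages(connectionString, queueName) {
  const client = new ServiceBusClient(connectionString);

  // ReceiveAndDelete removes messages from the queue as soon as they are received,
  // which is much faster than peek-lock plus an explicit delete per message.
  const fastReceiver = client.createReceiver(queueName, { receiveMode: "receiveAndDelete" });

  let reachedRelevant = false;
  while (!reachedRelevant) {
    const messages = await fastReceiver.receiveMessages(100, { maxWaitTimeInMs: 5000 });
    if (messages.length === 0) break; // queue drained

    for (const message of messages) {
      const body = typeof message.body === "string" ? JSON.parse(message.body) : message.body;
      if (body.ActionId >= 10000000) {
        // Note: this message (and the rest of this batch) has already been removed
        // from the queue in this mode, so handle them here before switching receivers.
        reachedRelevant = true;
        break;
      }
    }
  }
  await fastReceiver.close();

  // From here on, use PeekLock so the relevant messages can be settled safely.
  const receiver = client.createReceiver(queueName, { receiveMode: "peekLock" });
  // ... receive, process, then await receiver.completeMessage(message);
}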
Is it possible to send an event to Node.js without any data?
Here's what I am trying to do:
Client:
socket.emit('logged out');
Server:
socket.on('logged out', function() {
  console.log('User is logged out');
  delUser();
  sendUsersOnline();
});
The client side is definitely being run, but the server-side handler never fires. I'm not sure why. Could it be because I'm not sending any data?
EDIT:
function delUser()
{
  for (i in usersOnline) {
    var user = usersOnline[i];
    if (user.socket_id == socket.id) {
      delete usersOnline[i];
      usersTyping.splice(usersTyping.indexOf(myUser.id), 1);
      console.log('Disconnected: ' + myUser.display_name + '(' + myUser.id + ')');
    }
  }
}
function sendUsersOnline()
{
  io.emit('users online', usersOnline);
}
According to the Socket.IO 2.0.3 docs:
socket.emit(eventName[, ...args][, ack])
socket.emit('hello', 'world');
socket.emit('with-binary', 1, '2', { 3:'4', 5: new Buffer(6) });
Emits an event to the socket identified by the string name. Any other
parameters can be included. All serializable datastructures are
supported, including Buffer.
The ack argument is optional and will be called with the server
answer.
It says that other parameters can be included, but they are not required. So you can definitely send/emit an event without any data from the server or client, like this:
socket.emit('logged out');
And you can receive an event without data on the server or client like this:
socket.on('logged out', function() {
  // Code to execute in response to this event
});
I recently used this kind of event to send signals peer-to-peer without any accompanying data, and it works perfectly. Basically one peer (client) sends a signal to the server, and the server then emits that signal to the other peers (clients) using broadcast.
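A minimal sketch of that relay on the server side, assuming a standard Socket.IO setup where io is the server instance:

io.on('connection', function(socket) {
  socket.on('logged out', function() {
    // No payload needed; the event name itself is the signal.
    // Forward it to every other connected client.
    socket.broadcast.emit('logged out');
  });
});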
I am using the amqplib node module and following the hello world send/receive tutorial.
https://github.com/squaremo/amqp.node/tree/master/examples/tutorials
My receivers/workers take that message and perform a CPU intensive task in the background, so I can only process about 5 messages at once.
What is the best way to control the number of messages that are being accepted by the receiver?
Code sample:
var amqp = require('amqplib');
amqp.connect('amqp://localhost').then(function(conn) {
  process.once('SIGINT', function() { conn.close(); });
  return conn.createChannel().then(function(ch) {
    var ok = ch.assertQueue('hello', {durable: false});
    ok = ok.then(function(_qok) {
      return ch.consume('hello', function(msg) {
        console.log(" [x] Received '%s'", msg.content.toString());
      }, {noAck: true});
    });
    return ok.then(function(_consumeOk) {
      console.log(' [*] Waiting for messages. To exit press CTRL+C');
    });
  });
}).then(null, console.warn);
You need to set the quality of service (QoS) on the model. Here is how you would do that in C#:
var _model = rabbitConnection.CreateModel();
// Configure the quality of service for the model. Below is what each setting means.
// BasicQos(0 = "Don't send me a new message until I've finished", _fetchSize = "Send me N messages at a time", false = "Apply to this model only")
_model.BasicQos(0, _fetchSize, false);
The QoS works together with the ack process: until you ack a message, the broker will only send you N (_fetchSize) messages at a time. I think you'll have to set noAck: false in your code to get this working.
Good luck!
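The equivalent in amqplib (the library used in the question) is channel.prefetch(); combined with noAck: false and an explicit ack once the work is done, the broker will never deliver more than N unacknowledged messages. A rough sketch, where doCpuIntensiveWork is a placeholder for your own worker function:

var amqp = require('amqplib');

amqp.connect('amqp://localhost').then(function(conn) {
  process.once('SIGINT', function() { conn.close(); });
  return conn.createChannel().then(function(ch) {
    return ch.assertQueue('hello', {durable: false})
      .then(function() {
        // Deliver at most 5 unacknowledged messages to this consumer at a time.
        return ch.prefetch(5);
      })
      .then(function() {
        return ch.consume('hello', function(msg) {
          doCpuIntensiveWork(msg.content).then(function() {
            ch.ack(msg); // acking frees a prefetch slot for the next message
          });
        }, {noAck: false});
      });
  });
}).then(null, console.warn);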
I'm receiving the same message on each line event, even though I get one event per message.
Anyone know why? It seems straightforward.
var rli = require("readline").createInterface({
input: socket,
output: socket,
terminal: false
});
rli.on("line", handleServerMessage);
function handleServerMessage(msg) {
console.log("Received msg from from server. msg=", msg);
}