I have a question about an MQTT broker. My code is below, and it works with a Mosca server.
var mqtt = require('mqtt');
var scanf = require('scanf');
var client = mqtt.connect('mqtt://localhost');

client.on('message', function (topic, message) {
  // message is Buffer
  console.log(message.toString())
  //client.end()
})

client.on('connect', function () {
  setInterval(function() {
    console.log('Please scan data');
    var data = scanf('%s');
    console.log('Message Sent');
    while (data == 0) {
      client.subscribe('/sensors/#');
      break;
    }
    if (data == 1) {
      client.unsubscribe("/sensors/#");
    }
  }, 500);
});
I am trying to receive sensor data after entering 0, but when I enter 0 I get all of the data published up to that point. The Arduino IDE keeps sending data every 500 ms, but I only want to receive data after I enter 0.
There are a number of problems with this.
Firstly, scanf('%s') will block until Enter is pressed, which is not what you want inside a setInterval() callback.
Next, your while loop doesn't do what you think it does.
You should only call client.subscribe('/sensors/#') once; this tells the client to inform the broker that this client should be told about all messages matching the topic "/sensors/#". After that, the MQTT client will receive all published messages until client.unsubscribe('/sensors/#') is called.
Also, since scanf('%s') is only called once, data never changes inside the loop, which means the loop would either run forever or never run at all depending on what key was pressed.
What you want is something like this:
var mqtt = require('mqtt');
var scanf = require('scanf');

var data = "0";
var client = mqtt.connect('mqtt://localhost');

client.on('message', function (topic, message) {
  if (data == "1") {
    console.log(message.toString())
  }
})

client.on('connect', function () {
  client.subscribe('/sensors/#');
});

do {
  data = scanf('%s');
} while (true)
This will start and subscribe to the topic, then wait for you to enter 1. After that it will print every matching message; if you then enter 0, it will stop printing values until you enter 1 again.
P.S. Good MQTT topic design says that topics should not start with a leading '/'.
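For example, the subscription above would then become:
client.subscribe('sensors/#');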
In my node app,
socket.on('test', function (req) {
  controller.test(socket, req, 'test');
})
This is how I store all users' socket connections on the server:
var userSockets = []; // Used to store all connected users' sockets.
userSockets[user.id] = socket; // Storing the whole socket object.
// E.g.: userSockets[14] = socket;
Function to get all socket data
getUserSocket() {
  return userSockets;
}
Now I need to emit to a specific socket. I have tried this, but I got an error:
let allUserSocketListData = databaseHelper.getUserSocket();
allUserSocketListData[result.data[0].id].emit('response', data);
// E.g.: allUserSocketListData[14].emit('response', data);
Error:
.emit() is not a function.
Update
I have one function in which I'm storing all users' socket data.
validateUser(user, socket) {
  ... // some of my code
  userSockets[user.id] = socket;
}
Namespaces and rooms were built for this specific reason, but when you need something handy without extra code, sending emits directly can do the job (see the room-based sketch at the end of this answer).
Here's a quick approach:
1: Your connection event should store the socket ID and not the whole object
let socketIds = []
socketServer.on("connection", (socket) => {
  socketIds.push(socket.id)
  socket.emit("message", "hey")
})
2: Now, if you want to send something to the first client only, for example, you should first check that it's still connected and then proceed with the emit.
if (socketServer.sockets.connected.hasOwnProperty(socketIds[0])) {
  socketServer.sockets.connected[socketIds[0]].emit("message", "hey again")
} else {
  console.error("Error: Wrong Id")
}
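For completeness, here is a minimal sketch of the room-based approach mentioned above, assuming Socket.IO; the "login" event and user shape are illustrative, not from the question. Joining a room named after the user ID lets you emit to that user without keeping your own registry:
io.on("connection", (socket) => {
  // Assumption: the client identifies itself somehow, e.g. via an auth handshake event.
  socket.on("login", (user) => {
    socket.join(String(user.id)) // one room per user ID
  })
})

// Later, anywhere you have the io instance:
io.to(String(userId)).emit("response", data) // emits to every socket in that user's room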
I have a Kafka topic that I want to consume with a Node app. The Node app must process the messages from the topic in sequence, one by one, not many at the same time.
I tried this kind of code, but it is not doing what I want. When there are messages waiting in the topic and this code is started, the 'message' event gets triggered immediately for all of the messages. The first message acquires the mutex lock first, but the rest of the messages are processed in random order.
var mutex = require('node-mutex')();
var crypto = require('crypto');
var mutexToken = crypto.randomBytes(64).toString('hex');

var kafka = require('kafka-node');
var Consumer = kafka.Consumer;
var client = new kafka.Client('localhost:2181');
var consumer = new Consumer(
  client,
  [
    { topic: 'my_topic' }
  ]
);

consumer.on('message', function(message) {
  console.log("new message")
  mutex
    .lock(mutexToken)
    .then(function(unlock) {
      console.log(message);
      unlock();
    });
});
Is it possible to consume the messages one by one, synchronously? Maybe with some other library?
I believe you can control the message offset directly by explicitly disabling the autoCommit feature.
Here is the link to the consumer documentation:
https://www.npmjs.com/package/kafka-node#highlevelconsumer
Here are links to examples with autoCommit turned off:
https://github.com/SOHU-Co/kafka-node/blob/master/example/consumer.js
https://github.com/SOHU-Co/kafka-node/blob/master/example/offset.js
I have never needed to disable the autoCommit feature, so I can't speak to the implementation.
From the test code it looks like it should be something along these lines:
var Offset = kafka.Offset;
var offset = new Offset(client);
// EXISTS_TOPIC_2, noop, offsetOutOfRange and done are helpers from the test harness
var topics = [ { topic: EXISTS_TOPIC_2 } ];
var options = { autoCommit: false, groupId: '_groupId_1_test' };
var consumer = new Consumer(client, topics, options);
var count = 0;

consumer.on('error', noop);
consumer.on('offsetOutOfRange', function (topic) {
  offsetOutOfRange(topic, this);
});
consumer.on('message', function (message) {
  message.topic.should.equal(EXISTS_TOPIC_2);
  message.value.should.equal('hello kafka');
  message.partition.should.equal(0);
  offset.commit('_groupId_1_test', [message], function (err) {
    if (count++ === 0) done(err);
  });
});
OK, I looked at the API a little more and thought this might be an angle worth investigating for you:
Consumer.prototype.pauseTopics = function (topics) {
  if (!this.pausedPayloads) this.pausedPayloads = [];
  pauseOrResume(this.payloads, this.pausedPayloads, topics);
};

Consumer.prototype.resumeTopics = function (topics) {
  if (!this.pausedPayloads) this.pausedPayloads = [];
  var reFetch = !this.payloads.length;
  pauseOrResume(this.pausedPayloads, this.payloads, topics);
  reFetch = reFetch && this.payloads.length;
  if (reFetch) this.fetch();
};
From the documentation:
pause()
Pause the consumer. Calling pause does not automatically stop messages
from being emitted. This is because pause just stops the kafka
consumer fetch loop. Each iteration of the fetch loop can obtain a
batch of messages (limited by fetchMaxBytes).
So if a fetch only returns one message (perhaps your messages are small enough that a max fetch is only one), then pause will stop the next fetch from happening. But if a fetch returned multiple messages, pause will not prevent more than one message from being emitted.
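If you wanted to experiment with that, fetchMaxBytes is set in the consumer options; the value below is illustrative and must still be at least the size of your largest message, or that message won't be fetchable at all:
var options = {
  autoCommit: false,
  groupId: '_groupId_1_test',
  fetchMaxBytes: 256 // illustrative: small enough that each fetch returns very few messages
};
var consumer = new Consumer(client, topics, options);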
I think to be 100% certain you would need to write the logic to handle the messages synchronously. Perhaps emit messages into a queue, and process off of the queue?
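A minimal sketch of that queue idea, assuming kafka-node's pause() and resume(); the handleMessage worker is hypothetical, standing in for whatever async processing you do per message:
var kafka = require('kafka-node');
var client = new kafka.Client('localhost:2181');
var consumer = new kafka.Consumer(client, [{ topic: 'my_topic' }], { autoCommit: false });

var queue = [];         // messages waiting to be processed
var processing = false; // true while one message is being handled

consumer.on('message', function (message) {
  queue.push(message);
  consumer.pause(); // stop the fetch loop while we drain the queue
  processNext();
});

function processNext() {
  if (processing) return;
  var message = queue.shift();
  if (!message) {
    consumer.resume(); // queue drained; fetch more
    return;
  }
  processing = true;
  handleMessage(message, function () { // hypothetical async worker
    processing = false;
    processNext(); // strictly one at a time, in arrival order
  });
}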
I am trying to send data from a serial device to web clients. I am using a serial-to-network proxy, ser2net, to make the data available to a server that acts on the data and sends a manipulated version of it to web clients. The clients specify the location of the ser2net host and port. The core of this action is coded in Node.js as shown here:
function getDataStream(socket, dataSourcePort, host) {
  // delimiter, buffer, bufferSendCommand, gotALine and dataConnection
  // are assumed to be defined at module scope.
  var dataStream = net.createConnection(dataSourcePort, host),
      line = "",
      dataLine = "";

  dataStream.on('error', function(error) {
    socket.emit('error', {message: "Source not found on host:" + host + " port:" + dataSourcePort});
    console.log(error);
  });

  dataStream.on('connect', function() {
    socket.emit('connected', {message: "Data Source Found"});
  });

  dataStream.on('close', function() {
    console.log("Close socket");
  });

  dataStream.on('end', function() {
    console.log('socket ended');
    dataConnection.emit('lost', {connectInfo: {host: host, port: dataSourcePort}});
  });

  dataStream.on('data', function(data) {
    // Collect a line from the host
    line += data.toString();
    // Split collected data by delimiter
    line.split(delimiter).forEach(function (part, i, array) {
      if (i !== array.length - 1) { // Fully delimited line.
        // Push onto the buffer and emit when bufferSendCommand is present
        dataLine = part.trim();
        buffer.push(part.trim());
        if (part.substring(0, bufferSendCommand.length) == bufferSendCommand) {
          gotALine.emit('new', buffer);
          buffer = [];
        }
      } else {
        // Last split part might be partial. We can't announce it just yet.
        line = part;
      }
    });
  });

  return dataStream;
}
io.sockets.on('connection', function(socket) {
  var stream = getDataStream(socket, dataSourcePort, host);

  // Dispense incoming data from the data server
  gotALine.on('new', function(buffer) {
    socket.emit('feed', {feedLines: buffer});
  });

  dataConnection.on('lost', function(connectInfo) {
    setTimeout(function() {
      console.log("Trying --- to reconnect ");
      stream = getDataStream(socket, connectInfo.port, connectInfo.host);
    }, 5000);
  });

  // Handle client request to change stream
  socket.on('message', function(data) {
    var clientMessage = JSON.parse(data);
    if ('connectString' in clientMessage
        && clientMessage.connectString.dataHost !== ''
        && clientMessage.connectString.dataPort !== '') {
      stream.destroy();
      stream = getDataStream(socket,
                             clientMessage.connectString.dataPort,
                             clientMessage.connectString.dataHost);
    }
  });
});
This works well enough until the serial device drops off and ser2net stops sending data. My attempt to catch the end of the socket and reconnect is not working: the event gets emitted properly, but the setTimeout only fires once. I would like to find a way to keep trying to reconnect while sending a message to the client informing it of the retry attempts. I am a Node.js newbie and this may not be the best way to do this. Any suggestions would be appreciated.
OK, I think I figured it out. In the dataStream.on('data', ...) handler I added a setTimeout:
clearTimeout(connectionMonitor);
connectionMonitor = setTimeout(function(){doReconnect(socket);}, someThresholdTime);
The timeout executes if data stops coming in, as it is repeatedly cleared each time data comes in. The doReconnect function keeps trying to connect and sends a message to the client saying something bad is going on.
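For reference, a minimal sketch of what doReconnect might look like under this scheme; the function body is an assumption, only the watchdog pattern above comes from the post:
function doReconnect(socket) {
  socket.emit('error', {message: "Data stopped; retrying host:" + host + " port:" + dataSourcePort});
  var stream = getDataStream(socket, dataSourcePort, host);
  stream.on('error', function() {
    // Still unreachable: schedule another attempt and keep the client informed.
    setTimeout(function() { doReconnect(socket); }, 5000);
  });
}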
I am trying to implement a small chat service using flapjax. I use an EventStream to get all the clients that connect to the server, and when broadcasting a message (in the 'message' handler) I map over this EventStream with a function that emits the message to the current client.
// Event stream yielding received clients
var clientReceiverE = receiverE();
// Event stream collecting all the clients
var clientsE = clientReceiverE.collectE([], function (client, clients) { return clients.concat([client]); });

socket.on('connection', function(client) {
  clientReceiverE.sendEvent(client);
  for (i = 0; i < chatMessages.length; i++) {
    client.emit('message', chatMessages[i]);
  }
  client.on('message', function(message) {
    chatMessages.push(message);
    //for (i = 0; i < clients.length; i++) {
    //  clients[i].emit('message', message);
    //}
    mapE(clientReceiverE, function(client) { console.log(client); client.emit('message', message); return client; });
  });
  client.on('nickname', function(name) {
  });
});
The registering of the clients on the event stream succeeds with this code, but the mapE doesn't result in a loop over all the clients. Does anybody know what is wrong here?
If you still haven't guessed :) I think it's because mapE doesn't produce any action by itself; mapE only creates and returns another EventStream which behaves like the given source, but with values modified by the given function.
You should not be using mapE like that. In your code you are attempting to recreate the mapE event bindings with each client.on('message', ...).
This problem is solved using a receiverE. This function is used to translate external event streams into flapjax EventStreams.
// Event stream yielding received clients
var clientReceiverE = receiverE();
// Event stream collecting all the clients
var clientsE = clientReceiverE.collectE([], function (client, clients) { return clients.concat([client]); });
// Turn the event stream into a behaviour (event + value)
var clientsB = clientsE.startsWith(undefined);

var messagesE = receiverE();
messagesE.mapE(function(messagePacket) {
  var clients = clientsB.valueNow(); // Grab current value of client list behaviour
  if (clients == undefined) {
    return;
  }
  var from = messagePacket.client;
  for (var index in clients) {
    clients[index].emit('message', messagePacket.message);
    console.log(messagePacket.message);
  }
});

socket.on('connection', function(client) {
  clientReceiverE.sendEvent(client);
  client.on('message', function(message) {
    messagesE.sendEvent({client: client, message: message});
  });
});
The difference is this: the flapjax tree is wired up once, isolated from the WebSocket event code, and there is no shared state between them.
I'm getting a weird result when writing to a socket. I wrote a simple experiment with a client and a server:
server.js
var net = require('net');
net.createServer(function (connection) {
  connection.on('data', function (data) {
    console.log('data: ' + data);
  });
}).listen(1337);
client.js
var net = require('net');
var client = net.connect({port: 1337}, function () {
  var i = 0;
  function send() {
    client.write('a');
    if (++i < 100) {
      process.nextTick(send);
    } else {
      client.end();
    }
  }
  send();
});
I expected the server to show 100 lines of data: a, but I ended up getting a smaller number of data: aaaaaaa lines. There's socket.setNoDelay(), which seems to be what I want, but it doesn't seem to have any effect.
What am I missing?
Thanks a lot,
TCP only sends exactly the bytes you write to the socket. It is a byte stream, so your writes will not be separated into messages; that's up to you. If you would like to get 100 lines of a, then you would have to define 100 separate messages and choose a delimiter for them. Usually people delimit messages sent over a TCP socket with \r\n.
So you would need to change your server to
var net = require('net');
net.createServer(function (connection) {
  connection.on('data', function (buffer) {
    var data = buffer.toString();
    if (data.indexOf('\r\n') > -1) { // If there's at least one complete line in the buffer
      var lines = data.split('\r\n'); // Split the lines
      lines.forEach(function (line) {
        if (line.length > 0) { // Skip the empty string left after a trailing \r\n
          console.log(line); // Print each line, in order
        }
      });
    } else {
      console.log(data); // If only one partial line came through, print it
    }
  });
}).listen(1337);
And your client to
var net = require('net');
var client = net.connect({port: 1337}, function () {
  var i = 0;
  function send() {
    client.write('a\r\n'); // Note the \r\n part. This is what lets you separate messages on the server
    if (++i < 100) {
      process.nextTick(send);
    } else {
      client.end();
    }
  }
  send();
});
And then I believe you would get 100 lines of a.
This module also provides a very interesting way to do it, and of course ZeroMQ would also shine in this because it already has a nice protocol that puts things in envelopes and sends them.
Also interesting, though out of the scope of your question: the writes you make on one side will not necessarily arrive as the same chunks on the server. If you change your send function to
function send() {
  if (++i < 100) {
    client.write('a' + i + '\r\n');
    process.nextTick(send);
  } else {
    client.end();
  }
}
you can see (with the original server, which logs once per 'data' event) how your writes get grouped into larger chunks rather than arriving one per event.
By "The TCP protocol only sends exactly the bytes you write in the socket" I mean that if you do socket.write("1"); socket.write("2"), you will receive "12" on the server, because that's what you wrote on the socket. You have to explicitly separate your messages by something so that the server can know when a message starts and when a message ends.
About how the bytes get grouped, you'll notice that if you remove the process.nextTick and write your client like this:
var net = require('net');
var client = net.connect({port: 1337}, function () {
  var i = 100;
  while (i--) {
    client.write('a' + i + '\r\n');
  }
});
you'll probably get the data in just one or two 'data' events on the server instead of 100 (at least, that's what I got), because without process.nextTick all the writes happen in one tick and TCP coalesces them into packets however it sees fit. Note that TCP still delivers the bytes in exactly the order you wrote them; what you don't control is how they are grouped into packets and 'data' events. You can read more about how TCP works on its Wikipedia page, and this video probably says more than what you need to hear, but it's good to understand everything you're working with.
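Given all that, a server that wants exactly one log line per message should buffer partial lines across 'data' events. A minimal sketch, using the same port and \r\n delimiter as above:
var net = require('net');
net.createServer(function (connection) {
  var pending = ''; // received bytes that don't yet end in a delimiter
  connection.on('data', function (chunk) {
    pending += chunk.toString();
    var lines = pending.split('\r\n');
    pending = lines.pop(); // the last piece may be an incomplete line; keep it for next time
    lines.forEach(function (line) {
      console.log('data: ' + line); // exactly one complete message per log line, in order
    });
  });
}).listen(1337);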