Connecting to Accumulo from NodeJS

I have been trying to connect to Accumulo from NodeJS through the Thrift proxy, but have been unsuccessful.
var thrift = require("thrift");
var AccumuloClient = require("./AccumuloProxy");
var transport = thrift.TFramedTransport;
var protocol = thrift.TBinaryProtocol;
var connection = thrift.createConnection("localhost", 42424, {
  transport: transport,
  protocol: protocol
});
var client = thrift.createClient(AccumuloClient, connection);
client.login("root", {'password': "password"});
When I try to log in, I get
org.apache.thrift.protocol.TProtocolException: Expected protocol id ffffff82 but got ffffff80
Is anyone able to help me out and give me an idea of what I'm doing wrong here?
UPDATE:
I modified the protocolFactory line in Accumulo's proxy.properties file and restarted the proxy.
protocolFactory=org.apache.thrift.protocol.TBinaryProtocol$Factory
I performed the same steps as above, but added a callback to the createClient call.
var login;
var client = thrift.createClient(AccumuloClient, connection,
  function(err, success) { login = success });
This populates the login variable. I then try to use that login variable to execute other functions:
client.listTables(login, function(a) { console.log(a) })
results in
{name: 'TApplicationException',
type: 6,
message: 'Internal error processing listTables'}
Trying to create a table
client.createTable(login, "testTable", 1, "", function(a) { console.log(a)})
results in
{name: 'AccumuloSecurityException',
msg: 'org.apache.accumulo.core.client.AccumuloSecurityException: Error SERIALIZATION_ERROR for user unknown - Unknown security exception'}
See answer below.

It turns out that the problem was in how the response back from Accumulo was handled. In the AccumuloProxy.js file, when the login result is received and read in AccumuloProxy_login_result.prototype.read, the success value is set with this.success = input.readString()
The readString() function takes the Buffer and calls toString() using the utf8 encoding, which resulted in characters showing up incorrectly.
I modified the AccumuloProxy_login_result.prototype.read function to set success with this.success = input.readBinary() so that a Buffer is returned. This Buffer can be passed to the other function calls and gets a correct result back from Accumulo instead of an exception.
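For reference, the relevant case inside the generated reader looks roughly like this with the one-line change applied (a sketch only; generated code differs between Thrift versions):
// In AccumuloProxy.js, inside AccumuloProxy_login_result.prototype.read:
case 0:
  if (ftype == Thrift.Type.STRING) {
    // was: this.success = input.readString(); // decodes the login token as UTF-8 and corrupts it
    this.success = input.readBinary(); // keep the raw Buffer so it round-trips intact
  } else {
    input.skip(ftype);
  }
  break;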
This was put in as an issue with Thrift here and has apparently been fixed in the master branch.

It seems that Accumulo uses the compact protocol, not the binary protocol. It also seems that there is currently no compact protocol support available for NodeJS.
Please have a look at this SO question as well. It deals with C#, but it can nevertheless be helpful. There are also some solutions out there that utilize RabbitMQ or other message brokers; see here.

Trouble Connecting to Google Cloud IoT via MQTT with Node.js

I'm trying to create an MQTT client that'll connect to Google Cloud IoT Core, but for some reason, it won't connect at all. Here's what I have so far:
const mqtt = require("mqtt");
const fs = require("fs");
const jwt = require("jsonwebtoken");
const projectId = "my-project"
const deviceId = "my-device"
const registryId = "my-registry"
const region = "us-central1"
const algorithm = "RS256"
const privateKeyFile = "./rsa_private.pem"
const mqttBridgeHostname = "mqtt.googleapis.com"
const mqttBridgePort = 8883
const messageType = "events"
//The mqttClientId is a unique string that identifies a particular device.
//For Google Cloud IoT Core, it must be the format below
const mqttClientId = `projects/${projectId}/locations/${region}/registries/${registryId}/devices/${deviceId}`
const mqttTopic = `/devices/${deviceId}/${messageType}`;
const createJwt = (projectId, privateKeyFile, algorithm) => {
  // Create a JWT to authenticate this device. The device will be disconnected
  // after the token expires, and will have to reconnect with a new token. The
  // audience field should always be set to the GCP project id.
  const token = {
    iat: parseInt(Date.now() / 1000),
    exp: parseInt(Date.now() / 1000) + 20 * 60, // 20 minutes
    aud: projectId,
  };
  const privateKey = fs.readFileSync(privateKeyFile);
  return jwt.sign(token, privateKey, {algorithm: algorithm});
};
//Username field is ignored in Cloud IoT Core, but it must be set to something
//Password field sends a JWT (JSON web token) to authorize the device
//mqtts protocol causes the library to connect using SSL, which is required for IoT Core
const connectionArgs = {
  host: mqttBridgeHostname,
  port: mqttBridgePort,
  clientId: mqttClientId,
  username: "unused",
  password: createJwt(projectId, privateKeyFile, algorithm),
  protocol: "mqtts",
  secureProtocol: "TLSv1_2_method"
}
const client = mqtt.connect(connectionArgs)
client.on("connect", (connected) => {
  console.log("Attempting to connect")
  if (!connected) {
    console.log("Client failed to connect")
  } else {
    console.log("Client is connected!")
  }
})
client.on("error", err => {
  console.log(err)
  setTimeout(() => {
    console.log('Terminating process')
    return process.kill(process.pid);
  }, 1000);
})
client.on("packetsend", (payload) => {
  console.log("Payload has been sent")
  return process.kill(process.pid)
})
client.on("packetreceive", packet => {
  console.log("Killing")
  //return process.kill(process.pid)
})
client.on("reconnect", () => {
  console.log("Attempting a reconnect")
  //return process.kill(process.pid)
})
client.on("close", () => {
  console.log("A disconnect occurred")
  //return process.kill(process.pid)
})
client.on("offline", () => {
  console.log("Client is offline")
  //return process.kill(process.pid)
})
I'm not getting any errors when I try to connect to the server; everything seems to authenticate properly, yet the client never connects to the Cloud and instead repeatedly tries to reconnect in an endless cycle (which is why I included code to kill the script). I tried going through the Google Cloud troubleshooting page, but nothing there really seemed to help. I don't get any error messages or helpful tidbits of information when using the Cloud SDK as the guide suggested.
I've opened up port 8883 through my firewall just in case that was the issue, but it doesn't appear to be.
I based this code on some of Google's guides, in particular this guide here. I have a registry, project, and device all set up with a proper RSA key.
So I'm not really sure how to proceed! If there's any additional information that would help, please let me know.
Thank you.
I realized that when I was creating the project and registry on the Google Console, I actually mistyped the name I was intending (I thought it was "testmqtt" but it was actually "tesmqtt").
So if you're having an issue similar to this, I'd suggest trying the following:
Make sure you've spelled everything right: the project title, the registry title, etc. It sounds dumb, but these types of mistakes happen, so it doesn't hurt to check them first. Otherwise you'll overthink things like I did.
Check out this page for troubleshooting. There are two parts of that troubleshooting page that might really help you. The first is checking whether you can connect to the cloud at all: you can test the connection by issuing a command like openssl s_client -connect mqtt.googleapis.com:8883 on the command line (you'll need openssl installed to do so). See the page I just linked for more details about testing your connection. The second is checking whether you can authenticate, by running a gcloud command using Google's SDK; the troubleshooting page I linked has more details in this regard.
This quickstart guide is also particularly helpful. It might be confusing to navigate at first but understanding it will be your best bet.
Google's github repository also has some good examples of how to make an MQTT connection to the Cloud IoT Core.
DavidC in the comments below helped me find some of these helpful links, so credit should go to him.
Apart from the links I provided in the comment section, and in addition to what you have found out: some users use the Project Number instead of the Project ID, which leads to an issue similar to the one you encountered. It really pays to double-check everything in your configuration as you troubleshoot.
If you need to have a refresher about the authentication, you may also refer to this link.

Socket.IO: "UnhandledPromiseRejectionWarning: Error: Callbacks are not supported when broadcasting"

I'm trying to create a login system with Node.js, Socket.IO and MongoDB.
At one point I have to get a certain cookie from the client.
So I "send" an event to the client, which should return the cookie so I can work with that data within the same function.
My code is as follows:
Server:
async function checklogin(user) {
  user = user;
  console.log("user:", user);
  await User.find({username: user}).then(function(docs) {
    servercookieid = docs[0].cookieid;
    servercookiedate = docs[0].cookiedate;
  });
  io.emit('getCookie', function(responseData) {
    console.log(responseData)
  }).catch(error)
}
Client:
socket.on('getCookie', function(callback) {
  console.log('getting cookie...');
  var Cookie = document.cookie;
  callback(Cookie)
});
I really don't know why I get this error, because as you can see I am not even broadcasting, sooo...
/shrug
If you need more information, please don't hesitate to ask.
The error message says:
Socket.IO: “UnhandledPromiseRejectionWarning: Error: Callbacks are not
supported when broadcasting”
But in fact, when you do io.emit you are broadcasting to every connected client.
Currently you're broadcasting when you run:
io.emit('getCookie', function(responseData) {
  console.log(responseData)
}).catch(error)
When broadcasting you can only pass data as the parameter, not a callback function.
Fix your code by changing it to:
io.emit('getCookie', responseData); // must set the cookie
In the code you provided, responseData is not defined, but judging by the name it should be a cookie.
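If what you actually need is the client's cookie back on the server, acknowledgements do work when you emit to a single socket rather than through io.emit. A minimal sketch, assuming you call this from inside the connection handler (your existing client-side getCookie handler can stay as it is):
io.on('connection', function(socket) {
  // Ask this one client for its cookie; the client answers through the ack callback.
  socket.emit('getCookie', function(cookie) {
    console.log('cookie from client:', cookie);
  });
});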

nodejs: http listen interferes with serialport reads

I'm trying to read in data from an Arduino using serialport, and serve it to a web browser.
Without the webserver (i.e. if I just leave out the 'listen' call at the end), the serial data gets constantly streamed in, with the expected 5 updates per second shown in the console.
But when I add the 'listen' call, nothing is shown on the console until I make a request to the server with my web browser, at which point the console gets at most one log entry added (but sometimes still nothing).
The data shown in the web browser is the 'old' data from whenever the last request was made, not the current latest data from the Arduino. In other words, the serial data is processed a little after each http request is served - not very useful.
const http = require('http');
const serialport = require('serialport');
var serial = new serialport('/dev/ttyUSB0', {
  baudRate: 115200
});
var jsonStr = '';
var jsonObj = {};
function handleData(data) {
  jsonStr += data;
  if (data.indexOf('}') > -1) {
    try {
      jsonObj = JSON.parse(jsonStr);
      console.log(jsonObj);
    }
    catch(e) {}
    jsonStr = '';
  }
}
serial.on('data', function (data) {
  handleData(data);
});
const app = http.createServer((request, response) => {
  response.writeHead(200, {"Content-Type": "text/html"});
  response.write(JSON.stringify(jsonObj));
  response.end();
});
app.listen(3000);
app.listen(3000);
(The data coming from the Arduino is already a JSON string, which is why I'm looking for a '}' to start parsing it.)
I also tried using the 'readable' event for getting the serial data but it makes no difference:
serial.on('readable', function () {
  handleData(serial.read());
});
If I understand it correctly, the listen call itself is not blocking, it merely registers an event listener/callback to be triggered later. As an accepted answer in a related question says: "Think of server.listen(port) as being kinda similar to someElement.addEventListener('click', handler) in the browser."
If node.js is single threaded then why does server.listen() return?
So why is that 'listen' preventing the serial connection from receiving anything, except for briefly each time a request is served? Is there no way I can use these two features without them interfering with each other?
I discovered that the code worked as expected on a different computer, even though the other computer was using the exact same operating system (Fedora 20) and the exact same version of node.js (v10.15.0) which had been installed in the exact same way (built from source).
I also found that it worked ok on the original computer with a more recent version of Fedora (29).
This likely points to some slight difference in usb/serial drivers which I don't have the time, knowledge or need to delve into. I'll just use the configurations I know will work.

Can't publish options with RabbitMQ message?

I'm using amqp.node for my RabbitMQ access in my Node code. I'm trying to use either the publish or sendToQueue methods to include some metadata with my published message (namely timestamp and content type), using the options parameter.
But whatever I'm passing to options is completely ignored. I think I'm missing some formatting or a field name, but I cannot find any reliable documentation (beyond the one provided here, which does not seem to do the job).
Below is my publish function code:
var publish = function(queueName, message) {
  let content;
  let options = {
    persistent: true,
    noAck: false,
    timestamp: Date.now(),
    contentEncoding: 'utf-8'
  };
  if (typeof message === 'object') {
    content = new Buffer(JSON.stringify(message));
    options.contentType = 'application/json';
  }
  else if (typeof message === 'string') {
    content = new Buffer(message);
    options.contentType = 'text/plain';
  }
  else { // message is already a buffer?
    content = message;
  }
  return Channel.sendToQueue(queueName, content, options); // Channel defined and opened elsewhere
};
What am I missing?
Update:
Turns out that if you choose to use a ConfirmChannel, you must provide the callback function as the last parameter, or else the options object is ignored. So once I changed the code to the following, I started seeing the options correctly:
Channel.sendToQueue(queueName, content, options, (err, result) => {...});
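For context, here is a minimal sketch of that confirm-channel flow end to end (assuming an existing amqplib connection conn and an existing queue named my.queue):
conn.createConfirmChannel().then(function(ch) {
  // On a ConfirmChannel the broker acks or nacks every publish, and amqplib
  // reports the outcome through the callback passed as the last argument.
  ch.sendToQueue('my.queue', new Buffer('hello'), { persistent: true, contentType: 'text/plain' },
    function(err, ok) {
      if (err) { console.error('message was nacked by the broker', err); }
      else { console.log('message confirmed'); }
    });
});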
Somehow, I can't seem to get your example publish function to work, though I don't see anything particularly wrong with it; I'm not sure why.
But I was able to modify a version of my own amqplib intro code, and got it working with your options just fine.
Here is the complete code for my example:
// test.js file
var amqplib = require("amqplib");
var server = "amqp://test:password@localhost/test-app";
var connection, channel;
function reportError(err) {
  console.log("Error happened!! OH NOES!!!!");
  console.log(err.stack);
  process.exit(1);
}
function createChannel(conn) {
  console.log("creating channel");
  connection = conn;
  return connection.createChannel();
}
function sendMessage(ch) {
  channel = ch;
  console.log("sending message");
  var msg = process.argv[2];
  var message = new Buffer(msg);
  var options = {
    persistent: true,
    noAck: false,
    timestamp: Date.now(),
    contentEncoding: "utf-8",
    contentType: "text/plain"
  };
  channel.sendToQueue("test.q", message, options);
  return channel.close();
}
console.log("connecting");
amqplib.connect(server)
  .then(createChannel)
  .then(sendMessage)
  .then(process.exit, reportError);
To run this, open a command line and do:
node test.js "example text message"
After running that, you'll see the message show up in your "test.q" queue (assuming you have that queue created) in your "test-app" vhost.
Here's a screenshot of the resulting message from the RMQ Management plugin:
Side notes:
I recommend not using sendToQueue. As I say in my RabbitMQ Patterns email course / ebook:
It took a while for me to realize this, but I now see the "send to queue" feature of RabbitMQ as an anti-pattern.
Sure, it's built in to the library and protocol. And it's convenient, right? But that doesn't mean you should use it. It's one of those features that exists to make demos simple and to handle some specific scenarios. But generally speaking, "send to queue" is an anti-pattern.
When you're a message producer, you only care about sending the message to the right exchange with the right routing key. When you're a message consumer, you care about the message destination - the queue to which you are subscribed. A message may be sent to the same exchange, with the same routing key, every day, thousands of times per day. But, that doesn't mean it will arrive in the same queue every time.
As message consumers come online and go offline, they can create new queues and bindings and remove old queues and bindings. This perspective of message producers and consumers informs the nature of queues: postal boxes that can change when they need to.
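To illustrate that distinction with amqplib (a minimal sketch; the exchange, queue, and routing key names here are made up):
// Producer side: publish to an exchange with a routing key, and let the
// bindings decide which queue(s) receive the message.
channel.assertExchange('orders', 'topic', { durable: true });
channel.publish('orders', 'order.created', new Buffer('{}'), { contentType: 'application/json' });
// Consumer side: own your queue and bind it to the exchange.
channel.assertQueue('order-emails', { durable: true });
channel.bindQueue('order-emails', 'orders', 'order.created');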
I also recommend not using amqplib directly. It's a great library, but it lacks a lot of usability. Instead, look for a good library on top of amqplib.
I prefer wascally, by LeanKit. It's a much easier abstraction on top of amqplib and provides a lot of great features and functionality.
Lastly, if you're struggling with other details in getting RMQ up and running with Node.js, designing your app to work with it, etc., check out my RabbitMQ For Devs course - it goes from zero to hero, fast. :)
This may help others: the key name to use for the content type is contentType in the JavaScript code, while the RabbitMQ web GUI uses content_type as the key name. The two contexts use different key names for the same option, so make sure to use the right one in the right context.
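For example, the same property in each context (names here are illustrative):
// In amqplib code the option key is camelCase:
channel.sendToQueue('some.queue', new Buffer('{}'), { contentType: 'application/json' });
// In the RabbitMQ management GUI's publish form, the equivalent
// property is entered as: content_type = application/json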

How to check if ElasticSearch client is connected?

I'm working with elasticsearch-js (NodeJS) and everything works just fine as long as ElasticSearch is running. However, I'd like to know that my connection is alive before trying to invoke one of the client's methods. I'm doing things in a bit of a synchronous fashion, but only for the purpose of performance testing (e.g., check that I have an empty index to work in, ingest some data, query the data). Looking at a snippet like this:
var elasticClient = new elasticsearch.Client({
  host: ((options.host || 'localhost') + ':' + (options.port || '9200'))
});
// Note, I already have promise handling implemented, omitting it for brevity though
var promise = elasticClient.indices.delete({index: "_all"});
/// ...
Is there some mechanism I can pass in on the client config to fail fast, or some test I can perform on the client to make sure it's open before invoking delete?
Update: 2015-05-22
I'm not sure if this is correct, but perhaps attempting to get client stats is reasonable?
var getStats = elasticClient.nodes.stats();
getStats.then(function(o) {
  console.log(o);
})
.catch(function(e) {
  console.log(e);
  throw e;
});
Via node-debug, I am seeing the promise rejected when ElasticSearch is down / inaccessible with: "Error: No Living connections". When it does connect, o in my then handler seems to have details about connection state. Would this approach be correct or is there a preferred way to check connection viability?
Getting stats can be a heavy call when you simply want to ensure your client is connected. You should use ping instead; see the second example at https://github.com/elastic/elasticsearch-js#examples
We use ping too, right after instantiating the elasticsearch-js client connection on start-up.
// example from above link
var elasticsearch = require('elasticsearch');
var client = new elasticsearch.Client({
  host: 'localhost:9200',
  log: 'trace'
});
client.ping({
  // ping usually has a 3000ms timeout
  requestTimeout: Infinity,
  // undocumented params are appended to the query string
  hello: "elasticsearch!"
}, function (error) {
  if (error) {
    console.trace('elasticsearch cluster is down!');
  } else {
    console.log('All is well');
  }
});
