I'm trying to do a get() from my AWS Lambda (Node.js) against ElastiCache Redis using the node_redis client. I believe I'm able to connect to Redis, but I get a timeout (the Lambda's 60-second timeout) when I try to perform a get() operation.
I have also granted my Lambda Administrator access just to be certain that it's not a permissions issue. I'm invoking the Lambda from the AWS console by clicking the Test button.
Here is my redisClient.js:
const util = require('util');
const redis = require('redis');

console.info('Start to connect to Redis Server');
const client = redis.createClient({
    host: process.env.ElastiCacheEndpoint,
    port: process.env.ElastiCachePort
});

client.get = util.promisify(client.get);
client.set = util.promisify(client.set);

client.on('ready', function() {
    console.log(" subs Redis is ready"); // Can see this output in logs
});

client.on('connect', function() {
    console.log('subs connected to redis'); // Can see this output in logs
});

exports.set = async function(key, value) {
    console.log("called set!");
    return await client.set(key, value);
};

exports.get = async function(key) {
    console.log("called get!"); // Can see this output in logs
    return await client.get(key);
};
Here's my index.js which calls the redisClient.js:
const redisclient = require("./redisClient");

exports.handler = async (event) => {
    const params = event.params;
    const operation = event.operation;
    try {
        console.log("Checking RedisCache by calling client get"); // Can see this output in logs
        const cachedVal = await redisclient.get('mykey');
        console.log("Checked RedisCache by calling client get"); // This doesn't show up in logs.
        console.log(cachedVal);
        if (cachedVal) {
            return {
                statusCode: 200,
                body: JSON.stringify(cachedVal)
            };
        } else {
            const setCache = await redisclient.set('myKey', 'myVal');
            console.log(setCache);
            console.log("*******");
            let response = await makeCERequest(operation, params, event.account);
            console.log("CE Request returned");
            return response;
        }
    }
    catch (err) {
        return {
            statusCode: 500,
            body: err,
        };
    }
};
This is the output (timeout error message) that I get:
{
"errorMessage": "2020-07-05T19:04:28.695Z 9951942c-f54a-4b18-9cc2-119eed65e9f1 Task timed out after 60.06 seconds"
}
I have tried using Bluebird (changing get to getAsync()) per https://github.com/UtkarshYeolekar/promisify-redis-client/blob/master/redis.js but still got the same behavior.
I also changed the port used to create the client to a random value (like 8088) to see how the connect event behaves for a failed connection. In that case I still get a timed-out error response, but I don't see "subs Redis is ready" and "subs connected to redis" in my logs.
Can anyone please point me in the right direction? I don't seem to understand why I'm able to connect to redis but the get() request times out.
I figured out the issue and am posting it here in case it helps anyone in the future, as the behavior wasn't very intuitive for me.
I had enabled the AuthToken parameter while setting up my Redis cluster. I was passing the token to the Lambda through the environment variables, but wasn't using it when sending the get()/set() requests. When I disabled the AuthToken requirement in the Redis configuration, the Lambda was able to hit Redis with get/set requests. More details on AuthToken can be found here: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticache-replicationgroup.html#cfn-elasticache-replicationgroup-authtoken
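For reference, the alternative to disabling AUTH is to pass the token when creating the client. A minimal sketch with node_redis (the ElastiCacheAuthToken variable name is my assumption; note that ElastiCache only accepts an AUTH token when in-transit encryption is enabled, so the client also needs a TLS connection):

const redis = require('redis');

// Sketch only: pass the AUTH token via the `password` option (alias auth_pass).
// ElastiCache requires in-transit encryption when AUTH is enabled, hence `tls`.
const client = redis.createClient({
    host: process.env.ElastiCacheEndpoint,
    port: process.env.ElastiCachePort,
    password: process.env.ElastiCacheAuthToken, // assumed env var name
    tls: {}
});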
First, we are quite new to Hyperledger Fabric and MQTT. We are building a prototype with a blockchain platform (Hyperledger Fabric) written in Node.js and a Raspberry Pi transmitting IoT data over MQTT. In our Node.js file we create an MQTT client that subscribes to a topic and receives a JSON object as the message. The parameters of that object have to be passed on to the smart contract (submitContract). We realized that the Hyperledger Fabric gateway disconnects before the MQTT event handlers (client.on) run, so we thought we would let the gateway disconnect only once a contract has been submitted (see the code below). To do that, we want to get the success variable out of the client.on("message") handler so we can use it in the if statement at the end. However, the success variable doesn't get updated to true by simply using return inside client.on(), so the program never exits (i.e. the gateway is never disconnected). Below you see the relevant parts of the code. Does anyone know how to properly pass an updated variable out of an MQTT event handler?
'use strict';

const { FileSystemWallet, Gateway } = require('fabric-network');
const mqtt = require("mqtt");

async function main() {
    try {
        // Create a new gateway for connecting to our peer node.
        const gateway = new Gateway();
        await gateway.connect(ccpPath, { wallet, identity: 'user1', discovery: { enabled: true, asLocalhost: true } });

        // Get the network (channel) our contract is deployed to.
        const network = await gateway.getNetwork('mychannel');

        // Get the contract from the network.
        const contract = network.getContract('fabcar');

        // MQTT client connection & contract submission
        console.log("connecting to broker");
        const client = mqtt.connect("mqtt://192.168.43.217");
        var success = false;

        client.on("connect", () => {
            console.log("Subscribing");
            client.subscribe("rfidData");
            console.log("Please hold your tag on the RFID reader. Wait...");
        });

        client.on("message", (topic, message) => {
            var rfidPayload = JSON.parse(message.toString());
            var carKeyIn = rfidPayload.carKey;
            var renterIDIn = rfidPayload.renterID;
            var timestampIn = rfidPayload.timestamp;
            console.log(rfidPayload);
            contract.submitTransaction('openCar', carKeyIn, renterIDIn, timestampIn);
            success = true;
            console.log("Success? " + success);
            return success;
        });

        client.stream.on('error', (err) => {
            console.log('errorMessage', err);
            client.end();
        });

        client.on("offline", () => {
            console.log("offline");
        });

        client.on("reconnect", () => {
            console.log("reconnect");
        });

        // Disconnect from the gateway.
        if (success === true) {
            client.end();
            gateway.disconnect();
        }
    } catch (error) {
        console.error(`Failed to submit transaction: ${error}`);
        process.exit(1);
    }
}

main();
You need to get to grips with the concepts of asynchronous execution and event-driven systems. The application you have written does not execute in the order it is written.
The on('message', ...) callback will only be executed when a message is delivered to the client. This function also does not return a value to your code (it is not actually called by any of your code, it is called by the underlying MQTT client code), so it will not "return" the value of success.
If you want the application to disconnect after it has received the first message, the easiest thing to do is to move the code that disconnects the MQTT client and the Fabric gateway inside the callback, e.g.:
client.on("message", (topic, message) => {
    var rfidPayload = JSON.parse(message.toString());
    var carKeyIn = rfidPayload.carKey;
    var renterIDIn = rfidPayload.renterID;
    var timestampIn = rfidPayload.timestamp;
    console.log(rfidPayload);
    contract.submitTransaction('openCar', carKeyIn, renterIDIn, timestampIn);
    gateway.disconnect();
    client.end();
});
EDIT:
Looking at the doc, it appears that submitTransaction() is flagged as an async function, so you should be able to use await to wait until it completes (note that the message callback itself must be declared async for await to be valid inside it):
...
    console.log(rfidPayload);
    await contract.submitTransaction('openCar', carKeyIn, renterIDIn, timestampIn);
    gateway.disconnect();
    client.end();
});
I want to add a row to an Amazon DynamoDB table from a Node app deployed on Zeit Now every time I get a POST request, but only after sending the response to that request. My ddb.putItem stops as a pending promise and no errors are logged. I don't understand why.
The app is a Slack bot. I get a message from the Slack API and my bot fires a response. I want to quickly send 200 to Slack to avoid having the message sent again.
I tried different approaches, using EventEmitter or res.on('finish', ...).
I did test that the module sending the update to the DynamoDB table works, because it works when I fire it from the command line with node.
But it doesn't work either from now dev or from the deployed Now app.
I made a simplified test case in this repository:
https://github.com/halas/now-test-case
Basically the entrypoint for the Node app looks like this:
const send = require('./send.js');

module.exports = (req, res) => {
    res.on('finish', send);
    res.end(`Hello from Node.js on Now 2.0!`);
    console.log('still here'); // this gets logged
};
And the send module:
[ require aws-sdk and set credentials ]
const ddb = new AWS.DynamoDB({apiVersion: '2012-08-10'});

module.exports = () => {
    let params = {
        TableName: 'now-test-case',
        Item: {
            'id': { N: String(Date.now()) },
            'message': { S: 'hello World' },
        }
    };
    console.log('hello'); // we get here
    try {
        console.log('try'); // we get here
        const data = ddb.putItem(params).promise();
        console.log(data); // this is a pending promise now
        data
            .then((data) => { console.log(data); })
            .catch((error) => { console.log(error); });
        // and it never gets resolved or rejected
    } catch(e) {
        console.log(e); // and no error caught here either
    }
};
As suggested by Rob Evans on the Zeit Spectrum chat, I prepared a version with async/await (on a branch in the test repo), but the results are the same.
I would expect to get the update in DynamoDB (a resolved promise).
Instead I only get a pending promise that never resolves or rejects.
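For reference, a sketch of what such an async/await variant could look like (my reconstruction, not the exact code on the branch):

module.exports = async () => {
    let params = {
        TableName: 'now-test-case',
        Item: {
            'id': { N: String(Date.now()) },
            'message': { S: 'hello World' },
        }
    };
    try {
        // await the promise directly instead of chaining .then/.catch
        const data = await ddb.putItem(params).promise();
        console.log(data);
    } catch (e) {
        console.log(e);
    }
};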
I'm developing a Node.js web app to receive real-time updates from the Firestore DB through the Admin SDK.
This is the init code for the Firestore object. It's executed just once, when the app is deployed (on AWS Elastic Beanstalk):
const admin = require('firebase-admin');
var serviceAccount = require('./../key.json');

admin.initializeApp({
    credential: admin.credential.cert(serviceAccount)
});

var db = admin.firestore();
FUNC.firestore = db;
Then I use this Firestore object in a websocket communication to send real-time updates to the browser. The idea is to use the server as a proxy between the browser and Firestore.
socket.on('open', function (client) {
    var query = FUNC.firestore.collection("notifications").doc(client.user.id.toString()).collection("global");
    query.onSnapshot(querySnapshot => {
        querySnapshot.docChanges().forEach(change => {
            client.send({ id: change.doc.id, body: change.doc.data(), type: change.type });
        });
    }, err => {
        console.log(`Encountered error: ${err}`);
    });
});

socket.on('close', function (client) {
    var unsub = FUNC.firestore.collection("notifications").doc(client.user.id.toString()).collection("global").onSnapshot(() => {
    });
    unsub();
});
It works well for a while, but after a few hours the client stops receiving onSnapshot() updates, and after a while the server logs the error: Encountered error: Error: 10 ABORTED: The operation was aborted.
What's wrong? Should I initialize Firestore on each connection? Is there some lifecycle mistake?
Thank you
EDIT (A very bad solution)
I've tried to create a separate firebase-admin app instance for each logged-in user and changed my code this way:
const admin = require('firebase-admin');
var serviceAccount = require('./../key.json');

admin.initializeApp({
    credential: admin.credential.cert(serviceAccount)
});

FUNC.getFirestore = function (user) {
    try {
        user.firebase = admin.app(user.id.toString());
        return user.firebase.firestore();
    } catch(e) {
        // ignore
    }
    var app = admin.initializeApp({
        credential: admin.credential.cert(serviceAccount)
    }, user.id.toString());
    user.firebase = app;
    return user.firebase.firestore();
}

FUNC.removeFirebase = function (user) {
    if (user.firebase) {
        user.firebase.delete();
    }
}
And then socket listeners:
self.on('open', function (client) {
    var query = FUNC.getFirestore(client.user).collection("notifications").doc(client.user.id.toString()).collection("global");
    query.onSnapshot(querySnapshot => {
        querySnapshot.docChanges().reverse();
        querySnapshot.docChanges().forEach(change => {
            client.send({ id: change.doc.id, body: change.doc.data(), type: change.type });
        });
    }, err => {
        console.log(`Encountered error: ${err}`);
    });
});

self.on('close', function (client) {
    var unsub = FUNC.getFirestore(client.user).collection("notifications").doc(client.user.id.toString()).collection("global").onSnapshot(() => {
    });
    unsub();
    FUNC.removeFirebase(client.user);
});
So when a client disconnects for any reason, the server removes its Firebase app. It works, but I've noticed a huge memory leak on the server. I need some help.
UPDATED ANSWER
After much research I've understood that this kind of approach is wrong. Of course, the old answer could serve as a workaround, but it is not the real solution to the problem, because Firestore was not designed for something like: Firestore <--(Admin SDK)--> Server <--(WebSocket)--> Client.
To build the communication properly, I studied and applied Firestore Security Rules (https://firebase.google.com/docs/firestore/security/get-started) together with custom token generation (https://firebase.google.com/docs/auth/admin/create-custom-tokens). So the correct flow is:
Client login request --> Server + Admin SDK generate a custom auth token and return it to the client
Then the real-time communication happens only between the client and Firestore itself: Client + custom auth token <--(Firebase JS SDK)--> Firestore DB
As you can see, the server is no longer involved in the real-time communication; the client receives updates directly from Firestore.
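A minimal sketch of the server-side token generation with the Admin SDK (the userId parameter and how the token is returned to the client are assumptions; on the client, the token is then used with signInWithCustomToken from the Firebase JS SDK):

// Server side: generate a custom token for a user authenticated by your own login system.
async function createFirebaseToken(userId) {
    const customToken = await admin.auth().createCustomToken(userId.toString());
    return customToken; // send this back to the browser, e.g. in the login response
}

// Client side (Firebase JS SDK), for reference:
// firebase.auth().signInWithCustomToken(customToken)
//     .then(() => { /* now attach onSnapshot listeners directly */ });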
OLD ANSWER
Finally I can answer myself. First of all, the second solution I tried is a very bad one, because each new app created through the Admin SDK is stored in RAM; with 20/30 users the app reaches more than 1 GB of RAM, which is absolutely unacceptable.
So the first implementation was the better solution; however, I had gotten the register/unregister lifecycle of the onSnapshot listener wrong. Each onSnapshot() call returns a different unsubscribe function, even when called on the same reference. So, instead of closing the existing listener when the socket closed, I was opening another one. This is how it should be:
socket.on('open', function (client) {
    var query = FUNC.firestore.collection("notifications").doc(client.user.id.toString()).collection("global");
    client.user.firestoreUnsub = query.onSnapshot(querySnapshot => {
        querySnapshot.docChanges().forEach(change => {
            client.send({ id: change.doc.id, body: change.doc.data(), type: change.type });
        });
    }, err => {
        console.log(`Encountered error: ${err}`);
    });
});

socket.on('close', function (client) {
    client.user.firestoreUnsub();
});
After almost 48 hours, the listeners still work without problems and no memory leaks occur.
The Issue:
I have a Node.js (8.10) AWS Lambda function that takes a JSON object and publishes it to an IoT topic. The function successfully publishes to the topic; however, once fired it is continuously called until I throttle the concurrency to zero to halt any further invocations.
I'm trying to figure out what I've implemented incorrectly that causes more than one invocation of the function.
The Function:
Here is my function:
var AWS = require('aws-sdk');

exports.handler = function (event, context) {
    var iotdata = new AWS.IotData({endpoint: 'xxxxxxxxxx.iot.us-east-1.amazonaws.com'});
    var params = {
        topic: '/PiDevTest/SyncDevice',
        payload: JSON.stringify(event),
        qos: 0
    };
    iotdata.publish(params, function(err, data) {
        if (err) {
            console.log(err, err.stack);
        } else {
            console.log("Message sent.");
            context.succeed();
        }
    });
};
My test json is:
{
    "success": 1,
    "TccvID": "TestID01"
}
The test console shows a response of "null", but the IoT topic receives the data from the test JSON about once per second.
What I've Tried
- I've attempted to define the handler in its own, non-anonymous function called handler, and then setting exports.handler = handler;. This didn't produce any errors, but didn't successfully post to the IoT topic either.
- I thought maybe the issue was with the Node.js callback. I've tried implementing it and leaving it out (current iteration above), but neither way seemed to make a difference. I had read somewhere that the function will retry if it errors, but I believe that only happens three times, so it wouldn't explain the indefinite calling of the function.
- I've also tried calling the function from another Lambda to make sure that the issue wasn't the AWS test tool. This produced the same behavior, though.
Summary:
What am I doing incorrectly that causes this function to publish the json data indefinitely to the iot topic?
Thanks in advance for your time and expertise.
Use aws-iot-device-sdk to create an MQTT client and use its message handler and publish method to publish your messages to the IoT topic. A sample MQTT client is below:
import * as DeviceSdk from 'aws-iot-device-sdk';
import * as AWS from 'aws-sdk';

let instance: any = null;

export default class IoTClient {
    client: any;

    /**
     * Constructor
     *
     * @param {boolean} createNewClient - Whether or not to use existing client instance
     */
    constructor(createNewClient = false, options = {}) {
    }

    async init(createNewClient, options) {
        if (createNewClient && instance) {
            instance.disconnect();
            instance = null;
        }
        if (instance) {
            return instance;
        }
        instance = this;
        this.initClient(options);
        this.attachDebugHandlers();
    }

    /**
     * Instantiate AWS IoT device object
     * Note that the credentials must be initialized with empty strings;
     * When we successfully authenticate to the Cognito Identity Pool,
     * the credentials will be dynamically updated.
     *
     * @param {Object} options - Options to pass to DeviceSdk
     */
    initClient(options) {
        const clientId = getUniqueId();
        this.client = DeviceSdk.device({
            region: options.region || getConfig('iotRegion'),

            // AWS IoT Host endpoint
            host: options.host || getConfig('iotHost'),

            // clientId created earlier
            clientId: options.clientId || clientId,

            // Connect via secure WebSocket
            protocol: options.protocol || getConfig('iotProtocol'),

            // Set the maximum reconnect time to 500ms; this is a browser application
            // so we don't want to leave the user waiting too long for reconnection after
            // re-connecting to the network/re-opening their laptop/etc...
            baseReconnectTimeMs: options.baseReconnectTimeMs || 500,
            maximumReconnectTimeMs: options.maximumReconnectTimeMs || 1000,

            // Enable console debugging information
            debug: (typeof options.debug === 'undefined') ? true : options.debug,

            // AWS access key ID, secret key and session token must be
            // initialized with empty strings
            accessKeyId: options.accessKeyId,
            secretKey: options.secretKey,
            sessionToken: options.sessionToken,

            // Let redux handle subscriptions
            autoResubscribe: (typeof options.debug === 'undefined') ? false : options.autoResubscribe,
        });
    }

    disconnect() {
        this.client.end();
    }

    attachDebugHandlers() {
        this.client.on('reconnect', () => {
            logger.info('reconnect');
        });

        this.client.on('offline', () => {
            logger.info('offline');
        });

        this.client.on('error', (err) => {
            logger.info('iot client error', err);
        });

        this.client.on('message', (topic, message) => {
            logger.info('new message', topic, JSON.parse(message.toString()));
        });
    }

    updateWebSocketCredentials(accessKeyId, secretAccessKey, sessionToken) {
        this.client.updateWebSocketCredentials(accessKeyId, secretAccessKey, sessionToken);
    }

    attachMessageHandler(onNewMessageHandler) {
        this.client.on('message', onNewMessageHandler);
    }

    attachConnectHandler(onConnectHandler) {
        this.client.on('connect', (connack) => {
            logger.info('connected', connack);
            onConnectHandler(connack);
        });
    }

    attachCloseHandler(onCloseHandler) {
        this.client.on('close', (err) => {
            logger.info('close', err);
            onCloseHandler(err);
        });
    }

    publish(topic, message) {
        this.client.publish(topic, message);
    }

    subscribe(topic) {
        this.client.subscribe(topic);
    }

    unsubscribe(topic) {
        this.client.unsubscribe(topic);
        logger.info('unsubscribed from topic', topic);
    }
}
*** getConfig() reads environment variables from a YAML file; alternatively, you can specify the values directly here.
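For illustration, usage of the class above might look roughly like this (the endpoint, region, and credentials are placeholders, and init() must be called explicitly because the sample's constructor is empty):

// inside an async function:
const iotClient = new IoTClient();
await iotClient.init(false, {
    host: 'xxxxxxxxxx.iot.us-east-1.amazonaws.com', // placeholder IoT endpoint
    region: 'us-east-1',
    accessKeyId: '',   // supply real credentials, e.g. from Cognito
    secretKey: '',
    sessionToken: '',
});

iotClient.publish('/PiDevTest/SyncDevice', JSON.stringify({ success: 1, TccvID: 'TestID01' }));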
While he only posted it as a comment, MarkB pointed me in the correct direction.
The problem was that another Lambda was listening to the same IoT topic and invoking the Lambda I was working on. This resulted in circular logic, as the exit condition was never met. Fixing that code solved the issue.
I want my application (let's say a simple Node file for now) to work as-is even if Redis is not available. I'm not able to do it the correct way. This is what I've tried:
var redis = require('redis');
var redisClient = null;

var getRedisClient = function() {
    if (redisClient) {
        return redisClient;
    }
    try {
        redisClient = redis.createClient({connect_timeout: 5000, max_attempts: 1});
        redisClient.on("error", function(err) {
            console.error("Error connecting to redis", err);
            redisClient = null;
        });
        return redisClient;
    } catch(ex) {
        console.log("error initialising redis client " + ex);
        return null;
    }
};

try {
    var client = getRedisClient();
    console.log("done!");
} catch (ex) {
    console.log("Exception");
}
However, with this code my application exits if Redis is not available (it shouldn't, because I've not called process.exit() anywhere).
How can I solve this?
Checking for Successful Connection on Start
Using a promise, you could guarantee that at least initially, you were able to connect to redis without error within a specified time period:
const redis = require('redis');
const Promise = require('bluebird');

function getRedisClient(timeoutMs) {
    return new Promise((resolve, reject) => {
        const redisClient = redis.createClient();
        const timer = setTimeout(() => reject('timeout'), timeoutMs);
        redisClient.on("ready", () => {
            clearTimeout(timer);
            resolve(redisClient);
        });
        redisClient.on("error", (err) => {
            clearTimeout(timer);
            reject(err);
        });
    });
}

const redisReadyTimeoutMs = 10000;
getRedisClient(redisReadyTimeoutMs)
    .then(redisClient => {
        // the client has connected to redis successfully
        return doSomethingUseful();
    }, error => {
        console.log("Unable to connect to redis", error);
    });
You Need Proper Error Handling
The redis client being non-null does NOT guarantee using it won't throw an error.
- You could experience infrastructure misfortune, e.g. a crashed Redis process, out of memory, or the network being down.
- A bug in your code could cause an error, e.g. invalid or missing arguments to a Redis command.
You should be handling redis client errors as a matter of course.
DON'T null the Redis Client on Error
It won't give you much but it will force you to check for null every time you try and use it.
The redis client also has inbuilt reconnect and retry mechanisms that you'll miss out on if you null it after the first error. See the redis package docs, look for retry_strategy.
DO Wrap your redis client code with try .. catch ... or use .catch in your promise chain.
DO Make use of a retry_strategy.
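For example, a retry_strategy along these lines (a sketch adapted from the node_redis documentation; tune the error codes, limits, and delays to your own needs):

const client = redis.createClient({
    retry_strategy: function (options) {
        if (options.error && options.error.code === 'ECONNREFUSED') {
            // The server refused the connection; give up immediately
            return new Error('The server refused the connection');
        }
        if (options.total_retry_time > 1000 * 60 * 60) {
            // Stop retrying after one hour
            return new Error('Retry time exhausted');
        }
        if (options.attempt > 10) {
            // End reconnecting with the built-in error after 10 attempts
            return undefined;
        }
        // Reconnect after an increasing delay, capped at 3 seconds
        return Math.min(options.attempt * 100, 3000);
    }
});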