Snowflake Node Lambda Connection Hangs - node.js

I'm using the Snowflake Node.js driver to connect to a DB. When running the connector from a local server I have no issues. However, when I run the same function in Lambda, I can't seem to connect. There are no errors, exceptions, or timeouts... just nothing. Here is the code I'm using, per their documentation.
var snowflake = require("snowflake-sdk");

var connection = snowflake.createConnection({
    account: "****",
    username: "******",
    password: "******",
});

connect(connection);

const response = {
    statusCode: 200,
    body: JSON.stringify("Hello from Lambda!"),
};
return response;

function connect(connection) {
    console.log("in connection");
    let connection_ID;
    try {
        connection.connect(function (err, conn) {
            if (err) {
                console.error("Unable to connect: " + err);
            } else {
                console.log("Successfully connected to Snowflake");
                // Optional: store the connection ID.
                connection_ID = conn.getId();
            }
            console.log(connection_ID);
        });
    } catch (err) {
        console.log(err);
    }
}
For clarity, my Lambda has no issues connecting to other APIs, and it is not running behind a VPC.
Any help would be greatly appreciated.

If you have not attached a VPC to your Lambda function, it runs in the Lambda service's own network and has outbound internet access by default, so it should be able to reach Snowflake's public endpoints.
If you have attached a VPC, make sure that VPC has a route to the Snowflake public endpoints (for example through a NAT gateway) and check again.
If it is still an issue, please post the CloudWatch logs; they should give a clue.
You can also check the Snowflake History page to see whether any client-side connection request from the Lambda arrives at all.
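One other thing worth ruling out: if the handler returns before the connect callback fires, Lambda can freeze the execution environment before the driver logs anything, so neither success nor failure ever reaches CloudWatch. A minimal sketch (assuming an async handler and the same placeholder credentials as above) that awaits the connection so any failure surfaces in the logs:

const snowflake = require("snowflake-sdk");

exports.handler = async (event) => {
    const connection = snowflake.createConnection({
        account: "****",
        username: "******",
        password: "******",
    });

    // Wrap the callback-style connect in a Promise and await it, so the
    // handler cannot return before the driver reports success or failure.
    await new Promise((resolve, reject) => {
        connection.connect((err, conn) => {
            if (err) {
                console.error("Unable to connect: " + err);
                reject(err);
            } else {
                console.log("Successfully connected, ID: " + conn.getId());
                resolve(conn);
            }
        });
    });

    return {
        statusCode: 200,
        body: JSON.stringify("Hello from Lambda!"),
    };
};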

Related

Not getting any console readings from AWS Lambda when trying to connect to RDS

I'm trying to verify that I'm able to connect to my RDS database, but nothing is logged in the connection promise / callback. I was able to connect from my local machine, but not on AWS. I'm out of ideas on how else I can debug this as I've done everything I can find online.
I've verified that the RDS instance and Lambda function are in the same VPC, VPC security group, and subnets, as suggested by this question. For that VPC security group, I've added inbound rules for 0.0.0.0/0 and ::/0.
The RDS instance is set to be publicly accessible, and setting it to not publicly accessible doesn't make a difference. Below is the output I get from running the Lambda function.
START RequestId: 9567a1be-d8d1-4b61-b9c4-4dd06ff36a4b Version: $LATEST
2021-07-21T23:52:47.115Z 9567a1be-d8d1-4b61-b9c4-4dd06ff36a4b INFO Lambda invoked
END RequestId: 9567a1be-d8d1-4b61-b9c4-4dd06ff36a4b
REPORT RequestId: 9567a1be-d8d1-4b61-b9c4-4dd06ff36a4b Duration: 52.71 ms Billed Duration: 53 ms Memory Size: 128 MB Max Memory Used: 71 MB Init Duration: 193.40 ms
I'm using the pg code I got from the node-postgres documentation. I went through the Amazon tutorial for connecting a Lambda function to RDS, giving it a role with AWSLambdaVPCAccessExecutionRole (I didn't use the CLI as they have; I used the GUI on the website). I also read that console output inside promises doesn't always get logged, so I've wrapped every promise in a try/catch block, and still nothing is returned.
const { Client, Pool } = require('pg')

const pool = new Pool({
    user: 'myusername',
    password: 'mypassword',
    host: 'blahblah.somestuff.us-east-2.rds.amazonaws.com',
    port: 5432,
    database: 'postgres'
})

pool.on('error', (err, client) => {
    console.error('Unexpected error on idle client', err)
    process.exit(-1)
})

exports.handler = function(event, context) {
    console.log('Lambda invoked') // this logs just fine
    try {
        var client = pool.connect((err, client, done) => {
            if (err) throw err
            console.log('connected')
            try {
                client.query('SELECT * FROM users WHERE id = $1', [1], (err, res) => {
                    done()
                    console.log('query has run')
                    if (err) {
                        throw err;
                        // console.log(err.stack)
                    } else {
                        console.log(res.rows[0])
                    }
                })
            } catch(err) {
                throw err
            }
        })
    }
    catch(err) {
        console.warn(err)
    }
};
Node-postgres 6.4.2, PostgreSQL 12.6R1
The proper configuration of the Security Groups would be:
- A Security Group on the Lambda function (Lambda-SG) with the default settings of Allow All Outbound
- A Security Group on the Amazon RDS database (DB-SG) with an inbound rule that permits traffic from Lambda-SG on port 5432 (for PostgreSQL)
That is, DB-SG specifically references Lambda-SG as permissible for inbound traffic. This is much cleaner than putting resources "in the same Security Group", which is an incorrect model, because Security Groups apply to each resource individually.
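Separately from the security groups, note that the handler above never waits for pool.connect to finish, so the invocation can end before any of the callbacks log anything. A minimal sketch of the same query using pool.query with async/await (same pool settings as in the question), so the handler only returns once the query completes:

const { Pool } = require('pg')

const pool = new Pool({
    user: 'myusername',
    password: 'mypassword',
    host: 'blahblah.somestuff.us-east-2.rds.amazonaws.com',
    port: 5432,
    database: 'postgres'
})

exports.handler = async function(event) {
    // pool.query checks out a client, runs the query, and releases the
    // client; awaiting it keeps the invocation alive until it finishes.
    const res = await pool.query('SELECT * FROM users WHERE id = $1', [1])
    console.log(res.rows[0])
    return res.rows[0]
}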

Able to connect to redis but set/get times out

I'm trying to do a get() from my AWS Lambda (Node.js) on ElastiCache Redis using the node_redis client. I believe that I'm able to connect to Redis, but I'm hitting the Lambda's 60-second timeout when I try to perform a get() operation.
I have also granted my AWS Lambda Administrator access, just to be certain that it's not a permissions issue. I invoke the Lambda from the AWS console by clicking the Test button.
Here is my redisClient.js:
const util = require('util');
const redis = require('redis');

console.info('Start to connect to Redis Server');
const client = redis.createClient({
    host: process.env.ElastiCacheEndpoint,
    port: process.env.ElastiCachePort
});

client.get = util.promisify(client.get);
client.set = util.promisify(client.set);

client.on('ready', function() {
    console.log(" subs Redis is ready"); // Can see this output in logs
});

client.on('connect', function() {
    console.log('subs connected to redis'); // Can see this output in logs
})

exports.set = async function(key, value) {
    console.log("called set!");
    return await client.set(key, value);
}

exports.get = async function(key) {
    console.log("called get!"); // Can see this output in logs
    return await client.get(key);
}
Here's my index.js which calls the redisClient.js:
const redisclient = require("./redisClient");

exports.handler = async (event) => {
    const params = event.params
    const operation = event.operation;
    try {
        console.log("Checking RedisCache by calling client get") // Can see this output in logs
        const cachedVal = await redisclient.get('mykey');
        console.log("Checked RedisCache by calling client get") // This doesn't show up in logs.
        console.log(cachedVal);
        if (cachedVal) {
            return {
                statusCode: 200,
                body: JSON.stringify(cachedVal)
            }
        } else {
            const setCache = await redisclient.set('myKey', 'myVal');
            console.log(setCache);
            console.log("*******")
            let response = await makeCERequest(operation, params, event.account);
            console.log("CE Request returned");
            return response;
        }
    }
    catch (err) {
        return {
            statusCode: 500,
            body: err,
        };
    }
}
This is the output (timeout error message) that I get:
{
    "errorMessage": "2020-07-05T19:04:28.695Z 9951942c-f54a-4b18-9cc2-119eed65e9f1 Task timed out after 60.06 seconds"
}
I have tried using Bluebird (changing get to getAsync()) per this: https://github.com/UtkarshYeolekar/promisify-redis-client/blob/master/redis.js but I still got the same behavior.
I also changed the port to a random value (like 8088) when creating the client, to see how the connect event behaves for a failed connection. In that case I still get a timed-out error response, but I don't see subs Redis is ready or subs connected to redis in my logs.
Can anyone please point me in the right direction? I don't understand why I'm able to connect to Redis but the get() request times out.
I figured out the issue and am posting it here in case it helps anyone in the future, as the behavior wasn't very intuitive to me.
I had enabled the AuthToken parameter when setting up Redis. I was passing it to the Lambda as an environment variable, but I wasn't using it when sending the get()/set() requests. When I disabled the AuthToken requirement in the Redis configuration, the Lambda was able to hit Redis with get/set requests. More details on AuthToken can be found here: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticache-replicationgroup.html#cfn-elasticache-replicationgroup-authtoken
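The alternative to disabling the token is to pass it when creating the client. A minimal sketch for node_redis v3, assuming the token is exposed to the Lambda as an AuthToken environment variable (the tls option is only needed if in-transit encryption is enabled on the replication group):

const redis = require('redis');

const client = redis.createClient({
    host: process.env.ElastiCacheEndpoint,
    port: process.env.ElastiCachePort,
    password: process.env.AuthToken, // AUTH token configured on the replication group (name assumed)
    tls: {}                          // only if in-transit encryption is enabled
});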

ioredis in nodejs does nothing when 'get' from AWS ElastiCache Redis

I am running the following code in a Node.js container hosted on ECS. It runs great locally against Redis. In AWS, it appears to connect (if I use an invalid address, it errors on connection, so I'm assuming it's connected). But when I call redis.get(), nothing happens. I've enabled debugging for ioredis, and I get one message when I attempt the get:
2020-04-17T22:56:10.701Z ioredis:redis status[replica.virtual-happy-hour-redis.fnt3zc.usw2.cache.amazonaws.com:6379]: [empty] -> connecting
2020-04-17T22:56:11.042Z ioredis:redis status[10.200.0.37:6379]: connecting -> connect
2020-04-17T22:56:11.045Z ioredis:redis write command[10.200.0.37:6379]: 0 -> info([])
2020-04-17T23:02:02.627Z ioredis:redis queue command[10.200.0.37:6379]: 0 -> get([ 'friday' ])
# suspense is killing me....
Here's the code
var Redis = require('ioredis');

console.log('cache connecting to', CONFIG.CACHE_URL);
var redis = new Redis(CONFIG.CACHE_URL);
console.log('cache connected');

const getRoom = (roomName, callback) => {
    let room;
    console.log('getRoom', roomName); // this logs as expected, nothing after this does
    try {
        redis.get(roomName, (err, result) => {
            if (err) {
                console.log('get cache error', err);
            } else {
                if (result) {
                    console.log('cache result', result);
                    room = JSON.parse(result);
                } else {
                    console.log('no cache', roomName);
                    room = defaultRoom(roomName);
                    redis.set(roomName, JSON.stringify(room));
                }
            }
            if (callback) callback(room);
            console.log('getRoom done');
        });
    } catch (ex) {
        console.log('getRoom error', ex.toString());
    }
};
I've confirmed the security groups, and ElastiCache is in the same VPC as my ECS container. What can I do to troubleshoot this?
UPDATE
I swapped ioredis out for redis and the same thing happens, nada...
Fixed it! I wasn't aware that I (an AWS newb) had configured ElastiCache for Encryption in Transit. Once I set the auth token and used it with ioredis, it works! I'm back in business!
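For reference, a minimal sketch of an ioredis client for a cluster with in-transit encryption and an AUTH token (CONFIG.CACHE_HOST and CONFIG.CACHE_AUTH are assumed config values, by analogy with CONFIG.CACHE_URL above):

const Redis = require('ioredis');

const redis = new Redis({
    host: CONFIG.CACHE_HOST,     // ElastiCache primary endpoint (assumed config value)
    port: 6379,
    password: CONFIG.CACHE_AUTH, // the AUTH token (assumed config value)
    tls: {}                      // required when in-transit encryption is on
});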

AWS RDSDataService times out whereas external mysql package works

I have an AWS Lambda (Node.js) talking to an Aurora database. Both belong to the same VPC, with internet access enabled via the subnet. The RDS cluster also has an inbound rule that allows traffic from the VPC used for the Lambda (which should be the same VPC). To my surprise, I found that RDSDataService from the AWS SDK fails to connect to the database, whereas the mysql package works. Following are the two code snippets.
I would very much like to use the AWS SDK, as that would reduce the deployment bundle size: I wouldn't have to include a database driver in the bundle at all. Is there any way to achieve that?
Failed attempt to use RDSDataService
const AWS = require("aws-sdk");

const rdsData = new AWS.RDSDataService({
    params: {
        dbClusterOrInstanceArn: 'rds.cluster.arn',
        awsSecretStoreArn: 'rds.cluster.secret.arn',
        database: 'mydb'
    },
    endpoint: 'mydb.endpoint'
});

return new Promise((resolve, reject) => {
    try {
        rdsData.executeSql({
            dbClusterOrInstanceArn: 'rds.cluster.arn',
            awsSecretStoreArn: 'rds.cluster.secret.arn',
            database: 'mydb',
            sqlStatements: "select 1 + 1 as result;"
        }, (err, data) => {
            if (err) {
                reject(err);
            }
            const response = {
                statusCode: 200,
                body: JSON.stringify(data),
            };
            resolve(response);
        });
    } catch (er) {
        reject(er);
    }
});
Working implementation using mysql
const mysql = require('mysql');

const connection = mysql.createConnection({
    host: 'mydb.endpoint',
    user: 'user',
    password: 'password',
    port: 3306,
    database: 'mydb',
    debug: false
});

connection.connect(function (err) {
    if (err) context.fail();
    else {
        connection.query('select 1 + 1 as result', function (error, results, fields) {
            if (error) throw error;
            resolve('The solution is: ' + JSON.stringify(results, undefined, 2));
        });
    }
});
connection.end();
As it turned out, the Data API is not yet available in my region. The supported regions are listed here: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html#data-api.regions
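For anyone in a supported region, a minimal sketch of the same query using the Data API's newer executeStatement call (executeSql is deprecated), pinning the client to an explicit region; the region name is just an example, and the ARNs are the same placeholders as in the question:

const AWS = require("aws-sdk");

// Pin the client to a region where the Data API is available
// (example region; check the list linked above).
const rdsData = new AWS.RDSDataService({ region: 'us-east-1' });

exports.handler = async (event) => {
    const data = await rdsData.executeStatement({
        resourceArn: 'rds.cluster.arn',
        secretArn: 'rds.cluster.secret.arn',
        database: 'mydb',
        sql: 'select 1 + 1 as result'
    }).promise();

    return {
        statusCode: 200,
        body: JSON.stringify(data),
    };
};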

How to avoid a broken connection with ORACLEDB? Nodejs

I have this database connection. Inside the function, at the comment, there is a data-update loop for a REST API. The data updates, but while the data in the Oracle database is being updated, the connection can fail, and after that every subsequent request gets undefined. How can I connect to the database properly so that there are no failures?
oracledb.getConnection(
    {
        user: db.user,
        password: db.password,
        connectString: db.connectString
    },
    connExecute
);

function connExecute(err, connection) {
    if (err) {
        console.error(err.message);
        return;
    }
    const sql = `SELECT * FROM db.test`;
    connection.execute(sql, {}, { outFormat: oracledb.OBJECT },
        function (err, db) {
            if (err) {
                console.error(err.message);
                connRelease(connection);
                return;
            }
            // data update loop
            connRelease(connection);
        });
}

function connRelease(connection) {
    connection.close(
        function (err) {
            if (err) {
                console.error(err.message);
            }
        });
}
You should be using a connection pool. Connection pools have built-in logic to detect connections with issues and create new connections transparently. See this series on creating a REST API for more details: https://jsao.io/2018/03/creating-a-rest-api-with-node-js-and-oracle-database/
Keep in mind that issues can still happen, so you have to handle errors as needed for your application.
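A minimal sketch of the pooled approach with node-oracledb, reusing the db.* settings from the question (promise style; the pool is created once at startup and connections are borrowed per request):

const oracledb = require('oracledb');

async function init() {
    // Create the pool once at startup; the pool detects broken
    // connections and replaces them transparently.
    await oracledb.createPool({
        user: db.user,
        password: db.password,
        connectString: db.connectString
    });
}

async function runQuery() {
    let connection;
    try {
        connection = await oracledb.getConnection(); // borrow from the default pool
        const result = await connection.execute(
            `SELECT * FROM db.test`, {},
            { outFormat: oracledb.OBJECT }
        );
        // data update loop would go here, using result.rows
        return result.rows;
    } finally {
        if (connection) {
            await connection.close(); // returns the connection to the pool
        }
    }
}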
Generally, you add a listener on the connection object and, on disconnection or failure, create the connection again. With minor changes you can adopt this approach: use listeners to check whether the connection is still available and, if not, connect again. There are several reasons a connection can close, so it is better to handle exceptions, check whether you are still connected, and reconnect on error.
Or you can try this npm package, which will do the reconnection for you:
https://www.npmjs.com/package/oracledb-autoreconnect
Ping me if you need clarification.
var dbConfig = {
    host: '----',
    user: '----',
    password: '----',
    database: '----',
    port: ----
};

var connection;

function handleDisconnect() {
    // Recreate the connection, since the old one cannot be reused.
    connection = <obj>.getConnection(dbConfig);

    connection.connect(function onConnect(err) {
        // The server is either down or restarting (takes a while sometimes).
        if (err) {
            console.log('error when connecting to db:', err);
            // We introduce a delay before attempting to reconnect, to avoid a
            // hot loop, and to allow our node script to process asynchronous
            // requests in the meantime. If you're also serving http, display
            // a 503 error.
            setTimeout(handleDisconnect, 10000);
        }
    });

    connection.on('error', function onError(err) {
        console.log('db error', err);
        // Connections are usually lost due to either a server restart, or a
        // connection idle timeout (the wait_timeout server variable
        // configures this).
        if (err.code == 'PROTOCOL_CONNECTION_LOST') {
            handleDisconnect();
        } else {
            throw err;
        }
    });
}

handleDisconnect();
