I have an AWS Lambda function set up with Node.js that queries a Postgres database using the pg-promise library to retrieve data, then sends HTTPS GET requests, etc., but none of the rest is important here.
I was originally using the 'pg' library but ran into connection-closing and async issues, which is why I switched to pg-promise (which DID fix my other problem!). With the regular pg library I was getting expected latency of under 1000 ms.
I have boiled down and redacted the code to just a simple query, and I am getting these response times from the last 3 test runs: 11790.78 ms, 11232.22 ms, 12002.04 ms. Every single time it is over 10000ms...
EDIT: Fixed code
const pgp = require('pg-promise')();
const https = require('https');
const xmlParser = require('xml2js').Parser();
const client = pgp({
    database: process.env.DATABASE,
    host: process.env.HOST,
    port: process.env.PORT,
    user: process.env.USERNAME,
    password: process.env.PASSWORD
});
exports.handler = function(event, context, callback) {
    client.one("SELECT period FROM pay WHERE company='XXX' ORDER BY moddate DESC LIMIT 1;")
        .then(function(data) {
            callback(null, {
                "statusCode": 200,
                "headers": {
                    'Content-Type': 'application/json'
                },
                "body": data.period
            });
        })
        .catch(function(error) {
            console.error(error);
        });
};
As stated, I was having no problems with latency when using the 'pg' library, so I know there is no problem with the lambda-RDS postgres connection.
Does anyone have any idea why this is?
Thanks,
In the end I figured out what the problem was... leaving pg-promise to automatically shut down the connection pool is what was causing the latency.
Chaining a
.finally(pgp.end);
after the .catch gave me a 200ms response time.
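For reference, a minimal sketch of the complete handler with that change, reusing the same client and query as above (my assumption is that the idle pool was keeping the event loop alive, which is what Lambda waits on before returning):

exports.handler = function(event, context, callback) {
    client.one("SELECT period FROM pay WHERE company='XXX' ORDER BY moddate DESC LIMIT 1;")
        .then(function(data) {
            callback(null, {
                "statusCode": 200,
                "headers": { 'Content-Type': 'application/json' },
                "body": data.period
            });
        })
        .catch(function(error) {
            console.error(error);
        })
        .finally(pgp.end); // shut down the connection pool explicitly
};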
Thanks everyone
I am trying the Optimizing Networking approach for Firebase Cloud Functions, as described here, with TypeScript:
const http = require('http');
const functions = require('firebase-functions');
const agent = new http.Agent({keepAlive: true});
export const getXXX = functions.https.onRequest((request, response) => {
    const req = http.request({
        host: 'localhost',
        port: 443,
        path: '',
        method: 'GET',
        agent: agent,
    }, res => {
        let rawData = '';
        res.setEncoding('utf8');
        res.on('data', chunk => { rawData += chunk; });
        res.on('end', () => {
            response.status(200).send(`Data: ${rawData}`);
        });
    });
    req.on('error', e => {
        response.status(500).send(`Error: ${e.message}`);
    });
    req.end();
});
but I keep getting
error: connect ECONNREFUSED 127.0.0.1:443
I am not very familiar with TypeScript and JS, so please help me.
Another question: when does res.on('data') get triggered?
Turns out I need to be on a paid plan in order to make external HTTP requests from inside my function.
You can't access anything on "localhost" (127.0.0.1) in Cloud Functions. I suspect that you meant to put a different host in there, and ensure that your project is on the Blaze plan to enable outgoing connections to services not fully controlled by Google.
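For illustration only, a sketch of the same function pointed at an external API over HTTPS instead of localhost; the host and path are placeholders, and port 443 implies TLS, so the https module is used:

const https = require('https');
const functions = require('firebase-functions');

// Keep-alive agent so warm instances can reuse TCP connections
const agent = new https.Agent({ keepAlive: true });

export const getXXX = functions.https.onRequest((request, response) => {
    const req = https.request({
        host: 'api.example.com', // placeholder: a real external host, not localhost
        path: '/some/path',      // placeholder path
        method: 'GET',
        agent: agent,
    }, res => {
        let rawData = '';
        res.setEncoding('utf8');
        // 'data' fires each time a chunk of the response body arrives
        res.on('data', chunk => { rawData += chunk; });
        // 'end' fires once, after the final chunk has been received
        res.on('end', () => {
            response.status(200).send(`Data: ${rawData}`);
        });
    });
    req.on('error', e => {
        response.status(500).send(`Error: ${e.message}`);
    });
    req.end();
});

This also answers the side question: res.on('data') is triggered each time a chunk of the response body arrives, and 'end' fires once the whole body is in.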
You can run Cloud Functions on localhost. All you need to do is run a local emulator of the Cloud services. Which Google has provided! It's a really awesome tool and a great setup!
Follow these steps for the Firebase tool suite: https://firebase.google.com/docs/functions/local-emulator
Follow these steps for the Cloud tool suite: https://cloud.google.com/functions/docs/emulator
They are pretty much similar.
You do not need the Blaze plan; you can use the "pay as you go" plan, which includes the free tier quota: "Free usage from Spark plan included*" (https://firebase.google.com/pricing).
I'm trying to do a get() from my AWS Lambda (Node.js) against ElastiCache Redis using the node_redis client. I believe that I'm able to connect to Redis, but I'm getting a timeout (the Lambda's 60-second timeout) when I try to perform a get() operation.
I have also granted my AWS Lambda Administrator access just to be certain that it's not a permissions issue. I'm invoking the Lambda from the AWS console by clicking the Test button.
Here is my redisClient.js:
const util = require('util');
const redis = require('redis');

console.info('Start to connect to Redis Server');
const client = redis.createClient({
    host: process.env.ElastiCacheEndpoint,
    port: process.env.ElastiCachePort
});

client.get = util.promisify(client.get);
client.set = util.promisify(client.set);

client.on('ready', function() {
    console.log(" subs Redis is ready"); // Can see this output in logs
});

client.on('connect', function() {
    console.log('subs connected to redis'); // Can see this output in logs
});

exports.set = async function(key, value) {
    console.log("called set!");
    return await client.set(key, value);
};

exports.get = async function(key) {
    console.log("called get!"); // Can see this output in logs
    return await client.get(key);
};
Here's my index.js which calls the redisClient.js:
const redisclient = require("./redisClient");

exports.handler = async (event) => {
    const params = event.params;
    const operation = event.operation;
    try {
        console.log("Checking RedisCache by calling client get"); // Can see this output in logs
        const cachedVal = await redisclient.get('mykey');
        console.log("Checked RedisCache by calling client get"); // This doesn't show up in logs.
        console.log(cachedVal);
        if (cachedVal) {
            return {
                statusCode: 200,
                body: JSON.stringify(cachedVal)
            };
        } else {
            const setCache = await redisclient.set('myKey', 'myVal');
            console.log(setCache);
            console.log("*******");
            let response = await makeCERequest(operation, params, event.account);
            console.log("CE Request returned");
            return response;
        }
    }
    catch (err) {
        return {
            statusCode: 500,
            body: err,
        };
    }
};
This is the output (timeout error message) that I get:
{
"errorMessage": "2020-07-05T19:04:28.695Z 9951942c-f54a-4b18-9cc2-119eed65e9f1 Task timed out after 60.06 seconds"
}
I have tried using Bluebird (changing get to getAsync()) per this: https://github.com/UtkarshYeolekar/promisify-redis-client/blob/master/redis.js but still got the same behavior.
I also changed the port used to create the client to a random value (like 8088) to observe the behavior of the connect event for a failed connection. In this case I still see a timed-out error response, but I don't see "subs Redis is ready" and "subs connected to redis" in my logs.
Can anyone please point me in the right direction? I don't seem to understand why I'm able to connect to redis but the get() request times out.
I figured out the issue and am posting it here in case it helps anyone in the future, as the behavior wasn't very intuitive to me.
I had enabled AuthToken param while setting up my redis. I was passing the param to lambda with the environment variables but wasn't using it while sending the get()/set() requests. When I disabled the AuthToken requirement from redis configuration - Lambda was able to hit redis with get/set requests. More details on AuthToken can be found here: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticache-replicationgroup.html#cfn-elasticache-replicationgroup-authtoken
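For anyone who wants to keep AUTH enabled instead of disabling it, a sketch of what the client setup might look like (assuming node_redis v2/v3; the AuthToken variable name is a placeholder, and AUTH on ElastiCache requires in-transit encryption, hence the tls option):

const redis = require('redis');

const client = redis.createClient({
    host: process.env.ElastiCacheEndpoint,
    port: process.env.ElastiCachePort,
    password: process.env.AuthToken, // placeholder env var holding the AUTH token
    tls: {}                          // in-transit encryption, required when AUTH is enabled
});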
I was trying to make a POST request that queries the DB (RDS) using an async handler.
However, I ran into the following issues.
Half of the time, the Lambda function completes but the query is not successfully sent to RDS. The other half of the time, it is sent completely. I tried adding a setTimeout function to increase the Lambda execution time by 3 seconds, and then the query is sent every time.
Also, the log shows the error:
INFO Error: Cannot enqueue Query after fatal error.
The following are my code:
var mysql = require('mysql');

var connection = mysql.createConnection({
    host     : '***',
    user     : '***',
    password : '***',
    database : '***'
});

exports.handler = async (event) => {
    const sql = `INSERT INTO forms VALUES(777,2,3,4,5,6,7,8,9,10,11);`;
    const query = (x) => {
        return new Promise((resolve, reject) => {
            resolve(connection.query(x, function (error, results, fields) {
                console.log(error);
                console.log(results);
                console.log(fields);
            }));
        });
    };
    await query(sql);
};
With the timeout function,
var mysql = require('mysql');

var connection = mysql.createConnection({
    host     : '***',
    user     : '***',
    password : '***',
    database : '***'
});

exports.handler = async (event) => {
    const sql = `INSERT INTO forms VALUES(777,2,3,4,5,6,7,8,9,10,11);`;
    const query = (x) => {
        return new Promise((resolve, reject) => {
            resolve(connection.query(x, function (error, results, fields) {
                console.log(error);
                console.log(results);
                console.log(fields);
            }));
        });
    };
    await query(sql);
    await wait(3000);
};

const wait = (x) => {
    return new Promise((resolve, reject) => {
        setTimeout(() => { resolve(console.log("delay")); }, x);
    });
};
The first value is a primary key. A constant 777 is sent as a check: if the error shows a duplicate primary key, it means the query was successfully sent. If there is no error, it means the query was not sent, even though the Lambda finishes.
The execution result succeeded but shows:
START RequestId: e541fe4b-6927-4fbb-90b4-750f77e5f460 Version: $LATEST
2019-12-19T01:54:45.212Z e541fe4b-6927-4fbb-90b4-750f77e5f460 INFO Error: **Cannot enqueue Query after fatal error**.
    at Protocol._validateEnqueue (/var/task/node_modules/mysql/lib/protocol/Protocol.js:212:16)
    at Protocol._enqueue (/var/task/node_modules/mysql/lib/protocol/Protocol.js:138:13)
    at Connection.query (/var/task/node_modules/mysql/lib/Connection.js:201:25)
    at /var/task/index.js:14:24
    at new Promise (<anonymous>)
    at query (/var/task/index.js:13:10)
    at Runtime.exports.handler (/var/task/index.js:20:7)
    at Runtime.handleOnce (/var/runtime/Runtime.js:66:25) {
  code: 'PROTOCOL_ENQUEUE_AFTER_FATAL_ERROR',
  fatal: false
}
2019-12-19T01:54:45.213Z e541fe4b-6927-4fbb-90b4-750f77e5f460 INFO undefined
2019-12-19T01:54:45.213Z e541fe4b-6927-4fbb-90b4-750f77e5f460 INFO undefined
2019-12-19T01:54:45.262Z e541fe4b-6927-4fbb-90b4-750f77e5f460 INFO delay
END RequestId: e541fe4b-6927-4fbb-90b4-750f77e5f460
REPORT RequestId: e541fe4b-6927-4fbb-90b4-750f77e5f460 Duration: 51.09 ms Billed Duration: 100 ms Memory Size: 128 MB Max Memory Used: 80 MB
Could you please advise, and also tell me the best way to execute this?
Managing RDBMS connections in any environment is not a trivial task. Lambda adds a layer of complexity here. You need to understand the distinction between warm and cold restarts, what it means for resources created outside of your handler function, when connection pools are appropriate, and when and how to release connections.
Persistent connections to a database are not particularly suitable in a microservices, FaaS environment like Lambda. That's one reason that Aurora Serverless supports an HTTP Data API (and hopefully other DB engines will too at some point).
Read How To: Manage RDS Connections from AWS Lambda Serverless Functions.
Also be aware of the new Amazon RDS Proxy with AWS Lambda.
In your particular case, the most obvious concern is that you are repeatedly creating DB connections but never releasing them (unless that is a built-in feature of the mysql package's query function that I'm not aware of).
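For illustration, a minimal sketch of one way to address both points in the question's code: settle the promise inside the query callback so the handler actually waits for MySQL, and release the connection when done (the environment variable names are placeholders):

const mysql = require('mysql');

exports.handler = async (event) => {
    // Create the connection per invocation so it can be cleanly released
    const connection = mysql.createConnection({
        host: process.env.RDS_HOSTNAME, // placeholder env vars
        user: process.env.RDS_USERNAME,
        password: process.env.RDS_PASSWORD,
        database: process.env.RDS_DATABASE
    });
    const sql = `INSERT INTO forms VALUES(777,2,3,4,5,6,7,8,9,10,11);`;
    try {
        return await new Promise((resolve, reject) => {
            connection.query(sql, (error, results) => {
                if (error) return reject(error); // settle only when MySQL responds
                resolve(results);
            });
        });
    } finally {
        connection.end(); // flush and release the connection instead of leaking it
    }
};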
You can increase the Lambda timeout up to 15 minutes. But if you are calling the Lambda through API Gateway, the timeout is 29 seconds.
Here is the code that works for me:
const mysql = require('mysql');

const con = mysql.createConnection({
    host: process.env.RDS_HOSTNAME,
    user: process.env.RDS_USERNAME,
    password: process.env.RDS_PASSWORD,
    port: process.env.RDS_PORT,
    connectionLimit: 10,
    multipleStatements: true, // allow multiple SQL statements in one query
    debug: true
    // ,database: 'testdb1'
});

exports.handler = async (event) => {
    try {
        const data = await new Promise((resolve, reject) => {
            con.connect(function (err) {
                if (err) {
                    reject(err);
                }
                const sql = `INSERT INTO forms VALUES(777,2,3,4,5,6,7,8,9,10,11);`;
                con.query(sql, function (err, result) {
                    if (err) {
                        console.log("Error->" + err);
                        reject(err);
                    }
                    resolve(result);
                });
            });
        });
        return {
            statusCode: 200,
            body: JSON.stringify(data)
        };
    } catch (err) {
        return {
            statusCode: 400,
            body: err.message
        };
    }
};
reference: aws lambda with rds mysql DDL command not working
I have an AWS RDS instance which is publicly accessible, and I want to connect to it from AWS Lambda. I am using API Gateway to create a REST API at "/hello", which needs to return the RDS connection status.
The Lambda code is given below.
var mysql = require('mysql');
var ApiBuilder = require('claudia-api-builder'),
    api = new ApiBuilder();

var con = mysql.createConnection({
    host: "host",
    user: "user",
    password: "password"
});

module.exports = api;

api.get('/hello', function () {
    // any code written here works perfectly, when this is called
    con.connect(function(err) { // this part doesn't work
        if (err) return err;
        return "connected";
    });
});
The database is publicly accessible, even from my local machine. The Lambda function also has the required permissions with AWS RDS.
The response I get on the browser is as follows
{}
I checked the cloud watch logs, and it is given below.
08:16:11 START RequestId: bf804be1-4797-11e8-8b3f-5b09118631a6 Version: $LATEST
08:16:11 END RequestId: bf804be1-4797-11e8-8b3f-5b09118631a6
08:16:11 REPORT RequestId: bf804be1-4797-11e8-8b3f-5b09118631a6 Duration: 34.28 ms Billed Duration: 100 ms Memory Size: 128 MB Max Memory Used: 26 MB
The Lambda function doesn't get past con.connect(...), and there is no error shown anywhere (not even in the RDS logs). The API Gateway setup works, because any code before con.connect(...) runs perfectly. What could be the issue here?
con.connect(function(err) {
    if (err) return err;
    return "connected";
});
This is an asynchronous call. Before the connect callback is invoked, the Lambda will complete and exit.
Change this to
con.connect(function(err) {
    if (err) return context.done(err);
    context.done(null, "connected");
});
As @ConfusedCoder explained, this is caused by con.connect(...) being an asynchronous call. Using the context in Lambda is not an option here, as the Lambda is being invoked through API Gateway with claudia-api-builder, and there is no exports.myHandler in this scenario. Therefore the context object cannot be used to control the flow of the Lambda.
exports.myHandler = function(event, context) {
...
}
I tried playing with promises, async/await, and other techniques until I found a Node package named sync-mysql. This makes synchronous queries to a MySQL database, so AWS Lambda does not proceed without executing the database call. The updated code, using 'sync-mysql', is given below.
var ApiBuilder = require('claudia-api-builder'),
    api = new ApiBuilder();
var MySql = require('sync-mysql');

var connection = new MySql({
    host: "host",
    user: "user",
    password: "password"
});

module.exports = api;

api.get('/hello', function(request) {
    return connection.query('SELECT * FROM DB.DummyTable');
});
I have one AWS Lambda function, written in Node.js:
var pg = require("pg");
exports.handler = (event, context, callback) => {
var client = new pg.Client({
user: "41231qd123",
password: "lkjlkasjdlkasldkjas",
database: "12312312asdasd",
port: 5432,
host: "kdoiedjeifk.compute-1.amazonaws.com",
ssl: true
});
client.connect();
console.log('Connected to PostgreSQL database');
client.query("SELECT products.* from products where location_id =
"+event.location_id+" AND company_id = "+event.company_id+" order
by products.name ASC;", (err, res) => {
if (err) {
callback(err);
}
var jsonString = JSON.stringify(res.rows);
var jsonObj = JSON.parse(jsonString);
console.log(jsonString);
client.end();
context.succeed(jsonObj);
});
};
When I test this function with the AWS Lambda "Test" button, it works perfectly, but after I added API Gateway and tested it there, I get the message 'Process exited before completing request'. Is there any specific reason in the code or configuration?
First, check your logs for a lambda error.
If there is no lambda crash
If the function takes longer than 29 seconds, the API Gateway integration can time out. Check the Lambda logs to see if there is a crash there, and if not, check the time taken.
If there is a lambda crash
Your Lambda logs in CloudWatch will give a better reason why. "Process exited before completing request" is usually the boilerplate AWS Lambda error for a limits error. You likely either:
Ran out of memory in the lambda VM
Ran out of disk space in the lambda VM
Timed out.
Check the REPORT line of your logs, they should look like this:
REPORT RequestId: asdf-wertvgh-12345-fdghdfgh Duration: 12345.13 ms Billed Duration: 12400 ms Memory Size: 128 MB Max Memory Used: 64 MB
If that isn't the error, your logs should give you some idea of what happened. Posting a traceback might help.