I have an AWS Lambda function written in Node.js that is doing a set of recursive Postgres database calls that result in the following error every time on the 81st call:
remaining connection slots are reserved for non-replication superuser connections
I'm assuming I am leaking something at the Postgres level, but I believe I am adhering to the recommended calls for performing a single pooled query as defined at https://node-postgres.com/features/pooling. I've simplified my code as shown below so that I'm only executing the same query every time, and the result is still the same. The function testHarness is what initiates the logic within my Lambda function. The intent here is to execute a query against Postgres and, once it completes, fire off the query again, repeating 500 times for this example. It always fails when the 81st call occurs. The DB_CONNECT environment variable contains the connection information, including a "MAX" value of 3.
function testHarness(cb){
    _test(0, cb);
}

function _test(pos, cb){
    console.log(pos);
    _testData(function (err, data){
        if (err) return cb(err);
        if (pos < 500){
            _test(pos + 1, cb);
        }
        else{
            return cb(null, 'done');
        }
    });
}

function _testData(cb){
    const { Pool } = require('pg')
    const pool = new Pool(JSON.parse(process.env.DB_CONNECT));
    const sql = 'SELECT id, url, pub_date, description, title, duration FROM episodes WHERE feed_id = $1 ORDER BY pub_date DESC LIMIT 10';
    pool.query(sql, ['28c65c8d-f96a-4499-a854-187eed7050bd'], (err, result) => {
        if (err) throw err;
        return cb(err, result);
    })
}
So the problem is the leaking Pool objects that you create in the _testData function. After using a Pool you have to shut it down; you can find this in the documentation under the "Shutdown" heading, which says to call:
pool.end()
But the way you are using the Pool does not make sense anyway. It is better to create it in the testHarness function so the connections can be reused, saving the connection overhead and letting your code run faster:
function testHarness(cb){
    const { Pool } = require('pg')
    const pool = new Pool(JSON.parse(process.env.DB_CONNECT));
    _test(pool, 0, function(err, data){
        pool.end();
        cb(err, data);
    });
}

function _test(pool, pos, cb){
    console.log(pos);
    _testData(pool, function (err, data){
        if (err) return cb(err);
        if (pos < 500){
            _test(pool, pos + 1, cb);   // pass the pool along on each recursion
        }
        else{
            return cb(null, 'done');
        }
    });
}

function _testData(pool, cb){
    const sql = 'SELECT id, url, pub_date, description, title, duration FROM episodes WHERE feed_id = $1 ORDER BY pub_date DESC LIMIT 10';
    pool.query(sql, ['28c65c8d-f96a-4499-a854-187eed7050bd'], (err, result) => {
        return cb(err, result);     // propagate errors through the callback instead of throwing
    })
}
I am not an AWS user, but I guess it works like any other Postgres database service; you might need to change this a bit to fit the AWS service.
Also, are you able to use the async/await pattern? It is a lot easier to comprehend.
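For illustration, a minimal sketch of the same harness with async/await (this assumes a pg version where pool.query and pool.end return promises when no callback is given):

const { Pool } = require('pg');

async function testHarness() {
    const pool = new Pool(JSON.parse(process.env.DB_CONNECT));
    const sql = 'SELECT id, url, pub_date, description, title, duration FROM episodes WHERE feed_id = $1 ORDER BY pub_date DESC LIMIT 10';
    try {
        // same 501 sequential queries as the recursive version
        for (let pos = 0; pos <= 500; pos++) {
            console.log(pos);
            await pool.query(sql, ['28c65c8d-f96a-4499-a854-187eed7050bd']);
        }
        return 'done';
    } finally {
        await pool.end(); // release all pooled connections exactly once
    }
}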
Related
I send two queries sequentially
I query data from table A, and then, according to the result, query data from table B.
So I query the data like this:
var async = require('async');
var mysql = require('mysql');
var config = require('./config.json');

var connection = mysql.createConnection({
    host     : config.dbhost,
    user     : config.dbuser,
    password : config.dbpassword,
    database : config.dbname
});

exports.handler = (event, context, callback) => {
    // TODO implement
    var tasks = [
        function (callback) {
            connection.query("SELECT email FROM Visitor WHERE id =?;", [1], function (err, row) {
                if (err) return callback(err);
                if (row.length == 0) return callback('No Result Error');
                callback(null, row[0]);
            })
        },
        function (data, callback) {
            connection.query("SELECT id,signtime FROM Board WHERE email =?;", data.email, function (err, row) {
                if (err) return callback(err);
                if (row.length == 0) {
                    return callback('No Result Error');
                } else {
                    callback(null, row[0])
                }
            })
        }
    ];

    async.waterfall(tasks, function (err, result) {
        if (err)
            console.log('err');
        else
            return result;      // <-- the highlighted line
        console.log('done');
        connection.end();
    });
};
When I log the data with console.log(), I get the data on the command line.
But in Lambda, with the function placed inside exports.handler, the response is null.
If I change return result to callback(result), an error occurs.
I think this may be a simple problem to solve.
If you know what is wrong, please help me.
In the first case, the response is null because you used neither a Promise nor the callback to let the Lambda sandbox know that the job is done. In the second case, you used the callback, but you passed the result as the first argument to it. The Lambda programming model for Node.js follows a principle called "error-first callback". Long story short: if any error occurred during execution, you should go with callback(error), and if everything is OK and you need to return some result from the Lambda, you should go with callback(null, result). So basically, on the line before console.log('done');, use callback(null, result) and it will work for you.
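Applied to the code above, the tail of the waterfall would look something like this (a sketch; the connection.end() call is kept from the question):

async.waterfall(tasks, function (err, result) {
    connection.end();
    if (err) {
        console.log('err');
        return callback(err);       // error case: error as the first argument
    }
    console.log('done');
    callback(null, result);         // success case: null error, then the result
});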
I would like to know if it's possible to run a series of SQL statements and have them all committed in a single transaction.
The scenario I am looking at is where an array has a series of values that I wish to insert into a table, not individually but as a unit.
I was looking at the following item which provides a framework for transactions in node using pg. The individual transactions appear to be nested within one another so I am unsure of how this would work with an array containing a variable number of elements.
https://github.com/brianc/node-postgres/wiki/Transactions
var pg = require('pg');

var rollback = function(client, done) {
    client.query('ROLLBACK', function(err) {
        //if there was a problem rolling back the query
        //something is seriously messed up. Return the error
        //to the done function to close & remove this client from
        //the pool. If you leave a client in the pool with an unaborted
        //transaction weird, hard to diagnose problems might happen.
        return done(err);
    });
};

pg.connect(function(err, client, done) {
    if(err) throw err;
    client.query('BEGIN', function(err) {
        if(err) return rollback(client, done);
        //as long as we do not call the `done` callback we can do
        //whatever we want...the client is ours until we call `done`
        //on the flip side, if you do call `done` before either COMMIT or ROLLBACK
        //what you are doing is returning a client back to the pool while it
        //is in the middle of a transaction.
        //Returning a client while it's in the middle of a transaction
        //will lead to weird & hard to diagnose errors.
        process.nextTick(function() {
            var text = 'UPDATE account SET money = money + $1 WHERE id = $2';
            client.query(text, [100, 1], function(err) {
                if(err) return rollback(client, done);
                client.query(text, [-100, 2], function(err) {
                    if(err) return rollback(client, done);
                    client.query('COMMIT', done);
                });
            });
        });
    });
});
My array logic is:
banking.forEach(function(batch){
    client.query(text, [batch.amount, batch.id], function(err, result){
        // ...
    });
});
pg-promise offers very flexible support for transactions. See Transactions.
It also supports partial nested transactions, aka savepoints.
The library manages transactions automatically, which is what should be used these days, because too many things can go wrong if you try to organize a transaction manually, as in your example.
See a related question: Optional INSERT statement in a transaction
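For the array case in the question, a rough sketch with pg-promise could look like this (assuming db is the database object from pg-promise initialization; the UPDATE statement and column names are carried over from the question and purely illustrative):

db.tx(t => {
    const queries = banking.map(batch =>
        t.none('UPDATE account SET money = money + $1 WHERE id = $2', [batch.amount, batch.id])
    );
    return t.batch(queries); // resolves when all queries succeed, otherwise the tx rolls back
})
    .then(() => {
        console.log('transaction committed');
    })
    .catch(error => {
        console.log('transaction rolled back:', error);
    });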
Here's a simple TypeScript solution that avoids pg-promise:
import { PoolClient } from "pg"
import { pool } from "../database"

const tx = async (callback: (client: PoolClient) => Promise<void>) => {
    const client = await pool.connect();
    try {
        await client.query('BEGIN')
        try {
            await callback(client)
            await client.query('COMMIT')
        } catch (e) {
            await client.query('ROLLBACK')
            throw e // re-throw so the caller knows the transaction failed
        }
    } finally {
        client.release()
    }
}

export { tx }
Usage:
...
let result;
await tx(async client => {
    const { rows } = await client.query<{ cnt: string }>('SELECT COUNT(*) AS cnt FROM users WHERE username = $1', [username]);
    result = parseInt(rows[0].cnt) > 0;
});
return result;
I have kept the insert and update code in two different files, and based on a condition the insert should always execute first and then the update. But somehow the update executes first, then the insert.
test.js: simplified code
I am using these packages: pg, node-uuid
var pg = require('pg');
var uuid = require('node-uuid').v4;

var id = uuid().toString();
var conString = 'postgres://postgres:pass@127.0.0.1:5432/testdb';

// ------INSERT
pg.connect(conString, function(err, client, done) {
    console.log('Executing Insert query');
    client.query('insert into testdb (id,data,iscancelled) values ($1,$2,$3)', [id, 'hello', 'no'], function(err, result) {
        done();
        if (err) { return console.error('error running query', err); }
        console.log('finished executing Insert query');
    });
});

// ------UPDATE
pg.connect(conString, function(err, client, done) {
    console.log('Executing update query');
    client.query("update testdb set iscancelled = 'yes' where id = $1", [id], function(err, result) {
        done();
        if (err) { return console.error('error running query', err); }
        console.log('finished executing Update query');
    });
});
output
tom@tom:~$ node test.js
Executing Insert query
Executing update query
finished executing Update query //WHY UPDATE FINISHES FIRST
finished executing Insert query
Note:
this problem can easily be solved by using async. But my insert code and update code are in different files, and depending on the situation the update code might execute, so I don't want to use async.
Problem
Even though the insert query starts executing first, why does the update finish first in the output?
Am I missing anything?
Let's solve this question step by step.
You stated "don't want to use async" libraries.
Solution 1:
If PostgreSQL executes the update faster, the update will return its result before the insert. If you want the update query to start executing only after the insert finishes, you should set the connection pool capacity to 1:
pg.defaults.poolSize = 1
But you should do this before any pg.connect().
The connect method retrieves a Client from the client pool, or if all pooled clients are busy and the pool is not full, the connect method will create a new client passing its first argument directly to the Client constructor. In either case, your supplied callback will only be called when the Client is ready to issue queries or an error is encountered. The callback will be called once and only once for each invocation of connect.
Conclusion: your queries will execute in sequence. BUT this solution is bad for scaling the app, as there will always be only one connection serving all users. So while that connection is serving one user, all other users will have to wait for a response.
Solution 2:
You also stated "I have kept insert & update code in 2 different files".
It looks like you need to design your code in such a way that you are able to use asynchronous libraries; that solves this problem.
As I already mentioned, the only way to ensure that the update function fires only after the insert function is done is to call it inside the insert's callback. Those are the basics of asynchronous programming.
pg.connect(conString, function(err, client, done) {
    console.log('Executing Insert query');
    client.query('insert into testdb (id,data,iscancelled) values ($1,$2,$3)', [id, 'hello', 'no'], function(err, result) {
        done();
        if (err) { return console.error('error running query', err); }
        console.log('finished executing Insert query');

        // ------UPDATE
        pg.connect(conString, function(err, client, done) {
            console.log('Executing update query');
            client.query("update testdb set iscancelled = 'yes' where id = $1", [id], function(err, result) {
                done();
                if (err) { return console.error('error running query', err); }
                console.log('finished executing Update query');
            });
        });
    });
});
You are missing the asynchronous nature of pg.connect and also client.query. These calls take a callback and return immediately, passing control to the next expression before the query completes; hence the non-blocking nature of Node.js. If you want to guarantee the order, call the successive query inside the success callback:
var pg = require('pg');
var uuid = require('node-uuid').v4;

var id = uuid().toString();

// ------INSERT (insertfile.js)
module.exports = pg.connect;

// ------UPDATE (updatefile.js)
module.exports = pg.connect;

// your calling file
var insert = require('/path/to/insertfile');
var update = require('/path/to/updatefile');

var conString = 'postgres://postgres:pass@127.0.0.1:5432/testdb';

insert(conString, function (err, client, done) {
    console.log('Executing Insert query');
    client.query('insert into testdb (id,data,iscancelled) values ($1,$2,$3)', [id, 'hello', 'no'], function (err, result) {
        if (err) {
            return console.error('error running query', err);
        }
        console.log('finished executing Insert query');
        update(conString, function (error, client, done) {
            console.log('Executing update query');
            client.query("update testdb set iscancelled = 'yes' where id = $1", [id], function (err, result) {
                if (err) {
                    return console.error('error running query', err);
                }
                console.log('finished executing Update query');
                done();
            });
        });
        done();
    });
});
But this is very prone to callback hell, so consider making all async calls return a promise. Take a look at bluebird. If you want an ORM with built-in promise-based calls, take a look at sequelize. It might be handy for you.
It has syntax as easy as:
var Model1 = require('/path/to/model1');
var Model2 = require('/path/to/model2');

var insertObj = {
    "someKey": "value"
};

Model1.create(insertObj)
    .then( function (createdObj1) {
        return Model2.findOne({
            where: {
                "filter": "filterValue"
            }
        });
    })
    .then( function (documentToUpdate) {
        return documentToUpdate.update({
            "fieldToUpdate": "value"
        });
    })
    .then( null, function (err) {
        console.log(err);
    });
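For the plain pg flavor of the same idea, something like this should work with bluebird (a sketch, assuming the classic callback-based pg Client API; promisifyAll generates the *Async method variants):

var Promise = require('bluebird');
var pg = require('pg');
Promise.promisifyAll(pg.Client.prototype); // adds connectAsync, queryAsync, ...

var client = new pg.Client(conString);
client.connectAsync()
    .then(function () {
        return client.queryAsync('insert into testdb (id,data,iscancelled) values ($1,$2,$3)', [id, 'hello', 'no']);
    })
    .then(function () {
        return client.queryAsync("update testdb set iscancelled = 'yes' where id = $1", [id]);
    })
    .then(function () {
        console.log('insert finished, then update finished');
    })
    .catch(function (err) {
        console.error('error running query', err);
    })
    .finally(function () {
        client.end();
    });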
In the code
var stuff_i_want = '';
stuff_i_want = get_info(parm);
And the function get_info:
function get_info(data){
    var sql = "SELECT a from b where info = data"
    connection.query(sql, function(err, results){
        if (err){
            throw err;
        }
        console.log(results[0].objid); // good
        stuff_i_want = results[0].objid; // Scope is larger than function
        console.log(stuff_i_want); // Yep. Value assigned..
    });
}
In the larger scope:
stuff_i_want = null
What am I missing regarding returning MySQL data and assigning it to a variable?
============ New code per Alex suggestion
var parent_id = '';

function get_info(data, cb){
    var sql = "SELECT a from b where info = data"
    connection.query(sql, function(err, results){
        if (err){
            throw err;
        }
        return cb(results[0].objid); // Scope is larger than function
    });
}
==== New Code in Use
get_data(parent_recording, function(result){
    parent_id = result;
    console.log("Parent ID: " + parent_id); // Data is delivered
});
However, with
console.log("Parent ID: " + parent_id);
in the scope outside the function, parent_id is null.
You're going to need to get your head around asynchronous calls and callbacks in JavaScript; this isn't C#, PHP, etc...
Here's an example using your code:
function get_info(data, callback){
    var sql = "SELECT a from b where info = data";
    connection.query(sql, function(err, results){
        if (err){
            throw err;
        }
        console.log(results[0].objid); // good
        return callback(results[0].objid); // hand the value back through the callback
    });
}

//usage
var stuff_i_want = '';
get_info(parm, function(result){
    stuff_i_want = result;
    //rest of your code goes in here
});
When you call get_info, this, in turn, calls connection.query, which takes a callback (that's what function(err, results) is).
The scope is then passed to this callback, and so on.
Welcome to JavaScript callback hell...
It's easy when you get the hang of it; it just takes a bit of getting used to, coming from something like C#.
I guess what you really want to do here is return a Promise object with the results. This way you can deal with the async operation of retrieving data from the DBMS: when you have the results, you use the Promise's resolve function to "return the value", i.e. resolve the promise.
Here's an example:
getEmployeeNames = function(){
    return new Promise(function(resolve, reject){
        connection.query(
            "SELECT Name, Surname FROM Employee",
            function(err, rows){
                if(err || rows === undefined){
                    reject(new Error("Error rows is undefined"));
                }else{
                    resolve(rows);
                }
            }
        );
    });
}
On the caller side, you use the then function to manage fulfillment, and the catch function to manage rejection.
Here's an example that makes use of the code above:
getEmployeeNames()
    .then(function(results){
        render(results)
    })
    .catch(function(err){
        console.log("Promise rejection error: " + err);
    })
At this point you can set up the view for your results (which are indeed returned as an array of objects):
render = function(results){ for (var i in results) console.log(results[i].Name) }
Edit
I'm adding a basic example of how to return HTML content with the results, which is a more typical scenario for Node. Just use the then function of the promise to set the HTTP response, and open your browser at http://localhost:3001
require('http').createServer( function(req, res){
    if(req.method == 'GET'){
        if(req.url == '/'){
            res.setHeader('Content-type', 'text/html');
            getEmployeeNames()
                .then(function(results){
                    var html = "<h2>" + results.length + " employees found</h2>";
                    html += "<ul>";
                    for (var i in results) html += "<li>" + results[i].Name + " " + results[i].Surname + "</li>";
                    html += "</ul>";
                    res.end(html);
                })
                .catch(function(err){
                    console.log("Promise rejection error: " + err);
                    res.end("<h1>ERROR</h1>");
                });
        }
    }
}).listen(3001)
Five years later, I understand asynchronous operations much better.
Also with the new syntax of async/await in ES6 I refactored this particular piece of code:
const mysql = require('mysql2') // built-in promise functionality
const DB = process.env.DATABASE
const conn = mysql.createConnection(DB)

async function getInfo(data){
    var sql = "SELECT a from b where info = data"
    const results = await conn.promise().query(sql)
    return results[0]
}

module.exports = {
    getInfo
}
Then, wherever I need this data, I wrap it in an async function, invoke getInfo(data), and use the results as needed.
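A hypothetical usage sketch (the module path and someData are placeholders):

const { getInfo } = require('./get-info'); // hypothetical module path

(async () => {
    const rows = await getInfo(someData);  // someData stands in for the real argument
    console.log(rows);
})();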
This was a situation where I was inserting new records into a child table and needed the parent record's key, based only on a name.
It was a good example of understanding the asynchronous nature of Node:
I needed to wrap all the code affecting the child records inside the call that finds the parent record id.
I was approaching this from a sequential (PHP, Java) perspective, which was all wrong.
It's easier if you send in a promise to be resolved, e.g.:
function get_info(data, promise){
    var sql = "SELECT a from b where info = data";
    connection.query(sql, function(err, results){
        if (err){
            throw err;
        }
        console.log(results[0].objid); // good
        promise.resolve(results[0].objid);
    });
}
This way Node.js stays fast, because it's busy doing other things while your promise is waiting to be resolved.
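Note that a standard Promise does not expose a resolve method from the outside, so the pattern above assumes a deferred-style object. A minimal sketch of building one from a plain Promise:

function makeDeferred() {
    const d = {};
    d.promise = new Promise(function (resolve, reject) {
        d.resolve = resolve;
        d.reject = reject;
    });
    return d;
}

// usage with the get_info above
const d = makeDeferred();
get_info(parm, d);                      // get_info calls d.resolve(...) when the query returns
d.promise.then(function (objid) {
    console.log(objid);                 // the value is available here
});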
I worked toward this goal for a few weeks without any result, and I finally found a way to assign the result of any MySQL query to a variable, using await/async and promises.
You don't need to fully understand promises in order to use it; eh, I don't really know how to use promises myself anyway.
I'm doing it using a model class for my database, like this:
class DB {
    constructor(db) {
        this.db = db;
    }

    async getUsers() {
        let query = "SELECT * FROM asimov_users";
        return this.doQuery(query)
    }

    async getUserById(array) {
        let query = "SELECT * FROM asimov_users WHERE id = ?";
        return this.doQueryParams(query, array);
    }

    // CORE FUNCTIONS DON'T TOUCH

    async doQuery(queryToDo) {
        let pro = new Promise((resolve, reject) => {
            let query = queryToDo;
            this.db.query(query, function (err, result) {
                if (err) return reject(err); // error handling
                resolve(result);
            });
        })

        return pro.then((val) => {
            return val;
        })
    }

    async doQueryParams(queryToDo, array) {
        let pro = new Promise((resolve, reject) => {
            let query = queryToDo;
            this.db.query(query, array, function (err, result) {
                if (err) return reject(err); // error handling
                resolve(result);
            });
        })

        return pro.then((val) => {
            return val;
        })
    }
}
Then you need to instantiate your class by passing the connection variable given by mysql as a parameter to the constructor. After this, all you need to do is call one of your class methods with await in front. With this, you can chain queries without worrying about scopes.
Example:
connection.connect(function(err) {
    if (err) throw err;
    let DBModel = new DB(connection);

    (async function() {
        let oneUser = await DBModel.getUserById([1]);
        let allUsers = await DBModel.getUsers();
        res.render("index.ejs", {oneUser : oneUser, allUsers : allUsers});
    })();
});
Notes:
if you need to do another query, just write a new method in your class and call it in your code with await, inside an async function; just copy/paste a method and modify it.
there are two "core functions" in the class, doQuery and doQueryParams; the first one only takes a string as a parameter, which is basically your MySQL query. The second one is used for parameterized queries; it takes an array of values.
it's worth noticing that the return value of your methods will always be an array of objects, which means you'll have to use result[0] if you do a query that returns only one row. In case of multiple rows, just loop over it.
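For instance, a quick sketch of the single-row case, reusing DBModel from the example above:

(async function() {
    let rows = await DBModel.getUserById([1]);
    let user = rows[0]; // a single-row query still resolves to an array
    console.log(user);
})();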
I'm trying to export one function this way:
exports.query = function(request){
    conn.query(request, function(err, rows, fields){
        if (err) console.log(err);
        return rows[0].id;
    });
}
and using it:
var mysql = require('./mysql');
console.log(mysql.query('SELECT * FROM tablename'));
Proceeding this way, the output is undefined.
How do I fix this, please?
Note that when I just use console.log(rows[0].id) instead of return rows[0].id, it prints 123.
Thanks in advance!
In your example, the output is being returned to the anonymous function of the database query instead of the caller of the module. You can use a callback to return output to the caller of the module.
exports.query = function(request, callback){
    conn.query(request, function(err, rows, fields){
        if (err) {
            callback(err);
        } else {
            callback(null, rows[0].id);
        }
    });
}
Then call it like
var mysql = require('./mysql');

mysql.query('SELECT * FROM tablename', function(err, results){
    if (err) {
        console.error(err);
    } else {
        console.log(results);
    }
});
That's a problem of asynchrony.
The conn.query function returns undefined because it finishes its execution before the results are fetched (like almost any I/O-related operation in JS/Node).
One possible solution is to provide a callback to your query function:
exports.query = function(request, cb){
    conn.query(request, function(err, rows, fields){
        // some "special" processing
        cb(err, rows, fields);
    });
};
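If you prefer promises over callbacks, the same module could expose a promise-returning variant instead (a sketch; queryAsync is just an illustrative name):

exports.queryAsync = function(request){
    return new Promise(function(resolve, reject){
        conn.query(request, function(err, rows, fields){
            if (err) return reject(err);
            resolve(rows);
        });
    });
};

// usage
var mysql = require('./mysql');
mysql.queryAsync('SELECT * FROM tablename')
    .then(function(rows){ console.log(rows[0].id); })
    .catch(function(err){ console.error(err); });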
If you're not familiar with async functions, take a look at some articles about them:
http://justinklemm.com/node-js-async-tutorial/
https://www.promisejs.org/