AWS lambda function throwing error "newPromise is not defined" - node.js

I am using an AWS Lambda function with the below code:
'use strict';
var newPromise = require('es6-promise').Promise;
const childProcess = require("child_process");
const path = require("path");
const backupDatabase = () => {
    const scriptFilePath = path.resolve(__dirname, "./backup.sh");
    return newPromise((resolve, reject) => {
        childProcess.execFile(scriptFilePath, (error) => {
            if (error) {
                console.error(error);
                resolve(false);
            }
            resolve(true);
        });
    });
};
module.exports.handler = async (event) => {
    const isBackupSuccessful = await backupDatabase();
    if (isBackupSuccessful) {
        return {
            status: "success",
            message: "Database backup completed successfully!"
        };
    }
    return {
        status: "failed",
        message: "Failed to backup the database! Check out the logs for more details"
    };
};
The code above runs within the Docker container and tries to run the below backup script:
#!/bin/bash
#
# Author: Bruno Coimbra <bbcoimbra#gmail.com>
#
# Backups database located in DB_HOST, DB_PORT, DB_NAME
# and can be accessed using DB_USER. Password should be
# located in $HOME/.pgpass and this file should be
# chmod 0600[1].
#
# Target bucket should be set in BACKUP_BUCKET variable.
#
# AWS credentials should be available as needed by aws-cli[2].
#
# Dependencies:
#
# * pg_dump executable (can be found in postgresql-client-<version> package)
# * aws-cli (with python environment configured execute 'pip install awscli')
#
#
# References
# [1] - http://www.postgresql.org/docs/9.3/static/libpq-pgpass.html
# [2] - http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html
#
#
###############
### Variables
export AWS_ACCESS_KEY_ID=
export AWS_SECRET_ACCESS_KEY=
DB_HOST=
DB_PORT="5432"
DB_USER="postgres"
BACKUP_BUCKET=
###############
#
# **RISK ZONE** DON'T TOUCH below this line unless you know
# exactly what you are doing.
#
###############
set -e
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
### Variables
S3_BACKUP_BUCKET=${BACKUP_BUCKET:-test-db-backup-bucket}
TEMPFILE_PREFIX="db-$DB_NAME-backup"
TEMPFILE="$(mktemp -t $TEMPFILE_PREFIX-XXXXXXXX)"
DATE="$(date +%Y-%m-%d)"
TIMESTAMP="$(date +%s)"
BACKUPFILE="backup-$DB_NAME-$TIMESTAMP.sql.gz"
LOGTAG="DB $DB_NAME Backup"
### Validations
if [[ ! -r "$HOME/.pgpass" ]]; then
    logger -t "$LOGTAG" "$0: Can't find database credentials. $HOME/.pgpass file isn't readable. Aborted."
    exit 1
fi
if ! which pg_dump > /dev/null; then
    logger -t "$LOGTAG" "$0: Can't find 'pg_dump' executable. Aborted."
    exit 1
fi
if ! which aws > /dev/null; then
    logger -t "$LOGTAG" "$0: Can't find 'aws cli' executable. Aborted."
    exit 1
fi
logger -t "$LOGTAG" "$0: remove any previous dirty backup file"
rm -f /tmp/$TEMPFILE_PREFIX*
### Generate dump and compress it
logger -t "$LOGTAG" "Dumping Database..."
pg_dump -O -x -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -w "$DB_NAME" > "$TEMPFILE"
logger -t "$LOGTAG" "Dumped."
logger -t "$LOGTAG" "Compressing file..."
nice gzip -9 "$TEMPFILE"
logger -t "$LOGTAG" "Compressed."
mv "$TEMPFILE.gz" "$BACKUPFILE"
### Upload it to S3 Bucket and cleanup
logger -t "$LOGTAG" "Uploading '$BACKUPFILE' to S3..."
aws s3 cp "$BACKUPFILE" "s3://$S3_BACKUP_BUCKET/$DATE/$BACKUPFILE"
logger -t "$LOGTAG" "Uploaded."
logger -t "$LOGTAG" "Clean-up..."
rm -f $TEMPFILE
rm -f $BACKUPFILE
rm -f /tmp/$TEMPFILE_PREFIX*
logger -t "$LOGTAG" "Finished."
if [ $? -eq 0 ]; then
    echo "script passed"
    exit 0
else
    echo "script failed"
    exit 1
fi
I created a Docker image with the above app.js content and backup.sh using the below Dockerfile:
ARG FUNCTION_DIR="/function"
FROM node:14-buster
RUN apt-get update && \
    apt install -y \
    g++ \
    make \
    cmake \
    autoconf \
    libtool \
    wget \
    openssh-client \
    gnupg2
RUN wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - && \
    echo "deb http://apt.postgresql.org/pub/repos/apt/ buster-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list && \
    apt-get update && apt-get -y install postgresql-client-12
ARG FUNCTION_DIR
RUN mkdir -p ${FUNCTION_DIR} && chmod -R 755 ${FUNCTION_DIR}
WORKDIR ${FUNCTION_DIR}
COPY package.json .
RUN npm install
COPY backup.sh .
RUN chmod +x backup.sh
COPY app.js .
ENTRYPOINT ["/usr/local/bin/npx", "aws-lambda-ric"]
CMD ["app.handler"]
I am running the Docker container using the image built from the above Dockerfile:
docker run -v ~/aws:/aws -it --rm -p 9000:8080 --entrypoint /aws/aws-lambda-rie backup-db:v1 /usr/local/bin/npx aws-lambda-ric app.handler
And I am trying to hit that Docker container with the below curl command:
curl -XPOST "http://localhost:9000/2015-03-31/functions/function/invocations" -d '{}'
When I run the curl command I see the below error:
The error I see is: "newPromise is not defined","trace":["ReferenceError: newPromise is not defined"," at backupDatabase (/function/app.js:9:3)","
I tried adding the variable var newPromise = require('es6-promise').Promise;, but that gave a new error: "Cannot set property 'scqfkjngu7o' of undefined","trace"
Could someone help me fix the error? My expected output is the message as described in the function, but I am seeing the errors above.
Thank you

Node 14 supports promises natively. You should do:
return new Promise((resolve, reject) => {
    childProcess.execFile(scriptFilePath, (error) => {
        if (error) {
            console.error(error);
            resolve(false);
        }
        resolve(true);
    });
});
Note the space between new and Promise: Promise is the built-in object, and you are calling its constructor with new. There is no need to import any module.
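Putting it together, a minimal sketch of the whole handler with that fix applied (this also adds a return after resolve(false) so the success branch is skipped on error; otherwise it mirrors the question's code):

'use strict';
const childProcess = require("child_process");
const path = require("path");

const backupDatabase = () => {
    const scriptFilePath = path.resolve(__dirname, "./backup.sh");
    // Built-in Promise; no es6-promise import is needed on Node 14.
    return new Promise((resolve) => {
        childProcess.execFile(scriptFilePath, (error) => {
            if (error) {
                console.error(error);
                resolve(false);
                return; // don't fall through to resolve(true)
            }
            resolve(true);
        });
    });
};

module.exports.handler = async (event) => {
    const isBackupSuccessful = await backupDatabase();
    if (isBackupSuccessful) {
        return { status: "success", message: "Database backup completed successfully!" };
    }
    return { status: "failed", message: "Failed to backup the database! Check out the logs for more details" };
};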

Related

Docker returns error DPI-1047: cannot locate a 64-bit Oracle library: libclntsh.so - Node.js, Windows 10

I'm using a Docker container on Windows 10 with Node.js. When I try to get data from an Oracle database via a GET request (the connection to the database is in the Node.js code), I get the message:
DPI-1047: Cannot locate a 64-bit Oracle Client library: "libclntsh.so: cannot open shared object file: No such file or directory". See https://oracle.github.io/node-oracledb/INSTALL.html for help
When I make a GET request without the container (running the server directly), the data is returned fine.
Dockerfile:
FROM node:latest
WORKDIR /app
COPY package*.json app.js ./
RUN npm install
COPY . .
EXPOSE 9000
CMD ["npm", "start"]
connection to oracle:
async function send2db(sql_command, res) {
    console.log("IN");
    console.log(sql_command);
    try {
        await oracledb.createPool({
            user: dbConfig.user,
            password: dbConfig.password,
            connectString: dbConfig.connectString,
        });
        console.log("Connection pool started");
        const result = await executeSQLCommand(sql_command
            // { outFormat: oracledb.OUT_FORMAT_OBJECT }
        );
        return result;
    } catch (err) {
        // console.log("init() error: " + err.message);
        throw err;
    }
}
From "Docker for Oracle Database Applications in Node.js and Python", here is one solution:
FROM node:12-buster-slim
WORKDIR /opt/oracle
RUN apt-get update && \
    apt-get install -y libaio1 unzip wget
RUN wget https://download.oracle.com/otn_software/linux/instantclient/instantclient-basiclite-linuxx64.zip && \
    unzip instantclient-basiclite-linuxx64.zip && \
    rm -f instantclient-basiclite-linuxx64.zip && \
    cd instantclient* && \
    rm -f *jdbc* *occi* *mysql* *jar uidrvci genezi adrci && \
    echo /opt/oracle/instantclient* > /etc/ld.so.conf.d/oracle-instantclient.conf && \
    ldconfig
You would want to use a later Node.js version now. The referenced link shows installs on other platforms too.
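To complete the picture, the application layer from the question could then be added on top of that instant client base. A rough sketch (the paths, port, and npm start script are carried over from the question's own Dockerfile, and node-oracledb is assumed to be listed in package.json):

# ... continues from the instant client base above
WORKDIR /app
COPY package*.json app.js ./
# node-oracledb picks up libclntsh.so via the ldconfig entry created above
RUN npm install
COPY . .
EXPOSE 9000
CMD ["npm", "start"]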

AWS lambda function throwing error "constchildProcess is not defined"

I am using an AWS Lambda function with the below code:
'use strict';
constchildProcess= require("child_process");
constpath= require("path");
const backupDatabase = () => {
    const scriptFilePath = path.resolve(__dirname, "./backup.sh");
    return newPromise((resolve, reject) => {
        childProcess.execFile(scriptFilePath, (error) => {
            if (error) {
                console.error(error);
                resolve(false);
            }
            resolve(true);
        });
    });
};
module.exports.handler = async (event) => {
    const isBackupSuccessful = await backupDatabase();
    if (isBackupSuccessful) {
        return {
            status: "success",
            message: "Database backup completed successfully!"
        };
    }
    return {
        status: "failed",
        message: "Failed to backup the database! Check out the logs for more details"
    };
};
The code above runs within the Docker container and tries to run the below backup script:
#!/bin/bash
#
# Author: Bruno Coimbra <bbcoimbra#gmail.com>
#
# Backups database located in DB_HOST, DB_PORT, DB_NAME
# and can be accessed using DB_USER. Password should be
# located in $HOME/.pgpass and this file should be
# chmod 0600[1].
#
# Target bucket should be set in BACKUP_BUCKET variable.
#
# AWS credentials should be available as needed by aws-cli[2].
#
# Dependencies:
#
# * pg_dump executable (can be found in postgresql-client-<version> package)
# * aws-cli (with python environment configured execute 'pip install awscli')
#
#
# References
# [1] - http://www.postgresql.org/docs/9.3/static/libpq-pgpass.html
# [2] - http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html
#
#
###############
### Variables
export AWS_ACCESS_KEY_ID=
export AWS_SECRET_ACCESS_KEY=
DB_HOST=
DB_PORT="5432"
DB_USER="postgres"
BACKUP_BUCKET=
###############
#
# **RISK ZONE** DON'T TOUCH below this line unless you know
# exactly what you are doing.
#
###############
set -e
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
### Variables
S3_BACKUP_BUCKET=${BACKUP_BUCKET:-test-db-backup-bucket}
TEMPFILE_PREFIX="db-$DB_NAME-backup"
TEMPFILE="$(mktemp -t $TEMPFILE_PREFIX-XXXXXXXX)"
DATE="$(date +%Y-%m-%d)"
TIMESTAMP="$(date +%s)"
BACKUPFILE="backup-$DB_NAME-$TIMESTAMP.sql.gz"
LOGTAG="DB $DB_NAME Backup"
### Validations
if [[ ! -r "$HOME/.pgpass" ]]; then
    logger -t "$LOGTAG" "$0: Can't find database credentials. $HOME/.pgpass file isn't readable. Aborted."
    exit 1
fi
if ! which pg_dump > /dev/null; then
    logger -t "$LOGTAG" "$0: Can't find 'pg_dump' executable. Aborted."
    exit 1
fi
if ! which aws > /dev/null; then
    logger -t "$LOGTAG" "$0: Can't find 'aws cli' executable. Aborted."
    exit 1
fi
logger -t "$LOGTAG" "$0: remove any previous dirty backup file"
rm -f /tmp/$TEMPFILE_PREFIX*
### Generate dump and compress it
logger -t "$LOGTAG" "Dumping Database..."
pg_dump -O -x -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -w "$DB_NAME" > "$TEMPFILE"
logger -t "$LOGTAG" "Dumped."
logger -t "$LOGTAG" "Compressing file..."
nice gzip -9 "$TEMPFILE"
logger -t "$LOGTAG" "Compressed."
mv "$TEMPFILE.gz" "$BACKUPFILE"
### Upload it to S3 Bucket and cleanup
logger -t "$LOGTAG" "Uploading '$BACKUPFILE' to S3..."
aws s3 cp "$BACKUPFILE" "s3://$S3_BACKUP_BUCKET/$DATE/$BACKUPFILE"
logger -t "$LOGTAG" "Uploaded."
logger -t "$LOGTAG" "Clean-up..."
rm -f $TEMPFILE
rm -f $BACKUPFILE
rm -f /tmp/$TEMPFILE_PREFIX*
logger -t "$LOGTAG" "Finished."
if [ $? -eq 0 ]; then
    echo "script passed"
    exit 0
else
    echo "script failed"
    exit 1
fi
I created a Docker image with the above app.js content and backup.sh using the below Dockerfile:
ARG FUNCTION_DIR="/function"
FROM node:14-buster
RUN apt-get update && \
    apt install -y \
    g++ \
    make \
    cmake \
    autoconf \
    libtool \
    wget \
    openssh-client \
    gnupg2
RUN wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - && \
    echo "deb http://apt.postgresql.org/pub/repos/apt/ buster-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list && \
    apt-get update && apt-get -y install postgresql-client-12
ARG FUNCTION_DIR
RUN mkdir -p ${FUNCTION_DIR} && chmod -R 755 ${FUNCTION_DIR}
WORKDIR ${FUNCTION_DIR}
COPY package.json .
RUN npm install
COPY backup.sh .
RUN chmod +x backup.sh
COPY app.js .
ENTRYPOINT ["/usr/local/bin/npx", "aws-lambda-ric"]
CMD ["app.handler"]
I am running the Docker container using the image built from the above Dockerfile:
docker run -v ~/aws:/aws -it --rm -p 9000:8080 --entrypoint /aws/aws-lambda-rie backup-db:v1 /usr/local/bin/npx aws-lambda-ric app.handler
And I am trying to hit that Docker container with the below curl command:
curl -XPOST "http://localhost:9000/2015-03-31/functions/function/invocations" -d '{}'
When I run the curl command I see the below error:
29 Nov 2021 10:57:30,838 [INFO] (rapid) extensionsDisabledByLayer(/opt/disable-extensions-jwigqn8j) -> stat /opt/disable-extensions-jwigqn8j: no such file or directory
29 Nov 2021 10:57:30,838 [WARNING] (rapid) Cannot list external agents error=open /opt/extensions: no such file or directory
START RequestId: 053246ef-4687-438d-aade-a6794b917b79 Version: $LATEST
2021-11-29T10:57:30.912Z undefined INFO Executing 'app.handler' in function directory '/function'
2021-11-29T10:57:30.919Z undefined ERROR constchildProcess is not defined
29 Nov 2021 10:57:30,926 [WARNING] (rapid) First fatal error stored in appctx: Runtime.ExitError
29 Nov 2021 10:57:30,927 [WARNING] (rapid) Process 53(npx) exited: Runtime exited with error: exit status 1
29 Nov 2021 10:57:30,927 [ERROR] (rapid) Init failed error=Runtime exited with error: exit status 1 InvokeID=
29 Nov 2021 10:57:30,927 [WARNING] (rapid) Reset initiated: ReserveFail
29 Nov 2021 10:57:30,927 [WARNING] (rapid) Cannot list external agents error=open /opt/extensions: no such file or directory
Could someone help me fix the error? My expected output is the message as described in the function, but I am seeing the errors above.
Thank you
Because neither of them is defined. There is a typo on your first two lines:
constchildProcess= require("child_process");
constpath= require("path");
They should be:
const childProcess = require("child_process");
const path = require("path");
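Note that once those requires are fixed, the code shown will next fail with the "newPromise is not defined" error from the question above, so that call should also use the built-in constructor. A minimal sketch of the corrected lines:

const childProcess = require("child_process");
const path = require("path");

const backupDatabase = () => {
    const scriptFilePath = path.resolve(__dirname, "./backup.sh");
    // built-in Promise constructor; "newPromise" does not exist
    return new Promise((resolve) => {
        childProcess.execFile(scriptFilePath, (error) => {
            if (error) console.error(error);
            resolve(!error);
        });
    });
};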

Unable to access local variable via a function call through ssh

This is the code I'm trying to execute:
#!/bin/bash
set -e -a
err_report() {
    echo "Error on line $1"
}
trap 'err_report $LINENO' ERR
IFS=$'\n'
value=($(cat builds.txt))
prevBuild=${value[0]}
prevBuildDir=$prevBuild
currentBuild=${value[1]}
currentBuildDir=$currentBuild
splitPrevBuild=(${prevBuild//./$'\n'})
splitcurrentBuild=(${currentBuild//./$'\n'})
prevBuildSR=${splitPrevBuild[2]}
prevBuildHF=${splitPrevBuild[3]}
prevBuildNum=${splitPrevBuild[4]}
currentBuildSR=${splitcurrentBuild[2]}
currentBuildHF=${splitcurrentBuild[3]}
currentBuildNum=${splitcurrentBuild[4]}
function change {
    cd ~/InstallationFiles/${currentBuildSR}/${currentBuildDir}/${currentBuild}extracted
}
change
pwd
ssh -T atao52#kvs-in-hsglap17.in.kronos.com <<-EOF
$(typeset -f)
change
pwd
echo hi
exit
EOF
How can I access the variables? Currently it says:
cd: /home/atao52/InstallationFiles///extracted: No such file or directory
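One common approach, sketched here as an assumption (it relies on the remote shell being bash), is to serialize the needed variables with typeset -p into the same unquoted here-document that already injects the functions, so they are defined on the remote side before change runs:

ssh -T atao52#kvs-in-hsglap17.in.kronos.com <<-EOF
$(typeset -f)
$(typeset -p currentBuild currentBuildDir currentBuildSR)
change
pwd
echo hi
exit
EOF

Both command substitutions are expanded locally before the here-document is sent, so the remote shell receives plain declare statements along with the function definitions.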

Node.js gives unwanted message while running shell script in Linux

I try to execute a shell script using Node.js, and the Node.js server gives a wrong error message.
shell script
sudo mkdir updateinprogress
servicebranch="Development"
currentpath="$PWD"
tarfilename="$(date +'%d-%m-%Y_%H-%M-%S')"
cd ..
if [ ! -d "backups" ]; then
    sudo mkdir backups
fi
cd backups
if [ ! -d "AppInEngine" ]; then
    sudo mkdir AppInEngine
fi
cd $currentpath
if [ ! -d "current_file_backup" ]; then
    sudo mkdir current_file_backup
fi
sudo cp -r /var/www/AppInEngine $currentpath/current_file_backup/
sudo git clone https://github.com/abc/AppInEngine --branch $servicebranch -q
if [ $? != 0 ]; then
    echo -e "{\"status\":\"false\", \"message\":\""error occerd while cloning git repository "\"}"
    sudo rm -r AppInEngine updateinprogress current_file_backup
    exit
fi
sudo cp -r /var/www/AppInEngine/configs AppInEngine
sudo cp -r /var/www/AppInEngine/Logs AppInEngine/
sudo rm -r /var/www/AppInEngine
sudo cp -r AppInEngine /var/www
sudo chmod -R 777 /var/www/AppInEngine
service apache2 restart > /dev/null
if [ $? != 0 ]; then
    sudo rm -r /var/www/AppInEngine
    sudo cp -r $currentpath/current_file_backup/AppInEngine /var/www/
    sudo service apache2 restart > /dev/null
    sudo rm -r AppInEngine updateinprogress current_file_backup
    echo -e "{\"status\":\"false\", \"message\":\""error occerd while starting apache2 .restart with backup "\"}"
    exit
fi
sleep 160
if curl -s --head --request GET http://localhost/AppInEngine/get_version | grep "200 OK" > /dev/null; then
    sudo tar -cf AppInEngine_$tarfilename.tar.gz current_file_backup/AppInEngine
    sudo mv AppInEngine_$tarfilename.tar.gz ../backups/AppInEngine/
    cd ../backups/AppInEngine
    #sudo cp AppInEngine$tarfilename.tar.gz
    sudo rm -f $(ls -1t | tail -n +3)
    cd $currentpath
    sudo rm -r AppInEngine updateinprogress current_file_backup
    echo -e "{\"status\":\"true\", \"message\":\""service update success"\"}"
    exit
else
    echo -e "{\"status\":\"false\", \"message\":\""service update faild .trying to start with backup "\"}"
    sudo service apache2 stop
    sudo rm -r /var/www/AppInEngine
    sudo cp -r $currentpath/current_file_backup/AppInEngine /var/www/
    sudo rm -r AppInEngine ls current_file_backup
    chmod -R 777 /var/www/AppInEngine
    sudo service apache2 start > /dev/null
    exit
fi
nodejs server
var express = require('express');
var app = express();
var exec = require('child_process').exec, child;
app.listen(4105);
app.get('/updateEngin', function (req, res) {
    var testscript = exec('sh test.sh /var/www/update/serviceupdate');
    testscript.stdout.on('data', function (data) {
        console.log(data);
        res.json(data);
    });
});
Output when running the script alone in a terminal (./test.sh):
{"status":"true", "message":"service update success"}
Output when running via Node.js:
{"status":"false", "message":"error occerd while cloning git repository "}
{"status":"true", "message":"service update success"}
Set a timeout on the Node.js server and the issue will be fixed:
var express = require('express');
var app = express();
var timeout = require('connect-timeout');
var exec = require('child_process').exec, child;
app.listen(4105);
app.get('/updateEngin', timeout('160s'), function (req, res) {
    var testscript = exec('sh test.sh /var/www/update/serviceupdate');
    testscript.stdout.on('data', function (data) {
        console.log(data);
        res.json(data);
    });
});
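The timeout helps because the script sleeps for 160 seconds, so the response would otherwise be cut off before the final message arrives. As a separate, optional variant (an assumption, not part of the original answer), the handler could buffer stdout and reply once when the child process exits, so res.json is not called for every intermediate chunk the script prints:

var express = require('express');
var app = express();
var timeout = require('connect-timeout');
var exec = require('child_process').exec;
app.listen(4105);
app.get('/updateEngin', timeout('160s'), function (req, res) {
    var output = '';
    var testscript = exec('sh test.sh /var/www/update/serviceupdate');
    // collect everything the script prints instead of responding per chunk
    testscript.stdout.on('data', function (data) {
        output += data;
    });
    // reply once, with the script's complete output, when it exits
    testscript.on('close', function () {
        res.json(output);
    });
});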

run Exec only if another Exec ran

How can I configure an Exec to run only if another Exec ran?
I have a manifest like this:
file { $target:
    ensure => directory
}
exec { "unzip foobar.zip -C ${target}":
    unless => "file ${target}/some-file-form-archive"
}
exec { "chown -R $user ${target}":
    onlyif => ???
}
I would like the chown to run only if unzip foobar.zip ran. Of course I could start checking whether some-file-from-archive is already owned by $user, but somehow it does not seem right.
There's an answer here already: http://ask.puppetlabs.com/question/14726/run-exec-only-if-another-exec-ran/
Changing the manifest like this fixes my problem:
exec { 'unpack file':
    command => "unzip foobar.zip -C ${target}",
    path    => '/usr/bin',
    creates => "${target}/some-file-form-archive",
    require => File[$target, '<archive>'],
    notify  => Exec[fix archive],
}
exec { 'fix archive':
    command => "chown -R ${user} ${target}",
    path    => '/bin',
    refreshonly => true,
}
UPDATE 28.11.2014
Motivated by Felix Frank's comment, I tried out something else. Instead of notify/refreshonly, you can ensure all resources in a file tree are owned by a user like this:
exec { 'fix archive':
    command => "chown -R ${user} ${target}",
    path    => '/bin',
    unless  => "test 0 -eq $(find ${target} \\! -user ${user} | wc -l)"
}
This way the owner is ensured to be $user even if it was changed after 'unpack file' ran.
