AWS Lambda never completes but doesn't appear to time out either - Node.js

I'm attempting to create a simple application. A user emails an attachment to a special inbox, AWS SES gets the email and stores it in S3, a Lambda function is triggered, and the Lambda finds the email in S3, parses out the attachment (in this case a jpg) and then stores it in a different S3 bucket. Finally, the application creates a new row in an Airtable with the image as an attachment.
When I invoke this function locally using serverless, everything works fine. The email with the image is already stored in an S3 bucket, so I've created a mock event which passes the key explicitly to my Lambda. However, when I deploy the application and send a test email, the following happens:
Email is stored in S3 bucket 'to-airtable-temp'
Lambda function is called
Before the email can be found and the attachment stripped off and stored in a separate S3 bucket, the function just stops. No error message or timeout. It just stops. The CloudWatch logs look like the following:
START RequestId: 6a0364ae-0879-4ee1-9dcd-c8747de1a650 Version: $LATEST
2020-02-17T07:39:55.465Z 6a0364ae-0879-4ee1-9dcd-c8747de1a650 INFO {
s3SchemaVersion: '1.0',
configurationId: 'emailtoairtable-dev-parse-224c3963d3a3f9c35018ae93a9fffea4',
bucket: {
name: 'to-airtable-temp',
ownerIdentity: { principalId: 'AI5RGHFD5AFHE' },
arn: 'arn:aws:s3:::to-airtable-temp'
},
object: {
key: 'mtvuuiokqj55l2a8b0qser7tn9dhfignoh9c1vg1',
size: 3804230,
eTag: 'c821fb0a2a9c3b060e20e7d177f8b972',
sequencer: '005E4A434810147365'
}
}
2020-02-17T07:39:55.465Z 6a0364ae-0879-4ee1-9dcd-c8747de1a650 INFO key mtvuuiokqj55l2a8b0qser7tn9dhfignoh9c1vg1
2020-02-17T07:39:55.465Z 6a0364ae-0879-4ee1-9dcd-c8747de1a650 INFO Key pushed to list. mtvuuiokqj55l2a8b0qser7tn9dhfignoh9c1vg1
END RequestId: 6a0364ae-0879-4ee1-9dcd-c8747de1a650
REPORT RequestId: 6a0364ae-0879-4ee1-9dcd-c8747de1a650 Duration: 1113.17 ms Billed Duration: 1200 ms Memory Size: 1024 MB Max Memory Used: 114 MB Init Duration: 119.56 ms
Here is my handler.js file:
'use strict';

module.exports.parse = async event => {
  try {
    const aws = require('aws-sdk');
    const s3 = new aws.S3();
    const simpleParser = require('mailparser').simpleParser;
    const Airtable = require('airtable');
    const dateformat = require('dateformat');
    var base = new Airtable({ apiKey: process.env.airtableApiKey }).base(process.env.airtableBaseId);
    var data = [];
    var keys = [];

    event["Records"].forEach(async record => {
      console.log(record["s3"]);
      console.log('key', record["s3"]["object"]["key"]);
      keys.push(record["s3"]["object"]["key"]);
      console.log('Key pushed to list. ', record["s3"]["object"]["key"]); // <-- this is the last line that I am sure processes because I see it in the CloudWatch logs.
      var temp_data = await s3.getObject(
        {
          Bucket: 'to-airtable-temp',
          Key: record["s3"]["object"]["key"]
        }).promise();
      console.log('temp_data', temp_data);
      data.push(temp_data);
    });

    setTimeout(async function() {
      // console.log('data', data[0].Body.toString());
      let parsed = await simpleParser(data[0].Body.toString());
      console.log(parsed);
      // save the file to a public S3 bucket so it can be uploaded to airtable
      parsed["attachments"].forEach(function(attachment) {
        let now = new Date();
        s3.upload({
          Bucket: 'to-airtable-images',
          Key: keys[0] + dateformat(now, "yyyy-mm-dd") + '.jpg',
          Body: attachment.content,
          ContentType: "image/jpeg"
        },
        function(error, data) {
          if (error) {
            throw error;
          }
          console.log('File uploaded successfully. ' + data.Location);
          // Now upload to airtable
          base('Input').create([
            {
              "fields": { "Attachments": [
                {
                  "url": data.Location
                }
              ]}
            }
          ], function(err, records) {
            if (err) {
              console.error(err);
              return;
            }
            records.forEach(function (record) {
              console.log(record.getId());
            });
          });
        });
      });

      return {
        statusCode: 200,
        body: JSON.stringify(
          {
            message: 'Go Serverless v1.0! Your function executed successfully!',
            input: event,
            data: JSON.stringify(data),
          },
          null,
          2
        ),
      };
    }, 500); // I've tried increasing this time but it still hangs.
  } catch (error) {
    console.error(error);
  }
};

You shouldn't use async/await inside a forEach callback (see "Using async/await with a forEach loop"); forEach does not wait for the promises returned by an async callback, so the handler moves on before your S3 calls finish. Instead, use the more modern for...of syntax:
for (let record of event["Records"]) {
  // you can include await calls in this block
}
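Applied to the handler above, here is a minimal sketch of what that loop could look like, keeping the same bucket name and variables from the question; once the S3 reads are awaited, the setTimeout workaround should no longer be needed:

for (const record of event["Records"]) {
  const key = record["s3"]["object"]["key"];
  keys.push(key);

  // Await the S3 read directly inside the loop body.
  const temp_data = await s3.getObject({
    Bucket: 'to-airtable-temp',
    Key: key
  }).promise();

  data.push(temp_data);
}

// data is now fully populated, so the parsing can run immediately.
const parsed = await simpleParser(data[0].Body.toString());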

Related

Use AWS Lambda to get CloudWatch Alarm States

I have updated my original post as I have got a bit further: the part of my code that queries my CW Alarms is now working. The code below outputs the state of my CW Alarms to the console, in the format I want. What I'm now trying to do is take that output and upload it as a text file to an S3 bucket. Is this possible?
CW Alarm Code
import { CloudWatchClient, DescribeAlarmsCommand } from "@aws-sdk/client-cloudwatch";

const REGION = "eu-west-2";
const cwclient = new CloudWatchClient({ region: REGION });

export const handler = async () => {
  const cwparams = {};
  const cw = new DescribeAlarmsCommand(cwparams);
  try {
    const cwdata = await cwclient.send(cw);
    cwdata.MetricAlarms.forEach(function (item) {
      console.log('\n%j', { alarmname: item.AlarmName, alarmstate: item.StateValue });
    });
  } catch (error) {
  }
};
Output
Function Logs
START RequestId: xxxxxxxxxxxxxxxxxxx Version: $LATEST
2022-11-30T09:48:34.655Z xxxxxxxxxxxxxxxxxxx INFO
{"alarmname":"my-alarm-1","alarmstate":"OK"}
2022-11-30T09:48:34.655Z xxxxxxxxxxxxxxxxxxx INFO
{"alarmname":"my-alarm-2","alarmstate":"OK"}
END RequestId: xxxxxxxxxxxxxxxxxxx
I have looked at the SDK docs for the S3 PutObjectCommand and have tested the below, which allows me to upload a file with some text content, but I'm not sure how I can combine my CW Alarm data with this code so that the "Body" of the text file is my CW Alarm data.
S3 Code
import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3";

export const handler = async () => {
  const bucketName = "mybucket";
  const keyName = "test.json";
  const s3 = new S3Client({});
  const s3putCommand = new PutObjectCommand({
    Bucket: bucketName,
    Key: keyName,
    Body: "Hello" // I would like this to be my CW Alarm data
  });
  try {
    await s3.send(s3putCommand);
    console.log('Successfully uploaded data to ' + bucketName + '/' + keyName);
  } catch (error) {
  }
};
Output
Function Logs
START RequestId: xxxxxxxxxxxxxxxxxxx Version: $LATEST
2022-11-30T09:56:45.585Z xxxxxxxxxxxxxxxxxxx INFO Successfully uploaded data to mybucket/test.json
END RequestId: xxxxxxxxxxxxxxxxxxx
My goal is to end up with the test.json file looking like this:
{"alarmname":"my-alarm-1","alarmstate":"OK"} {"alarmname":"my-alarm-2","alarmstate":"OK"}
Thanks.
You are using an outdated AWS SDK for JavaScript. Refer to the AWS Code Library for the latest recommended SDK examples:
https://docs.aws.amazon.com/code-library/latest/ug/javascript_3_cloudwatch_code_examples.html
With help from a colleague I have found the answer to this. It works as long as the index file of your Lambda function is named "index.mjs" (so that the ES module imports below are supported):
import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3";
import { CloudWatchClient, DescribeAlarmsCommand } from "@aws-sdk/client-cloudwatch";
import { Upload } from "@aws-sdk/lib-storage";

const REGION = "eu-west-2";
const cwclient = new CloudWatchClient({ region: REGION });

export const handler = async () => {
  const cwparams = {};
  const cw = new DescribeAlarmsCommand(cwparams);
  const alarmData = [];
  const bucketName = "mybucket";
  const keyName = "test.json";
  const s3 = new S3Client({});
  try {
    const cwdata = await cwclient.send(cw);
    cwdata.MetricAlarms.forEach(function (item) {
      alarmData.push({
        alarmname: item.AlarmName,
        alarmstate: item.StateValue,
      });
    });
  } catch (error) {}
  const s3putCommand = new Upload({
    client: s3,
    params: {
      Bucket: bucketName,
      Key: keyName,
      Body: JSON.stringify(alarmData),
    },
  });
  try {
    const data = await s3putCommand.done();
    console.log(data);
  } catch (error) {
    console.log(error);
  }
};
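As a side note, because the body here is just a small JSON string, the plain PutObjectCommand from the earlier snippet would also work; the following is a minimal sketch assuming the same s3 client, alarmData, bucketName and keyName as above (the Upload helper from @aws-sdk/lib-storage is mainly useful for large or streaming bodies):

// Sketch: write the collected alarm data with a single PutObjectCommand.
const putCommand = new PutObjectCommand({
  Bucket: bucketName,
  Key: keyName,
  Body: JSON.stringify(alarmData),
  ContentType: "application/json",
});

try {
  await s3.send(putCommand);
  console.log('Successfully uploaded alarm data to ' + bucketName + '/' + keyName);
} catch (error) {
  console.log(error);
}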

I can't delete multiple items using DynamoDB BatchWrite with a primary key and sort key

So I'm trying to delete two records for the same user. However, when I run the Lambda function below it doesn't console log either a success or a failure, and the records are not deleted in DynamoDB. Any advice on how I could modify the code below to get the expected result of having both records deleted would be appreciated. Thanks
I've included several console logs for troubleshooting.
Both PK and SK are strings in DynamoDB.
Both delete requests are for the same user, just two different entries in the same table.
'use strict'

const AWS = require('aws-sdk');
AWS.config.update({ region: "us-east-1" });

exports.handler = async (event, context) => {
  var ddb = new AWS.DynamoDB.DocumentClient();
  var records = [{
    DeleteRequest : {
      Key : {
        'PK' : 'username',
        'SK' : 'C297329360'
      }
    },
    DeleteRequest : {
      Key : {
        'PK' : 'username',
        'SK' : 'R297329360'
      }
    }
  }];
  var params = {
    RequestItems : {
      'userTable' : records
    }
  };
  console.log(JSON.stringify(records));
  console.log(records);
  console.log(JSON.stringify(params));
  console.log(params);
  ddb.batchWrite(params, function(err, data) {
    if (err) {
      console.log('Batch delete unsuccessful ...');
      console.log(err, err.stack); // an error occurred
    } else {
      console.log('Batch delete successful ...');
      console.log(data); // successful response
    }
  });
}
Test Event Name
DeleteReportById
Response
null
Function Logs
START RequestId: 6c40c1f7-5cdb-4cef-95a7-306d540aa9f9 Version: $LATEST
2022-02-11T13:49:56.007Z 6c40c1f7-5cdb-4cef-95a7-306d540aa9f9 INFO [{"DeleteRequest":{"Key":{"PK":"username","SK":"R297329360"}}}]
2022-02-11T13:49:56.046Z 6c40c1f7-5cdb-4cef-95a7-306d540aa9f9 INFO [ { DeleteRequest: { Key: [Object] } } ]
2022-02-11T13:49:56.047Z 6c40c1f7-5cdb-4cef-95a7-306d540aa9f9 INFO {"RequestItems":{"FitTool_Prod":[{"DeleteRequest":{"Key":{"PK":"username","SK":"R297329360"}}}]}}
2022-02-11T13:49:56.065Z 6c40c1f7-5cdb-4cef-95a7-306d540aa9f9 INFO { RequestItems: { userTable: [ [Object] ] } }
END RequestId: 6c40c1f7-5cdb-4cef-95a7-306d540aa9f9
REPORT RequestId: 6c40c1f7-5cdb-4cef-95a7-306d540aa9f9 Duration: 589.81 ms Billed Duration: 590 ms Memory Size: 128 MB Max Memory Used: 77 MB Init Duration: 474.35 ms
Request ID
6c40c1f7-5cdb-4cef-95a7-306d540aa9f9
You are not waiting for the batchWrite call to complete; the Lambda terminates before the request is actually sent to DynamoDB. Your handler is async, so you can do:
await ddb.batchWrite(...).promise()
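In context, a minimal sketch with the call awaited. It also lists each DeleteRequest as its own array element, since the object literal in the question reuses the DeleteRequest key, which is why the logged JSON above only contains one request:

// Sketch: one DeleteRequest per array element, and the call is awaited.
const records = [
  { DeleteRequest: { Key: { PK: 'username', SK: 'C297329360' } } },
  { DeleteRequest: { Key: { PK: 'username', SK: 'R297329360' } } }
];

const params = { RequestItems: { 'userTable': records } };

// Awaiting the promise keeps the handler alive until DynamoDB responds.
const result = await ddb.batchWrite(params).promise();
console.log('Batch delete successful ...', result);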

Trying to upload a file from the Lambda tmp folder to an S3 bucket

I am new to Lambda functions and I am trying to upload a file to an S3 bucket from a Lambda function.
I'm creating multiple zip files in the tmp folder, and after that I want to upload those zip files to an S3 bucket. I have given the Lambda function permission to access the S3 bucket, and it's not showing any error.
I've tried different approaches to resolve this but have not been able to fix it.
import fs from 'fs';
import AWS from 'aws-sdk';
import delay from "delay";

const s3 = new AWS.S3({
  accessKeyId: "***",
  secretAccessKey: "***",
  region: "***"
});

const uploadFullfile = () => {
  // reg ex to match
  var re = /\.zip$/;
  // ensure that this file is in the directory of the files you want to run the cronjob on
  fs.readdir("/tmp/", function(err: any, files: any) {
    if (err) {
      console.log("Could not list the directory.", err);
      process.exit(1)
    }
    var matches = files.filter(function(text: any) { return re.test(text) })
    var numFiles = matches.length;
    if (numFiles) {
      // Read in the file, convert it to base64, store to S3
      for (let i = 0; i < numFiles; i++) {
        uploadCandidate(matches[i])
      }
    }
  })

  const uploadCandidate = (fileName: any) => {
    fs.readFile('/tmp/' + fileName, async (err: any, data: any) => {
      console.log("entry", fileName);
      if (err) throw err;
      console.log("params")
      await s3.putObject({
        Bucket: 'testbucket', // pass your bucket name
        Key: fileName, // file will be saved as testBucket/contacts.csv
        ContentType: 'application/zip',
        Body: data,
      }, function (resp) {
        console.log('Done');
      });
      //delay(1000);
      //fs.unlink('/tmp/'+fileName, function(){
      //  console.log("deleting file");
      //  console.log('/tmp/'+fileName);
      //});
    });
  }
}

export default uploadFullfile;
I am not getting any error, and I have given the function permission to access the S3 bucket.
This is the output I am getting:
2021-01-14T17:22:38.354+05:30 2021-01-14T11:52:38.354Z *** INFO entry state_fullfile_2021-01-14-11:01:03_part0.zip
2021-01-14T17:22:38.354+05:30 2021-01-14T11:52:38.354Z *** INFO params
2021-01-14T17:22:38.375+05:30 2021-01-14T11:52:38.374Z *** INFO entry association_fullfile_2021-01-14-11:01:03_part5.zip
2021-01-14T17:22:38.375+05:30 2021-01-14T11:52:38.375Z *** INFO params
2021-01-14T17:22:38.378+05:30 2021-01-14T11:52:38.378Z *** INFO entry remark_table_fullfile_2021-01-14-11:01:03_part1.zip
2021-01-14T17:22:38.378+05:30 2021-01-14T11:52:38.378Z **** INFO params
2021-01-14T17:22:38.394+05:30 END RequestId: ****
2021-01-14T17:22:38.394+05:30 REPORT RequestId: *** Duration: 83.91 ms Billed Duration: 84 ms Memory Size: 1024 MB Max Memory Used: 322 MB
Did you try increasing the Lambda function timeout?
This problem occurs due to a permission issue with the VPC endpoint. Here is the solution:
new-vpc-endpoint-for-amazon-s3
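As an aside (separate from the VPC endpoint fix above), the upload in the question runs inside nested callbacks that nothing waits on, so the invocation can end before the uploads finish. Here is a minimal promise-based sketch of uploadCandidate, assuming the same s3 client, /tmp path and bucket name as in the question:

import { promises as fsp } from 'fs';

// Sketch only: returns a promise the caller can await before the Lambda returns.
const uploadCandidate = async (fileName) => {
  const data = await fsp.readFile('/tmp/' + fileName);
  await s3.putObject({
    Bucket: 'testbucket',
    Key: fileName,
    ContentType: 'application/zip',
    Body: data,
  }).promise();
  console.log('Done', fileName);
};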

Unable to upload multiple images to AWS S3 through an AWS Node.js Lambda endpoint using Promises unless I first upload one image

I have the code below on AWS Lambda as an endpoint exposed through API Gateway. The point of this endpoint is to upload images to an S3 bucket. I've been experiencing an interesting bug and could use some help. This code is unable to upload multiple images to S3 unless it first uploads a single image. I've listed the scenarios below. The reason I want to use Promises is that I intend to insert data into a MySQL table in the same endpoint. Any advice or feedback will be greatly appreciated!
Code Successfully uploads multiple images:
Pass one image to the endpoint to upload to S3 first
Pass several images to the endpoint to upload to S3 after uploading one image first
Code fails to upload images:
Pass several images to the endpoint to upload to S3 first. A random number of images might be uploaded, but it consistently fails to upload all of them. A 502 error code is returned because not all images were uploaded.
Code
const AWS = require('aws-sdk');
const s3 = new AWS.S3({});

function uploadAllImagesToS3(imageMap) {
  console.log('in uploadAllImagesToS3')
  return new Promise((resolve, reject) => {
    awaitAll(imageMap, uploadToS3)
      .then(results => {
        console.log('awaitAllFinished. results: ' + results)
        resolve(results)
      })
      .catch(e => {
        console.log("awaitAllFinished error: " + e)
        reject(e)
      })
  })
}

function awaitAll(imageMap, asyncFn) {
  const promises = [];
  imageMap.forEach((value, key) => {
    promises.push(asyncFn(key, value));
  })
  console.log('promises length: ' + promises.length)
  return Promise.all(promises)
}

function uploadToS3(key, value) {
  return new Promise((resolve, reject) => {
    console.log('Promise uploadToS3 | key: ' + key)
    // [key, value] = [filePath, Image]
    var params = {
      "Body": value,
      "Bucket": "userpicturebucket",
      "Key": key
    };
    s3.upload(params, function (err, data) {
      console.log('uploadToS3. s3.upload. data: ' + JSON.stringify(data))
      if (err) {
        console.log('error when uploading to s3 | error: ' + err)
        reject(JSON.stringify(["Error when uploading data to S3", err]))
      } else {
        let response = {
          "statusCode": 200,
          "headers": {
            "Access-Control-Allow-Origin": "http://localhost:3000"
          },
          "body": JSON.stringify(data),
          "isBase64Encoded": false
        };
        resolve(JSON.stringify(["Successfully Uploaded data to S3", response]))
      }
    });
  })
}

exports.handler = (event, context, callback) => {
  if (event !== undefined) {
    let jsonObject = JSON.parse(event.body)
    let pictures = jsonObject.pictures
    let location = jsonObject.pictureLocation
    let imageMap = new Map()
    for (let i = 0; i < pictures.length; i++) {
      let base64Image = pictures[i].split('base64,', 2)
      let decodedImage = Buffer.from(base64Image[1], 'base64'); // image string is after 'base64'
      let base64Metadata = base64Image[0].split(';', 3) // data:image/jpeg,name=coffee.jpg,
      let imageNameData = base64Metadata[1].split('=', 2)
      let imageName = imageNameData[1]
      var filePath = "test/" + imageName
      imageMap.set(filePath, decodedImage)
    }
    const promises = [uploadAllImagesToS3(imageMap)]
    Promise.all(promises)
      .then(([uploadS3Response]) => {
        console.log('return promise!! | uploadS3Response: ' + JSON.stringify([uploadS3Response]))
        let res = {
          body: JSON.stringify(uploadS3Response),
          headers: {
            "Access-Control-Allow-Origin": "http://localhost:3000"
          }
        };
        callback(null, res);
      })
      .catch((err) => {
        callback(err);
      });
  } else {
    callback("No pictures were uploaded")
  }
};
Reason for the problem and solution:
After several hours of debugging this issue I realized what the error was! My Lambda endpoint was timing out early. The reason I was able to upload multiple images after first uploading one image was that the Lambda was executing from a warm start, as it was already up and running. The scenario where I was unable to upload multiple images only occurred when I tried to do so after not having executed the endpoint for 10+ minutes, therefore from a cold start. The solution was to increase the Timeout from the default of 3 seconds. I increased it to 20 seconds, but might need to play around with that value.
How to increase the lambda timeout?
Open Lambda function
Scroll down to Basic Settings and select Edit
Increase time in Timeout
TLDR
This error was occurring because the Lambda would time out. The solution is to increase the Lambda timeout.
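As an aside on the question's code, the hand-rolled new Promise wrapper around s3.upload isn't required with the v2 SDK, since upload returns a request whose .promise() can be awaited; here is a minimal sketch using the same s3 client and bucket from the question:

// Sketch: s3.upload(params).promise() replaces the manual Promise wrapper.
async function uploadToS3(key, value) {
  const params = {
    Body: value,
    Bucket: "userpicturebucket",
    Key: key
  };
  // Resolves with the upload result or rejects on error.
  const data = await s3.upload(params).promise();
  console.log('uploadToS3 done: ' + JSON.stringify(data));
  return data;
}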

JSON files do not contain all the results in AWS Lambda using Node.js

I'm currently working on a project using AWS S3, Rekognition and Lambda. I'm writing in NodeJS and created a working solution for what I want to achieve. The workflow in short is: an image of a face is uploaded to an S3 bucket, then the 'searchFacesByImage' API is called to see if that face has been indexed to the Master collection in the past. If it is a new face, the result will be false, and the 'indexFaces' API is called to index that face to the Master collection. Once that is done, I write the output to 3 separate JSON files in the same S3 bucket, called: 'metadata.json', 'indexing.json', 'rekognition.json'.
The 'metadata.json' file only contains the ExternalImageID (that I create myself), the date and time of indexing, the filename that was indexed, and a count that counts how many times that face has been indexed in the past.
The 'indexing.json' file contains the same ExternalImageID, the same date and time of indexing, and the response from the 'searchFacesByImage' API.
The 'rekognition.json' file contains the same ExternalImageID and date and time, as well as the response from the 'indexFaces' API.
The problem comes in when I load more than one image at a time. With a single image, the 3 JSON files populate accordingly, but as soon as I load more than a few (I've tested it with 7) images at the same time, all 7 images run through the workflow and the response data is written out to each file according to the CloudWatch logs, but when I actually go to view the JSON files, not all the response data is there for all 7 images. Sometimes the data of 5 images is in the JSON, other times it's 4 images. The data doesn't have to be in any specific order, it must just be there. I've also tested it by uploading 18 images at once, and only the response of 10 images was in the JSON.
I believe the problem is that I'm calling the 'getObject' API on the JSON files, then I append the response data to those files, and then I call the 'putObject' API to put them back into the S3 bucket. While the first image is going through this process, the next image wants to do the same, but there is no file to use 'getObject' on because it is busy with the previous image, so it just skips over that image, even though the CloudWatch logs said it had been added to the files.
I have no idea how to work around this. I believe the answer lies in asynchronous JavaScript (which I don't know much about, so I have no idea where to begin).
My apologies for the long post. Here is my code below:
const AWS = require('aws-sdk');
const s3 = new AWS.S3({ apiVersion: "2006-03-01" });
const rekognition = new AWS.Rekognition();
//const docClient = new AWS.DynamoDB.DocumentClient();
const uuidv4 = require('uuid/v4');

let bucket, key;
let dataSaveDate = new Date();

console.log('Loading function');

//-----------------------------------Exports Function---------------------------
exports.handler = function(event, context) {
  bucket = event.Records[0].s3.bucket.name;
  key = event.Records[0].s3.object.key;
  console.log(bucket);
  console.log(key);
  searchingFacesByImage(bucket, key);
};

//---------------------------------------------------------------------------
// Search for a face in an input image
function searchingFacesByImage(bucket, key) {
  let params = {
    CollectionId: "allFaces",
    FaceMatchThreshold: 95,
    Image: {
      S3Object: {
        Bucket: bucket,
        Name: key
      }
    },
    MaxFaces: 5
  };
  const searchingFace = rekognition.searchFacesByImage(params, function(err, searchdata) {
    if (err) {
      console.log(err, err.stack); // an error occurred
    } else {
      // console.log(JSON.stringify(searchdata, null, '\t'));
      // if data.FaceMatches > 0 : There that face in the image exists in the collection
      if (searchdata.FaceMatches.length > 0) {
        console.log("Face is a match");
      } else {
        console.log("Face is not a match");
        let mapping_id = createRandomId();
        console.log(`Created mapping_id: ${mapping_id}`);
        console.log("Start indexing face to 'allFaces'");
        indexToAllFaces(mapping_id, searchdata, bucket, key);
      }
    }
  });
  return searchingFace;
}

//---------------------------------------------------------------------------
// If face is not a match in 'allFaces', index face to 'allFaces' using mapping_id
function indexToAllFaces(mapping_id, searchData, bucket, key) {
  let params = {
    CollectionId: "allFaces",
    DetectionAttributes: ['ALL'],
    ExternalImageId: mapping_id,
    Image: {
      S3Object: {
        Bucket: bucket,
        Name: key
      }
    }
  };
  const indexFace = rekognition.indexFaces(params, function(err, data) {
    if (err) {
      console.log(err, err.stack); // an error occurred
    } else {
      console.log("INDEXING TO 'allFaces'");
      //console.log(JSON.stringify(data, null, '\t'));
      logAllData(mapping_id, bucket, key, searchData, data);
    }
  });
  return indexFace;
}

//---------------------------------------------------------------------------
// Counting how many times a face has been indexed and logging ALL data in a single log
function logAllData(mapping_id, bucket, key, searchData, data) {
  let params = {
    CollectionId: mapping_id,
    MaxResults: 20
  };
  const faceDetails = rekognition.listFaces(params, function(err, facedata) {
    if (err) {
      console.log(err, err.stack); // an error occurred
    } else {
      //console.log(JSON.stringify(facedata, null, '\t'));
      metadata(mapping_id, bucket, key, facedata);
      indexing(mapping_id, bucket, searchData);
      rekognitionData(mapping_id, bucket, data);
    }
  });
  return faceDetails;
}

//-----------------------------------------------------------------------------
function metadata(mapping_id, bucket, key, faceData) {
  let body = [
    {
      "mapping_id": mapping_id,
      "time": dataSaveDate,
      "image_name": key,
      "indexing_count": faceData.Faces.length - 1
    }
  ];
  //console.log(JSON.stringify(body, null, '\t'));
  logData("metadata.json", bucket, body);
}

//------------------------------------------------------------------------------
function indexing(mapping_id, bucket, searchData) {
  let body = [
    {
      "mapping_id": mapping_id,
      "time": dataSaveDate,
      "IndexingData": searchData
    }
  ];
  logData("indexing.json", bucket, body);
}

//------------------------------------------------------------------------------
function rekognitionData(mapping_id, bucket, data) {
  let body = [
    {
      "mapping_id": mapping_id,
      "time": dataSaveDate,
      "rekognition": data
    }
  ];
  logData("rekognition.json", bucket, body);
}

//------------------------------------------------------------------------------
// Function to log all data to JSON files
function logData(jsonFileName, bucket, body) {
  let params = {
    Bucket: bucket,
    Key: jsonFileName
  };
  const readFile = s3.getObject(params, function(err, filedata) {
    if (err) {
      console.log(err, err.stack); // an error occurred
    } else {
      console.log(`READING ${jsonFileName} CONTENTS`);
      // Read data from 'jsonFileName'
      let raw_content = filedata.Body.toString();
      let content = JSON.parse(raw_content);
      // Add new data to 'jsonFileName'
      content.push(...body);
      // Put new data back into jsonFileName
      s3.putObject(
        {
          Bucket: bucket,
          Key: jsonFileName,
          Body: JSON.stringify(content, null, '\t'),
          ContentType: "application/json"
        },
        function(err, res) {
          if (err) {
            console.log(err);
          } else {
            console.log(`DATA SAVED TO ${jsonFileName}`);
          }
        }
      );
    }
  });
  return readFile;
}
//----------------------------------SCRIPT ENDS---------------------------------
When a Node.js Lambda handler returns, the runtime stops processing the invocation, so any asynchronous work that is still pending gets cut off.
To make sure the Lambda does not prematurely terminate that work, wait until each Promise is complete by using await.
The s3.getObject and s3.putObject calls can be turned into Promises like this:
await s3.getObject(params).promise()
await s3.putObject(params).promise()
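Applied to the logData function in the question, here is a minimal sketch of the same read-append-write flow with await, using the same parameters as the original (note this only keeps a single invocation from exiting early; it does not make concurrent invocations safe against overwriting each other's changes):

// Sketch: logData rewritten so callers can await it.
async function logData(jsonFileName, bucket, body) {
  const filedata = await s3.getObject({ Bucket: bucket, Key: jsonFileName }).promise();
  const content = JSON.parse(filedata.Body.toString());
  content.push(...body);
  await s3.putObject({
    Bucket: bucket,
    Key: jsonFileName,
    Body: JSON.stringify(content, null, '\t'),
    ContentType: "application/json"
  }).promise();
  console.log(`DATA SAVED TO ${jsonFileName}`);
}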
