Lambda function unable to create file on S3 - node.js

I have written the following code to be used in an AWS Lambda function. The objective is to:
1. Fetch data from an MSSQL database.
2. Create a file from this data in an S3 bucket.
3. Create a file on an FTP server.
This all works just fine, but as soon as I push the code to AWS Lambda it fails: it is able to fetch the data but not to create the file on S3. Here is the complete code:
"use strict";
var express = require('express');
var app = express();
var config = require('./config/dev');
var sql = require("mssql");
var AWS = require('aws-sdk');
var PromiseFtp = require('promise-ftp');
var fs = require('fs');
exports.handler = (event, context, callback) => {
CreateFileOnS3("This is a sample text file created by lambda function", event, context, callback);
};
function CreateFileOnS3(fileData, event, context, callback) {
const fileName = generateFileName();
console.log('Sending file to S3...');
const s3 = new AWS.S3(config.awsCredentials);
const params = {
Bucket: config.app.s3Bucket,
Key: fileName,
Body: fileData
};
s3.upload(params, function (s3Err, data) {
if (s3Err) {
console.log('There was an error creating file on S3');
callback(true, 'There was an error creating file on S3');
}
else {
console.log(`File uploaded successfully at ${data.Location}`);
callback(null, 'File uploaded successfully at ${data.Location}');
}
});
}
function generateFileName() {
var _d = new Date(),
y = _d.getFullYear(),
m = _d.getMonth() + 1,
d = _d.getDate();
return y + '-' + (m < 10 ? '0' + m : m) + '-' + (d < 10 ? '0' + d : d) + '.txt';
}
Here is what the CloudWatch log says:
10:38:51
START RequestId: 0965c2ef-94a2-439e-b61b-83f17b5ad3d3 Version: $LATEST
10:38:51
2019-04-22T10:38:51.737Z 0965c2ef-94a2-439e-b61b-83f17b5ad3d3
Fetching data from database...
10:38:53
2019-04-22T10:38:53.364Z 0965c2ef-94a2-439e-b61b-83f17b5ad3d3
Sending file to S3...
10:39:21
END RequestId: 0965c2ef-94a2-439e-b61b-83f17b5ad3d3
10:39:21
REPORT RequestId: 0965c2ef-94a2-439e-b61b-83f17b5ad3d3 Duration: 30030.33 ms Billed Duration: 30000 ms Memory Size: 512 MB Max Memory Used: 95 MB
10:39:21
2019-04-22T10:39:21.760Z 0965c2ef-94a2-439e-b61b-83f17b5ad3d3 Task timed out after 30.03 seconds
Memory: 2048MB Timeout: 60Sec
EDIT: The updated code above only sends a plain string to be created as a text file on S3, and even that is not working.

Check the timeout that is set on the Lambda function. Increase it to 2 minutes.
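If you prefer to change it from code rather than the console, something like the following sketch should work with the aws-sdk (the function name and region here are placeholders, not from the question):

// Hypothetical one-off script: raise the timeout of an existing Lambda function.
const AWS = require('aws-sdk');
const lambda = new AWS.Lambda({ region: 'eu-west-1' }); // use your function's region

lambda.updateFunctionConfiguration({
    FunctionName: 'my-s3-export-function', // placeholder name
    Timeout: 120 // seconds
}).promise()
    .then(cfg => console.log('Timeout is now', cfg.Timeout, 'seconds'))
    .catch(err => console.error('Could not update the configuration:', err));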

Related

Corrupt or damaged file when uploading in s3 aws with nodejs

When I upload a file from my local environment, I have no problem with S3 and I can download the file from AWS without issue. But when I upload the file from the development environment in AWS, the file is corrupted, does not open, or opens empty. The parameters are the same in both cases.
module.exports.handler = async event => {
    const body = multipart.parse(event, true)
    const { loan_id, document } = body
    const lang = event.headers?.Lang ? event.headers.Lang : event.headers?.lang
    try {
        const date = new Date()
        const stringDate = `${date.getFullYear()}${
            date.getMonth() + 1 < 10 ? `0${date.getMonth() + 1}` : date.getMonth() + 1
        }${date.getDay() < 10 ? `0${date.getDay()}` : date.getDay()}-${date.getHours()}${date.getMinutes()}`
        // Upload the file to S3
        const fullFileName = `${document.type}/${loan_id}-${stringDate}-${document.filename}`
        try {
            console.log('Uploading the file in S3 with key', fullFileName)
            //console.log(buf);
            var data = {
                Bucket: DOCUMENTS_BUCKET_NAME,
                Key: fullFileName,
                Body: document.content,
                ContentType: document.ContentType
            }
            await s3
                .putObject(data)
                .promise()
        } catch (err) {
            console.error(err)
            return serverlessResponse(HTTP_CODES.BAD_REQUEST, 'The file cannot be uploaded', err)
        }
    }
(Screenshot: Postman request headers)
(Screenshot: Postman form-data body)
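There is no accepted answer here, but one thing often worth checking with this setup (an assumption on my part, not something stated in the post): when the request comes in through API Gateway, binary form-data is delivered base64-encoded and event.isBase64Encoded is set, so if the multipart parser receives the body in the wrong encoding, document.content ends up corrupted or empty. A rough diagnostic sketch, reusing DOCUMENTS_BUCKET_NAME, fullFileName and document from the question:

// Sketch only: check what is actually being sent to S3 before calling putObject.
console.log('isBase64Encoded:', event.isBase64Encoded);
console.log('content is Buffer:', Buffer.isBuffer(document.content),
    'length:', document.content && document.content.length);

await s3.putObject({
    Bucket: DOCUMENTS_BUCKET_NAME,
    Key: fullFileName,
    // Assumption: if the parser handed back a base64 string instead of a Buffer,
    // convert it explicitly; otherwise pass the Buffer through untouched.
    Body: Buffer.isBuffer(document.content)
        ? document.content
        : Buffer.from(document.content, 'base64'),
    ContentType: document.ContentType
}).promise();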

Try to upload file from lambda tmp folder to s3 bucket

I am new to Lambda functions and I am trying to upload a file to an S3 bucket from a Lambda function.
I create multiple zip files in the /tmp folder and after that I want to upload those zip files to an S3 bucket. I have given the Lambda function permission to access the S3 bucket and it is not showing any error.
I have tried different approaches to resolve this but have not been able to fix it.
import fs from 'fs';
import AWS from 'aws-sdk';
import delay from "delay";

const s3 = new AWS.S3({
    accessKeyId: "***",
    secretAccessKey: "***",
    region: "***"
});

const uploadFullfile = () => {
    // reg ex to match
    var re = /\.zip$/;
    // ensure that this file is in the directory of the files you want to run the cronjob on
    fs.readdir("/tmp/", function(err: any, files: any) {
        if (err) {
            console.log("Could not list the directory.", err);
            process.exit(1)
        }
        var matches = files.filter(function(text: any) { return re.test(text) })
        var numFiles = matches.length;
        if (numFiles) {
            // Read in the file, convert it to base64, store to S3
            for (let i = 0; i < numFiles; i++) {
                uploadCandidate(matches[i])
            }
        }
    })

    const uploadCandidate = (fileName: any) => {
        fs.readFile('/tmp/' + fileName, async (err: any, data: any) => {
            console.log("entry", fileName);
            if (err) throw err;
            console.log("params")
            await s3.putObject({
                Bucket: 'testbucket', // pass your bucket name
                Key: fileName, // file will be saved as testBucket/contacts.csv
                ContentType: 'application/zip',
                Body: data,
            }, function (resp) {
                console.log('Done');
            });
            //delay(1000);
            //fs.unlink('/tmp/'+fileName, function(){
            //    console.log("deleting file");
            //    console.log('/tmp/'+fileName);
            //});
        });
    }
}
export default uploadFullfile;
I am not getting any error, and I have given the function permission to access the S3 bucket. Here is the output I am getting:
2021-01-14T17:22:38.354+05:30 2021-01-14T11:52:38.354Z *** INFO entry state_fullfile_2021-01-14-11:01:03_part0.zip
2021-01-14T17:22:38.354+05:30 2021-01-14T11:52:38.354Z *** INFO params
2021-01-14T17:22:38.375+05:30 2021-01-14T11:52:38.374Z *** INFO entry association_fullfile_2021-01-14-11:01:03_part5.zip
2021-01-14T17:22:38.375+05:30 2021-01-14T11:52:38.375Z *** INFO params
2021-01-14T17:22:38.378+05:30 2021-01-14T11:52:38.378Z *** INFO entry remark_table_fullfile_2021-01-14-11:01:03_part1.zip
2021-01-14T17:22:38.378+05:30 2021-01-14T11:52:38.378Z **** INFO params
2021-01-14T17:22:38.394+05:30 END RequestId: ****
2021-01-14T17:22:38.394+05:30 REPORT RequestId: *** Duration: 83.91 ms Billed Duration: 84 ms Memory Size: 1024 MB Max Memory Used: 322 MB
Did you try increasing the Lambda function timeout?
This problem occurs due to a permission issue with the VPC endpoint. Here is the solution:
new-vpc-endpoint-for-amazon-s3
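Separate from the endpoint fix, the REPORT line above (Duration: 83.91 ms, and 'Done' is never logged) suggests the handler is returning before the readFile/putObject callbacks get a chance to run. A minimal sketch of a promise-based variant that the handler can await, keeping the /tmp scan and the bucket name from the question (written as plain Node.js here):

// Sketch: await every upload so the Lambda doesn't return early.
const fs = require('fs');
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

async function uploadFullfile() {
    const files = await fs.promises.readdir('/tmp/');
    const matches = files.filter((name) => /\.zip$/.test(name));

    await Promise.all(matches.map(async (fileName) => {
        const data = await fs.promises.readFile('/tmp/' + fileName);
        await s3.putObject({
            Bucket: 'testbucket', // bucket name taken from the question
            Key: fileName,
            ContentType: 'application/zip',
            Body: data
        }).promise();
        console.log('Done', fileName);
    }));
}

module.exports = uploadFullfile;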

AWS lambda never completes but doesn't appear to timeout either

I'm attempting to create a simple application. A user emails an attachment to a special inbox, AWS SES gets the email and stores it in S3, a Lambda function is triggered, and the Lambda finds the email in S3, parses out the attachment (in this case a jpg) and then stores it in a different S3 bucket. Finally, the application creates a new row in an Airtable base with the image as an attachment.
When I invoke this function locally using serverless, everything works fine. The email with the image is already stored in an S3 bucket, so I've created a mock which passes the key explicitly to my Lambda. However, when I deploy the application and send a test email, the following happens:
Email is stored in S3 bucket 'to-airtable-temp'
Lambda function is called
Before the email can be found and the attachment stripped off and stored in a separate S3 bucket, the function just stops. No error message or timeout. It just stops. The CloudWatch logs look like the following:
START RequestId: 6a0364ae-0879-4ee1-9dcd-c8747de1a650 Version: $LATEST
2020-02-17T07:39:55.465Z 6a0364ae-0879-4ee1-9dcd-c8747de1a650 INFO {
s3SchemaVersion: '1.0',
configurationId: 'emailtoairtable-dev-parse-224c3963d3a3f9c35018ae93a9fffea4',
bucket: {
name: 'to-airtable-temp',
ownerIdentity: { principalId: 'AI5RGHFD5AFHE' },
arn: 'arn:aws:s3:::to-airtable-temp'
},
object: {
key: 'mtvuuiokqj55l2a8b0qser7tn9dhfignoh9c1vg1',
size: 3804230,
eTag: 'c821fb0a2a9c3b060e20e7d177f8b972',
sequencer: '005E4A434810147365'
}
}
2020-02-17T07:39:55.465Z 6a0364ae-0879-4ee1-9dcd-c8747de1a650 INFO key mtvuuiokqj55l2a8b0qser7tn9dhfignoh9c1vg1
2020-02-17T07:39:55.465Z 6a0364ae-0879-4ee1-9dcd-c8747de1a650 INFO Key pushed to list. mtvuuiokqj55l2a8b0qser7tn9dhfignoh9c1vg1
END RequestId: 6a0364ae-0879-4ee1-9dcd-c8747de1a650
REPORT RequestId: 6a0364ae-0879-4ee1-9dcd-c8747de1a650 Duration: 1113.17 ms Billed Duration: 1200 ms Memory Size: 1024 MB Max Memory Used: 114 MB Init Duration: 119.56 ms
Here is my handler.js file:
'use strict';
module.exports.parse = async event => {
    try {
        const aws = require('aws-sdk');
        const s3 = new aws.S3();
        const simpleParser = require('mailparser').simpleParser;
        const Airtable = require('airtable');
        const dateformat = require('dateformat');
        var base = new Airtable({ apiKey: process.env.airtableApiKey }).base(process.env.airtableBaseId);
        var data = [];
        var keys = [];

        event["Records"].forEach(async record => {
            console.log(record["s3"]);
            console.log('key', record["s3"]["object"]["key"]);
            keys.push(record["s3"]["object"]["key"]);
            console.log('Key pushed to list. ', record["s3"]["object"]["key"]); // <-- this is the last line that I am sure processes because I see it in the CloudWatch logs.
            var temp_data = await s3.getObject(
                {
                    Bucket: 'to-airtable-temp',
                    Key: record["s3"]["object"]["key"]
                }).promise();
            console.log('temp_data', temp_data);
            data.push(temp_data);
        });

        setTimeout(async function() {
            // console.log('data', data[0].Body.toString());
            let parsed = await simpleParser(data[0].Body.toString());
            console.log(parsed);
            // save the file to a public S3 bucket so it can be uploaded to airtable
            parsed["attachments"].forEach(function(attachment) {
                let now = new Date();
                s3.upload({
                    Bucket: 'to-airtable-images',
                    Key: keys[0] + dateformat(now, "yyyy-mm-dd") + '.jpg',
                    Body: attachment.content,
                    ContentType: "image/jpeg"
                },
                function(error, data) {
                    if (error) {
                        throw error;
                    }
                    console.log('File uploaded successfully. ' + data.Location);
                    // Now upload to airtable
                    base('Input').create([
                        {
                            "fields": { "Attachments": [
                                {
                                    "url": data.Location
                                }
                            ]}
                        }
                    ], function(err, records) {
                        if (err) {
                            console.error(err);
                            return;
                        }
                        records.forEach(function(record) {
                            console.log(record.getId());
                        });
                    });
                });
            });
            return {
                statusCode: 200,
                body: JSON.stringify(
                    {
                        message: 'Go Serverless v1.0! Your function executed successfully!',
                        input: event,
                        data: JSON.stringify(data),
                    },
                    null,
                    2
                ),
            };
        }, 500); // I've tried increasing this time but it still hangs.
    } catch (error) {
        console.error(error);
    }
};
You shouldn't use async/await inside a forEach callback (see "Using async/await with a forEach loop"). Instead, use the more modern for...of syntax:
for (let record of event["Records"]) {
    // you can include await calls in this block
}
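Applied to the handler above, the Records loop could look roughly like this (a sketch keeping the bucket name and variable names from the question), which also removes the need for the setTimeout:

// Sketch: await each getObject inside for...of instead of forEach + setTimeout.
const keys = [];
const data = [];

for (const record of event["Records"]) {
    const key = record["s3"]["object"]["key"];
    console.log('Key pushed to list. ', key);
    keys.push(key);

    const temp_data = await s3.getObject({
        Bucket: 'to-airtable-temp',
        Key: key
    }).promise();
    data.push(temp_data);
}

// Every object has been fetched by this point, so the parsing and upload
// steps can run directly rather than inside a setTimeout callback.
const parsed = await simpleParser(data[0].Body.toString());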

Jimp not reading from url in lambda trigger at aws

Log from AWS CloudWatch:
20:42:36
START RequestId: 3b39ddb6-f2f5-4e11-a3d6-59f47f16240b Version: $LATEST
20:42:39
END RequestId: 3b39ddb6-f2f5-4e11-a3d6-59f47f16240b
20:42:39
REPORT RequestId: 3b39ddb6-f2f5-4e11-a3d6-59f47f16240b Duration: 3003.18 ms Billed Duration: 3000 ms Memory Size: 128 MB Max Memory Used: 128 MB Init Duration: 703.55 ms
20:42:39
2020-01-06T15:12:39.657Z 3b39ddb6-f2f5-4e11-a3d6-59f47f16240b Task timed out after 3.00 seconds
Node.js AWS Lambda code
const aws = require('aws-sdk');
const Jimp = require("jimp");
const uuid = require("uuid/v4");
const s3 = new aws.S3();

//lambda trigger handler for triggering event after object being uploaded into bucket
exports.handler = async (event, context) => {
    const key = event.Records[0].s3.object.key; // Uploaded object key
    const sanitizedKey = key.replace(/\+/g, ' ');
    const keyWithoutExtension = sanitizedKey.replace(/.[^.]+$/, '');
    const objectKey = keyWithoutExtension + '_mb.';
    //read object using jimp to resize it accordingly
    const image = await Jimp.read(prefix + key)
        .then((image) => {
            console.log("Before resizing", image)
            return image
                .resize(256, 256) // resize
                .quality(90) // set JPEG quality
        })
        .then((image) => {
            return uploadToS3(image, objectKey + image.getExtension(), image.getExtension());
        })
        .catch(err => {
            throw err;
        })
        .finally(() => {
            console.info("Function ran successfully")
        })
    console.log(image);
    return image
}

//upload file to s3 after resizing
async function uploadToS3(data, key, ContentType) {
    console.log("Inside uploadToS3: ", data, key, ContentType)
    const resp = await s3
        .putObject({
            Bucket: Bucket,
            Key: key,
            Body: data,
            ContentType: ContentType
        })
    console.log("Response from S3: ", resp);
    return resp
}
Everything looks fine apart from a small change needed in the uploadToS3 method. By default it works with the callback pattern unless you call .promise() at the end. See the updated method:
//upload file to s3 after resizing
async function uploadToS3(data, key, ContentType) {
    console.log("Inside uploadToS3: ", data, key, ContentType)
    const resp = await s3
        .putObject({
            Bucket: Bucket,
            Key: key,
            Body: data,
            ContentType: ContentType
        }).promise();
    console.log("Response from S3: ", resp);
    return resp
}
It is also worth increasing your Lambda timeout from the default 3 seconds, just to rule out that it is timing out before the operation completes.
Hope this helps.
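If the image actually lives in the bucket that fired the trigger, another option (a suggestion on my part, not part of the answer above) is to skip the URL and hand Jimp the object bytes directly, since Jimp.read also accepts a Buffer. A sketch using the same event shape and the updated uploadToS3:

// Sketch: fetch the uploaded object via the SDK and resize from the Buffer.
const bucket = event.Records[0].s3.bucket.name;
const key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));

const obj = await s3.getObject({ Bucket: bucket, Key: key }).promise();
const image = await Jimp.read(obj.Body); // Jimp.read accepts a Buffer as well as a URL
const resized = await image.resize(256, 256).quality(90).getBufferAsync(Jimp.MIME_JPEG);

await uploadToS3(resized, objectKey + 'jpg', 'image/jpeg');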

Uploading an audio file using Node.js Streams in AWS Lambda

So I'm trying to retrieve an mp3 audio file (approximately 9 MB) from S3, pipe the data to a write stream, and then upload it to another destination S3 bucket using a read stream from the /tmp/ file. This is a Lambda function that receives an S3 upload event and attempts to write the data from the created object to another bucket.
const fs = require('fs');
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

exports.handler = (event, context, callback) => {
    var sourceBucket = event.Records[0].s3.bucket.name;
    var sourceKey = event.Records[0].s3.object.key;
    var getParams = {
        Bucket: sourceBucket,
        Key: sourceKey
    };
    const inputFilename = '/tmp/' + sourceKey;

    //writing and reading streams
    const writeStream = fs.createWriteStream(inputFilename);
    s3.getObject(getParams).createReadStream().pipe(writeStream);

    var putParams = {
        Body: fs.createReadStream(inputFilename),
        Bucket: "example-destination-bucket",
        Key: 'transfer-' + sourceKey
    };
    s3.upload(putParams, function(err, data) {
        if (err) console.log(err, err.stack); // an error occurred
        else console.log('logging data' + data); // successful response
    });
};
This results in the key successfully being put to the S3 bucket, but the uploaded file is 0 bytes in size. Any idea why this may result in an empty upload?
The file needs to be downloaded first, which takes some time, so you need to wait for the write stream's 'finish' event, like this:
const writeStream = fs.createWriteStream(inputFilename);
s3.getObject(getParams).createReadStream().pipe(writeStream);
writeStream.on('finish', function() {
    //upload to S3 code
});
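Put together with the handler from the question, the whole flow looks roughly like this sketch (same bucket names and params as above); the upload only starts once the temp file has been completely written:

// Sketch: download to /tmp, wait for 'finish', then upload the complete file.
const writeStream = fs.createWriteStream(inputFilename);
s3.getObject(getParams).createReadStream().pipe(writeStream);

writeStream.on('finish', function() {
    s3.upload({
        Body: fs.createReadStream(inputFilename),
        Bucket: "example-destination-bucket",
        Key: 'transfer-' + sourceKey
    }, function(err, data) {
        if (err) console.log(err, err.stack); // an error occurred
        else console.log('File copied to ' + data.Location); // successful upload
    });
});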
Instead of writing a Lambda to copy from one S3 bucket to another, why not set a replication rule on the source S3 bucket? It will automatically copy over any files that get uploaded, and you can do it cross-account.
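For completeness, a replication rule can also be created programmatically. This is only a sketch (the role ARN and bucket names are placeholders, and both buckets need versioning enabled before replication will work):

// Sketch: replicate everything from a source bucket to a destination bucket.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.putBucketReplication({
    Bucket: 'example-source-bucket', // placeholder
    ReplicationConfiguration: {
        Role: 'arn:aws:iam::123456789012:role/s3-replication-role', // placeholder
        Rules: [{
            ID: 'replicate-everything',
            Prefix: '', // empty prefix = all objects
            Status: 'Enabled',
            Destination: { Bucket: 'arn:aws:s3:::example-destination-bucket' }
        }]
    }
}).promise()
    .then(() => console.log('Replication rule created'))
    .catch(err => console.error(err));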
