AWS Lambda function to delete files from an S3 folder - Node.js

I import some data from Funnel into an S3 bucket. After that, a Lambda function copies the data to a table in Redshift, and then I try to delete all copied objects from the bucket folder, but I keep getting a timeout.
This is my code:
const Promise = require('bluebird');
const { Pool } = require('pg');
const AWS = require('aws-sdk');

async function emptyS3Directory(bucket, dir) {
    const listParams = {
        Bucket: bucket,
        Prefix: dir
    };
    var s3 = new AWS.S3();
    s3.listObjectsV2(listParams, function(err, data) { // Here I always get a timeout
    });
    .....
}
EDIT:
This is the code of the function:
async function DeleteAllDataFromDir(bucket, dir) {
    const listParams = {
        Bucket: bucket,
        Prefix: dir
    };
    var s3 = new AWS.S3();
    const listedObjects = await s3.listObjects(listParams).promise();
    console.log("response", listedObjects);
    if (listedObjects.Contents.length === 0) return;
    const deleteParams = {
        Bucket: bucket,
        Delete: { Objects: [] }
    };
    listedObjects.Contents.forEach(({ Key }) => {
        deleteParams.Delete.Objects.push({ Key });
    });
    await s3.deleteObjects(deleteParams).promise();
    if (listedObjects.IsTruncated) await DeleteAllDataFromDir(bucket, dir);
}
The first time I set the timeout to 2 minutes, then I changed it to 10 minutes, and I get the same error:
{
    "errorType": "NetworkingError",
    "errorMessage": "connect ETIMEDOUT IP:port",
    "code": "NetworkingError",
    "message": "connect ETIMEDOUT IP:port",
    "errno": "ETIMEDOUT",
    "syscall": "connect",
    "address": "IP",
    "port": port,
    "region": "eu-west-2",
    "hostname": "hostName",
    "retryable": true,
    "time": "2020-12-10T08:36:29.984Z",
    "stack": [
        "Error: connect ETIMEDOUT 52.95.148.74:443",
        "    at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1107:14)"
    ]
}

It appears that your bucket may reside in a different region than your Lambda function, based on the nature of the error.
Provide the bucket's region as an option when constructing your S3 client:
var s3 = new AWS.S3({region: 'bucket-region-hash'});
To find the region, open the S3 Management Console and click "Buckets" in the sidebar. The resulting view lists the region next to each bucket.
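For reference, here is a minimal sketch of the delete function from the question with the region passed explicitly. The 'your-bucket-region' string is a placeholder for the bucket's actual region; the rest mirrors the asker's code:

const AWS = require('aws-sdk');

async function deleteAllDataFromDir(bucket, dir) {
    // Construct the client with the bucket's region to avoid the cross-region timeout
    const s3 = new AWS.S3({ region: 'your-bucket-region' }); // placeholder, e.g. 'eu-west-1'

    const listedObjects = await s3.listObjectsV2({ Bucket: bucket, Prefix: dir }).promise();
    if (listedObjects.Contents.length === 0) return;

    // deleteObjects removes up to 1000 keys per call, which matches one page of listObjectsV2
    const deleteParams = {
        Bucket: bucket,
        Delete: { Objects: listedObjects.Contents.map(({ Key }) => ({ Key })) }
    };
    await s3.deleteObjects(deleteParams).promise();

    // Keep going until the listing is no longer truncated
    if (listedObjects.IsTruncated) await deleteAllDataFromDir(bucket, dir);
}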

Related

How to resolve ENOENT: no such file or directory, open '/index.js' issue occurred in Lambda Function?

I have an API written in Node.js which was working fine with async calls, then suddenly started throwing the error below after a recent code deployment.
2022-08-01T12:11:41.332Z d759bb19-f1d3-4fd7-8c5f-852fb41afe6a ERROR Unhandled Promise Rejection
{
    "errorType": "Runtime.UnhandledPromiseRejection",
    "errorMessage": "Error: ENOENT: no such file or directory, open '/index.js'",
    "reason": {
        "errorType": "Error",
        "errorMessage": "ENOENT: no such file or directory, open '/index.js'",
        "code": "ENOENT",
        "errno": -2,
        "syscall": "open",
        "path": "/index.js",
        "stack": [
            "Error: ENOENT: no such file or directory, open '/index.js'"
        ]
    },
    "promise": {},
    "stack": [
        "Runtime.UnhandledPromiseRejection: Error: ENOENT: no such file or directory, open '/index.js'",
        "    at process.<anonymous> (/var/runtime/index.js:35:15)",
        "    at process.emit (events.js:314:20)",
        "    at process.emit (/var/task/src/api/ecmAuthApi/webpack:/Project/node_modules/source-map-support/source-map-support.js:516:1)",
        "    at processPromiseRejections (internal/process/promises.js:209:33)",
        "    at processTicksAndRejections (internal/process/task_queues.js:98:32)"
    ]
}
And below is my relevant source code:
// service.js
const getUserPermissions = async (token, correlationId) => {
    const testUserBucketParams = {
        Bucket: authConstants.META_DATA_TEMPLATE_S3_BUCKET_NAME,
        Key: authConstants.TEST_USERS,
    };
    try {
        const testUsersFile = await s3Connector
            .getData(testUserBucketParams, { awsRequestId: correlationId });
        const testUsersMapping = getJson(testUsersFile);
        const payloadData = await Promise.resolve(
            validateToken(
                token, authConstants.LIBERTY_USER_POOL_JWK_URL, testUsersMapping, correlationId,
            ),
        );
        return await Promise.resolve(getDataFromDB(payloadData, correlationId));
    } catch (error) {
        return 1;
    }
};
And the util function:
// utils
import AWS from 'aws-sdk';
import { validateS3Fetch } from '../util/commonUtil';

const s3 = new AWS.S3();

const getData = async (params, context) => {
    const file = await s3.getObject(params).promise(); // the error occurs here
    validateS3Fetch(file, params, context);
    return file.Body.toString('utf-8');
};

export default { getData };
FYI, this implementation was working perfectly until I redeployed the code today, and there were no changes in package.json either. It works fine locally.
What am I missing here? Is it an issue with AWS Lambda?
There is nothing wrong with the implementation. My code was instrumented with Datadog, and its API key had expired. After updating the key, it works fine both locally and remotely :)

MethodNotAllowed: The specified method is not allowed against this resource error in Node.js AWS S3

I just created a Lambda function and configured it with an S3 Object Lambda Access Point. When I call the Lambda function through the S3 Object Lambda Access Point, it returns the following error.
ERROR:
{
    "errorType": "MethodNotAllowed",
    "errorMessage": "The specified method is not allowed against this resource.",
    "code": "MethodNotAllowed",
    "message": "The specified method is not allowed against this resource.",
    "region": null,
    "time": "2021-03-25T06:53:27.593Z",
    "requestId": "2PSZNM1V2A5H5YA5",
    "extendedRequestId": "cWn4JmcBTbRyrl+IqCakNYeu4Zeca9/l+jdUwlWgVZRZ8H5NPnCCixK3gweUe1logjU9QpSYgK4=",
    "statusCode": 405,
    "retryable": false,
    "retryDelay": 27.356515607009,
    "stack": [
        "MethodNotAllowed: The specified method is not allowed against this resource.",
        "    at Request.extractError (/var/task/node_modules/aws-sdk/lib/services/s3.js:712:35)",
        "    at Request.callListeners (/var/task/node_modules/aws-sdk/lib/sequential_executor.js:106:20)",
        "    at Request.emit (/var/task/node_modules/aws-sdk/lib/sequential_executor.js:78:10)",
        "    at Request.emit (/var/task/node_modules/aws-sdk/lib/request.js:688:14)",
        "    at Request.transition (/var/task/node_modules/aws-sdk/lib/request.js:22:10)",
        "    at AcceptorStateMachine.runTo (/var/task/node_modules/aws-sdk/lib/state_machine.js:14:12)",
        "    at /var/task/node_modules/aws-sdk/lib/state_machine.js:26:10",
        "    at Request.<anonymous> (/var/task/node_modules/aws-sdk/lib/request.js:38:9)",
        "    at Request.<anonymous> (/var/task/node_modules/aws-sdk/lib/request.js:690:12)",
        "    at Request.callListeners (/var/task/node_modules/aws-sdk/lib/sequential_executor.js:116:18)"
    ]
}
I tried to convert this S3 Python example to Node.js. Read more: Introducing Amazon S3 Object Lambda – Use Your Code to Process Data as It Is Being Retrieved from S3
Lambda Function:
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

exports.handler = async (event) => {
    const object_get_context = event["getObjectContext"];
    const request_route = object_get_context["outputRoute"];
    const request_token = object_get_context["outputToken"];
    var params = {
        RequestRoute: request_route,
        RequestToken: request_token,
        Body: 'TEST TEXT',
    };
    const data = await s3.writeGetObjectResponse(params).promise();
    console.log('before return data --->', data);
    return {'status_code': 200};
}
I call the Object Lambda Access Point with this code:
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

var getParams = {
    Bucket: 'arn:aws:s3-object-lambda:xxxxxxxxxxx/xxxxxxxxxxxxxxxxxxx', // Object Lambda Access Point ARN
    Key: 'test.txt' // path to the object you're looking for
}

s3.getObject(getParams, function(err, data) {
    // Handle any error and exit
    if (err) {
        console.log('err ====>', err);
        return err;
    }
    let objectData = data.Body.toString('utf-8'); // Use the encoding necessary
    console.log('objectData ===>', objectData);
});

s3 client.uploadFile() hangs with no error

My credentials work successfully with the AWS CLI to upload S3 files; the credentials are associated with an admin user on a bucket that hosts a static site. Update: download works as expected. For some reason the Node 's3' package fails, but no error is displayed; it just logs:
progress 0 4561 4561
progress 0 4561 4561
...
unable to upload: RequestTimeout: Your socket connection to the server was not read from or written to within the timeout period. Idle connections will be closed.
at Request.extractError (/Users/../node_modules/aws-sdk/lib/services/s3.js:700:35)
at Request.callListeners (/Users/../node_modules/aws-sdk/lib/sequential_executor.js:106:20)
at Request.emit (/Users/../node_modules/aws-sdk/lib/sequential_executor.js:78:10)
at Request.emit (/Users/../node_modules/aws-sdk/lib/request.js:688:14)
And I'm not sure what the problem would be. Any insights? The code is below; it's taken directly from the s3 package: https://www.npmjs.com/package/s3
// RUNNING NODE 11.0 (due to un-updated dependencies in s3 or aws-sdk it must be under 11.15)
const { accessKeyId, secretAccessKey } = require('./tools/AWS.json')
const s3 = require('s3')
const AWS = require('aws-sdk')

const awsS3Client = new AWS.S3({
    accessKeyId,
    secretAccessKey,
    signatureVersion: 'v4',
    region: 'us-east-2',
})

const client = s3.createClient({
    s3Client: awsS3Client,
})

const liveBucket = 'my-bucket'
const params = {
    localFile: 'out/404.html',
    s3Params: {
        Bucket: liveBucket,
        Key: '404.html',
    },
}

const uploader = client.uploadFile(params)

uploader.on('error', function(err) {
    // never runs ...
    console.error('unable to upload:', err.stack)
})

uploader.on('progress', function() {
    // runs like 5 times before failing after a long wait
    console.log('progress', uploader.progressMd5Amount, uploader.progressAmount, uploader.progressTotal)
})

uploader.on('end', function() {
    // never runs ...
    console.log('done uploading')
})
Permissions:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "PublicReadGetObject",
            "Effect": "Allow",
            "Principal": "*",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::my-bucket/*"
        }
    ]
}
It looks like your upload hangs because of a bucket permissions issue. Please try to add the following permissions to your bucket (a policy sketch follows the list):
s3:PutObject
s3:PutObjectTagging
s3:ListBucketMultipartUploads
s3:ListMultipartUploadParts
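For illustration only, here is one way such a statement might look as an identity policy for the uploading user; the Sid and bucket name are placeholders, and if this were attached as a bucket policy a Principal would also be required:

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AllowUploadsExample",
            "Effect": "Allow",
            "Action": [
                "s3:PutObject",
                "s3:PutObjectTagging",
                "s3:ListBucketMultipartUploads",
                "s3:ListMultipartUploadParts"
            ],
            "Resource": [
                "arn:aws:s3:::my-bucket",
                "arn:aws:s3:::my-bucket/*"
            ]
        }
    ]
}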

Problem with aws-sdk while creating S3 buckets in Node.js

I am currently using the aws-sdk to list and create buckets.
My code is as follows:
var AWS = require('aws-sdk');
const router = require('express').Router();
require('dotenv').config();

AWS.config.logger = console;
// AWS.config.update({region: process.env.AWS_REGION}); // AWS bucket region

s3 = new AWS.S3({
    // apiVersion: '2006-03-01',
    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
    region: process.env.AWS_REGION
}); // s3 svc object

// Accepts Bucket Name, Bucket Created User/ Belonging Organization
router.post('/create', (req, res, next) => {
    var bucketParams = {
        Bucket: req.body.bucket
    }
    console.log(process.env.AWS_ACCESS_KEY_ID);
    s3.createBucket(bucketParams, (err, data) => {
        console.log("sample");
        if (err) {
            console.log("Error", err);
        } else {
            console.log("Success", data.Location);
        }
    })
});

router.get('/', (req, res, next) => {
    s3.listBuckets((err, data) => {
        if (err) {
        } else {
        }
    })
});
The AWS IAM user policy that I use for these is as follows:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:*",
            "Resource": "*"
        }
    ]
}
The problem is, whenever I submit an HTTP request, the request doesn't get terminated or return a valid response.
For example, I get the following when creating a bucket named sample-1:
[AWS s3 409 1.052s 0 retries] createBucket({
    Bucket: 'sample1',
    CreateBucketConfiguration: { LocationConstraint: 'eu-west-1' }
})
sample
Error BucketAlreadyExists: The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.
    at Request.extractError (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/services/s3.js:837:35)
    at Request.callListeners (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/sequential_executor.js:106:20)
    at Request.emit (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/sequential_executor.js:78:10)
    at Request.emit (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/request.js:688:14)
    at Request.transition (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/request.js:22:10)
    at AcceptorStateMachine.runTo (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/state_machine.js:14:12)
    at /home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/state_machine.js:26:10
    at Request.<anonymous> (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/request.js:38:9)
    at Request.<anonymous> (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/request.js:690:12)
    at Request.callListeners (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/sequential_executor.js:116:18) {
    code: 'BucketAlreadyExists',
    region: 'us-east-1',
    time: 2020-08-18T15:24:22.974Z,
    requestId: '3C1F8277A6CAD712',
    extendedRequestId: 'bfRp36yH8Gh64zjM5VrUqCJi0V1AY5Sc5Snpf5yROPyV0HHgWTtE7gIEz70HRHb2JoOcO6jfLvQ=',
    cfId: undefined,
    statusCode: 409,
    retryable: false,
    retryDelay: 5.386495440613426
}
The strangest thing is that there is no bucket with the same name, and the region I used in this case was eu-west-1. Does anyone have any clue why this happens and why the requests are not getting terminated?
Bucket names are global to AWS; see below.
Amazon S3 bucket names must be unique globally. If you get the "Bucket name already exists" or "BucketAlreadyExists" error, then you must use a different bucket name to create the bucket. These error messages indicate that another AWS account owns a bucket with the same name.
You should try using a unique name.
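If it helps, here is a minimal sketch of generating a globally unique name before calling createBucket, using the s3 client from the code above; the prefix and the use of crypto.randomUUID (built into Node.js 14.17+) are just one option:

const { randomUUID } = require('crypto');

// Example only: any globally unique, lowercase name works; the prefix is arbitrary
const uniqueBucketName = `sample-1-${randomUUID()}`;

s3.createBucket({ Bucket: uniqueBucketName }, (err, data) => {
    if (err) console.log('Error', err);
    else console.log('Success', data.Location);
});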

Not able to connect to AWS through aws-sdk in Node.js

I am new to the AWS SDK and trying to follow the documentation here: https://aws.amazon.com/developers/getting-started/nodejs/ I am trying to create a bucket and upload a text file there, but I am getting the error below.
{ Error: connect ETIMEDOUT 52.216.206.51:443
    at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1104:14)
  message: 'connect ETIMEDOUT 52.216.206.51:443',
  errno: 'ETIMEDOUT',
  code: 'NetworkingError',
  syscall: 'connect',
  address: '52.216.206.51',
  port: 443,
  region: 'us-east-1',
  hostname: 'node-sdk-sample-c58bc12a-70be-4578-9c18-1c815962653b.s3.amazonaws.com',
  retryable: true,
  time: 20
I have tried giving the credentials in the credentials file under userid/.aws/credentials:
[default]
aws_access_key_id = my cred...
aws_secret_access_key = my cred....
I have also tried giving the credentials in the code itself:
AWS.config.update({accessKeyId: 'my cred', secretAccessKey: 'my secret key', region: 'us-east-1'});
// Load the SDK and UUID
var AWS = require('aws-sdk');
var uuid = require('node-uuid');

AWS.config.update({accessKeyId: '......', secretAccessKey: '.....', region: 'us-east-1'});

var s3 = new AWS.S3();
var bucketName = 'node-sdk-sample-' + uuid.v4();
var keyName = 'hello_world.txt';

s3.createBucket({Bucket: bucketName}, function() {
    var params = {Bucket: bucketName, Key: keyName, Body: 'Hello World!'};
    s3.putObject(params, function(err, data) {
        if (err)
            console.log(err)
        else
            console.log("Successfully uploaded data to " + bucketName + "/" + keyName);
    });
});
I was able to resolve this issue. It was related to a proxy: I was trying to access AWS from behind a firewall. After configuring the proxy settings, I am able to connect to AWS.
var proxy = require('proxy-agent');
AWS.config.update({ httpOptions: { agent: proxy('ur proxy url') } });
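A slightly fuller sketch of the same idea, assuming an older proxy-agent release whose default export is callable (as in the snippet above) and a placeholder proxy URL and region:

const AWS = require('aws-sdk');
const proxy = require('proxy-agent');

// Route all SDK traffic through the corporate proxy (placeholder URL)
AWS.config.update({
    region: 'us-east-1',
    httpOptions: { agent: proxy('http://my-proxy-host:8080') }
});

const s3 = new AWS.S3();
s3.listBuckets((err, data) => {
    if (err) console.log(err);
    else console.log(data.Buckets);
});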
const AWS = require('aws-sdk'); // require aws sdk library

var params = {
    Bucket: awsBucketName,
    Key: path + fileName,
    Body: value.data
};

// s3 upload start
s3bucket.upload(params, (err, data) => {
    if (err) {
        return reject(err);
    }
    return resolve(data.key);
});

// get signed url for downloading the uploaded file
const url = await s3bucket.getSignedUrl('getObject', {
    Bucket: awsBucketName,
    Key: key,
    Expires: 60 * 3
});
