Node.js adm-zip fails in Lambda with Access Denied error

My goal is to read a file inside a zip file from a Lambda function invoked by an S3 Put event. Here is the code, but it fails with Access Denied even though Lambda has full access to S3:
var AWS = require('aws-sdk');
var fs = require('fs');
var s3 = new AWS.S3();
var AdmZip = require('adm-zip');

exports.handler = function(event, context, callback) {
    const bucket = event.Records[0].s3.bucket.name;
    const key = event.Records[0].s3.object.key;
    var params = {Bucket: 'bucket', Key: 'key'};
    console.log('Bucket Name:', bucket);
    console.log('Object Name:', key);
    var buffer = s3.getObject(params).createReadStream();
    var zip = new AdmZip(buffer);
    var zipEntries = zip.getEntries();
    zipEntries.forEach(function(zipEntry) {
        console.log(zipEntry.toString());
        console.log('File Name: ', zipEntry.entryName);
        if (zipEntry.entryName == "text.txt") {
            console.log(zipEntry.data.toString('utf8'));
        }
    });
};
I am getting the following error:
START RequestId: fc524797-4d28-11e6-a092-5b3299c063a6 Version: $LATEST
2016-07-18T20:48:33.983Z fc524797-4d28-11e6-a092-5b3299c063a6 Bucket Name: myTestBucket123
2016-07-18T20:48:33.984Z fc524797-4d28-11e6-a092-5b3299c063a6 Object Name: test.zip
2016-07-18T20:48:35.145Z fc524797-4d28-11e6-a092-5b3299c063a6 AccessDenied: Access Denied
at Request.extractError (/var/runtime/node_modules/aws-sdk/lib/services/s3.js:524:35)
at Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:105:20)
at Request.emit (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:77:10)
at Request.emit (/var/runtime/node_modules/aws-sdk/lib/request.js:615:14)
at Request.transition (/var/runtime/node_modules/aws-sdk/lib/request.js:22:10)
at AcceptorStateMachine.runTo (/var/runtime/node_modules/aws-sdk/lib/state_machine.js:14:12)
at /var/runtime/node_modules/aws-sdk/lib/state_machine.js:26:10
at Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:38:9)
at Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:617:12)
at Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:115:18)
END RequestId: fc524797-4d28-11e6-a092-5b3299c063a6
REPORT RequestId: fc524797-4d28-11e6-a092-5b3299c063a6 Duration: 4318.87 ms Billed Duration: 4400 ms Memory Size: 128 MB Max Memory Used: 24 MB
Process exited before completing request
Here is the policy for the role that Lambda is using:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:PutLogEvents"
            ],
            "Resource": "arn:aws:logs:*:*:*"
        },
        {
            "Effect": "Allow",
            "Action": [
                "s3:*"
            ],
            "Resource": [
                "arn:aws:s3:::*"
            ]
        }
    ]
}
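Note that params is built from the literal strings 'bucket' and 'key' rather than the bucket and key variables, so the request targets a bucket literally named bucket, which belongs to some other AWS account; S3 denies that request no matter what this role's own policy allows. Below is a minimal sketch of the likely intent, with two further hedges: object keys in S3 event records arrive URL-encoded, and adm-zip's constructor takes a file path or a Buffer, not a stream, so the sketch reads the whole object into memory first:

var AWS = require('aws-sdk');
var AdmZip = require('adm-zip');
var s3 = new AWS.S3();

exports.handler = function(event, context, callback) {
    var bucket = event.Records[0].s3.bucket.name;
    // Keys in S3 event notifications are URL-encoded (spaces arrive as '+')
    var key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));

    s3.getObject({ Bucket: bucket, Key: key }, function(err, data) {
        if (err) return callback(err);
        var zip = new AdmZip(data.Body); // data.Body is a Buffer
        zip.getEntries().forEach(function(zipEntry) {
            console.log('File Name:', zipEntry.entryName);
            if (zipEntry.entryName === 'text.txt') {
                // getData() is adm-zip's accessor for the decompressed bytes
                console.log(zipEntry.getData().toString('utf8'));
            }
        });
        callback(null, 'done');
    });
};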

Related

MethodNotAllowed: The specified method is not allowed against this resource error in nodejs Aws S3

I created a Lambda function and configured it with an S3 Object Lambda Access Point. When I call the Lambda function through the S3 Object Lambda Access Point, it returns the following error.
ERROR:
{
    "errorType": "MethodNotAllowed",
    "errorMessage": "The specified method is not allowed against this resource.",
    "code": "MethodNotAllowed",
    "message": "The specified method is not allowed against this resource.",
    "region": null,
    "time": "2021-03-25T06:53:27.593Z",
    "requestId": "2PSZNM1V2A5H5YA5",
    "extendedRequestId": "cWn4JmcBTbRyrl+IqCakNYeu4Zeca9/l+jdUwlWgVZRZ8H5NPnCCixK3gweUe1logjU9QpSYgK4=",
    "statusCode": 405,
    "retryable": false,
    "retryDelay": 27.356515607009,
    "stack": [
        "MethodNotAllowed: The specified method is not allowed against this resource.",
        "    at Request.extractError (/var/task/node_modules/aws-sdk/lib/services/s3.js:712:35)",
        "    at Request.callListeners (/var/task/node_modules/aws-sdk/lib/sequential_executor.js:106:20)",
        "    at Request.emit (/var/task/node_modules/aws-sdk/lib/sequential_executor.js:78:10)",
        "    at Request.emit (/var/task/node_modules/aws-sdk/lib/request.js:688:14)",
        "    at Request.transition (/var/task/node_modules/aws-sdk/lib/request.js:22:10)",
        "    at AcceptorStateMachine.runTo (/var/task/node_modules/aws-sdk/lib/state_machine.js:14:12)",
        "    at /var/task/node_modules/aws-sdk/lib/state_machine.js:26:10",
        "    at Request.<anonymous> (/var/task/node_modules/aws-sdk/lib/request.js:38:9)",
        "    at Request.<anonymous> (/var/task/node_modules/aws-sdk/lib/request.js:690:12)",
        "    at Request.callListeners (/var/task/node_modules/aws-sdk/lib/sequential_executor.js:116:18)"
    ]
}
I was trying to convert the S3 Python example to Node.js. Read more: Introducing Amazon S3 Object Lambda – Use Your Code to Process Data as It Is Being Retrieved from S3
Lambda Function:
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

exports.handler = async (event) => {
    const object_get_context = event["getObjectContext"];
    const request_route = object_get_context["outputRoute"];
    const request_token = object_get_context["outputToken"];

    var params = {
        RequestRoute: request_route,
        RequestToken: request_token,
        Body: 'TEST TEXT',
    };

    const data = await s3.writeGetObjectResponse(params).promise();
    console.log('before return data --->', data);
    return {'status_code': 200};
}
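One caveat with this handler, separate from the error above: if anything throws before writeGetObjectResponse runs, the original getObject caller is left hanging until it times out. A hedged variant that reports failures through the same channel (StatusCode, ErrorCode, and ErrorMessage are supported WriteGetObjectResponse parameters; the s3 client is the same one as above):

exports.handler = async (event) => {
    const { outputRoute, outputToken } = event.getObjectContext;
    let params;
    try {
        // Transform step goes here; kept trivial to match the example above
        params = { RequestRoute: outputRoute, RequestToken: outputToken, Body: 'TEST TEXT' };
    } catch (err) {
        // Report the failure to the caller instead of letting the request hang
        params = {
            RequestRoute: outputRoute,
            RequestToken: outputToken,
            StatusCode: 500,
            ErrorCode: 'TransformError',
            ErrorMessage: String(err),
        };
    }
    await s3.writeGetObjectResponse(params).promise();
    return { status_code: 200 };
};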
I call the Object Lambda Access Point with this code:
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

var getParams = {
    Bucket: 'arn:aws:s3-object-lambda:xxxxxxxxxxx/xxxxxxxxxxxxxxxxxxx', // Object Lambda Access Point ARN
    Key: 'test.txt' // path to the object you're looking for
}

s3.getObject(getParams, function(err, data) {
    // Handle any error and exit
    if (err) {
        console.log('err ====>', err);
        return err;
    }
    let objectData = data.Body.toString('utf-8'); // Use the encoding necessary
    console.log('objectData ===>', objectData);
});

s3 client.uploadFile() hangs with no error

My credentials work with the AWS CLI to upload S3 files; they are associated with an admin user on a bucket hosting a static site. Update: download works as expected. For some reason the Node 's3' package fails, but no error is displayed; it just logs:
progress 0 4561 4561
progress 0 4561 4561
...
unable to upload: RequestTimeout: Your socket connection to the server was not read from or written to within the timeout period. Idle connections will be closed.
at Request.extractError (/Users/../node_modules/aws-sdk/lib/services/s3.js:700:35)
at Request.callListeners (/Users/../node_modules/aws-sdk/lib/sequential_executor.js:106:20)
at Request.emit (/Users/../node_modules/aws-sdk/lib/sequential_executor.js:78:10)
at Request.emit (/Users/../node_modules/aws-sdk/lib/request.js:688:14)
I'm not sure what the problem could be. Any insights? The code is below; it's taken directly from the s3 package https://www.npmjs.com/package/s3
// RUNNING NODE 11.0 (due to un-updated dependencies in s3 or aws-sdk it must be under 11.15)
const { accessKeyId, secretAccessKey } = require('./tools/AWS.json')
const s3 = require('s3')
const AWS = require('aws-sdk')

const awsS3Client = new AWS.S3({
  accessKeyId,
  secretAccessKey,
  signatureVersion: 'v4',
  region: 'us-east-2',
})

const client = s3.createClient({
  s3Client: awsS3Client,
})

const liveBucket = 'my-bucket'
const params = {
  localFile: 'out/404.html',
  s3Params: {
    Bucket: liveBucket,
    Key: '404.html',
  },
}

const uploader = client.uploadFile(params)

uploader.on('error', function(err) {
  // never runs ...
  console.error('unable to upload:', err.stack)
})

uploader.on('progress', function() {
  // runs like 5 times before failing after a long wait
  console.log('progress', uploader.progressMd5Amount, uploader.progressAmount, uploader.progressTotal)
})

uploader.on('end', function() {
  // never runs ...
  console.log('done uploading')
})
Permissions:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "PublicReadGetObject",
            "Effect": "Allow",
            "Principal": "*",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::my-bucket/*"
        }
    ]
}
It looks like your upload hangs because of a bucket permissions issue. Please try to add the following permissions to your bucket:
s3:PutObject
s3:PutObjectTagging
s3:ListBucketMultipartUploads
s3:ListMultipartUploadParts
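For example, as an IAM policy statement attached to the uploading user (bucket name taken from the code above; equivalent grants can also go in the bucket policy):

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "s3:PutObject",
                "s3:PutObjectTagging",
                "s3:ListBucketMultipartUploads",
                "s3:ListMultipartUploadParts"
            ],
            "Resource": [
                "arn:aws:s3:::my-bucket",
                "arn:aws:s3:::my-bucket/*"
            ]
        }
    ]
}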

Problem with aws-sdk while creating S3 buckets in Node.js

I am currently using aws-sdk to list and create buckets. My code is as follows:
var AWS = require('aws-sdk');
const router = require('express').Router();
require('dotenv').config();

AWS.config.logger = console;
// AWS.config.update({region: process.env.AWS_REGION}); // AWS bucket region

const s3 = new AWS.S3({
    // apiVersion: '2006-03-01',
    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
    region: process.env.AWS_REGION
}); // s3 svc object

// Accepts Bucket Name, Bucket Created User/ Belonging Organization
router.post('/create', (req, res, next) => {
    var bucketParams = {
        Bucket: req.body.bucket
    };
    console.log(process.env.AWS_ACCESS_KEY_ID);
    s3.createBucket(bucketParams, (err, data) => {
        console.log("sample");
        if (err) {
            console.log("Error", err);
        } else {
            console.log("Success", data.Location);
        }
    });
});

router.get('/', (req, res, next) => {
    s3.listBuckets((err, data) => {
        if (err) {
        } else {
        }
    });
});
The AWS IAM user policy that I use for these is as follows.
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:*",
            "Resource": "*"
        }
    ]
}
The problem is that whenever I submit an HTTP request, the request never terminates or returns a valid response. For example, I get the following when creating a bucket named sample-1:
[AWS s3 409 1.052s 0 retries] createBucket({
Bucket: 'sample1',
CreateBucketConfiguration: { LocationConstraint: 'eu-west-1' }
})
sample
Error BucketAlreadyExists: The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.
at Request.extractError (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/services/s3.js:837:35)
at Request.callListeners (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/sequential_executor.js:106:20)
at Request.emit (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/sequential_executor.js:78:10)
at Request.emit (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/request.js:688:14)
at Request.transition (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/request.js:22:10)
at AcceptorStateMachine.runTo (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/state_machine.js:14:12)
at /home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/state_machine.js:26:10
at Request.<anonymous> (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/request.js:38:9)
at Request.<anonymous> (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/request.js:690:12)
at Request.callListeners (/home/caesar/Workspace/res-s3/res-s3-backend/node_modules/aws-sdk/lib/sequential_executor.js:116:18) {
code: 'BucketAlreadyExists',
region: 'us-east-1',
time: 2020-08-18T15:24:22.974Z,
requestId: '3C1F8277A6CAD712',
extendedRequestId: 'bfRp36yH8Gh64zjM5VrUqCJi0V1AY5Sc5Snpf5yROPyV0HHgWTtE7gIEz70HRHb2JoOcO6jfLvQ=',
cfId: undefined,
statusCode: 409,
retryable: false,
retryDelay: 5.386495440613426
}
The strangest thing is that there is no bucket of mine with the same name, and the region I used in this case was eu-west-1. Does anyone have any clue why this happens, and why the requests never terminate?
Bucket names are global across all of AWS; see below.
Amazon S3 bucket names must be unique globally. If you get the "Bucket name already exists" or "BucketAlreadyExists" error, then you must use a different bucket name to create the bucket. These error messages indicate that another AWS account owns a bucket with the same name.
You should try using a unique name.
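A sketch of the create route with both fixes, assuming the Express setup from the question: a suffix makes the requested name globally unique enough for testing, and sending a response is what actually terminates the request, since the original handlers never call res.send or res.json:

router.post('/create', (req, res) => {
    // Suffix with a timestamp (or your account ID) to dodge global name collisions
    const bucketParams = { Bucket: `${req.body.bucket}-${Date.now()}` };
    s3.createBucket(bucketParams, (err, data) => {
        if (err) {
            console.log('Error', err);
            return res.status(500).json({ error: err.code }); // terminate the request
        }
        console.log('Success', data.Location);
        res.status(201).json({ location: data.Location });    // terminate the request
    });
});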

dynamodb TransactWriteItems error: An unknown operation was requested

I'm trying to update multiple items using TransactWriteItems, but I got the following error:
{
    UnknownOperationException: An unknown operation was requested.
        at Request.extractError (project-dir\node_modules\aws-sdk\lib\protocol\json.js:51:27)
        at Request.callListeners (project-dir\node_modules\aws-sdk\lib\sequential_executor.js:106:20)
        at Request.emit (project-dir\node_modules\aws-sdk\lib\sequential_executor.js:78:10)
        at Request.emit (project-dir\node_modules\aws-sdk\lib\request.js:683:14)
        at Request.transition (project-dir\node_modules\aws-sdk\lib\request.js:22:10)
        at AcceptorStateMachine.runTo (project-dir\node_modules\aws-sdk\lib\state_machine.js:14:12)
        at project-dir\node_modules\aws-sdk\lib\state_machine.js:26:10
        at Request.<anonymous> (project-dir\node_modules\aws-sdk\lib\request.js:38:9)
        at Request.<anonymous> (project-dir\node_modules\aws-sdk\lib\request.js:685:12)
        at Request.callListeners (project-dir\node_modules\aws-sdk\lib\sequential_executor.js:116:18)
    message: 'An unknown operation was requested.',
    code: 'UnknownOperationException',
    time: 2019-06-21T18:28:46.776Z,
    requestId: '',
    statusCode: 400,
    retryable: false,
    retryDelay: 17.98291928629798
}
My code is given below:
const aws = require('aws-sdk');

const dynamodb = new aws.DynamoDB({ endpoint: "http://localhost:8000" });
const result = await dynamodb
    .transactWriteItems({
        TransactItems: [{
            "Update": {
                "TableName": "dbTable1",
                "Key": {
                    "id": { "S": "table-primary-key-id-01" }
                },
                "ConditionExpression": "#id = :id",
                "UpdateExpression": "set #orderNo = :orderNo",
                "ExpressionAttributeNames": {
                    "#id": "id",
                    "#orderNo": "orderNo"
                },
                "ExpressionAttributeValues": {
                    ":id": { "S": "table-primary-key-id-01" },
                    ":orderNo": { "N": "9" }
                }
            }
        }]
    })
    .promise();
Any help would be much appreciated. Thanks in advance.
I see you are running the TransactWriteItems operation against a local DynamoDB instance. Unfortunately, the DynamoDB Local build you are running does not implement the transactions API calls, which is why it answers with UnknownOperationException; newer DynamoDB Local releases do support them.
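If so, the identical transactWriteItems call should work unchanged once the client points at the real service instead of localhost (or at a newer DynamoDB Local build). A sketch, with the region as a placeholder:

const aws = require('aws-sdk');

// No endpoint override: talk to the real DynamoDB service
const dynamodb = new aws.DynamoDB({ region: 'us-east-1' });

// ...the transactWriteItems({ TransactItems: [...] }) call from the
// question can now run as-is.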

Why is my access denied on s3 (using the aws-sdk for Node.js)?

I'm trying to read an existing file from my s3 bucket, but I keep getting "Access Denied" with no explanation or instructions on what to do about it. Here is the code I am using:
'use strict'
var AWS = require('aws-sdk')

const options = {
  apiVersion: '2006-03-01',
  params: {
    Bucket: process.env['IMAGINATOR_BUCKET']
  },
  accessKeyId: process.env['IMAGINATOR_AWS_ACCESS_KEY_ID'],
  secretAccessKey: process.env['IMAGINATOR_AWS_SECRET_ACCESS_KEY'],
  signatureVersion: 'v4'
}
console.log('options', options)

var s3 = new AWS.S3(options)

module.exports = exports = {
  get (name, cb) {
    const params = {
      Key: name + '.json'
    }
    console.log('get params', params)
    return s3.getObject(params, cb)
  },
  set (name, body, cb) {
    const params = {
      Key: name + '.json',
      Body: body
    }
    console.log('set params', params)
    return s3.putObject(params, cb)
  }
}
And this is what I'm getting as output when using the get method and logging the error provided in the callback (with sensitive information censored out):
options { apiVersion: '2006-03-01',
params: { Bucket: CENSORED_BUT_CORRECT },
accessKeyId: CENSORED_BUT_CORRECT,
secretAccessKey: CENSORED_BUT_CORRECT,
signatureVersion: 'v4' }
get params { Key: 'whitelist.json' }
err { [AccessDenied: Access Denied]
message: 'Access Denied',
code: 'AccessDenied',
region: null,
time: Wed Sep 21 2016 11:17:50 GMT-0400 (EDT),
requestId: CENSORED,
extendedRequestId: CENSORED,
cfId: undefined,
statusCode: 403,
retryable: false,
retryDelay: 20.084538962692022 }
/Users/shawn/git/vigour-io/imaginate/node_modules/aws-sdk/lib/request.js:31
throw err;
^
AccessDenied: Access Denied
at Request.extractError (/Users/shawn/git/vigour-io/imaginate/node_modules/aws-sdk/lib/services/s3.js:538:35)
at Request.callListeners (/Users/shawn/git/vigour-io/imaginate/node_modules/aws-sdk/lib/sequential_executor.js:105:20)
at Request.emit (/Users/shawn/git/vigour-io/imaginate/node_modules/aws-sdk/lib/sequential_executor.js:77:10)
at Request.emit (/Users/shawn/git/vigour-io/imaginate/node_modules/aws-sdk/lib/request.js:668:14)
at Request.transition (/Users/shawn/git/vigour-io/imaginate/node_modules/aws-sdk/lib/request.js:22:10)
at AcceptorStateMachine.runTo (/Users/shawn/git/vigour-io/imaginate/node_modules/aws-sdk/lib/state_machine.js:14:12)
at /Users/shawn/git/vigour-io/imaginate/node_modules/aws-sdk/lib/state_machine.js:26:10
at Request.<anonymous> (/Users/shawn/git/vigour-io/imaginate/node_modules/aws-sdk/lib/request.js:38:9)
at Request.<anonymous> (/Users/shawn/git/vigour-io/imaginate/node_modules/aws-sdk/lib/request.js:670:12)
at Request.callListeners (/Users/shawn/git/vigour-io/imaginate/node_modules/aws-sdk/lib/sequential_executor.js:115:18)
Now I'm not sure what to do, because I think I'm doing things correctly according to the docs, but it's not working and the error message doesn't say why my access is denied... Any idea what the next step should be to get this working?
The problem was that my new IAM user didn't have a policy attached to it. I assigned it the AmazonS3FullAccess policy and now it works.
As pointed out in the comments, a more restrictive policy would be much safer
This can also happen if you're trying to set the ACL to "public-read" but the bucket is blocking public access, for example when uploading static assets to a misconfigured S3 bucket. You can change this in your bucket settings.
FullAccess in your policy is not required. You can try something like this:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "VisualEditor0",
            "Effect": "Allow",
            "Action": [
                "s3:Put*",
                "s3:Get*",
                "s3:List*",
                "s3:Delete*"
            ],
            "Resource": [
                "arn:aws:s3:::bucket/*",
                "arn:aws:s3:::bucket"
            ]
        },
        {
            "Sid": "VisualEditor1",
            "Effect": "Allow",
            "Action": "s3:ListAllMyBuckets",
            "Resource": "*"
        }
    ]
}
These errors can also occur when the object you are trying to read does not exist; in my experience the AWS errors are not very clear in these situations. Check that your key and bucket are correct and that you are sending the correct params to the API method.
I have run into this problem twice:
- when I swapped the Key param with the Bucket param (and vice versa) while trying to read an S3 object using the getObject() method;
- when I was trying to copy a file to a location that did not exist using the copyObject() method.
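A quick way to tell a missing object apart from a genuine permission problem is a headObject call: given s3:ListBucket permission, a missing key comes back as 404 (NotFound) rather than 403. A sketch, where the bucket name is a placeholder and the key is the one from the question's log:

s3.headObject({ Bucket: 'my-bucket', Key: 'whitelist.json' }, (err, data) => {
    if (err && err.code === 'NotFound') {
        console.log('Key does not exist');    // wrong key/bucket, not permissions
    } else if (err) {
        console.log('Likely a real permission problem:', err.code);
    } else {
        console.log('Object exists:', data.ContentLength, 'bytes');
    }
});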
Steps:
1. In AWS, click on Users in IAM.
2. Click on the Permissions tab.
3. Click on Add permissions, then Add group.
4. Search for s3fullaccess in the search bar.
5. Select AmazonS3FullAccess, type any group name, then click Create.
6. Perform the action through your API again.
7. Done.
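Roughly the same attachment can be done from code; a sketch using the aws-sdk IAM client, with a placeholder user name (this attaches the managed policy directly to the user rather than via a group as in the console steps above):

const AWS = require('aws-sdk');
const iam = new AWS.IAM();

iam.attachUserPolicy({
    UserName: 'testuser', // placeholder
    PolicyArn: 'arn:aws:iam::aws:policy/AmazonS3FullAccess'
}, (err) => {
    if (err) console.error(err);
    else console.log('AmazonS3FullAccess attached');
});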
"code":"AccessDenied","region":null,"time":"2020-05-24T05:20:56.219Z","requestId": ...
I applied the policy below in the S3 AWS console (Permissions tab > Bucket policy editor) to get rid of the above error:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::<IAM-user-ID>:user/testuser"
            },
            "Action": [
                "s3:ListBucket",
                "s3:ListBucketVersions",
                "s3:GetBucketLocation",
                "s3:Get*",
                "s3:Put*"
            ],
            "Resource": "arn:aws:s3:::srcbucket"
        }
    ]
}
In my case, I updated the variable holding the S3 bucket name in the .env file but didn't update the variable in the program, so the program received an undefined value for the bucket name, which caused it to throw an Access Denied error.
So make sure you are using the correct bucket name, and the correct variable name if you are storing the bucket name in a variable.
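A cheap guard turns that mistake into an immediate, readable failure instead of a confusing AccessDenied later; a sketch using the IMAGINATOR_BUCKET variable from the question's code:

const bucket = process.env.IMAGINATOR_BUCKET;
if (!bucket) {
    // Fail fast instead of sending Bucket: undefined to S3
    throw new Error('IMAGINATOR_BUCKET is not set - check your .env file');
}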
I had the same error and it was because the file I was trying to access was not in the bucket. So make sure you are using the right bucket name and that the name of the file you are looking for is exactly the same as the one that exists in that bucket.
https://www.diffchecker.com/diff is a good tool to look for differences in strings
