I'm trying to create an S3 bucket and immediately assign a lambda notification event to it.
Here's the node test script I wrote:
const aws = require('aws-sdk');
const uuidv4 = require('uuid/v4');
aws.config.update({
accessKeyId: 'key',
secretAccessKey:'secret',
region: 'us-west-1'
});
const s3 = new aws.S3();
const params = {
Bucket: `bucket-${uuidv4()}`,
ACL: "private",
CreateBucketConfiguration: {
LocationConstraint: 'us-west-1'
}
};
s3.createBucket(params, function (err, data) {
if (err) {
throw err;
} else {
const bucketUrl = data.Location;
const bucketNameRegex = /bucket-[a-z0-9\-]+/;
const bucketName = bucketNameRegex.exec(bucketUrl)[0];
const params = {
Bucket: bucketName,
NotificationConfiguration: {
LambdaFunctionConfigurations: [
{
Id: `lambda-upload-notification-${bucketName}`,
LambdaFunctionArn: 'arn:aws:lambda:us-west-1:xxxxxxxxxx:function:respondS3Upload',
Events: ['s3:ObjectCreated:CompleteMultipartUpload']
},
]
}
};
// Throws "Unable to validate the following destination configurations" until an event is manually added and deleted from the bucket in the AWS UI Console
s3.putBucketNotificationConfiguration(params, function(err, data) {
if (err) {
console.error(err);
console.error(this.httpResponse.body.toString());
} else {
console.log(data);
}
});
}
});
The creation works fine but calling s3.putBucketNotificationConfiguration from the aws-sdk throws:
{ InvalidArgument: Unable to validate the following destination configurations
at Request.extractError ([...]/node_modules/aws-sdk/lib/services/s3.js:577:35)
at Request.callListeners ([...]/node_modules/aws-sdk/lib/sequential_executor.js:105:20)
at Request.emit ([...]/node_modules/aws-sdk/lib/sequential_executor.js:77:10)
at Request.emit ([...]/node_modules/aws-sdk/lib/request.js:683:14)
at Request.transition ([...]/node_modules/aws-sdk/lib/request.js:22:10)
at AcceptorStateMachine.runTo ([...]/node_modules/aws-sdk/lib/state_machine.js:14:12)
at [...]/node_modules/aws-sdk/lib/state_machine.js:26:10
at Request.<anonymous> ([...]/node_modules/aws-sdk/lib/request.js:38:9)
at Request.<anonymous> ([...]/node_modules/aws-sdk/lib/request.js:685:12)
at Request.callListeners ([...]/node_modules/aws-sdk/lib/sequential_executor.js:115:18)
message: 'Unable to validate the following destination configurations',
code: 'InvalidArgument',
region: null,
time: 2017-11-10T02:55:43.004Z,
requestId: '9E1CB35811ED5828',
extendedRequestId: 'tWcmPfrAu3As74M/0sJL5uv+pLmaD4oBJXwjzlcoOBsTBh99iRAtzAloSY/LzinSQYmj46cwyfQ=',
cfId: undefined,
statusCode: 400,
retryable: false,
retryDelay: 4.3270874729153475 }
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>InvalidArgument</Code>
<Message>Unable to validate the following destination configurations</Message>
<ArgumentName1>arn:aws:lambda:us-west-1:xxxxxxxxxx:function:respondS3Upload, null</ArgumentName1>
<ArgumentValue1>Not authorized to invoke function [arn:aws:lambda:us-west-1:xxxxxxxxxx:function:respondS3Upload]</ArgumentValue1>
<RequestId>9E1CB35811ED5828</RequestId>
<HostId>tWcmPfrAu3As74M/0sJL5uv+pLmaD4oBJXwjzlcoOBsTBh99iRAtzAloSY/LzinSQYmj46cwyfQ=</HostId>
</Error>
I've run it with a role assigned to the Lambda that has what I think are all the policies it needs, though I could be missing something. I'm using my root access keys to run this script.
I thought it might be a timing issue where S3 needs time to create the bucket before the event can be added, but I've waited a while, hardcoded the bucket name, and run my script again, and it throws the same error.
The weird thing is that if I create the event hook in the S3 UI and immediately delete it, my script works if I hardcode that bucket name into it. It seems like creating the event in the UI adds some needed permission, but I'm not sure what that would be in the SDK or in the console UI.
Any thoughts or things to try? Thanks for your help.
You are getting this message because S3 is not authorized to invoke your Lambda function.
According to the AWS documentation, there are two types of permissions required:
Permissions for your Lambda function to invoke services
Permissions for Amazon S3 to invoke your Lambda function
You should create an object of type 'AWS::Lambda::Permission' and it should look similar to this:
{
"Version": "2012-10-17",
"Id": "default",
"Statement": [
{
"Sid": "<optional>",
"Effect": "Allow",
"Principal": {
"Service": "s3.amazonaws.com"
},
"Action": "lambda:InvokeFunction",
"Resource": "<ArnToYourFunction>",
"Condition": {
"StringEquals": {
"AWS:SourceAccount": "<YourAccountId>"
},
"ArnLike": {
"AWS:SourceArn": "arn:aws:s3:::<YourBucketName>"
}
}
}
]
}
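If you prefer to set this up programmatically, here is a minimal sketch (not the exact code from this answer) using the JS SDK v2's lambda.addPermission, with the same placeholder values as the policy above:
const AWS = require('aws-sdk');
const lambda = new AWS.Lambda({ region: 'us-west-1' });

// Adds a statement to the function's resource-based policy allowing S3 to invoke it.
lambda.addPermission({
  FunctionName: '<ArnToYourFunction>',
  StatementId: 's3-invoke-permission',        // any unique id
  Action: 'lambda:InvokeFunction',
  Principal: 's3.amazonaws.com',
  SourceAccount: '<YourAccountId>',
  SourceArn: 'arn:aws:s3:::<YourBucketName>', // bucket ARN, without a trailing /*
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data);
});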
Finally looked at this again after a year. This was a hackathon project from last year that we revisited. @davor.obilinovic's answer was very helpful in pointing me to the Lambda permission I needed to add. It still took me a little while to figure out exactly what it needed to look like.
Here are the AWS JavaScript SDK and Lambda API docs:
https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Lambda.html#addPermission-property
https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html
The JS SDK docs have this line:
SourceArn: "arn:aws:s3:::examplebucket/*",
I couldn't get it working for the longest time and was still getting the Unable to validate the following destination configurations error.
Changing it to
SourceArn: "arn:aws:s3:::examplebucket",
fixed that issue. The /* was apparently wrong and I should have looked at the answer I got here more closely but was trying to follow the AWS docs.
After developing for a while and creating lots of buckets, Lambda permissions, and S3 Lambda notifications, calling addPermission started throwing The final policy size (...) is bigger than the limit (20480). Adding a new, individual permission for each bucket appends it to the Lambda function policy, and apparently that policy has a maximum size.
The policy doesn't seem to be editable in the AWS Management Console, so I had fun deleting each entry with the SDK. I copied the policy JSON, pulled the Sids out, and called removePermission in a loop (which threw rate-limit errors, so I had to run it many times).
Finally I discovered that omitting the SourceArn key lets any S3 bucket (in the specified source account) invoke the Lambda.
Here's my final code using the SDK to add the permission I needed. I just ran this once for my function.
const aws = require('aws-sdk');
aws.config.update({
accessKeyId: process.env.AWS_ACCESS,
secretAccessKey: process.env.AWS_SECRET,
region: process.env.AWS_REGION,
});
// Creates Lambda Function Policy which must be created once for each Lambda function
// Must be done before calling s3.putBucketNotificationConfiguration(...)
function createLambdaPermission() {
const lambda = new aws.Lambda();
const params = {
Action: 'lambda:InvokeFunction',
FunctionName: process.env.AWS_LAMBDA_ARN,
Principal: 's3.amazonaws.com',
SourceAccount: process.env.AWS_ACCOUNT_ID,
StatementId: `example-S3-permission`,
};
lambda.addPermission(params, function (err, data) {
if (err) {
console.log(err);
} else {
console.log(data);
}
});
}
If it is still useful for someone, this is how I add the permission to the Lambda function using Java:
AWSLambda client = AWSLambdaClientBuilder.standard().withRegion(clientRegion).build();
AddPermissionRequest requestLambda = new AddPermissionRequest()
.withFunctionName("XXXXX")
.withStatementId("XXXXX")
.withAction("lambda:InvokeFunction")
.withPrincipal("s3.amazonaws.com")
.withSourceArn("arn:aws:s3:::XXXXX" )
.withSourceAccount("XXXXXX");
client.addPermission(requestLambda);
Please check
https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/lambda/AWSLambda.html#addPermission-com.amazonaws.services.lambda.model.AddPermissionRequest-
The console is another way to allow S3 to invoke a Lambda:
Note
When you add a trigger to your function with the Lambda console, the
console updates the function's resource-based policy to allow the
service to invoke it. To grant permissions to other accounts or
services that aren't available in the Lambda console, use the AWS CLI.
So you just need to add and configure an S3 trigger for your Lambda from the AWS console:
https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html
For me it was Lambda expecting permission over the entire bucket, not the bucket plus keys (i.e. a SourceArn without the trailing /*).
It may be helpful to look here: AWS Lambda : creating the trigger
There's a rather unclear error when you have conflicting events in a bucket. You need to purge the other events to create a new one.
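As a minimal sketch (aws-sdk v2; the bucket name is a placeholder), you can wipe a bucket's existing event notifications by putting an empty configuration before adding the new one:
const AWS = require('aws-sdk');
const s3 = new AWS.S3({ region: 'us-west-1' });

// An empty NotificationConfiguration removes all existing event notifications.
s3.putBucketNotificationConfiguration({
  Bucket: 'my-bucket', // placeholder
  NotificationConfiguration: {}
}, (err, data) => {
  if (err) console.error(err);
  else console.log('Notifications cleared', data);
});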
You have to add an S3 resource-based policy on the Lambda too. From your Lambda, go to Configuration > Permissions > Resource-based policy.
Explained in more detail here.
If the accepted answer by @davor.obilinovic (https://stackoverflow.com/a/47674337) still doesn't fix it, note that you'll get this error even if an existing event notification is now invalid.
For example, let's say you configured bucketA/prefix1 to trigger lambda1 and bucketA/prefix2 to trigger lambda2. After a while you decide to delete lambda2 but don't remove the event notification for bucketA/prefix2.
Now if you try to configure bucketA/prefix3 to trigger lambda3, you'll see the error "Unable to validate the following destination configurations" even though you are only trying to add lambda3 and lambda3 is configured correctly, as @davor.obilinovic answered.
Additional context:
The reason for this behavior is that AWS does not offer an "add event notification" API. There is only a "put bucket notification" call, which takes the complete list of all the old event notifications plus the new one we want to add. So each time we want to add an event notification, we have to send the entire list, and AWS validates the entire list. It would have been easier/clearer had the error message specified which "following destination" it was referring to.
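A minimal sketch of that read-modify-write pattern (aws-sdk v2; the bucket, prefix, and Lambda ARN below are placeholders from the example above):
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

// Fetch the complete existing configuration, append the new entry, and put the
// whole list back, since there is no "add a single notification" call.
s3.getBucketNotificationConfiguration({ Bucket: 'bucketA' }, (err, config) => {
  if (err) throw err;
  const updated = {
    ...config, // keep existing topic/queue/lambda configurations
    LambdaFunctionConfigurations: [
      ...(config.LambdaFunctionConfigurations || []),
      {
        Id: 'lambda3-on-prefix3',
        LambdaFunctionArn: '<lambda3Arn>', // placeholder
        Events: ['s3:ObjectCreated:*'],
        Filter: { Key: { FilterRules: [{ Name: 'prefix', Value: 'prefix3/' }] } },
      },
    ],
  };
  s3.putBucketNotificationConfiguration(
    { Bucket: 'bucketA', NotificationConfiguration: updated },
    (putErr) => { if (putErr) console.error(putErr); }
  );
});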
For me it was a totally different issue causing the "Unable to validate the following destination configurations" error.
Apparently, there was an old Lambda function on the same bucket that had been dropped a while before. However, AWS does not always remove the event notification from the S3 bucket, even if the old Lambda and its trigger are long gone.
This causes the conflict and the strange error message.
Resolution -
Navigate to the S3 bucket => Properties => Event notifications,
and drop any old remaining events still defined.
After this, everything went back to normal and worked like a charm.
Good luck!
I faced the same com.amazonaws.services.s3.model.AmazonS3Exception: Unable to validate the following destination configurations error when I tried to execute putBucketNotificationConfiguration.
Upon checking around, I found that every time you update the bucket notification configuration, AWS runs a test notification check on all the existing notification configurations. If any of these tests fail, for example because you removed the destination Lambda or SNS topic of an older configuration, AWS fails the entire bucket notification configuration request with the above exception.
To address this, either identify and fix the configuration that is failing the test, or remove all the existing configurations in the bucket (if that is acceptable) using aws s3api put-bucket-notification-configuration --bucket=myBucketName --notification-configuration="{}", and then try updating the bucket configuration.
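To find the offending entry, here is a minimal sketch (aws-sdk v2; the bucket name is a placeholder) that lists the current Lambda notification configurations and checks whether each target function still exists:
const AWS = require('aws-sdk');
const s3 = new AWS.S3();
const lambda = new AWS.Lambda();

s3.getBucketNotificationConfiguration({ Bucket: 'myBucketName' }, (err, config) => {
  if (err) throw err;
  (config.LambdaFunctionConfigurations || []).forEach((cfg) => {
    // getFunction fails if the target Lambda no longer exists.
    lambda.getFunction({ FunctionName: cfg.LambdaFunctionArn }, (lambdaErr) => {
      if (lambdaErr) {
        console.log(`Stale notification ${cfg.Id}: ${cfg.LambdaFunctionArn} (${lambdaErr.code})`);
      }
    });
  });
});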
This problem has been driving me mad for 2 days now.
I am trying to run a NodeJS (NestJS) application in a Docker Container.
The application does some things with AWS SDK S3 (v3).
Code
To get the Client I use the following code:
private client = new S3Client({
credentials: fromIni({
profile: 'default',
filepath: '~/.aws/credentials',
configFilepath: '~/.aws/config',
}),
region: this.bucketRegion,
});
Then I try to get all S3 objects:
const command = new ListObjectsCommand({
// eslint-disable-next-line @typescript-eslint/naming-convention
Bucket: CONSTANTS.FILES.S3.BUCKET,
});
const filesInS3Response = await this.client.send(command);
const filesInS3 = filesInS3Response.Contents;
Error Message
When I start the Docker Container, and query this endpoint, I get the following error in docker-compose logs:
[Nest] 1 - 02/16/2023, 11:40:15 AM ERROR [ExceptionsHandler] The specified key does not exist.
NoSuchKey: The specified key does not exist.
at deserializeAws_restXmlNoSuchKeyResponse (/usr/src/app/node_modules/@aws-sdk/client-s3/dist-cjs/protocols/Aws_restXml.js:6155:23)
at deserializeAws_restXmlGetObjectAttributesCommandError (/usr/src/app/node_modules/@aws-sdk/client-s3/dist-cjs/protocols/Aws_restXml.js:4450:25)
at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
at async /usr/src/app/node_modules/@aws-sdk/client-s3/node_modules/@aws-sdk/middleware-serde/dist-cjs/deserializerMiddleware.js:7:24
at async /usr/src/app/node_modules/@aws-sdk/client-s3/node_modules/@aws-sdk/middleware-signing/dist-cjs/middleware.js:14:20
at async /usr/src/app/node_modules/@aws-sdk/client-s3/node_modules/@aws-sdk/middleware-retry/dist-cjs/retryMiddleware.js:27:46
at async /usr/src/app/node_modules/@aws-sdk/client-s3/node_modules/@aws-sdk/middleware-logger/dist-cjs/loggerMiddleware.js:5:22
at async AdminS3FilesService.showS3Files (/usr/src/app/dist/src/admin/admin_s3files.service.js:57:37)
Dockerfile
The relevant part from the Dockerfile:
RUN mkdir -p /root/.aws
COPY --from=builder /root/.aws/credentials /root/.aws/credentials
COPY --from=builder /root/.aws/config /root/.aws/config
RUN ls -la /root/.aws
RUN whoami
And when I look in the running Container, there is indeed a credentials and config file in the ~/.aws directory.
They look like:
(Credentials)
[default]
aws_access_key_id=AKIA3UHGDIBNT3MSM2WN
aws_secret_access_key=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
And config:
[profile default]
region=eu-central-1
Full code (NestJS)
@Injectable()
export class AdminS3FilesService {
constructor(
private readonly configService: ConfigService,
private filesService: FilesService,
) {}
private readonly logger = new Logger(AdminS3FilesService.name);
private bucketRegion = this.configService.get('AWS_S3_REGION');
private client = new S3Client({
credentials: fromIni({
profile: 'default',
filepath: '~/.aws/credentials',
configFilepath: '~/.aws/config',
}),
region: this.bucketRegion,
});
async showS3Objects(): Promise<any> {
this.logger.log(
`In showS3Objects with bucket [${CONSTANTS.FILES.S3.BUCKET}]`,
);
const messages: any[] = [];
const command = new ListObjectsCommand({
// eslint-disable-next-line @typescript-eslint/naming-convention
Bucket: CONSTANTS.FILES.S3.BUCKET,
});
const filesInS3Response = await this.client.send(command);
const filesInS3 = filesInS3Response.Contents;
for (const f of filesInS3) {
messages.push(
`Bucket = ${CONSTANTS.FILES.S3.BUCKET}; Key = ${f.Key}; Size = ${f.Size}`,
);
}
return {
messages: messages,
}; // <-- This is line 57 in the code
}
}
I've tried many different things, like renaming the profile (to something other than 'default'), leaving out the config file, and leaving out the filepath in the code (since ~/.aws/credentials is the default).
But no luck with any of that.
What am I doing wrong here?
Does anybody have AWS SDK S3 V3 running in a Docker Container (NodeJS/NestJS) and how did you do the credentials?
Hope somebody can help me.
Solution
Thanks to Frank I've found the solution:
Just ignore that fromIni method and specify the keys directly in the call to S3Client.
The method of specifying the keys in the call was not in the docs (at least, I haven't found it in the v3 docs).
Code :
private client = new S3Client({
credentials: {
accessKeyId: this.configService.get('AWS_S3_ACCESS_KEY_ID'),
secretAccessKey: this.configService.get('AWS_S3_SECRET_ACCESS_KEY'),
},
region: this.bucketRegion,
});
The error message you're seeing suggests that the specified key does not exist in your S3 bucket. However, the code you've provided doesn't include any reference to a specific key or object in your bucket. Instead, you're simply trying to list all objects in the bucket.
The issue may be related to the credentials you're using to authenticate with AWS S3. Here are a few things you can try:
Check that the profile you're using in your credentials file has the necessary permissions to list objects in the S3 bucket. You can do this in the AWS Management Console by navigating to the IAM service, selecting "Users" from the left-hand menu, and then selecting the user associated with the access key ID in your credentials file. From there, you can review the user's permissions and make sure they have the necessary permissions to list objects in the S3 bucket.
Try providing your access key ID and secret access key directly in the S3Client constructor instead of using a profile. For example:
private client = new S3Client({
credentials: {
accessKeyId: 'YOUR_ACCESS_KEY_ID',
secretAccessKey: 'YOUR_SECRET_ACCESS_KEY',
},
region: this.bucketRegion,
});
If this works, it may indicate an issue with your profile configuration.
Check that the region specified in your S3Client constructor matches the region of your S3 bucket.
Check that your Docker container is able to access your credentials file. You can try running a command inside the container to check if the file exists and is readable, for example:
docker exec -it CONTAINER_NAME ls -la /root/.aws/credentials
If the file isn't accessible, you may need to adjust the permissions on the file or the directory containing it.
I hope these suggestions help you solve the issue. Let me know if you have any further questions!
If you have confirmed that the credentials are correct and accessible in the container, the issue may be related to the way that you are setting the region. You are setting the region using the bucketRegion variable, which you are getting from the ConfigService. Make sure that the value of AWS_S3_REGION that you are getting from the ConfigService is correct.
You can also try setting the region directly in the S3 client constructor like this:
private client = new S3Client({
credentials: fromIni({
profile: 'default',
filepath: '~/.aws/credentials',
configFilepath: '~/.aws/config',
}),
region: 'eu-central-1',
});
Replace 'eu-central-1' with the actual region you are using.
If the issue still persists, you can try adding some debug logs to your code to see where the issue is happening. For example, you can log the response from await this.client.send(command) to see if it contains any helpful information.
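A minimal sketch of such a debug wrapper (reusing this.client and command from the question's code; the log labels are just placeholders):
try {
  const filesInS3Response = await this.client.send(command);
  // $metadata is present on every SDK v3 response.
  console.log('HTTP status:', filesInS3Response.$metadata.httpStatusCode);
  console.log('Object count:', (filesInS3Response.Contents || []).length);
} catch (err) {
  console.error('ListObjects failed:', err.name, err.message);
  throw err;
}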
I have the following code:
const sqs = new aws.SQS({region: 'us-east-1', apiVersion: '2012-11-05'});
...
const result = await sqs.sendMessage({
MessageBody: JSON.stringify(item),
QueueUrl: QUEUE_URL,
MessageGroupId: groupId,
MessageDeduplicationID: deduplicationId,
}).promise();
and I'm receiving this error:
Unexpected key 'MessageDeduplicationID' found in params
My aws sdk in dev dependencies is "aws-sdk": "^2.692.0",
I've tried changing the apiVersion to 2020-01-09 and also tried passing no apiVersion, but I get the same result.
By the way, MessageDeduplicationID is part of an update from November 25, 2019 that added support for SQS triggers on FIFO queues, as stated in that doc. But I can't seem to make it work :(
Please help me. Thanks
The key or property names in the params object for all AWS SDK functions are case sensitive. In your case you supplied MessageDeduplicationID, but as per the documentation it should be MessageDeduplicationId, hence the validation error about an unexpected key.
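For reference, a minimal sketch of the corrected call (aws-sdk v2; item, groupId, deduplicationId, and QUEUE_URL come from the question):
const result = await sqs.sendMessage({
  MessageBody: JSON.stringify(item),
  QueueUrl: QUEUE_URL,
  MessageGroupId: groupId,
  MessageDeduplicationId: deduplicationId, // note "Id", not "ID"
}).promise();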
Premise: I'm new to cloud computing in general, AWS specifically, and REST APIs, and am trying to cobble together a "big-picture" understanding.
I am working with LocalStack - which, by my understanding, simulates the real AWS by responding identically to (a subset of) the AWS API if you specify the endpoint address/port that LocalStack listens at.
Lastly, I've been working from this tutorial: https://dev.to/goodidea/how-to-fake-aws-locally-with-localstack-27me
Using the noted tutorial, and per its guidance, I successfully created an S3 bucket using the AWS CLI.
To demonstrate uploading a local file to the S3 bucket, though, the tutorial switches to Node.js, which I think demonstrates the AWS Node.js SDK:
// aws.js
// This code segment comes from https://dev.to/goodidea/how-to-fake-aws-locally-with-localstack-27me
//
const AWS = require('aws-sdk')
require('dotenv').config()
const credentials = {
accessKeyId: process.env.AWS_ACCESS_KEY_ID,
secretAccessKey: process.env.AWS_SECRET_KEY,
}
const useLocal = process.env.NODE_ENV !== 'production'
const bucketName = process.env.AWS_BUCKET_NAME
const s3client = new AWS.S3({
credentials,
/**
* When working locally, we'll use the Localstack endpoints. This is the one for S3.
* A full list of endpoints for each service can be found in the Localstack docs.
*/
endpoint: useLocal ? 'http://localhost:4572' : undefined,
/**
* Including this option gets localstack to more closely match the defaults for
* live S3. If you omit this, you will need to add the bucketName to the `Key`
* property in the upload function below.
*
* see: https://github.com/localstack/localstack/issues/1180
*/
s3ForcePathStyle: true,
})
const uploadFile = async (data, fileName) =>
new Promise((resolve) => {
s3client.upload(
{
Bucket: bucketName,
Key: fileName,
Body: data,
},
(err, response) => {
if (err) throw err
resolve(response)
},
)
})
module.exports = uploadFile
// test-upload.js
// This code segment comes from https://dev.to/goodidea/how-to-fake-aws-locally-with-localstack-27me
//
const fs = require('fs')
const path = require('path')
const uploadFile = require('./aws')
const testUpload = () => {
const filePath = path.resolve(__dirname, 'test-image.jpg')
const fileStream = fs.createReadStream(filePath)
const now = new Date()
const fileName = `test-image-${now.toISOString()}.jpg`
uploadFile(fileStream, fileName).then((response) => {
console.log(":)")
console.log(response)
}).catch((err) => {
console.log(":|")
console.log(err)
})
}
testUpload()
Invocation :
$ node test-upload.js
:)
{ ETag: '"c6b9e5b1863cd01d3962c9385a9281d"',
Location: 'http://demo-bucket.localhost:4572/demo-bucket/test-image-2019-03-11T21%3A22%3A43.511Z.jpg',
key: 'demo-bucket/test-image-2019-03-11T21:22:43.511Z.jpg',
Key: 'demo-bucket/test-image-2019-03-11T21:22:43.511Z.jpg',
Bucket: 'demo-bucket' }
I do not have prior experience with node.js, but my understanding of the above code is that it uses the AWS.S3.upload() AWS node.js SDK method to copy a local file to a S3 bucket, and prints the HTTP response (is that correct?).
Question: I observe that the HTTP response includes a "Location" key whose value looks like a URL I can copy/paste into a browser to view the image directly from the S3 bucket; is there a way to get this location using the AWS CLI?
Am I correct to assume that AWS CLI commands are analogues of the AWS SDK?
I tried uploading a file to my S3 bucket using the aws s3 cp CLI command, which I thought would be analogous to the AWS.S3.upload() method above, but it didn't generate any output, and I'm not sure what I should have done - or should do - to get a Location the way the HTTP response to the AWS.S3.upload() AWS node SDK method did.
$ aws --endpoint-url=http://localhost:4572 s3 cp ./myFile.json s3://myBucket/myFile.json
upload: ./myFile.json to s3://myBucket/myFile.json
Update: continued study makes me now wonder whether it is implicit that a file uploaded to a S3 bucket by any means - whether by CLI command aws s3 cp or node.js SDK method AWS.S3.upload(), etc. - can be accessed at http://<bucket_name>.<endpoint_without_http_prefix>/<bucket_name>/<key> ? E.g. http://myBucket.localhost:4572/myBucket/myFile.json?
If this is implicit, I suppose you could argue it's unnecessary to ever be given the "Location" as in that example node.js HTTP response.
Grateful for guidance - I hope it's obvious how painfully under-educated I am on all the involved technologies.
Update 2: It looks like the correct url is <endpoint>/<bucket_name>/<key>, e.g. http://localhost:4572/myBucket/myFile.json.
AWS CLI and the different SDKs offer similar functionality but some add extra features and some format the data differently. It's safe to assume that you can do what the CLI does with the SDK and vice-versa. You might just have to work for it a little bit sometimes.
As you said in your update, not every file that is uploaded to S3 is publicly available. Buckets have policies and files have permissions. Files are only publicly available if the policies and permissions allow it.
If the file is public then you can just construct the URL as you described. If you have the bucket setup for website hosting, you can also use the domain you setup.
But if the file is not public, or you just want a temporary URL, you can use aws s3 presign s3://myBucket/myFile.json. This will give you a URL that anyone can use to download the file with the permissions of whoever executed the command. The URL will be valid for one hour unless you choose a different time with --expires-in. The SDK has similar functionality as well, but you have to work a tiny bit harder to use it.
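A minimal sketch of the SDK counterpart of aws s3 presign (aws-sdk v2; bucket and key are placeholders, Expires is in seconds):
const AWS = require('aws-sdk');
const s3 = new AWS.S3(); // add endpoint/s3ForcePathStyle here if targeting LocalStack

// getSignedUrl returns a presigned GET URL signed with the caller's credentials.
const url = s3.getSignedUrl('getObject', {
  Bucket: 'myBucket',
  Key: 'myFile.json',
  Expires: 3600, // one hour
});
console.log(url);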
Note: Starting with version 0.11.0, all APIs are exposed via a single edge service, which is accessible on http://localhost:4566 by default.
Considering that you've added some files to your bucket
aws --endpoint-url http://localhost:4566 s3api list-objects-v2 --bucket mybucket
{
"Contents": [
{
"Key": "blog-logo.png",
"LastModified": "2020-12-28T12:47:04.000Z",
"ETag": "\"136f0e6acf81d2d836043930827d1cc0\"",
"Size": 37774,
"StorageClass": "STANDARD"
}
]
}
you should be able to access your file with
http://localhost:4566/mybucket/blog-logo.png
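The same listing can be done with the JS SDK against the LocalStack edge endpoint; a minimal sketch (aws-sdk v2; the bucket name and dummy credentials are placeholders):
const AWS = require('aws-sdk');
const s3 = new AWS.S3({
  endpoint: 'http://localhost:4566',
  s3ForcePathStyle: true, // path-style URLs, as LocalStack expects
  accessKeyId: 'test',
  secretAccessKey: 'test',
  region: 'us-east-1',
});

s3.listObjectsV2({ Bucket: 'mybucket' }, (err, data) => {
  if (err) throw err;
  data.Contents.forEach((obj) =>
    console.log(`http://localhost:4566/mybucket/${obj.Key}`)
  );
});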
I use this code within a Lambda; it complies with everything I read on Stack Overflow and in the AWS SDK documentation.
However, it neither returns anything nor throws an error. The code simply gets stuck on s3.getObject(params).promise(), so the Lambda function runs into a timeout, even after more than 30 seconds. The file I try to fetch is actually only 25 KB.
Any idea why this happens?
var AWS = require('aws-sdk');
var s3 = new AWS.S3({httpOptions: {timeout: 3000}});
async function getObject(bucket, objectKey) {
try {
const params = {
Bucket: bucket,
Key: objectKey
}
console.log("Trying to fetch " + objectKey + " from bucket " + bucket)
const data = await s3.getObject(params).promise()
console.log("Done loading image from S3")
return data.Body.toString('utf-8')
} catch (e) {
console.log("error loading from S3")
throw new Error(`Could not retrieve file from S3: ${e.message}`)
}
}
When testing the function, I receive the following timeout:
START RequestId: 97782eac-019b-4d46-9e1e-3dc36ad87124 Version: $LATEST
2019-03-19T07:51:30.225Z 97782eac-019b-4d46-9e1e-3dc36ad87124 Trying to fetch public-images/low/ZARGES_41137_PROD_TECH_ST_LI.jpg from bucket zarges-pimdata-test
2019-03-19T07:51:54.979Z 97782eac-019b-4d46-9e1e-3dc36ad87124 error loading from S3
2019-03-19T07:51:54.981Z 97782eac-019b-4d46-9e1e-3dc36ad87124 {"errorMessage":"Could not retrieve file from S3: Connection timed out after 3000ms","errorType":"Error","stackTrace":["getObject (/var/task/index.js:430:15)","","process._tickDomainCallback (internal/process/next_tick.js:228:7)"]}
END RequestId: 97782eac-019b-4d46-9e1e-3dc36ad87124
REPORT RequestId: 97782eac-019b-4d46-9e1e-3dc36ad87124 Duration: 24876.90 ms
Billed Duration: 24900 ms Memory Size: 512 MB Max Memory Used: 120 MB
The image I am fetching is actually publicly available:
https://s3.eu-central-1.amazonaws.com/zarges-pimdata-test/public-images/low/ZARGES_41137_PROD_TECH_ST_LI.jpg
const data = (await (s3.getObject(params).promise())).Body.toString('utf-8')
If your Lambda function is associated with a VPC, it loses internet access, which is required to reach S3. However, instead of following the Lambda warning that says "Associate a NAT" etc., you can create an S3 endpoint under VPC > Endpoints, and your Lambda function will work as expected, with no need to manually set up internet access for your VPC.
https://aws.amazon.com/blogs/aws/new-vpc-endpoint-for-amazon-s3/
The default HTTP timeout of the AWS SDK is 120000 ms. If your Lambda's timeout is shorter than that, you will never receive the actual error.
Either set the SDK's HTTP timeout to something shorter than your Lambda's timeout
var AWS = require('aws-sdk');
var s3 = new AWS.S3({httpOptions: {timeout: 3000}});
or extend the timeout of your Lambda.
This issue is definitely connection-related.
Check your VPC settings, as they are likely blocking the Lambda's connection to the internet (AWS managed services such as S3 are reachable only via the internet).
If you are using localstack, make sure SSL is false and s3ForcePathStyle is true.
That was my problem:
new AWS.S3({endpoint: '0.0.0.0:4572', sslEnabled: false, s3ForcePathStyle: true})
More details here
Are you sure you are providing your accessKeyId and secretAccessKey? I was having timeouts with no error message until I added them to the config:
AWS.config.update({ signatureVersion: 'v4', region: "us-east-1",
accessKeyId: secret.accessKeyID,
secretAccessKey: secret.secretAccessKey });
I am using Claudia.js to create Lambda functions and I'm still discovering what it can and can't do. So far I was able to create a Lambda function that connects to AWS DynamoDB and everything works as expected, but when I try adding an API using Claudia API Builder by following this tutorial, the trigger doesn't get added to the Lambda function.
The steps I took after setting up npm and installing the dependencies were:
Step 1: Write this function in main.js:
const AWS = require('aws-sdk'); // Used for DynamoDB
const performance = require('performance-now'); // Used to measure performance
const deepExtend = require('deep-extend'); // Used to deep extend json arrays
const docClient = new AWS.DynamoDB.DocumentClient({region: 'ap-southeast-2'});
const TableName = 'Agent-commands';
var ApiBuilder = require('claudia-api-builder'),
api = new ApiBuilder();
module.exports = api;
api.post('/',function(request){
console.info('request',request);
});
Step 2: Run this command to create the function and API: claudia create --name add-command-for-agent --region ap-southeast-2 --api-module main --timeout 10 --policies policies/*.json
Once I do that I get this in the terminal:
saving configuration
{
"lambda": {
"role": "add-command-for-agent-executor",
"name": "add-command-for-agent",
"region": "ap-southeast-2"
},
"api": {
"id": "l223cd1rl7",
"module": "main",
"url": "https://l223cd1rl7.execute-api.ap-southeast-2.amazonaws.com/latest"
}
}
When I go to that url I get {"message":"Missing Authentication Token"}
When I check the claudia.json file created by the create command, I don't see the URL in there, just the id and module.
When I check the lambda function on the AWS console there are no triggers attached.
Am I doing something wrong or is that a bug?
I've had this issue before and in my case the IAM role did not have the necessary permissions. I'd recheck your IAM role to make sure it has all of the permissions it needs.