I have a Node 4.3 Lambda function in AWS. I want to be able to write a text file to S3 and have read many tutorials about how to integrate with S3. However, all of them are about how to call Lambda functions after writing to S3.
How can I create a text file in S3 from Lambda using Node? Is this possible? Amazon's documentation doesn't seem to cover it.
Yes, it is absolutely possible!
var AWS = require('aws-sdk');

function putObjectToS3(bucket, key, data) {
    var s3 = new AWS.S3();
    var params = {
        Bucket: bucket,
        Key: key,
        Body: data
    };
    s3.putObject(params, function(err, data) {
        if (err) console.log(err, err.stack); // an error occurred
        else console.log(data);               // successful response
    });
}
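For reference, a minimal sketch of using the same putObject pattern from a Lambda handler that signals completion via the callback (the bucket name, key, and body below are placeholders, not part of the original answer):

exports.handler = function(event, context, callback) {
    var s3 = new AWS.S3();
    var params = {
        Bucket: 'my-bucket-name-goes-here', // placeholder
        Key: 'example.txt',                 // placeholder
        Body: 'Hello from Lambda'
    };
    s3.putObject(params, function(err, data) {
        if (err) return callback(err); // fail the invocation on error
        callback(null, data);          // succeed once the upload has finished
    });
};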
Make sure that you give your Lambda function the required write permissions to the target S3 bucket/key path by selecting or updating the IAM role your Lambda executes under.
IAM Statement to add:
{
    "Sid": "Stmt1468366974000",
    "Effect": "Allow",
    "Action": "s3:*",
    "Resource": [
        "arn:aws:s3:::my-bucket-name-goes-here/optional-path-before-allow/*"
    ]
}
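If you prefer not to grant the broad s3:* action, a narrower statement covering just the write shown above might look like this (same bucket ARN assumed; the Sid is arbitrary):

{
    "Sid": "AllowPutToMyBucket",
    "Effect": "Allow",
    "Action": "s3:PutObject",
    "Resource": [
        "arn:aws:s3:::my-bucket-name-goes-here/optional-path-before-allow/*"
    ]
}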
Further reading:
AWS JavaScript SDK
The specific "Put Object" details
After a long time of silent failures with 'Task timed out after X' and no useful error message, I went back to the beginning, to Amazon's default blueprint example, and that worked!
> Lambda > Functions > Create function > Use a blueprint > filter: s3
Here is my tweaked version of the Amazon example:
const aws = require('aws-sdk');
const s3 = new aws.S3({ apiVersion: '2006-03-01' });

async function uploadFileOnS3(fileData, fileName) {
    const params = {
        Bucket: "The-bucket-name-you-want-to-save-the-file-to",
        Key: fileName,
        Body: JSON.stringify(fileData),
    };
    try {
        const response = await s3.upload(params).promise();
        console.log('Response: ', response);
        return response;
    } catch (err) {
        console.log(err);
    }
}
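A minimal sketch of a handler wired to this helper (the event shape and file name below are assumptions; adapt them to however your function is invoked):

exports.handler = async (event) => {
    // Hypothetical payload: persist whatever came in, under a timestamped key
    const result = await uploadFileOnS3(event, `upload-${Date.now()}.json`);
    return result;
};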
IAM statement for the Serverless Framework (serverless.com) - write to a specific S3 bucket
service: YOURSERVICENAME

provider:
  name: aws
  runtime: nodejs8.10
  stage: dev
  region: eu-west-1
  timeout: 60
  iamRoleStatements:
    - Effect: "Allow"
      Action:
        - s3:PutObject
      Resource: "**BUCKETARN**/*"
    - Effect: "Deny"
      Action:
        - s3:DeleteObject
      Resource: "arn:aws:s3:::**BUCKETARN**/*"
You can upload a file to S3 using the
aws-sdk
If you are using an IAM user, then you have to provide an access key and secret key, and make sure you have given the necessary permissions to that IAM user.
var AWS = require('aws-sdk');
AWS.config.update({ accessKeyId: "ACCESS_KEY", secretAccessKey: 'SECRET_KEY' });
var s3bucket = new AWS.S3({ params: { Bucket: 'BUCKET_NAME' } });

function uploadFileOnS3(fileName, fileData) {
    var params = {
        Key: fileName,
        Body: fileData,
    };
    s3bucket.upload(params, function (err, res) {
        if (err)
            console.log("Error in uploading file on s3 due to " + err);
        else
            console.log("File successfully uploaded.");
    });
}
Here I temporarily hard-coded the AWS access and secret key for testing purposes. For best practices, refer to the documentation.
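Inside Lambda you normally omit credentials entirely and rely on the execution role; outside Lambda, a sketch that reads the keys from environment variables instead of hard-coding them (the variable names are the SDK's standard ones):

var AWS = require('aws-sdk');
// The SDK also picks these environment variables up automatically if you skip
// config.update altogether; setting them explicitly is shown only for clarity.
AWS.config.update({
    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY
});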
One more option (export the file as multipartFormData):
React > Node.js (AWS Lambda) > S3 Bucket
https://medium.com/@mike_just_mike/aws-lambda-node-js-export-file-to-s3-4b35c400f484
I have a particular scenario where I am calling an external API which returns a CSV file as the response. My requirement is to send this CSV file to an S3 bucket on the fly, i.e. without downloading the file to temporary storage first.
I am writing the Lambda in Node.js.
Sample pieces of code to do this would be helpful.
Thanks
The Lambda function looks as follows:
const AWS = require('aws-sdk');
const axios = require('axios');

const apiUrl = 'https://sample-videos.com/csv/Sample-Spreadsheet-100-rows.csv';

function putObjectToS3(bucket, key, data) {
    const s3 = new AWS.S3();
    const params = {
        Bucket: bucket,
        Key: key,
        Body: data
    };
    // Return the promise so the async handler can wait for the upload to finish
    return s3.putObject(params).promise();
}

exports.handler = async function(event, context) {
    const response = await axios.get(apiUrl);
    console.log(response.data);
    await putObjectToS3('bucket-name-goes-here', 'file-name.csv', response.data);
    return context.logStreamName;
};
NOTE: Remember to add the IAM permission to the AWS Lambda role's policies:
{
    "Sid": "AnyNameForThePermission",
    "Effect": "Allow",
    "Action": "s3:*",
    "Resource": [
        "arn:aws:s3:::bucket-name-goes-here/*"
    ]
}
PREREQUISITE: Since Axios is not present by default in Lambda, create a layer for it or add it to an existing layer. Check the link for more details.
You will need to download the CSV file to local storage. The S3 API supports copyObject(), which is for copying from one S3 location to another S3 location. But if the source CSV is not in S3, then you'll need to download the CSV file to the Lambda's local storage first.
I'm trying to read from an S3 bucket; currently I have this code:
var AWS = require('aws-sdk');
AWS.config.update({ accessKeyId: 'myAccesID', secretAccessKey: 'superRandomSecretKey', region: 'us-west-2' });
var s3 = new AWS.S3();
var params = {
    Bucket: 'my-bucket',
    Delimiter: '/'
};
s3.listObjects(params, function (err, data) {
    if (err) throw err;
    console.log(data);
});
But I get Access Denied. I know that my named profile works, because I can list my files with the AWS CLI command:
aws s3 ls s3://my-bucket --recursive --profile my-named-profile
So, how can I initialize my AWS SDK client with a named profile?
The recommended way to access S3 from an EC2 instance is through IAM roles for Amazon EC2.
The basic role could just contain the AWS managed policy AmazonS3ReadOnlyAccess.
With the role attached to your instance, you don't need to do anything special for aws-sdk to use it; the SDK will recognize it automatically. Your code could therefore simply be:
var AWS = require('aws-sdk');
var s3 = new AWS.S3();
var params = {
    Bucket: 'my-bucket',
    Delimiter: '/'
};
s3.listObjects(params, function (err, data) {
    if (err) throw err;
    console.log(data);
});
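If you specifically want the SDK to use the named profile from your local ~/.aws/credentials file (for example when running the script outside AWS), it can be loaded explicitly; a minimal sketch:

var AWS = require('aws-sdk');
// Load the named profile instead of the default credentials
var credentials = new AWS.SharedIniFileCredentials({ profile: 'my-named-profile' });
AWS.config.credentials = credentials;

var s3 = new AWS.S3({ region: 'us-west-2' });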
I am new to both Node.js and AWS. I am trying to create a bucket in S3 using Node.js in a Lambda function, and then create folders inside this S3 bucket.
I followed all the questions answered before and tried different iterations of the code, but none of them seem to be working. Following is my code, which executes without giving any issues, yet the bucket and the folders are not getting created.
const AWS = require('aws-sdk');

let s3Client = new AWS.S3({
    accessKeyId: '<access_key_id>',
    secretAccessKey: '<secret_access_key>'
});

var params = {
    Bucket: 'pshycology06'
};

exports.handler = async (event, context, callback) => {
    // call spaces to create the bucket
    s3Client.createBucket(params, function(err, data) {
        if (err) {
            console.log("\r\n[ERROR] : ", err);
        } else {
            console.log("\r\n[SUCCESS] : data = ", data);
        }
    });
};
The code for creating folders inside the Lambda function is as follows:
var AWS = require('aws-sdk');
AWS.config.region = 'us-east-1';
var s3Client = new AWS.S3({ apiVersion: '2006-03-01' });

exports.handler = async (event, context) => {
    let params1 = { Bucket: 'travasko', Key: '2/dir1/dir2', Body: 'body does not matter' };
    s3Client.putObject(params1, function (err, data) {
        if (err) {
            console.log("Error creating the folder: ", err);
        } else {
            console.log("Successfully created a folder on S3");
        }
    });
};
Neither of them works. I have read a lot of documents on this issue and answers to previous questions, but none of them are working for me.
The Lambda function has a timeout of 1 minute. It has the following policies on its IAM role:
1. AmazonRDSFullAccess
2. AmazonS3FullAccess
3. AWSLambdaVPCExecutionRole
The VPC security group is the default one.
Also, when I try to create the same bucket using the following AWS CLI command, it creates the bucket:
aws s3api create-bucket --bucket psychology06 --region us-east-1
I am not sure where I am making a mistake.
Make sure a bucket with the same name is not already present. Please share the log if possible.
You need to chain the .promise() method onto your aws-sdk calls and await them, because you are writing async functions.
await s3Client.createBucket(params).promise();
await s3Client.putObject(params1).promise();
Furthermore, S3 doesn't really have directories, although you may be misled by the way the S3 console displays keys when you add / to your file names. You can read more about it here.
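Putting both points together, a minimal sketch of the corrected handler (reusing the bucket and key names from the question, with error handling kept to a minimum):

const AWS = require('aws-sdk');
const s3Client = new AWS.S3({ apiVersion: '2006-03-01' });

exports.handler = async (event) => {
    // Await each SDK call so the handler does not return before they finish
    await s3Client.createBucket({ Bucket: 'pshycology06' }).promise();
    await s3Client.putObject({
        Bucket: 'pshycology06',
        Key: '2/dir1/dir2/',   // the trailing "/" makes the key show up as a "folder" in the console
        Body: ''
    }).promise();
    return 'done';
};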
As you are new, always try the AWS CLI first (not recommended as a long-term approach, but useful for learning) and then search for the equivalent SDK function while implementing. As your code is async, it won't wait for the callback function to execute, so you can try something like the snippet below. (This is not the actual solution; it just shows how to wait until the callback does its work.)
'use strict';
var AWS = require('aws-sdk');
AWS.config.region = 'us-east-1';
var s3Client = new AWS.S3({ apiVersion: '2006-03-01' });

exports.handler = async (event, context) => {
    let params1 = { Bucket: 'travasko', Key: '2/dir1/dir2', Body: 'body does not matter' };
    try {
        let obj = await something(params1);
        return obj;              // resolves the invocation once the upload has finished
    } catch (err) {
        throw new Error(err);    // fails the invocation if the upload failed
    }
};

function something(params1) {
    // Wrap the callback-style putObject call in a Promise so the handler can await it
    return new Promise((resolve, reject) => {
        s3Client.putObject(params1, function (err, data) {
            if (err) {
                console.log('Error creating the folder:', err);
                reject('error during putObject');
            } else {
                console.log('success' + JSON.stringify(data));
                resolve('success');
            }
        });
    });
}
To your question in the comments:
Hi Vinit, let me give you a little background; the question you have asked is very generic.
Firstly, a VPC is something you create to hold your organization's private and public subnets, which are used to run your EC2 instances or any self-hosted (non-managed) services. Lambda, being a managed service, runs in an AWS-owned VPC: AWS takes your code and Lambda configuration and executes it there.
Now, coming to your question: attach a VPC in your Lambda configuration only if your Lambda needs to use services hosted inside your VPC; otherwise don't. When a VPC is attached, the Lambda still runs in the AWS VPC, so during a cold start it creates an ENI (a network interface in your subnet) to communicate with your VPC. Before re:Invent, an ENI was created for each Lambda; that was why the first invocations took time and the Lambda could time out even though the execution itself was fast. After re:Invent, ENIs are created per subnet per security group, so this is much less of a problem.
So, if you have attached a VPC and the Lambda execution is taking longer or not working as expected, you have to look at how your VPC (configuration, routes, subnets) is set up, and that is very hard to answer because so many parameters are involved unless we debug it. Short answer: do not attach a VPC if your function (code) does not need to talk to any of your own instances in the VPC (usually in a private subnet).
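For reference, attaching a VPC to a Lambda with the Serverless Framework looks roughly like this (the IDs below are placeholders):

provider:
  name: aws
  vpc:
    securityGroupIds:
      - sg-xxxxxxxx
    subnetIds:
      - subnet-xxxxxxxx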
Since you are using an async handler, you have to await the call to s3Client.createBucket and resolve the returned promise (i.e. chain .promise() as shown above).
For creating folders, use a trailing "/", for example "pshycology06/travasko/".
Do post error logs if this doesn't work.
I'm trying to use the basic tutorial to create an S3 bucket, as follows:
var AWS = require('aws-sdk');
AWS.config.loadFromPath('./myawsconfig.json');
AWS.config.update({ region: 'eu-west-1' });

var s3 = new AWS.S3();

s3.client.createBucket({ Bucket: 'pBucket' }, function() {
    var data = { Bucket: 'pBucket', Key: 'myKey', Body: 'Hello!' };
    s3.client.putObject(data, function(err, data) {
        if (err) {
            console.log("Error uploading data: ", err);
        } else {
            console.log("Successfully uploaded data to myBucket/myKey");
        }
    });
});
But I'm receiving the following error
node createbucket.js
Error uploading data: { [NoSuchBucket: The specified bucket does not exist]
message: 'The specified bucket does not exist',
code: 'NoSuchBucket',
name: 'NoSuchBucket',
statusCode: 404,
retryable: false }
I just ran into this problem; apparently the Node.js tutorial code doesn't work as written. I got an error saying the object doesn't have a createBucket method.
This worked:
var AWS = require('aws-sdk');
AWS.config.loadFromPath('./credentials.json');

// Set your region for future requests.
AWS.config.update({ region: 'us-east-1' });

// Create a bucket and put something in it.
var s3 = new AWS.S3();
s3.client.createBucket({ Bucket: 'hackathon-test' }, function() {
    var data = { Bucket: 'hackathon-test', Key: 'myKey', Body: 'Hello!' };
    s3.client.putObject(data, function(err, data) {
        if (err) {
            console.log("Error uploading data: ", err);
        } else {
            console.log("Successfully uploaded data to myBucket/myKey");
        }
    });
});
I had this issue and discovered that my API user didn't have permission to create the bucket.
Slightly more thorough error checking revealed this:
s3.client.createBucket({ Bucket: 'someBucket' }, function(err) {
    if (err) {
        console.log("Error creating bucket: ", err);
    } else {
        console.log("Successfully created bucket 'someBucket'");
    }
    // ...
});
According to the AWS S3 bucket name restrictions, your bucket name shouldn't contain any uppercase letters, so 'pBucket' is invalid.
http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
The rules for DNS-compliant bucket names are:
- Bucket names must be at least 3 and no more than 63 characters long.
- Bucket names must be a series of one or more labels. Adjacent labels are separated by a single period (.). Bucket names can contain lowercase letters, numbers, and hyphens. Each label must start and end with a lowercase letter or a number.
- Bucket names must not be formatted as an IP address (e.g., 192.168.5.4).
- When using virtual hosted-style buckets with SSL, the SSL wildcard certificate only matches buckets that do not contain periods. To work around this, use HTTP or write your own certificate verification logic.
A couple of pointers that I missed and someone may find useful:
If you set the region as part of the S3 object, e.g. var s3 = new AWS.S3({region: 'us-west-1'});, then the call will fail (in my experience).
You can therefore set the region via either:
a) AWS.config.update({ region: 'eu-west-1' });
b) as part of the params on createBucket:
s3.createBucket({
    Bucket: bucketName,
    CreateBucketConfiguration: {
        LocationConstraint: "eu-west-1"
    }
}, function () {
    // ...
});
Also, watch out for capitals or underscores in the bucket name, as that took an hour of my life too (bucket names must be DNS compliant).
I've tried using aws-sdk and knox, and I get status code 301 when trying to upload images, with the message 'The bucket you are attempting to access must be addressed using the specified endpoint. Please send all future requests to this endpoint.' This works in PHP.
AWS.config.loadFromPath(__dirname + '/config/config.json');

fs.readFile(source, function (err, data) {
    var s3 = new AWS.S3();
    s3.client.createBucket({ Bucket: 'mystuff' }, function() {
        var d = {
            Bucket: 'mystuff',
            Key: 'img/test.jpg',
            Body: data,
            ACL: 'public-read'
        };
        s3.client.putObject(d, function(err, res) {
            if (err) {
                console.log("Error uploading data: ", err);
                callback(err);
            } else {
                console.log("Successfully uploaded data to myBucket/myKey");
                callback(res);
            }
        });
    });
});
I actually solved this problem. In your config you have to have a region; since my bucket was "US Standard", I left my region blank and it worked.
config.json:
{ "accessKeyId": "yourAccessKey", "secretAccessKey": "yourSecretKey", "region": "" }
Go to the S3 management console, select one of your files, and click on Properties -> look at the file link.
US Standard:
https://s3.amazonaws.com/yourbucket/
host in your console window: yourbucket.s3.amazonaws.com/
us-west-1:
https://s3-us-west-1.amazonaws.com/yourbucket/
host in your console window: yourbucket.s3-us-west-1.amazonaws.com/
Did you try .send()?
I can upload to S3 with the code below.
http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/AWSRequest.html
var s3object = { Bucket: 'mystuff', Key: name, Body: data['data'] };
s3.client.putObject(s3object).done(function(resp) {
    console.log("Successfully uploaded data");
}).fail(function(resp) {
    console.log(resp);
}).send();
I had the same problem with the new SDK and solved it by setting the endpoint option explicitly.
Reference: http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#constructor_details
Snippet:
var AWS = require('aws-sdk');
var s3 = new AWS.S3({ endpoint: 'https://s3-your-region-varies.amazonaws.com' }),
    myBucket = 'your-bucket-name';

var params = { Bucket: myBucket, Key: 'myUpload', Body: "Test" };

s3.putObject(params, function(err, data) {
    if (err) {
        console.log(err);
    } else {
        console.log("Successfully uploaded data to " + myBucket + "/myUpload");
    }
});
Alternatively, you can solve this by setting the region in your config file; you just have to be precise about your region name.
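For example, a config.json along these lines (the keys are placeholders, and the region must exactly match the bucket's region):

{ "accessKeyId": "YOUR_ACCESS_KEY", "secretAccessKey": "YOUR_SECRET_KEY", "region": "eu-west-1" }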