I'm trying to use the basic tutorial to create an S3 bucket as follows
var AWS = require('aws-sdk');
AWS.config.loadFromPath('./myawsconfig.json');
AWS.config.update({region: 'eu-west-1'});
var s3 = new AWS.S3();
s3.client.createBucket({Bucket: 'pBucket'}, function() {
  var data = {Bucket: 'pBucket', Key: 'myKey', Body: 'Hello!'};
  s3.client.putObject(data, function(err, data) {
    if (err) {
      console.log("Error uploading data: ", err);
    } else {
      console.log("Successfully uploaded data to myBucket/myKey");
    }
  });
});
But I'm receiving the following error
node createbucket.js
Error uploading data: { [NoSuchBucket: The specified bucket does not exist]
message: 'The specified bucket does not exist',
code: 'NoSuchBucket',
name: 'NoSuchBucket',
statusCode: 404,
retryable: false }
I just ran into this problem too; apparently the Node.js tutorial code doesn't work as written. I got an error saying the object doesn't have a createBucket method.
This worked:
var AWS = require('aws-sdk');
AWS.config.loadFromPath('./credentials.json');
// Set your region for future requests.
AWS.config.update({region: 'us-east-1'});
// Create a bucket and put something in it.
var s3 = new AWS.S3();
s3.client.createBucket({Bucket: 'hackathon-test'}, function() {
  var data = {Bucket: 'hackathon-test', Key: 'myKey', Body: 'Hello!'};
  s3.client.putObject(data, function(err, data) {
    if (err) {
      console.log("Error uploading data: ", err);
    } else {
      console.log("Successfully uploaded data to myBucket/myKey");
    }
  });
});
I had this issue and discovered that my API user didn't have permission to create the bucket.
Slightly more thorough error checking revealed this...
s3.client.createBucket({Bucket: 'someBucket'}, function(err) {
  if (err) {
    console.log("Error creating bucket: ", err);
  } else {
    console.log("Successfully created bucket 'someBucket'");
  }
  // ...
According to the AWS S3 bucket name restrictions, your bucket name shouldn't contain any uppercase letters, so 'pBucket' is invalid.
http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
The rules for DNS-compliant bucket names are:
Bucket names must be at least 3 and no more than 63 characters long.
Bucket names must be a series of one or more labels. Adjacent labels are separated by a single period (.). Bucket names can contain lowercase letters, numbers, and hyphens. Each label must start and end with a lowercase letter or a number.
Bucket names must not be formatted as an IP address (e.g., 192.168.5.4).
When using virtual hosted–style buckets with SSL, the SSL wild card certificate only matches buckets that do not contain periods. To work around this, use HTTP or write your own certificate verification logic.
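As a rough illustration of those rules (looksLikeValidBucketName is just a hypothetical helper, not part of the SDK), you could sanity-check a name before calling createBucket:
// Hypothetical helper: a rough client-side check against the DNS rules above.
// It is not exhaustive (AWS remains the authority), but it catches common
// mistakes such as uppercase letters, underscores or bad lengths.
function looksLikeValidBucketName(name) {
  if (name.length < 3 || name.length > 63) return false;
  if (/^\d+\.\d+\.\d+\.\d+$/.test(name)) return false; // must not look like an IP address
  // one or more labels of lowercase letters, digits and hyphens,
  // each starting and ending with a letter or digit, joined by single periods
  return /^[a-z0-9]([a-z0-9-]*[a-z0-9])?(\.[a-z0-9]([a-z0-9-]*[a-z0-9])?)*$/.test(name);
}

console.log(looksLikeValidBucketName('pBucket'));  // false - uppercase letter
console.log(looksLikeValidBucketName('p-bucket')); // true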
A couple of pointers that I missed and someone else may find useful:
If you set the region as part of the S3 object, e.g. var s3 = new AWS.S3({region: 'us-west-1'});, then the call will fail (in my experience).
You can therefore set the region via either
a) AWS.config.update({ region: 'eu-west-1' });
b) as part of the params on createBucket:
s3.createBucket({
  Bucket: bucketName,
  CreateBucketConfiguration: {
    LocationConstraint: "eu-west-1"
  }
}, function (err) {
  // check err here, then carry on as in the examples above
});
Also, watch out for caps or underscores in the bucket name, as that cost me an hour of my life too (bucket names must be DNS compliant).
Related
I'm new to AWS; maybe I'm missing something obvious, so help is needed.
I have 2 versions of the code; the only difference is passing the bucket name as a 4-character string vs a 5-character string, and I get different responses from AWS.
Why is that?
var AWS = require('aws-sdk');
var s3 = new AWS.S3();
s3.createBucket({Bucket: 'node4'}, function() {
  var params = {Bucket: 'node4', Key: 'myKey', Body: 'Hello!'};
  s3.putObject(params, function(err, data) {
    if (err)
      console.log(err)
    else
      console.log("Successfully uploaded data to myBucket/myKey");
  });
});
running the app.js:
➜ aws node app.js
{ [AllAccessDisabled: All access to this object has been disabled]
message: 'All access to this object has been disabled',
code: 'AllAccessDisabled',
region: null,
time: Fri Feb 05 2016 20:45:11 GMT+0200 (IST),
requestId: 'somerequestId',
extendedRequestId: 'someextendedRequestId',
statusCode: 403,
retryable: false,
retryDelay: 30 }
second code:
var AWS = require('aws-sdk');
var s3 = new AWS.S3();
s3.createBucket({Bucket: 'node4e'}, function() {
  var params = {Bucket: 'node4e', Key: 'myKey', Body: 'Hello!'};
  s3.putObject(params, function(err, data) {
    if (err)
      console.log(err)
    else
      console.log("Successfully uploaded data to myBucket/myKey");
  });
});
running the app.js:
➜ aws node app.js
Successfully uploaded data to myBucket/myKey
the only difference is passing the bucket name as a 4-character string vs a 5-character string
Actually the only difference is that you don't have write access to the bucket named 'node4' and you do have access to a bucket named 'node4e'. Did you check to see if both buckets were actually created successfully? I notice you aren't checking for errors in the createBucket() call, just the putObject() call.
Those are fairly generic bucket names you are using; I wouldn't be surprised if the one that is failing, 'node4', is already in use by another AWS account.
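A minimal sketch of that check, reusing the bucket name from the question (the error codes in the comments are the usual ones, but treat this as a sketch rather than gospel):
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

s3.createBucket({Bucket: 'node4'}, function(err, data) {
  if (err) {
    // A name owned by another account typically comes back as BucketAlreadyExists;
    // a name you already own (in another region) as BucketAlreadyOwnedByYou.
    console.log("Error creating bucket: ", err.code, err.message);
    return;
  }
  console.log("Bucket created: ", data.Location);
  // safe to putObject here
});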
I'm trying out AWS S3 for the first time and I wrote the following function to generate a bucket policy.
// Load the AWS SDK for Node.js
var AWS = require('aws-sdk');
// Load configuration
AWS.config = new AWS.Config();
AWS.config.accessKeyId = sails.config.accessKeyId;
AWS.config.secretAccessKey = sails.config.secretAccessKey;
AWS.config.region = sails.config.region
// Create S3 object
var s3 = new AWS.S3();
// Defining the required parameters
var params = {
  Bucket: "bucket-name-here"
};
s3.getBucketPolicy(params, function(error, date) {
  if(error) {
    // An error occurred
    console.log("Error\n" + error);
    return res.json({
      message: "Error",
      'error': error
    });
  } else {
    // Successful
    console.log("Data\n" + date);
    return res.json({
      message: "Successful",
      'data': date
    });
  }
});
But the response is always NoSuchBucketPolicy: The bucket policy does not exist.
I tried uploading a test file into the bucket and listing all buckets, and both worked as expected. What is wrong with the code?
Your code doesn't "generate" a bucket policy... it tries to fetch the existing policy of a bucket. Buckets don't have a policy until you create one, so this error would be normal in that case.
Error Code: NoSuchBucketPolicy
Description: specified bucket does not have a bucket policy.
HTTP Status Code: 404 Not Found
http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
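If you actually want to attach a policy first, a minimal sketch would look something like this, reusing the s3 client from the question (the bucket name and the statement below are placeholders, not from the question):
var policy = {
  Version: "2012-10-17",
  Statement: [{
    Sid: "PublicReadExample",
    Effect: "Allow",
    Principal: "*",
    Action: "s3:GetObject",
    Resource: "arn:aws:s3:::bucket-name-here/*"
  }]
};

s3.putBucketPolicy({Bucket: "bucket-name-here", Policy: JSON.stringify(policy)}, function(error) {
  if (error) {
    console.log("Error attaching policy\n" + error);
  } else {
    // getBucketPolicy should now return the policy instead of NoSuchBucketPolicy
    s3.getBucketPolicy({Bucket: "bucket-name-here"}, function(error, data) {
      console.log(error || data.Policy);
    });
  }
});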
I'm using the AWS SDK for Node.js to create a folder or key on S3. I searched on Google, but found nothing. Does anybody know how I can create a folder under my bucket with the AWS SDK for Node.js?
And how can you check whether this folder already exists in your bucket?
If you use console.aws.amazon.com, you can create a folder in your bucket easily, but I can't figure out how to do it with the AWS SDK for Node.js.
S3 is not your typical file system. It's an object store. It has buckets and objects. Buckets are used to store objects, and objects comprise data (basically a file) and metadata (information about the file). When compared to a traditional file system, it's more natural to think of an S3 bucket as a drive rather than as a folder.
You don't need to pre-create a folder structure in an S3 bucket. You can simply put an object with the key cars/ford/focus.png even if cars/ford/ does not exist.
It's valuable to understand what happens at the API level in this case:
the putObject call will create an object at cars/ford/focus.png but it will not create anything representing the intermediate folder structure of cars/ or cars/ford/.
the actual folder structure does not exist, but is implied through delimiter=/ when you call listObjects, returning folders in CommonPrefixes and files in Contents.
you will not be able to test for the ford sub-folder using headObject because cars/ford/ does not actually exist (it is not an object). Instead you have 2 options to see if it (logically) exists (see the sketch after this list):
call listObjects with prefix=cars/ford/ and find it in Contents
call listObjects with prefix=cars/, delimiter=/ and find it in CommonPrefixes
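For example, a minimal sketch of the second option, assuming a bucket named mybucket (this uses s3.listObjects from the current aws-sdk rather than the older s3.client style):
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

// List with prefix=cars/ and delimiter=/ and look for the logical folder
// among the CommonPrefixes.
s3.listObjects({Bucket: 'mybucket', Prefix: 'cars/', Delimiter: '/'}, function(err, data) {
  if (err) {
    console.log("Error listing objects: ", err);
    return;
  }
  var fordExists = data.CommonPrefixes.some(function(p) {
    return p.Prefix === 'cars/ford/';
  });
  console.log("cars/ford/ logically exists:", fordExists);
});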
It is possible to create an S3 object that represents a folder, if you really want to. The AWS S3 console does this, for example. To create myfolder in a bucket named mybucket, you can issue a putObject call with bucket=mybucket, key=myfolder/, and size 0. Note the trailing forward slash.
Here's an example of creating a folder-like object using the awscli:
aws s3api put-object --bucket mybucket --key cars/ --content-length 0
In this case:
the folder is actually a zero-sized object whose key ends in /. Note that if you leave off the trailing / then you will get a zero-sized object that appears to be a file rather than a folder.
you are now able to test for the presence of cars/ in mybucket by issuing a headObject call with bucket=mybucket and key=cars/.
Finally, note that your folder delimiter can be anything you like, for example +, because it is simply part of the key and is not actually a folder separator (there are no folders). You can vary your folder delimiter from listObjects call to call if you like.
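And a minimal sketch of the headObject check described above, assuming the zero-sized cars/ object from the awscli example has already been created in mybucket:
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

// Succeeds only if an object with the exact key cars/ exists;
// a purely implied folder would come back as a 404 (NotFound) error.
s3.headObject({Bucket: 'mybucket', Key: 'cars/'}, function(err, data) {
  if (err && err.statusCode === 404) {
    console.log("cars/ does not exist as an object");
  } else if (err) {
    console.log("Error checking for cars/: ", err);
  } else {
    console.log("cars/ exists");
  }
});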
The code from #user2837831 doesn't seem to work anymore, probably with a newer version of the JavaScript SDK. So I am adding here the version of the code that I am using to create a folder inside a bucket with Node.js. This works with the 2.1.31 SDK. What is important is the '/' at the end of the Key value in params - with that, S3 treats the key as a folder rather than a file.
var AWS = require('aws-sdk');
AWS.config.region = 'us-east-1';
var s3Client = new AWS.S3();
var params = { Bucket: 'your_bucket_goes_here', Key: 'folderInBucket/', ACL: 'public-read', Body:'body does not matter' };
s3Client.upload(params, function (err, data) {
  if (err) {
    console.log("Error creating the folder: ", err);
  } else {
    console.log("Successfully created a folder on S3");
  }
});
This is really straightforward; you can do it with the following, just remember the trailing slash.
var AWS = require("aws-sdk");
var s3 = new AWS.S3();
var params = {
Bucket: "mybucket",
Key: "mykey/"
};
s3.putObject(params).promise();
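Since putObject(params).promise() returns a promise, in practice you would attach handlers to it, roughly like this:
s3.putObject(params).promise()
  .then(function() {
    console.log("Successfully created mykey/");
  })
  .catch(function(err) {
    console.log("Error creating mykey/: ", err);
  });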
I find that we do not need an explicit directory creation call anymore.
Just the following works for me and automatically creates a directory hierarchy as I need.
var userFolder = 'your_bucket_name' + '/' + dirOneName + '/' + dirTwoName;
// IMPORTANT : No trailing '/' at the end of the last directory name
AWS.config.region = 'us-east-1';
AWS.config.update({
accessKeyId: 'YOUR_KEY_HERE',
secretAccessKey: 'your_secret_access_key_here'
});
var bucket = new AWS.S3({
  params: {
    Bucket: userFolder
  }
});
var contentToPost = {
  Key: <<your_filename_here>>,
  Body: <<your_file_here>>,
  ContentEncoding: 'base64',
  ContentType: <<your_file_content_type>>,
  ServerSideEncryption: 'AES256'
};
bucket.putObject(contentToPost, function (error, data) {
  if (error) {
    console.log("Error in posting Content [" + error + "]");
    return false;
  } /* end if error */
  else {
    console.log("Successfully posted Content");
  } /* end else error */
})
.on('httpUploadProgress', function (progress) {
  // Log Progress Information
  console.log(Math.round(progress.loaded / progress.total * 100) + '% done');
});
In the console output, the first link generated is the path of the newly created bucket and the second shows the folder structure.
var AWS = require("aws-sdk");
var path = require('path')
// Set the region
AWS.config.update({
  region: "us-east-2",
  accessKeyId: "your aws access id",
  secretAccessKey: "your secret access key"
});
var s3 = new AWS.S3();
var bucketParams = {
  Bucket: "imageurrllll",
  ACL: "public-read"
};
s3.createBucket(bucketParams, function(err, data) {
  if (err) {
    console.log("Error", err);
  } else {
    console.log("Success", data.Location);
    var folder_name = 'root_folder'
    // this is the local folder data path
    var filePath = "./public/stylesheets/user.png"
    // var child_folder='child'
    var date = Date.now()
    var imgData = `${folder_name}_${date}/` + path.basename(filePath);
    var params = {
      Bucket: 'imageurrllll',
      Body: '', // here you can give the image data from your local directory
      Key: imgData,
      ACL: 'public-read'
    };
    // in this section we are creating the folder structure
    s3.upload(params, function(err, aws_uploaded_url) {
      // handle error
      if (err) {
        console.log("Error", err);
      }
      // success
      else {
        console.log("Data Uploaded in:", aws_uploaded_url.Location)
      }
    })
  }
});
I'm trying to upload a PDF to an S3 bucket using the Knox library, but I keep getting 505 errors and the PDFs won't save. My code:
// all of this works well
var knox = require('knox');
var client = knox.createClient(require('../path/to/config.js').knox);
client.putFile('tmp/file', '/prefix/key',
function(err, res) {
if (err) {
console.log("Error PUTing file in S3:", err);
}
console.log("S3 RESPONSE:", res.statusCode); // returns 505
}
);
Anyone have any insight into what I'm doing wrong? I've also tried setting my own headers using client.put(..), but I got the same 505 response.
Two possible reasons:
1) If this is your complete code, then you forgot to enter the key, secret and bucket.
var client = knox.createClient({
key: '<api-key-here>'
, secret: '<secret-here>'
, bucket: 'learnboost'
});
2) There is a space in the file name that you are trying to upload.
This isn't an answer per se, and I'm still unsure about the 505 response above, but the AWS SDK that Amazon puts out works great if anyone is having similar issues with Knox. The above just becomes:
var aws = require('aws-sdk');
var fs = require('fs'); // needed for fs.readFileSync below
aws.config.loadFromPath('./path/to/config.json');
var s3 = new aws.S3();
var params = { Bucket: 'your-bucket',
               Key: 'your-key',
               Body: fs.readFileSync('/path/to/file.pdf') };
s3.putObject(params, function(err, data) {
  if (err) {
    console.log("Error PUTing file:", err);
  }
  console.log("S3 RESPONSE:", data);
});
I've tried using aws-sdk and knox and I get status code 301 when trying to upload images, with the message 'The bucket you are attempting to access must be addressed using the specified endpoint. Please send all future requests to this endpoint.' The same upload works in PHP.
AWS.config.loadFromPath(__dirname + '/config/config.json');
fs.readFile(source, function (err, data) {
  var s3 = new AWS.S3();
  s3.client.createBucket({Bucket: 'mystuff'}, function() {
    var d = {
      Bucket: 'mystuff',
      Key: 'img/test.jpg',
      Body: data,
      ACL: 'public-read'
    };
    s3.client.putObject(d, function(err, res) {
      if (err) {
        console.log("Error uploading data: ", err);
        callback(err);
      } else {
        console.log("Successfully uploaded data to myBucket/myKey");
        callback(res);
      }
    });
  });
});
I actually solved this problem. In your config you have to have a region; since my bucket was "US Standard", I left my region blank and it worked.
config.json -
{ "accessKeyId": "secretKey", "secretAccessKey": "secretAccessKey", "region": ""}
Go to the S3 management console, select one of your files and click on Properties -> look at the file link.
US Standard:
https://s3.amazonaws.com/yourbucket/
host in your console window: yourbucket.s3.amazonaws.com/
us-west-1:
https://s3-us-west-1.amazonaws.com/yourbucket/
host in your console window: yourbucket.s3-us-west-1.amazonaws.com/
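So, for example, if the console shows the host as yourbucket.s3-us-west-1.amazonaws.com, a config.json along these lines should work (the key values are placeholders):
{ "accessKeyId": "yourAccessKeyId", "secretAccessKey": "yourSecretAccessKey", "region": "us-west-1" }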
Did you try .send()?
I can upload to S3 with the code below.
http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/AWSRequest.html
var s3object = {Bucket: 'mystuff', Key: name, Body: data['data']};
s3.client.putObject(s3object).done(function(resp){
  console.log("Successfully uploaded data");
}).fail(function(resp){
  console.log(resp);
}).send();
I had the same problem with the new SDK and solved it by setting the endpoint option explicitly.
Reference : http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#constructor_details
Snippet:
var AWS = require('aws-sdk');
var s3 = new AWS.S3({ endpoint :'https://s3-your-region-varies.amazonaws.com' }),
myBucket = 'your-bucket-name';
var params = {Bucket: myBucket, Key: 'myUpload', Body: "Test"};
s3.putObject(params, function(err, data) {
  if (err) {
    console.log(err)
  } else {
    console.log("Successfully uploaded data to " + myBucket + "/myUpload");
  }
});
Alternatively, you can solve this by setting the region in your config file; you just have to be precise about your region name.