Solved:
I want to get a signed URL for an object in Amazon S3. I am new to AWS: where do I set my secret access key and access key ID so that S3 identifies requests from my server?
var express = require('express');
var app = express();
var AWS = require('aws-sdk');
var s3 = new AWS.S3();
var params = {Bucket: 'my-bucket', Key: 'path/to/key', Expires: 20};
s3.getSignedUrl('getObject', params, function (err, url) {
  console.log('Signed URL: ' + url);
});
app.listen(8000);
You can also set the credentials per bucket if you are working with multiple buckets; just pass the credentials into the constructor of the S3 object, like so:
var AWS = require('aws-sdk');
var credentials = {
  accessKeyId: AWS_CONSTANTS.S3_KEY,
  secretAccessKey: AWS_CONSTANTS.S3_SECRET,
  region: AWS_CONSTANTS.S3_REGION
};
var s3 = new AWS.S3(credentials);
var params = {Bucket: 'bucket-name', Key: 'key-name', Expires: 20};
s3.getSignedUrl('getObject', params, function (err, url) {
  console.log('Signed URL: ' + url);
});
Later I solved my issue.
This was pretty helpful: http://aws.amazon.com/sdkfornodejs/
Moreover, you can also hardcode your credentials, as shown below:
var express = require("express");
var app = express();
var AWS = require('aws-sdk');
// Set the credentials and region before constructing the S3 client,
// otherwise the client will not pick them up.
AWS.config.update({accessKeyId: 'Your-Access-Key-Id', secretAccessKey: 'Your-Secret-Key'});
AWS.config.region = 'us-west-2';
var s3 = new AWS.S3();
var params = {Bucket: 'your-bucket-name', Key: 'key-name-to-store-under', Expires: 20};
s3.getSignedUrl('getObject', params, function (err, url) {
  console.log('Signed URL: ' + url);
});
app.listen(8000);
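As an alternative to hardcoding, the v2 SDK also loads credentials automatically from the shared credentials file at ~/.aws/credentials. A minimal sketch of that file, with placeholder values:
; ~/.aws/credentials (the SDK reads this file automatically)
[default]
aws_access_key_id = YOUR_ACCESS_KEY_ID
aws_secret_access_key = YOUR_SECRET_ACCESS_KEY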
Related
I just need to get the URL so I can do the upload on the front end.
But in my situation, for some reason I only get the URL on the second call.
const AWS = require('aws-sdk')
const s3 = new AWS.S3()
AWS.config.update({
  accessKeyId: 'secretId',
  secretAccessKey: 'secretAccessKeyId'
})
return s3.getSignedUrl('putObject', {
  Bucket: 'eps-file-default',
  Key: 'picture-test.png',
  Expires: 300
})
Here you can see the first response:
"https://s3.amazonaws.com/" // the problem is here
And here you can see the second response:
"https://eps-file-default.s3.amazonaws.com/picture-test.png?AWSAccessKeyId=mysecret&Expires=1595246561&Signature=3uEK7zrqUDUv6hGriN3TraUnoOo%3D"
If you have the solution, thank you so much.
I found the solution! I hope this can help someone: the config has to come before the S3 object is created.
const AWS = require('aws-sdk')
// Update the config first...
AWS.config.update({
  accessKeyId: 'secret',
  secretAccessKey: 'secret2'
})
// ...and only then construct the client.
const s3 = new AWS.S3()
return s3.getSignedUrl('putObject', {
  Bucket: bucketname,
  Key: filename,
  Expires: 900
})
I have uploaded an image using Node.js to an AWS S3 bucket, and the upload succeeds, but when I try to view the image it downloads instead, and I can't view the downloaded file in the browser. I used the following code to upload the image to AWS S3:
var AWS = require('aws-sdk');
var config = require('../../server/config');
AWS.config.update({
  accessKeyId: config.aws.accessKeyId,
  secretAccessKey: config.aws.secretAccessKey,
  region: config.aws.region
});
var s3 = new AWS.S3();
var Busboy = require('busboy');
var busboyBodyParser = require('busboy-body-parser');
app.use(busboyBodyParser());
app.post('/upload', function(req, res) {
  var directory = req.body.directory;
  console.log(req.files.file);
  var image = req.files.file.name;
  var contenttype = req.files.file.mimetype;
  var file;
  if (req.body.directory) {
    file = directory + '/' + image;
  } else {
    file = image;
  }
  var data = req.files.file.data;
  var keys = {
    Bucket: req.body.bucket,
    Key: file,
    Body: data,
    ACL: 'public-read',
    contentType: contenttype
  };
  s3.upload(keys, function(err, result) {
    if (err) {
      res.send({
        isError: true,
        status: 400,
        message: "File Not Uploaded",
        data: err
      });
    } else {
      var data = {
        Location: result.Location,
        key: result.key,
        Bucket: result.Bucket
      };
      res.send({
        isError: false,
        status: 200,
        message: "File Uploaded",
        data: data
      });
    }
  });
});
I was stuck with this as well, but the following works:
let params = {
  ACL: 'public-read',
  Bucket: process.env.BUCKET_NAME,
  Body: fs.createReadStream(req.file.path),
  ContentType: req.file.mimetype,
  Key: `avatar/${req.file.originalname}`
};
req.file.mimetype is what fixed it. It is essentially the same as ContentType: 'image/jpeg', but it picks up the MIME type of whatever file the user actually uploaded instead of hardcoding image/jpeg or image/png.
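For context, here is a minimal sketch of the route around that params object, assuming multer is what populates req.file; the route path and 'avatar' field name are illustrative, and an express app plus a configured s3 client are assumed:
const fs = require('fs');
const multer = require('multer');
const upload = multer({ dest: 'uploads/' }); // writes the upload to disk so req.file.path exists

app.post('/profile', upload.single('avatar'), (req, res) => {
  // req.file now exposes .path, .mimetype, and .originalname, as used above
  let params = {
    ACL: 'public-read',
    Bucket: process.env.BUCKET_NAME,
    Body: fs.createReadStream(req.file.path),
    ContentType: req.file.mimetype,
    Key: `avatar/${req.file.originalname}`
  };
  s3.upload(params, (err, data) => res.send(err || data));
});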
I hope your issue is fixed though.
I have found the answer:
Use ContentType: 'image/jpeg' (or a variable holding the file's MIME type) in the params object when uploading an image to AWS S3. Note the capitalization: the SDK expects ContentType, so the lowercase contentType in the question is not recognized, the object falls back to the default content type, and browsers download it instead of displaying it.
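Applied to the question's code, that one-word fix looks like this (everything else unchanged):
var keys = {
  Bucket: req.body.bucket,
  Key: file,
  Body: data,
  ACL: 'public-read',
  ContentType: contenttype // capital 'C' and 'T'; this is what lets the browser render the image inline
};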
I work in a financial institution, and for security reasons my employer cannot give out the access key ID and the secret access key for the AWS account. This means I can't use aws-sdk.
As a next option, would it be possible to upload files using HTTP PUT to a public S3 bucket without using the AWS SDK, which requires the access key ID and the secret access key?
I had a look at this answer: How to upload a file using a rest client for node
And was thinking of this approach:
var fs = require('fs');
var request = require('request');
var options = {
  method: 'PUT',
  preambleCRLF: true,
  postambleCRLF: true,
  uri: 'https://s3-ap-southeast-2.amazonaws.com/my-bucket/myFile.pdf',
  multipart: [
    {
      'content-type': 'application/pdf',
      body: fs.createReadStream('/uploads/uploaded-file.pdf')
    }
  ]
};
request(options, function(err, response, body) {
  if (err) {
    return console.log(err);
  }
  console.log('File uploaded to s3');
});
Could that work?
Your code above works only against a custom storage endpoint (and it would have to be public), not against AWS storage.
For AWS storage, an access key ID and secret access key are mandatory; without them you cannot upload files to the bucket.
This is a bit old, but for anyone looking for the same thing: you can now use a pre-signed URL to achieve this. The way it works is that you create a pre-signed URL on your server, share it with the client, and the client uses it to upload the file to S3.
On the server, generate a URL:
const AWS = require('aws-sdk')
// Set the credentials before constructing the client so they take effect.
AWS.config.update({accessKeyId: 'access-key', secretAccessKey: 'access-pass'})
const s3 = new AWS.S3({
  region: 'us-east-1',
  signatureVersion: 'v4'
})
const myBucket = 'clearg-developers'
const myKey = 'directory/newFile.zip'
const signedUrlExpireSeconds = 60 * 5 // seconds until the URL expires
const url = s3.getSignedUrl('putObject', {
  Bucket: myBucket,
  Key: myKey,
  Expires: signedUrlExpireSeconds
});
return url
And on the client, from Node, you can PUT the file to that URL (a successful upload returns an empty body):
var fs = require('fs');
var request = require('request');

var fileName = '/path/to/file.ext';
var stats = fs.statSync(fileName);
fs.createReadStream(fileName).pipe(request({
  method: 'PUT',
  url: url,
  headers: {
    'Content-Length': stats['size']
  }
}, function (err, res, body) {
  console.log('success');
}));
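Since the request module has since been deprecated (as noted further down this page), here is a sketch of the same client-side PUT using axios instead; url is assumed to be the pre-signed URL returned by the server:
const fs = require('fs');
const axios = require('axios');

const fileName = '/path/to/file.ext';
axios.put(url, fs.createReadStream(fileName), {
  // S3 needs the length up front when the body is a stream
  headers: { 'Content-Length': fs.statSync(fileName).size }
}).then(() => console.log('success'))
  .catch((err) => console.error(err));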
I am using the NodeJS AWS SDK to generate a presigned S3 URL. The docs give an example of generating a presigned URL.
Here is my exact code (with sensitive info omitted):
const AWS = require('aws-sdk')
const s3 = new AWS.S3()
AWS.config.update({accessKeyId: 'id-omitted', secretAccessKey: 'key-omitted'})

// Tried with and without this. Since s3 is not region-specific, I don't
// think it should be necessary.
// AWS.config.update({region: 'us-west-2'})

const myBucket = 'bucket-name'
const myKey = 'file-name.pdf'
const signedUrlExpireSeconds = 60 * 5

const url = s3.getSignedUrl('getObject', {
  Bucket: myBucket,
  Key: myKey,
  Expires: signedUrlExpireSeconds
})

console.log(url)
The URL it generates looks like this:
https://bucket-name.s3-us-west-2.amazonaws.com/file-name.pdf?AWSAccessKeyId=[access-key-omitted]&Expires=1470666057&Signature=[signature-omitted]
I am copying that URL into my browser and getting the following response:
<Error>
  <Code>NoSuchBucket</Code>
  <Message>The specified bucket does not exist</Message>
  <BucketName>[bucket-name-omitted]</BucketName>
  <RequestId>D1A358D276305A5C</RequestId>
  <HostId>
    bz2OxmZcEM2173kXEDbKIZrlX508qSv+CVydHz3w6FFPFwC0CtaCa/TqDQYDmHQdI1oMlc07wWk=
  </HostId>
</Error>
I know the bucket exists. When I navigate to this item via the AWS web console and double-click on it, it opens the object with a URL and works just fine:
https://s3-us-west-2.amazonaws.com/[bucket-name-omitted]/[file-name-omitted].pdf?X-Amz-Date=20160808T141832Z&X-Amz-Expires=300&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Signature=[signature-omitted]&X-Amz-Credential=ASIAJKXDBR5CW3XXF5VQ/20160808/us-west-2/s3/aws4_request&X-Amz-SignedHeaders=Host&x-amz-security-token=[really-long-key]
So I am led to believe that I must be doing something wrong with how I'm using the SDK.
Dustin,
Your code is correct; double-check the following:
Your bucket access policy (an illustrative sketch follows this list).
Your bucket permissions via your API key.
Your API key and secret.
Your bucket name and key.
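For the first item, a bucket access policy that grants read access to the signing identity looks roughly like this; the account ID, user name, and bucket name are placeholders, not taken from the question:
{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": { "AWS": "arn:aws:iam::123456789012:user/url-signing-user" },
    "Action": "s3:GetObject",
    "Resource": "arn:aws:s3:::bucket-name/*"
  }]
}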
Since this question is very popular and the most popular answer says your code is correct: there is in fact a subtle problem in the code that can lead to a frustrating issue. So, here is working code:
const AWS = require('aws-sdk');
AWS.config.update({
  accessKeyId: ':)))',
  secretAccessKey: ':DDDD',
  region: 'ap-south-1',
  signatureVersion: 'v4'
});
const s3 = new AWS.S3()
const myBucket = ':)))))'
const myKey = ':DDDDDD'
const signedUrlExpireSeconds = 60 * 5
const url = s3.getSignedUrl('getObject', {
  Bucket: myBucket,
  Key: myKey,
  Expires: signedUrlExpireSeconds
});
console.log(url);
The noticeable difference is that the s3 object is created after the config update; without this, the config does not take effect and the generated URL doesn't work.
Here is the complete code for generating a pre-signed (putObject) URL for any type of file in S3.
If you want, you can control the expiration time via the Expires parameter in the params object.
The code below will upload any type of file, such as Excel (xlsx), PDF, or JPEG:
const AWS = require('aws-sdk');
const fs = require('fs');
const axios = require('axios');

const s3 = new AWS.S3();
const filePath = 'C:/Users/XXXXXX/Downloads/invoice.pdf';
var params = {
  Bucket: 'testing-presigned-url-dev',
  Key: 'dummy.pdf',
  ContentType: 'application/octet-stream'
  // optionally add Expires: <seconds> here to control the URL's lifetime
};
s3.getSignedUrl('putObject', params, function (err, url) {
  console.log('The URL is', url);
  fs.writeFileSync("./url.txt", url);
  axios({
    method: "put",
    url,
    data: fs.readFileSync(filePath),
    headers: {
      "Content-Type": "application/octet-stream"
    }
  })
    .then((result) => {
      console.log('result', result);
    }).catch((err) => {
      console.log('err', err);
    });
});
I had a use case where, using Node.js, I wanted to get an object from S3, download it to a temp location, and then pass it as an attachment to a third-party service. This is how I broke the task down:
Get a signed URL from S3.
Make a REST call to fetch the object.
Write it to a local location.
It may help anyone with the same use case; check out the link below (a sketch of the steps follows it):
https://medium.com/@prateekgawarle183/fetch-file-from-aws-s3-using-pre-signed-url-and-store-it-into-local-system-879194bfdcf4
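A minimal sketch of those three steps, assuming a configured s3 client and axios; the function and variable names are illustrative, not from the linked article:
const fs = require('fs');
const axios = require('axios');

async function fetchToTemp(s3, bucket, key, tmpPath) {
  // 1. Get a signed URL from S3
  const url = s3.getSignedUrl('getObject', { Bucket: bucket, Key: key, Expires: 300 });
  // 2. Make a REST call to fetch the object as a stream
  const res = await axios({ url, method: 'GET', responseType: 'stream' });
  // 3. Write it to the local temp location
  await new Promise((resolve, reject) => {
    res.data.pipe(fs.createWriteStream(tmpPath))
      .on('finish', resolve)
      .on('error', reject);
  });
  return tmpPath; // hand this path to the third-party service as an attachment
}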
For me, I was getting a 403 because the IAM role I had used to generate the signed URL was missing the s3:GetObject permission for the bucket/object in question. Once I added this permission to the IAM role, the signed URL began to work correctly.
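For reference, the statement that was missing looks roughly like this in the role's policy; the bucket name is a placeholder:
{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Action": "s3:GetObject",
    "Resource": "arn:aws:s3:::bucket-name/*"
  }]
}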
Probably not the answer you are looking for, but it turned out I had swapped AWS_ACCESS_KEY_ID with AWS_SECRET_ACCESS_KEY.
For future visitors: you might want to double-check that.
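One way to make that mix-up harder is to read the standard environment variables explicitly rather than pasting literals; note the v2 SDK also picks these variables up automatically if you configure nothing at all:
AWS.config.update({
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY
});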
Try this function, which wraps getSignedUrl in a promise:
const AWS = require("aws-sdk");

const s3 = new AWS.S3({
  accessKeyId: 'AK--------------6U',
  secretAccessKey: 'kz---------------------------oGp'
});

const getSignedUrlAsync = async () => {
  const params = {
    Bucket: 'bucket-name',
    Key: 'file-name.pdf',
    Expires: 60 * 5
  };
  try {
    // Wrap the callback-style API in a promise so it can be awaited.
    const url = await new Promise((resolve, reject) => {
      s3.getSignedUrl('getObject', params, (err, url) => {
        err ? reject(err) : resolve(url);
      });
    });
    console.log(url);
  } catch (err) {
    console.log(err);
  }
};

getSignedUrlAsync();
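Recent releases of the v2 SDK also ship a built-in promise variant, which makes the manual wrapper unnecessary; a sketch, assuming the same s3 client and params as above:
// inside an async function:
const url = await s3.getSignedUrlPromise('getObject', params);
console.log(url);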
How do you use request to download contents of a file and directly stream it up to s3 using the aws-sdk for node?
The code below gives me Object #<Request> has no method 'read', which makes it seem like request does not return a readable stream...
var AWS = require('aws-sdk');
var req = require('request');

var s3 = new AWS.S3({params: {Bucket: myBucket, Key: s3Key}});
var imageStream = req.get(url)
  .on('response', function (response) {
    if (200 == response.statusCode) {
      // imageStream should be read()able by now, right?
      s3.upload({Body: imageStream, ACL: "public-read", CacheControl: "max-age=5184000"}, function (err, data) { // 2 months
        console.log(err, data);
      });
    }
  });
Per the aws-sdk docs, Body needs to be a ReadableStream object.
What am I doing wrong here?
This can be pulled off using the s3-upload-stream module; however, I'd prefer to limit my dependencies.
Since I had the same problem as @JoshSantangelo (zero-byte files on S3) with request@2.60.0 and aws-sdk@2.1.43, let me add an alternative solution using Node's own http module (caveat: simplified code from a real-life project, not tested separately):
var http = require('http');
var AWS = require('aws-sdk');

// Assumes the default Bucket was set via the client's params, as in the question.
var s3 = new AWS.S3({params: {Bucket: myBucket}});

function copyToS3(url, key, callback) {
  http.get(url, function onResponse(res) {
    if (res.statusCode >= 300) {
      return callback(new Error('error ' + res.statusCode + ' retrieving ' + url));
    }
    s3.upload({Key: key, Body: res}, callback);
  })
  .on('error', function onError(err) {
    return callback(err);
  });
}
As far as I can tell, the problem is that request does not fully support the current Node streams API, while aws-sdk depends on it.
References:
request issue about the readable event not working right
generic issue for "new streams" support in request
usage of the readable event in aws-sdk
You want to use the response object if you're manually listening for the response stream:
var req = require('request');
var s3 = new AWS.S3({params: {Bucket: myBucket, Key: s3Key}});
var imageStream = req.get(url)
  .on('response', function (response) {
    if (200 == response.statusCode) {
      s3.upload({Body: response, ACL: "public-read", CacheControl: "max-age=5184000"}, function (err, data) { // 2 months
        console.log(err, data);
      });
    }
  });
As Request has been deprecated, here's a solution utilizing Axios
const AWS = require('aws-sdk');
const axios = require('axios');

const downloadAndUpload = async function(url, fileName) {
  const res = await axios({ url, method: 'GET', responseType: 'stream' });
  const s3 = new AWS.S3(); // assumes AWS credentials in env vars or the AWS config file
  const params = {
    Bucket: IMAGE_BUCKET, // assumed to be defined elsewhere
    Key: fileName,
    Body: res.data,
    ContentType: res.headers['content-type'],
  };
  return s3.upload(params).promise();
}
Note that the current version of the AWS SDK doesn't throw an exception if the AWS credentials are wrong or missing; the promise simply never resolves.
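If that hanging promise is a concern, one defensive pattern is to race the upload against a timeout; a sketch, where the 10-second limit is an arbitrary choice and not an SDK feature:
const withTimeout = (promise, ms) =>
  Promise.race([
    promise,
    new Promise((_, reject) =>
      setTimeout(() => reject(new Error('S3 upload timed out')), ms))
  ]);

// usage with the function above:
withTimeout(downloadAndUpload(url, fileName), 10000)
  .then((data) => console.log('Uploaded to', data.Location))
  .catch((err) => console.error(err));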