When uploading images, if I use the data that's in my req.file.buffer (which is an array of numbers, i.e. the buffer), it uploads the image to AWS S3 correctly.
But I need to resize the image first, so I'm trying to use Jimp, like so:
const photo = await jimp.read(req.file.buffer)
await photo.cover(300, 300);
And then pass it to aws settings:
const s3 = new AWS.S3()
const params = {
Bucket: 'jamsession-images',
Key: req.body.photo,
// the Body below is a buffer, just like the one in req.file.buffer
Body: photo.bitmap.data
};
s3.upload(params, function (err, data) {
if (err) {
console.log(err);
}
console.log('****************** success');
});
But if I do this, it uploads the image to AWS S3, but the image is corrupted.
What am I doing wrong here? I think AWS S3 needs a buffer in the Body, and I thought that after Jimp finished scaling the image the new buffer would work, but it doesn't. Any ideas?
Full code:
exports.resize = async (req, res, next) => {
// check if there is no new file to resize
if (!req.file) {
next(); // skip to the next middleware
return;
}
const extension = req.file.mimetype.split('/')[1]
req.body.photo = `${uuid.v4()}.${extension}`
// now we resize
const photo = await jimp.read(req.file.buffer)
await photo.cover(300, 300);
AWS.config.update({
secretAccessKey: process.env.SECRETACCESSKEY,
accessKeyId: process.env.ACCESSKEYID,
region: 'us-east-1'
})
const s3 = new AWS.S3()
const params = {
Bucket: 'jamsession-images',
Key: req.body.photo,
// this line seems to be the issue..
// even though photo.bitmap.data is also a buffer
Body: photo.bitmap.data
};
s3.upload(params, function (err, data) {
if (err) {
console.log('%%%%%%%%%%%%%%% error in callback');
console.log(err);
}
console.log('****************** success');
console.log(data);
});
// await photo.write(`./public/uploads/${req.body.photo}`);
// once we have written the photo to our filesystem, keep going!
next()
};
I had this problem too. To get the correct buffer of the resulting image, we have to use Jimp's getBuffer function.
image.getBuffer(mime, cb);
Supported MIME types
Jimp.MIME_PNG; // "image/png"
Jimp.MIME_JPEG; // "image/jpeg"
Jimp.MIME_BMP; // "image/bmp"
With Jimp.AUTO you can keep the MIME type of the original image and use it.
You can read more about the getBuffer function at https://www.npmjs.com/package/jimp
photo.getBuffer(Jimp.AUTO, function(error, result){
const params = {
Bucket: 'jamsession-images',
Key: req.body.photo,
// correct buffer
Body: result
};
s3.upload(...);
});
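If you prefer async/await, recent Jimp versions also expose getBufferAsync, which returns a promise. Here is a minimal sketch of the full resize middleware using it (an assumption: same bucket, uuid helper, and lowercase jimp import as in the question, and a Jimp version that provides getBufferAsync):
exports.resize = async (req, res, next) => {
  if (!req.file) return next(); // nothing to resize

  const extension = req.file.mimetype.split('/')[1];
  req.body.photo = `${uuid.v4()}.${extension}`;

  const photo = await jimp.read(req.file.buffer);
  photo.cover(300, 300);

  // Re-encode the resized image in its original MIME type (jimp.AUTO)
  // so S3 receives a valid PNG/JPEG buffer, not raw bitmap data.
  const buffer = await photo.getBufferAsync(jimp.AUTO);

  const s3 = new AWS.S3();
  await s3.upload({
    Bucket: 'jamsession-images',
    Key: req.body.photo,
    Body: buffer,
    ContentType: req.file.mimetype // assumption: lets browsers render the object directly
  }).promise();

  next();
};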
I have a Program model, and the program has an image attribute which I upload with multer-s3 when creating the Program.
The challenge I am facing now is that when I delete the program, everything gets deleted on my local machine, but I realized that the file (image) still exists in my AWS S3 console. How do I get the file deleted both in my database and on Amazon S3?
Here are my Program routes
This is how I delete my Program
router.delete("/:id/delete", function (req, res) {
const ObjectId = mongoose.Types.ObjectId;
let query = { _id: new ObjectId(req.params.id) };
Program.deleteOne(query, function (err) {
if (err) {
console.log(err);
}
res.send("Success");
});
});
And this is how I create my Program:
router.post("/create", upload.single("cover"), async (req, res, next) => {
const fileName = req.file != null ? req.file.filename : null;
const program = new Program({
programtype: req.body.programtype,
title: req.body.title,
description: req.body.description,
programImage: req.file.location,
});
try {
console.log(program);
const programs = await program.save();
res.redirect("/programs");
} catch {
if (program.programImage != null) {
removeprogramImage(program.programImage);
}
res.render("programs/new");
}
});
Looking through the multer-s3 repo, I can't find anything that mentions deleting from S3. There is this function in the source code, but I can't figure out how to use it.
You could try using the AWS SDK directly via deleteObject:
const s3 = new aws.S3({
accessKeyId: 'access-key-id',
secretAccessKey: 'access-key',
Bucket: 'bucket-name',
});
s3.deleteObject({ Bucket: 'bucket-name', Key: 'image.jpg' }, (err, data) => {
console.error(err);
console.log(data);
});
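For example, wiring deleteObject into the delete route from the question might look like the sketch below. This is only an illustration: it assumes the S3 key can be recovered as the last path segment of programImage and that 'bucket-name' matches your multer-s3 configuration.
router.delete("/:id/delete", async function (req, res) {
  try {
    const program = await Program.findById(req.params.id);
    if (!program) return res.status(404).send("Program not found");

    // Assumption: the S3 key is the last path segment of the stored image URL
    const key = program.programImage.split("/").pop();

    await s3.deleteObject({ Bucket: "bucket-name", Key: key }).promise();
    await Program.deleteOne({ _id: program._id });

    res.send("Success");
  } catch (err) {
    console.log(err);
    res.status(500).send("Error deleting program");
  }
});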
I had exactly the same problem ("the file (image) still exists on my AWS S3 console"). It can happen because you pass the image location instead of the image name.
When uploading the image to AWS, here is the response:
{
fieldname: 'name',
originalname: 'apple.png',
encoding: '7bit',
mimetype: 'image/png',
size: 59654,
bucket: 'my-bucket-name',
key: 'apple-1426277135446.png', // => what I needed to pass as the Key
acl: 'public-read',
contentType: 'application/octet-stream',
contentDisposition: null,
storageClass: 'STANDARD',
serverSideEncryption: null,
metadata: null,
location: 'https://my-bucket-name.Xx.xu-eXst-3.amazonaws.com/apple-1426277135446.png', // => this is what I was passing to deleteObject as the Key
etag: '"CXXFE*#&SHFLSKKSXX"',
versionId: undefined
}
My problem was that I was passing the image location instead of the image name to the deleteObject function.
s3.deleteObject({ Bucket: 'bucket-name', Key: 'image.jpg' }, (err, data) => {
  // Key has to be the filename with its extension, without the
  // URL prefix like: https://my-bucket-name.s3.ff-North-1.amazonaws.com/
  console.error(err);
  console.log(data);
});
So eventually I could extract the name of the file (image), with its extension, and pass it to the function above.
Here is the helper I used, taken from this answer:
function parseUrlFilename(url, defaultFilename = null) {
  // e.g. 'https://my-bucket-name.Xx.xu-eXst-3.amazonaws.com/apple-1426277135446.png'
  // No need to change "https://example.com"; it's only present to allow for processing relative URLs.
  let filename = new URL(url, "https://example.com").href.split("#").shift().split("?").shift().split("/").pop();
  if (!filename) {
    if (defaultFilename) {
      filename = defaultFilename;
    } else {
      // No default filename provided; use a pseudorandom string.
      filename = Math.random().toString(36).substr(2, 10);
    }
  }
  // result: 'apple-1426277135446.png'
  return filename;
}
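Putting it together, a usage sketch (the bucket name is a placeholder, and program.programImage is the stored URL from the question above) could be:
const key = parseUrlFilename(program.programImage); // e.g. 'apple-1426277135446.png'
s3.deleteObject({ Bucket: 'my-bucket-name', Key: key }, (err, data) => {
  if (err) console.error(err);
  else console.log('deleted', key);
});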
I had exactly the same problem and fixed it with the following code:
s3.deleteObjects(
{
Bucket: 'uploads-images',
Delete: {
Objects: [{ Key: 'product-images/slider-image.jpg' }],
Quiet: false,
},
},
function (err, data) {
if (err) console.log('err ==>', err);
console.log('delete successfully', data);
return res.status(200).json(data);
}
);
This works exactly for me.
Example of deleting a file by its URL (file location) on the Amazon server.
This code lets you get the fileKey from the URL.
First you need to install urldecode:
npm i urldecode
// Assumption: decoder comes from the urldecode package, e.g. const decoder = require('urldecode')
public async deleteFile(location: string) {
let fileKey = decoder(location)
const datas = fileKey.split('amazonaws.com/')
fileKey = datas.pop();
const params = {
Bucket: 'Your Bucket',
Key: fileKey,
};
await this.AWS_S3.deleteObject(params).promise();
}
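A call site, assuming it runs inside the same class and using an illustrative URL, might then be:
// e.g. inside the handler that deletes the database record
await this.deleteFile('https://your-bucket.s3.eu-west-3.amazonaws.com/apple-1426277135446.png');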
I'm trying to upload a base64 encoded image to S3 through this route, but the callbacks get completely ignored and the code jumps straight to res.json("SUCCESS");
route
AWS.config.update({
accessKeyId: "xxxxxxxxxxxxxx",
secretAccessKey: "xxxxxxxxxxxxxxxxxxxxxx",
region: "us-east-1"
});
const s3 = new AWS.S3();
....
router.post("/imageupload", async (req, res) => {
const base64 = req.body.base64;
try {
const params = {
Bucket: process.env.bucketName,
Key: "images/newImage",
Body: base64
};
await s3.putObject(params, function(err, data) {
if (err) res.json(err);
else res.json(data);
});
res.json("SUCCESS");
} catch (e) {
console.log(e.message);
res.status(500).json(e.message);
}
});
Any help is much appreciated thanks!
EDIT FIXED:
I figured out what the problem was:
I had recently reformatted my computer, which meant I had to reinstall the AWS CLI and reconfigure my AWS credentials.
That was it.
From the AWS documentation on using promises:
var s3 = new AWS.S3({apiVersion: '2006-03-01', region: 'us-west-2'});
var params = {
Bucket: 'bucket',
Key: 'example2.txt',
Body: 'Uploaded text using the promise-based method!'
};
var putObjectPromise = s3.putObject(params).promise();
putObjectPromise.then(function(data) {
console.log('Success');
}).catch(function(err) {
console.log(err);
});
You can also promisify all functions by using a library such as bluebird
AWS.config.setPromisesDependency(require('bluebird'));
Here's an example using your code
router.post("/imageupload", async (req, res) => {
const base64 = req.body.base64;
try {
const params = {
Bucket: process.env.bucketName,
Key: "images/newImage",
Body: base64
};
const data = await s3.putObject(params).promise()
res.json(data);
} catch (e) {
console.log(e.message);
res.status(500).json(e.message);
}
});
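One extra point, not part of the original answer: if req.body.base64 is a data-URI string and you upload it as-is, S3 stores it as text rather than a viewable image. A sketch of a common follow-up, assuming the data-URI format shown in the comment and placed inside the same try block, is to decode it into a Buffer and set ContentType:
// Assumption: req.body.base64 looks like "data:image/png;base64,iVBORw0KGgo..."
const [, contentType, rawBase64] = base64.match(/^data:(.+);base64,(.+)$/) || [];
const body = Buffer.from(rawBase64 || base64, 'base64');

const data = await s3.putObject({
  Bucket: process.env.bucketName,
  Key: "images/newImage",
  Body: body,
  ContentType: contentType || 'application/octet-stream'
}).promise();
res.json(data);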
I am trying to upload a file to AWS S3 using putObject, but it results in files of 0 byte size.
I do get successful response back from the putObject call.
Node.js code:
const aws = require("aws-sdk");
const s3 = new aws.S3();
module.exports = {
upload: function(req, res, next) {
console.log("Going to upload");
console.log(req.files);
let uploadFile = req.files.file;
const s3PutParams = {
Bucket: process.env.S3_BUCKET_NAME,
Key: uploadFile.name,
Body: uploadFile.data,
ACL: "public-read"
};
const s3GetParams = {
Bucket: process.env.S3_BUCKET_NAME,
Key: uploadFile.name
};
console.log(s3PutParams);
s3.putObject(s3PutParams, function(err, response) {
if (err) {
console.error(err);
} else {
console.log("Response is", response);
var url = s3.getSignedUrl("getObject", s3GetParams);
console.log("The URL is", url);
res.json({
returnedUrl: url,
publicUrl: `https://${process.env.S3_BUCKET_NAME}.s3.amazonaws.com/${uploadFile.name}`
});
}
});
}
};
Testing through POSTMAN:
Backend Console log
Can anyone help me in figuring out what is wrong?
EDIT on 11/20:
#EmmanuelNK helped in spotting the fact that Buffer.byteLength(req.files.file.data) is 0. He had the below questions:
Are you trying to write the whole buffer into memory or are you trying to stream it to s3?
Sorry if the answer is not to the point, still getting my feet wet.
Basically I want to upload an image to S3 and then later use that URL to show it on a webpage; in other words, something like Photobucket.
How are you using upload?
For now I am just testing my backend code (posted in the question) using Postman. Once I get that going, I will have a file upload form on the front end calling this route.
Is that helpful? Thanks in advance for your help.
If you're using express-fileupload as the file-upload middleware and you've set the useTempFiles option to true, keep in mind that the file's data buffer will be empty (check the usage docs), which matches the issue you're facing. To get around this, simply read the temp file once more to get the intended file buffer.
import fs from 'fs';
// OR
const fs = require('fs');
// in your route
let uploadFile = req.files.file;
// THIS
fs.readFile(uploadFile.tempFilePath, (err, uploadedData) => {
if (err) { throw err; }
const s3PutParams = {
Bucket: process.env.S3_BUCKET_NAME,
Key: uploadFile.name,
Body: uploadedData, // <--- THIS
ACL: "public-read"
};
const s3GetParams = {
Bucket: process.env.S3_BUCKET_NAME,
Key: uploadFile.name
};
console.log(s3PutParams);
s3.putObject(s3PutParams, function(err, response) {
if (err) {
console.error(err);
throw err;
} else {
console.log("Response is", response);
var url = s3.getSignedUrl("getObject", s3GetParams);
console.log("The URL is", url);
res.json({
returnedUrl: url,
publicUrl: `https://${process.env.S3_BUCKET_NAME}.s3.amazonaws.com/${uploadFile.name}`
});
}
});
});
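For context, the middleware setup this answer assumes (useTempFiles and tempFileDir are real express-fileupload options; the temp directory is just an example) would look something like:
const express = require('express');
const fileUpload = require('express-fileupload');

const app = express();
app.use(fileUpload({
  useTempFiles: true,   // stream uploads to disk instead of buffering them in memory
  tempFileDir: '/tmp/'  // req.files.<field>.tempFilePath will point inside this directory
}));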
Please let me know how to retrieve an image from S3 with Node.js. Honestly, I could upload an image to S3 with Node.js as follows, but the problem is how to retrieve the image from S3.
router.get('/image/:imageId', function (req, res, next) {
// ????
});
var s3 = new aws.S3({ accessKeyId: config.awsAccessId, secretAccessKey: config.awsAccessKey});
var upload = multer({
storage: multerS3({
s3: s3,
bucket: config.bucket,
key: function (req, file, cb) {
cb(null, file.originalname);
}
})
});
router.post('/upload/:id', upload.array('file', 3), function(req, res, next) {
res.send('Successfully uploaded ' + req.files.length + ' files!');
});
I've finally found it:
var params = { Bucket: config.bucket, Key: req.params.imageId };
s3.getObject(params, function(err, data) {
  if (err) return next(err); // bail out if the object cannot be fetched
  res.writeHead(200, {'Content-Type': 'image/jpeg'});
res.write(data.Body, 'binary');
res.end(null, 'binary');
});
If you use Lambda with API Gateway to retrieve images, then there is no need to use security keys, as long as the Lambda role has the appropriate permissions.
Read an image from the bucket and send it as base64, to use it directly in the src of an image tag in HTML.
const AWS = require('aws-sdk');
//*/ get reference to S3 client
var s3 = new AWS.S3();
exports.handler = (event, context, callback) => {
var params = {
"Bucket": "bucket-name",
"Key": "object-name"
};
s3.getObject(params, function(err, data){
if(err) {
callback(err, null);
} else {
let image = Buffer.from(data.Body).toString('base64');
image = "data:"+data.ContentType+";base64,"+image;
let response = {
"statusCode": 200,
"headers": {
"Access-Control-Allow-Origin": "*",
'Content-Type': data.ContentType
},
"body":image,
"isBase64Encoded": true
};
callback(null, response);
}
});
};
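On the browser side, a minimal consumption sketch (the API Gateway URL is a placeholder, and it assumes the endpoint ends up returning the data-URI string as plain text, as the handler above intends) could be:
// Browser-side: fetch the endpoint and drop the returned data URI into an <img>
fetch('https://your-api-id.execute-api.us-east-1.amazonaws.com/prod/image')
  .then(res => res.text())          // body is already a "data:<type>;base64,..." string
  .then(dataUri => { document.getElementById('photo').src = dataUri; })
  .catch(console.error);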
You're looking for the getObject() method.
Assuming that you are using the aws-sdk, you can use the getObject method.
Here is sample code:
exports.getObjects = function (req, res) {
var item = req.body;
var params = { Bucket: req.params.bucketName, Key: 'keyname'}; // keyname can be a filename
s3.getObject(params, function (err, data) {
if (err) {
return res.send({ "error": err });
}
res.send({ data });
});
}
This link may be helpful to you.
A better and faster approach is piping the stream to the response. This works with the Minio S3 client, but I believe it also works with the AWS JavaScript client.
const Minio = require('minio');
const s3Client = new Minio.Client({
endPoint: 'ep',
accessKey: 'ak',
secretKey: 'sk'
});
router.get('/image/:imageId', (req, res) => {
const { imageId } = req.params;
s3Client.getObject('bucket', imageId, (err, stream) => {
if (err) return res.status(500).send(err);
const contentType = stream.headers['content-type'];
contentType && res.setHeader('Content-Type', contentType);
stream.pipe(res);
});
});
This is what I use with aws-sdk
const params = { Bucket: "YOUR_BUCKET_NAME", Key: "YOUR_FILENAME"};
s3.getObject(params).createReadStream().pipe(res);
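If you want the route to fail gracefully (for example when the key does not exist), a variant with an error listener on the read stream, sketched under the same placeholder bucket name, could be:
router.get('/image/:imageId', (req, res) => {
  const params = { Bucket: 'YOUR_BUCKET_NAME', Key: req.params.imageId };
  s3.getObject(params)
    .createReadStream()
    .on('error', err => res.status(err.statusCode || 500).send(err.message)) // e.g. NoSuchKey -> 404
    .pipe(res);
});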
I am attempting to read a file that is in an AWS S3 bucket using:
fs.readFile(file, function (err, contents) {
var myLines = contents.Body.toString().split('\n')
})
I've been able to download and upload a file using the node aws-sdk, but I am at a loss as to how to simply read it and parse the contents.
Here is an example of how I am reading the file from s3:
var s3 = new AWS.S3();
var params = {Bucket: 'myBucket', Key: 'myKey.csv'}
var s3file = s3.getObject(params)
You have a couple options. You can include a callback as a second argument, which will be invoked with any error message and the object. This example is straight from the AWS documentation:
s3.getObject(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
Alternatively, you can convert the output to a stream. There's also an example in the AWS documentation:
var s3 = new AWS.S3({apiVersion: '2006-03-01'});
var params = {Bucket: 'myBucket', Key: 'myImageFile.jpg'};
var file = require('fs').createWriteStream('/path/to/file.jpg');
s3.getObject(params).createReadStream().pipe(file);
This will do it:
new AWS.S3().getObject({ Bucket: this.awsBucketName, Key: keyName }, function(err, data)
{
if (!err)
console.log(data.Body.toString());
});
Since you seem to want to process an S3 text file line by line, here is a Node version that uses the standard readline module and AWS's createReadStream():
const readline = require('readline');
const rl = readline.createInterface({
input: s3.getObject(params).createReadStream()
});
rl.on('line', function(line) {
console.log(line);
})
.on('close', function() {
});
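If you also need to know when the whole file has been processed (for example before responding to a request), one way, sketched with Node's built-in events.once helper and a hypothetical processS3File wrapper, is:
const { once } = require('events');
const readline = require('readline');

async function processS3File(s3, params) {
  const rl = readline.createInterface({
    input: s3.getObject(params).createReadStream(),
    crlfDelay: Infinity // treat \r\n as a single line break
  });

  rl.on('line', line => console.log(line));

  await once(rl, 'close'); // resolves once the stream has been fully consumed
}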
If you are looking to avoid the callbacks, you can take advantage of the SDK's .promise() function like this:
const s3 = new AWS.S3();
const params = {Bucket: 'myBucket', Key: 'myKey.csv'}
const response = await s3.getObject(params).promise() // await the promise
const fileContent = response.Body.toString('utf-8'); // can also do 'base64' here if desired
I'm sure the other ways mentioned here have their advantages but this works great for me. Sourced from this thread (see the last response from AWS): https://forums.aws.amazon.com/thread.jspa?threadID=116788
Here is the example I used to retrieve and parse JSON data from S3:
var params = {Bucket: BUCKET_NAME, Key: KEY_NAME};
new AWS.S3().getObject(params, function(err, json_data)
{
if (!err) {
var json = JSON.parse(Buffer.from(json_data.Body).toString("utf8"));
// PROCESS JSON DATA
......
}
});
I couldn't figure out why yet, but the createReadStream/pipe approach didn't work for me. I was trying to download a large CSV file (300 MB+) and I got duplicated lines. It seemed to be a random issue; the final file size varied with each attempt to download it.
I ended up using another way, based on AWS JS SDK examples:
var s3 = new AWS.S3();
var params = {Bucket: 'myBucket', Key: 'myImageFile.jpg'};
var file = require('fs').createWriteStream('/path/to/file.jpg');
s3.getObject(params).
on('httpData', function(chunk) { file.write(chunk); }).
on('httpDone', function() { file.end(); }).
send();
This way, it worked like a charm.
I prefer Buffer.from(data.Body).toString('utf8'). It supports encoding parameters. With other AWS services (ex. Kinesis Streams) someone may want to replace 'utf8' encoding with 'base64'.
new AWS.S3().getObject(
{ Bucket: this.awsBucketName, Key: keyName },
function(err, data) {
if (!err) {
const body = Buffer.from(data.Body).toString('utf8');
console.log(body);
}
}
);
I had exactly the same issue when downloading very large files from S3.
The example solution from the AWS docs just does not work:
var file = fs.createWriteStream(options.filePath);
file.on('close', function(){
if(self.logger) self.logger.info("S3Dataset file download saved to %s", options.filePath );
return callback(null,done);
});
s3.getObject({ Bucket: this._options.s3.Bucket, Key: documentKey }).createReadStream().on('error', function(error) {
if(self.logger) self.logger.error("S3Dataset download error key:%s error:%#", options.fileName, error);
return callback(error);
}).pipe(file);
While this solution will work:
var file = fs.createWriteStream(options.filePath);
s3.getObject({ Bucket: this._options.s3.Bucket, Key: documentKey })
.on('error', function(error) {
if(self.logger) self.logger.error("S3Dataset download error key:%s error:%#", options.fileName, error);
return callback(error);
})
.on('httpData', function(chunk) { file.write(chunk); })
.on('httpDone', function() {
file.end();
if(self.logger) self.logger.info("S3Dataset file download saved to %s", options.filePath );
return callback(null,done);
})
.send();
The createReadStream attempt just does not fire the end, close or error callback for some reason. See here about this.
I'm also using that solution for downloading and gunzipping archives, since the first one (the AWS example) does not work in this case either:
var gunzip = zlib.createGunzip();
var file = fs.createWriteStream( options.filePath );
s3.getObject({ Bucket: this._options.s3.Bucket, Key: documentKey })
.on('error', function (error) {
if(self.logger) self.logger.error("%#",error);
return callback(error);
})
.on('httpData', function (chunk) {
file.write(chunk);
})
.on('httpDone', function () {
file.end();
if(self.logger) self.logger.info("downloadArchive downloaded %s", options.filePath);
fs.createReadStream( options.filePath )
.on('error', (error) => {
return callback(error);
})
.on('end', () => {
if(self.logger) self.logger.info("downloadArchive unarchived %s", options.fileDest);
return callback(null, options.fileDest);
})
.pipe(gunzip)
.pipe(fs.createWriteStream(options.fileDest))
})
.send();
With the new version of the SDK, the accepted answer does not work: it does not wait for the object to be downloaded. The following code snippet will help with the new version:
// dependencies
const AWS = require('aws-sdk');
// get reference to S3 client
const s3 = new AWS.S3();
exports.handler = async (event, context, callback) => {
var bucket = "TestBucket"
var key = "TestKey"
try {
const params = {
Bucket: bucket,
Key: key
};
var theObject = await s3.getObject(params).promise();
} catch (error) {
console.log(error);
return;
}
}
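After the await resolves, theObject.Body is a Buffer, so a follow-up for a text object (an assumption; keep the Buffer as-is for binary data) might simply be:
const text = theObject.Body.toString('utf-8');
console.log(text);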
If you want to save memory and obtain each row as a JSON object, you can use fast-csv to create a read stream and read each row as a JSON object, as follows:
const csv = require('fast-csv');
const AWS = require('aws-sdk');
const credentials = new AWS.Credentials("ACCESSKEY", "SECRETEKEY", "SESSIONTOKEN");
AWS.config.update({
credentials: credentials, // credentials required for local execution
region: 'your_region'
});
const dynamoS3Bucket = new AWS.S3();
const stream = dynamoS3Bucket.getObject({ Bucket: 'your_bucket', Key: 'example.csv' }).createReadStream();
var parser = csv.fromStream(stream, { headers: true }).on("data", function (data) {
parser.pause(); //can pause reading using this at a particular row
parser.resume(); // to continue reading
console.log(data);
}).on("end", function () {
console.log('process finished');
});
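Note that csv.fromStream is the older fast-csv API; newer releases expose parseStream instead. A sketch of the same idea against the newer API (check the version you have installed) would be:
const parser = csv.parseStream(stream, { headers: true })
  .on('data', row => console.log(row))   // each row arrives as a JSON object
  .on('end', () => console.log('process finished'));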
var fileStream = fs.createWriteStream('/path/to/file.jpg');
var s3Stream = s3.getObject({Bucket: 'myBucket', Key: 'myImageFile.jpg'}).createReadStream();
// Listen for errors returned by the service
s3Stream.on('error', function(err) {
// NoSuchKey: The specified key does not exist
console.error(err);
});
s3Stream.pipe(fileStream).on('error', function(err) {
// capture any errors that occur when writing data to the file
console.error('File Stream:', err);
}).on('close', function() {
console.log('Done.');
});
Reference: https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/requests-using-stream-objects.html