I'm using ImageMagick to make thumbnails of my photos on S3.
This is the flow:
Get the image from S3.
Make thumbnail from the data.Body.
Put the thumbnail on S3.
Here is the makeThumbnail function:
function makeThumbnail(image) {
  var defer = q.defer();
  im.resize({
    srcData: image.Body,
    width: 256
  }, function (err, stdout) {
    if (err) {
      defer.reject(err);
    } else {
      image.Key = image.Key.replace(/^images/, "thumbs");
      image.Body = new Buffer(stdout, 'binary');
      defer.resolve(image);
    }
  });
  return defer.promise;
}
The image object has the form that the S3 SDK expects:
var image = {
  Bucket: 'bucket name',
  Key: 'object key',
  Body: 'body',
  ContentType: 'image/png'
};
Yet when I put the thumbnail on S3, the MIME type is set to application/octet-stream for some reason. When I download the thumbnail it opens like any other picture, but browsers do not treat it as an image, and that is a problem for me.
What causes this issue, and how can I solve it?
Thanks
The docs for node-imagemagick's resize() say the default output format is jpg, so maybe your output is not a PNG like you think.
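If that's the case, here is a minimal sketch of forcing PNG output, assuming node-imagemagick's resize() accepts the format option listed in its README; the rest of the handling stays as in your makeThumbnail():
im.resize({
  srcData: image.Body,
  width: 256,
  format: 'png' // assumed option from the node-imagemagick README; the default is 'jpg'
}, function (err, stdout) {
  // ... same error handling and Buffer wrapping as in makeThumbnail()
});
It's also worth double-checking that the ContentType from your image object actually reaches the putObject call; if it is dropped, S3 falls back to a generic octet-stream type.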
We use the im-resize package, which takes input and output parameters; we typically convert .jpeg images uploaded from a web form into .png output files. (We read from the file system after conversion, not stream buffers, just FYI.)
Our code is something like this:
var resize = require('im-resize');
var metadata = require('im-metadata');
var AWS = require('aws-sdk');

metadata(data.params.file.path, {}, function(error, metadataResults) {
  //error checking
  var image = {
    //set from metadataResults
  };
  var output = {
    versions: [{
      //set as desired
    }]
  };
  resize(image, output, function(error, versions) {
    var filename = 'images/' + uuid.v4() + '.png'; //this was enough for us
    var fileStream = fs.createReadStream(versions[0].path);
    fileStream.on('error', function(err) {
      //handle error
    });
    fileStream.on('open', function() {
      var s3 = new AWS.S3();
      s3.putObject({
        Bucket: 'bucketname',
        Key: filename,
        Body: fileStream
      }, function(err) {
        //handle errors
        //know the file has been saved to S3
        //return the HTTP response or whatever
      });
    });
  });
});
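If the thumbnail is still served as application/octet-stream, setting ContentType explicitly on the putObject call should pin it down. A hedged variant of the call above, with the same placeholder bucket and filename:
s3.putObject({
  Bucket: 'bucketname',    // placeholder, as above
  Key: filename,
  Body: fileStream,
  ContentType: 'image/png' // matches the .png output version
}, function(err) {
  //handle errors
});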
I am able to download the file from the S3 bucket like so:
const fileStream = s3.getObject(options).createReadStream();
const writableStream = createWriteStream(
"./files/master_driver_profile_pic/image.jpeg"
);
fileStream.pipe(fileStream).pipe(writableStream);
But the image is not getting written properly. Only a little bit of the image is visible and the rest is blank.
I think you should first createWriteStream and then createReadStream. (Check the docs)
var s3 = new AWS.S3();
var params = {Bucket: 'myBucket', Key: 'myImageFile.jpg'};
var file = require('fs').createWriteStream('/path/to/file.jpg');
s3.getObject(params).createReadStream().pipe(file);
OR
you can go without streams:
// Download file
const content = (await s3.getObject(params).promise()).Body;
// Write file
fs.writeFile(downloadPath, content, (err) => {
  if (err) { console.log(err); }
});
I have an app where users can upload a ZIP archive of resources. My app handles the upload and saves it to S3. At some point I want to run a transformation that will read this S3 object, unzip it, and write the contents to a new S3 bucket. This is all happening in a Node service.
I am using the unzipper library to handle unzipping. Here is my initial code.
async function downloadFromS3() {
  let s3 = new AWS.S3();
  try {
    const object = s3
      .getObject({
        Bucket: "zip-bucket",
        Key: "Archive.zip"
      })
      .createReadStream();
    object.on("error", err => {
      console.log(err);
    });
    await streaming_unzipper(object, s3);
  } catch (e) {
    console.log(e);
  }
}
async function streaming_unzipper(s3ObjectStream, s3) {
  await s3.createBucket({ Bucket: "unzip-bucket" }).promise();
  const unzipStream = s3ObjectStream.pipe(unzipper.Parse());
  unzipStream.pipe(
    stream.Transform({
      objectMode: true,
      transform: function(entry, e, next) {
        const fileName = entry.path;
        const type = entry.type; // 'Directory' or 'File'
        const size = entry.vars.uncompressedSize; // There is also compressedSize
        if (type === "File") {
          s3.upload(
            { Bucket: "unzip-bucket", Body: entry, Key: entry.path },
            {},
            function(err, data) {
              if (err) console.error(err);
              console.log(data);
              entry.autodrain();
            }
          );
          next();
        } else {
          entry.autodrain();
          next();
        }
      }
    })
  );
}
This code works, but I feel like it could be optimized. Ideally I would like to pipe the download stream -> unzipper stream -> uploader stream, so that chunks are uploaded to S3 as they get unzipped, instead of waiting for the full unzip to finish and then uploading.
The problem I am running into is that I need the file name (to set as the S3 key) before I can start the upload, and I only have it after unzipping.
Is there any good way to create a streaming upload to S3 that is initiated with a temporary ID and gets rewritten with the final name after the full stream is finished?
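For reference, here is a minimal sketch of the fully streamed pipeline, assuming unzipper's Parse stream exposes the promise() completion helper described in its README and that s3.upload accepts a stream Body; bucket names are placeholders and error handling is omitted:
// Sketch: stream each unzipped entry straight into s3.upload as it arrives.
const AWS = require("aws-sdk");
const unzipper = require("unzipper");

async function streamUnzipToS3(s3, srcBucket, srcKey, dstBucket) {
  const zipStream = s3
    .getObject({ Bucket: srcBucket, Key: srcKey })
    .createReadStream();

  const uploads = [];
  await zipStream
    .pipe(unzipper.Parse())
    .on("entry", entry => {
      if (entry.type === "File") {
        // entry.path is known as soon as the entry header is parsed,
        // so the upload can start while the entry is still inflating.
        uploads.push(
          s3.upload({ Bucket: dstBucket, Key: entry.path, Body: entry }).promise()
        );
      } else {
        entry.autodrain();
      }
    })
    .promise(); // assumed: unzipper's Parse stream resolves this when parsing finishes

  await Promise.all(uploads);
}
Since each entry's path is available the moment the entry is emitted, the temporary-ID-then-rename step may not be necessary at all.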
I'm letting users upload multiple images directly to Amazon-S3 using Multer-S3 and then displaying those images on the front end via a loop. All works perfectly.
However, when the images are uploaded via mobile (an image taken on an iPhone or Android), the orientation is correct on mobile but is NOT correct on desktops. Major problem.
This is due to the images' EXIF data, I believe.
It seems like ImageMagick or Kraken JS (https://kraken.io/docs/storage-s3) might be a way to solve it, but for the life of me I cannot figure out how to implement either with the way I'm uploading and showing images, shown below.
How would I change my code below to auto-orient the images? Note: It must work for multiple images.
Thanks for any help!
Here's how I'm letting users upload multiple images at a time directly to Amazon-S3:
aws.config.update({
  secretAccessKey: 'AccessKey',
  accessKeyId: 'KeyID',
  region: 'us-east-2'
});

var s3 = new aws.S3();

var storage = multerS3({
  limits: { files: 25 },
  s3: s3,
  bucket: 'files',
  key: function (req, file, cb) {
    var fileExtension = file.originalname.split(".")[1];
    var path = "uploads/" + req.user._id + Date.now() + "." + fileExtension;
    cb(null, path);
  },
});
var upload = multer({storage: storage}).any("images", 25);
router.post("/", middleware.isLoggedIn, function(req, res, next){
  upload(req, res, function(err) {
    if (err) {
      console.log(err);
      res.redirect('/');
    }
    Listings.findById(req.params.id, function(err, foundListings){
      var allimages = [];
      if (typeof req.files !== "undefined") {
        for (var i = 0; i < req.files.length; i++) {
          allimages.push(req.files[i].key);
        }
      }
      var currentimages = allimages;
      var newListings = {currentimages: currentimages};
      //Removed the other Model aspects
      Listings.create(newListings, function(err, newlyCreated){
        if (err) {
          console.log(err);
        } else {
          res.redirect("/listings");
        }
      });
    });
  });
});
How I'm displaying the images on the front end. Listings.currentimages is an array containing all image links.
app.locals.awspath = "https://s3.us-east-2.amazonaws.com/myfiles/";
// awspath is the file path to my Amazon-S3 path
<div id='allimages'>
  <% for(var i = 0; i < listings.currentimages.length; i++ ) { %>
    <div class='smallerImages'>
      <% var url2 = awspath + listings.currentimages[i] %>
      <img class="small" src="<%= url2 %>">
    </div>
  <% } %>
</div>
The problem is that iOS sets the image's EXIF orientation metadata, which causes this behavior. You can use a library that reads the EXIF metadata and rotates the image for you.
jpeg-autorotate (https://github.com/johansatge/jpeg-autorotate) is a very simple lib and has very nice documentation (you should check it out).
Example
var jo = require('jpeg-autorotate');
var fs = require('fs');

// var options = {quality: 85};
var options = {};
var path = '/tmp/Portrait_8.jpg'; // You can use a Buffer, too

jo.rotate(path, options, function(error, buffer, orientation) {
  if (error) {
    console.log('An error occurred when rotating the file: ' + error.message);
    return;
  }
  console.log('Orientation was: ' + orientation);
  // upload the buffer to S3, save to disk or more ...
  fs.writeFile("/tmp/output.jpg", buffer, function(err) {
    if (err) {
      return console.log(err);
    }
    console.log("The file was saved!");
  });
});
You can find some sample images with different EXIF rotation metadata from here
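If you would rather rotate the images inside the upload route instead of in a Lambda, one possible approach (a sketch only, swapping multer-s3 for multer's memoryStorage so that each file arrives as a buffer; the bucket name and key scheme are placeholders):
// Sketch: rotate one uploaded file's buffer with jpeg-autorotate, then upload it to S3.
var jo = require('jpeg-autorotate');
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

function rotateAndUpload(file, key, callback) {
  jo.rotate(file.buffer, {}, function(error, buffer, orientation) {
    // jpeg-autorotate reports an error when the image needs no rotation,
    // so fall back to the original buffer in that case.
    var body = error ? file.buffer : buffer;
    s3.upload({
      Bucket: 'files',  // placeholder bucket
      Key: key,         // e.g. "uploads/" + req.user._id + Date.now() + ".jpg"
      Body: body,
      ContentType: file.mimetype
    }, callback);
  });
}
You would then call rotateAndUpload for each entry in req.files before saving the keys on the Listing, instead of letting multer-s3 upload the unrotated originals.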
Converted as an AWS Lambda Function
// Name this file index.js and zip it + the node_modules then upload to AWS Lambda
console.log('Loading function');

var aws = require('aws-sdk');
var s3 = new aws.S3({apiVersion: '2006-03-01'});
var jo = require('jpeg-autorotate');

// Rotate an image given a buffer
var autorotateImage = function(data, callback) {
  jo.rotate(data, {}, function(error, buffer, orientation) {
    if (error) {
      console.log('An error occurred when rotating the file: ' + error.message);
      callback(error, null);
    } else {
      console.log('Orientation was: ' + orientation);
      callback(null, buffer);
    }
  });
};
// AWS Lambda runs this on every new file upload to s3
exports.handler = function(event, context, callback) {
  console.log('Received event:', JSON.stringify(event, null, 2));
  // Get the object from the event and show its content type
  var bucket = event.Records[0].s3.bucket.name;
  var key = event.Records[0].s3.object.key;
  s3.getObject({Bucket: bucket, Key: key}, function(err, data) {
    if (err) {
      console.log("Error getting object " + key + " from bucket " + bucket +
        ". Make sure they exist and your bucket is in the same region as this function.");
      callback("Error getting file: " + err, null);
    } else {
      // log the content type, should be an image
      console.log('CONTENT TYPE:', data.ContentType);
      // rotate the image
      autorotateImage(data.Body, function(error, image) {
        if (error) {
          return callback("Error rotating image: " + error, null);
        }
        const params = {
          Bucket: bucket,
          Key: 'rotated/' + key,
          Body: image
        };
        // Upload new image, careful not to upload it in a path that will trigger the function again!
        s3.putObject(params, function (err, data) {
          if (err) {
            callback("Error uploading rotated image: " + err, null);
          } else {
            console.log("Successfully uploaded image on S3", data);
            // call AWS Lambda's callback, function was successful!!!
            callback(null, data);
          }
        });
      });
    }
  });
};
Notes: This function uploads the rotated images to the same bucket, but you can easily change that. If you are just starting with AWS Lambda, I'd suggest you learn more about it (https://www.youtube.com/watch?v=eOBq__h4OJ4, https://www.youtube.com/watch?v=PEatXsXIkLc).
Make sure you have the right permissions (read and write), the correct function trigger, and the correct "Handler" when creating the function! Make sure to check out the function logs in CloudWatch too; they make debugging a lot easier. If it starts timing out, increase the function timeout and its memory.
I want to concatenate files that were uploaded to an Amazon S3 bucket.
How can I do this?
On my local machine, I can do the concatenation using the following code:
var fs = require('fs'),
    files = fs.readdirSync('./files'),
    clips = [],
    stream,
    currentfile,
    dhh = fs.createWriteStream('./concatfile.mp3');

files.forEach(function (file) {
  clips.push(file.substring(0, 6));
});

function main() {
  if (!clips.length) {
    dhh.end("Done");
    return;
  }
  currentfile = './files/' + clips.shift() + '.mp3';
  stream = fs.createReadStream(currentfile);
  stream.pipe(dhh, {end: false});
  stream.on("end", function() {
    main();
  });
}

main();
You can achieve what you want by breaking it into two steps:
Manipulating files on S3
Since S3 is remote file storage, you can't run code on the S3 server to do the operation there (as #Andrey mentioned).
What you will need to do in your code is fetch each input file, process them locally, and upload the results back to S3. Check out the code examples from Amazon:
var s3 = new AWS.S3();
var params = {Bucket: 'myBucket', Key: 'mp3-input1.mp3'};
var file = require('fs').createWriteStream('/path/to/input.mp3');
s3.getObject(params).createReadStream().pipe(file);
At this stage you'll run your concatenation code and upload the results back:
var fs = require('fs');
var zlib = require('zlib');
var body = fs.createReadStream('bigfile.mp3').pipe(zlib.createGzip());
var s3obj = new AWS.S3({params: {Bucket: 'myBucket', Key: 'myKey'}});
s3obj.upload({Body: body}).
  on('httpUploadProgress', function(evt) { console.log(evt); }).
  send(function(err, data) { console.log(err, data); });
Merging two (or more) mp3 files
Since MP3 files include a header that specifies information like the bitrate, simply concatenating them together might introduce playback issues.
See: https://stackoverflow.com/a/5364985/1265980
What you want is to use a tool for that. One approach is to save your input MP3 files in a tmp folder and execute an external program (such as ffmpeg) to change the bitrate, concatenate the files, and fix the header.
Alternatively, you can use a library that allows you to use ffmpeg within Node.js.
In the code example shown below, you can see how to merge files together within the Node API.
ffmpeg('/path/to/part1.avi')
  .input('/path/to/part2.avi')
  .input('/path/to/part2.avi')
  .on('error', function(err) {
    console.log('An error occurred: ' + err.message);
  })
  .on('end', function() {
    console.log('Merging finished !');
  })
  .mergeToFile('/path/to/merged.avi', '/path/to/tempDir');
Here's my quick take on the problem of downloading and processing S3 objects. My example is focused mostly on getting the data local and then processing it once it's all downloaded. I suggest you use one of the ffmpeg approaches mentioned above.
var RSVP = require('rsvp');

var s3 = new AWS.S3();
var bucket = '<your bucket name>';

var getFile = function(key, filePath) {
  return new RSVP.Promise(function(resolve, reject) {
    var file = require('fs').createWriteStream(filePath);
    if (!file) {
      reject('unable to open file');
    }
    s3.getObject({
      Bucket: bucket,
      Key: key
    }).on('httpData', function(chunk) {
      file.write(chunk);
    }).on('httpDone', function() {
      file.end();
      resolve(filePath);
    }).send(); // send() is needed to actually start the request when using event listeners
  });
};
var tempFiles = ['<local temp filename 1>', '<local temp filename 2>'];
var keys = ['<s3 object key 1>', '<s3 object key 2>'];
var promises = [];
for (var i = 0; i < keys.length; ++i) {
  var promise = getFile(keys[i], tempFiles[i]);
  promises.push(promise);
}

RSVP.all(promises).then(function(data) {
  //do something with your files
}).catch(function(error) {
  //handle errors
});
I'm using Node.js to try to push an image to an S3 bucket with the aws-sdk. Currently, it reads a file on the client and then saves it on the server (I'm using the Meteor framework). I'd like to push it to S3 instead of saving it on the Meteor server. When I tried to migrate it over, the images seem to gain about 30% in size once they are on S3. If I try to download them off S3, the images are no longer viewable either, so it looks like the encoding has changed or something.
Here is the code to load the file on the client side:
saveFile = function( blob, name, path, type, callback ) {
  var fileReader = new FileReader();
  var method;
  var encoding = 'binary';
  var type = type || 'binary';
  switch( type ) {
    case 'text':
      method = 'readAsText';
      encoding = 'utf8';
      break;
    case 'binary':
      method = 'readAsBinaryString';
      encoding = 'binary';
      break;
    default:
      method = 'readAsBinaryString';
      encoding = 'binary';
      break;
  }
  // Call the save function on the server after the file has been read.
  fileReader.onload = function( file ) {
    console.log( "File loaded..." );
    Meteor.call( 'saveFile', file.srcElement.result, name, path, encoding, callback );
  }
  // Read the file
  fileReader[ method ]( blob );
}
On the server side:
saveFile: function( file, name, path, encoding ) {
  s3.createBucket({Bucket: bucketName}, function() {
    var params = {Bucket: bucketName, Key: keyName, ContentType: 'binary', ContentEncoding: 'utf8', Body: file};
    s3.putObject(params, function(err, data) {
      if (err)
        console.log(err);
      else
        console.log("Successfully uploaded data to " + bucketName + "/" + keyName);
    });
  });
}
I figured out the solution: it was to wrap the 'file' data in a
new Buffer()
Simple, but oh so difficult to find!!
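For context, a minimal sketch of the fix applied to the server-side method above (bucketName and keyName are the same placeholders from the question):
saveFile: function( file, name, path, encoding ) {
  s3.createBucket({Bucket: bucketName}, function() {
    var params = {
      Bucket: bucketName,
      Key: keyName,
      Body: new Buffer(file, 'binary'), // wrap the binary string in a Buffer before uploading
      ContentType: 'image/png'          // optional: a real MIME type helps browsers; adjust to your file type
    };
    s3.putObject(params, function(err, data) {
      if (err)
        console.log(err);
      else
        console.log("Successfully uploaded data to " + bucketName + "/" + keyName);
    });
  });
}
Note that the ContentType line is a suggested tweak, not part of the original fix; the essential change is the new Buffer(file, 'binary') wrapper.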