Getting csv rows from AWS s3.getObject().createReadStream() in a Node Lambda - node.js

I'm trying to grab a CSV file out of an S3 bucket using a Node Lambda and do database operations with each row. I'm able to log the stream itself, but I don't get anything beyond that before my Lambda closes.
I've been trying different approaches with different npm libraries, but I can never seem to get past obtaining the stream itself.
const AWS = require('aws-sdk');
const csv = require('fast-csv');

class S3Service {
  constructor(s3 = new AWS.S3()) {
    this.s3 = s3;
  }

  async _createReadStream(bucket, key) {
    console.log('getting a stream');
    return this.s3.getObject({ Bucket: bucket, Key: key }).createReadStream();
  }

  async readFileStreamRowByRow(bucket, key) {
    return new Promise(async (resolve, reject) => {
      const rows = [];
      console.log('inside readFileStreamRowByRow');
      const stream = await this._createReadStream(bucket, key);
      console.log('here is the stream', stream);
      stream.pipe(csv.parse({ headers: true }))
        .on('error', error => reject(error)) // reject so callers see stream/parse errors
        .on('data', row => rows.push(row))
        .on('end', () => {
          console.log(rows);
          resolve(rows);
        });
    });
  }
}

To answer my own question: createReadStream() returns a stream instance whether or not S3 has successfully fetched the object. In my case there was a VPC issue, so the Lambda couldn't reach the bucket at all and eventually timed out, but I was still logging a stream object synchronously.
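For anyone hitting the same wall, here is a minimal sketch (assuming aws-sdk v2 and fast-csv, as in the question) that attaches error handlers to both the S3 read stream and the CSV parser, so a network, VPC, or permissions failure rejects the promise instead of the Lambda silently timing out:
const AWS = require('aws-sdk');
const csv = require('fast-csv');

const s3 = new AWS.S3();

function readRows(bucket, key) {
  return new Promise((resolve, reject) => {
    const rows = [];
    s3.getObject({ Bucket: bucket, Key: key })
      .createReadStream()
      .on('error', reject) // surfaces VPC/network/permission errors
      .pipe(csv.parse({ headers: true }))
      .on('error', reject) // surfaces CSV parse errors
      .on('data', (row) => rows.push(row))
      .on('end', () => resolve(rows));
  });
}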

Related

NodeJS: upload image file to s3 bucket using streams [duplicate]

I'm currently making use of a node.js plugin called s3-upload-stream to stream very large files to Amazon S3. It uses the multipart API and for the most part it works very well.
However, this module is showing its age and I've already had to make modifications to it (the author has deprecated it as well). Today I ran into another issue with Amazon, and I would really like to take the author's recommendation and start using the official aws-sdk to accomplish my uploads.
BUT.
The official SDK does not seem to support piping to s3.upload(). The nature of s3.upload is that you have to pass the readable stream as an argument to the upload() call itself.
I have roughly 120+ user code modules that do various file processing, and they are agnostic to the final destination of their output. The engine hands them a pipeable writable output stream, and they pipe to it. I cannot hand them an AWS.S3 object and ask them to call upload() on it without adding code to all the modules. The reason I used s3-upload-stream was that it supported piping.
Is there a way to make aws-sdk s3.upload() something I can pipe the stream to?
Wrap the S3 upload() function with the node.js stream.PassThrough() stream.
Here's an example:
const stream = require('stream');

inputStream
  .pipe(uploadFromStream(s3));

function uploadFromStream(s3) {
  var pass = new stream.PassThrough();
  var params = { Bucket: BUCKET, Key: KEY, Body: pass };

  s3.upload(params, function(err, data) {
    console.log(err, data);
  });

  return pass;
}
A bit of a late answer, but hopefully it helps someone else. You can return both the writable stream and the promise, so you can get the response data when the upload finishes.
const AWS = require('aws-sdk');
const stream = require('stream');

const uploadStream = ({ Bucket, Key }) => {
  const s3 = new AWS.S3();
  const pass = new stream.PassThrough();
  return {
    writeStream: pass,
    promise: s3.upload({ Bucket, Key, Body: pass }).promise(),
  };
};
And you can use the function as follows:
const { writeStream, promise } = uploadStream({Bucket: 'yourbucket', Key: 'yourfile.mp4'});
const readStream = fs.createReadStream('/path/to/yourfile.mp4');
const pipeline = readStream.pipe(writeStream);
Now you can either check the promise:
promise.then(() => {
  console.log('upload completed successfully');
}).catch((err) => {
  console.log('upload failed.', err.message);
});
Or use async/await:
try {
  await promise;
  console.log('upload completed successfully');
} catch (error) {
  console.log('upload failed.', error.message);
}
Or, since stream.pipe() returns the destination stream (the writeStream variable above), allowing for a chain of pipes, we can also use its events:
pipeline.on('close', () => {
  console.log('upload successful');
});
pipeline.on('error', (err) => {
  console.log('upload failed', err.message);
});
In the accepted answer, the function ends before the upload is complete, and thus it's incorrect. The code below pipes correctly from a readable stream.
Upload reference
async function uploadReadableStream(stream) {
  const params = { Bucket: bucket, Key: key, Body: stream };
  return s3.upload(params).promise();
}

async function upload() {
  const readable = getSomeReadableStream();
  const results = await uploadReadableStream(readable);
  console.log('upload complete', results);
}
You can also go a step further and output progress info using ManagedUpload, like so:
const manager = s3.upload(params);
manager.on('httpUploadProgress', (progress) => {
  console.log('progress', progress); // { loaded: 4915, total: 192915, part: 1, key: 'foo.jpg' }
});
ManagedUpload reference
A list of available events
I think it's worth updating the answer for AWS SDK v3 :).
The S3 client doesn't have an upload() function anymore; the @aws-sdk/lib-storage package is suggested instead, as per https://github.com/aws/aws-sdk-js-v3/blob/main/lib/lib-storage/README.md
Hence the resulting snippet should look like this:
import { S3Client } from '@aws-sdk/client-s3';
import { Upload } from '@aws-sdk/lib-storage';
import * as stream from 'stream';
...
const client = new S3Client({
  credentials: {
    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
  },
  region: process.env.AWS_DEFAULT_REGION,
});
...
async function uploadStream(readableStream) {
  const Key = 'filename.pdf';
  const Bucket = 'bucket-name';
  const passThroughStream = new stream.PassThrough();
  let res;

  try {
    const parallelUploads3 = new Upload({
      client,
      params: {
        Bucket,
        Key,
        Body: passThroughStream,
        ACL: 'public-read',
      },
      queueSize: 4,
      partSize: 1024 * 1024 * 5,
      leavePartsOnError: false,
    });

    readableStream.pipe(passThroughStream);
    res = await parallelUploads3.done();
  } catch (e) {
    console.log(e);
  }

  return res;
}
None of the answers worked for me because I wanted to:
Pipe into s3.upload()
Pipe the result of s3.upload() into another stream
The accepted answer doesn't do the latter. The others rely on the promise API, which is cumbersome when working with stream pipes.
This is my modification of the accepted answer.
const { S3 } = require('aws-sdk');
const stream = require('stream');

const s3 = new S3();

function writeToS3({ Key, Bucket }) {
  const Body = new stream.PassThrough();

  s3.upload({
    Body,
    Key,
    Bucket
  })
    .on('httpUploadProgress', progress => {
      console.log('progress', progress);
    })
    .send((err, data) => {
      if (err) {
        Body.destroy(err);
      } else {
        console.log(`File uploaded and available at ${data.Location}`);
        Body.destroy();
      }
    });

  return Body;
}

const pipeline = myReadableStream.pipe(writeToS3({ Key, Bucket }));

pipeline.on('close', () => {
  // upload finished, do something else
});
pipeline.on('error', () => {
  // upload wasn't successful. Handle it
});
TypeScript solution:
This example uses:
import * as AWS from "aws-sdk";
import * as fsExtra from "fs-extra";
import * as zlib from "zlib";
import * as stream from "stream";
And the async function:
public async saveFile(filePath: string, s3Bucket: AWS.S3, key: string, bucketName: string): Promise<boolean> {
  const uploadStream = (S3: AWS.S3, Bucket: string, Key: string) => {
    const passT = new stream.PassThrough();
    return {
      writeStream: passT,
      promise: S3.upload({ Bucket, Key, Body: passT }).promise(),
    };
  };

  const { writeStream, promise } = uploadStream(s3Bucket, bucketName, key);
  fsExtra.createReadStream(filePath).pipe(writeStream); // NOTE: you can gzip-compress by inserting .pipe(zlib.createGzip()) before .pipe(writeStream)

  let output = true;
  await promise.catch((reason) => { output = false; console.log(reason); });
  return output;
}
Call this method somewhere like:
let result = await saveFile(testFilePath, someS3Bucket, someKey, someBucketName);
The thing to note in the accepted answer above is this:
You need to return pass from the function if you are piping into it, like
fs.createReadStream(<filePath>).pipe(anyUploadFunction())
function anyUploadFunction () {
  let pass = new stream.PassThrough();
  return pass; // <- Returning this pass is important for the stream to understand where it needs to write to.
}
Otherwise it will silently move on without throwing an error, or it will throw TypeError: dest.on is not a function, depending on how you have written the function.
Following the other answers, and using the latest AWS SDK for Node.js, there's a much cleaner and simpler solution, since the s3 upload() function accepts a stream directly, using await syntax and S3's promise:
var model = await s3Client.upload({
  Bucket: bucket,
  Key: key,
  ContentType: yourContentType,
  Body: fs.createReadStream('/path/to/file')
}).promise();
For those who see a zero-byte file end up on S3 when they use the S3 API upload function (@Radar155 and @gabo): I also had this problem.
Create a second PassThrough stream, pipe all data from the first to the second, and pass the reference to that second stream to S3. You can do this in a couple of different ways. A possibly dirty way is to listen for the "data" event on the first stream and then write that same data to the second stream, and similarly for the "end" event, just call the end function on the second stream. I've no idea whether this is a bug in the AWS API, the version of Node, or some other issue, but it worked around the problem for me.
Here is how it might look:
var PassThroughStream = require('stream').PassThrough;

var srcStream = new PassThroughStream();
var rstream = fs.createReadStream('Learning/stocktest.json');
var sameStream = rstream.pipe(srcStream);
// interesting note: (srcStream == sameStream) at this point
var destStream = new PassThroughStream();
// call your s3.upload function here - passing in the destStream as the Body parameter

srcStream.on('data', function (chunk) {
  destStream.write(chunk);
});
srcStream.on('end', function () {
  destStream.end();
});
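Note that manually forwarding the "data" and "end" events like this is essentially what pipe() already does, so an equivalent (and simpler) form of the same workaround, assuming nothing else writes to destStream, is:
srcStream.pipe(destStream);
// then pass destStream as the Body parameter to s3.upload as before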
If it helps anyone, I was able to stream from the client to S3 successfully:
https://gist.github.com/mattlockyer/532291b6194f6d9ca40cb82564db9d2a
The server-side code assumes req is a stream object; in my case it was sent from the client with file info set in the headers.
const fileUploadStream = (req, res) => {
  // get "body" args from header
  const { id, fn } = JSON.parse(req.get('body'));
  const Key = id + '/' + fn; // upload to s3 folder "id" with filename === fn
  const params = {
    Key,
    Bucket: bucketName, // set somewhere
    Body: req, // req is a stream
  };
  s3.upload(params, (err, data) => {
    if (err) {
      res.send('Error Uploading Data: ' + JSON.stringify(err) + '\n' + JSON.stringify(err.stack));
    } else {
      res.send(Key);
    }
  });
};
Yes, it breaks convention, but if you look at the gist it's much cleaner than anything else I found using multer, busboy, etc.
+1 for pragmatism, and thanks to @SalehenRahman for his help.
If you're using AWS Node SDK v3, there is a dedicated module for uploading streams/blobs/buffers:
https://www.npmjs.com/package/@aws-sdk/lib-storage
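A minimal usage sketch of that module (the bucket name, key, and file path below are placeholders):
import { S3Client } from '@aws-sdk/client-s3';
import { Upload } from '@aws-sdk/lib-storage';
import { createReadStream } from 'fs';

const upload = new Upload({
  client: new S3Client({}),
  params: {
    Bucket: 'my-bucket', // placeholder
    Key: 'my-key.txt', // placeholder
    Body: createReadStream('./local-file.txt'), // any readable stream works here
  },
});

upload.on('httpUploadProgress', (progress) => console.log(progress));
await upload.done();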
I'm using KnexJS and had a problem using their streaming API. I finally fixed it; hopefully the following will help someone.
const knexStream = knex.select('*').from('my_table').stream();
const passThroughStream = new stream.PassThrough();
knexStream.on('data', (chunk) => passThroughStream.write(JSON.stringify(chunk) + '\n'));
knexStream.on('end', () => passThroughStream.end());
const uploadResult = await s3
  .upload({
    Bucket: 'my-bucket',
    Key: 'stream-test.txt',
    Body: passThroughStream
  })
  .promise();
Create a new stream.PassThrough() and pipe the input stream to it, then pass the passthrough instance to the body.
Check the following example:
const { PassThrough } = require('stream');

function upload(s3, inputStream) {
  const pass = new PassThrough();
  inputStream.pipe(pass);

  return s3.upload(
    {
      Bucket: 'bucket name',
      Key: 'unique file name',
      Body: pass,
    },
    {
      queueSize: 4, // default concurrency
    },
  ).promise()
    .then((data) => console.log(data))
    .catch((error) => console.error(error));
}
If you know the size of the stream you can use minio-js to upload the stream like this:
s3Client.putObject('my-bucketname', 'my-objectname.ogg', stream, size, 'audio/ogg', function(e) {
  if (e) {
    return console.log(e);
  }
  console.log('Successfully uploaded the stream');
});

How do I decode a base64 file when reading from GridFS via Node?

I'm trying to read a file encoded in base64 from a MongoDB GridFS collection using Node. I have been able to get the file saved from MongoDB to my local machine, but it's in base64 format and I want to save it unencoded.
Ideally I would like to decode the file "on the fly", without having to save it once and then read > decode > write it back to the filesystem.
My code currently looks like this...
return new Promise(async (resolve, reject) => {
  let bucket = new mongodb.GridFSBucket(db, { bucketName: 'Binaries' });
  let objectID = new mongodb.ObjectID(fileID);

  // create the download stream
  bucket.openDownloadStream(objectID)
    .once('error', async (error) => {
      reject(error);
    })
    .once('end', async () => {
      resolve(downloadPath);
    })
    // pipe the file to the stream
    .pipe(fs.createWriteStream(downloadPath));
});
Any ideas?
Just in case anyone else is looking at this, here's where I landed...
return new Promise(async (resolve, reject) => {
  let bucket = new mongodb.GridFSBucket(db, {
    bucketName: 'Binaries'
  });
  let objectID = new mongodb.ObjectID(fileInformation._id);

  // temporary variable to hold image
  var data = [];

  // create the download stream
  let downloadStream = bucket.openDownloadStream(objectID);

  downloadStream.on('data', (chunk) => {
    data.push(chunk);
  });
  downloadStream.on('error', async (error) => {
    reject(error);
  });
  downloadStream.on('end', async () => {
    // convert from base64 and write to file system
    let bufferBase64 = Buffer.concat(data);
    let bufferDecoded = Buffer.from(bufferBase64.toString(), 'base64');
    fs.writeFileSync(fileName, bufferDecoded, 'binary');
    resolve(fileName);
  });
});
Node has a built-in Buffer.from(string[, encoding]) method: pass it the base64-encoded string and it gives you the raw bytes back, which you can then convert with .toString() if needed.
Ex.
let whatYouNeed = Buffer.from(gridFsData, 'base64').toString();
More about the Buffer.from() function is in the Node.js docs.
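If you really do want to decode on the fly, as the question asks, one option is a small Transform stream that decodes base64 chunk by chunk, carrying over any characters that don't yet form a complete 4-character group. This is just a sketch; base64DecodeStream is a hypothetical helper (not part of the MongoDB driver), and it assumes the stored data is plain base64 without embedded whitespace.
const { Transform } = require('stream');

function base64DecodeStream() {
  let carry = '';
  return new Transform({
    transform(chunk, _encoding, callback) {
      const text = carry + chunk.toString('ascii');
      const usable = text.length - (text.length % 4); // base64 decodes in 4-character groups
      carry = text.slice(usable);
      callback(null, Buffer.from(text.slice(0, usable), 'base64'));
    },
    flush(callback) {
      callback(null, Buffer.from(carry, 'base64'));
    },
  });
}

// usage with the GridFS download stream from above:
// bucket.openDownloadStream(objectID)
//   .pipe(base64DecodeStream())
//   .pipe(fs.createWriteStream(downloadPath));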

Question about end of request for node/JS request package

I'm trying to understand what .on('end', ...) does in the node package request.
My code:
const fs = require('fs');
const request = require('request');

function downloadAsset(relativeAssetURL, fileName) {
  return new Promise((resolve, reject) => {
    try {
      let writeStream = fs.createWriteStream(fileName);
      var remoteImage = request(`https:${relativeAssetURL}`);

      remoteImage.on('data', function(chunk) {
        writeStream.write(chunk);
      });
      remoteImage.on('end', function() {
        let stats = fs.statSync(fileName);
        resolve({ fileName: fileName, stats: stats });
      });
    } catch (err) {
      reject(err);
    }
  });
}
What I'm trying to do is download a remote image, get some file statistics, and then resolve the promise so my code can do other things.
What I'm finding is that the promise doesn't always resolve after the file has been downloaded; it may resolve a little before then. I thought that's what .on('end', ... ) was for.
What can I do to have this promise resolve after the image has been downloaded in full?
As the docs say:
The writable.write() method writes some data to the stream, and calls the supplied callback once the data has been fully handled.
So, writable.write() is asynchronous. Just because your last writeStream.write has been called does not necessarily mean that all write operations have been completed. You probably want to call the .end method, which means:
Calling the writable.end() method signals that no more data will be written to the Writable. The optional chunk and encoding arguments allow one final additional chunk of data to be written immediately before closing the stream. If provided, the optional callback function is attached as a listener for the 'finish' event.
So, try calling writeStream.end when the remoteImage request ends, and pass a callback to writeStream.end that resolves the Promise once the writing is finished:
function downloadAsset(relativeAssetURL, fileName) {
  return new Promise((resolve, reject) => {
    try {
      const writeStream = fs.createWriteStream(fileName);
      const remoteImage = request(`https:${relativeAssetURL}`);

      remoteImage.on('data', function(chunk) {
        writeStream.write(chunk);
      });
      remoteImage.on('end', function() {
        writeStream.end(() => {
          const stats = fs.statSync(fileName);
          resolve({ fileName: fileName, stats: stats });
        });
      });
    } catch (err) {
      reject(err);
    }
  });
}
(Also, try not to mix var and let/const: in an ES6+ environment, prefer const, which is generally easier to read and has fewer problems, like hoisting.)
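An alternative sketch of the same function that lets pipe() manage the writes and resolves on the write stream's 'finish' event (same fs and request setup as above):
function downloadAsset(relativeAssetURL, fileName) {
  return new Promise((resolve, reject) => {
    const writeStream = fs.createWriteStream(fileName);
    request(`https:${relativeAssetURL}`)
      .on('error', reject) // network/request errors
      .pipe(writeStream)
      .on('error', reject) // filesystem errors
      .on('finish', () => { // emitted once all data has been flushed to the file
        resolve({ fileName, stats: fs.statSync(fileName) });
      });
  });
}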

Resources