Transform stream coming from S3 buckets - node.js

Here is my function to get the contents of multiple objects and merge them into one single file:
const getAllObjectsInRegions = (config, keys) => {
  keys.forEach((key) => {
    const params = {
      Bucket: `${config.metadata.s3.bucket}`,
      Key: key
    }
    const file = fs.createWriteStream('./out.txt')
    return s3.getObject(params)
      .createReadStream()
      .pipe(file)
  })
}
Basically, the object that I read is just a JSON file. Do you have any idea how to transform the content of the object before writing it to the file? I need to extract some properties instead of writing the entire file. Thank you.
EDIT:
const getAllObjectsInRegions = (config, keys) => {
  const promise = new Promise((resolve, reject) => {
    keys.forEach((key) => {
      const params = {
        Bucket: `${config.metadata.s3.bucket}`,
        Key: key
      }
      const readStream = s3.getObject(params).createReadStream()
      readStream.on('data', (data) => {
        let playlist
        try {
          playlist = JSON.parse(data)
        } catch(err) {
          reject(err)
        }
        const slicedPlaylist = _.pick(playlist, ['contentKey', 'title', 'text' ])
        fs.writeFile('./out.txt', slicedPlaylist, (err) => {
          if (err) {
            reject(err)
          }
          resolve()
        })
      })
    })
  })
  return promise
}
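One way to do the transformation (an editorial sketch, not the original poster's solution, assuming the AWS SDK v2 s3 client, lodash as _, and the ./out.txt target used above): buffer each object in full before parsing, because a 'data' event only delivers an arbitrary chunk of the JSON, then pick the wanted properties and append the result to a single output file.
const getAllObjectsInRegions = (config, keys) => {
  return Promise.all(keys.map((key) => {
    const params = {
      Bucket: config.metadata.s3.bucket,
      Key: key
    }
    // getObject(...).promise() buffers the whole body, so JSON.parse sees a complete document
    return s3.getObject(params).promise()
      .then((data) => {
        const playlist = JSON.parse(data.Body.toString('utf8'))
        const slicedPlaylist = _.pick(playlist, ['contentKey', 'title', 'text'])
        // appendFile (not writeFile) so every key ends up in the same merged file
        return fs.promises.appendFile('./out.txt', JSON.stringify(slicedPlaylist) + '\n')
      })
  }))
}
For very large objects a Transform stream with a streaming JSON parser would avoid buffering, but for small JSON files the buffered version is usually enough.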

Related


AWS S3 Angular 14 with Nodejs - Multi Part Upload sending the same ETag for every part
The backend Node.js controller looks like this:
const AWS = require('aws-sdk');
const S3 = new AWS.S3({
  // endpoint: "http://bucket.analysts24x7.com.s3-website-us-west-1.amazonaws.com",
  // accessKeyId: S3_KEY,
  // secretAccessKey: S3_SECRET,
  // region: process.env.POOL_REGION,
  apiVersion: '2006-03-01',
  signatureVersion: 'v4',
  // maxRetries: 10
});
exports.startUpload = (req, res) => {
  try {
    const filesData = JSON.parse(JSON.stringify(req.files));
    const eachFiles = Object.keys(filesData)[0];
    console.log(filesData[eachFiles]);
    let params = {
      Bucket: process.env.STORE_BUCKET_NAME,
      Key: filesData[eachFiles].name,
      // Body: Buffer.from(filesData[eachFiles].data.data, "binary"),
      ContentType: filesData[eachFiles].mimetype
      // ContentType: filesData[eachFiles].data.type
    };
    return new Promise((resolve, reject) => {
      S3.createMultipartUpload(params, (err, uploadData) => {
        if (err) {
          reject(res.send({
            error: err
          }));
        } else {
          resolve(res.send({ uploadId: uploadData.UploadId }));
        }
      });
    });
  } catch(err) {
    res.status(400).send({
      error: err
    })
  }
}
exports.getUploadUrl = async(req, res) => {
  try {
    let params = {
      Bucket: process.env.STORE_BUCKET_NAME,
      Key: req.body.fileName,
      PartNumber: req.body.partNumber,
      UploadId: req.body.uploadId
    }
    return new Promise((resolve, reject) => {
      S3.getSignedUrl('uploadPart', params, (err, presignedUrl) => {
        if (err) {
          reject(res.send({
            error: err
          }));
        } else {
          resolve(res.send({ presignedUrl }));
        }
      });
    })
  } catch(err) {
    res.status(400).send({
      error: err
    })
  }
}
exports.completeUpload = async(req, res) => {
  try {
    let params = {
      Bucket: process.env.STORE_BUCKET_NAME,
      Key: req.body.fileName,
      MultipartUpload: {
        Parts: req.body.parts
      },
      UploadId: req.body.uploadId
    }
    // console.log("-----------------")
    // console.log(params)
    // console.log("-----------------")
    return new Promise((resolve, reject) => {
      S3.completeMultipartUpload(params, (err, data) => {
        if (err) {
          reject(res.send({
            error: err
          }));
        } else {
          resolve(res.send({ data }));
        }
      })
    })
  } catch(err) {
    res.status(400).send({
      error: err
    })
  }
}
Frontend Angular 14 code:
uploadSpecificFile(index) {
const fileToUpload = this.fileInfo[index];
const formData: FormData = new FormData();
formData.append('file', fileToUpload);
this.shared.startUpload(formData).subscribe({
next: (response) => {
const result = JSON.parse(JSON.stringify(response));
this.multiPartUpload(result.uploadId, fileToUpload).then((resp) => {
return this.completeUpload(result.uploadId, fileToUpload, resp);
}).then((resp) => {
console.log(resp);
}).catch((err) => {
console.error(err);
})
},
error: (error) => {
console.log(error);
}
})
}
multiPartUpload(uploadId, fileToUpload) {
return new Promise((resolve, reject) => {
const CHUNKS_COUNT = Math.floor(fileToUpload.size / CONSTANTS.CHUNK_SIZE) + 1;
let promisesArray = [];
let params = {};
let start, end, blob;
for (let index = 1; index < CHUNKS_COUNT + 1; index++) {
start = (index - 1) * CONSTANTS.CHUNK_SIZE
end = (index) * CONSTANTS.CHUNK_SIZE
blob = (index < CHUNKS_COUNT) ? fileToUpload.slice(start, end) : fileToUpload.slice(start);
// blob.type = fileToUpload.type;
params = {
fileName: fileToUpload.name,
partNumber: index,
uploadId: uploadId
}
console.log("Start:", start);
console.log("End:", end);
console.log("Blob:", blob);
this.shared.getUploadUrl(params).subscribe({
next: (response) => {
const result = JSON.parse(JSON.stringify(response));
// Send part aws server
const options = {
headers: { 'Content-Type': fileToUpload.type }
}
let uploadResp = axios.put(result.presignedUrl, blob, options);
promisesArray.push(uploadResp);
if(promisesArray.length == CHUNKS_COUNT) {
resolve(promisesArray)
}
},
error: (error) => {
console.log(error);
reject(error);
}
})
}
})
}
async completeUpload(uploadId, fileToUpload, resp) {
let resolvedArray = await Promise.all(resp)
let uploadPartsArray = [];
console.log("I am etag -----");
console.log(resolvedArray);
resolvedArray.forEach((resolvedPromise, index) => {
uploadPartsArray.push({
ETag: resolvedPromise.headers.etag,
PartNumber: index + 1
})
})
// Complete upload here
let params = {
fileName: fileToUpload.name,
parts: uploadPartsArray,
uploadId: uploadId
}
return new Promise((resolve, reject) => {
this.shared.completeUpload(params).subscribe({
next: (response) => {
resolve(response);
},
error: (error) => {
reject(error);
}
})
})
}
What I am trying to do:
Initiate a multipart upload (API: /start-upload) to get the uploadId.
Upload the object's parts (API: /get-upload-url) by getting a presigned URL for each part.
PUT each blob to its presigned URL to get back the part's ETag.
Complete the multipart upload (API: /complete-upload) by sending the list of completed parts.
Sample example code:
Frontend:
https://github.com/abhishekbajpai/aws-s3-multipart-upload/blob/master/frontend/pages/index.js
Backend:
https://github.com/abhishekbajpai/aws-s3-multipart-upload/blob/master/backend/server.js
A screenshot of what the API calls look like was attached here.
Now the problem: every time I call the presigned URL using Axios (step 3 above), I get the same ETag back for every part. Because of that, I get this error on the final upload:
Your proposed upload is smaller than the minimum allowed size
Note:
Every chunk I upload, apart from the last part, has the size
CHUNK_SIZE: 5 * 1024 * 1024, // 5.2 MB
Also, all of the APIs return a success response apart from /complete-upload, because every part comes back with the same ETag.
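For reference (an editorial note, not from the original post): S3 rejects a multipart upload when any part other than the last one is smaller than 5 MB, which is what the "smaller than the minimum allowed size" message above refers to. A quick sanity check on the computed slices, reusing the CONSTANTS.CHUNK_SIZE and fileToUpload names from the frontend code:
const MIN_PART_SIZE = 5 * 1024 * 1024; // S3 minimum for every part except the last
const CHUNKS_COUNT = Math.floor(fileToUpload.size / CONSTANTS.CHUNK_SIZE) + 1;
for (let index = 1; index <= CHUNKS_COUNT; index++) {
  const start = (index - 1) * CONSTANTS.CHUNK_SIZE;
  const end = Math.min(index * CONSTANTS.CHUNK_SIZE, fileToUpload.size);
  if (index < CHUNKS_COUNT && end - start < MIN_PART_SIZE) {
    console.warn(`Part ${index} is only ${end - start} bytes`); // this part would be rejected
  }
}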
The same question was also asked here, but there are no solutions:
https://github.com/aws/aws-sdk-java/issues/2615
Any idea what is going on, and how can I resolve it? This seems to be quite an uncommon problem.
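An editorial observation on the frontend code above (a guess, not a confirmed diagnosis): start, end, blob and params are declared once outside the for loop, and the getUploadUrl subscription callback runs asynchronously, so by the time each callback fires, blob may already refer to the last slice. Every part would then upload the same final chunk, which would explain both the identical ETags and the "smaller than the minimum allowed size" failure. A sketch of the same loop with per-iteration bindings:
for (let index = 1; index <= CHUNKS_COUNT; index++) {
  // const inside the loop body: each subscription callback captures its own chunk
  const start = (index - 1) * CONSTANTS.CHUNK_SIZE;
  const end = index * CONSTANTS.CHUNK_SIZE;
  const blob = (index < CHUNKS_COUNT) ? fileToUpload.slice(start, end) : fileToUpload.slice(start);
  const params = { fileName: fileToUpload.name, partNumber: index, uploadId: uploadId };
  this.shared.getUploadUrl(params).subscribe({
    next: (response) => {
      const result = JSON.parse(JSON.stringify(response));
      const options = { headers: { 'Content-Type': fileToUpload.type } };
      promisesArray.push(axios.put(result.presignedUrl, blob, options));
      if (promisesArray.length === CHUNKS_COUNT) {
        resolve(promisesArray);
      }
    },
    error: (error) => reject(error)
  });
}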

How to upload any type of file to aws s3 bucket?

I am trying to upload files like docs, ppts, etc. I have a front end in React and my upload function looks like this:
const reader = new FileReader()
const toBase64 = (file) =>
  new Promise((resolve, reject) => {
    reader.readAsDataURL(file);
    reader.onload = () => resolve(reader.result);
    reader.onerror = (error) => reject(error);
  });
const UploadMinues = async (event: any) => {
  console.log(event, 'envent from forntend');
  if (event.target && event?.target?.files[0]) {
    try {
      await toBase64(event.target.files[0]);
      console.log(reader.result, 'reader.result');
      const res = (await API.graphql({
        query: `mutation MyMutation {
          updateMinutes(input: { projectId:"${event.target.id}", adviserId: "${user.username}", file: "${reader.result}", fileName: "${event.target.files[0].name}", fileType: "${event.target.files[0].type}"}) {
            minutesKey
          }
        }`,
        authMode: GRAPHQL_AUTH_MODE.AMAZON_COGNITO_USER_POOLS
      })) as any;
    } catch (e) {
      console.log('UpdateImage_Error==>', e);
      setMinutesErr(e.message);
      setOpenAlert(true);
    }
  } else {
    console.log('errorrrrrrrrrrr');
    return;
  }
};
And on the back end I have a Lambda function like this:
const AWS = require('aws-sdk');
const docClient = new AWS.DynamoDB.DocumentClient();
export async function updateMinutes(data: any) {
  let { adviserId, projectId, file, fileName, fileType } = data;
  console.log(data, "received from front end")
  let s3bucket = new AWS.S3({ params: { Bucket: `${process.env.S3_BUCKET_NAME}` } });
  try {
    // const buf = Buffer.from(file.replace(/^data:application\/\w+;base64,/, ""), 'base64')
    let params_upload = {
      Key: `minutes/${adviserId}/${projectId}/${fileName}`,
      Body: Buffer.from(file, "base64"),
      ContentType: fileType,
      CacheControl: 'max-age=86400'
    };
    const minutes_save = await s3bucket.upload(params_upload).promise()
    const minutesKey = minutes_save.Key
    let params = {
      TableName: process.env.CONSULTATION_TABLE,
      Key: {
        adviserId: adviserId,
        projectId: projectId,
      },
      UpdateExpression: `set minutes = :edu`,
      ExpressionAttributeValues: { ':edu': [minutesKey] }
    }
    const data = await docClient.update(params).promise()
    return {
      minutesKey: minutesKey
    }
  } catch (err) {
    console.log(err, "IMAGE_UPLOAD_ERROR")
  }
}
The file gets uploaded to the S3 bucket, but when I open it, it is in some symbols format. Could someone please explain what I am doing wrong here? The same approach works fine when I upload a PDF or an image, but not with docs or Excel files.
My input looks like this:
<Input
id={data.projectId}
name={data.projectId}
onChange={UploadMinues}
accept="application/*"
multiple
type="file"
/>
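A possible culprit to check (an editorial sketch, not a confirmed fix): FileReader.readAsDataURL produces a data URL (data:<mime type>;base64,...), and the commented-out replace in the Lambda only strips prefixes whose subtype matches \w+, which does not cover MIME types such as application/vnd.openxmlformats-officedocument.wordprocessingml.document. Stripping the prefix for any MIME type before decoding would look like this in the Lambda above:
// strip the data-URL prefix for any MIME type before base64-decoding
const base64Data = file.replace(/^data:[^;]+;base64,/, "");
let params_upload = {
  Key: `minutes/${adviserId}/${projectId}/${fileName}`,
  Body: Buffer.from(base64Data, "base64"),
  ContentType: fileType,
  CacheControl: 'max-age=86400'
};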

node.js Aws Lambda: getObject to base64

I have modified the zip function from here https://dzone.com/articles/serverless-zipchamp-update-your-zip-files-in-s3-al because it could only zip text, not images.
The problem occurs in the function base64_encode at the end of the code. I can write the base64 string to the console, but I cannot return it to the calling function.
Any help is welcome.
let AWS = require('aws-sdk');
let JSZip = require("jszip");
let fs = require("fs");
const s3 = new AWS.S3();
let thebase='';
exports.handler = function (event, context, callback) {
let myzip = event.zip;
let modified = 0, removed = 0;
let mypath = event.path;
let mynewname = event.newname;
let filename = event.filename;
//get Zip file
s3.getObject({
'Bucket': "tripmasterdata",
'Key': event.path+'/'+myzip,
}).promise()
.then(data => {
let jszip = new JSZip();
jszip.loadAsync(data.Body).then(zip => {
// add or remove file
if (filename !== '') {
//here I get the Image to be stored in the zip as base64 encoded string
thebase = base64_encode(mypath,filename,thebase);
console.log('AD:'+thebase); //<- this is always empty, WHY????
zip.file(mynewname, thebase, {createFolders: false,compression: "STORE",base64: true});
modified++;
} else {
console.log(`Remove ${filename}`);
zip.remove(filename);
removed++;
}
let tmpzip = `/tmp/${myzip}`
let tmpPath = `${event.path}`
//Generating the zip
console.log(`Writing to temp file ${tmpzip}`);
zip.generateNodeStream({ streamFiles: true })
.pipe(fs.createWriteStream(tmpzip))
.on('error', err => callback(err))
.on('finish', function () {
console.log(`Uploading to ${event.path}`);
s3.putObject({
"Body": fs.createReadStream(tmpzip),
"Bucket": "xxx/"+tmpPath,
"Key": myzip,
"Metadata": {
"Content-Length": String(fs.statSync(tmpzip).size)
}
})
.promise()
.then(data => {
console.log(`Successfully uploaded ${event.path}`);
callback(null, {
modified: modified,
removed: removed
});
})
.catch(err => {
callback(err);
});
});
})
.catch(err => {
callback(err);
});
})
.catch(err => {
callback(err);
});
}
//function that should return my base64 encoded image
function base64_encode(path,file,thebase) {
var leKey = path+'/'+file;
var params = {
'Bucket': "xxx",
'Key': leKey
}
s3.getObject(params, function(error, data) {
console.log('error: '+error);
}).promise().then(data => {
thebase = data.Body.toString('base64');
console.log('thebase: '+thebase); //<- here I see the base64 encoded string
return thebase; //<- does not return thebase
});
return thebase; //<- does not return thebase
}
This is a promise-related issue: the last statement in the function, return thebase;, will most likely return undefined, because the promise has not resolved yet when the function returns. I find it useful to use the keywords async and await; this reduces the code to a much more readable format (it flattens the code a lot).
function base64_encode(path, file, thebase) {
  var leKey = path + '/' + file;
  var params = {
    'Bucket': "xxx",
    'Key': leKey
  }
  return s3.getObject(params).promise();
}
Then, in the main function, you handle the promise with a .then().
If you were using async/await, it would look like the following:
async function base64_encode(path, file, thebase) {
  var leKey = path + '/' + file;
  var params = {
    'Bucket': "xxx",
    'Key': leKey
  }
  return s3.getObject(params).promise();
}
let thebase = await base64_encode('stuff');
Hope this helps
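For completeness (an editorial sketch, assuming the Node.js AWS SDK v2 used above): the promise returned by base64_encode resolves with the GetObject response, not with a base64 string, so the caller still has to pull out Body (a Buffer) and encode it. A small usage sketch, reusing the mypath, filename, mynewname and zip names from the question:
// inside an async callback in the handler
const data = await base64_encode(mypath, filename);
const thebase = data.Body.toString('base64'); // Body is a Buffer, so encode it here
zip.file(mynewname, thebase, { createFolders: false, compression: "STORE", base64: true });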

How to control multiple Apollo upload server streams? (e.g. make a buffer)

I am trying to upload multiple images using Apollo Server.
I succeeded in using createWriteStream to write the stream to a file,
but I don't need to make a file, I just need the buffer as a string.
However, this never returns a buffer string.
Please see my code.
type Mutation {
createProduct(product_name: String!, product_info: String!, price: Int!, sale: Int, files: [Upload!]): Result!
}
this is my Mutation
createProduct(parent, { product_name, product_info, price, sale, files }) {
  db.Product.create({
    product_name, product_info, price, sale
  }).then( (product) => {
    storeImages(files, product.id)
      .then( (result) => {
        console.log(result)
      })
  })
}
and this is my resolver
const outStream = new Writable({
  write(chunk, encoding, callback){
    console.log(chunk)
  },
})
const inStream = new Readable({
  read(size) {
    console.log(size)
  }
})
const test2 = (chunk) => {
  console.log(chunk)
  outStream.destroy();
}
function test(){
  let pass = new PassThrough();
  console.log(pass)
  console.log('pass!!')
  return 'end';
}
const processUpload = async upload => {
  const { stream, filename, mimetype } = await upload;
  console.log('stream')
  console.log(upload)
  // const { id, path } = await storeFS({ stream, filename })
  // return storeDB({ id, filename, mimetype, path })
}
const lastStream = (chunk) => {
  console.log(chunk)
}
const storeImages = (files, product_id) => {
  return new Promise( async (resolve, reject) => {
    let save = []
    files.forEach( async (image, index) => {
      const { stream } = await image;
      stream
        .pipe(outStream)
        // .pipe(inStream)
    })
    resolve(save)
  })
}
Finally, that is what I've tried; to summarize:
How can I use apollo-upload-server to collect the streams of multiple files into buffers (blobs) and control them?
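One way to get buffers instead of files (an editorial sketch, not a confirmed answer, assuming each element of files resolves to an object exposing a readable stream, as in the code above): collect each stream's chunks into a single Buffer and resolve with those.
const streamToBuffer = (stream) =>
  new Promise((resolve, reject) => {
    const chunks = [];
    stream.on('data', (chunk) => chunks.push(chunk));
    stream.on('error', reject);
    stream.on('end', () => resolve(Buffer.concat(chunks)));
  });

const storeImages = (files, product_id) =>
  Promise.all(files.map(async (image) => {
    const { stream, filename } = await image;    // same shape as in the question
    const buffer = await streamToBuffer(stream); // the whole file as one Buffer
    return { filename, buffer, product_id };     // e.g. buffer.toString('base64') if a string is needed
  }));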

Create a zip file on S3 from files on S3 using Lambda Node

I need to create a Zip file that consists of a selection of files (videos and images) located in my s3 bucket.
The problem at the moment using my code below is that I quickly hit the memory limit on Lambda.
async.eachLimit(files, 10, function(file, next) {
  var params = {
    Bucket: bucket, // bucket name
    Key: file.key
  };
  s3.getObject(params, function(err, data) {
    if (err) {
      console.log('file', file.key);
      console.log('get image files err', err, err.stack); // an error occurred
    } else {
      console.log('file', file.key);
      zip.file(file.key, data.Body);
      next();
    }
  });
},
function(err) {
  if (err) {
    console.log('err', err);
  } else {
    console.log('zip', zip);
    content = zip.generateNodeStream({
      type: 'nodebuffer',
      streamFiles: true
    });
    var params = {
      Bucket: bucket, // name of dest bucket
      Key: 'zipped/images.zip',
      Body: content
    };
    s3.upload(params, function(err, data) {
      if (err) {
        console.log('upload zip to s3 err', err, err.stack); // an error occurred
      } else {
        console.log(data); // successful response
      }
    });
  }
});
Is this possible using Lambda, or should I look at a different approach?
Is it possible to write to a compressed zip file on the fly, therefore eliminating the memory issue somewhat, or do I need to have the files collected before compression?
Any help would be much appreciated.
Okay, I got to do this today and it works: direct buffer to stream, no disk involved, so memory or disk limitations won't be an issue here:
'use strict';
const AWS = require("aws-sdk");
AWS.config.update( { region: "eu-west-1" } );
const s3 = new AWS.S3( { apiVersion: '2006-03-01'} );
const _archiver = require('archiver');
//This returns us a stream.. consider it as a real pipe sending fluid to S3 bucket.. Don't forget it
const streamTo = (_bucket, _key) => {
var stream = require('stream');
var _pass = new stream.PassThrough();
s3.upload( { Bucket: _bucket, Key: _key, Body: _pass }, (_err, _data) => { /*...Handle Errors Here*/ } );
return _pass;
};
exports.handler = async (_req, _ctx, _cb) => {
var _keys = ['list of your file keys in s3'];
var _list = await Promise.all(_keys.map(_key => new Promise((_resolve, _reject) => {
s3.getObject({Bucket:'bucket-name', Key:_key})
.then(_data => _resolve( { data: _data.Body, name: `${_key.split('/').pop()}` } ));
}
))).catch(_err => { throw new Error(_err) } );
await new Promise((_resolve, _reject) => {
var _myStream = streamTo('bucket-name', 'fileName.zip'); //Now we instantiate that pipe...
var _archive = _archiver('zip');
_archive.on('error', err => { throw new Error(err); } );
//Your promise gets resolved when the fluid stops running... so that's when you get to close and resolve
_myStream.on('close', _resolve);
_myStream.on('end', _resolve);
_myStream.on('error', _reject);
_archive.pipe(_myStream); //Pass that pipe to _archive so it can push the fluid straigh down to S3 bucket
_list.forEach(_itm => _archive.append(_itm.data, { name: _itm.name } ) ); //And then we start adding files to it
_archive.finalize(); //Tell is, that's all we want to add. Then when it finishes, the promise will resolve in one of those events up there
}).catch(_err => { throw new Error(_err) } );
_cb(null, { } ); //Handle response back to server
};
I formatted the code following #iocoker's answer.
Main entry:
// index.js
'use strict';
const S3Zip = require('./s3-zip')
const params = {
files: [
{
fileName: '1.jpg',
key: 'key1.JPG'
},
{
fileName: '2.jpg',
key: 'key2.JPG'
}
],
zippedFileKey: 'zipped-file-key.zip'
}
exports.handler = async event => {
const s3Zip = new S3Zip(params);
await s3Zip.process();
return {
statusCode: 200,
body: JSON.stringify(
{
message: 'Zip file successfully!'
}
)
};
}
Zip file util
// s3-zip.js
'use strict';
const fs = require('fs');
const AWS = require("aws-sdk");
const Archiver = require('archiver');
const Stream = require('stream');
const https = require('https');
const sslAgent = new https.Agent({
keepAlive: true,
rejectUnauthorized: true
});
sslAgent.setMaxListeners(0);
AWS.config.update({
httpOptions: {
agent: sslAgent,
},
region: 'us-east-1'
});
module.exports = class S3Zip {
constructor(params, bucketName = 'default-bucket') {
this.params = params;
this.BucketName = bucketName;
}
async process() {
const { params, BucketName } = this;
const s3 = new AWS.S3({ apiVersion: '2006-03-01', params: { Bucket: BucketName } });
// create readstreams for all the output files and store them
const createReadStream = fs.createReadStream;
const s3FileDwnldStreams = params.files.map(item => {
const stream = s3.getObject({ Key: item.key }).createReadStream();
return {
stream,
fileName: item.fileName
}
});
const streamPassThrough = new Stream.PassThrough();
// Create a zip archive using streamPassThrough style for the linking request in s3bucket
const uploadParams = {
ACL: 'private',
Body: streamPassThrough,
ContentType: 'application/zip',
Key: params.zippedFileKey
};
const s3Upload = s3.upload(uploadParams, (err, data) => {
if (err) {
console.error('upload err', err)
} else {
console.log('upload data', data);
}
});
s3Upload.on('httpUploadProgress', progress => {
// console.log(progress); // { loaded: 4915, total: 192915, part: 1, key: 'foo.jpg' }
});
// create the archiver
const archive = Archiver('zip', {
zlib: { level: 0 }
});
archive.on('error', (error) => {
throw new Error(`${error.name} ${error.code} ${error.message} ${error.path} ${error.stack}`);
});
// connect the archiver to upload streamPassThrough and pipe all the download streams to it
await new Promise((resolve, reject) => {
console.log("Starting upload of the output Files Zip Archive");
streamPassThrough.on('close', resolve);
streamPassThrough.on('end', resolve);
streamPassThrough.on('error', reject);
archive.pipe(streamPassThrough);
s3FileDwnldStreams.forEach((s3FileDwnldStream) => {
archive.append(s3FileDwnldStream.stream, { name: s3FileDwnldStream.fileName })
});
archive.finalize();
}).catch((error) => {
throw new Error(`${error.code} ${error.message} ${error.data}`);
});
// Finally wait for the uploader to finish
await s3Upload.promise();
}
}
The other solutions are great when there are not too many files (fewer than ~60). If they have to handle more files, they just quit silently with no errors, because they open too many streams.
This solution is inspired by https://gist.github.com/amiantos/16bacc9ed742c91151fcf1a41012445e
It is a working solution, which works well even with many files (300+) and returns a presigned URL to a zip that contains the files.
Main Lambda:
const AWS = require('aws-sdk');
const S3 = new AWS.S3({
apiVersion: '2006-03-01',
signatureVersion: 'v4',
httpOptions: {
timeout: 300000 // 5min Should Match Lambda function timeout
}
});
const archiver = require('archiver');
import stream from 'stream';
const UPLOAD_BUCKET_NAME = "my-s3-bucket";
const URL_EXPIRE_TIME = 5*60;
export async function getZipSignedUrl(event) {
const prefix = `uploads/id123123`; //replace this with your S3 prefix
let files = ["12314123.png", "56787567.png"] //replace this with your files
if (files.length == 0) {
console.log("No files to zip");
return result(404, "No pictures to download");
}
console.log("Files to zip: ", files);
try {
files = files.map(file => {
return {
fileName: file,
key: prefix + '/' + file,
type: "file"
};
});
const destinationKey = prefix + '/' + 'uploads.zip'
console.log("files: ", files);
console.log("destinationKey: ", destinationKey);
await streamToZipInS3(files, destinationKey);
const presignedUrl = await getSignedUrl(UPLOAD_BUCKET_NAME, destinationKey, URL_EXPIRE_TIME, "uploads.zip");
console.log("presignedUrl: ", presignedUrl);
if (!presignedUrl) {
return result(500, null);
}
return result(200, presignedUrl);
}
catch(error) {
console.error(`Error: ${error}`);
return result(500, null);
}
}
Helper functions:
export function result(code, message) {
return {
statusCode: code,
body: JSON.stringify(
{
message: message
}
)
}
}
export async function streamToZipInS3(files, destinationKey) {
await new Promise(async (resolve, reject) => {
var zipStream = streamTo(UPLOAD_BUCKET_NAME, destinationKey, resolve);
zipStream.on("error", reject);
var archive = archiver("zip");
archive.on("error", err => {
throw new Error(err);
});
archive.pipe(zipStream);
for (const file of files) {
if (file["type"] == "file") {
archive.append(getStream(UPLOAD_BUCKET_NAME, file["key"]), {
name: file["fileName"]
});
}
}
archive.finalize();
})
.catch(err => {
console.log(err);
throw new Error(err);
});
}
function streamTo(bucket, key, resolve) {
var passthrough = new stream.PassThrough();
S3.upload(
{
Bucket: bucket,
Key: key,
Body: passthrough,
ContentType: "application/zip",
ServerSideEncryption: "AES256"
},
(err, data) => {
if (err) {
console.error('Error while uploading zip')
throw new Error(err);
reject(err)
return
}
console.log('Zip uploaded')
resolve()
}
).on("httpUploadProgress", progress => {
console.log(progress)
});
return passthrough;
}
function getStream(bucket, key) {
let streamCreated = false;
const passThroughStream = new stream.PassThrough();
passThroughStream.on("newListener", event => {
if (!streamCreated && event == "data") {
const s3Stream = S3
.getObject({ Bucket: bucket, Key: key })
.createReadStream();
s3Stream
.on("error", err => passThroughStream.emit("error", err))
.pipe(passThroughStream);
streamCreated = true;
}
});
return passThroughStream;
}
export async function getSignedUrl(bucket: string, key: string, expires: number, downloadFilename?: string): Promise<string> {
const exists = await objectExists(bucket, key);
if (!exists) {
console.info(`Object ${bucket}/${key} does not exists`);
return null
}
let params = {
Bucket: bucket,
Key: key,
Expires: expires,
};
if (downloadFilename) {
params['ResponseContentDisposition'] = `inline; filename="${encodeURIComponent(downloadFilename)}"`;
}
try {
const url = S3.getSignedUrl('getObject', params);
return url;
} catch (err) {
console.error(`Unable to get URL for ${bucket}/${key}`, err);
return null;
}
};
Using streams may be tricky, as I'm not sure how you could pipe multiple streams into one object. I've done this several times using standard file objects. It's a multistep process and it's quite fast. Remember that Lambda runs on Linux, so you have all Linux resources at hand, including the system /tmp directory.
Create a sub-directory in /tmp, call it "transient" or whatever works for you
Use s3.getObject() and write the file objects to /tmp/transient
Use the glob package to generate an array of paths from /tmp/transient
Loop over the array and call zip.addLocalFile(array[i]);
zip.writeZip('/tmp/files.zip');
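A minimal sketch of those steps (illustration only, assuming the aws-sdk v2, glob and adm-zip packages, and placeholder bucket and key names):
const fs = require('fs');
const path = require('path');
const AWS = require('aws-sdk');
const glob = require('glob');      // assumed: classic glob API with glob.sync()
const AdmZip = require('adm-zip'); // assumed: provides addLocalFile() / writeZip()

const s3 = new AWS.S3();

exports.handler = async (event) => {
  const bucket = 'my-bucket';                    // placeholder
  const keys = ['images/a.jpg', 'videos/b.mp4']; // placeholder
  const workDir = '/tmp/transient';
  fs.mkdirSync(workDir, { recursive: true });

  // Steps 1 and 2: download every object into /tmp/transient
  for (const key of keys) {
    const data = await s3.getObject({ Bucket: bucket, Key: key }).promise();
    fs.writeFileSync(path.join(workDir, path.basename(key)), data.Body);
  }

  // Steps 3 and 4: collect the downloaded paths and add them to the zip
  const zip = new AdmZip();
  glob.sync(`${workDir}/*`).forEach((p) => zip.addLocalFile(p));

  // Step 5: write the zip to /tmp and upload it back to S3
  const zipPath = '/tmp/files.zip';
  zip.writeZip(zipPath);
  await s3.upload({ Bucket: bucket, Key: 'zipped/files.zip', Body: fs.createReadStream(zipPath) }).promise();
};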
I've used a similar approach, but I'm facing the issue that some of the files in the generated ZIP don't have the correct size (and corresponding data). Is there any limitation on the size of the files this code can manage? In my case I'm zipping large files (a few larger than 1 GB) and the overall amount of data may reach 10 GB.
I do not get any error/warning message, so it seems it all works fine.
Any idea what may be happening?
