Upload image from buffer in request - node.js

I am trying to handle an image upload request from an iOS/Android app. In the request I receive a buffer of the image, and I want to upload it to Rackspace Cloud Files without having to download the image and re-upload it. I could write the file to the file system and read it back, but I want to know if it is possible to create a readable stream from the buffer in order to pipe it to the cloud.
var options = {
    container: _this3._container,
    remote: filename,
    contentType: contentType
};

var readStream = new Readable(data);
readStream._read();
readStream.on('data', function() {
    console.log("Has data!");
});

function upload() {
    return new Promise(function (resolve, reject) {
        var writeStream = _this3._RackClient.upload(options);
        writeStream.on('success', function() {
            resolve();
        });
        writeStream.on('error', function(err) {
            if (err !== null) {
                return reject(err);
            }
        });
        readStream.pipe(writeStream);
    });
}

return upload();
This is how I am currently trying to do it, but I keep getting "not implemented" errors.

I was actually able to achieve this using a PassThrough stream.
var stream = require('stream');

var options = {
    container: _this3._container,
    remote: filename,
    contentType: contentType
};

function upload() {
    return new Promise(function (resolve, reject) {
        var writeStream = _this3._RackClient.upload(options);
        var bufferStream = new stream.PassThrough();
        bufferStream.end(Buffer.from(data));
        writeStream.on('success', function(file) {
            resolve(file);
        });
        writeStream.on('error', function(err) {
            if (err !== null) {
                console.log(err);
                return reject(err);
            }
        });
        bufferStream.pipe(writeStream);
    });
}

return upload();
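As a side note, on newer Node.js versions (12.3+) the PassThrough can be skipped by building a readable stream straight from the buffer with stream.Readable.from. A minimal sketch of the same upload() under that assumption, with data being the incoming buffer:

var stream = require('stream');

function upload() {
    return new Promise(function (resolve, reject) {
        var writeStream = _this3._RackClient.upload(options);
        writeStream.on('success', function(file) {
            resolve(file);
        });
        writeStream.on('error', reject);
        // Readable.from() consumes an iterable; wrapping the buffer in an array
        // emits it as a single chunk instead of one byte per read.
        stream.Readable.from([Buffer.from(data)]).pipe(writeStream);
    });
}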

Related

Buffer/Base64 encoding issue in Node.js and Azure Web App

My code works perfectly locally: the downloaded file is never corrupt (I tried it dozens of times and the files always come through complete). However, when I run it on Azure (Web App), the returned file is often corrupt (and has the wrong file size). It is extremely random; sometimes the file is returned fine and sometimes it is not.
My function that downloads the file from Azure Blob Storage is the following; the setTimeout calls are desperate attempts on my part to make it work without corruption:
storageUtils.downloadBlobFromCloud = (subDirectory, fileName) => {
    var dir = './temp';
    var tempFilePath = dir + '/' + Date.now().toString() + '.temp';
    console.log(tempFilePath);
    var absolutePath = pathLib.resolve(tempFilePath);
    if (!fs.existsSync(dir)) {
        fs.mkdirSync(dir);
    }
    if (!fileName) {
        return null;
    }
    return new Promise((resolve, reject) => {
        blobService.getBlobToLocalFile(
            container,
            `${subDirectory}/${fileName}`,
            tempFilePath,
            async function (error, serverBlob) {
                if (!error) {
                    try {
                        let base64 = await fileStream(tempFilePath);
                        console.log("STREAMMMINNNGGGG");
                        setTimeout(() => {
                            resolve(base64);
                        }, 2000);
                        setTimeout(() => {
                            console.log('deleting file');
                            fs.unlinkSync(tempFilePath);
                        }, 10000);
                    } catch (e) {
                        console.log('error1');
                        console.log(e);
                        reject(e);
                    }
                } else {
                    console.log("fi error2");
                    console.log(error);
                    reject(error);
                }
            }
        );
    });
};
The stream function is:
function fileStream(filePath) {
    return new Promise((resolve, reject) => {
        let buffers = [];
        let myStream = fs.createReadStream(filePath);
        myStream.on('data', (chunk) => { buffers.push(chunk); });
        myStream.once('end', () => {
            let buffer = Buffer.concat(buffers);
            console.log("BUFFER SIZE " + buffer.length);
            let base64 = buffer.toString('base64');
            /* fs.writeFile('./temp/buffer.txt', base64, function (err) {
                if (err) return console.log(err);
                console.log(err)
            }); */
            resolve(base64);
        });
        myStream.once('error', (err) => {
            reject(err);
        });
    });
}
The function that returns the response to the client:
let downloadFile = async (directory, fileName, attempts) => {
    attempts++;
    let file = await storageUtils.downloadBlobFromCloud(directory, fileName);
    if (attempts < 20 && !file) {
        file = await downloadFile(directory, fileName, attempts);
    }
    return file;
};

server.post('/api/download-file', async function (req, res) {
    try {
        console.log(`downloading file ${req.body.fileName} ${req.body.directory}`);
        let attempts = 0;
        let file = await downloadFile(req.body.directory, req.body.fileName, attempts);
        return res.json({ file });
    } catch (error) {
        console.log(error);
        return res.json(error);
    }
});
You may notice the commented-out "buffer.txt" write; I have checked the base64 in that file and it is indeed incomplete (i.e. expecting a 1.5 MB file but getting 1 MB) whenever the file is returned corrupt to the client.
I am out of ideas. What am I doing wrong, and why does this work perfectly locally but not on the cloud hosting?
NOTE: I even did a side-by-side comparison, using a text comparison tool, between the base64 of a file that downloaded fine and one that came back corrupt. The two base64 blocks are identical for a large portion, then the corrupt one simply stops while the non-corrupt base64 continues. It is as if something is cutting the base64 encoding short.
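For what it's worth, the temp file, the retries, and both setTimeout calls could be taken out of the picture by streaming the blob straight into memory. A minimal sketch, assuming the same blobService and container as above and files small enough to buffer in memory (downloadBlobToBase64 is a hypothetical name):

const stream = require('stream');

storageUtils.downloadBlobToBase64 = (subDirectory, fileName) => {
    return new Promise((resolve, reject) => {
        const chunks = [];
        const pass = new stream.PassThrough();
        pass.on('data', (chunk) => chunks.push(chunk));
        pass.on('end', () => resolve(Buffer.concat(chunks).toString('base64')));
        // getBlobToStream writes the blob's content into the supplied writable stream
        blobService.getBlobToStream(container, `${subDirectory}/${fileName}`, pass, (error) => {
            if (error) {
                reject(error);
            }
        });
    });
};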

Read from Dropbox stream into Buffer

I need to download a file from Dropbox into a buffer on my server. Due to security issues I can't download the file directly to the client, so I send a request to my server, fetch the file from Dropbox there, and then forward it to the client. I managed to implement this by writing the Dropbox stream to a file on my server and then sending that file to the client.
I need to implement this mechanism without writing the Dropbox stream to a file on my server. I need to create a buffer, write into it, and then forward the buffer to the client.
export const downloadFileFromDropbox = async function downloadFileFromDropbox(fileName, folderNameOnDropbox) {
    let isSucceeded;
    const message = [];
    let downloadResult;
    let fileBuffered = "";
    // authentication
    const dropbox = dropboxV2Api.authenticate({
        token: process.env.DEV_DROPBOX_SECRET_KEY
    });
    // configuring parameters
    const params = Object.freeze({
        resource: "files/download",
        parameters: {
            path: `/${folderNameOnDropbox}/${fileName}`
        }
    });
    let dropboxPromise = new Promise(function (resolve, reject) {
        dropbox(params, function (err, result) {
            if (err) {
                reject(err);
            } else {
                resolve(result);
            }
        }).pipe(fs.createWriteStream(/* need to implement buffer here */));
    });
    await dropboxPromise.then(async function (resultObj) {
        isSucceeded = true;
        message.push("fileDownload_OK");
    }).catch(async function (err) {
        isSucceeded = false;
        message.push(err.message);
    });
    downloadResult = {
        isSucceeded,
        message,
        /* Buffer */
    };
    return downloadResult;
};
I couldn't find a way to convert the file into a buffer without first writing it to some location.
What worked for me is:
1. write the file to a temporary folder
2. read it back and convert it into a buffer
3. send the buffer to the client
4. delete the file from the temporary location
Here's my code:
dropbox = dropboxV2Api.authenticate({ token: credentials.access_token });
dropbox(
    {
        resource: 'files/download',
        parameters: {
            path: `${req.query.folder}` + `/${req.query.filename}`,
        },
    },
    (err, result, response) => {
        // download completed
        if (err) {
            return res.status(500).send(err);
        } else {
            console.log('download completed');
        }
    }
).pipe(
    fs
        .createWriteStream(`./app/tmp/` + `${req.query.filename}`)
        .on('finish', () => {
            fs.readFile(
                `./app/tmp/` + `${req.query.filename}`,
                function (err, buffer) {
                    if (err) {
                        res.status(500).send(err);
                    } else {
                        fs.unlink(`./app/tmp/` + `${req.query.filename}`, (err) => {
                            if (err) {
                                console.error(err);
                                return;
                            }
                            // file removed
                            res.status(200).send(buffer);
                        });
                    }
                }
            );
        })
);
I hope this helps, even though it's a little bit late.
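For completeness, the in-memory variant the question was originally after can be sketched by piping the dropbox-v2-api stream into a PassThrough and collecting its chunks, instead of piping into fs.createWriteStream. This relies only on the .pipe() behaviour already shown above; params is the same object as in the question:

const stream = require('stream');

let dropboxPromise = new Promise(function (resolve, reject) {
    const chunks = [];
    const pass = new stream.PassThrough();
    pass.on('data', (chunk) => chunks.push(chunk));
    // the concatenated chunks are the downloaded file
    pass.on('end', () => resolve(Buffer.concat(chunks)));
    pass.on('error', reject);
    dropbox(params, function (err) {
        if (err) {
            reject(err);
        }
    }).pipe(pass);
});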

Creating a GIF from remote stream in graphicsmagick

I am currently creating a GIF from remote files in Node by downloading each image into a tmp folder on the file system.
I want to bypass saving the image to a tmp folder and save in memory instead. Is this possible?
As you can see, I have a download function in my AWS class which saves to a tmp folder:
download(key) {
    return new Promise((resolve, reject) => {
        request.head(`${this.base_url}/${this.bucket}/${key}`, (err, res, body) => {
            request(`${this.base_url}/${this.bucket}/${key}`)
                .pipe(fs.createWriteStream(`tmp/${key}`)).on('close', resolve);
        });
    });
};
Once they have all downloaded, I have a generateGif function in my GifService class which adds each file path as a custom argument to gm, adds a delay of 50ms, resizes, and then outputs a buffer which I then upload to AWS S3.
import gm from 'gm';
...
constructor() {
    this.gm = gm();
}

generateGif(images, prefix) {
    return new Promise((resolve, reject) => {
        // for each image we want in the array, we pass it to gm
        images.forEach(image => {
            this.gm.in(`tmp/${image.Key}`);
        });
        // Now we create the gif with a delay between images, resized to 600x600
        this.gm
            .delay(50)
            .resize(600, 600)
            .toBuffer('gif', async (err, buffer) => {
                if (err) reject(err);
                const params = {
                    ACL: 'public-read',
                    Bucket: config.aws_bucket,
                    ContentType: 'image/gif',
                    Key: `${prefix}/${uuid()}.gif`,
                    Body: buffer
                };
                try {
                    // upload to S3
                    const upload = await this.aws.upload(params);
                    // resolve with the S3 URL
                    resolve(upload);
                } catch (err) {
                    console.log('err', err);
                    reject(err);
                }
            });
    });
}
Ideally I would like to pass a remote file stream, or a buffer, in as a custom argument, as opposed to the tmp file path I am currently passing in:
images.forEach(image => {
    this.gm.in(`tmp/${image.Key}`);
});
I managed to make it work using only streams by first converting the images to MIFF and concatenating them into a single stream. Then passing the resulting buffer into gm again with a delay does the trick.
You will need to install the concat-stream npm package for this to work.
Sorry for the mixed ES5 code.
import gm from 'gm';
var concat = require('concat-stream');
...

constructor() {
    this.gm = gm();
}

start(prefix) {
    return getYourReadAbleStreamsSomehow().then(streams => {
        return this.generateGif(streams, 50);
    }).then(gifBuffer => {
        return this.uploadToAWS(gifBuffer, prefix);
    }).catch(err => {
        console.log(err);
    });
}

async uploadToAWS(buffer, prefix) {
    const params = {
        ACL: 'public-read',
        Bucket: config.aws_bucket,
        ContentType: 'image/gif',
        Key: `${prefix}/${uuid()}.gif`,
        Body: buffer
    };
    try {
        // upload to S3 and return the S3 URL
        const upload = await this.aws.upload(params);
        return upload;
    } catch (err) {
        console.log('err', err);
        throw err;
    }
}

generateGif(imageStreams, delay) {
    return new Promise((resolve, reject) => {
        // concat-stream buffers everything piped into it and hands over a single buffer
        var write = concat(function(buffer) {
            gm(buffer)
                .delay(delay)
                .toBuffer('gif', function(err, buffer) {
                    if (err)
                        return reject(err);
                    resolve(buffer);
                });
        });
        // Convert each image to MIFF and concatenate the streams into `write`
        var i = 0;
        var streamHandler = function() {
            gm(imageStreams[i])
                .resize('600', '600')
                .stream('miff', function(err, stdout, stderr) {
                    if (err)
                        return reject(err);
                    var lastOne = i === imageStreams.length - 1;
                    if (!lastOne)
                        stdout.once('end', streamHandler);
                    // only end the concat stream after the last image has been piped in
                    stdout.pipe(write, {
                        end: lastOne
                    });
                    i++;
                });
        };
        streamHandler();
    });
}
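As a side note, the getYourReadAbleStreamsSomehow() placeholder could be as simple as returning the remote request streams directly, since request(url) already gives you a readable stream that gm can consume. A hedged sketch based on the download() function from the question; the images argument and URL layout here are assumptions:

getYourReadAbleStreamsSomehow(images) {
    // request(url) returns a readable stream, so no tmp files are needed
    return Promise.resolve(
        images.map(image => request(`${this.base_url}/${this.bucket}/${image.Key}`))
    );
}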

Get buffer from Rackspace download using pkgcloud

This may not be possible, but I am trying to return a Buffer of an image stored on Rackspace using the pkgcloud module, without having to write to the file system. I've seen this done before; however, both examples I found pipe the download to the file system.
function get() {
    return new Promise(function (resolve, reject) {
        _this._RackClient.download(options, function(err, results) {
            if (err !== null) {
                console.log("Error Downloading:", err);
                return reject(err);
            }
            resolve(buffer);
        });
    });
}

return get();
This is ideally how I would like it to work, but there is currently no body present in the response. Can I use a stream.PassThrough() and return that, similar to uploading a buffer?
.download() returns a Readable stream, so it should just be a matter of buffering that output. For example:
var stream = _this._RackClient.download(options);
var buf = [];
var nb = 0;
var hadErr = false;
// resolve/reject here refer to the surrounding Promise, as in get() above
stream.on('data', function(chunk) {
    buf.push(chunk);
    nb += chunk.length;
}).on('end', function() {
    if (hadErr)
        return;
    switch (buf.length) {
        case 0:
            return resolve(Buffer.alloc(0));
        case 1:
            return resolve(buf[0]);
        default:
            return resolve(Buffer.concat(buf, nb));
    }
}).on('error', function(err) {
    hadErr = true;
    reject(err);
});
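Put together with the Promise wrapper from the question, that might look like the sketch below. It assumes .download() can be called without a callback and returns the readable stream, as the answer above describes:

function get() {
    return new Promise(function (resolve, reject) {
        var stream = _this._RackClient.download(options);
        var buf = [];
        var nb = 0;
        stream.on('data', function(chunk) {
            buf.push(chunk);
            nb += chunk.length;
        }).on('end', function() {
            // Buffer.concat handles the empty and single-chunk cases too
            resolve(Buffer.concat(buf, nb));
        }).on('error', reject);
    });
}

return get();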

S3 upload sending events is working locally with deferred.notify() but not on server

I am uploading an object to S3, and locally I get 'httpUploadProgress' events back. These notify a .progress call on the returned promise, which then updates a record with the upload progress. It works fine locally but not on the server. Any ideas would be appreciated; I've included two snippets of code. I'm using Google servers, and I'm not sure whether it's an issue with the code or with the server settings.
s3.uploadVideo = function (filepath, videoName, publisher_id) {
    var deferred = q.defer();
    var body = fs.createReadStream(filepath);
    var s3obj = new AWS.S3({
        params: {
            Bucket: transcodeConfig.NonTranscodedVideoBucket,
            Key: publisher_id + '/' + videoName
        }
    });
    s3obj.upload({ Body: body })
        .on('httpUploadProgress', function (evt) {
            deferred.notify(evt);
        })
        .send(function (err, data) {
            if (!err) {
                deferred.resolve(data);
            } else {
                deferred.reject(err);
            }
        });
    //deferred.resolve({})
    return deferred.promise;
};
aws_api.s3.uploadVideo(file.path, fileName, publisher_id).progress(function(progress) {
    return models.videos.findOneAndUpdate({ _id: trackingId }, { uploadProgress: progress, file_type: fileExtension }, function(err, data) {
        if (err) {
            return next(err);
        } else {
            return data;
        }
    });
});
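As a side note unrelated to the server issue, q's progress notifications are deprecated. An equivalent structure that passes a plain progress callback instead, sketched against the same AWS SDK v2 upload call (the extra onProgress parameter is an assumption, not the original signature):

s3.uploadVideo = function (filepath, videoName, publisher_id, onProgress) {
    return new Promise(function (resolve, reject) {
        var s3obj = new AWS.S3({
            params: {
                Bucket: transcodeConfig.NonTranscodedVideoBucket,
                Key: publisher_id + '/' + videoName
            }
        });
        s3obj.upload({ Body: fs.createReadStream(filepath) })
            .on('httpUploadProgress', function (evt) {
                // hand progress events straight to the caller
                if (onProgress) onProgress(evt);
            })
            .send(function (err, data) {
                if (err) return reject(err);
                resolve(data);
            });
    });
};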
