I have client code that uses the form-data module to hit a URL that returns a content-type of image/jpeg. Below is my code:
var FormData = require('form-data');
var fs = require('fs');

var form = new FormData();
//form.append('POLICE', "hello");
//form.append('PAYSLIP', fs.createReadStream("./Desert.jpg"));
console.log(form);

//https://fbcdn-profile-a.akamaihd.net/hprofile-ak-xfp1/v/t1.0-1/c8.0.50.50/p50x50/10934065_1389946604648669_2362155902065290483_n.jpg?oh=13640f19512fc3686063a4703494c6c1&oe=55ADC7C8&__gda__=1436921313_bf58cbf91270adcd7b29241838f7d01a
form.submit({
    protocol: 'https:',
    host: 'fbcdn-profile-a.akamaihd.net',
    path: '/hprofile-ak-xfp1/v/t1.0-1/c8.0.50.50/p50x50/10934065_1389946604648669_2362155902065290483_n.jpg?oh=13640f19512fc3686063a3494c6c1&oe=55ADCC8&__gda__=1436921313_bf58cbf91270adcd7b2924183',
    method: 'get'
}, function (err, res) {
    var data = "";
    res.on("data", function (chunks) {
        data += chunks;
    });
    res.on("end", function () {
        console.log(data);
        console.log("Response Headers - " + JSON.stringify(res.headers));
    });
});
I'm getting some chunk data, and the response headers I received were:
{"last-modified":"Thu, 12 Feb 2015 09:49:26 GMT","content-type":"image/jpeg","timing-allow-origin":"*","access-control-allow-origin":"*","content-length":"1443","cache-control":"no-transform, max-age=1209600","expires":"Thu, 30 Apr 2015 07:05:31 GMT","date":"Thu, 16 Apr 2015 07:05:31 GMT","connection":"keep-alive"}
I am now stuck on how to process the response I received into a proper image. I tried base64 decoding, but it seemed to be the wrong approach. Any help will be much appreciated.
I expect that data, once the file has been completely downloaded, contains a Buffer.
If that is the case, you should write the buffer as is, without any decoding, to a file:
fs.writeFile('path/to/file.jpg', data, function onFinished (err) {
// Handle possible error
})
See fs.writeFile() documentation - you will see that it accepts either a string or a buffer as data input.
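One caveat: in your snippet, data is initialised to an empty string, so data += chunks coerces every Buffer chunk to a utf8 string and corrupts the binary data. Here is a minimal sketch of a safer variant that collects the raw Buffer chunks and concatenates them at the end (reusing res and fs from your code above):

var chunks = [];
res.on('data', function (chunk) {
    chunks.push(chunk); // keep each chunk as a raw Buffer
});
res.on('end', function () {
    // Concatenate the Buffers and write the bytes to disk untouched
    fs.writeFile('path/to/file.jpg', Buffer.concat(chunks), function (err) {
        // Handle possible error
    });
});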
Extra awesomeness by using streams
Since the res object is a readable stream, you can simply pipe the data directly to a file, without keeping it in memory. This has the added benefit that if you download a really large file, Node.js will not have to keep the whole file in memory (as it does now), but will write it to the filesystem continuously as it arrives.
form.submit({
    // ...
}, function (err, res) {
    // res is a readable stream, so let's pipe it to the filesystem
    var file = fs.createWriteStream('path/to/file.jpg')
    file.on('finish', function writeDone () {
        // File is fully written to disk (listen on the write stream's
        // 'finish' event, not on res 'end' - 'finish' passes no error)
    })
    res.pipe(file) // Send the incoming file to the filesystem
})
The chunk you got is the raw image. Do whatever it is you want with the image, save it to disk, let the user download it, whatever.
So if I understand your question correctly, you want to download a file from an HTTP endpoint and save it to your computer, right? If so, you should look into using the request module instead of form-data.
Here's a contrived example for downloading things using request:
var fs = require('fs');
var request = require('request');

request('http://www.example.com/picture.jpg')
    .pipe(fs.createWriteStream('picture.jpg'));
Where 'picture.jpg' is the location to save to disk. You can open it up using a normal file browser.
I'm fairly new to Node and streaming, and I am having an issue when attempting to stream a large amount of data to a file download in the client browser.
So, for example, if on the server I have a large file, test.txt, I can easily stream it to the client browser by setting the attachment header and piping the file to the response as follows:
res.setHeader('Content-Type', 'text/csv');
res.setHeader('Content-disposition', 'attachment;filename=myfile.text');
fs.createReadStream('./test.txt')
.pipe(res);
When the user clicks the button, the download begins, and we see the data getting streamed to the download file. The stream takes several minutes, but during this time the client is not blocked and they can continue to do other things while the file is downloaded by the browser.
However, my data is not stored in a file; I need to retrieve it one string at a time from another server. So I'm attempting to create my own read stream and push my data chunk by chunk, but it does not work when I do something like this:
var s = new Readable();
s.pipe(res);

for (let i = 0; i <= total; i++) {
    dataString = // code here to get the next string to push
    s.push(dataString);
}
s.push(null);
With this code, when the user requests the download, the client is blocked once the download begins and cannot perform any other actions until the download is completed. Also, if the data takes more than 30 seconds to stream, we hit the server timeout and the download fails. With the file stream this is not an issue.
How do I get this to act like a file stream and not block the client from making other requests while it downloads? Any recommendations on the best way to implement this would be appreciated.
I was able to resolve this issue by doing something similar to what is described here:
How to call an asynchronous function inside a node.js readable stream
My basic code is as follows, and this is not blocking the client or timing out on the request as the data is continuously piped to the file download on the client side.
var Readable = require('stream').Readable; // assumed requires, not shown in the original snippet
var zlib = require('zlib');

res.setHeader('Content-Type', 'text/csv');
res.setHeader('Content-disposition', 'attachment;filename=myfile.text');

function MyStream() {
    var rs = new Readable();
    var hitsadded = 0;
    rs._read = function () {}; // needed to avoid "Not implemented" exception

    getResults(queryString, function getMoreUntilDone(err, res) {
        if (err) {
            logger.logError(err);
        }
        // Push this page of results, then ask for the next page (if any)
        rs.push(res.data);
        hitsadded += res.records;
        if (res.recordsTotal > hitsadded) {
            getNextPage(query, getMoreUntilDone);
        } else {
            rs.push(null); // no more data - end the stream
        }
    });
    return rs;
}

MyStream().pipe(zlib.createGzip()).pipe(res);
Suppose I make a multipart, application/octet-stream request with responseType set to 'arraybuffer', receive it in Node.js, and try to write the response to a file. How can I handle this so that I don't corrupt the contents?
My current approach is something like this:
var req = restler.post(url, opts)
    .on('data', function (data) {
        console.log('receiving data...');
        console.log(data);
    }).on('complete', function (data) {
        var buff = new Buffer(data); // this is prolly incorrect, but I can't figure this out at all
        fs.writeFile(file_name, buff.toString('binary'), function (err) {
            console.log('done!');
        });
    });
Here I write the contents into file_name. Suppose I fetch a Microsoft Word file: fetching it only leads to a corrupt file. I am also using the restler package for this.
According to the restler documentation, you can set decoding: 'buffer' in your opts and it will keep the binary data intact as a Buffer instead of the default utf8-encoded string. From there it's just a matter of passing the buffer directly to fs.writeFile() without calling buffer.toString().
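For illustration, here is a minimal sketch of that fix, assuming the same url, opts, and file_name as in the question:

var restler = require('restler');
var fs = require('fs');

opts.decoding = 'buffer'; // keep the response body as a raw Buffer

restler.post(url, opts)
    .on('complete', function (data) {
        // data is now a Buffer - write it to disk untouched
        fs.writeFile(file_name, data, function (err) {
            if (err) console.log(err);
            else console.log('done!');
        });
    });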
I'm trying to fetch an image, apply a transform, and save it in a database like MongoDB. Here's my code:
var stor = function (inStream, sizeType) {
    console.log('entering stor function');
    var hashCode = '';
    var img = new Buffer(1024 * 1024 * 5 * 1.1, 'binary'); // 5mb size + 10% space
    var hash = crypto.createHash('sha1');
    inStream.on('data', function (chunk) {
        Buffer.concat([img, chunk]);
        hash.update(chunk);
    });
    inStream.on('end', function () {
        hashCode = hash.digest('hex');
        var retUrl = "http://playground.com/" + hashCode;
        //post this url using requests, set encoding : binary
    });
};

server.post('/resize', function (req, res) {
    req.accepts('application/json');
    console.log('received a resize request for image =', req.body.host + req.body.path);
    var request = requests.get({
        url: req.body.url,
        headers: {'accept-encoding': 'gzip'}
    });
    //error handling
    request.on('response', function (response) {
        console.log('succesfully fetched image...');
        response.setEncoding('binary');
        //save original in stor
        stor(response, 'original');
        res.writeHead(201);
        res.send();
    });
});

module.exports = server;
When I do this, receiving an image from the internet and then saving it in my database for future use, the data saved in the database is not the original image I stored. It is corrupt. I have narrowed the problem down to the encoding of the data I buffer in the function stor (variable 'img'). I verified this by piping the data directly from the response to the database post call. I can't do that for my purpose because I need to compute the hash of the image.
I want to know if my assumptions are correct.
Images from the internet can be read as 'binary'.
You can load that data onto a buffer as 'binary'.
PUT the image onto a store with encoding set to 'binary'.
I think one or all of these assumptions is wrong, as I get back only corrupted data from the database.
The issue was that I was using exec. exec outputs a buffer. Using spawn solved the issue: spawn outputs a stream, which handles binary correctly. Of course, I also set the encoding to binary.
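For illustration, a minimal sketch of the spawn approach; the command and its arguments here are placeholders, not the ones from the original code:

var spawn = require('child_process').spawn;
var crypto = require('crypto');

// spawn exposes stdout as a stream of raw Buffers, so the image can be
// hashed and collected chunk by chunk without any string re-encoding
var child = spawn('convert', ['input.jpg', '-resize', '50x50', 'jpg:-']);
var hash = crypto.createHash('sha1');
var chunks = [];

child.stdout.on('data', function (chunk) {
    chunks.push(chunk); // keep each chunk as a Buffer
    hash.update(chunk); // hash the raw bytes
});
child.stdout.on('end', function () {
    var img = Buffer.concat(chunks); // the untouched binary image
    console.log('sha1:', hash.digest('hex'));
});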
I've written a simple image manipulation service that uses node gm on an image from an http response stream. If I use nodejs' default transfer-encoding: chunked, things work just fine. But, as soon as I try and add the content-length implementation, nodejs hangs the response or I get content-length mismatch errors.
Here's the gist of the code in question (some variable definitions have been omitted for brevity):
var image = gm(response);

// gm getter used to get origin properties of image
image.identify({bufferStream: true}, function (error, value) {
    this.setFormat(imageFormat)
        .compress(compression)
        .resize(width, height);

    // instead of default transfer-encoding: chunked, calculate content-length
    this.toBuffer(function (err, buffer) {
        console.log(buffer.length);
        res.setHeader('Content-Length', buffer.length);
        gm(buffer).stream(function (stError, stdout, stderr) {
            stdout.pipe(res);
        });
    });
});
This will spit out the desired image and a content length that looks right, but the browser hangs, suggesting that there's a mismatch or something else wrong. I'm using node gm 1.9.0.
I've seen similar posts on nodejs gm content-length implementation, but I haven't seen anyone post this exact problem yet.
Thanks in advance.
I ended up changing my approach. Instead of using this.toBuffer(), I save the new file to disk using this.write(fileName, callback), then read it with fs.createReadStream(fileName) and pipe it to the response. Something like:
var filePath = './output/' + req.param('id') + '.' + imageFormat;

this.write(filePath, function (writeErr) {
    var stat = fs.statSync(filePath);
    res.writeHead(200, {
        'Content-Type': 'image/' + imageFormat,
        'Content-Length': stat.size
    });
    var readStream = fs.createReadStream(filePath);
    readStream.pipe(res);
    // async delete the file from filesystem
    ...
});
You end up getting all of the headers you need, including your new content-length, to return to the client.
I've been googling this and looking around stackoverflow for a while but haven't found a solution - hence the post.
I am playing around with Node.js and WebSockets out of curiosity. I am trying to stream some binary data (an mp3) to the client. My code so far is below but is obviously not working as intended.
I suspect that my problem is that I am not actually sending binary data from the server and would like some clarification/help.
Here's my server...
var fs = require('fs');
var WebSocketServer = require('ws').Server;

var wss = new WebSocketServer({port: 8080, host: "127.0.0.1"});

wss.on('connection', function (ws) {
    var readStream = fs.createReadStream("test.mp3", {
        'flags': 'r',
        'encoding': 'binary',
        'mode': 0666,
        'bufferSize': 64 * 1024
    });
    readStream.on('data', function (data) {
        ws.send(data, {binary: true, mask: false});
    });
});
And my client...
context = new webkitAudioContext();

var ws = new WebSocket("ws://localhost:8080");
ws.binaryType = 'arraybuffer';

ws.onmessage = function (evt) {
    context.decodeAudioData(
        evt.data,
        function (buffer) {
            console.log("Success");
        },
        function (error) {
            console.log("Error");
        });
};
The call to decode always ends up in the error callback. I am assuming this is because it is receiving bad data.
So my question is: how do I correctly stream the file as binary?
Thanks
Your server is sending messages consisting of binary audio data in 64 KB chunks to your client. Your client should rebuild the audio file before calling decodeAudioData.
You are calling decodeAudioData every time your client gets a message on the websocket. Instead, you have to create a separate buffer and add all the chunks to it. Then, on completion of the transfer, the buffer should be given as input to decodeAudioData.
You have two options now:
You load the entire file (fs.readFile) without using stream events and send the whole file with ws.send (easy to do)
You use stream events, modify your client to accept chunks of data, and assemble them before calling decodeAudioData (see the sketch below)
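A minimal sketch of the second option, assuming the server sends a final text message "EOF" after the last binary chunk (that end-of-transfer signal is an assumption, not part of the original code):

var chunks = [];

ws.onmessage = function (evt) {
    if (evt.data === "EOF") {
        // All chunks received - merge the ArrayBuffers into one
        var total = 0;
        chunks.forEach(function (c) { total += c.byteLength; });
        var merged = new Uint8Array(total);
        var offset = 0;
        chunks.forEach(function (c) {
            merged.set(new Uint8Array(c), offset);
            offset += c.byteLength;
        });
        context.decodeAudioData(
            merged.buffer,
            function (buffer) { console.log("Success"); },
            function (error) { console.log("Error"); });
    } else {
        chunks.push(evt.data); // each binary message is an ArrayBuffer
    }
};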
Problem solved.
I fixed this issue by a combination of removing the 'encoding': 'binary' option passed to createReadStream() and applying the solution at:
decodeAudioData returning a null error
As per some of my comments, when I updated the createReadStream options, the first chunk was playing but all other chunks were executing the onError callback from decodeAudioData(). The solution in the link above fixed this for me.
It seems that decodeAudioData() is a bit picky as to how the chunks it receives should be formatted. They should be valid chunks apparently...
Define 'valid mp3 chunk' for decodeAudioData (WebAudio API)