I need to stream files from a client (a Node.js command-line tool) to a server (Express on Node.js).
This is the client side:
var request = require('request');
var fs = require('fs');
// ...
var readStream = fs.createReadStream(file.path);
readStream.on('end', function() {
  that.emit('finished');
});
readStream.pipe(request.post(target));
// ...
This is the server side:
var fs = require('fs');
var path = require('path');
// ...
app.post('/:filename', function(req, res) {
  req.setEncoding('binary');
  var filename = path.basename(req.params.filename);
  filename = path.resolve(destinationDir, filename);
  var dst = fs.createWriteStream(filename);
  req.pipe(dst);
  req.on('end', function() {
    res.send(200);
  });
});
// ...
All is working and the files are saved correctly on the server side... but they are about 50% bigger than the source files. I compared the two files with hexdump: the server-side file has similar content, but with 0xC2 bytes inserted here and there. I guess this is related to encoding.
Don't call req.setEncoding('binary').
This converts every chunk into a string, and is only useful when you actually want to read strings from the stream. Since you pipe the request straight into a file, you don't need it. It also explains your hexdump: 'binary' is an alias for latin1, so each raw byte is decoded to one character, and when those characters are written back out they are encoded as UTF-8, where every byte in the range 0x80-0xBF becomes a two-byte sequence starting with 0xC2 (and 0xC0-0xFF becomes one starting with 0xC3). That is why the copies end up roughly 50% bigger.
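For reference, a minimal sketch of the fixed route, using the same names as your code (destinationDir is assumed to be defined elsewhere):
app.post('/:filename', function (req, res) {
  // no req.setEncoding(); keep the request as a raw byte stream
  var filename = path.basename(req.params.filename);
  filename = path.resolve(destinationDir, filename);
  var dst = fs.createWriteStream(filename);
  req.pipe(dst);
  dst.on('finish', function () {
    // 'finish' on the write stream means the bytes are flushed to disk,
    // which is a safer moment to reply than 'end' on the request
    res.sendStatus(200); // res.send(200) is deprecated in Express 4+
  });
});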
Related
I want to read a file from a URL and create a download stream with a different file name, using Node.js on Lambda. Currently I am trying, and failing, with this code:
var fs= require('fs');
var url="https://upload.wikimedia.org/wikipedia/commons/5/51/Google.png";
fs.rename(url, "download.png", function(err) {
  if (err) console.log('ERROR: ' + err);
});
fs.rename is for renaming files on the local filesystem; it cannot take a URL. Since you want to download a file from an external URL and save it under a new name, try this instead:
var https = require('https'); // the URL is https, so use the https module, not http
var fs = require('fs');
var file = fs.createWriteStream("download.png");
https.get("https://upload.wikimedia.org/wikipedia/commons/5/51/Google.png", function (response) {
  response.pipe(file);
});
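If you also want to know when the download has actually finished, and to clean up after failures, a slightly fuller sketch (same file and URL as above):
https.get("https://upload.wikimedia.org/wikipedia/commons/5/51/Google.png", function (response) {
  response.pipe(file);
  file.on('finish', function () {
    file.close(); // the file is fully flushed to disk at this point
  });
}).on('error', function (err) {
  fs.unlink("download.png", function () {}); // remove the partial file, ignoring unlink errors
  console.log('ERROR: ' + err);
});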
I have a file on the D: drive of my local system. I need to download the file to the E: drive. How can I do this using Node.js and an HTTP request? I am a beginner in Node.js. Please give me your valuable suggestions.
Note: the file may be of any type.
Here is an example:
// upload.js
var fs = require('fs');
var newPath = "E:\\newPath";
var oldPath = "D:\\oldPath";

exports.uploadFile = function (req, res) {
  fs.readFile(oldPath, function (err, data) {
    if (err) throw err;
    fs.writeFile(newPath, data, function (err) {
      if (err) throw err;
      fs.unlink(oldPath, function (err) { // each callback gets its own err; check them in order
        if (err) throw err;
        res.send("File uploaded to: " + newPath);
      });
    });
  });
};
// app.js
var express = require('express'),  // fetch the express library
    upload = require('./upload');  // fetch the upload.js you have just written

var app = express();
app.get('/upload', upload.uploadFile);
app.listen(3000);
Basically there are two parts: one does the copying from one drive to the other, and the other one triggers it. Once you run app.js and make a GET request to localhost:3000/upload, it will copy the file from oldPath to newPath. For further information, have a look at expressjs and fs.
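Note that fs.readFile loads the entire file into memory before writing it back out, which is fine for small files but wasteful for a big one. A sketch of a streaming alternative (same oldPath and newPath as above):
// streaming copy: memory use stays constant regardless of file size
var readStream = fs.createReadStream(oldPath);
var writeStream = fs.createWriteStream(newPath);
readStream.pipe(writeStream);
writeStream.on('finish', function () {
  fs.unlink(oldPath, function (err) { // remove the original, as the readFile version does
    if (err) throw err;
    console.log("File uploaded to: " + newPath);
  });
});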
Assuming it's a text file, you would have to write two Node.js programs. The first would answer HTTP GETs (all of them, or specific paths, your choice) with the content of the file; the other would make a GET request and download the file.
server.js: will work only for a text file
var http = require('http'),
    fs = require('fs'),
    server = http.createServer(function (req, res) {
      res.writeHead(200, {'content-type': 'text/plain'});
      fs.readFile('E:/path/to/file.txt', function (err, data) { // the callback gets (err, data), not just (data)
        res.write('' + data);
        res.end();
      });
    }).listen(8080);
client.js
var http = require('http'),
    fs = require('fs'),
    file = fs.createWriteStream('D:/path/to/new.txt', {flags: 'w'});

http.get('http://localhost:8080', function (res) {
  res.pipe(file, {end: false}); // end must be a real boolean, not the string 'false'
  res.on('end', function () {
    file.end();
  });
});
EDIT:
The only advantage over anvarik's solution is that I don't use Express...
I wrote a simple HTTP server, and when downloading a large file (1.5 GB) it crashes with an out-of-memory error. My code is as follows:
var http = require("http");
var fs = require("fs");
var filename = "file.iso";
var serv = http.createServer(function(req,res){
var stat = fs.statSync(filename);
res.writeHeader(200,{"Content-Length":stat.size});
var fReadStream = fs.createReadStream(filename);
fReadStream.on('data', function (chunk) {
res.write(chunk);
});
fReadStream.on('end', function () {
res.end();
});
});
serv.listen(8888);
But when I switch to the stream's pipe method, it is OK, like this:
var http = require("http");
var fs = require("fs");
var filename = "file.iso";
var serv = http.createServer(function(req,res){
var stat = fs.statSync(filename);
res.writeHeader(200,{"Content-Length":stat.size});
var fReadStream = fs.createReadStream(filename);
fReadStream.pipe(res);
});
serv.listen(8888);
My question is: why does the first version not work?
I have been working with large files in Node and have noticed that the asynchronous reads can complete far faster than the HTTP response can write the data out. The result is a bottleneck that will crash your server: all the outstanding writes sit in the response's buffer until the process runs out of memory. Piping is designed to manage the flow so that this does not happen.
From the Node documentation:
readable.pipe(destination, [options])
  destination (Writable Stream): the destination for writing data
  options (Object): pipe options
    end (Boolean): end the writer when the reader ends. Default = true
This method pulls all the data out of a readable stream and writes it to the supplied destination, automatically managing the flow so that the destination is not overwhelmed by a fast readable stream.
We can balance reads and writes without pipe, like this:
var http = require("http");
var fs = require("fs");
var filename = "file.iso";
var serv = http.createServer(function (req, res) {
var stat = fs.statSync(filename);
res.writeHeader(200, {"Content-Length": stat.size});
var fReadStream = fs.createReadStream(filename);
fReadStream.on('data', function (chunk) {
if(!res.write(chunk)){
fReadStream.pause();
}
});
fReadStream.on('end', function () {
res.end();
});
res.on("drain", function () {
fReadStream.resume();
});
});
serv.listen(8888);
Let's say you create a zip file in-memory following the example from node-zip's documentation:
var zip = new require('node-zip')()
zip.file('test.file', 'hello there')
var data = zip.generate({type:'string'})
How do you then send data to a browser such that it will accept it as a download?
I tried this, but the download hangs at 150/150 bytes AND makes Chrome start eating 100% CPU:
res.setHeader('Content-type: application/zip')
res.setHeader('Content-disposition', 'attachment; filename=Zippy.zip');
res.send(data)
So what's the proper way to send zip data to a browser?
Using the archiver and string-stream packages (npm install archiver string-stream):
var http = require('http')
var fs = require('fs')
var archiver = require('archiver')
var StringStream = require('string-stream')

http.createServer(function (request, response) {
  // the headers from your question: tell the browser this is a zip download
  response.writeHead(200, {
    'Content-Type': 'application/zip',
    'Content-Disposition': 'attachment; filename=Zippy.zip'
  })
  var dl = archiver('zip')
  dl.on('error', function (err) {
    response.destroy() // headers may already have gone out, so just abort the connection
  })
  dl.pipe(response)
  dl.append(fs.createReadStream('/path/to/some/file.txt'), {name: 'YoDog/SubFolder/static.txt'}) // no `new` needed
  dl.append(new StringStream("Ooh dynamic stuff!"), {name: 'YoDog/dynamic.txt'})
  dl.finalize()
}).listen(3000)
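One caveat: finalize(function (err) {...}) was the old archiver API; if memory serves, newer versions of archiver no longer accept a callback there, so failures are reported through the 'error' event as above.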
I recommend using streams for this.
var fs = require('fs');
var zlib = require('zlib');
var http = require('http');

http.createServer(function (request, response) {
  response.writeHead(200, {'Content-Type': 'application/octet-stream'});
  var readStream = fs.createReadStream('test.file');
  var unzipStream = zlib.createUnzip();
  // chain the pipes: read -> unzip -> response
  // (a.pipe(b).pipe(c) works; a.pipe(b.pipe(c)) would bypass b)
  readStream.pipe(unzipStream).pipe(response);
}).listen(3000);
This will probably not work in the real world (as I am not that familiar with zlib), but it may give you the direction.
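For what it's worth, zlib cannot produce .zip archives at all; it implements gzip/deflate compression. If single-file compression (rather than a multi-file archive) is enough, a working variant of the idea above serves the file gzip-compressed and lets the browser decompress it transparently:
var fs = require('fs');
var zlib = require('zlib');
var http = require('http');

http.createServer(function (request, response) {
  // Content-Encoding: gzip tells the browser to decompress on the fly
  response.writeHead(200, {
    'Content-Type': 'application/octet-stream',
    'Content-Encoding': 'gzip'
  });
  fs.createReadStream('test.file').pipe(zlib.createGzip()).pipe(response);
}).listen(3000);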
In most examples you find on the web, an index.html file is served like the following:
function serveIndexPage(response) {
  fs.readFile(__dirname + '/public/index.html', function (err, data) {
    response.end(data);
  });
}
This seems like a bad idea, as the whole file is read into memory and then sent to the client. Is there a better way to do this? I know that libraries like Connect and Express provide such functionality, but for my project I'd like to use plain Node.js.
EDIT
Also, you sometimes see readFileSync used, which is even worse IMHO.
Pipe your data through, so that a simple static HTTP server looks like this:
var Http = require('http'),
    Url = require('url'),
    Path = require('path'),
    Fs = require('fs');

Http.createServer(function (req, res) {
  var pathname = Url.parse(req.url).pathname;
  var file = Path.join(process.cwd(), pathname);
  // fs.exists is deprecated in modern Node (fs.access is preferred),
  // but it matches the vintage of this code
  Fs.exists(file, function (exists) {
    if (!exists) {
      res.writeHead(404);
      res.end();
      return; // don't fall through to the 200 response
    }
    res.writeHead(200 /*, {'Content-Type': mime type} */);
    var fileStream = Fs.createReadStream(file);
    fileStream.pipe(res);
  });
}).listen(process.env.PORT || 1999);
The pipe'ing is shorthand for something like the following (plus the automatic backpressure handling described above):
var s = Fs.createReadStream(file);
s.on('data', function (data) {
  res.write(data);
});
s.on('end', function () {
  res.end();
});
In theory, you could also read the file line by line, response.write()'ing each line to the client.
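For completeness, a sketch of that line-by-line variant using Node's built-in readline module (reusing the Fs import from the server above); it buys you nothing over pipe for static files, since pipe already streams in chunks, but it shows the idea:
var Readline = require('readline');

function serveLineByLine(file, res) {
  var rl = Readline.createInterface({
    input: Fs.createReadStream(file)
  });
  rl.on('line', function (line) {
    res.write(line + '\n'); // readline strips the newline, so add it back
  });
  rl.on('close', function () {
    res.end();
  });
}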