In my Node application I'm writing data to a file using the write method of a stream obtained from createWriteStream. Now I need to find out whether the write for a particular stream is complete or not. How can I find that?
var stream = fs.createWriteStream('myFile.txt', {flags: 'a'});
var result = stream.write(data);

writeToStream();

function writeToStream() {
  var result = stream.write(data + '\n');
  if (!result) {
    // Pass the function reference so it runs only after 'drain' fires
    stream.once('drain', writeToStream);
  }
}
I need to call another method every time a write completes. How can I do this?
From the Node.js WritableStream.write(...) documentation: you can give the write method a callback that is called when the written data has been flushed:
var stream = fs.createWriteStream('myFile.txt', {flags: 'a'});
var data = "Hello, World!\n";
stream.write(data, function() {
// Now the data has been written.
});
Note that you probably don't need to actually wait for each call to "write" to complete before queueing the next call. Even if the "write" method returns false you can still call subsequent writes and node will buffer the pending write requests into memory.
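That said, if you are writing a lot of data and want to avoid buffering an unbounded amount of it in memory, you can stop writing when write returns false and continue once the 'drain' event fires. A minimal sketch of that pattern (the writeLines helper and its input are just illustrative):
var fs = require('fs');
var stream = fs.createWriteStream('myFile.txt', {flags: 'a'});

// Write an array of lines, pausing whenever write() signals backpressure
function writeLines(lines, done) {
  var i = 0;
  (function writeNext() {
    while (i < lines.length) {
      var ok = stream.write(lines[i++] + '\n');
      if (!ok) {
        // Internal buffer is full; resume once it has drained
        stream.once('drain', writeNext);
        return;
      }
    }
    done();
  })();
}

writeLines(['first line', 'second line', 'third line'], function() {
  console.log('All lines queued; call stream.end() when you are finished.');
});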
I am using maerics's answer along with error handling. The flag 'a' opens the file for appending; the file is created if it does not exist. There are other flags you can use as well.
// Create a writable stream in append mode (flag 'a')
var writerStream = fs.createWriteStream('MockData/output.txt', {flags: 'a'})
  .on('finish', function() {
    console.log("Write Finish.");
  })
  .on('error', function(err) {
    console.log(err.stack);
  });

// Write the data to the stream with utf8 encoding
writerStream.write(outPutData, 'utf8', function() {
  // Now the data has been written.
  console.log("Write completed.");
});

// Mark the end of the file
writerStream.end();
I'm quite new to JavaScript. I'm using a Node.js writable stream to write a .txt file. It works well, but I cannot understand how to properly close the file, as its content stays blank as long as the program is running. More specifically, I need to read from that .txt file after it has been written, but doing it this way returns an empty buffer.
let myWriteStream = fs.createWriteStream("./filepath.txt");
myWriteStream.write(stringBuffer + "\n");
myWriteStream.on('close', () => {
console.log('close event emitted');
});
myWriteStream.end();
// do things..
let data = fs.readFileSync("./filepath.txt").toString().split("\n");
It seems the event emitted by the .end() method is triggered after the file is read, causing it to be read as empty. If I add a while() loop to wait for the event to be triggered, so that I know for sure the stream is closed before the read, the program waits forever.
Do you have any clue what I'm doing wrong?
You're missing two things: first, check whether the write succeeded, and second, wait for the stream's 'finish' event before reading the file.
const { readFileSync, createWriteStream } = require('fs')

const stringBuffer = Buffer.from(readFileSync('index.js'))
const filePath = "./filepath.txt"
const myWriteStream = createWriteStream(filePath)

// write() returns false when the internal buffer is full (backpressure),
// not when the write failed, so checking the return value is enough here
let backPressureTest = myWriteStream.write(stringBuffer + "\n");
myWriteStream.on('close', () => {
console.log('close event emitted');
});
myWriteStream.on('finish', () => {
console.log('finish event emitted');
let data = readFileSync(filePath).toString().split("\n");
console.log(data);
});
myWriteStream.end();
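On newer Node versions (11.13+ where events.once is available) the same idea can be written more directly with async/await, waiting for 'finish' before reading the file back. A rough sketch:
const { once } = require('events')
const { readFileSync, createWriteStream } = require('fs')

async function writeThenRead() {
  const filePath = "./filepath.txt"
  const myWriteStream = createWriteStream(filePath)

  myWriteStream.write("some text\n")
  myWriteStream.end()

  // Resolves when 'finish' fires, i.e. all data has been flushed to the file
  await once(myWriteStream, 'finish')

  const data = readFileSync(filePath).toString().split("\n")
  console.log(data)
}

writeThenRead()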
Given a situation where I use the Node.js readline library to iterate over each line in the STDIN stream, do some processing on it, and write it back out to STDOUT, as in the following example:
var rl = readline.createInterface({
input: process.stdin,
output: process.stdout,
terminal: false
});
function my_function(line) {
var output = ...(line);
process.stdout.write(output);
}
rl.on('line', my_function);
I'm concerned that the processing I'm doing will take very different amounts of time depending on the line content so some lines will return very quickly while others takes some time to sort out. Is it possible that my_function() will ever run out of order and hence cause the output stream to be scrambled? Should I be looking into using a synchronous loop of some kind instead of this asynchronous event handler?
The JavaScript execution itself is single-threaded, so as long as you're only performing synchronous operations inside the event handler, there is no problem.
If you are performing asynchronous operations inside the event handler, then it is possible that another 'line' event could be emitted before your asynchronous operation(s) are complete. In that case, you would need to rl.pause() first and then rl.resume() once you are finished with your asynchronous operations. However, this isn't foolproof since 'line' events could still be emitted after a rl.pause() if the current chunk of data read from the input stream had multiple line breaks.
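For illustration, the pause/resume approach might look something like this (doSomethingAsync stands in for whatever asynchronous processing you are doing):
rl.on('line', function(line) {
  rl.pause();
  doSomethingAsync(line, function(err, output) {
    if (err) throw err;
    process.stdout.write(output);
    // Safe to accept the next line now
    rl.resume();
  });
});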
So if you are performing asynchronous operations inside the event handler, you are probably better off just reading from the stream yourself so that you have more control over the parsing behavior. This is actually pretty easy to do, for example:
function parseStream(stream, callback) {
  // Assuming all stream data is text and not binary ...
  var buffer = '';
  var RE_EOL = /\r?\n/;

  stream.on('data', function(data) {
    buffer += data;
    processBuffer();
  });
  stream.on('end', callback);
  stream.on('error', callback);

  function processBuffer() {
    var match = RE_EOL.exec(buffer);
    if (match) {
      // Found a line ending
      var line = buffer.slice(0, match.index);
      buffer = buffer.slice(match.index + match[0].length);
      stream.pause();
      callback(null, line, processBuffer);
    } else {
      stream.resume();
    }
  }
}
// ...
parseStream(process.stdin, function(err, line, done) {
if (err) throw err;
if (line === undefined) {
// No more data will be available (stream ended)
console.log('(Stream ended!)');
return;
}
// Do something with `line`
console.log(line);
// Call `done()` whenever your async operation(s) are all finished
done();
});
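As a quick sanity check of the ordering concern, you can simulate a slow asynchronous operation per line and confirm the output still comes out in order, since the next line is not parsed until done() is called. A sketch:
parseStream(process.stdin, function(err, line, done) {
  if (err) throw err;
  if (line === undefined) {
    console.log('(Stream ended!)');
    return;
  }
  // Pretend each line takes a random amount of time to process
  setTimeout(function() {
    console.log(line.toUpperCase());
    done();
  }, Math.random() * 100);
});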
var http = require('http');
var map = require('through2-map');
uc = map(function(ch) {
return ch.toString().toUpperCase();
});
server = http.createServer(function(request, response) {
request.on('data',function(chunk){
if (request.method == 'POST') {
//change the data from request to uppercase letters and
//pipe to response.
}
});
});
server.listen(8000);
I have two questions about the code above. First, I read the documentation for request; it says that request is an instance of IncomingMessage, which implements the Readable Stream interface. However, I couldn't find the .on method in the Stream documentation, so I don't know what chunk in the callback passed to request.on is. Secondly, I want to manipulate the data from the request and pipe it to the response. Should I pipe from chunk or from request? Thank you for your consideration!
Is chunk a stream?
Nope. The stream is the flow through which the chunks of the whole data are sent.
A simple example: if you read a 1 GB file, a stream will read it in chunks of, say, 10 KB; each chunk goes through your stream, from beginning to end, in the right order.
I used a file as the example, but a socket, a request, or any other stream is based on the same idea.
Also, whenever someone sends a request to this server, would that entire thing be a chunk?
In the particular case of HTTP requests, only the request body is a stream. That can be the posted files or data, and the same goes for the body of the response. Headers are treated as objects and applied to the request before the body is written to the socket.
A small example with some concrete code to help you:
var through2 = require('through2');
var Readable = require('stream').Readable;
var s1 = through2(function transform(chunk, enc, cb){
  console.log("s1 chunk %s", chunk.toString())
  cb(null, chunk.toString() + chunk.toString())
});
var s2 = through2(function transform(chunk, enc, cb){
  console.log("s2 chunk %s", chunk.toString())
  cb(null, chunk)
});
s2.on('data', function (data) {
console.log("s2 data %s", data.toString())
})
s1.on('end', function (data) {
console.log("s1 end")
})
s2.on('end', function (data) {
console.log("s2 end")
})
var rs = new Readable({ read: function() {} }); // no-op _read so pushed chunks can be consumed
rs.push('beep '); // this is a chunk
rs.push('boop'); // this is a chunk
rs.push(null); // this is a signal to end the stream
rs.on('end', function (data) {
console.log("rs end")
})
console.log(
".pipe always return piped stream: %s", rs.pipe(s1)===s1
)
s1.pipe(s2)
I would also suggest reading more:
https://github.com/substack/stream-handbook
http://maxogden.com/node-streams.html
https://github.com/maxogden/mississippi
All streams are instances of EventEmitter (docs); that is where the .on method comes from.
Regarding the second question, you MUST pipe from the Stream object (request in this case). The "data" event emits data as a Buffer or a String (the "chunk" argument in the event listener), not a stream.
Manipulating streams is usually done by implementing a Transform stream (docs). There are many npm packages that make this process simpler (like through2-map or the like), but in reality they produce Transform streams.
Consider the following:
var http = require('http');
var map = require('through2-map');
// Transform Stream to uppercase
var uc = map(function(ch) {
return ch.toString().toUpperCase();
});
var server = http.createServer(function(request, response) {
// Pipe from the request to our transform stream
request
.pipe(uc)
// pipe from transform stream to response
.pipe(response);
});
server.listen(8000);
You can test by running curl:
$ curl -X POST -d 'foo=bar' http://localhost:8000
# logs FOO=BAR
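For comparison, here is roughly the same server using the built-in stream.Transform class directly (a sketch using the simplified constructor available in newer Node versions; through2-map produces an equivalent Transform under the hood):
var http = require('http');
var Transform = require('stream').Transform;

// Transform stream that uppercases every chunk passing through it
var uc = new Transform({
  transform: function(chunk, encoding, callback) {
    callback(null, chunk.toString().toUpperCase());
  }
});

var server = http.createServer(function(request, response) {
  // request (Readable) -> transform -> response (Writable)
  request.pipe(uc).pipe(response);
});

server.listen(8000);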
I have a stream I'm sending over the wire that takes a bit of time to fully send, so I want to display how far along it is on the fly. I know you can listen on the 'data' event for streams, but in newer versions of Node, that also puts the stream into "flowing mode". I want to make sure I'm doing this correctly.
Currently I have the following stuff:
deploymentPackageStream.pause() // to prevent it from entering "flowing mode"
var bytesSent = 0
deploymentPackageStream.on('data', function(data) {
bytesSent+=data.length
process.stdout.write('\r ')
process.stdout.write('\r'+(bytesSent/1000)+'kb sent')
})
deploymentPackageStream.resume()
// copy over the deployment package
execute(conn, 'cat > deploymentPackage.sh', deploymentPackageStream).wait()
This gives me the right bytesSent output, but the resulting package seems to be missing some data off the front. If I put the 'resume' line after executing the copy line (the last line), it doesn't copy anything. If I don't resume, it also doesn't copy anything. What's going on and how do I do this properly without disrupting the stream and without entering flowing mode (I want back pressure)?
I should mention I'm still using Node v0.10.x.
Alright, I made something that essentially is a passthrough, but calls a callback with data as it comes in:
var util = require('util')
var Readable = require('stream').Readable

// creates a stream that can view all the data in a stream and passes the data through
// parameters:
// stream - the stream to peek at
// callback - called when there's data sent from the passed stream
var StreamPeeker = exports.StreamPeeker = function(stream, callback) {
Readable.call(this)
this.stream = stream
stream.on('readable', function() {
var data = stream.read()
if(data !== null) {
if(!this.push(data)) stream.pause()
callback(data)
}
}.bind(this))
stream.on('end', function() {
this.push(null)
}.bind(this))
}
util.inherits(StreamPeeker, Readable)
StreamPeeker.prototype._read = function() {
this.stream.resume()
}
If I understand streams properly, this should appropriately handle backpressure.
Using this, I can just count up data.length in the callback like this:
var peeker = new StreamPeeker(stream, function(data) {
// use data.length
})
peeker.pipe(destination)
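On newer Node versions, a similar effect can be achieved with a small Transform that counts bytes as chunks pass through; backpressure is preserved automatically when it sits in a pipe chain. A rough sketch (progressStream is just an illustrative helper name):
var Transform = require('stream').Transform;

// Pass chunks through untouched while reporting the running byte count
function progressStream(onBytes) {
  var total = 0;
  return new Transform({
    transform: function(chunk, encoding, callback) {
      total += chunk.length;
      onBytes(total);
      callback(null, chunk);
    }
  });
}

// Usage:
// source.pipe(progressStream(function(bytes) {
//   process.stdout.write('\r' + (bytes / 1000) + 'kb sent');
// })).pipe(destination);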
I'm using the excellent flow.js library to handle file uploads. It's a resumable HTML5 upload that produces a bunch of chunks on the server that must be reassembled. For example, foo.mov might become
timestamp-foomov.1
timestamp-foomov.2
...
timestamp-foomov.n
Uploads are working but I'm having trouble recombining the parts into a single binary. I have the following code from the Node.js server example the library authors provided on Github (https://github.com/flowjs/flow.js/tree/master/samples/Node.js).
$.write = function(identifier, writableStream, options) {
options = options || {};
options.end = (typeof options['end'] == 'undefined' ? true : options['end']);
// Iterate over each chunk
var pipeChunk = function(number) {
var chunkFilename = getChunkFilename(number, identifier);
fs.exists(chunkFilename, function(exists) {
if (exists) {
// If the chunk with the current number exists,
// then create a ReadStream from the file
// and pipe it to the specified writableStream.
var sourceStream = fs.createReadStream(chunkFilename);
sourceStream.pipe(writableStream, {
end: false
});
sourceStream.on('end', function() {
// When the chunk is fully streamed,
// jump to the next one
pipeChunk(number + 1);
});
} else {
// When all the chunks have been piped, end the stream
if (options.end) writableStream.end();
if (options.onDone) options.onDone();
}
});
}
pipeChunk(1);
}
I'm invoking this code with the following route and am expecting it to produce a reassembled binary in the tmp directory (that's where I'm saving my chunks). However nothing is happening. What am I missing?
exports.download = function(req, res, next) {
switch(req.method) {
case 'GET':
var stream = fs.createWriteStream('foobar');
flow.write(req.params.identifier, res);
break;
}
}
Reassembling all the chunks is easy; just call this:
var stream = fs.createWriteStream(filename);
r.write(identifier, stream);
And that is it!
But the other question is: when should this method be called?
Probably when all the chunks have been uploaded and are present in the tmp folder.
There is another issue, though: duplicate calls of the done handling.
This can be solved by creating and locking the target file once all chunks exist.
Then call
r.write(identifier, stream);
Then clean up all the chunks, release the lock, and close the file.
The same approach is used in the PHP server-side library: https://github.com/flowjs/flow-php-server/blob/master/src/Flow/File.php#L102
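To make that concrete, here is a rough sketch of an Express-style upload handler that reassembles the file once the last chunk arrives. The flow.post call and its callback signature are taken from the Node.js sample server and may differ in your setup:
exports.upload = function(req, res) {
  // flow.post() stores the incoming chunk and reports the overall status
  flow.post(req, function(status, filename, originalFilename, identifier) {
    if (status === 'done') {
      // All chunks are present: reassemble them into a single file
      var stream = fs.createWriteStream('./tmp/' + filename);
      flow.write(identifier, stream, {
        onDone: function() {
          // clean up the chunk files and release any lock here
        }
      });
    }
    res.status(200).send();
  });
};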