Is there a stdout flush for Node.js, like in Python and other languages?
sys.stdout.write('some data')
sys.stdout.flush()
So far I have only found process.stdout.write() for Node.js.
process.stdout is a WritableStream object, and its write() method automatically flushes the stream (unless it was explicitly corked). However, write() returns true if the flush was successful, and false if the kernel buffer was full and the data could not be written yet. If you need to write several times in succession, you should handle the drain event.
See the documentation for write.
In newer NodeJS versions, you can pass a callback to .write(), which will be called once the data is flushed:
process.stdout.write('some data', () => {
  console.log('The data has been flushed');
});
This is exactly the same as checking the .write() result and registering for the drain event:
let flushed = process.stdout.write('some data');
if (!flushed) {
  process.stdout.once('drain', () => {
    console.log('The data has been flushed');
  });
}
.write() returns true if the data has been flushed immediately; if it returns false, you can wait for the 'drain' event.
I think there is no flush, because that would be a blocking operation.
There are other process.stdout methods for clearing the last output on the terminal, which work somewhat like a flush:
function flush() {
  process.stdout.clearLine(0); // 0 = clear the entire current line
  process.stdout.cursorTo(0);  // move the cursor back to column 0
}

var total = 5000;
var current = 0;
var percent = 0;
var waitingTime = 500;

var interval = setInterval(function () {
  current += waitingTime;
  percent = Math.floor((current / total) * 100);
  flush(); // clear the previous progress line before writing the new one
  process.stdout.write(`downloading ... ${percent}%`);
  if (current >= total) {
    console.log("\nDone.");
    clearInterval(interval);
  }
}, waitingTime);
cursorTo(0) moves the cursor to column 0, the start of the line.
Call the flush function before stdout.write, because it clears the line; if you call it afterwards, you will not see any output.
My Node.js program, an ordinary command-line program that by and large doesn't do anything operationally unusual (nothing system-specific or asynchronous), needs to write messages to a file from time to time. It will then be interrupted with ^C, and the contents of the file need to still be there.
I've tried using fs.createWriteStream but that just ends up with a 0-byte file. (The file does contain text if the program ends by running off the end of the main file, but that's not the scenario I have.)
I've tried using winston but that ends up not creating the file at all. (The file does contain text if the program ends by running off the end of the main file, but that's not the scenario I have.)
And fs.writeFile works perfectly when you have all the text you want to write up front, but doesn't seem to support appending a line at a time.
What is the recommended way to do this?
Edit: specific code I've tried:
var fs = require('fs')
var log = fs.createWriteStream('test.log')
for (var i = 0; i < 1000000; i++) {
  console.log(i)
  log.write(i + '\n')
}
Run for a few seconds, hit ^C, leaves a 0-byte file.
Turns out Node provides a lower level file I/O API that seems to work fine!
var fs = require('fs')
var log = fs.openSync('test.log', 'w')
for (var i = 0; i < 100000; i++) {
  console.log(i)
  fs.writeSync(log, i + '\n')
}
NodeJS doesn't work in the traditional way. It uses a single thread, so by running a large loop and doing I/O inside it, you aren't giving it a chance (i.e. releasing the CPU) to do other async operations, e.g. flushing the memory buffer to the actual file.
The logic must be: do one write, then pass your function (which invokes the next write) as a callback to process.nextTick, or as the callback for the write stream's drain event (if the buffer was full during the last write).
Here's a quick and dirty version which does what you need. Notice that there are no long-running loops or other CPU blockage; instead I schedule subsequent writes for the future and return quickly, momentarily freeing up the CPU for other things.
var fs = require('fs')
var log = fs.createWriteStream('test.log');
var i = 0;

function my_write() {
  if (i++ < 1000000) {
    var res = log.write("" + i + "\r\n");
    if (!res) {
      // buffer is full: wait for it to drain before writing again
      log.once('drain', my_write);
    } else {
      // yield to the event loop, then write the next line
      process.nextTick(my_write);
    }
    console.log("Done " + i + " " + res + "\r\n");
  }
}

my_write();
This function might also be helpful.
/**
 * Write `data` to a `stream`. If the buffer is full, wait
 * until it has drained and is ready to be written to again.
 * [see](https://nodejs.org/api/stream.html#stream_writable_write_chunk_encoding_callback)
 */
export function write(data, stream) {
  return new Promise((resolve, reject) => {
    if (stream.write(data)) {
      process.nextTick(resolve);
    } else {
      stream.once("drain", () => {
        stream.off("error", reject);
        resolve();
      });
      stream.once("error", reject);
    }
  });
}
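A possible usage sketch (the surrounding function and the sample data are placeholders, not part of the original answer): awaiting each call keeps the writes sequential and respects backpressure.

// Hypothetical usage of the write() helper above
async function dump(lines, stream) {
  for (const line of lines) {
    await write(line + "\n", stream);
  }
}

dump(["first", "second", "third"], process.stdout);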
You are writing to the file in a for loop, which is bad, but that's a separate issue. First of all, createWriteStream doesn't close the file automatically; you should close it yourself (by calling end() on the stream).
If you call close immediately after the for loop, it may close before everything has been written, because the writes are async.
For more info read here: https://nodejs.org/api/fs.html#fs_fs_createwritestream_path_options
The problem is performing async writes inside a synchronous for loop.
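As a minimal sketch of that point (reusing the question's test.log target, purely as an illustration): end the stream after the loop and wait for 'finish' so buffered data actually reaches the file.

var fs = require('fs')
var log = fs.createWriteStream('test.log')

for (var i = 0; i < 1000000; i++) {
  log.write(i + '\n') // queued in memory; flushed asynchronously
}

// end() flushes whatever is still buffered and then closes the file
log.end()
log.on('finish', function () {
  console.log('all data has been written to test.log')
})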
I am writing a quite simple application to transform data: read one file and write to another. The files are relatively large, about 2 GB. However, what I found is that the flush to the file system does not happen on the cork-uncork cycle; it only happens on end(), so end() basically hangs the system until everything is fully flushed.
I simplified the example so it just writes a line to the stream a lot of times.
var PREFIX = 'E:\\TEST\\';
var line = 'AA 11 999999999 20160101 123456 20160101 AAA 00 00 00 0 0 0 2 2 0 0 20160101 0 00';
var fileSystem = require('fs');

function writeStrings() {
  var stringsCount = 0;
  var stream = fileSystem.createWriteStream(PREFIX + 'output.txt');
  stream.once('drain', function () {
    console.log("drained");
  });
  stream.once('open', function (fileDescriptor) {
    var started = false;
    console.log('writing file ');
    stream.cork();
    for (i = 0; i < 2000000; i++) {
      stream.write(line + i);
      if (i % 10000 == 0) {
        // console.log('passed ', i);
      }
      if (i % 100000 == 0) {
        console.log('uncorked ', i, stream._writableState.writing);
        stream.uncork();
        stream.cork();
      }
    }
    stream.end();
  });
  stream.once('finish', function () {
    console.log("done");
  });
}

writeStrings();
Going inside node's _stream_writable.js, I found that it flushes the buffer only under this condition:
if (!state.writing &&
    !state.corked &&
    !state.finished &&
    !state.bufferProcessing &&
    state.buffer.length)
  clearBuffer(this, state);
and, as you can see from the example, the writing flag isn't reset after the first uncork(), which prevents the uncork from flushing.
Also, I don't see any drain events firing at all. Playing with highWaterMark doesn't help (it actually doesn't seem to have an effect on anything). Manually setting writing to false (plus some other flags) did help, but this is surely wrong.
Am I misunderstanding the concept here?
From the node.js documentation I found that the number of uncork() calls should match the number of cork() calls. I don't see a matching stream.uncork() call for the stream.cork() that is called before the for loop; that might be the issue.
Looking at a guide on nodejs.org, you aren't supposed to call stream.uncork() twice in the same event loop tick. Here is an excerpt:
// Using .uncork() twice here makes two calls on the C++ layer, rendering the
// cork/uncork technique useless.
ws.cork();
ws.write('hello ');
ws.write('world ');
ws.uncork();

ws.cork();
ws.write('from ');
ws.write('Matteo');
ws.uncork();

// The correct way to write this is to utilize process.nextTick(), which fires
// on the next event loop.
ws.cork();
ws.write('hello ');
ws.write('world ');
process.nextTick(doUncork, ws);

ws.cork();
ws.write('from ');
ws.write('Matteo');
process.nextTick(doUncork, ws);

// as a global function
function doUncork(stream) {
  stream.uncork();
}
.cork() can be called as many times as we want; we just need to be careful to call .uncork() the same number of times to make it flow again.
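As a sketch of how those two suggestions could be applied to the original loop (the file name, line content and batch size below are placeholders): write one batch per turn of the event loop, uncork on process.nextTick so every cork() has a matching uncork(), and schedule the next batch with setImmediate so the stream gets a chance to flush.

var fs = require('fs');

var stream = fs.createWriteStream('output.txt');
var line = 'some line of data ';
var TOTAL = 2000000;
var BATCH = 100000;
var i = 0;

function writeBatch() {
  stream.cork();
  for (var n = 0; n < BATCH && i < TOTAL; n++, i++) {
    stream.write(line + i + '\n');
  }
  process.nextTick(function () {
    stream.uncork();            // one uncork() per cork(), on the next tick
    if (i < TOTAL) {
      setImmediate(writeBatch); // yield to the event loop between batches
    } else {
      stream.end();
    }
  });
}

writeBatch();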
While attempting to experiment with Node.JS streams I ran into an interesting conundrum. When the input (Readable) stream pushes more data than the destination (Writable) cares about, I was unable to apply back-pressure correctly.
The two methods I attempted were to return false from Writable.prototype._write and to retain a reference to the Readable so I could call Readable.pause() from the Writable. Neither solution helped much, as I'll explain.
In my exercise (the full source is available as a Gist) I have three streams:
Readable - PasscodeGenerator
util.inherits(PasscodeGenerator, stream.Readable);
function PasscodeGenerator(prefix) {
  stream.Readable.call(this, {objectMode: true});
  this.count = 0;
  this.prefix = prefix || '';
}

PasscodeGenerator.prototype._read = function() {
  var passcode = '' + this.prefix + this.count;
  if (!this.push({passcode: passcode})) {
    this.pause();
    this.once('drain', this.resume.bind(this));
  }
  this.count++;
};
I thought that the return code from this.push() was enough to self pause and wait for the drain event to resume.
Transform - Hasher
util.inherits(Hasher, stream.Transform);
function Hasher(hashType) {
  stream.Transform.call(this, {objectMode: true});
  this.hashType = hashType;
}

Hasher.prototype._transform = function(sample, encoding, next) {
  var hash = crypto.createHash(this.hashType);
  hash.setEncoding('hex');
  hash.write(sample.passcode);
  hash.end();
  sample.hash = hash.read();
  this.push(sample);
  next();
};
Simply add the hash of the passcode to the object.
Writable - SampleConsumer
util.inherits(SampleConsumer, stream.Writable);
function SampleConsumer(max) {
  stream.Writable.call(this, {objectMode: true});
  this.max = (max != null) ? max : 10;
  this.count = 0;
}

SampleConsumer.prototype._write = function(sample, encoding, next) {
  this.count++;
  console.log('Hash %d (%s): %s', this.count, sample.passcode, sample.hash);
  if (this.count < this.max) {
    next();
  } else {
    return false;
  }
};
Here I want to consume the data as fast as possible until I reach my max number of samples and then end the stream. I tried using this.end() instead of returning false, but that caused the dreaded "write after end" problem. Returning false does stop everything if the sample size is small, but when it is large I get an out-of-memory error:
FATAL ERROR: CALL_AND_RETRY_LAST Allocation failed - process out of memory
Aborted (core dumped)
According to this SO answer, in theory the Writable stream would return false, causing the streams to buffer until the buffers were full (16 objects by default in objectMode), and eventually the Readable would call its this.pause() method. But 16 + 16 + 16 = 48; that's 48 objects in buffers before things fill up and the system gets clogged. Actually fewer, because there is no cloning involved, so the objects passed between them are the same references. Would that not mean only 16 objects in memory before the high water mark halts everything?
Lastly, I realize I could have the Writable reference the Readable to call its pause method using closures. However, this solution means the Writable stream knows too much about another object. I'd have to pass in a reference:
var foo = new PasscodeGenerator('foobar');
foo
  .pipe(new Hasher('md5'))
  .pipe(new SampleConsumer(samples, foo));
And this feels outside the norm for how streams should work. I thought back-pressure was enough to cause a Writable to stop a Readable from pushing data and prevent out-of-memory errors.
An analogous example would be the Unix head command. Implementing that in Node, I would expect the destination to be able to end, rather than being ignored while the source keeps pushing data, even once the destination has enough data to satisfy the beginning portion of the file.
How do I idiomatically construct custom streams such that when the destination is ready to end the source stream doesn't attempt to push more data?
This is a known issue with how _read() is called internally. Since your _read() is always pushing synchronously/immediately, the internal stream implementation can get into a loop in the right conditions. _read() implementations are generally expected to do some sort of async I/O (e.g. reading from disk or network).
The workaround for this (as noted in the link above) is to make your _read() asynchronous at least some of the time. You could also just make it async every time it's called with:
PasscodeGenerator.prototype._read = function(n) {
  var passcode = '' + this.prefix + this.count;
  var self = this;
  // `setImmediate()` delays the push until the beginning
  // of the next tick of the event loop
  setImmediate(function() {
    self.push({passcode: passcode});
  });
  this.count++;
};
I have a stream process like this:
Incoming file via HTTP (original stream)
-> Check if zipfile
- Yes -> push through an unzip2-stream
- No -> push to S3
When the unzip2-stream finds zip-entries, these are pushed through the same chain of streams, i.e.
Incoming file entry from zip file ("child" stream)
-> Check if zipfile
- Yes -> push through an unzip2-stream
- No -> push to S3
Thanks to https://stackoverflow.com/users/3580261/eljefedelrodeodeljefe I managed to solve the main problem after this conversation:
How to redirect a stream to other stream depending on data in first chunk?
The problem with creating new "child" streams for every zip entry is that these will have no connection to the original stream, so I cannot get a unified onFinish for all the streams.
I don't want to send a 202 off to the sender before I have processed (unzipped and sent to S3) every file. How can I accomplish this?
I'm thinking that I might need some kind of control object which awaits onFinish for all child streams and forces the process to dwell in the original onFinish event until all files are processed. Would this be overkill? Is there a simpler solution?
I ended up making a separate counter for the streams. There is probably a better solution, but this works.
I send the counter object as an argument to the first call to my saveFile() function. The counter is passed along to the unzip stream so it can be passed to saveFile for every file entry.
Just before a stream is started (i.e. piped) I call streamCounter.streamStarted().
In the last onFinish in the pipe chain I call streamCounter.streamFinished()
In the event of a stream going bad I call streamCounter.streamFailed()
Just before I send the 202 in the form post route I wait for streamCounter.streamPromise to resolve.
I'm not very proud of the setInterval solution. It'd probably be better with some kind of event emitting.
// `log` is assumed to be an application-level logger defined elsewhere
module.exports.streamCounter = function() {
  let streamCount = 0;
  let isStarted = false;
  let errors = [];

  this.streamStarted = function(options) {
    isStarted = true;
    streamCount += 1;
    log.debug(`Stream started for ${options.filename}. New streamCount: ${streamCount}`);
  };

  this.streamFinished = function(options) {
    streamCount -= 1;
    log.debug(`Finished stream for ${options.filename}. New streamCount: ${streamCount}`);
  };

  this.streamFailed = function(err) {
    streamCount -= 1;
    errors.push(err);
    log.debug(`Failed stream because (${err.message}). New streamCount: ${streamCount}`);
  };

  this.streamPromise = new Promise(function(resolve, reject) {
    let interval = setInterval(function() {
      if (isStarted && streamCount === 0) {
        clearInterval(interval);
        if (errors.length === 0) {
          log.debug('StreamCounter back on 0. Resolving streamPromise');
          resolve();
        } else {
          log.debug('StreamCounter back on 0. Errors encountered. Rejecting streamPromise');
          reject(errors[errors.length - 1]);
        }
      }
    }, 100);
  });
};
At first I tried this concept with a promise array and waited for Promise.all() before sending status 202. But Promise.all() only works with a fixed array of promises as far as I can tell. My "streamCount" changes during the streaming, so I needed a more dynamic "Promise.all".
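For what it's worth, here is a hedged sketch of the event-based alternative hinted at above: the same counter idea, but resolving the promise from an 'idle' event instead of polling with setInterval. The class name, the 'idle' event and the omission of logging are illustrative choices, not the original code.

const EventEmitter = require('events');

class StreamCounter extends EventEmitter {
  constructor() {
    super();
    this.count = 0;
    this.started = false;
    this.errors = [];
    // Settles once every started stream has finished or failed.
    this.streamPromise = new Promise((resolve, reject) => {
      this.on('idle', () => {
        if (this.errors.length === 0) resolve();
        else reject(this.errors[this.errors.length - 1]);
      });
    });
  }
  streamStarted() { this.started = true; this.count += 1; }
  streamFinished() { this._settle(); }
  streamFailed(err) { this.errors.push(err); this._settle(); }
  _settle() {
    this.count -= 1;
    if (this.started && this.count === 0) this.emit('idle');
  }
}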
I am trying to learn about streams in node.js!
server.js
var net = require("net");
var server = net.createServer(function(conn) {
conn.write("welcome!");
# echo the user input!
conn.pipe(conn);
});
server.listen("1111", function() {
console.log("port 1111 opened");
});
telnet test
The server currently echos the user's input
$ telnet localhost 1111
welcome!
hello
hello
desired output
To demonstrate where/how I should process the stream on the server side, I would like to wrap the user's input in {} before echoing it back
$ telnet localhost 1111
welcome!
hello
{hello}
This will basically accomplish the exact output you've requested:
var net = require('net');
var server = net.createServer(function(c) {
  c.setEncoding('utf8');
  c.on('data', function(d) {
    c.write('{' + d.trim() + '}\n');
  });
});
server.listen(9871);
First let me call your attention to c.setEncoding('utf8'). This sets a flag on the connection that automatically converts each incoming Buffer to a String using the utf8 encoding. This works well for your example, but note that for better performance between sockets it would be preferable to work with Buffers directly.
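As a rough sketch of that Buffer-oriented variant (an illustration, not part of the original answer; the byte-level trim mirrors the d.trim() above), the braces can be added without ever converting the chunk to a String:

var net = require('net');

var OPEN = Buffer.from('{');
var CLOSE = Buffer.from('}\n');

var server = net.createServer(function (c) {
  // No setEncoding() here, so `d` arrives as a Buffer
  c.on('data', function (d) {
    // Strip trailing \r or \n bytes, the Buffer equivalent of trim()
    var end = d.length;
    while (end > 0 && (d[end - 1] === 0x0a || d[end - 1] === 0x0d)) end--;
    c.write(Buffer.concat([OPEN, d.slice(0, end), CLOSE]));
  });
});
server.listen(9871);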
Simulating the entirety of .pipe() will take a bit more code.
.pipe() is a method of the Stream prototype, which can be found in lib/stream.js. If you take a look at the file you'll see quite a bit more code than what I've shown above. For demonstration, here's an excerpt:
function ondata(chunk) {
  if (dest.writable) {
    if (false === dest.write(chunk) && source.pause) {
      source.pause();
    }
  }
}
source.on('data', ondata);
First a check is made whether the destination is writable. If not, there is no reason to attempt writing the data. Next comes the check whether dest.write(chunk) returned false. From the documentation:
[.write] returns true if the entire data was flushed successfully to the kernel buffer. Returns false if all or part of the data was queued in user memory.
Since Streams live in kernel space, outside of the v8 memory space, it is possible to crash your machine by filling up memory (instead of just crashing the node app). So checking if the message has drained is a safety prevention mechanism. If it hasn't finished draining, then the source will be paused until the drain event is emitted. Here is the drain event:
function ondrain() {
  if (source.readable && source.resume) {
    source.resume();
  }
}
dest.on('drain', ondrain);
Now there is a lot more we could cover with how .pipe() handles errors, cleans up its own event emitters, etc. but I think we've covered the basics.
Note: When sending a large string, it is possible that it will be sent in multiple packets. For this reason it may be necessary to do something like the following:
var net = require('net');
var server = net.createServer(function(c) {
  var tmp = '';
  c.setEncoding('utf8');
  c.on('data', function(d) {
    if (d.charCodeAt(d.length - 1) !== 10) {
      tmp += d;
    } else {
      c.write('{' + tmp + d.trim() + '}\n');
      tmp = '';
    }
  });
});
server.listen(9871);
Here we assume that the string is terminated by the newline character (\n, ASCII code 10). We check the end of the message to see if this is the case. If not, we temporarily store the message from the connection until the newline character is received.
This may not be a problem for your application, but thought it would be worth noting.
You can do something like this (CoffeeScript):

conn.on 'data', (d) ->
  conn.write "{#{d}}"

The .pipe method is basically just attaching the data event of the input stream to write to the output stream.
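As a toy sketch of that idea (ignoring backpressure, errors and cleanup, which the real .pipe() handles as shown in the excerpt above):

// A naive pipe: forward every chunk and end the destination when the source ends.
function naivePipe(source, dest) {
  source.on('data', function (chunk) {
    dest.write(chunk);
  });
  source.on('end', function () {
    dest.end();
  });
  return dest;
}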
I'm not sure about net, actually, but I imagine it's quite similar to http:
var http = require('http');

http.createServer(function (req, res) {
  res.writeHead(200, {'Content-Type': 'text/event-stream'});
  // `options` holds the request options (host, path, etc.) for the upstream source
  http.get(options, function(resp) {
    resp.on('data', function(chunk) {
      res.write("event: meetup\n");
      res.write("data: " + chunk.toString() + "\n\n");
    });
  }).on("error", function(e) {
    console.log("Got error: " + e.message);
  });
});
https://github.com/chovy/nodejs-stream