I'm using node-tail to read a file in Linux and send it down to a socket.
var io = require('socket.io');
var Tail = require('tail').Tail;

var tail = new Tail("/tmp/test.txt");

io.sockets.on('connection', function (socket) {
    tail.on("line", function (data) {
        socket.emit('Message', { test: data });
    });
});
Receiving side
var socket = io.connect();
socket.on('Message', function (data) {
    console.log(data.test);
});
This works, but when I try to modify this part
tail = new Tail("/tmp/test.txt");
to this
tail = new Tail("/tmp/FIFOFILE");
I can't get any data from it.
Is there any way to read a named pipe in Linux, or a package that can read a named pipe?
I can get it to work in a silly way:
// app.js
process.stdin.resume();
process.stdin.on('data', function (chunk) {
    console.log('D', chunk);
});
And start like this:
node app.js < /tmp/FIFOFILE
If I create a readable stream for the named pipe, it ends after having read the first piece of data written to the named pipe. Not sure why stdin is special.
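For reference, the straightforward attempt looks roughly like this (a sketch) and stops right after the first piece of data, once the writing process closes the FIFO:
var fs = require('fs');

var fifo = fs.createReadStream('/tmp/FIFOFILE');
fifo.on('data', function (chunk) {
    console.log('D', chunk);
});
fifo.on('end', function () {
    console.log('stream ended'); // fires right after the first piece of data
});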
The OS will send an EOF when the last process finishes writing to the FIFO. If only one process is writing to the FIFO then you get an EOF when that process finishes writing its stuff. This EOF triggers Node to close the stream.
The trick to avoiding this is given by @JoshuaWalsh in this answer, namely: you open the pipe yourself FOR READING AND WRITING, even though you have no intention of ever writing to it. This means the OS sees that there is always at least one process holding the FIFO open for writing, so you never get the EOF.
So... just add in something like:
let fifoHandle;
fs.open(fifoPath, fs.constants.O_RDWR, function (err, fd) { fifoHandle = fd; console.log('FIFO open'); });
You don't ever have to do anything with fifoHandle; just make sure the descriptor stays open for as long as you need to read from the FIFO.
In fact, in my case I was using createReadStream, and I found that simply adding fs.constants.O_RDWR to its flags was enough (even though I have no intention of ever writing to the FIFO):
let fifo = fs.createReadStream(fifoPath, { flags: fs.constants.O_RDWR });
fifo.on('data', function (data) {
    console.log('Got data: ' + data.toString());
});
I'm just starting to learn about streams in Node. I have a string in memory, and I want to put it into a stream that applies a transformation and pipes it through to process.stdout. Here is my attempt:
var through = require('through');
var stream = through(function write(data) {
    this.push(data.toUpperCase());
});
stream.push('asdf');
stream.pipe(process.stdout);
stream.end();
It does not work. When I run the script via node on the CLI, nothing is sent to stdout and no errors are thrown. A few questions I have:
If you have a value in memory that you want to put into a stream, what is the best way to do it?
What is the difference between push and queue?
Does it matter if I call end() before or after calling pipe()?
Is end() equivalent to push(null)?
Thanks!
Just use the vanilla stream API
var Transform = require("stream").Transform;
// create a new Transform stream
var stream = new Transform({
    decodeStrings: false,
    encoding: "ascii"
});
// implement the _transform method
stream._transform = function _transform(str, enc, done) {
    this.push(str.toUpperCase() + "\n");
    done();
};
// connect to stdout
stream.pipe(process.stdout);
// write some stuff to the stream
stream.write("hello!");
stream.write("world!");
// output
// HELLO!
// WORLD!
Or you can build your own stream constructor. This is really how the stream API is intended to be used:
var Transform = require("stream").Transform;
function MyStream() {
    // call Transform constructor with `this` context
    // {decodeStrings: false} keeps data as `string` type instead of `Buffer`
    // {encoding: "ascii"} sets the encoding for our strings
    Transform.call(this, { decodeStrings: false, encoding: "ascii" });

    // our function to do "work"
    function _transform(str, encoding, done) {
        this.push(str.toUpperCase() + "\n");
        done();
    }

    // export our function
    this._transform = _transform;
}

// extend the Transform.prototype to your constructor
MyStream.prototype = Object.create(Transform.prototype, {
    constructor: {
        value: MyStream
    }
});
Now use it like this
// instantiate
var a = new MyStream();
// pipe to a destination
a.pipe(process.stdout);
// write data
a.write("hello!");
a.write("world!");
Output
HELLO!
WORLD!
Some other notes about .push vs .write.
.write(str) adds data to the writable buffer. It is meant to be called externally. If you think of a stream like a duplex file handle, it's just like fwrite, only buffered.
.push(str) adds data to the readable buffer. It is only intended to be called from within our stream.
.push(str) can be called many times. Watch what happens if we change our function to
function _transform(str, encoding, done) {
    this.push(str.toUpperCase());
    this.push(str.toUpperCase());
    this.push(str.toUpperCase() + "\n");
    done();
}
Output
HELLO!HELLO!HELLO!
WORLD!WORLD!WORLD!
First, you want to use write(), not push(). write() puts data into the stream; push() pushes data out of the stream. You only use push() when implementing your own Readable, Duplex, or Transform streams.
Second, you'll only want to write() data to the stream after you've set up the pipe() (or added some event listeners). If you write to a stream with nothing wired to the other end, the data you've written will be lost. As @naomik pointed out, this isn't true in general, since a Writable stream will buffer write()s. In your example you do need to write() after pipe(), though. Otherwise, the process will end before writing anything to STDOUT. This is possibly due to how the through module is implemented, but I don't know that for sure.
So, with that in mind, you can make a couple simple changes to your example to get it to work:
var through = require('through');
var stream = through(function write(data) {
    this.push(data.toUpperCase());
});
stream.pipe(process.stdout);
stream.write('asdf');
stream.end();
Now, for your questions:
The easiest way to get data from memory into a writable stream is to simply write() it, just like we're doing with stream.write('asdf') in your example.
As far as I know, core streams don't have a queue() function; did you mean write()? (In the through module, this.queue() is essentially an alias for this.push().) Like I said above, write() is used to put data into a stream, and push() is used to push data out of the stream. Only call push() in your own stream implementations.
Only call end() after all your data has been written to your stream. end() basically says: "Ok, I'm done now. Please finish what you're doing and close the stream."
push(null) is pretty much equivalent to end(). That being said, don't call push(null) unless you're doing it inside your own stream implementation (as stated above). It's almost always more appropriate to call end().
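For example, here's a minimal sketch (using the core Readable class rather than through) of push(null) ending a stream from inside its implementation:
var Readable = require('stream').Readable;

var r = new Readable();
r._read = function () {
    this.push('asdf\n'); // data for downstream consumers
    this.push(null);     // no more data; consumers get an 'end' event
};

r.pipe(process.stdout);  // prints: asdf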
Based on the examples for stream (http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options) and through (https://www.npmjs.org/package/through), it doesn't look like you are using your stream correctly... What happens if you use write(...) instead of push(...)?
I was trying to write a node.js script that only takes input from stdin if it's piped (as opposed to waiting for keyboard input). Therefore I need to determine whether the stdin piped in is null.
First I tried using the readable event:
var s = process.stdin;
s.on('readable', function () {
    console.log('Event "readable" is fired!');
    var chunk = s.read();
    console.log(chunk);
    if (chunk === null) s.pause();
});
And the result is as expected:
$ node test.js
Event "readable" is fired!
null
$
Then I tried to do the same thing using data event, because I like to use flowing mode:
var s = process.stdin;
s.on('data', function (chunk) {
    console.log('Event "data" is fired!');
    console.log(chunk);
    if (chunk === null) s.pause();
});
But this time it waited for keyboard input before the null check, and got stuck there. I was wondering why it does that. Does that mean that in order to do a null check, I need to pause the stream first, wait for readable to fire, do the null check, and then resume the stream, just to prevent node.js from waiting for keyboard input? This seems awkward to me. Is there a way to avoid using the readable event?
Use tty.isatty() from the node core library. That function will return false if stdin is a pipe.
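For example (a sketch; the log messages are just illustrative):
var tty = require('tty');

if (tty.isatty(process.stdin.fd)) {
    console.log('stdin is a terminal; waiting for keyboard input');
} else {
    console.log('stdin is piped or redirected; reading it');
    process.stdin.pipe(process.stdout);
}
Running node test.js prints the first message, while echo hi | node test.js takes the piped branch. (process.stdin.isTTY is an equivalent shortcut.)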
I'm writing an app in Node and have been running into a rare but detrimental occurrence.
I have a schedule.txt that I write to when the user makes a change, and I also read it every second and parse it for use throughout the program.
Rarely, what happens is that while a user's change is being written to the file (asynchronously), the app (based on the timer) reads the same file, attempts to parse it, and fails.
I know that from a design standpoint this is perhaps just bound to happen... but I'm wondering if there is a quick fix I can do now. Would using writeFileSync help my situation (make it more 'atomic')? I just want to make sure that the app doesn't read the file while another process is still writing to it.
TIA!
Niko
Seems like you'd want to serialize your read/writes. If it were me, I might try having a "manager" object which encapsulates the serialization, which you'd use like:
var fileManager = require('./file-manager');
// somewhere in the program
fileManager.scheduleWrite(data, function (err) {
    // now the write is done
});
// somewhere else in the program
fileManager.scheduleRead(function (err, data) {
    // `data` contains the data
});
Then implement it using Q or a similar promises lib, like:
// in file-manager.js
var wait = Q();

module.exports = {
    scheduleWrite: function (data, cb) {
        wait = wait.then(function () {
            // write data and call cb()
        });
    },
    scheduleRead: function (cb) {
        wait = wait.then(function () {
            // read data and call cb(data)
        });
    }
};
The wait var will "stack up" into a serialized chain of tasks where the next one won't start until the previous one completes.
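If it helps, here is one way the placeholders might be filled in. This is only a sketch; it assumes the Q library and a schedule.txt next to the script, both of which are assumptions on my part:
// file-manager.js (sketch)
var Q = require('q');
var fs = require('fs');

var FILE = __dirname + '/schedule.txt'; // assumed location
var wait = Q();

module.exports = {
    scheduleWrite: function (data, cb) {
        wait = wait.then(function () {
            // write the data, then report back; errors also go to cb
            return Q.nfcall(fs.writeFile, FILE, data)
                .then(function () { cb(); }, cb);
        });
    },
    scheduleRead: function (cb) {
        wait = wait.then(function () {
            // read the data and hand it to cb(err, data)
            return Q.nfcall(fs.readFile, FILE, 'utf8')
                .then(function (data) { cb(null, data); }, cb);
        });
    }
};
Because each step recovers by passing its error to the caller's callback, one failed read or write doesn't block the rest of the chain.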
I am using node.js and reading input from a serial port by opening a /dev/tty file. I send a command and read the result of the command, and I want to close the stream once I've read and parsed all the data. I know that I'm done reading data by an end-of-data marker. I'm finding that once I've closed the stream, my program does not terminate.
Below is an example of what I am seeing but uses /dev/random to slowly generate data (assuming your system isn't doing much). What I find is that the process will terminate once the device generates data after the stream has been closed.
var util = require('util'),
    PassThrough = require('stream').PassThrough,
    fs = require('fs');

// If the system is not doing enough to fill the entropy pool,
// /dev/random will not return much data. Feed the entropy pool with:
// ssh <host> 'cat /dev/urandom' > /dev/urandom
var readStream = fs.createReadStream('/dev/random');
var pt = new PassThrough();

pt.on('data', function (data) {
    console.log(data);
    console.log('closing');
    readStream.close(); // expect the process to terminate immediately
});

readStream.pipe(pt);
Update 1:
I am back on this issue and have another sample; this one just uses a pty and is easily reproduced in the node REPL. Log in on two terminals and, in the call to createReadStream below, use the pty of the terminal you're not running node in.
var fs = require('fs');
var rs = fs.createReadStream('/dev/pts/1'); // a pty that is allocated in another terminal by my user
//wait just a second, don't copy and paste everything at once
process.exit(0);
At this point node will just hang and not exit. This is on 10.28.
Instead of using readStream.close(), try using readStream.pause().
But if you are using the newest version of node, wrap the readStream with a Readable object created from the stream module by isaacs, like this:
var Readable = require('stream').Readable;
var myReader = new Readable().wrap(readStream);
and use myReader in place of readStream after that.
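For example, a rough sketch of using the wrapped stream in place of readStream (reusing the readStream and pt from the question):
var Readable = require('stream').Readable;
var myReader = new Readable().wrap(readStream);

myReader.pipe(pt);
pt.on('data', function (data) {
    console.log(data);
    console.log('pausing');
    myReader.pause(); // instead of readStream.close()
});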
Best of luck! Tell me if this works.
You are closing the /dev/random stream, but you still have a listener for the 'data' event on the pass-through, which will keep the app running until the pass-through is closed.
I'm guessing there is some buffered data from the read stream and until that is flushed the pass-through is not closed. But this is just a guess.
To get the desired behaviour you can remove the event listener on the pass-through like this:
pt.on('data', function (data) {
    console.log(data);
    console.log('closing');
    pt.removeAllListeners('data');
    readStream.close();
});
I am actually piping to an HTTP request, so for me it's about:
pt.on('close', () => {
    req.abort();
});
In a node program I'm reading from a file stream with fs.createReadStream. But when I pause the stream, the program exits. I thought the program would keep running since the file is still open, just not being read.
Currently, to keep it from exiting, I'm setting an interval that does nothing:
setInterval(function() {}, 10000000);
When I'm ready to let the program exit, I clear it. But is there a better way?
Example code where node will exit:
var fs = require('fs');
var rs = fs.createReadStream('file.js');
rs.pause();
Node will exit when there is no more queued work. Calling pause on a ReadableStream simply pauses the data event. At that point, there are no more events being emitted and no outstanding work requests, so Node will exit. The setInterval works since it counts as queued work.
Generally this is not a problem since you will probably be doing something after you pause that stream. Once you resume the stream, there will be a bunch of queued I/O and your code will execute before Node exits.
Let me give you an example. Here is a script that exits without printing anything:
var fs = require('fs');
var rs = fs.createReadStream('file.js');
rs.pause();
rs.on('data', function (data) {
    console.log(data); // never gets executed
});
The stream is paused, there is no outstanding work, and my callback never runs.
However, this script does actually print output:
var fs = require('fs');
var rs = fs.createReadStream('file.js');
rs.pause();
rs.on('data', function (data) {
    console.log(data); // prints stuff
});
rs.resume(); // queues I/O
In conclusion, as long as you are eventually calling resume later, you should be fine.
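For instance (a sketch), if resume() is scheduled for later, the pending timer keeps the process alive and the queued I/O runs once the stream resumes:
var fs = require('fs');

var rs = fs.createReadStream('file.js');
rs.pause();

rs.on('data', function (data) {
    console.log(data); // runs once the stream is resumed
});

setTimeout(function () {
    rs.resume(); // queues I/O; the callback above now executes
}, 1000);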
A short way, based on the answers above:
require('fs').createReadStream('file.js').pause();