Remove NodeJs Stream padding

I'm writing an application where I need to strip the first X and last Y bytes from a stream. So what I need is basically a function I can pass to pipe that takes X and Y as parameters and removes the desired number of bytes from the stream as it comes through. My simplified setup is like this:
const rs = fs.createReadStream('some_file')
const ws = fs.createWriteStream('some_other_file')
rs.pipe(streamPadding(128, 512)).pipe(ws)
After that, some_other_file should contain all the contents of some_file minus the first 128 bytes and the last 512 bytes. I've read up on streams, but couldn't figure out how to properly do this, so that it also handles errors during the transfer and does backpressure correctly.
As far as I know, I'd need a duplex stream that, whenever I read from it, reads from its input stream, keeps track of where in the stream we are, and skips the first 128 bytes before emitting data. Some tips on how to implement that would be very helpful.
The second part seems more difficult, if not impossible, because how would I know whether I've already reached the last 512 bytes before the input stream actually closes? I suspect that might not be possible, but I'm sure there must be a way to solve this problem, so if you have any advice on that, I'd be very thankful!

You can create a new Transform Stream which does what you wish. As for losing the last x bytes, you can always keep the last x bytes buffered and just ignore them when the stream ends.
Something like this (assuming you're working with buffers).
const { Transform } = require('stream');

const ignoreFirst = 128,
      ignoreLast = 512;

let lastBuff,
    cnt = 0;

const myTrimmer = new Transform({
  transform(chunk, encoding, callback) {
    let len = Buffer.byteLength(chunk);
    // If we haven't ignored the first bit yet, make sure we do
    if (cnt <= ignoreFirst) {
      let diff = ignoreFirst - cnt;
      // If we have more than we want to ignore, adjust pointer
      if (len > diff)
        chunk = chunk.slice(diff, len);
      // Otherwise unset chunk for later
      else
        chunk = undefined;
    }
    // Keep track of how many bytes we've seen
    cnt += len;
    // If we have nothing to push after trimming, just get out
    if (!chunk)
      return callback();
    // If we already have a saved buff, concat it with the chunk
    if (lastBuff)
      chunk = Buffer.concat([lastBuff, chunk]);
    // Get the new chunk length
    len = Buffer.byteLength(chunk);
    // If the length is less than what we ignore at the end, save it and get out
    if (len < ignoreLast) {
      lastBuff = chunk;
      return callback();
    }
    // Otherwise save the piece we might want to ignore and push the rest through
    lastBuff = chunk.slice(len - ignoreLast, len);
    this.push(chunk.slice(0, len - ignoreLast));
    callback();
  }
});
Then you add that to your pipeline, assuming you're reading from a file and writing to a file:
const rs = fs.createReadStream('some_file')
const ws = fs.createWriteStream('some_other_file')
myTrimmer.pipe(ws);
rs.pipe(myTrimmer);
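The question also asks about error handling and backpressure. A minimal sketch, assuming Node's built-in stream.pipeline (available since Node 10); it applies the same backpressure as .pipe() but also forwards errors from any stage and destroys all the streams on failure:
const fs = require('fs');
const { pipeline } = require('stream');

pipeline(
  fs.createReadStream('some_file'),
  myTrimmer, // the Transform defined above
  fs.createWriteStream('some_other_file'),
  (err) => {
    if (err) {
      console.error('Transfer failed:', err);
    } else {
      console.log('Transfer complete');
    }
  }
);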

Related

read expressjs request body in 4k chunks

I've got an ExpressJS endpoint that needs to take a request body and divide it into sequential 4k byte chunks or pages (aligned every 4k bytes) and do something different with each chunk. Since this is binary data extra care would need to be taken so there isn't any interpretation as unicode or anything.
My first thought was something like this:
req.on("data", chunk => {
// do something here
})
But the "do something here" would have to be, "take whatever size of data is in the chunk, process it in 4k increments, and retain whatever is left < 4k to append the next chunk to. When I ran a test I saw that the first chunk received was just under 32k bytes, so indeed with every request I would have the overhead of shuffling bytes around so that I got 4k byte aligned chunks.
My second thought was to do something like this:
req.on("readable", () => {
var chunk
while (null !== (chunk = req.read(4096))) {
// do something here
}
})
In this case the "do something here" would be similar, but since I would only be reading 4k at a time I should theoretically have a bit less work to do. In a test that I ran, I found that every read did return exactly 4k until the last read, which returned less than 4k. It would be ideal if it always happened like this that I never had to deal with storing part of a 4k page to then append the remainder of the page, but I don't know of any guarantees that read() will always nicely provide this amount.
I'm not sure if I'm trying to re-invent the wheel here either, if there is a mechanism already in place to (asynchronously) always pull off the next 4k without having to reassemble partial chunks.
What's the best practice for doing this?
You can accumulate the data into a higher scoped buffer variable as data events arrive and when you have 4k or more, break out a 4k chunk and process it:
const dataSizeToProcess = 1024 * 4; // 4k
let accumulatedData;

req.on('data', chunk => {
  // accumulate this chunk of data
  if (!accumulatedData) {
    // first chunk of data
    accumulatedData = chunk;
  } else {
    // add this chunk to the existing buffer
    accumulatedData = Buffer.concat([accumulatedData, chunk]);
  }
  // process as many whole chunks as we have in the accumulatedData
  while (accumulatedData.length >= dataSizeToProcess) {
    // get a whole chunk into its own buffer from the start
    const piece = accumulatedData.slice(0, dataSizeToProcess);
    // make accumulatedData be the rest of the data
    accumulatedData = accumulatedData.slice(dataSizeToProcess);
    // now process the data in the piece buffer
  }
});

req.on('end', () => {
  // process the last bit of data in accumulatedData
});
The documentation for "read" says:
The optional size argument specifies a specific number of bytes to read. If size bytes are not available to be read, null will be returned unless the stream has ended, in which case all of the data remaining in the internal buffer will be returned.
This ensures that you will either read the entire 4k or, at the end of the file, whatever is remaining. So there is no need for anything more complicated.
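Based on that guarantee, a minimal sketch of the 'readable' variant (processPage is a placeholder for your per-page logic):
const PAGE_SIZE = 4096;

req.on('readable', () => {
  let page;
  // read() returns exactly 4096 bytes, or null until that much has been buffered;
  // once the stream has ended it returns whatever is left (< 4096)
  while ((page = req.read(PAGE_SIZE)) !== null) {
    processPage(page);
  }
});

req.on('end', () => {
  // all pages, including the final short one, were handled above
});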

How to improve the memory usage in nodejs code?

I tried one code at hackerearth : https://www.hackerearth.com/practice/data-structures/stacks/basics-of-stacks/practice-problems/algorithm/fight-for-laddus/description/
The speed seems fine, but the memory usage exceeds the 256 MB limit by nearly 2.8 times.
In Java and Python the memory usage is about 5 times lower, although the time is nearly double.
What can be done to optimise the memory usage in the Node.js implementation?
Here is nodejs implementation:
// Sample code to perform I/O:
process.stdin.resume();
process.stdin.setEncoding("utf-8");

var stdin_input = "";

process.stdin.on("data", function (input) {
  stdin_input += input; // Reading input from STDIN
});

process.stdin.on("end", function () {
  main(stdin_input);
});

function main(input) {
  let arr = input.split("\n");
  let testCases = parseInt(arr[0], 10);
  arr.splice(0, 1);
  finalStr = "";
  while (testCases > 0) {
    let inputArray = (arr[arr.length - testCases * 2 + 1]).split(" ");
    let inputArrayLength = inputArray.length;
    testCases = testCases - 1;
    frequencyObject = {};
    for (let i = 0; i < inputArrayLength; ++i) {
      if (!frequencyObject[inputArray[i]]) {
        frequencyObject[inputArray[i]] = 0;
      }
      ++frequencyObject[inputArray[i]];
    }
    let finalArray = [];
    finalArray[inputArrayLength - 1] = -1;
    let stack = [];
    stack.push(inputArrayLength - 1);
    for (let i = inputArrayLength - 2; i >= 0; i--) {
      let stackLength = stack.length;
      while (stackLength > 0 && frequencyObject[inputArray[stack[stackLength - 1]]] <= frequencyObject[inputArray[i]]) {
        stack.pop();
        stackLength--;
      }
      if (stackLength > 0) {
        finalArray[i] = inputArray[stack[stackLength - 1]];
      } else {
        finalArray[i] = -1;
      }
      stack.push(i);
    }
    console.log(finalArray.join(" ") + "\n");
  }
}
Here are some things to consider:
Don't buffer any more input data than you need to before you process it or output it.
Try to avoid making copies of data. Use the data in place if possible. Remember that all string operations create a new string that is likely a copy of the original data. And, many array operations like .map(), .filter(), etc... create new copies of the original array.
Keep in mind that garbage collection is delayed and is typically done during idle time. So, for example, modifying strings in a loop may create a lot of temporary objects that all exist at once, even though most or all of them will be garbage collected when the loop is done. This drives up peak memory usage.
Buffering
The first thing I notice is that you read the entire input file into memory before you process any of it. Right away for large input files, you're going to use a lot of memory. Instead, what you want to do is read enough of a chunk to get the next testCase and then process it.
FYI, this incremental reading/processing will make the code significantly more complicated to write (I've written an implementation myself) because you have to handle partially read lines, but it will hold down memory use a bunch and that's what you asked for.
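A minimal sketch of that incremental approach, using Node's built-in readline module to consume stdin line by line instead of buffering the whole input (handleLine is a placeholder for the per-test-case logic):
const readline = require('readline');

const rl = readline.createInterface({ input: process.stdin });

let lineNo = 0;
rl.on('line', (line) => {
  // process each line as it arrives instead of buffering the entire input;
  // carry whatever per-test-case state you need across calls
  lineNo++;
  handleLine(lineNo, line);
});

rl.on('close', () => {
  // input finished
});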
Copies of Data
After reading the entire input file into memory, you immediately make a copy of it all with this:
let arr = input.split("\n");
So, now you've more than doubled the amount of memory the input data is taking up. Instead of just one string for all the input, you now still have all of that in memory, but you've now broken it up into hundreds of other strings (each with a little overhead of its own for a new string and of course a copy of each line).
Modifying Strings in a Loop
When you're creating your final result which you call finalStr, you're doing this over and over again:
finalStr = finalStr + finalArray.join(" ") + "\n"
This is going to create tons and tons of incremental strings that will likely end up all in memory at once because garbage collection probably won't run until the loop is over. As an example, if you had 100 lines of output that were each 100 characters long so the total output (not counting line termination characters) was 100 x 100 = 10,000 characters, then constructing this in a loop like you are would create temporary strings of 100, 200, 300, 400, ... 10,000 which would consume 5000 (avg length) * 100 (number of temporary strings) = 500,000 characters. That's 50x the total output size consumed in temporary string objects.
So, not only does this create tons of incremental strings each one larger than the previous one (since you're adding onto it), it also creates your entire output in memory before writing any of it out to stdout.
Instead, you can incrementally output each line to stdout as you construct each line. This will put the worst-case memory usage at probably about 2x the output size, whereas you're at 50x or worse.
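A minimal sketch of that change, keeping the rest of the loop the same (only the output step differs):
// inside the per-test-case loop, instead of appending to finalStr:
process.stdout.write(finalArray.join(" ") + "\n");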

How do I effectively send a large packet / combine smaller packets?

I have a large buffer I'm trying to send as a packet. Node.js splits the buffer into smaller (~65k) packets. Once they are received by the client, how can I ensure the packets go together and effectively recombine them into a buffer?
Pretty much using this as a test:
// tcp socket
var buf = Buffer.alloc(265000);
socket.write(buf);
Then on client side I need to combine the 65k packets somehow together back into a buffer.
Thanks
TCP is free to break data up on the wire into packets of any size. The size can be different based on different implementations or physical transports. You cannot know exactly how this will happen and should not depend upon exactly how it is implemented. It can even vary depending upon which route your data takes.
Further, the .on('data', ...) event just gives you whatever data has arrived so far. While the order of the packets is guaranteed, there is no guarantee that if you write a certain set of bytes that they will all arrive in the same data event. They can be broken into smaller pieces and may arrive in smaller pieces. This is what happens at the lower level of TCP when you have no real protocol on top of TCP.
So, if you're sending a chunk of data over TCP, you have to invent your own protocol to know when you've got an entire set of data. There are a variety of different schemes for doing this.
Delimiter character. Some sort of delimiter character that won't occur in the actual data and indicates the end of a set of data. You read and parse the data until you get a delimiter character and then you know you have a complete set of data you can process. The HTTP protocol uses a newline character as a delimiter. Sometimes a zero byte is used as a delimiter.
Send length first. For binary data, the length of the data is often sent first and then the recipient knows how many bytes of data they're reading until they have a whole set (a minimal sketch of this scheme follows the list).
Existing protocols. Something like the webSocket protocol lets you send messages of any size and it will automatically wrap them into packets that contain information about length, so that they can be recombined for you automatically into the original set of data without you having to do this yourself. There are thousands of other protocols, one of which may be a perfect match for your needs, so you can just use an existing implementation without having to write your own.
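A minimal sketch of the length-prefix scheme from option 2, assuming a 4-byte big-endian length header (the framing and function names are illustrative, not a standard):
// sender: prefix each message with its length as a 32-bit big-endian integer
function sendMessage(socket, payload) {
  const header = Buffer.alloc(4);
  header.writeUInt32BE(payload.length, 0);
  socket.write(Buffer.concat([header, payload]));
}

// receiver: accumulate bytes, peel off complete messages as they arrive
let pending = Buffer.alloc(0);
socket.on('data', (data) => {
  pending = Buffer.concat([pending, data]);
  while (pending.length >= 4) {
    const msgLen = pending.readUInt32BE(0);
    if (pending.length < 4 + msgLen) break; // wait for the rest of the message
    const msg = pending.slice(4, 4 + msgLen);
    pending = pending.slice(4 + msgLen);
    socket.emit('_msg', msg); // same custom-event convention as the delimiter example below
  }
});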
Once you have some mechanism of knowing when you've received a complete set of data, you then set up your data event handler to read data, collect it into a buffer and watch for the end of the data (using whichever mechanism you have selected). When you see the end of a set, you separate that out from any other data that may have arrived after it and then process it.
So, let's say you were using a zero byte as your delimiter and you've made sure that a zero cannot and does not occur in your real data. Then, you'd set up a data handler like this:
let accumulatedData = Buffer.alloc(0);

socket.on('data', data => {
  // check for delimiter character
  let offset = data.indexOf(0);
  if (offset !== -1) {
    // get the whole message into one Buffer
    let msg = Buffer.concat([accumulatedData, data.slice(0, offset)]);
    // put rest of data into the accumulatedData buffer as part of next piece of data
    // skip past the delimiter
    accumulatedData = data.slice(offset + 1);
    // emit that we now have a whole msg
    socket.emit('_msg', msg);
  } else {
    // no delimiter yet, just accumulate the data
    accumulatedData = Buffer.concat([accumulatedData, data]);
  }
});

// if any accumulated data still here at end of socket
// notify about it
// this is optional as it may be a partial piece of data (no delimiter)
socket.on('end', () => {
  if (accumulatedData.length) {
    socket.emit('_msg', accumulatedData);
  }
});

// this is my own event which is emitted when a whole message is available
// for processing
socket.on('_msg', msg => {
  // code here to process whole msg
});
Note: This implementation removes the delimiter from the end of the msg
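For completeness, a minimal sketch of the matching sender side under the same assumption (the payload never contains a zero byte; the function name is illustrative):
// append the zero-byte delimiter after each complete buffer you send
function sendWithDelimiter(socket, buf) {
  socket.write(Buffer.concat([buf, Buffer.from([0])]));
}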
Nodejs is not splitting up the data; TCP/IP is. The maximum amount of data allowed in an IP payload is 64Kb. This is why your packets are being split up (fragmented).
This also means that TCP/IP will piece together the data at the receiving end. This is why you don't have to reassemble REST requests or websites. This is all handled by the lower network layers.
You may want to look at this example. You can edit the createServer() function to send more data like so:
var server = net.createServer(function (socket) {
  let buf = Buffer.alloc(265000);
  for (var i = 0; i < 264900; i++) {
    buf[i] = 0x45; // 'E'
  }
  buf[264900] = 0x0d; // '\r'
  buf[264901] = 0x0a; // '\n'
  buf[264902] = 0;    // string terminator
  socket.write(buf);
  socket.pipe(socket);
});
The above (along with the other code from the gist) will respond to any request with a string containing 264900 'E's and a newline.
Now, you can use netcat (if on Linux) to connect and see the response:
$ netcat 127.0.0.1 1337
EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE ... etc
The buffer can contain whatever and it will all still be transferred. A string is just easy to demonstrate.
In conclusion: Let the network do the work. You will need to read the incoming buffer on the client and save it to its own local buffer but that's pretty much it.
Further reading:
https://nodejs.org/api/net.html#net_socket_write_data_encoding_callback
https://techterms.com/definition/packet

Buffers filled with unicode zeroes

I'm trying to synchronously read parameters from the console in Node, and I managed to do the following:
var load = function () {
  const BUFFER_LENGTH = 1024;
  const stdin = fs.openSync('/dev/stdin', 'rs');
  const buffer = Buffer.alloc(BUFFER_LENGTH);
  console.log('Provide parameter: ');
  fs.readSync(stdin, buffer, 0, BUFFER_LENGTH);
  fs.closeSync(stdin);
  return buffer.toString().replace(/\n*/, '');
}
It works, but here's a strange thing:
var loadedValue = load();
console.log(loadedValue); // displays "a", if I typed "a", so the result is correct
console.log({loadedValue}); // displays {a: 'a\n\u0000\u0000....'}
When I wrap the value in an object, the remaining BUFFER bits are showed in a string. Why is that? How can I get rid of them? Regexp on a string before making an object doesn't work.
Buffer.alloc(BUFFER_LENGTH) creates a buffer of a particular length (1024 in your case), and fills that buffer with NULL characters (as documented here).
Next, you read some (say 2) bytes from stdin into that buffer, which replaces the first two of those NULL characters with the characters read from stdin. The rest of the buffer still consists of NULL's.
If you don't truncate the buffer to the amount of bytes read, your function returns a buffer of length 1024, mostly filled with NULL's. Since those aren't printable, they don't show up in the first console.log(), but they're still there.
So after reading from stdin, you should truncate the buffer to the right size:
let bytesRead = fs.readSync(stdin, buffer, 0, BUFFER_LENGTH);
buffer = buffer.slice(0, bytesRead);
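Putting it together, a minimal sketch of a corrected load() from the question (same logic, truncated to the bytes actually read; the regex is adjusted to strip trailing newlines):
const fs = require('fs');

function load() {
  const BUFFER_LENGTH = 1024;
  const stdin = fs.openSync('/dev/stdin', 'rs');
  const buffer = Buffer.alloc(BUFFER_LENGTH);
  console.log('Provide parameter: ');
  // readSync returns the number of bytes actually read
  const bytesRead = fs.readSync(stdin, buffer, 0, BUFFER_LENGTH);
  fs.closeSync(stdin);
  // keep only the bytes that were read, then drop the trailing newline(s)
  return buffer.slice(0, bytesRead).toString().replace(/\n+$/, '');
}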

How to append binary data to a buffer in node.js

I have a buffer with some binary data:
var b = new Buffer ([0x00, 0x01, 0x02]);
and I want to append 0x03.
How can I append more binary data? I've searched the documentation, but to append data it must be a string; otherwise, an error occurs (TypeError: Argument must be a string):
var b = new Buffer (256);
b.write ("hola");
console.log (b.toString ("utf8", 0, 4)); //hola
b.write (", adios", 4);
console.log (b.toString ("utf8", 0, 11)); //hola, adios
Then, the only solution I can see here is to create a new buffer for every appended binary data and copy it to the major buffer with the correct offset:
var b = new Buffer (4); //4 for having a nice printed buffer, but the size will be 16KB
new Buffer ([0x00, 0x01, 0x02]).copy (b);
console.log (b); //<Buffer 00 01 02 00>
new Buffer ([0x03]).copy (b, 3);
console.log (b); //<Buffer 00 01 02 03>
But this seems a bit inefficient because I have to instantiate a new buffer for every append.
Do you know a better way for appending binary data?
EDIT
I've written a BufferedWriter that writes bytes to a file using internal buffers. Same as BufferedReader but for writing.
A quick example:
//The BufferedWriter truncates the file because append == false
new BufferedWriter("file")
  .on("error", function (error) {
    console.log(error);
  })
  //From the beginning of the file:
  .write([0x00, 0x01, 0x02], 0, 3)       //Writes 0x00, 0x01, 0x02
  .write(new Buffer([0x03, 0x04]), 1, 1) //Writes 0x04
  .write(0x05)                           //Writes 0x05
  .close();                              //Closes the writer. A flush is implicitly done.

//The BufferedWriter appends content to the end of the file because append == true
new BufferedWriter("file", true)
  .on("error", function (error) {
    console.log(error);
  })
  //From the end of the file:
  .write(0xFF) //Writes 0xFF
  .close();    //Closes the writer. A flush is implicitly done.

//The file contains: 0x00, 0x01, 0x02, 0x04, 0x05, 0xFF
LAST UPDATE
Use concat.
Updated Answer for Node.js ~>0.8
Node is able to concatenate buffers on its own now.
var newBuffer = Buffer.concat([buffer1, buffer2]);
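Applied to the append from the question (using the non-deprecated Buffer.from constructor), a small example:
var b = Buffer.from([0x00, 0x01, 0x02]);
b = Buffer.concat([b, Buffer.from([0x03])]);
console.log(b); // <Buffer 00 01 02 03>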
Old Answer for Node.js ~0.6
I use a module to add a .concat function, among others:
https://github.com/coolaj86/node-bufferjs
I know it isn't a "pure" solution, but it works very well for my purposes.
Buffers are always of fixed size, there is no built in way to resize them dynamically, so your approach of copying it to a larger Buffer is the only way.
However, to be more efficient, you could make the Buffer larger than the original contents, so it contains some "free" space where you can add data without reallocating the Buffer. That way you don't need to create a new Buffer and copy the contents on each append operation.
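A minimal sketch of that idea, tracking a write offset into an over-allocated Buffer (the names and doubling growth factor are illustrative):
let buf = Buffer.alloc(1024); // over-allocate
let used = 0;                 // bytes actually written so far

function append(bytes) {
  // grow (by doubling) only when the free space runs out
  if (used + bytes.length > buf.length) {
    const bigger = Buffer.alloc(Math.max(buf.length * 2, used + bytes.length));
    buf.copy(bigger, 0, 0, used);
    buf = bigger;
  }
  Buffer.from(bytes).copy(buf, used);
  used += bytes.length;
}

append([0x00, 0x01, 0x02]);
append([0x03]);
console.log(buf.slice(0, used)); // <Buffer 00 01 02 03>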
This is to help anyone who comes here looking for a solution that wants a pure approach. I would recommend understanding this problem because it can happen in lots of different places not just with a JS Buffer object. By understanding why the problem exists and how to solve it you will improve your ability to solve other problems in the future since this one is so fundamental.
For those of us that have to deal with these problems in other languages it is quite natural to devise a solution, but there are people who may not realize how to abstract away the complexities and implement a generally efficient dynamic buffer. The code below may have potential to be optimized further.
I have left the read method unimplemented to keep the example small in size.
The realloc function in C (or any language dealing with intrinsic allocations) does not guarantee that the allocation will be expanded in size without moving the existing data - although sometimes it is possible. Therefore most applications, when needing to store an unknown amount of data, will use a method like the one below and not constantly reallocate, unless the reallocation is very infrequent. This is essentially how most file systems handle writing data to a file. The file system simply allocates another node and keeps all the nodes linked together, and when you read from it the complexity is abstracted away so that the file/buffer appears to be a single contiguous buffer.
For those of you who wish to understand the difficulty in just simply providing a high performance dynamic buffer you only need to view the code below, and also do some research on memory heap algorithms and how the memory heap works for programs.
Most languages will provide a fixed size buffer for performance reasons, and then provide another version that is dynamic in size. Some language systems opt for a third-party system where they keep the core functionality minimal (core distribution) and encourage developers to create libraries to solve additional or higher level problems. This is why you may question why a language does not provide some functionality. This small core functionality allows costs to be reduced in maintaining and enhancing the language, however you end up having to write your own implementations or depending on a third-party.
var Buffer_A1 = function (chunk_size) {
  this.buffer_list = [];
  this.total_size = 0;
  this.cur_size = 0;
  this.cur_buffer = [];
  this.chunk_size = chunk_size || 4096;
  this.buffer_list.push(Buffer.alloc(this.chunk_size));
};

Buffer_A1.prototype.writeByteArrayLimited = function (data, offset, length) {
  var can_write = length > (this.chunk_size - this.cur_size) ? (this.chunk_size - this.cur_size) : length;
  var lastbuf = this.buffer_list.length - 1;
  for (var x = 0; x < can_write; ++x) {
    this.buffer_list[lastbuf][this.cur_size + x] = data[x + offset];
  }
  this.cur_size += can_write;
  this.total_size += can_write;
  if (this.cur_size == this.chunk_size) {
    this.buffer_list.push(Buffer.alloc(this.chunk_size));
    this.cur_size = 0;
  }
  return can_write;
};

/*
  The `data` parameter can be anything that is array like. It just must
  support indexing and a length and produce an acceptable value to be
  used with Buffer.
*/
Buffer_A1.prototype.writeByteArray = function (data, offset, length) {
  offset = offset == undefined ? 0 : offset;
  length = length == undefined ? data.length : length;
  var rem = length;
  while (rem > 0) {
    // advance past the bytes already written, honoring the caller's offset
    rem -= this.writeByteArrayLimited(data, offset + (length - rem), rem);
  }
};

Buffer_A1.prototype.readByteArray = function (data, offset, length) {
  /*
    If you really wanted to implement some read functionality
    then you would have to deal with unaligned reads which could
    span two buffers.
  */
};

Buffer_A1.prototype.getSingleBuffer = function () {
  var obuf = Buffer.alloc(this.total_size);
  var cur_off = 0;
  var x;
  for (x = 0; x < this.buffer_list.length - 1; ++x) {
    this.buffer_list[x].copy(obuf, cur_off);
    cur_off += this.buffer_list[x].length;
  }
  this.buffer_list[x].copy(obuf, cur_off, 0, this.cur_size);
  return obuf;
};
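A quick usage example for the class above (the chunk size and byte values are arbitrary):
var dyn = new Buffer_A1(8); // small chunk size just to exercise the chunking
dyn.writeByteArray([0x00, 0x01, 0x02, 0x03, 0x04]);
dyn.writeByteArray([0x05, 0x06, 0x07, 0x08, 0x09]);
console.log(dyn.getSingleBuffer()); // <Buffer 00 01 02 03 04 05 06 07 08 09>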
Insert a byte at a specific place:
function insertToArray(arr, index, item) {
  return Buffer.concat([arr.slice(0, index), Buffer.from(item, "utf-8"), arr.slice(index)]);
}
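For example (illustrative values):
const buf = Buffer.from("helloworld");
console.log(insertToArray(buf, 5, " ").toString()); // "hello world"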
