Why does a Node.js write stream cause increasing memory consumption? - node.js

I'm reading a 45 MB text file in NodeJS and doing some processing on each char in it.
The first version of my script took one char from the original file, stored it in an accumulator variable (result += char), and at the end saved result to a text file. This did not work: because the file was so big, I was putting too much data in RAM, so I got an error: JavaScript heap out of memory. I decided to use a write stream so I could write data directly to disk one char at a time, hoping that would solve the issue:
fs = require('fs');

var proportion = 2;
var c = '';

fs.readFile('./concat/all.txt', 'utf8', function (err, data) {
  if (err) {
    return console.log(err);
  }

  var h = parseInt(Math.sqrt(data.length / proportion));
  var w = parseInt(h * proportion);
  console.log(w, h);

  var wstream = fs.createWriteStream('output.txt');
  var lineCount = 0;

  for (var x = 0; x < data.length; x++) {
    if (data.charCodeAt(x) === 32 && x > 0 && data.charCodeAt(x - 1) === 32)
      continue;
    if (data.charCodeAt(x) === 10 && x > 0 && data.charCodeAt(x - 1) === 10)
      continue;

    c = (data.charCodeAt(x) === 10 || data.charCodeAt(x) === 13 || data.charCodeAt(x) === 9) ? " " : data.charAt(x);

    lineCount++;
    if (lineCount > w) {
      c += "\n";
      lineCount = 0;
    }

    wstream.write(c);
  }

  wstream.end();
});
But still, I'm getting an out-of-memory error. The script runs with no problems if I comment out wstream.write(c). Why?

Stream, stream, stream ...
Throttle the I/O:
Stream the input in chunks, pause the stream for each chunk, parse & manipulate each character of the current chunk, and write it to the output.
Then resume the input stream to continue with the next chunk; repeat until finished.
I had no problem throttling your code using createReadStream & createWriteStream.
Here is the code (tested successfully with a 64MB file):
const fs = require('fs')

var w; var charCount = 0; var proportion = 2; var c = ''

//:This step was required to populate 'w'
fs.readFile('input.txt', 'utf8', function (err, data) {
  if (err) { return console.log(err) }
  let h = parseInt(Math.sqrt(data.length / proportion))
  w = parseInt(h * proportion); data = undefined
  console.log('[+] starting ( w:', w, ')'); EMX()
})

//:Here is the magick
function EMX() {
  const I = fs.createReadStream('input.txt')
  const O = fs.createWriteStream('output.txt')
  I.on('end', () => { console.log("[+] operation completed") })
  I.on('data', (chunk) => {
    I.pause(); let data = chunk.toString()
    for (var x = 0; x < data.length; x++) {
      if (data.charCodeAt(x) === 32 && x > 0 && data.charCodeAt(x - 1) === 32) continue
      if (data.charCodeAt(x) === 10 && x > 0 && data.charCodeAt(x - 1) === 10) continue
      c = (data.charCodeAt(x) === 10 || data.charCodeAt(x) === 13 || data.charCodeAt(x) === 9) ? " " : data.charAt(x)
      if (charCount > w) { c += "\n"; charCount = 0 } charCount++
      O.write(c)
    }
    I.resume()
  })
}
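A further note (not from the original answer): pause/resume throttles the reader, but wstream.write() can still buffer internally when the disk can't keep up, and that buffer is what grows. write() returns false once its internal buffer is full; respecting that return value and waiting for the 'drain' event keeps memory bounded. A minimal sketch of the pattern, assuming the same input/output file names:

const fs = require('fs')

const input = fs.createReadStream('input.txt')
const output = fs.createWriteStream('output.txt')

input.on('data', (chunk) => {
  // write() returns false when the internal buffer is full;
  // pause the reader and continue only after 'drain'.
  if (!output.write(chunk)) {
    input.pause()
    output.once('drain', () => input.resume())
  }
})
input.on('end', () => output.end())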

Related

Why does this array take up 1GB of memory?

I have files that store game map data, and I am using this function to read the map files and load them into memory when the server starts. The first 4 bytes tell the server how big the map is, and then each tile on the map has 6 bytes of data. I have roughly 33MB of data stored across multiple files, but when I read them into an array to access from memory, it takes up almost 1GB of RAM. I'm just wondering if something I am doing here is redundant or unnecessary and causing too much memory to be allocated.
Example: a 256x256 map would have 256 * 256 * 6 + 4 bytes of data
let mapData = [];

function loadMapFiles() {
  fs.readdir("./maps", (err, files) => {
    for (let file of files) {
      let pointer = 4;
      fs.readFile("./maps/" + file, (error, data) => {
        if (error) throw error;
        let sizex = data.readUInt16BE(0);
        let sizey = data.readUInt16BE(2);
        let mapNameNumber = Number(file);
        mapData[mapNameNumber] = [];
        for (let y = 0; y < sizey; y++) {
          mapData[mapNameNumber][y] = [];
          for (let x = 0; x < sizex; x++) {
            mapData[mapNameNumber][y][x] = [];
            mapData[mapNameNumber][y][x][0] = data.readUInt16BE(pointer);
            mapData[mapNameNumber][y][x][1] = data.readUInt16BE(pointer + 2);
            mapData[mapNameNumber][y][x][2] = data.readUInt16BE(pointer + 4);
            pointer = pointer + 6;
          }
        }
      });
    }
  });
}
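As a rough illustration of where the memory goes (a sketch, not from the original post): each tile becomes a nested JS array holding three numbers, and every array and element carries per-object overhead far beyond the 6 source bytes. Storing each map as one flat Uint16Array keeps the same values at about 2 bytes each, assuming the same file layout (loadMapFilesCompact is a hypothetical variant of the original function):

const fs = require("fs");

let mapData = [];

function loadMapFilesCompact() {
  fs.readdir("./maps", (err, files) => {
    for (let file of files) {
      fs.readFile("./maps/" + file, (error, data) => {
        if (error) throw error;
        const sizex = data.readUInt16BE(0);
        const sizey = data.readUInt16BE(2);
        // One flat typed array per map: 3 uint16 values per tile,
        // read sequentially from the 6-bytes-per-tile payload.
        const tiles = new Uint16Array(sizex * sizey * 3);
        for (let i = 0; i < tiles.length; i++) {
          tiles[i] = data.readUInt16BE(4 + i * 2);
        }
        mapData[Number(file)] = { sizex, sizey, tiles };
      });
    }
  });
}

// Tile (x, y), channel ch: mapData[n].tiles[(y * sizex + x) * 3 + ch]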

Asynchronously writing files in a loop, how to manage streams

I am trying to write over 100 png files via node-canvas in a loop. Only 40 files get generated and then the process completes.
I have tried creating a png stream via createPNGStream() and piping the results to a write stream created by fs.createWriteStream().
Write function:
function writeFile(row, col) {
  // canvas code ...
  const stream = canvas.createPNGStream();
  let path = __dirname + '/imgs/heatmapRow' + row + "Col" + col + '.png';
  const out = fs.createWriteStream(path);
  stream.pipe(out);
  out.on('finish', () => console.log('The PNG file was created.'))
}
Calling function:
function generateImages() {
  var numRows = 20;
  var numCols = 5;
  for (let row = 0; row < numRows; ++row) {
    for (let col = 0; col < numCols; ++col) {
      writeFile(row, col);
    }
  }
}
The loop runs and completes, and at the end I get a bunch of the following lines all at once:
The PNG file was created.
The PNG file was created.
The PNG file was created.
The PNG file was created.
I'm thinking that on each loop iteration a write stream is created asynchronously, and the process terminates because I can have only so many streams open at once.
How can I write all of my files asynchronously to speed up the process? (I'd prefer not to write the files synchronously.) Do I need to add each writeFile call to a queue? How do I limit the number of streams I have open, and how do I manage them?
You have to use Promises to coordinate your asynchronous calls.
Here is a solution:
function writeFile(row, col) {
  // canvas code ...
  const stream = canvas.createPNGStream();
  let path = __dirname + "/imgs/heatmapRow" + row + "Col" + col + ".png";
  return new Promise(resolve => {
    const out = fs.createWriteStream(path);
    stream.pipe(out);
    out.on("finish", () => {
      console.log("The PNG file was created.");
      resolve();
    });
  });
}

function generateImages() {
  var numRows = 20;
  var numCols = 5;
  var imagesToGenerate = [];
  for (let row = 0; row < numRows; ++row) {
    for (let col = 0; col < numCols; ++col) {
      imagesToGenerate.push(writeFile(row, col));
    }
  }
  Promise.all(imagesToGenerate).then(() => {
    console.log("All images generated");
  });
}
Take a look at the Promise.all docs if it isn't clear how this works.
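One caveat, as an addition to this answer: Promise.all still starts every writeFile at once, so all streams are open simultaneously. If the open-stream limit from the question is the real constraint, a simple batching loop bounds concurrency. A sketch reusing the promise-returning writeFile above (batchSize is an arbitrary choice):

async function generateImagesBatched(numRows, numCols, batchSize) {
  const jobs = [];
  for (let row = 0; row < numRows; ++row) {
    for (let col = 0; col < numCols; ++col) {
      jobs.push([row, col]);
    }
  }
  // At most batchSize writeFile calls (and open streams) at a time.
  for (let i = 0; i < jobs.length; i += batchSize) {
    await Promise.all(jobs.slice(i, i + batchSize).map(([r, c]) => writeFile(r, c)));
  }
  console.log("All images generated");
}

generateImagesBatched(20, 5, 10);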

How do I reverse a scanline using the jpeg-js module/node JS buffer?

I've been fiddling around with the jpeg-js module and the Node JS Buffer, attempting to create a small command-line program that modifies the decoded JPEG buffer data to create a pattern of X reversed scanlines followed by X normal scanlines before saving a new JPEG. In other words, I'm looking to flip portions of the image, but not the entire image itself (there are plenty of modules that do that, of course, but not for the specific use case I have).
To create the reversed/normal line patterns, I've been reading/writing line by line, saving a slice of each line to a variable, then starting at the end of the scanline and incrementally stepping back in slices of 4 bytes (the allocation for an RGBA value) until I reach the beginning of the line. Code for the program:
'use strict';

const fs = require('fs');
const jpeg = require('jpeg-js');
const getPixels = require('get-pixels');

let a = fs.readFileSync('./IMG_0006_2.jpg');
let d = Buffer.allocUnsafe(a.width * a.height * 4);
let c = jpeg.decode(a);

let val = false; // track whether normal or reversed scanlines
let lineWidth = b.width * 4;
let lineCount = 0;
let track = 0;
let track2 = 0;
let track3 = 0;
let curr, currLine; // storage for writing/reading scanlines, respectively
let limit = {
  one: Math.floor(Math.random() * 141),
  two: Math.floor(Math.random() * 151),
  three: Math.floor(Math.random() * 121)
};
if (limit.one < 30) {
  limit.one = 30;
}
if (limit.two < 40) {
  limit.two = 40;
}
if (limit.two < 20) {
  limit.two = 20;
}
let calc = {};
calc.floor = 0;
calc.ceil = 0 + lineWidth;

d.forEach(function(item, i) {
  if (i % lineWidth === 0) {
    lineCount++;
    /* // alternate scanline type, currently disabled to figure out how to successfully reverse image
    if (lineCount > 1 && lineCount % limit.one === 0) {
      // val = !val;
    }
    */
    if (lineCount === 1) {
      val = !val; // setting alt scanline check to true initially
    } else if (calc.floor + lineWidth < b.data.length - 1) {
      calc.floor += lineWidth;
      calc.ceil += lineWidth;
    }
    currLine = c.data.slice(calc.floor, calc.ceil); // current line
    track = val ? lineWidth : 0; // tracking variable for reading from scanline
    track2 = val ? 4 : 0; // tracking variable for writing from scanline
  }
  // check if reversed and writing variable has written 4 bytes for RGBA
  // if so, set writing source to 4 bytes at end of line and read from there incrementally
  if (val && track2 === 4) {
    track2 = 0; // reset writing count
    curr = currLine.slice(track - 4, track); // store 4 previous bytes as writing source
    if (lineCount === 1 && lineWidth - track < 30) console.log(curr); // debug
  } else {
    curr = currLine; // set normal scanline
  }
  d[i] = curr[track2];
  // check if there is no match between data source and decoded image
  if (d[i] !== curr[track2]) {
    if (track3 < 50) {
      console.log(i);
    }
    track3++;
  }
  track2++; // update tracking variable
  track = val ? track - 1 : track + 1; // update tracking variable
});

var rawImageData = {
  data: d,
  width: b.width,
  height: b.height
};
console.log(b.data.length);
console.log('errors\t', track3);
var jpegImageData = jpeg.encode(rawImageData, 100);
fs.writeFile('foo2223.jpg', jpegImageData.data);
Alas, the reversed-scanline code I've written does not work properly. Unfortunately, I've only been able to successfully reverse the red channel of my test image (see below left); the blue and green channels just turn into vague blurs. The color scheme should look something like the right image.
What am I doing wrong here?
For reversed lines, you store a slice of 4 bytes (4 bytes = 1 pixel) and then write the first value of the pixel (red) correctly.
But on the next iteration you overwrite the slice curr with currLine, so the rest of the channels get wrong values.
if (val && track2 === 4) {
  track2 = 0; // reset writing count
  curr = currLine.slice(track - 4, track); // store 4 previous bytes as writing source
  if (lineCount === 1 && lineWidth - track < 30) console.log(curr); // debug
} else {
  curr = currLine; // set normal scanline
}
Iteration 0: val == true, track2 == 4, so curr is set to the next pixel and the red channel is written.
Iteration 1: val == true, track2 == 1, so (val && track2 === 4) is false: curr is overwritten with currLine and the green channel is read from the wrong place.
You can move the track2 === 4 branch inside a val check to avoid this:
if (val) {
  if (track2 === 4) {
    track2 = 0; // reset writing count
    curr = currLine.slice(track - 4, track); // store 4 previous bytes as writing source
    if (lineCount === 1 && lineWidth - track < 30) console.log(curr); // debug
  }
} else {
  curr = currLine; // set normal scanline
}
Fixed code should look like this:
function flipAlt(input, output) {
  const fs = require('fs');
  const jpeg = require('jpeg-js');

  let a = fs.readFileSync(input);
  let b = jpeg.decode(a);
  let d = Buffer.allocUnsafe(b.width * b.height * 4);

  let val = false; // track whether normal or reversed scanlines
  let lineWidth = b.width * 4;
  let lineCount = 0;
  let track = 0;
  let track2 = 0;
  let track3 = 0;
  let curr, currLine; // storage for writing/reading scanlines, respectively
  let limit = {
    one: Math.floor(Math.random() * 141),
    two: Math.floor(Math.random() * 151),
    three: Math.floor(Math.random() * 121)
  };
  if (limit.one < 30) {
    limit.one = 30;
  }
  if (limit.two < 40) {
    limit.two = 40;
  }
  if (limit.two < 20) {
    limit.two = 20;
  }
  let calc = {};
  calc.floor = 0;
  calc.ceil = 0 + lineWidth;

  d.forEach(function(item, i) {
    if (i % lineWidth === 0) {
      lineCount++;
      if (lineCount > 1) {
        val = !val;
      }
      if (lineCount === 1) {
        val = !val; // setting alt scanline check to true initially
      } else if (calc.floor + lineWidth < b.data.length - 1) {
        calc.floor += lineWidth;
        calc.ceil += lineWidth;
      }
      currLine = b.data.slice(calc.floor, calc.ceil); // current line
      track = val ? lineWidth : 0; // tracking variable for reading from scanline
      track2 = val ? 4 : 0; // tracking variable for writing from scanline
    }
    // check if reversed and writing variable has written 4 bytes for RGBA
    // if so, set writing source to 4 bytes at end of line and read from there incrementally
    if (val) {
      if (track2 === 4) {
        track2 = 0; // reset writing count
        curr = currLine.slice(track - 4, track); // store 4 previous bytes as writing source
        if (lineCount === 1 && lineWidth - track < 30) console.log(curr); // debug
      }
    } else {
      curr = currLine; // set normal scanline
    }
    d[i] = curr[track2];
    // check if there is no match between data source and decoded image
    if (d[i] !== curr[track2]) {
      if (track3 < 50) {
        console.log(i);
      }
      track3++;
    }
    track2++; // update tracking variable
    track = val ? track - 1 : track + 1; // update tracking variable
  });

  var rawImageData = {
    data: d,
    width: b.width,
    height: b.height
  };
  console.log(b.data.length);
  console.log('errors\t', track3);
  var jpegImageData = jpeg.encode(rawImageData, 100);
  fs.writeFile(output, jpegImageData.data);
}

flipAlt('input.jpg', 'output.jpg');
Instead of tracking array indices yourself, you can use a utility library like lodash; it makes things easier:
function flipAlt(input, output) {
  const fs = require('fs');
  const jpeg = require('jpeg-js');
  const _ = require('lodash');

  const image = jpeg.decode(fs.readFileSync(input));
  const lines = _.chunk(image.data, image.width * 4);
  const flipped = _.flatten(lines.map((line, index) => {
    if (index % 2 != 0) {
      return line;
    }
    const pixels = _.chunk(line, 4);
    return _.flatten(pixels.reverse());
  }));
  const imageData = jpeg.encode({
    width: image.width,
    height: image.height,
    data: new Buffer(flipped)
  }, 100).data;

  fs.writeFile(output, imageData);
}

flipAlt('input.jpg', 'output.jpg');

Node.js throws "btoa is not defined" error

In my node.js application I did an npm install btoa-atob so that I could use the btoa() and atob() functions, which are native in client-side JavaScript but for some reason weren't included in Node. The new directory showed up in my node_modules folder, which itself is in root alongside app.js. Then I made sure to add btoa-atob as a dependency in my package.json file, which is in root.
However, for some reason, it still will not work.
console.log(btoa("Hello World!"));
^ should output "SGVsbG8gV29ybGQh" to the console, but instead, I get the error:
btoa is not defined.
Did I not do the install properly? What did I overlook?
The 'btoa-atob' module does not export a programmatic interface; it only provides command-line utilities.
If you need to convert to Base64, you can do so using Buffer:
console.log(Buffer.from('Hello World!').toString('base64'));
Reverse (assuming the content you're decoding is a utf8 string):
console.log(Buffer.from(b64Encoded, 'base64').toString());
Note: prior to Node v4, use new Buffer rather than Buffer.from.
The solutions posted here don't work with non-ASCII characters (i.e. if you plan to exchange Base64 between Node.js and a browser). In order to make it work you have to mark the input text as 'binary':
Buffer.from('Hélló wórld!!', 'binary').toString('base64')
This gives you SOlsbPMgd/NybGQhIQ==. If you call atob('SOlsbPMgd/NybGQhIQ==') in a browser, it will decode it the right way. Node.js will also decode it correctly via:
Buffer.from('SOlsbPMgd/NybGQhIQ==', 'base64').toString('binary')
If you skip the 'binary' part, the special chars will be decoded wrongly.
I got this from the implementation of the btoa npm package.
My team ran into this problem when using Node with React Native and PouchDB. Here is how we solved it...
NPM install buffer:
$ npm install --save buffer
Ensure Buffer, btoa, and atob are loaded as globals:
global.Buffer = global.Buffer || require('buffer').Buffer;

if (typeof btoa === 'undefined') {
  global.btoa = function (str) {
    return new Buffer(str, 'binary').toString('base64');
  };
}

if (typeof atob === 'undefined') {
  global.atob = function (b64Encoded) {
    return new Buffer(b64Encoded, 'base64').toString('binary');
  };
}
export const universalBtoa = str => {
  try {
    return btoa(str);
  } catch (err) {
    return Buffer.from(str).toString('base64');
  }
};

export const universalAtob = b64Encoded => {
  try {
    return atob(b64Encoded);
  } catch (err) {
    return Buffer.from(b64Encoded, 'base64').toString();
  }
};
I found that although the shims from answers above worked, they did not match the behaviour of desktop browsers' implementations of btoa() and atob():
const btoa = function(str){ return Buffer.from(str).toString('base64'); }
// returns "4pyT", yet in desktop Chrome would throw an error.
btoa('✓');
// returns "fsO1w6bCvA==", yet in desktop Chrome would return "fvXmvA=="
btoa(String.fromCharCode.apply(null, new Uint8Array([0x7e, 0xf5, 0xe6, 0xbc])));
As it turns out, Buffer instances represent/interpret strings encoded in UTF-8 by default. By contrast, in desktop Chrome, you can't even input a string that contains characters outside of the latin1 range into btoa(), as it will throw an exception: Uncaught DOMException: Failed to execute 'btoa' on 'Window': The string to be encoded contains characters outside of the Latin1 range.
Therefore, you need to explicitly set the encoding type to latin1 in order for your Node.js shim to match the encoding type of desktop Chrome:
const btoaLatin1 = function(str) { return Buffer.from(str, 'latin1').toString('base64'); }
const atobLatin1 = function(b64Encoded) {return Buffer.from(b64Encoded, 'base64').toString('latin1');}
const btoaUTF8 = function(str) { return Buffer.from(str, 'utf8').toString('base64'); }
const atobUTF8 = function(b64Encoded) {return Buffer.from(b64Encoded, 'base64').toString('utf8');}
btoaLatin1('✓'); // returns "Ew==" (would be preferable for it to throw error because this is undecodable)
atobLatin1(btoa('✓')); // returns "\u0019" (END OF MEDIUM)
btoaUTF8('✓'); // returns "4pyT"
atobUTF8(btoa('✓')); // returns "✓"
// returns "fvXmvA==", just like desktop Chrome
btoaLatin1(String.fromCharCode.apply(null, new Uint8Array([0x7e, 0xf5, 0xe6, 0xbc])));
// returns "fsO1w6bCvA=="
btoaUTF8(String.fromCharCode.apply(null, new Uint8Array([0x7e, 0xf5, 0xe6, 0xbc])));
I have code shared between server and client, and I needed an implementation of btoa inside it.
I tried something like:
const btoaImplementation = btoa || (str => Buffer.from(str).toString('base64'));
but the server would crash with:
ReferenceError: btoa is not defined
while Buffer is not defined on the client.
I couldn't check window.btoa (it's shared code, remember?).
So I ended up with this implementation:
const btoaImplementation = str => {
  try {
    return btoa(str);
  } catch (err) {
    return Buffer.from(str).toString('base64')
  }
};
I was able to use btoa for binary-data-to-Base64-string conversion using the npm package below:
https://www.npmjs.com/package/btoa
As described in its documentation, I did the following steps in my Node.js application:
Install => npm install --save btoa
Declare at top => const btoa = require('btoa');
Use => const b64 = btoa("stringToEncode");
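Putting those steps together (a short sketch):

const btoa = require('btoa');

const b64 = btoa('stringToEncode');
console.log(b64); // "c3RyaW5nVG9FbmNvZGU="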
Here's a concise universal solution for base64 encoding:
const nodeBtoa = (b) => Buffer.from(b).toString('base64');
export const base64encode = typeof btoa !== 'undefined' ? btoa : nodeBtoa;
Anybody looking to decode:
let decoded = Buffer.from(<encoded string>, 'base64').toString()
I came here looking for decoding and ended up figuring it out from an answer here.
I had the same problem with the 'script' plugin in the Atom editor, which runs an old version of Node that has neither btoa() nor atob(), nor does it support the Buffer datatype. The following code does the trick:
var Base64 = new function() {
  var keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="

  this.encode = function(input) {
    var output = "";
    var chr1, chr2, chr3, enc1, enc2, enc3, enc4;
    var i = 0;
    input = Base64._utf8_encode(input);
    while (i < input.length) {
      chr1 = input.charCodeAt(i++);
      chr2 = input.charCodeAt(i++);
      chr3 = input.charCodeAt(i++);
      enc1 = chr1 >> 2;
      enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
      enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
      enc4 = chr3 & 63;
      if (isNaN(chr2)) {
        enc3 = enc4 = 64;
      } else if (isNaN(chr3)) {
        enc4 = 64;
      }
      output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2) + keyStr.charAt(enc3) + keyStr.charAt(enc4);
    }
    return output;
  }

  this.decode = function(input) {
    var output = "";
    var chr1, chr2, chr3;
    var enc1, enc2, enc3, enc4;
    var i = 0;
    input = input.replace(/[^A-Za-z0-9\+\/\=]/g, "");
    while (i < input.length) {
      enc1 = keyStr.indexOf(input.charAt(i++));
      enc2 = keyStr.indexOf(input.charAt(i++));
      enc3 = keyStr.indexOf(input.charAt(i++));
      enc4 = keyStr.indexOf(input.charAt(i++));
      chr1 = (enc1 << 2) | (enc2 >> 4);
      chr2 = ((enc2 & 15) << 4) | (enc3 >> 2);
      chr3 = ((enc3 & 3) << 6) | enc4;
      output = output + String.fromCharCode(chr1);
      if (enc3 != 64) {
        output = output + String.fromCharCode(chr2);
      }
      if (enc4 != 64) {
        output = output + String.fromCharCode(chr3);
      }
    }
    output = Base64._utf8_decode(output);
    return output;
  }

  this._utf8_encode = function(string) {
    string = string.replace(/\r\n/g, "\n");
    var utftext = "";
    for (var n = 0; n < string.length; n++) {
      var c = string.charCodeAt(n);
      if (c < 128) {
        utftext += String.fromCharCode(c);
      } else if ((c > 127) && (c < 2048)) {
        utftext += String.fromCharCode((c >> 6) | 192);
        utftext += String.fromCharCode((c & 63) | 128);
      } else {
        utftext += String.fromCharCode((c >> 12) | 224);
        utftext += String.fromCharCode(((c >> 6) & 63) | 128);
        utftext += String.fromCharCode((c & 63) | 128);
      }
    }
    return utftext;
  }

  this._utf8_decode = function(utftext) {
    var string = "";
    var i = 0;
    var c = 0,
      c1 = 0,
      c2 = 0,
      c3 = 0;
    while (i < utftext.length) {
      c = utftext.charCodeAt(i);
      if (c < 128) {
        string += String.fromCharCode(c);
        i++;
      } else if ((c > 191) && (c < 224)) {
        c2 = utftext.charCodeAt(i + 1);
        string += String.fromCharCode(((c & 31) << 6) | (c2 & 63));
        i += 2;
      } else {
        c2 = utftext.charCodeAt(i + 1);
        c3 = utftext.charCodeAt(i + 2);
        string += String.fromCharCode(((c & 15) << 12) | ((c2 & 63) << 6) | (c3 & 63));
        i += 3;
      }
    }
    return string;
  }
}()
console.log("btoa('A') = " + btoa('A'));
console.log("btoa('QQ==') = " + atob('QQ=='));
console.log("btoa('B') = " + btoa('B'));
console.log("btoa('Qg==') = " + atob('Qg=='));
If you ended up here looking for a solution to "atob is not defined" (like me), try upgrading your Node.js version; it helped me.
I understand this is a discussion about a Node application, but in the interest of universal JavaScript applications running on a Node server (which is how I arrived at this post): I have been researching this for a universal/isomorphic React app I have been building, and the package abab worked for me. In fact, it was the only solution I could find that worked, rather than using the Buffer method also mentioned (I had TypeScript issues).
(This package is used by jsdom, which in turn is used by the window package.)
Getting back to my point: based on this, since this functionality is already written as an npm package like the one you mentioned, with its own algorithm based on the W3 spec, you could install and use the abab package rather than writing your own function that may or may not be accurate depending on encoding.
---EDIT---
I started having weird encoding issues today (not sure why they started happening now) with the package abab. It seemed to encode correctly most of the time, but sometimes on the front end it encoded incorrectly. I spent a long time trying to debug, but switched to the package base-64 as recommended, and it worked straight away. It definitely seemed to be down to abab's Base64 algorithm.
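For reference, abab exposes plain atob/btoa functions; a short sketch based on its documented interface (note that abab's btoa returns null for input outside the Latin1 range instead of throwing, per the spec algorithm it implements):

const { atob, btoa } = require('abab');

console.log(btoa('Hello World!')); // "SGVsbG8gV29ybGQh"
console.log(atob('SGVsbG8gV29ybGQh')); // "Hello World!"
console.log(btoa('✓')); // null: outside the Latin1 range, so it cannot be encoded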

Node is not launching

I'm desperately trying to make node.js work again on Ubuntu 12.04 LTS.
I installed it 2-3 weeks ago and everything went fine; I used it daily during that period.
But today it suddenly stopped working, and the way it misbehaves is really strange:
node -v works and returns v0.8.2
the node command works too, I can access the console and do a console.log
but when I run node with a file, like node server.js, Ubuntu just goes to a new line:
kollektiv#kollektiv-PC:~/node-projects$ node server.js
kollektiv#kollektiv-PC:~/node-projects$
I already reinstalled Ubuntu this evening, but I get the same result.
I also ran apt-get upgrade and apt-get update multiple times in case some node.js dependencies were out of date.
The way I installed node.js was by compiling the source, following this tutorial: Compiling Node.js from source on Ubuntu 10.24 - shapeshed
I even did a chmod 777 server.js on the server file just to be sure, but that didn't change anything either.
Thanks a lot in advance for your help!
EDIT: Content of server.js
var net = require('net'),
  server = net.createServer();
var crypto = require('crypto'),
  shasum = crypto.createHash('sha256');
var alpha = [],
  i = 0,
  cle = '';

while (i < 256) {
  alpha.push(String.fromCharCode(i));
  i++;
}

// CRYPTAGE -- START --
function cryptProcess(cle, txt) {
  var k = txt.length,
    j = k / cle.length,
    cledeBase = cle,
    txtc = '',
    i = 1;
  while (i < j) {
    cle = cle + cledeBase;
    i++;
  }
  function crypt(cleu, letr) {
    //if(alpha.indexOf(letr) == -1) return "§";
    var biIndex = alpha.indexOf(letr) + alpha.indexOf(cleu), x;
    sumIndex = biIndex - alpha.length;
    x = sumIndex >= 0 ? alpha[sumIndex] : alpha[biIndex];
    return x;
  }
  while (k--) {
    txtc = crypt(cle[k], txt[k]) + txtc;
  }
  return txtc;
}

function decryptProcess(cle, txtc) {
  var k = txtc.length,
    j = k / cle.length,
    cledeBase = cle,
    txt = '',
    i = 1;
  while (i < j) {
    cle = cle + cledeBase;
    i++;
  }
  txt = '';
  function decrypt(cleu, letc) {
    //if(alpha.indexOf(letc) == -1) return "§";
    var biIndex = letc - alpha.indexOf(cleu), x;
    x = biIndex >= 0 ? alpha[biIndex] : alpha[biIndex + alphabet.length];
    return x;
  }
  while (k--) {
    txt = decrypt(cle[k], txtc[k]) + txt;
  }
  return txt;
}
// CRYPTAGE -- END --

server.on('connection', function(client) {
  var connecOne = 0;

  function talk(data) {
    var msg = data.toString('utf8');
    var msgEnc = cryptProcess(cle, msg);
    client.write(msgEnc);
    console.log(msg + '\nsend as\n' + msgEnc);
  }

  client.once('data', function(data) {
    function triHandShake() {
    }
  });

  client.on('data', function(data) {
    var msg = data.toString('utf8');
    if (connecOne === 0) {
      connectionOne(msg);
      connecOne++;
    } else if (connecOne === 1) {
      // Check for paragraph symbol
      // authentification with cookie as cle
    }
    var msgDec = decryptProcess(cle, msg);
    console.log(msgDec + '\nreceived as\n' + msgDec);
  });

  client.on('end', function() {
    connecOne = 0;
  });
});
Your script never calls server.listen, so after setting everything up the event loop has no pending work and the process exits immediately; that is why you drop straight back to the shell prompt. Call server.listen to accept connections and keep the process running as expected:
server.listen(8124, function() { // 'listening' listener
  console.log('server bound');
});
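For completeness, a minimal self-contained version showing the effect (the port number is arbitrary): without listen the script exits immediately; with it, the process stays alive waiting for connections.

var net = require('net');

var server = net.createServer(function(client) {
  client.on('data', function(data) {
    client.write(data); // echo back
  });
});

// Without this call the event loop has no pending work and Node exits.
server.listen(8124, function() {
  console.log('server bound');
});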
