Writing at end of every line in a file in Node.js? - node.js

I have a file
one
two
three
I want to append a word at the end of every line in this file. How can I achieve that in node ?
eg.
onecandy
twocandy
threecandy
Then I want to use this file in another function, i.e. after "candy" has been appended to every line. How do I do that?

Because you have to read each line to know where it ends, and you also have to write at the end of each line.
In conclusion, you have to read everything and rewrite the file with the word appended to each line; plain appending won't save much performance, it only complicates things.
var fs = require("fs");

// Read the file and split it into individual lines.
var allLines = fs.readFileSync('./input.txt').toString().split('\n');

// Append "candy" to every line, then write the whole file back in one
// call.  Note: fs.writeFileSync is synchronous and takes NO callback
// (the original passed one, which is silently ignored), and appending
// line-by-line with fs.appendFileSync costs one syscall per line.
var newLines = allLines.map(function (line) {
    var newLine = line + "candy";
    console.log(newLine);
    return newLine;
});
fs.writeFileSync('./input.txt', newLines.join('\n') + '\n');

// each line now has "candy" appended; re-read the updated file for later use
allLines = fs.readFileSync('./input.txt').toString().split('\n');
Note: For replacing just some specified lines you can go through this answer.

Related

log line number in vim whenever a line is deleted

I have an application that generates a txt file with thousands of lines. I have to delete some lines manually by going through the file (using vim). However, I might need to generate the same file again if a change in format is required. That will make me go through the file again to delete the same lines.
The solution to avoid deleting manually repeatedly is that vim somehow logs the line number when I delete a line. I can then use some script to remove those lines. Is it possible to get this behavior in vim?
Otherwise, is there any other editor to get this behavior? There are many lines I have to delete and it's not feasible for me to log each line number manually.
As suggested by phd and wxz, I was able to use git-diff of the file to extract the deleted lines by using node package gitdiff-parser for parsing the diff.
const gitDiffParser = require('gitdiff-parser')
const { exec } = require("child_process");

// Run `git diff -U0` and capture its stdout as a Promise.
// -U0 removes context lines so each hunk covers exactly the changed lines.
let p = new Promise((res, rej) => {
    exec("git diff -U0 file.txt", (error, stdout) => {
        if (error) {
            // previously the error was silently dropped and stdout
            // (possibly empty) was resolved anyway
            rej(error);
            return;
        }
        res(stdout);
    });
});

p.then(s => {
    // Parse the unified diff; oldStart/oldLines of each hunk give the
    // range of lines deleted from the original file.
    const diff = gitDiffParser.parse(s);  // was an implicit global
    diff[0].hunks.forEach(element => {
        console.log(`start: ${element.oldStart}, end: ${element.oldStart + element.oldLines - 1}`)
    });
}).catch(err => {
    console.error(err);
})
Another solution or say hack was to append line number in each line of the file and extract the undeleted line numbers after removing the required lines.

multiple column copy format postgresql Node.js

I am using a postgres stream to insert records into postgres.
It works fine for a single column, but what is the ideal data format for COPY with multiple columns?
code snippets
// COPY with explicit tab delimiter (the default for text format anyway).
var sqlcopysyntax = 'COPY srt (starttime, endtime) FROM STDIN delimiters E\'\\t\'';
// client/copyFrom come from pg and pg-copy-streams (defined elsewhere).
var stream = client.query(copyFrom(sqlcopysyntax));
console.log(sqlcopysyntax)
// Rows as [starttime, endtime] string pairs.
var interndataset = [
['1', '4'],
['6', '12.074'],
['13.138', '16.183'],
['17.226', '21.605'],
['22.606', '24.733'],
['24.816', '27.027'],
['31.657', '33.617'],
['34.66', '37.204'],
['37.287', '38.58'],
['39.456', '43.669'],
['43.752', '47.297'],
['47.381', '49.55'],
];
var started = false;
// Transform stream: the whole 2-D array is written below as ONE chunk,
// so `arr` here is the entire dataset, not a single row.
var internmap = through2.obj(function(arr, enc, cb) {
/* updated this part by solution provided by #VaoTsun */
// Join each row's columns with tabs and terminate each row with '\n'.
var rowText = arr.map(function(item) { return (item.join('\t') + '\n') }).join('')
started = true;
//console.log(rowText)
// NOTE: this appends a literal "\." INTO the data stream — the likely
// cause of the "end-of-copy marker corrupt" error quoted below.
rowText=rowText+'\\\.';
/* end here*/
// duplicate assignment; `started` was already set above
started = true;
cb(null, rowText);
})
internmap.write(interndataset);
internmap.end();
internmap.pipe(stream);
wherein I got the error (due to the delimiter): missing data for column "endtime" (resolved), but then got the error below:
error: end-of-copy marker corrupt
COPY intern (starttime, endtime) FROM STDIN
1 4
6 12.074
13.138 16.183
17.226 21.605
22.606 24.733
24.816 27.027
31.657 33.617
34.66 37.204
37.287 38.58
39.456 43.669
43.752 47.297
47.381 49.55
Any pointers on how to resolve this?
What would be the ideal format for multiple-column inserts using the COPY command?
With immense help from #jeromew from the GitHub community,
and a proper implementation of node-pg-copy-streams (which takes away the COPY command's complexity), we were able to solve this issue:
https://github.com/brianc/node-pg-copy-streams/issues/65
below is working code snippets
// COPY using the default text format (tab delimiter).
// Note: the closing quote was missing in the original snippet.
var sqlcopysyntax = 'COPY srt (starttime, endtime) FROM STDIN';
var stream = client.query(copyFrom(sqlcopysyntax));
console.log(sqlcopysyntax)
// Rows as [starttime, endtime] string pairs.
var interndataset = [
['1', '4'],
['6', '12.074'],
['13.138', '16.183'],
['17.226', '21.605'],
['22.606', '24.733'],
['24.816', '27.027'],
['31.657', '33.617'],
['34.66', '37.204'],
['37.287', '38.58'],
['39.456', '43.669'],
['43.752', '47.297'],
['47.381', '49.55'],
];
var started = false;
// Each row is written individually; tab-separate the columns and put a
// newline BEFORE every row except the first, so no trailing newline is
// emitted and pg-copy-streams can append the end-of-copy marker itself.
var internmap = through2.obj(function(arr, enc, cb) {
var rowText = (started ? '\n' : '') + arr.join('\t');
started = true;
cb(null, rowText);
})
// write row-by-row (the original iterated `data`, an undefined name)
interndataset.forEach(function(r) {
internmap.write(r);
})
internmap.end();
internmap.pipe(stream);
https://www.postgresql.org/docs/current/static/sql-copy.html
DELIMITER
Specifies the character that separates columns within each row (line)
of the file. The default is a tab character in text format, a comma in
CSV format. This must be a single one-byte character. This option is
not allowed when using binary format.
try using not default delimiter (as tabulation can be replaced on copy/paste), eg:
t=# create table intern(starttime float,endtime float);
CREATE TABLE
t=# \! cat 1
COPY intern(starttime,endtime) FROM STDIN delimiter ';';
1;4
6;12.074
13.138;16.183
17.226;21.605
22.606;24.733
24.816;27.027
31.657;33.617
34.66;37.204
37.287;38.58
39.456;43.669
43.752;47.297
47.381;49.55
49.633;54.68
54.763;58.225
59.142;62.98
64.189;68.861
69.82;71.613
72.364;76.201
76.285;78.787
78.871;81.832
\.
t=# \i 1
COPY 20
Also in your question you lack \., try typing in psql - you will see instructions:
t=# COPY intern(starttime,endtime) FROM STDIN delimiter ';';
Enter data to be copied followed by a newline.
End with a backslash and a period on a line by itself.

How to save a table to a file from Lua

I'm having trouble printing a table to a file with lua (and I'm new to lua).
Here's some code I found here to print the table;
-- Recursively pretty-print a table (keys, values, nested tables) to stdout.
-- Cycles are detected via a cache of already-visited table addresses.
function print_r ( t )
    local print_r_cache={}
    local function sub_print_r(t,indent)
        if (print_r_cache[tostring(t)]) then
            -- already visited: print a marker instead of recursing forever
            print(indent.."*"..tostring(t))
        else
            print_r_cache[tostring(t)]=true
            if (type(t)=="table") then
                for pos,val in pairs(t) do
                    if (type(val)=="table") then
                        -- print the NESTED table's own address (the
                        -- original printed tostring(t), the parent's)
                        print(indent.."["..pos.."] => "..tostring(val).." {")
                        sub_print_r(val,indent..string.rep(" ",string.len(pos)+8))
                        print(indent..string.rep(" ",string.len(pos)+6).."}")
                    elseif (type(val)=="string") then
                        print(indent.."["..pos..'] => "'..val..'"')
                    else
                        print(indent.."["..pos.."] => "..tostring(val))
                    end
                end
            else
                print(indent..tostring(t))
            end
        end
    end
    if (type(t)=="table") then
        print(tostring(t).." {")
        sub_print_r(t," ")
        print("}")
    else
        sub_print_r(t," ")
    end
    print()
end
I have no idea where the 'print' command goes to, I'm running this lua code from within another program. What I would like to do is save the table to a .txt file. Here's what I've tried;
-- Write the string t to C:\temp\test.txt, raising on open failure.
-- NOTE: in a Lua string literal "\t" is a TAB character, so the original
-- path "C:\temp\test.txt" was silently corrupted; backslashes must be
-- escaped (or forward slashes used, which Windows also accepts).
function savetxt ( t )
    local file = assert(io.open("C:\\temp\\test.txt", "w"))
    file:write(t)
    file:close()
end
Then in the print-r function I've changed everywhere it says 'print' to 'savetxt'. This doesn't work. It doesn't seem to access the text file in any way. Can anyone suggest an alternative method?
I have a suspicion that this line is the problem;
local file = assert(io.open("C:\temp\test.txt", "w"))
Update;
I have tried the edit suggested by Diego Pino but still no success. I run this lua script from another program (for which I don't have the source), so I'm not sure where the default directory of the output file might be (is there a method to get this programatically?). Is is possible that since this is called from another program there's something blocking the output?
Update #2;
It seems like the problem is with this line:
local file = assert(io.open("C:\test\test2.txt", "w"))
I've tried changing it "C:\temp\test2.text", but that didn't work. I'm pretty confident it's an error at this point. If I comment out any line after this (but leave this line in) then it still fails, if I comment out this line (and any following 'file' lines) then the code runs. What could be causing this error?
I have no idea where the 'print' command goes to,
print() output goes to default output file, you can change that with io.output([file]), see Lua manuals for details on querying and changing default output.
where do files get created if I don't specify the directory
Typically it will land in current working directory.
Your print_r function prints out a table to stdout. What you want is to print out the output of print_r to a file. Change the print_r function so instead of printing to stdout, it prints out to a file descriptor. Perhaps the easiest way to do that is to pass a file descriptor to print_r and overwrite the print function:
-- Variant of print_r that writes to a file descriptor instead of stdout.
function print_r (t, fd)
-- default to stdout when no descriptor is given
fd = fd or io.stdout
-- locally shadow the global print() so the unchanged print_r body
-- transparently writes to fd instead of stdout
local function print(str)
str = str or ""
fd:write(str.."\n")
end
-- ... (rest of the original print_r body, unchanged)
...
end
The rest of the print_r doesn't need any change.
Later in savetxt call print_r to print the table to a file.
-- Open the output file and dump the table t into it via print_r.
-- NOTE: "\t" in a Lua string literal is a TAB, so the path's backslashes
-- must be escaped (the original "C:\temp\test.txt" was a corrupted path).
function savetxt (t)
    local file = assert(io.open("C:\\temp\\test.txt", "w"))
    print_r(t, file)
    file:close()
end
-- Round-trip a Lua table through JSON and a file on disk.
require("json")

result = {
    ["ip"]="192.168.0.177",
    ["date"]="2018-1-21",
}

-- Encode the table and write it to /tmp/abc.txt.
local out = assert(io.open("/tmp/abc.txt", "w"))
result = json.encode(result)
out:write(result)
out:close()

-- Read the file back and decode the JSON.
-- NOTE: the original bound the result to `local table`, shadowing
-- Lua's standard `table` library for the rest of the chunk.
local infile = io.open("/tmp/abc.txt", "r")
local readjson = infile:read("*a")
local decoded = json.decode(readjson)
infile:close()

print("ip: " .. decoded["ip"])
2.Another way:
http://lua-users.org/wiki/SaveTableToFile
Save Table to File
function table.save( tbl,filename )
Load Table from File
function table.load( sfile )

Extracting pattern which does not necessarily repeat

I am working with ANSI 835 plain text files and am looking to capture all data in segments which start with “BPR” and end with “TRN” including those markers. A given file is a single line; within that line the segment can, but not always, repeats. I am running the process on multiple files at a time and ideally I would be able to record the file name in which the segment(s) occur.
Here is what I have so far, based on an answer to another question:
#!/bin/sed -nf

# Print every "BPR...TRN" segment from the input.

# Segment contained entirely in the current pattern space:
# strip the surrounding text, print the captured span, start over.
/BPR.*TRN/ {
	s/.*\(BPR.*TRN\).*/\1/p
	d
}

# Otherwise accumulate following lines (N) until the segment completes,
# then print it.  NOTE: the original had [^n]* ("anything but the letter
# n") where [^\n]* ("anything but a newline", GNU sed) was intended.
/from/ {
: next
	N
	/BPR/ {
		s/^[^\n]*\(BPR.*TRN\)[^\n]*/\1/p
		d
	}
	$! b next
}
I run all files I have through this and write the results to a file which looks like this:
BPR*I*393.46*C*ACH*CCP*01*011900445*DA*0000009046*1066033492**01*071923909*DA*72
34692932*20150120~TRN
BPR*I*1611.07*C*ACH*CCP*01*031100209*DA*0000009108*1066033492**01*071923909*DA*7
234692932*20150122~TRN
BPR*I*1415.25*C*CHK************20150108~TRN
BPR*H*0*C*NON************20150113~TRN
BPR*I*127.13*C*CHK************20150114~TRN
BPR*I*22431.28*C*ACH*CCP*01*071000152*DA*99643*1361236610**01*071923909*DA*72346
92932*20150112~TRN
BPR*I*182.62*C*ACH*CCP*01*071000152*DA*99643*1361236610**01*071923909*DA*7234692
932*20150115~TRN
Ideally each line would be prepended with the file name like this:
IDI.Aetna.011415.64539531.rmt:BPR*I*393.46*C*ACH*CCP*01*011900445*DA*0000009046*1066033492**01*071923909*DA*72
34692932*20150120~TRN
IDI.BCBSIL.010915.6434438.rmt:BPR*I*1611.07*C*ACH*CCP*01*031100209*DA*0000009108*1066033492**01*071923909*DA*7
234692932*20150122~TRN
IDI.CIGNA.010215.64058847.rmt:BPR*I*1415.25*C*CHK************20150108~TRN
IDI.GLDRULE.011715.646719.rmt:BPR*H*0*C*NON************20150113~TRN
IDI.MCREIN.011915.6471442.rmt:BPR*I*127.13*C*CHK************20150114~TRN
IDI.UHC.011915.64714417.rmt:BPR*I*22431.28*C*ACH*CCP*01*071000152*DA*99643*1361236610**01*071923909*DA*72346
92932*20150112~TRN
IDI.UHC.011915.64714417.rmt:BPR*I*182.62*C*ACH*CCP*01*071000152*DA*99643*1361236610**01*071923909*DA*7234692
932*20150115~TRN
The last two lines would be an example of a file where the segment pattern repeats.
Again, prepending each line with the file name is ideal. What I really need is to be able to process a given single-line file which has the “BPR…TRN” segment repeating and write all segments in that file to my output file.
Try with awk:
awk '
  # Trim everything before the segment start / after the segment end.
  /BPR/ { sub(/.*BPR/, "BPR") }
  /TRN/ { sub(/TRN.*/, "TRN") }
  # Inside a BPR..TRN range, emit the line prefixed with its file name.
  /BPR/,/TRN/ { printf "%s:%s\n", FILENAME, $0 }
' *.rmt

NodeSchool IO Exercies 3

I've started learning node.js
I'm currently on exercise 3, where we have to, based on a file buffer, calculate the number of new line characters "\n"
I pass the tester but somehow if I create my own file file.txt, I am able to get the buffer, and print out the string, but it is unable to calculate the number of new lines (console.log(newLineNum)) returns 0
Here is the code
//import file system module
var fs = require("fs");

// Read the file named on the command line (argv[2] is the first
// argument after `node script.js`).
var buf = fs.readFileSync(process.argv[2]);

// Work on the string form of the buffer contents.
var str_buff = buf.toString();

// Count '\n' characters with a single pass over the string.
var numNewLines = 0;
for (var ch of str_buff) {
    if (ch === '\n') {
        numNewLines++;
    }
}

console.log(numNewLines);
If I understand your question correctly, you are trying to count the lines of the current file.
From the documentation:
The first element will be 'node', the second element will be the name
of the JavaScript file.
So you should replace process.argv[2] with process.argv[1].
Edit:
If you are passing a parameter for a file name on command-line like:
node server.js 'test.txt'
your code should work without any problem.
Your code is fine. You should check the file that you are using for the input.

Resources