Node.js: Accessing a JSON File - node.js

I'm still new to JSON. I've searched the net and written a function that creates an init file if none exists, but I'm coming up blank on how to search and retrieve the data from the existing file, or how to add or update entries.
So far I can read the file and print the results to the console, so I know the assignment works. It's a global variable, so the data should persist outside of the readFile callback, but when I try to access it later to build the local array I'll pull data from (and use for updating later), it reads as undefined.
fs.readFile(path, 'utf8', (error, data) => {
    if (error) {
        console.log(error);
        return;
    }
    //console.log(JSON.parse(data));
    JSONData = JSON.parse(data);
    for (let i = 0; i < JSONData.length; i++) {
        console.log(i + ": [" + JSONData[i].unique + "] " + JSONData[i].name);
    }
}); //fs.readFile
var playerKey = "KuroTO";
playerKey = playerKey.toLowerCase();
for (let i = 0; i < JSONData.length; i++) {
if (JSONData[i].unique.toLowerCase() == playerKey){
console.log("["+i+"] "+JSONData[i].unique.toLowerCase()+": "+playerKey);
PlayerCard1.push(JSONData[i].userid);//0
PlayerCard1.push(JSONData[i].username);//1
PlayerCard1.push(JSONData[i].unique);//2
PlayerCard1.push(JSONData[i].name);//3
PlayerCard1.push(JSONData[i].avatarurl);//4
PlayerCard1.push(JSONData[i].level);//5
PlayerCard1.push(JSONData[i].Rank);//6
PlayerCard1.push(JSONData[i].henshined);//7
PlayerCard1.push(JSONData[i].Strength);//8
PlayerCard1.push(JSONData[i].Perception);//9
PlayerCard1.push(JSONData[i].Endurance);//10
PlayerCard1.push(JSONData[i].Wisdom);//11
PlayerCard1.push(JSONData[i].Intelligence)//12;
PlayerCard1.push(JSONData[i].Luck)//13;
PlayerCard1.push(JSONData[i].Agility)//14;
PlayerCard1.push(JSONData[i].Flexability)//15;
PlayerCard1.push(JSONData[i].RatedSpeed)//16;
};//if unique matches
};//for
This is the pseudocode concept I'm trying to implement:
if (JSONData.stringify.unique == {SearchUID}){toonname = JSONData.stringify.name;}
As I understand it, you can't really append; you just rewrite the whole file again with the new data. I think I can figure that out on my own once I can figure out how to read the file into an array I can search like above.

To read JSON, simply require the file.
JSON:
{
"key": "H"
}
JS:
let jsonFile = require("./path/to/json");
console.log(jsonFile.key); // H
Editing is just as simple.
let jsonFile = require("./path/to/json");
jsonFile.key = "A"
console.log(jsonFile.key) // A
Saving edits requires use of FileSystem:
const fs = require("fs")
let jsonFile = require("./path/to/json");
jsonFile.key = "A"
// first argument is the file path
// second argument is the JSON to write - the file gets overwritten,
// so just JSON.stringify() the required object.
// third argument is an error callback
fs.writeFile("./path/to/jsonFile", JSON.stringify(jsonFile), (err) => {
if (err) throw new Error(err);
});
This can also be used to slightly clean up your current init function if you wanted, but that's up to you of course.
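For example, a minimal init could look something like the sketch below. This is only a sketch, since your existing init function isn't shown; the file path, the empty-array default, and the use of the synchronous fs API (for brevity) are all assumptions:
const fs = require("fs");
const initFilePath = "./playerdata.json"; // hypothetical path

function initFile() {
    // create the file with an empty array if it does not already exist
    if (!fs.existsSync(initFilePath)) {
        fs.writeFileSync(initFilePath, JSON.stringify([]));
    }
}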

Related

Can't get variable value in Node.js

I tried to make the function async, but when I print attacks it prints out {} with nothing in it. When I print the values right after adding them to attacks, they do print. Why is that, and how can I use the values?
var fs = require('fs');
var http = require('http');

var attacks = {};
var phase_name;
var directory = 'cti-master\\enterprise-attack\\attack-pattern\\';

// getting all files names.
async function getData(directory){
    fs.readdir(directory, (err, files) => {
        if(err) { return; }
        var fileNum = 0;
        // opening all the files and sorting the data in them.
        while (fileNum < files.length - 1)
        {
            fs.readFile(directory + files[fileNum], 'utf8', (err, data) =>
            {
                // parsing the data from json.
                var fileData = JSON.parse(data);
                // sometimes there is no phase name.
                if(fileData['objects'][0]['kill_chain_phases'] == undefined){ phase_name = undefined; }
                else{ phase_name = fileData['objects'][0]['kill_chain_phases'][0]['phase_name']; }
                // sorting data by name to make it easier later.
                attacks[fileData['objects'][0]['name']] = {
                    id: fileData['objects'][0]['id'],
                    type: fileData['objects'][0]['type'],
                    description: fileData['objects'][0]['description'],
                    x_mitre_platforms: fileData['objects'][0]['x_mitre_platforms'],
                    x_mitre_detection: fileData['objects'][0]['x_mitre_detection'],
                    phase_name: phase_name
                };
            });
            fileNum += 1;
        };
    });
    var keys = Object.keys(attacks);
    console.log(attacks);
}
getData(directory);
The reason for the empty log is that Node does not wait for the asynchronous readFile callbacks inside the while loop to finish before your final console.log runs, hence the empty object. Basically, you can improve this code by using the async-await method.
But if you want to stick with this code, I suggest the following logic: move your log inside an if block whose condition is "print only once the expected file count has been reached".
For example:
if (fileNum === files.length) {
    var keys = Object.keys(attacks);
    console.log(attacks);
}
Now the log prints only when that condition is satisfied, which means after all the files have been processed.
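As a sketch of the async-await approach mentioned above, using the built-in fs.promises API (the directory path and the fileData structure are taken from the question):
const fsp = require('fs').promises;

const directory = 'cti-master\\enterprise-attack\\attack-pattern\\';

async function getData(directory) {
    const attacks = {};
    // await the directory listing, then each file read in turn
    const files = await fsp.readdir(directory);
    for (const file of files) {
        const data = await fsp.readFile(directory + file, 'utf8');
        const obj = JSON.parse(data)['objects'][0];
        const phases = obj['kill_chain_phases'];
        attacks[obj['name']] = {
            id: obj['id'],
            type: obj['type'],
            description: obj['description'],
            x_mitre_platforms: obj['x_mitre_platforms'],
            x_mitre_detection: obj['x_mitre_detection'],
            phase_name: phases === undefined ? undefined : phases[0]['phase_name']
        };
    }
    // all reads have finished by the time we get here
    console.log(attacks);
    return attacks;
}

getData(directory);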

GCF "No Such Object" when the Object in question was just created

I'm setting up a Google Cloud Functions (GCF) function that gets triggered often enough that there are multiple instances running at the same time.
I am getting errors from a readStream saying that the source file of the stream does not exist, but at this point in my program I've actually just created it.
I've made sure the file exists before the start of the stream by console.log()-ing the file JSON, so the file does actually exist. I've also made sure that the file I'm trying to access has finished being written by a previous stream with an await, but no dice.
EDIT: The code now contains the entire script. The section that seems to be throwing the error is the function columnDelete().
var parse = require('fast-csv');
var Storage = require('@google-cloud/storage');
var Transform = require('readable-stream').Transform;
var storage = new Storage();
var bucket = storage.bucket('<BUCKET>');
const DMSs = ['PBS','CDK','One_Eighty','InfoBahn'];
class DeleteColumns extends Transform {
    constructor() {
        super({ objectMode: true })
    }

    _transform(row, enc, done) {
        //create an array 2 elements shorter than received
        let newRow = new Array(row.length - 2);
        //write all data but the first two columns
        for (let i = 0; i < newRow.length; i++) {
            newRow[i] = row[i + 2];
        }
        this.push(newRow.toString() + '\n');
        done();
    }
}
function rename(file, originalFile, DMS) {
    return new Promise((resolve, reject) => {
        var dealer;
        var date;
        var header = true;
        var parser = parse({ delimiter: ",", quote: '\\' });
        //for each row of data
        var stream = originalFile.createReadStream();
        stream.pipe(parser)
            .on('data', (row) => {
                //if this is the first line do nothing
                if (header) {
                    header = false;
                }
                //otherwise record the contents of the first two columns and then destroy the stream
                else {
                    dealer = row[0].toString().replace('"', '').replace('"', '');
                    date = row[1].toString().replace('"', '').replace('"', '');
                    stream.end();
                }
            })
            .on('finish', function () {
                var newName = dealer + ' ' + date + '_' + DMS + 'temp.csv';
                //if this was not triggered by the renaming of a file
                if (!file.name.includes(dealer) && !file.name.includes(':')) {
                    console.log('Renamed ' + file.name);
                    originalFile.copy(newName);
                    originalFile.copy(newName.replace('temp', ''));
                } else {
                    newName = 'Not Renamed';
                    console.log('Oops, triggered by the rename');
                }
                resolve(newName);
            });
    });
}
function columnDelete(fileName) {
    return new Promise((resolve, reject) => {
        console.log('Deleting Columns...');
        console.log(bucket.file(fileName));
        var parser = parse({ delimiter: ",", quote: '\\' });
        var del = new DeleteColumns();
        var temp = bucket.file(fileName);
        var final = bucket.file(fileName.replace('temp', ''));
        //for each row of data
        temp.createReadStream()
            //parse the csv
            .pipe(parser)
            //delete first two columns
            .pipe(del)
            //write to new file
            .pipe(final.createWriteStream()
                .on('finish', function () {
                    console.log('Columns Deleted');
                    temp.delete();
                    resolve();
                })
            );
    });
}
exports.triggerRename = async (data, context) => {
    var DMS = 'Triple';
    var file = data;
    //if not a temporary file
    if (!file.name.includes('temp')) {
        //create a new File object from the name of the data passed
        const originalFile = bucket.file(file.name);
        //identify which database this data is from
        DMSs.forEach(function (database) {
            if (file.name.includes(database)) {
                DMS = database;
            }
        });
        //rename the file
        var tempName = await rename(file, originalFile, DMS);
        //if it was renamed, delete the extra columns
        if (!tempName.includes('Not Renamed')) {
            await columnDelete(tempName);
        }
    } else if (file.name.includes('undefined')) {
        console.log(file.name + ' is invalid. Deleted.');
        bucket.file(file.name).delete();
    } else {
        console.log(file.name + ' is a temporary file. Did not rename.');
    }
};
What I expect to be output is as below:
Deleting Columns...
Columns Deleted
Nice and simple, letting us know when it has started and finished.
However, I get this instead:
Deleting Columns...
ApiError: No such object: <file> at at Object.parseHttpRespMessage(......)
finished with status: 'crash'
Which is not wanted for obvious reasons. My next thought is to make sure that the file hasn't been deleted by another instance of the script midway through, but to do that I would have to check to see if the file is being used by another stream, which is, to my knowledge, not possible.
Any ideas out there?
When I was creating the file I called the asynchronous function copy() and moved on, meaning that when trying to access the file it was not finished copying. Unknown to me, the File Object is a reference variable, and did not actually contain the file itself. While the file was copying, the pointer was present but it was pointing to an unfinished file.
Thus, "No Such Object". To fix this, I simply used a callback to make sure that the copying was finished before I was accessing the file.
Thanks to Doug Stevenson for letting me know about the pointer!

How to append some data directly to a file with node.js?

Let's say we have:
a file data.json or data.txt with this content: {"data":[]}
and an array of paths: ["C:\\path1", "C:\\path2", "C:\\path3"]
Question
How would we append the array of paths into this file with node.js data stream (or whatnot) so that we get this in the end:
{"data":["C:\\path1", "C:\\path2", "C:\\path3"]}
Code
let filePath = 'C:\test\data.json'
let paths = ["C:\\path1", "C:\\path2", "C:\\path3"]
for (let index = 0; index < paths.length; index++) {
// ... streaming paths to the file one by one
}
I cannot put the paths in the file without a loop - in my project I have walkdir(drive, options, (path) => {}) instead of the for loop. It also returns paths one by one like the for loop above; the loop is just for demonstration.
Because it is JSON, you can't actually append to the file. You have to read out the entire document, parse the JSON to a POJO, make your changes, stringify the JSON, and write it back.
import { readFile, writeFile } from 'fs';

readFile(filePath, (err, data) => {
    if (err) throw new Error(err);
    const json = JSON.parse(data);
    paths.forEach(path => json.data.push(path));
    writeFile(filePath, JSON.stringify(json), err => { /* handle err */ });
});
If it was a plaintext file, you could do an append by writing to it and setting the flag option to a (for append).
writeFile(filePath, 'text to append', { flag: 'a' }, err => { /* handle err */ });
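For what it's worth, fs.appendFile is a shorthand for the same append behaviour:
import { appendFile } from 'fs';

// appends the text, creating the file first if it does not exist
appendFile(filePath, 'text to append', err => { /* handle err */ });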

Using socketio-file-upload to upload multiple files

I'm using Node.js with socket.io and socketio-file-upload to upload multiple files, and it works great! However, I'm having an issue: I'm trying to save the name attribute of the input these files come from, so I can save them into my DB.
When I upload one or more files, I can't seem to access the input field name or anything else that tells me which file came from which input field.
Here is my front:
var uploader = new SocketIOFileUpload(socket);
var array_files_lvl_3 = [
    document.getElementById("l3_id_front"),
    document.getElementById("l3_id_back"),
    document.getElementById("l3_address_proof_1"),
    document.getElementById("l3_address_proof_2"),
    document.getElementById("l3_passport")
];
uploader.listenOnArraySubmit(document.getElementById("save_level_3"), array_files_lvl_3);
And here is my back:
var uploader = new siofu();
uploader.dir = "uploads/userL3";
uploader.listen(socket);
uploader.on('saved', function(evnt){
    console.log(evnt);
    //this "event" variable has a lot of information
    //but none of it tells me the input name where it came from.
});
This is what the "evnt" variable holds:
Unfortunately, the library doesn't send that information, so there is no existing config option you can use; this needs a code modification.
client.js:374
var _fileSelectCallback = function (event) {
    var files = event.target.files || event.dataTransfer.files;
    event.preventDefault();
    var source = event.target;
    _baseFileSelectCallback(files, source);
client.js:343
var _baseFileSelectCallback = function (files, source) {
    if (files.length === 0) return;

    // Ensure existence of meta property on each file
    for (var i = 0; i < files.length; i++) {
        if (source) {
            if (!files[i].meta) files[i].meta = {
                sourceElementId: source.id || "",
                sourceElementName: source.name || ""
            };
        } else {
            if (!files[i].meta) files[i].meta = {};
        }
    }
After these changes I am able to get the details in event.file.meta
I'm the author of socketio-file-upload.
It looks like the specific input field is not currently being recorded, but this would not be a hard feature to add. Someone opened a new issue and left a backpointer to this SO question.
A workaround would be to directly use submitFiles instead of listenOnArraySubmit. Something like this might work (untested):
// add a manual listener on your submit button
document.getElementById("save_level_3").addEventListener("click", () => {
let index = 0;
for (let element of array_files_lvl_3) {
let files = element.files;
for (let file of files) {
file.meta = { index };
}
uploader.submitFiles(files);
index++;
}
});
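On the server side, the meta attached on the client should then be visible on the saved event, something like the sketch below (based on the handler from the question; the exact contents of evnt.file are whatever the library provides):
uploader.on('saved', function (evnt) {
    // evnt.file.meta carries whatever was attached on the client,
    // e.g. the index of the originating input field
    console.log(evnt.file.name, evnt.file.meta);
});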

Add a mongo request into a file and archive this file

I'm having some trouble while trying to use streams with a MongoDB request. I want to:
Get the results from a collection
Put these results into a file
Put this file into a CSV
I'm using the archiver package for file compression. The file contains CSV-formatted values, so for each row I have to format the values as CSV.
My function takes a res (output) parameter, which means that I can send the result to a client directly. For the moment, I can put the results into a file without streams, but I think I'll run into memory trouble with a large amount of data, which is why I want to use streams.
Here is my code (with no stream)
function getCSV(res, query) {
    <dbRequest>.toArray(function(err, docs){
        var csv = '';
        if(docs !== null){
            for(var i = 0; i < docs.length; i++){
                var line = '';
                for(var index in docs[i]){
                    if(docs[i].hasOwnProperty(index) && (index !== '_id')){
                        if(line !== '') line += ',';
                        line += docs[i][index];
                    }
                }
                console.log("line", line);
                csv += line += '\r\n';
            }
        }
    }.bind(this));
    fileManager.addToFile(csv);
    archiver.initialize();
    archiver.addToArchive(fileManager.getName());
    fileManager.deleteFile();
    archiver.sendToClient(res);
};
Once the CSV is complete, I add it to a file with a FileManager object, which handles file creation and manipulation. The addToArchive method adds the file to the current archive, and the sendToClient method sends the archive through the output (the res parameter of the function).
I'm using Express.js so I call this method with a server request.
Sometimes the file contains data, sometimes it is empty; could you explain why?
I'd like to understand how streams work, and how I could implement this in my code.
Regards
I'm not quite sure why you're having issue with the data sometimes showing up, but here is a way to send it with a stream. A couple of points of info before the code:
.stream({transform: someFunction}) takes a stream of documents from the database and runs whatever data manipulation you want on each document as it passes through the stream. I put this function into a closure to make it easier to keep track of the column headers, as well as to allow you to pick and choose which keys from the document to use as columns. This will allow you to use it on different collections.
Here is the function that runs on each document as it passes through:
// this is a closure containing knowledge of the keys you want to use,
// as well as whether or not to add the headers before the current line
// this is a closure containing knowledge of the keys you want to use,
// as well as whether or not to add the headers before the current line
function createTransformFunction(keys) {
    var hasHeaders = false;
    // this is the function that is run on each document
    // as it passes through the stream
    return function(document) {
        var values = [];
        var line;
        keys.forEach(function(key) {
            // explicitly compare against undefined;
            // a falsy check like !document[key] would also drop the number 0
            if (document[key] !== undefined) {
                values.push(document[key]);
            }
            else {
                values.push("");
            }
        });
        // add the column headers only on the first document
        if (!hasHeaders) {
            line = keys.join(",") + "\r\n";
            line += values.join(",");
            hasHeaders = true;
        }
        else {
            // add the line breaks at the beginning of each line
            // to avoid having an extra line at the end
            line = "\r\n";
            line += values.join(",");
        }
        // return the transformed line to the stream and move on to the next document
        return line;
    };
}
You pass that function into the transform option for the database stream. Now assuming you have a collection of people with the keys _id, firstName, lastName:
function (req, res) {
    // create a transform function with the keys you want to keep
    var transformPerson = createTransformFunction(["firstName", "lastName"]);
    // Create the mongo read stream that uses your transform function
    var readStream = personCollection.find({}).stream({
        transform: transformPerson
    });
    // write stream to file
    var localWriteStream = fs.createWriteStream("./localFile.csv");
    readStream.pipe(localWriteStream);
    // write stream to download
    res.setHeader("content-type", "text/csv");
    res.setHeader("content-disposition", "attachment; filename=downloadFile.csv");
    readStream.pipe(res);
}
If you hit this endpoint, you'll trigger a download in the browser and write a local file. I didn't use archiver because I think it would add a level of complexity and take away from the concept of what's actually happening. The streams are all there, you'd just need to fiddle with it a bit to work it in with archiver.
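If you do want the archiver step from the original question, one way to work it in (a sketch only; the zip format and file names are assumptions) is to append the read stream as an entry in an archive and pipe the archive to the response, instead of piping the read stream directly:
const archiver = require('archiver');

function (req, res) {
    var transformPerson = createTransformFunction(["firstName", "lastName"]);
    var readStream = personCollection.find({}).stream({ transform: transformPerson });

    // build a zip archive on the fly and stream it to the client
    var archive = archiver('zip');
    res.setHeader("content-type", "application/zip");
    res.setHeader("content-disposition", "attachment; filename=download.zip");
    archive.pipe(res);

    // add the CSV stream as a single entry in the archive
    archive.append(readStream, { name: 'people.csv' });
    archive.finalize();
}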
