node console.log() output array in one line

I use node v10.6.0.
Here's my code:
console.log([{a:1, b:2}, {a:1, b:2}, {a:1, b:2}])
console.log([{a:1, b:2}, {a:1, b:2}, {a:1, b:2}, {a:1, b:2}, {a:1, b:2}, {a:1, b:2}, {a:1, b:2}, {a:1, b:2}, {a:1, b:2}])
The output is as follows:
[ { a: 1, b: 2 }, { a: 1, b: 2 }, { a: 1, b: 2 } ]
[ { a: 1, b: 2 },
  { a: 1, b: 2 },
  { a: 1, b: 2 },
  { a: 1, b: 2 },
  { a: 1, b: 2 },
  { a: 1, b: 2 },
  { a: 1, b: 2 },
  { a: 1, b: 2 },
  { a: 1, b: 2 } ]
How can I make the second array print on one line instead of spreading across multiple lines?

Although the output is not exactly the same as console.log's, you can use JSON.stringify to convert the array to a string, then print it:
console.log(JSON.stringify(array))
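For example, the array from the question then prints on a single line, in compact JSON notation rather than node's inspect format:
console.log(JSON.stringify([{a:1, b:2}, {a:1, b:2}, {a:1, b:2}]))
// [{"a":1,"b":2},{"a":1,"b":2},{"a":1,"b":2}]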
It cannot process circular structures, however.

I suggest using the following:
const util = require('util')
console.log(util.inspect(array, {breakLength: Infinity}))
Plus, util.inspect has a bunch of extra options to format and limit the output:
https://nodejs.org/api/util.html#utilinspectobject-options
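For instance (a sketch; breakLength, depth, and maxArrayLength are all standard inspect options documented at that link):
console.log(util.inspect(array, { breakLength: Infinity, depth: null, maxArrayLength: null }))
// depth: null removes the nesting limit; maxArrayLength: null prints every element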

Why not use console.table instead?
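For example, with the nine-element array from the question:
console.table([{a:1, b:2}, {a:1, b:2}, {a:1, b:2}, {a:1, b:2}, {a:1, b:2}, {a:1, b:2}, {a:1, b:2}, {a:1, b:2}, {a:1, b:2}])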
This gives this nice table: https://tio.run/##y0osSyxOLsosKNHNy09J/Z9YVJRYaRtdnWhlqKOQZGVUq6NAW3bs/#T8vOL8nFS9ksSknFQNsAM0//8HAA
┌─────────┬───┬───┐
│ (index) │ a │ b │
├─────────┼───┼───┤
│    0    │ 1 │ 2 │
│    1    │ 1 │ 2 │
│    2    │ 1 │ 2 │
│    3    │ 1 │ 2 │
│    4    │ 1 │ 2 │
│    5    │ 1 │ 2 │
│    6    │ 1 │ 2 │
│    7    │ 1 │ 2 │
│    8    │ 1 │ 2 │
└─────────┴───┴───┘


Handling list of maps in for loop in terraform

I have the following locals file. I need to get the child and parent names separately in a for_each in Terraform.
locals:
{
  l3_crm:
  [
    { parent: "crm", child: ["crm-sap", "crm-sf"] },
    { parent: "fin", child: ["fin-mon"] },
  ]
}
For the following OU creation code in AWS, parent_id needs the parent name from the locals, and ou_name needs the corresponding child name from the iteration.
module "l3_crm" {
source = "./modules/ou"
for_each = { for idx, val in local.l3_crm : idx => val }
ou_name = [each.value.child]
parent_id = module.l2[each.key.parent].ou_ids[0]
depends_on = [module.l2]
ou_tags = var.l2_ou_tags
}
I get the following error:
│ Error: Unsupported attribute
│
│ on main.tf line 30, in module "l3_rnd":
│ 30: parent_id = module.l2[each.key.parent].ou_ids[0]
│ ├────────────────
│ │ each.key is a string, known only after apply
│
│ This value does not have any attributes.
╵
Let me know what I am doing wrong in the for loop.
I tried this as well:
module "l3_rnd" {
source = "./modules/ou"
for_each = { for parent, child in local.l3_crm : parent => child }
ou_name = [each.value]
parent_id = module.l2[each.key].ou_ids[0]
depends_on = [module.l2]
ou_tags = var.l2_ou_tags
}
with the local.tf:
locals {
  l3_crm = [
    { "rnd" : ["crm-sap", "crm-sf"] },
    { "trade" : ["fin-mon"] }
  ]
}
I get these errors:
╷
│ Error: Invalid value for module argument
│
│ on main.tf line 28, in module "l3_crm":
│ 28: ou_name = [each.value]
│
│ The given value is not suitable for child module variable "ou_name" defined
│ at modules\ou\variables.tf:1,1-19: element 0: string required.
╵
╷
│ Error: Invalid value for module argument
│
│ on main.tf line 28, in module "l3_crm":
│ 28: ou_name = [each.value]
│
│ The given value is not suitable for child module variable "ou_name" defined
│ at modules\ou\variables.tf:1,1-19: element 0: string required.
╵
╷
│ Error: Invalid index
│
│ on main.tf line 29, in module "l3_crm":
│ 29: parent_id = module.l2[each.key].ou_ids[0]
│ ├────────────────
│ │ each.key is "1"
│ │ module.l2 is object with 2 attributes
│
│ The given key does not identify an element in this collection value.
╵
╷
│ Error: Invalid index
│
│ on main.tf line 29, in module "l3_crm":
│ 29: parent_id = module.l2[each.key].ou_ids[0]
│ ├────────────────
│ │ each.key is "0"
│ │ module.l2 is object with 2 attributes
│
│ The given key does not identify an element in this collection value.
╵
time=2022-11-11T13:24:15Z level=error msg=Hit multiple errors:
Hit multiple errors:
exit status 1
With your current structure you can reconstruct the map in your meta-argument like:
for_each = { for l3_crm in local.l3_crm : l3_crm.parent => l3_crm.child }
to access the values of each key in the list elements and reconstruct them into a map of parent keys and child values.
You can also optimize the structure like:
l3_crm = [
  { "crm" = ["crm-sap", "crm-sf"] },
  { "fin" = ["fin-mon"] },
]
and then:
for_each = { for parent, child in local.l3_crm : parent => child }
Note that you cannot simply convert to a set type with toset, because set(map) is not allowed as an argument value type.
Either way, the references are updated accordingly (each.key is the parent name, each.value is the list of child names):
ou_name   = each.value
parent_id = module.l2[each.key].ou_ids[0]
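Putting it together with the original structure, a sketch of the full module block (module names and arguments as in the question):

module "l3_crm" {
  source     = "./modules/ou"
  for_each   = { for l3_crm in local.l3_crm : l3_crm.parent => l3_crm.child }
  ou_name    = each.value                    # list of child OU names, e.g. ["crm-sap", "crm-sf"]
  parent_id  = module.l2[each.key].ou_ids[0] # parent name is now the map key
  depends_on = [module.l2]
  ou_tags    = var.l2_ou_tags
}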

How to add Column names in a Polars DataFrame while using CsvReader

I can read a CSV file which does not have column headers, with the following Rust code using polars:
use polars::prelude::*;
fn read_wine_data() -> Result<DataFrame> {
    let file = "datastore/wine.data";
    CsvReader::from_path(file)?
        .has_header(false)
        .finish()
}

fn main() {
    let df = read_wine_data();
    match df {
        Ok(content) => println!("{:?}", content.head(Some(10))),
        Err(error) => panic!("Problem reading file: {:?}", error),
    }
}
But now I want to add column names to the dataframe, either while reading or after reading. How can I add them? Here is a column name vector:
let COLUMN_NAMES = vec![
    "Class label", "Alcohol",
    "Malic acid", "Ash",
    "Alcalinity of ash", "Magnesium",
    "Total phenols", "Flavanoids",
    "Nonflavanoid phenols",
    "Proanthocyanins",
    "Color intensity", "Hue",
    "OD280/OD315 of diluted wines",
    "Proline",
];
How can I add these names to the dataframe? The data can be downloaded with the following command:
wget https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data
This seemed to work, by creating a schema object and passing it in with the with_schema method on the CsvReader:
use polars::prelude::*;
use polars::datatypes::DataType;
fn read_wine_data() -> Result<DataFrame> {
    let file = "datastore/wine.data";
    let mut schema: Schema = Schema::new();
    schema.with_column("wine".to_string(), DataType::Float32);

    CsvReader::from_path(file)?
        .has_header(false)
        .with_schema(&schema)
        .finish()
}

fn main() {
    let df = read_wine_data();
    match df {
        Ok(content) => println!("{:?}", content.head(Some(10))),
        Err(error) => panic!("Problem reading file: {:?}", error),
    }
}
Granted I don't know what the column names should be, but this is the output I got when adding the one column:
shape: (10, 1)
┌──────┐
│ wine │
│ --- │
│ f32 │
╞══════╡
│ 1.0 │
├╌╌╌╌╌╌┤
│ 1.0 │
├╌╌╌╌╌╌┤
│ 1.0 │
├╌╌╌╌╌╌┤
│ 1.0 │
├╌╌╌╌╌╌┤
│ ... │
├╌╌╌╌╌╌┤
│ 1.0 │
├╌╌╌╌╌╌┤
│ 1.0 │
├╌╌╌╌╌╌┤
│ 1.0 │
├╌╌╌╌╌╌┤
│ 1.0 │
└──────┘
Here is the full solution that works for me:
use std::path::PathBuf;

use polars::prelude::*;
use polars::datatypes::DataType::{Float64, Int64};

fn read_csv_into_df(path: PathBuf) -> Result<DataFrame> {
    let schema = Schema::from(vec![
        Field::new("class_label", Int64),
        Field::new("alcohol", Float64),
        Field::new("malic_acid", Float64),
        Field::new("ash", Float64),
        Field::new("alcalinity_of_ash", Float64),
        Field::new("magnesium", Float64),
        Field::new("total_phenols", Float64),
        Field::new("flavanoids", Float64),
        Field::new("nonflavanoid_phenols", Float64),
        Field::new("proanthocyanins", Float64), // in the dataset's column list; missing here would misalign the 14 columns
        Field::new("color_intensity", Float64),
        Field::new("hue", Float64),
        Field::new("od280/od315_of_diluted_wines", Float64),
        Field::new("proline", Float64),
    ]);

    CsvReader::from_path(path)?
        .has_header(false)
        .with_schema(&schema)
        .finish()
}
I had to use Field and a data type for each column to create a Schema, then pass that schema to CsvReader to read the data.
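Alternatively, a sketch that skips the schema entirely and renames after reading (this assumes a polars version that provides DataFrame::set_column_names; read_wine_data_renamed is just an illustrative name):

use polars::prelude::*;

fn read_wine_data_renamed() -> Result<DataFrame> {
    let column_names = [
        "Class label", "Alcohol", "Malic acid", "Ash",
        "Alcalinity of ash", "Magnesium", "Total phenols", "Flavanoids",
        "Nonflavanoid phenols", "Proanthocyanins", "Color intensity", "Hue",
        "OD280/OD315 of diluted wines", "Proline",
    ];
    // read with auto-generated names, then rename in place
    let mut df = CsvReader::from_path("datastore/wine.data")?
        .has_header(false)
        .finish()?;
    df.set_column_names(&column_names)?;
    Ok(df)
}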

Terraform reading YAML file and assigning a local variable

I am trying to read a YAML file and assign values to local variables. The code below gives an `Invalid index` error. How do I fix it?
YAML file server.yaml
vm:
  - name: vmingd25
  - system_cores: 4
Code block
locals {
  vm_raw  = yamldecode(file("server.yaml"))["vm"]
  vm_name = local.vm_raw["name"]
  vm_cpu  = local.vm_raw["system_cores"]
}
Error message
╷
│ Error: Invalid index
│
│ on main.tf line 16, in locals:
│ 16: vm_name= local.vm_raw["name"]
│ ├────────────────
│ │ local.vm_raw is tuple with 10 elements
│
│ The given key does not identify an element in this collection value: a number is required.
╵
╷
│ Error: Invalid index
│
│ on main.tf line 17, in locals:
│ 17: vm_cpu = local.vm_raw["system_cores"]
│ ├────────────────
│ │ local.vm_raw is tuple with 10 elements
│
│ The given key does not identify an element in this collection value: a number is required.
Your YAML is equivalent to the following JSON:
{
  "vm": [
    { "name": "vmingd25" },
    { "system_cores": 4 }
  ]
}
As you can see, the vm element is a list of objects because you are using the - character. This means you need to either:
Change your YAML to remove the - list definition, e.g.
vm:
  name: vmingd25
  system_cores: 4
This would turn the list into a dictionary, so you could index it with keys as you have done in your question. OR
If you cannot change the YAML then you will need to index with an integer. This might work if your YAML never changes, but it is definitely not recommended.
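If the list must stay, here is a sketch of both workarounds (the merge(...) expansion requires Terraform 0.15 or later):

locals {
  vm_raw = yamldecode(file("server.yaml"))["vm"]

  # integer indexing: fragile, breaks if the YAML order changes
  vm_name_fragile = local.vm_raw[0]["name"]

  # better: merge the single-key maps back into one map, then index by key
  vm      = merge(local.vm_raw...)
  vm_name = local.vm["name"]
  vm_cpu  = local.vm["system_cores"]
}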

MongoDB aggregate sum returns invalid result

This is the MongoDB collection I am using. When it tries to calculate the sum of qty it returns the wrong result, and I don't know why.
┌─────────┬───────────────┬───────┬─────────┬────────┐
│ (index) │ code │ qty │ product │ branch │
├─────────┼───────────────┼───────┼─────────┼────────┤
│ 0 │ '114=>115-41' │ 0.34 │ '4009' │ '1' │
│ 1 │ '114=>115-41' │ -0.02 │ '4009' │ '1' │
│ 2 │ '114=>115-41' │ -0.06 │ '4009' │ '1' │
│ 3 │ '114=>115-41' │ -0.12 │ '4009' │ '1' │
│ 4 │ '114=>115-41' │ -0.14 │ '4009' │ '1' │
└─────────┴───────────────┴───────┴─────────┴────────┘
The query:
let AggreArray = [
  {
    $group: {
      _id: {
        code: "$code",
        product: "$product",
        branch: "$branch",
      },
      qty: { $sum: "$qty" },
    }
  }
]
db.coll.aggregate(AggreArray).toArray((err, result) => {
  console.log(result);
});
Instead of returning qty: 0, it returns:
[
  {
    _id: {
      product: 5fec20bc26b1650017003f4a,
      code: '114=>115-41',
      branch: 5fec43f269cdbd300ccf9275
    },
    qty: 2.7755575615628914e-17, // expected 0
  }
]
I don't know why. Please help me; I have been trying to fix this all day and I cannot.
In MongoDB, all numbers are 64-bit floating-point double values by default. The NumberDecimal type is used to explicitly specify 128-bit decimal-based floating-point values capable of emulating decimal rounding with exact precision. This data type can give you the desired result of 0 (zero).
That is, in your aggregation you need to convert qty to a NumberDecimal type. This is a two-step process: first the number is converted to a string, and then to NumberDecimal (because the NumberDecimal constructor expects a string as its parameter).
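You can see the underlying issue outside MongoDB too; a quick sketch in plain Node, since both use IEEE 754 doubles:
console.log(0.34 - 0.02 - 0.06 - 0.12 - 0.14);
// prints a tiny non-zero residue on the order of 1e-17, not 0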
So, your aggregation works with the following changes to return a result of 0 (zero):
db.coll.aggregate([
  {
    $match: {
      product: ObjectId('5fec20bc26b1650017003f4a'),
      code: '114=>115-41',
      branch: ObjectId('5fec43f269cdbd300ccf9275'),
    }
  },
  {
    $addFields: { qty: { $toDecimal: { $toString: "$qty" } } }
  },
  {
    $group: {
      _id: { code: "$code", product: "$product", branch: "$branch" },
      qty: { $sum: "$qty" },
    }
  },
  {
    $addFields: { qty: { $toDouble: "$qty" } }
  }
])
NOTE: In the MongoDB NodeJS driver, NumberDecimal is represented as Decimal128.
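For instance, a minimal sketch with the Node driver (Decimal128 comes from the bson package the driver re-exports):
const { Decimal128 } = require('mongodb');
const qty = Decimal128.fromString('0.34'); // exact decimal value, no binary rounding error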

Keep only N records when inserting new records in Couchbase

I have a huge bucket that contains all users' notification data, like this:
┌────┬─────────┬─────────────────────────┐
│ id │ user_id │ data │
├────┼─────────┼─────────────────────────┤
│ 1 │ 1 │ {"somekey":"someValue"} │
│ 2 │ 2 │ {"somekey":"someValue"} │
│ 3 │ 1 │ {"somekey":"someValue"} │
│ 4 │ 1 │ {"somekey":"someValue"} │
│ 5 │ 1 │ {"somekey":"someValue"} │
│ 6 │ 2 │ {"somekey":"someValue"} │
│ 7 │ 2 │ {"somekey":"someValue"} │
│ 8 │ 1 │ {"somekey":"someValue"} │
│ 9 │ 2 │ {"somekey":"someValue"} │
│ 10 │ 2 │ {"somekey":"someValue"} │
└────┴─────────┴─────────────────────────┘
So, any time I want to insert a new record, for example for user_id=2, I want to remove the earliest record for user_id=2 so that each user has at most N records (of course, if the total number of records is less than N, nothing is removed).
@ehsan, another alternative is to use the Eventing Service and feed your documents to an Eventing function. You could use a compound key of both the id (for the notification) and the user_id.
For example, I use keys of the form "nu:#:#". Your data or notifications would then be processed by Eventing to build up a user document like @MatthewGroves proposed.
In fact you could optionally delete your input documents when they are successfully added.
Consider your input keys and documents as follows:
┌──────────┬─────────────────────────┐
│ key │ data │
├──────────┼─────────────────────────┤
│ nu:1:u1 │ {"somekey":"someValue"} │
│ nu:2:u2 │ {"somekey":"someValue"} │
│ nu:3:u1 │ {"somekey":"someValue"} │
│ nu:4:u1 │ {"somekey":"someValue"} │
│ nu:5:u1 │ {"somekey":"someValue"} │
│ nu:6:u2 │ {"somekey":"someValue"} │
│ nu:7:u2 │ {"somekey":"someValue"} │
│ nu:8:u1 │ {"somekey":"someValue"} │
│ nu:9:u2 │ {"somekey":"someValue"} │
│ nu:10:u2 │ {"somekey":"someValue"} │
└──────────┴─────────────────────────┘
Now we can make an Eventing function with a parameter, say MAX_ARRAY = 3 (adjust to what you want), to control the max number of notifications to keep on a per-user basis.
Note I also added a parameter MAX_RETRY = 16 to retry the operation if there was contention (sort of a poor man's CAS, done by checking a field holding Math.random()).
I assume the notification ids always increase; since JavaScript safely handles integers up to 2^53 - 1 (or 9,007,199,254,740,991), this shouldn't be an issue.
A working Eventing Function is shown below:
/*
KEY nu:10:2 // Example input document where 10 is the notify_id and 2 is the user_id
{
    "somekey": "someValue"
}

KEY user_plus_ntfys:2 // Example output doc where "id": 2 is the user_id built from above
{
    "type": "user_plus_ntfys",
    "id": 2,
    "notifications" : [
        {"nid": 7,  "doc": { "somekey": "someValue"}},
        {"nid": 9,  "doc": { "somekey": "someValue"}},
        {"nid": 10, "doc": { "somekey": "someValue"}}
    ]
}
*/
function OnUpdate(doc, meta) {
    const MAX_RETRY = 16;
    const MAX_ARRAY = 3;

    // will process ALL data like nu:#:#
    var parts = meta.id.split(':');
    if (!parts || parts.length != 3 || parts[0] != "nu") return;
    var ntfy_id = parseInt(parts[1]);
    var user_id = parseInt(parts[2]);
    // log("Doc created/updated " + meta.id + " ntfy_id " + ntfy_id + " user_id " + user_id);

    var insert_json = {"nid": ntfy_id, doc};
    for (var tries = 0; tries < MAX_RETRY; tries++) {
        var user_doc = addToNtfyArray(src_bkt, user_id, insert_json, MAX_ARRAY);
        if (user_doc == null) {
            // nothing to do (older or duplicate data)
            return;
        }
        var random_csum = user_doc.random;
        // src_bkt is a read/write alias to the function's source bucket
        src_bkt["user_plus_ntfys:" + user_id] = user_doc;
        user_doc = src_bkt["user_plus_ntfys:" + user_id];
        if (random_csum !== user_doc.random) {
            // another mutation won the race; loop and retry
        } else {
            // success, could even delete the input notification doc here
            return;
        }
    }
    log("FAILED to insert id: " + meta.id, doc);
}

function addToNtfyArray(src_bkt, user_id, insert_json, max_ary) {
    var ntfy_id = insert_json.nid;
    var random_csum;
    var user_doc = src_bkt["user_plus_ntfys:" + user_id];
    if (!user_doc) {
        // first notification for this user; generate unique random #
        random_csum = Math.random();
        user_doc = { "type": "user_plus_ntfys", "id": user_id, "notifications": [], "random": random_csum };
        user_doc.notifications.push(insert_json);
    } else {
        if (user_doc.notifications[0].nid >= ntfy_id && user_doc.notifications.length === max_ary) {
            // do nothing, this is older data; we assume that nid always increases
            return null;
        } else {
            // find the insert position
            for (var i = 0; i <= user_doc.notifications.length; i++) {
                if (i < user_doc.notifications.length && user_doc.notifications[i].nid === ntfy_id) {
                    // do nothing, this is duplicate data we already have; assume no updates to notifications
                    return null;
                }
                if (i == user_doc.notifications.length || user_doc.notifications[i].nid > ntfy_id) {
                    // add to middle or end of the array
                    user_doc.notifications.splice(i, 0, insert_json);
                    // update unique random #
                    random_csum = Math.random();
                    user_doc.random = random_csum;
                    break;
                }
            }
        }
        while (user_doc.notifications.length > max_ary) {
            // ensure proper size
            user_doc.notifications.shift();
        }
    }
    return user_doc;
}
There might be a better data modeling approach. Does all of this data need to be in separate documents? If "N" is a relatively small number, you could fit all of these into an array within a single document, like:
{
  "type": "user",
  "name": "ehsan",
  "notifications": [
    { "somekey": "someValue" },
    { "somekey": "someValue" },
    { "somekey": "someValue" }
  ]
}
Then the process would be:
Get the document
Add a record to the notifications array
Determine if you need to remove an old record (and then remove it)
Save the updated document.
This approach has the benefits of simplicity and of not needing to update multiple pieces of data. The way you've modeled it could work, but you would need ACID transactions (which aren't available in Couchbase's Node SDK yet) or maybe an Eventing function to check that there aren't too many notification documents for each user whenever a new notification is created.
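For what it's worth, a minimal sketch of that read-modify-write cycle with the Couchbase Node SDK (assuming SDK 3.x; the key pattern and MAX_NOTIFICATIONS are made up for illustration, and a real version would retry on a CAS mismatch):

const couchbase = require('couchbase');

const MAX_NOTIFICATIONS = 3;

async function addNotification(collection, userId, notification) {
  const key = `user:${userId}`;

  // 1. Get the document (the CAS value detects concurrent writers)
  const { cas, content: user } = await collection.get(key);

  // 2. Add the record to the notifications array
  user.notifications.push(notification);

  // 3. Remove old records beyond the limit
  while (user.notifications.length > MAX_NOTIFICATIONS) {
    user.notifications.shift();
  }

  // 4. Save the updated document; fails if someone else wrote in between
  await collection.replace(key, user, { cas });
}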
