Insert Unicode text file data into database - node.js

I need a solution for getting data out of a Unicode-encoded text file and writing this data into an existing database. I am working with Node.js.
My text file looks like this (whitespace is shown):
Text file
Column Active Description Number
1 True test test 0000
1 True test1 test1 0001
1 True test2 test2 0002
1 True test3 test3 0003
1 True test4 test4 0004
I need to get the values of Column, Active, Description and Number for each row in order to create a new entry in my database with the respective values. My database already exists and has the columns Column, Active, Description and Number. What is the easiest way to write the data of the text file into my database? I tried a CSV parser:
const csv = require('csv-streamify');
const fs = require('fs');

const parser = csv({
  delimiter: '\t',
  columns: true,
});

parser.on('data', (line) => {
  console.log(line);
});

fs.createReadStream('test.txt').pipe(parser);
but that only showed me output like this:
{ '��C\u0000o\u0000l\u0000u\u0000m\u0000n ...
How do I get the correct output, and what do I have to do to write the data into the database?
I think it has to be something like this:
connection.query('INSERT INTO Table1 SET ...
I really don't know how to continue.

Using 'byline' instead of 'csv-streamify', you can retrieve the content as follows:
const byline = require('byline');
const fs = require('fs');

// Note: the garbled output in the question suggests the file may be UTF-16
// encoded; if so, you may need to pass an encoding to the read stream, e.g.
// fs.createReadStream('test.txt', { encoding: 'utf16le' })
var stream = byline(fs.createReadStream('test.txt'));

var index = 0;
var headers;
var data = [];

stream.on('data', function(line) {
  var currentData;
  var entry;
  var i;
  line = line.toString(); // Convert the buffer to a string line
  if (index === 0) {
    // First line: the column headers
    headers = line.split(/[ ]+/);
  } else {
    // Data lines: map each value onto its header
    currentData = line.split(/[ ]+/);
    entry = {};
    for (i = 0; i < headers.length; i++) {
      entry[headers[i]] = currentData[i];
    }
    data.push(entry);
  }
  index++;
});

stream.on("error", function(err) {
  console.log(err);
});

stream.on("end", function() {
  console.log(data);
  console.log("Done");
});
Everything is stored in the 'data' array as follows:
[ { Column: '1',
Active: 'True',
Description: 'test',
Number: 'test' },
{ Column: '1',
Active: 'True',
Description: 'test1',
Number: 'test1' },
{ Column: '1',
Active: 'True',
Description: 'test2',
Number: 'test2' },
{ Column: '1',
Active: 'True',
Description: 'test3',
Number: 'test3' },
{ Column: '1',
Active: 'True',
Description: 'test4',
Number: 'test4' } ]
Then, it shouldn't be too difficult to write to the SQL database.
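From there, writing the rows to the database is just a loop over the data array once parsing has finished. A minimal sketch, assuming a MySQL database and the mysql package (the connection.query / Table1 names are taken from the question; the connection details are placeholders, adjust to your setup):
const mysql = require('mysql');

const connection = mysql.createConnection({
  host: 'localhost',   // placeholder connection details
  user: 'dbuser',
  password: 'secret',
  database: 'mydb'
});

stream.on('end', function() {
  data.forEach(function(entry) {
    // 'INSERT INTO ... SET ?' expands the object into column = value pairs
    connection.query('INSERT INTO Table1 SET ?', entry, function(err, result) {
      if (err) {
        return console.error(err);
      }
      console.log('Inserted row with id', result.insertId);
    });
  });
  connection.end();
});
Alternatively, a single bulk 'INSERT INTO Table1 (...) VALUES ?' with a nested array of values could avoid one round trip per row.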

Related

Scripted search and looping - how do I show only 1 alert if multiple results?

I have a script that triggers a search when a field is changed. The goal is that if the search finds a result, the user is alerted that this may be a possible duplicate. The issue I am having is that if there is more than one result, the alert is shown as many times as there are results. How can I have it show the alert only once?
/**
 * @NApiVersion 2.0
 * @NScriptType ClientScript
 */
define(['N/record', 'N/search', 'N/ui/dialog'], function(r, search, u) {
  function fieldChanged(context) {
    var recCurrent = context.currentRecord;
    var show = recCurrent.getValue({
      fieldId: 'custrecord_eym_er_show'
    });
    var client = recCurrent.getValue({
      fieldId: 'custrecord_eym_er_customer'
    });
    if (context.fieldId == 'custrecord_eym_er_customer') {
      var client = recCurrent.getValue({
        fieldId: 'custrecord_eym_er_customer'
      });
      console.log(client);
      var sv = search.create({
        type: "customrecord_eym_exhibit_reg",
        columns: [
          search.createColumn({
            name: "internalid",
            label: "Internal ID"
          })
        ],
        filters: [
          ["custrecord_eym_er_show", "anyof", show],
          "AND",
          ["custrecord_eym_er_customer", "anyof", client]
        ]
      });
      var pagedData = sv.runPaged({
        pageSize: 1000
      });
      // iterate the pages
      for (var i = 0; i < pagedData.pageRanges.length; i++) {
        // fetch the current page data
        var currentPage = pagedData.fetch(i);
        // and forEach() thru all results
        currentPage.data.forEach(function(result) {
          // you have the result row. use it like this....
          var duplicate = result.getValue('internalid');
          console.log(duplicate);
          if (duplicate) {
            alert('There is more than 1 entry for this client');
          }
        });
      }
    }
  }
  return {
    fieldChanged: fieldChanged
  };
});
You can change:
var pagedData = sv.runPaged({
pageSize: 1000
});
To
var pagedData = sv.run().getRange({
  start: 0,
  end: 1
});
The variable pagedData is now a plain array containing the results; if there are many duplicates, you will only get the first result.
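Since getRange returns a plain array rather than paged data, the pageRanges loop from the original script is no longer needed; a minimal sketch of the check with this approach:
var results = sv.run().getRange({
  start: 0,
  end: 1
});
if (results.length > 0) {
  alert('There is more than 1 entry for this client');
}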
Alternatively, you can simply introduce a flag variable so the alert is only shown once, like this:
var printOnce = 0;
for (var i = 0; i < pagedData.pageRanges.length; i++) {
  // fetch the current page data
  var currentPage = pagedData.fetch(i);
  // and forEach() thru all results
  currentPage.data.forEach(function(result) {
    // you have the result row. use it like this....
    var duplicate = result.getValue('internalid');
    console.log(duplicate);
    if (duplicate && printOnce == 0) {
      printOnce++;
      alert('There is more than 1 entry for this client');
    }
  });
}
I think the first approach is better since you don't need to retrieve all the data.
Try using a summary-level grouping on the saved search results. For example, if you're getting multiple internal IDs, where 0 means no duplicates and 1+ means duplicates, try changing
columns: [
  search.createColumn({
    name: "internalid",
    label: "Internal ID"
  })
],
to
columns: [
  search.createColumn({
    name: "internalid",
    label: "Internal ID",
    summary: 'GROUP'
  })
],
And change
var duplicate = result.getValue('internalid');
to
var duplicate = result.getValue({ name: 'internalid', summary: 'GROUP' });
If I assumed incorrectly about the use of internal ids, add "Customer" or the desired grouped value as a column and use the summary grouping on that field.
There is a way to get the number of results returned by a search. In your case, it seems you just want to show an alert if the search returns at least one result, so you can use this code:
/**
 * @NApiVersion 2.0
 * @NScriptType ClientScript
 */
define(['N/record', 'N/search', 'N/ui/dialog'], function(r, search, u) {
  function fieldChanged(context) {
    var recCurrent = context.currentRecord;
    var show = recCurrent.getValue({
      fieldId: 'custrecord_eym_er_show'
    });
    var client = recCurrent.getValue({
      fieldId: 'custrecord_eym_er_customer'
    });
    if (context.fieldId == 'custrecord_eym_er_customer') {
      var client = recCurrent.getValue({
        fieldId: 'custrecord_eym_er_customer'
      });
      console.log(client);
      var sv = search.create({
        type: "customrecord_eym_exhibit_reg",
        columns: [
          search.createColumn({
            name: "internalid",
            label: "Internal ID"
          })
        ],
        filters: [
          ["custrecord_eym_er_show", "anyof", show],
          "AND",
          ["custrecord_eym_er_customer", "anyof", client]
        ]
      });
      if (sv.runPaged().count >= 1) {
        alert('There is more than 1 entry for this client');
      }
    }
  }
  return {
    fieldChanged: fieldChanged
  };
});

Parsing CSV with common data preceding other data with NodeJS

I'm trying to read in CSV files with Node.js, and the code is like below.
fs.createReadStream(file)
.pipe(csv.parse({from_line: 6, columns: true, bom: true}, (err, data) => {
data.forEach((row, i) => {
As I am using the from_line parameter, the data starts at line 6 with the header.
The issue is that line #3 has the date, which is also needed along with the other data.
What is the best way to resolve this?
Data file looks like below:
Genre: ABC
Date: 2020-01-01, 2020-12-31
Number of Data: 300
No., Code, Name, sales, delivery, return, stock
1, ......
2, ......
Additional question
I have inserted iconv.decodeStream in the second part of the function.
How can I apply the same decoder to the header read-in process?
fs.createReadStream(file)
.pipe(iconv.decodeStream("utf-8"))
.pipe(csv.parse({from_line: 6, columns: true, bom: true}, (err, data) => {
data.forEach((row, i) => {
I'd suggest reading the header data first, then you can access this data in your processing callback(s), something like the example below:
app.js
// Import the package main module
const csv = require('csv');
const fs = require("fs");
const { promisify } = require('util');
const parse = promisify(csv.parse);
const iconv = require('iconv-lite');

async function readHeaderData(file, iconv) {
  let buffer = Buffer.alloc(1024);
  const fd = fs.openSync(file);
  fs.readSync(fd, buffer);
  fs.closeSync(fd);
  buffer = await iconv.decode(buffer, "utf-8");
  const options = { to_line: 3, delimiter: ':', columns: false, bom: true, trim: true };
  const rows = await parse(buffer, options);
  // Convert array to object
  return Object.fromEntries(rows);
}

async function readFile(file, iconv) {
  const header = await readHeaderData(file, iconv);
  console.log("readFile: File header:", header);
  fs.createReadStream(file)
    .pipe(iconv.decodeStream("utf-8"))
    .pipe(csv.parse({ from_line: 6, columns: true, bom: true, trim: true }, (err, data) => {
      // We now have access to the header data along with the row data in the callback.
      data.forEach((row, i) => console.log({ line: i, header, row }));
    }));
}

readFile('stream-with-skip.csv', iconv);
This will give an output like:
readFile: File header: {
Genre: 'ABC',
Date: '2020-01-01, 2020-12-31',
'Number of Data': '300'
}
and
{
line: 0,
header: {
Genre: 'ABC',
Date: '2020-01-01, 2020-12-31',
'Number of Data': '300'
},
row: {
'No.': '1',
Code: 'Code1',
Name: 'Name1',
sales: 'sales1',
delivery: 'delivery1',
return: 'return1',
stock: 'stock1'
}
}
{
line: 1,
header: {
Genre: 'ABC',
Date: '2020-01-01, 2020-12-31',
'Number of Data': '300'
},
row: {
'No.': '2',
Code: 'Code2',
Name: 'Name2',
sales: 'sales2',
delivery: 'delivery2',
return: 'return2',
stock: 'stock2'
}
}
example.csv
Genre: ABC
Date: 2020-01-01, 2020-12-31
Number of Data: 300
No., Code, Name, sales, delivery, return, stock
1, Code1, Name1, sales1, delivery1, return1, stock1
2, Code2, Name2, sales2, delivery2, return2, stock2

NodeJS MongoDB Mongoose export nested subdocuments and arrays to XLSX columns

I have query results from MongoDB as an array of documents with nested subdocuments and arrays of subdocuments.
[
  {
    RecordID: 9000,
    RecordType: 'Item',
    Location: {
      _id: 5d0699326e310a6fde926a08,
      LocationName: 'Example Location A'
    },
    Items: [
      {
        Title: 'Example Title A',
        Format: {
          _id: 5d0699326e310a6fde926a01,
          FormatName: 'Example Format A'
        }
      },
      {
        Title: 'Example Title B',
        Format: {
          _id: 5d0699326e310a6fde926a01,
          FormatName: 'Example Format B'
        }
      }
    ]
  },
  {
    RecordID: 9001,
    RecordType: 'Item',
    Location: {
      _id: 5d0699326e310a6fde926a08,
      LocationName: 'Example Location C'
    },
    Items: [
      {
        Title: 'Example Title C',
        Format: {
          _id: 5d0699326e310a6fde926a01,
          FormatName: 'Example Format C'
        }
      }
    ]
  }
]
Problem
I need to export the results to XLSX in column order. The XLSX library is working to export the top-level properties (such as RecordID and RecordType) only. I also need to export the nested objects and arrays of objects. Given a list of property names e.g. RecordID, RecordType, Location.LocationName, Items.Title, Items.Format.FormatName the properties must be exported to XLSX columns in the specified order.
Desired result
Here is the desired 'flattened' structure (or something similar) that I think I should be able to convert to XLSX columns.
[
  {
    'RecordID': 9000,
    'RecordType': 'Item',
    'Location.LocationName': 'Example Location A',
    'Items.Title': 'Example Title A, Example Title B',
    'Items.Format.FormatName': 'Example Format A, Example Format B',
  },
  {
    'RecordID': 9001,
    'RecordType': 'Item',
    'Location.LocationName': 'Example Location C',
    'Items.Title': 'Example Title C',
    'Items.Format.FormatName': 'Example Format C',
  }
]
I am using the XLSX library to convert the query results to XLSX which works for top-level properties only.
const worksheet: XLSX.WorkSheet = XLSX.utils.json_to_sheet(results.data);
const workbook: XLSX.WorkBook = { Sheets: { 'data': worksheet }, SheetNames: ['data'] };
const excelBuffer: any = XLSX.write(workbook, { bookType: 'xlsx', type: 'array' });
const data: Blob = new Blob([excelBuffer], { type: EXCEL_TYPE });
FileSaver.saveAs(data, new Date().getTime());
POSSIBLE OPTIONS
I am guessing I need to 'flatten' the structure either using aggregation in the query or by performing post-processing when the query is returned.
Option 1: Build the logic in the MongoDB query to flatten the results.
$replaceRoot might work since it is able to "promote an existing embedded document to the top level". Although I am not sure if this will solve the problem exactly, I do not want to modify the documents in place, I just need to flatten the results for exporting.
Here is the MongoDB query I am using to produce the results:
records.find({ '$and': [ { RecordID: { '$gt': 9000 } } ]},
{ skip: 0, limit: 10, projection: { RecordID: 1, RecordType: 1, 'Items.Title': 1, 'Items.Location': 1 }});
Option 2: Iterate and flatten the results on the Node server
This is likely not the most performant option, but might be the easiest if I can't find a way to do so within the MongoDB query.
UPDATE:
I may be able to use MongoDB aggregate $project to 'flatten' the results. For example, this aggregate query effectively 'flattens' the results by 'renaming' the properties. I just need to figure out how to implement the query conditions within the aggregate operation.
db.records.aggregate({
  $project: {
    RecordID: 1,
    RecordType: 1,
    Title: '$Items.Title',
    Format: '$Items.Format'
  }
})
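For what it's worth, the query conditions from the original find() call can usually be carried over as a $match stage in front of $project, with $skip/$limit covering the paging options; a rough sketch of that idea:
db.records.aggregate([
  // same condition as the original find() query
  { $match: { RecordID: { $gt: 9000 } } },
  // 'rename'/flatten the nested properties
  {
    $project: {
      RecordID: 1,
      RecordType: 1,
      Title: '$Items.Title',
      Format: '$Items.Format'
    }
  },
  { $skip: 0 },
  { $limit: 10 }
])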
UPDATE 2:
I have abandoned the $project solution because I would need to change the entire API to support aggregation. Also, I would need to find a solution for populate because aggregate does not support it, rather, it uses $lookup which is possible but time consuming because I would need to write the queries dynamically. I am going back to look into how to flatten the object by creating a function to iterate the array of objects recursively.
Below is a solution for transforming the Mongo data on the server via a function flattenObject, which recursively flattens nested objects and returns 'dot-type' keys for nested paths.
Note that the snippet below contains a function that renders an editable table for preview; however, the important part you want (downloading the file) should be triggered when you run the snippet and click the 'Download' button.
const flattenObject = (obj, prefix = '') =>
  Object.keys(obj).reduce((acc, k) => {
    const pre = prefix.length ? prefix + '.' : '';
    if (typeof obj[k] === 'object') Object.assign(acc, flattenObject(obj[k], pre + k));
    else acc[pre + k] = obj[k];
    return acc;
  }, {});
var data = [{
RecordID: 9000,
RecordType: "Item",
Location: {
_id: "5d0699326e310a6fde926a08",
LocationName: "Example Location A"
},
Items: [{
Title: "Example Title A",
Format: {
_id: "5d0699326e310a6fde926a01",
FormatName: "Example Format A"
}
},
{
Title: "Example Title B",
Format: {
_id: "5d0699326e310a6fde926a01",
FormatName: "Example Format B"
}
}
]
},
{
RecordID: 9001,
RecordType: "Item",
Location: {
_id: "5d0699326e310a6fde926a08",
LocationName: "Example Location C"
},
Items: [{
Title: "Example Title C",
Format: {
_id: "5d0699326e310a6fde926a01",
FormatName: "Example Format C"
}
}]
}
];
const EXCEL_MIME_TYPE = `application/vnd.ms-excel`;
const flattened = data.map(e => flattenObject(e));
const ws_default_header = XLSX.utils.json_to_sheet(flattened);
const ws_custom_header = XLSX.utils.json_to_sheet(flattened, {
header: ['Items.Title', 'RecordID', 'RecordType', 'Location.LocationName', 'Items.Format.FormatName']
});
const def_workbook = XLSX.WorkBook = {
Sheets: {
'data': ws_default_header
},
SheetNames: ['data']
}
const custom_workbook = XLSX.WorkBook = {
Sheets: {
'data': ws_custom_header
},
SheetNames: ['data']
}
const def_excelBuffer = XLSX.write(def_workbook, {
bookType: 'xlsx',
type: 'array'
});
const custom_excelBuffer = XLSX.write(custom_workbook, {
bookType: 'xlsx',
type: 'array'
});
const def_blob = new Blob([def_excelBuffer], {
type: EXCEL_MIME_TYPE
});
const custom_blob = new Blob([custom_excelBuffer], {
type: EXCEL_MIME_TYPE
});
const def_button = document.getElementById('dl-def')
/* trigger browser to download file */
def_button.onclick = e => {
e.preventDefault()
saveAs(def_blob, `${new Date().getTime()}.xlsx`);
}
const custom_button = document.getElementById('dl-cus')
/* trigger browser to download file */
custom_button.onclick = e => {
e.preventDefault()
saveAs(custom_blob, `${new Date().getTime()}.xlsx`);
}
/*
render editable table to preview (for SO convenience)
*/
const html_string_default = XLSX.utils.sheet_to_html(ws_default_header, {
id: "data-table",
editable: true
});
const html_string_custom = XLSX.utils.sheet_to_html(ws_custom_header, {
id: "data-table",
editable: true
});
document.getElementById("container").innerHTML = html_string_default;
document.getElementById("container-2").innerHTML = html_string_custom;
<script src="https://cdnjs.cloudflare.com/ajax/libs/xlsx/0.14.3/xlsx.full.min.js"></script>
<head>
<title>Excel file generation from JSON</title>
<meta charset="utf-8" />
<style>
.xport,
.btn {
display: inline;
text-align: center;
}
a {
text-decoration: none
}
#data-table,
#data-table th,
#data-table td {
border: 1px solid black
}
</style>
</head>
<script>
function render(type, fn, dl) {
var elt = document.getElementById('data-table');
var wb = XLSX.utils.table_to_book(elt, {
sheet: "Sheet JS"
});
return dl ?
XLSX.write(wb, {
bookType: type,
bookSST: true,
type: 'array'
}) :
XLSX.writeFile(wb, fn || ('SheetJSTableExport.' + (type || 'xlsx')));
}
</script>
<div>Default Header</div>
<div id="container"></div>
<br/>
<div>Custom Header</div>
<div id="container-2"></div>
<br/>
<table id="xport"></table>
<button type="button" id="dl-def">Download Default Header Config</button>
<button type="button" id="dl-cus">Download Custom Header Config</button>
<script src="https://cdnjs.cloudflare.com/ajax/libs/FileSaver.js/1.3.8/FileSaver.min.js"></script>
I wrote a function to iterate all objects in the results array and create new flattened objects recursively. The flattenObject function shown here is similar to the previous answer, and I took additional inspiration from this related answer.
The '_id' properties are specifically excluded from being added to the flattened object, since ObjectIds are still being returned as bson types even though I have the lean() option set.
I still need to figure out how to sort the objects such that they are in the order given e.g. RecordID, RecordType, Items.Title. I believe that might be easiest to achieve by creating a separate function to iterate the flattened results, although not necessarily the most performant. Let me know if anyone has any suggestions on how to achieve the object sorting by a given order or has any improvements to the solution.
const apiCtrl = {};
/**
* Async array iterator
*/
apiCtrl.asyncForEach = async (array, callback) => {
for (let index = 0; index < array.length; index++) {
await callback(array[index], index, array)
}
}
// Check if a value is an object
const isObject = (val) => {
return typeof val == 'object' && val instanceof Object && !(val instanceof Array);
}
// Check if a value is a date object
const isDateObject = (val) => {
return Object.prototype.toString.call(val) === '[object Date]';
}
/**
* Iterate object properties recursively and flatten all values to top level properties
* @param {object} obj Object to flatten
* @param {string} prefix A string to hold the property name
* @param {object} res A temp object to store the current iteration
* Return a new object with all properties on the top level only
*
*/
const flattenObject = (obj, prefix = '', res = {}) =>
Object.entries(obj).reduce((acc, [key, val]) => {
const k = `${prefix}${key}`
// Skip _ids since they are returned as bson values
if (k.indexOf('_id') === -1) {
// Check if value is an object
if (isObject(val) && !isDateObject(val)) {
flattenObject(val, `${k}.`, acc)
// Check if value is an array
} else if (Array.isArray(val)) {
// Iterate each array value and call function recursively
val.map(element => {
flattenObject(element, `${k}.`, acc);
});
// If value is not an object or an array
} else if (val !== null && val !== undefined) {
// Check if property has a value already
if (res[k]) {
// Check for duplicate values
if (typeof res[k] === 'string' && res[k].indexOf(val) === -1) {
// Append value with a separator character at the beginning
res[k] += '; ' + val;
}
} else {
// Set value
res[k] = val;
}
}
}
return acc;
}, res);
/**
* Convert DB query results to an array of flattened objects
* Required to build a format that is exportable to csv, xlsx, etc.
* #param {array} results Results of DB query
* Return a new array of objects with all properties on the top level only
*/
apiCtrl.buildExportColumns = async (results) => {
const data = results.data;
let exportColumns = [];
if (data && data.length > 0) {
try {
// Iterate all records in results data array
await apiCtrl.asyncForEach(data, async (record) => {
// Convert the multi-level object to a flattened object
const flattenedObject = flattenObject(record);
// Push flattened object to array
exportColumns.push(flattenedObject);
});
} catch (e) {
console.error(e);
}
}
return exportColumns;
}
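As for the open question about ordering the columns: rather than re-sorting the flattened objects, one option (used in the snippet from the previous answer) is to pass the desired key list as the header option of XLSX.utils.json_to_sheet; a rough sketch, reusing the export code from the question:
const exportColumns = await apiCtrl.buildExportColumns(results);

// The header array controls the column order of the generated sheet;
// any keys not listed are appended after the listed ones.
const worksheet = XLSX.utils.json_to_sheet(exportColumns, {
  header: ['RecordID', 'RecordType', 'Location.LocationName', 'Items.Title', 'Items.Format.FormatName']
});
const workbook = { Sheets: { data: worksheet }, SheetNames: ['data'] };
const excelBuffer = XLSX.write(workbook, { bookType: 'xlsx', type: 'array' });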

Multiple insertion with addition data with pg-promise

I have a large dataset that I want to insert into a Postgres DB. I can achieve this using pg-promise like this:
function batchUpload (req, res, next) {
  var data = req.body.data;
  var cs = pgp.helpers.ColumnSet(['firstname', 'lastname', 'email'], { table: 'customer' });
  var query = pgp.helpers.insert(data, cs);
  db.none(query)
    .then(data => {
      // success;
    })
    .catch(error => {
      // error;
      return next(error);
    });
}
The dataset is an array of objects like this:
[
  {
    firstname: 'Lola',
    lastname: 'Solo',
    email: 'mail@solo.com',
  },
  {
    firstname: 'hello',
    lastname: 'world',
    email: 'mail@example.com',
  },
  {
    firstname: 'mami',
    lastname: 'water',
    email: 'mami@example.com',
  }
]
The challenge is that I have a column added_at which isn't included in the dataset and cannot be null. How do I add a timestamp for each record insertion to the query?
As per the ColumnConfig syntax:
const col = {
  name: 'added_at',
  def: () => new Date() // default to the current Date/Time
};

const cs = pgp.helpers.ColumnSet(['firstname', 'lastname', 'email', col], { table: 'customer' });
Alternatively, you can define it in a number of other ways, as ColumnConfig is very flexible.
Example:
const col = {
  name: 'added_at',
  mod: ':raw', // use raw-text modifier, to inject the string directly
  def: 'now()' // use now() for the column
};
or you can use property init to set the value dynamically:
const col = {
  name: 'added_at',
  mod: ':raw', // use raw-text modifier, to inject the string directly
  init: () => {
    return 'now()';
  }
};
See the ColumnConfig syntax for details.
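Putting this together with the batchUpload function from the question, the extra column simply becomes part of the ColumnSet, so every generated row gets the timestamp; a minimal sketch using the def-based variant above:
function batchUpload(req, res, next) {
  var data = req.body.data;
  var addedAt = {
    name: 'added_at',
    def: () => new Date() // applied to every inserted row
  };
  var cs = new pgp.helpers.ColumnSet(['firstname', 'lastname', 'email', addedAt], { table: 'customer' });
  var query = pgp.helpers.insert(data, cs);
  db.none(query)
    .then(() => {
      // success;
    })
    .catch(error => next(error));
}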
P.S. I'm the author of pg-promise.

Omitting column names / inserting objects directly into node-postgres

I'd like to pass dictionaries with column names as keys, thus avoiding declaring the column names within the query itself (typing them directly).
Assume I have a table User with 2 columns:
idUser(INT)
fullName(VARCHAR)
To create a record using node-postgres, I'll need to declare within the query the column names like so:
var idUser = 2;
var fullName = "John Doe";
var query = 'INSERT INTO User(idUser, fullName) VALUES ($1, $2)';
database.query(query, [idUser, fullName], function(error, result) {
  callback(error, result.rows);
  database.end();
});
I'd prefer it if there was a way to just pass a dictionary and have it infer the column names from the keys. If there's an easy trick, I'd like to hear it.
E.g. something like this:
var values = {
  idUser: 2,
  fullName: "John Doe"
};
var query = 'INSERT INTO User VALUES ($1)';
database.query(query, [values], function(error, result) {
  callback(error, result.rows);
  database.end();
});
A complete example of doing it with pg-promise:
const pgp = require('pg-promise')(/*options*/);
const cn = 'postgres://username:password@host:port/database';
const db = pgp(cn);

const values = {
  idUser: 2,
  fullName: 'John Doe'
};
// generating the insert query:
const query = pgp.helpers.insert(values, null, 'User');
//=> INSERT INTO "User"("idUser","fullName") VALUES(2,'John Doe')
db.none(query)
  .then(data => {
    // success;
  })
  .catch(error => {
    // error;
  });
And with focus on high performance it would change to this:
// generating a set of columns from the object (only once):
const cs = new pgp.helpers.ColumnSet(values, {table: 'User'});
// generating the insert query:
const query = pgp.helpers.insert(values, cs);
//=> INSERT INTO "User"("idUser","fullName") VALUES(2,'John Doe')
There's no support for key-value values in the INSERT statement, so it cannot be done with native SQL.
However, the node-postgres extras page mentions multiple SQL generation tools, and for example Squel.js parameters can be used to construct SQL in a way very close to what you're looking for:
squel.insert()
  .into("User")
  .setFieldsRows([
    { idUser: 2, fullName: "John Doe" }
  ])
  .toParam()
// => { text: 'INSERT INTO User (idUser, fullName) VALUES (?, ?)',
//      values: [ 2, 'John Doe' ] }
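To actually run the generated statement with node-postgres, the text/values pair from toParam() can be passed to query(); since node-postgres expects $1-style placeholders rather than ?, the numberedParameters option (also used in the next answer) would presumably be needed. A rough sketch:
const q = squel.insert({ numberedParameters: true })
  .into('User')
  .setFieldsRows([{ idUser: 2, fullName: 'John Doe' }])
  .toParam();
// q.text   => 'INSERT INTO User (idUser, fullName) VALUES ($1, $2)'
// q.values => [ 2, 'John Doe' ]
database.query(q.text, q.values, function(error, result) {
  callback(error, result.rows);
});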
My case was a bit special as I had a field named order in the JSON object which is a keyword in SQL. Therefore I had to wrap everything in quotes using a JSONify() function.
Also note the numberedParameters argument as well as the double quotes around the 'Messages' string.
import { pool } from './connection';
// Note: 'insert' below comes from Squel (not shown in the original snippet),
// e.g. const { insert } = require('squel');

function JSONify(obj: Map<string, any>) {
  var o = {};
  for (var i in obj) {
    o['"' + i + '"'] = obj[i]; // make the quotes
  }
  return o;
}

// I have a table named "Messages" with the columns order and name
// I also supply the createdAt and updatedAt timestamps just in case
const messages = [
  {
    order: 0,
    name: 'Message with index 0',
    createdAt: new Date().toISOString(),
    updatedAt: new Date().toISOString(),
  }
]

// Create the insert statement
const insertStatement = insert({ numberedParameters: true })
  .into('"Messages"')
  .setFieldsRows(messages.map((message) => JSONify(message)))
  .toParam();

console.log(insertStatement);
// Notice the quotes wrapping the table and column names
// => { text: 'INSERT INTO "Messages" ("order", "name", "createdAt", "updatedAt") VALUES ($1, $2, $3, $4)',
//      values: [ 0, 'Message with index 0', '2022-07-22T13:51:27.679Z', '2022-07-22T13:51:27.679Z' ] }

// Create
await pool.query(insertStatement.text, insertStatement.values);
See the Squel documentation for more details.
And this is how I create the pool object if anyone is curious.
import { Pool } from 'pg';
import { DB_CONFIG } from './config';

export const pool = new Pool({
  user: DB_CONFIG[process.env.NODE_ENV].username,
  host: DB_CONFIG[process.env.NODE_ENV].host,
  database: DB_CONFIG[process.env.NODE_ENV].database,
  password: DB_CONFIG[process.env.NODE_ENV].password,
  port: DB_CONFIG[process.env.NODE_ENV].port,
});
