Good way to handle Q Promises in Waterline with Sails.js - node.js

I'm importing some data where each new row depends on the previous row having been added, since each row's order attribute is set based on the current maximum order among the existing objects. The flow is that I first try to find an object with the same name; if it isn't found, I query the current maximum order and create a new object with order + 1 from that query.
I tried doing this with the Q promises that are available under Waterline. I tried the all method, as well as chaining queries with then as shown in the Q docs:
var result = Q(initialVal);
funcs.forEach(function (f) {
  result = result.then(f);
});
return result;
But all objects ended up with the same order, as if the queries had executed in parallel instead of each one waiting for the previous link in the chain to finish.
I finally found a solution using recursion, but I doubt it's the best way of working with promises. Here's the code that works (it still needs some refactoring, cleaning, etc.), to show the rough idea:
function findOrCreateGroup(groupsQuery, index, callback) {
  var groupName = groupsQuery[index];
  Group.findOne({ 'name': groupName }).then(function (group) {
    if (!group) {
      return Group.find().limit(1).sort('order DESC').then(function (foundGroups) {
        var maxOrder = 0;
        if (foundGroups.length > 0) {
          maxOrder = foundGroups[0].order;
        }
        return Group.create({
          'name': groupName,
          'order': (maxOrder + 1)
        }).then(function (g) {
          dbGroups[g.name] = g;
          if (index + 1 < groupsQuery.length) {
            findOrCreateGroup(groupsQuery, index + 1, callback);
          } else {
            callback();
          }
          return g;
        });
      });
    } else {
      dbGroups[group.name] = group;
      if (index + 1 < groupsQuery.length) {
        findOrCreateGroup(groupsQuery, index + 1, callback);
      } else {
        callback();
      }
      return group;
    }
  });
}
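For reference, the sequential chain the question was aiming for can usually be built without recursion by folding the names into a single promise chain, so each find/create starts only after the previous one has resolved. A minimal sketch under the same assumptions as the code above (the findOrCreateOne helper is hypothetical, wrapping the find/create logic for one name):

// Hypothetical helper: resolves with the found or newly created group for one name.
function findOrCreateOne(groupName) {
  return Group.findOne({ 'name': groupName }).then(function (group) {
    if (group) { return group; }
    return Group.find().limit(1).sort('order DESC').then(function (foundGroups) {
      var maxOrder = foundGroups.length > 0 ? foundGroups[0].order : 0;
      return Group.create({ 'name': groupName, 'order': maxOrder + 1 });
    });
  });
}

// Fold the names into one chain; each step starts only after the previous one resolves.
groupsQuery.reduce(function (chain, groupName) {
  return chain.then(function () {
    return findOrCreateOne(groupName).then(function (g) {
      dbGroups[g.name] = g;
    });
  });
}, Q()).then(function () {
  // All groups processed strictly in sequence.
});

A common cause of the parallel behaviour described above is a then callback that does not return the inner query's promise; unless each callback returns the next promise, the chain does not wait.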

Related

Why is the first function call executed two times faster than all other sequential calls?

I have a custom JS iterator implementation and code for measuring the performance of that implementation:
const ITERATION_END = Symbol('ITERATION_END');

const arrayIterator = (array) => {
  let index = 0;
  return {
    hasValue: true,
    next() {
      if (index >= array.length) {
        this.hasValue = false;
        return ITERATION_END;
      }
      return array[index++];
    },
  };
};

const customIterator = (valueGetter) => {
  return {
    hasValue: true,
    next() {
      const nextValue = valueGetter();
      if (nextValue === ITERATION_END) {
        this.hasValue = false;
        return ITERATION_END;
      }
      return nextValue;
    },
  };
};

const map = (iterator, selector) => customIterator(() => {
  const value = iterator.next();
  return value === ITERATION_END ? value : selector(value);
});

const filter = (iterator, predicate) => customIterator(() => {
  if (!iterator.hasValue) {
    return ITERATION_END;
  }
  let currentValue = iterator.next();
  while (iterator.hasValue && currentValue !== ITERATION_END && !predicate(currentValue)) {
    currentValue = iterator.next();
  }
  return currentValue;
});

const toArray = (iterator) => {
  const array = [];
  while (iterator.hasValue) {
    const value = iterator.next();
    if (value !== ITERATION_END) {
      array.push(value);
    }
  }
  return array;
};

const test = (fn, iterations) => {
  const times = [];
  for (let i = 0; i < iterations; i++) {
    const start = performance.now();
    fn();
    times.push(performance.now() - start);
  }
  console.log(times);
  console.log(times.reduce((sum, x) => sum + x, 0) / times.length);
};

const createData = () => Array.from({ length: 9000000 }, (_, i) => i + 1);
const testIterator = (data) => () => toArray(map(filter(arrayIterator(data), x => x % 2 === 0), x => x * 2));

test(testIterator(createData()), 10);
The output of the test function is very weird and unexpected - the first test run is consistently executed about two times faster than all the other runs. One of the results, where the array contains all execution times and the number below it is the mean (I ran it on Node):
[
147.9088459983468,
396.3472499996424,
374.82447600364685,
367.74555300176144,
363.6300039961934,
362.44370299577713,
363.8418449983001,
390.86111199855804,
360.23125199973583,
358.4788999930024
]
348.6312940984964
Similar results can be observed using the Deno runtime; however, I could not reproduce this behaviour on other JS engines. What could be the reason for this in V8?
Environment:
Node v13.8.0, V8 v7.9.317.25-node.28,
Deno v1.3.3, V8 v8.6.334
(V8 developer here.) In short: it's inlining, or lack thereof, as decided by engine heuristics.
For an optimizing compiler, inlining a called function can have significant benefits (e.g.: avoids the call overhead, sometimes makes constant folding possible, or elimination of duplicate computations, sometimes even creates new opportunities for additional inlining), but comes at a cost: it makes the compilation itself slower, and it increases the risk of having to throw away the optimized code ("deoptimize") later due to some assumption that turns out not to hold. Inlining nothing would waste performance, inlining everything would waste performance, inlining exactly the right functions would require being able to predict the future behavior of the program, which is obviously impossible. So compilers use heuristics.
V8's optimizing compiler currently has a heuristic to inline functions only if it was always the same function that was called at a particular place. In this case, that's true for the first iteration. Subsequent iterations then create new closures as callbacks, which from V8's point of view are new functions, so they don't get inlined. (V8 actually knows some advanced tricks that allow it to de-duplicate function instances coming from the same source in some cases and inline them anyway; but in this case those are not applicable [I'm not sure why].)
So in the first iteration, everything (including x => x % 2 === 0 and x => x * 2) gets inlined into toArray. From the second iteration onwards, that's no longer the case, and instead the generated code performs actual function calls.
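As a side note, the reason those callbacks count as different functions is plain JS semantics, not a V8 quirk: every evaluation of a function expression produces a new function object. A tiny, engine-agnostic illustration:

const makeAdder = (n) => (x) => x + n;

const f = makeAdder(1);
const g = makeAdder(1);
console.log(f === g);       // false: same source text, two distinct function objects
console.log(f(1) === g(1)); // true: they behave identically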
That's probably fine; I would guess that in most real applications, the difference is barely measurable. (Reduced test cases tend to make such differences stand out more; but changing the design of a larger app based on observations made on a small test is often not the most impactful way to spend your time, and at worst can make things worse.)
Also, hand-optimizing code for engines/compilers is a difficult balance. I would generally recommend not to do that (because engines improve over time, and it really is their job to make your code fast); on the other hand, there clearly is more efficient code and less efficient code, and for maximum overall efficiency, everyone involved needs to do their part, i.e. you might as well make the engine's job simpler when you can.
If you do want to fine-tune performance of this, you can do so by separating code and data, thereby making sure that always the same functions get called. For example like this modified version of your code:
const ITERATION_END = Symbol('ITERATION_END');

class ArrayIterator {
  constructor(array) {
    this.array = array;
    this.index = 0;
  }
  next() {
    if (this.index >= this.array.length) return ITERATION_END;
    return this.array[this.index++];
  }
}
function arrayIterator(array) {
  return new ArrayIterator(array);
}

class MapIterator {
  constructor(source, modifier) {
    this.source = source;
    this.modifier = modifier;
  }
  next() {
    const value = this.source.next();
    return value === ITERATION_END ? value : this.modifier(value);
  }
}
function map(iterator, selector) {
  return new MapIterator(iterator, selector);
}

class FilterIterator {
  constructor(source, predicate) {
    this.source = source;
    this.predicate = predicate;
  }
  next() {
    let value = this.source.next();
    while (value !== ITERATION_END && !this.predicate(value)) {
      value = this.source.next();
    }
    return value;
  }
}
function filter(iterator, predicate) {
  return new FilterIterator(iterator, predicate);
}

function toArray(iterator) {
  const array = [];
  let value;
  while ((value = iterator.next()) !== ITERATION_END) {
    array.push(value);
  }
  return array;
}

function test(fn, iterations) {
  for (let i = 0; i < iterations; i++) {
    const start = performance.now();
    fn();
    console.log(performance.now() - start);
  }
}

function createData() {
  return Array.from({ length: 9000000 }, (_, i) => i + 1);
}

function even(x) { return x % 2 === 0; }
function double(x) { return x * 2; }

function testIterator(data) {
  return function main() {
    return toArray(map(filter(arrayIterator(data), even), double));
  };
}

test(testIterator(createData()), 10);
Observe how there are no more dynamically created functions on the hot path, and the "public interface" (i.e. the way arrayIterator, map, filter, and toArray compose) is exactly the same as before, only under-the-hood details have changed. A benefit of giving all functions names is that you get more useful profiling output ;-)
Astute readers will notice that this modification only shifts the issue away: if you have several places in your code that call map and filter with different modifiers/predicates, then the inlineability issue will come up again. As I said above: microbenchmarks tend to be misleading, as real apps typically have different behavior...
(FWIW, this is pretty much the same effect as at Why is the execution time of this function call changing? .)
Just to add to this investigation, I compared the OP's original code (with the predicate and selector functions declared as separate named functions, as suggested by jmrk) to two other implementations. So, this code has three implementations:
1. The OP's code with the predicate and selector functions declared separately as named functions (not inline).
2. The standard array .filter() and .map() (which you would think would be slower because of the extra creation of intermediate arrays).
3. A custom iteration that does both filtering and mapping in one pass.
The OP's attempt at saving time and making things faster is actually the slowest (on average). The custom iteration is the fastest.
I guess the lesson here is that it's not necessarily intuitive how to make things faster with the optimizing compiler, so if you're tuning performance, you have to measure against the "typical" way of doing things (which may benefit from the most optimizations).
Also, note that in method #3 the first two iterations are the slowest and then it gets faster - the opposite effect from the original code. Go figure.
The results are here:
[
99.90320014953613,
253.79690098762512,
271.3091011047363,
247.94990015029907,
247.457200050354,
261.9487009048462,
252.95090007781982,
250.8520998954773,
270.42809987068176,
249.340900182724
]
240.59370033740998
[
222.14270091056824,
220.48679995536804,
224.24630093574524,
237.07260012626648,
218.47070002555847,
218.1493010520935,
221.50559997558594,
223.3587999343872,
231.1618001461029,
243.55419993400574
]
226.01488029956818
[
147.81360006332397,
144.57479882240295,
73.13350009918213,
79.41700005531311,
77.38950109481812,
78.40880012512207,
112.31539988517761,
80.87990117073059,
76.7899010181427,
79.79679894447327
]
95.05192012786866
The code is here:
const { performance } = require('perf_hooks');

const ITERATION_END = Symbol('ITERATION_END');

const arrayIterator = (array) => {
  let index = 0;
  return {
    hasValue: true,
    next() {
      if (index >= array.length) {
        this.hasValue = false;
        return ITERATION_END;
      }
      return array[index++];
    },
  };
};

const customIterator = (valueGetter) => {
  return {
    hasValue: true,
    next() {
      const nextValue = valueGetter();
      if (nextValue === ITERATION_END) {
        this.hasValue = false;
        return ITERATION_END;
      }
      return nextValue;
    },
  };
};

const map = (iterator, selector) => customIterator(() => {
  const value = iterator.next();
  return value === ITERATION_END ? value : selector(value);
});

const filter = (iterator, predicate) => customIterator(() => {
  if (!iterator.hasValue) {
    return ITERATION_END;
  }
  let currentValue = iterator.next();
  while (iterator.hasValue && currentValue !== ITERATION_END && !predicate(currentValue)) {
    currentValue = iterator.next();
  }
  return currentValue;
});

const toArray = (iterator) => {
  const array = [];
  while (iterator.hasValue) {
    const value = iterator.next();
    if (value !== ITERATION_END) {
      array.push(value);
    }
  }
  return array;
};

const test = (fn, iterations) => {
  const times = [];
  let result;
  for (let i = 0; i < iterations; i++) {
    const start = performance.now();
    result = fn();
    times.push(performance.now() - start);
  }
  console.log(times);
  console.log(times.reduce((sum, x) => sum + x, 0) / times.length);
  return result;
};

const createData = () => Array.from({ length: 9000000 }, (_, i) => i + 1);
const cache = createData();

const comp1 = x => x % 2 === 0;
const comp2 = x => x * 2;

const testIterator = (data) => () => toArray(map(filter(arrayIterator(data), comp1), comp2));

// regular array filter and map
const testIterator2 = (data) => () => data.filter(comp1).map(comp2);

// combine filter and map in same operation
const testIterator3 = (data) => () => {
  let result = [];
  for (let value of data) {
    if (comp1(value)) {
      result.push(comp2(value));
    }
  }
  return result;
};

const a = test(testIterator(cache), 10);
const b = test(testIterator2(cache), 10);
const c = test(testIterator3(cache), 10);

function compareArrays(a1, a2) {
  if (a1.length !== a2.length) return false;
  for (let [i, val] of a1.entries()) {
    if (a2[i] !== val) return false;
  }
  return true;
}

console.log(a.length);
console.log(compareArrays(a, b));
console.log(compareArrays(a, c));

Prevent nested lists in text-editor (froala)

I need to prevent/disable nested lists in a text editor (Froala) implemented in Angular. So far I have written a hack that undoes a nested list when the user creates one. But if the user creates a normal list and presses the Tab key, the list is shown as nested for a few milliseconds until my hack sets it back to a normal list. I need something like event.preventDefault() or stopPropagation() on the Tab keydown, but unfortunately that event is not tracked for some reason. The Froala setting tabSpaces: false also makes no difference when it comes to nested lists. In summary, what I want is: if the user creates a list and presses the Tab key, nothing happens, not even for a millisecond. Has anyone an idea about that?
Froala's support told us there's no built-in way to suppress nested list creation. Nested lists result from the Tab key being hit while the caret is on a list item. However, we found a way to work around this using a MutationObserver.
Basically, we move the now-nested list items up to their former sibling and remove the newly created list. Finally, we take care of the caret position.
var mutationObserverCallback = function (mutationList) {
  var setCaret = function (ele) {
    if (ele.nextSibling) {
      ele = ele.nextSibling;
    }
    var range = document.createRange();
    var sel = window.getSelection();
    range.setStart(ele, 0);
    range.collapse(true);
    sel.removeAllRanges();
    sel.addRange(range);
  };
  var handleAddedListNode = function (listNode) {
    if (!listNode.parentNode) {
      return;
    }
    var parentListItem = listNode.parentNode.closest('li');
    if (!parentListItem) {
      return;
    }
    // Move the nested list's children out, right after their former parent <li>.
    var idx = listNode.children.length - 1;
    while (idx >= 0) {
      var childNode = listNode.children[idx];
      if (parentListItem.nextSibling) {
        parentListItem.parentNode.insertBefore(childNode, parentListItem.nextSibling);
      } else {
        parentListItem.parentNode.appendChild(childNode);
      }
      --idx;
    }
    setCaret(parentListItem);
    listNode.parentNode.removeChild(listNode);
  };
  mutationList.forEach(function (mutation) {
    var addedNodes = mutation.addedNodes;
    if (!addedNodes.length) {
      return;
    }
    for (var i = 0; i < addedNodes.length; i++) {
      var currentNode = addedNodes[i];
      switch (currentNode.nodeName.toLowerCase()) {
        case 'ol':
        case 'ul':
          handleAddedListNode(currentNode);
          break;
        // more optimizations
      }
    }
  });
};

// Construct the observer only after the callback is defined: a `var` function
// expression is hoisted as undefined, so constructing it first would throw.
var observer = new MutationObserver(mutationObserverCallback);
observer.observe(editorNode, {
  childList: true,
  subtree: true
});
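A speculative complement to the observer, to avoid even the brief flicker the question mentions: intercept the Tab key in the capture phase before the editor's own handlers run. This is only a sketch, under the assumptions that the editable element is reachable as editorNode and that Froala's handling can be pre-empted this way; it is not confirmed by Froala's docs:

// Swallow Tab while the caret is inside a list item, so the editor never
// gets the chance to create a nested list at all.
document.addEventListener('keydown', function (event) {
  if (event.key !== 'Tab') {
    return;
  }
  if (!(event.target instanceof Element) || !editorNode.contains(event.target)) {
    return;
  }
  var sel = window.getSelection();
  if (!sel.rangeCount) {
    return;
  }
  var node = sel.getRangeAt(0).startContainer;
  var element = node.nodeType === Node.ELEMENT_NODE ? node : node.parentElement;
  if (element && element.closest('li')) {
    event.preventDefault();
    event.stopPropagation();
  }
}, true); // capture phase: runs before handlers attached to descendants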

How to control the number of Oracle's request in Node.js?

I'm trying to do the following using Node.js and the oracledb module:
1. Consult Database A to get all bills released on a specific date.
2. Assign the result to a variable, list.
3. Use .map() on list and, inside the mapping function, consult Database B to get the client's info by a common key, for each item of list.
The problem is that the Database B requests are all fired at once, so if there are 1,000 bills to map, it returns only 100 and treats the rest as errors. It is probably related to the number of simultaneous requests.
So, given the details, I'd like to know if there's a way to limit the number of concurrent requests (e.g. 100 at a time), or any other solution.
p.s.: I apologize in advance for my mistakes. I also apologize for not demonstrating it in code.
Here's an example of how you can do this by leveraging the new executeMany in v2.2.0 (recently released) and global temporary tables to minimize round trips.
Given these objects:
-- Imagine this is the table you want to select from based on the common keys
create table t (
  common_key number,
  info       varchar2(50)
);

-- Add 10,000 rows with keys 1-10,000 and random data for info
insert into t (common_key, info)
select rownum,
       dbms_random.string('p', 50)
from dual
connect by rownum <= 10000;

commit;

-- Create a temp table
create global temporary table temp_t (
  common_key number not null
)
on commit delete rows;
The following should work:
const oracledb = require('oracledb');
const config = require('./dbConfig.js');

const startKey = 1000;
const length = 2000;

// Uses a promise to simulate async work.
function getListFromDatabaseA() {
  return new Promise((resolve) => {
    const list = [];
    const count = length - startKey;
    for (let x = 0; x < count; x += 1) {
      list.push(startKey + x);
    }
    resolve(list);
  });
}

// The list returned from A likely isn't in the right format for executeMany.
function reformatAsBinds(list) {
  const binds = [];
  for (let x = 0; x < list.length; x += 1) {
    binds.push({
      key: list[x]
    });
  }
  return binds;
}

async function runTest() {
  let conn;
  try {
    const listFromA = await getListFromDatabaseA();
    const binds = reformatAsBinds(listFromA);

    conn = await oracledb.getConnection(config);

    // Send the keys to the temp table with executeMany for a single round trip.
    // The data in the temp table will only be visible to this session and will
    // be deleted automatically at the end of the transaction.
    await conn.executeMany('insert into temp_t (common_key) values (:key)', binds);

    // Now get your common_key and info based on the common keys in the temp table.
    let result = await conn.execute(
      `select common_key, info
       from t
       where common_key in (
         select common_key
         from temp_t
       )
       order by common_key`
    );

    console.log('Got ' + result.rows.length + ' rows');
    console.log('Showing the first 10 rows');
    for (let x = 0; x < 10; x += 1) {
      console.log(result.rows[x]);
    }
  } catch (err) {
    console.error(err);
  } finally {
    if (conn) {
      try {
        await conn.close();
      } catch (err) {
        console.error(err);
      }
    }
  }
}

runTest();
After I posted the solution above, I thought I should provide an alternative that keeps the keys going from Node.js to the DB "in memory". You'll have to run tests and review explain plans to see which is the best option for you (will depend on a number of factors).
Given these objects:
-- This is the same as before
create table t (
  common_key number,
  info       varchar2(50)
);

-- Add 10,000 rows with keys 1-10,000 and random data for info
insert into t (common_key, info)
select rownum,
       dbms_random.string('p', 50)
from dual
connect by rownum <= 10000;

-- But here, we use a nested table instead of a temp table
create or replace type number_ntt as table of number;
This should work:
const oracledb = require('oracledb');
const config = require('./dbConfig.js');

const startKey = 1000;
const length = 2000;

// Uses a promise to simulate async work.
function getListFromDatabaseA() {
  return new Promise((resolve) => {
    const list = [];
    const count = length - startKey;
    for (let x = 0; x < count; x += 1) {
      list.push(startKey + x);
    }
    resolve(list);
  });
}

async function runTest() {
  let conn;
  try {
    const listFromA = await getListFromDatabaseA();

    const binds = {
      keys: {
        type: oracledb.NUMBER,
        dir: oracledb.BIND_IN,
        val: listFromA
      },
      rs: {
        type: oracledb.CURSOR,
        dir: oracledb.BIND_OUT
      }
    };

    conn = await oracledb.getConnection(config);

    // Now get your common_key and info based on the keys in the nested table.
    let result = await conn.execute(
      `declare
         type number_aat is table of number index by pls_integer;
         l_keys    number_aat;
         l_key_tbl number_ntt := number_ntt();
       begin
         -- Unfortunately, we have to bind in with this data type, but
         -- it can't be used as a table...
         l_keys := :keys;

         -- So we'll transfer the data to another array type that can. This
         -- variable's type was created at the schema level so that it could
         -- be seen by the SQL engine.
         for x in 1 .. l_keys.count
         loop
           l_key_tbl.extend();
           l_key_tbl(l_key_tbl.count) := l_keys(x);
         end loop;

         open :rs for
           select common_key, info
           from t
           where common_key in (
             select column_value
             from table(l_key_tbl)
           )
           order by common_key;
       end;`,
      binds
    );

    const resultSet = result.outBinds.rs;

    console.log('Showing the first 10 rows');
    for (let x = 0; x < 10; x += 1) {
      let row = await resultSet.getRow();
      console.log(row);
    }

    // Close the result set before releasing the connection.
    await resultSet.close();
  } catch (err) {
    console.error(err);
  } finally {
    if (conn) {
      try {
        await conn.close();
      } catch (err) {
        console.error(err);
      }
    }
  }
}

runTest();
The formatting for the binds was different (a bit simpler here). Also, because I was executing PL/SQL, I needed to have an out bind cursor/result set type.
See this post regarding cardinality with nested tables:
http://www.oracle-developer.net/display.php?id=427
If you try both, please leave some feedback about which worked better.
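For completeness, the question also asked about simply capping how many Database B requests run at once (e.g. 100 at a time). A minimal sketch of that batching approach, where getClientInfo(key) is a hypothetical helper that runs one Database B query and returns a promise, and the chunk size is illustrative:

// Process the list in fixed-size chunks; each chunk runs in parallel, and the
// next chunk starts only after the current one has finished.
async function mapInChunks(list, chunkSize, worker) {
  const results = [];
  for (let i = 0; i < list.length; i += chunkSize) {
    const chunk = list.slice(i, i + chunkSize);
    results.push(...await Promise.all(chunk.map(worker)));
  }
  return results;
}

// Usage: at most 100 Database B queries in flight at any time.
// const infos = await mapInChunks(billKeys, 100, getClientInfo);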

Need to execute function when forEach functions ends

I have this code in node js / firebase :
ref.child("recipts").once("value", function(usersSnap) {
usersSnap.forEach(function(reciptsSnap) {
reciptsSnap.forEach(function(reciptSnap) {
reciptSnap.ref.child("last_recipt").once("value", function(b) {
b.forEach(function(c) { //Here I fill some "product" object
});
});
reciptSnap.forEach(function(b) { //Here I fill some "product" object
});
});
});
});
I need to execute a function only when the reciptSnap forEach loops have finished. How can I accomplish this? I tried using a counter variable with i++ and i--, but that only works for one forEach iteration.
The function I call manipulates the "product" object I build from the data gathered in the forEach loops.
If I have understood correctly, you want to call a function when reciptsSnap.forEach is complete and all async tasks inside it are also complete.
For achieving this, you can use the index parameter and the original array that Array.prototype.forEach passes to its callback (see the MDN documentation). (Note that Firebase's own DataSnapshot.forEach passes only the child snapshot, so this relies on iterating plain arrays of children.)
The code will be like this:
(Note: the following code keeps the current forEach loop structure. However, rewriting the code with Promises or async/await would be a better and cleaner way to do it.)
var loop1Done = false;
var loop2Done = false;

ref.child("recipts").once("value", function (usersSnap) {
  usersSnap.forEach(function (reciptsSnap) {
    reciptsSnap.forEach(function (reciptSnap, index, colA) {
      const idx = index;
      const col = colA;
      reciptSnap.ref.child("last_recipt").once("value", function (b) {
        b.forEach(function (c, j, colB) { // Here I fill some "product" object
          // Do what you want here

          // Check if this is the last item of the last snapshot for this loop
          if ((j === colB.length - 1) && (idx === col.length - 1)) {
            loop1Done = true;
            // Check if all loops are done
            if (loop1Done && loop2Done) {
              // Call the final callback function
              // e.g. myFinalCallback();
            }
          }
        });
      });
      reciptSnap.forEach(function (b, k, colC) { // Here I fill some "product" object
        // Do what you want here

        // Check if this is the last item of the last snapshot for this loop
        if ((k === colC.length - 1) && (idx === col.length - 1)) {
          loop2Done = true;
          // Check if all loops are done
          if (loop1Done && loop2Done) {
            // Call the final callback function
            // e.g. myFinalCallback();
          }
        }
      });
    });
  });
});
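As the note above suggests, a Promise-based rewrite avoids iteration counting entirely. A minimal sketch, assuming the Firebase SDK in use returns a promise from once('value') when no callback is passed (true of modern firebase and firebase-admin clients):

ref.child("recipts").once("value").then(function (usersSnap) {
  var pending = [];
  usersSnap.forEach(function (reciptsSnap) {
    reciptsSnap.forEach(function (reciptSnap) {
      // Collect one promise per asynchronous read instead of counting iterations.
      pending.push(
        reciptSnap.ref.child("last_recipt").once("value").then(function (b) {
          b.forEach(function (c) { /* Here I fill some "product" object */ });
        })
      );
      reciptSnap.forEach(function (b) { /* Here I fill some "product" object */ });
    });
  });
  // Resolves only after every nested read has completed.
  return Promise.all(pending);
}).then(function () {
  // All loops and async reads are done; call the final function here.
});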
Try:
reciptSnap.child("last_recipt").forEach(function (b) {
  b.forEach(function (c) {
    // Here I fill some "product" object
  });
});
This should work, since all of your data should already have been fetched when you requested the "value" of the recipts node.
If this works, your code is no longer asynchronous, and right after the last forEach you can execute the function you wanted to:
  reciptSnap.forEach(function (b) {
    // Here I fill some "product" object
  });
  // Execute your function here
});

Mongoose default sorting order

Is there a way to specify sorting order on the schema/model level in Mongoose?
I have a Posts model, and I always fetch posts ordered by the createdAt field. Thus, on each query I have to write .sort('-createdAt'). Can I make this order the default for this model?
There is no way in Mongoose to directly define a default sort order on your queries.
If you're doing something over and over again though, you might want to abstract this into a function that does it for you:
function findPostsByDate(cb) {
  Posts.find({}).sort('-createdAt').exec(cb);
}
Or even something more generic than that:
function findXByDate(model, findCriteria, cb) {
  model.find(findCriteria).sort('-createdAt').exec(cb);
}
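For instance, the generic version could be used like this (the criteria object is illustrative):

// Fetch one user's posts, newest first.
findXByDate(Posts, { user_id: currentUser.id }, function (err, posts) {
  if (err) { return console.error(err); }
  console.log(posts.length + ' posts, newest first');
});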
You can achieve this by creating a static method in your schema definition.
Mongoose documentation for Methods and statics here: http://mongoosejs.com/docs/2.7.x/docs/methods-statics.html
Example
In your schema file:
PostSchema.statics.sortedFind = function sortedFind(query, fields, options, cb) {
  // First 3 parameters are optional; clear the ones that actually hold the callback.
  if (arguments.length === 1) {
    cb = query;
    query = fields = options = undefined;
  } else if (arguments.length === 2) {
    cb = fields;
    fields = options = undefined;
  } else if (arguments.length === 3) {
    cb = options;
    options = undefined;
  }
  this.find(query, fields, options).sort('-createdAt').exec(cb);
};
Then you can use:
var query = { user_id: currentUser.id }; // query example, modify according to your needs
Post.sortedFind(query, function (err, response) { /* Your code goes here */ });
This is how I enforce sortable columns and provide a default sort. I copy this code into each model and just supply the allowSortOn array.
postSchema.pre('find', function () {
  if (typeof this.options.sort !== 'undefined') {
    var allowSortOn = ["_id", "createdAt"] // add other allowable sort columns here
      , propCount = 0;
    for (var prop in this.options.sort) {
      if (this.options.sort.hasOwnProperty(prop)) {
        if (allowSortOn.indexOf(prop) === -1) {
          console.log('Invalid sort column ' + prop);
          delete this.options.sort[prop];
        } else {
          propCount++;
        }
      }
    }
    if (propCount === 0) {
      this.options.sort[allowSortOn[1]] = 1;
      console.log('Setting sort column to ' + JSON.stringify(this.options.sort));
    }
  }
})
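Another option in recent Mongoose versions is a query helper, which keeps call sites short without patching every query by hand. A minimal sketch (the byNewest name is illustrative; define it before compiling the model):

// Chainable helper available on every query built from this schema.
PostSchema.query.byNewest = function () {
  return this.sort('-createdAt');
};

// Usage:
// Post.find({ user_id: currentUser.id }).byNewest().exec(function (err, posts) { ... });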
