set delay dynamic on retryWhen - node.js

Is it possible to set the delay value dynamically after every retry? I tried it like this, but it looks like it keeps the value that was set initially.
imageController(epgData: EpgDataDTO[], showOrMovie: string){
    var retryAfterMilliSeconds = 1000;
    epgData.forEach( (data) => {
        this.getImagesFromMovieDB(data.title).pipe(
            retryWhen((error) => {
                return error.pipe(
                    mergeMap((error: any) => {
                        if(error.response.status === 429) {
                            const retryAfter = error.response.headers;
                            retryAfterMilliSeconds = +retryAfter['retry-after'] * 1000
                            console.log(retryAfterMilliSeconds); // it tells me the correct value here but the retry happens every 1000ms
                            console.log(data.title);
                        }else{
                            this.errorHandling(error)
                            return of("error");
                        }
                        return of("error");
                    }),
                    delay(retryAfterMilliSeconds),
                    take(5)
                )
            }))
            .subscribe( (res) => {
                console.log(res.status);
                console.log(res.headers);
            });
    })
}

You were VERY close! To get this to work, all I had to do was move the delay(retryAfterMilliSeconds) onto the observable returned from mergeMap(), so that a fresh delay is built from the current value on every retry. As a standalone operator in the outer pipe, delay(retryAfterMilliSeconds) captures the variable once, when the pipe is assembled, which is why every retry kept using the initial 1000 ms even though the variable had been updated.
I put this up in a StackBlitz to test it. Click on 'Console' at the bottom of the far right frame to see the results.
Here is the function from that StackBlitz:
imageController(epgData: EpgDataDTO[], showOrMovie: string){
    var retryAfterMilliSeconds = 1000;
    const startTime = (new Date()).getTime(); // used below to report the total elapsed time
    epgData.forEach( (data) => {
        this.getImagesFromMovieDB(data.title).pipe(
            retryWhen((error) => {
                return error.pipe(
                    mergeMap((error: any) => {
                        if(error.response.status === 429) {
                            const retryAfter = error.response.headers;
                            retryAfterMilliSeconds = +retryAfter['retry-after'] * 1000
                            console.log(retryAfterMilliSeconds); // logs the updated retry-after value in milliseconds
                            console.log(data.title);
                        }else{
                            this.errorHandling(error)
                            // return of("error"); <-- unnecessary since this will be executed with next statement
                        }
                        return of("error").pipe(delay(retryAfterMilliSeconds));
                    }),
                    // delay(retryAfterMilliSeconds),
                    take(5)
                )
            }))
            .subscribe(
                (res) => {
                    // console.log(res.status);
                    // console.log(res.headers);
                    const elapsedTime = Math.round(((new Date()).getTime() - startTime) / 1000);
                    console.log(`'${res.status}' is Ok - total elapsed time ${elapsedTime} seconds`);
                }
            );
    })
}
Some other notes:
The return value from getImagesFromMovieDB() is actually important - it needs to return a unique observable for every call for this to work, so please ensure that is the case. I simulated this in the StackBlitz by constructing the returned Observable with a delay.
As you can see I changed the first function inside the .subscribe() to print out the total elapsed time taken to get valid data for this res.status. I did this only to show for each emission that it correctly takes the sum of all delays.
In the StackBlitz, each failure is retried after a random delay (I arbitrarily chose between 5 and 10 seconds), standing in for the value returned by the response header in your original function.
Minor point: you had two return of("error") statements inside mergeMap(), but the first is unnecessary since the second one runs immediately anyway, so I commented it out.
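As a side note (my own sketch, not part of the answer above): instead of mutating a shared retryAfterMilliSeconds variable, you can derive the delay from each error directly with timer() inside mergeMap(), which avoids the stale-capture problem entirely. This assumes the same error.response shape as in your code:
import { timer, throwError } from 'rxjs';
import { retryWhen, mergeMap } from 'rxjs/operators';

// replaces the retryWhen(...) block inside the pipe
retryWhen((errors) => errors.pipe(
    mergeMap((error: any, attempt) => {
        // attempt is 0-based; on the 5th error give up instead of retrying
        // (the original used take(5), which completes silently instead of erroring)
        if (attempt >= 4) {
            return throwError(error);
        }
        if (error.response && error.response.status === 429) {
            const waitMs = +error.response.headers['retry-after'] * 1000;
            return timer(waitMs); // the emission from timer() is what triggers the retry
        }
        return throwError(error); // any other error: do not retry
    })
))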
I hope this helps.

Related

If any Promise is rejected, how do you count the number of success in Promise.all?

I'm currently trying to figure out if it is possible to do something like this:
async function(x) {
    ...
}

try {
    await Promise.all([function(1), function(2), function(3), ...]);
} catch (err) {
    // Count number of successful Promises resolved at the point when one is rejected
    return statistics(num_success);
}
Is this possible? The reason I am trying to do this in the catch block is so that I can terminate the function immediately if any promise rejects.
If it is possible to do so, how can I achieve this?
If you'd like all of your promises to run until they're settled (fulfilled or rejected), you can do so with Promise.allSettled as in Raphael PICCOLO's comment and SrHenry's answer.
However, to preserve Promise.all's behavior of reacting to a rejection immediately, you'll need some additional custom behavior. This sounds like a good reason to wrap Promise.all.
/**
 * Receives an array, like Promise.all, and runs until the first rejection.
 *
 * Unlike Promise.all, but like Promise.allSettled, this will always resolve
 * to an object; however, like Promise.all, this will resolve as soon as it
 * encounters the first rejection.
 *
 * Returns a promise that resolves to an object with these properties:
 *   success: boolean, whether Promise.all would have succeeded
 *   count:   number, number of resolved Promises at the time of return
 *   results: Array, result array containing all resolved Promises/values
 *   error:   any, the reject value that caused this to fail, or null
 */
function allWithProgress(arrayOfPromises) {
    const results = new Array(arrayOfPromises.length);
    let count = 0;

    /**
     * Given an input to Promise.resolve, increments results+count
     * when complete.
     */
    function wrap(valueOrThenable, index) {
        return Promise.resolve(valueOrThenable).then(x => {
            results[index] = x;
            count++;
            return x;
        });
    }

    // slice(0) prevents the results array from being modified.
    // You could also add a condition check that prevents `wrap` from
    // modifying the results after it returns.
    return Promise
        .all(arrayOfPromises.map(wrap)) // or "(e, i) => wrap(e, i)"
        .then(x => ({success: true, count, results: results.slice(0), error: null}))
        .catch(e => ({success: false, count, results: results.slice(0), error: e}));
}
// Test harness below
function timeoutPromise(string, timeoutMs) {
    console.log("Promise created: " + string + " - " + timeoutMs + "ms");
    return new Promise(function(resolve, reject) {
        window.setTimeout(function() {
            console.log("Promise resolved: " + string + " - " + timeoutMs + "ms");
            resolve(string);
        }, timeoutMs);
    });
}

Promise.resolve().then(() => {
    // success
    return allWithProgress([
        timeoutPromise("s1", 1000),
        timeoutPromise("s2", 2000),
        timeoutPromise("s3", 3000),
        "not a promise"
    ]).then(console.log);
}).then(() => {
    // failure
    return allWithProgress([
        timeoutPromise("f1", 1000),
        timeoutPromise("f2", 2000)
            // rejects with a String for Stack Snippets; use an Error in real code
            .then(() => Promise.reject("f2 failed")),
        timeoutPromise("f3", 3000),
        "not a promise"
    ]).then(console.log);
});
Note that ES6 Promises can't be canceled or rolled back in any standard way, so in the test case f3 is not prevented from completing. If it is important to stop those promises in flight, you'll need to write that logic yourself.
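For instance (my own sketch, not part of the original answer), you could thread an AbortController signal through the work you start, so that whatever is still pending can stop once the first rejection lands. The abortableTimeout helper here is hypothetical:
function abortableTimeout(value, ms, signal) {
    return new Promise((resolve, reject) => {
        const id = setTimeout(() => resolve(value), ms);
        // on abort, cancel the timer and reject
        signal.addEventListener('abort', () => {
            clearTimeout(id);
            reject(new Error('aborted'));
        }, { once: true });
    });
}

const controller = new AbortController();
allWithProgress([
    abortableTimeout("a1", 1000, controller.signal),
    abortableTimeout("a2", 2000, controller.signal).then(() => Promise.reject(new Error("a2 failed"))),
    abortableTimeout("a3", 3000, controller.signal)
]).then(result => {
    if (!result.success) {
        controller.abort(); // stop whatever is still in flight
    }
    console.log(result);
});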
As Raphael PICCOLO commented, you can use Promise.allSettled(promises), which takes an array (or any iterable) of promises. It returns a promise of an array of objects, each with a status property set to "fulfilled" or "rejected" according to how the corresponding promise settled. A value property is present if the promise fulfilled; if it rejected, a reason property is present instead.
Example code:
//...
const promises = []; // Array with promises.
let count = 0;

Promise.allSettled(promises)
    .then(settled => {
        settled.forEach(({ status, value, reason }) => {
            if (status === 'fulfilled')
                count++;
        })
    })
    .then(() => statistics(count));
//...
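Equivalently (just a compact variant of the same idea, inside an async function):
const settled = await Promise.allSettled(promises);
return statistics(settled.filter(({ status }) => status === 'fulfilled').length);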

await for Lock() on stateless action

Problem:
The front-end page makes x parallel requests (let's call it the first group);
the next group (another x requests) comes 5 seconds later. The first request of the first group sets the cache from the DB.
The other x-1 requests get an empty array instead of waiting for the first request to finish its job.
The second group and all subsequent requests get proper data from the cache.
What is the best practice for blocking the other requests until the first one is done (or has failed), in a stateless mechanism?
EDIT:
The cache module allows using a trigger when the cache is set, but that doesn't work here since the mechanism is stateless.
const GetDataFromDB = async (req, res, next) => {
    var cachedTableName = undefined;
    // "lockFlag" is used to prevent parallel requests from entering the critical section
    // (because it takes time to set the cache from the DB);
    // "lockFlag" itself is a short-lived entry in the cache.
    //
    if ( !myCache.has( "lockFlag" ) && !myCache.has( "dbtable" ) ){
        // only the first request of the first group arrives here;
        // the other x-1 requests of the first group go to the next condition.
        // Here I would like to build a mechanism to wait until the first request comes back from the DB (initializes the cache).
        myCache.set( "lockFlag", "1" )
        const connection1 = await odbc.connect(connectionConfig);
        cachedTableName = await connection1.query(`select * from ${tableName}`);
        if(cachedTableName.length){
            const success = myCache.set([
                {key: "dbtable", val: cachedTableName, ttl: 180},
            ])
            if(success)
            {
                cachedTableName = myCache.get( "dbtable" );
            }
        }
        myCache.take("lockFlag");
        connection1.close();
        return res.status(200).json(cachedTableName); // used for the first response.
    }
    // here come the x-1 requests of the first group; they get nothing, because the cache is not set yet
    //
    if ( myCache.has( "dbtable" ) ){
        cachedTableName = myCache.get( "dbtable" );
    }
    return res.status(200).json(cachedTableName);
}
You can try the approach given here, with minor modifications to apply it for your case.
For brevity, I removed comments and shortened variables names.
Code, then explanation:
const EventEmitter = require('events');
const bus = new EventEmitter();

const getDataFromDB = async (req, res, next) => {
    var table = undefined;
    if (myCache.has("lockFlag")) {
        await new Promise(resolve => bus.once("unlocked", resolve));
    }
    if (myCache.has("dbtable")) {
        table = myCache.get("dbtable");
    }
    else {
        myCache.set("lockFlag", "1");
        const connection = await odbc.connect(connectionConfig);
        table = await connection.query(`select * from ${tableName}`);
        connection.close();
        if (table.length) {
            const success = myCache.set([
                { key: "dbtable", val: table, ttl: 180 },
            ]);
        }
        myCache.take("lockFlag");
        bus.emit("unlocked");
    }
    return res.status(200).json(table);
}
This is how it should work:
At first, lockFlag is not present.
Then, some code calls getDataFromDB. That code evaluates the first if block to false, so it continues: it sets lockFlag to true ("1"), then goes on to retrieve the table data from db. In the meantime:
Some other code calls getDataFromDB. That code, however, evaluates the first if block to true, so it awaits on the promise, until an unlocked event will be emitted.
Back to the first calling code: It finishes its logic, caches the table data, sets lockFlag back to false, emits an unlocked event, and returns.
The other code can now continue its execution: it evaluates the second if to true, so it takes the table from the cache, and returns.
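One caveat worth adding (my own note, not part of the answer above): if the DB call throws, lockFlag is never taken back and the waiters never receive the unlocked event, so they hang. Wrapping the critical section in try/finally guards against that, using the same myCache/odbc objects as above:
else {
    myCache.set("lockFlag", "1");
    try {
        const connection = await odbc.connect(connectionConfig);
        try {
            table = await connection.query(`select * from ${tableName}`);
        } finally {
            connection.close();
        }
        if (table.length) {
            myCache.set([{ key: "dbtable", val: table, ttl: 180 }]);
        }
    } finally {
        // always release the lock and wake the waiters, even on error
        myCache.take("lockFlag");
        bus.emit("unlocked");
    }
}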
As a workaround I added a "finally" block to remove the lock key from the cache after the first initialization, plus this:
while(myCache.has( "lockFlag" )){
    await wait(1500);
}
And the "wait" function:
function wait(milliseconds) {
    return new Promise(resolve => setTimeout(resolve, milliseconds))
}
(source)
This works, but there can still be a window (up to 1500 ms) where the cache is already populated and the waiting request isn't aware of it yet.
I'd be happy for a better solution.

Waiting in a while loop on an async function (Node.js/ES6)

I'm writing a Windows Node.js server app (using ES6 btw).
The first thing I want to do - in the top-level code - is sit in a while loop, calling an async function which searches for a particular registry key/value. This function is 'proven' - it returns the value data if found, or else throws:
async GetRegValue(): Promise<string> { ... }
I need to sit in a while loop until the registry item exists, and then grab the value data. (With a delay between retries).
I think I know how to wait for an async call to complete (one way or the other) before progressing with the rest of the start-up, but I can't figure out how to sit in a loop waiting for it to succeed.
Any advice please on how to achieve this?
(I'm fairly new to typescript, and still struggling to get my head round all async/await scenarios!)
Thanks
EDIT
Thanks guys. I know I was 'vague' about my code - I didn't want to put up my real/pseudo code attempts, since they have probably all overlooked the points you can hopefully help me understand.
So I just kept it as a textual description... I'll try though:
async GetRegValue(): Promise<string> {
    const val: RegistryItem = await this.GetKeyValue(this.KEY_SW, this.VAL_CONN);
    return val.value
}

private async GetKeyValue(key: string, name: string): Promise<RegistryItem> {
    return await new Promise((resolve, reject) => {
        new this.Registry({
            hive: this.Hive, key
        }).get(name, (err, items) => {
            if (err) {
                reject(new Error('Registry get failed'));
            }
            else {
                resolve( items );
            }
        });
    })
    .catch(err => { throw err });
}
So I want to do something like:
let keyObtained = false
let val
while (keyObtained == false)
{
    // Call GetRegValue until val returned, in which case break from loop
    // If exception then pause (e.g. ~100ms), then loop again
}

// Don't execute here till while loop has exited
// Then use 'val' for the subsequent statements
As I say, GetRegValue() works fine in other places I use it, but here I'm trying to pause further execution (and retry) until it does come back with a value
You can probably just use recursion. Here is an example of how you can keep calling the GetRegValue function until it resolves, using the retryReg function below.
If the catch case is hit, it will just call GetRegValue over and over until it resolves successfully.
You should add a counter in the catch() so that after x attempts you give up (a sketch of that follows the code below).
Keep in mind I mocked the whole GetRegValue function, but given what you stated this would still work for you.
let test = 0;

function GetRegValue() {
    return new Promise((resolve, reject) => {
        setTimeout(function() {
            test++;
            if (test === 4) {
                return resolve({
                    reg: "reg value"
                });
            }
            reject({
                msg: "not ready"
            });
        }, 1000);
    });
}

function retryReg() {
    GetRegValue()
        .then(registryObj => {
            console.log(`got registry obj: ${JSON.stringify(registryObj)}`)
        })
        .catch(fail => {
            console.log(`registry object is not ready: ${JSON.stringify(fail)}`);
            retryReg();
        });
}

retryReg();
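A minimal sketch of the give-up counter mentioned above (my addition, reusing the same mocked GetRegValue):
const MAX_ATTEMPTS = 5;

function retryRegWithLimit(attempt = 1) {
    GetRegValue()
        .then(registryObj => {
            console.log(`got registry obj: ${JSON.stringify(registryObj)}`);
        })
        .catch(fail => {
            if (attempt >= MAX_ATTEMPTS) {
                console.log(`giving up after ${attempt} attempts`);
                return;
            }
            console.log(`attempt ${attempt} failed, retrying...`);
            retryRegWithLimit(attempt + 1);
        });
}

retryRegWithLimit();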
I don't see why you need this line:
.catch(err => { throw err });
The loop condition of while isn't much use in this case, as you don't really need a state variable or expression to determine if the loop should continue:
let val;
while (true)
{
    try {
        val = await GetRegValue(/* args */);
        break;
    } catch (x) {
        console.log(x); // or something better
    }
    await delay(100);
}
If the assignment to val succeeds, we make it to the break; statement and so we leave the loop successfully. Otherwise we jump to the catch block and log the error, wait 100 ms and try again.
It might be better to use a for loop so you can set a sensible limit on how many times to retry (see the sketch below).
Note that delay is available in an npm package of the same name. It's roughly the same as:
await new Promise(res => setTimeout(res, 100));
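A bounded version of the same loop might look like this (my sketch, using the inline delay above; like the original it assumes it runs inside an async function):
const MAX_TRIES = 50;
let val;
for (let attempt = 1; attempt <= MAX_TRIES; attempt++) {
    try {
        val = await GetRegValue(/* args */);
        break;
    } catch (x) {
        console.log(`attempt ${attempt} failed:`, x);
    }
    await new Promise(res => setTimeout(res, 100));
}
if (val === undefined) {
    throw new Error(`registry value not found after ${MAX_TRIES} attempts`);
}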

How to stop rethink database call if cannot find result?

I run a RethinkDB command with a Node.js (Babel) asynchronous call:
let user = await r.table('users').filter({key: key}).limit(1).run();
How can I stop the asynchronous call, if database cannot find result?
Using the await keyword means that node will wait for the asynchronous r.table(... command to return before continuing to the next line of code, meaning that it behaves logically as if it were synchronous code.
Your specific command should return when RethinkDB finds the first 'user' document with the specified key. There is no need to "stop" it if it cannot find a result, it will stop as soon as it (a) finds a result or (b) finished scanning the entire table.
In general "stopping" asynchronous code in node/javascript is not possible but you can limit the amount of time you'll wait for an async method. Here is an example using the Promise.race() function.
/*
 * toy async function
 *
 * returns a promise that resolves to the specified number `n`
 * after the specified number of seconds `s` (default 2)
 */
const later = (n, s=2) => {
    return new Promise(resolve => {
        setTimeout(() => resolve(n), s*1000);
    })
}

/*
 * returns a promise that rejects with `TIMEOUT_ERROR` after the
 * specified number of seconds `s`
 */
const timeout = (s) => {
    return new Promise((resolve, reject) => {
        setTimeout(() => reject("TIMEOUT_ERROR"), s*1000)
    })
}

/*
 * Example 1: later finished before timeout
 * later resolves after 1 second, timeout function rejects after 3 seconds
 * so we end up in the `.then` block with `val == 42`
 */
Promise.race([later(42, 1), timeout(3)])
    .then(val => {
        // do something with val...
        console.log(val)
    }).catch(err => {
        if (err === "TIMEOUT_ERROR") {
            console.log("we timed out!")
        } else {
            console.log("something failed (but it was not a timeout)")
        }
    });

/*
 * Example 2 (using async/await syntax): we timeout before later returns.
 * later resolves after 3 seconds, timeout function rejects after 2 seconds
 * so we end up in the `.catch` block with `err == "TIMEOUT_ERROR"`
 */
try {
    const val = await Promise.race([later(11, 3), timeout(2)]);
    // do something with val...
} catch (err) {
    if (err === "TIMEOUT_ERROR") {
        console.error("we timed out!")
    } else {
        console.error("something failed (but it was not a timeout)")
    }
}
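Tying this back to the original query (my sketch, assuming the timeout helper above, an async context, and r/key set up as in the question):
let user;
try {
    // give up if RethinkDB takes longer than 5 seconds to answer
    user = await Promise.race([
        r.table('users').filter({key: key}).limit(1).run(),
        timeout(5)
    ]);
} catch (err) {
    if (err === "TIMEOUT_ERROR") {
        console.error("users lookup timed out");
    } else {
        throw err;
    }
}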

Node socket.io, anything to prevent flooding?

How can I prevent someone from simply doing
while(true){client.emit('i am spammer', true)};
This sure proves to be a problem when someone has the urge to crash my node server!
Like tsrurzl said, you need to implement a rate limiter (throttling sockets).
The following code example only works reliably if your socket returns a Buffer (instead of a string). The code example assumes that you will first call addRatingEntry(), and then call evalRating() immediately afterwards. Otherwise you risk a memory leak in the case where evalRating() doesn't get called at all or too late.
var rating, limit, interval;

rating = [];      // rating: [*{'timestamp', 'size'}]
limit = 1048576;  // limit: maximum number of bytes/characters.
interval = 1000;  // interval: interval in milliseconds.
// Describes a rate limit of 1mb/s

function addRatingEntry (size) {
    // Returns entry object.
    return rating[rating.push({
        'timestamp': Date.now(),
        'size': size
    }) - 1];
}

function evalRating () {
    // Removes outdated entries, computes combined size, and compares with limit variable.
    // Returns true if your connection is NOT flooding, returns false if you need to disconnect.
    var i, newRating, totalSize;
    // totalSize in bytes in case of underlying Buffer value, in number of characters for strings.
    // Actual byte size in case of strings might be variable => not reliable.
    newRating = [];
    for (i = rating.length - 1; i >= 0; i -= 1) {
        if ((Date.now() - rating[i].timestamp) < interval) {
            newRating.push(rating[i]);
        }
    }
    rating = newRating;
    totalSize = 0;
    for (i = newRating.length - 1; i >= 0; i -= 1) {
        totalSize += newRating[i].size;
    }
    return (totalSize > limit ? false : true);
}

// Assume connection variable already exists and has a readable stream interface
connection.on('data', function (chunk) {
    addRatingEntry(chunk.length);
    if (evalRating()) {
        // Continue processing chunk.
    } else {
        // Disconnect due to flooding.
    }
});
You can add extra checks, like checking whether or not the size parameter really is a number etc.
Addendum: Make sure the rating, limit and interval variables are enclosed (in a closure) per connection, and that they don't define a global rate (where each connection manipulates the same rating); one way to do that is sketched below.
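A minimal per-connection closure might look like this (my own sketch, assuming a plain connection object with a readable-stream 'data' event as in the snippet above; the createRater name is mine):
function createRater(limit, interval) {
    let rating = []; // entries local to this connection only

    return function withinLimit(size) {
        const now = Date.now();
        rating.push({ timestamp: now, size: size });
        // keep only the entries inside the current time window
        rating = rating.filter(entry => (now - entry.timestamp) < interval);
        const totalSize = rating.reduce((sum, entry) => sum + entry.size, 0);
        return totalSize <= limit; // true = ok, false = flooding
    };
}

// one rater per connection, so connections don't share a global rate
server.on('connection', (connection) => {
    const withinLimit = createRater(1048576, 1000); // 1 MB per second
    connection.on('data', (chunk) => {
        if (!withinLimit(chunk.length)) {
            connection.destroy(); // disconnect due to flooding
        }
    });
});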
I implemented a little flood function, not perfect (see improvements below), but it will disconnect a user when he makes too many requests.
// No more than 100 requests in 10 seconds
let FLOOD_TIME = 10000;
let FLOOD_MAX = 100;

let flood = {
    floods: {},
    lastFloodClear: new Date(),
    protect: (io, socket) => {
        // Reset flood protection
        if( Math.abs( new Date() - flood.lastFloodClear) > FLOOD_TIME ){
            flood.floods = {};
            flood.lastFloodClear = new Date();
        }

        if (flood.floods[socket.id] == undefined) flood.floods[socket.id] = {};
        if (flood.floods[socket.id].count == undefined) flood.floods[socket.id].count = 0;
        flood.floods[socket.id].count++;

        // Disconnect the socket if it went over FLOOD_MAX within FLOOD_TIME
        if( flood.floods[socket.id].count > FLOOD_MAX){
            console.log('FLOODPROTECTION ', socket.id)
            io.sockets.connected[socket.id].disconnect();
            return false;
        }
        return true;
    }
}

exports = module.exports = flood;
And then use it like this:
let flood = require('../modules/flood')

// ... init socket io...

socket.on('message', function () {
    if(flood.protect(io, socket)){
        //do stuff
    }
});
Improvements would be to store, next to the count, how often a user got disconnected, then build a ban list from that and not let him connect anymore. Also, when a user refreshes the page he gets a new socket.id, so you may want to use a unique cookie value here instead of the socket.id.
Here is a simple rate-limiter-flexible package example.
const app = require('http').createServer();
const io = require('socket.io')(app);
const { RateLimiterMemory } = require('rate-limiter-flexible');

app.listen(3000);

const rateLimiter = new RateLimiterMemory(
    {
        points: 5,   // 5 points
        duration: 1, // per second
    });

io.on('connection', (socket) => {
    socket.on('bcast', async (data) => {
        try {
            await rateLimiter.consume(socket.handshake.address); // consume 1 point per event from IP
            socket.emit('news', { 'data': data });
            socket.broadcast.emit('news', { 'data': data });
        } catch(rejRes) {
            // no available points to consume
            // emit error or warning message
            socket.emit('blocked', { 'retry-ms': rejRes.msBeforeNext });
        }
    });
});
Read more in the official docs.
