I'm using Express with Node.js and testing certain routes. I'm following this tutorial: http://coenraets.org/blog/2012/10/creating-a-rest-api-using-node-js-express-and-mongodb/
I'm calling http://localhost:3000/wines via AJAX (the content doesn't matter), but I want to test latency. Can I make Express respond after 2 seconds? (I want to simulate the AJAX loader, and since I'm running on localhost my latency is pretty much nil.)
Use this as middleware, for all of your requests:
app.use(function (req, res, next) { setTimeout(next, 1000); });
Just call res.send inside of a setTimeout:
setTimeout(() => {
  res.send(items);
}, 2000);
To apply it globally to all requests, you can use the following code:
app.use((req, res, next) => {
  setTimeout(next, Math.floor((Math.random() * 2000) + 100));
});
Time values are:
Max = 2000 ms (sort of: the minimum is added, so in reality the maximum is 2100 ms)
Min = 100 ms
Try the connect-pause module. It adds a delay to all routes, or only to some routes, in your app.
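A minimal sketch of how it might be wired in, assuming the module exports a middleware factory that takes a delay in milliseconds (check the package README for the exact signature):
const express = require('express');
const pause = require('connect-pause');

const app = express();

// assumed usage: pause(delayMs) delays every request passing through it
app.use(pause(2000));

// to delay only a specific route instead, mount it on that path:
// app.use('/wines', pause(2000));

app.get('/wines', (req, res) => res.json([]));

app.listen(3000);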
app.get('/fakeDelay', function (req, res) {
  let ms = req.query.t;
  ms = (ms > 5000 || isNaN(ms)) ? 1000 : parseInt(ms);
  setTimeout(() => res.status(200).send({ delay: ms }), ms);
})
Then request the URL as: http://localhost/fakeDelay/?t=2000
(max 5000 ms and a default of 1000 ms in this example)
Update:
Using a Promise. The function 'sleep' can be used for delaying any Express response or other async function.
const sleep = (ms) => new Promise(resolve => setTimeout(resolve, ms));

app.get('/fakeDelay', async (req, res) => {
  await sleep(500);
  res.send([]);
})
Just to add a note on top of maggocnx's solution: put this middleware early (before your route handlers):
app.use(function (req, res, next) { setTimeout(next, 1000); });
You could also just write your own generic delay handler using a Promise or callback (using a q promise in this case):
pause.js:
var q = require('q');

function pause(time) {
  var deferred = q.defer();

  // if the supplied time value is not a number, set it to 0,
  // else use the supplied value
  time = isNaN(time) ? 0 : time;

  // Logging that this function has been called,
  // just in case you forgot about a pause() you added somewhere,
  // and you think your code is just running super-slow :)
  console.log('pause()-ing for ' + time + ' milliseconds');

  setTimeout(function () {
    deferred.resolve();
  }, time);

  return deferred.promise;
}

module.exports = pause;
then use it however you'd like:
server.js:
var pause = require('./pause');

router.get('/items', function (req, res) {
  var items = [];

  pause(2000)
    .then(function () {
      res.send(items);
    });
});
You can use the express-delay package.
The code below will delay all incoming requests by one second.
const app = require('express')();
const delay = require('express-delay');
app.use(delay(1000));
The package also offers the possibility to introduce a random delay within specified boundaries, e.g. by calling delay(1000, 2000) for a delay between one and two seconds.
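For example, a short sketch of that random-delay variant, reusing the same express-delay setup as above (the /wines route is only a placeholder):
const app = require('express')();
const delay = require('express-delay');

// random delay between 1000 and 2000 ms for every request
app.use(delay(1000, 2000));

app.get('/wines', (req, res) => res.json([]));
app.listen(3000);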
In my case I wanted a way to have the same processing time for all of my endpoints.
The solution I found is to override one of the Response methods:
/* MINIMUM DELAY */
const minDelay = 200; /* ms */

app.use((req, res, next) => {
  const tmp = res.json;
  const start = new Date().getTime();

  (res.json as any) = async (body: any) => {
    // wait until at least `minDelay` ms have passed since the request started
    await new Promise((re) => setTimeout(re, minDelay - new Date().getTime() + start));
    tmp.apply(res, [body]);
  };

  next();
});
This way an attacker would not be able to differentiate failed login requests from OK requests just by looking at the response time :)
I want to respond to client requests one by one.
Consider the following code:
let i = 1;

router.post("/test", (req, res) => {
  // Wait for the response to the previous request from another client (if there is one)
  console.log(i)
  setTimeout(function () {
    i++
    return res.json(true);
  }, 10000);
});
I would like to respond to two consecutive requests as follows: at 0 s, log 1; then, after replying to the previous request, at 10 s, log 2.
https://caolan.github.io/async/v3/docs.html#queue
This doesn't handle errors in the callback, but it should give you the idea of the mechanism.
const async = require('async')

var q = async.queue(function (task, callback) {
  task.res.json(true)
  callback();
}, 1); // concurrency 1

let i = 1;

router.post("/test", (req, res) => {
  q.push({ res }, (err) => console.log(i++))
});
Is this what you are looking for?
Good work making a minimal example of your problem, btw.
I'm playing around with sockets for the first time and have created a very simple back end and front end to test with our system at work.
I want the back end to query our server every 10 seconds in this example and pass the results to the front end.
I currently have the interval set to 10 seconds; however, when I run it I only get the result after ten seconds. I want the result straight away, and then to check for changes every ten seconds.
I've tried moving code around to see what works, but I usually get a message telling me the variable is undefined (because it is then obviously outside the function).
My code is below. I am aware it's probably a bit overkill having the setInterval in both the result and the catch, so if anyone can help tidy it up so it works correctly, I'd appreciate it. Still a bit of a noob, I'm afraid!
const express = require("express");
const app = express();
const server = require("http").createServer(app);
const io = require("socket.io")(server);
const oledb = require('oledb');

const smartconn = `--myodbcconnection--`;
const db = oledb.odbcConnection(smartconn);

let command = `SELECT item FROM mytable.table LIMIT 10`;

db.query(command)
  .then(result => {
    setInterval(function () {
      io.emit("query", result.result);
    }, 10000);
  },
  err => {
    setInterval(function () {
      io.emit("query", err);
    }, 10000);
  });

io.set("origins", "*:*");

io.on("connection", async (socket) => {
  console.log("Client Successfully Connected");
});

server.listen(5000, () => {
  console.log("Backend Server is running on http://localhost:5000");
});
I expect results to show immediately. Previously, the old method didn't use sockets and polled from the front end using setInterval, which I want to move away from.
Here's one way to do it:
// Resolves a Promise after ms milliseconds
const sleep = (ms) => {
  return new Promise((resolve, reject) => {
    setTimeout(resolve, ms);
  });
}

// Returns a db.query result with server status
const getServerStatus = async () => {
  return await db.query(`SELECT item FROM mytable.table LIMIT 10`);
}

// Runs indefinitely and emits events to the frontend every 10 seconds
(async () => {
  while (true) {
    try {
      const result = await getServerStatus();
      io.emit("query", result.result);
    } catch (error) {
      io.emit("query", error); // It would be better to change the event name to something else
    }
    await sleep(10000);
  }
})();
This way the first event will be sent immediately because, unlike setInterval, my implementation waits after executing the code, not before. Also, you can be sure that queries will not overlap when they take more than 10 s to execute; setInterval doesn't guarantee that the next execution will wait for the previous one to finish.
The downside is that events will be sent after (10,000 ms + query_delay), so depending on your database size and performance, some of them might get delayed by a few or a few hundred milliseconds. To counter that, you can measure getServerStatus's execution time and subtract it from the wait time, as sketched below.
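A minimal sketch of that adjustment, reusing the sleep and getServerStatus helpers above: measure how long the query took and sleep only for the remainder of the 10-second window.
(async () => {
  while (true) {
    const start = Date.now();
    try {
      const result = await getServerStatus();
      io.emit("query", result.result);
    } catch (error) {
      io.emit("query", error);
    }
    // subtract the query's execution time from the 10 s interval (never below 0)
    const elapsed = Date.now() - start;
    await sleep(Math.max(0, 10000 - elapsed));
  }
})();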
Sorted it by moving the emit functions into the io.on function.
Cheers!
I am calling a 3rd party API in a loop in my Node.js application. Basically I have a list, and I'm iterating through the list and calling the 3rd party API for each item.
The 3rd party API is very slow and cannot handle more than 3 requests. I have been advised to add some delay.
Please can someone advise how to add a delay in this scenario?
var promises = [];
promises = rids.map((rid, j) => {
  // 3rd party API
  // getServiceDetailsApi is wrapper around 3rd party API
  return getServiceDetailsApi(rid)
});
// console.log(promises);

Promise.all(promises)
  .then(res => {
    // console.log('promise complete..' + res.length)
    var responses = [];
    res.map((response, i) => {
      var serviceAttributesDetail = {};
      // console.log(response);
      serviceAttributesDetails = response.data.serviceAttributesDetails;
      serviceAttributesDetail.rid = serviceAttributesDetails.rid;
      responses = responses.concat(serviceAttributesDetail);
    })
    // Add more logic above
    return Promise.all(responses);
  })
If one request at a time is enough, you can try this way:
'use strict';

(async function main() {
  try {
    const responses = [];
    for (const rid of rids) {
      const response = await getServiceDetailsApi(rid);
      responses.push({ rid: response.data.serviceAttributesDetails.rid });
    }
    console.log(responses);
  } catch (err) {
    console.error(err);
  }
})();
If your restriction is about having a maximum of 3 concurrent requests to that API, here is a possibility (untested though, there might be typos, and I didn't think through the rejection handling):
const cfgMaxApiCalls = 3;
...
function getServiceDetailsApi() {...}
...
const rids = ...
...
const promisedResponses = new Promise((generalResolve) => {
  let currentCalls = 0; // to know how many calls are in progress
  const responses = []; // the output of the general promise

  // this chains promises, ensuring we do an API call only when another one has finished
  const consumer = (response) => {
    responses.push(response); // first remember the data

    // stop condition: nothing more to process, and all current calls have resolved
    if (!rids.length && !currentCalls--) {
      return generalResolve(responses);
    }

    // otherwise make a new call since this one's done
    return getServiceDetailsApi(rids.shift()).then(consumer);
  };

  // start the process for a maximum of `cfgMaxApiCalls` concurrent calls
  for (; currentCalls < cfgMaxApiCalls && rids.length; currentCalls++) {
    getServiceDetailsApi(rids.shift()).then(consumer);
  }
});
promisedResponses.then((res) => {
  // here `res` === your code's `res`
  // and by the way, Array.prototype.concat is not asynchronous,
  // so no need to Promise.all(responses) at the end ;)
});
I use Supertest to test my Express apps, but I'm running into a challenge when I want my handlers to do asynchronous processing after the response is sent. Take this code, for example:
const request = require('supertest');
const express = require('express');

const app = express();

app.get('/user', async (req, res) => {
  res.status(200).json({ success: true });
  await someAsyncTaskThatHappensAfterTheResponse();
});

describe('A Simple Test', () => {
  it('should get a valid response', () => {
    return request(app)
      .get('/user')
      .expect(200)
      .then(response => {
        // Test stuff here.
      });
  });
});
If the someAsyncTaskThatHappensAfterTheResponse() call throws an error, then the test here is subject to a race condition: it may or may not fail based on that error. Even aside from error handling, it's also difficult to check for side effects if they happen after the response is sent. Imagine that you wanted to trigger database updates after sending a response: you wouldn't be able to tell from your test when you should expect the updates to have completed. Is there any way to use Supertest to wait until the handler function has finished executing?
This cannot be done easily because Supertest acts like a client and you do not have access to the actual req/res objects in Express (see https://stackoverflow.com/a/26811414/387094).
As a completely hacky workaround, here is what worked for me.
Create a file which houses a callback/promise. For instance, my file test-hack.js looks like so:
let callback = null

export const callbackPromise = () => new Promise((resolve) => {
  callback = resolve
})

export default function callWhenComplete () {
  if (callback) callback('hack complete')
}
When all processing is complete, call the callWhenComplete function. For instance, my middleware looks like so:
import callWhenComplete from './test-hack'

export default function middlewareIpnMyo () {
  return async function route (req, res, next) {
    res.status(200)
    res.send()

    // async logic
    callWhenComplete()
  }
}
And finally, in your test, await the callbackPromise like so:
import { callbackPromise } from 'test-hack'

describe('POST /someHack', () => {
  it.only('should handle a post request', async () => {
    const response = await request
      .post('/someHack')
      .send({ soMuch: 'hackery' })
      .expect(200)

    const result = await callbackPromise()
    // anything below this is executed after callWhenComplete() is
    // executed from the route
  })
})
Inspired by #travis-stevens, here is a slightly different solution that uses setInterval so you can be sure the promise is set up before you make your supertest call. This also allows tracking requests by id in case you want to use the library for many tests without collisions.
const backgroundResult = {};

export function backgroundListener(id, ms = 1000) {
  backgroundResult[id] = false;

  return new Promise(resolve => {
    // set up interval
    const interval = setInterval(isComplete, ms);

    // completion logic
    function isComplete() {
      if (false !== backgroundResult[id]) {
        resolve(backgroundResult[id]);
        delete backgroundResult[id];
        clearInterval(interval);
      }
    }
  });
}

export function backgroundComplete(id, result = true) {
  if (id in backgroundResult) {
    backgroundResult[id] = result;
  }
}
Make a call to get the listener promise BEFORE your supertest.request() call (in this case, using agent).
it('should respond with a 200 but background error for failed async', async function () {
  const agent = supertest.agent(app);
  const trackingId = 'jds934894d34kdkd';
  const bgListener = background.backgroundListener(trackingId);

  // post something but include tracking id
  await agent
    .post('/v1/user')
    .field('testTrackingId', trackingId)
    .field('name', 'Bob Smith')
    .expect(200);

  // execute the promise which waits for the completion function to run
  const backgroundError = await bgListener;

  // should have received an error
  assert.equal(backgroundError instanceof Error, true);
});
Your controller should expect the tracking id and pass it to the complete function at the end of the controller's backgrounded processing. Passing an error as the second value is one way to check the result later, but you can just pass false or whatever you like.
// if background task(s) were successful, promise in test will return true
backgroundComplete(testTrackingId);
// if not successful, promise in test will return this error object
backgroundComplete(testTrackingId, new Error('Failed'));
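For context, a rough sketch of what such a controller could look like (the route, field names, and someBackgroundTask helper are hypothetical, and it assumes a body/multipart parser has populated req.body with the testTrackingId field sent by the test):
import { backgroundComplete } from './background';

app.post('/v1/user', async (req, res) => {
  const trackingId = req.body.testTrackingId; // sent by the test via .field()

  // respond to the client immediately
  res.status(200).json({ success: true });

  // then run the backgrounded work and report its outcome to the waiting test
  try {
    await someBackgroundTask(req.body); // hypothetical async work
    backgroundComplete(trackingId);
  } catch (err) {
    backgroundComplete(trackingId, err);
  }
});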
If anyone has any comments or improvements, that would be appreciated :)
I'm using axios to make requests to the Deezer API. Unfortunately, with Deezer's API when you request an artist's albums it does not include album tracks. So, I am working around this by requesting the artist's albums and then performing a subsequent axios request for each album. The problem I'm running into is that the API limits requests to 50 per 5 seconds. If an artist has more than 50 albums I usually get a "quota exceeded" error. Is there a way to throttle axios requests to 50 per 5 seconds, specifically when using axios.all?
var axios = require('axios');

function getAlbums(artistID) {
  axios.get(`https://api.deezer.com/artist/${artistID}/albums`)
    .then((albums) => {
      const urls = albums.data.data.map((album) => {
        return axios.get(`https://api.deezer.com/album/${album.id}`)
          .then(albumInfo => albumInfo.data);
      });
      axios.all(urls)
        .then((allAlbums) => {
          console.log(allAlbums);
        });
    }).catch((err) => {
      console.log(err);
    });
}

getAlbums(413);
First of all, let's see what you really need. Your goal here is to make requests at most once every 100 milliseconds if you have a large number of albums. (Using axios.all for this is no different from using Promise.all; you just want to wait for all of the requests to complete.)
Now, with axios you have the interceptor API, allowing you to plug your logic in before requests. So you can use an interceptor like this:
function scheduleRequests(axiosInstance, intervalMs) {
  let lastInvocationTime = undefined;

  const scheduler = (config) => {
    const now = Date.now();
    if (lastInvocationTime) {
      lastInvocationTime += intervalMs;
      const waitPeriodForThisRequest = lastInvocationTime - now;
      if (waitPeriodForThisRequest > 0) {
        return new Promise((resolve) => {
          setTimeout(
            () => resolve(config),
            waitPeriodForThisRequest);
        });
      }
    }

    lastInvocationTime = now;
    return config;
  }

  axiosInstance.interceptors.request.use(scheduler);
}
What it does is time the requests so they are performed at intervals of intervalMs milliseconds.
In your code:
function getAlbums(artistID) {
  const deezerService = axios.create({ baseURL: 'https://api.deezer.com' });
  scheduleRequests(deezerService, 100);

  deezerService.get(`/artist/${artistID}/albums`)
    .then((albums) => {
      const urlRequests = albums.data.data.map(
        (album) => deezerService
          .get(`/album/${album.id}`)
          .then(albumInfo => albumInfo.data));

      // you need to 'return' here, otherwise any error in the album
      // requests will not propagate to the final 'catch':
      return axios.all(urlRequests).then(console.log);
    })
    .catch(console.log);
}
This is, however, a simplistic approach; in your case you would probably like to receive the results as fast as possible when there are fewer than 50 requests. For that, you have to add some kind of counter inside the scheduler which counts the number of requests and delays their execution based on both the interval and the counter, as sketched below.
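A rough sketch of that idea, assuming the limit is 50 requests per rolling 5-second window (the name scheduleRequestsWithBurst and the exact bookkeeping are made up for illustration): requests are released immediately while fewer than maxRequests have gone out in the current window, and are delayed only once the window is full.
function scheduleRequestsWithBurst(axiosInstance, maxRequests = 50, windowMs = 5000) {
  let timestamps = []; // release times of requests already scheduled

  axiosInstance.interceptors.request.use((config) => {
    const now = Date.now();
    // drop entries that have already left the rolling window
    timestamps = timestamps.filter((t) => now - t < windowMs);

    if (timestamps.length < maxRequests) {
      // under the limit: release immediately
      timestamps.push(now);
      return config;
    }

    // over the limit: release when the request `maxRequests` places back leaves the window
    const releaseAt = timestamps[timestamps.length - maxRequests] + windowMs;
    timestamps.push(releaseAt);
    return new Promise((resolve) => setTimeout(() => resolve(config), releaseAt - now));
  });
}
It would be applied the same way as scheduleRequests above, e.g. scheduleRequestsWithBurst(deezerService, 50, 5000).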
Here is my solution using a simple async setTimeout / ES6 code. You can set up the delay via the sleep function's parameter:
const sleep = (delay) => {
  return new Promise(function (resolve) {
    setTimeout(resolve, delay);
  });
}

axios.interceptors.response.use(async function (response) {
  await sleep(3000)
  return response;
}, function (error) {
  // Do something with the response error
  console.error(error)
  return Promise.reject(error);
});
There is also this npm package you can use:
https://www.npmjs.com/package/axios-request-throttle
There is a module that worked for me out of the box with a NestJS application:
"Throttle axios request-per-second rate with 3 lines of code. The main difference between this module and others like axios-throttled is that you don't have to create a new axios instance, and by extension you don't have to fix imports project-wide. Apply it once and every axios.get, post, put, delete etc. is throttled."
axios-request-throttle
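For reference, a minimal sketch of applying it, based on the package README at the time of writing (check its docs for the exact API):
const axios = require('axios');
const axiosThrottle = require('axios-request-throttle');

// apply once; every request made through this axios instance is then rate-limited
axiosThrottle.use(axios, { requestsPerSecond: 5 });

// e.g. axios.get(`https://api.deezer.com/album/${albumId}`) is now throttled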