const express = require("express");

const REFRESH_INTERVAL = 1000;
const app = express();

const recurringSetTimeout = (id) => {
  setTimeout(
    handle = async () => {
      console.log(`Starting setTimeout${id}`);
      setTimeout(handle, REFRESH_INTERVAL);
    },
    REFRESH_INTERVAL,
  );
};

const main = () => {
  app.get("/t1", async (req, res) => {
    const msg = "Starting timeout 1...";
    console.log(msg);
    recurringSetTimeout(1);
    res.send(msg);
  });

  app.get("/t2", async (req, res) => {
    const msg = "Starting timeout 2...";
    console.log(msg);
    recurringSetTimeout(2);
    res.send(msg);
  });

  app.listen(3000, () => {
    console.log("Server is running...");
  });
};

main();
I have this code that should run two different setTimeouts from two route calls; t1 runs first and t2 runs second. After calling t1 I get the correct logs: "Starting setTimeout1", repeated every second. But when I then call t2, I expect to see "Starting setTimeout2" as well as "Starting setTimeout1" from the previous route call. Instead, setTimeout2 seems to somehow override setTimeout1: I only get "Starting setTimeout2", but two of those logs per second instead of one. So setTimeout1 is still firing, but its callback appears to have been overwritten by setTimeout2's.
If I use setInterval instead of setTimeout it works fine, but I want to understand this behaviour of setTimeout. Can someone please explain? Thanks!
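(A note on the behaviour described above, not part of the original question: handle = async () => { ... } has no const/let, so both route calls assign to the same implicit global handle, and once /t2 runs, timer 1's callback ends up rescheduling timer 2's handler. A minimal sketch of a version where each timer keeps its own callback, assuming that is indeed the cause:

// Sketch: declare the callback locally so each recurring timer
// reschedules its own handler instead of a shared implicit global.
const recurringSetTimeout = (id) => {
  const handle = () => {
    console.log(`Starting setTimeout${id}`);
    setTimeout(handle, REFRESH_INTERVAL);
  };
  setTimeout(handle, REFRESH_INTERVAL);
};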
I have a route in my Express app which calls some logic on an interval:
export const get = (req, res) => {
  const { ids } = req.query;
  let interval = setInterval(() => {
    let id = ids.pop();
    console.log("On id #:", id);
    // do some stuff with the id
    if (!ids.length) {
      clearInterval(interval);
    }
  }, 1000);
  interval.unref();
  res.json();
};
So if you hit the route that calls this function, the code runs every 1 second and prints the console statement. This is all within an Express server. If the process runs to completion, great. However, I notice that even if I ctrl-c out of the server's running process before the interval loop is done, the terminal continues printing the id every 1 second until it's done.
I thought the .unref() method is supposed to prevent this, but it has no effect. I also tried this:
["exit", "uncaughtException", "SIGINT", "SIGTERM"]
.forEach(signal => {
process.on(signal, () => {
clearInterval(interval);
});
});
This also seems to have no effect. How can I make sure that any running intervals are stopped and cleared when I shut down my express server?
Maybe my issue has something to do with some convoluted logic for the setup and cleanup of my server. For reference:
import express from "express";
import { setRoutes } from "./routes";

let app = express();

const server = app.listen(8080, function () {
  console.log(`🎧 Server is now running on port : ${8080}`);
});

app = setRoutes(app);

function stop() {
  // Run some code to clean things up before server exits or restarts
  server.on("close", function () {
    console.log("⬇ Shutting down server");
    process.exit();
  });
  server.close();
}

process.on("SIGINT", stop);
process.on("SIGTERM", stop);
process.on("SIGQUIT", stop);

process.once("SIGUSR2", function () {
  // Run some code to do a different kind of cleanup on nodemon restart:
  process.kill(process.pid, "SIGUSR2");
});
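One pattern that can help here (a sketch of mine, not from the post above; activeIntervals and trackedInterval are hypothetical names): timeout.unref() only stops a timer from keeping the event loop alive, it does not cancel it, so the shutdown path still has to call clearInterval. Keeping a registry of live intervals and clearing it inside stop() before server.close() covers every route:

// Sketch: a tiny registry of active intervals, cleared on shutdown.
const activeIntervals = new Set();

function trackedInterval(fn, ms) {
  const interval = setInterval(fn, ms);
  activeIntervals.add(interval);
  return interval;
}

function stop() {
  // Clear any timers still running before shutting the server down
  activeIntervals.forEach(clearInterval);
  activeIntervals.clear();
  server.on("close", function () {
    console.log("⬇ Shutting down server");
    process.exit();
  });
  server.close();
}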
I'm playing around with sockets for the first time and have created a very simple back end and front end to test with our system at work.
I want the back end to query our server every 10 seconds in this example and pass the results to the front end.
I currently have the interval set to 10 seconds, however when I run it I only get the result after ten seconds. I want the result straight away, and then to check every ten seconds for changes.
I've tried moving code around and seeing what works, but I usually get a message telling me the variable is undefined (because it is then obviously outside the function).
My code is below. I am aware it's probably a bit overkill having the setInterval in both the result and the catch, so if anyone can help tidy it up so it works correctly, I'd appreciate it. Still a bit of a noob, I'm afraid!
const express = require("express");
const app = express();
const server = require("http").createServer(app);
const io = require("socket.io")(server);
const oledb = require('oledb');

const smartconn =
  `--myodbcconnection--`;

const db = oledb.odbcConnection(smartconn);

let command = `SELECT item FROM mytable.table LIMIT 10`;

db.query(command)
  .then(result => {
    setInterval(function () {
      io.emit("query", result.result);
    }, 10000);
  },
  err => {
    setInterval(function () {
      io.emit("query", err);
    }, 10000);
  });

io.set("origins", "*:*");

io.on("connection", async (socket) => {
  console.log("Client Successfully Connected");
});

server.listen(5000, () => {
  console.log("Backend Server is running on http://localhost:5000");
});
I expect results to show immediately. The previous approach didn't use sockets and polled using setInterval from the front end, which I want to move away from.
Here's one way to do it:
// Resolves a Promise after ms milliseconds
const sleep = (ms) => {
  return new Promise((resolve, reject) => {
    setTimeout(resolve, ms);
  });
}

// Returns a db.query result with server status
const getServerStatus = async () => {
  return await db.query(`SELECT item FROM mytable.table LIMIT 10`);
}

// Runs indefinitely and emits events to the frontend every 10 seconds
(async () => {
  while (true) {
    try {
      const result = await getServerStatus();
      io.emit("query", result.result);
    } catch (error) {
      io.emit("query", error); // It would be better to change the event name to something else
    }
    await sleep(10000);
  }
})();
This way the first event will be sent immediately because unlike setInterval, my implementation waits after executing the code, not before. Also, you can be sure that queries will not overlap when they take more than 10s to execute. setInterval doesn't guarantee that the next execution will wait for the previous one to finish.
The downside is that events will be sent after (10,000ms + query_delay), so depending on your database size and performance, some of them might get delayed by a few or a few hundred milliseconds. To counter that, you can measure getServerStatus's execution time and subtract it from the wait time.
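For example, a minimal sketch of that compensation (reusing the sleep() and getServerStatus() helpers above; the exact timing code is an illustration, not from the original answer):

// Sketch: subtract the query's execution time from the wait so events
// are emitted roughly every 10 seconds regardless of query latency.
(async () => {
  while (true) {
    const started = Date.now();
    try {
      const result = await getServerStatus();
      io.emit("query", result.result);
    } catch (error) {
      io.emit("query", error);
    }
    const elapsed = Date.now() - started;
    await sleep(Math.max(0, 10000 - elapsed));
  }
})();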
Sorted it by moving the emit functions into the io.on function.
Cheers!
I need help increasing the 2 min default request timeout in a Koa server.
I have a long-running operation that takes a few minutes. When it finishes I send a response back. The problem is that the connection is automatically closed after a timeout of 2 minutes, which is the Node.js default.
I tried everything I could find on Google:
tried using all kinds of third-party npm modules,
tried ctx.request.socket.setTimeout(0).
I am out of ideas and need help.
I am executing my requests to the server using Postman with an infinite timeout.
Update - this is a code snippet of something I'm trying to do:
const Koa = require('koa')
const app = new Koa()
const PORT = 7555

const server = app.listen(PORT)

app.use(async (ctx, next) => {
  ctx.req.setTimeout(0);
  await next();
});

app.use(async (ctx, next) => {
  const wait = async () => {
    return new Promise((resolve, reject) => {
      setTimeout(() => {
        resolve()
      }, 1000 * 60 * 5)
    })
  }
  await wait()
  console.log("settimeout finished", new Date())
  ctx.response.body = { success: "true", date: new Date() }
})
I tested this one... it worked fine:
const Koa = require('koa')
const app = new Koa()
const PORT = 7555

const server = app.listen(PORT);
server.setTimeout(0); // <-- this should do the trick ...

app.use(async (ctx, next) => {
  console.log("request started", new Date())
  const wait = async () => {
    return new Promise((resolve, reject) => {
      setTimeout(() => {
        resolve()
      }, 1000 * 60 * 5)
    })
  }
  await wait()
  console.log("settimeout finished", new Date())
  ctx.response.body = { success: "true", date: new Date() }
})
If Apache or nginx is involved in your production system, be sure to also modify their configurations (here I increased the timeout to 500 seconds).
For NGINX (proxy)
vim /etc/nginx/nginx.conf
Add the following in the http {...} section
(see http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout)
http {
    #...
    proxy_read_timeout 500s;
    proxy_send_timeout 500s;
    #...
}
For Apache
vim /etc/apache2/apache2.conf:
Search for TimeOut
# ...
TimeOut 500
# ...
You could add a middleware early in your app like this:
app.use(async (ctx, next) => {
  ctx.req.setTimeout(0);
  await next();
});
It seems Koa keeps the default 120-second timeout from Node's http_server.js.
So, you could try to hack into it, but this middleware should do the trick.
You could also try this:
const server = app.listen(PORT); // This returns the http.Server instance
server.setTimeout(600000);
You can see relevant info here
I use Supertest to test my Express apps, but I'm running into a challenge when I want my handlers to do asynchronous processing after a request is sent. Take this code, for example:
const request = require('supertest');
const express = require('express');

const app = express();

app.get('/user', async (req, res) => {
  res.status(200).json({ success: true });
  await someAsyncTaskThatHappensAfterTheResponse();
});

describe('A Simple Test', () => {
  it('should get a valid response', () => {
    return request(app)
      .get('/user')
      .expect(200)
      .then(response => {
        // Test stuff here.
      });
  });
});
If the someAsyncTaskThatHappensAfterTheResponse() call throws an error, then the test here is subject to a race condition where it may or may not fail based on that error. Even aside from error handling, it's also difficult to check for side effects if they happen after the response is sent. Imagine that you wanted to trigger database updates after sending a response. You wouldn't be able to tell from your test when you should expect that the updates have completed. Is there any way to use Supertest to wait until the handler function has finished executing?
This cannot be done easily because Supertest acts like a client and you do not have access to the actual req/res objects in Express (see https://stackoverflow.com/a/26811414/387094).
As a completely hacky workaround, here is what worked for me.
Create a file which houses a callback/promise. For instance, my file test-hack.js looks like so:
let callback = null

export const callbackPromise = () => new Promise((resolve) => {
  callback = resolve
})

export default function callWhenComplete () {
  if (callback) callback('hack complete')
}
When all processing is complete, call the callWhenComplete function. For instance, my middleware looks like so:
import callWhenComplete from './test-hack'

export default function middlewareIpnMyo () {
  return async function route (req, res, next) {
    res.status(200)
    res.send()
    // async logic
    callWhenComplete()
  }
}
And finally, in your test, await the callbackPromise like so:
import { callbackPromise } from 'test-hack'

describe('POST /someHack', () => {
  it.only('should handle a post request', async () => {
    const response = await request
      .post('/someHack')
      .send({ soMuch: 'hackery' })
      .expect(200)

    const result = await callbackPromise()
    // anything below this is executed after callWhenComplete() is
    // executed from the route
  })
})
Inspired by @travis-stevens, here is a slightly different solution that uses setInterval so you can be sure the promise is set up before you make your supertest call. This also allows tracking requests by id in case you want to use the library for many tests without collisions.
const backgroundResult = {};

export function backgroundListener(id, ms = 1000) {
  backgroundResult[id] = false;
  return new Promise(resolve => {
    // set up interval
    const interval = setInterval(isComplete, ms);
    // completion logic
    function isComplete() {
      if (false !== backgroundResult[id]) {
        resolve(backgroundResult[id]);
        delete backgroundResult[id];
        clearInterval(interval);
      }
    }
  });
}

export function backgroundComplete(id, result = true) {
  if (id in backgroundResult) {
    backgroundResult[id] = result;
  }
}
Make a call to get the listener promise BEFORE your supertest.request() call (in this case, using agent).
it('should respond with a 200 but background error for failed async', async function () {
  const agent = supertest.agent(app);
  const trackingId = 'jds934894d34kdkd';
  const bgListener = background.backgroundListener(trackingId);

  // post something but include tracking id
  await agent
    .post('/v1/user')
    .field('testTrackingId', trackingId)
    .field('name', 'Bob Smith')
    .expect(200);

  // execute the promise which waits for the completion function to run
  const backgroundError = await bgListener;

  // should have received an error
  assert.equal(backgroundError instanceof Error, true);
});
Your controller should expect the tracking id and pass it to the complete function at the end of the controller's background processing. Passing an error as the second value is one way to check the result later, but you can just pass false or whatever you like.
// if background task(s) were successful, promise in test will return true
backgroundComplete(testTrackingId);
// if not successful, promise in test will return this error object
backgroundComplete(testTrackingId, new Error('Failed'));
If anyone has any comments or improvements, that would be appreciated :)
I'm using Express with Node.js and testing certain routes. I'm following this tutorial at http://coenraets.org/blog/2012/10/creating-a-rest-api-using-node-js-express-and-mongodb/
I'm calling http://localhost:3000/wines via Ajax (the content doesn't matter), but I want to test latency. Can I do something like make Express respond after 2 seconds? (I want to simulate the Ajax loader, and I'm running on localhost so my latency is pretty much nil.)
Use this as middleware for all your requests:
app.use(function(req,res,next){setTimeout(next,1000)});
Just call res.send inside of a setTimeout:
setTimeout(() => {
  res.send(items)
}, 2000)
To apply it globally to all requests you can use the following code:
app.use((req, res, next) => {
  setTimeout(next, Math.floor((Math.random() * 2000) + 100));
});
Time values are:
Max = 2000 (sort of... the min value is added, so in reality it's 2100)
Min = 100
Try the connect-pause module. It adds a delay to all or only some routes in your app.
app.get('/fakeDelay', function (req, res) {
  let ms = req.query.t;
  ms = (ms > 5000 || isNaN(ms)) ? 1000 : parseInt(ms);
  setTimeout(() => res.status(200).send({ delay: ms }), ms);
})
Then request the URL as: http://localhost/fakeDelay/?t=2000
(max 5000ms and default of 1000ms on this example)
Update:
Using a Promise. The function 'sleep' can be used for delaying any Express response or other async function.
const sleep = (ms) => new Promise(resolve => setTimeout(resolve, ms));

app.get('/fakeDelay', async (req, res) => {
  await sleep(500);
  res.send([])
})
Just a note on top of @maggocnx's solution: put this middleware early (before your route handlers):
app.use(function(req,res,next){setTimeout(next,1000)});
You could also just write your own generic delay handler using a Promise or callback (using a q promise in this case):
pause.js:
var q = require('q');

function pause(time) {
  var deferred = q.defer();

  // if the supplied time value is not a number, set it to 0,
  // else use the supplied value
  time = isNaN(time) ? 0 : time;

  // Logging that this function has been called,
  // just in case you forgot about a pause() you added somewhere,
  // and you think your code is just running super-slow :)
  console.log('pause()-ing for ' + time + ' milliseconds');

  setTimeout(function () {
    deferred.resolve();
  }, time);

  return deferred.promise;
}

module.exports = pause;
then use it however you'd like:
server.js:
var pause = require('./pause');

router.get('/items', function (req, res) {
  var items = [];
  pause(2000)
    .then(function () {
      res.send(items)
    });
});
You can use the express-delay package.
The code below will delay all incoming requests by one second.
const app = require('express')();
const delay = require('express-delay');
app.use(delay(1000));
The package also offers the possibility to introduce a random delay within specified boundaries, e.g. by calling delay(1000, 2000) for a delay between one and two seconds.
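For instance, a random delay might look like this (a small sketch based on the delay(min, max) call described above):

const app = require('express')();
const delay = require('express-delay');

// Delay every request by a random amount between 1 and 2 seconds
app.use(delay(1000, 2000));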
In my case I wanted the same processing time for all of my endpoints.
The solution I found is to override one of the Response methods:
/* MINIMUM DELAY */
const minDelay = 200; /* ms */
app.use((req, res, next) => {
  const tmp = res.json;
  const start = new Date().getTime();
  (res.json as any) = async (body: any) => {
    await new Promise((re) => setTimeout(re, minDelay - new Date().getTime() + start));
    tmp.apply(res, [body]);
  };
  next();
});
This way an attacker would not be able to differentiate failed login requests from OK requests just by looking at the response time :)