Execute a middleware one-time only at server startup in Koa v2 - node.js

I created this middleware, which executes only once, when any route on the website gets its first hit from a visitor:
// pg-promise
const db = require('./db/pgp').db;
const pgp = require('./db/pgp').pgp;

app.use(async (ctx, next) => {
  try {
    ctx.db = db;
    ctx.pgp = pgp;
  } catch (err) {
    debugErr(`PGP ERROR: ${err.message}` || err);
  }
  await next();
});
// One-Time middleware
// https://github.com/expressjs/express/issues/2457
const oneTime = (fn) => {
  try {
    let done = false;
    const res = (ctx, next) => {
      if (done === false) {
        fn(ctx, next);
        done = true;
      }
      next();
    };
    return res;
  } catch (err) {
    debugErr(`oneTime ERROR: ${err.message}` || err);
  }
};

const oneTimeQuery = async (ctx) => {
  const result = await ctx.db.proc('version', [], a => a.version);
  debugLog(result);
};

app.use(oneTime(oneTimeQuery));
This code executes only on the first visit to the website, resulting in:
app:log Listening on port 3000 +13ms
app:req GET / 200 - 24ms +2s
23:07:15 connect(postgres@postgres)
23:07:15 SELECT * FROM version()
23:07:15 disconnect(postgres@postgres)
app:log PostgreSQL 9.6.2, compiled by Visual C++ build 1800, 64-bit +125ms
My problem is that I want to execute it at server start, before there is any visit to the site.
The eventual purpose of this code is to check the existence of tables in the database.
Solution:
Placing this in ./bin/www before the const server = http.createServer(app.callback()); declaration helped:
const query = async () => {
  const db = require('../db/pgp').db;
  const pgp = require('../db/pgp').pgp;
  const result = await db.proc('version', [], a => a.version);
  debugLog(`www: ${result}`);
  pgp.end(); // for immediate app exit, closing the connection pool (synchronous)
};
query();
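Since the eventual purpose is to check for the existence of tables, a minimal sketch of such a startup check with pg-promise might look like the following (the public.users table name and the reuse of the debugLog/debugErr helpers are assumptions for illustration, not part of the original solution):
// Hypothetical startup check, assuming pg-promise is configured in ../db/pgp
const checkTables = async () => {
  const db = require('../db/pgp').db;
  // to_regclass() returns NULL when the relation does not exist
  const row = await db.one("SELECT to_regclass('public.users') AS tbl");
  if (row.tbl === null) {
    debugErr('Table "users" is missing - run the migrations first');
  } else {
    debugLog('Table "users" exists');
  }
};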

You could start your application using a JS script that requires your app and uses Node's native http module to fire up the server, exactly like koa-generator does.
This is in your app.js file:
const Koa = require('koa');
const app = new Koa();
...
module.exports = app;
And then this is in your script to fire up the server:
const app = require('./app');
const http = require('http');

// [this is the place where you should run your code before the server starts]

const server = http.createServer(app.callback());
server.listen(port);
Afterwards you start your application with:
node [script_name].js
Of course, keep in mind the async nature of Node when doing it this way: call the listen method on the server variable from inside a callback or promise.
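Putting this together with the solution above, a minimal sketch of such a startup script might be (the require paths, port value, and console logging are assumptions based on the earlier snippets):
// Hypothetical startup script: run the one-time work, then start listening
const http = require('http');
const app = require('./app');
const db = require('./db/pgp').db;

const port = process.env.PORT || 3000;

(async () => {
  // one-time work before the server starts accepting requests
  const version = await db.proc('version', [], a => a.version);
  console.log(`Startup check OK: ${version}`);

  const server = http.createServer(app.callback());
  server.listen(port, () => console.log(`Listening on port ${port}`));
})().catch((err) => {
  console.error(`Startup failed: ${err.message}`);
  process.exit(1);
});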

Related

Jest + Supertest - API tests are using random ephemeral ports

I'm using Restify for my API and I'm trying to write tests for my endpoints.
At first, I had only a test for ping and it was okay, but now, after I added a new test, supertest is trying to use ephemeral ports for the test server (80201).
I've searched a lot and tried some approaches that seem to work for most people but not for me. I'm probably messing something up, but I have no clue what it could be.
Check out my code:
server.js
require('dotenv').config();

const config = require('./config');
const routes = require('./src/routes');
const cors = require('restify-cors-middleware');
const http = require('http');
const https = require('https');
const restify = require('restify');

module.exports = function () {
  http.globalAgent.keepAlive = true;
  http.globalAgent.maxSockets = 256;
  https.globalAgent.keepAlive = true;
  https.globalAgent.maxSockets = 256;

  const _cors = cors({
    preflightMaxAge: 5,
    origins: [new RegExp("^(https?:\/\/)?[-\w]+\.hackz\.co(\.\w+)?(:[\d]+)?$")],
    allowHeaders: [
      'authorization',
      'x-requested-with',
      'Content-MD5',
      'Date',
      'Accept-Version',
      'Api-Version',
      'Response-Time'
    ],
    credentials: true
  });

  const server = restify.createServer({ name: config.apiName });

  // Middlewares
  server.pre(_cors.preflight);
  server.use(_cors.actual);
  server.use(restify.plugins.fullResponse());
  server.use(restify.plugins.queryParser({ mapParams: true }));
  server.use(restify.plugins.bodyParser({ mapParams: true }));

  // Load Routes
  routes.set(server);

  server.on('error', function (req, res, route, error) {
    if (error && (error.statusCode == null || error.statusCode !== 404)) {}
  });

  // Start Server
  server.listen(config.apiPort, function () {
    console.log(`${server.name} listening at ${server.url}.\nWe're in ${config.env} environment!`);
  });

  return server;
}();
tests/config/server.js
const server = require('../..');
const request = require('supertest');

function TestServer() {
  return request(server);
}

module.exports = { TestServer };
tests/services/request.js
const { TestServer } = require("../config/server");

async function get(path, sessionkey = '', params = {}) {
  const server = TestServer();
  return await server
    .get(path)
    .query(params)
    .set("authorization", sessionkey)
    .set("content-type", "application/json");
}

async function post(path) {
  const server = TestServer();
  return await server
    .post(path)
    .set("content-type", "application/json");
}

module.exports = {
  get,
  post,
};
tests/config/setup.js
const server = require('../..');

afterAll(() => {
  return server.close();
});
src/controllers/Ping.test.js
const { get } = require('../../tests/services/request');

describe('Ping Controller', () => {
  describe('GET /ping', () => {
    it('Should return 200', async () => {
      const response = await get('/ping');
      expect(response.status).toBe(200);
    });
  });
});
src/controllers/Session.test.js
const { post } = require('../../tests/services/request');

describe('Session Controller', () => {
  const userId = 1;

  describe('POST /:userId/create', () => {
    it('Should create session successfully!', async () => {
      const response = await post(`${userId}/create`);
      expect(response.status).toBe(200);
      expect(response.body.auth).toBe(true);
    });
  });
});
package.json (scripts and Jest config)
...
"scripts": {
  "start": "node index.js",
  "test": "jest --detectOpenHandles --forceExit --coverage",
  "test:api": "npm run test -- --roots ./src/controllers"
},
...
"jest": {
  "setupFilesAfterEnv": [
    "jest-extended",
    "<rootDir>/tests/config/setup.js"
  ],
  ...
}
This is the error output:
> user-session-api@1.0.0 test
> jest --detectOpenHandles --forceExit --coverage

FAIL src/controllers/Session.test.js
  Session Controller
    POST /:userId/create
      ✕ Should create session successfully! (39 ms)

  ● Session Controller › POST /:userId/create › Should create session successfully!

    RangeError [ERR_SOCKET_BAD_PORT]: Port should be >= 0 and < 65536. Received 80201.
Things I've tried:
Passing the result of server.listen(...) (instead of the server instance) to supertest (as described here);
Using beforeEach for each test manually listening to a specific port;
This approach, which is similar to the first item.
HELP!
UPDATE:
Just realized that running npm run test "Ping.test.js" succeeds, while running npm run test "Session.test.js" (which is the new test) fails. So there's probably something wrong with that single file.
I was also getting this error:
RangeError [ERR_SOCKET_BAD_PORT]: Port should be >= 0 and < 65536. Received 80201
while using supertest and Jest to test a NestJS application. I was doing
return request(app.getHttpServer())
  .get(`route`)
instead of
return request(app.getHttpServer())
  .get(`/route`)
OH MY GOD!
I found the issue and, prepare yourself, the solution is ridiculous.
The request path in my test had a typo.
I was doing this:
const response = await post(`${userId}/create`); // with userId as 1
The path was missing an initial /, that's it haha.
THAT'S why supertest was appending a 1 to the server port and the RangeError was raised.
I'm hating myself right now.
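For completeness, the corrected call from the test only adds the leading slash:
// With the leading slash, supertest treats the argument as a path on the test server
// instead of concatenating it onto the server address (which is what produced port 80201).
const response = await post(`/${userId}/create`); // userId is 1, so the path is "/1/create"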

I can't find a way to get data from gremlin-server, using node/express - promise {<pending>} forever

Before you read on too far: this question might be about Node/Express or it might be about Gremlin, and I can't work out which is the cause.
On my Linux box as SU, I've started Janusgraph:
docker run --name janusgraph-default --volume /home/greg/Downloads/gremlin:/dataImports janusgraph/janusgraph:latest (I mount a local folder, to pick up the air-routes.graphml file)
On the same box, in a new terminal I start gremlin-server: docker run --rm --link janusgraph-default:janusgraph -e GREMLIN_REMOTE_HOSTS=janusgraph -it janusgraph/janusgraph:latest ./bin/gremlin.sh (which gives me a gremlin console)
From the gremlin console, I can run :remote connect tinkerpop.server conf/remote.yaml
Then :> graph.io(graphml()).readGraph("/dataImports/air-routes.graphml")
Finally: :> g.V().has('airport','code','DFW').values(), which returns the details for Dallas Fort Worth, as expected.
Having demonstrated that I've loaded the data successfully and can query it from the console, I want to create a Node/Express program that will ultimately run some generally static queries to build a series of dashboard pages.
I have cobbled together my first Express app:
const express = require('express');
const app = express();
const router = express.Router();

const gremlin = require('gremlin');
const traversal = gremlin.process.AnonymousTraversalSource.traversal;
const __ = gremlin.process.statics;
const DriverRemoteConnection = gremlin.driver.DriverRemoteConnection;
const column = gremlin.process.column;
const direction = gremlin.process.direction;
const p = gremlin.process.P;
const textp = gremlin.process.TextP;
const pick = gremlin.process.pick;
const pop = gremlin.process.pop;
const order = gremlin.process.order;
const scope = gremlin.process.scope;
const t = gremlin.process.t;

// const tq = traversal().withRemote(new DriverRemoteConnection('ws://localhost:8182/gremlin'));
const tq = traversal().withRemote(new DriverRemoteConnection('ws://localhost:8182'));
const g = tq;

router.get('/home', (req, res) => {
  res.send('<h1>Hello World,</h1><p>This is home router</p>');
});

// my first pass
// router.get('/query', async (req, res) => {
//   const response = await gremlinQuery();
//   //const response = gremlinQuery();
//   const resp = res.send(response);
// });

// my second pass
router.get('/query', (req, res) => {
  const response = gremlinQuery();
  console.log(response);
  const resp = res.send(response);
});

//<snipped out other router.gets>

app.use('/', router);
app.listen(process.env.port || 3000);
console.log('Web Server is listening at port ' + (process.env.port || 3000));

async function gremlinQuery() {
  let results;
  //function gremlinQuery() {
  try {
    const results = await g.V().has('airport', 'code', 'LHR').values().toList();
    console.log(results);
    //JsonConvert.DeserializeObject<results>(JsonConvert.SerializeObject(results2));
    var arr = JSON.parse(results);
    console.log('output: ' + arr);
    return ('output: ' + arr);
  } catch (error) {
    console.error(error);
  }
}
The query in the code returns the expected result in the Gremlin console, but with this code (which I run in debug mode in VS Code) the expected webpage launches on localhost:3000 without the expected JSON response data, and the debug terminal stops at Promise {<pending>}. I can't work out whether I'm:
a) not connecting to the gremlin-server?
b) making a mess of my async/await (is it OK that it's on the function called by the router function)?
c) something else?
(got to say the 'net isn't awash with Node/Express/Gremlin/Janusgraph examples to draw on.)
Docker version 19.03.11, build 42e35e61f352
Linux xxxxxxxx 5.3.18-lp152.60-default #1 SMP Tue Jan 12 23:10:31 UTC 2021 (9898712) x86_64 x86_64 x86_64 GNU/Linux
gremlin> Gremlin.version() ==> 3.4.6
node --version v14.15.4
express ??
gremlinQuery is an async function, so you have to wait until the function resolves a result.
Also, gremlinQuery looks like an unused function (???).
How about:
async function gremlinQuery() {
  const results = await g.V().has('airport', 'code', 'LHR').values().toList();
  // return JSON.parse(results); // results is a string???
  return results;
}
Express router:
router.get('/query', async (req, res) => { // async handler
  try {
    const response = await gremlinQuery(); // wait until the query resolves
    // res.send(response);
    res.json(response); // send it to the client as a JSON object
  } catch (error) {
    res.status(500).send(error); // notify the client that something went wrong
  }
});
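As an aside, toList() in the Gremlin JavaScript driver resolves to an array rather than a string, so the commented-out JSON.parse(results) from the original code should not be needed; res.json(response) can serialize the array directly.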
I'm not sure why, but the variable names used to create the client seem to matter. I was facing the same problem, but when I used the same constant names as in the documentation, it worked.
Sample variable names from the docs:
const authenticator = new Gremlin.driver.auth.PlainTextSaslAuthenticator(
  `/dbs/${GREMLIN_DATABASE}/colls/${GREMLIN_GRAPH}`,
  GREMLIN_PRIMARY_KEY
);

const client = new Gremlin.driver.Client(GREMLIN_ENDPOINT, {
  authenticator,
  traversalsource: "g",
  rejectUnauthorized: true,
  mimeType: "application/vnd.gremlin-v2.0+json",
});
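For what it's worth, a client created this way is usually queried by submitting Gremlin strings rather than through a traversal source; a minimal sketch, where the query and result handling are assumptions and not taken from the answer:
// Hypothetical usage of the script-submission API of the client above
async function countAirports() {
  try {
    const resultSet = await client.submit("g.V().hasLabel('airport').count()");
    console.log(resultSet.toArray());
  } finally {
    await client.close();
  }
}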

Why is the object not passed to the callback of the express.Router() route handler exported from a different module?

The entry point of my server is publisher.js. Its code is the following:
'use strict';

const publishingRoute = require('./routes/enqueue');
. . . .
. . . .
const PublishingServicePort = process.env.PUB_PORT || 4001;
const RabbitMQHostURL = process.env.RABBITMQ_AMQP_URL;
const RabbitMQQueueName = process.env.QUEUE;
const RabbitMQRoutingKey = process.env.ROUTING_KEY;
const RabbitMQExchange = process.env.EXCHANGE;

let rbmqChannel;
let rbmqConnection;

const app = express();
app.use(bodyParser.json());

// initConnection() creates the TCP connection and confirm channel with the broker. No error here.
(async function initConnection() {
  rbmqConnection = await rbmqConnectionManager.connect(amqp, RabbitMQHostURL);
  rbmqChannel = await rbmqConnectionManager.createConfirmChannel(rbmqConnection);
})();

// Error here: none of the parameters is passed to the callback function
app.use('/enqueue', publishingRoute.enqueueUser(rbmqChannel, RabbitMQQueueName, RabbitMQRoutingKey, RabbitMQExchange));

app.listen(PublishingServicePort, async () => {
  console.log(`Server listening on ${PublishingServicePort}`);
});
../routes/ contains modules that handle the routes from publisher.js. One of these modules is enqueue.js. It enqueues users' JabberIDs (JSON) into the RabbitMQ queue, through the exchange. The following code is from the enqueue.js module:
'use strict';

const router = require('express').Router();
const PublisherService = require('../services/rbmq-publisher');

export function enqueueUser(amqpChannel, queueName, routingKey, exchange) {
  //Error: No parameter has reached here. amqpChannel, queueName, routingKey, and exchange are undefined. I need all of them here.
  router.post('/', async (req, res) => {
    const content = req.body.payload;
    await PublisherService.PublishPayload(amqpChannel, exchange, routingKey, content);
    res.status(200);
    res.send({
      "Message": "OK",
      "payloadEnqueued": content
    });
  });
  return router;
}
rbmq-publisher.js is the controller (module) for handling publishing/enqueuing into the RabbitMQ exchange. The following code is from rbmq-publisher.js:
'use strict';

const PublishPayload = async function (channel, exchange, routingKey, content) {
  //Error: None of the parameters has reached here. All of them are undefined.
  console.info(`[AMQP] : Publishing Message ...`);
  try {
    await channel.publish(exchange, routingKey, Buffer.from(JSON.stringify(content)));
    console.log(`[AMQP SUCCESS]: JabberID Published`);
  }
  catch (exception) {
    //This is what is being shown in the console window. I have written the error below this code snippet.
    console.error(`[AMQP ERROR]: ${exception}`);
  }
};

export {
  PublishPayload
};
The console window shows the following error:
[AMQP ERROR]: TypeError: Cannot read property 'publish' of undefined
None of the arguments from app.use() have reached the callback function. Where did I go wrong?
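No answer is included here, but one likely culprit visible in the snippets above is that the async IIFE has not resolved by the time app.use(...) runs, so rbmqChannel is still undefined when it is passed to enqueueUser. A minimal sketch of one way around that, assuming the connection helpers behave as in publisher.js, is to finish connecting before wiring the route:
// Hypothetical reordering: wait for the channel before mounting the route
(async function start() {
  rbmqConnection = await rbmqConnectionManager.connect(amqp, RabbitMQHostURL);
  rbmqChannel = await rbmqConnectionManager.createConfirmChannel(rbmqConnection);

  // rbmqChannel now holds a real channel instead of undefined
  app.use('/enqueue', publishingRoute.enqueueUser(rbmqChannel, RabbitMQQueueName, RabbitMQRoutingKey, RabbitMQExchange));

  app.listen(PublishingServicePort, () => {
    console.log(`Server listening on ${PublishingServicePort}`);
  });
})();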

How to correct a 404 error when I fetch data from the database?

I want to fetch data from SQL Server using Redis caching, but when I send the request (GET http://localhost:4000/product/Data) I see this error:
GET /product/Data 404 7.390 ms - 33
GET /product/Data - - ms - -
How can I solve it?
server.js :
const redis = require("redis");
const redisUrl='redis://127.0.0.1:6379';
// create Redis client on Redis port
const client = redis.createClient(redisUrl);
// create express application instance
const Data=async(req,res,next)=>{
cache,getData
}
//Make request to DataBase for Data
async function getData(req,res,next){
try{
console.log('Fetching Data From SQL server...')
const Pull_Products=await
SingletonSqlHandler.instance.sendQuery('SELECT * from
dbo.getAllProducts()')
// return Pull_Products;
return res.status(200).send.json(Pull_Products)
}
catch(error){
console.error(error);
res.status(500).json({error:error})
}
}
// Cache midleware
async function cache(req,res,next){
console.log('Fetching Data From Cache...')
const Pull_Products=await
SingletonSqlHandler.instance.sendQuery('SELECT * from
dbo.getAllProducts()')
client.get(Pull_Products,(error,cachedData)=>{
if(error) throw error
if(cachedData!=null){
res.send(setResponse(Pull_Products,cachedData));
}
else {
next();
}
})
}
module.exports={
Data:Data
}
productRoute.js:
const router = require('express').Router();
const Redis = require('../redis-cache/server');

router.get('/Data', Redis.Data);

module.exports = router;
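No answer is included here, but two things stand out in the snippets above: the 404 suggests the router may never be mounted in the main app file (for example with app.use('/product', productRoute), which is not shown), and the Data handler as written evaluates cache, getData without calling either, so it never sends a response. A hedged sketch of wiring the cache middleware in front of the data handler, assuming that was the intent and that both functions are exported:
// Hypothetical wiring in productRoute.js: run the cache middleware first,
// fall through to the database handler when there is no cached value.
const router = require('express').Router();
const { cache, getData } = require('../redis-cache/server'); // assumes both are exported

router.get('/Data', cache, getData);

module.exports = router;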

How to solve listen EADDRINUSE: address already in use in integration tests

I am pretty new to Node.js and I am learning from a Node.js course on Udemy. I am facing trouble with listen EADDRINUSE: address already in use :::4000 when re-running the integration tests multiple times. The first run is successful, but afterwards I get the above-mentioned error on the following line:
const server = app.listen(port, () => {winston.info(`Listening on port ${port}`)});
I am pasting my index.js and two test files; if someone can point out the issue, it will be a great help.
Index.js
const Joi = require("#hapi/joi");
Joi.objectId = require("joi-objectid")(Joi);
const winston = require("winston");
const express = require("express");
const app = express();
require("./startup/logging")();
require("./startup/config")();
require("./startup/dbconnectivity")();
require("./startup/routes")(app);
const port = process.env.port || 4000;
const server = app.listen(port, () => {winston.info(`Listening on port ${port}`)});
// exporting server object to be used in integration tests.
module.exports = server;
Integration test file for Genre:
const request = require("supertest");
let server;
const {Genere} = require("../../models/genere");
const {User} = require("../../models/user");
describe("/api/genere", () => {
beforeEach(() => {
console.log("Before each Genre");
server = require("../../index");
});
afterEach(async () => {
console.log("After each Genre");
await Genere.deleteMany({});
await server.close();
});
describe("/GET", () => {
it("should return list of generes", async() => {
await Genere.insertMany([
{name: "genre1"},
{name: "genre2"},
{name: "genre3"}
]);
const res = await request(server).get("/api/geners");
expect(res.status).toBe(200);
console.log("response body is : " + res.body);
expect(res.body.length).toBe(3);
expect(res.body.map(g => g.name)).toContain("genre1");
});
});
describe("/GET/:id", () => {
it("should return genre with id", async() => {
const genre = new Genere({name: "genre1"});
await genre.save();
const res = await request(server).get("/api/geners/"+ genre.id);
expect(res.status).toBe(200);
expect(res.body.name).toBe("genre1");
});
it("should return error with invalid id", async() => {
const genre = new Genere({name: "genre1"});
await genre.save();
const res = await request(server).get("/api/geners/1");
expect(res.status).toBe(404);
expect(res.text).toMatch(/Invalid/);
});
});
describe("/POST", () => {
it("should return 401 if not authorized", async() => {
const genere = new Genere({name: "genere1"});
const res = await request(server).post("/api/geners").send(genere);
expect(res.status).toBe(401);
});
it("should return 400 if the name is less than 4 chars", async() => {
const res = await createRequestWithGenre({name: "g1"});
expect(res.status).toBe(400);
});
it("should return 400 if the name is greater than 25 chars", async() => {
const genreName = Array(26).fill("a").join("");
const res = await createRequestWithGenre({name: genreName})
expect(res.status).toBe(400);
});
it("should return 201 with gener object if proper object is sent", async() => {
const res = await createRequestWithGenre({name: "genre1"})
expect(res.status).toBe(201);
expect(res.body).toHaveProperty("_id");
expect(res.body).toHaveProperty("name", "genre1");
const genre = await Genere.find({ name: "genre1"});
expect(genre).not.toBe(null);
});
async function createRequestWithGenre(genre) {
const token = new User().generateAuthToken();
return await request(server)
.post("/api/geners")
.set("x-auth-token", token)
.send(genre);
}
});
});
As soon as I add another integration test file like the one below, I start getting the error shown after this file's code.
const { User } = require("../../models/user");
const { Genere } = require("../../models/genere");
const request = require("supertest");

let token;

describe("middleware", () => {
  beforeEach(() => {
    console.log("Before each Middleware");
    token = new User().generateAuthToken();
    server = require("../../index");
  });

  afterEach(async () => {
    console.log("After each Middleware");
    await Genere.deleteMany({});
    await server.close();
  });

  const exec = async () => {
    return await request(server)
      .post("/api/geners")
      .set("x-auth-token", token)
      .send({ name: "gener1" });
  };

  it("should return 400 if invalid JWT token is sent", async () => {
    token = "invalid_token";
    const res = await exec();
    expect(res.status).toBe(400);
    expect(res.text).toBe("Invalid auth token");
  });
});
Console Error
middleware
✕ should return 400 if invalid JWT token is sent (510ms)
● middleware › should return 400 if invalid JWT token is sent
listen EADDRINUSE: address already in use :::4000
10 | require("./startup/routes")(app);
11 | const port = process.env.port || 4000;
> 12 | const server = app.listen(port, () => {winston.info(`Listening on port ${port}`)});
| ^
13 | // exporting server object to be used in integration tests.
14 | module.exports = server;
at Function.listen (node_modules/express/lib/application.js:618:24)
at Object.<anonymous> (index.js:12:20)
at Object.beforeEach (tests/integration/middleware.test.js:11:22)
If someone can help me understand why it fails on multiple runs, it would also really help me understand why we need to open and close the server object every time.
Supertest is able to manage the setup/teardown of an Express/Koa app itself if you can import an instance of app without calling .listen() on it.
This involves structuring the code a little differently, so that app becomes a module separate from the server's .listen() call.
// app.js module
const app = require('express')()
require("./startup/logging")()
...
module.exports = app
Then the entry point for running the server imports the app and sets up the server with .listen():
// server.js entrypoint
const app = require('./app')
const port = process.env.port || 4000;
app.listen(port, () => {winston.info(`Listening on port ${port}`)});
When supertest uses the imported app, it will start its own server and listen on a random unused port without clashes.
// test
const request = require('supertest')
const app = require('./app')
request(app).get('/whatever')
The supertest "server" instance can be reused for multiple tests too
// reuse test
const supertest = require('supertest')
const app = require('./app')

describe('steps', () => {
  const request = supertest(app)

  it('should step1', async() => {
    return request.get('/step1')
  })

  it('should step2', async() => {
    return request.get('/step2')
  })
})
One solution is to run Jest with maxWorkers set to 1, which can be configured via your package.json in the following way:
"scripts": {
"test": "NODE_ENV=test jest --forceExit --detectOpenHandles --watchAll --maxWorkers=1"
},
If I understand your setup correctly, you have multiple integration-test files, which Jest will try to run in parallel (this is the default mode). The error you're getting makes sense, since for each suite a new server instance is created before each test, but the server might already have been started while executing a different suite.
As described in the official documentation, instead of beforeEach it would make sense to use globalSetup, where you initialize your server once before running all test suites and stop it afterwards:
// setup.js
module.exports = async () => {
  // ...
  // Set reference to your node server in order to close it during teardown.
  global.__MY_NODE_SERVER__ = require("../../index");
};

// teardown.js
module.exports = async function () {
  // http.Server exposes close(), not stop(); wrap it so the teardown actually waits
  await new Promise((resolve) => global.__MY_NODE_SERVER__.close(resolve));
};

// in your jest-config you'd set the path to these files:
module.exports = {
  globalSetup: "<rootDir>/setup.js",
  globalTeardown: "<rootDir>/teardown.js",
};
Alternatively you could run your tests with the --runInBand option and beforeAll instead of beforeEach in order to make sure that only one server is created before each test, but I'd recommend the first option.
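For reference, the --runInBand variant is just a different flag on the same kind of script (sketched here against the maxWorkers example above; beforeAll/afterAll would replace beforeEach/afterEach in the test files):
"scripts": {
  "test": "NODE_ENV=test jest --forceExit --detectOpenHandles --watchAll --runInBand"
},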
