Dynamically Running Mocha Tests - node.js

I'm trying to run a series of tests dynamically. I have the following setup but it doesn't seem to run and I'm not getting any errors:
import Mocha from 'mocha';

const Test = Mocha.Test;
const Suite = Mocha.Suite;
const mocha = new Mocha();

for (let s in tests) {
  let suite = Suite.create(mocha.suite, s);

  tests[s].forEach((test) => {
    console.log('add test', test.name)
    suite.addTest(new Test(test.name), () => {
      expect(1+1).to.equal(2);
    });
  });
}

mocha.run();
The tests I'm running look like this:
{ todo:
   [ { name: 'POST /todos',
       should: 'create a new todo',
       method: 'POST',
       endpoint: '/todos',
       body: [Object] } ] }
(though at this point my test is just trying to check a basic expect)
Based on the console.logs, the iteration seems fine and the tests appear to be added, so I'm confident in the flow of operations; I just can't get any execution or any errors.

You have to pass the test function to the Test constructor, not to suite.addTest. So change your code to add your tests like this:
suite.addTest(new Test(test.name, () => {
  expect(1+1).to.equal(2);
}));
Here is the entire code I'm running, adapted from your question:
import Mocha from 'mocha';
import { expect } from 'chai';

const Test = Mocha.Test;
const Suite = Mocha.Suite;
const mocha = new Mocha();

var tests = {
  todo: [
    { name: 'POST /todos',
      should: 'create a new todo',
      method: 'POST',
      endpoint: '/todos',
      body: [Object] }
  ]
};

for (let s in tests) {
  let suite = Suite.create(mocha.suite, s);

  tests[s].forEach((test) => {
    console.log('add test', test.name);
    suite.addTest(new Test(test.name, () => {
      expect(1+1).to.equal(2);
    }));
  });
}

mocha.run();
When I run the above with node_modules/.bin/babel-node test.es6, I get the output:
  todo
    ✓ POST /todos

  1 passing (5ms)

It's critical to test your test system and make sure it deals with passing and failing tests and thrown exceptions.
Since folks are counting on a build process to warn them about errors, you must also set the exit code to a non-zero value if anything failed.
Below is a test script (which you must invoke with node test.js rather than mocha test.js) which exercises all paths through your test suite:
const Mocha = require('mocha')
const expect = require('chai').expect

var testRunner = new Mocha()
var testSuite = Mocha.Suite.create(testRunner.suite, 'Dynamic tests')

var tests = [                                            // Define some tasks to add to the test suite.
  { name: 'POST /todos', f: () => true },                // Pass a test.
  { name: 'GET /nonos',  f: () => false },               // Fail a test.
  { name: 'HEAD /hahas', f: () => { throw Error(0) } }   // Throw an error.
]

tests.forEach(
  test =>
    // Create a test which checks the return value; thrown exceptions fail the test.
    testSuite.addTest(new Mocha.Test(test.name, function () {
      expect(test.f()).to.be.true
    }))
)

var suiteRun = testRunner.run()                 // Run the tests
process.on('exit', (code) => {                  // and set exit code.
  process.exit(suiteRun.stats.failures > 0)     // Non-zero exit indicates errors.
})                                              // Falling off the end waits for Mocha events to finish.
Given that this is prominent in web searches for asynchronous mocha tests, I'll provide a couple more useful templates for folks to copy.
Embedded execution: The first directly adds tests which invoke an asynchronous faux-network call and check the result in a .then:
const Mocha = require('mocha')
const expect = require('chai').expect

var testRunner = new Mocha()
var testSuite = Mocha.Suite.create(testRunner.suite, 'Network tests')

var tests = [   // Define some long async tasks.
  { name: 'POST /todos', pass: true,  wait: 3500, exception: null },
  { name: 'GET /nonos',  pass: false, wait: 2500, exception: null },
  { name: 'HEAD /hahas', pass: true,  wait: 1500, exception: 'no route to host' }
]

tests.forEach(
  test =>
    // Create a test which checks the resolved value and reports exceptions.
    testSuite.addTest(new Mocha.Test(test.name, function () {
      this.timeout(test.wait + 100)   // so we can set waits above 2000ms
      return asynchStuff(test).then(asyncResult => {
        expect(asyncResult.pass).to.be.true
      })   // No .catch() needed because Mocha.Test() handles rejections.
    }))
)

var suiteRun = testRunner.run()                 // Run the tests
process.on('exit', (code) => {                  // and set exit code.
  process.exit(suiteRun.stats.failures > 0)     // Non-zero exit indicates errors.
})                                              // Falling off the end waits for Mocha events to finish.

function asynchStuff (test) {
  return new Promise(function (resolve, reject) {
    setTimeout(() => {
      // console.log(test.name + ' on ' + test.endpoint + ': ' + test.wait + 'ms')
      if (test.exception)
        reject(Error(test.exception))
      resolve({name: test.name, pass: test.pass})   // only need name and pass
    }, test.wait)
  })
}
This code handles passing and failing data, reports exceptions, and exits with a non-zero status if there were errors. The output reports all expected problems and additionally whines about the test taking a long time (3.5s):
  Network tests
    ✓ POST /todos (3504ms)
    1) GET /nonos
    2) HEAD /hahas

  1 passing (8s)
  2 failing

  1) Network tests GET /nonos:
     AssertionError: expected false to be true
     + expected - actual

     -false
     +true

  2) Network tests HEAD /hahas:
     Error: no route to host
Delayed execution: This approach invokes all of the slow tasks before populating and starting the mocha test suite:
const Mocha = require('mocha')
const expect = require('chai').expect

var testRunner = new Mocha()
var testSuite = Mocha.Suite.create(testRunner.suite, 'Network tests')

var tests = [   // Define some long async tasks.
  { name: 'POST /todos', pass: true,  wait: 3500, exception: null },
  { name: 'GET /nonos',  pass: false, wait: 2500, exception: null },
  { name: 'HEAD /hahas', pass: true,  wait: 1500, exception: 'no route to host' }
]

Promise.all(tests.map(            // Wait for all async operations to finish.
  test => asynchStuff(test)
    .catch(e => {                 // Resolve caught errors so Promise.all() finishes.
      return {name: test.name, caughtError: e}
    })
)).then(testList =>               // When all are done,
  testList.map(                   // for each result,
    asyncResult =>                // test value errors and exceptions.
      testSuite.addTest(new Mocha.Test(asyncResult.name, function () {
        if (asyncResult.caughtError) {   // Check the test object for caught errors
          throw asyncResult.caughtError
        }
        expect(asyncResult.pass).to.be.true
      }))
  )
).then(x => {                     // When all tests are created,
  var suiteRun = testRunner.run()               // run the tests
  process.on('exit', (code) => {                // and set exit code.
    process.exit(suiteRun.stats.failures > 0)   // Non-zero exit indicates errors.
  })
})

function asynchStuff (test) {
  return new Promise(function (resolve, reject) {
    setTimeout(() => {
      // console.log(test.name + ' on ' + test.endpoint + ': ' + test.wait + 'ms')
      if (test.exception)
        reject(Error(test.exception))
      resolve({name: test.name, pass: test.pass})   // only need name and pass
    }, test.wait)
  })
}
The output is the same except that mocha doesn't whine about the slow test and instead believes the tests took less than 10ms. The Promise.all waits for all the promises to resolve or reject, then creates the tests to validate the results or report exceptions. This is a few lines longer than Embedded execution (a distilled sketch of the pattern follows the list below) because it must:
Resolve exceptions so Promise.all() resolves.
Execute the tests in a final Promise.all().then()
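For clarity, here is that error-absorbing pattern in isolation. This is only a sketch; slowCall and items are hypothetical stand-ins, not names from the code above:
// Hypothetical sketch of the "resolve caught errors" pattern used above.
// slowCall(item) is a placeholder async operation; any rejection is converted
// into a plain result object, so Promise.all() itself never rejects.
const settled = items.map(item =>
  slowCall(item).catch(e => ({ name: item.name, caughtError: e }))
)
Promise.all(settled).then(results => {
  // Every entry is either a normal result or { name, caughtError },
  // so the test-creation step can decide whether to throw or assert.
})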
Comments describing how folks pick which style to use could guide others. Share your wisdom!

Related

How to cover a unit test of an "if statement" in Jest depending on an external variable in Node.js

I have the following JavaScript code:
const authenticationTypeMapping = (payload) => {
  const { API_CONFIG } = process.env;
  try {
    const apiConfig = JSON.parse(API_CONFIG.toString('utf8'));
    // set authenticationType to Federated for production
    if (apiConfig.API_BASE_URL.includes('prd')) {
      payload.authenticationTypeName = 'Federated';
    // set authenticationType to Basic for dev or UAT
    } else if (apiConfig.API_BASE_URL.includes('dev') || apiConfig.API_BASE_URL.includes('uat')) {
      payload.authenticationTypeName = 'Basic';
    }
  } catch (err) {
    console.log(`Failed to map authenticationType. Unable to parse Secret: ${err}`);
  }
  return payload;
};
I'm having trouble covering the lines inside the try block with a unit test using Jest.
The if statement depends on the external variable apiConfig.API_BASE_URL, which comes from process.env, and I don't know how to represent that in Jest.
it('should call authenticationTypeMapping', async () => {
  const payload = mapper.authenticationTypeMapping(basicPayload);
  expect(payload.authenticationTypeName).toEqual('Basic');
});
What should be added to cover the unit test?
You can set the environment variable in the test and assert on the resulting branch. Note that the code under test reads process.env.API_CONFIG and parses it as JSON, so that is the variable to set:
it('should call authenticationTypeMapping', async () => {
  process.env.API_CONFIG = JSON.stringify({ API_BASE_URL: 'prd...' });
  expect(mapper.authenticationTypeMapping(basicPayload).authenticationTypeName).toEqual('Federated');

  process.env.API_CONFIG = JSON.stringify({ API_BASE_URL: 'dev...' });
  expect(mapper.authenticationTypeMapping(basicPayload).authenticationTypeName).toEqual('Basic');
});
You might also split this into more than one unit test to keep things clear, e.g. one for 'prd' and one for 'dev'.
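A minimal sketch of that split, assuming the same mapper module and basicPayload fixture from the question; the hooks save and restore process.env so the tests don't leak state into each other:
describe('authenticationTypeMapping', () => {
  const ORIGINAL_ENV = process.env;

  beforeEach(() => {
    process.env = { ...ORIGINAL_ENV };   // work on a copy so each test can set API_CONFIG independently
  });

  afterEach(() => {
    process.env = ORIGINAL_ENV;          // restore the real environment
  });

  it('maps prd URLs to Federated', () => {
    process.env.API_CONFIG = JSON.stringify({ API_BASE_URL: 'https://prd.example' });   // hypothetical URL containing 'prd'
    expect(mapper.authenticationTypeMapping(basicPayload).authenticationTypeName).toEqual('Federated');
  });

  it('maps dev URLs to Basic', () => {
    process.env.API_CONFIG = JSON.stringify({ API_BASE_URL: 'https://dev.example' });    // hypothetical URL containing 'dev'
    expect(mapper.authenticationTypeMapping(basicPayload).authenticationTypeName).toEqual('Basic');
  });
});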

Codeceptjs on Google Cloud Function goto of undefined

I'm trying to automate a web activity using CodeceptJS (with Puppeteer) running in a Google Cloud Function.
My index.js is:
const Container = require('codeceptjs').container;
const Codecept = require('codeceptjs').codecept;
const event = require('codeceptjs').event;
const path = require('path');

module.exports.basicTest = async (req, res) => {
  let message = '';

  // helpers config
  let config = {
    tests: './*_test.js',
    output: './output',
    helpers: {
      Puppeteer: {
        url: 'https://github.com',   // base url
        show: true,
        disableScreenshots: true,    // don't store screenshots on failure
        windowSize: '1200x1000',     // set window size dimensions
        waitForAction: 1000,         // increase timeout for clicking
        waitForNavigation: [ 'domcontentloaded', 'networkidle0' ],   // wait for document to load
        chrome: {
          args: ['--no-sandbox']     // IMPORTANT! Browser can't be run without this!
        }
      }
    },
    include: {
      I: './steps_file.js'
    },
    bootstrap: null,
    mocha: {},
    name: 'basic_test',
    // Once the tests are finished, send back the result via HTTP
    teardown: (done) => {
      res.send(`Finished\n${message}`);
    }
  };

  // pass more verbose output
  let opts = {
    debug: true,
    steps: true
  };

  // a simple reporter: collect all passed and failed tests
  event.dispatcher.on(event.test.passed, (test) => {
    message += `- Test "${test.title}" passed 😎`;
  });
  event.dispatcher.on(event.test.failed, (test) => {
    message += `- Test "${test.title}" failed 😭`;
  });

  // create runner
  let codecept = new Codecept(config, opts);
  // codecept.init(testRoot)
  codecept.initGlobals(__dirname);
  // create helpers, support files, mocha
  Container.create(config, opts);

  try {
    // initialize listeners
    codecept.bootstrap();
    // load tests
    codecept.loadTests('*_test.js');
    // run tests
    codecept.run();
  } catch (err) {
    printError(err)
    process.exitCode = 1
  } finally {
    await codecept.teardown()
  }
}
and my simple test is:
Feature('Basic Automation');

Scenario('Basic Test', async ({ I }) => {
  // ===== Login =====
  pause()
  I.amOnPage('https://cotps.com');
  I.see('Built for developers', 'h1');
});
If I run it using npx codeceptjs run --steps it works, but if I run it using node -e 'require("./index").basicTest()' I get the error:
Cannot read property 'goto' of undefined. I also get the error if I deploy it to GCP and run it. I've looked through the docs for both CodeceptJS and Puppeteer but found nothing, and the only examples online are for previous versions of the libraries.

Fail jest test suite if console error count increases

We want to fail the build if more console errors are introduced. For example, let's say console.error was called 30 times in the whole test suite. If another error is introduced this will increase to 31, which we don't want. Is there a way to prevent this?
For one test suite it is possible with:
const spy = jest.spyOn(console, "error");
let count = 0;

afterEach(() => {
  count += spy.mock.calls.length;
});

afterAll(() => {
  if (count > 2) {
    throw Error(`oops error count: ${count}`);
  }
});
but it would be nice to have this globally defined.
We solved this in a slightly different way:
// src/utils/testUtils
let consoleErrorSpy;

export const spyOnConsoleError = () => {
  consoleErrorSpy = jest.spyOn(console, "error");
};

/**
 * We are using this to prevent the console errors from increasing.
 * These are our preferences in order of priority:
 * 1. Don't call this method
 * 2. Call this method at the end of a specific test (eg. for an error that can't be solved)
 * 3. Call this method in `afterEach` (eg. for an async error that can't be solved)
 */
export const catchConsoleErrors = ({ silenced = [] } = {}) => {
  const alwaysSilencedErrors = [
    '<bug from a 3rd party library>'
  ];
  const forbiddenCalls = [];
  const silencedCalls = [];

  for (const call of consoleErrorSpy.mock.calls) {
    if (
      new RegExp([...alwaysSilencedErrors, ...silenced].join("|")).test(call)
    ) {
      silencedCalls.push(call);
    } else {
      forbiddenCalls.push(call);
    }
  }

  for (const silencedCall of silencedCalls) {
    // eslint-disable-next-line no-console
    console.log("SILENCED\n---\n" + silencedCall.join(",") + "\n---");
  }

  expect(forbiddenCalls).toHaveLength(0);

  // We clear the mock here so nothing happens if the method is called again for the same test,
  // which is the case when this method is called in a specific test (file)
  // as it is also called in `afterEach` in setupTests.js
  consoleErrorSpy.mockClear();
};
// some test file
afterEach(() => {
  catchConsoleErrors({
    silenced: [
      "Warning: Can't perform a React state update on an unmounted component.*"
    ]
  });
});

// src/setupTests.js
spyOnConsoleError();

afterEach(() => {
  catchConsoleErrors();
});

Jest-Puppeteer test randomly fails without output

I have the following testcase:
it('User can logout', async () => {
  await helper.navClick('.header-user-action__logout');
  const url = page.url();
  console.log(url)
  await expect(url).toContain('auth');
});
helper.navClick is just a small wrapper:
async function navClick(selector, options) {
  return await Promise.all([page.waitForNavigation(), expect(page).toClick(selector, options)]);
}
Most of the time, it succeeds without any problem, but sometimes it'll be marked as failed:
✕ User can logout (569 ms)
● Login › User can logout
console.log
https://auth.example.com/auth/realms/example/protocol/openid-connect/auth?response_type=code&client_id=example-app&redirect_uri=https%3A%2F%2Fexample.com%2Fsso%2Flogin&state=807469fd-3ee5-4d93-8354-ca47a63e69a6&login=true&scope=openid
How can this happen? The url contains "auth" multiple times, and I don't see anything else that could cause the test to fail.
I found the issue by setting up a custom environment:
const PuppeteerEnvironment = require("jest-environment-puppeteer");
const util = require('util');

class DebugEnv extends PuppeteerEnvironment {
  async handleTestEvent(event, state) {
    const ignoredEvents = ['setup', 'add_hook', 'start_describe_definition', 'add_test', 'finish_describe_definition', 'run_start',
      'run_describe_start', 'test_start', 'hook_start', 'hook_success', 'test_fn_start', 'test_fn_success', 'test_done',
      'run_describe_finish', 'run_finish', 'teardown'];
    if (!ignoredEvents.includes(event.name)) {
      console.log(new Date().toString() + " Unhandled event(" + event.name + "): " + util.inspect(event));
    }
  }
}

module.exports = DebugEnv;
In my package.json, I set the testEnvironment to this DebugEnv:
"jest": {
"preset": "jest-puppeteer",
"testEnvironment": "./debugenv.js",
By doing this, I found an error which had nothing to do with the test itself (network related, if I recall correctly).
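As an additional debugging aid (not part of the original fix), you could also log browser-side errors and failed requests directly via the global page that jest-puppeteer exposes; this is only a sketch, assuming it runs in a setup file or a beforeAll:
// Hypothetical extra logging -- attach the listeners once, e.g. in a beforeAll.
beforeAll(() => {
  // Uncaught exceptions thrown inside the page.
  page.on('pageerror', (err) => console.log('pageerror:', err.message));
  // Network requests that failed (DNS errors, aborts, etc.).
  page.on('requestfailed', (req) =>
    console.log('requestfailed:', req.url(), req.failure() && req.failure().errorText)
  );
});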

hapijs testing with server.inject error caught

I am trying to test hapi.js with server.inject:
/// <reference path="../../typings/index.d.ts" />
import * as chai from "chai";
let assert = chai.assert;
import server from "../../src/server";
import UserController from '../../src/controllers/userController';
import UserRepository from '../../src/libs/repository/mongo/userRepository';
import {IUser, IUserActivation, IUserCreate} from "../../src/libs/repository/interfaces";

describe("routes/user", function() {
  const userController = new UserController(server, new UserRepository());

  // ========================== [ ACTIVATE ] ==========================
  it.only("/activate: should activate a user", function(done) {
    let user: IUserActivation = {
      '_id': '1234566737465',
      'token': '123234523542345'
    };
    let url = '/api/users/' + user._id + '/' + user.token;
    const request = {
      method: 'PUT',
      url: url,
      payload: user
    };

    server.inject(request).then((response) => {
      let res = JSON.parse(response.payload);
      //assert.strictEqual(res.success, true, '/users/{id}/{token}')
      chai.expect(res.success).to.deep.equal(false);
      chai.expect(res.success).to.deep.equal(true);
      done();
    }).catch((error) => {
      console.log(error.message);
    });
  });
});
The response.success attribute is true, so the test should fail because of chai.expect(res.success).to.deep.equal(false);.
But the test fails with the message: Error: timeout of 2000ms exceeded. Ensure the done() callback is being called in this test.
When I remove the catch clause, it also fails with the timeout error.
If I add done() to the end of the catch clause, the test passes. That is the wrong behavior, because the test should fail.
What can I do to get the expected behavior? Thanks in advance.
The problem is that server.inject returns a promise. As long as you're using a recent version of Mocha, it can handle that directly: you don't need to worry about calling done(); instead, return the promise from server.inject.
describe("routes/user", function() {
const userController = new UserController(server, new UserRepository());
// ========================== [ ACTIVATE ] ==========================
it.only("/activate: should activate a user", function() { //No done needed
let user: IUserActivation = {
'_id': '1234566737465',
'token': '123234523542345'
};
let url = '/api/users/' + user._id + '/' + user.token;
const request = {
method: 'PUT',
url: url,
payload: user
};
//Return the promise
return server.inject(request).then((response) => {
let res = JSON.parse(response.payload);
//assert.strictEqual(res.success, true, '/users/{id}/{token}')
chai.expect(res.success).to.deep.equal(false);
chai.expect(res.success).to.deep.equal(true);
}); //No catch needed.
});
});
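For comparison (this is not part of the original answer, just an equivalent sketch), the same test can be written with async/await; Mocha treats the returned promise the same way, and a failed expectation rejects it and fails the test:
// Sketch of the async/await form. `server`, `chai`, and `request` are the same
// objects as in the test above; a throwing expectation rejects the returned
// promise and Mocha reports the failure.
it.only("/activate: should activate a user", async function () {
  const response = await server.inject(request);
  const res = JSON.parse(response.payload);
  chai.expect(res.success).to.deep.equal(true);
});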
