I'm trying to get up to speed with node.js and nodeunit, but I'm hitting an issue where nodeunit doesn't see the call to test.done() in one of my tests.
The code:
// Added for clarity.
var client = require("restify").createJsonClient({
    "version": "*",
    "url": "http://localhost:" + server.Port
});

exports["tests"] = {
    "setUp": function (callback) {
        server.StartServer();
        callback();
    },
    "tearDown": function (callback) {
        callback();
    },
    "CanIHaveSomeTeaPlease?": function (test) {
        test.expect(4);
        client.get("/tea", function (err, req, res, data) {
            test.equal(err.statusCode, 418, "Expected ImATeapot Error.");
            test.equal(err.message, "Want a biscuit?", "Expected to be asked if I want a biscuit.");
            test.equal(err.restCode, "ImATeapotError");
            test.equal(err.name, "ImATeapotError");
            test.done();
        });
    },
    // Note: I expect this test to fail as it is a copy of the above
    // test on a different url that doesn't return the ImATeapot
    // HTTP error. But it doesn't look like it's detecting it
    // properly.
    "TakeThisInfo": function (test) {
        test.expect(4);
        client.put("/push", {
            "hello": "world"
        }, function (err, req, res, data) {
            test.equal(err.statusCode, 418, "Expected ImATeapot Error.");
            test.equal(err.message, "Want a biscuit?", "Expected to be asked if I want a biscuit.");
            test.equal(err.restCode, "ImATeapotError");
            test.equal(err.name, "ImATeapotError");
            test.done();
        });
    }
};
Output:
FAILURES: Undone tests (or their setups/teardowns):
- tests - TakeThisInfo
To fix this, make sure all tests call test.done()
I'm hoping it is something stupid.
Versions:-
Node: 0.10.21
NPM: 1.3.11
Nodeunit: 0.8.2
Grunt-CLI: 0.1.10
Grunt: 0.4.1
First, I don't know what "server" is in your code, but I would expect it to be asynchronous, so I'd expect something more like this in your setUp function:
function (callback) {
    server.StartServer(function () {
        callback();
    });
}
Second, keep in mind that nodeunit executes the setUp and tearDown functions before and after EVERY test, so I suspect you are starting your server twice (and in tearDown you are not actually closing it).
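Something along these lines, for illustration only (StopServer is a hypothetical stand-in for whatever shutdown method your server module actually provides):

exports["tests"] = {
    "setUp": function (callback) {
        // wait for the server to actually be listening before running the test
        server.StartServer(function () {
            callback();
        });
    },
    "tearDown": function (callback) {
        // shut the server down so the next test's setUp starts from a clean state
        server.StopServer(function () {
            callback();
        });
    }
    // ... tests ...
};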
I've spent the last couple of hours messing with this issue, and what has become clear is that nodeunit has no ability to catch and display exceptions thrown in functions that are triggered later by an I/O or setTimeout-type process. Considering the way JavaScript runs, this isn't surprising. Everything works once you are sure there are no exceptions, but if you have an error in your code, you will get the "undone tests" message and nothing else. Here is what I did to resolve my issues (using a restify route as an example):
function myRoute(req, res, next) {
    try {
        // your code goes here...
    }
    catch (err) {
        // write it to the console (for unit testing)
        console.log(err);
        // pass the error to the next function.
        next(err);
    }
}
Once I understood the problem in this way, fixing it became a lot clearer and I was able to get all of my tests to pass!
I suspect you're not actually calling test.done() in that second test. Put a console.log() call in there to verify you're actually making that call.
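For example, just your second test with a log line added before the assertions (purely diagnostic):

"TakeThisInfo": function (test) {
    test.expect(4);
    client.put("/push", {
        "hello": "world"
    }, function (err, req, res, data) {
        console.log("PUT /push callback fired, err =", err); // does this ever print?
        // ...the four assertions from your test...
        test.done();
    });
}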
FWIW, I repro'd the described problem using a simplified version of your test, below. If you omit the on('error', function() {...}) handler, then the 2nd test fails to complete. Thus, my theory is that your /push endpoint is triggering a different behavior in the restify module. I.e. are you sure restify is invoking your callback with an err property there, or is it doing something different? ... like, for example, emitting an event like http.get does, below.
var http = require('http');

exports.test1 = function (test) {
    test.expect(1);
    http.get({hostname: "www.broofa.com", path: "/"}, function (res) {
        test.equal(res.statusCode, 200, 'got 200');
        test.done();
    });
};

exports.test2 = function (test) {
    test.expect(1);
    http.get({hostname: "www.no-such-domain.com", path: "/"}, function (res) {
        test.equal(res.statusCode, 200, 'got 200');
        test.done();
    }).on('error', function () {
        // Comment line below out to repro the "Undone tests" error
        test.done();
    });
};
I'm working around it by forking the server into its own process in the setUp and then killing it in the tearDown. I think the issue was to do with the server being created and not shut down. Thanks #matteofigus for that.
var fork = require("child_process").fork;
var cp = null; // child process

exports["tests"] = {
    "setUp": function (callback) {
        cp = fork("./lib/server.js", {"silent": true});
        callback();
    },
    "tearDown": function (callback) {
        cp.kill("SIGHUP");
        callback();
    },
    "CanIHaveSomeTeaPlease?": function (test) {
        test.expect(4);
        client.get("/tea", function (err, req, res, data) {
            test.equal(err.statusCode, 418, "Expected ImATeapot Error.");
            test.equal(err.message, "Want a biscuit?", "Expected to be asked if I want a biscuit.");
            test.equal(err.restCode, "ImATeapotError");
            test.equal(err.name, "ImATeapotError");
            test.done();
        });
    },
    "TakeThisInfo": function (test) {
        test.expect(1);
        client.put("/push", {
            "hello": "world"
        }, function (err, req, res, data) {
            test.ok(false);
            test.done();
        });
    }
};
Related
When running mocha tests using npm run test, is it possible to have the contents of the response body printed whenever a test fails with an error?
chai.request(server)
    .post('/')
    .set('X-Access-Token', testUser.accessToken)
    .send(fields)
    .end((error, response) => {
        console.log(response.body); // log this!
        response.should.have.status(201); // if this fails!
        done();
    });
In other words, could the afterEach function have access to error and response for each test?
afterEach(function (error, response) {
    if (error) console.log('afterEach', response.body);
});
We have useful error messages coming down in the response, so we find ourselves pasting that console.log line into the failing test to debug. It'd be nice to always see the response.body on each error.
OP here - I came up with an answer and figured I'd leave it here until someone comes up with a better one.
The reason it's not ideal is that it requires a single line in each test, which updates a shared variable currentResponse with that test's response. But if your tests span many files, you can maintain a global variable in your setup script:
// you can use a global variable if tests span many files
let currentResponse = null;

afterEach(function () {
    const errorBody = currentResponse && currentResponse.body;
    if (this.currentTest.state === 'failed' && errorBody) {
        console.log(errorBody);
    }
    currentResponse = null;
});
And then each of your tests would update the current response, so we can log it in the afterEach, in the event that it fails.
describe('POST /interests', () => {
    it('400s if categoryName field is not present in the category', done => {
        const fields = [
            { language: 'en' },
        ];
        chai.request(server)
            .post('/interests')
            .set('X-Access-Token', testUser.accessToken)
            .send(fields)
            .end((error, response) => {
                currentResponse = response; // update it here
                response.should.have.status(400);
                done();
            });
    });
});
And this will output the response whenever there's an error, so you can see what the server returned.
Given the following gulp tasks, I'm able to successfully start the gulp, webpack and nodemon processes, but the webpack tasks are open-ended, so they will continue to fire the completion handler whenever their watch / compile cycle completes.
The server task depends on the client task output, so I need these operations to be synchronous, hence the done callback.
function onBuild(done) {
    return function (err, stats) {
        if (err) {
            gutil.log('Error', err);
            if (done) {
                done();
            }
        } else {
            Object.keys(stats.compilation.assets).forEach(function (key) {
                gutil.log('Webpack: output ', gutil.colors.green(key));
            });
            gutil.log('Webpack: ', gutil.colors.blue('finished ', stats.compilation.name));
            if (done) {
                done();
            }
        }
    };
}

//dev watch
gulp.task('webpack-client-watch', function (done) {
    webpack(devConfig[0]).watch(100, function (err, stats) {
        onBuild(done)(err, stats);
    });
});

gulp.task('webpack-server-watch', function (done) {
    webpack(devConfig[1]).watch(100, function (err, stats) {
        onBuild(done)(err, stats);
        nodemon.restart();
    });
});

gulp.task('webpack-watch', function (callback) {
    runSequence(
        'webpack-client-watch',
        'webpack-server-watch',
        callback
    );
});

gulp.task('nodemon', ['webpack-watch'], function () {
    nodemon({
        script: path.join('server/dist/index.js'),
        //ignore everything
        ignore: ['*'],
        watch: ['foo/'],
        ext: 'noop'
    }).on('restart', function () {
        gutil.log(gutil.colors.cyan('Restarted'));
    });
});
When I change a file, the watcher does its thing and gulp complains about the callback being called yet again.
[15:00:25] Error: task completion callback called too many times
I've looked at this, but I'm not sure if it's applicable.
Why might I be getting "task completion callback called too many times" in gulp?
Basically, I just want this to work synchronously and continuously without error.
This solved it for me: Just don't call the callback parameter in your webpack-watch task. Leave it out completely.
After that, the watcher works fine and fast without complaining.
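In other words, something roughly like this, reusing onBuild from the question (a sketch, not tested against your config): the task function returns after kicking off the watcher, so gulp considers it finished exactly once, and later rebuilds never touch a task callback.

gulp.task('webpack-client-watch', function () {
    webpack(devConfig[0]).watch(100, function (err, stats) {
        onBuild()(err, stats); // no `done` passed in, so onBuild only logs
    });
});

gulp.task('webpack-server-watch', function () {
    webpack(devConfig[1]).watch(100, function (err, stats) {
        onBuild()(err, stats);
        nodemon.restart();
    });
});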
If a public folder exists in your application, remove it and re-run; after that you should see this issue resolved.
I have the following route (express) for which I'm writing an integration test.
Here's the code:
var q = require("q"),
    request = require("request");

/*
 Example of service wrapper that makes HTTP request.
*/
function getProducts() {
    var deferred = q.defer();
    request.get({ uri: "http://localhost/some-service" }, function (e, r, body) {
        deferred.resolve(JSON.parse(body));
    });
    return deferred.promise;
}

/*
 The route
*/
exports.getProducts = function (request, response) {
    getProducts()
        .then(function (data) {
            response.write(JSON.stringify(data));
            response.end();
        });
};
I want to test that all the components work together but with a fake HTTP response, so I am creating a stub for the request/http interactions.
I am using Chai, Sinon and Sinon-Chai and Mocha as the test runner.
Here's the test code:
var chai = require("chai"),
    should = chai.should(),
    sinon = require("sinon"),
    sinonChai = require("sinon-chai"),
    route = require("../routes"),
    request = require("request");

chai.use(sinonChai);

describe("product service", function () {
    before(function (done) {
        sinon
            .stub(request, "get")
            // change the text of product name to cause test failure.
            .yields(null, null, JSON.stringify({ products: [{ name: "product name" }] }));
        done();
    });
    after(function (done) {
        request.get.restore();
        done();
    });
    it("should call product route and return expected response", function (done) {
        var writeSpy = {},
            response = {
                write: function () {
                    writeSpy.should.have.been.calledWith("{\"products\":[{\"name\":\"product name\"}]}");
                    done();
                }
            };
        writeSpy = sinon.spy(response, "write");
        route.getProducts(null, response);
    });
});
If the argument written to the response (response.write) matches, the test passes OK. The issue is that when the test fails, the failure message is:
"Error: timeout of 2000ms exceeded"
I've referenced this answer, however it doesn't resolve the problem.
How can I get this code to display the correct test name and the reason for failure?
NB A secondary question may be, could the way the response object is being asserted be improved upon?
The problem looks like an exception is getting swallowed somewhere. The first thing that comes to my mind is adding done at the end of your promise chain:
exports.getProducts = function (request, response) {
    getProducts()
        .then(function (data) {
            response.write(JSON.stringify(data));
            response.end();
        })
        .done(); /// <<< Add this!
};
It is typically the case when working with promises that you want to end your chain by calling a method like this. Some implementations call it done, some call it end.
How can I get this code to display the correct test name and the reason for failure?
If Mocha never sees the exception, there is nothing it can do to give you a nice error message. One way to diagnose a possible swallowed exception is to add a try... catch block around the offending code and dump something to the console.
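For example, applied to the stubbed response from the question (a sketch only), you could catch the assertion failure inside write and hand it to Mocha yourself:

it("should call product route and return expected response", function (done) {
    var response = {
        write: function (body) {
            try {
                body.should.equal("{\"products\":[{\"name\":\"product name\"}]}");
                done();
            } catch (err) {
                console.log("assertion failed:", err.message); // make the swallowed error visible
                done(err); // Mocha now reports the real failure instead of a timeout
            }
        },
        end: function () {}
    };
    route.getProducts(null, response);
});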
In my node application I'm using mocha to test my code. While calling many asynchronous functions using mocha, I'm getting timeout error (Error: timeout of 2000ms exceeded.). How can I resolve this?
var module = require('../lib/myModule');
var should = require('chai').should();

describe('Testing Module', function () {
    it('Save Data', function (done) {
        this.timeout(15000);
        var data = {
            a: 'aa',
            b: 'bb'
        };
        module.save(data, function (err, res) {
            should.not.exist(err);
            done();
        });
    });

    it('Get Data By Id', function (done) {
        var id = "28ca9";
        module.get(id, function (err, res) {
            console.log(res);
            should.not.exist(err);
            done();
        });
    });
});
You can either set the timeout when running your test:
mocha --timeout 15000
Or you can set the timeout for each suite or each test programmatically:
describe('...', function () {
    this.timeout(15000);

    it('...', function (done) {
        this.timeout(15000);
        setTimeout(done, 15000);
    });
});
For more info see the docs.
I find that the "solution" of just increasing the timeouts obscures what's really going on here, which is either:
1. Your code and/or network calls are way too slow (they should be sub-100 ms for a good user experience), or
2. The assertions (tests) are failing and something is swallowing the errors before Mocha is able to act on them.
You usually encounter #2 when Mocha doesn't receive assertion errors from a callback. This is caused by some other code swallowing the exception further up the stack. The right way of dealing with this is to fix the code and not swallow the error.
When external code swallows your errors
In case it's a library function that you are unable to modify, you need to catch the assertion error and pass it on to Mocha yourself. You do this by wrapping your assertion callback in a try/catch block and passing any exceptions to the done handler.
it('should not fail', function (done) { // Pass reference here!
    i_swallow_errors(function (err, result) {
        try { // boilerplate to be able to get the assert failures
            assert.ok(true);
            assert.equal(result, 'bar');
            done();
        } catch (error) {
            done(error);
        }
    });
});
This boilerplate can of course be extracted into some utility function to make the test a little more pleasing to the eye:
it('should not fail', function (done) { // Pass reference here!
    i_swallow_errors(handleError(done, function (err, result) {
        assert.equal(result, 'bar');
    }));
});

// reusable boilerplate to be able to get the assert failures:
// returns a callback that runs fn inside try/catch and reports to done
function handleError(done, fn) {
    return function () {
        try {
            fn.apply(this, arguments);
            done();
        } catch (error) {
            done(error);
        }
    };
}
Speeding up network tests
Other than that, I suggest you take the advice to start using test stubs for network calls, so tests can pass without relying on a functioning network. Using Mocha, Chai and Sinon the tests might look something like this:
describe('api tests normally involving network calls', function () {
    beforeEach(function () {
        this.xhr = sinon.useFakeXMLHttpRequest();
        var requests = this.requests = [];
        this.xhr.onCreate = function (xhr) {
            requests.push(xhr);
        };
    });

    afterEach(function () {
        this.xhr.restore();
    });

    it("should fetch comments from server", function () {
        var callback = sinon.spy();
        myLib.getCommentsFor("/some/article", callback);
        expect(this.requests.length).to.equal(1);
        this.requests[0].respond(200, { "Content-Type": "application/json" },
            '[{ "id": 12, "comment": "Hey there" }]');
        expect(callback.calledWith([{ id: 12, comment: "Hey there" }])).to.be.true;
    });
});
See Sinon's nise docs for more info.
If you are using arrow functions:
it('should do something', async () => {
    // do your testing
}).timeout(15000);
A little late, but someone may find this useful in the future... You can increase your test timeout by updating the scripts section of your package.json with the following (adjust the value to whatever you need):
"scripts": {
"test": "test --timeout 10000" //Adjust to a value you need
}
Then run your tests with npm test.
For me the problem was actually the describe function: when it is given an arrow function, mocha misses the timeout and behaves inconsistently (using ES6). Since no promise was rejected, I was getting this error all the time for different tests that were failing inside the describe block.
This is how it looks when it's not working properly:
describe('test', () => {
    assert(...)
})
and this works, using an anonymous function:
describe('test', function () {
    assert(...)
})
Hope it helps someone, my configuration for the above:
(nodejs: 8.4.0, npm: 5.3.0, mocha: 3.3.0)
My issue was that I was not sending the response back, so the request was hanging. If you are using Express, make sure that res.send(data), res.json(data), or whichever API method you want to use actually gets executed for the route you are testing.
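A contrived sketch of what that looks like (the route and findById are made up for illustration): if any branch returns without sending a response, the request, and therefore the test, hangs until Mocha's timeout fires.

app.get('/things/:id', function (req, res) {
    findById(req.params.id, function (err, thing) {
        if (err) {
            return res.status(500).json({ error: err.message });
        }
        if (!thing) {
            return res.status(404).end(); // without this branch responding, a missing record would hang
        }
        res.json(thing); // some response must always be sent
    });
});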
Make sure to resolve/reject the promises used in the test cases; whether they come from spies or stubs, make sure they actually resolve or reject.
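For instance, with Sinon (a small sketch, assuming chai's should as in the examples above): a stub that returns a promise has to actually settle, otherwise the awaiting test never finishes and times out.

var sinon = require('sinon');
require('chai').should();

var save = sinon.stub();
save.resolves({ id: 1 });                      // good: the awaited promise settles
// save.returns(new Promise(function () {}));  // bad: stays pending forever -> timeout

it('saves the record', async function () {
    var result = await save({ a: 'aa' });
    result.id.should.equal(1);
});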
I am using the node-mongodb-native driver. I tried:
collection.findOne({email: 'a#mail.com'}, function (err, result) {
    if (!result) throw new Error('Record not found!');
});
But the error is caught by the mongodb driver and the express server is terminated.
What's the correct way for this case?
=== Edit ===
I have the code below in app.js
app.configure('development', function () {
    app.use(express.errorHandler({ dumpExceptions: true, showStack: true }));
});

app.configure('production', function () {
    app.use(express.errorHandler());
});
Related code in node_modules/mongodb/lib/mongodb/connection/server.js
connectionPool.on("message", function (message) {
    try {
        ......
    } catch (err) {
        // Throw error in next tick
        process.nextTick(function () {
            throw err; // <-- here throws an uncaught error
        })
    }
});
The correct use is not to throw an error, but to pass it to the next function. First you define the error handler:
app.error(function (err, req, res, next) {
    res.render('error_page.jade');
});
(What's this talk about error being deprecated? I don't know anything about that. But even if it is, you can just use use instead; the mechanism is still the same.)
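For reference, on newer Express versions the equivalent is an error-handling middleware registered with app.use, recognised by its four-argument signature (a sketch):

app.use(function (err, req, res, next) {
    res.status(500);
    res.render('error_page.jade', { error: err });
});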
Now in your route you pass the error to the handler like this:
function handler(req, res, next) {
    collection.findOne({email: 'a#mail.com'}, function (err, result) {
        if (!result) {
            var myerr = new Error('Record not found!');
            return next(myerr); // <---- pass it, not throw it
        }
        res.render('results.jade', { results: result });
    });
}
Make sure that no other code (related to the response) is fired after next(myerr); (that's why I used return there).
Side note: Errors thrown in asynchronous operations are not handled well by Express (well, actually they somewhat are, but that's not what you need). This may crash your app. The only way to capture them is by using
process.on('uncaughtException', function (err) {
    // handle it here, log or something
});
but this is a global exception handler, i.e. you cannot use it to send the response to the user.
I'm guessing that the error is not caught. Are you using an Express error handler? Something like:
app.error(function (err, req, res, next) {
    res.render('error-page', {
        status: 404
    });
});
More on error handling in Express: http://expressjs.com/guide.html#error-handling
In terms of checking for errors coming back from mongodb, test the err argument (!err for success) rather than relying on !result.
collection.findOne({email: 'a#mail.com'}, function (err, result) {
    if (!err) {
        // do good stuff;
    } else {
        throw new Error('Record not found!');
    }
});
As for the custom 404, I've yet to do that in node and express, but I would imagine it would involve "app.router".
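One common way to get a custom 404, sketched under the assumption that you have a 404.jade template: mount a catch-all middleware after all your other routes, so any request that falls through renders the not-found page.

app.use(function (req, res) {
    res.statusCode = 404; // or res.status(404) on newer Express
    res.render('404.jade', { url: req.url }); // 404.jade is a placeholder template name
});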