I'm building a logging module for my web app in nodejs. I'd like to be able to test using mocha that my module outputs the correct messages to the terminal. I have been looking around but haven't found any obvious solutions to check this. I have found
process.stdout.on('data', function (){})
but haven't been able to get this to work. does anybody have any advice?
process.stdout is never going to emit 'data' events because it's not a readable stream. You can read all about that in the node stream documentation, if you're curious.
As far as I know, the simplest way to hook or capture process.stdout or process.stderr is to replace process.stdout.write with a function that does what you want. Super hacky, I know, but in a testing scenario you can use before and after hooks to make sure it gets unhooked, so it's more or less harmless. Since it writes to the underlying stream anyway, it's not the end of the world if you don't unhook it anyway.
function captureStream(stream){
  // Monkey-patches stream.write so every chunk is appended to a local
  // buffer while still being forwarded to the underlying stream.
  var oldWrite = stream.write;
  var buf = '';
  stream.write = function(chunk, encoding, callback){
    buf += chunk.toString(); // chunk is a String or Buffer
    // Preserve write()'s contract: forward the call AND return its
    // boolean backpressure result (the original wrapper dropped it,
    // so callers could never see that the stream wanted a pause).
    return oldWrite.apply(stream, arguments);
  };
  return {
    // Restore the original write so later output is no longer captured.
    unhook: function unhook(){
      stream.write = oldWrite;
    },
    // Everything written since the hook was installed.
    captured: function(){
      return buf;
    }
  };
}
You can use it in mocha tests like this:
describe('console.log', function () {
  var stdoutHook;

  // Capture stdout for the duration of each example, and always restore
  // it afterwards so mocha's own reporter output is unaffected.
  beforeEach(function () {
    stdoutHook = captureStream(process.stdout);
  });

  afterEach(function () {
    stdoutHook.unhook();
  });

  it('prints the argument', function () {
    console.log('hi');
    // console.log appends a trailing newline to its argument.
    assert.equal(stdoutHook.captured(), 'hi\n');
  });
});
Here's a caveat: mocha reporters print to the standard output. They do not, as far as I know, do so while example (it('...',function(){})) functions are running, but you may run into trouble if your example functions are asynchronous. I'll see if I can find more out about this.
I've attempted jjm's answer and had problems which I suspect were due to my program's async behaviour.
I found a solution via a cli on github that uses the sinon library.
An example code to test:
/* jshint node:true */
module.exports = Test1;

// The fixed sequence of lines both execute methods print.
var LINES = ['ABC', '123', 'CBA', '321'];

// Constructor; options are currently unused but normalised to an object.
function Test1(options) {
  options = options || {};
}

// Logs the four lines immediately (synchronously).
Test1.prototype.executeSync = function () {
  LINES.forEach(function (line) {
    console.log(line);
  });
};

// Logs the four lines after `time` ms, then invokes `callback`.
Test1.prototype.executeASync = function (time, callback) {
  setTimeout(function () {
    LINES.forEach(function (line) {
      console.log(line);
    });
    callback();
  }, time);
};
And the mocha tests:
/* jshint node:true */
/* global describe:true, it:true, beforeEach:true, afterEach:true, expect:true */
var assert = require('chai').assert;
var expect = require('chai').expect;
var sinon = require("sinon");
var Test1 = require("../test");
var test1 = null;
describe("test1", function() {
  // Shared checks: the stubbed console.log saw exactly the four expected
  // lines, in order. Used by both the sync and async examples.
  function assertConsoleOutput() {
    assert.isTrue(console.log.called, "log should have been called.");
    assert.equal(console.log.callCount, 4);
    assert.isFalse(console.log.calledOnce);
    expect(console.log.getCall(0).args[0]).to.equal("ABC");
    expect(console.log.getCall(1).args[0]).to.equal("123");
    expect(console.log.args[2][0]).to.equal("CBA");
    expect(console.log.args[3][0]).to.equal("321");
  }

  beforeEach(function() {
    // Stub both console methods so test output stays clean and every
    // call is recorded by sinon.
    sinon.stub(console, "log").returns(void 0);
    sinon.stub(console, "error").returns(void 0);
    test1 = new Test1();
  });

  afterEach(function() {
    console.log.restore();
    console.error.restore();
  });

  describe("executeSync", function() {
    it("should output correctly", function() {
      test1.executeSync();
      assertConsoleOutput();
    });
  });

  describe("executeASync", function() {
    it("should output correctly", function(done) {
      test1.executeASync(100, function() {
        assertConsoleOutput();
        done();
      });
    });
  });
});
I'm providing the above as it demonstrates working with async calls, it deals with both console and error output and the method of inspection is of more use.
I should note that I've provided two methods of obtaining what was passed to the console, console.log.getCall(0).args[0] and console.log.args[0][0]. The first param is the line written to the console. Feel free to use what you think is appropriate.
Two other libraries that help with this are test-console and intercept-stdout I haven't used intercept-stdout, but here's how you can do it with test-console.
var myAsync = require('my-async');
var stdout = require('test-console').stdout;
describe('myAsync', function() {
  it('outputs something', function(done) {
    // Start intercepting stdout before the async work begins.
    var captured = stdout.inspect();
    myAsync().then(function() {
      // Stop intercepting before asserting so mocha's own reporter
      // output is not swallowed.
      captured.restore();
      assert.ok(captured.output.length > 0);
      done();
    });
  });
});
Note: You must use Mocha's async API. Not calling done() will cause Mocha to swallow your test's output messaging.
Related
In my original function I need to make 2 requests to 2 different db's within the same couch login.
// The Cloudant connection string must be a string literal — the original
// `(https://cloudant_url)` was unquoted, which is a syntax error.
var cloudant = require('cloudant')('https://cloudant_url');
var userdb = cloudant.db.use('user');
var addrdb = cloudant.db.use('address');

// Looks up the user by name, then fetches that user's address record
// from the second database within the same Cloudant session.
function onChange(username) {
  userdb.get(username, function(err, resp) {
    var user_id = resp.id;
    addrdb.get(user_id, function(err1, resp1) {
      var addr = resp1.address;
    });
  });
}
var nockVar = function() {
  // Intercept the user lookup…
  nock(testCloudantDBURL)
    .get('/user/jack')
    .reply(200, {'id' : 123});
  // …and the follow-up address lookup. Note: the original reply body
  // `{'address':'123}` was missing its closing quote — a syntax error.
  nock(testCloudantDBURL)
    .get('/address/123')
    .reply(200, {'address':'123'});
};
describe('Test Cloudant Listener code' , function() {
// NOTE(review): installing the interceptors at describe time works, but
// is conventionally done in a before() hook.
nockVar();
it('test get scenario', function() {
// NOTE(review): onChange is asynchronous but this test is synchronous —
// it returns before the nested address request ever fires, which is why
// only the first nock interceptor appears to work.
onChange('jack');
});
});
With this only the first call works and I can get the id : 123. The second call on address db is not getting intercepeted.
With nock I'm able to intercept only the first call; the second call is not happening. Any pointers?
This happens because your code is executed asynchronously and your test doesn't wait for the userdb.get and addrdb.get to finish. Easiest (not best) way to handle this is to add a done callback to your test scenario and call it as soon as your onChange function is finished. Roughly something like:
// Same lookup chain as before, but now signals completion through `done`
// so an async-aware test can wait for both requests.
function onChange(username, done) {
  userdb.get(username, function(err, resp) {
    var user_id = resp.id;
    addrdb.get(user_id, function(err1, resp1) {
      var addr = resp1.address;
      // Only complete after the second, nested request returns.
      done();
    });
  }); // <- the original had `};` here, missing the ")" that closes userdb.get(...)
}
it('test get scenario', function(done) {
// Passing mocha's done callback through makes the test wait until both
// nested requests have completed.
onChange('jack', done);
});
You might also consider working with Promises based code.
My node.js code is:
// Fetches every notification phone number for a patient and renames the
// snake_case SQL columns to camelCase objects.
function getPatientNotificationNumbers(patientId) {
  patientId = patientId && patientId.toString();
  var sql = "SELECT * FROM [notification_phone_number] " +
            "WHERE patient_id = " + escapeSql(patientId);
  return sqlServer.query(sql).then(function (results) {
    // results[0] holds the row set returned by the driver.
    return _.map(results[0], function (row) {
      return {
        id: row.id,
        phoneNumber: row.phone_number,
        isPrimary: row.is_primary,
        isVerified: row.is_verified,
        patientId: row.patient_id
      };
    });
  });
}
Pretty simple and straight forward. What I want to test is that the return of this function, properly resolved, is an array of phoneNumbers that match that format.
sqlServer is require'd above and I have a ton of things require'd in this file. To stub them out, I am using mockery, which seems to be pretty great.
Here is my test, so far:
before(function() {
deferred = Q.defer();
mockery.enable();
moduleConfig.init();
mockery.registerSubstitute('moment', moment);
mockery.registerAllowable('../../util');
mockStubs.sqlServer = {
// NOTE(review): .returns() captures the promise of the deferred that
// exists RIGHT NOW; re-assigning `deferred` later (e.g. in beforeEach)
// does not change what query() returns — a classic stale-stub trap.
query: sinon.stub().returns(deferred.promise)
}
mockery.registerMock('../../db/sqlserver', mockStubs.sqlServer);
methods = require('../../../rpc/patient/methods');
});
beforeEach(function() {
  // Create a fresh deferred for every test AND re-point the stub at it.
  // Without the second line, sqlServer.query keeps returning the promise
  // of the very first deferred (captured in before()), which no test ever
  // resolves — which is why execution "never gets past sqlServer.query".
  deferred = Q.defer();
  mockStubs.sqlServer.query.returns(deferred.promise);
})
it('should get the patient notification numbers', function(done) {
// sinon.spy(sqlServer, 'query').and.returnValue(deferred.promise);
// NOTE(review): this resolves the deferred created in beforeEach, but the
// stub installed in before() still returns the FIRST deferred's promise,
// so the .then below never fires — this is the bug being asked about.
deferred.resolve('here we go');
methods.getPatientNotificationNumbers(1).then(function(result) {
console.log(result);
done();
});
});
However, it never gets past sqlServer.query in my code. So the results are pointless. I also tried something like:
response = methods.getPatientNotificationNumbers(1)
but when I console.log(response), it's basically {state: 'pending'}, which I guess is an unresolved promise.
So I'm all over the place and I'm open to using whatever libraries make things easy. I am not married to mockery, sinon or whatever else. Any suggestions would help.
Thanks!
So another approach is to use a combination of rewire and deride.
var should = require('should-promised');
var rewire = require('rewire');
var Somefile = rewire('./path/to/file');
var deride = require('deride');
var sut, mockSql;
before(function() {
// deride builds a stub object exposing setup/expect helpers for 'query'.
mockSql = deride.stub(['query']);
// rewire injects the stub in place of the module-private sqlServer binding.
Somefile.__set__('sqlServer', mockSql);
sut = new Somefile();
});
describe('getting patient notification numbers', function() {
  beforeEach(function() {
    // Default for every test: the stubbed query resolves successfully.
    mockSql.setup.query.toResolveWith(fakeSqlResponse);
  });

  it('resolves the promise', function() {
    // Return the promise assertion so mocha actually waits for it —
    // without the `return`, the test completed before the assertion
    // settled and a failure could never be reported.
    return sut.getPatientNotificationNumbers(id).should.be.fulfilledWith(expectedPhoneNumbers);
  });

  it('queries Sql', function(done) {
    sut.getPatientNotificationNumbers(id).then(function() {
      // Verify the sql layer was hit exactly once.
      mockSql.expect.query.called.once();
      done();
    });
  });
});
This will mean that you do not need to change your production code and you can easily start testing the un-happy paths using something like this:
it('handles Sql errors', function(done) {
mockSql.setup.query.toRejectWith(new Error('Booom'));
sut.getPatientNotificationNumbers(id)
.then(function(){
// Reaching .then means the rejection was swallowed — fail the test.
done('should not have resolved');
})
.catch(function(e) {
e.should.be.an.Error;
e.message.should.eql('Booom');
done();
});
});
Or even more succinctly:
it('handles Sql errors', function() {
  mockSql.setup.query.toRejectWith(new Error('Booom'));
  // Return the promise assertion so mocha waits on it. The original took
  // a `done` callback it never called, so the test always timed out.
  return sut.getPatientNotificationNumbers(id).should.be.rejectedWith(/Booom/);
});
I would zoom out a little bit and think of the test strategy. Assume that the goal is to test your model layer (methods) on top of the sql server. It can be done without stubbing out sql server. Your test suite layer can have set of util methods to create, initialize and drop the db which can be called from before, beforeEach etc.
Pros of doing this:
Testing real product code path is better (than stubbed code path).
Stubbing is better if you suspect the underlying layer bugs to generate noise. sqlserver layer is likely stable.
model layer seem to be simple enough that does not require testing in isolation.
Stubbing would make sense if you are trying to test sqlserver failure handling in your model layer. Then the stub layer can fake such errors - to exercise error paths in model code.
This is based on limited view of your problem. If there is more to it. Pl do share and we can take it from there.
It seems to me your intent is to test the function your passing the the .then() method, not the sqlserver library nor the actual promise it returns. We can assume those have already been tested.
If this is the case, you can simply factor out that function (the one that extracts the phone numbers from the SQL result) and test it independently.
Your code would become something along the lines of:
var getNumbers = require('./getNumbers');

// Fetches the patient's notification numbers; the row-to-object mapping
// lives in the separately-testable getNumbers helper.
function getPatientNotificationNumbers(patientId) {
  patientId = patientId && patientId.toString();
  var sql = "SELECT * FROM [notification_phone_number] " +
            "WHERE patient_id = " + escapeSql(patientId);
  return sqlServer.query(sql).then(getNumbers);
}
... and your ./getNumbers.js file would look something like:
/**
 * Maps raw SQL rows (results[0]) to phone-number objects with camelCase
 * keys. Pure function, so it is easy to unit-test in isolation.
 *
 * @param {Array} results - sqlServer.query result; results[0] is the row set.
 * @returns {Array} one {id, phoneNumber, isPrimary, isVerified, patientId}
 *                  object per row.
 */
function getNumbers(results) {
  // Native Array#map replaces the lodash dependency for a plain array.
  // Guard against a missing row set the same way _.map tolerated it
  // (empty output instead of a TypeError).
  var rows = (results && results[0]) || [];
  return rows.map(function (row) {
    return {
      id: row.id,
      phoneNumber: row.phone_number,
      isPrimary: row.is_primary,
      isVerified: row.is_verified,
      patientId: row.patient_id
    };
  });
}
module.exports = getNumbers;
Now you can test it independently:
var getNumbers = require('../path/to/getNumbers');
...
it('should get the patient notification numbers', function(done) {
var sampleResults = [ ... ]; // sample results from sqlServer
var phoneNumbers = getNumbers(sampleResults);
assert(...); // make what ever assertions you want
});
Let's say I have the following function
'use strict';
var http = require('http');
// Fetches the Facebook graph object and reports its `likes` count through
// callback(err, data). Errors (network or malformed JSON) go to the
// callback instead of being thrown.
var getLikes = function(graphId, callback) {
  // request to get the # of likes (the original kept the return value in
  // an unused `req` variable — removed)
  http.get('http://graph.facebook.com/' + graphId, function(response) {
    var str = '';
    // while data is incoming, concatenate it
    response.on('data', function (chunk) {
      str += chunk;
    });
    // data is fully received, and now parsable
    response.on('end', function () {
      var likes;
      try {
        likes = JSON.parse(str).likes;
      } catch (parseErr) {
        // A malformed body used to throw synchronously inside the 'end'
        // handler; surface it through the callback like other errors.
        return callback(parseErr, null);
      }
      var data = {
        _id: 'likes',
        value: likes
      };
      callback(null, data);
    });
  }).on('error', function(err) {
    callback(err, null);
  });
};
module.exports = getLikes;
I would like to test it with mocha AND sinon, but I don't get how to stub the http.get.
For now I'm doing a real http.get to facebook, but I would like to avoid it.
Here is my current test:
'use strict';
/*jshint expr: true*/
var should = require('chai').should(),
getLikes = require('getLikes');
describe('getLikes', function() {
it('shoud return likes', function(done) {
getLikes(function(err, likes) {
should.not.exist(err);
likes._id.should.equal('likes');
likes.value.should.exist();
done();
});
});
});
How can I achieve what I want without relying on anything other than sinon? (I don't want to use the request module to perform the get, or use another testing lib.)
Thanks!
You should be able to do this with just sinon.stub(http, 'get').yields(fakeStream); but you might be better served by looking at nock and/or rewire. nock would let you fake the facebook response without mucking too much in the getLikes implementation details. rewire would let you swap in a mock http variable into the getLikes scope without monkey patching the http.get function globally.
To do it with just sinon as above, you'll need to create a mock response that will properly resemble the stream. Something like:
var fakeLikes = {_id: 'likes', value: 'foo'};
// resumer builds a readable stream we can preload: queue the fake JSON
// body and end it so the code under test sees a complete response stream.
var resumer = require('resumer');
var stream = resumer().queue(JSON.stringify(fakeLikes)).end()
I've been trying to find a reasonable way to test code that uses streams. Has anyone found a reasonable way/ framework to help testing code that uses streams in nodejs?
For example:
var fs = require('fs'),
request = require('request');
module.exports = function (url, path, callback) {
request(url)
.pipe(fs.createWriteStream(path))
.on('finish', function () {
callback();
});
};
My current way of testing this type of code either involves simplifying the code with streams so much that I can abstract it out to a non-tested chunk of code or by writing something like this:
var rewire = require('rewire'),
download = rewire('../lib/download'),
stream = require('stream'),
util = require('util');
describe('download', function () {
  it('should download a url', function (done) {
    // A writable stream whose _write asserts on the payload it receives.
    function WritableSpy() {
      stream.Writable.call(this);
    }
    util.inherits(WritableSpy, stream.Writable);
    WritableSpy.prototype._write = function (data, encoding, cb) {
      expect(data.toString()).toEqual("hello world");
      cb();
    };

    // Stand-in for request(): checks the url and returns a readable
    // that emits a single chunk then ends.
    var requestStub = function (url) {
      expect(url).toEqual('http://hello');
      var source = new stream.Readable();
      source.push("hello world");
      source.push(null);
      return source;
    };

    // Stand-in for fs: checks the path and hands back the spying writable.
    var fsStub = {
      createWriteStream: function (path) {
        expect(path).toEqual('hello.txt');
        return new WritableSpy();
      }
    };

    download.__set__('fs', fsStub);
    download.__set__('request', requestStub);

    download('http://hello', 'hello.txt', function () {
      done();
    });
  });
});
Has anyone come up with more elegant ways of testing streams?
I made streamtest for that purpose. It not only makes stream tests cleaner but also allows testing both V1 and V2 streams: https://www.npmjs.com/package/streamtest
I've also been using memorystream, but then putting my assertions into the finish event. That way it looks more like a real use of the stream being tested:
require('chai').should();
var fs = require('fs');
var path = require('path');
var MemoryStream = require('memorystream');
var memStream = MemoryStream.createWriteStream();

/**
 * This is the Transform that we want to test:
 */
var Parser = require('../lib/parser');
var parser = new Parser();

describe('Parser', function () {
  it('something', function (done) {
    // Pipe a fixture file through the parser into the in-memory sink,
    // then assert on the sink's contents once the pipeline finishes.
    var source = fs.createReadStream(path.join(__dirname, 'something.txt'));
    source
      .pipe(parser)
      .pipe(memStream)
      .on('finish', function () {
        // Check that our parser has created the right output:
        memStream.toString().should.eql('something');
        done();
      });
  });
});
Checking objects can be done like this:
var memStream = MemoryStream.createWriteStream(null, {objectMode: true});
// (the lone "." lines are the author's ellipsis — they stand in for the
// surrounding test code shown in the previous example)
.
.
.
.on('finish', function() {
memStream
// In objectMode each written object is retained in memStream.queue.
.queue[0]
.should.eql({ some: 'thing' });
done();
});
.
.
.
Read the Stream into memory and compare it with the expected Buffer.
it('should output a valid Stream', (done) => {
const stream = getStreamToTest();
const expectedBuffer = Buffer.from(...);
let bytes = new Buffer('');
stream.on('data', (chunk) => {
bytes = Buffer.concat([bytes, chunk]);
});
stream.on('end', () => {
try {
expect(bytes).to.deep.equal(expectedBuffer);
done();
} catch (err) {
done(err);
}
});
});
I feel you pain.
I don't know any framework to help out testing with streams, but if take a look here,
where I'm developing a stream library, you can see how I approach this problem.
here is a idea of what I'm doing.
// `chai.use(...)` is a statement, not a variable declaration — it cannot
// appear inside a comma-separated var list (the original was a syntax error).
var chai = require("chai")
  , sinon = require("sinon")
  , expect = chai.expect
  , through2 = require('through2')
  ;
chai.use(require("sinon-chai"));

chai.config.showDiff = false;
// Wraps a stream's _transform so a sinon spy records every chunk while
// still delegating to the original implementation. Spies are pooled in
// spy.free / spy.used so hundreds of tests can recycle them cheaply.
function spy (stream) {
  var agent, fn;
  if (spy.free.length === 0) {
    agent = sinon.spy();
  } else {
    agent = spy.free.pop();
    agent.reset();
  }
  spy.used.push(agent);
  fn = stream._transform;
  stream.spy = agent;
  stream._transform = function (c) {
    agent(c);
    return fn.apply(this, arguments);
  };
  // NOTE: the original then did `stream._transform = transform;`, which
  // referenced an undefined variable AND threw away the wrapper installed
  // just above — removed.
  return agent;
}
spy.free = [];
spy.used = [];
describe('basic through2 stream', function () {
  beforeEach(function () {
    // Consistent camelCase: the original mixed `streamB`/`StreamB`, so
    // the assertions below read an undefined property.
    this.streamA = through2();
    this.streamB = through2.obj();
    // other kind of streams...
    spy(this.streamA);
    spy(this.streamB);
  });

  afterEach(function () {
    // Recycle spies so we don't allocate hundreds of them.
    spy.used.map(function (agent) {
      spy.free.push(spy.used.pop());
    });
  });

  it("must call transform with the data", function () {
    var ctx = this
      // Buffer.from replaces the deprecated `new Buffer(...)`.
      , dataA = Buffer.from('some data')
      , dataB = 'some data'
      ;
    this.streamA.pipe(through2(function (chunk, enc, next) {
      // "calledWith" was misspelled "calledWidth" in the original.
      expect(ctx.streamA.spy).to.have.been.calledOnce.and.calledWith(dataA);
    }));
    this.streamB.pipe(through2(function (chunk, enc, next) {
      expect(ctx.streamB.spy).to.have.been.calledOnce.and.calledWith(dataB);
    }));
    this.streamA.write(dataA);
    this.streamB.write(dataB);
  });
});
Note that my spy function wraps the _transform method, calls my spy, and then calls the original _transform.
Also, The afterEach function is recycling the spies, because you can end up creating hundreds of them.
The problem gets harder when you want to test async code. Then promises are your best friend. The link I gave above has some samples of that.
I haven't used this, and it's quite old, but https://github.com/dominictarr/stream-spec might help.
You can test streams using MemoryStream and sinon by using spies. Here is how I tested some of my code.
describe('some spec', function() {
it('some test', function(done) {
// In-memory writable sink; every 'data' chunk is recorded by the spy.
var outputStream = new MemoryStream();
var spyCB = sinon.spy();
outputStream.on('data', spyCB);
doSomething(param, param2, outputStream, function() {
// Assert the stream received the expected chunk before finishing.
sinon.assert.calledWith(spyCB, 'blah');
done();
});
});
});
Best way I have found is to use events
const byline = require('byline');
const fs = require('fs');

it('should process all lines in file', function (done) {
  //arrange
  let lines = 0;
  //file with 1000 lines
  // fs has no readFileStream/writeFileStream — the factory functions are
  // createReadStream / createWriteStream.
  let reader = fs.createReadStream('./input.txt');
  let writer = fs.createWriteStream('./output.txt');
  // byline is a module, not a stream: wrap the reader to get a
  // line-delimited stream (per the byline docs).
  let lineStream = byline.createStream(reader);

  //act
  lineStream.pipe(writer);
  // byline emits one 'data' event per line (it has no 'line' event).
  lineStream.on('data', function () {
    lines++;
  });

  //assert
  writer.on('close', function () {
    expect(lines).to.equal(1000);
    done();
  });
});
by passing done as a callback, mocha waits until it is called before moving on.
I want to pass a stream as argument to a function and use it in an async callback but it is destroyed (stream.readable is false)
for example:
var test = require('./test');
var file = fs.createReadStream('./file.txt');
// NOTE(review): nothing pauses this stream, so it can end before test()'s
// async callback ever pipes it — this is the behaviour being asked about.
test(file, console.log);
and in test.js:
module.exports = function(stream, callback) {
//stream.pipe(process.stdout); ///////// STREAM IS READABLE HERE
doSomething('abc', function(err) {
// By the time this async callback runs, the stream has already ended —
// pause it before the async call (or pipe synchronously) to avoid this.
stream.pipe(process.stdout); ///////// STREAM IS NOT READABLE HERE
callback(err);
});
};
why is this happening ?
what can I do to use it in the callback ?
This happens because the stream ends before you try to pipe it. In the first place the stream is readable because your synchronous code is still running. In the second place (inside the callback) the stream has already ended, because the callback may be executed several ticks in the future. You need to pause your stream if you want to read it later. This code should work:
var test = require('./test');
var file = fs.createReadStream('./file.txt');
// Pause immediately so no data is consumed or lost before the async
// callback inside test() resumes the stream.
file.pause();
test(file, console.log);
test.js
module.exports = function(stream, callback) {
doSomething('abc', function(err) {
// Resume the stream (paused by the caller) now that we're ready to pipe.
stream.resume();
stream.pipe(process.stdout);
callback(err);
});
};