Unit testing with Bookshelf.js and knex.js - node.js

I'm relatively new to Node and am working on a project using knex and bookshelf. I'm having a little bit of trouble unit testing my code and I'm not sure what I'm doing wrong.
Basically I have a model (called VorcuProduct) that looks like this:
var VorcuProduct = bs.Model.extend({
    tableName: 'vorcu_products'
});

module.exports.VorcuProduct = VorcuProduct;
And a function that saves a VorcuProduct if it does not exist on the DB. Quite simple. The function doing this looks like this:
function subscribeToUpdates(productInformation, callback) {
    model.VorcuProduct
        .where({product_id: productInformation.product_id, store_id: productInformation.store_id})
        .fetch()
        .then(function(existing_model) {
            if (existing_model == undefined) {
                new model.VorcuProduct(productInformation)
                    .save()
                    .then(function(new_model) { callback(null, new_model); })
                    .catch(callback);
            } else {
                callback(null, existing_model);
            }
        });
}
What is the correct way to test this without hitting the DB? Do I need to mock fetch to return a model or undefined (depending on the test) and then do the same with save? Should I use rewire for this?
As you can see I'm a little bit lost, so any help will be appreciated.
Thanks!

I have been using in-memory SQLite3 databases for automated testing with great success. My tests take 10 to 15 minutes to run against MySQL, but only 30 seconds or so with an in-memory SQLite3 database. Use :memory: for your connection string to take advantage of this technique.
A note about unit testing - this is not true unit testing, since we're still running a query against a database. It is technically integration testing; however, it runs within a reasonable time period, and if you have a query-heavy application (like mine) then this technique is going to prove more effective at catching bugs than unit testing anyway.
Gotchas - Knex/Bookshelf initializes the connection at the start of the application, which means that you keep the context between tests. I would recommend writing a schema create/destroy script so that you can build and destroy the tables for each test, as sketched below. Also, SQLite3 is less strict about foreign key constraints than MySQL or PostgreSQL, so make sure you run your app against one of those every now and then to ensure that your constraints work properly.
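For reference, a minimal sketch of that setup (the table and column names here are just illustrative placeholders, not from the question):

// test bootstrap using knex with an in-memory SQLite3 database
var knex = require('knex')({
    client: 'sqlite3',
    connection: { filename: ':memory:' },
    useNullAsDefault: true
});
var bookshelf = require('bookshelf')(knex); // define your models against this instance

// rebuild the schema around every test so no state leaks between tests
beforeEach(function () {
    return knex.schema.dropTableIfExists('vorcu_products').then(function () {
        return knex.schema.createTable('vorcu_products', function (table) {
            table.increments('id');
            table.integer('product_id');
            table.integer('store_id');
        });
    });
});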

This is actually a great question which brings up both the value and limitations of unit testing.
In this particular case the non-stubbed logic is pretty simple -- just a simple if block, so it's arguable whether it is worth the unit-testing effort; the accepted answer is a good one and points out the value of small-scale integration testing.
On the other hand, the exercise of doing unit testing is still valuable in that it points out opportunities for code improvements. In general, if the tests are too complicated, the underlying code can probably use some refactoring. In this case a doesProductExist function could likely be factored out, and returning the promises from knex/bookshelf instead of converting to callbacks would also be a helpful simplification; a rough sketch of that follows.
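As an illustration only (doesProductExist is a name introduced here for the sketch, not something from the original code), the promise-based refactoring might look roughly like this:

// hypothetical promise-returning refactoring of subscribeToUpdates
function doesProductExist(productInformation) {
    return model.VorcuProduct
        .where({
            product_id: productInformation.product_id,
            store_id: productInformation.store_id
        })
        .fetch(); // resolves to the model, or null when nothing matches
}

function subscribeToUpdates(productInformation) {
    return doesProductExist(productInformation).then(function (existing_model) {
        if (existing_model) {
            return existing_model;
        }
        return new model.VorcuProduct(productInformation).save();
    });
}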
But for comparison here's my take on what true unit-testing of the existing code would look like:
var rewire = require('rewire');
var sinon = require('sinon');
var chai = require('chai');
var sinonChai = require('sinon-chai');
var expect = chai.expect;
var Promise = require('bluebird');

chai.use(sinonChai); // needed for the expect(stub).to.be.called assertion below

var subscribeToUpdatesModule = rewire('./service/subscribe_to_updates_module');
var subscribeToUpdates = subscribeToUpdatesModule.__get__('subscribeToUpdates');
describe('subscribeToUpdates', function () {
    beforeEach(function () {
        var self = this;
        this.sandbox = sinon.sandbox.create();
        var VorcuProduct = subscribeToUpdatesModule.__get__('model').VorcuProduct;
        // save() and fetch() resolve to whichever promise the current test assigns below
        // (sinon 1.x style stub-with-function; newer sinon uses .callsFake())
        this.saveStub = this.sandbox.stub(VorcuProduct.prototype, 'save', function () {
            return self.saveResultPromise;
        });
        this.fetchStub = this.sandbox.spy(function () {
            return self.fetchResultPromise;
        });
        this.sandbox.stub(VorcuProduct, 'where', function () {
            return { fetch: self.fetchStub };
        });
    });
    afterEach(function () {
        this.sandbox.restore();
    });
    it('calls save when fetch finds no existing model', function (done) {
        var self = this;
        this.fetchResultPromise = Promise.resolve(undefined);
        this.saveResultPromise = Promise.resolve('save result');
        var callback = function (err, result) {
            expect(err).to.be.null;
            expect(self.saveStub).to.be.called;
            expect(result).to.equal('save result');
            done();
        };
        subscribeToUpdates({}, callback);
    });
    // ... more it(...) blocks
});

Related

Populating mongodb in one unit test interferes with another unit test

I'm trying to run all of my unit tests asynchronously, but calling a function to populate the database with some dummy data interferes with the other unit tests that run at the same time and that make use of the same data.
collectionSeed.js file:
const {ObjectID} = require('mongodb');
import { CollectionModel } from "../../models/collection";

const collectionOneId = new ObjectID();
const collectionTwoId = new ObjectID();

const collections = [{
    _id: collectionOneId
}, {
    _id: collectionTwoId
}];

const populateCollections = (done) => {
    CollectionModel.remove({}).then(() => {
        var collectionOne = new CollectionModel(collections[0]);
        collectionOne.save(() => {
            var collectionTwo = new CollectionModel(collections[1]);
            collectionTwo.save(() => {
                done();
            });
        });
    });
};
unitTest1 file:
beforeEach(populateCollections);
it('Should run', (done) => {
//do something with collection[0]
})
unitTest2 file:
beforeEach(populateCollections);
it('Should run', (done) => {
//do something with collection[0]
})
I'm running unit tests that change, delete, and add data to the database, so using beforeEach is preferable to keep the data consistent. However, the CollectionModel.remove({}) call often runs in between an it function from one file and an it function from the other test file, so one unit test works fine while the second it is trying to use data that no longer exists.
Is there any way to prevent the different unit test files from interfering with each other?
I recommend you create a database per test file, for example by adding the name of the file to the DB name. That way you only have to make sure tests don't interfere with each other inside the same file, and you can forget about tests in other files.
I think that managing fixtures is one of the most troublesome parts of unit testing, so with this approach, creating and fixing unit tests becomes smoother.
As a trade-off, each test file will take more execution time, but in my opinion it is worth it in most cases.
Ideally each test should be independent of the rest, but in general that would add too much overhead, so I recommend the one-database-per-test-file approach; a rough sketch follows.
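For illustration, here is a rough sketch of that idea with mongoose (the helper, connection URL and file layout are assumptions for the example, not code from the question):

// hypothetical test bootstrap: derive the database name from the test file name
const path = require('path');
const mongoose = require('mongoose');

const dbName = 'test_' + path.basename(__filename, '.js'); // e.g. test_unitTest1

before((done) => {
    mongoose.connect(`mongodb://localhost:27017/${dbName}`, done);
});

after((done) => {
    // drop the per-file database so nothing leaks into the next run
    mongoose.connection.dropDatabase()
        .then(() => mongoose.disconnect())
        .then(() => done(), done);
});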

Bluebird, node-mysql, pooling, and disposing

I'm currently trying to implement a different approach to connecting to my database using promises and pooling. This is what I have at the moment:
// databaseConnection.js
var configDB = require('./database.js');
var mysql = require('promise-mysql');

var pool = mysql.createPool(configDB.connectionData);

function getSqlConnection() {
    return pool.getConnection(configDB.connectionData).disposer(function(connection) {
        connection.release();
    });
}

module.exports = getSqlConnection;
Then I use the query like this:
// sqlQuery.js
var Promise = require("bluebird");
var getSqlConnection = require('./databaseConnection');

Promise.using(getSqlConnection(), function(connection) {
    return connection.query("SELECT * FROM EXAMPLE_TABLE").then(function(rows) {
        return process(rows);
    });
});
I'm using this library, which is just node-mysql wrapped with Bluebird promises. With that, I wanted to take advantage of Bluebird's disposer/using capability so I would only be connected to the DB when I needed to be.
Currently though I'm getting an error from Connection.js of mysql stating: cb is not a function. Based on this question I have somewhat of an idea of what I'm doing wrong, but I'm not sure how I would go about using that with Bluebird's dispose/using paradigm. Thanks in advance to anyone who can help!
Huge lack of oversight on my part. The following line:
return pool.getConnection(configDB.connectionData).disposer...
should be:
return pool.getConnection().disposer...
Sorry about that. I'm still getting an error about connection.release not being a function, which is strange, but at least I can move forward with debugging that.
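In case it helps with that follow-up error: one thing worth trying is releasing the connection through the pool instead of calling connection.release() directly. Treat the following as a sketch only; it assumes your version of promise-mysql exposes pool.releaseConnection(), so check the library's README for the exact API.

// databaseConnection.js - sketch assuming pool.releaseConnection() exists in your promise-mysql version
var configDB = require('./database.js');
var mysql = require('promise-mysql');

var pool = mysql.createPool(configDB.connectionData);

function getSqlConnection() {
    // no arguments to getConnection(); hand the connection back to the pool when the disposer runs
    return pool.getConnection().disposer(function(connection) {
        pool.releaseConnection(connection);
    });
}

module.exports = getSqlConnection;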

asynchronous version of JSON.stringify and JSON.parse

var assert = require('assert');
var parseJSON = require('json-parse-async');

var contact = new Object();
contact.firstname = "Jesper";
contact.surname = "Aaberg";
contact.phone = ["555-0100", "555-0120"];

var contact2 = new Object();
contact2.firstname = "JESPER";
contact2.surname = "AABERG";
contact2.phone = ["555-0100", "555-0120"];

contact.toJSON = function(key) {
    var replacement = new Object();
    for (var val in this) {
        if (typeof(this[val]) === 'string')
            replacement[val] = this[val].toUpperCase();
        else
            replacement[val] = this[val];
    }
    return replacement;
};

var jsonText = JSON.stringify(contact);
contact = JSON.parse(jsonText);

console.log(contact);
console.log(contact2);
assert.deepEqual(contact, contact2, 'these two objects are the same');
What are the asynchronous equivalents of JSON.parse, JSON.stringify and assert.deepEqual? I am trying to create a race condition and non-deterministic behavior within the code above, but I have not been able to find non-blocking, asynchronous equivalents of the functions mentioned.
node.js does not have an actual asynchronous JSON parser built-in. If you want something that will actually do the parsing outside the main node.js Javascript thread, then you would have to find a third party module that parses the JSON outside of the Javascript thread (e.g. in a native code thread or in some other process). There are some modules in NPM that advertise themselves as asynchronous such as async-json-parser or async-json-parse or json-parse-async. You would have to verify that whichever implementation you were interested in was truly an asynchronous implementation (your Javascript continues to run while the parsing happens in the background).
But, reading the detail in your question about the problem you're trying to solve, it doesn't sound like you actually need a parser that truly happens in the background. To give you your ability to test what you're trying to test, it seems to me like you just need an indeterminate finish that allows other code to run before the parsing finishes. That can be done by wrapping the synchronous JSON.parse() in a setTimeout() with a promise that has a random delay. That will give some random amount of time for other code to run (to try to test for your race conditions). That could be done like this:
JSON.parseAsyncRandom = function(str) {
    return new Promise(function(resolve, reject) {
        // use a random 0-10 second delay
        setTimeout(function() {
            try {
                resolve(JSON.parse(str));
            } catch(e) {
                reject(e);
            }
        }, Math.floor(Math.random() * 10000));
    });
};

JSON.parseAsyncRandom(str).then(function(obj) {
    // process obj here
}, function(err) {
    // handle err here
});
Note: This is not true asynchronous execution. It's an asynchronous result (in that it arrives some random time later and other code will run before the result arrives), but true asynchronous execution happens in the background in parallel with other JS running and this isn't quite that. But, given your comment that you just want variable and asynchronous results for testing purposes, this should do that.
I've recently faced this problem myself, so I decided to create a library to handle JSON parsing in a really asynchronous way.
The idea behind it is to divide the parsing process into chunks and then run each one separately in the event loop, so that other events (user interactions, etc.) can still be handled within a few milliseconds, keeping the UI responsive.
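To make the idea concrete, here is a rough sketch of that chunking approach (just an illustration of yielding back to the event loop between units of work, not the library's actual implementation):

// parse an array of JSON strings, one item per event-loop turn
function parseAllChunked(jsonStrings, callback) {
    var results = [];
    var i = 0;
    (function next() {
        if (i >= jsonStrings.length) {
            return callback(null, results);
        }
        try {
            results.push(JSON.parse(jsonStrings[i++]));
        } catch (e) {
            return callback(e);
        }
        setImmediate(next); // yield to the event loop before the next chunk
    })();
}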
If you are interested, the library is called RAJI and you can find it here: https://github.com/federico-terzi/raji
After installing RAJI, you can then convert your synchronous JSON.parse calls into async raji.parse calls, such as:
const object = await parse(payload);
These calls won't block the UI.
You can use bluebird, as in this example, to convert the calling functions to promises.
I wrote the code below using JavaScript ES6.
const Promise = require('bluebird');

function stringifyPromise(jsonText) {
    return Promise.try(() => JSON.stringify(jsonText));
}

function parsePromise(str) {
    return Promise.try(() => JSON.parse(str));
}

stringifyPromise(contact)
    .then(jsonText => parsePromise(jsonText))
    .then(contact => {
        assert.deepEqual(contact, contact2, 'these two objects are the same');
    });

Best practices of db connection pool handling in a node js app?

I'm referring to node-postgres package below, but I guess this question is rather generic.
There is this trivial example where you 1) acquire (connect) a connection (client) from the pool in the top-level HTTP request handler, 2) do all the business inside that handler and 3) release it back to the pool after you're done.
I guess it works fine for that example, but as soon as your app becomes somewhat bigger this quickly becomes painful.
I'm thinking of these two options, but I'm not quite sure...
do the "get client + work + release client" approach everywhere I need to talk to db.
This seems like a good choice, but will it not lead to eating up more than one connection/client per the top http request (there are parallel async db calls in many places in my project)?
try to assign a globaly shared reference to one client/connection accessible via require()
Is this a good idea and actually reasonably doable? Is it possible to nicely handle the "back to the pool release" in all ugly cases (errors in parallel async stuff for example)?
Thank you.
Well, I lost some time trying to figure that out. In the end, after some consideration and influenced by John Papa's code, I decided to use a database module like this:
var Q = require('q');
var MongoClient = require('mongodb').MongoClient;

module.exports.getDb = getDb;

var db = null;

function getDb() {
    return Q.promise(theDb);

    function theDb(resolve, reject, notify) {
        if (db) {
            resolve(db);
        } else {
            // mongourl and mongoOptions are assumed to be defined elsewhere (e.g. in config)
            MongoClient.connect(mongourl, mongoOptions, function(err, connectedDb) {
                if (err) {
                    return reject(err);
                }
                db = connectedDb; // cache the connection for subsequent calls
                resolve(db);
            });
        }
    }
}
So, when I need to perform a query:
getDb().then(function(db) {
    // perform query here
});
At least for MongoDB this is considered good practice, as seen here.
The best advice would depend on the type of database and the basic framework that represents the database.
In the case of Postgres, the basic framework/driver is node-postgres, which has embedded support for connection pooling. That support is, however, low-level.
For high-level access see pg-promise, which provides automatic connection management, support for tasks, transactions and much more; a small sketch follows.
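As a rough sketch only (the connection string and query are placeholders), the same sort of query via pg-promise looks like this:

var pgp = require('pg-promise')();
var db = pgp('postgres://localhost/postgres'); // placeholder connection string

db.any('SELECT version();')
    .then(function (rows) {
        // do something with rows; the connection goes back to the pool automatically
    })
    .catch(function (error) {
        // handle connection/query errors
    });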
Here is what has worked well for me.
var pg = require('pg');
var config = { pg: 'postgres://localhost/postgres' };

pg.connect(config.pg, function(err, client, done) {
    if (err) {
        return console.error('could not connect to postgres', err);
    }
    client.query('SELECT version();', function (err, results) {
        done(); // return the client to the pool
        // do something with results.rows
    });
});

Using supertest and co to validate database content after request

I want to write a test to update a blog post (or whatever):
* Insert a blog post in a database
* Get the id the blog post got in MongoDb
* POST an updated version to my endpoint
* After the request has finished: check in the database that the update has been done
Here's the test, using koa:
var db = require('../lib/db.js');

describe('a test suite', function(){
    it('updates an existing text', function (done) {
        co(function * () {
            var insertedPost = yield db.postCollection.insert({ title : "Title", content : "My awesome content"});
            var id = insertedPost._id;
            var url = "/post/" + id;
            var updatedPost = { content : 'Awesomer content' };

            request
                .post(url)
                .send(updatedTextData)
                .expect(302)
                .expect('location', url)
                .end(function () {
                    co(function *() {
                        var p = yield db.postCollection.findById(id);
                        p.content.should.equal(updatedPost.content);
                        console.log("CHECKED DB");
                    })(done());
                });
        });
    });
});
I realize that there's a lot of moving parts in there, but I've tested all the interactions separately. Here's the db-file I've included (which I know works fine since I use it in production):
var monk = require('monk');
var wrap = require('co-monk');

function getCollection(mongoUrl, collectionName) {
    var db = monk(mongoUrl);
    return wrap(db.get(collectionName));
};

module.exports.postCollection = getCollection([SECRET MONGO CONNECTION], 'posts');
The production code works as intended.
This test passes, but it seems to me like the co function in the .end() clause never runs... yet the done() call gets made. At least no "CHECKED DB" is being printed.
I've tried with "done()" and with "done". Sometimes that works and sometimes not.
I've tried to move the check of the database outside the request... but that just hangs, since supertest wants us to call done() when we are completed.
All of this leaves me confused and scared (:)) - what am I doing wrong here?
Realising that the question was long-winded and very specific, I feared that I would never get a proper answer due to the badly asked question.
But the answer given and the comments made me look again, and I found it. I wrote a long blog post about it, but I'll give away the end of it here as a summary. If it doesn't make sense, there's more of the same :) in the blog post.
Here is the TL;DR:
I wanted to check the state of the database after doing a request. This can be done using the .end() function of supertest.
Since I used co-monk I wanted to be able to do that using yield and generators. This means that I need to wrap my generator function with co.
co, since version 4.0.0, returns a promise. This is perfect for users of mocha, since it allows us to use the .then() function and pass done to both the success and failure handlers of .then(fnSuccess, fnFailure).
The test in its entirety is displayed below. Running this returns an error due to the failing assertion, as I want:
var co = require("co");
var should = require("should");
var helpers = require('./testHelpers.js');
var users = helpers.users;
var request = helpers.request;
describe('POST to /user', function(){
var test_user = {};
beforeEach(function (done) {
test_user = helpers.test_user;
helpers.removeAll(done);
});
afterEach(function (done) {
helpers.removeAll(done);
});
it('creates a new user for complete posted data', function(done){
// Post
request
.post('/user')
.send(test_user)
.expect('location', /^\/user\/[0-9a-fA-F]{24}$/) // Mongo Object Id /user/234234523562512512
.expect(201)
.end(function () {
co(function *() {
var userFromDb = yield users.findOne({ name : test_user.name });
userFromDb.name.should.equal("This is not the name you are looking for");
}).then(done, done);
});
});
});
This happens because
var p = yield db.postCollection.findById(id);
is the last line that will be executed in your generator function.
You can test whether I am right by adding a console.log('before first yield').
yield is the replacement for return in generator functions, but the function only continues on to the next yield when it is resumed again.
A generator function is executed from yield to yield
(that's the best way to explain it briefly, I think); a tiny illustration follows.
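A small standalone example of that run-to-yield behaviour (nothing from the question, just an illustration):

function* gen() {
    console.log('runs on the first next()');
    yield 1;
    console.log('only runs on the second next()');
    yield 2;
}

var iterator = gen();
iterator.next(); // logs the first line, then pauses at the first yield
iterator.next(); // resumes, logs the second line, then pauses at the second yield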
Your solution:
simply remove the yield before the database find:
var p = db.postCollection.findById(id);
