I created a simple Webapp using express.js and want to test it with jasmine-node. Works fine so far but my problem is that I have to start the server manually every time before I can run my tests.
Could you help me write a spec helper that starts the server (on a different port than my development one) just for the tests and then kills it afterwards?
This is what I do:
I have a server.js file inside the root of my node project that sets up the node application server (with express) and exports two methods:
exports.start = function( config, readyCallback ) {
  if( !this.server ) {
    this.server = app.listen( config.port, function() {
      console.log('Server running on port %d in %s mode', config.port, app.settings.env);
      // callback to call when the server is ready
      if( readyCallback ) {
        readyCallback();
      }
    });
  }
};

exports.close = function() {
  this.server.close();
};
The app.js file will be simple at this point:
var server = require('./server');
server.start( { port: 8000 } );
So the files/folder basic structure would be the following:
src
  app.js
  server.js
Having this separation will allow you to run the server normally:
node src/app.js
…and/or require it from a custom node script (or a jake/grunt/whatever task) that executes your tests, like this:
/** my-test-task.js */

// util that spawns a child process
var spawn = require('child_process').spawn;
// reference to our node application server
var server = require('./path/to/server.js');

// starts the server
server.start( { port: 8000 }, function() {
  // on server ready, launch the jasmine-node process with your test file
  var jasmineNode = spawn('jasmine-node', [ './path/to/test/file.js' ]);

  // logs process stdout/stderr to the console
  function logToConsole(data) {
    console.log(String(data));
  }
  jasmineNode.stdout.on('data', logToConsole);
  jasmineNode.stderr.on('data', logToConsole);

  jasmineNode.on('exit', function(exitCode) {
    // when jasmine-node is done, shut down the application server
    server.close();
  });
});
I use Mocha (which is quite similar), but the same principle should apply: you could try requiring your app.js file in a beforeEach hook inside the main describe. That should fire it up for you.
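A minimal sketch of that idea, assuming the server.js module from above (the path, port, and spec body are illustrative; shown here once per suite via before/after):

var server = require('../src/server.js'); // adjust to your project layout

describe('my webapp', function() {
  before(function(done) {
    // start the server on a test-only port before the specs run
    server.start({ port: 8001 }, done);
  });

  after(function() {
    // shut it down again when the suite is finished
    server.close();
  });

  it('is reachable', function() {
    // assertions against http://localhost:8001 go here
  });
});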
Assuming you use some code that invokes app.listen() in server.js, don't require the file on each run but only once, and then have two functions like:
httpServer = null
startServer = -> httpServer = app.listen(3000)
stopServer  = -> httpServer.close()
Then you can use these in beforeEach and afterEach.
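For example (a sketch in plain JavaScript, assuming the two helpers above are exported from a shared module; the path is hypothetical):

var server = require('./server-helper');

describe('my app', function() {
  beforeEach(function() {
    server.startServer();
  });

  afterEach(function() {
    server.stopServer();
  });
});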
If you then want to go one step further in automating your testing while you develop, you can go to your terminal and execute
jasmine-node . --autotest
Jasmine will then watch every file inside your project, and whenever you change one it will tell you if that piece of code breaks any of your tests ;)
Related
I have a node application with a file called server.js, which I start with

node server.js env=feature port=8081

I installed jest and supertest. After writing an async GET call in a sample.test.js file I run npm test, but the result is not as expected, because starting server.js requires the two parameters env and port mentioned above. My question is: how do I pass them when running Jest? My server-importing code is below:
const supertest = require('supertest');
const app = require('../server.js');
const request = supertest(app);

describe('Sample Test', () => {
  it('returns 401 without authentication', async () => {
    const response = await request.get("/rest/api/v1/mytools/getAvailableTools?provisioning=all");
    console.log('James bob ' + JSON.stringify(response));
    expect(response.statusCode).toEqual(401);
  });
});
There are a couple of ways you can approach this.
First, it's common practice to pass such values using environment variables. You would have to update the code that processes those values, but then you can do the following:
ENV=feature PORT=8081 node server.js
ENV=feature PORT=8081 npm test
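Reading those variables inside server.js could then look like this (a sketch; the variable names and defaults are assumptions, and `app` is your Express app):

// server.js (sketch)
const env = process.env.ENV || 'development'; // assumed default
const port = Number(process.env.PORT) || 8081; // assumed default

app.listen(port, () => {
  console.log(`Server running on port ${port} in ${env} mode`);
});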
The second option would be to decouple command-line parameter handling from the actual server and test them separately, e.g.:
// server.js
const createServer = (env, port) => {
  // Server code
};

module.exports = createServer;

// Command line processing code
createServer(cliEnv, cliPort);
So in tests you can write:
const supertest = require('supertest');
const createServer = require('../server.js');
const request = supertest(createServer('feature', 8081));

describe('Sample Test', () => {
  it('returns 401 without authentication', async () => {
    const response = await request.get("/rest/api/v1/mytools/getAvailableTools?provisioning=all");
    console.log('James bob ' + JSON.stringify(response));
    expect(response.statusCode).toEqual(401);
  });
});
Until today, in all my node/express projects, I have created the HTTP server in some file
var http = require('http');

var server = http.createServer(app);
http.globalAgent.maxSockets = maxSockets;

server.listen(port, function() {
  logger.info('App starting on : ' + port);
});
and called that file directly to start the app. Recently I have been noticing some boilerplates that instead call a starter file which, based on arguments, spawns a child process, be it to build the app or to start it.
In package.json:
"start": "babel-node tools/run.js"
In run.js:
// Context needed to run this excerpt (assumed; not part of the quoted snippet):
const cp = require('child_process');
const pkg = require('../package.json');

let debugPort = 9229;
const isDebug = process.execArgv.some(arg => arg.startsWith('--inspect'));

// Launch `node build/server.js` on a background thread
function spawnServer() {
  return cp.spawn(
    'node',
    [
      // Pre-load application dependencies to improve "hot reload" restart time
      ...Object.keys(pkg.dependencies).reduce(
        (requires, val) => requires.concat(['--require', val]),
        [],
      ),
      // If the parent Node.js process is running in debug (inspect) mode,
      // launch a debugger for the Express.js app on the next port
      ...process.execArgv.map(arg => {
        if (arg.startsWith('--inspect')) {
          const match = arg.match(/^--inspect=(\S+:|)(\d+)$/);
          if (match) debugPort = Number(match[2]) + 1;
          return `--inspect=${match ? match[1] : '0.0.0.0:'}${debugPort}`;
        }
        return arg;
      }),
      '--no-lazy',
      // Enable "hot reload"; it only works when the debugger is off
      ...(isDebug
        ? ['./server.js']
        : [
            '--eval',
            'process.stdin.on("data", data => { if (data.toString() === "load") require("./server.js"); });',
          ]),
    ],
    { cwd: './build', stdio: ['pipe', 'inherit', 'inherit'], timeout: 3000 },
  );
}
e.g.: https://github.com/kriasoft/nodejs-api-starter/
How is this advantageous?
In my experience this is not a widespread practice. It appears, based on the comments, that they're doing it to be able to configure command-line options based on the environment and so on. To be honest, this seems a bit counterproductive to me.
What I've seen far more often is to start node from the command line with just npm start. package.json has several options for defining what that will do, but most often I would just have it call something simple like node server.js or similar. If you have multiple options for starting that you want to offer (for example, turning on the debug flags and such), then just add more scripts and call npm run <scriptname> to make it happen.
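For example (a hypothetical package.json excerpt; the script names and flags are illustrative):

{
  "scripts": {
    "start": "node server.js",
    "start:debug": "node --inspect server.js",
    "test": "mocha"
  }
}

Then npm start or npm run start:debug does the right thing without a custom launcher script.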
Any other special sauce would be baked into the process manager of choice (systemd is my preference, but others like pm2 exist and work well), and between that and the environment variables you can do all or most of what's in the above script without all the indirection. I feel like the example you posted would really up the 'wtf-factor' if I were starting to maintain the app and didn't know it was doing things like that for me under the hood.
I've got two server scripts (both relying on socket.io and running on different ports).
I'd like to start both in parallel via gulp. But in addition I'd like to be able to stop each of them individually, and maybe even access the console output of each script.
Is there any existing solution for this? Or would you even recommend using anything else than gulp?
I found a solution in which I additionally start a MongoDB server:
var gulp = require('gulp');
var child_process = require('child_process');
var nodemon = require('gulp-nodemon');

var processes = {server1: null, server2: null, mongo: null};

gulp.task('start:server', function (cb) {
  // The magic happens here ...
  processes.server1 = nodemon({
    script: "server1.js",
    ext: "js"
  });

  // ... and here
  processes.server2 = nodemon({
    script: "server2.js",
    ext: "js"
  });

  cb(); // For parallel execution accept a callback.
        // For further info see "Async task support" section here:
        // https://github.com/gulpjs/gulp/blob/master/docs/API.md
});

gulp.task('start:mongo', function (cb) {
  processes.mongo = child_process.exec('mongod', function (err, stdout, stderr) {});
  cb();
});

process.on('exit', function () {
  // In case the gulp process is closed (e.g. by pressing [CTRL + C]) stop all spawned processes
  processes.server1.kill();
  processes.server2.kill();
  processes.mongo.kill();
});

gulp.task('run', ['start:mongo', 'start:server']);
gulp.task('default', ['run']);
nodemon/foreverjs are a good solution for uncomplicated cases, but they are not as scalable as pm2. So if you want a scalable and reliable solution, I'd recommend using pm2.
Also, it's worth mentioning that, unlike foreverjs/nodemon, pm2 daemonizes after start. Whether that is a bug or a feature for you depends on your needs.
pm2 start script1.js
pm2 start script2.js
pm2 status // show status of running processes
pm2 logs // tail -f logs from running processes
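If you prefer to declare both scripts in one place, pm2 can also start them from a process file (a sketch; the app names are illustrative):

// ecosystem.config.js
module.exports = {
  apps: [
    { name: 'server1', script: 'script1.js' },
    { name: 'server2', script: 'script2.js' }
  ]
};

pm2 start ecosystem.config.js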
So basically this is what I want to do: have a grunt script that compiles my .coffee files to JS, then run the node server, and then, either after the server closes or while it's still running, delete the JS files that resulted from the compilation and keep only the .coffee ones.
I'm having a couple of issues getting it to work. Most importantly, the way I'm currently doing it is this:
grunt.loadNpmTasks("grunt-contrib-coffee");

grunt.registerTask("node", "Starting node server", function () {
  var done = this.async();
  console.log("test");

  var sp = grunt.util.spawn({
    cmd: "node",
    args: ["index"]
  }, function (err, res, code) {
    console.log(err, res, code);
    done();
  });
});

grunt.registerTask("default", ["coffee", "node"]);
The problem here is that the node server isn't run in the same process as grunt. This matters because I can't just CTRL-C once to terminate JUST the node server.
Ideally, I'd like to have it run in the same process and have the grunt script pause while it's waiting for me to CTRL-C the server. Then, after it's finished, I want grunt to remove the said files.
How can I achieve this?
Edit: Note that the snippet doesn't have the actual removal implemented since I can't get this to work.
If you keep the variable sp in a more global scope, you can define a task node:kill that simply checks whether sp === null (or similar) and, if not, does sp.kill(). Then you can simply run the node:kill task after your testing task. You could additionally invoke a separate task that just deletes the generated JS files.
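A sketch of that idea, building on the Gruntfile from the question (the "test" task in the default chain is hypothetical):

var sp = null;

grunt.registerTask("node", "Starting node server", function () {
  // spawn the server but don't block waiting for it to exit
  sp = grunt.util.spawn({
    cmd: "node",
    args: ["index"]
  }, function () {});
});

grunt.registerTask("node:kill", "Stopping node server", function () {
  // only kill the server if it was actually started
  if (sp !== null) {
    sp.kill();
  }
});

// compile, start the server, run the tests, then stop the server
grunt.registerTask("default", ["coffee", "node", "test", "node:kill"]);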
For something similar I used grunt-shell-spawn in conjunction with a shutdown listener.
In your grunt initConfig:
shell: {
  runSuperCoolJavaServer: {
    command: 'java -jar mysupercoolserver.jar',
    options: {
      async: true //spawn it instead!
    }
  }
},
Then outside of initConfig, you can set up a listener for when the user ctrl+c's out of your grunt task:
grunt.registerTask("superCoolServerShutdownListener",function(step){
var name = this.name;
if (step === 'exit') process.exit();
else {
process.on("SIGINT",function(){
grunt.log.writeln("").writeln("Shutting down super cool server...");
grunt.task.run(["shell:runSuperCoolJavaServer:kill"]); //the key!
grunt.task.current.async()();
});
}
});
Finally, register the tasks
grunt.registerTask('serverWithKill', [
  'shell:runSuperCoolJavaServer',
  'superCoolServerShutdownListener'
]);
I'm starting a nodejs project and would like to do BDD with Mocha and Zombiejs. Unfortunately I'm new to just about every buzzword in that sentence. I can get Mocha and Zombiejs running tests fine, but I can't seem to integrate the two - is it possible to use Mocha to run Zombiejs tests, and if so, how would that look?
Just looking for "hello world" to get me started, but a tutorial/example would be even better.
Thanks!
Assuming you have already installed mocha, zombie and expect.js according to their instructions, this should work for you:
// Put below in a file in your *test* folder, ie: test/sampletest.js:
var expect = require('expect.js'),
    Browser = require('zombie'),
    browser = new Browser();

describe('Loads pages', function(){
  it('Google.com', function(done){
    browser.visit("http://www.google.com", function () {
      expect(browser.text("title")).to.equal('Google');
      done();
    });
  });
});
Then you should be able to run the mocha command from your root application folder:
# mocha -R spec
Loads pages
✓ Google.com (873ms)
✔ 1 tests complete (876ms)
Note: If your tests keep failing due to timeouts, it helps to increase mocha's timeout setting a bit by using the -t argument. Check out mocha's documentation for complete details.
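For example, to give each test up to five seconds instead of the default:

mocha -R spec -t 5000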
I wrote a lengthy reply to this question explaining important gotchas about asynchronous tests and good practices ('before()', 'after()', TDD, ...), illustrated by a real-world example:
http://redotheweb.com/2013/01/15/functional-testing-for-nodejs-using-mocha-and-zombie-js.html
If you want to use cucumber-js for your acceptance tests and mocha for your "unit" tests for a page, you can use cuked-zombie (sorry for the advertising).
Install it as described in the readme on GitHub, but place your world config in a file called world-config.js:
/* globals __dirname */
var os = require('os');
var path = require('path');

module.exports = {
  cli: null,
  // reconstructed: pick the testing domain based on the machine name (adjust to your setup)
  domain: os.hostname() === 'addorange-macbook' ? 'my-testing-domain.com' : 'localhost',
  debug: false
};
Then use mocha with zombie in your unit tests like this:
var chai = require('chai'), expect = chai.expect;
var _ = require('lodash');
var cukedZombie = require('cuked-zombie');

describe('Appointments', function() {
  describe('ArrangeFormModel', function() {
    before(function(done) { // execute once
      var that = this;
      cukedZombie.infectWorld(this, require('../world-config'));
      this.world = new this.World(done);
      // this merges the whole world api into your test context
      _.merge(this, this.world);
    });

    describe("display", function() {
      before(function(done) { // executed once, before all tests in the display block run
        var test = this;
        this.browser.authenticate().basic('maxmustermann', 'Ux394Ki');
        this.visitPage('/someurl', function() {
          test.helper = function() {
          };
          done();
        });
      });

      it("something on the /someurl page is returned", function() {
        expect(this.browser.html()).not.to.be.empty;
      });
    });
  });
});