I want to set up a callback to run after jasmine has completed all tasks.
This is what I've tried:
package.json
{
"scripts": {
"test": "jasmine"
}
...
"jasmine": "^2.8.0"
}
spec/support/jasmine.json
{
"helpers": [
"helpers/env.js",
"helpers/**/*.js"
],
...
}
spec/helpers/env.js
jasmine.onComplete( () => console.log('yay, done') )
but keep on getting errors
$ npm test
...
jasmine.onComplete( () => console.log('yay, done') )
^
TypeError: jasmine.onComplete is not a function
...
It seems you are not initializing Jasmine. You need to require and instantiate it first:
var Jasmine = require('jasmine');
var jasmine = new Jasmine();
And then this should work:
jasmine.onComplete(function(passed) {
if(passed) {
console.log('All specs have passed');
}
else {
console.log('At least one spec has failed');
}
});
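Putting it together, a minimal runner script could look like this (a sketch; the config path and callback are assumed to match the jasmine.json and snippets above):
// run.js - a custom Jasmine runner (jasmine ^2.8.0)
var Jasmine = require('jasmine');
var jasmine = new Jasmine();
// Load the same config the jasmine CLI would pick up
jasmine.loadConfigFile('spec/support/jasmine.json');
// Register the completion callback before starting the run
jasmine.onComplete(function(passed) {
    console.log(passed ? 'All specs have passed' : 'At least one spec has failed');
});
jasmine.execute();
You would then point the npm script at it: "test": "node run.js".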
I am trying to develop a project that runs on Electron and Capacitor Android at the same time. I get this error when I run my Electron app. It relates to the preload script, which allows me to do IPC between the main thread and the Vue thread. After installing Capacitor, none of my Electron IPC works.
This is the error:
Uncaught Error: Cannot read properties of undefined (reading 'length')
at EventEmitter.<anonymous> (VM115 preloadx.js:49:57)
at EventEmitter.emit (VM14 node:events:390:28)
at Object.onMessage (VM113 renderer_init:69:746)
This is the preload script. It lives in the public folder; the build copies it into the 'out' folder, which is where it is found during execution.
const {
contextBridge,
ipcRenderer,
} = require("electron");
let validChannels = [
"api",
"ascii",
// some channels here ...
"py-neo",
];
contextBridge.exposeInMainWorld(
"api", {
send: (channel, data) => {
// whitelist channels
if (validChannels.includes(channel)) {
ipcRenderer.send(channel, data);
}
},
receive: (channel, func) => {
if (validChannels.includes(channel)) {
// Deliberately strip event as it includes `sender`
ipcRenderer.on(channel, (event, ...args) => func(...args)); // <-- error on this line!!
}
},
sendSync: (channel, data) => {
// whitelist channels
if (validChannels.includes(channel)) {
return ipcRenderer.sendSync(channel, data);
}
},
}
);
Here is some code from the main thread js file.
function createWindow() {
console.log("createWindow", __dirname);
mainWindow = new BrowserWindow({
width: 800,
height: 600,
webPreferences: {
//sandbox: true,
contextIsolation: true,
enableRemoteModule: true,
nodeIntegration: true,
nodeIntegrationInWorker: true,
preload: path.join(__dirname, "out", "preload.js"), // use a preload script
},
});
I am using Electron 18.1.0 and Node 16.15.1. I would like the IPC to work when I launch on the desktop; it does not need to work when I launch in Android Studio. Below is a segment from my package.json file.
"scripts": {
"start": "vue-cli-service build && electron --disable-gpu --disable-software-rasterizer . ",
"dist": "electron-builder",
"build": "vue-cli-service build",
"android": "npx cap sync android"
},
I did something like the following to remove the error message.
contextBridge.exposeInMainWorld(
"api", {
send: (channel, data) => {
// whitelist channels
if (validChannels.includes(channel)) {
ipcRenderer.send(channel, data);
}
},
receive: (channel, func) => {
if (validChannels.includes(channel)) {
// Deliberately strip event as it includes `sender`
ipcRenderer.on(channel, (event, ...args) => {
console.log(channel , ...args);
try {
func(...args);
}
catch {
console.log(...args);
}
//func(...args)
});
}
},
sendSync: (channel, data) => {
// whitelist channels
if (validChannels.includes(channel)) {
return ipcRenderer.sendSync(channel, data);
}
},
}
);
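A variant of that workaround (a sketch, not tested against this exact setup) also logs the exception itself, which makes it easier to find which renderer-side handler is reading 'length' on an undefined value:
receive: (channel, func) => {
    if (validChannels.includes(channel)) {
        // Deliberately strip event as it includes `sender`
        ipcRenderer.on(channel, (event, ...args) => {
            try {
                func(...args);
            } catch (err) {
                // Log the failing channel, the arguments, and the actual error,
                // so the offending handler can be tracked down
                console.error("IPC handler failed on channel:", channel, args, err);
            }
        });
    }
},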
To generate a code coverage report for a VS Code extension, I am using nyc and running the tests via the VS Code test runner.
Source: https://code.visualstudio.com/api/working-with-extensions/testing-extension
Project structure:
out
  - test
    - unit
      - testcases.js
      - index.js
    - runTest.js
package.json:
"test": "rm -rf .nyc_output/ && nyc node ./out/test/runTest.js",
"nyc": {
"extends": "#istanbuljs/nyc-config-typescript",
"require": [
"ts-node/register",
"source-map-support/register"
],
"report-dir": ".",
"reporter": [
"text",
"html",
"lcov"
],
"exclude": ["out/test/**"],
"include": [ "out/**/*.js" ],
"check-coverage": true
},
index.ts file:
import * as path from 'path';
import * as Mocha from 'mocha';
import * as glob from 'glob';
export function run(): Promise<void> {
const mocha = new Mocha({
    ui: 'tdd',
    color: true,
    timeout: 20000,
});
const testsRoot = path.resolve(__dirname, '../unit');
return new Promise((c, e) => {
glob('**/**.test.js', { cwd: testsRoot }, (err, files) => {
if (err) {
return e(err);
}
// Add files to the test suite
files.forEach(f => {
mocha.addFile(path.resolve(testsRoot, f));
});
try {
// Run the mocha test
mocha.run(failures => {
if (failures > 0) {
e(new Error(`${failures} tests failed.`));
} else {
c();
}
});
} catch (err) {
// eslint-disable-next-line no-console
console.error(err);
e(err);
}
});
});
}
runTest.ts file:
import * as path from 'path';
import { runTests } from 'vscode-test';
async function main() {
try {
// The folder containing the Extension Manifest package.json
// Passed to `--extensionDevelopmentPath`
const extensionDevelopmentPath = path.resolve(__dirname, '../../');
// The path to test runner
// Passed to --extensionTestsPath
//const extensionTestsPath = path.resolve(__dirname, './unit/index-coverage');
const extensionTestsPath = path.resolve(__dirname, './unit/index');
// Download VS Code, unzip it and run the integration test
await runTests({ extensionDevelopmentPath, extensionTestsPath });
} catch (err) {
//console.error('Failed to run tests');
process.exit(1);
}
}
main();
I was not able to generate the code coverage report. It produces a report, but without any information.
What am I doing wrong here?
There are a couple of ways to do this. I found some valuable information while checking the question below:
How do I generate a VS Code TypeScript extension coverage report
The easiest one seems to be from user frenya, but the other two answers also give valuable information.
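The common idea in those answers (sketched below; option names and paths are assumptions based on the config above) is that nyc has to be started inside the extension-host process: runTests spawns a separate VS Code process, so a command line like "nyc node ./out/test/runTest.js" only instruments the runner, which is why the report comes out empty. Roughly, index.ts sets nyc up around the Mocha run:
// Inside out/test/unit/index.js (compiled from index.ts) - a sketch
const path = require('path');
const NYC = require('nyc');

async function setupCoverage() {
    const nyc = new NYC({
        cwd: path.join(__dirname, '..', '..', '..'),  // project root
        exclude: ['out/test/**'],
        reporter: ['text', 'html', 'lcov'],
        hookRequire: true,  // instrument files as the extension host require()s them
    });
    await nyc.reset();  // clean .nyc_output
    nyc.wrap();         // hook require() before any extension code loads
    return nyc;
}
The run() function would await setupCoverage() before adding files to Mocha, and call nyc.writeCoverageFile() and nyc.report() once the run finishes.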
I have a working example with Jest and mocks from the __mocks__ directory:
With a simple Jest setup
// package.json
{
"name": "a",
"version": "1.0.0",
"main": "index.js",
"scripts": {
"test": "jest"
},
...
"devDependencies": {
"jest": "^26.6.3"
},
"dependencies": {
"#octokit/rest": "^18.0.12"
}
}
And then /index.js :
const { Octokit } = require("@octokit/rest");
const octokit = new Octokit();
module.exports.foo = function() {
return octokit.repos.listForOrg({ org: "octokit", type: "public" })
}
with its test (/index.test.js):
const { foo } = require("./index.js");
test("foo should be true", async () => {
expect(await foo()).toEqual([1,2]);
});
and the mock (/__mocks__/@octokit/rest/index.js):
module.exports.Octokit = jest.fn().mockImplementation( () => ({
repos: {
listForOrg: jest.fn().mockResolvedValue([1,2])
}
}) );
This works quite well and tests pass.
With Create React App
However doing the same with Create React App seems to be giving me a weird result:
// package.json
{
"name": "b",
"version": "0.1.0",
"dependencies": {
"#octokit/rest": "^18.0.12",
"#testing-library/jest-dom": "^5.11.4",
"#testing-library/react": "^11.1.0",
"#testing-library/user-event": "^12.1.10",
"react": "^17.0.1",
"react-dom": "^17.0.1",
"react-scripts": "4.0.1",
"web-vitals": "^0.2.4"
},
"scripts": {
"start": "react-scripts start",
"build": "react-scripts build",
"test": "react-scripts test",
"eject": "react-scripts eject"
},
...
}
And then /src/foo.js:
import { Octokit } from "@octokit/rest";
const octokit = new Octokit();
module.exports.foo = function() {
return octokit.repos.listForOrg({ org: "octokit", type: "public" })
}
with its test (/src/foo.test.js):
const { foo } = require("./foo.js");
test("foo should be true", async () => {
expect(await foo()).toEqual([1,2]);
});
and the very same mock (under /src/__mocks__/@octokit/rest/index.js):
export const Octokit = jest.fn().mockImplementation( () => ({
repos: {
listForOrg: jest.fn().mockResolvedValue([1,2])
}
}) );
This makes the test fail:
FAIL src/foo.test.js
✕ foo should be true (2 ms)
● foo should be true
expect(received).toEqual(expected) // deep equality
Expected: [1, 2]
Received: undefined
2 |
3 | test("foo should be true", async () => {
> 4 | expect(await foo()).toEqual([1,2]);
| ^
5 | });
6 |
7 |
at Object.<anonymous> (src/foo.test.js:4:25)
After reading a lot it seems that I can't make __mocks__ work inside Create React App. What's the problem?
The problem is that CRA's default Jest setup automatically resets the mocks, which removes the mockResolvedValue you set.
One way to solve this, which also gives you more control to have different values in different tests (e.g. to test error handling) and assert on what it was called with, is to expose the mock function from the module too:
export const mockListForOrg = jest.fn();
export const Octokit = jest.fn().mockImplementation(() => ({
repos: {
listForOrg: mockListForOrg,
},
}));
Then you configure the value you want in the test, after Jest would have reset it:
import { mockListForOrg } from "@octokit/rest";
import { foo } from "./foo";
test("foo should be true", async () => {
mockListForOrg.mockResolvedValueOnce([1, 2]);
expect(await foo()).toEqual([1, 2]);
});
Another option is to add the following into your package.json to override that configuration, per this issue:
{
...
"jest": {
"resetMocks": false
}
}
This could lead to issues with mock state (calls received) being retained between tests, though, so you'll need to make sure they're getting cleared and/or reset somewhere.
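For example (a sketch; CRA picks up src/setupTests.js automatically), a global afterEach can restore the cleanup that resetMocks was providing:
// src/setupTests.js
afterEach(() => {
    // Clear recorded calls on every mock between tests, without
    // removing implementations the way resetMocks does
    jest.clearAllMocks();
});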
Note that you generally shouldn't mock what you don't own, though - if the interface to @octokit/rest changes, your tests will continue to pass but your code won't work. To avoid this issue, I would recommend either or both of:
Moving the assertions to the transport layer, using e.g. MSW to check that the right request gets made; or
Writing a simple facade that wraps @octokit/rest, decoupling your code from the interface you don't own, and mocking that;
along with higher-level (end-to-end) tests to make sure everything works correctly with the real GitHub API.
In fact, deleting the mocks and writing such a test using MSW:
import { rest } from "msw";
import { setupServer } from "msw/node";
import { foo } from "./foo";
const server = setupServer(rest.get("https://api.github.com/orgs/octokit/repos", (req, res, ctx) => {
return res(ctx.status(200), ctx.json([1, 2]));
}));
beforeAll(() => server.listen());
afterAll(() => server.close());
test("foo should be true", async () => {
expect(await foo()).toEqual([1, 2]);
});
exposes that the current assumption about what octokit.repos.listForOrg would return is inaccurate, because this test fails:
● foo should be true
expect(received).toEqual(expected) // deep equality
Expected: [1, 2]
Received: {"data": [1, 2], "headers": {"content-type": "application/json", "x-powered-by": "msw"}, "status": 200, "url": "https://api.github.com/orgs/octokit/repos?type=public"}
13 |
14 | test("foo should be true", async () => {
> 15 | expect(await foo()).toEqual([1, 2]);
| ^
16 | });
17 |
at Object.<anonymous> (src/foo.test.js:15:25)
Your implementation should actually look something more like:
export async function foo() {
const { data } = await octokit.repos.listForOrg({ org: "octokit", type: "public" });
return data;
}
or:
export function foo() {
return octokit.repos.listForOrg({ org: "octokit", type: "public" }).then(({ data }) => data);
}
I'm trying to implement a few e2e tests in my aurelia-cli app. I've tried looking for docs or blog posts but haven't found anything on e2e setup for the CLI. I've made the following adjustments to the project.
First, I added this to aurelia.json:
"e2eTestRunner": {
"id": "protractor",
"displayName": "Protractor",
"source": "test/e2e/src/**/*.ts",
"dist": "test/e2e/dist/",
"typingsSource": [
"typings/**/*.d.ts",
"custom_typings/**/*.d.ts"
]
},
I also added the e2e tasks under aurelia_project/tasks:
e2e.ts
import * as project from '../aurelia.json';
import * as gulp from 'gulp';
import * as del from 'del';
import * as typescript from 'gulp-typescript';
import * as tsConfig from '../../tsconfig.json';
import {CLIOptions} from 'aurelia-cli';
import { webdriver_update, protractor } from 'gulp-protractor';
function clean() {
return del(project.e2eTestRunner.dist + '*');
}
// cache the compiler at module scope; a `var x = x || null` inside the
// function would be re-initialized to null on every call
let typescriptCompiler = null;
function build() {
if (!typescriptCompiler) {
delete tsConfig.compilerOptions.lib;
typescriptCompiler = typescript.createProject(Object.assign({}, tsConfig.compilerOptions, {
// Add any special overrides for the compiler here
module: 'commonjs'
}));
}
return gulp.src(project.e2eTestRunner.typingsSource.concat(project.e2eTestRunner.source))
.pipe(typescript(typescriptCompiler))
.pipe(gulp.dest(project.e2eTestRunner.dist));
}
// runs build-e2e task
// then runs end to end tasks
// using Protractor: http://angular.github.io/protractor/
function e2e() {
return gulp.src(project.e2eTestRunner.dist + '**/*.js')
.pipe(protractor({
configFile: 'protractor.conf.js',
args: ['--baseUrl', 'http://127.0.0.1:9000']
}))
.on('end', function() { process.exit(); })
.on('error', function(e) { throw e; });
}
export default gulp.series(
webdriver_update,
clean,
build,
e2e
);
and the e2e.json
{
"name": "e2e",
"description": "Runs all e2e tests and reports the results.",
"flags": []
}
I've added a protractor.conf.js file and aurelia.protractor.js to the root of my project.
protractor.conf.js
exports.config = {
directConnect: true,
// Capabilities to be passed to the webdriver instance.
capabilities: {
'browserName': 'chrome'
},
//seleniumAddress: 'http://0.0.0.0:4444',
specs: ['test/e2e/dist/*.js'],
plugins: [{
path: 'aurelia.protractor.js'
}],
// Options to be passed to Jasmine-node.
jasmineNodeOpts: {
showColors: true,
defaultTimeoutInterval: 30000
}
};
aurelia.protractor.js
/* Aurelia Protractor Plugin */
function addValueBindLocator() {
by.addLocator('valueBind', function (bindingModel, opt_parentElement) {
var using = opt_parentElement || document;
var matches = using.querySelectorAll('*[value\\.bind="' + bindingModel +'"]');
var result;
if (matches.length === 0) {
result = null;
} else if (matches.length === 1) {
result = matches[0];
} else {
result = matches;
}
return result;
});
}
function loadAndWaitForAureliaPage(pageUrl) {
browser.get(pageUrl);
return browser.executeAsyncScript(
'var cb = arguments[arguments.length - 1];' +
'document.addEventListener("aurelia-composed", function (e) {' +
' cb("Aurelia App composed")' +
'}, false);'
).then(function(result){
console.log(result);
return result;
});
}
function waitForRouterComplete() {
return browser.executeAsyncScript(
'var cb = arguments[arguments.length - 1];' +
'document.querySelector("[aurelia-app]")' +
'.aurelia.subscribeOnce("router:navigation:complete", function() {' +
' cb(true)' +
'});'
).then(function(result){
return result;
});
}
/* Plugin hooks */
exports.setup = function(config) {
// Ignore the default Angular synchronization helpers
browser.ignoreSynchronization = true;
// add the aurelia specific valueBind locator
addValueBindLocator();
// attach a new way to browser.get a page and wait for Aurelia to complete loading
browser.loadAndWaitForAureliaPage = loadAndWaitForAureliaPage;
// wait for router navigations to complete
browser.waitForRouterComplete = waitForRouterComplete;
};
exports.teardown = function(config) {};
exports.postResults = function(config) {};
and I added a sample test in my test/e2e/src folder, but it doesn't get executed. I've also tried implementing an e2e test within the unit test folder, since when I run au test I see that a Chrome browser opens up:
describe('aurelia homepage', function() {
it('should load page', function() {
browser.get('http://www.aurelia.io');
expect(browser.getTitle()).toEqual('Home | Aurelia');
});
});
But this throws the error "browser is undefined". Am I missing something with e2e testing and the CLI? I know aurelia-protractor comes pre-installed, but I don't see any way to run it.
I know this is a very late answer, but for others looking for one: you can import browser from the aurelia-protractor plugin:
import {browser} from 'aurelia-protractor-plugin/protractor';
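With that import in place, the sample spec from the question would look like this (a sketch; the file name is hypothetical):
// test/e2e/src/homepage.spec.ts
import {browser} from 'aurelia-protractor-plugin/protractor';

describe('aurelia homepage', function() {
    it('should load page', function() {
        browser.get('http://www.aurelia.io');
        expect(browser.getTitle()).toEqual('Home | Aurelia');
    });
});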
I am pretty new to the React world and am trying to write a simple friends-list application. I wrote my friends store in ES6 style and am using Babel to transpile from ES6 to ES5.
import AppDispatcher from '../dispatcher/app_dispatcher';
import { EventEmitter } from 'events';
import FRIENDS_CONST from '../constants/friends';
const CHANGE_EVENT = 'CHANGE';
let friendsList = [];
let add = (name) => {
let counter = friendsList.length + 1;
let newFriend = {
id: counter,
name: name
};
friendsList.push(newFriend);
}
let remove = (id) => {
let index = friendsList.findIndex(e => e.id == id);
// splice rather than delete, so no hole is left in the array
friendsList.splice(index, 1);
}
let FriendsStore = Object.assign({}, EventEmitter.prototype, {
getAll() {
return friendsList;
},
// regular methods rather than arrow functions, so that `this`
// is the store (an EventEmitter) and not the module scope
emitChange() {
this.emit(CHANGE_EVENT);
},
addChangeListener(callback) {
this.on(CHANGE_EVENT, callback);
},
removeChangeListener(callback) {
this.removeListener(CHANGE_EVENT, callback);
}
});
AppDispatcher.register((action) => {
switch (action.actionType) {
case FRIENDS_CONST.ADD_FRIENDS:
add(action.name);
FriendsStore.emitChange();
break;
case FRIENDS_CONST.REMOVE_FRIENDS:
remove(action.id);
FriendsStore.emitChange();
break;
}
});
export default FriendsStore;
Now I want to test my store and wrote the unit test also in es6
jest.dontMock('../../constants/friends');
jest.dontMock('../friends_store');
describe('FriendsStore', () => {
// `import` declarations are only valid at the top level of a module,
// so pull these in with require() inside the suite instead
const FRIENDS = require('../../constants/friends').default;
const AppDispatcher = require('../../dispatcher/AppDispatcher').default;
const FriendsStore = require('../friends_store').default;
let FakeAppDispatcher;
let FakeFriendsStore;
let callback;
let addFriends = {
actionType: FRIENDS.ADD_FRIENDS,
name: 'Many'
};
let removeFriend = {
actionType: FRIENDS.REMOVE_FRIENDS,
id: '3'
};
beforeEach(function() {
FakeAppDispatcher = AppDispatcher;
FakeFriendsStore = FriendsStore;
callback = AppDispatcher.register.mock.calls[0][0];
});
it('Should initialize with no friends items', function() {
var all = FriendsStore.getAll();
expect(all).toEqual([]);
});
});
When I execute the tests with npm test, I get this error message:
> react-starterify@0.0.9 test /Volumes/Developer/reactjs/app5
> echo "Error: no test specified"
Error: no test specified
What am I doing wrong? The file structure looks as follows:
I did it following the tutorial:
npm install --save-dev jest babel-jest babel-preset-es2015 babel-preset-react react-test-renderer
then
add this to package.json:
"scripts": {
"test": "jest"
},
"jest": {
"testPathDirs": [
"src/main/resources/web_pages/__tests__"
]
},
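For reference, a spec matching the output below might look like this (hypothetical - the actual ValidationUtil isn't shown here):
// src/main/resources/web_pages/__tests__/modules/utils/ValidationUtil.spec.js
import { validateEmail } from '../../../modules/utils/ValidationUtil';

test('ValidateEmail', () => {
    expect(validateEmail('user@example.com')).toBe(true);
    expect(validateEmail('not-an-email')).toBe(false);
});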
Result:
PASS src/main/resources/web_pages/__tests__/modules/utils/ValidationUtil.spec.js (5.214s)
✓ ValidateEmail (5ms)
Test Suites: 1 passed, 1 total
Tests: 1 passed, 1 total
Snapshots: 0 total
Time: 6.092s, estimated 7s
Ran all test suites.
To test ES6 syntax and JSX files, they need to be transformed for Jest. Jest has a config variable where you can define a preprocessor (scriptPreprocessor). You can use the babel-jest preprocessor:
Make the following changes to package.json:
{
"devDependencies": {
"babel-jest": "*",
"jest-cli": "*"
},
"scripts": {
"test": "jest"
},
"jest": {
"scriptPreprocessor": "<rootDir>/node_modules/babel-jest",
"testFileExtensions": ["es6", "js"],
"moduleFileExtensions": ["js", "json", "es6"]
}
}
And run:
$ npm install
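Note that babel-jest transforms files according to your Babel configuration, so you also need one; a minimal .babelrc for this setup might be (preset names assumed, matching the presets installed earlier in this thread):
{
    "presets": ["es2015", "react"]
}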