Parsoid doesn't start - parsoid

I installed Parsoid from its official repository, and configured it by adding my MediaWiki API endpoint to /etc/mediawiki/parsoid/settings.js. I also removed the interwiki options from my /usr/lib/parsoid/src/api/localsettings.js file, because they seem to have been replaced by the parsoidConfig.setMwApi option in the settings.js file.
When running nodejs /usr/lib/parsoid/src/api/server.js from the command prompt, I get the following error message:
[fatal][worker][4915] uncaught exception object is not a function
TypeError: object is not a function
at new ParsoidService (/usr/lib/parsoid/src/api/ParsoidService.js:43:12)
at Object.<anonymous> (/usr/lib/parsoid/src/api/server.js:203:12)
at Module._compile (module.js:456:26)
at Object.Module._extensions..js (module.js:474:10)
at Module.load (module.js:356:32)
at Function.Module._load (module.js:312:12)
at Function.Module.runMain (module.js:497:10)
at startup (node.js:119:16)
at node.js:902:3
[warning][master][4882] worker 4911 died (1), restarting.
When I run service parsoid start and service parsoid status, I am always told that there is no parsoid process running.
Can someone explain to me why Parsoid does not work?
EDIT:
localsettings.js
/*
 * This is a sample configuration file.
 *
 * Copy this file to localsettings.js and edit that file to fit your needs.
 *
 * Also see the file ParserService.js for more information.
 */
exports.setup = function( parsoidConfig ) {
    // The URL here is supposed to be your MediaWiki installation root
    //parsoidConfig.setInterwiki( 'localhost', 'http://domain.com/Wiki/api.php' );
    //parsoidConfig.setInterwiki( 'foo', 'http://localhost/wikiarst/api.php' );
    //parsoidConfig.setInterwiki( 'noconn', 'http://213.127.84.12:80/wikiarst/api.php' );
    //parsoidConfig.setInterwiki( 'disney', 'http://disneychannel.wikia.com/api.php' );

    // Use the PHP preprocessor to expand templates via the MW API (default true)
    //parsoidConfig.usePHPPreProcessor = true;

    // Use selective serialization (default false)
    parsoidConfig.useSelser = true;

    // parsoid cache url
    //parsoidConfig.parsoidCacheURI = 'http://localhost:8000/';

    //parsoidConfig.trace = true;
    //parsoidConfig.traceFlags = 'selser,wts';
    //parsoidConfig.traceFlags = 'selser';
    //parsoidConfig.defaultAPIProxyURI = 'http://localhost/';
};
/* vim: set filetype=javascript noexpandtab ts=4 sw=4 cindent : */
settings.js
/*
 * This is a sample configuration file.
 *
 * Copy this file to localsettings.js and edit that file to fit your needs.
 *
 * Also see:
 * - api/server.js for more information about passing config files via
 *   the commandline.
 * - lib/mediawiki.ParsoidConfig.js all the properties
 *   that you can configure here. Not all properties are
 *   documented here.
 */
'use strict';

exports.setup = function(parsoidConfig) {
    // Set your own user-agent string
    // Otherwise, defaults to "Parsoid/<current-version-defined-in-package.json>"
    //parsoidConfig.userAgent = "My-User-Agent-String";

    // The URL of your MediaWiki API endpoint.
    parsoidConfig.setMwApi({ prefix: 'localhost', uri: 'http://domain.com/Wiki/api.php' });

    // To specify a proxy (or proxy headers) specific to this prefix (which
    // overrides defaultAPIProxyURI) use:
    /*
    parsoidConfig.setMwApi({
        prefix: 'localhost',
        uri: 'http://localhost/w/api.php',
        // set `proxy` to `null` to override and force no proxying.
        proxy: {
            uri: 'http://my.proxy:1234/',
            headers: { 'X-Forwarded-Proto': 'https' } // headers are optional
        }
    });
    */

    // We pre-define wikipedias as 'enwiki', 'dewiki' etc. Similarly
    // for other projects: 'enwiktionary', 'enwikiquote', 'enwikibooks',
    // 'enwikivoyage' etc. (default true)
    //parsoidConfig.loadWMF = false;

    // A default proxy to connect to the API endpoints.
    // Default: undefined (no proxying).
    // Overridden by per-wiki proxy config in setMwApi.
    //parsoidConfig.defaultAPIProxyURI = 'http://proxy.example.org:8080';

    // Enable debug mode (prints extra debugging messages)
    //parsoidConfig.debug = true;

    // Use the PHP preprocessor to expand templates via the MW API (default true)
    //parsoidConfig.usePHPPreProcessor = false;

    // Use selective serialization (default false)
    parsoidConfig.useSelser = true;

    // Allow cross-domain requests to the API (default '*')
    // Sets Access-Control-Allow-Origin header
    // disable:
    //parsoidConfig.allowCORS = false;
    // restrict:
    //parsoidConfig.allowCORS = 'some.domain.org';

    // Set to true for using the default performance metrics reporting to statsd
    // If true, provide the statsd host/port values
    /*
    parsoidConfig.useDefaultPerformanceTimer = true;
    parsoidConfig.txstatsdHost = 'statsd.domain.org';
    parsoidConfig.txstatsdPort = 8125;
    */

    // Alternatively, define performanceTimer as follows:
    /*
    parsoidConfig.performanceTimer = {
        timing: function(metricName, time) { }, // do-something-with-it
        count: function(metricName, value) { }, // do-something-with-it
    };
    */

    // How often should we emit a heap sample? Time in ms.
    // This setting is only relevant if you have enabled
    // performance monitoring either via the default metrics
    // OR by defining your own performanceTimer properties
    //parsoidConfig.heapUsageSampleInterval = 5 * 60 * 1000;

    // Allow override of port/interface:
    //parsoidConfig.serverPort = 8000;
    //parsoidConfig.serverInterface = '127.0.0.1';

    // The URL of your LintBridge API endpoint
    //parsoidConfig.linterAPI = 'http://lintbridge.wmflabs.org/add';

    // Require SSL certificates to be valid (default true)
    // Set to false when using self-signed SSL certificates
    //parsoidConfig.strictSSL = false;

    // Use a different server for CSS style modules.
    // Set to true to use bits.wikimedia.org, or to a string with the URI.
    // Leaving it undefined (the default) will use the same URI as the MW API,
    // changing api.php for load.php.
    //parsoidConfig.modulesLoadURI = true;

    // Suppress some warnings from the Mediawiki API
    // (defaults to suppressing warnings which the Parsoid team knows to
    // be harmless)
    //parsoidConfig.suppressMwApiWarnings = /annoying warning|other warning/;
};

It seems that your /etc/mediawiki/parsoid/settings.js and/or /usr/lib/parsoid/src/api/localsettings.js is broken, and Parsoid is crashing as soon as it tries to look at your parsoidConfig. I recommend going through the instructions at https://www.mediawiki.org/wiki/Parsoid/Setup#Configuration carefully, and if you still have problems then please post excerpts of your settings files.
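For reference, a known-good configuration can be very small. A minimal sketch (reusing the setMwApi call from the settings above; adjust prefix and uri to your installation) is a useful baseline to rule out a syntax error elsewhere in the file:

'use strict';

exports.setup = function(parsoidConfig) {
    // Only the API registration, nothing else.
    parsoidConfig.setMwApi({ prefix: 'localhost', uri: 'http://domain.com/Wiki/api.php' });
};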

Related

webpack multicompile via node api (^4.44.2)

I have a Webpack configuration repository to separate the application boilerplate from the Webpack configuration. The application depends on the repository where the Webpack configuration lives. I made a binary in the Webpack repository so that, in the application, I could compile and run this configuration in development. The configuration merges a common configuration with the environment that was passed in.
Problem: The scenario is beautiful, but it is not working in the part that compiles and serves the application. Apparently my configurations are OK - I isolated them and tested them separately. And I'm also following the v4 documentation for the Node API.
As I have nowhere else to turn, I'm sorry if I'm not on the right platform; I am studying how to compile different configurations of the same application (boilerplate) using webpack.
Link to the code.
I'd appreciate an example repo...
I came across several problems pointing to the libs of the webpack and webpack-dev-server packages. However, today I got what I wanted. I will share it here for future reference by other users.
My goal was to be able to trigger the development or production environment from a Node script, involving the construction of multiple front-end applications, which was abstracted in webpack.config.
Now I can run mycli development in the shell and this will trigger the construction of the configuration for that environment.
// mycli.js
#!/usr/bin/env node
const webpack = require('webpack')
const WebpackDevServer = require('webpack-dev-server')
const webpackConfiguration = require('./webpack/webpack.config')

const args = (process.argv && process.argv.length > 2) ? process.argv.slice(2) : []
const mode = args.length > 0 ? args[0] : 'development'
const config = webpackConfiguration(mode)

/**
 * Minimum webpack configuration for cli validation.
 * @see {@link webpackConfiguration} for further information
 */
const minConfig = {
    entry: __dirname + '/test.js',
    mode,
    output: {
        path: '/dist',
        filename: 'bundle.js'
    }
}

/** @type {WebpackCompilerInstance} compiler */
const compiler = webpack(minConfig)

switch (config.mode) {
    case 'development':
        /**
         * Recommended by documentation:
         * "If you're using dev-server through the Node.js API,
         * the options in devServer will be ignored. Pass the options
         * as a second parameter instead."
         * @see {@link https://v4.webpack.js.org/configuration/dev-server/#devserver} for further information.
         * @see {@link https://github.com/webpack/webpack-dev-server/tree/master/examples/api/simple} for an example
         */
        const devServerConfig = config.devserver;
        if (config) delete config.devserver
        const devServerOptions = Object.assign({}, devServerConfig, {
            open: true,
            stats: {
                colors: true,
            },
        })
        const devserverCallback = (err) => {
            if (err) throw err
            console.log('webpack-dev-server listening...')
        }
        new WebpackDevServer(compiler, devServerOptions).listen(devServerConfig.port, devServerConfig.host, devserverCallback)
        break;
    case 'production':
        const compilerCallback = (err, stats) => {
            console.log(stats, err)
            if (err) throw err
            process.stdout.write(`Stats: \n${stats} \n`)
            console.log('Compiler has finished execution.')
        }
        compiler.run(compilerCallback)
        break;
    default:
        console.error('No matching mode. Try "development" or "production".')
        break;
}
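With this saved as mycli.js, the mode comes from the first CLI argument, so usage looks like this (a sketch; mycli is whatever name you expose the script under as a package bin):
node mycli.js development
node mycli.js production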

webdriverio Error: Could not request headers from chromedriver_mac64.zip: Error: read ECONNRESET - Mac OSX

I am writing some tests in webdriverio for one of the apps. I start the Selenium server and run the following command:
./node_modules/.bin/wdio wdio.conf.js
When I run this on my old Windows computer it works just fine; now that I have a new Mac, it fails. The following is the stack trace:
A service failed in the 'onPrepare' hook
Error: Could not request headers from https://chromedriver.storage.googleapis.com/2.43/chromedriver_mac64.zip: Error: read ECONNRESET
at Request.<anonymous> (/Users/fdfgd/Desktop/test_scripts/test/node_modules/selenium-standalone/lib/install.js:552:8)
at Object.onceWrapper (events.js:286:20)
at Request.emit (events.js:198:13)
at Request.EventEmitter.emit (domain.js:448:20)
at Request.onRequestError (/Users/v6x5932/Desktop/test_scripts/test/node_modules/request/request.js:881:8)
at ClientRequest.emit (events.js:198:13)
at ClientRequest.EventEmitter.emit (domain.js:448:20)
at TLSSocket.socketErrorListener (_http_client.js:392:9)
at TLSSocket.emit (events.js:198:13)
at TLSSocket.EventEmitter.emit (domain.js:448:20)
Continue...
Wrote xunit report "WDIO.xunit.chrome.0-0.xml" to [./test/reports].
0 passing (1.50s)
npm ERR! code ELIFECYCLE
npm ERR! errno 1
npm ERR! test#1.0.0 run-wdio-tests: `wdio wdio.conf.js`
npm ERR! Exit status 1
npm ERR!
npm ERR! Failed at the test#1.0.0 run-wdio-tests script.
npm ERR! This is probably not a problem with npm. There is likely additional logging output above.
npm ERR! A complete log of this run can be found in:
npm ERR! /Users/dsfaf/.npm/_logs/2019-09-05T11_55_52_274Z-debug.log
Here is my wdio.conf.js file
exports.config = {
    //
    // ==================
    // Specify Test Files
    // ==================
    // Define which test specs should run. The pattern is relative to the directory
    // from which `wdio` was called. Notice that, if you are calling `wdio` from an
    // NPM script (see https://docs.npmjs.com/cli/run-script) then the current working
    // directory is where your package.json resides, so `wdio` will be called from there.
    //
    specs: [
        './test/specs/*.js'
    ],
    // Patterns to exclude.
    exclude: [
        // 'path/to/excluded/files'
    ],
    //
    // ============
    // Capabilities
    // ============
    // Define your capabilities here. WebdriverIO can run multiple capabilities at the same
    // time. Depending on the number of capabilities, WebdriverIO launches several test
    // sessions. Within your capabilities you can overwrite the spec and exclude options in
    // order to group specific specs to a specific capability.
    //
    // First, you can define how many instances should be started at the same time. Let's
    // say you have 3 different capabilities (Chrome, Firefox, and Safari) and you have
    // set maxInstances to 1; wdio will spawn 3 processes. Therefore, if you have 10 spec
    // files and you set maxInstances to 10, all spec files will get tested at the same time
    // and 30 processes will get spawned. The property handles how many capabilities
    // from the same test should run tests.
    //
    maxInstances: 10,
    //
    // If you have trouble getting all important capabilities together, check out the
    // Sauce Labs platform configurator - a great tool to configure your capabilities:
    // https://docs.saucelabs.com/reference/platforms-configurator
    //
    capabilities: [{
        // maxInstances can get overwritten per capability. So if you have an in-house Selenium
        // grid with only 5 firefox instances available you can make sure that not more than
        // 5 instances get started at a time.
        maxInstances: 5,
        //
        browserName: 'chrome',
        proxy: {
            proxyType: "MANUAL",
            httpProxy: "domain:port",
            // sslProxy: "domain:port"
        }
    }],
    //
    // ===================
    // Test Configurations
    // ===================
    // Define all options that are relevant for the WebdriverIO instance here
    //
    // By default WebdriverIO commands are executed in a synchronous way using
    // the wdio-sync package. If you still want to run your tests in an async way
    // e.g. using promises you can set the sync option to false.
    sync: true,
    //
    // Level of logging verbosity: silent | verbose | command | data | result | error
    logLevel: 'silent',
    //
    // Enables colors for log output.
    coloredLogs: true,
    //
    // Warns when a deprecated command is used
    deprecationWarnings: true,
    //
    // If you only want to run your tests until a specific amount of tests have failed use
    // bail (default is 0 - don't bail, run all tests).
    bail: 0,
    //
    // Saves a screenshot to a given path if a command fails.
    screenshotPath: './errorShots/',
    //
    // Set a base URL in order to shorten url command calls. If your `url` parameter starts
    // with `/`, the base url gets prepended, not including the path portion of your baseUrl.
    // If your `url` parameter starts without a scheme or `/` (like `some/path`), the base url
    // gets prepended directly.
    baseUrl: 'http://localhost',
    //
    // Default timeout for all waitFor* commands.
    waitforTimeout: 10000,
    //
    // Default timeout in milliseconds for request
    // if Selenium Grid doesn't send response
    connectionRetryTimeout: 90000,
    //
    // Default request retries count
    connectionRetryCount: 3,
    //
    // Initialize the browser instance with a WebdriverIO plugin. The object should have the
    // plugin name as key and the desired plugin options as properties. Make sure you have
    // the plugin installed before running any tests. The following plugins are currently
    // available:
    // WebdriverCSS: https://github.com/webdriverio/webdrivercss
    // WebdriverRTC: https://github.com/webdriverio/webdriverrtc
    // Browserevent: https://github.com/webdriverio/browserevent
    // plugins: {
    //     webdrivercss: {
    //         screenshotRoot: 'my-shots',
    //         failedComparisonsRoot: 'diffs',
    //         misMatchTolerance: 0.05,
    //         screenWidth: [320,480,640,1024]
    //     },
    //     webdriverrtc: {},
    //     browserevent: {}
    // },
    //
    // Test runner services
    // Services take over a specific job you don't want to take care of. They enhance
    // your test setup with almost no effort. Unlike plugins, they don't add new
    // commands. Instead, they hook themselves up into the test process.
    services: ['selenium-standalone'],
    //
    // Framework you want to run your specs with.
    // The following are supported: Mocha, Jasmine, and Cucumber
    // see also: http://webdriver.io/guide/testrunner/frameworks.html
    //
    // Make sure you have the wdio adapter package for the specific framework installed
    // before running any tests.
    framework: 'mocha',
    //
    // Test reporter for stdout.
    // The only one supported by default is 'dot'
    // see also: http://webdriver.io/guide/reporters/dot.html
    reporters: ['junit','allure'],
    reporterOptions: {
        junit: {
            outputDir: './test/reports'
        },
        allure: {
            outputDir: 'allure-results'
        }
    },
    //
    // Options to be passed to Mocha.
    // See the full list at http://mochajs.org/
    mochaOpts: {
        ui: 'bdd',
        timeout: 60000
    },
    //
    // =====
    // Hooks
    // =====
    // WebdriverIO provides several hooks you can use to interfere with the test process in order to enhance
    // it and to build services around it. You can either apply a single function or an array of
    // methods to it. If one of them returns with a promise, WebdriverIO will wait until that promise got
    // resolved to continue.
    /**
     * Gets executed once before all workers get launched.
     * @param {Object} config wdio configuration object
     * @param {Array.<Object>} capabilities list of capabilities details
     */
    // onPrepare: function (config, capabilities) {
    // },
    /**
     * Gets executed just before initialising the webdriver session and test framework. It allows you
     * to manipulate configurations depending on the capability or spec.
     * @param {Object} config wdio configuration object
     * @param {Array.<Object>} capabilities list of capabilities details
     * @param {Array.<String>} specs List of spec file paths that are to be run
     */
    // beforeSession: function (config, capabilities, specs) {
    // },
    /**
     * Gets executed before test execution begins. At this point you can access all global
     * variables like `browser`. It is the perfect place to define custom commands.
     * @param {Array.<Object>} capabilities list of capabilities details
     * @param {Array.<String>} specs List of spec file paths that are to be run
     */
    // before: function (capabilities, specs) {
    // },
    /**
     * Runs before a WebdriverIO command gets executed.
     * @param {String} commandName hook command name
     * @param {Array} args arguments that command would receive
     */
    // beforeCommand: function (commandName, args) {
    // },
    /**
     * Hook that gets executed before the suite starts
     * @param {Object} suite suite details
     */
    // beforeSuite: function (suite) {
    // },
    /**
     * Function to be executed before a test (in Mocha/Jasmine) or a step (in Cucumber) starts.
     * @param {Object} test test details
     */
    // beforeTest: function (test) {
    // },
    /**
     * Hook that gets executed _before_ a hook within the suite starts (e.g. runs before calling
     * beforeEach in Mocha)
     */
    // beforeHook: function () {
    // },
    /**
     * Hook that gets executed _after_ a hook within the suite ends (e.g. runs after calling
     * afterEach in Mocha)
     */
    // afterHook: function () {
    // },
    /**
     * Function to be executed after a test (in Mocha/Jasmine) or a step (in Cucumber) ends.
     * @param {Object} test test details
     */
    // afterTest: function (test) {
    // },
    /**
     * Hook that gets executed after the suite has ended
     * @param {Object} suite suite details
     */
    // afterSuite: function (suite) {
    // },
    /**
     * Runs after a WebdriverIO command gets executed
     * @param {String} commandName hook command name
     * @param {Array} args arguments that command would receive
     * @param {Number} result 0 - command success, 1 - command error
     * @param {Object} error error object if any
     */
    // afterCommand: function (commandName, args, result, error) {
    // },
    /**
     * Gets executed after all tests are done. You still have access to all global variables from
     * the test.
     * @param {Number} result 0 - test pass, 1 - test fail
     * @param {Array.<Object>} capabilities list of capabilities details
     * @param {Array.<String>} specs List of spec file paths that ran
     */
    // after: function (result, capabilities, specs) {
    // },
    /**
     * Gets executed right after terminating the webdriver session.
     * @param {Object} config wdio configuration object
     * @param {Array.<Object>} capabilities list of capabilities details
     * @param {Array.<String>} specs List of spec file paths that ran
     */
    // afterSession: function (config, capabilities, specs) {
    // },
    /**
     * Gets executed after all workers got shut down and the process is about to exit.
     * @param {Object} exitCode 0 - success, 1 - fail
     * @param {Object} config wdio configuration object
     * @param {Array.<Object>} capabilities list of capabilities details
     */
    // onComplete: function(exitCode, config, capabilities) {
    // }
}
I have tried the following approaches in the wdio.conf.js file.
Approach 1: Lower case manual
proxy: {
    proxyType: "manual",
    httpProxy: "http://domain:port"
}
Approach 2: Upper case manual
proxy: {
    proxyType: "MANUAL",
    httpProxy: "http://domain:port"
}
Approach 3: proxy with http://
proxy: {
    proxyType: "MANUAL",
    httpProxy: "http://domain:port"
}
Approach 4: proxy without http://
proxy: {
    proxyType: "MANUAL",
    httpProxy: "domain:port"
}
Can you let me know what the issue could be? I also tried adding the proxy to the terminal session and it's still the same.
This error message...
Could not request headers from https://chromedriver.storage.googleapis.com/2.43/chromedriver_mac64.zip: Error: read ECONNRESET
...implies that ChromeDriver was unable to initiate/spawn a new web browser (i.e. Chrome Browser) session.
Your main issue is the incompatibility between the versions of the binaries you are using, as follows:
You are using chromedriver=2.43.
The release notes of chromedriver=2.43 clearly mention the following:
Supports Chrome v69-71
Presumably you are using the latest chrome=76.0.
The release notes of ChromeDriver v76.0 clearly mention the following:
Supports Chrome version 76
So there is a clear mismatch between ChromeDriver v2.43 and the Chrome Browser v76.0.
Solution
Ensure that:
ChromeDriver is updated to current ChromeDriver v76.0 level.
Chrome is updated to current Chrome Version 76.0 level. (as per ChromeDriver v76.0 release notes)
Clean your Project Workspace through your IDE and Rebuild your project with required dependencies only.
If your base Web Client version is too old, then uninstall and install a recent GA and released version of Web Client.
Reboot your system.
Execute your @Test as a non-root user.
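Since the failing download in the stack trace above comes from the selenium-standalone service, one concrete way to apply this with WebdriverIO is to pin a current driver version in wdio.conf.js. A sketch, assuming your wdio selenium-standalone service version honors the seleniumInstallArgs/seleniumArgs options (the ChromeDriver version string below is illustrative; match it to your installed Chrome):

// wdio.conf.js (excerpt)
exports.config = {
    // ...
    services: ['selenium-standalone'],
    // Assumption: these options are forwarded to selenium-standalone's
    // install() and start() calls by the service.
    seleniumInstallArgs: {
        drivers: {
            chrome: { version: '76.0.3809.68' } // illustrative; match your Chrome
        }
    },
    seleniumArgs: {
        drivers: {
            chrome: { version: '76.0.3809.68' }
        }
    },
    // ...
};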

How can I control PhantomJS in WebdriverIO to skip downloading/loading some kinds of resources (i.e., fonts, images, CSS, JS)?

Below is my code; currently this works fine, but I want to optimize it to not load/download some resources like fonts, images, CSS, and JS. I've read the API docs but I'm not able to find the related configs. I'm using WebdriverIO with PhantomJS as the browser.
'use strict';
var _ = require('lodash');
var webdriverio = require('webdriverio');
var cheerio = require('cheerio');

/**
 * Base class for browser based crawler.
 * To run this crawler you need to first run phantomJS with webdriver on localhost
 * ```
 * ./phantomjs --webdriver 4444
 * ```
 */
class BaseWebdriverIO {
    /**
     * Constructor
     * @param opts - webdriverio config http://webdriver.io/guide/getstarted/configuration.html
     */
    constructor(opts) {
        this.opts = _.defaults(opts || {}, {
            desiredCapabilities: {
                browserName: 'phantomjs'
            }
        });
    }

    /**
     * webdriver and parse url func
     * @param parseUrl
     * @returns {Promise}
     */
    parse(parseUrl) {
        console.log("getting url", parseUrl);
        return webdriverio.remote(this.opts)
            .init()
            .url(parseUrl)
            .waitForVisible('body')
            .getHTML('body', false, function(err, html) {
                if (err) {
                    throw new Error(err);
                }
                this.end();
                return cheerio.load(html);
            });
    }
}

module.exports = BaseWebdriverIO;
I'm not able to find any documentation related to this.
Can anyone tell me how I can do that?
Edit/Update: I've found a working example that stops images from loading by setting phantomjs.cli.args, from here: https://github.com/angular/protractor/issues/150#issuecomment-128109354. Some basic settings have been configured and work fine; this is the modified desiredCapabilities settings object:
desiredCapabilities: {
    'browserName': 'phantomjs',
    'phantomjs.binary.path': require('phantomjs').path,
    'phantomjs.cli.args': [
        '--ignore-ssl-errors=true',
        '--ssl-protocol=any', // tlsv1
        '--web-security=false',
        '--load-images=false',
        //'--debug=false',
        //'--webdriver-logfile=webdriver.log',
        //'--webdriver-loglevel=DEBUG',
    ],
    javascriptEnabled: false,
    logLevel: 'verbose'
}
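A related sketch: GhostDriver also maps capabilities prefixed with phantomjs.page.settings.* onto the PhantomJS page settings, so image loading can alternatively be disabled there (assumption: your PhantomJS/GhostDriver version honors this prefix; the resourceTimeout value is illustrative):

desiredCapabilities: {
    browserName: 'phantomjs',
    // Assumption: GhostDriver forwards these to page.settings.*
    'phantomjs.page.settings.loadImages': false,
    'phantomjs.page.settings.resourceTimeout': 5000 // ms, illustrative
}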
For CSS/fonts optimization, I found a question raised on Stack Overflow, How can I control PhantomJS to skip download some kind of resource?, and the solution discussed there is something like this:
page.onResourceRequested = function(requestData, request) {
    if ((/http:\/\/.+?\.css/gi).test(requestData['url']) || requestData['Content-Type'] == 'text/css') {
        console.log('The url of the request is matching. Aborting: ' + requestData['url']);
        // request.abort();
        request.cancel();
    }
};
But I'm not able to trigger this function via WebdriverIO's desiredCapabilities config object, i.e., onResourceRequested().
Can anyone tell me how I can call/define this function in my WebdriverIO script capabilities, or in any other way? Thanks.

What is the correct way to load polyfills and shims with Browserify

I'm building a web app and I'm getting to know and love Browserify. One thing has bugged me though.
I'm using some ES6 features that need to be shimmed/polyfilled in older browsers such as es6-promise and object-assign (packages on npm).
Currently I'm just loading them into each module that needs them:
var assign = require('object-assign');
var Promise = require('es6-promise');
I know this is definitely not the way to go. It is hard to maintain and I would like to use the ES6 features transparently instead of having to "depend" on them via requires.
What's the definitive way to load shims like these? I've seen several examples around the internet but they're all different. I could:
load them externally:
var bundle = browserify();
bundle.require('es6-promise');
// or should I use bundle.add to make sure the code is run???
The problem I have here is that I don't know in which order the modules will be loaded in the browser, so the polyfilling might not have happened yet at call sites that need the polyfilled functionality.
This has the additional downside that backend code cannot benefit from these polyfills (unless I'm missing something).
use browserify-shim or something similar. I don't really see how this would work for ES6 features.
manually set up the polyfilling:
Object.assign = require('object-assign');
Don't require polyfills in your modules; that's an anti-pattern. Your modules should assume that the runtime is patched (when needed), and that should be part of the contract. A good example of this is ReactJS, where they explicitly define the minimum runtime requirements for the library to work: http://facebook.github.io/react/docs/working-with-the-browser.html#browser-support-and-polyfills
You could use a polyfill service (e.g.: https://cdn.polyfill.io/) to include an optimized script tag at the top of your page to guarantee that the runtime is patched correctly with the pieces you need, while modern browsers will not get penalized.
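As an illustration, that boils down to a single tag near the top of the page (a sketch: the features list here is made up for the example, and the query syntax is the cdn.polyfill.io v2 one):

<script src="https://cdn.polyfill.io/v2/polyfill.min.js?features=Promise,Object.assign"></script>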
This is the method that I'm using. The key is that you have to export your polyfill properly at the top of your main entry file.
The following won't work:
// Using ES6 imports
import './polyfill';
// Using CommonJS style
require('./polyfill');
... // rest of your code goes here
You actually need to export the polyfill:
// Using ES6 export
export * from './polyfill';
// Using CommonJS style
var polyfill = require('./polyfill');
... // rest of your code goes here
Your polyfills will load correctly if you do either of the latter methods.
Below you can find examples of my polyfills.
polyfill.js:
import './polyfill/Array.from';
import './polyfill/Object.assign';
Object.assign:
if (typeof Object.assign !== 'function') {
    (function iife() {
        const ObjectHasOwnProperty = Object.prototype.hasOwnProperty;

        /**
         * Copy the values of all enumerable own properties from one source
         * object to a target object. It will return the target object.
         * @param {Object} target The target object.
         * @param {Object} source The source object.
         * @return {Object} The target object.
         */
        function shallowAssign(target, source) {
            if (target === source) return target;
            Object.keys(source).forEach((key) => {
                // Avoid bugs when hasOwnProperty is shadowed
                if (ObjectHasOwnProperty.call(source, key)) {
                    target[key] = source[key];
                }
            });
            return target;
        }

        /**
         * Copy the values of all enumerable own properties from one or more
         * source objects to a target object. It will return the target object.
         * @param {Object} target The target object.
         * @param {...Object} sources The source object(s).
         * @return {Object} The target object.
         */
        Object.assign = function assign(target, ...sources) {
            if (target === null || target === undefined) {
                throw new TypeError('Cannot convert undefined or null to object');
            }
            sources.forEach((source) => {
                // Skip over if undefined or null
                if (source !== null && source !== undefined) {
                    shallowAssign(Object(target), Object(source));
                }
            });
            return target;
        };
    }());
}
One solution that worked for me was to use bundle.add.
I split my bundle in 2 parts: app.js for app code, and appLibs.js for libraries (this one will be cached, as it does not change often).
See https://github.com/sogko/gulp-recipes/tree/master/browserify-separating-app-and-vendor-bundles
For appLibs.js I use bundle.add for polyfills, as they must be loaded when the script is loaded, while I use bundle.require for other libs that will be loaded only when required inside app.js.
polyfills.forEach(function(polyfill) {
    b.add(polyfill);
});
libs.forEach(function(lib) {
    b.require(lib);
});
The page loads these 2 bundles in order:
<head>
    ...
    <script type="text/javascript" src="appLibs.js" crossorigin></script>
    <script type="text/javascript" src="app.js" crossorigin></script>
    ...
</head>
This way it seems safe to assume that all polyfills will be loaded even before other libs are initialized. Not sure it's the best option but it worked for me.
My complete setup:
"use strict";
var browserify = require('browserify');
var gulp = require('gulp');
var gutil = require('gulp-util');
var handleErrors = require('../util/handleErrors');
var source = require('vinyl-source-stream');
var watchify = require("watchify");
var livereload = require('gulp-livereload');
var gulpif = require("gulp-if");
var buffer = require('vinyl-buffer');
var uglify = require('gulp-uglify');
// polyfills should be automatically loaded, even if they are never required
var polyfills = [
"intl"
];
var libs = [
"ajax-interceptor",
"autolinker",
"bounded-cache",
"fuse.js",
"highlight.js",
"imagesloaded",
"iscroll",
"jquery",
"keymaster",
"lodash",
"medium-editor",
"mime-db",
"mime-types",
"moment",
"packery",
"q",
"rangy",
"spin.js",
"steady",
"store",
"string",
"uuid",
"react-dnd"
];
// permits to create a special bundle for vendor libs
// See https://github.com/sogko/gulp-recipes/tree/master/browserify-separating-app-and-vendor-bundles
gulp.task('browserify-libs', function () {
var b = browserify({
debug: true
});
polyfills.forEach(function(polyfill) {
b.add(polyfill);
});
libs.forEach(function(lib) {
b.require(lib);
});
return b.bundle()
.on('error', handleErrors)
.pipe(source('appLibs.js'))
// TODO use node_env instead of "global.buildNoWatch"
.pipe(gulpif(global.buildNoWatch, buffer()))
.pipe(gulpif(global.buildNoWatch, uglify()))
.pipe(gulp.dest('./build'));
});
// Inspired by http://truongtx.me/2014/08/06/using-watchify-with-gulp-for-fast-browserify-build/
gulp.task('browserify',['cleanAppJs','browserify-libs'],function browserifyShare(){
var b = browserify({
cache: {},
packageCache: {},
fullPaths: true,
extensions: ['.jsx'],
paths: ['./node_modules','./src/'],
debug: true
});
b.transform('reactify');
libs.forEach(function(lib) {
b.external(lib);
});
// TODO use node_env instead of "global.buildNoWatch"
if ( !global.buildNoWatch ) {
b = watchify(b);
b.on('update', function() {
gutil.log("Watchify detected change -> Rebuilding bundle");
return bundleShare(b);
});
}
b.on('error', handleErrors);
//b.add('app.js'); // It seems to produce weird behaviors when both using "add" and "require"
// expose does not seem to work well... see https://github.com/substack/node-browserify/issues/850
b.require('app.js',{expose: 'app'});
return bundleShare(b);
});
function bundleShare(b) {
return b.bundle()
.on('error', handleErrors)
.pipe(source('app.js'))
.pipe(gulp.dest('./build'))
// TODO use node_env instead of "global.buildNoWatch"
.pipe(gulpif(!global.buildNoWatch, livereload()));
}
Or use the polyfill service at
https://cdn.polyfill.io/v2/docs/

How to gzip content that's passed through a piped readStream

I'm currently working on a project that requires the content to be gzipped before it's sent back to the browser.
I'm currently using a simple read stream and piping the data to the response of a request, but I'm not sure of the best way to gzip content without blocking requests.
The line that sends the data is:
require('fs').createReadStream(self.staticPath + Request.url).pipe(Response);
The following class is the static handler object:
(function(){
    var StaticFeeder = function()
    {
        this.staticPath = process.cwd() + '/application/static';
        this.contentTypes = require('./contenttypes')
    }

    StaticFeeder.prototype.handle = function(Request,Response,callback)
    {
        var self = this;
        if(Request.url == '/')
        {
            return false;
        }
        if(Request.url.indexOf('../') > -1)
        {
            return false;
        }
        require('path').exists(this.staticPath + Request.url,function(isthere){
            /*
             * If no file exists, pass back to the main handler and return
             */
            if(isthere === false)
            {
                callback(false);
                return;
            }
            /*
             * Get the extension if possible
             */
            var ext = require('path').extname(Request.url).replace('.','')
            /*
             * Get the Content-Type
             */
            var ctype = self.contentTypes[ext] !== undefined ? self.contentTypes[ext] : 'application/octet-stream';
            /*
             * Send the Content-Type
             */
            Response.setHeader('Content-Type',ctype);
            /*
             * Create a readable stream and send the file
             */
            require('fs').createReadStream(self.staticPath + Request.url).pipe(Response);
            /*
             * Tell the main handler we have dealt with the response
             */
            callback(true);
        })
    }

    module.exports = new StaticFeeder();
})();
Can anyone help me get around this problem? I haven't a clue how to tell the piping to compress with gzip.
Thanks
Actually, I have a blog post on just this thing. http://dhruvbird.blogspot.com/2011/03/node-and-proxydecorator-pattern.html
You will need to run:
npm install compress -g
before using it, though.
The basic idea revolves around using pipes to add functionality.
However, for your use case, you would be better off putting node.js behind nginx to do all the gzip'ing, since node.js is a single process (actually not) and the gzip routines would take up your process's CPU.
You can just pipe it through a compression stream:
var fs = require('fs')
var zlib = require('zlib')

fs.createReadStream(file)
    .pipe(zlib.createGzip())
    .pipe(Response)
This assumes the file is not already compressed and that you have already set all the headers for the response.
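On that note, a slightly fuller sketch (an adaptation of the handler above, with assumed helper and parameter names: it checks the client's Accept-Encoding before gzipping and sets Content-Encoding accordingly):

var fs = require('fs');
var zlib = require('zlib');

function sendFile(filePath, Request, Response, ctype) {
    // Only gzip when the client advertises support for it.
    var acceptsGzip = /\bgzip\b/.test(Request.headers['accept-encoding'] || '');
    Response.setHeader('Content-Type', ctype);
    var raw = fs.createReadStream(filePath);
    if (acceptsGzip) {
        Response.setHeader('Content-Encoding', 'gzip');
        raw.pipe(zlib.createGzip()).pipe(Response);
    } else {
        raw.pipe(Response);
    }
}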
