I am implementing HTTP/2 (SPDY) server push using the node module spdy. I have followed indutny's docs and have been doing test runs implementing his example in my server.js.
The problem is twofold: I am not getting any errors in the log(s), nor am I seeing the alert in the browser. I don't see any change in the Developer Console either. If I set a bogus push URL, I get no response/errors either. I believe, in theory, the Priority should change from Medium to High(?). See screenshot.
Is there another way for me to test whether the push is being made to the browser? Or do I have something wrong in my script (please check for inconsistencies)? Also, what should go in stream.on('error', function() {});?
Testing in Chrome (on a Chromebook), Node.js v5.1.0, npm v3.3.12 - HTTP/2 support is verified in Chrome.
server.js:
var environment = '../env/' + process.env.NODE_ENV
  // Process User config
  , fS = require('fs')
  , jsonFile = fS.readFileSync(environment + '/config.json')
  , jsonString, hostIp, hostPort, cacheExp, cps;

try {
  jsonString = JSON.parse(jsonFile);
  var SERV_HOST = jsonString['hostIp']
    , SERV_PORT = jsonString['hostPort']
    , CACHE_EXP = jsonString['cacheExp']
    , CPS = jsonString['cps']
    , xPowerBy = 'OceanPress'
    , xFrameOptions = 'DENY'
    , xXSSProtection = '1; mode=block'
    , xContentTypeOption = 'nosniff';
} catch (err) {
  console.log('There is an error parsing the json file: ' + err);
}
// Load modules
var fs = require('fs')
  , watch = require('staticsmith-watch')
  , buffet = require('buffet')({root: environment + '/_public'})
  , spdy = require('spdy')
  // spdy options
  , options = {
      key: fs.readFileSync(environment + '/keys/key.pem')
    , cert: fs.readFileSync(environment + '/keys/cert.pem')
    // SPDY-specific options
    , spdy: {
        protocols: [ 'h2', 'spdy/3.1', 'spdy/3', 'spdy/2', 'http/1.1', 'http/1.0' ]
      , plain: false
      , maxStreams: 200
      , connection: {
          windowSize: 1024 * 1024
        , autoSpdy31: false
        }
      }
    // Set ip and port
    , host: SERV_HOST
    , port: SERV_PORT
    }
  // Security header options
  , security = [
      { name: 'X-Powered-By', option: xPowerBy }
    , { name: 'x-frame-options', option: xFrameOptions }
    , { name: 'X-XSS-Protection', option: xXSSProtection }
    , { name: 'X-Content-Type-Options', option: xContentTypeOption }
    , { name: 'Cache-Control', option: CACHE_EXP }
    , { name: 'Content-Security-Policy', option: CPS }
    , { name: 'server', option: 'Who knows' }
    ];
if (process.env.NODE_ENV == 'production') {
  spdy.createServer(options, function(req, res) {
    // Add Content Security Rules
    for (var i = 0; i < security.length; i++) {
      res.setHeader(security[i].name, security[i].option);
    }
    // #see https://www.npmjs.com/package/buffet
    buffet(req, res, function (err, result) {
      // Push JavaScript asset (main.js) to the client
      var stream = res.push('/js/main.js', {
        req: {'accept': '*/*'},
        res: {'content-type': 'application/javascript'}
      });
      stream.on('acknowledge', function() {
        console.log("Stream ACK");
      });
      stream.on('error', function() {
        console.error("stream ERR");
      });
      stream.end('alert("hello from push stream!");');
      // write main response body and terminate stream
      res.end('<script src="/js/main.js"></script>');
      // There was an error serving the file? Throw it!
      if (err) {
        console.error("Error serving " + req.url + " - " + err.message);
        // Respond to the client
        res.writeHead(err.status, err.headers);
      }
    });
  }).listen(options.port, options.host);
  console.log("serving at https://" + options.host + ":" + options.port);
  console.log("On Node v" + process.versions.node);
  console.log("On npm v" + process.versions.npm);
  watch({
    pattern: '**/*',
    livereload: true,
  });
}
UPDATE:
I have also added:
stream.on('acknowledge', function() {
  console.log('stream ACK');
});
There is no console log written - It's like the function is dead.
Dev Console with push-stream (main.js):
There are a few problems here.
The buffet callback is only invoked when the requested URL does not match a file on disk. Just like express middleware, it's essentially a next function. Thus, you're never actually pushing anything.
The first argument to res.push is a URL, not a filesystem path.
res.push will not exist on ≤ HTTP/1.1 connections; you need to make sure it's there or you'll throw an uncaught exception (and crash).
Here's a reduced working example.
spdy.createServer({
  key: fs.readFileSync('./s.key'),
  cert: fs.readFileSync('./s.crt')
}, function(req, res) {
  if (req.url == '/') {
    res.writeHead(200, { 'Content-Length': 42 });
    res.end('<h1>Hi</h1><script src="main.js"></script>');

    if (res.push) {
      // Push JavaScript asset (main.js) to the client
      var stream = res.push('/main.js', {
        req: { 'accept': '*/*' },
        res: { 'content-type': 'application/javascript' }
      });
      stream.on('error', function(err) {
        console.error(err);
      });
      stream.end('alert("hello from push stream!");');
    }
  } else {
    res.writeHead(404);
    res.end();
  }
}).listen(777);
As far as actually verifying in Chrome that things are being pushed, open a new tab and type chrome://net-internals/#http2. Click the ID of the HTTP/2 session with your server, then click the session in the left-hand pane. Mixed in with the initial request, you'll see something like:
t= 3483 [st= 19] HTTP2_SESSION_RECV_PUSH_PROMISE
--> :method: GET
:path: /main.js
:scheme: https
:authority: localhost:777
--> id = 3
--> promised_stream_id = 4
t= 3483 [st= 19] HTTP2_SESSION_RECV_HEADERS
--> fin = false
--> :status: 200
--> stream_id = 4
t= 3483 [st= 19] HTTP2_SESSION_RECV_DATA
--> fin = true
--> size = 0
--> stream_id = 4
t= 3546 [st= 82] HTTP2_STREAM_ADOPTED_PUSH_STREAM
--> stream_id = 4
--> url = "https://localhost:777/main.js"
(I did not see the Priority of main.js change in the dev tools -- it was still Medium.)
Within the Chrome inspector, I discovered it is quite easy to recognize when a resource has been pushed by the server.
First: Within the Network view/tab, the resource(s) in question will show virtually no request sent and 'Waiting (TTFB)' in the waterfall (see image below).
The theme.min.css & theme.min.js resources are pushed:
Second: After clicking on the pushed resource(s), opening the "Headers" pane and inspecting the "Request Headers" panel at the bottom, check for the Provisional headers are shown warning. If the warning is shown for the resource, then it was pushed. See this SO answer for why you will see this warning.
Headers Inspector:
If you need a little more detailed information about the pushed resource(s), the chrome://net-internals/#http2 method stated in the second part of @josh3736's answer works too. But if you need a quick way to verify that the resource(s) have been pushed and accepted by the client, the waterfall will show this.
Related
I am working on creating a zip of multiple files on the server and streaming it to the client while it is being created. Initially, I was using ArchiverJS. It was working fine if I was appending buffers to it, but it failed when I needed to add streams to it. Then, after some discussion on GitHub, I switched to Node zip-stream, which started working fine, thanks to jntesteves. But as I deployed the code on GKE (Kubernetes), I started getting network failed errors for huge files.
Here is my sample code:
const ZipStream = require("zip-stream");
const https = require("https");
const http = require("http");
const request = require("request");
// `app` below is an Express instance defined elsewhere in the server.

/**
 * @summary Adding the readable stream provided by the https module into zipStreamer using the entry method
 */
const handleEntryCB = ({ readableStream, zipStreamer, fileName, resolve }) => {
  readableStream.on("error", error => {
    console.error("Error while listening readableStream : ", error);
    resolve("done");
  });
  zipStreamer.entry(readableStream, { name: fileName }, error => {
    if (!error) {
      resolve("done");
    } else {
      console.error("Error while listening zipStream readableStream : ", error);
      resolve("done");
    }
  });
};

/**
 * @summary Handling downloading of files using the native https, http and request modules
 */
const handleUrl = ({ elem, zipStreamer }) => {
  return new Promise((resolve, reject) => {
    let fileName = elem.fileName;
    const url = elem.url;
    // Used in most of the cases
    if (url.startsWith("https")) {
      https.get(url, readableStream => {
        handleEntryCB({ readableStream, zipStreamer, url, fileName, resolve, reject });
      });
    } else if (url.startsWith("http")) {
      http.get(url, readableStream => {
        handleEntryCB({ readableStream, zipStreamer, url, fileName, resolve, reject });
      });
    } else {
      const readableStream = request(url);
      handleEntryCB({ readableStream, zipStreamer, url, fileName, resolve, reject });
    }
  });
};

const downloadZipFile = async (data, resp) => {
  let { urls = [] } = data || {};
  if (!urls.length) {
    throw new Error("URLs are mandatory.");
  }
  // Output zip name
  const outputFileName = `Test items.zip`;
  console.log("Downloading using streams.");
  // Initialize zip-stream instance
  const zipStreamer = new ZipStream();
  // Set headers on the response
  resp.writeHead(200, {
    "Content-Type": "application/zip",
    "Content-Disposition": `attachment; filename="${outputFileName}"`,
    "Access-Control-Allow-Origin": "*",
    "Access-Control-Allow-Methods": "GET, POST, OPTIONS"
  });
  // Piping zipStreamer to the resp so that the client starts getting the response
  // as soon as the first chunk is added to the zipStreamer
  zipStreamer.pipe(resp);
  for (const elem of urls) {
    await handleUrl({ elem, zipStreamer });
  }
  zipStreamer.finish();
};

app.post(restPrefix + "/downloadFIle", (req, resp) => {
  try {
    const { data } = req.body || {};
    downloadZipFile(data, resp);
  } catch (error) {
    console.error("[FileBundler] unknown error : ", error);
    if (resp.headersSent) {
      resp.end("Unknown error while archiving.");
    } else {
      resp.status(500).end("Unknown error while archiving.");
    }
  }
});
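(Side note: while writing this up I noticed downloadZipFile is async but is called without await, so the try/catch around it can never catch its rejections. A version that routes errors into the catch block would look roughly like this:)
app.post(restPrefix + "/downloadFIle", async (req, resp) => {
  try {
    const { data } = req.body || {};
    // Awaiting here lets rejections from the async function reach the catch
    await downloadZipFile(data, resp);
  } catch (error) {
    console.error("[FileBundler] unknown error : ", error);
    if (resp.headersSent) {
      resp.end("Unknown error while archiving.");
    } else {
      resp.status(500).end("Unknown error while archiving.");
    }
  }
});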
I tested with 7-8 files of ~4.5 GB each locally and it works fine; when I tried the same on Google Kubernetes Engine, I got a network failed error.
After some more research, I increased the server timeout on k8s to 3000 seconds, and then it started working fine, but I guess increasing the timeout is not a good fix.
Is there anything I am missing at the code level, or can you suggest a good GKE deployment configuration for a server that downloads large files with many concurrent users?
I have been stuck on this for the past 1.5+ months. Please help!
Edit 1: I edited the timeout in the ingress, i.e. Network services -> Load balancing -> edit the timeout in the service.
I am trying to deploy a WebRTC signaling server on Heroku. It works on my local machine but gives:
Error: getaddrinfo ENOTFOUND global.xirsys.net global.xirsys.net
when deployed on Heroku.
I am using simpleWebRTC + signalmaster + XirSys
The official doc from xirSys shows:
// Node Get ICE STUN and TURN list
var https = require("https");
var options = {
  host: "global.xirsys.net",
  path: "/_turn/Acumany",
  method: "PUT",
  headers: {
    "Authorization": "Basic " + new Buffer("acumany:4b6aea04-6152-11e7-9d16-3fa9b82ffd4f").toString("base64")
  }
};
var httpreq = https.request(options, function(httpres) {
  var str = "";
  httpres.on("data", function(data) { str += data; });
  httpres.on("error", function(e) { console.log("error: ", e); });
  httpres.on("end", function() {
    console.log("ICE List: ", str);
  });
});
httpreq.end();
And I used axios:
axios.put("https://acumany:4b6aea04-6152-11e7-9d16-3fa9b82ffd4f@global.xirsys.net/_turn/Acumany")
  .then((res) => {
    var result = res.data;
    var iceServers = result.v.iceServers;
    var turnservers = [],
        stunservers = [];
    iceServers.forEach(function (server) {
      if (server.url.indexOf("stun:") != -1) {
        stunservers.push(server);
      } else {
        turnservers.push(server);
      }
    });
    console.log("emitting server info => ", stunservers, turnservers);
    client.emit('stunservers', stunservers || []);
    client.emit('turnservers', turnservers);
  })
  .catch(function (err) {
    console.log("axios error => ", err);
  });
My guess is that Heroku switches http/https in its own load balancer.
How can I change this code to make it work (either with https or axios)?
global.xirsys.net is a dynamic domain which geographically routes your request. Maybe something about that is incompatible with Heroku. Try a static Xirsys endpoint instead, like ss.xirsys.com for Singapore or us.xirsys.com for the US east coast.
You can see a list of endpoints here: https://us.xirsys.com:9000/api-intro
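For example, your axios call pinned to the US east coast endpoint would look something like this (a sketch - same credentials and channel as above, only the host swapped out):
axios.put("https://acumany:4b6aea04-6152-11e7-9d16-3fa9b82ffd4f@us.xirsys.com/_turn/Acumany")
  .then((res) => {
    var iceServers = res.data.v.iceServers;
    console.log("ICE servers => ", iceServers);
  })
  .catch(function (err) {
    console.log("axios error => ", err);
  });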
I have been working with JupyterHub's configurable-http-proxy, and I have been adding the options necessary for the proxy to handle clients' ssl certificates without having to use the command-line options.
My main goal is that I want to take in a clients request to the proxy and add their certificate information to the header. Once in the header, I will use jupyterhub's authenticator to craft a username.
My issue is that when I use the proxy.on('proxyReq', ...) method available for http-proxy to set the header, I get this error: [Error: Can't set headers after they are sent.]
I have been looking all over the code to see where a response/request is being written or sent, but I cannot find it.
Here is the ConfigurableProxy function code; I can give you more if needed:
function ConfigurableProxy (options) {
  var that = this;
  this.options = options || {};
  this.trie = new trie.URLTrie();
  this.auth_token = this.options.auth_token;
  this.includePrefix = options.includePrefix === undefined ? true : options.includePrefix;
  this.routes = {};
  this.host_routing = this.options.host_routing;
  this.error_target = options.error_target;
  if (this.error_target && this.error_target.slice(-1) !== '/') {
    this.error_target = this.error_target + '/'; // ensure trailing /
  }
  this.error_path = options.error_path || path.join(__dirname, 'error');
  if (this.options.default_target) {
    this.add_route('/', {
      target: this.options.default_target
    });
  }
  options.ws = true;
  options.secure = true;
  // These are the ssl options
  options.ssl = {
    // Right now the key and cert are paths on my computer,
    // but these can be changed.
    key: fs.readFileSync('/Users/grantherman/Desktop/jupyterHubCSProject/ssl/server.key'),
    cert: fs.readFileSync('/Users/grantherman/Desktop/jupyterHubCSProject/ssl/server.crt'),
    requestCert: true,
    // Right now this is set to false, but if we add a CA to these options
    // and set this to true, the proxy will reject all unknown ssl certs
    rejectUnauthorized: false
  };
  var response = [];
  var data = [];
  var proxy = this.proxy = httpProxy.createProxyServer(options);
  proxy.on('proxyReq', function(proxyReq, req, res, options) {
    console.log("proxy request");
    try {
      proxyReq.setHeader('X-Special-Proxy-Header', req.socket.getPeerCertificate());
    } catch (err) {
      console.log(err);
    }
  });
  proxy.on('data', function(chunk, req, res, options) {
    data.push(chunk);
  });
  proxy.on('proxyRes', function(proxyRes, req, res, options) {
    response.push(proxyRes);
  });
  proxy.on('error', function(error, req, res, options) {
    log.add(error);
  });
  proxy.on('close', function (req, socket, head) {
    // view disconnected websocket connections
    console.log('Client disconnected');
  });
  // tornado-style regex routing,
  // because cross-language cargo-culting is always a good idea
  this.api_handlers = [
    [ /^\/api\/routes(\/.*)?$/, {
      get: bound(this, authorized(this.get_routes)),
      post: json_handler(bound(this, authorized(this.post_routes))),
      'delete': bound(this, authorized(this.delete_routes))
    } ]
  ];
I think this is going to require modifications to configurable-http-proxy itself. The place to add headers is on the original req object prior to initiating the proxied request, in ConfigurableProxy.prototype.handle_proxy.
It would look something like:
ConfigurableProxy.prototype.handle_proxy = function (kind, req, res) {
  ...
  req.headers['X-My-Header'] = 'My-Value';

  // dispatch the actual method
  this.proxy[kind].apply(this.proxy, args);
Adding a hook to CHP for modifying the request on its way through should make this doable without modifying the CHP source.
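A minimal sketch of that hook approach - wrapping handle_proxy from your own startup code instead of editing CHP (the require path and the getPeerCertificate call are assumptions based on the code above; adjust them for your CHP version):
var ConfigurableProxy = require('configurable-http-proxy/lib/configproxy').ConfigurableProxy;

var originalHandleProxy = ConfigurableProxy.prototype.handle_proxy;
ConfigurableProxy.prototype.handle_proxy = function (kind, req, res) {
  // Attach the client certificate info before the proxied request is
  // created, so no headers have been sent yet at this point.
  if (req.socket && req.socket.getPeerCertificate) {
    req.headers['X-Special-Proxy-Header'] = JSON.stringify(req.socket.getPeerCertificate());
  }
  return originalHandleProxy.apply(this, arguments);
};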
Regular client-initiated requests to the node server are captured fine in Fiddler. However, requests sent from node to a web service are not captured. It did not help to pass proxy config (127.0.0.1:8888) to the request method. How can I route the request messages through Fiddler?
var http = require('http');
var request = require('request');

request.get(webserviceURL, {
  "auth": { "user": "user", "pass": "pass", "sendImmediately": true },
  "proxy": { "host": "127.0.0.1", "port": 8888 }
}, function (error, response) {
  console.log("response received");
});
Request repo: https://github.com/mikeal/request
I just tried to do this myself (using Fiddler and the request library from npm). Here's how I got it working:
process.env['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'; // Ignore 'UNABLE_TO_VERIFY_LEAF_SIGNATURE' authorization error

// Issue the request
request(
  {
    method: "GET",
    uri: "https://secure.somewebsite.com/",
    proxy: "http://127.0.0.1:8888" // Note the fully-qualified path to the Fiddler proxy. No "https" is required, even for https connections to the outside.
  },
  function(err, response, body) {
    console.log("done");
  }
);
This is with Fiddler2 using the default port and proxy options (and no proxy authentication).
Fiddler works by setting your "Internet Options" (from the Start menu) "Connections" > "LAN Settings" > "Proxy Server" to its port, thus making all HTTP traffic (from clients which obey this setting) go through it.
You should point your node.js client lib at that proxy; the settings to use are shown in that options dialog after you start Fiddler.
The proxy option should be a full url, like this:
proxy : "http://127.0.0.1:8888"
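For example (a sketch - webserviceURL as in the question, everything else default):
request.get(webserviceURL, { proxy: "http://127.0.0.1:8888" }, function (error, response) {
  console.log("response received");
});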
To do this on an ad-hoc basis, without changing your code, you can use environment variables.
Request respects:
HTTP_PROXY
HTTPS_PROXY
NO_PROXY
So, to proxy, just set these in your console before running your process.
For example, to set up the http and https proxies, use:
set HTTP_PROXY="http://127.0.0.1:8888"
set HTTPS_PROXY="http://127.0.0.1:8888"
set NODE_TLS_REJECT_UNAUTHORIZED=0
The last line prevents SSL issues when going through the Fiddler proxy.
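If you'd rather do it from inside Node, the same setup can be done in code (a sketch; the variables must be set before the first request goes out):
// Equivalent setup from within the process itself
process.env.HTTP_PROXY = 'http://127.0.0.1:8888';
process.env.HTTPS_PROXY = 'http://127.0.0.1:8888';
process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; // avoid SSL errors from Fiddler's certificate

const request = require('request');
request.get('https://secure.somewebsite.com/', function (err, response) {
  console.log(err || response.statusCode);
});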
I've been wanting the same... an equivalent of the Network tab in Chrome DevTools, only for Node.js. Unfortunately, it doesn't appear as though one exists. I don't have Fiddler on macOS, so this is how I went about stubbing the require('http') methods to log and pass through. Leaving this here in case I need it again or someone else finds it helpful. You can turn it on by attaching a debugger and calling require('filename')() on the file containing this script.
module.exports = () => {
  const http = require('http');
  http._request = http.request;
  global.DO_LOG_AJAX = true;
  const log = str => {
    if (global.DO_LOG_AJAX) {
      console.debug(str);
    }
  };
  const flushLog = (requestLines, responseLines) => {
    if (global.DO_LOG_AJAX) {
      log([
        '----------------Begin Request-----------------------------------',
        ...requestLines,
        '----------------End Request / Begin Response--------------------',
        ...responseLines,
        '----------------End Response------------------------------------',
      ].join('\n'));
    }
  };
  let write;
  let end;
  http.request = (...requestParams) => {
    const req = http._request(...requestParams);
    const { method, path, headers, host, port } = requestParams[0];
    const requestLogLines = [];
    requestLogLines.push(`${method} ${path}`);
    requestLogLines.push(`Host: ${host}:${port}`);
    for (const header of Object.keys(headers)) {
      requestLogLines.push(`${header}: ${headers[header]}`);
    }
    // Cache the original write/end once (they are shared prototype methods)
    write = write || req.write;
    end = end || req.end;
    req.on('error', err => {
      log({ err });
    });
    req._write = write;
    req._end = end;
    const requestBody = [];
    req.write = (...writeParams) => {
      requestBody.push(writeParams[0].toString());
      return req._write(...writeParams);
    };
    req.end = (...endParams) => {
      if (endParams[0]) {
        requestBody.push(endParams[0].toString());
      }
      requestLogLines.push('');
      requestLogLines.push(requestBody.join(''));
      return req._end(...endParams);
    };
    const responseLogLines = [];
    req.once('response', response => {
      const responseBody = [];
      responseLogLines.push(`${response.statusCode} ${response.statusMessage}`);
      for (const header of Object.keys(response.headers)) {
        responseLogLines.push(`${header}: ${response.headers[header]}`);
      }
      const onData = chunk => {
        responseBody.push(chunk.toString());
      };
      const onClose = err => {
        responseLogLines.push('');
        responseLogLines.push(responseBody.join(''));
        responseLogLines.push('');
        responseLogLines.push(`--- ERROR --- ${err.toString()}`);
        flushLog(requestLogLines, responseLogLines);
        response.removeListener('data', onData);
      };
      const onEnd = () => {
        responseLogLines.push('');
        responseLogLines.push(responseBody.join(''));
        flushLog(requestLogLines, responseLogLines);
        response.removeListener('data', onData);
      };
      response.on('data', onData);
      response.once('close', onClose);
      response.once('end', onEnd);
    });
    return req;
  };
};
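Usage is something like this (a sketch - note the stub above destructures an options object, so pass options rather than a URL string):
// Assuming the script above is saved as httpLogger.js
require('./httpLogger')();

const http = require('http');
http.request(
  { method: 'GET', host: 'example.com', port: 80, path: '/', headers: { accept: '*/*' } },
  res => res.resume() // the stub's own 'data' listener captures and logs the body
).end();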
I have two node servers, one on port 5000 (call it "Face") and another on port 5001 (call it "Hands")
Both are started via a foreman procfile at the same time. Ports are fixed and the url I'm targeting works in the browser.
When the Hands starts up, it needs to talk to the Face (Facepalm?) and register itself. However, the code below doesn't seem to be working. (This is CoffeeScript-generated JS.)
Register gets called during server initialization, after the http server has been started. In case it was a timing issue, I kick off the register function with a setTimeout() of 2 seconds. I know the page that it's hitting (/home/register) is available and working.
Right now I can see it get to the "Posting to" console log line. On the Face, I have put a console.log in the register code and it's never logging anything - meaning I don't think it's actually getting hit. (It DOES log if hit from the browser.) And nothing errors out - it just calls the request and then wanders off to get a sandwich.
Both servers are "roll your own" - not using any frameworks. Let me know if you see a weird typo or need more info. Thanks!
register = function() {
  var _this = this;
  console.log('Registering with Face Server');
  return post_face('/home/register', GLOBAL.data, function(rs) {
    console.log(rs);
    if (rs.registered) {
      GLOBAL.data.registered = true;
      return console.log("Registered with face at " + GLOBAL.config.face_host + ":" + GLOBAL.config.face_port);
    } else {
      throw "ERROR: Could not register with face server! " + GLOBAL.config.face_host + ":" + GLOBAL.config.face_port;
      return false;
    }
  });
};

post_face = function(path, data, cb) {
  var post_data, post_options, post_req;
  post_data = querystring.stringify({
    'registration': data
  });
  post_options = {
    host: GLOBAL.config.face_host,
    port: GLOBAL.config.face_port,
    path: path,
    method: 'POST',
    headers: {
      'Content-Type': 'application/x-www-form-urlencoded',
      'Content-Length': post_data.length
    }
  };
  console.log("Posting to " + post_options.host + ":" + post_options.port);
  post_req = http.request(post_options, function(res) {
    var response,
        _this = this;
    console.log(res);
    response = "";
    res.setEncoding('utf8');
    res.on('data', function(chunk) {
      return response += chunk;
    });
    return res.on('end', function() {
      return cb.call(_this, response);
    });
  });
  return true;
};
Thanks to Bill above, the answer was that I wasn't actually posting the data and ending the request! Bad copy/paste/edit from some samples I was referring to. Here are the last two lines of code I should have had:
post_req.write(post_data);
post_req.end();
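In context, the tail of post_face then looks like this (sketch):
post_req = http.request(post_options, function(res) {
  // ... response handling as above ...
});
// Actually send the POST body and finish the request -
// without these the server never receives a complete request:
post_req.write(post_data);
post_req.end();
return true;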