Unable to gracefully close HTTP/2 client stream in Node.js

I have the following code, which is heavily inspired by the client-side example in the official Node.js HTTP/2 documentation:
import http2 from 'http2';
// The `http2.connect` method creates a new session with somedomain.com
const session = http2.connect('https://somedomain.com');
// Handle errors
session.on('error', (err) => console.error(err));
const req = session.request({
  ':authority': 'somedomain.com',
  ':path': '/some-path',
  ':method': 'GET',
  ':scheme': 'https',
  'accept': 'text/html',
});
// To fetch the response body, we set the encoding
// we want and initialize an empty data string
req.setEncoding('utf8');
let data = '';
// append response data to the data string every time
// we receive new data chunks in the response
req.on('data', (chunk) => { data += chunk; });
// Once the response is finished, log the entire data
// that we received
req.on('end', () => {
  console.log(`\n${data}`);
  session.close();
});
req.on('error', (error) => {
  console.log(error);
});
req.end();
Please note that the actual hostname has been replaced with somedomain.com. Running this logs the data as expected; however, the process doesn't shut down gracefully, and I get the following unhandled error in the terminal:
node:events:504
throw er; // Unhandled 'error' event
^
Error [ERR_HTTP2_STREAM_ERROR]: Stream closed with error code NGHTTP2_FLOW_CONTROL_ERROR
at new NodeError (node:internal/errors:371:5)
at ClientHttp2Stream._destroy (node:internal/http2/core:2330:13)
at _destroy (node:internal/streams/destroy:102:25)
at ClientHttp2Stream.destroy (node:internal/streams/destroy:64:5)
at Http2Stream.onStreamClose (node:internal/http2/core:544:12)
Emitted 'error' event on ClientHttp2Stream instance at:
at emitErrorNT (node:internal/streams/destroy:157:8)
at emitErrorCloseNT (node:internal/streams/destroy:122:3)
at processTicksAndRejections (node:internal/process/task_queues:83:21) {
code: 'ERR_HTTP2_STREAM_ERROR'
}
I understand it is possible that the server is behaving incorrectly. However, there should be a way for the Node.js client to close the session gracefully. Regardless, what would be the ideal way to handle such errors? I've already tried listening to session.on('error') and req.on('error'), but that doesn't work.
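For reference, here is a sketch of the most explicit shutdown sequence I've found (untested against a misbehaving server; http2.constants ships with the core module): close the stream from the client side with an explicit NGHTTP2_NO_ERROR code, and only close the session once the stream reports that it is fully closed. This is a drop-in replacement for the 'end' handler above:

req.on('end', () => {
  console.log(`\n${data}`)
  // Close the stream from our side with an explicit NO_ERROR code, so the
  // client, rather than the server's RST_STREAM, decides the terminal state.
  // The callback is registered for the stream's 'close' event.
  req.close(http2.constants.NGHTTP2_NO_ERROR, () => {
    session.close()
  })
})

This may not silence a flow-control error that the server sends first, but it makes the client's intent explicit and gives the session a chance to wind down before the stream is destroyed.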


How to catch UnhandledPromiseRejectionWarning for GCS WriteStream

Observed Application Behavior
I'm getting an UnhandledPromiseRejectionWarning: Error: Upload failed when using @google-cloud/storage in Node.js.
These errors surface when processing thousands of requests. Only a small percentage of them fail, but because the errors can't be handled and the error message carries no useful context, it's very difficult to determine WHICH files are failing.
I know that, in general, promises must have a .catch or be wrapped in a try/catch block. But in this case I'm using a write stream, and I'm a little confused about where the rejected promise actually lives and how I would intercept it. The stack trace is unhelpful, as it only contains library code:
UnhandledPromiseRejectionWarning: Error: Upload failed
at Request.requestStream.on.resp (.../node_modules/gcs-resumable-upload/build/src/index.js:163:34)
at emitTwo (events.js:131:20)
at Request.emit (events.js:214:7)
at Request.<anonymous> (.../node_modules/request/request.js:1161:10)
at emitOne (events.js:121:20)
at Request.emit (events.js:211:7)
at IncomingMessage.<anonymous> (.../node_modules/request/request.js:1083:12)
at Object.onceWrapper (events.js:313:30)
at emitNone (events.js:111:20)
at IncomingMessage.emit (events.js:208:7)
My Code
The code that's creating the writeStream looks like this:
const {join} = require('path')
const {Storage} = require('@google-cloud/storage')

module.exports = (config) => {
  const storage = new Storage({
    projectId: config.gcloud.project,
    keyFilename: config.gcloud.auth_file
  })
  return {
    getBucketWS(path, contentType) {
      const {bucket, path_prefix} = config.gcloud
      // add path_prefix if we have one
      if (path_prefix) {
        path = join(path_prefix, path)
      }
      let setup = storage.bucket(bucket).file(path)
      let opts = {}
      if (contentType) {
        opts = {
          contentType,
          metadata: {contentType}
        }
      }
      const stream = setup.createWriteStream(opts)
      stream._bucket = bucket
      stream._path = path
      return stream
    }
  }
}
And the consuming code looks like this:
const gcs = require('./gcs-helper.js')

module.exports = ({writePath, contentType, item}, done) => {
  let ws = gcs.getBucketWS(writePath, contentType)
  ws.on('error', (err) => {
    err.message = `Could not open gs://${ws._bucket}/${ws._path}: ${err.message}`
    done(err)
  })
  ws.on('finish', () => {
    done(null, {
      path: writePath,
      item
    })
  })
  ws.write(item)
  ws.end()
}
Given that I'm already listening for the error event on the stream, I don't see what else I can do here. There isn't a promise happening at the level of @google-cloud/storage that I'm consuming.
Digging into the @google-cloud/storage Library
The first line of the stack trace brings us to a code block in the gcs-resumable-upload node module that looks like this:
requestStream.on('complete', resp => {
  if (resp.statusCode < 200 || resp.statusCode > 299) {
    this.destroy(new Error('Upload failed'));
    return;
  }
  this.emit('metadata', resp.body);
  this.deleteConfig();
  this.uncork();
});
This passes the error to the destroy method on the stream. The stream is created by the @google-cloud/common project's utility module, which in turn uses the duplexify node module to create the stream. The destroy method is defined on the duplexify stream and documented in its README.
Reading the duplexify code, I see that it first checks this._ondrain before emitting an error. Maybe I can provide a callback to avoid this error being unhandled?
I tried ws.write(item, null, cb) and still got the same UnhandledPromiseRejectionWarning. I tried ws.end(item, null, cb) and even wrapped the .end call in a try/catch, and ended up getting this error, which crashed the process entirely:
events.js:183
throw er; // Unhandled 'error' event
^
Error: The uploaded data did not match the data from the server. As a precaution, the file has been deleted. To be sure the content is the same, you should try uploading the file again.
at delete (.../node_modules/@google-cloud/storage/build/src/file.js:1295:35)
at Util.handleResp (.../node_modules/@google-cloud/common/build/src/util.js:123:9)
at retryRequest (.../node_modules/#google-cloud/common/build/src/util.js:404:22)
at onResponse (.../node_modules/retry-request/index.js:200:7)
at .../node_modules/teeny-request/build/src/index.js:208:17
at <anonymous>
at process._tickCallback (internal/process/next_tick.js:189:7)
My final code looks something like this:
let ws = gcs.getBucketWS(writePath, contentType)
const handleErr = (err) => {
  if (err) err.message = `Could not open gs://${ws._bucket}/${ws._path}: ${err.message}`
  done(err)
}
ws.on('error', handleErr)
// trying to do everything we can to handle these errors
// for some reason we still get UnhandledPromiseRejectionWarning
try {
  ws.write(item, null, err => {
    handleErr(err)
  })
  ws.end()
} catch (e) {
  handleErr(e)
}
Conclusion
It's still a mystery to me how a user of the @google-cloud/storage library, or duplexify for that matter, is supposed to perform proper error handling. Comments from the maintainers of either project would be appreciated. Thanks!
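For anyone hitting the same wall, here is the defensive sketch I'd try next (my own workaround, not an official pattern from either library; it assumes a Node version with stream.finished, i.e. v10+): funnel the stream's outcome through a single callback so the file context can be attached in one place, and register a process-level unhandledRejection hook purely so that escaped rejections are at least logged instead of becoming context-free warnings.

const {finished} = require('stream')
const gcs = require('./gcs-helper.js')

// Last resort: log whatever still escapes the library as a rejected promise.
// This cannot recover the file path, but it keeps the failure visible.
process.on('unhandledRejection', (reason) => {
  console.error('unhandled rejection from a dependency:', reason)
})

module.exports = ({writePath, contentType, item}, done) => {
  const ws = gcs.getBucketWS(writePath, contentType)
  // finished() invokes the callback exactly once, for either 'error' or
  // 'finish', so every failure gets the bucket/path context attached.
  finished(ws, (err) => {
    if (err) {
      err.message = `Could not write gs://${ws._bucket}/${ws._path}: ${err.message}`
      return done(err)
    }
    done(null, {path: writePath, item})
  })
  ws.end(item)
}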

How can I gracefully handle a failed tcpSocket.connect attempt?

The following code causes an error when there is no existing TCP server to communicate with on the specified host:
const net = require('net');
const argv = require('minimist')(process.argv.slice(2));
try {
  var tcpSocket = new net.Socket();
  tcpSocket.connect(argv.tcpport, argv.tcphost, function onConnected() {
    console.log('connected');
    tcpSocket.on('data', function onIncoming(data) {
      console.log(data);
    });
    tcpSocket.on('close', function onClose(data) {
      tcpSocketConnected = false;
    });
    tcpSocketConnected = true;
  });
} catch (err) {
  console.log("PRINT ME: ", err);
}
Error:
events.js:183
throw er; // Unhandled 'error' event
^
Error: connect ECONNREFUSED 127.0.0.1:1906
at Object._errnoException (util.js:992:11)
at _exceptionWithHostPort (util.js:1014:20)
at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1186:14)
I am unable to catch the error even though I wrap the code in a try...catch.
Why does my catch block not catch the error?
How can I gracefully handle the error?
You should be able to handle the error explicitly via the event emitter API, the same way you handled close and data. A try/catch cannot catch it because connect() fails asynchronously: by the time the 'error' event fires, the synchronous try block has long since exited.
tcpSocket.on('error', handleError)
From the docs:
Event: 'error'
Added in: v0.1.90
<Error>
Emitted when an error occurs. The 'close' event will be called directly following this event.
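Applied to the code from the question, a minimal sketch looks like this. The try/catch can be dropped entirely, since the refused connection is reported through the 'error' event rather than thrown synchronously:

const net = require('net');
const argv = require('minimist')(process.argv.slice(2));

let tcpSocketConnected = false;
const tcpSocket = new net.Socket();

// ECONNREFUSED and other asynchronous failures land here, not in a try/catch.
tcpSocket.on('error', function onError(err) {
  console.log('PRINT ME: ', err);
});
tcpSocket.on('data', function onIncoming(data) {
  console.log(data);
});
tcpSocket.on('close', function onClose() {
  tcpSocketConnected = false;
});

tcpSocket.connect(argv.tcpport, argv.tcphost, function onConnected() {
  console.log('connected');
  tcpSocketConnected = true;
});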

NodeJS -> Error: write after end (only after first request)

In my Express app I have the route below:
router.get('/generatedData', function (req, res) {
  res.setHeader('Connection', 'Transfer-Encoding');
  res.setHeader('Content-Type', 'text/html; charset=utf-8');
  res.setHeader('Transfer-Encoding', 'chunked');
  var Client = someModule.client;
  var client = Client();
  client.on('start', function() {
    console.log('start');
  });
  client.on('data', function(str) {
    console.log('data');
    res.write(str);
  });
  client.on('end', function(msg) {
    client.stop();
    res.end();
  });
  client.on('err', function(err) {
    client.stop();
    res.end(err);
  });
  client.on('end', function() {
    console.log('end');
  });
  client.start();
});
On the first call everything works fine (console output):
We've got ourselves a convoy on port 3000
start
data
data
data
data
data
...
data
end
GET /generatedData 200 208.426 ms - -
I get all the data, res.end() is called, and the request closes successfully.
The problem starts after the first request. I make the exact same request (a new one, of course) and get the following error (console output):
start
data
data
data
events.js:160
throw er; // Unhandled 'error' event
^
Error: write after end
at ServerResponse.OutgoingMessage.write (_http_outgoing.js:439:15)
at Client.<anonymous> (/Users/xxxx/projects/xxxx/routes/index.js:33:17)
at emitOne (events.js:96:13)
at Client.emit (events.js:188:7)
at FSWatcher.<anonymous> (/Users/xxxx/projects/xxxx/lib/someModule.js:116:32)
at emitTwo (events.js:106:13)
at FSWatcher.emit (events.js:191:7)
at FSEvent.FSWatcher._handle.onchange (fs.js:1412:12)
[nodemon] app crashed - waiting for file changes before starting...
This happens without res.end() being called.
I manage to get some data before the crash.
How can I get this error without res.end() being called at all?
Do I somehow keep a reference to the previous res instance?
Thanks,
Asaf
I had the same problem. My module extended EventEmitter, and every listener attached in the router stayed registered, so on the second call there were two event listeners instead of one. Using once instead of on worked for me:
client.once('start', function() {
  console.log('start');
});
instead of
client.on('start', function() {
  console.log('start');
});
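If some of the handlers legitimately need to fire more than once per request ('data', for example), an alternative sketch (inside the same router.get handler, reusing the question's client and res) is to detach the request-scoped listeners when the response finishes, so a shared emitter never writes to a stale res:

// inside router.get('/generatedData', ...)
function onData(str) { res.write(str); }
function onEnd() { cleanup(); client.stop(); res.end(); }
function onErr(err) { cleanup(); client.stop(); res.end(err); }

// Remove this request's listeners so the next request starts with a clean emitter.
function cleanup() {
  client.removeListener('data', onData);
  client.removeListener('end', onEnd);
  client.removeListener('err', onErr);
}

client.on('data', onData);
client.on('end', onEnd);
client.on('err', onErr);
client.start();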

Node.js Http post request on aws lambda Socket Hang up

var http = require('http');
exports.handler = function(event, context) {
  var headers = {
    'content-type': 'application/x-www-form-urlencoded'
  };
  var options = {
    host: 'stage.wings.com',
    path: '/test-lambda',
    form: {
      'days': '3'
    },
    headers: headers
  };
  console.log(options);
  var req = http.request(options, function(response) {
    // Continuously update stream with data
    var body = '';
    response.on('data', function(d) {
      body += d;
    });
    response.on('end', function() {
      // Data reception is done, do whatever with it!
      var parsed = JSON.parse(body);
      console.log("success");
      console.log(parsed);
    });
  });
  // Handler for HTTP request errors.
  req.on('error', function (e) {
    console.error('HTTP error: ' + e.message);
    completedCallback('API request completed with error(s).');
  });
};
My Node version: v0.10.25.
If I execute the file directly, it gives HTTP error: socket hang up.
If I run this function from AWS Lambda, it throws this error:
Lambda error:2016-10-09T23:11:17.200Z 89f2146f-8e75-11e6-9219-b9b32aa0a768 Error: socket hang up
at createHangUpError (_http_client.js:200:15)
at Socket.socketOnEnd (_http_client.js:285:23)
at emitNone (events.js:72:20)
at Socket.emit (events.js:166:7)
at endReadableNT (_stream_readable.js:905:12)
at nextTickCallbackWith2Args (node.js:437:9)
at process._tickDomainCallback (node.js:392:17)
There is a timeout for AWS Lambda; it will hang up after at most 300 seconds.
Here is a little more about it: http://docs.aws.amazon.com/lambda/latest/dg/limits.html
You can use context.getRemainingTimeInMillis(), which returns the remaining time of your Lambda invocation, so you can flush your data before the deadline. If this is intended to run longer than five minutes, you can implement some kind of graceful shutdown and flush your data before the limit.
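Separately, two things stand out in the question's code: form is not an option that http.request understands (it comes from the request library), and req.end() is never called, so the request is never actually sent, which by itself produces a socket hang up. A sketch of the same call with the body actually transmitted (host and path taken from the question; context.succeed/context.fail are the classic callback-style Lambda completions):

var http = require('http');
var querystring = require('querystring');

exports.handler = function(event, context) {
  var body = querystring.stringify({days: '3'});
  var options = {
    host: 'stage.wings.com',
    path: '/test-lambda',
    method: 'POST',
    headers: {
      'content-type': 'application/x-www-form-urlencoded',
      'content-length': Buffer.byteLength(body)
    }
  };
  var req = http.request(options, function(response) {
    var data = '';
    response.on('data', function(d) { data += d; });
    response.on('end', function() {
      context.succeed(JSON.parse(data));
    });
  });
  req.on('error', function(e) {
    context.fail('HTTP error: ' + e.message);
  });
  req.write(body);
  req.end(); // without this, the request is never completed and the socket hangs up
};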

socket hanging and node crashing

I have a node app that posts data to remote APIs and fetches the responses. It works fine, but at times the node server crashes and generates the following error:
events.js:71
throw arguments[1]; // Unhandled 'error' event
^
Error: socket hang up
at createHangUpError (http.js:1264:15)
at Socket.socketCloseListener (http.js:1315:23)
at Socket.EventEmitter.emit (events.js:126:20)
at Socket._destroy.destroyed (net.js:358:10)
at process.startup.processNextTick.process._tickCallback (node.js:244:9)
I googled and found that this happens due to some kind of timeout, but I am not really sure how to overcome it.
Here is the relevant code in my server.js:
if (request.body.company == "CQ") {
  var postData = {
    firstName: request.body.firstname,
    lastName: request.body.lastname,
    Email: request.body.email,
    DayPhoneNumber: request.body.daytimePhone,
    Address1: request.body.addressOne,
    city: request.body.city,
    state: request.body.State,
    postalCode: request.body.zip,
    AreaOfIntrest: request.body.areaOfInterest,
    VendorLocationId: "38404",
    Notes: request.body.Notes,
    GraduationYear: request.body.graduationYear,
    AffiliateLocationId: "12345",
    LocationId: "12345",
    CampaignId: "12345"
  };
  var options = {
    hostname: 'webservices.someapi.com',
    path: '/ILM/Default.ashx?' + qs.stringify(postData),
    method: 'GET'
  };
  var req = http.request(options, function(res) {
    res.on('data', function (chunk) {
      edModel.find({$and: [{daytimePhone: request.body.daytimePhone}, {company: "CQ"}]}, function(err, count) {
        if (count.length == 0) {
          var sr = 'RESPONSE: ' + chunk;
          if (sr.indexOf('status="Error"') == -1) {
            request.body.leadid = sr;
            sr = sr.slice(sr.indexOf("leadid"));
            sr = sr.slice(0, sr.indexOf(">"));
            edDoc = new edModel(request.body);
            edDoc.save();
            response.send({response: sr});
          } else {
            response.send({response: sr});
          }
        } else {
          response.send({response: "<span style='color:red'>Duplicate Lead.<br> A lead with this number already exists in our database</span>"});
        }
      });
    });
  });
  // write data to request body
  req.write('data\n');
  req.end();
}
I have several such if/else conditions in the server.js file.
In Node 0.8.20 there was a bug related to this problem. Try using http.get instead of http.request, or just don't use 0.8.20 if you're on that version.
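Version aside, the crash itself is an unhandled 'error' event on the outgoing request: the code above never attaches an error listener to req. A minimal sketch of the missing handler (reusing the question's options and Express response objects):

var req = http.request(options, function(res) {
  // ... existing response handling from the question ...
});
// Without this listener, a socket hang up becomes an uncaught exception
// and brings the whole server down.
req.on('error', function(err) {
  response.send({response: "<span style='color:red'>Upstream request failed: " + err.message + "</span>"});
});
req.write('data\n');
req.end();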
