How to write on process.stdin with nodejs - node.js

I have a problem with Node.js when trying to write to the stdin of a process I have spawned with the spawn function of child_process:
const spawn = require("child_process").spawn;

class Barotrauma {
  static instance = null;
  server = null;

  constructor() {
    this.server = spawn(
      "F:\\dev\\barotrauma\\steamcmd\\steamapps\\common\\Barotrauma Dedicated Server\\DedicatedServer.exe",
      {
        stdio: [process.stdin, process.stdout, process.stderr],
      }
    );
  }

  static getInstance() {
    if (Barotrauma.instance === null) {
      Barotrauma.instance = new Barotrauma();
    } else {
      return Barotrauma.instance;
    }
  }

  sendCommand(command) {
    //this.server.stdout.write(`${command}\n`)
    // this is a test to get an output on command execution to see if it works
    process.stdin.write("help\n");
  }
}

module.exports = Barotrauma;
So, this snippet of code starts a game server and then sends commands to it on socket events (the socket calls the sendCommand function).
If I type commands in the console it works fine, but if I execute the sendCommand function it crashes with this error:
node:events:498
throw er; // Unhandled 'error' event
^
Error: write EPIPE
at afterWriteDispatched (node:internal/stream_base_commons:160:15)
at writeGeneric (node:internal/stream_base_commons:151:3)
at ReadStream.Socket._writeGeneric (node:net:795:11)
at ReadStream.Socket._write (node:net:807:8)
at writeOrBuffer (node:internal/streams/writable:389:12)
at _write (node:internal/streams/writable:330:10)
at ReadStream.Writable.write (node:internal/streams/writable:334:10)
at Barotrauma.sendCommand (F:\dev\barotrauma\serverManager.js:26:19)
at handleReward (F:\dev\barotrauma\handler.js:4:28)
at WebSocket.connection.onmessage (F:\dev\barotrauma\index.js:26:5)
Emitted 'error' event on ReadStream instance at:
at emitErrorNT (node:internal/streams/destroy:157:8)
at emitErrorCloseNT (node:internal/streams/destroy:122:3)
at processTicksAndRejections (node:internal/process/task_queues:83:21) {
errno: -4047,
code: 'EPIPE',
syscall: 'write'
}
The crash seems to originate from the process.stdin.write call.
Any idea how to solve this?

You might want to refer to options.stdio.
Without more context it's hard to say exactly what's going on, but the following might work as you'd expect:
If you really need the parent and child fds to be connected, you could use pipe for the stdio option and have data propagated between the two processes.
The following snippet (reduced to minimize noise) should do the trick:
const spawn = require("child_process").spawn;

class Barotrauma {
  server = null;

  constructor() {
    // Default `stdio` option is `pipe` for fds 0, 1 and 2 (stdin, stdout and stderr)
    this.server = spawn(
      "F:\\dev\\barotrauma\\steamcmd\\steamapps\\common\\Barotrauma Dedicated Server\\DedicatedServer.exe"
    );
    // Catches incoming messages from the child, if necessary
    this.server.stdout.on('data', (d) => {
      console.log(`Message from child: '${d}'`);
    });
  }

  sendCommand(cmd) {
    // Sends messages to the child
    this.server.stdin.write(`${cmd}\n`);
  }
}
Given you're in a Windows environment, mind that you might want to use overlapped instead of pipe if you need asynchronous I/O to communicate with that application. For more info, refer to the official docs.
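For reference, here is a minimal sketch of that variant, assuming your Node version supports the 'overlapped' stdio value (the executable path is taken from the question):
this.server = spawn(
  "F:\\dev\\barotrauma\\steamcmd\\steamapps\\common\\Barotrauma Dedicated Server\\DedicatedServer.exe",
  {
    // 'overlapped' behaves like 'pipe', but opens the pipes with overlapped (asynchronous) I/O on Windows
    stdio: ['overlapped', 'overlapped', 'overlapped'],
  }
);
You would still read from this.server.stdout and write to this.server.stdin exactly as in the snippet above.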

Related

Handling Selenium webdrivers on Node.js when SIGINT is received

I'm building a Node.js application which utilizes selenium with the chrome webdriver. I'm trying to gracefully handle exit states using the following:
import {Builder} from 'selenium-webdriver';
import {Options} from 'selenium-webdriver/chrome.js';

const chromeOptions = new Options();
chromeOptions.excludeSwitches('enable-logging');
chromeOptions.addArguments('--disable-extensions', '--headless');
chromeOptions.setChromeBinaryPath('./chrome-win/chrome.exe');

const driver = await new Builder()
  .forBrowser('chrome')
  .setChromeOptions(chromeOptions)
  .build();

const cleanup = async (sig) => {
  try {
    await driver.close();
    await driver.quit();
  } catch (e) {
    console.error('EXIT HANDLER ERROR', e);
  }
  process.exit(isNaN(sig) ? 1 : +sig);
};

[
  'beforeExit', 'uncaughtException', 'SIGINT',
  'SIGUSR1', 'SIGUSR2', 'SIGTERM'
].forEach(evt => process.on(evt, cleanup.bind(evt)));
This works for the beforeExit and uncaughtException events, but upon receiving a SIGINT I get the following error:
EXIT HANDLER ERROR Error: ECONNREFUSED connect ECONNREFUSED 127.0.0.1:61290
at ClientRequest.<anonymous> ([PATH_TO_PROJECT]\node_modules\selenium-webdriver\http\index.js:294:15)
at ClientRequest.emit (node:events:527:28)
at Socket.socketErrorListener (node:_http_client:454:9)
at Socket.emit (node:events:527:28)
at emitErrorNT (node:internal/streams/destroy:157:8)
at emitErrorCloseNT (node:internal/streams/destroy:122:3)
at processTicksAndRejections (node:internal/process/task_queues:83:21)
and the chromium processes are left hanging. As far as I can tell, Selenium is intercepting the SIGINT and doing some level of cleanup itself, but that just prevents me from actually calling the proper functions to kill the processes.
My immediate thought was to get the PID(s) of the browser instance(s) created by Selenium and kill them manually, but my research suggests that isn't an option, at least in Node.js.
I just want to make sure this isn't something I'm doing wrong before I open an issue on the Selenium GitHub repository.

NodeJS : Error: read ECONNRESET at TCP.onStreamRead (internal/stream_base_commons.js:111:27)

I'm using polling, as below, to check whether the content of the file has changed; if it has, two other functions are called:
var poll_max_date = AsyncPolling(function (end, err) {
  if (err) {
    console.error(err);
  }
  var stmp_node_id = fs.readFileSync(path.join(__dirname, 'node_id'), "utf8");
  console.log("--------loaded node : " + stmp_node_id);
  if (druid_stmp_node_id != stmp_node_id) {
    // MAX DATA CUT-OFF DRUID QUERY
    druid_exe.max_date_query_fire();
    // DRUID QUERY FOR GLOBAL DATA
    druid_exe.global_druid_query_fire();
    druid_stmp_node_id = stmp_node_id;
  }
  end();
}, 1800000).run(); // 30 mins
It works fine for some time, but then I get the error below after 4-5 hours:
events.js:167
throw er; // Unhandled 'error' event
^
Error: read ECONNRESET
at TCP.onStreamRead (internal/stream_base_commons.js:111:27)
Emitted 'error' event at:
at emitErrorNT (internal/streams/destroy.js:82:8)
at emitErrorAndCloseNT (internal/streams/destroy.js:50:3)
I tried using fs.watch to monitor changes to the file instead of polling, like below:
let md5Previous = null;
let fsWait = false;

fs.watch(dataSourceLogFile, (event, filename) => {
  if (filename) {
    if (fsWait) return;
    fsWait = setTimeout(() => {
      fsWait = false;
    }, 1000);
    const md5Current = md5(fs.readFileSync(dataSourceLogFile));
    if (md5Current === md5Previous) {
      return;
    }
    md5Previous = md5Current;
    console.log(`${filename} file Changed`);
    // MAX DATA CUT-OFF DRUID QUERY
    druid_exe.max_date_query_fire();
    // DRUID QUERY FOR GLOBAL DATA
    druid_exe.global_druid_query_fire();
  }
});
It also works fine for some time, but then I get the same error after 4-5 hours:
events.js:167
throw er; // Unhandled 'error' event
^
Error: read ECONNRESET
at TCP.onStreamRead (internal/stream_base_commons.js:111:27)
Emitted 'error' event at:
at emitErrorNT (internal/streams/destroy.js:82:8)
at emitErrorAndCloseNT (internal/streams/destroy.js:50:3)
But when run on my local machine it works fine; the error occurs only when run on the remote Linux machine.
Can somebody help me fix this problem?
Use fs.watchFile instead, because fs.watch is not consistent across platforms:
https://nodejs.org/docs/latest/api/fs.html#fs_fs_watchfile_filename_options_listener
Change your code according to your requirements.
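A minimal sketch of that approach, reusing the file path and Druid calls from the question (the interval value is just an example):
const fs = require('fs');
const path = require('path');

// fs.watchFile polls the file's stats and invokes the listener with the
// current and previous fs.Stats objects whenever they differ.
fs.watchFile(path.join(__dirname, 'node_id'), { interval: 60000 }, (curr, prev) => {
  if (curr.mtimeMs !== prev.mtimeMs) {
    // MAX DATA CUT-OFF DRUID QUERY
    druid_exe.max_date_query_fire();
    // DRUID QUERY FOR GLOBAL DATA
    druid_exe.global_druid_query_fire();
  }
});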
It was happening because users were closing the browser before the data request was received, leading to the connection reset.
I used PM2 (http://pm2.keymetrics.io/) to run the application, and it is working great now.
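For reference, running the app under PM2 is as simple as the following (app.js stands in for your actual entry file); PM2 restarts the process automatically if it crashes:
pm2 start app.js
pm2 logs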

Error: stream.push() after EOF

Playing with Node streams.
This code reads from index.js and writes to indexCopy.js - a kind of file copy.
The target file gets created, but during execution an exception is thrown:
node index.js
events.js:183
throw er; // Unhandled 'error' event
^
Error: stream.push() after EOF
at readableAddChunk (_stream_readable.js:240:30)
at MyStream.Readable.push (_stream_readable.js:208:10)
at ReadStream.f.on (C:\Node\index.js:16:28)
at emitOne (events.js:116:13)
at ReadStream.emit (events.js:211:7)
at addChunk (_stream_readable.js:263:12)
at readableAddChunk (_stream_readable.js:250:11)
at ReadStream.Readable.push (_stream_readable.js:208:10)
at fs.read (fs.js:2042:12)
at FSReqWrap.wrapper [as oncomplete] (fs.js:658:17)
C:\Node>
This is the code:
var util = require('util');
var stream = require('stream');
var fs = require('fs');

var MyStream = function () {
  stream.Readable.call(this);
};
util.inherits(MyStream, stream.Readable);

MyStream.prototype._read = function (d) {
  f = fs.createReadStream("index.js");
  f.on('data', (d) => { this.push(d); });
  f.on('end', () => { this.push(null); }); // when the file is finished we need to close the stream
};

var f = fs.createWriteStream("indexCopy.js");
var myStream = new MyStream();
myStream.pipe(f);
I tried calling this.push(null) in the 'data' event; in that case the target file isn't even created and the code fails with the same exception.
I realize that copying a file would be done more easily with the pipe() function - I am just experimenting/learning.
What is wrong with my approach?
You don't want the f = fs.createReadStream("index.js") line inside the _read method -- _read gets called repeatedly so you're creating multiple read streams. Put that in your constructor instead.
function MyStream() {
  stream.Readable.call(this);
  this.source = fs.createReadStream("index.js");
  this.haveBound = false;
}

MyStream.prototype._read = function () {
  if (this.haveBound) return; // Don't bind to events repeatedly
  this.haveBound = true;
  this.source.on("data", d => this.push(d));
  this.source.on("end", () => this.push(null));
};
This is awkward though. Streams are meant to be pipe'ed.
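For comparison, the whole copy collapses to one line once you rely on pipe (same file names as in the question):
const fs = require('fs');

// pipe() drives the writable from the readable and handles back-pressure
// and end-of-file for you.
fs.createReadStream("index.js").pipe(fs.createWriteStream("indexCopy.js"));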

Cannot find module '../dialog' (Electron fatal error)

In electron, I encounter the following error:
module.js:440
throw err;
^
Error: Cannot find module '../dialog'
at Module._resolveFilename (module.js:438:15)
at Function.Module._resolveFilename (/opt/App/resources/electron.asar/common/reset-search-paths.js:47:12)
at Function.Module._load (module.js:386:25)
at Module.require (module.js:466:17)
at require (internal/module.js:20:19)
at Object.get [as dialog] (/opt/App/resources/electron.asar/browser/api/exports/electron.js:35:14)
at process.<anonymous> (/opt/App/resources/electron.asar/browser/init.js:64:31)
at emitOne (events.js:96:13)
at process.emit (events.js:188:7)
at process._fatalException (node.js:276:26)
It happens on a child process spawn that fails on Linux. Strange, because I do have a try/catch block around that, yet it still results in an uncaughtException, as seen in the code in browser/init.js from electron.asar:
// Don't quit on fatal error.
process.on('uncaughtException', function (error) {
  // Do nothing if the user has a custom uncaught exception handler.
  var dialog, message, ref, stack
  if (process.listeners('uncaughtException').length > 1) {
    return
  }
  // Show error in GUI.
  dialog = require('electron').dialog
  stack = (ref = error.stack) != null ? ref : error.name + ': ' + error.message
  message = 'Uncaught Exception:\n' + stack
  dialog.showErrorBox('A JavaScript error occurred in the main process', message)
})
As said, my code is in a try catch:
try {
  server = childProcess.spawn(java, ["-jar", "App.jar"], {
    "cwd": serverDirectory,
    "detached": true
  }, function (err) {
    console.log("in callback");
  });
} catch (err) {
  console.log("here we are");
  console.log(err);
}
But neither the callback nor the catch block is reached. Any ideas what is going on here and why the default dialog module cannot be found?
I ran into the same error with Electron 1.6.2.
I figured it was because an error occurs while the application is closing and Electron wants to display it in a dialog, but the shutdown has already started so Electron can no longer load that module. Anyway, I added:
const { dialog } = require('electron');
in main.js. There is no more error in the console; instead I get a dialog with the error, which I can then fix. After that I left the require in place just in case.
I hope I understand your question correctly...
If by "default dialog module", you mean the electron dialog API, then you can require that like so: const { dialog } = require('electron'). (Or in pre-1.0 versions simply require('dialog'). See https://github.com/electron/electron/blob/master/docs/api/dialog.md
Also the try/catch needs to be around the require in the child process. The try/catch you have is around the spawning of the child process in the parent. That require is failing in an entirely different node.js process, so it's not caught in the parent process that spawned it. It sounds like your child process code might work better if it looked like:
try {
  const dialog = require('dialog');
} catch (e) {}
Also, if childProcess.spawn is referring to the core node module child_process, that doesn't accept a callback function. See https://nodejs.org/api/child_process.html#child_process_child_process_spawn_command_args_options
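As a rough sketch, using the variables from your snippet, you would instead listen to the events emitted by the returned ChildProcess to catch spawn failures:
const childProcess = require('child_process');

const server = childProcess.spawn(java, ["-jar", "App.jar"], {
  cwd: serverDirectory,
  detached: true
});

// Emitted if the process could not be spawned (e.g. the binary is missing)
server.on('error', (err) => {
  console.log("spawn failed", err);
});

// Emitted when the child terminates
server.on('exit', (code, signal) => {
  console.log("child exited", code, signal);
});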
Can you share the code from your child process? That might help more.

RESTIFY: Error: socket hang up] code: 'ECONNRESET' on multiple requests

I am implementing a node app which brings in order details from BigCommerce.
Multiple calls are made to the BigCommerce API asynchronously using the Restify JsonClient.
It works fine for some calls, but after that it gives the error: [Error: socket hang up] code: 'ECONNRESET', sslError: undefined, body: {}
I have tried turning off socket pooling, i.e. by setting agent=false, but it still gives the same error.
Following is the code which makes the call to the BigCommerce API:
makeRequest = function (url, params, headers, orderDetails, cb) {
  var options = {
    headers: headers
  };
  var client = restify.createJsonClient({
    url: url
  });
  client.get(options, function (err, req, res, obj) {
    if (err) {
      console.log(err);
      cb(err, obj);
    } else if (obj != null) {
      var result = obj;
      if (orderDetails == null) {
        cb(null, result);
      } else {
        cb(null, result, orderDetails);
      }
    }
  });
};
I get the following error:
{ [Error: socket hang up] code: 'ECONNRESET', sslError: undefined, body: {} }
Error: socket hang up
at SecurePair.error (tls.js:993:23)
at EncryptedStream.CryptoStream._done (tls.js:689:22)
at CleartextStream.read [as _read] (tls.js:490:24)
at CleartextStream.Readable.read (_stream_readable.js:320:10)
at EncryptedStream.onCryptoStreamFinish (tls.js:301:47)
at EncryptedStream.g (events.js:175:14)
at EncryptedStream.EventEmitter.emit (events.js:117:20)
at finishMaybe (_stream_writable.js:352:12)
at endWritable (_stream_writable.js:359:3)
at EncryptedStream.Writable.end (_stream_writable.js:337:5)
at EncryptedStream.CryptoStream.end (tls.js:628:31)
at Socket.onend (_stream_readable.js:483:10)
Why am I getting this error? How can I handle it?
Thanks
I just wanted to make sure that you're setting the agent option in the right place. Include
"agent": false
in your options. (It's not set in the options in the code you pasted.)
Per gfpacheco in the comments here: https://github.com/restify/node-restify/issues/485
By default NodeJS uses agents to keep the TCP connection open, so you can reuse it.
The problem is that if the server is closed, or it closes your connection for whatever reason you get the ECONNRESET error.
To close the connection every time you just need to set agent: false in your client creation
I've tried this solution and it worked for me.
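Roughly, that means creating the client like this (a sketch based on the code in the question):
var client = restify.createJsonClient({
  url: url,
  agent: false  // don't keep the TCP connection alive between requests
});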
Other than that, the
"secureOptions": "constants.SSL_OP_NO_TLSv1_2"
solution posted here sounds like it could be the right path, since you're getting an sslError.
Maybe you are running into this issue https://github.com/joyent/node/issues/5360
TL;DR: You could try with latest node version and secureOptions: constants.SSL_OP_NO_TLSv1_2 added to your options.
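A rough sketch of that suggestion, assuming restify forwards secureOptions to the underlying TLS layer:
var constants = require('constants');

var client = restify.createJsonClient({
  url: url,
  agent: false,
  secureOptions: constants.SSL_OP_NO_TLSv1_2  // skip TLS 1.2 negotiation, per the linked issue
});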
