How to tell when a spawned process is idle - node.js

I am launching an external program (adb shell) via node's spawn method.
Then I am sending commands via childProc.stdin.write(input) and the shell returns stuff accordingly.
My data callback gets called each time there is a new data chunk available. All good!
But since the connection stays open, I don't know when the external program is finished sending data.
Is there a way to solve this? Isn't there some standard way for a program to signal "that's the data you asked for, I am done responding"?
static async execCommand2(command, args, input) {
  if (!this.adbShellProc || this.adbShellProc.killed) {
    this.adbShellProc = spawn(command, args)
    this.adbShellProc.stdout.setEncoding('utf8')
  }
  const self = this
  let promise = new Promise((resolve, reject) => {
    log.debug('Shell.execCommand2:', command, 'args:', args)
    log.debug('send input: ', input)
    self.adbShellProc.stdin.write(input)
    self.adbShellProc.stdin.write('\n')
    self.adbShellProc.stdout.on('data', chunk => {
      // resolve(chunk)
      // PROBLEM: I can't resolve here, cause there might be more data coming in
    })
    self.adbShellProc.on('close', code => {
      self.adbShellProc.stdin.end()
      self.adbShellProc = null
    })
  })
  return promise
}

Your answer is actually in your question already: you have attached a close event listener, so when the process's output is finished your control reaches that close handler (with one modification: listen for the close event on adbShellProc.stdout), and you can resolve/return the accumulated data from there.
static async execCommand2(command, args, input) {
  let finalOutcome = ''
  if (!this.adbShellProc || this.adbShellProc.killed) {
    this.adbShellProc = spawn(command, args)
    this.adbShellProc.stdout.setEncoding('utf8')
  }
  const self = this
  let promise = new Promise((resolve, reject) => {
    log.debug('Shell.execCommand2:', command, 'args:', args)
    log.debug('send input: ', input)
    self.adbShellProc.stdin.write(input)
    self.adbShellProc.stdin.write('\n')
    self.adbShellProc.stdout.on('data', chunk => {
      // collect the chunks; we can't resolve here because more data may still arrive
      finalOutcome += chunk
    })
    self.adbShellProc.stdout.on('close', () => {
      self.adbShellProc.stdin.end()
      self.adbShellProc = null
      resolve(finalOutcome)
    })
  })
  return promise
}
Also, stdout emits an end event, which you can resolve from as well:
ls.stdout.on('end', () => {
  // you can resolve from here as well
})
You can find more details in the Node.js Child Process documentation.

No. There's no way of knowing whether the output is final unless an exit code is returned; that's the Linux way.
Fortunately, adb gives you a way of running one-off commands through adb shell without having to start it in interactive mode.
Instead of starting the shell and then sending the commands as input, make the whole thing a single command. Example:
adb shell am start -a android.intent.action.VIEW
Then the close event will give you a clear indication that the command has finished, so you can resolve there, and you will also get the return code, so you can check whether the command succeeded.
There's no real benefit in starting adb shell in interactive mode, because the real processing is not happening in adb but in the actual server, which is adbd.
https://developer.android.com/studio/command-line/adb#shellcommands
static async execCommand2(command, args, input) {
  let finalOutcome = ''
  // append the input to the arguments; no need to keep the process on `this`,
  // since every command is a one-off and there is no state to check
  let adbShellProc = spawn(command, [...args, input])
  adbShellProc.stdout.setEncoding('utf8')
  let promise = new Promise((resolve, reject) => {
    log.debug('Shell.execCommand2:', command, 'args:', [...args, input])
    adbShellProc.stdout.on('data', chunk => {
      finalOutcome += chunk
    })
    adbShellProc.on('close', code => {
      // unix return code: 0 is good, non-zero means error
      if (code !== 0) {
        reject(finalOutcome)
      } else {
        resolve(finalOutcome)
      }
    })
  })
  return promise
}

You can end your shell commands with ; echo some_marker and wait until some_marker appears in the stdout. (And you can also get the exit code with ; echo some_marker $?)
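For example, a minimal sketch of that marker approach against a persistent adb shell (MARKER and runShellCommand are made-up names for illustration; depending on whether the shell echoes input back, you may need to strip the echoed command line first):
const { spawn } = require('child_process')

const MARKER = '__CMD_DONE__' // arbitrary sentinel, assumed not to appear in real output
const shell = spawn('adb', ['shell'])
shell.stdout.setEncoding('utf8')

function runShellCommand(cmd) {
  return new Promise((resolve, reject) => {
    let output = ''
    const onData = chunk => {
      output += chunk
      const idx = output.indexOf(MARKER)
      if (idx === -1) return // marker not seen yet, keep collecting
      shell.stdout.removeListener('data', onData)
      // the shell prints "<MARKER> <exit code>" after the command's own output
      const exitCode = parseInt(output.slice(idx + MARKER.length).trim(), 10)
      const body = output.slice(0, idx)
      exitCode === 0 ? resolve(body) : reject(new Error('exit code ' + exitCode + ': ' + body))
    }
    shell.stdout.on('data', onData)
    shell.stdin.write(cmd + '; echo ' + MARKER + ' $?\n')
  })
}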

Related

NodeJS read partial stream

I have a NodeJS child process that, on invocation, calculates a unique pid (used by Redis)
and console logs it (e.g. console.log('PID: ' + pid)).
In the parent I call spawn(command, arg, {stdio: ['pipe', 'pipe', 'pipe'], detached: true})
Some things I tried:
Clone the child process's stdout and pass it to a function (that returns a promise), and then
return new Promise((resolve, reject) => {
  inputStream.on('data', (i) => {
    const isPidString = RegExp('/^PID:.+/g').test(i.toString('utf8'))
    console.log('isPidString', isPidString)
    if (isPidString) {
      console.log('found string')
      console.log('i', i.toString())
      const pidValue = parseInt(i.toString('utf8').split(': ')[1])
      console.log('pidVal', pidValue)
      inputStream.destroy()
      resolve(pidValue)
    }
  })
  setTimeout(() => {
    reject()
  }, 5000);
});
Somehow it doesn't work. My question is: how do I listen to a stream (that's going to run nonstop) for just the one specific console.log() I'm concerned with?
Edit: I was able to figure this out.
The code largely works; here's what was required:
Clone the child process's stdout (where we want to read data from)
Read the output as a string (convert it using utf-8 encoding)
When the output matches the regex, use string manipulation to extract the value, destroy the stream and resolve.
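For reference, a sketch of those steps under my own naming (waitForPid and the 5-second timeout are illustrative, and it listens on child.stdout directly rather than cloning it):
function waitForPid(childStdout, timeoutMs = 5000) {
  return new Promise((resolve, reject) => {
    const timer = setTimeout(() => reject(new Error('PID line never appeared')), timeoutMs)
    const onData = (chunk) => {
      const text = chunk.toString('utf8')          // read the chunk as a utf-8 string
      const match = /^PID: (\d+)/m.exec(text)      // regex literal matching the "PID: <value>" log line
      if (match) {
        clearTimeout(timer)
        childStdout.removeListener('data', onData) // stop listening once the value is found
        resolve(parseInt(match[1], 10))
      }
    }
    childStdout.on('data', onData)
  })
}

// usage:
// const child = spawn(command, args, { stdio: ['pipe', 'pipe', 'pipe'], detached: true })
// const pid = await waitForPid(child.stdout)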

How to stop async code from running Node.JS

I'm creating a program where I constantly run and stop async code, but I need a good way to stop the code.
Currently, I have tried two methods:
Method 1:
When a method is running and another method is called to stop the first one, I start an infinite loop to stop that code from running and then remove the method from the queue (array).
I'm 100% sure that this is the worst way to accomplish it, and it is very buggy.
Code:
class test {
  async Start() {
    const response = await request(options);
    if (stopped) {
      while (true) {
        await timeout(10)
      }
    }
  }
}
Code 2:
var tests = [];

function Start() {
  const t = new test();
  tests.push(t)
  t.Start();
}

function Stop() {
  tests.forEach((t) => { t.stopped = true; });
  tests = [];
}
Method 2:
I load the different methods into Workers, and when I need to stop the code, I just terminate the Worker.
It always takes a lot of time (about 1 sec) to create the Worker, so this is not ideal, since I need the code to run without 1-2 second pauses.
Code:
const Worker = require("tiny-worker");
const code = new Worker(path.resolve(__dirname, "./Code/Code.js"))
Stopping:
code.terminate()
Is there any other way that I can stop async code?
The program makes requests using the Node.js request-promise module, so the program is often waiting on requests, and it's hard to stop the code without one of the two methods above.
Is there any other way that I can stop async code?
Keep in mind the basics of how Node.js works; I think there is some misunderstanding here.
It executes the current function in the current context, and if it encounters an async operation, the event loop schedules its execution somewhere in the future. There is no way to remove that scheduled execution.
More info on the event loop here.
In general, to manage this kind of situation you should use flags or semaphores.
The program makes requests using the Node.js request-promise module, so the program is often waiting on requests, and it's hard to stop the code
If you need to hard "stop the code" you can do something like
function stop() {
  process.exit()
}
But if I'm getting it right, you're launching requests every x time, and at some point you need to stop sending requests without handling the response.
You can't de-schedule the response-handling portion, but you can add some logic to it so that, when it eventually runs, it checks whether the "request loop" has been stopped.
let loop_is_stopped = false
let sending_loop = null

async function sendRequest() {
  const response = await request(options) // "wait here"
  // the following lines are scheduled after the request promise is resolved
  if (loop_is_stopped) {
    return
  }
  // do something with the response
}

function start() {
  sending_loop = setInterval(sendRequest, 1000)
}

function stop() {
  loop_is_stopped = true
  clearInterval(sending_loop)
}

module.exports = { start, stop }
We can use Promise.all without killing the whole app (process.exit()); here is my example (you can use another trigger for calling controller.abort()):
const controller = new AbortController();

class Workflow {
  static async startTask() {
    await new Promise((res) => setTimeout(() => {
      res(console.log('RESOLVE'))
    }, 3000))
  }
}

class ScheduleTask {
  static async start() {
    return await Promise.all([
      // reject as soon as the abort event fires
      new Promise((_res, rej) => controller.signal.addEventListener('abort', () => rej(new Error('Aborted')))),
      Workflow.startTask()
    ])
  }
}

setTimeout(() => {
  controller.abort()
  console.log("ABORTED!!!");
}, 1500)

const run = async () => {
  try {
    await ScheduleTask.start()
    console.log("DONE")
  } catch (err) {
    console.log("ERROR", err.name)
  }
}

run()
// ABORTED!!!
// ERROR Error
// RESOLVE
"DONE" will never be showen.
res will be complited
Maybe would be better to run your code as script with it's own process.pid and when we need to interrupt this functionality we can kill this process by pid in another place of your code process.kill.
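A minimal sketch of that idea using child_process.fork (worker.js and its contents are assumptions; forking has a startup cost similar to the Worker approach above, but killing it is immediate):
const { fork } = require('child_process')
const path = require('path')

let worker = null

function start() {
  // worker.js would contain the request loop that should be stoppable
  worker = fork(path.resolve(__dirname, 'worker.js'))
}

function stop() {
  if (worker) {
    worker.kill('SIGTERM') // terminates the whole child process, pending async work included
    worker = null
  }
}

module.exports = { start, stop }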

How to handle sync browser emulations in node.js

I'm writing a script that is intended to load some data from .txt files and then perform multiple requests (in a loop) to a website with Node.js's browser emulator Nightmare.
I have no problem with reading from the txt files and so on, but I can't manage to make it run sequentially and without exceptions.
function visitPage(url, code) {
  new Promise((resolve, reject) => {
    Nightmare
      .goto(url)
      .click('.vote')
      .insert('input[name=username]', 'testadmin')
      .insert('.test-code-verify', code)
      .click('.button.vote.submit')
      .wait('.tag.vote.disabled,.validation-error')
      .evaluate(() => document.querySelector('.validation-error').innerHTML)
      .end()
      .then(text => {
        return text;
      })
  });
}
async function myBackEndLogic() {
  try {
    var br = 0, user, proxy, current, agent;
    while (br < loops) {
      current = Math.floor(Math.random() * (maxLoops - br - 1));
      /* ...getting user and so on... */
      const response = await visitPage('https://example.com/admin/login', "code")
      br++;
    }
  } catch (error) {
    console.error('ERROR:');
    console.error(error);
  }
}
myBackEndLogic();
The error that occurs is:
UnhandledPromiseRejectionWarning: TypeError: Cannot read property 'webContents' of undefined
So I have a few questions:
1) How do I fix the exception?
2) How do I make it actually run sequentially and open the browser every time for each address? (In a previous attempt, which I didn't save, I fixed the exception, but the browser wasn't actually opening and the step was basically skipped.)
3) (Not so important) Is it possible to select a few elements with
.wait('.class1,.class2,.validation-error')
and save each value in a different variable, or just get the text from the first one that occurred? (If none of them occurred, return 0, for example.)
I see a few issues with the code above.
In the visitPage function, you are creating a wrapping Promise. That's fine, except you don't have to: Nightmare already returns a promise for you. As written, you're dropping any errors that promise produces by wrapping it. Instead, just use an async function!
async function visitPage(url, code) {
  return Nightmare
    .goto(url)
    .click('.vote')
    .insert('input[name=username]', 'testadmin')
    .insert('.test-code-verify', code)
    .click('.button.vote.submit')
    .wait('.tag.vote.disabled,.validation-error')
    .evaluate(() => document.querySelector('.validation-error').innerHTML)
    .end();
}
You probably don't want to wrap the content of this method in a 'try/catch'. Just let the promises flow :)
async function myBackEndLogic() {
  var br = 0, user, proxy, current, agent;
  while (br < loops) {
    current = Math.floor(Math.random() * (maxLoops - br - 1));
    const response = await visitPage('https://example.com/admin/login', "code")
    br++;
  }
}
When you run your method - make sure to include a catch! Or a then! Otherwise, your app may exit early.
myBackEndLogic()
  .then(() => console.log('donesies!'))
  .catch(console.error);
I'm not sure if any of this will help with your specific issue, but hopefully it gets you on the right path :)

Kill an unresolved promise (or ignore and move on)

Using Node's child process exec, I'm running an FFmpeg conversion via a promise that takes a bit of time. Each time the user clicks "next", it starts the FFmpeg command on a new file:
function doFFMpeg(path) {
  return new Promise((resolve, reject) => {
    exec('ffmpeg (long running command)', (error, stdout, stderr) => {
      if (error) {
        reject();
      }
    }).on('exit', (code) => { // exit returns code 0 for good, 1 for bad
      if (code) {
        reject();
      } else {
        resolve();
      }
    });
  });
}
The problem is, if the user moves on to the next video before the promise settles, I need to scrap the process and move on to converting the next video.
How do I either:
A) (Ideally) Cancel the current promised exec process*
B) Let the current promised exec process complete, but just ignore that promise while I start a new one.
*I realize that promise.cancel is not yet in ECMA, but I'd like to know of a workaround -- preferably without using a 3rd party module / library.
Attempt:
let myChildProcess;

function doFFMpeg(path) {
  myChildProcess.kill();
  return new Promise((resolve, reject) => {
    myChildProcess = exec('ffmpeg (long running command)', (error, stdout, stderr) => {
      if (error) {
        reject();
      }
    }).on('exit', (code) => { // exit returns code 0 for good, 1 for bad
      if (code) {
        reject();
      } else {
        resolve();
      }
    });
  });
}
Assuming exec() does indeed return an object with a .kill() method, the attempt looks pretty close to what you want. You just have to accept promise rejection in lieu of cancellation, which is unavailable with native Promises. It is typically inconsequential, or even better, to reject rather than cancel.
As I understand it, killing the process will cause the callback to fire with an error (and/or the 'exit' handler to fire with a non-zero code or a signal). If so, you don't need to reject the promise explicitly when the process is killed - reject() will be called anyway.
Your doFFMpeg() just needs some safety around calling myChildProcess.kill().
Something like this should do it :
const { exec } = require('child_process');

const doFFMpeg = (function() {
  var myChildProcess = null;
  return function (path) {
    if (myChildProcess) {
      myChildProcess.kill();
    }
    return new Promise((resolve, reject) => {
      myChildProcess = exec('ffmpeg (long running command)', (error, stdout, stderr) => {
        if (error) {
          reject(error);
        }
        myChildProcess = null;
      });
      myChildProcess.on('exit', (code) => { // exit returns code 0 for good, non-zero for bad
        if (code) {
          reject(new Error('bad exit'));
        } else {
          resolve();
        }
        myChildProcess = null;
      });
    });
  }
})();
I'm not sure that exec()'s callback is necessary (unless you need to process stdout/stderr or need to know the details of the error). It's possible that just the .on('exit') handler will suffice, or maybe .on('close') and .on('error') handlers.
If the caller needs to handle "kill errors" differently from other errors, then there's a little more work to do. You will need to ensure that, on kill, the Promise is rejected with a detectable error (e.g. a custom error, or an Error monkeypatched with a custom property).
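For example, a sketch of the 'exit' handler under the assumption that .kill() sends the default SIGTERM (the cancelled property is just an illustrative convention):
myChildProcess.on('exit', (code, signal) => {
  if (signal) {                       // e.g. 'SIGTERM' after .kill()
    const err = new Error('conversion cancelled')
    err.cancelled = true              // custom property the caller can check
    reject(err)
  } else if (code) {
    reject(new Error('ffmpeg exited with code ' + code))
  } else {
    resolve()
  }
  myChildProcess = null
})

// caller:
// doFFMpeg(path).catch(err => {
//   if (err.cancelled) return        // superseded by a newer job, safe to ignore
//   console.error(err)
// })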
If I understand correctly, you want to execute FFmpeg conversions in a chain, one after the other, and kill the active one when moving on to the next if it hasn't finished yet.
Assuming child_process.exec() is used, you could keep track of the child process in a global variable, and when doFFMpeg() is invoked it should kill() any process still running before instantiating the new promise.

Block for stdin in Node.js

Short explanation:
I'm attempting to write a simple game in Node.js that needs to wait for user input every turn. How do I avoid callback hell (i.e. messy code) inside the turn loop, where each iteration needs to block and wait for input from stdin?
Long explanation:
All the explanations I have read on StackOverflow when someone asks about blocking for stdin input seem to be "that's not what Node.js is about!"
I understand that Node.js is designed to be non-blocking and I also understand why. However I feel that it has me stuck between a rock and a hard place on how to solve this. I feel like I have three options:
Find a way to block for stdin and retain my while loop
Ditch the while loop and instead recursively call a method (like nextTurn) whenever the previous turn ends.
Ditch the while loop and instead use setTimeout(..., 0) or something similar to call a method (like nextTurn) whenever a turn ends.
With option (1) I am going against Node.js principles of non-blocking IO.
With option (2) I will eventually reach a stack overflow as each call adds another turn to the call stack.
With option (3) my code ends up being a mess to follow.
Internal to Node.js there are built-in functions with Sync variants (e.g. see the fs library or the sleep function), and I'm wondering why there is no Sync method for getting user input. And if I were to write something similar to fs.readSync, how would I go about doing it while still following best practices?
Just found this:
https://www.npmjs.com/package/readline-sync
Example code (after doing an npm install readline-sync)
var readlineSync = require('readline-sync');

while (true) {
  var yn = readlineSync.question("Do you like having tools that let you code how you want, rather than how their authors wanted?");
  if (yn === 'y') {
    console.log("Hooray!");
  } else {
    console.log("Back to callback world, I guess...");
    process.exit();
  }
}
Only problem so far is the wailing of the "That's not how node is meant to be used!" chorus, but I have earplugs :)
I agree with the comment about moving towards an event-based system and would ditch the loops. I've thrown together a quick example of text command processing which can be used for simple text games.
var fs = require('fs'),
    es = require('event-stream');

process.stdin
  .pipe(es.split())
  .on('data', parseCommand);

var actionHandlers = {};

function parseCommand(command) {
  var words = command.split(' '),
      action = '';
  if (words.length > 1) {
    action = words.shift();
  }
  if (actionHandlers[action]) {
    actionHandlers[action](words);
  } else {
    invalidAction(action);
  }
}

function invalidAction(action) {
  console.log('Unknown Action:', action);
}

actionHandlers['move'] = function(words) {
  console.log('You move', words);
}

actionHandlers['attack'] = function(words) {
  console.log('You attack', words);
}
You can now break up your actions into discrete functions which you can register with a central actionHandlers variable. This makes adding new commands almost trivial. If you can add some details on why the above approach wouldn't work well for you, let me know and I'll revise the answer.
ArtHare's solution, at least for my use case, blocked background execution, including work started by a promise. While this code isn't elegant, it did block execution of the current function until the read from stdin completed.
While this code must run from inside an async function, keep in mind that running an async function from a top-level context (directly from a script, not contained within any other function) will block that function until it completes.
Below is a full .js script demonstrating usage, tested with node v8.12.0:
const readline = require('readline');

const sleep = (waitTimeInMs) => new Promise(resolve => setTimeout(resolve, waitTimeInMs));

async function blockReadLine() {
  var rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
    terminal: false
  });
  let result = undefined;
  rl.on('line', function(line) {
    result = line;
  })
  while (!result) await sleep(100);
  return result;
}

async function run() {
  new Promise(async () => {
    while (true) {
      console.log("Won't be silenced! Won't be censored!");
      await sleep(1000);
    }
  });
  let result = await blockReadLine();
  console.log("The result was:" + result);
  process.exit(0);
}

run();
