I'm trying to run a command (aws configure) from a .js file called with node. I've been using child_process, which lets me execute a command or batch file. However, when that command would normally prompt the user for something like a username and password, I am unable to have that interaction take place.
I tried using process.stdin so that node could act as the middleman for the user's input, but I am unable to link the two together.
If anyone has any ideas that would be great!
var spawn = require('child_process').spawn;
var mystdin = process.stdin;

var conf = spawn('aws configure', {input: mystdin, shell: true});

// When the command returns output
conf.stdout.on('data', function(data) {
    console.log('stdout: ' + data);

    // prompt for input
    mystdin.resume();
    mystdin.setEncoding('utf8');

    var util = require('util');

    mystdin.on('data', function (text) {
        // INSERT ANSWER HERE
        console.log('received data:', util.inspect(text));
        if (text === 'quit\n') {
            done();
        }
    });

    function done() {
        console.log('Now that process.stdin is paused, there is nothing more to do.');
        process.exit();
    }
});

conf.stderr.on('data', function(data) {
    console.log('stderr: ' + data);
});

conf.on('close', function(code) {
    console.log('child process exited with code ' + code);
});
This is just the path I was currently going down. If anyone has a better suggestion let me know!
Thanks
Two ideas: first, I think the arguments are supposed to go in the second parameter. Second, maybe the aws command doesn't realize it is attached to an interactive terminal. You may try child_process with the arguments split out, or, if that doesn't work, pty.js.
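A minimal, untested sketch of the child_process route (it assumes that inheriting the parent's stdio is enough for the aws prompts to reach your terminal):

var spawn = require('child_process').spawn;

// Arguments go in the second parameter; inheriting stdio lets the aws
// prompts and the user's answers flow through the parent terminal.
var conf = spawn('aws', ['configure'], { stdio: 'inherit' });

conf.on('close', function(code) {
    console.log('aws configure exited with code ' + code);
});

If that doesn't help, pty.js gives the command a real pseudo-terminal: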
var pty = require('pty.js');

var term = pty.spawn('aws', ['configure'], {
    name: 'xterm-color',
    cols: 80,
    rows: 30,
    cwd: process.env.HOME,
    env: process.env
});

term.on('data', function(data) {
    console.log(data);
});
I am wondering how I can run a protractor test as a script and not as a child process or from a task runner such as grunt or gulp. I want to run the test suites in order when my sauce queuing application notifies the test runner I am building. This way, my tests do not conflict with my co-workers' tests.
I am using node, so is there something like this?
var protractor = require('protractor');
protractor.run('path/to/conf', suites, callback);
protractor.on('message', callback)
protractor.on('error', callback)
protractor.end(callback);
That will not be possible. I tried to do the same, but from reading the protractor source code there is no way to do it.
https://github.com/angular/protractor/blob/master/lib/launcher.js#L107
This function is called with your config as a JSON object, but as you can see it calls process.exit in several places, so it will not be possible to run it without at least forking your process.
My solution for programmatically calling protractor is the following:
var npm = require('npm');
var path = require('path');
var url = require('url');
var childProcess = require('child_process');

var address = ...some address object
var args = ['--baseUrl', url.format(address)];

npm.load({}, function() {
    var child = childProcess
        .fork(path.join(npm.root, 'protractor/lib/cli'), args)
        .on('close', function(errorCode) {
            console.log('error code: ', errorCode);
        });

    process.on('SIGINT', child.kill);
});
Another option is to call protractor's launcher module directly:
const Launcher = require("protractor/built/launcher");
Launcher.init('path/to/conf');
You can also drive the run through protractor-flake:

const protractorFlake = require('protractor-flake'),
    baseUrl = process.argv[2],
    maxAttempts = parseInt(process.argv[3], 10);

if (process.argv.length > 2) {
    console.info('Launching protractor with baseUrl: %s, maxAttempts: %d', baseUrl, maxAttempts);
    protractorFlake({
        maxAttempts: maxAttempts,
        parser: 'multi',
        protractorArgs: [
            './protractor.conf.js',
            '--baseUrl',
            baseUrl
        ]
    }, function (status, output) {
        process.exit(status);
    });
} else {
    console.error(`
Usage: protractor-wrapper <baseUrl>
`);
}
I have a Grunt task and currently I am utilising AsyncJS to run it. AsyncJS works well, but I feel it could be more powerful if I utilised the NodeJS cluster module. I have checked out Grunt Parallel and Grunt Concurrent, and they are not much different from what I am doing in my Grunt task. Any suggestions on utilising the NodeJS cluster module to speed up task execution?
Currently I am doing this:
var queue = async.queue(function(task, cb) {
    // Process task with PhantomJS and then
    cb();
}, require('os').cpus().length);

async.each(htmlPages, function(val, cb) {
    queue.push(val, function() {
        cb();
    });
}, function() {
    console.log('Completed');
    done();
});
How can I make this work with NodeJS cluster?
One way to do it is to spawn the number of workers that you want using the cluster module. Then send messages to them when you want to start them working on something.
Below is code that initialises os.cpus().length workers and a queue that sends the work to them. It then pushes everything in htmlPages to that queue, waits for it to finish and then finally kills all the workers.
var os = require('os');
var async = require('async');
var cluster = require('cluster');

if (cluster.isWorker) {
    process.on('message', function(msg) {
        // Do the Phantom JS stuff
        process.send(theResult);
    });
}

if (cluster.isMaster) {
    var workers = os.cpus().map(function () {
        return cluster.fork();
    });

    var queue = async.queue(function (msg, cb) {
        var worker = workers.pop();
        worker.once('message', function (msg) {
            workers.push(worker);
            cb(null, msg);
        });
        worker.send(msg);
    }, workers.length);

    async.each(htmlPages, queue.push.bind(queue), function (err) {
        if (err) { throw err; }
        workers.forEach(function (worker) {
            worker.kill();
        });
        console.log('Completed');
    });
}
My goal is to insert VERY large csv's, so right now I use csv streaming like so:
var myCollection = db.collection(myCollectionId);

var q = async.queue(Collection.insert.bind(myCollection), 10);

csv()
    .from.path(myFilePath, {columns: true})
    .transform(function(data, index, cb) {
        q.push(data, function (err, res) {
            if (err) return cb(err);
            cb(null, res[0]);
        });
    })
    .on('end', function () {
        q.drain = function() {
            // do some stuff
        };
    })
    .on('error', function (err) {
        res.end(500, err.message);
        console.log('on.error() executed');
    });
But when files get REALLY large, like 70M+, streaming them makes my server very slow, it takes forever, and when I try to load pages on the website they are lethargic during this process.
Why is it not possible to execute a mongo insert using a cron job like this? I ask because the same insert takes maybe 30 seconds from the mongo command line.
P.S. Don't mind the readFile and lines part, I am doing that because I want to test for when all the lines have been inserted into the collection after the process is started (haven't implemented this yet).
var cronJob = require('cron').CronJob;
var spawn = require('child_process').spawn;
var fs = require('fs');

function MongoImportEdgeFile(dataID, filePath) {
    var scriptPath = "/local/main/db/mongodb-linux-x86_64-2.4.5/bin/mongoimport";
    console.log("script path = " + scriptPath);
    var output = "";

    fs.readFile(filePath, 'utf-8', function(err, data) {
        if (err) {
            console.log(err);
            throw err;
        }
        //console.log('data = ' + data);
        var lines = data.split('\n');
        console.log("total lines in file = " + lines.length);

        var job = new cronJob(new Date(), function() {
            // store reference to 'this', which is the cronJob object;
            // needed to stop the job after the script is done executing.
            var context = this;

            // execute mongoimport asynchronously, with each flag as its own argument
            var script = spawn(scriptPath, [
                '-d', 'mydb',
                '-c', 'Data_ForID_' + dataID,
                '--file', filePath,
                '--type', 'csv'
            ]);
            console.log("Executing mongoimport via node-cron: " + scriptPath);

            // script has finished executing, so complete the cron job and fire the completion callback
            script.on('close', function() {
                console.log('inside script.on(close, function() for import');
                context.stop();
            });
        }, function() {
            // callback function that executes upon completion
            console.log("Finished executing import");
        }, true);
    });
}
You shouldn't use individual insert calls. You're forcing mongo to perform internal sync with each call -- I think it's even worse given your parallel approach.
Use bulk insertion: it's as easy as calling insert() with an array.
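For example, building on the streaming snippet from the question (the batch size and variable names here are only placeholders), you could accumulate rows and insert them in chunks, something like:

var batch = [];
var BATCH_SIZE = 1000; // arbitrary; tune for your document size

csv()
    .from.path(myFilePath, { columns: true })
    .transform(function (data, index, cb) {
        batch.push(data);
        if (batch.length >= BATCH_SIZE) {
            // one insert call for the whole array instead of one per row
            myCollection.insert(batch, function (err) { cb(err); });
            batch = [];
        } else {
            cb();
        }
    })
    .on('end', function () {
        // flush whatever is left over
        if (batch.length) {
            myCollection.insert(batch, function () {});
        }
    });

This is only a sketch; error handling and the final flush would need the same care as your current queue.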
You could execute mongoimport directly from node by creating a child process. Here's an article on using mongoimport to import a csv. You can also do json.
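A hedged sketch of that, with each flag as its own array element (the db, collection, and file path below are placeholders):

var spawn = require('child_process').spawn;

var mongoimport = spawn('mongoimport', [
    '-d', 'mydb',
    '-c', 'mycollection',
    '--type', 'csv',
    '--headerline',
    '--file', '/path/to/file.csv'
]);

mongoimport.stderr.on('data', function (data) {
    console.log('mongoimport: ' + data);
});

mongoimport.on('close', function (code) {
    console.log('mongoimport exited with code ' + code);
});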
Somehow I missed the part about using mongoimport inside cron. If I understand correctly it looks like you somehow know the csv's you would like to import, and you are using cron to check for them.
Have you considered a message queue? This will allow your processor to receive the import job instantaneously instead of on an interval. This will also throttle your processing.
If you need more throughput, you could create additional listener processes that are attached to the same queue. They will compete for the next job. This will allow your solution to scale.
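As a rough sketch of that idea using amqplib against a RabbitMQ broker (the queue name and message shape are made up for illustration), each listener process would consume jobs like this:

var amqp = require('amqplib/callback_api');

amqp.connect('amqp://localhost', function (err, conn) {
    if (err) throw err;
    conn.createChannel(function (err, ch) {
        if (err) throw err;
        ch.assertQueue('csv-import-jobs', { durable: true });
        ch.prefetch(1); // take one job at a time, which throttles each worker

        ch.consume('csv-import-jobs', function (msg) {
            var job = JSON.parse(msg.content.toString());
            // ...run mongoimport (or a bulk insert) for job.filePath here...
            ch.ack(msg); // acknowledge only once the import is done
        });
    });
});

Starting more copies of this process gives you the competing consumers described above.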
I have a script that outputs 'hi', sleeps for a second, outputs 'hi', sleeps for 1 second, and so on and so forth. Now I thought I would be able to tackle this problem with this model.
var spawn = require('child_process').spawn,
    temp = spawn('PATH TO SCRIPT WITH THE ABOVE BEHAVIOUR');

temp.stdout.pipe(process.stdout);
Now the problem is that the task needs to finish in order for the output to be displayed. As I understand it, this is because the newly spawned process takes execution control. Obviously node.js does not support threads, so are there any solutions? My idea was to possibly run two instances, the first one for the specific purpose of creating the task and having it pipe the output to the process of the second instance, assuming this can be achieved.
It's much easier now (6 years later)!
Spawn returns a childObject, which you can then listen for events with. The events are:
Class: ChildProcess
Event: 'error'
Event: 'exit'
Event: 'close'
Event: 'disconnect'
Event: 'message'
There are also a bunch of properties and methods on childObject:
Class: ChildProcess
child.stdin
child.stdout
child.stderr
child.stdio
child.pid
child.connected
child.kill([signal])
child.send(message[, sendHandle][, callback])
child.disconnect()
See more information here about childObject: https://nodejs.org/api/child_process.html
Asynchronous
If you want to run your process in the background while node is still able to continue to execute, use the asynchronous method. You can still choose to perform actions after your process completes, and when the process has any output (for example if you want to send a script's output to the client).
child_process.spawn(...); (Node v0.1.90)
var spawn = require('child_process').spawn;
var child = spawn('node', ['./commands/server.js']);

// You can also use a variable to save the output
// for when the script closes later
var scriptOutput = "";

child.stdout.setEncoding('utf8');
child.stdout.on('data', function(data) {
    //Here is where the output goes
    console.log('stdout: ' + data);

    data = data.toString();
    scriptOutput += data;
});

child.stderr.setEncoding('utf8');
child.stderr.on('data', function(data) {
    //Here is where the error output goes
    console.log('stderr: ' + data);

    data = data.toString();
    scriptOutput += data;
});

child.on('close', function(code) {
    //Here you can get the exit code of the script
    console.log('closing code: ' + code);
    console.log('Full output of script: ', scriptOutput);
});
Here's how you would use a callback + asynchronous method:
var child_process = require('child_process');

console.log("Node Version: ", process.version);

run_script("ls", ["-l", "/home"], function(output, exit_code) {
    console.log("Process Finished.");
    console.log('closing code: ' + exit_code);
    console.log('Full output of script: ', output);
});

console.log("Continuing to do node things while the process runs at the same time...");

// This function will output the lines from the script
// as it runs, AND will return the full combined output
// as well as the exit code when it's done (using the callback).
function run_script(command, args, callback) {
    console.log("Starting Process.");
    var child = child_process.spawn(command, args);

    var scriptOutput = "";

    child.stdout.setEncoding('utf8');
    child.stdout.on('data', function(data) {
        console.log('stdout: ' + data);

        data = data.toString();
        scriptOutput += data;
    });

    child.stderr.setEncoding('utf8');
    child.stderr.on('data', function(data) {
        console.log('stderr: ' + data);

        data = data.toString();
        scriptOutput += data;
    });

    child.on('close', function(code) {
        callback(scriptOutput, code);
    });
}
Using the method above, you can send every line of output from the script to the client (for example using Socket.io to send each line when you receive events on stdout or stderr).
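A hedged sketch of that (the port and the 'script-output' event name are invented for illustration):

var io = require('socket.io')(3000);
var spawn = require('child_process').spawn;

io.on('connection', function (socket) {
    var child = spawn('ls', ['-l', '/home']);

    // forward each chunk of output to the browser as soon as it arrives
    child.stdout.on('data', function (data) {
        socket.emit('script-output', data.toString());
    });

    child.on('close', function (code) {
        socket.emit('script-done', code);
    });
});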
Synchronous
If you want node to stop what it's doing and wait until the script completes, you can use the synchronous version:
child_process.spawnSync(...); (Node v0.11.12+)
Issues with this method:
If the script takes a while to complete, your server will hang for that amount of time!
The stdout will only be returned once the script has finished running. Because it's synchronous, it cannot continue until the current line has finished. Therefore it's unable to capture the 'stdout' event until the spawn line has finished.
How to use it:
var child_process = require('child_process');

var child = child_process.spawnSync("ls", ["-l", "/home"], { encoding: 'utf8' });
console.log("Process finished.");

if (child.error) {
    console.log("ERROR: ", child.error);
}
console.log("stdout: ", child.stdout);
console.log("stderr: ", child.stderr);
console.log("exit code: ", child.status);
I'm still getting my feet wet with Node.js, but I have a few ideas. First, I believe you can use execFile instead of spawn here; execFile takes the path to a script or executable directly and hands you the buffered output in a callback, whereas spawn is the lower-level interface for streaming a command's output as it runs.
1. Provide a callback to process the buffered output:
var child = require('child_process').execFile('path/to/script', [
    'arg1', 'arg2', 'arg3',
], function(err, stdout, stderr) {
    // Node.js will invoke this callback when process terminates.
    console.log(stdout);
});
2. Add a listener to the child process' stdout stream (9thport.net)
var child = require('child_process').execFile('path/to/script', [
    'arg1', 'arg2', 'arg3'
]);

// use event hooks to provide a callback to execute when data are available:
child.stdout.on('data', function(data) {
    console.log(data.toString());
});
Further, there appear to be options whereby you can detach the spawned process from Node's controlling terminal, which would allow it to keep running independently of the parent. I haven't tested this yet, but there are examples in the API docs that go something like this:
child = require('child_process').spawn('path/to/script', [
    'arg1', 'arg2', 'arg3',
], {
    // detachment and ignored stdin are the key here
    // (detached and stdio are spawn options, so spawn is used rather than execFile;
    // stdout is piped so the listener below still receives data):
    detached: true,
    stdio: [ 'ignore', 'pipe', 2 ]
});

// and unref() disentangles the child's lifetime from the parent's:
child.unref();

child.stdout.on('data', function(data) {
    console.log(data.toString());
});
Here is the cleanest approach I've found:
require("child_process").spawn('bash', ['./script.sh'], {
    cwd: process.cwd(),
    detached: true,
    stdio: "inherit"
});
I had a little trouble getting logging output from the "npm install" command when I spawned npm in a child process. The realtime logging of dependencies did not show in the parent console.
The simplest way to do what the original poster wants seems to be this (spawn npm on windows and log everything to parent console):
var spawn = require('child_process').spawn;

var args = ['install'];
var options = {
    stdio: 'inherit' // feed all child process logging into the parent process
};

var childProcess = spawn('npm.cmd', args, options);

childProcess.on('close', function(code) {
    process.stdout.write('"npm install" finished with code ' + code + '\n');
});
PHP-like passthru
import { spawn } from 'child_process';

export default function passthru(exe, args, options = {}) {
    return new Promise((resolve, reject) => {
        const child = spawn(exe, args, {
            ...options,
            env: {
                ...process.env,
                ...options.env,
            },
        });
        child.stdout.setEncoding('utf8');
        child.stderr.setEncoding('utf8');
        child.stdout.on('data', data => console.log(data));
        child.stderr.on('data', data => console.log(data));
        child.on('error', error => reject(error));
        child.on('close', exitCode => {
            console.log('Exit code:', exitCode);
            resolve(exitCode);
        });
    });
}
Usage
const exitCode = await passthru('ls', ['-al'], { cwd: '/var/www/html' })
child:
setInterval(function() {
    process.stdout.write("hi");
}, 1000); // or however else you want to run a timer
parent:
require('child_process').fork('./childfile.js');
// fork'd children use the parent's stdio
I found myself requiring this functionality often enough that I packaged it into a library called std-pour. It should let you execute a command and view the output in real time. To install simply:
npm install std-pour
Then it's simple enough to execute a command and see the output in realtime:
const { pour } = require('std-pour');
pour('ping', ['8.8.8.8', '-c', '4']).then(code => console.log(`Error Code: ${code}`));
It's promise-based, so you can chain multiple commands. It's even function signature-compatible with child_process.spawn, so it should be a drop-in replacement anywhere you're using it.
Adding a sample for exec, as I too needed live feedback and wasn't getting any until after the script finished. exec does return a ChildProcess (which is an EventEmitter), contrary to the many claims that only spawn works in such a way.
This supplements the comment I made to the accepted answer more thoroughly.
The interface for exec is similar to spawn:
// INCLUDES
import { exec } from 'child_process'; // ES6 syntax

// DEFINES
// exec returns a ChildProcess (an EventEmitter) to work with, though
// you can also chain stdout directly:
// (i.e. exec( ... ).stdout.on( ... ); )
let childProcess = exec
(
    './binary command -- --argument argumentValue',
    ( error, stdout, stderr ) =>
    {   // When the process completes:
        if( error )
        {
            console.log( `${error.name}: ${error.message}` );
            console.log( `[STACK] ${error.stack}` );
        }

        console.log( stdout );
        console.log( stderr );

        callback(); // Gulp stuff
    }
);
Now it's as simple as registering an event handler for stdout:
childProcess.stdout.on( 'data', data => console.log( data ) );
And for stderr:
childProcess.stderr.on( 'data', data => console.log( `[ERROR]: ${data}` ) );
You can also pipe stdout to the main process' stdout:
childProcess.stdout.pipe( process.stdout );
Not too bad at all - HTH
I was interested in running a script that takes its input and output from my terminal, and that closes my process once the child script finishes.
import { spawn } from 'node:child_process'
import process from 'node:process'
const script = spawn('path/to/script', { stdio: 'inherit' })
script.on('close', process.exit)
I ran into a situation where none of the above worked when I was spawning a Python 3 script. I would get data from stdout, but only once the child terminated.
As it turns out, Python buffers stdout by default. It's possible to disable stdout buffering by including -u as a command line parameter to python3.
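For example (assuming a local script.py that prints in a loop), the spawn call would look something like this:

const { spawn } = require('child_process');

// -u tells Python 3 not to buffer stdout, so each print() arrives immediately
const py = spawn('python3', ['-u', 'script.py']);

py.stdout.on('data', (data) => {
    console.log('stdout: ' + data);
});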