I'd like to run my protractor end-to-end tests automatically each day.
Currently I can do
$ grunt
in the test directory and they run.
Is there a way to get them to run automatically each day at a specific time?
I've tried using cron, but according to this, I'd have to make all of my require paths into absolute paths, which I don't want to do.
Use a node script and setInterval:
var exec = require('child_process').exec
var path = require('path')

var running = false
var run = function(what, where) {
  if (running === true) return
  running = true
  // by default, just run grunt
  what = what || 'grunt'
  // by default, run on grunt in the current working directory
  where = path.resolve(where || path.join(process.cwd(), 'Gruntfile.coffee'))
  what += ' --gruntfile=' + where
  exec(what, { cwd: path.dirname(where) }, function(err, stdout, stderr) {
    if (err || stderr) { /* log the error somewhere */ }
    /* log the stdout if needed */
    console.log(stdout)
    running = false
  })
}

setInterval(function() {
  run(/* set what to run, where to run */)
  /* or even multiple gruntfiles and node projects */
}, 24 * 60 * 60 * 1000) // once a day
This is platform-agnostic, easy to start and stop, and easy to maintain and customize.
Check out a library like https://www.npmjs.org/package/forever to keep a Node script running indefinitely, or use one of the other options: nohup, monit, upstart, etc.
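For instance, a minimal sketch of keeping the scheduler script above alive with forever, assuming it is saved as schedule.js (the filename is just an example):

npm install -g forever
forever start schedule.js
forever list
forever stop schedule.js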
Using Kyle Robinson Young's answer along with the cron node module, I came up with the following, which runs the Gruntfile every day at 5:00 pm:
var cronJob = require('cron').CronJob
  , exec = require('child_process').exec
  , path = require('path')
  , running = false
  ;

var run = function(what, where) {
  if (running === true) {
    return;
  }
  running = true;
  // by default, just run grunt
  what = what || 'grunt';
  // by default, run on grunt in the current working directory
  where = path.resolve(where || path.join(process.cwd(), 'Gruntfile.js'));
  what += ' --gruntfile=' + where;
  exec(what, { cwd: path.dirname(where) }, function(err, stdout, stderr) {
    if (err || stderr) {
      console.log(err);
    }
    /* log the stdout if needed */
    console.log(stdout);
    running = false;
  });
};

new cronJob('00 00 17 * * *', function() {
  console.log('Running Gruntfile ' + new Date());
  var what = 'grunt'
    , where
    ;
  run(what, where);
}, null, true);
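For reference, the pattern the cron module accepts here has six fields: seconds, minutes, hours, day of month, month, day of week. As a hedged variant (the schedule itself is only an example), the same job could fire at 9:30 am on weekdays only:

new cronJob('00 30 09 * * 1-5', function() {
  console.log('Running Gruntfile ' + new Date());
  run('grunt');
}, null, true);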
Related
I'm a bit confused, because all the examples I've read about the Node cluster module only seem to apply to web servers and concurrent requests. For CPU-intensive applications, the worker_threads module is recommended instead.
And what about I/O file operations? Imagine I have an array with 1 million filenames: ['1.txt', '2.txt', etc., ..., '1000000.txt'], and for each one I need to do heavy processing and then write the resulting file content.
What would be the right way to use all the CPU cores efficiently, spreading the processing of the different filenames across the cores?
Normally I would use this:
const fs = require('fs')
const async = require('async')
const heavyProcessing = require('./heavyProcessing.js')

const files = ['1.txt', '2.txt', ..., '1000000.txt']

async.each(files, function (file, cb) {
  fs.writeFile(file, heavyProcessing(file), function (err) {
    if (!err) cb()
  })
})
Should I now use cluster or worker_threads, and how should I use it?
Does this work?
const fs = require('fs')
const async = require('async')
const heavyProcessing = require('./heavyProcessing.js')

const cluster = require('node:cluster');
const http = require('node:http');
const numCPUs = require('node:os').cpus().length;
const process = require('node:process');

if (cluster.isPrimary) {
  console.log(`Primary ${process.pid} is running`);

  // Fork workers.
  for (let i = 0; i < numCPUs; i++) {
    cluster.fork();
  }

  cluster.on('exit', (worker, code, signal) => {
    console.log(`worker ${worker.process.pid} died`);
  });
} else {
  const files = ['1.txt', '2.txt', ..., '1000000.txt']

  async.each(files, function (file, cb) {
    fs.writeFile(file, heavyProcessing(file), function (err) {
      if (!err) cb()
    })
  })
}
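One caveat with the cluster attempt above (a hedged note, not from the original post): every forked worker runs the same code, so each of them would process the entire files array and the heavy work would be repeated numCPUs times. The list would need to be partitioned, for example by worker id:

// Inside the else (worker) branch: give each worker its own slice of the array.
// cluster.worker.id starts at 1, so shift it to a 0-based index.
const id = cluster.worker.id - 1
const myFiles = files.filter((_, i) => i % numCPUs === id)

async.each(myFiles, function (file, cb) {
  fs.writeFile(file, heavyProcessing(file), function (err) {
    if (!err) cb()
  })
})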
For anyone who is interested: you need to use the npm module piscina.
In this gist I explain everything. Node.js is a powerful tool for backend developers, but you must be aware of multi-core processing in order to get the most out of your CPU.
Multi-core processing in Node.js is mostly used for web servers, and Node.js already ships with the cluster module for that out of the box.
Node.js also ships with the worker_threads module, but it is not as easy to work with.
Let's create a project that tests single-threaded versus multi-threaded CPU-intensive processing and writes some random data to files.
Create the project:
mkdir test-threads && cd test-threads
npm init -y
Install the dependencies and create the dist/ directory:
npm install async progress piscina command-line-args
mkdir dist
Create the file index.js at the root of the project directory
const path = require('path')
const async = require('async')
const ProgressBar = require('progress')
const Piscina = require('piscina')
const commandLineArgs = require('command-line-args')

console.time('main')

const worker = require(path.resolve(__dirname, 'worker.js'))
const piscina = new Piscina({
  filename: path.resolve(__dirname, 'worker.js')
})

const argvOptions = commandLineArgs([
  { name: 'multi-thread', type: Boolean },
  { name: 'iterations', alias: 'i', type: Number }
])

const files = []
for (let i = 0; i < (argvOptions.iterations || 1000); i++) {
  files.push(path.join(__dirname, 'dist', i + '.txt'))
}

var bar = new ProgressBar(':bar', { total: files.length, width: 80 });

async.each(files, function (file, cb) {
  (async function() {
    try {
      const err = argvOptions['multi-thread'] ? (await piscina.run(file)) : worker(file)
      bar.tick()
      if (err) cb(Error(err)); else cb()
    } catch (err) {
      cb(Error(err))
    }
  })();
}, (err) => {
  if (err) {
    console.error('There was an error: ', err)
    process.exitCode = 1
  } else {
    bar.terminate()
    console.log('Success')
    console.timeEnd('main')
    process.exitCode = 0
  }
})
Now create worker.js, also at the root of the project directory:
const fs = require('fs')

// some CPU-intensive function; the higher baseNumber is, the longer it takes
function mySlowFunction(baseNumber) {
  let result = 0
  for (var i = Math.pow(baseNumber, 7); i >= 0; i--) {
    result += Math.atan(i) * Math.tan(i)
  }
}

module.exports = (file) => {
  try {
    mySlowFunction(parseInt(Math.random() * 10 + 1))
    fs.writeFileSync(file, Math.random().toString())
    return null
  } catch (e) {
    return Error(e)
  }
}
Now run it single-threaded and check the elapsed time, for 1000 and 10000 iterations (one iteration equals data processing plus file creation):
node index.js -i 1000
node index.js -i 10000
Now compare with the multi-threaded runs to see the advantage:
node index.js --multi-thread -i 1000
node index.js --multi-thread -i 10000
In the test I ran (on a 16-core CPU) the difference is huge: with 1000 iterations it went from 1:27.061 (m:ss.mmm) single-threaded to 8.884 s multi-threaded. Also check the files inside dist/ to make sure they were created correctly.
Hello fellow nerds and nerdettes,
I just started building a little app that uses an external command-line interface. The app first checks whether the binary is installed in the user's path and, if not, offers to install it for them. The external CLI binary is the DigitalOcean CLI, and installing it requires curling the archive, piping it to tar, and then moving the binary into the user's path. I have built the check-if-installed functionality and have been reading the child_process API, but I'm having a hard time figuring out how to log the status of the curl command to the console. My current incantation shows no console output. My question is this: how do I pipe the output of cURL to the console to confirm it's working? And how might I go about testing for success and then moving on?
Thanks y'all
const exec = require('child_process').exec

const curlScriptOSX = 'curl -L https://github.com/digitalocean/doctl/releases/download/v1.6.0/doctl-1.6.0-darwin-10.6-amd64.tar.gz | tar xz'

exec(curlScriptOSX, function(error, stdout, stderr) {
  console.log('stdout: ' + stdout);
  console.log('stderr: ' + stderr);
  if (error !== null) {
    console.log('exec error: ' + error);
  }
})
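One likely reason there is no live output: exec buffers stdout/stderr and only hands them to the callback once the command finishes, and curl writes its progress meter to stderr. A hedged sketch using spawn with inherited stdio instead, so the progress streams straight to the terminal (same command string as above):

const spawn = require('child_process').spawn

// Run the pipeline through a shell and stream its output directly to this
// process's stdout/stderr, so curl's progress meter shows up live.
const child = spawn(curlScriptOSX, { shell: true, stdio: 'inherit' })

child.on('close', (code) => {
  if (code === 0) {
    console.log('download and extraction finished, moving on...')
  } else {
    console.error('curl/tar exited with code ' + code)
  }
})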
UPDATE: I am looking at the request library as well. Is it possible to do
request(url).pipe(fs.createWriteStream('binary.tar.gz')).then(exec(extracting)).catch(err => console.error(err))
I wonder... I'm going to try this now.
Using the request library:
const fs = require('fs')
const os = require('os')
const request = require('request')

const url = 'https://github.com/digitalocean/doctl/releases/download/v1.6.0/doctl-1.6.0-darwin-10.6-amd64.tar.gz'
const platform = os.platform()

function getInstallerFile (url) {
  console.log("Getting tar")

  // Variables to track download progress
  var received_bytes = 0;
  var total_bytes = 0;

  const output = fs.createWriteStream('doctl.tar.gz')

  request
    .get(url)
    .on('error', function(err) {
      console.log(err);
    })
    .on('response', function(data) {
      total_bytes = parseInt(data.headers['content-length']);
    })
    .on('data', function(chunk) {
      received_bytes += chunk.length;
      showDownloadingProgress(received_bytes, total_bytes);
    })
    .pipe(output);
};

function showDownloadingProgress(received, total) {
  var percentage = ((received * 100) / total).toFixed(2);
  process.stdout.write((platform == 'win32') ? "\033[0G" : "\r");
  process.stdout.write(percentage + "% | " + received + " bytes of " + total + " bytes.");
}

getInstallerFile(url)
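The question also asks how to confirm success and then move on. One hedged option is to listen for the write stream's finish event inside getInstallerFile, after the pipe, and only then extract the archive (assuming tar is available on the PATH):

output.on('finish', function () {
  console.log('\nDownload complete, extracting...')
  require('child_process').exec('tar xzf doctl.tar.gz', function (err) {
    if (err) return console.error(err)
    console.log('doctl extracted')
  })
})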
I am trying to create a Node script to traverse a folder of large files (MP3s containing one-hour music sets) and upload them to Mixcloud via their API. It works except in the case where it hits the API rate limit and needs to wait x seconds: I set a timeout and loop back to the beginning, but after the set amount of time the process exits and doesn't log any useful output. Is this due to a bug in my code, or the way I'm running it? I'm currently running it with sudo nohup node index.js > log.log; is this incorrect? My end goal is a script that can be run at the end of every day at our radio station to upload the archived files.
const restler = require('restler');
const fs = require('fs');
const readDir = require('readdir');
const powerOff = require('power-off');

const options = {
  folder: 'files',
  completefolder: 'complete',
  accesstoken: 'xxxxxxxxxxxxxxxxxxxxxxx'
}

// shut down computer
const shutDown = () => {
  powerOff((err, stderr, stdout) => {
    if (!err && !stderr) {
      console.log(stdout);
    }
  })
};

// uploadFile uploads a file with restler to mixcloud; if the api returns a rate-limiting object, try again in x seconds
const uploadFile = (folder, filename) => {
  const filepath = `./${folder}/${filename}`
  fs.stat(`./${folder}/${filename}`, function(err, stats) {
    const size = stats.size;
    restler.post(`https://api.mixcloud.com/upload/?access_token=${options.accesstoken}`, {
      multipart: true,
      data: {
        "mp3": restler.file(`./${folder}/${filename}`, null, size, null, 'audio/mpeg'),
        "name": filename,
        // "unlisted": true
        // more data can be added here depending on changes in workflow, automate images etc
      }
    }).on("complete", function(data) {
      const returned = JSON.parse(data);
      if (returned.error) {
        if (returned.error.type == "RateLimitException") {
          // try again in x seconds
          console.log(`uploading too fast, retrying upload of ${filename} after ${returned.error.retry_after} seconds`);
          setTimeout(() => {(folder, filename) => uploadFile}, returned.error.retry_after * 1000);
        } else {
          console.log('non-rate-limiting error');
          console.log(returned);
        }
      } else {
        console.log('Success!');
        console.log(returned);
        // move uploaded files into completed folder
        fs.rename(`./${folder}/${filename}`, `./${options.completefolder}/${filename}`, (err) => {
          if (err) {
            console.log(err)
          } else {
            counter += 1;
            console.log(counter);
            if (counter === files.length) {
              console.log('done');
              shutDown();
            }
          }
        })
      }
    });
  });
};

// get all mp3s and upload all of them
const files = readDir.readSync(`./${options.folder}`, ['**.mp3']);
let counter = 0;
for (var i = 0; i < files.length; i++) {
  uploadFile(options.folder, files[i])
};
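A hedged guess at why the process exits silently after the wait: the retry callback above never actually calls uploadFile again; it only defines an arrow function and discards it, so when the timer fires nothing happens, and with no pending work left Node exits. The retry would presumably need to invoke the function, along the lines of:

// actually re-invoke uploadFile after the API's suggested delay
setTimeout(() => uploadFile(folder, filename), returned.error.retry_after * 1000);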
I'm running gulp as the build tool for my Node.js server. I have an issue with nodemon (a utility that monitors for changes in your source and automatically restarts your server): I'm not able to execute a gulp task to transform my ECMAScript 6 code before the server restarts.
This is my code. The nodemon documentation says that synchronous task execution is possible with Node version 0.12.0. My Node version is 0.12.6. Has anyone noticed the same problem or know how to fix it?
https://github.com/JacksonGariety/gulp-nodemon
var gulp = require('gulp');
var noDemon = require('nodemon');
var babel = require('gulp-babel');
var plumber = require('gulp-plumber');

gulp.task('server', ['transformES6'], function () {
  noDemon({
    script : 'server/dist/server.js',
    ext : 'js',
    env : {
      PORT : 8000
    },
    ignore : [
      './node_modules/**',
      './server/dist/**'
    ],
    tasks : ['transformES6']
  }).on('restart', function () {
    var now = new Date();
    console.log('server is restarting: ' + now.getHours() + ' Hours, ' + now.getMinutes() + ' Minutes, ' + now.getSeconds() + ' Seconds');
  });
});

gulp.task('transformES6', function () {
  return gulp.src(['./server/**/**/**/**/*.js', '!./server/dist/'])
    .pipe(plumber())
    .pipe(babel())
    .pipe(gulp.dest('./server/dist/'));
})
Thanks in advance!
Greetings Mauro
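A hedged observation on the snippet above: the tasks option is documented for gulp-nodemon (the package linked in the question), while the code requires the plain nodemon package, which may simply ignore it. A minimal sketch with the require swapped, reusing the transformES6 task from the question:

var gulp = require('gulp');
var nodemon = require('gulp-nodemon');  // note: gulp-nodemon, not plain nodemon

gulp.task('server', ['transformES6'], function () {
  nodemon({
    script: 'server/dist/server.js',
    ext: 'js',
    ignore: ['./node_modules/**', './server/dist/**'],
    tasks: ['transformES6']   // gulp tasks to run before the script restarts
  });
});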
So, I have been working on an upload script (code is below) that works on my Windows machine, but on my Linux server it seems to fail with:
Error: ENOENT, open 'uploads/da5ab67b48ea2deecd25127186017083'
I understand that the error is saying there is no such path/file, but I do check for the file's existence before I attempt the rename.
exports.product_files_upload = function (req, res) {
  if (req.files) {
    var tmpArray = [];
    for (var file in req.files) {
      console.log(file)
      if (file) {
        var splitName = file.split('-');
        if (splitName.length > 1) {
          var username = splitName[0];
          var productId = splitName[1];
          var index = splitName[2];
        } else {
          return;
        }
      } else {
        return;
      }
      //console.log(username)
      //console.log(productId)
      var tmp_path = req.files[file].path;
      console.log(fs.existsSync(tmp_path))
      createUserDir();
      createProductDirs(username, productId);
      //console.log(__dirname)
      var target_path = './public/user_files/' + username + '/products/' + productId + '/files/' + req.files[file].name,
          save_path = 'user_files/' + username + '/products/' + productId + '/files/' + req.files[file].name;
      if (fs.existsSync(target_path)) {
        tmpArray.push({ exists: true, name: req.files[file].name });
      } else {
        tmpArray.push({
          size: req.files[file].size,
          type: req.files[file].type,
          name: req.files[file].name,
          path: save_path,
          exists: false
        });
        if (fs.existsSync(tmp_path)) {
          fs.rename(tmp_path, target_path, function(err) {
            if (err) throw err;
            // delete the temporary file, so that the explicitly set temporary upload dir does not get filled with unwanted files
            fs.unlink(tmp_path, function() {
              if (err) throw err;
              // res.send(save_path);
            });
          });
        } else {
          tmpArray.push({
            size: req.files[file].size,
            type: req.files[file].type,
            name: req.files[file].name,
            path: save_path,
            exists: false
          });
        }
      }
    }
    res.json(tmpArray);
  }
};
UPDATE: I am using forever to run my Express app, and when I stopped the app's forever process and switched to just node app.js, the problem went away. This is not an acceptable fix, though. I am thinking there might be a problem with forever.
Okay, I figured it out. I was using forever with an absolute path to launch my app on my Linux box, like:
forever start /path/to/app.js
But when I cd into the app directory and then launch the app, it works:
forever start app.js
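A hedged note on why this happens: relative paths such as 'uploads/...' in the error above are resolved against the process's current working directory, which with forever start /path/to/app.js is wherever forever was invoked, not the app directory. Anchoring such paths to the script's own location removes that dependence on the working directory; a minimal sketch (the uploads directory name is taken from the error message, the rest is assumed):

const path = require('path')

// Resolve the uploads directory relative to this file instead of the
// current working directory, so it works no matter where forever is started.
const uploadsDir = path.join(__dirname, 'uploads')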