I am using PM2 with the following configuration:
module.exports = {
  apps: [{
    name: 'sandbox',
    script: 'index.js',
    args: ["PORT=8084", "--color"],
    instances: 1,
    autorestart: true,
    watch: 'index.js',
    out_file: "logs/out.log",
    node_args: "--trace-warnings"
  }]
};
Everything works well except that changes to index.js don't trigger a restart.
I have tried many things:
adding the absolute path in script and in watch
adding cwd with the absolute path
using variations in watch like ./index.js or ../ or ./ or true
removing autorestart
Additional info:
My app uses Express.
The status shows that watch is enabled:
│ status │ online │
│ name │ sandbox │
│ version │ 1.0.0 │
│ restarts │ 0 │
│ uptime │ 8m │
│ script path │ /var/www/api/index.js │
│ script args │ PORT=8084 --color │
│ error log path │ /home/ubuntu/.pm2/logs/sandbox-error-10.log │
│ out log path │ /var/www/api/logs/out-10.log │
│ pid path │ /home/ubuntu/.pm2/pids/sandbox-10.pid │
│ interpreter │ node │
│ interpreter args │ --trace-warnings │
│ script id │ 10 │
│ exec cwd │ /var/www/api │
│ exec mode │ cluster_mode │
│ node.js version │ 11.10.0 │
│ node env │ N/A │
│ watch & reload │ ✔ │
│ unstable restarts │ 0 │
│ created at │ 2019-11-30T10:45:14.704Z │
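One variant worth trying (a sketch only, not a confirmed fix; whether polling is needed depends on the filesystem) is the array form of watch combined with PM2's ignore_watch, watch_delay and watch_options settings, the last of which is passed through to the chokidar watcher. The remaining options from the original config are omitted for brevity:

module.exports = {
  apps: [{
    name: 'sandbox',
    script: 'index.js',
    // array form of watch instead of a single string
    watch: ['index.js'],
    // never restart on log or dependency changes
    ignore_watch: ['node_modules', 'logs'],
    // debounce restarts by one second
    watch_delay: 1000,
    // chokidar options; usePolling can help on network or virtualized filesystems
    watch_options: {
      usePolling: true
    }
  }]
};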
I'm trying to figure out why some requests to my Images API (usually the last ones) take over a minute to load, while the first ones are basically instantaneous. I've searched all over the internet but found no appropriate answer yet. I am using Google Cloud Storage for the images and Node.js on the server, which streams the image data to the browser chunk by chunk.
You can see what I mean by accessing the website (adult content, 18+):
https://divinasacompanhantes.com/
As you can see, some images just don't load properly. I'm worried because this website is expected to host thousands more profiles from all over the world.
I am using PM2 to manage the services on the server (2 GB of available memory). Here's the table:
┌─────┬─────────────────────┬─────────────┬─────────┬─────────┬──────────┬────────┬──────┬───────────┬──────────┬──────────┬──────────┬──────────┐
│ id │ name │ namespace │ version │ mode │ pid │ uptime │ ↺ │ status │ cpu │ mem │ user │ watching │
├─────┼─────────────────────┼─────────────┼─────────┼─────────┼──────────┼────────┼──────┼───────────┼──────────┼──────────┼──────────┼──────────┤
│ 7 │ ServiceAfiliado │ default │ 1.0.0 │ fork │ 31312 │ 20m │ 3 │ online │ 0% │ 55.9mb │ root │ disabled │
│ 0 │ ServiceAvaliacao │ default │ 1.0.0 │ fork │ 31249 │ 20m │ 3 │ online │ 0% │ 55.2mb │ root │ disabled │
│ 8 │ ServiceBlog │ default │ 1.0.0 │ fork │ 31330 │ 20m │ 3 │ online │ 0% │ 61.1mb │ root │ disabled │
│ 1 │ ServiceChat │ default │ 1.0.0 │ fork │ 31256 │ 20m │ 3 │ online │ 0% │ 57.3mb │ root │ disabled │
│ 9 │ ServiceConfig │ default │ 1.0.0 │ fork │ 31337 │ 20m │ 3 │ online │ 0% │ 56.2mb │ root │ disabled │
│ 10 │ ServiceImage │ default │ 1.0.0 │ fork │ 31904 │ 0s │ 13 │ online │ 0% │ 19.1mb │ root │ disabled │
│ 2 │ ServiceLead │ default │ 1.0.0 │ fork │ 31269 │ 20m │ 3 │ online │ 0% │ 54.8mb │ root │ disabled │
│ 3 │ ServiceMail │ default │ 1.0.0 │ fork │ 31276 │ 20m │ 3 │ online │ 0% │ 43.3mb │ root │ disabled │
│ 4 │ ServicePagamento │ default │ 1.0.0 │ fork │ 31289 │ 20m │ 3 │ online │ 0% │ 42.5mb │ root │ disabled │
│ 5 │ ServiceParceiro │ default │ 1.0.0 │ fork │ 31296 │ 20m │ 3 │ online │ 0% │ 60.1mb │ root │ disabled │
│ 6 │ ServicePerfil │ default │ 1.0.0 │ fork │ 31309 │ 20m │ 3 │ online │ 0% │ 69.7mb │ root │ disabled │
└─────┴─────────────────────┴─────────────┴─────────┴─────────┴──────────┴────────┴──────┴───────────┴──────────┴──────────┴──────────┴──────────┘
The route handling this specific request:
router.get('/image/:imageId', async function (req, res) {
    try {
        let imageId = req.param('imageId')
        let returnImage = await cloudController.getImageFromBucket('fotos_perfil', imageId)
        res.writeHead(200, {'Content-Type': 'image/jpg'});
        returnImage.on('data', (data) => {
            res.write(data)
        })
        returnImage.on('error', (error) => {
            res.status(400).send('Erro lendo a imagem')
            console.error(error)
        })
        returnImage.on('end', () => {
            res.end()
        })
    } catch (err) {
        res.status(500).send('Internal Server Error')
    }
})
And the associated controller:
async function getImageFromBucket(bucket, imageId) {
    return new Promise((resolve, reject) => {
        try {
            let imageInfo = storage.bucket(bucket).file(imageId).createReadStream()
            resolve(imageInfo)
        } catch (e) {
            reject(e)
        }
    })
}
Can anyone give me some ideas to solve this? I've read the official Google documentation, and the only tip there is to use fast-crc32c, with no clues on how to configure it...
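One thing that might simplify the streaming path (a sketch only, not a tested fix; the bucket name and route come from the question, everything else is an assumption) is to pipe the Cloud Storage read stream straight into the response instead of wiring up the data/end events by hand:

const express = require('express')
const { Storage } = require('@google-cloud/storage')

const router = express.Router()
const storage = new Storage()

router.get('/image/:imageId', (req, res) => {
    // req.params.imageId instead of the deprecated req.param()
    // createReadStream returns a readable stream; validation defaults to crc32c.
    // Installing the optional fast-crc32c package is supposed to speed that check up,
    // and validation: false disables it entirely (at the cost of integrity checks).
    const imageStream = storage
        .bucket('fotos_perfil')
        .file(req.params.imageId)
        .createReadStream({ validation: 'crc32c' })

    imageStream.on('error', (error) => {
        console.error(error)
        // only send an error status if nothing has been written yet
        if (!res.headersSent) res.status(404).send('Erro lendo a imagem')
        else res.end()
    })

    res.type('image/jpeg')
    // pipe() handles backpressure and ends the response when the stream ends
    imageStream.pipe(res)
})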
I have the following file
ecosystem.js:
module.exports = {
  apps: [
    {
      name: 'my-app',
      cwd: '/test,
      script: './myapp.js',
      instances: 'max', // match the number of CPUs on the machine
      exec_mode: 'cluster', // Run multiple child processes
      args: 'start',
      env: {
        NODE_ENV: 'production'
      }
    },
  ],
};
I expect to see a cluster of Node processes running, but PM2 seems to start it in fork mode, ignoring my settings entirely.
I start like this:
pm2 start ecosystem.js
Output:
Starting ecosystem.js in fork_mode (1 instance)
┌─────┬─────────────────────────┬─────────────┬─────────┬─────────┬──────────┬────────┬──────┬───────────┬──────────┬──────────┬──────────┬──────────┐
│ id │ name │ namespace │ version │ mode │ pid │ uptime │ ↺ │ status │ cpu │ mem │ user │ watching │
├─────┼─────────────────────────┼─────────────┼─────────┼─────────┼──────────┼────────┼──────┼───────────┼──────────┼──────────┼──────────┼──────────┤
│ 0 │ ecosystem │ default │ 1.0.0 │ fork │ 156879 │ 0s │ 0 │ online │ 0% │ 14.3mb │ -… │ disabled │
└─────┴─────────────────────────┴─────────────┴─────────┴─────────┴──────────┴────────┴──────┴───────────┴──────────┴──────────┴──────────┴──────────┘
What could be causing this?
Perhaps it's the missing closing quote on cwd: '/test,
If it's not that: I just stumbled on "Problem running express app with pm2 using ecosystem config file", which solved the issue for me.
The file name must end with .config.js.
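Putting both fixes together, the file would look something like this, renamed to ecosystem.config.js (everything else unchanged from the question):

module.exports = {
  apps: [
    {
      name: 'my-app',
      cwd: '/test',             // closing quote added
      script: './myapp.js',
      instances: 'max',         // match the number of CPUs on the machine
      exec_mode: 'cluster',     // run multiple child processes
      args: 'start',
      env: {
        NODE_ENV: 'production'
      }
    },
  ],
};

Then start it with: pm2 start ecosystem.config.js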
I'm currently trying to spawn a process inside my Node server to take a screenshot of the only screen attached to my Raspberry Pi, with this command:
var scrot = childProcess.spawn(path.join(__dirname, "bin", "scrot", "scrot"), [options.output]);
This command works on my local machine, but I get an exit code 2 when I try to run it on my headless Raspberry Pi under Debian. I suspect it is because my Node process is spawned at the beginning of the boot routine, before the X server has started.
The pstree command shows me this:
systemd─┬─avahi-daemon───avahi-daemon
├─bluetoothd
├─cron
├─2*[dbus-daemon]
├─dbus-launch
├─dhcpcd
├─hciattach
├─login───startx───xinit─┬─Xorg───{InputThread}
│ └─openbox─┬─openbox-autosta───sh───chromium-browse─┬─ch+
│ │ ├─ch+
│ │ ├─{A+
│ │ ├─{B+
│ │ ├─{C+
│ │ ├─{C+
│ │ ├─{C+
│ │ ├─{C+
│ │ ├─{C+
│ │ ├─{D+
│ │ ├─{N+
│ │ ├─2*+
│ │ ├─3*+
│ │ ├─{T+
│ │ ├─7*+
│ │ ├─{c+
│ │ ├─{e+
│ │ ├─{g+
│ │ ├─{i+
│ │ ├─{r+
│ │ └─{s+
│ └─ssh-agent
├─node───9*[{node}]
Is there a way to attach a child process to the X server's context?
Thanks for any help in advance,
C.
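One approach that may help (a sketch only; the :0 display number and the .Xauthority path are assumptions that depend on how X is started for the pi user) is to pass the DISPLAY and XAUTHORITY environment variables to the spawned process, so scrot can find the running X server:

var childProcess = require("child_process");
var path = require("path");

var scrot = childProcess.spawn(
    path.join(__dirname, "bin", "scrot", "scrot"),
    [options.output],                                // same output argument as in the question
    {
        env: Object.assign({}, process.env, {
            DISPLAY: ":0",                           // assumed display of the attached screen
            XAUTHORITY: "/home/pi/.Xauthority"       // assumed path; depends on the user running startx
        })
    }
);

scrot.on("close", function (code) {
    console.log("scrot exited with code", code);
});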
Guys, I need help with using Vue and webpack in my existing project.
I have built my website using a Node.js Express server along with EJS templates.
Actually, I want to completely change the front end of my website, and I want to use Vuetify as the primary frontend framework. But I have never set up webpack in an existing project. Can anyone please help me do it?
My project file structure is as follows:
├───includs
│ func.js
│ mailer.js
│ middlewares.js
│
├───models
│ BruteForceSchema.js
│ coupon.js
│ invoice.js
│ payment.js
│ product.js
│ services.js
│ statement.js
│ ticket.js
│ user.js
│
├───routs
│ │ auth.js
│ │ footer.js
│ │ index.js
│ │ products.js
│ │ profile.js
│ │
│ ├───admin
│ │ coupon.js
│ │ index.js
│ │ plans.js
│ │ recharge.js
│ │ services.js
│ │ tickets.js
│ │ users.js
│ │
│ └───clientarea
│ hostings.js
│ index.js
│ recharge.js
│ statement.js
│ tickets.js
│
└───views
│ 404.ejs
│ index.ejs
│
├───admin
│ │ activeService.ejs
│ │ editplan.ejs
│ │ editservice.ejs
│ │ index.ejs
│ │ newplan.ejs
│ │ plans.ejs
│ │ recharge.ejs
│ │ services.ejs
│ │ users.ejs
│ │ viewplan.ejs
│ │ viewservice.ejs
│ │
│ ├───coupon
│ │ addnew.ejs
│ │ edit.ejs
│ │ index.ejs
│ │ view.ejs
│ │
│ ├───tickets
│ │ index.ejs
│ │ view.ejs
│ │
│ └───users
│ editUser.ejs
│ userlist.ejs
│ viewUser.ejs
│
├───auth
│ login.ejs
│ newpassword.ejs
│ resetpwd.ejs
│ signup.ejs
│
├───clientarea
│ │ head.ejs
│ │ hostings.ejs
│ │ index.ejs
│ │ recharge.ejs
│ │ recharge_old.ejs
│ │ statement.ejs
│ │ viewhosting.ejs
│ │
│ └───payments
│ proceed.ejs
│
├───email
│ │ resetdone.ejs
│ │ welcome.ejs
│ │
│ └───searvice
│ renewDone.ejs
│ renewFailed.ejs
│
├───footer
│ aboutus.ejs
│ contactus.ejs
│ development.ejs
│ privacy.ejs
│ tos.ejs
│
├───inc
│ admincpsider.ejs
│ footer.ejs
│ func.ejs
│ header.ejs
│ usercpsider.ejs
│
├───products
│ buy.ejs
│ filehosting.ejs
│ index.ejs
│ info.ejs
│ shared-hosting.ejs
│ wordpress.ejs
│
├───profile
│ edit.ejs
│ index.ejs
│
└───tickets
index.ejs
newTicket.ejs
view.ejs
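One possible starting point for adding webpack and Vue on top of this Express + EJS structure (a sketch only; the client/ folder, the main.js entry and the public/js output directory are not part of the project above, they are assumptions) is to keep the server as-is, put the Vue/Vuetify code in its own folder, bundle it with webpack and vue-loader, and load the bundle from an EJS template:

// webpack.config.js (hypothetical layout: Vue code in client/, bundle written to public/js)
const path = require('path');
const { VueLoaderPlugin } = require('vue-loader');

module.exports = {
  mode: 'development',
  entry: './client/main.js',                     // hypothetical entry point that mounts the Vue app
  output: {
    path: path.resolve(__dirname, 'public/js'),  // assumed to be served via express.static('public')
    filename: 'bundle.js'
  },
  module: {
    rules: [
      { test: /\.vue$/, loader: 'vue-loader' },
      { test: /\.css$/, use: ['vue-style-loader', 'css-loader'] }
    ]
  },
  plugins: [new VueLoaderPlugin()]
};

An EJS view would then load the bundle with <script src="/js/bundle.js"></script> and provide an element for the Vue app to mount on. Vuetify adds its own style and font requirements on top of this, so treat it as a starting skeleton only.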
├───app
│ ├───modules
│ │ ├───asd
│ │ │ ├───angular
│ │ │ │ ├───src
│ │ │ │ └───test
│ │ │ ├───app
│ │ │ │ └───src
│ │ │ ├───base
│ │ │ │ └───src
│ │ │ │ └───client
│ │ │ ├───common
│ │ │ │ ├───json
│ │ │ │ │ ├───src
│ │ │ │ │ └───test
│ │ │ │ ├───src
│ │ │ │ └───test
│ │ │ └───test
│ │ ├───core
│ │ │ ├───base
│ │ │ │ ├───docs
│ │ │ │ └───src
│ │ │ ├───ui
│ │ │ │ ├───directives
│ │ │ │ │ └───src
│ │ │ │ │ └───bmbDropdownMenu
│ │ │ │ │ └───css
│ │ │ │ ├───directives.CategorizedList
│ │ │ │ │ └───docs
│ │ │ │ ├───directives.noResults
│ │ │ │ │ └───lang
│ │ │ │ └───directives.popover
│ │ │ │ └───docs
Is it possible to remove all files and directories inside "app/modules" except the "core" folder, using rimraf or another module?
I'm trying it like this (it doesn't work):
rimraf('./app/modules/(!(core)|**)/*.*', callback);
I spent a lot of time on this, only to figure out that rimraf doesn't support globbing.
I went with del instead, which supports the same format as gulp (an array of paths, including negated ones).
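For reference, a sketch of what that might look like with del (the patterns here are guesses based on the tree above):

var del = require('del');

// delete everything directly under app/modules except the core folder
del(['app/modules/*', '!app/modules/core']).then(function (deletedPaths) {
    console.log('Deleted:\n' + deletedPaths.join('\n'));
});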
var globby = require('globby');
var rimraf = require('rimraf');

// Collect everything directly under app/modules except the core folder,
// then remove each match with rimraf.
// onlyFiles: false so that directories are matched too (needed with newer globby versions).
globby(['app/modules/*', '!app/modules/core'], { onlyFiles: false })
    .then(function then(paths) {
        paths.forEach(function each(item) {
            rimraf.sync(item);
        });
    });
For more info, see the globby documentation
Credit: vladimir-starkov