Hosting Node with Heroku - Internal Server Error

I'm trying to host a Node + Express app on Heroku, but when I try to render a view I get an "Internal Server Error".
The connection to Redis works fine.
app.configure(function () {
  app.set('views', __dirname + '/views');
  app.set('view engine', 'jade');
  app.use(express.bodyParser());
  app.use(express.methodOverride());
  app.use(app.router);
  app.use(express.static(__dirname + '/public'));
});
app.get('/', function (req, res) {
  redis.get('foo', function (err, value) {
    res.render('index', { title: 'My App', Order: value }); // <-- this line fails
    console.log(value);
  });
});
Loggly Logs:
2011 Nov 14 20:09:42.649 50.19.0.98 126: <40>1 2011-11-14T20:09:42+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf heroku web.1 - - State changed from bouncing to created
2011 Nov 14 20:09:42.669 50.19.0.98 126: <40>1 2011-11-14T20:09:42+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf heroku web.1 - - State changed from created to starting
2011 Nov 14 20:09:44.531 50.17.63.134 131: <40>1 2011-11-14T20:09:44+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf heroku web.1 - - Starting process with command `node app.js`
2011 Nov 14 20:09:44.913 50.17.63.134 123: <13>1 2011-11-14T20:09:44+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf app web.1 - - [36minfo -[39m socket.io started
2011 Nov 14 20:09:44.938 50.17.63.134 142: <13>1 2011-11-14T20:09:44+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf app web.1 - - Express server listening on port 27910 in production mode
2011 Nov 14 20:09:45.763 50.19.0.98 121: <40>1 2011-11-14T20:09:45+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf heroku web.1 - - State changed from starting to up
2011 Nov 14 20:09:53.510 184.73.5.216 121: <40>1 2011-11-14T20:09:53+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf heroku api - - Scale to web=1 by ****#gmail.com
2011 Nov 14 20:09:56.027 50.17.63.134 96: <13>1 2011-11-14T20:09:56+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf app web.1 - - 1,2,3,4,5,6
2011 Nov 14 20:09:56.029 50.17.63.134 281: <13>1 2011-11-14T20:09:56+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf app web.1 - - 10.108.67.105 - - [Mon, 14 Nov 2011 20:09:56 GMT] "GET / HTTP/1.1" 200 - "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2"
2011 Nov 14 20:09:56.029 50.17.63.134 97: <13>1 2011-11-14T20:09:56+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf app web.1 - - Served Index
2011 Nov 14 20:09:56.144 184.73.5.216 178: <158>1 2011-11-14T20:09:56+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf heroku router - - GET rtcubes.herokuapp.com/ dyno=web.1 queue=0 wait=11ms service=45ms status=200 bytes=21
2011 Nov 14 20:10:05.337 50.17.63.134 281: <13>1 2011-11-14T20:10:05+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf app web.1 - - 10.217.29.248 - - [Mon, 14 Nov 2011 20:10:05 GMT] "GET / HTTP/1.1" 200 - "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2"
2011 Nov 14 20:10:05.337 50.17.63.134 97: <13>1 2011-11-14T20:10:05+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf app web.1 - - Served Index
2011 Nov 14 20:10:05.337 50.17.63.134 96: <13>1 2011-11-14T20:10:05+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf app web.1 - - 1,2,3,4,5,6
2011 Nov 14 20:10:56.123 50.17.63.134 97: <13>1 2011-11-14T20:10:56+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf app web.1 - - Served Index
2011 Nov 14 20:10:56.125 50.17.63.134 281: <13>1 2011-11-14T20:10:56+00:00 d.0bd0cf5e-9521-4fd5-8d4a-b8fcf757e5bf app web.1 - - 10.217.17.204 - - [Mon, 14 Nov 2011 20:10:56 GMT] "GET / HTTP/1.1" 200 - "-

The way I fixed the issue: I deployed the app from my local machine using the Heroku CLI (rather than from GitHub, as I had done before).

I have fixed the issue: I destroyed the app and recreated it on Heroku, and it just worked. It must have been a configuration problem.
Thanks

I had a similar problem. It turned out I was requesting the wrong page. Fixed.
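Not the asker's eventual fix, but for anyone debugging the same symptom: Express answers "Internal Server Error" when res.render throws or reports an error (typically a missing view file or a template compile error), and on Heroku the underlying message only shows up if you log it. A minimal sketch of the same route with the error surfaced, assuming the callback-style node_redis client used above:

app.get('/', function (req, res, next) {
  redis.get('foo', function (err, value) {
    if (err) return next(err); // don't try to render after a failed Redis read
    res.render('index', { title: 'My App', Order: value }, function (renderErr, html) {
      if (renderErr) {
        console.error('render failed:', renderErr); // visible via `heroku logs`
        return next(renderErr);
      }
      res.send(html);
    });
  });
});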

Related

NodeJS converting Docker Redis hostname to localhost

It seems the Redis container's hostname is being converted to localhost by Node.js.
Here are my files:
.env
REDIS_HOST=redis-eventsystem
REDIS_PORT=6379
REDIS_SECRET=secret
index.ts
// there are things above this
let Redis = require('redis');
let client : any = Redis.createClient({
  host: process.env.REDIS_HOST,
  port: process.env.REDIS_PORT,
  legacyMode: true
});
client.on('error', (err : Error) : void => {
  console.error(
    `Redis connection error: ${err}`
  );
});
client.on('connect', () : void => {
  console.info(
    `Redis connection success.`
  );
});
client.connect();
// there are things below this
docker-compose.yml
version: '3.8'
services:
  eventsystem:
    image: eventsystem
    restart: always
    depends_on:
      - "redis-eventsystem"
    ports:
      - "80:3000"
    networks:
      - eventsystem
  redis-eventsystem:
    image: redis
    command: ["redis-server", "--bind", "redis-eventsystem", "--port", "6379", "--protected-mode", "no"]
    restart: always
    networks:
      - eventsystem
networks:
  eventsystem:
    driver: bridge
docker log
2022-11-21 17:50:41 eventsystem-redis-eventsystem-1 | 1:C 21 Nov 2022 20:50:41.106 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
2022-11-21 17:50:41 eventsystem-redis-eventsystem-1 | 1:C 21 Nov 2022 20:50:41.106 # Redis version=7.0.5, bits=64, commit=00000000, modified=0, pid=1, just started
2022-11-21 17:50:41 eventsystem-redis-eventsystem-1 | 1:C 21 Nov 2022 20:50:41.106 # Configuration loaded
2022-11-21 17:50:41 eventsystem-redis-eventsystem-1 | 1:M 21 Nov 2022 20:50:41.106 * monotonic clock: POSIX clock_gettime
2022-11-21 17:50:41 eventsystem-redis-eventsystem-1 | 1:M 21 Nov 2022 20:50:41.108 * Running mode=standalone, port=6379.
2022-11-21 17:50:41 eventsystem-redis-eventsystem-1 | 1:M 21 Nov 2022 20:50:41.108 # Server initialized
2022-11-21 17:50:41 eventsystem-redis-eventsystem-1 | 1:M 21 Nov 2022 20:50:41.108 * Ready to accept connections
2022-11-21 17:50:41 eventsystem-eventsystem-1 |
2022-11-21 17:50:41 eventsystem-eventsystem-1 | > eventsystem#1.0.0 start
2022-11-21 17:50:41 eventsystem-eventsystem-1 | > node index.js serve
2022-11-21 17:50:41 eventsystem-eventsystem-1 |
2022-11-21 17:50:42 eventsystem-eventsystem-1 | Application is listening at http://localhost:3000
2022-11-21 17:50:42 eventsystem-eventsystem-1 | Mon Nov 21 2022 20:50:42 GMT+0000 (Coordinated Universal Time) - Redis connection error: Error: connect ECONNREFUSED 127.0.0.1:6379
As you can see, the connection is refused for 127.0.0.1, even though in my application Redis is configured with the hostname of the container that runs the Redis server. I can't think of anything that may be causing this problem.
So, to answer my own question: the problem was the shape of the options object passed to createClient in my code.
In node-redis v4 the host and port have to be passed inside a socket object in the createClient options; top-level host and port keys are ignored, which is why the client fell back to the default 127.0.0.1:6379.
So, instead of passing host and port at the top level of the options object, you must do the following:
let client : any = Redis.createClient({
  socket: {
    host: process.env.REDIS_HOST,
    port: process.env.REDIS_PORT
  },
  legacyMode: true
});
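The same thing can also be written with a single connection URL, which node-redis v4 accepts as well (a sketch under that assumption, built from the same environment variables):

let client : any = Redis.createClient({
  // redis://host:port form instead of the socket object
  url: `redis://${process.env.REDIS_HOST}:${process.env.REDIS_PORT}`,
  legacyMode: true
});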
Hope this helps someone else besides me.
Cheers!

Can't disable the following express:* related logs under pm2?

I want to disable some logs (express:router) from an Express app that runs under pm2.
The current logs look like this:
GMT express:router dispatching GET /healthcheck
10|consumer | Wed, 10 Oct 2018 19:40:04 GMT express:router query : /healthcheck
10|consumer | Wed, 10 Oct 2018 19:40:04 GMT express:router expressInit : /healthcheck
10|consumer | Wed, 10 Oct 2018 19:40:04 GMT express:router corsMiddleware : /healthcheck
10|consumer | Wed, 10 Oct 2018 19:40:04 GMT express:router jsonParser : /healthcheck
10|consumer | Wed, 10 Oct 2018 19:40:04 GMT body-parser:json skip empty body
10|consumer | Wed, 10 Oct 2018 19:40:04 GMT express:router urlencodedParser : /healthcheck
10|consumer | Wed, 10 Oct 2018 19:40:04 GMT body-parser:urlencoded skip empty body
I passed /dev/null to error_file and out_file to disable log saving -
it doesn't work, I can still see the logs.
Running pm2 with pm2 start consumer -o "/dev/null" -e "/dev/null" - doesn't work.
Running it with DEBUG=express.* node index.js - didn't work either.
I managed to get it done by setting the following environment variable:
process.env.DEBUG=false
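A note on why placement matters (a sketch, assuming the app uses the standard debug package that Express depends on): debug reads process.env.DEBUG when it is first loaded, so the override has to run before express is required:

// set DEBUG to something that matches no namespace ('' or 'false' both work)
// BEFORE express and its 'debug' dependency are loaded
process.env.DEBUG = '';
var express = require('express'); // express:* output stays silent from here on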

WebStorm console info source?

While playing with Node.js in WebStorm I got to a point where 'suddenly' this output appeared. Until then I didn't have all the 'time .. express:router ..' lines:
"C:\Program Files (x86)\JetBrains\WebStorm 9.0.1\bin\runnerw.exe" "C:\Program Files\nodejs\node.exe" --debug-brk=49723 --nolazy srv.js
debugger listening on port 49723
Tue, 06 Jan 2015 11:15:06 GMT express:router:route new *
Express server listening 0.0.0.0:80
Tue, 06 Jan 2015 11:15:06 GMT express:router:layer new *
Tue, 06 Jan 2015 11:15:06 GMT express:router:route get *
Tue, 06 Jan 2015 11:15:06 GMT express:router:layer new /
Tue, 06 Jan 2015 11:15:06 GMT express:application compile etag weak
Tue, 06 Jan 2015 11:15:06 GMT express:application compile query parser extended
Tue, 06 Jan 2015 11:15:06 GMT express:application compile trust proxy false
Tue, 06 Jan 2015 11:15:06 GMT express:application booting in development mode
Tue, 06 Jan 2015 11:15:06 GMT express:router use / query
Tue, 06 Jan 2015 11:15:06 GMT express:router:layer new /
Tue, 06 Jan 2015 11:15:06 GMT express:router use / expressInit
I'm trying to understand which setting or function is responsible for this. My simple code:
'use strict';
var appEnv = 'development'; //'production'
var appPort = 80;
var path = require('path');
var http = require('http');
var express = require('express');
var bodyParser = require('body-parser');
var resource = require('express-resource');
var session = require('express-session');
// here the mentioned output appears first during step-by-step debugging:
var appRouter = require('./routes/index');
var user = require('./routes/user');
...
I guess it's less a Node question and more a WebStorm question, right? Probably there is a setting for this ... which I didn't find. Maybe there is a key combination I hit by mistake?
Thanks for any help!
It's WebStorm debugger console info. You might have clicked the "debug the project" option in your IDE.
These messages are logged by Express when running in debug mode. See http://expressjs.com/guide/debugging.html
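For context, Express's internal logging goes through the debug package, which prints a namespace's output only when the DEBUG environment variable matches it. A minimal sketch of the same mechanism (the myapp:router namespace is made up for illustration):

var debug = require('debug')('myapp:router'); // hypothetical namespace

debug('printed only when DEBUG=myapp:* or DEBUG=myapp:router is set');

WebStorm's Node.js run/debug configurations can set environment variables such as DEBUG, which would explain the lines appearing 'suddenly'.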

Node.js cluster get master PID

I used the following cluster code to fork multiple processes for my node app.
if (cluster.isMaster) {
  require('os').cpus().forEach(function () {
    cluster.fork();
  });
  cluster.on('exit', function (worker, code, signal) {
    cluster.fork();
  });
} else if (cluster.isWorker) {
  logger.log.info('Worker server started on port %d (ID: %d, PID: %d)', app.get('port'), cluster.worker.id, cluster.worker.process.pid);
}
the output is:
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 1, PID: 606)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 2, PID: 607)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 5, PID: 610)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 3, PID: 608)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 4, PID: 609)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 6, PID: 611)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 8, PID: 613)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 7, PID: 612)
There are 8 worker processes, but when I checked the processes with pgrep, I saw 9:
$ pgrep -l node
613 node
612 node
611 node
610 node
609 node
608 node
607 node
606 node
605 node
so the one extra process must be the master process - how do I print out the master process IP?
Thanks
I posted another question related to this one; I think it might be useful for everyone to look at this as well:
Node.js cluster master process reboot after got kill & pgrep?
You can get the master process pid with process.pid inside if(cluster.isMaster). IP and port are properties of your app so that would be the same.
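A minimal sketch of that, spliced into the question's code:

var cluster = require('cluster');

if (cluster.isMaster) {
  // the master logs its own pid before forking workers
  console.log('Master started (PID: %d)', process.pid);
  require('os').cpus().forEach(function () {
    cluster.fork();
  });
}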
You can get the master (parent) pid with process.ppid.
This will let you send a signal which is useful for reloads without downtime.
For instance process.kill(process.ppid, 'SIGHUP');
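A hypothetical sketch of that reload pattern (the signal choice and recycle strategy are illustrative; note the master must install a handler, otherwise SIGHUP just terminates it):

var cluster = require('cluster');

if (cluster.isMaster) {
  process.on('SIGHUP', function () {
    // recycle every worker; an 'exit' handler like the one in the
    // question forks a replacement for each worker that dies
    for (var id in cluster.workers) {
      cluster.workers[id].kill();
    }
  });
} else {
  // inside a worker: ask the master to recycle the workers
  process.kill(process.ppid, 'SIGHUP');
}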

node.js express cluster and high CPU usage

My node.js app uses express, socket.io and talks to mongodb through mongoose. All of these work fine with low CPU usage.
When I made the app run with cluster, it worked, but the CPU usage went very high. Here is what I am doing:
var settings = require("./settings"),
    cluster = require('cluster');

cluster('./server')
  .use(cluster.logger('logs'))
  .use(cluster.stats())
  .use(cluster.pidfiles('pids'))
  .use(cluster.cli())
  .use(cluster.repl(8888))
  .listen(7777);
When I check the master.log, I see
[Fri, 21 Oct 2011 02:59:51 GMT] INFO master started
[Fri, 21 Oct 2011 02:59:53 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:53 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 02:59:54 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:54 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 02:59:56 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:56 GMT] INFO spawned worker 0
.....
[Fri, 21 Oct 2011 03:11:08 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 03:11:10 GMT] WARNING shutting down master
[Fri, 21 Oct 2011 03:12:07 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 03:12:07 GMT] INFO spawned worker 1
[Fri, 21 Oct 2011 03:12:07 GMT] INFO master started
[Fri, 21 Oct 2011 03:12:09 GMT] ERROR worker 1 died
[Fri, 21 Oct 2011 03:12:09 GMT] INFO spawned worker 1
[Fri, 21 Oct 2011 03:12:10 GMT] ERROR worker 1 died
[Fri, 21 Oct 2011 03:12:10 GMT] INFO spawned worker 1
In workers.access.log, I see all the console messages, socket.io logs, etc.
In workers.error.log, I see the following error messages - it looks like something is wrong...
node.js:134
throw e; // process.nextTick error, or 'error' event on first tick
^
Error: EADDRINUSE, Address already in use
at HTTPServer._doListen (net.js:1106:5)
at net.js:1077:14
at Object.lookup (dns.js:153:45)
at HTTPServer.listen (net.js:1071:20)
at Object.<anonymous> (/cygdrive/c/HTML5/RENT/test/server/server.js:703:5)
at Module._compile (module.js:402:26)
at Object..js (module.js:408:10)
at Module.load (module.js:334:31)
at Function._load (module.js:293:12)
at require (module.js:346:19)
server.js:703 - points to app.listen(9999);
EDIT: server.js code
var express = require("express"),
    fs = require("fs"),
    form = require('connect-form'),
    app = module.exports = express.createServer(
      form({ keepExtensions: true })
    ),
    sys = require("sys"),
    RentModel = require("./rent_schema"),
    UserModel = require("./track_schema"),
    email = require("./email_connect"),
    SubscriptionModel = require("./subscription_schema"),
    io = require("socket.io"),
    fb = require('facebook-js'),
    Twitter = require('./Twitter_Analysis'),
    Foursquare = require('./Foursquare_Analysis'),
    YQL = require("yql"),
    settings = require("./settings");
//
var cluster = require('cluster');
cluster(app)
  .use(cluster.logger('logs'))
  .use(cluster.stats())
  .use(cluster.pidfiles('pids'))
  .use(cluster.cli())
  .use(cluster.debug())
  .use(cluster.repl(settings.ADMIN_PORT))
  .listen(settings.PORT);

socket = io.listen(app);
.....
.....
//app.listen(settings.PORT);
It looks like you're trying to bind your workers to the same port; that crashes the workers, and cluster keeps restarting them, so you're stuck in an infinite death cycle.
I'm not sure you need the app.listen(9999) in your server.js file - it is probably trying to bind port 9999 in every worker. See the examples in the cluster package for a good example: https://github.com/LearnBoost/cluster/blob/master/examples/express.js
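For comparison, a minimal sketch of the same setup on Node's built-in cluster module (not the LearnBoost cluster package used above): only the workers call listen(), and the module arranges for them to share the one port, so nothing is bound twice.

var cluster = require('cluster');
var http = require('http');

if (cluster.isMaster) {
  // the master only forks; it never binds a port itself
  require('os').cpus().forEach(function () {
    cluster.fork();
  });
} else {
  // each worker listens on the same port; the cluster module
  // shares the underlying socket between them
  http.createServer(function (req, res) {
    res.end('ok from worker ' + cluster.worker.id);
  }).listen(7777);
}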
