I used the following cluster code to fork multiple processes for my Node app:
if (cluster.isMaster) {
  require('os').cpus().forEach(function () {
    cluster.fork();
  });

  cluster.on('exit', function (worker, code, signal) {
    cluster.fork();
  });
} else if (cluster.isWorker) {
  logger.log.info('Worker server started on port %d (ID: %d, PID: %d)', app.get('port'), cluster.worker.id, cluster.worker.process.pid);
}
The output is:
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 1, PID: 606)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 2, PID: 607)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 5, PID: 610)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 3, PID: 608)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 4, PID: 609)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 6, PID: 611)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 8, PID: 613)
Thu Sep 05 2013 20:30:03 GMT-0700 (PDT) - info: Worker server started on port 3000 (ID: 7, PID: 612)
There are 8 worker processes, but when I checked the processes using pgrep, I saw 9:
$ pgrep -l node
613 node
612 node
611 node
610 node
609 node
608 node
607 node
606 node
605 node
So the one extra process must be the master process. How do I print out the master process IP?
Thanks
I posted another question related to this one; I think it might be useful for everyone to look at it as well:
Node.js cluster master process reboot after got kill & pgrep?
You can get the master process PID with process.pid inside if (cluster.isMaster). The IP and port are properties of your app, so those would be the same.
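For example, a minimal sketch (assuming the same logger and app.get('port') setup as in the question):

if (cluster.isMaster) {
  // process.pid here is the master's PID (the 605 in the pgrep output above)
  logger.log.info('Master server started on port %d (PID: %d)', app.get('port'), process.pid);

  require('os').cpus().forEach(function () {
    cluster.fork();
  });
}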
You can get the master (parent) PID from a worker with process.ppid.
This lets you send a signal to the master, which is useful for reloads without downtime.
For instance: process.kill(process.ppid, 'SIGHUP');
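A minimal sketch of that pattern (the SIGHUP handler body is illustrative; process.ppid is only available in newer Node versions):

if (cluster.isMaster) {
  process.on('SIGHUP', function () {
    // illustrative: fork fresh workers here, then disconnect the old ones
    console.log('Master %d received SIGHUP', process.pid);
  });
} else {
  // from a worker, process.ppid is the master (parent) PID
  process.kill(process.ppid, 'SIGHUP');
}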
My purpose is to expire a token after 1 hour (3600 seconds). While testing with Node.js speakeasy, the token gets invalidated much earlier than that. The logs below are for 1, 10 and 60 minutes, and even the 1-minute token gets invalidated well before the minute is up. Most of the time I get inconsistent results.
Partial code snippet:
let secret = speakeasy.generateSecret({
  length: 10
});

let seconds = 3600; // 1 hr

let token = speakeasy.totp({
  secret: secret.base32,
  step: seconds
});

let otp = {
  "secret": secret.base32.toString(),
  "token": token
};

function checkOTP(otp) {
  let verified = speakeasy.totp.verify({
    secret: otp.secret,
    token: otp.token,
    step: seconds
  });
  return verified;
}
Am I doing something wrong? A few console logs from a sample script:
For 1 minute - invalidated 18 secs before expiry:
[ Fri Dec 08 2017 09:16:18 GMT-0800 (Pacific Standard Time) ](true) 9:16:59 AM
[ Fri Dec 08 2017 09:16:18 GMT-0800 (Pacific Standard Time) ](false) 9:17:00 AM
For 10 mins - invalidated 7 minutes before expiry:
[ Fri Dec 08 2017 09:18:28 GMT-0800 (Pacific Standard Time) ](true) 9:19:59 AM
[ Fri Dec 08 2017 09:18:28 GMT-0800 (Pacific Standard Time) ](true) 9:19:59 AM
[ Fri Dec 08 2017 09:18:28 GMT-0800 (Pacific Standard Time) ](true) 9:19:59 AM
[ Fri Dec 08 2017 09:18:28 GMT-0800 (Pacific Standard Time) ](true) 9:19:59 AM
[ Fri Dec 08 2017 09:18:28 GMT-0800 (Pacific Standard Time) ](false) 9:20:00 AM
For 1 hr - invalidated 7 minutes before expiry:
[ Fri Dec 08 2017 11:07:01 GMT-0800 (Pacific Standard Time) ](true) 11:56:41 AM
[ Fri Dec 08 2017 11:07:01 GMT-0800 (Pacific Standard Time) ](true) 11:56:43 AM
[ Fri Dec 08 2017 11:07:01 GMT-0800 (Pacific Standard Time) ](false) 12:00:37 PM
What is the appropriate way to validate within the above window?
From the readme of speakeasy it looks like your token parameters are wrong:
var token = speakeasy.totp({
  secret: secret.base32,
  encoding: 'base32',
  time: 1453667708 // You have this as 'step', not 'time'
});
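For reference, a minimal sketch (assuming the speakeasy 2.x API described in its README) that keeps the encoding and step consistent between generation and verification; note that TOTP windows are aligned to the Unix epoch, not to the moment the token was generated:

var speakeasy = require('speakeasy');

var secret = speakeasy.generateSecret({ length: 20 });
var step = 3600; // windows run on epoch-aligned hour boundaries, not 1 hr from generation

var token = speakeasy.totp({
  secret: secret.base32,
  encoding: 'base32', // must match the encoding of the secret passed in
  step: step
});

var verified = speakeasy.totp.verify({
  secret: secret.base32,
  encoding: 'base32',
  token: token,
  step: step,
  window: 1 // also accept the adjacent window to tolerate boundary crossings
});

console.log(verified); // true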
I want to use GeoCouch, but it seems I can't get the setup right.
Setup:
Ubuntu 14.10
CouchDB 1.6.1, built from source
GeoCouch: I couldn't build it from the newvtree branch as indicated in the README, so I tried with the couchdb1.3.x branch as in this gist (+commentary) adapted to 1.6.1
The data comes from here and is loaded into a database with this design doc:
{
  "_id": "_design/geotest",
  "_rev": "7-6e930896b441ace3dc1d46ff1dd4f09e",
  "language": "javascript",
  "spatial": {
    "points": "function(doc){ if (doc.latitude && doc.longitude){emit([doc.latitude, doc.longitude], null)}}"
  }
}
curl $DB/_design/geotest/_spatial/points?bbox=46,16,48.2,16.4 gives me an "Empty reply from server", and this is what appears in the logs:
[Mon, 21 Sep 2015 18:55:12 GMT] [info] [<0.32.0>] Apache CouchDB has started on http://0.0.0.0:5984/
[Mon, 21 Sep 2015 18:57:40 GMT] [info] [<0.304.0>] Opening index for db: geoexample idx: _design/geotest sig: "ad4c001590440653d6856cc41edf57d5"
[Mon, 21 Sep 2015 18:57:40 GMT] [info] [<0.308.0>] Starting index update for db: geoexample idx: _design/geotest
[Mon, 21 Sep 2015 18:57:41 GMT] [error] [emulator] Error in process <0.314.0> with exit value: {function_clause,[{couch_spatial_updater,process_result,[[[3.317463e+01,-1.173577e+02],null]],[{file,"src/geocouch/couch_spatial_updater.erl"},{line,286}]},{couch_spatial_updater,'-merge_results/3-lc$^1/1-1-',1,[{file,"src/geocouch/couch_sp...
[Mon, 21 Sep 2015 18:57:41 GMT] [error] [<0.120.0>] {error_report,<0.31.0>,
{<0.120.0>,crash_report,
[[{initial_call,
{mochiweb_acceptor,init,
['Argument__1','Argument__2','Argument__3']}},
{pid,<0.120.0>},
{registered_name,[]},
{error_info,
{error,badarg,
[{erlang,list_to_binary,
[[{couch_spatial_updater,process_result,
[[[33.174628,-117.357673],null]],
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,286}]},
{couch_spatial_updater,
'-merge_results/3-lc$^1/1-1-',1,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,189}]},
{couch_spatial_updater,
'-merge_results/3-lc$^0/1-0-',1,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,189}]},
{couch_spatial_updater,merge_results,3,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,189}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1261}]},
{couch_spatial_updater,merge_results,4,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,180}]},
{couch_spatial_updater,write_results,2,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,155}]}]],
[]},
{couch_httpd,error_info,1,
[{file,"couch_httpd.erl"},{line,818}]},
{couch_httpd,send_error,2,
[{file,"couch_httpd.erl"},{line,925}]},
{couch_httpd,handle_request_int,5,
[{file,"couch_httpd.erl"},{line,353}]},
{mochiweb_http,headers,5,
[{file,"mochiweb_http.erl"},{line,94}]},
{proc_lib,init_p_do_apply,3,
[{file,"proc_lib.erl"},{line,239}]}]}},
{ancestors,
[couch_httpd,couch_secondary_services,
couch_server_sup,<0.32.0>]},
{messages,[]},
{links,[<0.104.0>,#Port<0.2715>]},
{dictionary,
[{mochiweb_request_qs,[{"bbox","46,16,48.2,16.4"}]},
{couch_rewrite_count,0},
{mochiweb_request_cookie,[]}]},
{trap_exit,false},
{status,running},
{heap_size,6772},
{stack_size,27},
{reductions,6588}],
[]]}}
I tried variations on this setup, but the closest answer I found for this kind of error suggests installing a version of CouchDB that is no longer even available on the Apache mirrors. Apparently some people did get GeoCouch working with CouchDB 1.6.1, at least on OS X, so I guess I'm just doing something wrong, but what? Thanks in advance for any clue.
If you use the couchdb1.3.x branch, you need to emit a GeoJSON geometry. So in your case that would be:
emit({"type": "Point", "coordinates": [doc.longitude, doc.latitude]}, null);
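Applied to the design doc above, the spatial function would then look something like this (note the [longitude, latitude] order used by GeoJSON):

function (doc) {
  if (doc.latitude && doc.longitude) {
    emit({ "type": "Point", "coordinates": [doc.longitude, doc.latitude] }, null);
  }
}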
I am trying to use the distributed executor service in Hazelcast 3.1 and find that I am unable to use submitToMember(task, member). In my example below, 10.69.108.60 is my local machine and 170.194.100.111 is my remote machine. I can get a return value in my future when the member is my local machine, but I get a TargetNotAMemberException when the member is the remote machine.
Below is the code:
public class DistExecutionTest {
    public static void main(String args[]) {
        DistributedExecutor dex = new DistributedExecutor();
        try {
            Member member = new MemberImpl(new Address("170.194.100.111", 5701), false);
            String msg;
            msg = dex.echoOnTheMember("Hey youuuu!", member);
            System.out.println(msg);
        } catch (UnknownHostException e1) {
            e1.printStackTrace();
        } catch (InterruptedException e) {
            e.printStackTrace();
        } catch (ExecutionException e) {
            e.printStackTrace();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
and
public class DistributedExecutor {
    Config config;
    NetworkConfig network;
    JoinConfig join;

    DistributedExecutor() {
        config = new Config();
        network = config.getNetworkConfig();
        // network.setPort(5701);
        join = network.getJoin();
        join.getMulticastConfig().setEnabled(false);
        join.getTcpIpConfig().addMember("170.194.100.111").addMember("10.69.108.60").setEnabled(true);
        network.getInterfaces().setEnabled(true).addInterface("170.194.100.*").addInterface("10.69.108.*");
    }

    public String echoOnTheMember(String input, Member member) throws Exception {
        Callable<String> task = new DistObject(input);
        HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
        IExecutorService executorService = hz.getExecutorService("default");
        Future<String> future = executorService.submitToMember(task, member);
        String distObjectResult = future.get();
        return distObjectResult;
    }
}
and
public class Echo implements Callable<String>, Serializable, HazelcastInstanceAware {
    private static final long serialVersionUID = -3164053990811643392L;
    String message = null;
    transient HazelcastInstance localInstance;

    public Echo(String msg) {
        message = msg;
    }

    @Override
    public String call() throws Exception {
        return localInstance.toString() + message;
    }

    @Override
    public void setHazelcastInstance(HazelcastInstance hazelcastInstance) {
        this.localInstance = hazelcastInstance;
    }
}
Here is the logging on the local machine:
Dec 17, 2013 1:03:20 PM com.hazelcast.instance.DefaultAddressPicker
INFO: Interfaces is enabled, trying to pick one address matching to one of: [162.124.194.*, 10.38.148.*]
Dec 17, 2013 1:03:20 PM com.hazelcast.instance.DefaultAddressPicker
INFO: Prefer IPv4 stack is true.
Dec 17, 2013 1:03:20 PM com.hazelcast.instance.DefaultAddressPicker
INFO: Picked Address[10.69.108.60]:5701, using socket ServerSocket[addr=/0:0:0:0:0:0:0:0,localport=5701], bind any local is true
Dec 17, 2013 1:03:21 PM com.hazelcast.system
INFO: [10.69.108.60]:5701 [dev] Hazelcast Community Edition 3.1 (20131011) starting at Address[10.69.108.60]:5701
Dec 17, 2013 1:03:21 PM com.hazelcast.system
INFO: [10.69.108.60]:5701 [dev] Copyright (C) 2008-2013 Hazelcast.com
Dec 17, 2013 1:03:21 PM com.hazelcast.instance.Node
INFO: [10.69.108.60]:5701 [dev] Creating TcpIpJoiner
Dec 17, 2013 1:03:21 PM com.hazelcast.core.LifecycleService
INFO: [10.69.108.60]:5701 [dev] Address[10.69.108.60]:5701 is STARTING
Dec 17, 2013 1:03:21 PM com.hazelcast.cluster.TcpIpJoiner
INFO: [10.69.108.60]:5701 [dev] Connecting to possible member: Address[10.69.108.60]:5703
Dec 17, 2013 1:03:21 PM com.hazelcast.cluster.TcpIpJoiner
INFO: [10.69.108.60]:5701 [dev] Connecting to possible member: Address[10.69.108.60]:5702
Dec 17, 2013 1:03:21 PM com.hazelcast.nio.SocketConnector
INFO: [10.69.108.60]:5701 [dev] Connecting to /10.69.108.60:5703, timeout: 0, bind-any: true
Dec 17, 2013 1:03:21 PM com.hazelcast.nio.SocketConnector
INFO: [10.69.108.60]:5701 [dev] Connecting to /10.69.108.60:5702, timeout: 0, bind-any: true
Dec 17, 2013 1:03:21 PM com.hazelcast.cluster.TcpIpJoiner
INFO: [10.69.108.60]:5701 [dev] Connecting to possible member: Address[170.194.100.111]:5703
Dec 17, 2013 1:03:21 PM com.hazelcast.cluster.TcpIpJoiner
INFO: [10.69.108.60]:5701 [dev] Connecting to possible member: Address[170.194.100.111]:5702
Dec 17, 2013 1:03:21 PM com.hazelcast.nio.SocketConnector
INFO: [10.69.108.60]:5701 [dev] Connecting to /170.194.100.111:5703, timeout: 0, bind-any: true
Dec 17, 2013 1:03:21 PM com.hazelcast.nio.SocketConnector
INFO: [10.69.108.60]:5701 [dev] Connecting to /170.194.100.111:5702, timeout: 0, bind-any: true
Dec 17, 2013 1:03:21 PM com.hazelcast.cluster.TcpIpJoiner
INFO: [10.69.108.60]:5701 [dev] Connecting to possible member: Address[170.194.100.111]:5701
Dec 17, 2013 1:03:21 PM com.hazelcast.nio.SocketConnector
INFO: [10.69.108.60]:5701 [dev] Connecting to /170.194.100.111:5701, timeout: 0, bind-any: true
Dec 17, 2013 1:03:22 PM com.hazelcast.nio.SocketConnector
INFO: [10.69.108.60]:5701 [dev] Could not connect to: /10.69.108.60:5703. Reason: SocketException[Connection refused: connect to address /10.69.108.60:5703]
Dec 17, 2013 1:03:22 PM com.hazelcast.nio.SocketConnector
INFO: [10.69.108.60]:5701 [dev] Could not connect to: /10.69.108.60:5702. Reason: SocketException[Connection refused: connect to address /10.69.108.60:5702]
Dec 17, 2013 1:03:22 PM com.hazelcast.nio.SocketConnector
INFO: [10.69.108.60]:5701 [dev] Could not connect to: /170.194.100.111:5703. Reason: SocketException[Connection refused: connect to address /170.194.100.111:5703]
Dec 17, 2013 1:03:22 PM com.hazelcast.nio.SocketConnector
INFO: [10.69.108.60]:5701 [dev] Could not connect to: /170.194.100.111:5702. Reason: SocketException[Connection refused: connect to address /170.194.100.111:5702]
Dec 17, 2013 1:03:22 PM com.hazelcast.nio.SocketConnector
INFO: [10.69.108.60]:5701 [dev] Could not connect to: /170.194.100.111:5701. Reason: SocketException[Connection refused: connect to address /170.194.100.111:5701]
Dec 17, 2013 1:03:23 PM com.hazelcast.cluster.TcpIpJoiner
INFO: [10.69.108.60]:5701 [dev]
Members [1] {
Member [10.69.108.60]:5701 this
}
Dec 17, 2013 1:03:23 PM com.hazelcast.core.LifecycleService
INFO: [10.69.108.60]:5701 [dev] Address[10.69.108.60]:5701 is STARTED
HazelcastInstance{name='_hzInstance_1_dev', node=Address[10.69.108.60]:5701}Hey youuuu!
The logging on the remote machine is along these lines. I couldn't paste all of it, but I managed to get the important part:
INFO: [10.69.108.60]:5701 [dev] Connecting to possible member: Address[10.38.148.60]:5703
Dec 17, 2013 1:03:21 PM com.hazelcast.cluster.TcpIpJoiner
INFO: [10.69.108.60]:5701 [dev] Connecting to possible member: Address[10.38.148.60]:5702
Dec 17, 2013 1:03:21 PM com.hazelcast.cluster.TcpIpJoiner
INFO: [10.69.108.60]:5701 [dev] Connecting to possible member: Address[10.38.148.60]:5701
Dec 17, 2013 1:03:21 PM com.hazelcast.cluster.TcpIpJoiner
INFO: [10.69.108.60]:5701 [dev] Connecting to possible member: Address[170.194.100.111]:5703
Dec 17, 2013 1:03:21 PM com.hazelcast.cluster.TcpIpJoiner
INFO: [10.69.108.60]:5701 [dev] Connecting to possible member: Address[170.194.100.111]:5702
Dec 17, 2013 1:03:21 PM com.hazelcast.nio.SocketConnector
Members [2] {
Member [10.69.108.60]:5701 this
Member [170.194.100.111]:5701
}
Instead of creating a member instance directly, could you get the member instance using the hz.getCluster().getMembers() method and select the one you want to send to? I want to see if it is caused by the way you are creating that member.
I can't format text in comments, so I'll put another answer.
So the problem you are suffering from is that your members don't form a cluster.
You should see logging like:
Members [2] {
Member [192.168.1.104]:5701 this
Member [192.168.1.104]:5702
}
That is why I need more logging than just your stack trace, which currently doesn't provide any more value. I need to see what Hazelcast says about joining other clusters.
I need to see something like this:
Dec 17, 2013 7:24:13 PM com.hazelcast.config.XmlConfigBuilder
INFO: Looking for hazelcast.xml config file in classpath.
Dec 17, 2013 7:24:13 PM com.hazelcast.config.XmlConfigBuilder
WARNING: Could not find hazelcast.xml in classpath.
Hazelcast will use hazelcast-default.xml config file in jar.
Dec 17, 2013 7:24:13 PM com.hazelcast.config.XmlConfigBuilder
INFO: Using configuration file /java/projects/Hazelcast/hazelcast/hazelcast/target/classes/hazelcast-default.xml in the classpath.
Dec 17, 2013 7:24:13 PM com.hazelcast.instance.DefaultAddressPicker
INFO: Prefer IPv4 stack is true.
Dec 17, 2013 7:24:13 PM com.hazelcast.instance.DefaultAddressPicker
INFO: Picked Address[192.168.1.102]:5701, using socket ServerSocket[addr=/0:0:0:0:0:0:0:0,localport=5701], bind any local is true
Dec 17, 2013 7:24:13 PM com.hazelcast.system
INFO: [192.168.1.102]:5701 [dev] [3.2-SNAPSHOT] Hazelcast Community Edition 3.2-SNAPSHOT (20131217) starting at Address[192.168.1.102]:5701
Dec 17, 2013 7:24:13 PM com.hazelcast.system
INFO: [192.168.1.102]:5701 [dev] [3.2-SNAPSHOT] Copyright (C) 2008-2013 Hazelcast.com
Dec 17, 2013 7:24:13 PM com.hazelcast.instance.Node
INFO: [192.168.1.102]:5701 [dev] [3.2-SNAPSHOT] Creating MulticastJoiner
Dec 17, 2013 7:24:13 PM com.hazelcast.core.LifecycleService
INFO: [192.168.1.102]:5701 [dev] [3.2-SNAPSHOT] Address[192.168.1.102]:5701 is STARTING
Dec 17, 2013 7:24:15 PM com.hazelcast.cluster.MulticastJoiner
INFO: [192.168.1.102]:5701 [dev] [3.2-SNAPSHOT]
Members [1] {
Member [192.168.1.102]:5701 this
}
Dec 17, 2013 7:24:16 PM com.hazelcast.core.LifecycleService
INFO: [192.168.1.102]:5701 [dev] [3.2-SNAPSHOT] Address[192.168.1.102]:5701 is STARTED
Dec 17, 2013 7:24:16 PM com.hazelcast.partition.PartitionService
INFO: [192.168.1.102]:5701 [dev] [3.2-SNAPSHOT] Initializing cluster partition table first arrangement...
I was also getting the same error.
It is due to this line in the echoOnTheMember function:
HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
It creates a new Hazelcast instance with the default (or given) config, and the member is then looked up in that new instance, where it is not actually present. That is why the TargetNotAMemberException is thrown.
To make it work properly, pass the already-created instance into echoOnTheMember, e.g. by making it a member variable of the DistributedExecutor class and setting it via the constructor.
Then, if your actual instance were 'abcdef', you would use:
IExecutorService executorService = abcdef.getExecutorService("default");
Do not create a new Hazelcast instance.
I am a beginner with MongoDB and I have a problem running it on the server.
My project is hosted on hostmonster.com servers, but they don't provide support for MongoDB databases, although they say I can install it at my own risk.
So I installed MongoDB 2.4.1 for Linux 64-bit without problems. Then, in the MongoDB bin folder (with mongo, mongod, mongodump ...), I created folders called 'data' and 'data/db' for some tests.
From a console, I connect to the server over SSH and run:
./mongod --dbpath 'data/db'
and it works.
But I need it to run automatically, forever.
I followed the steps from "Mongodb can't start" and ran the following line:
./mongod --fork --dbpath 'data/db' --smallfiles --logpath 'data/mongodb.log' --logappend
That also worked: it started the process, I closed the console, the process kept running, and I could view my data through my domain.
The problem is that the process dies within about a day, i.e. I can no longer see my data through my domain and need to run mongod again with:
./mongod --fork --dbpath 'data/db' --smallfiles --logpath 'data/mongodb.log' --logappend
I don't want to do this every day, so my questions are:
What may be the problem? Why does the mongod process die each day?
How can I run the process forever?
Sorry for my English.
Edit: Added the last error log. I don't understand it.
Fri Apr 12 03:19:34.577 [TTLMonitor] query local.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:0 keyUpdates:0 locks(micros) r:141663 nreturned:0 reslen:20 141ms
Fri Apr 12 03:19:34.789 [TTLMonitor] query users.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:3 keyUpdates:0 locks(micros) r:211595 nreturned:0 reslen:20 211ms
Fri Apr 12 03:20:57.869 [PeriodicTask::Runner] task: DBConnectionPool-cleaner took: 18215ms
Fri Apr 12 03:20:57.931 [PeriodicTask::Runner] task: WriteBackManager::cleaner took: 8ms
Fri Apr 12 03:22:14.155 [PeriodicTask::Runner] task: DBConnectionPool-cleaner took: 32ms
Fri Apr 12 03:22:14.215 [PeriodicTask::Runner] task: WriteBackManager::cleaner took: 14ms
Fri Apr 12 03:22:30.670 [TTLMonitor] query actarium.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:2 keyUpdates:0 locks(micros) r:430204 nreturned:0 reslen:20 430ms
Fri Apr 12 03:23:14.825 [PeriodicTask::Runner] task: DBConnectionPool-cleaner took: 7ms
Fri Apr 12 03:23:31.133 [TTLMonitor] query actarium.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:2 keyUpdates:0 locks(micros) r:179175 nreturned:0 reslen:20 168ms
Fri Apr 12 03:25:19.201 [PeriodicTask::Runner] task: WriteBackManager::cleaner took: 505ms
Fri Apr 12 03:25:23.370 [TTLMonitor] query local.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:0 keyUpdates:0 locks(micros) r:3604735 nreturned:0 reslen:20 3604ms
Fri Apr 12 03:25:25.294 [TTLMonitor] query users.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:3 keyUpdates:0 numYields: 1 locks(micros) r:3479328 nreturned:0 reslen:20 1882ms
Fri Apr 12 03:26:26.647 [TTLMonitor] query actarium.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:2 keyUpdates:0 numYields: 1 locks(micros) r:1764712 nreturned:0 reslen:20 1044ms
Fri Apr 12 04:09:27.804 [TTLMonitor] query actarium.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:2 keyUpdates:0 locks(micros) r:200919 nreturned:0 reslen:20 200ms
Fri Apr 12 04:43:54.002 got signal 15 (Terminated), will terminate after current cmd ends
Fri Apr 12 04:43:54.151 [interruptThread] now exiting
Fri Apr 12 04:43:54.151 dbexit:
Fri Apr 12 04:43:54.157 [interruptThread] shutdown: going to close listening sockets...
Fri Apr 12 04:43:54.160 [interruptThread] closing listening socket: 9
Fri Apr 12 04:43:54.160 [interruptThread] closing listening socket: 10
Fri Apr 12 04:43:54.160 [interruptThread] closing listening socket: 11
Fri Apr 12 04:43:54.160 [interruptThread] removing socket file: /tmp/mongodb-27017.sock
Fri Apr 12 04:43:54.160 [interruptThread] shutdown: going to flush diaglog...
Fri Apr 12 04:43:54.160 [interruptThread] shutdown: going to close sockets...
Fri Apr 12 04:43:54.176 [interruptThread] shutdown: waiting for fs preallocator...
Fri Apr 12 04:43:54.176 [interruptThread] shutdown: lock for final commit...
Fri Apr 12 04:43:54.176 [interruptThread] shutdown: final commit...
Fri Apr 12 04:43:54.176 [interruptThread] shutdown: closing all files...
Fri Apr 12 04:43:54.212 [interruptThread] closeAllFiles() finished
Fri Apr 12 04:43:54.220 [interruptThread] journalCleanup...
Fri Apr 12 04:43:54.246 [interruptThread] removeJournalFiles
Fri Apr 12 04:43:54.280 [interruptThread] error removing journal files
boost::filesystem::directory_iterator::construct: No such file or directory: "/home2/anuncio3/bin/mongodb-linux-x86_64-2.4.1/bin/data/db/journal"
Fri Apr 12 04:43:54.280 [interruptThread] error couldn't remove journal file during shutdown boost::filesystem::directory_iterator::construct: No such file or directory: "/home2/anuncio3/bin/mongodb-linux-x86_64-2.4.1/bin/data/db/journal"
Fri Apr 12 04:43:54.285 shutdown failed with exception
Fri Apr 12 04:43:54.285 dbexit: really exiting now
Your answer is here:
Fri Apr 12 04:43:54.002 got signal 15 (Terminated), will terminate after current cmd ends
Fri Apr 12 04:43:54.151 [interruptThread] now exiting
Your process is receiving signal 15, which is the default kill signal. It's possible that their systems are automatically killing long-running processes or something similar. If that is indeed what's happening, then your host would have to resolve that.
Additionally, these errors:
Fri Apr 12 04:43:54.280 [interruptThread] error removing journal files
boost::filesystem::directory_iterator::construct: No such file or directory: "/home2/anuncio3/bin/mongodb-linux-x86_64-2.4.1/bin/data/db/journal"
Fri Apr 12 04:43:54.280 [interruptThread] error couldn't remove journal file during shutdown boost::filesystem::directory_iterator::construct: No such file or directory: "/home2/anuncio3/bin/mongodb-linux-x86_64-2.4.1/bin/data/db/journal"
indicate that something is wrong with your install's data directory. The journal files either don't exist, or are going missing; if some process on the system is trying to clean things up, then it wouldn't surprise me if something is nuking your journal files.
I know this is an old question, but my experience might be helpful for other readers.
Based on my tests, they only let you run a program for about 5 minutes (sometimes a bit more) before killing it, so it's fairly useless to install MongoDB there unless you have a dedicated IP.
My Node.js app uses Express and Socket.IO and talks to MongoDB through Mongoose. All of these work fine with low CPU usage.
When I run the app with cluster, it works, but the CPU usage goes very high. Here is what I am doing:
var settings = require("./settings"),
    cluster = require('cluster');

cluster('./server')
  .use(cluster.logger('logs'))
  .use(cluster.stats())
  .use(cluster.pidfiles('pids'))
  .use(cluster.cli())
  .use(cluster.repl(8888))
  .listen(7777);
When I check master.log, I see:
[Fri, 21 Oct 2011 02:59:51 GMT] INFO master started
[Fri, 21 Oct 2011 02:59:53 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:53 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 02:59:54 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:54 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 02:59:56 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:56 GMT] INFO spawned worker 0
.....
[Fri, 21 Oct 2011 03:11:08 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 03:11:10 GMT] WARNING shutting down master
[Fri, 21 Oct 2011 03:12:07 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 03:12:07 GMT] INFO spawned worker 1
[Fri, 21 Oct 2011 03:12:07 GMT] INFO master started
[Fri, 21 Oct 2011 03:12:09 GMT] ERROR worker 1 died
[Fri, 21 Oct 2011 03:12:09 GMT] INFO spawned worker 1
[Fri, 21 Oct 2011 03:12:10 GMT] ERROR worker 1 died
[Fri, 21 Oct 2011 03:12:10 GMT] INFO spawned worker 1
In workers.access.log, I see all console messages, socket.io logs, etc.
In workers.error.log, I see the following error messages; it looks like something is wrong:
node.js:134
throw e; // process.nextTick error, or 'error' event on first tick
^
Error: EADDRINUSE, Address already in use
at HTTPServer._doListen (net.js:1106:5)
at net.js:1077:14
at Object.lookup (dns.js:153:45)
at HTTPServer.listen (net.js:1071:20)
at Object.<anonymous> (/cygdrive/c/HTML5/RENT/test/server/server.js:703:5)
at Module._compile (module.js:402:26)
at Object..js (module.js:408:10)
at Module.load (module.js:334:31)
at Function._load (module.js:293:12)
at require (module.js:346:19)
server.js:703 - points to app.listen(9999);
EDIT: server.js code
var express = require("express"),
    fs = require("fs"),
    form = require('connect-form'),
    app = module.exports = express.createServer(
        form({ keepExtensions: true })
    ),
    sys = require("sys"),
    RentModel = require("./rent_schema"),
    UserModel = require("./track_schema"),
    email = require("./email_connect"),
    SubscriptionModel = require("./subscription_schema"),
    io = require("socket.io"),
    fb = require('facebook-js'),
    Twitter = require('./Twitter_Analysis'),
    Foursquare = require('./Foursquare_Analysis'),
    YQL = require("yql"),
    settings = require("./settings");

//
var cluster = require('cluster');

cluster(app)
  .use(cluster.logger('logs'))
  .use(cluster.stats())
  .use(cluster.pidfiles('pids'))
  .use(cluster.cli())
  .use(cluster.debug())
  .use(cluster.repl(settings.ADMIN_PORT))
  .listen(settings.PORT);

socket = io.listen(app);

.....
.....

//app.listen(settings.PORT);
It looks like you're trying to bind your workers to the same port; that is crashing the workers, and cluster keeps restarting them, so you're in an infinite death cycle.
I'm not sure you need the app.listen(9999) in your server.js file; it is probably trying to bind port 9999 in every worker. See the examples in the cluster package for a good reference: https://github.com/LearnBoost/cluster/blob/master/examples/express.js
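A minimal sketch of the pattern from that example (Express 2.x-era API, as in the question; names and port are illustrative): the port is bound exactly once, by the cluster chain, not by app.listen():

var express = require('express'),
    cluster = require('cluster'); // the LearnBoost "cluster" npm package, not node core

var app = express.createServer();

app.get('/', function (req, res) {
  res.send('hello');
});

cluster(app)
  .use(cluster.logger('logs'))
  .use(cluster.pidfiles('pids'))
  .listen(7777); // the only place a port is bound; no separate app.listen(...)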