spatial view fails with a function_clause error - couchdb

I want to use GeoCouch, but it seems I can't get the setup right.
Setup:
Ubuntu 14.10
CouchDB 1.6.1, built from source
GeoCouch: I couldn't build it from the newvtree branch as indicated in the README, so I tried with the couchdb1.3.x branch as in this gist (+commentary) adapted to 1.6.1
The data comes from here and is loaded into a db with this design doc:
{
  "_id": "_design/geotest",
  "_rev": "7-6e930896b441ace3dc1d46ff1dd4f09e",
  "language": "javascript",
  "spatial": {
    "points": "function(doc){ if (doc.latitude && doc.longitude){emit([doc.latitude, doc.longitude], null)}}"
  }
}
curl $DB/_design/geotest/_spatial/points?bbox=46,16,48.2,16.4 gives me an "Empty reply from server", and this is what appears in the logs:
[Mon, 21 Sep 2015 18:55:12 GMT] [info] [<0.32.0>] Apache CouchDB has started on http://0.0.0.0:5984/
[Mon, 21 Sep 2015 18:57:40 GMT] [info] [<0.304.0>] Opening index for db: geoexample idx: _design/geotest sig: "ad4c001590440653d6856cc41edf57d5"
[Mon, 21 Sep 2015 18:57:40 GMT] [info] [<0.308.0>] Starting index update for db: geoexample idx: _design/geotest
[Mon, 21 Sep 2015 18:57:41 GMT] [error] [emulator] Error in process <0.314.0> with exit value: {function_clause,[{couch_spatial_updater,process_result,[[[3.317463e+01,-1.173577e+02],null]],[{file,"src/geocouch/couch_spatial_updater.erl"},{line,286}]},{couch_spatial_updater,'-merge_results/3-lc$^1/1-1-',1,[{file,"src/geocouch/couch_sp...
[Mon, 21 Sep 2015 18:57:41 GMT] [error] [<0.120.0>] {error_report,<0.31.0>,
{<0.120.0>,crash_report,
[[{initial_call,
{mochiweb_acceptor,init,
['Argument__1','Argument__2','Argument__3']}},
{pid,<0.120.0>},
{registered_name,[]},
{error_info,
{error,badarg,
[{erlang,list_to_binary,
[[{couch_spatial_updater,process_result,
[[[33.174628,-117.357673],null]],
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,286}]},
{couch_spatial_updater,
'-merge_results/3-lc$^1/1-1-',1,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,189}]},
{couch_spatial_updater,
'-merge_results/3-lc$^0/1-0-',1,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,189}]},
{couch_spatial_updater,merge_results,3,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,189}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1261}]},
{couch_spatial_updater,merge_results,4,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,180}]},
{couch_spatial_updater,write_results,2,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,155}]}]],
[]},
{couch_httpd,error_info,1,
[{file,"couch_httpd.erl"},{line,818}]},
{couch_httpd,send_error,2,
[{file,"couch_httpd.erl"},{line,925}]},
{couch_httpd,handle_request_int,5,
[{file,"couch_httpd.erl"},{line,353}]},
{mochiweb_http,headers,5,
[{file,"mochiweb_http.erl"},{line,94}]},
{proc_lib,init_p_do_apply,3,
[{file,"proc_lib.erl"},{line,239}]}]}},
{ancestors,
[couch_httpd,couch_secondary_services,
couch_server_sup,<0.32.0>]},
{messages,[]},
{links,[<0.104.0>,#Port<0.2715>]},
{dictionary,
[{mochiweb_request_qs,[{"bbox","46,16,48.2,16.4"}]},
{couch_rewrite_count,0},
{mochiweb_request_cookie,[]}]},
{trap_exit,false},
{status,running},
{heap_size,6772},
{stack_size,27},
{reductions,6588}],
[]]}}
I tried variations on this setup, but the closest answer I found for this kind of error suggests installing a version of CouchDB that is no longer even available on the Apache mirrors, while apparently some people did get GeoCouch working with CouchDB 1.6.1, at least on OS X. So I guess I'm just doing something wrong, but what?!? Thanks in advance for any clue.

If you use the couchdb1.3.x branch, you need to emit a GeoJSON geometry. So in your case that would be:
emit({"type": "Point", "coordinates": [doc.longitude, doc.latitude]}, null);

Related

Strange Config file error

I am working on an application which requires AWS (Amazon Web Services) push notification services.
I am running this on a node server; when I run the curl HTTP request, I get this error:
Registering user with deviceId: nodeserver
{ [Error: Missing credentials in config]
message: 'Missing credentials in config',
code: 'SigningError',
time: Tue Nov 17 2015 13:52:20 GMT+0000 (GMT),
originalError:
{ message: 'Could not load credentials from any providers',
code: 'CredentialsError',
time: Tue Nov 17 2015 13:52:20 GMT+0000 (GMT),
originalError:
{ message: 'Connection timed out after 1000ms',
code: 'TimeoutError',
time: Tue Nov 17 2015 13:52:20 GMT+0000 (GMT) } } }
Does anybody know what this error is or if anybody has had this type of error before and knows how to go about fixing it?

Couch DB giving a weird exception on view changes

I have CouchDB views running on my local database. By some strange quirk, one of the views started producing an error report with the following content.
{error_report,<0.31.0>,
{<0.11889.0>,crash_report,
[[{initial_call,{couch_file,init,['Argument__1']}},
{pid,<0.11889.0>},
{registered_name,[]},
{error_info,
{exit,
{{badmatch,{error,eacces}},
[{couch_file,init,1},
{gen_server,init_it,6},
{proc_lib,init_p_do_apply,3}]},
[{gen_server,init_it,6},
{proc_lib,init_p_do_apply,3}]}},
{ancestors,[<0.11888.0>,<0.11887.0>]},
{messages,[]},
{links,[#Port<0.5466>,<0.11888.0>]},
{dictionary,[]},
{trap_exit,true},
{status,running},
{heap_size,377},
{stack_size,24},
{reductions,575}],
[{neighbour,
[{pid,<0.11887.0>},
{registered_name,[]},
{initial_call,{erlang,apply,2}},
{current_function,{proc_lib,sync_wait,2}},
{ancestors,[]},
{messages,[]},
{links,[<0.11869.0>,<0.11888.0>]},
{dictionary,[]},
{trap_exit,false},
{status,waiting},
{heap_size,233},
{stack_size,8},
{reductions,15}]},
{neighbour,
[{pid,<0.11888.0>},
{registered_name,[]},
{initial_call,{couch_index,init,['Argument__1']}},
{current_function,{proc_lib,sync_wait,2}},
{ancestors,[<0.11887.0>]},
{messages,
[{ack,<0.11889.0>,
{error,
{{badmatch,{error,eacces}},
[{couch_file,init,1},
{gen_server,init_it,6},
{proc_lib,init_p_do_apply,3}]}}}]},
{links,[<0.11887.0>,<0.11889.0>]},
{dictionary,[]},
{trap_exit,false},
{status,runnable},
{heap_size,4181},
{stack_size,33},
{reductions,1748}]}]]}}
[error] [<0.11869.0>] ** Generic server couch_index_server terminating
** Last message in was {'EXIT',<0.11887.0>,
{{badmatch,{error,eacces}},
[{couch_file,init,1},
{gen_server,init_it,6},
{proc_lib,init_p_do_apply,3}]}}
** When Server state == {st,"/var/lib/couchdb/1.3.0"}
** Reason for termination ==
** {{badmatch,{error,eacces}},
[{couch_file,init,1},{gen_server,init_it,6},{proc_lib,init_p_do_apply,3}]}
[Mon, 30 Sep 2013 06:03:40 GMT] [error] [<0.11869.0>] {error_report,<0.31.0>,
{<0.11869.0>,crash_report,
[[{initial_call,
{couch_index_server,init,['Argument__1']}},
{pid,<0.11869.0>},
{registered_name,couch_index_server},
{error_info,
{exit,
{{badmatch,{error,eacces}},
[{couch_file,init,1},
{gen_server,init_it,6},
{proc_lib,init_p_do_apply,3}]},
[{gen_server,terminate,6},
{proc_lib,init_p_do_apply,3}]}},
{ancestors,
[couch_secondary_services,couch_server_sup,
<0.32.0>]},
{messages,[]},
{links,[<0.11870.0>,<0.8687.0>]},
{dictionary,[]},
{trap_exit,true},
{status,running},
{heap_size,987},
{stack_size,24},
{reductions,495}],
[{neighbour,
[{pid,<0.11870.0>},
{registered_name,[]},
{initial_call,
{couch_event_sup,init,['Argument__1']}},
{current_function,{gen_server,loop,6}},
{ancestors,
[couch_index_server,
couch_secondary_services,
couch_server_sup,<0.32.0>]},
{messages,[]},
{links,[<0.11869.0>,<0.8682.0>]},
{dictionary,[]},
{trap_exit,false},
{status,waiting},
{heap_size,233},
{stack_size,9},
{reductions,32}]}]]}}
[Mon, 30 Sep 2013 06:03:40 GMT] [error] [<0.8687.0>] {error_report,<0.31.0>,
{<0.8687.0>,supervisor_report,
[{supervisor,{local,couch_secondary_services}},
{errorContext,child_terminated},
{reason,
{{badmatch,{error,eacces}},
[{couch_file,init,1},
{gen_server,init_it,6},
{proc_lib,init_p_do_apply,3}]}},
{offender,
[{pid,<0.11869.0>},
{name,index_server},
{mfargs,{couch_index_server,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]}]}}
[Mon, 30 Sep 2013 06:03:40 GMT] [error] [<0.11069.0>] Uncaught error in HTTP request: {exit,
{{{badmatch,
{error,eacces}},
[{couch_file,init,1},
{gen_server,
init_it,6},
{proc_lib,
init_p_do_apply,
3}]},
{gen_server,call,
[couch_index_server,
{get_index,
{couch_mrview_index,
{mrst,
<<117,203,135,163,
183,160,137,85,
158,17,190,127,
58,84,144,172>>,
nil,undefined,
<<"kri008">>,
<<"_design/order">>,
<<"javascript">>,
[],
{[]},
[{mrview,0,0,0,
[<<"getAllOpen">>],
[],
--------------------------------------------------------
[Mon, 30 Sep 2013 06:03:40 GMT] [info] [<0.11069.0>] Stacktrace: [{gen_server,call,3},
{couch_index_server,get_index,4},
{couch_mrview_util,get_view,4},
{couch_mrview,query_view,6},
{couch_httpd,etag_maybe,2},
{couch_mrview_http,design_doc_view,5},
{couch_httpd_db,do_db_req,2},
{couch_httpd,handle_request_int,5}]
To fix this, all I did was update the view source code by adding a space after a semicolon, and the error disappears. Adding a space, or any character that does not violate JavaScript syntax (e.g. adding a letter to a comment in the view source code), resolves the above error and CouchDB returns data. If I revert the change, it starts giving the above error again. Unfortunately, the change I make to drive out this error is meaningless. I tried restarting CouchDB and also checked the permissions on the CouchDB .couch files. Any help will be appreciated.
Update: this is the view I use to fetch the dataset from the DB:
function(doc) {
  if (doc.documentType == 'com.acme.Order' && (doc.lock != null)) {
    emit(doc.documentObject.createdTime, doc);
  }
}
Ok, let me take a wild guess. Are your documents rather big objects? I think you might be running into the timeout of the view server performing the mapdoc. I'd try changing your view not to emit the document as a value and retry, like this:
-emit(doc.documentObject.createdTime, doc);
+emit(doc.documentObject.createdTime, null);
This will save you a lot of disk space in the view index file and make the index much faster to build.
To get the document bodies back, add include_docs=true to your view query.
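To illustrate, a minimal sketch of the revised view and query, assuming the db, design doc, and view names (kri008, _design/order, getAllOpen) that appear in the log above:

function(doc) {
  if (doc.documentType == 'com.acme.Order' && (doc.lock != null)) {
    // emit a null value instead of the whole document body
    emit(doc.documentObject.createdTime, null);
  }
}

// Then query with include_docs=true to get the full documents back, e.g.:
// GET /kri008/_design/order/_view/getAllOpen?include_docs=true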

The MongoDB process is shutting down each day. How can I run mongod forever on the server?

I am a beginner with MongoDB and I have a problem running it on the server.
My project is hosted on hostmonster.com servers, but they don't offer support for MongoDB databases, although they say I can install it at my own risk.
So I installed MongoDB 2.4.1 for 64-bit Linux without problems, and then, inside the MongoDB bin folder (with: mongo, mongod, mongodump ...), I created the folders 'data' and 'data/db' for some tests.
From a console, I connect to the server over SSH and run
./mongod --dbpath 'data/db'
and it works.
But I need it to keep running automatically, forever.
I followed the steps from "Mongodb can't start" and ran the following line:
./mongod --fork --dbpath 'data/db' --smallfiles --logpath 'data/mongodb.log' --logappend
It also worked: it started the process, I closed the console, the process kept running, and I could view my data through my domain.
The problem is that the process dies after about a day, i.e. I can no longer see my data through my domain and I have to run mongod again with:
./mongod --fork --dbpath 'data/db' --smallfiles --logpath 'data/mongodb.log' --logappend
I don't want to do this every day. My questions are:
What might be the problem? Why does the mongod process die each day?
How can I keep the process running forever?
Sorry for my English.
Edit: adding the last error log. I don't understand it.
Fri Apr 12 03:19:34.577 [TTLMonitor] query local.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:0 keyUpdates:0 locks(micros) r:141663 nreturned:0 reslen:20 141ms
Fri Apr 12 03:19:34.789 [TTLMonitor] query users.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:3 keyUpdates:0 locks(micros) r:211595 nreturned:0 reslen:20 211ms
Fri Apr 12 03:20:57.869 [PeriodicTask::Runner] task: DBConnectionPool-cleaner took: 18215ms
Fri Apr 12 03:20:57.931 [PeriodicTask::Runner] task: WriteBackManager::cleaner took: 8ms
Fri Apr 12 03:22:14.155 [PeriodicTask::Runner] task: DBConnectionPool-cleaner took: 32ms
Fri Apr 12 03:22:14.215 [PeriodicTask::Runner] task: WriteBackManager::cleaner took: 14ms
Fri Apr 12 03:22:30.670 [TTLMonitor] query actarium.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:2 keyUpdates:0 locks(micros) r:430204 nreturned:0 reslen:20 430ms
Fri Apr 12 03:23:14.825 [PeriodicTask::Runner] task: DBConnectionPool-cleaner took: 7ms
Fri Apr 12 03:23:31.133 [TTLMonitor] query actarium.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:2 keyUpdates:0 locks(micros) r:179175 nreturned:0 reslen:20 168ms
Fri Apr 12 03:25:19.201 [PeriodicTask::Runner] task: WriteBackManager::cleaner took: 505ms
Fri Apr 12 03:25:23.370 [TTLMonitor] query local.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:0 keyUpdates:0 locks(micros) r:3604735 nreturned:0 reslen:20 3604ms
Fri Apr 12 03:25:25.294 [TTLMonitor] query users.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:3 keyUpdates:0 numYields: 1 locks(micros) r:3479328 nreturned:0 reslen:20 1882ms
Fri Apr 12 03:26:26.647 [TTLMonitor] query actarium.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:2 keyUpdates:0 numYields: 1 locks(micros) r:1764712 nreturned:0 reslen:20 1044ms
Fri Apr 12 04:09:27.804 [TTLMonitor] query actarium.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:2 keyUpdates:0 locks(micros) r:200919 nreturned:0 reslen:20 200ms
Fri Apr 12 04:43:54.002 got signal 15 (Terminated), will terminate after current cmd ends
Fri Apr 12 04:43:54.151 [interruptThread] now exiting
Fri Apr 12 04:43:54.151 dbexit:
Fri Apr 12 04:43:54.157 [interruptThread] shutdown: going to close listening sockets...
Fri Apr 12 04:43:54.160 [interruptThread] closing listening socket: 9
Fri Apr 12 04:43:54.160 [interruptThread] closing listening socket: 10
Fri Apr 12 04:43:54.160 [interruptThread] closing listening socket: 11
Fri Apr 12 04:43:54.160 [interruptThread] removing socket file: /tmp/mongodb-27017.sock
Fri Apr 12 04:43:54.160 [interruptThread] shutdown: going to flush diaglog...
Fri Apr 12 04:43:54.160 [interruptThread] shutdown: going to close sockets...
Fri Apr 12 04:43:54.176 [interruptThread] shutdown: waiting for fs preallocator...
Fri Apr 12 04:43:54.176 [interruptThread] shutdown: lock for final commit...
Fri Apr 12 04:43:54.176 [interruptThread] shutdown: final commit...
Fri Apr 12 04:43:54.176 [interruptThread] shutdown: closing all files...
Fri Apr 12 04:43:54.212 [interruptThread] closeAllFiles() finished
Fri Apr 12 04:43:54.220 [interruptThread] journalCleanup...
Fri Apr 12 04:43:54.246 [interruptThread] removeJournalFiles
Fri Apr 12 04:43:54.280 [interruptThread] error removing journal files
boost::filesystem::directory_iterator::construct: No such file or directory: "/home2/anuncio3/bin/mongodb-linux-x86_64-2.4.1/bin/data/db/journal"
Fri Apr 12 04:43:54.280 [interruptThread] error couldn't remove journal file during shutdown boost::filesystem::directory_iterator::construct: No such file or directory: "/home2/anuncio3/bin/mongodb-linux-x86_64-2.4.1/bin/data/db/journal"
Fri Apr 12 04:43:54.285 shutdown failed with exception
Fri Apr 12 04:43:54.285 dbexit: really exiting now
Your answer is here:
Fri Apr 12 04:43:54.002 got signal 15 (Terminated), will terminate after current cmd ends
Fri Apr 12 04:43:54.151 [interruptThread] now exiting
Your process is receiving signal 15, which is the default kill signal. It's possible that their systems are automatically killing long-running processes or something similar. If that is indeed what's happening, then your host would have to resolve that.
Additionally, these errors:
Fri Apr 12 04:43:54.280 [interruptThread] error removing journal files
boost::filesystem::directory_iterator::construct: No such file or directory: "/home2/anuncio3/bin/mongodb-linux-x86_64-2.4.1/bin/data/db/journal"
Fri Apr 12 04:43:54.280 [interruptThread] error couldn't remove journal file during shutdown boost::filesystem::directory_iterator::construct: No such file or directory: "/home2/anuncio3/bin/mongodb-linux-x86_64-2.4.1/bin/data/db/journal"
indicate that something is wrong with your install's data directory. The journal files either don't exist, or are going missing; if some process on the system is trying to clean things up, then it wouldn't surprise me if something is nuking your journal files.
I know this is an old question, but my experience might be helpful for other readers.
Based on my tests, they only let you run a program for about 5 minutes (sometimes a bit longer) before killing it, so it's fairly pointless to install MongoDB there unless you have a dedicated IP.

Error during couchdb filtered replication with params

I'm trying to run a filtered replication between two different machines. I realized that this only happens when doing a pull replication; if I do a push replication, it works fine.
curl -X POST http://localhost:5984/_replicate -d '{\"source\":\"http://MARTIN-NEWPC:5984/pdlib\",\"target\":\"pdlib\",\"filter\":\"replication/SINGLE_COLLECTION\",\"query_params\":{\"key\":\"bb579347-9bfb-4dda-84eb-622b43108872\"}}' -H "Content-Type: application/json"
The cryptic response I get from that request is:
{"error":"json_encode", "reason":"{bad_term, <0.20050.0>}"}
And the debug output in the target couchdb log file is:
[Mon, 17 Oct 2011 01:20:48 GMT] [debug] [<0.476.0>] 'GET' /pdlib/_changes?key=bb579347-9bfb-4dda-84eb-622b43108872&filter=replication/SINGLE_COLLECTION&style=all_docs&heartbeat=10000&since=0&feed=normal {1,
1}
Headers: [{'Accept',"application/json"},
{'Content-Length',"0"},
{'Host',"MARTIN-NEWPC:5984"},
{'User-Agent',"CouchDB/1.0.2"}]
[Mon, 17 Oct 2011 01:20:48 GMT] [debug] [<0.476.0>] OAuth Params: [{"key","bb579347-9bfb-4dda-84eb-622b43108872"},
{"filter","replication/SINGLE_COLLECTION"},
{"style","all_docs"},
{"heartbeat","10000"},
{"since","0"},
{"feed","normal"}]
[Mon, 17 Oct 2011 01:20:48 GMT] [info] [<0.476.0>] 192.168.2.3 - - 'GET' /pdlib/_changes?key=bb579347-9bfb-4dda-84eb-622b43108872&filter=replication/SINGLE_COLLECTION&style=all_docs&heartbeat=10000&since=0&feed=normal 200
[Mon, 17 Oct 2011 01:20:48 GMT] [error] [<0.476.0>] attempted upload of invalid JSON (set log_level to debug to log it)
[Mon, 17 Oct 2011 01:20:48 GMT] [debug] [<0.476.0>] Invalid JSON: <<"bb579347-9bfb-4dda-84eb-622b43108872">>
[Mon, 17 Oct 2011 01:20:48 GMT] [info] [<0.476.0>] 192.168.2.3 - - 'GET' /pdlib/_changes?key=bb579347-9bfb-4dda-84eb-622b43108872&filter=replication/SINGLE_COLLECTION&style=all_docs&heartbeat=10000&since=0&feed=normal 400
[Mon, 17 Oct 2011 01:20:48 GMT] [debug] [<0.476.0>] httpd 400 error response:
{"error":"bad_request","reason":"invalid UTF-8 JSON"}
In case you need to know, this is the filter function:
function (doc, req) {
  if (doc.type == 'collection' || doc.type == 'document') {
    for (var i in doc.path) {
      if (doc.path[i] == req.query.key) {
        return true;
      }
    }
  }
  return false;
}
Any ideas about the possible cause?
It's common to get a 400 "invalid UTF-8 JSON" error when CouchDB tries to interpret one of your query values as JSON when it's a raw (unquoted) string instead. In this case the replication config results in this HTTP request:
GET /pdlib/_changes?key=bb579347-9bfb-4dda-84eb-622b43108872&filter=replication/SINGLE_COLLECTION&style=all_docs&heartbeat=10000&since=0&feed=normal 400
The _changes feed itself doesn't use a key parameter, but normal CouchDB _view queries do (and there it is expected to be a JSON value!), so you might try renaming that query_param to something different.
(Somewhat unfortunately, user-defined filter (and list, etc.) functions share the query parameter namespace with CouchDB itself...you may want to prefix your custom parameters with something that's unlikely to conflict with current or future builtin options, e.g. myapp_key.)
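To make the suggestion concrete, here is a minimal sketch with the parameter renamed to the hypothetical myapp_key; the filter function and the replication request have to change together:

// Filter function in _design/replication, reading the renamed parameter:
function (doc, req) {
  if (doc.type == 'collection' || doc.type == 'document') {
    for (var i in doc.path) {
      if (doc.path[i] == req.query.myapp_key) {
        return true;
      }
    }
  }
  return false;
}

// Matching body for the _replicate request:
// {
//   "source": "http://MARTIN-NEWPC:5984/pdlib",
//   "target": "pdlib",
//   "filter": "replication/SINGLE_COLLECTION",
//   "query_params": { "myapp_key": "bb579347-9bfb-4dda-84eb-622b43108872" }
// }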
Looks to me like there is something wrong with the way you have your JSON escaped. This works for me:
curl -X POST http://localhost:5984/_replicate -d '{"source":"source_db","target":"target_db","filter":"ddoc/filter-name","query_params":{"key":"some_key"}}' -H "Content-Type: application/json"

node.js express cluster and high CPU usage

My node.js app uses express, socket.io and talks to MongoDB through mongoose. All of these work fine with low CPU usage.
When I run the app with cluster, it works, but the CPU usage goes really high. Here is what I am doing.
var settings = require("./settings"),
    cluster = require('cluster');

cluster('./server')
  .use(cluster.logger('logs'))
  .use(cluster.stats())
  .use(cluster.pidfiles('pids'))
  .use(cluster.cli())
  .use(cluster.repl(8888))
  .listen(7777);
When I check the master.log, I see
[Fri, 21 Oct 2011 02:59:51 GMT] INFO master started
[Fri, 21 Oct 2011 02:59:53 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:53 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 02:59:54 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:54 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 02:59:56 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:56 GMT] INFO spawned worker 0
.....
[Fri, 21 Oct 2011 03:11:08 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 03:11:10 GMT] WARNING shutting down master
[Fri, 21 Oct 2011 03:12:07 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 03:12:07 GMT] INFO spawned worker 1
[Fri, 21 Oct 2011 03:12:07 GMT] INFO master started
[Fri, 21 Oct 2011 03:12:09 GMT] ERROR worker 1 died
[Fri, 21 Oct 2011 03:12:09 GMT] INFO spawned worker 1
[Fri, 21 Oct 2011 03:12:10 GMT] ERROR worker 1 died
[Fri, 21 Oct 2011 03:12:10 GMT] INFO spawned worker 1
In workers.access.log, I see all console messages, socket.io logs etc...
In workers.error.log, I see the following error messages; it looks like something is wrong...
node.js:134
throw e; // process.nextTick error, or 'error' event on first tick
^
Error: EADDRINUSE, Address already in use
at HTTPServer._doListen (net.js:1106:5)
at net.js:1077:14
at Object.lookup (dns.js:153:45)
at HTTPServer.listen (net.js:1071:20)
at Object.<anonymous> (/cygdrive/c/HTML5/RENT/test/server/server.js:703:5)
at Module._compile (module.js:402:26)
at Object..js (module.js:408:10)
at Module.load (module.js:334:31)
at Function._load (module.js:293:12)
at require (module.js:346:19)
server.js:703 - points to app.listen(9999);
EDIT: server.js code
var express = require("express"),
    fs = require("fs"),
    form = require('connect-form'),
    app = module.exports = express.createServer(
      form({ keepExtensions: true })
    ),
    sys = require("sys"),
    RentModel = require("./rent_schema"),
    UserModel = require("./track_schema"),
    email = require("./email_connect"),
    SubscriptionModel = require("./subscription_schema"),
    io = require("socket.io"),
    fb = require('facebook-js'),
    Twitter = require('./Twitter_Analysis'),
    Foursquare = require('./Foursquare_Analysis'),
    YQL = require("yql"),
    settings = require("./settings");
//
var cluster = require('cluster');
cluster(app)
  .use(cluster.logger('logs'))
  .use(cluster.stats())
  .use(cluster.pidfiles('pids'))
  .use(cluster.cli())
  .use(cluster.debug())
  .use(cluster.repl(settings.ADMIN_PORT))
  .listen(settings.PORT);
socket = io.listen(app);
.....
.....
//app.listen(settings.PORT);
It looks like you're trying to bind all your workers to the same port; that crashes the workers, and cluster keeps restarting them, so you're in an infinite death cycle.
I'm not sure if you need the app.listen(9999) in your server.js file, which is probably trying to bind port 9999 in all your workers. See the examples in the cluster package for a good example: https://github.com/LearnBoost/cluster/blob/master/examples/express.js
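As a rough sketch in the style of that example (assuming the LearnBoost cluster package you are already using, and hypothetical file names): export the express app from server.js without ever calling app.listen(), and bind the port in exactly one place through cluster.

// server.js: create and export the app, but do NOT call app.listen() here
var express = require("express");
var app = module.exports = express.createServer();
// ... routes, socket.io setup, schemas, etc. ...

// cluster.js (or index.js): the single place where the port is bound
var cluster = require('cluster'),      // the LearnBoost "cluster" package
    app = require('./server'),
    settings = require('./settings');

cluster(app)
  .use(cluster.logger('logs'))
  .use(cluster.pidfiles('pids'))
  .listen(settings.PORT);              // only this call binds the port

That way the workers serve requests on the socket the master opened, instead of each worker racing to bind 9999 itself and dying with EADDRINUSE.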
