Couch DB giving a weird exception on view changes - couchdb

I have couch views running on my local database. By some strange quirks one of the view started sending an error report with the following content.
{error_report,<0.31.0>,
{<0.11889.0>,crash_report,
[[{initial_call,{couch_file,init,['Argument__1']}},
{pid,<0.11889.0>},
{registered_name,[]},
{error_info,
{exit,
{{badmatch,{error,eacces}},
[{couch_file,init,1},
{gen_server,init_it,6},
{proc_lib,init_p_do_apply,3}]},
[{gen_server,init_it,6},
{proc_lib,init_p_do_apply,3}]}},
{ancestors,[<0.11888.0>,<0.11887.0>]},
{messages,[]},
{links,[#Port<0.5466>,<0.11888.0>]},
{dictionary,[]},
{trap_exit,true},
{status,running},
{heap_size,377},
{stack_size,24},
{reductions,575}],
[{neighbour,
[{pid,<0.11887.0>},
{registered_name,[]},
{initial_call,{erlang,apply,2}},
{current_function,{proc_lib,sync_wait,2}},
{ancestors,[]},
{messages,[]},
{links,[<0.11869.0>,<0.11888.0>]},
{dictionary,[]},
{trap_exit,false},
{status,waiting},
{heap_size,233},
{stack_size,8},
{reductions,15}]},
{neighbour,
[{pid,<0.11888.0>},
{registered_name,[]},
{initial_call,{couch_index,init,['Argument__1']}},
{current_function,{proc_lib,sync_wait,2}},
{ancestors,[<0.11887.0>]},
{messages,
[{ack,<0.11889.0>,
{error,
{{badmatch,{error,eacces}},
[{couch_file,init,1},
{gen_server,init_it,6},
{proc_lib,init_p_do_apply,3}]}}}]},
{links,[<0.11887.0>,<0.11889.0>]},
{dictionary,[]},
{trap_exit,false},
{status,runnable},
{heap_size,4181},
{stack_size,33},
{reductions,1748}]}]]}}
[error] [<0.11869.0>] ** Generic server couch_index_server terminating
** Last message in was {'EXIT',<0.11887.0>,
{{badmatch,{error,eacces}},
[{couch_file,init,1},
{gen_server,init_it,6},
{proc_lib,init_p_do_apply,3}]}}
** When Server state == {st,"/var/lib/couchdb/1.3.0"}
** Reason for termination ==
** {{badmatch,{error,eacces}},
[{couch_file,init,1},{gen_server,init_it,6},{proc_lib,init_p_do_apply,3}]}
[Mon, 30 Sep 2013 06:03:40 GMT] [error] [<0.11869.0>] {error_report,<0.31.0>,
{<0.11869.0>,crash_report,
[[{initial_call,
{couch_index_server,init,['Argument__1']}},
{pid,<0.11869.0>},
{registered_name,couch_index_server},
{error_info,
{exit,
{{badmatch,{error,eacces}},
[{couch_file,init,1},
{gen_server,init_it,6},
{proc_lib,init_p_do_apply,3}]},
[{gen_server,terminate,6},
{proc_lib,init_p_do_apply,3}]}},
{ancestors,
[couch_secondary_services,couch_server_sup,
<0.32.0>]},
{messages,[]},
{links,[<0.11870.0>,<0.8687.0>]},
{dictionary,[]},
{trap_exit,true},
{status,running},
{heap_size,987},
{stack_size,24},
{reductions,495}],
[{neighbour,
[{pid,<0.11870.0>},
{registered_name,[]},
{initial_call,
{couch_event_sup,init,['Argument__1']}},
{current_function,{gen_server,loop,6}},
{ancestors,
[couch_index_server,
couch_secondary_services,
couch_server_sup,<0.32.0>]},
{messages,[]},
{links,[<0.11869.0>,<0.8682.0>]},
{dictionary,[]},
{trap_exit,false},
{status,waiting},
{heap_size,233},
{stack_size,9},
{reductions,32}]}]]}}
[Mon, 30 Sep 2013 06:03:40 GMT] [error] [<0.8687.0>] {error_report,<0.31.0>,
{<0.8687.0>,supervisor_report,
[{supervisor,{local,couch_secondary_services}},
{errorContext,child_terminated},
{reason,
{{badmatch,{error,eacces}},
[{couch_file,init,1},
{gen_server,init_it,6},
{proc_lib,init_p_do_apply,3}]}},
{offender,
[{pid,<0.11869.0>},
{name,index_server},
{mfargs,{couch_index_server,start_link,[]}},
{restart_type,permanent},
{shutdown,brutal_kill},
{child_type,worker}]}]}}
[Mon, 30 Sep 2013 06:03:40 GMT] [error] [<0.11069.0>] Uncaught error in HTTP request: {exit,
{{{badmatch,
{error,eacces}},
[{couch_file,init,1},
{gen_server,
init_it,6},
{proc_lib,
init_p_do_apply,
3}]},
{gen_server,call,
[couch_index_server,
{get_index,
{couch_mrview_index,
{mrst,
<<117,203,135,163,
183,160,137,85,
158,17,190,127,
58,84,144,172>>,
nil,undefined,
<<"kri008">>,
<<"_design/order">>,
<<"javascript">>,
[],
{[]},
[{mrview,0,0,0,
[<<"getAllOpen">>],
[],
--------------------------------------------------------
[Mon, 30 Sep 2013 06:03:40 GMT] [info] [<0.11069.0>] Stacktrace: [{gen_server,call,3},
{couch_index_server,get_index,4},
{couch_mrview_util,get_view,4},
{couch_mrview,query_view,6},
{couch_httpd,etag_maybe,2},
{couch_mrview_http,design_doc_view,5},
{couch_httpd_db,do_db_req,2},
{couch_httpd,handle_request_int,5}]
To fix this, all I did was update the view source code by adding a space after a semicolon, and the error disappears. Adding a space or any character that does not violate JavaScript syntax (e.g. adding a letter to a comment in the view source code) will resolve the above error and CouchDB returns data. If I revert the change, it will again start giving the above error. Unfortunately the change I make to drive out this error serves no purpose. I tried restarting CouchDB and also checked the permissions on the CouchDB .couch files. Any help will be appreciated.
Update: This is the view content I used to fetch the dataset from the DB
function(doc) {
if (doc.documentType == 'com.acme.Order' && (doc.lock != null)) {
emit(doc.documentObject.createdTime, doc);
}
}

Ok, let me take a wild guess. Are your documents rather big objects? I think you might be running into the timeout of the view server performing the mapdoc. I'd try changing your view not to emit the document as a value and retry, like this:
-emit(doc.documentObject.createdTime, doc);
+emit(doc.documentObject.createdTime, null);
This will save you a lot of disk space for the view index file and make it way faster to create.
To get the document bodies add include_docs=true to your view query.

Related

Get-WinEvent output is truncated

I want to get the records from the Event file but it keeps truncating the messages.
Get-WinEvent -LogName 'Microsoft-AppV-Client/Admin' -MaxEvents 5
TimeCreated Id LevelDisplayName Message
10/21/2021 2:29:20 PM 19102 Error Getting server publishing data failed....
10/21/2021 2:29:20 PM 19203 Error HttpRequest sendRequest failed....
10/21/2021 2:29:05 PM 19102 Error Getting server publishing data failed....
10/21/2021 2:29:05 PM 19203 Error HttpRequest sendRequest failed....
10/21/2021 2:28:50 PM 19102 Error Getting server publishing data failed....

Getting `invalid_token` error for a seemingly valid `/messages` call

I am getting a strange invalid_token error when doing GET on https://outlook.office.com/api/v2.0/me/messages even though the access_token I supply is fresh and valid.
#<Faraday::Response:0x007f842a55d9c0 #on_complete_callbacks=[], #env=#<Faraday::Env #method=:get #body="" #url=#<URI::HTTPS https://outlook.office.com/api/v2.0/me/messages> #request=#<Faraday::RequestOptions (empty)> #request_headers={"User-Agent"=>"Faraday v0.9.2", "Authorization"=>"Bearer XXXXX_my_access_code_goes_hereXXXXX"} #ssl=#<Faraday::SSLOptions verify=true> #response=#<Faraday::Response:0x007f842a55d9c0 ...> #response_headers={"content-length"=>"0", "server"=>"Microsoft-IIS/8.5", "set-cookie"=>"exchangecookie=da79bef43acf4d1aa5f0bb00988f6629; expires=Sat, 09-Sep-2017 18:34:51 GMT; path=/; HttpOnly", "www-authenticate"=>"Bearer client_id=\"my_client_id\", trusted_issuers=\"00000001-0000-0000-c000-000000000000#*\", token_types=\"app_asserted_user_v1 service_asserted_app_v1\", authorization_uri=\"https://login.windows.net/common/oauth2/authorize\", error=\"invalid_token\",Basic Realm=\"\",Basic Realm=\"\",Basic Realm=\"\"", "request-id"=>"28ed7077-b92c-470a-b062-0f5f2a54d74a", "x-calculatedfetarget"=>"DM3PR12CU001.internal.outlook.com", "x-backendhttpstatus"=>"401, 401", "x-feproxyinfo"=>"DM3PR12CA0039.NAMPRD12.PROD.OUTLOOK.COM", "x-calculatedbetarget"=>"DM2PR12MB0315.namprd12.prod.outlook.com", "x-ms-diagnostics"=>"2000010;reason=\"ErrorCode: 'PP_E_RPS_CERT_NOT_FOUND'. Message: 'Certificate cannot be found. Certificate required for the operation cannot be found.%0d%0a Internal error: spRPSTicket->ProcessToken failed.'\";error_category=\"invalid_msa_ticket\"", "x-diaginfo"=>"DM2PR12MB0315", "x-beserver"=>"DM2PR12MB0315", "x-feserver"=>"DM3PR12CA0039, BY1PR13CA0015", "x-powered-by"=>"ASP.NET", "x-msedge-ref"=>"Ref A: A8A103D34AD84EC089A59EFDA0AF5385 Ref B: 740314A1C3A73205090D30CCB559AAA0 Ref C: Fri Sep 9 11:34:51 2016 PST", "date"=>"Fri, 09 Sep 2016 18:34:50 GMT", "connection"=>"close"} #status=401>>
I found the answer. Apparently, scope https://graph.microsoft.com/mail.read (that I was requesting) is not the same as https://outlook.office.com/mail.read. Once I switched over to using outlook.office.com in the scope, my API call worked too.

spatial view fails with a function_clause error

I want to use GeoCouch, but it seems I can't get the setup right.
setup:
Ubuntu 14.10
CouchDB 1.6.1, built from source
GeoCouch: I couldn't build it from the newvtree branch as indicated in the README, so I tried with the couchdb1.3.x branch as in this gist (+commentary) adapted to 1.6.1
the data comes from here and is passed in a db with this design doc:
{
"_id": "_design/geotest",
"_rev": "7-6e930896b441ace3dc1d46ff1dd4f09e",
"language": "javascript",
"spatial": {
"points": "function(doc){ if (doc.latitude && doc.longitude){emit([doc.latitude, doc.longitude], null)}}"
}
}
curl $DB/_design/geotest/_spatial/points?bbox=46,16,48.2,16.4 gives me an Empty reply from server while this is what appears in the logs:
[Mon, 21 Sep 2015 18:55:12 GMT] [info] [<0.32.0>] Apache CouchDB has started on http://0.0.0.0:5984/
[Mon, 21 Sep 2015 18:57:40 GMT] [info] [<0.304.0>] Opening index for db: geoexample idx: _design/geotest sig: "ad4c001590440653d6856cc41edf57d5"
[Mon, 21 Sep 2015 18:57:40 GMT] [info] [<0.308.0>] Starting index update for db: geoexample idx: _design/geotest
[Mon, 21 Sep 2015 18:57:41 GMT] [error] [emulator] Error in process <0.314.0> with exit value: {function_clause,[{couch_spatial_updater,process_result,[[[3.317463e+01,-1.173577e+02],null]],[{file,"src/geocouch/couch_spatial_updater.erl"},{line,286}]},{couch_spatial_updater,'-merge_results/3-lc$^1/1-1-',1,[{file,"src/geocouch/couch_sp...
[Mon, 21 Sep 2015 18:57:41 GMT] [error] [<0.120.0>] {error_report,<0.31.0>,
{<0.120.0>,crash_report,
[[{initial_call,
{mochiweb_acceptor,init,
['Argument__1','Argument__2','Argument__3']}},
{pid,<0.120.0>},
{registered_name,[]},
{error_info,
{error,badarg,
[{erlang,list_to_binary,
[[{couch_spatial_updater,process_result,
[[[33.174628,-117.357673],null]],
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,286}]},
{couch_spatial_updater,
'-merge_results/3-lc$^1/1-1-',1,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,189}]},
{couch_spatial_updater,
'-merge_results/3-lc$^0/1-0-',1,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,189}]},
{couch_spatial_updater,merge_results,3,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,189}]},
{lists,foldl,3,[{file,"lists.erl"},{line,1261}]},
{couch_spatial_updater,merge_results,4,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,180}]},
{couch_spatial_updater,write_results,2,
[{file,
"src/geocouch/couch_spatial_updater.erl"},
{line,155}]}]],
[]},
{couch_httpd,error_info,1,
[{file,"couch_httpd.erl"},{line,818}]},
{couch_httpd,send_error,2,
[{file,"couch_httpd.erl"},{line,925}]},
{couch_httpd,handle_request_int,5,
[{file,"couch_httpd.erl"},{line,353}]},
{mochiweb_http,headers,5,
[{file,"mochiweb_http.erl"},{line,94}]},
{proc_lib,init_p_do_apply,3,
[{file,"proc_lib.erl"},{line,239}]}]}},
{ancestors,
[couch_httpd,couch_secondary_services,
couch_server_sup,<0.32.0>]},
{messages,[]},
{links,[<0.104.0>,#Port<0.2715>]},
{dictionary,
[{mochiweb_request_qs,[{"bbox","46,16,48.2,16.4"}]},
{couch_rewrite_count,0},
{mochiweb_request_cookie,[]}]},
{trap_exit,false},
{status,running},
{heap_size,6772},
{stack_size,27},
{reductions,6588}],
[]]}}
I tried variations on this setup, but the closest answer I found related to this kind of error suggests installing a version of CouchDB that is no longer even available on the Apache mirrors. Apparently some people did make GeoCouch work with CouchDB 1.6.1, at least on OS X, so I guess I am just doing something wrong — but what? Thanks in advance for any clue.
If you use the couchdb1.3.x branch, you need to emit a GeoJSON geometry. So in your case that would be:
emit({"type": "Point", "coordinates": [doc.longitude, doc.latitude]}, null);

Error during couchdb filtered replication with params

I'm trying to run a filtered replication on two different machines, I realized that this only happens when doing a pull replication, if I do a push replication it works fine.
curl -X POST http://localhost:5984/_replicate -d '{\"source\":\"http://MARTIN-NEWPC:5984/pdlib\",\"target\":\"pdlib\",\"filter\":\"replication/SINGLE_COLLECTION\",\"query_params\":{\"key\":\"bb579347-9bfb-4dda-84eb-622b43108872\"}}' -H "Content-Type: application/json"
The cryptic response I get from that request is:
{"error":"json_encode", "reason":"{bad_term, <0.20050.0>}"}
And the debug output in the target couchdb log file is:
[Mon, 17 Oct 2011 01:20:48 GMT] [debug] [<0.476.0>] 'GET' /pdlib/_changes?key=bb579347-9bfb-4dda-84eb-622b43108872&filter=replication/SINGLE_COLLECTION&style=all_docs&heartbeat=10000&since=0&feed=normal {1,
1}
Headers: [{'Accept',"application/json"},
{'Content-Length',"0"},
{'Host',"MARTIN-NEWPC:5984"},
{'User-Agent',"CouchDB/1.0.2"}]
[Mon, 17 Oct 2011 01:20:48 GMT] [debug] [<0.476.0>] OAuth Params: [{"key","bb579347-9bfb-4dda-84eb-622b43108872"},
{"filter","replication/SINGLE_COLLECTION"},
{"style","all_docs"},
{"heartbeat","10000"},
{"since","0"},
{"feed","normal"}]
[Mon, 17 Oct 2011 01:20:48 GMT] [info] [<0.476.0>] 192.168.2.3 - - 'GET' /pdlib/_changes?key=bb579347-9bfb-4dda-84eb-622b43108872&filter=replication/SINGLE_COLLECTION&style=all_docs&heartbeat=10000&since=0&feed=normal 200
[Mon, 17 Oct 2011 01:20:48 GMT] [error] [<0.476.0>] attempted upload of invalid JSON (set log_level to debug to log it)
[Mon, 17 Oct 2011 01:20:48 GMT] [debug] [<0.476.0>] Invalid JSON: <<"bb579347-9bfb-4dda-84eb-622b43108872">>
[Mon, 17 Oct 2011 01:20:48 GMT] [info] [<0.476.0>] 192.168.2.3 - - 'GET' /pdlib/_changes?key=bb579347-9bfb-4dda-84eb-622b43108872&filter=replication/SINGLE_COLLECTION&style=all_docs&heartbeat=10000&since=0&feed=normal 400
[Mon, 17 Oct 2011 01:20:48 GMT] [debug] [<0.476.0>] httpd 400 error response:
{"error":"bad_request","reason":"invalid UTF-8 JSON"}
In case you need to know, this is the filter function:
function (doc, req) {
if (doc.type == 'collection' || doc.type == 'document') {
for (var i in doc.path) {
if (doc.path[i] == req.query.key) {
return true;
}
}
}
return false;
}
Any ideas about the possible cause?
It's common to get a 400 "invalid UTF-8 JSON" error when CouchDB tries to interpret one of your query values as JSON when it's a raw (unquoted) string instead. In this case the replication config results in this HTTP request:
GET /pdlib/_changes?key=bb579347-9bfb-4dda-84eb-622b43108872&filter=replication/SINGLE_COLLECTION&style=all_docs&heartbeat=10000&since=0&feed=normal 400
The _changes feed itself doesn't use a key parameter, but normal CouchDB _view queries do — and they expect it to be a JSON value! — so you might try renaming that query_param to something different.
(Somewhat unfortunately, user-defined filter (and list, etc.) functions share the query parameter namespace with CouchDB itself...you may want to prefix your custom parameters with something that's unlikely to conflict with current or future builtin options, e.g. myapp_key.)
Looks to me like there is something wrong with the way you have your JSON escaped. This works for me:
curl -X POST http://localhost:5984/_replicate -d '{"source":"source_db","target":"target_db","filter":"ddoc/filter-name","query_params":{"key":"some_key"}}' -H "Content-Type: application/json"

node.js express cluster and high CPU usage

My node.js app uses express, socket.io and talks to mongodb through mongoose. All these are working fine with low cpu usage.
When I made the app run with cluster, it works fine, but the CPU usage goes very high. Here is what I am doing.
// Load app settings and the (LearnBoost) `cluster` module; that module's
// export is itself callable and takes the path of the app entry module.
var settings = require("./settings"),
cluster = require('cluster');
// Boot './server' under the cluster master: file logging under ./logs,
// stats collection, pidfiles in ./pids, the CLI plugin, and an admin
// REPL on port 8888; workers serve on port 7777.
// NOTE(review): './server' itself also calls listen (see server.js below),
// presumably the source of the EADDRINUSE worker crashes — confirm.
cluster('./server')
.use(cluster.logger('logs'))
.use(cluster.stats())
.use(cluster.pidfiles('pids'))
.use(cluster.cli())
.use(cluster.repl(8888))
.listen(7777);
When I check the master.log, I see
[Fri, 21 Oct 2011 02:59:51 GMT] INFO master started
[Fri, 21 Oct 2011 02:59:53 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:53 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 02:59:54 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:54 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 02:59:56 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:56 GMT] INFO spawned worker 0
.....
[Fri, 21 Oct 2011 03:11:08 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 03:11:10 GMT] WARNING shutting down master
[Fri, 21 Oct 2011 03:12:07 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 03:12:07 GMT] INFO spawned worker 1
[Fri, 21 Oct 2011 03:12:07 GMT] INFO master started
[Fri, 21 Oct 2011 03:12:09 GMT] ERROR worker 1 died
[Fri, 21 Oct 2011 03:12:09 GMT] INFO spawned worker 1
[Fri, 21 Oct 2011 03:12:10 GMT] ERROR worker 1 died
[Fri, 21 Oct 2011 03:12:10 GMT] INFO spawned worker 1
In workers.access.log, I see all console messages, socket.io logs etc...
In workers.error.log, I see the following error messages, looks like something wrong...
node.js:134
throw e; // process.nextTick error, or 'error' event on first tick
^
Error: EADDRINUSE, Address already in use
at HTTPServer._doListen (net.js:1106:5)
at net.js:1077:14
at Object.lookup (dns.js:153:45)
at HTTPServer.listen (net.js:1071:20)
at Object.<anonymous> (/cygdrive/c/HTML5/RENT/test/server/server.js:703:5)
at Module._compile (module.js:402:26)
at Object..js (module.js:408:10)
at Module.load (module.js:334:31)
at Function._load (module.js:293:12)
at require (module.js:346:19)
server.js:703 - points to app.listen(9999);
EDIT: server.js code
// server.js — Express server wired with connect-form (multipart upload
// support via keepExtensions), Mongoose schemas, mail/social integrations,
// and socket.io. NOTE(review): the "./..." requires are project-local and
// cannot be verified from this excerpt.
var express = require("express"),
fs = require("fs"),
form = require('connect-form'),
app = module.exports = express.createServer(
form({ keepExtensions: true })
),
sys = require("sys"),
RentModel = require("./rent_schema"),
UserModel = require("./track_schema"),
email = require("./email_connect"),
SubscriptionModel = require("./subscription_schema"),
io = require("socket.io"),
fb = require('facebook-js'),
Twitter = require('./Twitter_Analysis'),
Foursquare = require('./Foursquare_Analysis'),
YQL = require("yql"),
settings = require("./settings");
// Hand the already-created Express server to the cluster module; each
// worker binds settings.PORT, with an admin REPL on settings.ADMIN_PORT.
var cluster = require('cluster');
cluster(app)
.use(cluster.logger('logs'))
.use(cluster.stats())
.use(cluster.pidfiles('pids'))
.use(cluster.cli())
.use(cluster.debug())
.use(cluster.repl(settings.ADMIN_PORT))
.listen(settings.PORT);
// NOTE(review): `socket` is assigned without var — an implicit global.
socket = io.listen(app);
.....
.....
//app.listen(settings.PORT);
It looks like you're trying to bind your workers with the same port, that is crashing the workers, but cluster is restarting the workers. So you're in an infinite death cycle.
I'm not sure if you need the app.listen(9999) in your server.js file, which is probably trying to bind port 9999 in all your workers. See the examples in the cluster package for a good example: https://github.com/LearnBoost/cluster/blob/master/examples/express.js

Resources