I've been trying to receive JSON data with reqwest and serde, but I keep getting this error:
Error: reqwest::Error { kind: Decode, source: Error("expected value", line: 1, column: 1) }
This is my code so far:
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let url: String = String::from("https://api.slothpixel.me/api/players/leastrio");
let echo_json: serde_json::Value = reqwest::Client::new()
.get(url)
.send()
.await?
.json()
.await?;
println!("{:#?}", echo_json);
Ok(())
}
Cargo.toml:
reqwest = { version = "0.11", features = ["json"] }
tokio = { version = "1", features = ["full"] }
serde_json = "1"
So I've tried a few things, and it seems you need to add a user agent for it to work. No idea why the documentation doesn't mention it, and I guess reqwest doesn't provide one by default.
reqwest::Client::new()
.get(url)
.header("User-Agent", "Reqwest Rust Test")
.send()
.await?
.json()
.await?;
I used this and it worked!
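As a side note, reqwest's ClientBuilder also has a user_agent method, so you can set a default User-Agent once on the client instead of adding the header to every request. A minimal sketch along the same lines (same agent string as above, picked arbitrarily):
let client = reqwest::Client::builder()
    .user_agent("Reqwest Rust Test") // applied to every request made with this client
    .build()?;
let echo_json: serde_json::Value = client
    .get(url)
    .send()
    .await?
    .json()
    .await?;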
Your code should work. Here is a quick test; add these dependencies:
tracing = "0.1"
tracing-subscriber = "0.2"
and add this to main:
let subscriber = tracing_subscriber::FmtSubscriber::builder()
.with_max_level(tracing::Level::TRACE)
.finish();
tracing::subscriber::set_global_default(subscriber)
.expect("setting default subscriber failed");
dbg!(reqwest::Client::new().get(&url).send().await?.text().await);
Running with RUST_LOG=trace cargo run prints:
Jul 13 09:45:59.232 TRACE hyper::client::pool: checkout waiting for idle connection: ("https", api.slothpixel.me)
Jul 13 09:45:59.234 TRACE hyper::client::connect::http: Http::connect; scheme=Some("https"), host=Some("api.slothpixel.me"), port=None
Jul 13 09:45:59.234 DEBUG hyper::client::connect::dns: resolving host="api.slothpixel.me"
Jul 13 09:45:59.277 DEBUG hyper::client::connect::http: connecting to [2606:4700:3036::6815:5b3]:443
Jul 13 09:45:59.301 DEBUG hyper::client::connect::http: connected to [2606:4700:3036::6815:5b3]:443
Jul 13 09:45:59.352 TRACE hyper::client::conn: client handshake Http1
Jul 13 09:45:59.353 TRACE hyper::client::client: handshake complete, spawning background dispatcher task
Jul 13 09:45:59.353 TRACE hyper::proto::h1::conn: flushed({role=client}): State { reading: Init, writing: Init, keep_alive: Busy }
Jul 13 09:45:59.353 TRACE hyper::client::pool: checkout dropped for ("https", api.slothpixel.me)
Jul 13 09:45:59.354 TRACE encode_headers: hyper::proto::h1::role: Client::encode method=GET, body=None
Jul 13 09:45:59.355 DEBUG hyper::proto::h1::io: flushed 76 bytes
Jul 13 09:45:59.355 TRACE hyper::proto::h1::conn: flushed({role=client}): State { reading: Init, writing: KeepAlive, keep_alive: Busy }
Jul 13 09:45:59.376 TRACE hyper::proto::h1::conn: Conn::read_head
Jul 13 09:45:59.377 TRACE parse_headers: hyper::proto::h1::role: Response.parse([Header; 100], [u8; 953])
Jul 13 09:45:59.377 TRACE parse_headers: hyper::proto::h1::role: Response.parse Complete(937)
Jul 13 09:45:59.378 DEBUG hyper::proto::h1::io: parsed 14 headers
Jul 13 09:45:59.378 DEBUG hyper::proto::h1::conn: incoming body is content-length (16 bytes)
Jul 13 09:45:59.378 TRACE hyper::proto::h1::decode: decode; state=Length(16)
Jul 13 09:45:59.379 DEBUG hyper::proto::h1::conn: incoming body completed
Jul 13 09:45:59.379 TRACE hyper::proto::h1::conn: maybe_notify; read_from_io blocked
Jul 13 09:45:59.379 TRACE hyper::proto::h1::conn: flushed({role=client}): State { reading: Init, writing: Init, keep_alive: Idle }
Jul 13 09:45:59.379 TRACE hyper::proto::h1::conn: flushed({role=client}): State { reading: Init, writing: Init, keep_alive: Idle }
Jul 13 09:45:59.380 TRACE hyper::client::pool: put; add idle connection for ("https", api.slothpixel.me)
Jul 13 09:45:59.380 DEBUG hyper::client::pool: pooling idle connection for ("https", api.slothpixel.me)
Jul 13 09:45:59.380 TRACE hyper::proto::h1::conn: flushed({role=client}): State { reading: Init, writing: Init, keep_alive: Idle }
[src\main.rs:12] reqwest::Client::new().get(&url).send().await?.text().await = Ok(
"error code: 1020",
)
Jul 13 09:45:59.381 TRACE hyper::proto::h1::dispatch: client tx closed
Jul 13 09:45:59.381 TRACE hyper::client::pool: pool closed, canceling idle interval
Jul 13 09:45:59.382 TRACE hyper::client::pool: checkout waiting for idle connection: ("https", api.slothpixel.me)
Jul 13 09:45:59.382 TRACE hyper::proto::h1::conn: State::close_read()
Jul 13 09:45:59.382 TRACE hyper::client::connect::http: Http::connect; scheme=Some("https"), host=Some("api.slothpixel.me"), port=None
Jul 13 09:45:59.382 TRACE hyper::proto::h1::conn: State::close_write()
Jul 13 09:45:59.382 DEBUG hyper::client::connect::dns: resolving host="api.slothpixel.me"
Jul 13 09:45:59.383 TRACE hyper::proto::h1::conn: flushed({role=client}): State { reading: Closed, writing: Closed, keep_alive: Disabled }
Jul 13 09:45:59.383 DEBUG hyper::client::connect::http: connecting to [2606:4700:3036::6815:5b3]:443
Jul 13 09:45:59.383 TRACE hyper::proto::h1::conn: shut down IO complete
Jul 13 09:45:59.396 DEBUG hyper::client::connect::http: connected to [2606:4700:3036::6815:5b3]:443
Jul 13 09:45:59.428 TRACE hyper::client::conn: client handshake Http1
Jul 13 09:45:59.428 TRACE hyper::client::client: handshake complete, spawning background dispatcher task
Jul 13 09:45:59.429 TRACE hyper::proto::h1::conn: flushed({role=client}): State { reading: Init, writing: Init, keep_alive: Busy }
Jul 13 09:45:59.429 TRACE hyper::client::pool: checkout dropped for ("https", api.slothpixel.me)
Jul 13 09:45:59.430 TRACE encode_headers: hyper::proto::h1::role: Client::encode method=GET, body=None
Jul 13 09:45:59.430 DEBUG hyper::proto::h1::io: flushed 76 bytes
Jul 13 09:45:59.430 TRACE hyper::proto::h1::conn: flushed({role=client}): State { reading: Init, writing: KeepAlive, keep_alive: Busy }
Jul 13 09:45:59.451 TRACE hyper::proto::h1::conn: Conn::read_head
Jul 13 09:45:59.451 TRACE parse_headers: hyper::proto::h1::role: Response.parse([Header; 100], [u8; 953])
Jul 13 09:45:59.452 TRACE parse_headers: hyper::proto::h1::role: Response.parse Complete(937)
Jul 13 09:45:59.452 DEBUG hyper::proto::h1::io: parsed 14 headers
Jul 13 09:45:59.452 DEBUG hyper::proto::h1::conn: incoming body is content-length (16 bytes)
Jul 13 09:45:59.453 TRACE hyper::proto::h1::decode: decode; state=Length(16)
Jul 13 09:45:59.453 DEBUG hyper::proto::h1::conn: incoming body completed
Jul 13 09:45:59.453 TRACE hyper::proto::h1::conn: maybe_notify; read_from_io blocked
Jul 13 09:45:59.453 TRACE hyper::proto::h1::conn: flushed({role=client}): State { reading: Init, writing: Init, keep_alive: Idle }
Jul 13 09:45:59.454 TRACE hyper::proto::h1::conn: flushed({role=client}): State { reading: Init, writing: Init, keep_alive: Idle }
Jul 13 09:45:59.454 TRACE hyper::client::pool: put; add idle connection for ("https", api.slothpixel.me)
Jul 13 09:45:59.454 DEBUG hyper::client::pool: pooling idle connection for ("https", api.slothpixel.me)
Jul 13 09:45:59.454 TRACE hyper::proto::h1::conn: flushed({role=client}): State { reading: Init, writing: Init, keep_alive: Idle }
Jul 13 09:45:59.454 TRACE hyper::client::pool: pool closed, canceling idle interval
Jul 13 09:45:59.454 TRACE hyper::proto::h1::dispatch: client tx closed
Jul 13 09:45:59.455 TRACE hyper::proto::h1::conn: State::close_read()
Jul 13 09:45:59.455 TRACE hyper::proto::h1::conn: State::close_write()
Jul 13 09:45:59.455 TRACE hyper::proto::h1::conn: flushed({role=client}): State { reading: Closed, writing: Closed, keep_alive: Disabled }
Jul 13 09:45:59.456 TRACE hyper::proto::h1::conn: shut down IO complete
Error: reqwest::Error { kind: Decode, source: Error("expected value", line: 1, column: 1) }
The body is just error code: 1020, which isn't even a JSON object, unlike https://api.slothpixel.me/api/players/ which returns a JSON object with an "error" field. (Error 1020 is Cloudflare's "Access Denied" code, so the request is most likely being blocked by a firewall rule, which would explain why adding a User-Agent helps.) I suggest reporting this to https://github.com/slothpixel/core, or wherever is appropriate, because this error is odd.
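If you want to see the error body instead of a bare Decode error, one option (just a sketch, not specific to this API) is to read the status and raw text first, and only then parse it as JSON:
let resp = reqwest::Client::new()
    .get(&url)
    .header("User-Agent", "Reqwest Rust Test")
    .send()
    .await?;
let status = resp.status();
let body = resp.text().await?;
// Parse manually so a non-JSON body like "error code: 1020" gets printed instead of swallowed.
match serde_json::from_str::<serde_json::Value>(&body) {
    Ok(json) => println!("{:#?}", json),
    Err(_) => eprintln!("non-JSON response ({}): {}", status, body),
}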
Related
I am writing an async lambda on AWS using Node.js (14.x). This lambda is being called by an API Gateway (REST API, POST method, CORS enabled).
To allow the API Gateway to call an async lambda, I added the following HTTP Header to its Integration Request: Name = X-Amz-Invocation-Type and Mapped from (value) = 'Event' as specified here.
When I run Test, I get the following output:
Execution log for request BLAH
Fri Mar 26 07:30:34 UTC 2021 : Starting execution for request: BLAH
Fri Mar 26 07:30:34 UTC 2021 : HTTP Method: POST, Resource Path: BLAH
Fri Mar 26 07:30:34 UTC 2021 : Method request path: {}
Fri Mar 26 07:30:34 UTC 2021 : Method request query string: {}
Fri Mar 26 07:30:34 UTC 2021 : Method request headers: {}
Fri Mar 26 07:30:34 UTC 2021 : Method request body before transformations: {
"id": "BLAH",
"recaptcha": "BLAH"
}
Fri Mar 26 07:30:34 UTC 2021 : Request validation succeeded for content type application/json
Fri Mar 26 07:30:34 UTC 2021 : Endpoint request URI: https://lambda.BLAH.amazonaws.com/2015-03-31/functions/arn:aws:lambda:BLAH:function:BLAH/invocations
Fri Mar 26 07:30:34 UTC 2021 : Endpoint request headers: {X-Amz-Date=BLAH, x-amzn-apigateway-api-id=BLAH, Accept=application/json, User-Agent=AmazonAPIGateway_BLAH, Host=lambda.BLAH.amazonaws.com, X-Amz-Content-Sha256=BLAH, X-Amzn-Trace-Id=Root=BLAH, x-amzn-lambda-integration-tag=BLAH, Authorization=*****BLAH*****, X-Amz-Source-Arn=arn:aws:execute-api:BLAH/test-invoke-stage/POST/BLAH, X-Amz-Invocation-Type=Event, X-Amz-Security-Token=BLAH [TRUNCATED]
Fri Mar 26 07:30:34 UTC 2021 : Endpoint request body after transformations: {
"id": "BLAH",
"recaptcha": "BLAH"
}
Fri Mar 26 07:30:34 UTC 2021 : Sending request to https://lambda.BLAH.amazonaws.com/2015-03-31/functions/arn:aws:lambda:BLAH:function:BLAH/invocations
Fri Mar 26 07:30:34 UTC 2021 : Received response. Status: 202, Integration latency: 34 ms
Fri Mar 26 07:30:34 UTC 2021 : Endpoint response headers: {Date=Fri, 26 Mar 2021 07:30:34 GMT, Content-Length=0, Connection=keep-alive, x-amzn-RequestId=BLAH, x-amzn-Remapped-Content-Length=0, X-Amzn-Trace-Id=root=BLAH;sampled=0}
Fri Mar 26 07:30:34 UTC 2021 : Endpoint response body before transformations:
Fri Mar 26 07:30:34 UTC 2021 : Method response body after transformations:
Fri Mar 26 07:30:34 UTC 2021 : Method response headers: {X-Amzn-Trace-Id=Root=BLAH;Sampled=0, Access-Control-Allow-Origin=*, Content-Type=application/json}
Fri Mar 26 07:30:34 UTC 2021 : Successfully completed execution
Note that the response gives Status: 202 and there is no response body. No matter what code I put in my Lambda, this seems to be the case. Here are a couple of basic examples I've tried:
// Load the AWS SDK for Node.js and set the region.
var AWS = require('aws-sdk');
AWS.config.update({region: 'BLAH'});
// Main function, responds to AWS API Gateway.
exports.handler = async function (event, context) {
var response = {
httpStatus : 200,
message : "Success"
};
return JSON.stringify(response);
};
and
// Load the AWS SDK for Node.js and set the region.
var AWS = require('aws-sdk');
AWS.config.update({region: 'BLAH'});
// Main function, responds to AWS API Gateway.
exports.handler = async function (event, context) {
var promise = new Promise(function(resolve, reject) {
var response = {
httpStatus : 200,
message : "Success"
};
resolve(JSON.stringify(response));
});
return promise;
};
I made sure to deploy the API and Lambda. How can I get a non-202 status (i.e. get the API Gateway to wait on the async lambda)?
This is working as intended. The second sentence in AWS docs on "Asynchronous invocation" says:
When you invoke a function asynchronously, you don't wait for a response from the function code.
If you want a response with a result, asynchronous invocation will not work.
I am trying to add SOGo to an already working server with Postfix + Dovecot.
The server is CentOS 7, 2 cores, 3 GB RAM, with fewer than 10 users. SOGo is installed from the official repo: sogo-2.3.8-1.el7.centos.x86_64
/etc/sysconfig/sogo is set up for 10 workers: PREFORK=10
SOGo itself is configured with:
WOListenQueueSize=10;
WOWatchDogRequestTimeout=60;
SOGoMaximumPingInterval = 354;
SOGoMaximumSyncInterval = 354;
SOGoInternalSyncInterval = 15;
SOGoMaximumSyncWindowSize = 50;
SOGoMaximumSyncResponseSize = 2048;
The problem seems to be with ActiveSync clients (MS Outlook). SOGo processes start eating all the RAM and sometimes hang (the process can't be killed with signal 15). The problem seems to be related to ActiveSync Ping requests. The log file reports:
Feb 19 13:30:26 sogod [13164]: Sleeping 15 seconds while detecting changes in Ping...
Feb 19 13:30:26 sogod [13163]: Sleeping 15 seconds while detecting changes in Ping...
Feb 19 13:30:26 sogod [13150]: [ERROR] No child available to handle incoming request!
Feb 19 13:30:26 sogod [13155]: Sleeping 15 seconds while detecting changes in Ping...
Feb 19 13:30:27 sogod [13152]: Sleeping 15 seconds while detecting changes in Ping...
Feb 19 13:30:27 sogod [13150]: [WARN] pid 13168 has been hanging in the same request for 3 minutes
Feb 19 13:30:28 sogod [13150]: [ERROR] No child available to handle incoming request!
Feb 19 13:30:28 sogod [13150]: [WARN] pid 13164 has been hanging in the same request for 3 minutes
Feb 19 13:30:29 sogod [13150]: [ERROR] No child available to handle incoming request!
Feb 19 13:30:29 sogod [13150]: [WARN] pid 13163 has been hanging in the same request for 2 minutes
Feb 19 13:30:30 sogod [13168]: Sleeping 15 seconds while detecting changes in Ping...
Feb 19 13:30:30 sogod [13150]: [WARN] pid 13151 has been hanging in the same request for 1 minutes
Feb 19 13:35:03 sogod [13150]: [WARN] pid 13153 has been hanging in the same request for 5 minutes
Feb 19 13:35:04 sogod [13150]: [ERROR] No child available to handle incoming request!
Feb 19 13:35:06 sogod [13150]: [ERROR] No child available to handle incoming request!
Feb 19 13:35:07 sogod [13153]: Sleeping 15 seconds while detecting changes in Ping...
Feb 19 13:35:07 sogod [13150]: [ERROR] No child available to handle incoming request!
Feb 19 13:35:08 sogod [13164]: Sleeping 15 seconds while detecting changes in Ping...
I used gdb to get a backtrace of one of the hanging processes. Here is the result:
#0 0x00007f176ddcc49d in nanosleep () from /lib64/libc.so.6
#1 0x00007f176ddcc334 in sleep () from /lib64/libc.so.6
#2 0x00007f17608e8a99 in -[SOGoActiveSyncDispatcher processPing:inResponse:] () from /usr/lib64/GNUstep/SOGo/ActiveSync.SOGo/./ActiveSync
#3 0x00007f17608eee4b in -[SOGoActiveSyncDispatcher dispatchRequest:inResponse:context:] () from /usr/lib64/GNUstep/SOGo/ActiveSync.SOGo/./ActiveSync
#4 0x00007f1760d50d84 in -[SOGoMicrosoftActiveSyncActions microsoftServerActiveSyncAction] () from /usr/lib64/GNUstep/SOGo/MainUI.SOGo/./MainUI
#5 0x00007f1773e61113 in -[WODirectAction performActionNamed:] () from /lib64/libNGObjWeb.so.4.9
#6 0x00007f1773ee3834 in -[SoActionInvocation callOnObject:withPositionalParametersWhenNotNil:inContext:] () from /lib64/libNGObjWeb.so.4.9
#7 0x00007f1773edee98 in -[SoObjectMethodDispatcher dispatchInContext:] () from /lib64/libNGObjWeb.so.4.9
#8 0x00007f1773ee0f09 in -[SoObjectRequestHandler handleRequest:inContext:session:application:] () from /lib64/libNGObjWeb.so.4.9
#9 0x00007f1773e72753 in -[WORequestHandler handleRequest:] () from /lib64/libNGObjWeb.so.4.9
#10 0x00007f1773e3433c in -[WOCoreApplication dispatchRequest:usingHandler:] () from /lib64/libNGObjWeb.so.4.9
#11 0x00007f1773e3463f in -[WOCoreApplication dispatchRequest:] () from /lib64/libNGObjWeb.so.4.9
#12 0x00007f17751fbb4d in -[SOGo dispatchRequest:] ()
#13 0x00007f1773ed1a85 in -[WOHttpTransaction _run] () from /lib64/libNGObjWeb.so.4.9
#14 0x00007f1773ed1de5 in -[WOHttpTransaction run] () from /lib64/libNGObjWeb.so.4.9
#15 0x00007f1773ecd9e4 in -[WOHttpAdaptor runConnection:] () from /lib64/libNGObjWeb.so.4.9
#16 0x00007f1773ecdc02 in -[WOHttpAdaptor _handleAcceptedConnection:] () from /lib64/libNGObjWeb.so.4.9
#17 0x00007f1773ecdff7 in -[WOHttpAdaptor _handleConnection:] () from /lib64/libNGObjWeb.so.4.9
#18 0x00007f1773ece2c3 in -[WOHttpAdaptor acceptControlMessage:] () from /lib64/libNGObjWeb.so.4.9
#19 0x00007f177261613f in -[NSNotificationCenter _postAndRelease:] () from /lib64/libgnustep-base.so.1.24
#20 0x00007f17732a0e3d in -[NSObject(FileObjectWatcher) receivedEvent:type:extra:forMode:] () from /lib64/libNGExtensions.so.4.9
#21 0x00007f177271ceea in -[GSRunLoopCtxt pollUntil:within:] () from /lib64/libgnustep-base.so.1.24
#22 0x00007f177265d870 in -[NSRunLoop acceptInputForMode:beforeDate:] () from /lib64/libgnustep-base.so.1.24
#23 0x00007f177265dd22 in -[NSRunLoop runMode:beforeDate:] () from /lib64/libgnustep-base.so.1.24
#24 0x00007f1773e33b94 in -[WOCoreApplication run] () from /lib64/libNGObjWeb.so.4.9
#25 0x00007f17751fb1fe in -[SOGo run] ()
#26 0x00007f1773e7bc5e in -[WOWatchDog _runChildWithControlSocket:] () from /lib64/libNGObjWeb.so.4.9
#27 0x00007f1773e7c0f1 in -[WOWatchDog _spawnChild:] () from /lib64/libNGObjWeb.so.4.9
#28 0x00007f1773e7c7d9 in -[WOWatchDog _ensureChildren] () from /lib64/libNGObjWeb.so.4.9
#29 0x00007f1773e7d7f6 in -[WOWatchDog run:argc:argv:] () from /lib64/libNGObjWeb.so.4.9
#30 0x00007f1773e7df21 in WOWatchDogApplicationMain () from /lib64/libNGObjWeb.so.4.9
#31 0x00007f17751fa491 in main ()
Any help please?
Check the PREFORK setting in both files:
vim /etc/sysconfig/sogo
PREFORK=10
USER=sogo
vim /etc/rc.d/init.d/sogod
PREFORK=10
I am working on an application which requires AWS (Amazon Web Services) push notification services.
I am running this on a Node server; when I make the HTTP request with curl, I get this error:
Registering user with deviceId: nodeserver
{ [Error: Missing credentials in config]
message: 'Missing credentials in config',
code: 'SigningError',
time: Tue Nov 17 2015 13:52:20 GMT+0000 (GMT),
originalError:
{ message: 'Could not load credentials from any providers',
code: 'CredentialsError',
time: Tue Nov 17 2015 13:52:20 GMT+0000 (GMT),
originalError:
{ message: 'Connection timed out after 1000ms',
code: 'TimeoutError',
time: Tue Nov 17 2015 13:52:20 GMT+0000 (GMT) } } }
Does anybody know what this error is, or has anybody had this type of error before and knows how to fix it?
I am a beginner with MongoDB and I have a problem running it on my server.
My project is hosted on hostmonster.com servers, but they don't give support for MongoDB databases, although they say I can install it at my own risk.
So I installed MongoDB 2.4.1 (64-bit Linux) without problems, and then, inside the MongoDB bin folder (the one with mongo, mongod, mongodump, ...), I created folders called 'data' and 'data/db' for some tests.
From a console, I connect to the server over SSH and run
./mongod --dbpath 'data/db'
and it works.
But I need it to run automatically, forever.
I followed the steps in "Mongodb can't start" and ran the following line:
./mongod --fork --dbpath 'data/db' --smallfiles --logpath 'data/mongodb.log' --logappend
That also worked: it started the process, I closed the console, the process kept running, and I could view my data through my domain.
The problem is that the process dies after about a day, i.e. I can no longer see my data through my domain, and then I have to run mongod again with:
./mongod --fork --dbpath 'data/db' --smallfiles --logpath 'data/mongodb.log' --logappend
I don't want to do this every day, so my questions are:
What could the problem be? Why does the mongod process die each day?
How can I keep the process running forever?
Sorry for my English.
Edit: Adding the last error log. I don't understand it.
Fri Apr 12 03:19:34.577 [TTLMonitor] query local.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:0 keyUpdates:0 locks(micros) r:141663 nreturned:0 reslen:20 141ms
Fri Apr 12 03:19:34.789 [TTLMonitor] query users.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:3 keyUpdates:0 locks(micros) r:211595 nreturned:0 reslen:20 211ms
Fri Apr 12 03:20:57.869 [PeriodicTask::Runner] task: DBConnectionPool-cleaner took: 18215ms
Fri Apr 12 03:20:57.931 [PeriodicTask::Runner] task: WriteBackManager::cleaner took: 8ms
Fri Apr 12 03:22:14.155 [PeriodicTask::Runner] task: DBConnectionPool-cleaner took: 32ms
Fri Apr 12 03:22:14.215 [PeriodicTask::Runner] task: WriteBackManager::cleaner took: 14ms
Fri Apr 12 03:22:30.670 [TTLMonitor] query actarium.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:2 keyUpdates:0 locks(micros) r:430204 nreturned:0 reslen:20 430ms
Fri Apr 12 03:23:14.825 [PeriodicTask::Runner] task: DBConnectionPool-cleaner took: 7ms
Fri Apr 12 03:23:31.133 [TTLMonitor] query actarium.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:2 keyUpdates:0 locks(micros) r:179175 nreturned:0 reslen:20 168ms
Fri Apr 12 03:25:19.201 [PeriodicTask::Runner] task: WriteBackManager::cleaner took: 505ms
Fri Apr 12 03:25:23.370 [TTLMonitor] query local.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:0 keyUpdates:0 locks(micros) r:3604735 nreturned:0 reslen:20 3604ms
Fri Apr 12 03:25:25.294 [TTLMonitor] query users.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:3 keyUpdates:0 numYields: 1 locks(micros) r:3479328 nreturned:0 reslen:20 1882ms
Fri Apr 12 03:26:26.647 [TTLMonitor] query actarium.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:2 keyUpdates:0 numYields: 1 locks(micros) r:1764712 nreturned:0 reslen:20 1044ms
Fri Apr 12 04:09:27.804 [TTLMonitor] query actarium.system.indexes query: { expireAfterSeconds: { $exists: true } } ntoreturn:0 ntoskip:0 nscanned:2 keyUpdates:0 locks(micros) r:200919 nreturned:0 reslen:20 200ms
Fri Apr 12 04:43:54.002 got signal 15 (Terminated), will terminate after current cmd ends
Fri Apr 12 04:43:54.151 [interruptThread] now exiting
Fri Apr 12 04:43:54.151 dbexit:
Fri Apr 12 04:43:54.157 [interruptThread] shutdown: going to close listening sockets...
Fri Apr 12 04:43:54.160 [interruptThread] closing listening socket: 9
Fri Apr 12 04:43:54.160 [interruptThread] closing listening socket: 10
Fri Apr 12 04:43:54.160 [interruptThread] closing listening socket: 11
Fri Apr 12 04:43:54.160 [interruptThread] removing socket file: /tmp/mongodb-27017.sock
Fri Apr 12 04:43:54.160 [interruptThread] shutdown: going to flush diaglog...
Fri Apr 12 04:43:54.160 [interruptThread] shutdown: going to close sockets...
Fri Apr 12 04:43:54.176 [interruptThread] shutdown: waiting for fs preallocator...
Fri Apr 12 04:43:54.176 [interruptThread] shutdown: lock for final commit...
Fri Apr 12 04:43:54.176 [interruptThread] shutdown: final commit...
Fri Apr 12 04:43:54.176 [interruptThread] shutdown: closing all files...
Fri Apr 12 04:43:54.212 [interruptThread] closeAllFiles() finished
Fri Apr 12 04:43:54.220 [interruptThread] journalCleanup...
Fri Apr 12 04:43:54.246 [interruptThread] removeJournalFiles
Fri Apr 12 04:43:54.280 [interruptThread] error removing journal files
boost::filesystem::directory_iterator::construct: No such file or directory: "/home2/anuncio3/bin/mongodb-linux-x86_64-2.4.1/bin/data/db/journal"
Fri Apr 12 04:43:54.280 [interruptThread] error couldn't remove journal file during shutdown boost::filesystem::directory_iterator::construct: No such file or directory: "/home2/anuncio3/bin/mongodb-linux-x86_64-2.4.1/bin/data/db/journal"
Fri Apr 12 04:43:54.285 shutdown failed with exception
Fri Apr 12 04:43:54.285 dbexit: really exiting now
Your answer is here:
Fri Apr 12 04:43:54.002 got signal 15 (Terminated), will terminate after current cmd ends
Fri Apr 12 04:43:54.151 [interruptThread] now exiting
Your process is receiving signal 15, which is the default kill signal. It's possible that their systems are automatically killing long-running processes or something similar. If that is indeed what's happening, then your host would have to resolve that.
Additionally, these errors:
Fri Apr 12 04:43:54.280 [interruptThread] error removing journal files
boost::filesystem::directory_iterator::construct: No such file or directory: "/home2/anuncio3/bin/mongodb-linux-x86_64-2.4.1/bin/data/db/journal"
Fri Apr 12 04:43:54.280 [interruptThread] error couldn't remove journal file during shutdown boost::filesystem::directory_iterator::construct: No such file or directory: "/home2/anuncio3/bin/mongodb-linux-x86_64-2.4.1/bin/data/db/journal"
indicate that something is wrong with your install's data directory. The journal files either don't exist, or are going missing; if some process on the system is trying to clean things up, then it wouldn't surprise me if something is nuking your journal files.
I know this is an old question, but my experience might be helpful to other readers.
Based on my tests, they only let you run a program for about 5 minutes (sometimes a bit more) before killing it, so installing MongoDB is fairly useless unless you have a dedicated IP.
My Node.js app uses Express and Socket.IO, and talks to MongoDB through Mongoose. All of these work fine with low CPU usage.
When I run the app with cluster, it works, but the CPU usage goes very high. Here is what I am doing:
var settings = require("./settings"),
cluster = require('cluster');
cluster('./server')
.use(cluster.logger('logs'))
.use(cluster.stats())
.use(cluster.pidfiles('pids'))
.use(cluster.cli())
.use(cluster.repl(8888))
.listen(7777);
When I check the master.log, I see
[Fri, 21 Oct 2011 02:59:51 GMT] INFO master started
[Fri, 21 Oct 2011 02:59:53 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:53 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 02:59:54 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:54 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 02:59:56 GMT] ERROR worker 0 died
[Fri, 21 Oct 2011 02:59:56 GMT] INFO spawned worker 0
.....
[Fri, 21 Oct 2011 03:11:08 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 03:11:10 GMT] WARNING shutting down master
[Fri, 21 Oct 2011 03:12:07 GMT] INFO spawned worker 0
[Fri, 21 Oct 2011 03:12:07 GMT] INFO spawned worker 1
[Fri, 21 Oct 2011 03:12:07 GMT] INFO master started
[Fri, 21 Oct 2011 03:12:09 GMT] ERROR worker 1 died
[Fri, 21 Oct 2011 03:12:09 GMT] INFO spawned worker 1
[Fri, 21 Oct 2011 03:12:10 GMT] ERROR worker 1 died
[Fri, 21 Oct 2011 03:12:10 GMT] INFO spawned worker 1
In workers.access.log, I see all console messages, socket.io logs etc...
In workers.error.log, I see the following error messages; it looks like something is wrong...
node.js:134
throw e; // process.nextTick error, or 'error' event on first tick
^
Error: EADDRINUSE, Address already in use
at HTTPServer._doListen (net.js:1106:5)
at net.js:1077:14
at Object.lookup (dns.js:153:45)
at HTTPServer.listen (net.js:1071:20)
at Object.<anonymous> (/cygdrive/c/HTML5/RENT/test/server/server.js:703:5)
at Module._compile (module.js:402:26)
at Object..js (module.js:408:10)
at Module.load (module.js:334:31)
at Function._load (module.js:293:12)
at require (module.js:346:19)
server.js:703 - points to app.listen(9999);
EDIT: server.js code
var express = require("express"),
fs = require("fs"),
form = require('connect-form'),
app = module.exports = express.createServer(
form({ keepExtensions: true })
),
sys = require("sys"),
RentModel = require("./rent_schema"),
UserModel = require("./track_schema"),
email = require("./email_connect"),
SubscriptionModel = require("./subscription_schema"),
io = require("socket.io"),
fb = require('facebook-js'),
Twitter = require('./Twitter_Analysis'),
Foursquare = require('./Foursquare_Analysis'),
YQL = require("yql"),
settings = require("./settings");
//
var cluster = require('cluster');
cluster(app)
.use(cluster.logger('logs'))
.use(cluster.stats())
.use(cluster.pidfiles('pids'))
.use(cluster.cli())
.use(cluster.debug())
.use(cluster.repl(settings.ADMIN_PORT))
.listen(settings.PORT);
socket = io.listen(app);
.....
.....
//app.listen(settings.PORT);
It looks like you're trying to bind all of your workers to the same port; that crashes the workers, and cluster keeps restarting them, so you're stuck in an infinite death cycle.
I'm not sure you need the app.listen(9999) in your server.js file; it's probably trying to bind port 9999 in every worker. See the express example in the cluster package for a good reference: https://github.com/LearnBoost/cluster/blob/master/examples/express.js