When I run concurrent MongoDB queries using Node.js, the second query always takes ~2 seconds to return. Using explain(), executionTimeMillis always returns 0ms, which is expected since my test collection has only 2 entries. Here's my reduced test case:
'use strict'

const { MongoClient } = require('mongodb')

const main = async () => {
  const client = new MongoClient('mongodb://admin:123456@localhost:27017/', {
    useNewUrlParser: true,
    useUnifiedTopology: true,
  })
  await client.connect()
  const db = client.db('test')
  const numbers = db.collection('numbers')
  const promises = []
  console.time()
  for (let i = 0; i < 3; i++) {
    promises.push(numbers.find({ number: i }).explain())
  }
  for (const promise of promises) {
    console.log(await promise)
    console.timeLog()
  }
  console.timeEnd()
  await client.close()
}

main()
Output:
{
  queryPlanner: {
    plannerVersion: 1,
    namespace: 'test.numbers',
    indexFilterSet: false,
    parsedQuery: { number: [Object] },
    winningPlan: { stage: 'FETCH', inputStage: [Object] },
    rejectedPlans: []
  },
  executionStats: {
    executionSuccess: true,
    nReturned: 1,
    executionTimeMillis: 0,
    totalKeysExamined: 1,
    totalDocsExamined: 1,
    executionStages: {
      stage: 'FETCH',
      nReturned: 1,
      executionTimeMillisEstimate: 0,
      works: 2,
      advanced: 1,
      needTime: 0,
      needYield: 0,
      saveState: 0,
      restoreState: 0,
      isEOF: 1,
      invalidates: 0,
      docsExamined: 1,
      alreadyHasObj: 0,
      inputStage: [Object]
    },
    allPlansExecution: []
  },
  serverInfo: {
    host: 'DESKTOP-C7CAL9N',
    port: 27017,
    version: '4.0.10',
    gitVersion: 'c389e7f69f637f7a1ac3cc9fae843b635f20b766'
  },
  ok: 1
}
default: 32.252ms
{
  queryPlanner: {
    plannerVersion: 1,
    namespace: 'test.numbers',
    indexFilterSet: false,
    parsedQuery: { number: [Object] },
    winningPlan: { stage: 'FETCH', inputStage: [Object] },
    rejectedPlans: []
  },
  executionStats: {
    executionSuccess: true,
    nReturned: 1,
    executionTimeMillis: 0,
    totalKeysExamined: 1,
    totalDocsExamined: 1,
    executionStages: {
      stage: 'FETCH',
      nReturned: 1,
      executionTimeMillisEstimate: 0,
      works: 2,
      advanced: 1,
      needTime: 0,
      needYield: 0,
      saveState: 0,
      restoreState: 0,
      isEOF: 1,
      invalidates: 0,
      docsExamined: 1,
      alreadyHasObj: 0,
      inputStage: [Object]
    },
    allPlansExecution: []
  },
  serverInfo: {
    host: 'DESKTOP-C7CAL9N',
    port: 27017,
    version: '4.0.10',
    gitVersion: 'c389e7f69f637f7a1ac3cc9fae843b635f20b766'
  },
  ok: 1
}
default: 2042.929ms
{
  queryPlanner: {
    plannerVersion: 1,
    namespace: 'test.numbers',
    indexFilterSet: false,
    parsedQuery: { number: [Object] },
    winningPlan: { stage: 'FETCH', inputStage: [Object] },
    rejectedPlans: []
  },
  executionStats: {
    executionSuccess: true,
    nReturned: 0,
    executionTimeMillis: 0,
    totalKeysExamined: 0,
    totalDocsExamined: 0,
    executionStages: {
      stage: 'FETCH',
      nReturned: 0,
      executionTimeMillisEstimate: 0,
      works: 1,
      advanced: 0,
      needTime: 0,
      needYield: 0,
      saveState: 0,
      restoreState: 0,
      isEOF: 1,
      invalidates: 0,
      docsExamined: 0,
      alreadyHasObj: 0,
      inputStage: [Object]
    },
    allPlansExecution: []
  },
  serverInfo: {
    host: 'DESKTOP-C7CAL9N',
    port: 27017,
    version: '4.0.10',
    gitVersion: 'c389e7f69f637f7a1ac3cc9fae843b635f20b766'
  },
  ok: 1
}
default: 2062.851ms
default: 2063.513ms
If I run the queries sequentially, each query takes only a few milliseconds to return. So why the ~2-second response time?
Edit:
In the first for loop, I create the "concurrent" queries with promises.push(numbers.find({ number: i }).explain()). In the second for loop, I await the promises one after another, but that doesn't mean a promise must wait for the previous one to resolve before starting its work.
To avoid misunderstandings, I've made a few small changes to my code, replacing the two for loops with this:
for (let i = 0; i < 3; i++) {
  promises.push(
    numbers
      .find({ number: i })
      .explain()
      .then(result => {
        // console.log(result)
        console.log('query index:', i)
        console.timeLog()
      })
  )
}
await Promise.all(promises)
Output:
query index: 0
default: 22.040ms
query index: 2
default: 2032.921ms
query index: 1
default: 2034.682ms
default: 2035.260ms
Edit 2:
For further clarification, I use labeled timers, one per query.
for (let i = 0; i < 3; i++) {
  console.time(`query index: ${i}`)
  promises.push(
    numbers
      .find({ number: i })
      .explain()
      .then(result => {
        // console.log(result)
        console.timeEnd(`query index: ${i}`)
      })
  )
}
await Promise.all(promises)
Output:
query index: 0: 12.692ms
query index: 1: 2015.143ms
query index: 2: 2015.310ms
Set MongoClient's poolSize to 1.
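For reference, a minimal sketch of where that option would go, assuming the 3.x driver used in the question (where the option is named poolSize; in driver 4.x+ the equivalent is maxPoolSize):
const client = new MongoClient('mongodb://admin:123456@localhost:27017/', {
  useNewUrlParser: true,
  useUnifiedTopology: true,
  poolSize: 1, // 3.x option name; with driver 4.x+ use maxPoolSize instead
})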
Related
I'm facing rather unexpected behavior with an aggregation. I'm running some performance tests on a collection of ~10M documents which have the form
{
  _id: ObjectId(),
  location: {
    type: 'Point',
    coordinates: [
      26.38914519633397,
      -42.404163283097745
    ]
  },
  isAvailable: true,
  ...more properties
}
with the aggregation:
this.mongo.db.collection('users').aggregate([
  { $limit: 1000000 },
  { $match: {
    isAvailable: true,
    location: {
      $geoWithin: {
        $centerSphere: [
          position, // an array like [26.3, -42.4]
          radius    // a number between 0 and 1
        ]
      }
    }
  }}
], { allowDiskUse: true }).toArray()
The thing is that (everything else being equal) this aggregation takes ~1.8s with indexes (on isAvailable and location) and ~1.65s without them (averaged over 50 aggregations).
What am I missing? What am I doing wrong?
The indexes
{"isAvailable":1}
//and
{"location":"2dsphere"}
Edit: Some cases (10-aggregation average)
(the real use case has just another match stage)
1) Match only
With indexes: ~11.5s, found 298,845 documents
Without indexes: ~9.7s, found 298,845 documents
Real use case with indexes: ~33.5s, found 15,553 documents
Real use case without indexes: ~9.3s, found 15,553 documents (wtf)
2) Limit 1M and then match
With indexes: ~1.45s, found 29,726 documents
Without indexes: ~1.4s, found 29,726 documents
Real use case with indexes: ~1.8s, found 1,525 documents
Real use case without indexes: ~1.7s, found 1,525 documents
3) Match and then limit 100k (and then another match for cases 3 and 4)
With indexes: ~3.6s, found 100,000 documents
Without indexes: ~3.1s, found 100,000 documents
Real use case with indexes: ~3.6s, found 5,137 documents
Real use case without indexes: ~3.1s, found 5,137 documents
Edit: Some cases with .explain("executionStats") (Match only, no limit)
1) With indexes ~11.4s
{
  explainVersion: '1',
  queryPlanner: {
    namespace: 'database.userProps',
    indexFilterSet: false,
    parsedQuery: { '$and': [Array] },
    optimizedPipeline: true,
    maxIndexedOrSolutionsReached: false,
    maxIndexedAndSolutionsReached: false,
    maxScansToExplodeReached: false,
    winningPlan: { stage: 'FETCH', filter: [Object], inputStage: [Object] },
    rejectedPlans: [ [Object] ]
  },
  executionStats: {
    executionSuccess: true,
    nReturned: 298845,
    executionTimeMillis: 11744,
    totalKeysExamined: 8003852,
    totalDocsExamined: 8003852,
    executionStages: {
      stage: 'FETCH',
      filter: [Object],
      nReturned: 298845,
      executionTimeMillisEstimate: 1166,
      works: 8003853,
      advanced: 298845,
      needTime: 7705007,
      needYield: 0,
      saveState: 8021,
      restoreState: 8021,
      isEOF: 1,
      docsExamined: 8003852,
      alreadyHasObj: 0,
      inputStage: {
        stage: 'IXSCAN',
        nReturned: 8003852,
        executionTimeMillisEstimate: 184,
        works: 8003853,
        advanced: 8003852,
        needTime: 0,
        needYield: 0,
        saveState: 8039,
        restoreState: 8039,
        isEOF: 1,
        keyPattern: { isAvailable: 1 },
        indexName: 'isAvailable_1',
        isMultiKey: false,
        multiKeyPaths: { isAvailable: [] },
        isUnique: false,
        isSparse: false,
        isPartial: false,
        indexVersion: 2,
        direction: 'forward',
        indexBounds: { isAvailable: [ '[true, true]' ] },
        keysExamined: 8003852,
        seeks: 1,
        dupsTested: 0,
        dupsDropped: 0
      }
    }
  },
  command: {
    aggregate: 'users',
    pipeline: [ [Object] ],
    allowDiskUse: true,
    cursor: {},
    '$db': 'database'
  },
  serverInfo: {...},
  serverParameters: {...},
  ok: 1
}
2) Without indexes ~9.8s
{
  explainVersion: '1',
  queryPlanner: {
    namespace: 'database.userProps',
    indexFilterSet: false,
    parsedQuery: { '$and': [Array] },
    optimizedPipeline: true,
    maxIndexedOrSolutionsReached: false,
    maxIndexedAndSolutionsReached: false,
    maxScansToExplodeReached: false,
    winningPlan: { stage: 'COLLSCAN', filter: [Object], direction: 'forward' },
    rejectedPlans: []
  },
  executionStats: {
    executionSuccess: true,
    nReturned: 298845,
    executionTimeMillis: 9446,
    totalKeysExamined: 0,
    totalDocsExamined: 10000001,
    executionStages: {
      stage: 'COLLSCAN',
      filter: [Object],
      nReturned: 298845,
      executionTimeMillisEstimate: 632,
      works: 10000003,
      advanced: 298845,
      needTime: 9701157,
      needYield: 0,
      saveState: 10000,
      restoreState: 10000,
      isEOF: 1,
      direction: 'forward',
      docsExamined: 10000001
    }
  },
  command: {
    aggregate: 'users',
    pipeline: [ [Object] ],
    allowDiskUse: true,
    cursor: {},
    '$db': 'database'
  },
  serverInfo: {...},
  serverParameters: {...},
  ok: 1
}
3) With compound index ~33s
{
  explainVersion: '1',
  queryPlanner: {
    namespace: 'database.userProps',
    indexFilterSet: false,
    parsedQuery: { '$and': [Array] },
    optimizedPipeline: true,
    maxIndexedOrSolutionsReached: false,
    maxIndexedAndSolutionsReached: false,
    maxScansToExplodeReached: false,
    winningPlan: { stage: 'FETCH', filter: [Object], inputStage: [Object] },
    rejectedPlans: []
  },
  executionStats: {
    executionSuccess: true,
    nReturned: 298845,
    executionTimeMillis: 35564,
    totalKeysExamined: 423980,
    totalDocsExamined: 423951,
    executionStages: {
      stage: 'FETCH',
      filter: [Object],
      nReturned: 298845,
      executionTimeMillisEstimate: 35281,
      works: 423980,
      advanced: 298845,
      needTime: 125134,
      needYield: 0,
      saveState: 2239,
      restoreState: 2239,
      isEOF: 1,
      docsExamined: 423951,
      alreadyHasObj: 0,
      inputStage: {
        stage: 'IXSCAN',
        nReturned: 423951,
        executionTimeMillisEstimate: 468,
        works: 423980,
        advanced: 423951,
        needTime: 28,
        needYield: 0,
        saveState: 2634,
        restoreState: 2634,
        isEOF: 1,
        keyPattern: [Object],
        indexName: 'isAvailable_1_location_2dsphere',
        isMultiKey: false,
        multiKeyPaths: [Object],
        isUnique: false,
        isSparse: false,
        isPartial: false,
        indexVersion: 2,
        direction: 'forward',
        indexBounds: [Object],
        keysExamined: 423980,
        seeks: 29,
        dupsTested: 0,
        dupsDropped: 0
      }
    }
  },
  command: {
    aggregate: 'users',
    pipeline: [ [Object] ],
    allowDiskUse: true,
    cursor: {},
    '$db': 'database'
  },
  serverInfo: {...},
  serverParameters: {...},
  ok: 1
}
This overall question is very interesting. Even with the available information, there is still a bunch of nuance that is tricky to wade through. Indeed I very much agree with @Alex Blex in the comments:
inputStage: [Object] ? =) The object is the most interesting part.
The problem with the [Object] redaction is likely related to how the MongoDB Shell is configured. Perhaps gather the verbose explain outputs again after either upgrading the shell or increasing its inspectDepth setting.
It would also be helpful to know what version of the database you are using. This is included in the serverInfo field of explain output which appears to have been manually redacted up to this point.
The reason the truncated information is so important is because it contains the critical details about how the database is actually using the indexes etc. For example:
stage: 'FETCH',
filter: [Object],
...
inputStage: {
  stage: 'IXSCAN',
  indexName: 'isAvailable_1_location_2dsphere',
  indexBounds: [Object],
  ...
Here we can see that the database is doing some filtering after FETCHing the data, but we don't know what that filtering is or why it wasn't able to do so during the IXSCAN phase (e.g. directly via the indexBounds or secondarily as a filter during that stage).
Indexes are best at supporting the efficient retrieval of a selective result set. The larger that result set is relative to the total collection size, the less beneficial an index will be in supporting the operation (as was also mentioned by @Alex Blex in the comments). It may very well be the case that the database is not using the index as efficiently as expected, but it could also be the case that environmental factors (including the relative size of the results) are the primary challenges here. There are situations where indexes are not helpful, and indeed part of the optimizer's job is to figure out when it may not be appropriate to use them.
We would only be able to speculate about these components without updated explain output to investigate further.
Usage of Limit
This was also touched on in the comments, but what is your intention with using limit here?
Placing it as the first stage of the aggregation pipeline changes the semantics of what you are asking for. When it is placed first, you are instructing the database to first retrieve that number of documents from the collection and then perform subsequent processing on them (such as filtering in a later $match). This may be quite different from your intended semantics (and therefore the database's behavior). The result is that your tests, and the conclusions drawn from them, may not be representative of the actual problem you are investigating.
As a general pointer, it is always a good idea to explain and test the exact operation that you intend to run in the application. In this case that likely means removing the preceding $limit and having the compound index present. Doing so will greatly improve the applicability of the investigation and the associated suggestions.
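As a sketch of that suggestion (assuming the same collection and the position and radius variables from the question, with the compound index in place), the intended operation could be explained roughly like this:
const stats = await this.mongo.db.collection('users').aggregate([
  { $match: {
    isAvailable: true,
    location: { $geoWithin: { $centerSphere: [position, radius] } }
  }}
], { allowDiskUse: true }).explain('executionStats')
// inspect stats.executionStats and the (un-truncated) winningPlan/inputStage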
I have an issue with slow data fetching. Here is the query I use to fetch the data:
const query1 = this._ctx.signals.find({
  user_id: user._id,
  'spell.id': null,
  'metadata.0.spell_id': { $in: spellsIds }
}).hint({ user_id: 1, 'spell.id': 1, 'metadata.0.spell_id': 1 }).explain('allPlansExecution')
According to explain, the execution time is 35ms. Here is the explain output:
{
  queryPlanner: {
    plannerVersion: 1,
    namespace: 'gringotts.Signals',
    indexFilterSet: false,
    parsedQuery: { ... },
    winningPlan: {
      stage: 'FETCH',
      inputStage: {
        stage: 'IXSCAN',
        keyPattern: {
          user_id: 1,
          'spell.id': 1
        },
        indexName: 'user_id_1_spell.id_1',
        isMultiKey: false,
        multiKeyPaths: {
          user_id: [],
          'spell.id': []
        },
        isUnique: false,
        isSparse: false,
        isPartial: false,
        indexVersion: 2,
        direction: 'forward',
        indexBounds: { ... }
      }
    },
    rejectedPlans: []
  },
  executionStats: {
    executionSuccess: true,
    nReturned: 23866,
    executionTimeMillis: 35,
    totalKeysExamined: 23869,
    totalDocsExamined: 23866,
    executionStages: {
      stage: 'FETCH',
      nReturned: 23866,
      executionTimeMillisEstimate: 1,
      works: 23869,
      advanced: 23866,
      needTime: 2,
      needYield: 0,
      saveState: 23,
      restoreState: 23,
      isEOF: 1,
      docsExamined: 23866,
      alreadyHasObj: 0,
      inputStage: {
        stage: 'IXSCAN',
        nReturned: 23866,
        executionTimeMillisEstimate: 1,
        works: 23869,
        advanced: 23866,
        needTime: 2,
        needYield: 0,
        saveState: 23,
        restoreState: 23,
        isEOF: 1,
        keyPattern: {
          user_id: 1,
          'spell.id': 1
        },
        indexName: 'user_id_1_spell.id_1',
        isMultiKey: false,
        multiKeyPaths: {
          user_id: [],
          'spell.id': []
        },
        isUnique: false,
        isSparse: false,
        isPartial: false,
        indexVersion: 2,
        direction: 'forward',
        indexBounds: { ... },
        keysExamined: 23869,
        seeks: 3,
        dupsTested: 0,
        dupsDropped: 0
      }
    },
    allPlansExecution: []
  },
  serverInfo: {
    host: 'ip-192-168-1-98.ec2.internal',
    port: 27017,
    version: '4.4.4',
    gitVersion: '8db30a63db1a9d84bdcad0c83369623f708e0397'
  },
  ok: 1
}
When I try to fetch the data with the following piece of code, the execution time ranges from 750ms to 900ms (roughly 21x longer). The average document size is 544.6135124888154 bytes.
console.time('q1-time')
const q1 = this._ctx.signals.find({
  user_id: user._id,
  'spell.id': {
    $in: spellsIds
  }
  // @ts-ignore
}).hint({
  user_id: 1,
  'spell.id': 1
})
const f = (q) => {
  const result = []
  return new Promise((res, rej) => {
    // use a single stream; calling q.stream() twice creates two separate streams
    const stream = q.stream()
    stream.on('data', (d) => {
      result.push(d)
    })
    stream.on('error', rej)
    stream.on('end', function () {
      console.log('done processing stream')
      res(result)
    })
  })
}
const data = await f(q1)
console.timeEnd('q1-time') // -> q1-time: 769.511ms
I tried different approaches: .toArray(), iterating via the cursor, and streams; the stream approach I posted above is the fastest.
Why does it take so much longer to actually get the data? Can it be optimized somehow?
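For reference, a minimal sketch of the cursor-iteration variant mentioned above, assuming the same signals collection and spellsIds array (the driver's find cursor is async-iterable in recent versions):
console.time('q1-cursor')
const cursor = this._ctx.signals.find({
  user_id: user._id,
  'spell.id': { $in: spellsIds }
}).hint({ user_id: 1, 'spell.id': 1 })

const docs = []
// the cursor implements Symbol.asyncIterator, so documents can be
// consumed without wiring up stream event handlers
for await (const doc of cursor) {
  docs.push(doc)
}
console.timeEnd('q1-cursor')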
I have a collection with more than 1 million users, and I'm trying to update users' balances on some event.
When I update e.g. 299 rows, it takes up to 15739.901ms.
There is no high load on the server; it's just mongod running. The database is stored on a Samsung EVO 860 SSD, but MongoDB itself is installed on an HDD.
Here's my function:
async usersUpdate(usersToUpdate) {
  const updates = [];
  return new Promise(async (resolve, reject) => {
    usersToUpdate.forEach(user => {
      updates.push({
        "updateOne": {
          "filter": { "userID": user.userID, "balance": user.userBalance },
          "update": {
            "$set": { "user.$.userBalance": user.newBalance },
            "$addToSet": { "orders.$.orderID": user.OrderID }
          }
        }
      });
    });
    console.log('total updates', updates.length);
    if (updates.length > 0) {
      const DbConnection = await getConnection();
      const usersTable = DbConnection.collection('usersCollection');
      usersTable.bulkWrite(updates, { "ordered": false, writeConcern: { w: 0 } }, function (err, result) {
        // do something with result
        if (err) return reject(err);
        return resolve(result);
      });
    } else {
      return resolve('Nothing to update');
    }
  });
}
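For context, a hypothetical call shape, echoing the fields the function reads (all values here are illustrative only):
await usersUpdate([
  { userID: 'Kfasg3ffasg', userBalance: 10, newBalance: 12, OrderID: 'order-1' }, // hypothetical record
  // ...more update records
])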
Both userID and userBalance are indexed, and I set the write concern to w: 0.
I don't know what's wrong with the code or why it's so slow.
What's the problem, and how could I speed things up a bit?
MongoDB config file:
storage:
  dbPath: "/ssd/mongodb"
  journal:
    enabled: false
Explain result:
{
  queryPlanner: {
    plannerVersion: 1,
    namespace: 'usersDB.usersCollection',
    indexFilterSet: false,
    parsedQuery: {
      '$and': [
        { userID: { '$eq': 'Kfasg3ffasg' } },
        { 'user.userBalance': { '$eq': 10 } }
      ]
    },
    winningPlan: {
      stage: 'FETCH',
      filter: { 'user.userBalance': { '$eq': 10 } },
      inputStage: {
        stage: 'IXSCAN',
        keyPattern: { userID: 1 },
        indexName: 'userID_1',
        isMultiKey: false,
        multiKeyPaths: { userID: [] },
        isUnique: true,
        isSparse: false,
        isPartial: false,
        indexVersion: 2,
        direction: 'forward',
        indexBounds: { userID: [ '["Kfasg3ffasg", "Kfasg3ffasg"]' ] }
      }
    },
    rejectedPlans: []
  },
  executionStats: {
    executionSuccess: true,
    nReturned: 1,
    executionTimeMillis: 24,
    totalKeysExamined: 1,
    totalDocsExamined: 1,
    executionStages: {
      stage: 'FETCH',
      filter: { 'user.userBalance': { '$eq': 10 } },
      nReturned: 1,
      executionTimeMillisEstimate: 0,
      works: 2,
      advanced: 1,
      needTime: 0,
      needYield: 0,
      saveState: 0,
      restoreState: 0,
      isEOF: 1,
      invalidates: 0,
      docsExamined: 1,
      alreadyHasObj: 0,
      inputStage: {
        stage: 'IXSCAN',
        nReturned: 1,
        executionTimeMillisEstimate: 0,
        works: 2,
        advanced: 1,
        needTime: 0,
        needYield: 0,
        saveState: 0,
        restoreState: 0,
        isEOF: 1,
        invalidates: 0,
        keyPattern: { userID: 1 },
        indexName: 'userID_1',
        isMultiKey: false,
        multiKeyPaths: { userID: [] },
        isUnique: true,
        isSparse: false,
        isPartial: false,
        indexVersion: 2,
        direction: 'forward',
        indexBounds: { userID: [ '["Kfasg3ffasg", "Kfasg3ffasg"]' ] },
        keysExamined: 1,
        seeks: 1,
        dupsTested: 0,
        dupsDropped: 0,
        seenInvalidated: 0
      }
    },
    allPlansExecution: []
  },
  serverInfo: { }
}
I'm trying to create a Logstash pipeline that polls an ActiveMQ Jolokia endpoint. I want to collect all the metrics for the queues on the broker. I have the following pipeline:
input {
  http_poller {
    urls => {
      health_metrics => {
        method => "get"
        url => "http://localhost:8161/api/jolokia/read/org.apache.activemq:type=Broker,brokerName=localhost,destinationType=Queue,destinationName=*"
        headers => {
          "Content-Type" => "application/json"
        }
        auth => {
          user => "admin"
          password => "admin"
        }
      }
    }
    request_timeout => 30
    keepalive => false
    interval => 5
    codec => "json"
    type => "activemq_broker_queue"
  }
}

filter {
  json_encode {
    source => "value"
  }
  json {
    source => "value"
  }
  mutate {
    remove_field => ["request", "value", "timestamp"]
  }
}

output {
  elasticsearch {
    hosts => "localhost"
    # An index is created for each type of metrics input
    index => "logstash-activmq"
    document_type => "%{type}"
  }
  stdout {
    codec => rubydebug
  }
}
My Jolokia response is in this format:
{
  request: {
    mbean: "org.apache.activemq:brokerName=localhost,destinationName=*,destinationType=Queue,type=Broker",
    type: "read"
  },
  value: {
    "org.apache.activemq:brokerName=localhost,destinationName=SEARCH,destinationType=Queue,type=Broker": {
      ProducerFlowControl: true,
      Options: "",
      AlwaysRetroactive: false,
      MemoryUsageByteCount: 0,
      AverageBlockedTime: 0,
      MemoryPercentUsage: 0,
      CursorMemoryUsage: 0,
      InFlightCount: 0,
      Subscriptions: [],
      CacheEnabled: true,
      ForwardCount: 0,
      DLQ: false,
      StoreMessageSize: 0,
      AverageeEnqueueTime: 0,
      Name: "SEARCH",
      BlockedSends: 0,
      TotalBlockedTime: 0,
      MaxAuditDepth: 2048,
      QueueSize: 0,
      MaxPageSize: 200,
      PrioritizedMessages: false,
      MemoryUsagePortion: 1,
      Paused: false,
      EnqueueCount: 0,
      MessageGroups: {},
      ConsumerCount: 0,
      AverageMessageSize: 0,
      CursorFull: false,
      ExpiredCount: 0,
      MaxProducersToAudit: 1024,
      CursorPercentUsage: 0,
      MinEnqueueTime: 0,
      MemoryLimit: 668309914,
      MinMessageSize: 0,
      DispatchCount: 0,
      MaxEnqueueTime: 0,
      DequeueCount: 0,
      BlockedProducerWarningInterval: 30000,
      ProducerCount: 0,
      MessageGroupType: "cached",
      MaxMessageSize: 0,
      UseCache: true,
      SlowConsumerStrategy: null
    },
    "org.apache.activemq:brokerName=localhost,destinationName=weather,destinationType=Queue,type=Broker": {
      ProducerFlowControl: true,
      Options: "",
      AlwaysRetroactive: false,
      MemoryUsageByteCount: 0,
      AverageBlockedTime: 0,
      MemoryPercentUsage: 0,
      CursorMemoryUsage: 0,
      InFlightCount: 0,
      Subscriptions: [],
      CacheEnabled: true,
      ForwardCount: 0,
      DLQ: false,
      StoreMessageSize: 0,
      AverageEnqueueTime: 0,
      Name: "weather",
      BlockedSends: 0,
      TotalBlockedTime: 0,
      MaxAuditDepth: 2048,
      QueueSize: 0,
      MaxPageSize: 200,
      PrioritizedMessages: false,
      MemoryUsagePortion: 1,
      Paused: false,
      EnqueueCount: 0,
      MessageGroups: {},
      ConsumerCount: 0,
      AverageMessageSize: 0,
      CursorFull: false,
      ExpiredCount: 0,
      MaxProducersToAudit: 1024,
      CursorPercentUsage: 0,
      MinEnqueueTime: 0,
      MemoryLimit: 668309914,
      MinMessageSize: 0,
      DispatchCount: 0,
      MaxEnqueueTime: 0,
      DequeueCount: 0,
      BlockedProducerWarningInterval: 30000,
      ProducerCount: 0,
      MessageGroupType: "cached",
      MaxMessageSize: 0,
      UseCache: true,
      SlowConsumerStrategy: null
    }
  },
  timestamp: 1453588727,
  status: 200
}
I would like to be able to split the two queue destinations into two different documents and then save them to ES.
Currently I'm getting an error saying field names cannot contain '.'.
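One possible way to handle the '.' restriction, assuming the logstash-filter-de_dot plugin is installed (a sketch, not necessarily the eventual fix), is to rewrite dotted field names before they reach Elasticsearch:
filter {
  de_dot {
    # replace "." in field names; with nested => true a field like "a.b"
    # becomes a nested structure instead of a renamed flat field
    nested => true
  }
}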
I don't know why I can't access my property.
connection.query("call VerifyAccountToken(0, null)", function (err, rows, fields) {
  if (err) console.log("Error: " + err);
  console.log("SQLRet: ", rows[0].result);
  console.log(rows);
  console.log(fields);
});
VerifyAccountToken returns a single-row, single-column result set with the column named 'result'.
The console outputs the following:
SQLRet: undefined
[ [ { result: 0 } ],
  { fieldCount: 0,
    affectedRows: 0,
    insertId: 0,
    serverStatus: 2,
    warningCount: 1,
    message: '',
    protocol41: true,
    changedRows: 0 } ]
[ [ { catalog: 'def',
      db: '',
      table: '',
      orgTable: '',
      name: 'result',
      orgName: 'iRes',
      filler1: ,
      charsetNr: 63,
      length: 11,
      type: 3,
      flags: 0,
      decimals: 0,
      filler2: ,
      default: undefined,
      zeroFill: false,
      protocol41: true } ],
  undefined ]
Everything I know tells me this should work.
OK, apparently I needed to use
rows[0][0].result;
I'm not sure why node-mysql nests stored procedures' returned results.
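The nesting happens because a CALL can produce multiple result sets, so the driver returns an array of result sets: element 0 holds the procedure's rows and the last element is the status packet for the CALL itself. A minimal sketch of reading it that way:
connection.query("call VerifyAccountToken(0, null)", function (err, results) {
  if (err) return console.error(err);
  const rows = results[0];     // the procedure's SELECT result set
  const okPacket = results[1]; // status packet for the CALL statement
  console.log("SQLRet:", rows[0].result);
});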