I am implementing pagination.
First I count the items in the collection, and once the count returns I run a second find with skip and limit.
Now I want to attach the count to the data returned by the second query, which I am not able to do.
I tried toObject(), but I get the error "toObject() is not a function".
I don't want to use any library.
ErrorReportModel.find().count(function (err, count) {
  if (err) {
    console.log(err);
    return utile.internalError(res);
  }
  ErrorReportModel.find()
    .sort({ date_create: -1 })
    .limit(20)
    .skip(query.page * 20)
    .lean()
    .exec(function (err, data) {
      if (err) {
        console.log(err);
        return utile.internalError(res);
      }
      // i am doing this at the moment
      var myData = {};
      myData.data = data;
      myData.count = count;
      utile.response(res, myData);
    });
});
I want to send the count to the client side because I want to display page number buttons based on that count.
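For reference, a minimal sketch of the same flow with promises (Query#exec has returned a promise since Mongoose 4; note that .lean() returns plain JavaScript objects rather than Mongoose documents, which is both why toObject is not available here and why the count can be attached directly):

function getPage(page) {
  // run the count and the page query in parallel
  return Promise.all([
    ErrorReportModel.count().exec(), // countDocuments() on Mongoose 5+
    ErrorReportModel.find()
      .sort({ date_create: -1 })
      .skip(page * 20)
      .limit(20)
      .lean()
      .exec()
  ]).then(function (results) {
    return { data: results[1], count: results[0] };
  });
}

// usage: getPage(query.page).then(function (myData) { utile.response(res, myData); });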
I want to get the size of the array in my DB so I can loop through it. To that end, I created a field called Size in my DB, and I want to store the value of this field in a variable so I know how many times I need to loop through the array. I am able to print out the whole document, but I cannot single out the Size value.
Here is my DB document:
_id: ObjectId("5c02492d1c9d440000498a9b")
Names: Array
Size: 5
Now, this is my code where I am trying to extract the number 5 from the Size field:
var cursor = db.collection('Room').find('Size');
cursor.forEach(function (err, num) {
  if (err) {
    return console.log(err);
  }
  console.log(num);
});
This is what console.log(num) prints:
{ _id: 5c02492d1c9d440000498a9b,
  Names:
   [ 'TjAa0wOe5k4',
     'Sz_YPczxzZc',
     'mM5_T-F1Yn4',
     'En6TUJJWwww',
     '5T_CqqjOPDc' ],
  Size: 5 }
Any ideas/help?
For anyone who is wondering, I figured it out. It turns out that turning the result into an array and then calling numArr[0].Size works. Here is the code for anyone curious:
var length;
db.collection('Room').find({ 'Size': Object }).toArray((err, numArr) => {
  if (err) {
    return console.log(err);
  }
  length = numArr[0].Size;
  return length;
});
Update:
OP fixed it by converting the cursor to an array using the .toArray() method and then referencing the property as numArr[0].Size.
You can access the Size property like this:
var size = num.Size;
Have you tried it this way?
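If only the number is needed, a projection keeps the rest of the document from coming over the wire at all. A sketch, assuming the same Room collection as above (the fields option is the 2.x Node driver's projection syntax; on driver 3.x use { projection: { Size: 1 } } instead):

db.collection('Room').findOne({}, { fields: { Size: 1, _id: 0 } }, function (err, doc) {
  if (err) return console.log(err);
  console.log(doc.Size); // 5
});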
I have two collections: one contains my static items, and the other contains reverse geocode results for those items. They are matched by an id property.
I am writing a script that fills the reverse geocode collection with the missing items.
This is my current solution, which is super slow. It does the following:
Get the total count of static items
Create a read stream from the static items collection
Use findOne on the reverse geocode collection for each item that comes from the read stream
If the item exists, increase the counter by 1 and ignore it
If the item doesn't exist, fetch it from the API, save it to the collection and increase the counter by 1
When the counter equals the total count, all items have been fetched, so resolve the promise
function fetchMissingData() {
  return new Promise((resolve, reject) => {
    const staticData = Global.state.db.collection('static_data')
    const googleData = Global.state.db.collection('google_reverse_geocode')
    staticData.count((countErr, count) => {
      if (countErr) return reject(countErr)
      let counter = 0
      let fetched = 0
      function finishIfReady() {
        process.stdout.write(`Progress...(${counter}/${count}), (fetched total: ${fetched})\r`)
        if (count === counter) {
          resolve({ fetched, counter })
        }
      }
      staticData.find()
        .on('data', (hotel) => {
          googleData.findOne({ id: hotel.id }, (findErr, geocodedItem) => {
            if (findErr) return reject(findErr)
            if (geocodedItem) {
              counter++
              finishIfReady()
            } else {
              GMClient.reverseGeocode({ latlng: hotel.geo }, (err, response) => {
                if (err) return reject(err)
                googleData.insertOne({
                  id: hotel.id,
                  payload: response,
                }).then(() => {
                  fetched++
                  counter++
                  finishIfReady()
                }).catch(e => reject(e))
              })
            }
          })
        })
        .on('error', e => reject(e))
    })
  })
}
Is there a more elegant solution using the aggregation framework that would give me the same behavior without the O(n log n) complexity?
First, the actual complexity is O(n log n), because findOne on id uses an index lookup (essentially a binary search). Second, although there is no way to beat the theoretical O(n log n) complexity in this case, there are ways to make your code faster in practice. This is what I would do:
async function getIdOfAllGeoData() {
  // return an array of existing geo data IDs
  const docs = await Global.state.db.collection('google_reverse_geocode')
    .find().toArray();
  return docs.map(o => o.id);
}

async function getStaticDataMissingGeo(existingGeoDataIds) {
  const staticData = Global.state.db.collection('static_data');
  return staticData.find({
    id: {
      $nin: existingGeoDataIds
    }
  }).toArray();
}

async function fetchMissingData() {
  const existingGeoDataIds = await getIdOfAllGeoData();
  const staticDataMissingGeo = await getStaticDataMissingGeo(existingGeoDataIds);
  // staticDataMissingGeo is all the static data that still needs geo data:
  // loop through this array, fetch each item's geo data and insert it into the database
  // ...
}
Finally, you could use a bulk operation to speed things up; it will be much faster. Also, my Mongo-related code above may not be exactly correct, so treat it as an idea rather than a finished solution; a rough sketch of the bulk version follows.
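A sketch of that bulk idea, building on the helpers above (GMClient and Global.state.db come from the question; insertMany requires MongoDB driver 2.2+, and firing every API call in parallel may need throttling in practice):

async function fetchMissingDataBulk() {
  const existingGeoDataIds = await getIdOfAllGeoData();
  const missing = await getStaticDataMissingGeo(existingGeoDataIds);
  // resolve all API lookups, then write the results in one batch
  // instead of one insertOne round trip per item
  const docs = await Promise.all(missing.map(hotel =>
    new Promise((resolve, reject) => {
      GMClient.reverseGeocode({ latlng: hotel.geo }, (err, response) => {
        if (err) return reject(err);
        resolve({ id: hotel.id, payload: response });
      });
    })
  ));
  if (docs.length > 0) {
    await Global.state.db.collection('google_reverse_geocode').insertMany(docs);
  }
  return { fetched: docs.length };
}

And since the question asked about the aggregation framework: the "find the static items with no geo data" step can also be expressed with $lookup (MongoDB 3.2+), which avoids loading all existing IDs into memory:

async function getStaticDataMissingGeoLookup() {
  return Global.state.db.collection('static_data').aggregate([
    // join each static item with its geocode documents by id
    { $lookup: {
        from: 'google_reverse_geocode',
        localField: 'id',
        foreignField: 'id',
        as: 'geo'
    } },
    // keep only the items that have no geocode document yet
    { $match: { geo: { $size: 0 } } }
  ]).toArray();
}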
I am building a REST API with sails.js.
I want to implement pagination in my AngularJS frontend.
To do this I need to get a list of items, along with the total count of items that meet the criteria of the request (to calculate the page count).
Unfortunately sails.js returns only the data list, without the total record count.
I expect that the server response will look like this:
{
  data: [...], // collection data
  count: 193   // count of records that meet the criteria of the request
}
How can I implement this?
You can use async.auto:
async.auto({
  count: function (callback) {
    Model.count(...).exec(callback);
  },
  data: function (callback) {
    Model.find(...).exec(callback);
  }
}, function (err, results) {
  console.log(results.count);
  console.log(results.data);
});
I implemented a set of blueprints that will return a count in either the header or the body:
https://github.com/randallmeeker/SailsBluePrintActions/tree/master/pagination
Here is an example:
var query = Model.find()
  .where(actionUtil.parseCriteria(req))
  .limit(actionUtil.parseLimit(req))
  .skip(actionUtil.parseSkip(req))
  .sort(actionUtil.parseSort(req));

var metaInfo,
    criteria = actionUtil.parseCriteria(req),
    skip = actionUtil.parseSkip(req),
    limit = actionUtil.parseLimit(req);

query.exec(function (err, matchingRecords) {
  if (err) return res.serverError(err);
  Model.count(criteria).exec(function (err, total) {
    if (err) return res.serverError(err);
    metaInfo = {
      start: skip,
      end: skip + limit,
      limit: limit,
      total: total,
      criteria: criteria
    };
    res.ok({ info: metaInfo, items: matchingRecords });
  });
});
You can use Model.count to count all data that meet the criteria.
Example:
// very important here to use the same `criteria` for `find` and `count`
var criteria = { foo: 'bar' };
Model.find(criteria).exec(function (err, found) {
  Model.count(criteria).exec(function (error, count) {
    console.log({ data: found, count: count });
  });
});
I'm using Node.js with the cradle module to interact with the CouchDB server. My question is about understanding the reduce process in order to improve the view query.
For example, I get a user's data from their ID with a view like this:
map: function (doc) { emit(null, doc); }
And in node.js (with cradle):
db.view('users/getUserByID', function (err, resp) {
  var found = false;
  resp.forEach(function (key, row, id) {
    if (id == userID) {
      found = true;
      userData = row;
    }
  });
  if (found) {
    // good, works
  }
});
As you can see, this is really bad for a large number of documents (users) in the database, so I need to improve this view with a reduce, but I don't know how, because I don't understand how reduce works. Thank you.
First of all, you're using views wrong. Views are indexes in the first place, and you shouldn't use them for full-scan operations - that's inefficient and wrong. Use the power of the B-tree index with the key, startkey and endkey query parameters, and emit the field you want to search on as the key.
Second, your example could easily be transformed to:
db.get(userID, function (err, body) {
  if (!err) {
    // found!
  }
});
This works because in your loop you're comparing each row's document id with your userID value. There is no need for that loop - you can request the document by its ID directly.
Third, if your userID value doesn't match the document's ID, your view should be:
function (doc) { emit(doc.userID, null); }
and your code will look like this:
db.view('users/getUserByID', { key: userID }, function (err, resp) {
  if (!err) {
    // found!
  }
});
Simple. Effective. Fast. If you need the matched document, use the include_docs: true query parameter to fetch it.
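For example, a sketch of the same cradle call with include_docs (cradle passes the query parameters straight through to CouchDB; per the CouchDB view API, each returned row then carries a doc property with the source document):

db.view('users/getUserByID', { key: userID, include_docs: true }, function (err, resp) {
  if (err) return console.error(err);
  // each row now holds the emitted key/value plus the full user document
  console.log(resp);
});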
We have an order system where every order has an id. For accounting purposes we need a way to generate invoices with incrementing numbers. What is the best way to do this without using an SQL database?
We are using Node to implement the application.
http://www.mongodb.org/display/DOCS/How+to+Make+an+Auto+Incrementing+Field
The first approach is keeping counters in a side document:
One can keep a counter of the current _id in a side document, in a collection dedicated to counters. Then use FindAndModify to atomically obtain an id and increment the counter.
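A sketch of that counters pattern with the classic Node driver (the counters collection and the 'invoice' sequence name are illustrative; newer drivers expose the same operation as findOneAndUpdate):

function getNextSequence(db, name, callback) {
  db.collection('counters').findAndModify(
    { _id: name },               // one counter document per sequence
    [],                          // no sort needed for a single match
    { $inc: { seq: 1 } },        // atomically bump the counter
    { new: true, upsert: true }, // create on first use, return the updated doc
    function (err, result) {
      if (err) return callback(err);
      callback(null, result.value.seq);
    }
  );
}

// usage: getNextSequence(db, 'invoice', function (err, seq) { /* seq is the next number */ });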
The other approach is to loop optimistically and handle the duplicate key error (code 11000) by continuing and incrementing the id, for the edge case of collisions. That works well unless there are high-concurrency writes to a specific collection.
One can do it with an optimistic concurrency "insert if not present" loop.
But be aware of the warning on that page:
Generally in MongoDB, one does not use an auto-increment pattern for _id's (or other fields), as this does not scale up well on large database clusters. Instead one typically uses Object IDs.
Other things to consider:
Timestamp - a unique long, but not incrementing (based on the epoch)
Hybrid approach - apps don't necessarily have to pick one storage option.
Come up with your own id mechanism based on things like customer and date/time parts that you generate and handle collisions for. Depending on the scheme, collisions can be much less likely. The result is not necessarily incrementing, but it is unique and has a well-defined, readable pattern (see the sketch below).
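For illustration, one possible shape of such an id (the format is made up; inserts must still handle the duplicate key error 11000 and retry, since a random suffix only makes collisions unlikely, not impossible):

function makeInvoiceId(customerCode) {
  var now = new Date();
  var datePart = now.toISOString().slice(0, 10).replace(/-/g, ''); // e.g. 20190101
  var suffix = Math.random().toString(36).slice(2, 6).toUpperCase(); // 4 random chars
  return customerCode + '-' + datePart + '-' + suffix; // e.g. ACME-20190101-K3F9
}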
I did not find any working solution, so I implemented the "optimistic loop" in node.js to get auto-incrementing integer ID fields. It uses the async module to realize the while loop.
// Insert the document into targetCollection, using auto-incremented integer IDs instead of UIDs.
function insertDocument(targetCollection, document, callback) {
  var keepRunning = true;
  var seq = 1;
  // $type 16/18: integer values
  var isNumericQuery = { $or: [{ "_id": { $type: 16 } }, { "_id": { $type: 18 } }] };
  async.whilst(testFunction, mainFunction, afterFinishFunction);

  // Called before each execution of mainFunction(). Works like the stop criterion of a while loop.
  function testFunction() {
    return keepRunning;
  }

  // Called each time testFunction() passes. It is passed a function (next) which must be called after it has completed.
  function mainFunction(next) {
    findCursor(targetCollection, findCursorCallback, isNumericQuery, { _id: 1 });

    function findCursorCallback(cursor) {
      // look at the highest existing integer _id only
      cursor.sort({ _id: -1 }).limit(1);
      cursor.each(cursorEachCallback);
    }

    function cursorEachCallback(err, doc) {
      if (err) console.error("ERROR: " + err);
      if (doc != null) {
        seq = doc._id + 1;
        document._id = seq;
        targetCollection.insert(document, insertCallback);
      }
      if (seq === 1) {
        // the collection has no integer _ids yet; start at 1
        document._id = 1;
        targetCollection.insert(document, insertCallback);
      }
    }

    function insertCallback(err, result) {
      if (err) {
        // e.g. duplicate key error 11000: another writer won the race,
        // so keepRunning stays true and the loop retries
        console.dir(err);
      } else {
        keepRunning = false;
      }
      next();
    }
  }

  // Called once after testFunction() fails and the loop has ended.
  function afterFinishFunction(err) {
    callback(err, null);
  }
}

// Call find() with optional query and projection criteria and return the cursor object.
function findCursor(collection, callback, optQueryObject, optProjectionObject) {
  if (optProjectionObject === undefined) {
    optProjectionObject = {};
  }
  var cursor = collection.find(optQueryObject, optProjectionObject);
  callback(cursor);
}
Call it with:
insertDocument(db.collection(collectionName), documentToSave, function (err) {
  if (err) console.error(err);
});