Hello guys, I am trying to manipulate the documents that I am inserting into my collection with create(). Essentially, I am calling a function that increments a letter field.
My pre hook looks like this:
baseAttribute.pre('save', async function (next) {
    var att = this;
    const query = await mongoose.models.BaseAttributes.find({}, {}, { sort: { _id: -1 } }).limit(1);
    console.log(query);
    if (query.length === 0) {
        att.code = "AA";
    } else {
        att.code = Codes.GetAlphaCode(query[0].code);
    }
    next();
});
The result is that all the documents inserted by create() get the same code.
I found a solution to the problem.
// required as "asyncs" because I am not sure if it will cause a conflict with my async functions
var asyncs = require("async");

asyncs.eachOfSeries(newArray, function (item, i, next) {
    // console.log(item)
    console.log("In each async");
    // item.save(next);
    BaseAttribute.find({}, {}, { sort: { _id: -1 } }).limit(1).then(result => {
        console.log("In find");
        if (!result[0]) {
            item.code = "AA";
        } else {
            item.code = Codes.GetAlphaCode(result[0].code);
        }
        item.save(next);
    });
}, function (err) {
    if (err) return console.log(err);
    res.json({ done: "true" });
});
This way the documents are saved one by one, in serial order.
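For comparison, the same serial behaviour can be written with async/await instead of the async library. This is only a sketch: it assumes the same BaseAttribute model, Codes helper, newArray and res from the snippets above, and must run inside an async route handler.
// Sketch only: the same serial logic, one save at a time, using async/await.
for (const item of newArray) {
    // look up the most recently inserted document to derive the next code
    const result = await BaseAttribute.find({}, {}, { sort: { _id: -1 } }).limit(1);
    item.code = result[0] ? Codes.GetAlphaCode(result[0].code) : "AA";
    await item.save(); // wait for each save before computing the next code
}
res.json({ done: "true" });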
I need to fetch 1000 records from MongoDB. How can I do this in Node.js?
I have tried using pagination by setting the limit to 1000, but it still takes 25 seconds to return the response. Is there any other way to minimize the response time?
exports.getAll = function (filters, sort, skip, limit, callback) {
    let query = {
        removed: false
    };
    let filter = {};
    let options = {
        sort: []
    };

    options.skip = 0;
    options.limit = 10000;
    options.sort = [
        ['_id', 'desc']
    ];

    collections.findItems(query, filter, options, (err, result) => {
        if (err) {
            logger.error('db error', err);
            return callback('Something went wrong');
        }
        callback(null, result);
    });
};
exports.getAll = function getAll(req, res, next) {
    service.getAll(req.body.filters, req.body.sort, req.body.skip, req.body.limit, (err, data) => {
        if (err) {
            return next({
                status: 400,
                error: err
            });
        }
        res.json(data);
    });
};
I guess you are using Mongoose's find() in the findItems method. find() returns Mongoose documents, which carry quite a lot of extra state.
What you can do is use find().lean().
With lean(), plain JavaScript objects holding the data from MongoDB are returned instead of full Mongoose documents, which can improve the response time a lot.
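For illustration, a minimal sketch of this (assuming findItems wraps a hypothetical Mongoose model named Item, and reusing the callback from your getAll service) could look like:
// Sketch only: Item is a hypothetical Mongoose model standing in for whatever findItems queries.
Item.find({ removed: false })
    .sort({ _id: -1 })
    .limit(1000)
    .lean() // return plain JavaScript objects instead of Mongoose documents
    .exec((err, docs) => {
        if (err) return callback('Something went wrong');
        return callback(null, docs);
    });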
I also suggest reading this for more query optimizations at the MongoDB level: https://docs.mongodb.com/manual/core/query-optimization/
Are you using any ORM, like Sequelize?
What's the structure of your document?
Check your MongoDB logs:
tail -f /var/log/mongodb/mongod.log
If your document is straightforward, then you should use indexing.
If you already have indexes, try rebuilding them.
Sorting works efficiently when you have an index that supports it. Avoid sorting on the server if you can; sort the data on the client side instead,
because MongoDB imposes a 32 MB memory limit on in-memory sorting operations.
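As a hedged illustration, an index that supports the sort can be created through the Node driver. The field name postdate and the collection name items below are hypothetical; replace them with whatever your slow query actually sorts or filters on.
// Sketch only: create a descending index on the (hypothetical) field used for sorting,
// so MongoDB can walk the index instead of performing an in-memory sort.
db.collection('items').createIndex({ postdate: -1 }, (err, indexName) => {
    if (err) return console.error(err);
    console.log('Created index', indexName);
});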
I'm using Node.js and MongoDB. I'm trying to build a query and execute it so that I can return a JSON object through this API. When I do the following, I get the error:
TypeError: Cannot read property 'exec' of undefined
How should I be setting this up differently?
var mongodb = require('mongodb');
var db = new mongodb.Db('MyDD', new mongodb.Server('localhost', 27017, {}));

exports.findAll = function (req, res) {
    const limit = parseInt(req.query.limit);
    let query = GetTransactionList(limit);
    query.exec(function (err, items) {
        if (err) {
            console.log(err);
        } else {
            res.json(items);
        }
    });
};

function GetTransactionList(limit) {
    console.log("GetTransactionList entered with limit: ", limit);
    let query;
    if (limit) {
        query = db.collection('transtest').find({}, { limit: limit }).sort({ postdate: -1 });
    } else {
        query = db.collection('transtest').find();
    }
    return query;
}
You should first import or create a schema (model) and build the query from it.
In your example, GetTransactionList is not a schema and does not implement an exec() method;
it is a plain function.
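For illustration only, here is a hypothetical sketch of the Mongoose route this answer hints at. The Transaction model and its schema are assumptions, not code from the question; exec() becomes available because the query is built from a model.
// Sketch only: a hypothetical Mongoose model over the 'transtest' collection.
const mongoose = require('mongoose');

const transactionSchema = new mongoose.Schema({ postdate: Date }, { strict: false });
const Transaction = mongoose.model('Transaction', transactionSchema, 'transtest');

function GetTransactionList(limit) {
    const query = Transaction.find({}).sort({ postdate: -1 });
    return limit ? query.limit(limit) : query;
}

// exec() now exists because GetTransactionList returns a Mongoose Query.
GetTransactionList(10).exec(function (err, items) {
    if (err) return console.log(err);
    console.log(items);
});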
exec() is a helper method defined by Mongoose, not by the MongoDB driver. In the MongoDB driver, find() returns a cursor to the selected documents, and you can use the cursor's toArray() method to get an array of those documents.
Simple example:
db.collection('restaurants').find().toArray(function (err, documents) {
    documents.forEach(function (doc) {
        console.log(doc.name);
    });
});
In your case:
query.toArray(function (err, items) {
    // assert.equal(err, null);
    items.forEach(function (item) {
        console.log(item);
    });
    res.json(items);
});
I hope this helps you.
Because find() returns a cursor, you may want to append the cursor's toArray() method, which returns a Promise when no callback is passed.
Consider rewriting your code as follows:
exports.findAll = function (req, res) {
    const query = GetTransactionList(parseInt(req.query.limit));
    query.then(res.json.bind(res)).catch(console.log); // bind so res.json keeps its context
    /* The above is equivalent to
    query.then(function (items) {
        res.json(items);
    }).catch(function (err) {
        console.log(err);
    });
    */
};

function GetTransactionList(limit) {
    console.log("GetTransactionList entered with limit: ", limit);
    const cursor = db.collection('transtest').find({});
    return limit ? cursor.sort([['postdate', -1]]).limit(limit).toArray() : cursor.toArray();
}
In Mongoose, I need to find elements in a collection and count them, getting both the results of the find and the count. I have tried:
Model.find().count(function (err, count) {
    // Get count, but cannot get results of find
});
Is there a way to get both find() and count() without calling them twice?
You can use the length of the returned array:
Model.find().exec(function (err, results) {
    var count = results.length;
});
You have to do 2 separate queries, unfortunately. Festo's answer only works if you have fewer elements in the database than the limit.
var countQuery = Model.count();
var findQuery = Model.find().limit(2);

countQuery.exec(function (e, count) {
    console.log('count', count); // can be more than 2; this is not calculated, mongo stores this value internally
});

findQuery.exec(function (e, data) {
    console.log('found items', data); // will be 2 or fewer elements
});
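If your Mongoose version returns promises from exec(), the two queries can at least be issued in parallel. This is only a sketch reusing the same Model and the same two queries as above:
// Sketch only: run the count and the find in parallel and wait for both.
var countQuery = Model.count();
var findQuery = Model.find().limit(2);

Promise.all([countQuery.exec(), findQuery.exec()])
    .then(function (results) {
        var count = results[0]; // total number of matching documents
        var data = results[1];  // at most 2 documents
        console.log('count', count, 'found items', data);
    })
    .catch(console.error);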
As stated in the Mongoose documentation and in the answer by Benjamin, the method Model.count() is deprecated. Instead of count(), the alternatives are the following:
SomeModel.countDocuments({}, function (err, count) {
    if (err) { return handleError(err); } // handle possible errors
    console.log(count);
    // and do some other fancy stuff
});
or
SomeModel
    .estimatedDocumentCount()
    .then(count => {
        console.log(count);
        // and do one super neat trick
    })
    .catch(err => {
        // handle possible errors
    });
You can also use the mongoose-paginate plugin.
For example:
Model.paginate({}, { offset: 100, limit: 0 }).then(function (result) {
    // result.docs   - Array of documents
    // result.total  - Total number of documents in collection that match a query
    // result.limit  - 0
    // result.offset - 100
});
DeprecationWarning: collection.count is deprecated, and will be removed in a future version. Use Collection.countDocuments or Collection.estimatedDocumentCount instead.
Hope this update helps someone.
Example:
var count = await User.find().countDocuments();
Just a better way to write it:
try {
    let result = await Model.find();
    console.log(result);        // results
    console.log(result.length); // count
} catch (err) {
    // handle error
}
I have a huge collection of documents in my DB and I'm wondering how I can run through all the documents and update them, each document with a different value.
The answer depends on the driver you're using. All MongoDB drivers I know have cursor.forEach() implemented one way or another.
Here are some examples:
node-mongodb-native
collection.find(query).forEach(function (doc) {
    // handle
}, function (err) {
    // done or error
});
mongojs
db.collection.find(query).forEach(function (err, doc) {
    // handle
});
monk
collection.find(query, { stream: true })
    .each(function (doc) {
        // handle doc
    })
    .error(function (err) {
        // handle error
    })
    .success(function () {
        // final callback
    });
mongoose
collection.find(query).stream()
    .on('data', function (doc) {
        // handle doc
    })
    .on('error', function (err) {
        // handle error
    })
    .on('end', function () {
        // final callback
    });
Updating documents inside the .forEach callback
The only problem with updating documents inside the .forEach callback is that you have no idea when all the documents have been updated.
To solve this problem you should use some asynchronous control-flow solution. Here are some options:
async
promises (when.js, bluebird)
Here is an example of using async, using its queue feature:
var q = async.queue(function (doc, callback) {
    // code for your update
    collection.update({
        _id: doc._id
    }, {
        $set: { hi: 'there' }
    }, {
        w: 1
    }, callback);
}, Infinity);

var cursor = collection.find(query);
cursor.each(function (err, doc) {
    if (err) throw err;
    if (doc) q.push(doc); // dispatching doc to async.queue
});

q.drain = function () {
    if (cursor.isClosed()) {
        console.log('all items have been processed');
        db.close();
    }
};
Using the mongodb driver and modern Node.js with async/await, a good solution is to use next():
const collection = db.collection('things');
const cursor = collection.find({
    bla: 42 // find all things where bla is 42
});

let document;
while ((document = await cursor.next())) {
    await collection.findOneAndUpdate({
        _id: document._id
    }, {
        $set: {
            blu: 43
        }
    });
}
This results in only one document at a time being held in memory, as opposed to, for example, the accepted answer, where many documents are pulled into memory before processing starts. With "huge collections" (as per the question) this may be important.
If the documents are large, this can be improved further by using a projection, so that only the fields that are actually required are fetched from the database.
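For example, a sketch reusing the names from the snippet above (and assuming a driver version where the projection option is passed in find()'s options, i.e. 3.x or later):
// Sketch only: same loop as above, but with a projection so only _id is fetched per document.
const cursor = collection.find(
    { bla: 42 },
    { projection: { _id: 1 } } // fetch only the fields you actually need
);

let document;
while ((document = await cursor.next())) {
    await collection.findOneAndUpdate(
        { _id: document._id },
        { $set: { blu: 43 } }
    );
}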
var MongoClient = require('mongodb').MongoClient,
    assert = require('assert');

MongoClient.connect('mongodb://localhost:27017/crunchbase', function (err, db) {
    assert.equal(err, null);
    console.log("Successfully connected to MongoDB.");

    var query = {
        "category_code": "biotech"
    };

    db.collection('companies').find(query).toArray(function (err, docs) {
        assert.equal(err, null);
        assert.notEqual(docs.length, 0);

        docs.forEach(function (doc) {
            console.log(doc.name + " is a " + doc.category_code + " company.");
        });

        db.close();
    });
});
Notice that the call to .toArray() makes the application fetch the entire dataset.
var MongoClient = require('mongodb').MongoClient,
    assert = require('assert');

MongoClient.connect('mongodb://localhost:27017/crunchbase', function (err, db) {
    assert.equal(err, null);
    console.log("Successfully connected to MongoDB.");

    var query = {
        "category_code": "biotech"
    };

    var cursor = db.collection('companies').find(query);

    cursor.forEach(
        function (doc) {
            console.log(doc.name + " is a " + doc.category_code + " company.");
        },
        function (err) {
            assert.equal(err, null);
            return db.close();
        }
    );
});
Notice that the cursor returned by find() is assigned to var cursor. With this approach, instead of fetching all the data into memory and consuming it at once, we stream the data to our application. find() can create a cursor immediately because it doesn't actually make a request to the database until we try to use some of the documents it will provide; the point of the cursor is to describe our query. The second parameter to cursor.forEach shows what to do when the cursor is exhausted or an error occurs.
In the initial version of the above code, it was toArray() which forced the database call. It meant we needed ALL the documents and wanted them to be in an array.
Also, MongoDB returns data in batches: the cursor issues successive requests from the application to MongoDB, one batch at a time.
forEach is better than toArray because we can process documents as they come in, until we reach the end. Contrast it with toArray, where we wait for ALL the documents to be retrieved and the entire array to be built. This means we're not getting any advantage from the fact that the driver and the database system are working together to batch results to your application. Batching is meant to provide efficiency in terms of memory overhead and execution time. Take advantage of it if you can in your application.
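As a small, hedged illustration (reusing db, query and assert from the snippet above), the batch size can also be tuned on the cursor itself:
// Sketch only: ask the driver to fetch documents in batches of 100,
// then process each document as it arrives.
var cursor = db.collection('companies').find(query).batchSize(100);

cursor.forEach(
    function (doc) {
        console.log(doc.name + " is a " + doc.category_code + " company.");
    },
    function (err) {
        assert.equal(err, null);
        return db.close();
    }
);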
None of the previous answers mentions batching the updates. That makes them extremely slow 🐌 - tens or hundreds of times slower than a solution using bulkWrite.
Let's say you want to double the value of a field in each document. Here's how to do that fast 💨 and with fixed memory consumption:
// Double the value of the 'foo' field in all documents
let bulkWrites = [];
const bulkDocumentsSize = 100; // how many documents to write at once
let i = 0;

db.collection.find({ ... }).forEach(doc => {
    i++;

    // Update the document...
    doc.foo = doc.foo * 2;

    // Add the update to an array of bulk operations to execute later
    bulkWrites.push({
        replaceOne: {
            filter: { _id: doc._id },
            replacement: doc,
        },
    });

    // Update the documents and log progress every `bulkDocumentsSize` documents
    if (i % bulkDocumentsSize === 0) {
        db.collection.bulkWrite(bulkWrites);
        bulkWrites = [];
        print(`Updated ${i} documents`);
    }
});

// Flush the last <100 bulk writes (bulkWrite errors on an empty array, so guard it)
if (bulkWrites.length > 0) {
    db.collection.bulkWrite(bulkWrites);
}
And here is an example of using a Mongoose cursor asynchronously with promises:
new Promise(function (resolve, reject) {
    collection.find(query).cursor()
        .on('data', function (doc) {
            // ...
        })
        .on('error', reject)
        .on('end', resolve);
})
.then(function () {
    // ...
});
Reference:
Mongoose cursors
Streams and promises
Leonid's answer is great, but I want to reinforce the importance of using async/promises and to give a different solution with a promises example.
The simplest solution to this problem is to loop with forEach over the documents and call an update for each one. Usually you don't need to close the db connection after each request, but if you do need to close it, be careful: you must only close it once you are sure that all updates have finished executing.
A common mistake here is to call db.close() after all the updates have been dispatched, without knowing whether they have completed. If you do that, you'll get errors.
Wrong implementation:
collection.find(query).each(function (err, doc) {
    if (err) throw err;

    if (doc) {
        collection.update(query, update, function (err, updated) {
            // handle
        });
    } else {
        db.close(); // if there is any pending update, it will throw an error here
    }
});
However, as db.close() is also an async operation (its signature has a callback option), you may be lucky and this code may finish without errors. It may only work when you need to update just a few docs in a small collection (so, don't try it).
Correct solution:
As a solution with async was already proposed by Leonid, below follows a solution using Q promises.
var Q = require('q');
var client = require('mongodb').MongoClient;

var url = 'mongodb://localhost:27017/test';

client.connect(url, function (err, db) {
    if (err) throw err;

    var promises = [];
    var query = {}; // select all docs
    var collection = db.collection('demo');
    var cursor = collection.find(query);

    // read all docs
    cursor.each(function (err, doc) {
        if (err) throw err;

        if (doc) {
            // create a promise to update the doc
            var query = doc;
            var update = { $set: { hi: 'there' } };

            var promise =
                Q.npost(collection, 'update', [query, update])
                    .then(function (updated) {
                        console.log('Updated: ' + updated);
                    });

            promises.push(promise);
        } else {
            // close the connection after executing all promises
            Q.all(promises)
                .then(function () {
                    if (cursor.isClosed()) {
                        console.log('all items have been processed');
                        db.close();
                    }
                })
                .fail(console.error);
        }
    });
});
node-mongodb-native now supports an endCallback parameter to cursor.forEach, so you can handle the event AFTER the whole iteration; refer to the official documentation for details: http://mongodb.github.io/node-mongodb-native/2.2/api/Cursor.html#forEach.
Also note that .each is deprecated in the Node.js native driver now.
You can now use (in an async function, of course):
for await (let doc of collection.find(query)) {
    await updateDoc(doc);
}
// all done
which nicely serializes all updates.
Let's assume that we have the following MongoDB data in place.
Database name: users
Collection name: jobs
===========================
Documents
{ "_id" : ObjectId("1"), "job" : "Security", "name" : "Jack", "age" : 35 }
{ "_id" : ObjectId("2"), "job" : "Development", "name" : "Tito" }
{ "_id" : ObjectId("3"), "job" : "Design", "name" : "Ben", "age" : 45}
{ "_id" : ObjectId("4"), "job" : "Programming", "name" : "John", "age" : 25 }
{ "_id" : ObjectId("5"), "job" : "IT", "name" : "ricko", "age" : 45 }
==========================
This code:
var MongoClient = require('mongodb').MongoClient;
var dbURL = 'mongodb://localhost/users';

MongoClient.connect(dbURL, (err, db) => {
    if (err) {
        throw err;
    } else {
        console.log('Connection successful');
        var dataBase = db.db();

        // loop with forEach
        dataBase.collection('jobs').find().forEach(function (myDoc) {
            console.log('There is a job called: ' + myDoc.job + ' in the database');
        });
    }
});
I looked for a solution with good performance and I ended up creating a mix of what I found, which I think works well:
/**
 * This method will read the documents from the cursor in batches and invoke the callback
 * for each batch in parallel.
 * IT IS VERY RECOMMENDED TO CREATE THE CURSOR WITH A batchSize OPTION THAT MATCHES
 * THE VALUE OF batchSize. This way the performance benefits are maxed out, since
 * the mongo instance will send into our process memory the same number of documents
 * that we handle concurrently each time, so no memory space is wasted
 * and the memory usage stays limited.
 *
 * Example of usage:
 *   const cursor = await collection.aggregate([
 *       {...}, ...],
 *       {
 *           cursor: {batchSize: BATCH_SIZE} // Limiting memory use
 *       });
 *   DbUtil.concurrentCursorBatchProcessing(cursor, BATCH_SIZE, async (doc) => ...)
 *
 * @param cursor - A cursor to batch process on.
 *   We can get this from our collection.js API by using either aggregateCursor or findCursor.
 * @param batchSize - The batch size; should match the batchSize of the cursor option.
 * @param callback - Callback that should be async; will be called in parallel for each batch.
 * @return {Promise<void>}
 */
static async concurrentCursorBatchProcessing(cursor, batchSize, callback) {
    let doc;
    const docsBatch = [];

    while ((doc = await cursor.next())) {
        docsBatch.push(doc);

        if (docsBatch.length >= batchSize) {
            await PromiseUtils.concurrentPromiseAll(docsBatch, async (currDoc) => {
                return callback(currDoc);
            });

            // Emptying the batch array
            docsBatch.splice(0, docsBatch.length);
        }
    }

    // Checking if there is a last batch remaining since it was smaller than batchSize
    if (docsBatch.length > 0) {
        await PromiseUtils.concurrentPromiseAll(docsBatch, async (currDoc) => {
            return callback(currDoc);
        });
    }
}
An example of usage for reading many big documents and updating them:
const cursor = await collection.aggregate([
    {
        ...
    }
], {
    cursor: { batchSize: BATCH_SIZE }, // Limiting memory use
    allowDiskUse: true
});

const bulkUpdates = [];

await DbUtil.concurrentCursorBatchProcessing(cursor, BATCH_SIZE, async (doc: any) => {
    const update: any = {
        updateOne: {
            filter: {
                ...
            },
            update: {
                ...
            }
        }
    };

    bulkUpdates.push(update);

    // Updating if we read too many docs, to clear space in memory
    await this.bulkWriteIfNeeded(bulkUpdates, collection);
});

// Making sure we updated everything
await this.bulkWriteIfNeeded(bulkUpdates, collection, true);
...
private async bulkWriteIfNeeded(
    bulkUpdates: any[], collection: any,
    forceUpdate = false, flushBatchSize) {

    if (bulkUpdates.length >= flushBatchSize || forceUpdate) {
        // concurrentPromiseChunked is a method that loops over an array in a concurrent way using lodash.chunk and Promise.map
        await PromiseUtils.concurrentPromiseChunked(bulkUpdates, (upsertChunk: any) => {
            return collection.bulkWrite(upsertChunk);
        });

        // Emptying the array
        bulkUpdates.splice(0, bulkUpdates.length);
    }
}
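The PromiseUtils helpers referenced above are not shown in the answer. A minimal, hypothetical version of concurrentPromiseAll (just Promise.all over a mapped async callback) might look like this:
// Hypothetical sketch of the PromiseUtils.concurrentPromiseAll helper used above:
// run the async callback for every item concurrently and wait for all of them to settle.
class PromiseUtils {
    static async concurrentPromiseAll(items, asyncCallback) {
        return Promise.all(items.map((item) => asyncCallback(item)));
    }
}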