Limiting Number of Collection Records in Loopback Model - node.js

What I need to do is limit a particular collection (in my case logs) to 100 records. That is, when there are 100 logs and a new one is added, the oldest one is destroyed.
I know I can do this in mongo by setting the capped/size/max values, but I also want to have some code for more advanced query filters down the road.
I'm looking to introduce this as an Operation Hook (http://docs.strongloop.com/display/public/LB/Operation+hooks#Operationhooks-access), but I can't figure out how to query the Model in question and remove the oldest record once a threshold has been exceeded. Right now I've just set up an access hook that checks whether the threshold has been exceeded and, if so, deletes the oldest record. This would ultimately be done in the "before create" hook, but doing it this way is easier for testing.
Here's some Pseudocode (common/models/log.js):
module.exports = function (Log) {
  Log.observe('access', function logQuery(ctx, next) {
    var threshold = 10;
    // find() is asynchronous, so the check has to happen in its callback
    Log.find({ order: 'id ASC' }, function (err, logs) {
      if (err) return next(err);
      if (logs.length > threshold) {
        // delete the oldest record
        return logs[0].destroy(next);
      }
      next();
    });
  });
};
This still isn't quite right, but hopefully it gives a clue to what I'm trying to do.
Thanks.

I think what you're looking for is actually a remote hook and then the PersistedModel's count() method:
module.exports = function(Log) {
  Log.afterRemote('create', function accessCount(ctx, instance, next) {
    console.log('AFTER CREATE of a new Log');
    Log.count(function(err, count) {
      if (err) {
        // let this one go maybe? if not, call: next(err);
      }
      console.log('There are now ' + count + ' Log records.');
      // do whatever you need to here
      next();
    });
  });
};
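Building on that, a hedged sketch of the "do whatever you need to here" part could look like the following. It assumes ascending ids reflect insertion order (a created timestamp would work just as well) and that removing one record per create is enough to hold the cap:
module.exports = function (Log) {
  var THRESHOLD = 100;
  Log.afterRemote('create', function trimLogs(ctx, instance, next) {
    Log.count(function (err, count) {
      if (err) return next(err);
      if (count <= THRESHOLD) return next();
      // Over the cap: look up the oldest record and remove it.
      Log.findOne({ order: 'id ASC' }, function (err, oldest) {
        if (err || !oldest) return next(err);
        oldest.destroy(next);
      });
    });
  });
};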

Related

Using node.js and promise to fetch paginated data

Please keep in mind that I am new to node.js and am used to Android development.
My scenario is like this:
Run a query against the database that returns either null or a value
Call a web service with that database value; the service returns paginated info, meaning that each call gives me a parameter to pass to the next call if there is more info to fetch
After all the items are retrieved, store them in a database table
If everything went well, for each item received previously I need to make another web call and store the retrieved info in another table
If fetching any of the data sets fails, all data must be reverted from the database
So far, I've tried this:
getAllData: function () {
  self.getMainWebData(null)
    .then(function (result) {
      // get secondary data for each result row and insert it into the database
    });
}
getMainWebData: function (nextPage) {
  return new Promise(function (resolve, reject) {
    module.getWebData(nextPage, function (errorReturned, response, values) {
      if (errorReturned) {
        reject(errorReturned);
      }
      nextPage = response.nextPageValue;
      resolve(values);
    });
  }).then(function (result) {
    // here I need to insert the returned values in the database
    // there's a new page, so fetch the next set of data
    if (nextPage) {
      // call getMainWebData again?
      self.getMainWebData(nextPage);
    }
  });
}
There are a few things missing. From what I've tested, getAllData's .then fires only once, for the first set of items and not for the others, so clearly my handling of the returned data is not right.
LATER EDIT: I've edited the scenario. After some more research, my feeling is that I could use a chain of .then() calls to perform the operations in sequence.
Yes, it is happening because you are resolving the promise on the first call itself. You should put resolve(values) inside an if statement that checks whether more data still needs to be fetched. You will also need to restructure the logic, since Node is asynchronous, and the above code will not work unless you change it.
Solution 1:
You can accumulate the paginated responses in a variable outside the context of the calls you are making, and use that value later, once you are done with all the responses.
getAllData: function () {
  self.getMainWebData(null)
    .then(function (result) {
      // make your database transaction if result is not an error
    });
}
function getList(nextPage, result, callback) {
  module.getWebData(nextPage, function (errorReturned, response, values) {
    if (errorReturned)
      return callback(errorReturned);
    result.push(values);
    nextPage = response.nextPageValue;
    if (nextPage)
      getList(nextPage, result, callback);
    else
      callback(null, result);
  });
}
getMainWebData: function (nextPage) {
  return new Promise(function (resolve, reject) {
    var result = [];
    getList(nextPage, result, function (err, results) {
      if (err)
        reject(err);
      else {
        // Here all the items are retrieved; you can store them in a database table.
        // For each item received, make your web call and store the result in another variable or result set.
        // The suggestion is to make the database transaction only after you have retrieved all your data,
        // otherwise it will involve a database rollback, which will depend on the database you are using.
        // After all this is done, resolve the promise with the resulting value.
        resolve(results);
      }
    });
  });
}
I have not tested it, but something like this should work. If the problem persists, let me know in the comments.
Solution 2:
You can remove the promises and try the same thing with callbacks, which are easier to follow and will make sense to programmers who are familiar with structured languages.
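For example, a callback-only version of getAllData (a sketch that reuses the getList helper from Solution 1 and a hypothetical done callback) could look like this:
getAllData: function (done) {
  var result = [];
  getList(null, result, function (err, results) {
    if (err) return done(err);
    // all pages collected; do the database work here, then report back
    done(null, results);
  });
}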
Looking at your problem, I have written code that loops through the promises and only proceeds if there is more data to be fetched; the data gathered so far remains available in an array.
I hope this helps. Don't forget to mark the answer if it does.
let fetchData = (offset = 0, limit = 10) => {
  // stand-in for the real paginated fetch
  let addresses = [...Array(100).keys()];
  return Promise.resolve(addresses.slice(offset, offset + limit));
};
// o => offset & l => limit
let o = 0, l = 10;
let results = [];
let process = p => {
  if (!p) return p;
  return p.then(data => {
    // Process the data here
    console.log(data);
    // increment the pagination
    o += l;
    results = results.concat(data);
    // while a full page (equal to the limit) came back, fetch the next page,
    // otherwise return the collected result
    return (data.length === l) ? process(fetchData(o, l)) : results;
  });
};
process(fetchData(o, l))
  .then(data => {
    // All the fetched data will be here
  }).catch(err => {
    // Handle errors here.
    // All the data retrieved so far is still available in the "results" array
  });
If you want to do this more often, I have also created a gist for reference.
If you don't want to use any global variable and want to do it in a more functional way, you can check this example, although it requires a little more complication.
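As a rough sketch of that functional idea (my own illustration, not the linked gist): thread the accumulator through the promise chain instead of keeping a module-level results array, reusing the same stand-in fetchData from above:
let fetchAllPages = (offset = 0, limit = 10, acc = []) =>
  fetchData(offset, limit).then(data => {
    let collected = acc.concat(data);
    // a full page means there may be more; otherwise we are done
    return data.length === limit
      ? fetchAllPages(offset + limit, limit, collected)
      : collected;
  });

fetchAllPages()
  .then(all => console.log('fetched', all.length, 'items'))
  .catch(err => console.error(err));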

Google Cloud Datastore, how to query for more results

Straight and simple, I have the following function, using Google Cloud Datastore Node.js API:
fetchAll(query, result = [], queryCursor = null) {
  this.debug(`datastoreService.fetchAll, queryCursor=${queryCursor}`);
  if (queryCursor !== null) {
    query.start(queryCursor);
  }
  return this.datastore.runQuery(query)
    .then((results) => {
      result = result.concat(results[0]);
      if (results[1].moreResults === _datastore.NO_MORE_RESULTS) {
        return result;
      } else {
        this.debug(`results[1] = `, results[1]);
        this.debug(`fetch next with queryCursor=${results[1].endCursor}`);
        return this.fetchAll(query, result, results[1].endCursor);
      }
    });
}
The Datastore API object is in the variable this.datastore.
The goal of this function is to fetch all results for a given query, notwithstanding any limits on the number of items returned per single runQuery call.
I have not yet found any definite hard limits imposed by the Datastore API on this, and the documentation seems somewhat opaque on this point, but I have noticed that I always get
results[1] = { moreResults: 'MORE_RESULTS_AFTER_LIMIT' },
indicating that there are still more results to be fetched, and the results[1].endCursor remains stuck on a constant value that is passed on again at each iteration.
So, given some simple query that I plug into this function, I just go on running the query iteratively, setting the query start cursor (by doing query.start(queryCursor);) to the endCursor obtained in the result of the previous query. And my hope is, obviously, to obtain the next bunch of results on each successive query in this iteration. But I always get the same value for results[1].endCursor. My question is: Why?
Conceptually, I cannot see a difference to this example given in the Google Documentation:
// By default, google-cloud-node will automatically paginate through all of
// the results that match a query. However, this sample implements manual
// pagination using limits and cursor tokens.
function runPageQuery (pageCursor) {
  let query = datastore.createQuery('Task')
    .limit(pageSize);
  if (pageCursor) {
    query = query.start(pageCursor);
  }
  return datastore.runQuery(query)
    .then((results) => {
      const entities = results[0];
      const info = results[1];
      if (info.moreResults !== Datastore.NO_MORE_RESULTS) {
        // If there are more results to retrieve, the end cursor is
        // automatically set on `info`. To get this value directly, access
        // the `endCursor` property.
        return runPageQuery(info.endCursor)
          .then((results) => {
            // Concatenate entities
            results[0] = entities.concat(results[0]);
            return results;
          });
      }
      return [entities, info];
    });
}
(except that I don't specify a limit on the size of the query result myself; I have also tried that, setting it to 1000, which does not change anything.)
Why does my code run into this infinite loop, stuck on each step at the same "endCursor"? And how do I correct this?
Also, what is the hard limit on the number of results obtained per call of datastore.runQuery()? I have not found this information in the Google Datastore documentation thus far.
Thanks.
Looking at the API documentation for the Node.js client library for Datastore, there is a section on that page titled "Paginating Records" that may help you. Here's a direct copy of the code snippet from that section:
var express = require('express');
var app = express();
var NUM_RESULTS_PER_PAGE = 15;
app.get('/contacts', function(req, res) {
  var query = datastore.createQuery('Contacts')
    .limit(NUM_RESULTS_PER_PAGE);
  if (req.query.nextPageCursor) {
    query.start(req.query.nextPageCursor);
  }
  datastore.runQuery(query, function(err, entities, info) {
    if (err) {
      // Error handling omitted.
      return;
    }
    // Respond to the front end with the contacts and the cursoring token
    // from the query we just ran.
    var frontEndResponse = {
      contacts: entities
    };
    // Check if more results may exist.
    if (info.moreResults !== datastore.NO_MORE_RESULTS) {
      frontEndResponse.nextPageCursor = info.endCursor;
    }
    res.render('contacts', frontEndResponse);
  });
});
Maybe you can try using one of the other syntax options (instead of Promises). The runQuery method can take a callback function as an argument, and that callback's parameters include explicit references to the entities array and the info object (which has the endCursor as a property).
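To illustrate, here is an untested sketch of the original fetchAll loop rewritten against the callback form of runQuery, under the same assumptions as the snippet above (a datastore client and a chainable query.start()):
function fetchAllWithCallback(datastore, query, result, done) {
  result = result || [];
  datastore.runQuery(query, function (err, entities, info) {
    if (err) {
      return done(err);
    }
    result = result.concat(entities);
    if (info.moreResults === datastore.NO_MORE_RESULTS) {
      // nothing left to fetch
      return done(null, result);
    }
    // continue from the cursor this batch ended on
    fetchAllWithCallback(datastore, query.start(info.endCursor), result, done);
  });
}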
And there are limits and quotas imposed on calls to the Datastore API as well. Here are links to official documentation that address them in detail:
Limits
Quotas

How can I cancel MongoDB query from .each callback

I implemented a little Node.js web server that stores log entries and provides a backend for a web-based log browser. The web interface also provides an "Export to CSV" function and lets the user download the logs in CSV format. My code looks similar to this:
this.log_entries(function(err, collection) {
  collection.find(query)
    .sort({_id: 1})
    .each(function (err, doc) {
      if (doc) {
        WriteLineToCSVFile(doc);
      }
      else {
        ZipCSVFileAndSendIt();
      }
    });
});
The export operation may take a significant amount of time and disk space if a user didn't specify the right filters for the query. I need to implement a fail-safe mechanism to prevent this. One important requirement is that the user should be able to abort an ongoing export operation at any point in time. Currently my solution is that I stop writing the data to the CSV file; however, the callback passed to .each() still gets called. I could not find any information on how to stop the each loop. So the question is: how can I do this?
UPDATE, THE ANSWER:
Use cursor.nextObject()
For the correct answer see the comments by @dbra below: db.currentOp() and db.killOp() don't work for this case.
The final solution looks like this:
this.log_entries(function(err, collection) {
  var cursor = collection.find(query);
  cursor.sort("_id", 1, function(err, sorted) {
    function exportFinished(aborted) {
      ...
    }
    function processItem(err, doc) {
      if (doc === null) {
        exportFinished(false);
      }
      else if (abortCurrentExport) {
        exportFinished(true);
      }
      else {
        var line = formatCSV(doc);
        WriteFile(line);
        process.nextTick(function() {
          sorted.nextObject(processItem);
        });
      }
    }
    sorted.nextObject(processItem);
  });
});
Note the usage of process.nextTick - without it there will be a stack overflow!
You could search for the running query with db.currentOp and then kill it with db.killOp, but it would be a nasty solution.
A better way would be to work in limited, subsequent batches; the easiest approach is simple pagination with "limit" and "skip", though it depends on how your collection changes while you read it.
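A rough, untested sketch of that batched idea, reusing the question's abortCurrentExport flag and helper functions, might look like this (skip-based paging is the simplest; if the collection changes during the export, paging on the last seen _id would be safer):
function exportInBatches(collection, query, skip, batchSize, done) {
  if (abortCurrentExport) {
    return done(null, true); // aborted by the user
  }
  collection.find(query)
    .sort({ _id: 1 })
    .skip(skip)
    .limit(batchSize)
    .toArray(function (err, docs) {
      if (err) return done(err);
      docs.forEach(function (doc) {
        WriteLineToCSVFile(doc);
      });
      if (docs.length < batchSize) {
        return done(null, false); // finished normally
      }
      exportInBatches(collection, query, skip + batchSize, batchSize, done);
    });
}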

Nodejs behaviour

I have been working with Node.js + MongoDB, using the Express and Mongoose frameworks, for a few months, and I wanted to ask you guys what is really happening in a situation such as the following:
Model1.find({}, function (err, elems) {
  if (err) {
    console.log('ERROR');
  } else {
    elems.forEach(function (el) {
      Model2.find({[QUERY RELATED WITH FIELDS IN 'el']}, function (err, elems2) {
        if (err) {
          console.log('ERROR');
        } else {
          // DO STUFF.
        }
      });
    });
  }
});
My best guess is that there's a main thread looping over elems, and then different threads attending each query over Model2, but I'm not really sure.
Is that correct? And also, is this a good solution? If not, how would you code a situation such as this, where you need the information in each of the elements you get from Model1 in order to get elements from Model2, and then perform the actual functionality you are looking for?
I know I could build a more complex query that gets all the elements each 'el' in elems would yield, but I'd rather not do that, because in that case I would be worried about the memory expense.
Also, I've been thinking about changing the data model, but I've gone over it and I'm confident it is well thought out, and I don't think that's the best solution for my application.
Thanks!
Node.js is a single-threaded environment; blocking operations such as the network requests in your case are handled asynchronously. So there is only one thread, and your query callbacks are invoked asynchronously so that nothing blocks on the intensive network operations.
In your scenario, if the first query returns a lot of records, say 100,000, you may exhaust your Mongo server, because your loop will instantly fire one query for every result of the first query. This happens because Node won't wait for the results of each query before moving on, as it works asynchronously.
So manually throttling requests to network operations is usually good practice. This is not trivial in an asynchronous environment. One way to do it is with a recursive function call: split your tasks into groups, handle each group as a batch, and once you are done with one batch start the next group.
Here is a simple example of how to do it. I have used promises instead of callback functions; Q is a promise library that is very useful for handling promises:
var Q = require('q'); // the Q promise library
var rows = [...]; // array of many

function handleRecursively(startIndex, batchSize) {
  var promises = [];
  for (var i = 0; i < batchSize && startIndex + i < rows.length; i++) {
    var theRow = rows[startIndex + i];
    promises.push(doAsynchronousJobWithTheRow(theRow));
  }
  // wait until all tasks in this batch are handled
  Q.all(promises).then(function () {
    startIndex += batchSize;
    if (startIndex < rows.length) { // if there are still tasks to do, continue with the next batch
      handleRecursively(startIndex, batchSize);
    }
  });
}

handleRecursively(0, 1000);
Here is the best solution:
Model1.find({}, function (err, elems) {
  if (err) {
    console.log('ERROR');
  } else {
    loopAllElements(0, elems);
  }
});

function loopAllElements(startIndex, elems) {
  if (startIndex == elems.length) {
    return "success";
  } else {
    Model2.find({[QUERY RELATED WITH FIELDS IN elems[startIndex] ]}, function (err, elems2) {
      if (err) {
        console.log('ERROR');
        return "error";
      } else {
        // DO STUFF.
        loopAllElements(startIndex + 1, elems);
      }
    });
  }
}

how to make this function async in node.js

Here is the situation:
I am new to node.js. I have a 40 MB file containing multilevel JSON like:
[{},{},{}] This is an array of objects (~7000 objects). Each object has properties, and one of those properties is also an array of objects.
I wrote a function to read the content of the file and iterate over it. I succeeded in getting what I wanted in terms of content, but not usability. I thought I had written an async function that would allow Node to serve other web requests while iterating the array, but that is not the case. I would be very thankful if anyone could point out what I've done wrong and how to rewrite it so I can have a non-blocking iteration. Here's the function that handles the situation:
var js; // holds the parsed file contents

function getContents(callback) {
  fs.readFile(file, 'utf8', function (err, data) {
    if (err) {
      console.log('Error: ' + err);
      return;
    }
    js = JSON.parse(data);
    callback();
    return;
  });
}

getContents(iterateGlobalArr);

var count = 0;
function iterateGlobalArr() {
  if (count < js.length) {
    innerArr = js.nestedProp;
    // iterate nutrients
    innerArr.forEach(function(e, index) {
      // some simple if condition here
    });
    var schema = {
      //.....get props from forEach iteration
    }
    Model.create(schema, function(err, post) {
      if(err) {
        console.log('\ncreation error\n', err);
        return;
      }
      if (!post) {
        console.log('\nfailed to create post for schema:\n' + schema);
        return;
      }
    });
    count++;
    process.nextTick(iterateGlobalArr);
  }
  else {
    console.log("\nIteration finished");
    next();
  }
}
Just so it is clear how I've tested the above: I open two tabs, one loading this iteration, which takes some time, and a second hitting another Node route, which does not load until the iteration is over. So essentially I've written blocking code, but I'm not sure how to refactor it! I suspect that because everything is happening in the callback I am unable to release the event loop to handle another request...
Your code is almost correct. What you are doing is inadvertently adding ALL the items to the very next tick... which still blocks.
The important piece of code is here:
Model.create(schema, function(err, post) {
  if(err) {
    console.log('\ncreation error\n', err);
    return;
  }
  if (!post) {
    console.log('\nfailed to create post for schema:\n' + schema);
    return;
  }
});
// add EVERYTHING to the very same next tick!
count++;
process.nextTick(iterateGlobalArr);
Let's say you are in tick A of the event loop when getContents() runs and count is 0. You enter iterateGlobalArr and you call Model.create. Because Model.create is async, it is returning immediately, causing process.nextTick() to add processing of item 1 to the next tick, let's say B. Then it calls iterateGlobalArr, which does the same thing, adding item 2 to the next tick, which is still B. Then item 3, and so on.
What you need to do is move the count increment and process.nextTick() into the callback of Model.create(). This will make sure the current item is processed before nextTick is invoked... which means the next item is actually added to the next tick AFTER the model item has been created... which will give your app time to handle other things in between. The fixed version of iterateGlobalArr is here:
function iterateGlobalArr() {
  if (count < js.length) {
    innerArr = js.nestedProp;
    // iterate nutrients
    innerArr.forEach(function(e, index) {
      // some simple if condition here
    });
    var schema = {
      //.....get props from forEach iteration
    }
    Model.create(schema, function(err, post) {
      // schedule our next item to be processed immediately.
      count++;
      process.nextTick(iterateGlobalArr);
      // then move on to handling this result.
      if(err) {
        console.log('\ncreation error\n', err);
        return;
      }
      if (!post) {
        console.log('\nfailed to create post for schema:\n' + schema);
        return;
      }
    });
  }
  else {
    console.log("\nIteration finished");
    next();
  }
}
Note also that I would strongly suggest passing your js and counter in with each call to iterateGlobalArr, as it will make iterateGlobalArr a lot easier to debug, among other things, but that's another story.
Cheers!
Node is single-threaded, so async will only help you if you are relying on another system or subsystem to do the work (a shell script, an external database, a web service, etc.). If you have to do the work in Node, you are going to block while you do it.
It is possible to create one Node process per core. That would mean only one of the Node processes blocks, leaving the rest to service your requests, but this feature is still listed as experimental: http://nodejs.org/api/cluster.html (see the sketch after the quoted passage below).
A single instance of Node runs in a single thread. To take advantage of multi-core systems the user will sometimes want to launch a cluster of Node processes to handle the load.
The cluster module allows you to easily create child processes that all share server ports.
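As a minimal sketch of that cluster approach (my own illustration, not tied to the question's code; the port number is arbitrary):
var cluster = require('cluster');
var http = require('http');
var os = require('os');

if (cluster.isMaster) {
  // Fork one worker per CPU core.
  os.cpus().forEach(function () {
    cluster.fork();
  });
} else {
  // Each worker has its own event loop, so a long-running iteration in one
  // worker does not block requests handled by the others.
  http.createServer(function (req, res) {
    res.end('handled by worker ' + process.pid + '\n');
  }).listen(8000);
}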
