I have this code inside my Foxx application:
var geodata = new Geodata(
  applicationContext.collection('geodata'),
  {model: Geodatum}
);
/** List of all geodata.
 *
 * This function simply returns the list of all Geodatum.
 */
controller.get('/', function (req, res) {
  var parameters = req.parameters;
  var limit = parameters['limit'];
  var result = geodata.all().limit(10);
  if (limit != "undefined") {
    result = result.slice(0, limit);
  }
  res.json(_.map(result, function (model) {
    return model.forClient();
  }));
});
According to the docs I should be able to use pagination here. I want to limit the search results by the given 'limit' parameter, but this gives me an error:
2016-05-16T14:17:58Z [6354] ERROR TypeError: geodata.all(...).limit is not a function
https://docs.arangodb.com/SimpleQueries/Pagination.html
The documentation refers to collections. You seem to be using a Foxx repository. Foxx repositories are wrappers around collections that provide most of the same methods but instead of returning plain documents (or cursors) they wrap the results in Foxx models.
In your case it looks like you probably don't want to use Foxx models at all (you're just converting them back to documents, likely just removing a few attributes like _rev and _id), so you could simply forgo the repository completely and use the collection you're passing into it directly:
var geodata = applicationContext.collection('geodata');

/** List of all geodata.
 *
 * This function simply returns the list of all Geodatum.
 */
controller.get('/', function (req, res) {
  var parameters = req.parameters;
  var limit = parameters['limit'];
  var result = geodata.all().limit(10).toArray(); // materialize the cursor
  if (limit !== undefined) {
    result = result.slice(0, limit);
  }
  res.json(_.map(result, function (doc) {
    return _.omit(doc, ['_id', '_rev']);
  }));
});
You're not the first person to be confused by the distinction between repositories and collections, which is why repositories and models will go away in the upcoming 3.0 release (but you can still use them in legacy 2.8-compatible services if you need to).
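If you do want the pagination from the linked docs, skip() and limit() chain on the simple query directly. Here is a minimal sketch, assuming ArangoDB 2.8 simple queries and hypothetical skip/limit query parameters:

controller.get('/', function (req, res) {
  var limit = parseInt(req.parameters['limit'], 10) || 10; // assumed default page size
  var skip = parseInt(req.parameters['skip'], 10) || 0;
  var docs = geodata.all().skip(skip).limit(limit).toArray();
  res.json(_.map(docs, function (doc) {
    return _.omit(doc, ['_id', '_rev']);
  }));
});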
Straight and simple, I have the following function, using the Google Cloud Datastore Node.js API:
fetchAll(query, result = [], queryCursor = null) {
  this.debug(`datastoreService.fetchAll, queryCursor=${queryCursor}`);
  if (queryCursor !== null) {
    query.start(queryCursor);
  }
  return this.datastore.runQuery(query)
    .then((results) => {
      result = result.concat(results[0]);
      if (results[1].moreResults === _datastore.NO_MORE_RESULTS) {
        return result;
      } else {
        this.debug(`results[1] = `, results[1]);
        this.debug(`fetch next with queryCursor=${results[1].endCursor}`);
        return this.fetchAll(query, result, results[1].endCursor);
      }
    });
}
The Datastore API object is in the variable this.datastore.
The goal of this function is to fetch all results for a given query, notwithstanding any limits on the number of items returned per single runQuery call.
I have not yet found any definite hard limits imposed by the Datastore API on this, and the documentation seems somewhat opaque on the point, but I have noticed that I always get
results[1] = { moreResults: 'MORE_RESULTS_AFTER_LIMIT' },
indicating that there are still more results to be fetched, while results[1].endCursor remains stuck on a constant value that is passed on again on each iteration.
So, given some simple query that I plug into this function, I just go on running the query iteratively, setting the query start cursor (by doing query.start(queryCursor);) to the endCursor obtained in the result of the previous query. My hope is, obviously, to obtain the next batch of results on each successive query in this iteration. But I always get the same value for results[1].endCursor. My question is: why?
Conceptually, I cannot see a difference to this example given in the Google Documentation:
// By default, google-cloud-node will automatically paginate through all of
// the results that match a query. However, this sample implements manual
// pagination using limits and cursor tokens.
function runPageQuery (pageCursor) {
  let query = datastore.createQuery('Task')
    .limit(pageSize);

  if (pageCursor) {
    query = query.start(pageCursor);
  }

  return datastore.runQuery(query)
    .then((results) => {
      const entities = results[0];
      const info = results[1];

      if (info.moreResults !== Datastore.NO_MORE_RESULTS) {
        // If there are more results to retrieve, the end cursor is
        // automatically set on `info`. To get this value directly, access
        // the `endCursor` property.
        return runPageQuery(info.endCursor)
          .then((results) => {
            // Concatenate entities
            results[0] = entities.concat(results[0]);
            return results;
          });
      }

      return [entities, info];
    });
}
(Except that I don't specify a limit on the size of the query result myself. I have also tried setting it to 1000, which does not change anything.)
Why does my code run into this infinite loop, stuck on each step at the same "endCursor"? And how do I correct this?
Also, what is the hard limit on the number of results obtained per call of datastore.runQuery()? I have not found this information in the Google Datastore documentation thus far.
Thanks.
Looking at the API documentation for the Node.js client library for Datastore, there is a section on that page titled "Paginating Records" that may help you. Here's a direct copy of the code snippet from that section:
var express = require('express');
var app = express();

var NUM_RESULTS_PER_PAGE = 15;

app.get('/contacts', function(req, res) {
  var query = datastore.createQuery('Contacts')
    .limit(NUM_RESULTS_PER_PAGE);

  if (req.query.nextPageCursor) {
    query.start(req.query.nextPageCursor);
  }

  datastore.runQuery(query, function(err, entities, info) {
    if (err) {
      // Error handling omitted.
      return;
    }

    // Respond to the front end with the contacts and the cursoring token
    // from the query we just ran.
    var frontEndResponse = {
      contacts: entities
    };

    // Check if more results may exist.
    if (info.moreResults !== datastore.NO_MORE_RESULTS) {
      frontEndResponse.nextPageCursor = info.endCursor;
    }

    res.render('contacts', frontEndResponse);
  });
});
Maybe you can try using one of the other syntax options (instead of Promises). The runQuery method can take a callback function as an argument, and that callback's parameters include explicit references to the entities array and the info object (which has the endCursor as a property).
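If you would rather keep the Promise style, here is a hedged sketch modeled directly on the docs' runPageQuery example above, accumulating every page until NO_MORE_RESULTS (the 'Task' kind, the page size, and the datastore/Datastore handles are assumptions):

// Hedged sketch based on the documentation's recursive pattern.
function fetchAllPages(pageCursor, accumulated = []) {
  let query = datastore.createQuery('Task').limit(1000); // assumed page size
  if (pageCursor) {
    query = query.start(pageCursor); // reassign, as in the docs example
  }
  return datastore.runQuery(query)
    .then((results) => {
      const all = accumulated.concat(results[0]);
      const info = results[1];
      if (info.moreResults !== Datastore.NO_MORE_RESULTS) {
        return fetchAllPages(info.endCursor, all); // recurse on the new cursor
      }
      return all;
    });
}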
And there are limits and quotas imposed on calls to the Datastore API as well. Here are links to official documentation that address them in detail:
Limits
Quotas
Is it possible to run a db.model.find() query inside a function and retrieve its result without using callbacks or promises with the mongoose library?
I need to make sure a user exists while a controller is running, and I can't push the current scope into a callback because the same database operations are repeated in many places. I'm also trying to follow the MVC model in my project, so I want to keep the helper libs (modules) in separate files. That's why I don't want to use callbacks or promises: they would complicate everything even more than it already is.
For example, how should I rewrite the following code so it executes successfully (if that's actually possible)? You can ignore the login model and controller; they are only there to show how complicated things would get if this code were rewritten with callbacks:
user.js lib
var db = require('./lib/db');

class User {
  constructor(id) { //get user by id
    var result = db.models.user.findOne({_id: id}); //unsupported syntax in real :(
    if (!result || result._id != id)
      return false;
    else {
      this.userInfo = result;
      return result;
    }
  }
}

module.exports = User;
login model
var user = require('./lib/user');

var model = {};

model.checkUserLogged = function(req) {
  if (!req.user.id || req.user.id == undefined)
    return false;
  if (!(this.user = new user(req.user.id)))
    return false;
  else
    return true;
};

module.exports = model;
login controller
var proxy = require('express').Router();

proxy.all('/login', function(req, res) {
  var model = require('./models/login');
  if (!model.checkUserLogged(req)) {
    console.log('User is not logged in!');
    res.render('unlogged', model);
  } else {
    console.log('User exists in database!');
    res.render('logged_in', model);
  }
});
Generator functions/yield, async/await (ES2017), and anything else can be used, as long as it solves the problem without nesting.
Thanks in advance.
There are two things wrong here:
Mongoose methods can't be called synchronously (and anyway, calling a database synchronously is not a good idea at all).
Neither async/await nor generators can be used in the constructor of an ES6 class. It is explained in this answer.
If you don't want nested code, an easy option could be to use async/await (currently available in Node.js behind a flag, not recommended for production). Since Mongoose methods return promises, they can be used with async/await.
But as I said, you cannot do that in the constructor, so it has to go somewhere else.
As an example, you could do something like this:
var proxy = require('express').Router();
var db = require('./lib/db');

proxy.all('/login', async function(req, res) {
  const result = await db.models.user.findOne({_id: req.user.id}).exec();
  if (!result) {
    console.log('User is not logged in!');
    return res.render('unlogged');
  }
  res.render('logged_in');
});
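If you still want a User class like the one in the question, a hedged sketch of the usual workaround is a static async factory method instead of querying inside the constructor (db and the model name are taken from the question):

var db = require('./lib/db');

class User {
  constructor(userInfo) {
    this.userInfo = userInfo;
  }

  // The query happens outside the constructor; resolves to a User or null.
  static async findById(id) {
    const result = await db.models.user.findOne({_id: id}).exec();
    return result ? new User(result) : null;
  }
}

module.exports = User;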
Old question, but I want to share a method for handling this that I didn't see in my first couple searches.
I want to get data from a model, run some logic and return the results from that logic. I need a promise wrapper around my call to the model.
Below is a slightly abstracted function that takes a model to run a mongoose/mongo query on, and a couple of params to help it do some logic. It then resolves the promise with the expected value or rejects.
export function promiseFunction(aField: string, aValue, model: Model<ADocument, {}>): Promise<aType> {
  return new Promise<aType>((resolve, reject) => {
    model.findOne({[aField]: aValue}, (err, theDocument) => {
      if (err) {
        reject(err.toString());
      } else {
        if (theDocument && theDocument.someCheck === true) {
          resolve(theDocument.matchingTypeField); // resolve, not return
        } else {
          reject("there was an error of some type");
        }
      }
    });
  });
}
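Hypothetical usage of the wrapper above (the model, field, and value are made up for illustration):

// UserModel, "email", and the value are assumptions.
promiseFunction("email", "user@example.com", UserModel)
  .then((value) => console.log("matched:", value))
  .catch((err) => console.error(err));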
I've drawn a simple flow chart for a task that basically crawls some data from the internet and loads it into a database. So far I had thought I was at peace with promises, but now I have an issue I've been working on for at least three days without making a single step of progress.
Here is the flow chart:
Consider there is a static string array like so: const courseCodes = ["ATA", "AKM", "BLG", ... ].
I have a fetch function; it basically does an HTTP request followed by parsing, and afterwards it returns an array of objects.
fetch works perfectly, invoking its callback with the expected object array; it even worked with Promises, which was much tidier.
The fetch function should be invoked with every element of the courseCodes array as its parameter. These tasks should be performed in parallel, since the separate fetch calls do not affect each other.
As a result, there should be a results array in the callback (or in the Promise's resolve parameter) containing an array of arrays of objects. With those results, I should invoke my loadCourse function with each object in the results array as its parameter. Those tasks should be performed serially, because loadCourse queries the database to check whether a similar object already exists and inserts it if it does not.
How can I perform these kinds of tasks in node.js? I could not maintain the asynchronous flow in a scenario like this. I've failed with the caolan/async library and with the bluebird and q promise libraries.
Try something like this:
const courseCodes = ["ATA", "AKM", "BLG", ... ];

//stores the tasks to be performed.
var parallelTasks = [];
var serialTasks = [];

//keeps track of courses fetched & results.
var courseFetchCount = 0;
var results = {};

//your fetch function.
function fetch(course_code) {
  //your code to fetch & parse.
  //store result for each course in results object.
  results[course_code] = 'whatever result comes from your fetch & parse code...';
}

//your load function.
function loadCourse(results) {
  for (var index in results) {
    var result = results[index]; //result for a single course.
    var task = (
      function(result) {
        return function() {
          saveToDB(result);
        };
      }
    )(result);
    serialTasks.push(task);
  }

  //execute serial tasks for saving results to database or whatever.
  nextInSerial(null, serialTasks.shift());
}

//pseudo function to save a result to database.
function saveToDB(result) {
  //your code to store in db here.
}

//checks if fetch() is complete for all course codes in your array
//and then starts the serial tasks for saving results to database.
function CheckIfAllCoursesFetched() {
  courseFetchCount++;
  if (courseFetchCount == courseCodes.length) {
    //now process courses serially.
    loadCourse(results);
  }
}

//helper function that executes tasks in serial fashion.
function nextInSerial(err, task) {
  if (err) throw new Error(err.message);
  if (!task) return; //all serial tasks done.
  task();
  nextInSerial(null, serialTasks.shift());
}

//build the parallel tasks for fetching.
for (var index in courseCodes) {
  var course_code = courseCodes[index];
  var task = (
    function(course_code) {
      return function() {
        fetch(course_code);
        CheckIfAllCoursesFetched();
      };
    }
  )(course_code);
  parallelTasks.push(task);
}

//start executing parallel tasks for fetching.
for (var task_index in parallelTasks) {
  parallelTasks[task_index]();
}
Or you may refer to the nimble npm module.
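For comparison, here is a minimal sketch of the same flow with plain Promises, assuming hypothetical fetchCourse(code) and saveToDB(obj) functions that each return a Promise:

function crawlAndLoad(courseCodes) {
  // Run all fetches in parallel; results is an array of arrays of objects.
  return Promise.all(courseCodes.map(fetchCourse))
    .then(function (results) {
      // Flatten, then save serially: each save waits for the previous one.
      var objects = [].concat.apply([], results);
      return objects.reduce(function (chain, obj) {
        return chain.then(function () { return saveToDB(obj); });
      }, Promise.resolve());
    });
}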
I have the following queries, which start with the getById method firing; once that has extracted data from another document, it saves the result into the race document.
I want to cache the data for ten minutes after I save it. I have taken a look at the cacheman library and am not sure it is the right tool for the job. What would be the best way to approach this?
getById: function(opts, callback) {
  var id = opts.action;
  var raceData = {};
  var self = this;
  this.getService().findById(id, function(err, resp) {
    if (err)
      callback(null);
    else {
      raceData = resp;
      self.getService().getPositions(id, function(err, positions) {
        self.savePositions(positions, raceData, callback);
      });
    }
  });
},

savePositions: function(positions, raceData, callback) {
  var race = [];
  _.each(positions, function(item) {
    _.each(item.position, function(el) {
      race.push(el);
    });
  });
  raceData.positions = race;
  this.getService().modelClass.update(
    {'_id': raceData._id},
    {'positions': raceData.positions},
    function() { callback(raceData); } //invoke the callback after the update
  );
}
I have recently coded and published a module called Monc. You can find the source code over here. It has several useful methods to store, delete, and retrieve data held in memory.
You may use it to cache Mongoose queries with simple chaining, as in:
test.find({}).lean().cache().exec(function(err, docs) {
  //docs are fetched into the cache.
});
Otherwise you may need to take a look at the core of Mongoose and override the prototype in order to provide a way to use cacheman as you originally suggested.
Create a node module and force it to extend Mongoose as:
monc.hellocache(mongoose, {});
Inside your module you should extend Mongoose.Query.prototype:
exports.hellocache = module.exports.hellocache = function(mongoose, options, Aggregate) {
  //require cacheman
  var CachemanMemory = require('cacheman-memory');
  var cache = new CachemanMemory();
  var m = mongoose;

  m.execAlter = function(caller, args) {
    //do your stuff here
  };

  m.Query.prototype.exec = function(arg1, arg2) {
    return m.execAlter.call(this, 'exec', arguments);
  };
};
Take a look at Monc's source code, as it may be a good reference on how to extend and chain Mongoose methods.
I will explain this with the npm redis package, which stores key/value pairs on a cache server. Keys are the queries, and Redis stores only strings.
We have to make sure that keys are unique and consistent, so the key should encode both the query and the name of the collection you are querying.
When you query, inside the mongoose library there is a constructor function
function Query(conditions, options, model, collection) {} //constructor function
responsible for queries. Inside this constructor,
Query.prototype.exec = function exec(op, callback) {}
is the function responsible for executing queries, so we have to patch it to perform these tasks:
first, check whether we have any cached data related to the query;
if yes, respond to the request with the cached data right away;
if not, execute the query, update our cache, and then respond.
const mongoose = require("mongoose");
const redis = require("redis"); //the npm redis package
const redisUrl = "redis://127.0.0.1:6379";
const client = redis.createClient(redisUrl);
const util = require("util");

//client.get does not return a promise, so promisify it
client.get = util.promisify(client.get);

//keep a reference to the original exec.
//mongoose code is written using classical prototype inheritance for
//setting up objects and classes inside the library.
const exec = mongoose.Query.prototype.exec;

mongoose.Query.prototype.exec = async function() {
  //create a unique and consistent key
  const key = JSON.stringify(
    Object.assign({}, this.getQuery(), {
      collection: this.mongooseCollection.name
    })
  );

  //see if we have a value for the key in redis
  const cachedValue = await client.get(key);

  //if we do, return that as a mongoose model.
  //the exec function expects us to return mongoose documents
  if (cachedValue) {
    const doc = JSON.parse(cachedValue);
    return Array.isArray(doc)
      ? doc.map(d => new this.model(d))
      : new this.model(doc);
  }

  //otherwise run exec's original task and cache the result.
  const result = await exec.apply(this, arguments);
  //saved to the cache server; make sure capital letters EX and time in seconds
  client.set(key, JSON.stringify(result), "EX", 6000);
  return result;
};
If we store values as an array of objects, we need to make sure each object is individually converted to a mongoose document.
this.model is the model class attached to the Query; calling new this.model(obj) converts a plain object into a mongoose document.
Note that if you are storing nested values, use client.hset and client.hget instead of client.get and client.set.
Now that we have monkey-patched Query.prototype.exec, you do not need to export this function; wherever you have a query operation in your code, mongoose will execute the code above.
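With the patch applied, ordinary queries go through the cache transparently; a hedged usage sketch (the User model and query are assumptions):

const User = mongoose.model("User");

User.find({ active: true }).exec().then((users) => {
  //the first call hits MongoDB and stores the result in Redis;
  //identical calls within the expiry window are served from the cache.
});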
Maybe there is not a definitive answer here, but I would like to know where to handle data validation when dealing with express.js and mongoose. Which of the following is best practice (I currently use a combination, and it's starting to feel very clumsy):
the Model (mongoose)
the Controller / Route (express)
Some older posts I have read are:
this;
this;
and, this;
but conflicting answers just add to the confusion. Maybe it simply isn't clear cut, in which case is one a better option?
When using mongoose I would push most of my validation logic to the mongoose model/schema. You can use mongoose-validator which is just a wrapper around node-validator for simple model validation. If you need validation against other models or more complex logic in the validation you can write your own custom mongoose pre validate or post validate hook (see mongoose middleware).
An additional benefit you gain when using mongoose to validate your model is that mongoose adds an errors property to your model, which can be accessed via model.errors[property]. This property can be used for validation error messages on the web or for a service client.
When writing more/very complex software tying the validation to the model may become a problem. But I'd deal with this problem when it arises. Since JavaScript has functions as first class citizens your validation functions still can be reused even in these complex situations.
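For illustration, here is a hedged sketch of both approaches (the schema and field names are made up): a simple field validator plus a pre-validate hook for more complex logic:

var mongoose = require('mongoose');

var userSchema = new mongoose.Schema({
  email: {
    type: String,
    required: true,
    validate: {
      validator: function (v) { return /@/.test(v); }, //simplistic check
      message: 'invalid email'
    }
  },
  age: { type: Number, min: 0 }
});

//more complex or cross-field logic can live in a pre-validate hook
userSchema.pre('validate', function (next) {
  if (this.age > 150) {
    this.invalidate('age', 'age looks implausible');
  }
  next();
});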
The mongoose validator is a great place to start on a model level, but if you need to have controller specific validation, I use the following code in a utils.js file:
var async = require('async');

exports.validator = function (reqProps, props, mongoEnt, next) {
  var propsErr = [];
  var mongoErr = {};
  async.parallel([function (cb) {
    reqProps.forEach(function (rp) {
      if (!props[rp]) propsErr.push(rp);
    });
    cb();
  }, function (cb) {
    if (mongoEnt != null) {
      var test = new mongoEnt(props);
      test.validate(function (err) {
        mongoErr = err;
        cb();
      });
    } else {
      mongoErr = null;
      cb();
    }
  }], function (err, result) {
    if (propsErr.length != 0) {
      return next(new Error('The following props were not included: ' + propsErr));
    } else if (mongoErr != null) {
      return next(new Error('The following prop was not included: ' + mongoErr.errors[Object.keys(mongoErr.errors).pop()].path));
    } else {
      return next(null);
    }
  });
};
This allows me both to validate using the mongoose validator and to check for the additional props that I list in the reqProps parameter, in one line of code. Though this only checks for required properties, you could easily extend it for your own validation scheme.
An example of usage for this code:
var Person = mongoose.model('Person');

exports.addUSCitizen = function(props, next) {
  utils.validator(['ssn'], props, Person, function (err) {
    if (err) return next(err);

    //do something using props.ssn
  });
};
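A hedged sketch of calling this from an Express route (the paths and module names are assumptions):

var express = require('express');
var router = express.Router();
var people = require('./controllers/people'); //exports addUSCitizen above

router.post('/citizens', function (req, res) {
  people.addUSCitizen(req.body, function (err) {
    if (err) return res.status(400).json({ error: err.message });
    res.status(201).json({ ok: true });
  });
});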