loopbackjs: Attach a model to different datasources - node.js

I have defined several models that use a datasource "db" (MySQL) in my environment.
Is there any way to have several datasources attached to those models, so I would be able to perform REST operations against different databases? E.g.:
GET /api/Things?ds="db"
GET /api/Things?ds="anotherdb"
GET /api/Things (will use default ds)

As @superkhau pointed out above, each LoopBack Model can be attached to a single data-source only.
You can create (subclass) a new model for each datasource you want to use. Then you can either expose these per-datasource models via unique REST URLs, or you can implement a wrapper model that will dispatch methods to the correct datasource-specific model.
In my example, I'll show how to expose per-datasource models for a Car model that is attached to db and anotherdb. The Car model is defined in the usual way via common/models/car.json and common/models/car.js.
Now you need to define per-datasource models:
// common/models/car-db.json
{
  "name": "Car-db",
  "base": "Car",
  "http": {
    "path": "/cars:db"
  }
}
// common/models/car-anotherdb.json
{
  "name": "Car-anotherdb",
  "base": "Car",
  "http": {
    "path": "/cars:anotherdb"
  }
}
// server/model-config.json
{
  "Car": {
    "dataSource": "default"
  },
  "Car-db": {
    "dataSource": "db"
  },
  "Car-anotherdb": {
    "dataSource": "anotherdb"
  }
}
Now you have the following URLs available:
GET /api/Cars:db
GET /api/Cars:anotherdb
GET /api/Cars
The solution outlined above has two limitations: you have to define a new model for each datasource and the datasource cannot be selected using a query parameter.
To fix that, you need a different approach. I'll again assume there is a Car model already defined.
Now you need to create a "dispatcher".
// common/models/car-dispatcher.json
{
  "name": "CarDispatcher",
  "base": "Model", // <-- important!
  "http": {
    "path": "/cars"
  }
}
// common/models/car-dispatcher.js
var loopback = require('loopback').PersistedModel;
module.exports = function(CarDispatcher) {
Car.find = function(ds, filter, cb) {
var model = this.findModelForDataSource(ds);
model.find(filter, cb);
};
// a modified copy of remoting metadata from loopback/lib/persisted-model.js
Car.remoteMethod('find', {
isStatic: true,
description: 'Find all instances of the model matched by filter from the data source',
accessType: 'READ',
accepts: [
{arg: 'ds', type: 'string', description: 'Name of the datasource to use' },
{arg: 'filter', type: 'object', description: 'Filter defining fields, where, orderBy, offset, and limit'}
],
returns: {arg: 'data', type: [typeName], root: true},
http: {verb: 'get', path: '/'}
});
// TODO: repeat the above for all methods you want to expose this way
Car.findModelForDataSource = function(ds) {
var app = this.app;
var ds = ds && app.dataSources[ds] || app.dataSources.default;
var modelName = this.modelName + '-' + ds;
var model = loopback.findModel(modelName);
if (!model) {
model = loopback.createModel(
modelName,
{},
{ base: this.modelName });
}
return model;
};
};
The final bit is to remove Car and use CarDispatcher in the model config:
// server/model-config.json
{
  "CarDispatcher": {
    "dataSource": null,
    "public": true
  }
}
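With this in place, the datasource can be selected per request through a query parameter, which is what the question asked for (assuming datasources named db and anotherdb are defined in server/datasources.json):
GET /api/cars?ds=db
GET /api/cars?ds=anotherdb
GET /api/cars (falls back to the default datasource)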

By default, you can only attach data sources on a per-model basis, meaning you can attach each model to a different data source via server/model-config.json.
For your use case, you will need to add a remote hook to each endpoint you want to serve from multiple data sources. In your remote hook, you will do something like:
...
var ds1 = Model.app.dataSources.ds1;
var ds2 = Model.app.dataSources.ds2;
//some logic to pick a data source
if (context.req.params...
...
See http://docs.strongloop.com/display/LB/Remote+hooks for more info.
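For example, a beforeRemote hook on a hypothetical Thing model could pick the datasource from a ?ds= query parameter. This is only a sketch under assumed names (Thing, db, anotherdb); also note that attachTo() re-binds the model globally, so concurrent requests using different datasources can interfere with each other:
// server/boot/select-datasource.js (hypothetical boot script)
module.exports = function(app) {
  var Thing = app.models.Thing;
  Thing.beforeRemote('find', function(context, unused, next) {
    var name = context.req.query.ds;
    var ds = (name && app.dataSources[name]) || app.dataSources.db;
    // Re-attach the model to the chosen datasource before the query runs.
    Thing.attachTo(ds);
    next();
  });
};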

For anyone still looking for a working answer to this: the solution for switching databases on the fly was to write a middleware script that examines the request path and then creates a new DataSource connector, passing in a variable derived from req.path. For example, if the request path is /orders, the string "orders" is saved in a variable and a new DataSource is attached for the matching models. Here's the complete working code.
'use strict';
const DataSource = require('loopback-datasource-juggler').DataSource;
const app = require('../server.js');

module.exports = function() {
  return function datasourceSelector(req, res, next) {
    // Check if the API request path contains one of our models.
    // We could use app.models() here, but that would also include
    // models we don't want.
    let $models = ['offers', 'orders', 'products'];
    // $path expects to be 'offers', 'orders' or 'products'.
    let $path = req.path.toLowerCase().split("/")[1];
    // Run our function if the request path is equal to one of
    // our models, but not if it also includes 'count'. We don't
    // want to run this twice unnecessarily.
    if ($models.includes($path) && !req.path.includes('count')) {
      // The angular customer-select form adds a true value
      // to the selected property of only one customer model.
      // So we search the customers for that 'selected' = true.
      let customers = app.models.Customer;
      // customers.find() returns a Promise, so we need to get
      // our selected customer from the results.
      customers.find({"where": {"selected": true}}).then(function(result) {
        // Called if the operation succeeds.
        let customerDb = result[0].name;
        // Log the selected customer and the timestamp
        // it was selected. Needed for debugging and optimization.
        let date = new Date();
        console.log(customerDb, $path + req.path, date);
        // Use the existing veracore datasource config
        // since we can use its environment variables.
        let settings = app.dataSources.Veracore.settings;
        // Clear out the veracore options array since that
        // prevents us from changing databases.
        settings.options = null;
        // Add the selected customer as the new database value.
        settings.database = customerDb;
        try {
          let dataSource = new DataSource(settings);
          // Attach our models to the new database selection.
          app.models.Offer.attachTo(dataSource);
          app.models.Order.attachTo(dataSource);
          app.models.Product.attachTo(dataSource);
        } catch (err) {
          console.error(err);
        }
      })
      // Called if the customers.find() promise fails.
      .catch(function(err) {
        console.error(err);
      });
    } else {
      // We need a better solution for paths like '/orders/count'.
      console.log(req.path + ' was passed to datasourceSelector().');
    }
    next();
  };
};
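Note that LoopBack only runs this script if it is registered as middleware. Assuming the file lives at server/middleware/datasource-selector.js, an entry like the following in server/middleware.json should wire it up (the "initial" phase is an assumption; any phase that runs before the routes will do):
// server/middleware.json
{
  "initial": {
    "./middleware/datasource-selector": {}
  }
}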

Related

How do I select specific column data to be displayed in my bookshelf model belongsTo relationship in Nodejs?

This is a contrived example of what I would like to do:
Suppose I have a database of teams and players:
team:
->id
->color
->rank
->division
player:
->id
->team_id
->number
->SECRET
And the following bookshelf models:
var Base = require('./base');
const Player = Base.Model.extend(
  {
    tableName: "players",
    nonsecretdata: function() {
      return this.belongsTo('Team');
    }
  },
  {
    fields: {
      id: Base.Model.types.integer,
      team_id: Base.Model.types.integer,
      number: Base.Model.types.integer,
      SECRET: Base.Model.types.string,
    }
  }
);
module.exports = Base.model('Player', Player);
And
var Base = require('./base');
const Team = Base.Model.extend(
  {
    tableName: "teams",
  },
  {
    fields: {
      id: Base.Model.types.integer,
      color: Base.Model.types.string,
      rank: Base.Model.types.integer,
      division: Base.Model.types.string,
    }
  }
);
module.exports = Base.model('Team', Team);
My question is: how can I limit the scope of Player so that SECRET is not included when player and team are joined via the nonsecretdata relation?
I am new to Bookshelf, so if any other information is needed, please let me know. Thank you.
Edit: Do I need to create a separate model?
The only way to do this using Bookshelf would be to delete the individual fields from the object after fetching the entire model.
A potentially better solution for this use case is to define a custom Data Access Object (DAO) class that runs a SQL query selecting only the information you need, and then use that DAO instead of Bookshelf. That way the SQL is still abstracted away from the code requesting the information, and SECRET (or any other sensitive column added to the table later) will never be included in the fetch.
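For instance, such a DAO could be a thin wrapper around a raw query built with knex, which Bookshelf already uses under the hood. This is only a sketch: the shared knex instance, module path, and column list are assumptions based on the schema above.
// dao/playerDao.js (hypothetical module)
const knex = require('../knex'); // the same knex instance bookshelf wraps

class PlayerDao {
  // Join players to their teams, selecting every column except SECRET.
  static nonSecretPlayers() {
    return knex('players')
      .join('teams', 'players.team_id', 'teams.id')
      .select(
        'players.id',
        'players.team_id',
        'players.number',
        'teams.color',
        'teams.rank',
        'teams.division'
      );
  }
}

module.exports = PlayerDao;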

Optional but non-nullable fields in GraphQL

In an update to our GraphQL API, only the model's _id field is required, hence the ! in the SDL below. Other fields, such as name, don't have to be included in an update, but they also cannot have a null value. Currently, excluding the ! from the name field means the end user doesn't have to pass a name in an update, but it also allows them to pass null for the name, which cannot be allowed.
A null value lets us know that a field needs to be removed from the database.
Below is an example of a model where this causes a problem: the Name custom scalar doesn't allow null values, but GraphQL still lets them through:
type language {
  _id: ObjectId
  iso: Language_ISO
  auto_translate: Boolean
  name: Name
  updated_at: Date_time
  created_at: Date_time
}
input language_create {
  iso: Language_ISO!
  auto_translate: Boolean
  name: Name!
}
input language_update {
  _id: ObjectId!
  iso: Language_ISO!
  auto_translate: Boolean
  name: Name
}
When a null value is passed in, it bypasses our scalars, so we cannot throw a user-input validation error if null isn't an allowed value.
I am aware that ! means non-nullable and that the lack of ! means the field is nullable; however, it is frustrating that, as far as I can see, we cannot specify the exact allowed values for an optional field. This issue only occurs on updates.
Are there any ways to work around this through custom scalars, without hardcoding logic into each update resolver, which seems cumbersome?
EXAMPLE MUTATION THAT SHOULD FAIL
mutation tests_language_create($input: language_update!) {
  language_update(input: $input) {
    name
  }
}
Variables
{
  "input": {
    "_id": "1234",
    "name": null
  }
}
UPDATE 9/11/18: for reference, I can't find a way around this as there are issues with using custom scalars, custom directives and validation rules. I've opened an issue on GitHub here: https://github.com/apollographql/apollo-server/issues/1942
What you're effectively looking for is custom validation logic. You can add any validation rules you want on top of the "default" set that is normally included when you build a schema. Here's a rough example of how to add a rule that checks for null values on specific types or scalars when they are used as arguments:
const { ApolloServer } = require('apollo-server') // assumed import, based on the apollo-server issue linked above
const { specifiedRules } = require('graphql/validation')
const { GraphQLError } = require('graphql/error')

const typesToValidate = ['Foo', 'Bar']

// This returns a "Visitor" whose properties get called for
// each node in the document that matches the property's name
function CustomInputFieldsNonNull(context) {
  return {
    Argument(node) {
      const argDef = context.getArgument()
      const checkType = typesToValidate.includes(argDef.astNode.type.name.value)
      if (checkType && node.value.kind === 'NullValue') {
        context.reportError(
          new GraphQLError(
            `Type ${argDef.astNode.type.name.value} cannot be null`,
            node,
          ),
        )
      }
    },
  }
}

// We're going to override the validation rules, so we want to grab
// the existing set of rules and just add on to it
const validationRules = specifiedRules.concat(CustomInputFieldsNonNull)

const server = new ApolloServer({
  typeDefs,
  resolvers,
  validationRules,
})
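For reference, this is the kind of document the rule above rejects. The field is hypothetical; what matters is that the argument's type (here Name, from the question) is listed in typesToValidate and that the null is a literal rather than a variable:
# assuming a schema field such as: setName(name: Name): String
mutation {
  setName(name: null) # rejected: "Type Name cannot be null"
}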
EDIT: The above only works if you're not using variables, which isn't going to be very helpful in most cases. As a workaround, I was able to utilize a FIELD_DEFINITION directive to achieve the desired behavior. There are probably a number of ways you could approach this, but here's a basic example:
const _ = require('lodash') // assumed imports
const { defaultFieldResolver } = require('graphql')
const { SchemaDirectiveVisitor } = require('graphql-tools')

class NonNullInputDirective extends SchemaDirectiveVisitor {
  visitFieldDefinition(field) {
    const { resolve = defaultFieldResolver } = field
    const { args: { paths } } = this
    field.resolve = async function (...resolverArgs) {
      const fieldArgs = resolverArgs[1]
      for (const path of paths) {
        if (_.get(fieldArgs, path) === null) {
          throw new Error(`${path} cannot be null`)
        }
      }
      return resolve.apply(this, resolverArgs)
    }
  }
}
Then in your schema:
directive @nonNullInput(paths: [String!]!) on FIELD_DEFINITION

input FooInput {
  foo: String
  bar: String
}

type Query {
  foo (input: FooInput!): String @nonNullInput(paths: ["input.foo"])
}
Assuming that the "non null" input fields are the same each time the input is used in the schema, you could map each input's name to an array of field names that should be validated. So you could do something like this as well:
const nonNullFieldMap = {
  FooInput: ['foo'],
}

class NonNullInputDirective extends SchemaDirectiveVisitor {
  visitFieldDefinition(field) {
    const { resolve = defaultFieldResolver } = field
    const visitedTypeArgs = this.visitedType.args
    field.resolve = async function (...resolverArgs) {
      const fieldArgs = resolverArgs[1]
      visitedTypeArgs.forEach(arg => {
        const argType = arg.type.toString().replace("!", "")
        // Guard against argument types that have no entry in the map.
        const nonNullFields = nonNullFieldMap[argType] || []
        nonNullFields.forEach(nonNullField => {
          const path = `${arg.name}.${nonNullField}`
          if (_.get(fieldArgs, path) === null) {
            throw new Error(`${path} cannot be null`)
          }
        })
      })
      return resolve.apply(this, resolverArgs)
    }
  }
}
And then in your schema:
directive @nonNullInput on FIELD_DEFINITION

type Query {
  foo (input: FooInput!): String @nonNullInput
}
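With the directive attached and nonNullFieldMap mapping FooInput to ['foo'], a request like the following should now fail at resolve time with "input.foo cannot be null" (reusing the hypothetical foo field from above):
query {
  foo(input: { foo: null, bar: "ok" })
}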

Node.js Testing with Mongoose. unique gets ignored

I'm having a little trouble with an integration test for my Mongoose application. The problem is that my unique setting gets constantly ignored. The schema looks more or less like this (so no fancy stuff in there):
const RealmSchema:Schema = new mongoose.Schema({
  Title: {
    type: String,
    required: true,
    unique: true
  },
  SchemaVersion: {
    type: String,
    default: SchemaVersion,
    enum: [SchemaVersion]
  }
}, {
  timestamps: {
    createdAt: "Created",
    updatedAt: "Updated"
  }
});
It looks like basically all the rules set in the schema are being ignored: I can pass a Number or Boolean where a String is required. The only thing that does work is that fields not declared in the schema won't be saved to the db.
First probable cause:
I have the feeling that it might have to do with the way I test. I have multiple integration tests, and after each one my database gets dropped (so I have the same conditions and preconditions for every test).
Is it possible that my indices are being dropped with the database and not being re-created when the next test builds the database and collection again? And if this is the case, how could I make sure that after every test I get an empty database that still respects all my schema settings?
Second probable cause:
I'm using TypeScript in this project. Maybe there is something wrong in how I define the Schema and the Model. This is what I do:
1. Create the Schema (code from above)
2. Create an interface for the model (where IRealmM extends the interface for use in mongoose)
import { SpecificAttributeSelect } from "../classes/class.specificAttribute.Select";
import { SpecificAttributeText } from "../classes/class.specificAttribute.Text";
import { Document } from "mongoose";

interface IRealm {
  Title: String;
  Attributes: (SpecificAttributeSelect | SpecificAttributeText)[];
}

interface IRealmM extends IRealm, Document {
}

export { IRealm, IRealmM }
3. Create the model
import { RealmSchema } from '../schemas/schema.Realm';
import { Model } from 'mongoose';
import { IRealmM } from '../interfaces/interface.realm';
// Apply Authentication Plugin and create Model
const RealmModel:Model<IRealmM> = mongoose.model('realm', RealmSchema);
// Export the Model
export { RealmModel }
The unique option is not a validator. Check out this link from the Mongoose docs.
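To illustrate the distinction, here is a minimal sketch (it assumes the unique index on Title has actually been built): a duplicate Title is rejected by MongoDB itself with error code 11000, not by Mongoose validation, so no ValidationError is thrown.
RealmModel.create({ Title: 'Narnia' })
  .then(() => RealmModel.create({ Title: 'Narnia' }))
  .catch(err => {
    // A MongoError with code 11000 (duplicate key), not a ValidationError.
    console.log(err.name, err.code);
  });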
OK, I finally figured it out. The key issue is described here:
Mongoose Unique index not working!
Solstice333 states in his answer that ensureIndex is deprecated (a warning I had been getting for some time; I thought it was still working, though).
After adding .createIndexes() to the model, leaving me with the following code, it works (at least as long as I'm not testing; more on that after the code):
// Apply Authentication Plugin and create Model
const RealmModel:Model<IRealmM> = mongoose.model('realm', RealmSchema);
RealmModel.createIndexes();
Now the problem with this is that the indexes are set when your connection is first established, but not if you drop the database during your process (which, at least for me, occurs after every integration test).
So in my tests the resetDatabase function looks like this, to make sure all the indexes are set:
const resetDatabase = done => {
  if (mongoose.connection.readyState === 1) {
    mongoose.connection.db.dropDatabase(async () => {
      await resetIndexes(mongoose.models);
      done();
    });
  } else {
    mongoose.connection.once('open', () => {
      mongoose.connection.db.dropDatabase(async () => {
        await resetIndexes(mongoose.models);
        done();
      });
    });
  }
};

const resetIndexes = async (Models: Object) => {
  let indexesReset: any[] = [];
  for (let key in Models) {
    indexesReset.push(Models[key].createIndexes());
  }
  // Await all createIndexes() calls; the original .then() was never
  // returned, so the function could resolve before the indexes existed.
  await Promise.all(indexesReset);
  return true;
}

Always fetch from related models in Bookshelf.js

I would like baffle.where({id: 1}).fetch() to always get the typeName attribute as part of the baffle model, without fetching it from baffleType explicitly each time.
The following works for me, but it seems that withRelated will only load the relation if the baffle model is fetched directly, not via a relation:
let baffle = bookshelf.Model.extend({
  constructor: function() {
    bookshelf.Model.apply(this, arguments);
    this.on('fetching', function(model, attrs, options) {
      options.withRelated = options.withRelated || [];
      options.withRelated.push('type');
    });
  },
  virtuals: {
    typeName: {
      get: function() {
        return this.related('type').attributes.typeName;
      }
    }
  },
  type: function() {
    return this.belongsTo(baffleType, 'type_id');
  }
});
let baffleType = bookshelf.Model.extend({});
let baffleType = bookshelf.Model.extend({});
What is the proper way to do that?
The issue on the repo is related to the fetched event; the fetching event is working fine (v0.9.2).
So, just for example, if you have a third model like
var Test = bookshelf.Model.extend({
  tableName: 'test',
  baffleField: function() {
    return this.belongsTo(baffle);
  }
});
and then do Test.forge().fetch({withRelated: ['baffleField']}), the fetching event on baffle will fire. However, the ORM will not include type (the nested related model) unless you specifically tell it to do so with
Test.forge().fetch({withRelated: ['baffleField.type']})
However, I would try to avoid this if it ends up making N queries for N records.
UPDATE 1
I was talking about the same thing you are doing in the fetching event, i.e. overriding fetch like
fetch: function fetch(options) {
  options = options || {};
  options.withRelated = options.withRelated || [];
  options.withRelated.push('type');
  // Fetch uses all set attributes.
  return this._doFetch(this.attributes, options);
}
in model.extend. However, as you can see, this might break when the library version changes.
This question is super old, but I'm answering anyway.
I solved this by just adding a new function, fetchFull, which keeps things pretty DRY.
let MyBaseModel = bookshelf.Model.extend({
  fetchFull: function() {
    let args;
    if (this.constructor.withRelated) {
      args = {withRelated: this.constructor.withRelated};
    }
    return this.fetch(args);
  },
});
let MyModel = MyBaseModel.extend({
  tableName: 'whatever',
}, {
  withRelated: [
    'relation1',
    'relation1.related2'
  ]
});
Then whenever you're querying, you can either call Model.fetchFull() to load everything, or in cases where you don't want to take a performance hit, you can still resort to Model.fetch().
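For example (a sketch reusing the models defined above):
// Loads relation1 and relation1.related2 along with the model.
MyModel.forge({ id: 1 }).fetchFull().then(model => {
  console.log(model.related('relation1').toJSON());
});

// Plain fetch() still skips the relations when they aren't needed.
MyModel.forge({ id: 1 }).fetch().then(model => {
  console.log(model.toJSON());
});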

Overriding mongoose query for a specific model

I want to automatically add a query option for all queries related to a specific mongoose model, without affecting other models.
I saw this answer where Mongoose.Query is patched and that will affect all mongoose models.
I was able to do this for my soft deleted items. Haven't tested it extensively yet though.
function findNotDeletedMiddleware(next) {
  this.where('deleted').equals(false);
  next();
}

MySchema.pre('find', findNotDeletedMiddleware);
MySchema.pre('findOne', findNotDeletedMiddleware);
MySchema.pre('findOneAndUpdate', findNotDeletedMiddleware);
MySchema.pre('count', findNotDeletedMiddleware);
I see two possible easy ways to do this:
Alternative #1
Add a static dict with the options you want to be applied to your specific Mongoose schema:
FooSchema.statics.options = {
  ...
};
Now, when you query you need to do:
Foo.find({}, null, Foo.options, function(err, foos) {
  ...
});
Alternative #2
Implement a wrapper to the find method that always uses your specific options:
FooSchema.statics.findWithOptions = function(query, next) {
  var options = { ... };
  this.find(query, null, options, next);
};
And use this method like so:
Foo.findWithOptions({}, function(err, foos) {
  ...
});
Reusability
To make these wrapper methods more reusable, you can make a dict with all your wrappers:
var withOptionsWrappers = {
findWithOptions: function(query, next) {
this.find(query, null, this.options, next);
},
findByIdWithOptions: ...
findOneWithOptions: ...
...
};
Since we're referring to this inside the wrappers, there is no problem reusing them. Now you can apply them to all your schemas, along with each schema's specific options:
// Copy the wrappers rather than assigning the shared object directly;
// otherwise both schemas would share one statics object and the options
// of one model would overwrite the other's.
Object.assign(FooSchema.statics, withOptionsWrappers);
FooSchema.statics.options = {
  ...
};
Object.assign(BarSchema.statics, withOptionsWrappers);
BarSchema.statics.options = {
  ...
};
