I have ended up with multiple foreign keys, and I'm not sure why. It's confusing because I'm trying to use recipeID as the foreign key for a left join later on, but for that operation Sequelize chooses RecipeURL, which is not defined anywhere in the schema below.
const Recipe = sequelize.define('Recipe', {
  URL: {
    type: DataTypes.STRING(512),
    allowNull: false,
    unique: true,
    primaryKey: true
  },
  contentID: {
    type: DataTypes.UUID,
    allowNull: true
  },
  source: {
    type: DataTypes.STRING,
    allowNull: false
  },
  title: {
    type: DataTypes.STRING,
    allowNull: true
  },
  isRecipe: {
    type: DataTypes.BOOLEAN,
    allowNull: true,
    defaultValue: null
  },
  ContentsURL: {
    type: DataTypes.STRING(512),
    allowNull: true
  },
  ScreenshotURL: {
    type: DataTypes.STRING(512),
    allowNull: true
  },
});
const Comment = sequelize.define('Comment', {
  ID: {
    type: DataTypes.STRING,
    primaryKey: true,
    allowNull: false
  },
  text: {
    type: DataTypes.TEXT,
    allowNull: false
  },
  name: {
    type: DataTypes.TEXT,
    allowNull: true
  },
  date: {
    type: DataTypes.DATE,
    allowNull: true
  }
});
Recipe.hasMany(Comment, { as: "comments" });
Comment.belongsTo(Recipe, {
  foreignKey: "recipeID",
  as: "recipe",
});
(async () => {
  await sequelize.sync({ alter: true, force: true });
  process.exit(1);
})();
Running it:
$ node db.js
Executing (default): DROP TABLE IF EXISTS `Comments`;
Executing (default): DROP TABLE IF EXISTS `Recipes`;
Executing (default): DROP TABLE IF EXISTS `Recipes`;
Executing (default): CREATE TABLE IF NOT EXISTS `Recipes` (`URL` VARCHAR(512) NOT NULL UNIQUE , `contentID` CHAR(36) BINARY, `source` VARCHAR(255) NOT NULL, `title` VARCHAR(255), `isRecipe` TINYINT(1) DEFAULT NULL, `ContentsURL` VARCHAR(512), `ScreenshotURL` VARCHAR(512), `createdAt` DATETIME NOT NULL, `updatedAt` DATETIME NOT NULL, PRIMARY KEY (`URL`)) ENGINE=InnoDB;
Executing (default): SHOW FULL COLUMNS FROM `Recipes`;
Executing (default): SELECT CONSTRAINT_NAME as constraint_name,CONSTRAINT_NAME as constraintName,CONSTRAINT_SCHEMA as constraintSchema,CONSTRAINT_SCHEMA as constraintCatalog,TABLE_NAME as tableName,TABLE_SCHEMA as tableSchema,TABLE_SCHEMA as tableCatalog,COLUMN_NAME as columnName,REFERENCED_TABLE_SCHEMA as referencedTableSchema,REFERENCED_TABLE_SCHEMA as referencedTableCatalog,REFERENCED_TABLE_NAME as referencedTableName,REFERENCED_COLUMN_NAME as referencedColumnName FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE where TABLE_NAME = 'Recipes' AND CONSTRAINT_NAME!='PRIMARY' AND CONSTRAINT_SCHEMA='recipe' AND REFERENCED_TABLE_NAME IS NOT NULL;
Executing (default): ALTER TABLE `Recipes` CHANGE `contentID` `contentID` CHAR(36) BINARY;
Executing (default): ALTER TABLE `Recipes` CHANGE `source` `source` VARCHAR(255) NOT NULL;
Executing (default): ALTER TABLE `Recipes` CHANGE `title` `title` VARCHAR(255);
Executing (default): ALTER TABLE `Recipes` CHANGE `isRecipe` `isRecipe` TINYINT(1) DEFAULT NULL;
Executing (default): ALTER TABLE `Recipes` CHANGE `ContentsURL` `ContentsURL` VARCHAR(512);
Executing (default): ALTER TABLE `Recipes` CHANGE `ScreenshotURL` `ScreenshotURL` VARCHAR(512);
Executing (default): ALTER TABLE `Recipes` CHANGE `createdAt` `createdAt` DATETIME NOT NULL;
Executing (default): ALTER TABLE `Recipes` CHANGE `updatedAt` `updatedAt` DATETIME NOT NULL;
Executing (default): SHOW INDEX FROM `Recipes`
Executing (default): DROP TABLE IF EXISTS `Comments`;
Executing (default): CREATE TABLE IF NOT EXISTS `Comments` (`ID` VARCHAR(255) NOT NULL , `text` TEXT NOT NULL, `name` TEXT, `date` DATETIME, `createdAt` DATETIME NOT NULL, `updatedAt` DATETIME NOT NULL, `RecipeURL` VARCHAR(512), `recipeID` VARCHAR(512), PRIMARY KEY (`ID`), FOREIGN KEY (`RecipeURL`) REFERENCES `Recipes` (`URL`) ON DELETE SET NULL ON UPDATE CASCADE, FOREIGN KEY (`recipeID`) REFERENCES `Recipes` (`URL`) ON DELETE SET NULL ON UPDATE CASCADE) ENGINE=InnoDB;
Executing (default): SHOW FULL COLUMNS FROM `Comments`;
Executing (default): SELECT CONSTRAINT_NAME as constraint_name,CONSTRAINT_NAME as constraintName,CONSTRAINT_SCHEMA as constraintSchema,CONSTRAINT_SCHEMA as constraintCatalog,TABLE_NAME as tableName,TABLE_SCHEMA as tableSchema,TABLE_SCHEMA as tableCatalog,COLUMN_NAME as columnName,REFERENCED_TABLE_SCHEMA as referencedTableSchema,REFERENCED_TABLE_SCHEMA as referencedTableCatalog,REFERENCED_TABLE_NAME as referencedTableName,REFERENCED_COLUMN_NAME as referencedColumnName FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE where TABLE_NAME = 'Comments' AND CONSTRAINT_NAME!='PRIMARY' AND CONSTRAINT_SCHEMA='recipe' AND REFERENCED_TABLE_NAME IS NOT NULL;
Executing (default): ALTER TABLE `Comments` CHANGE `text` `text` TEXT NOT NULL;
Executing (default): ALTER TABLE `Comments` CHANGE `name` `name` TEXT;
Executing (default): ALTER TABLE `Comments` CHANGE `date` `date` DATETIME;
Executing (default): ALTER TABLE `Comments` CHANGE `createdAt` `createdAt` DATETIME NOT NULL;
Executing (default): ALTER TABLE `Comments` CHANGE `updatedAt` `updatedAt` DATETIME NOT NULL;
Executing (default): SELECT CONSTRAINT_CATALOG AS constraintCatalog, CONSTRAINT_NAME AS constraintName, CONSTRAINT_SCHEMA AS constraintSchema, CONSTRAINT_TYPE AS constraintType, TABLE_NAME AS tableName, TABLE_SCHEMA AS tableSchema from INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE table_name='Comments' AND constraint_name = 'Comments_ibfk_1' AND TABLE_SCHEMA = 'recipe';
Executing (default): ALTER TABLE `Comments` DROP FOREIGN KEY `Comments_ibfk_1`;
Executing (default): SELECT CONSTRAINT_CATALOG AS constraintCatalog, CONSTRAINT_NAME AS constraintName, CONSTRAINT_SCHEMA AS constraintSchema, CONSTRAINT_TYPE AS constraintType, TABLE_NAME AS tableName, TABLE_SCHEMA AS tableSchema from INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE table_name='Comments' AND constraint_name = 'Comments_ibfk_2' AND TABLE_SCHEMA = 'recipe';
Executing (default): ALTER TABLE `Comments` DROP FOREIGN KEY `Comments_ibfk_2`;
Executing (default): ALTER TABLE `Comments` ADD FOREIGN KEY (`RecipeURL`) REFERENCES `Recipes` (`URL`) ON DELETE SET NULL ON UPDATE CASCADE;
Executing (default): ALTER TABLE `Comments` ADD FOREIGN KEY (`recipeID`) REFERENCES `Recipes` (`URL`) ON DELETE SET NULL ON UPDATE CASCADE;
Look at the last two lines. Why are two foreign keys added? I think this might be a caching issue or something similar, so if I could just edit an internal schema file of sorts and comment out this RecipeURL reference, that might solve the problem.
Because you have declared it like that.
Recipe.hasMany(Comment, { as: "comments" });
Here, Sequelize automatically builds the foreign key from the source model name and its primary key.
Your primary key is not id; the primary key you have set is URL, which is why the generated column is RecipeURL.
Comment.belongsTo(Recipe, {
  foreignKey: "recipeID",
  as: "recipe",
});
In this code you are overriding the default foreign key and naming it recipeID.
Specify the same foreignKey on the Recipe.hasMany association and you'll see only one foreign key.
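A minimal sketch of the corrected associations (keeping your aliases):
Recipe.hasMany(Comment, { as: "comments", foreignKey: "recipeID" });
Comment.belongsTo(Recipe, { foreignKey: "recipeID", as: "recipe" });
With both sides agreeing on recipeID, sync creates a single recipeID column on Comments that references Recipes.URL.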
Related
Suppose I have a table of something like cars, where inside is a JSONB object specifying possible customizations:
+----+-----------------------------------------+
| id | customizations JSONB |
+----+-----------------------------------------+
| 1 | {"color": "blue", "lights": "led", ...} |
+----+-----------------------------------------+
| 2 | {"color": "red"} |
+----+-----------------------------------------+
| 3 | {} |
+----+-----------------------------------------+
If I want to query for a certain customization based on case-insensitive value or partial value (i.e., ILIKE), I can do something like:
SELECT * FROM "Cars" WHERE EXISTS (
SELECT 1 FROM JSONB_EACH_TEXT("customizations") WHERE "value" ~* 'BLU'
);
This pattern works fine in Postgres, but now I am trying to translate it over to Sequelize as best as I can. The important thing here is that the search term ('BLU' from the example) is passed as a parameter.
const cars = await cars.findAll({
  where: // ???? Maybe something like db.fn('EXISTS', ...), but how do I bind the parameter?
});
How can I use an EXISTS query here, but bind a parameter? I know I could use db.literal(), but if I did that, I'd have to escape the search term string myself before interpolating it into the query. (Is there at least a proper method for doing this data escaping in Sequelize?)
Note that the JSONB object in customizations can have many keys, a single key, or even no keys.
Bounty Note: Answers using modern versions of Postgres are fine, but I would also like an answer for PostgreSQL v10.12, as that's all that is available with AWS Aurora Serverless. I'll happily assign a separate bounty to both answers!
On PostgreSQL 12+ you can use a JSON path expression to extract all values and cast the result to text in order to regex-match.
The raw query would be:
SELECT *
FROM cars
WHERE jsonb_path_query_array(customizations, '$.*')::TEXT ~* 'BLU'
In Sequelize:
where: Sequelize.where(
  Sequelize.literal("jsonb_path_query_array(customizations, '$.*')::TEXT"),
  Op.iRegexp,
  'BLU'
)
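Put together in a query, it could look like this (a sketch; the Car model name, the searchTerm variable, and the imports are assumptions):
const { Sequelize, Op } = require('sequelize');
const matches = await Car.findAll({
  where: Sequelize.where(
    Sequelize.literal("jsonb_path_query_array(customizations, '$.*')::TEXT"),
    Op.iRegexp,
    searchTerm // Sequelize escapes this value, so no manual escaping is needed
  )
});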
On older versions, I would probably use a raw query with bound parameters that maps back to the Car model.
How can I use an EXISTS query here, but bind a parameter? I know I could use db.literal(), but if I did that, I'd have to escape the search term string myself before interpolating it into the query. (Is there at least a proper method for doing this data escaping in Sequelize?)
You can bind parameters to raw SQL queries using either $1 or $name, for positional (array) and named (object) arguments respectively.
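A minimal sketch of both binding styles (assuming an existing sequelize instance and the cars table below):
// positional: values come from an array and are referenced as $1, $2, ...
await sequelize.query('SELECT * FROM cars WHERE id = $1', { bind: [1] });
// named: values come from an object and are referenced as $name
await sequelize.query('SELECT * FROM cars WHERE id = $id', { bind: { id: 1 } });
The complete script below uses the named form: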
// cars.js
const Sequelize = require('sequelize');
const path = 'postgresql://hal:hal@localhost:5432/hal'
const sequelize = new Sequelize(path);
let Car = sequelize.define('cars', {
  id: { type: Sequelize.INTEGER, primaryKey: true },
  customizations: Sequelize.JSONB
});
let cars = [{ id: 1, customizations: {color: "blue", lights: "led"} },
            { id: 2, customizations: {color: "red"} },
            { id: 3, customizations: {} }]
const search_pat = 'BLU';
const stmt = `
select distinct cars.*
from cars, jsonb_each_text(customizations) kv(kk, vv)
where vv ~* $search_pattern
`;
sequelize
  .sync()
  .then(() => Car.bulkCreate(cars))
  .then(() => {
    return sequelize.query(stmt, {
      bind: {search_pattern: search_pat},
      type: sequelize.QueryTypes.SELECT,
      model: Car,
      mapToModel: true
    }).then(cars => {console.log(cars);})
  });
Executing this script (node cars.js) produces the following output:
Executing (default): CREATE TABLE IF NOT EXISTS "cars" ("id" INTEGER , "customizations" JSONB, "createdAt" TIMESTAMP WITH TIME ZONE NOT NULL, "updatedAt" TIMESTAMP WITH TIME ZONE NOT NULL, PRIMARY KEY ("id"));
Executing (default): SELECT i.relname AS name, ix.indisprimary AS primary, ix.indisunique AS unique, ix.indkey AS indkey, array_agg(a.attnum) as column_indexes, array_agg(a.attname) AS column_names, pg_get_indexdef(ix.indexrelid) AS definition FROM pg_class t, pg_class i, pg_index ix, pg_attribute a WHERE t.oid = ix.indrelid AND i.oid = ix.indexrelid AND a.attrelid = t.oid AND t.relkind = 'r' and t.relname = 'cars' GROUP BY i.relname, ix.indexrelid, ix.indisprimary, ix.indisunique, ix.indkey ORDER BY i.relname;
Executing (default): INSERT INTO "cars" ("id","customizations","createdAt","updatedAt") VALUES (1,'{"color":"blue","lights":"led"}','2021-04-14 04:42:50.446 +00:00','2021-04-14 04:42:50.446 +00:00'),(2,'{"color":"red"}','2021-04-14 04:42:50.446 +00:00','2021-04-14 04:42:50.446 +00:00'),(3,'{}','2021-04-14 04:42:50.446 +00:00','2021-04-14 04:42:50.446 +00:00') RETURNING "id","customizations","createdAt","updatedAt";
Executing (default): select distinct cars.*
from cars, jsonb_each_text(customizations) kv(kk, vv)
where vv ~* $1
[
cars {
dataValues: {
id: 1,
customizations: [Object],
createdAt: 2021-04-14T04:42:50.446Z,
updatedAt: 2021-04-14T04:42:50.446Z
},
_previousDataValues: {
id: 1,
customizations: [Object],
createdAt: 2021-04-14T04:42:50.446Z,
updatedAt: 2021-04-14T04:42:50.446Z
},
_changed: Set(0) {},
_options: {
isNewRecord: false,
_schema: null,
_schemaDelimiter: '',
raw: true,
attributes: undefined
},
isNewRecord: false
}
]
Note that I've used an alternative (and likely less efficient) select statement in the example above.
You can use your original query, which IMO is the most straightforward query for the target PostgreSQL version (10.x), i.e.:
const stmt = `
  SELECT *
  FROM cars
  WHERE EXISTS (
    SELECT 1
    FROM JSONB_EACH_TEXT(customizations) WHERE "value" ~* $search_pattern
  )
`;
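It can be bound and executed the same way as in the script above, e.g.:
const rows = await sequelize.query(stmt, {
  bind: { search_pattern: 'BLU' },
  type: sequelize.QueryTypes.SELECT,
  model: Car,
  mapToModel: true
});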
If you modify your condition like this:
SELECT * FROM "Cars" WHERE (
SELECT "value" FROM JSONB_EACH_TEXT("customizations")) ~* 'BLU'
then you can use Sequelize.where in conjunction with Sequelize.literal:
where: Sequelize.where(
  Sequelize.literal('(SELECT "value" FROM JSONB_EACH_TEXT("customizations"))'),
  Op.iRegexp,
  'BLU'
)
Update: this solution will only work if the subquery returns a single record.
I am using Postman to test a POST route for my React application using Sequelize, Node and Express. I am getting the error below:
{
  "name": "SequelizeDatabaseError",
  "parent": {
    "code": "ER_NO_DEFAULT_FOR_FIELD",
    "errno": 1364,
    "sqlState": "HY000",
    "sqlMessage": "Field 'title' doesn't have a default value",
    "sql": "INSERT INTO `Trips` (`id`,`createdAt`,`updatedAt`) VALUES (DEFAULT,?,?);",
    "parameters": [
      "2019-12-01 00:50:42",
      "2019-12-01 00:50:42"
    ]
  },
  "original": {
    "code": "ER_NO_DEFAULT_FOR_FIELD",
    "errno": 1364,
    "sqlState": "HY000",
    "sqlMessage": "Field 'title' doesn't have a default value",
    "sql": "INSERT INTO `Trips` (`id`,`createdAt`,`updatedAt`) VALUES (DEFAULT,?,?);",
    "parameters": [
      "2019-12-01 00:50:42",
      "2019-12-01 00:50:42"
    ]
  },
  "sql": "INSERT INTO `Trips` (`id`,`createdAt`,`updatedAt`) VALUES (DEFAULT,?,?);",
  "parameters": [
    "2019-12-01 00:50:42",
    "2019-12-01 00:50:42"
  ]
}
The schema for my table is as follows:
CREATE TABLE Trips (
id INT NOT NULL AUTO_INCREMENT,
title varchar(255) NOT NULL,
location varchar(255) DEFAULT NULL,
Description varchar(255) DEFAULT NULL,
tripDate datetime DEFAULT NULL,
image varchar(255) DEFAULT NULL,
createdAt timestamp default current_timestamp,
updatedAt timestamp,
PRIMARY KEY (id)
);
I tried changing the various columns to DEFAULT NULL, but even when I input data into those fields, I get back null in the database. I added images of my code.
(Screenshots: React Form, Trips Controller, Trips Model, Trips Router)
-Sam
You have two problems:
1) In your SQL declaration and in your model declaration, you're missing a default value for the title column.
Your SQL table declaration should look like this:
CREATE TABLE Trips (
id INT NOT NULL AUTO_INCREMENT,
title varchar(255) DEFAULT NULL, -- Or any other default value
location varchar(255) DEFAULT NULL,
Description varchar(255) DEFAULT NULL,
tripDate datetime DEFAULT NULL,
image varchar(255) DEFAULT NULL,
createdAt timestamp default current_timestamp,
updatedAt timestamp,
PRIMARY KEY (id)
);
According to this declaration your model declaration should be:
const Trips = sequelize.define('Trips', {
  title: {
    type: DataTypes.STRING,
    defaultValue: null // or whatever you would like
  },
  location: DataTypes.STRING,
  Description: DataTypes.STRING,
  tripDate: DataTypes.DATEONLY,
  image: DataTypes.STRING
});
If you still get this error after modifying these two, the problem is on the client side: for some reason the title data doesn't reach the server and is therefore undefined in req.body.
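As a quick way to check, here is a hypothetical Express handler sketch (the route path, model import, and validation are assumptions, not the original code):
// routes/trips.js (hypothetical)
const express = require('express');
const router = express.Router();
const { Trips } = require('../models'); // assumed model export
// app.use(express.json()) must be registered in the app so req.body is populated
router.post('/trips', async (req, res) => {
  console.log(req.body); // if title is missing here, the client isn't sending it
  if (!req.body.title) {
    return res.status(400).json({ error: 'title is required' });
  }
  const trip = await Trips.create(req.body);
  res.status(201).json(trip);
});
module.exports = router;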
1. By default Sequelize will look for createdAt and updatedAt in your model schema,
so either add the createdAt and updatedAt columns, or, if you don't want these two fields, set timestamps: false in the model options.
2. You should add an id field in your model schema, because Sequelize requires a primary key field in every model.
const Trips = sequelize.define('Trips', {
  id: {
    allowNull: false,
    autoIncrement: true,
    primaryKey: true,
    type: Sequelize.INTEGER
  },
  title: DataTypes.STRING,
  location: DataTypes.STRING,
  Description: DataTypes.STRING,
  tripDate: DataTypes.DATEONLY,
  image: DataTypes.STRING
},
{ timestamps: false });
I have the table below and want to partition it; please suggest the best way. This table will mostly be used in joins on the ProblemId column. It holds the history of each ProblemId/ticket, i.e. at what time the ticket moved to which state, etc.
CREATE TABLE `sd_servicerequest_history` (
`ProblemId` int(11) NOT NULL,
`CurrentTime` datetime NOT NULL,
`NatureOfChange` varchar(255) NOT NULL,
`ActionPerformedBy` varchar(255) NOT NULL,
`HistoryID` int(10) unsigned NOT NULL AUTO_INCREMENT,
`OldValue` varchar(5120) NOT NULL,
`NewValue` varchar(5120) NOT NULL,
`Parameter` varchar(255) NOT NULL,
`FIELDID` int(10) unsigned NOT NULL DEFAULT '0',
`ChildOf` int(10) unsigned NOT NULL DEFAULT '0',
`OldStateID` int(10) unsigned NOT NULL DEFAULT '0',
`NewStateID` int(10) unsigned NOT NULL DEFAULT '0',
`Userid` int(10) DEFAULT '0',
PRIMARY KEY (`HistoryID`),
KEY `FK_servicehistory_ProblemId` (`ProblemId`),
KEY `ChildOfIndex` (`ChildOf`),
KEY `Userid` (`Userid`),
CONSTRAINT `FK_servicehistory_1` FOREIGN KEY (`Userid`) REFERENCES `userdetails` (`userid`) ON DELETE CASCADE,
CONSTRAINT `FK_servicehistory_ProblemId` FOREIGN KEY (`ProblemId`) REFERENCES `sd_servicereqmaster` (`ProblemId`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8
I have tried partitioning by ProblemId, but I cannot pass ProblemId in the WHERE clause.
I'm referring to the Red Book by Vaughn Vernon.
In the Collaboration Bounded Context he modeled Author, Member, Participant, Creator, etc. as Value Objects whose fields are stored inline with the Entity they are bound to.
Let's say you create a Discussion which has one Creator; the fields of the Creator (id, name, email) will then be stored in the same table (tbl_discussions).
This is also true for the Forum (see the schema below).
DROP DATABASE IF EXISTS iddd_collaboration;
CREATE DATABASE iddd_collaboration;
USE iddd_collaboration;
SET FOREIGN_KEY_CHECKS=0;
CREATE TABLE `tbl_dispatcher_last_event` (
`event_id` bigint(20) NOT NULL,
PRIMARY KEY (`event_id`)
) ENGINE=InnoDB;
CREATE TABLE `tbl_es_event_store` (
`event_id` bigint(20) NOT NULL auto_increment,
`event_body` text NOT NULL,
`event_type` varchar(250) NOT NULL,
`stream_name` varchar(250) NOT NULL,
`stream_version` int(11) NOT NULL,
KEY (`stream_name`),
UNIQUE KEY (`stream_name`, `stream_version`),
PRIMARY KEY (`event_id`)
) ENGINE=InnoDB;
CREATE TABLE `tbl_vw_calendar` (
`calendar_id` varchar(36) NOT NULL,
`description` varchar(500),
`name` varchar(100) NOT NULL,
`owner_email_address` varchar(100) NOT NULL,
`owner_identity` varchar(50) NOT NULL,
`owner_name` varchar(200) NOT NULL,
`tenant_id` varchar(36) NOT NULL,
KEY `k_owner_identity` (`owner_identity`),
KEY `k_tenant_id` (`name`,`tenant_id`),
PRIMARY KEY (`calendar_id`)
) ENGINE=InnoDB;
CREATE TABLE `tbl_vw_calendar_entry` (
`calendar_entry_id` varchar(36) NOT NULL,
`alarm_alarm_units` int(11) NOT NULL,
`alarm_alarm_units_type` varchar(10) NOT NULL,
`calendar_id` varchar(36) NOT NULL,
`description` varchar(500),
`location` varchar(100),
`owner_email_address` varchar(100) NOT NULL,
`owner_identity` varchar(50) NOT NULL,
`owner_name` varchar(200) NOT NULL,
`repetition_ends` datetime NOT NULL,
`repetition_type` varchar(20) NOT NULL,
`tenant_id` varchar(36) NOT NULL,
`time_span_begins` datetime NOT NULL,
`time_span_ends` datetime NOT NULL,
KEY `k_calendar_id` (`calendar_id`),
KEY `k_owner_identity` (`owner_identity`),
KEY `k_repetition_ends` (`repetition_ends`),
KEY `k_tenant_id` (`tenant_id`),
KEY `k_time_span_begins` (`time_span_begins`),
KEY `k_time_span_ends` (`time_span_ends`),
PRIMARY KEY (`calendar_entry_id`)
) ENGINE=InnoDB;
CREATE TABLE `tbl_vw_calendar_entry_invitee` (
`id` int(11) NOT NULL auto_increment,
`calendar_entry_id` varchar(36) NOT NULL,
`participant_email_address` varchar(100) NOT NULL,
`participant_identity` varchar(50) NOT NULL,
`participant_name` varchar(200) NOT NULL,
`tenant_id` varchar(36) NOT NULL,
KEY `k_calendar_entry_id` (`calendar_entry_id`),
KEY `k_participant_identity` (`participant_identity`),
KEY `k_tenant_id` (`tenant_id`),
PRIMARY KEY (`id`)
) ENGINE=InnoDB;
CREATE TABLE `tbl_vw_calendar_sharer` (
`id` int(11) NOT NULL auto_increment,
`calendar_id` varchar(36) NOT NULL,
`participant_email_address` varchar(100) NOT NULL,
`participant_identity` varchar(50) NOT NULL,
`participant_name` varchar(200) NOT NULL,
`tenant_id` varchar(36) NOT NULL,
KEY `k_calendar_id` (`calendar_id`),
KEY `k_participant_identity` (`participant_identity`),
KEY `k_tenant_id` (`tenant_id`),
PRIMARY KEY (`id`)
) ENGINE=InnoDB;
CREATE TABLE `tbl_vw_discussion` (
`discussion_id` varchar(36) NOT NULL,
`author_email_address` varchar(100) NOT NULL,
`author_identity` varchar(50) NOT NULL,
`author_name` varchar(200) NOT NULL,
`closed` tinyint(1) NOT NULL,
`exclusive_owner` varchar(100),
`forum_id` varchar(36) NOT NULL,
`subject` varchar(100) NOT NULL,
`tenant_id` varchar(36) NOT NULL,
KEY `k_author_identity` (`author_identity`),
KEY `k_forum_id` (`forum_id`),
KEY `k_tenant_id` (`tenant_id`),
KEY `k_exclusive_owner` (`exclusive_owner`),
PRIMARY KEY (`discussion_id`)
) ENGINE=InnoDB;
CREATE TABLE `tbl_vw_forum` (
`forum_id` varchar(36) NOT NULL,
`closed` tinyint(1) NOT NULL,
`creator_email_address` varchar(100) NOT NULL,
`creator_identity` varchar(50) NOT NULL,
`creator_name` varchar(200) NOT NULL,
`description` varchar(500) NOT NULL,
`exclusive_owner` varchar(100),
`moderator_email_address` varchar(100) NOT NULL,
`moderator_identity` varchar(50) NOT NULL,
`moderator_name` varchar(200) NOT NULL,
`subject` varchar(100) NOT NULL,
`tenant_id` varchar(36) NOT NULL,
KEY `k_creator_identity` (`creator_identity`),
KEY `k_tenant_id` (`tenant_id`),
KEY `k_exclusive_owner` (`exclusive_owner`),
PRIMARY KEY (`forum_id`)
) ENGINE=InnoDB;
CREATE TABLE `tbl_vw_post` (
`post_id` varchar(36) NOT NULL,
`author_email_address` varchar(100) NOT NULL,
`author_identity` varchar(50) NOT NULL,
`author_name` varchar(200) NOT NULL,
`body_text` text NOT NULL,
`changed_on` datetime NOT NULL,
`created_on` datetime NOT NULL,
`discussion_id` varchar(36) NOT NULL,
`forum_id` varchar(36) NOT NULL,
`reply_to_post_id` varchar(36),
`subject` varchar(100) NOT NULL,
`tenant_id` varchar(36) NOT NULL,
KEY `k_author_identity` (`author_identity`),
KEY `k_discussion_id` (`discussion_id`),
KEY `k_forum_id` (`forum_id`),
KEY `k_reply_to_post_id` (`reply_to_post_id`),
KEY `k_tenant_id` (`tenant_id`),
PRIMARY KEY (`post_id`)
) ENGINE=InnoDB;
Now, if the User changes his email, you have to update/synchronize all the tables to reflect the change.
I'm just wondering why he came up with that solution. What were the things he considered?
Also, are there any alternatives, like keeping the same code but persisting the data differently?
From the book:
There is no effort made to keep Collaborator Value instances
synchronized with the Identity and Access Context. They are immutable
and can only be fully replaced, not modified. p.468 para. 1
That means synchronization will basically occur only when the value gets replaced.
If a Collaborator name or e-mail address changes in the Identity
and Access Context, such changes won't be automatically updated in
the Collaboration Context. Those kinds of changes rarely occur, so
the team made the decision to keep this particular design simple and
not attempt to synchronize changes in the remote Context with objects
in their local Context. p.469 para. 1
You will also want to read p.476 para. 2, Can You Handle the Responsibility. In this section Vaughn demonstrates how complex it can be to keep data synchronized between bounded contexts when having to deal with out-of-order message consumption. It is also outlined that, to guarantee the order of messages, we need not rely on a complex messaging infrastructure but can simply pull messages from the remote context (e.g. through a RESTful notification service [2]).
It's always a question of trade-offs and what is acceptable for your domain.
2. Such an approach is described at p.312, sec. Publishing Notifications as RESTful Resources.
I am trying to change the data type of my primary key to String in Sequelize, and I am getting the following error when trying to create a record using upsert:
error: SequelizeDatabaseError: null value in column "id" violates not-null constraint
Here is the code:
id: {
  type: DataTypes.STRING,
  primaryKey: true,
  allowNull: true,
  autoIncrement: false,
  field: "id"
}
How do I make this work?
Thanks in advance.
From the documentation:
Technically, a primary key constraint is simply a combination of a unique constraint and a not-null constraint.
You cannot create a nullable primary key.
...
allowNull: false,
...
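Applied to the id field above, a minimal sketch:
id: {
  type: DataTypes.STRING,
  primaryKey: true,
  allowNull: false, // a primary key column cannot be nullable
  autoIncrement: false,
  field: "id"
}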