Error inserting multiple rows into a PostgreSQL Ecto database using a changeset - ecto

I have this function to insert all the categories into a productCategory table for a specific product, e.g. one product has many categories.
In repo.ex:
def insertProductCategories(conn, product, productId) do
  IO.inspect(product)
  changeset = Enum.each(product["categories"], fn (productCategory) ->
    Api.ProductCategory.changeset(%Api.ProductCategory{c_id: productCategory["value"], p_id: productId})
  end)
  errors = changeset.errors
  valid = changeset.valid?
  case insert(changeset) do
    {:ok, product} ->
      conn
      |> put_resp_content_type("application/json")
      |> send_resp(200, Poison.encode!(%{
        success: product
      }))
    {:error, changeset} ->
      conn
      |> put_resp_content_type("application/json")
      |> send_resp(500, Poison.encode!(%{
        failure: changeset
      }))
  end
end
In productCategory.ex:
defmodule Api.ProductCategory do
  use Ecto.Schema

  #derive {Poison.Encoder, only: [:c_id, :p_id]}
  schema "productCategories" do
    field :c_id, :integer
    field :p_id, :integer
  end

  def changeset(productCategory, params \\ %{}) do
    productCategory
    |> Ecto.Changeset.cast(params, [:c_id, :p_id])
    |> Ecto.Changeset.validate_required([:c_id, :p_id])
  end
end
These are the two things that get logged to the console when insertProductCategories runs: the inspection of a product, and the error:
%{"brand" => "Healtheries",
"categories" => [%{"categoryId" => 1, "label" => "Meat",
"selectedAdd" => true, "selectedSearch" => false, "value" => 1},
%{"categoryId" => 1, "label" => "Dairy", "selectedAdd" => true,
"selectedSearch" => false, "value" => 2},
%{"categoryId" => 1, "label" => "Confectionary", "selectedAdd" => true,
"selectedSearch" => false, "value" => 3},
%{"categoryId" => 1, "label" => "Dessert", "selectedAdd" => true,
"selectedSearch" => false, "value" => 4},
%{"categoryId" => 1, "label" => "Baking", "selectedAdd" => true,
"selectedSearch" => false, "value" => 5},
%{"categoryId" => 1, "label" => "Condiments", "selectedAdd" => true,
"selectedSearch" => false, "value" => 6},
%{"categoryId" => 1, "label" => "Beverages", "selectedAdd" => true,
"selectedSearch" => false, "value" => 7}],
"description" => "Yummy chocolate bits for baking", "image" => "no-image",
"name" => "Chocolate Bits"}
20:49:46.103 [error] #PID<0.340.0> running Api.Router terminated
Server: 192.168.20.3:4000 (http)
Request: POST /products
** (exit) an exception was raised:
** (UndefinedFunctionError) function :ok.errors/0 is undefined (module :ok is not available)
:ok.errors()
(api) lib/api/repo.ex:33: Api.Repo.insertProductCategories/3
(api) lib/api/router.ex:1: Api.Router.plug_builder_call/2
(api) lib/plug/debugger.ex:123: Api.Router.call/2
(plug) lib/plug/adapters/cowboy/handler.ex:15: Plug.Adapters.Cowboy.Handler.upgrade/4
(cowboy) /Users/Ben/Development/Projects/vepo/api/deps/cowboy/src/cowboy_protocol.erl:442: :cowboy_protocol.execute/4
I have only ever done a database insertion of more than one row like this, but it doesn't have any validation:
Enum.each(subcategories, fn (subcategory) -> insert(subcategory) end)
And I have only used a changeset with validation for a one-row insertion:
def insertProduct(conn, product) do
  changeset = Api.Product.changeset(%Api.Product{}, product)
  errors = changeset.errors
  valid = changeset.valid?
  case insert(changeset) do
    {:ok, product} ->
      conn
      |> put_resp_content_type("application/json")
      |> send_resp(200, Poison.encode!(%{
        success: product
      }))
    {:error, changeset} ->
      conn
      |> put_resp_content_type("application/json")
      |> send_resp(500, Poison.encode!(%{
        failure: changeset
      }))
  end
end
I'm trying to merge these techniques. I would like to keep the validation code in there (e.g. the case for :ok and :error) but am unsure how to do that when I am inserting more than one row into the database. What am I doing wrong?

You can use Ecto.Multi to sequence insertions for a bunch of changesets, and then run it in a transaction. The transaction will make sure that if there's any error in any insertion, the rest of the changes get rolled back.
multi =
  Enum.reduce(Enum.with_index(product["categories"]), Ecto.Multi.new, fn {productCategory, index}, multi ->
    changeset = Api.ProductCategory.changeset(%Api.ProductCategory{c_id: productCategory["value"], p_id: productId})
    Ecto.Multi.insert(multi, index, changeset)
  end)

case Repo.transaction(multi) do
  {:ok, categories} ->
    # categories here is a map with the index as key and struct as value
    ...
  {:error, failed_operation, failed_value, changes_so_far} ->
    ...
end
You can read more about the values returned by Repo.transaction for Ecto.Multi in this example in the documentation and the documentation of Ecto.Repo.transaction/2.
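To merge this with the response handling from the one-row version, the transaction result can feed the same conn pipeline. A minimal sketch under the same assumptions as the question's code (it lives in Api.Repo, so transaction/1 is the bare repo function, and Poison can encode the structs, e.g. via the @derive line in the schema):

def insertProductCategories(conn, product, productId) do
  multi =
    Enum.reduce(Enum.with_index(product["categories"]), Ecto.Multi.new, fn {productCategory, index}, multi ->
      changeset = Api.ProductCategory.changeset(%Api.ProductCategory{c_id: productCategory["value"], p_id: productId})
      Ecto.Multi.insert(multi, index, changeset)
    end)

  case transaction(multi) do
    {:ok, categories} ->
      # categories is a map of index => inserted struct
      conn
      |> put_resp_content_type("application/json")
      |> send_resp(200, Poison.encode!(%{success: Map.values(categories)}))
    {:error, _index, failed_changeset, _changes_so_far} ->
      # any invalid changeset aborts and rolls back the whole transaction
      conn
      |> put_resp_content_type("application/json")
      |> send_resp(500, Poison.encode!(%{failure: failed_changeset}))
  end
end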

I think you need Enum.map in repo.ex, not Enum.each.
From the docs:
each(enumerable, fun)
each(t, (element -> any)) :: :ok
Invokes the given fun for each item in the enumerable.
Returns :ok.
Which is why you're seeing function :ok.errors/0 is undefined: Enum.each returns :ok, so changeset is bound to the atom :ok rather than to a changeset.
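A minimal sketch of that fix, staying close to the question's code (insert/1 here is the bare repo function from repo.ex; the explicit validity check is optional, since inserting an invalid changeset returns {:error, changeset} anyway):

changesets = Enum.map(product["categories"], fn (productCategory) ->
  Api.ProductCategory.changeset(%Api.ProductCategory{c_id: productCategory["value"], p_id: productId})
end)

if Enum.all?(changesets, fn changeset -> changeset.valid? end) do
  # each insert returns {:ok, struct} or {:error, changeset}
  Enum.each(changesets, fn changeset -> insert(changeset) end)
else
  errors = Enum.flat_map(changesets, fn changeset -> changeset.errors end)
  # respond with the errors, as in the single-row version
end

Note that unlike the Ecto.Multi approach above, this does not run in a transaction, so earlier inserts are not rolled back if a later one fails.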

Related

What is the best way to define and initialize a Table?

In Python, we can do this.
board = {'us':     {'name': 'USA', 'govern': 'good'},
         'canada': {'name': 'Canada', 'govern': 'good'},
         'uk':     {'name': 'UK', 'govern': 'good', 'recruit': 3},
         'spain':  {'name': 'Spain', 'govern': 'good', 'schengen': True, 'recruit': 2},
         'france': {'name': 'France', 'govern': 'good', 'schengen': True, 'recruit': 2},
         'italy':  {'name': 'italy', 'govern': 'good', 'schengen': True} }
to create a dictionary of name value pairs for easy lookup.
Can I do the same in the Stanza language, like:
deftype Countries <: Table<String,Table<String,?>>
; value could be anything. we see Int|String|True|False here
val board : Countries = to-table( "us" => ( "name" => "USA", "govern" => "good" ), ....)
?
The closest data structure to a Python dictionary in Stanza is HashTable, from collections. You can do something like:
; to-hashtable<K,V> can be found in collections, too!
val board = to-hashtable<String, HashTable<String, ?>> $ [
  "us" => to-hashtable<String, ?> $ [
    "name" => "USA"
  ],
  "fr" => to-hashtable<String, ?> $ [
    "name" => "France"
  ]
  ; .. etc ...
]
println(board["us"])
This will output:
HashTable(
  "name" => "USA")
deftype Countries <: Table<...> doesn't create an alias for Table, it declares a new type. In order to use it like a table, you would need to implement the required methods for Table.
But normally we like to add more type information, not less!
defenum Government :
  Democracy
  Republic
  Monarchy

defstruct Country :
  name:String,
  gov:Government
  ; ... etc ...

val board = to-hashtable<String,Country> $ [
  "us" => Country(name, gov) where :
    val name = "USA"
    val gov = Republic
  "fr" => Country(name, gov) where :
    val name = "France"
    val gov = Republic
]
I kind of came up with a solution:
defpackage labyrinth :
  import core
  import collections

deftype Countries <: HashTable<String,?>

defn print-deep ( xs : HashTable<String,?> ) :
  for x in xs do :
    val k = key(x)
    val v = value(x)
    print("%_ => " % [k])
    match(v):
      (v: HashTable<String,?>) : (print("[ "), print-deep(v), println("]"))
      (v) : ( print(v), print(", ") )

defn to-hashtable ( t : HashTable<String,?>, kvs : Tuple<KeyValue<?,?>> ) -> False :
  for kv in kvs do :
    val k = key(kv)
    val v = value(kv)
    match(k) :
      (k : String) :
        if v is Tuple<?> :
          var d : HashTable<String,?> = HashTable<String,?>()
          to-hashtable(d, v)
          set(t, k, d)
        else :
          t[k] = v

defn to-countries ( kvs : Tuple<KeyValue<String,?>> ) -> HashTable<String,?> :
  val t : HashTable<String,?> = HashTable<String,?>()
  to-hashtable(t, kvs)
  t

defn test () -> HashTable<String,?> :
  val c : Tuple<KeyValue<String,?>> =
    [ "us" => ["name" => "us", "govern" => "good"]
      "canada" => [ "name" => "Canada" "govern" => "good" ]
      "uk" => [ "name" => "UK" "govern" => "good" "recruit" => 3 ]
      "spain" => [ "name" => "Spain" "govern" => "good" "schengen" => true "recruit" => 2 ]
      "france" => [ "name" => "France" "govern" => "good" "schengen" => true "recruit" => 2 ]
      "italy" => [ "name" => "italy" "govern" => "good" "schengen" => true ] ]
  val countries = to-countries(c)
  countries

val board = test()
print-deep(board)

Erlang Extract from Map

Below is an object in a record with the type:
requirements = [] :: [term()],
We extract the Requirements object like below:
Requirements = Records#record.requirements
And I need the lat, long, and radius independently from the any.latlong_radius tuple. How can we extract them?
"requirements": {
"supply_type": {
"$tuple": [1, {
"$tuple": ["mobile_app", "nil", "nil"]
}]
},
"has_app_name": true,
"any": {
"latlong_radius": {
"$tuple": [33.042334, -96.734884, 16093.4]
}
},
},
-module(my).
-compile(export_all).

-record(requirements, {supply_type,
                       has_app_name,
                       any}).

get_requirements() ->
    #requirements{
       supply_type = #{"$tuple" => [1, 2, 3]},
       has_app_name = true,
       any = #{"latlong_radius" =>
                  #{"$tuple" => [33.042334, -96.734884, 16093.4]}
              }
      }.

go() ->
    Requirements = get_requirements(),
    io:format("requirements: ~p~n", [Requirements]),
    Any = Requirements#requirements.any,
    #{"latlong_radius" :=
        #{"$tuple" := [Lat, Lon, Rad]}
     } = Any,
    io:format("Lat: ~w, Lon: ~w, Rad: ~w~n", [Lat, Lon, Rad]).
In the shell:
51> c(my).
my.erl:2: Warning: export_all flag enabled - all functions will be exported
{ok,my}
52> my:go().
requirements: {requirements,#{"$tuple" => [1,2,3]},
true,
#{"latlong_radius" =>
#{"$tuple" =>
[33.042334,-96.734884,16093.4]}}}
Lat: 33.042334, Lon: -96.734884, Rad: 16093.4
ok
On the other hand, if your data structure is maps all the way down (which makes everything you posted about records and list specifications irrelevant):
-module(my).
-compile(export_all).

get_requirements() ->
    #{"requirements" =>
        #{
          "supply_type" => #{"$tuple" => [1, 2, 3]},
          "has_app_name" => true,
          "any" => #{"latlong_radius" =>
                        #{"$tuple" => [33.042334, -96.734884, 16093.4]}
                    }
         }
     }.

go() ->
    Requirements = get_requirements(),
    io:format("requirements: ~p~n", [Requirements]),
    #{"requirements" :=
        #{
          "any" :=
            #{
              "latlong_radius" :=
                #{
                  "$tuple" := [Lat, Lon, Rad]
                 }
             }
         }
     } = Requirements,
    io:format("Lat: ~w, Lon: ~w, Rad: ~w~n", [Lat, Lon, Rad]).
In the shell:
70> c(my).
my.erl:2: Warning: export_all flag enabled - all functions will be exported
{ok,my}
71> my:go().
requirements: #{"requirements" =>
#{"any" =>
#{"latlong_radius" =>
#{"$tuple" => [33.042334,-96.734884,16093.4]}},
"has_app_name" => true,
"supply_type" => #{"$tuple" => [1,2,3]}}}
Lat: 33.042334, Lon: -96.734884, Rad: 16093.4
ok
However, with deeply nested maps trying to get the pattern match syntax correct is just too error prone, so I would use maps:get/2 instead:
-module(my).
-compile(export_all).

get_requirements() ->
    #{"requirements" =>
        #{
          "supply_type" => #{"$tuple" => [1, 2, 3]},
          "has_app_name" => true,
          "any" => #{"latlong_radius" =>
                        #{"$tuple" => [33.042334, -96.734884, 16093.4]}
                    }
         }
     }.

go() ->
    Map = get_requirements(),
    io:format("Map: ~p~n", [Map]),
    Requirements = maps:get("requirements", Map),
    Any = maps:get("any", Requirements),
    LLR = maps:get("latlong_radius", Any),
    #{"$tuple" := [Lat, Long, Radius]} = LLR,
    io:format("Lat: ~w, Lon: ~w, Rad: ~w~n", [Lat, Long, Radius]).
Much, much simpler.
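If you only need the innermost value, the maps:get/2 calls can also be chained directly. A sketch using the same get_requirements/0 data as above (go2 is just an illustrative name):

go2() ->
    %% chain maps:get/2 from the outside in, then pattern match the list
    #{"$tuple" := [Lat, Long, Radius]} =
        maps:get("latlong_radius",
                 maps:get("any",
                          maps:get("requirements", get_requirements()))),
    io:format("Lat: ~w, Lon: ~w, Rad: ~w~n", [Lat, Long, Radius]).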
Just access the field using single quotes '' around the key, like below:
Latlong_radius = Any#any.latlong_radius
Res = Latlong_radius#latlong_radius.'$tuple'

ReactiveMongo aggregate function

I'm trying to convert the following Mongo query to its ReactiveMongo (JSON) equivalent:
db.media.aggregate( {$group : { "_id" : "$createdBy", "count" : { $sum : 1 }}}, {$sort : {"count" : -1}}, {$limit : 10} )
What I have come up with is this, but I can't get it to work.
override def getMediasCountByHandle(db: reactivemongo.api.DefaultDB): Future[JsObject] = {
  val commandDoc = Json.obj(
    "aggregate" -> "media", // we aggregate on collection media
    "pipeline" -> List(
      Json.obj(
        "$group" -> Json.obj(
          "_id" -> "$createdBy",
          "count" -> Json.obj("$sum" -> 1))),
      Json.obj("$sort" -> Json.obj("total" -> -1)),
      Json.obj("$limit" -> 10)
    )
  )
  val runner = Command.run(JSONSerializationPack)
  runner.apply(db, runner.rawCommand(commandDoc)).one[JsObject]
}
Please help

get name of pattern that matched in grok in logstash

If I have a patterns file with a bunch of regex patterns such as the following:
A .*foo.*
B .*bar.*
C .*baz.*
and my grok filter looks like the following:
grok {
  patterns_dir => ["/location/of/patterns"]
  match => { "request" => [ "%{A}", "%{B}", "%{C}" ] }
}
Is there any way to know which one matched, i.e. the name of the SYNTAX? I would like to annotate the document with the name of the one that matched.
What you would usually do is name the matched variables. The syntax for that (taking your example) would be:
grok {
  patterns_dir => ["/location/of/patterns"]
  match => {
    "request" => [ "%{A:A}", "%{B:NameOfB}", "%{C:SomeOtherName}" ]
  }
}
Accordingly, the matches of your grok would now be named:
A: A
B: NameOfB
C: SomeOtherName
So in your case you could just name them after the patterns. That should work just fine.
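For example, naming each capture after its own pattern (a sketch based on the question's filter):

grok {
  patterns_dir => ["/location/of/patterns"]
  match => {
    "request" => [ "%{A:A}", "%{B:B}", "%{C:C}" ]
  }
}

Whichever pattern matches then produces a field carrying that pattern's name.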
Alternatively (I just tested this with the grok debugger), it appears that if you do not name your matched patterns, they will default to the name of the pattern (which I think is what you want). The downside of this is that if you reuse a pattern, the result will be an array of values.
This is the test I ran:
Input:
Caused by: com.my.application.IOException: null Caused by: com.my.application.IOException: null asd asd
grok:
(.*?)Caused by:%{GREEDYDATA}:%{GREEDYDATA}
Output:
{
  "GREEDYDATA": [
    [
      " com.my.application.IOException: null Caused by: com.my.application.IOException",
      " null asd asd"
    ]
  ]
}
Hope that solves your problems,
Artur
EDIT:
Based on OP's other question, here is my approach to solving that issue dynamically.
You will still have to name the matches. Decide on a common prefix for naming your matches. I will base my example on 2 JSON strings to make this easier:
{"a" : "b", "prefix_patterna" : "", "prefix_patternb" : "bla"}
{"a" : "b", "prefix_patterna" : "sd", "prefix_patternb" : ""}
Note how there are 2 artificial matches, prefix_patterna and prefix_patternb. So, I decided on the prefix "prefix", and I use that to identify which event fields to inspect (you could also drop empty fields if that is something you want).
Then in my filter, I use ruby to iterate through the event's fields to find the one that matched my pattern:
ruby {
  code => "
    toAdd = nil;
    event.to_hash.each { |k,v|
      if k.start_with?('prefix_') && v.to_s != ''
        toAdd = k
      end
    }
    if toAdd.to_s != ''
      event['test'] = toAdd
    end
  "
}
All this code does is check the event keys for the prefix and see whether the value of that field is empty or nil. If it finds a field that has a value, it writes the field's name into a new event field called "test".
Here are my tests:
Settings: Default pipeline workers: 8
Pipeline main started
{"a" : "b", "prefix_patterna" : "sd", "prefix_patternb" : ""}
{
  "message" => "{\"a\" : \"b\", \"prefix_patterna\" : \"sd\", \"prefix_patternb\" : \"\"}",
  "@version" => "1",
  "@timestamp" => "2016-09-15T09:48:29.418Z",
  "host" => "pandaadb",
  "a" => "b",
  "prefix_patterna" => "sd",
  "prefix_patternb" => "",
  "test" => "prefix_patterna"
}
{"a" : "b", "prefix_patterna" : "", "prefix_patternb" : "bla"}
{
  "message" => "{\"a\" : \"b\", \"prefix_patterna\" : \"\", \"prefix_patternb\" : \"bla\"}",
  "@version" => "1",
  "@timestamp" => "2016-09-15T09:48:36.359Z",
  "host" => "pandaadb",
  "a" => "b",
  "prefix_patterna" => "",
  "prefix_patternb" => "bla",
  "test" => "prefix_patternb"
}
Note how the first test writes "prefix_patterna" while the second test writes "prefix_patternb".
I hope this solves your issue,
Artur
You can tag the match (or add fields) by having multiple grok filters, as follows.
It doesn't feel elegant and is not very scalable, as it is prone to a lot of repetition (not DRY), but it seems to be the only way to "flag" matches of complex patterns, especially predefined library patterns.
Note you have to add conditionals to the subsequent filters to avoid them being run too when previous filters have already matched. Otherwise you'll still get _grokparsefailure tags for the later filters. Source
You also need to remove the failure tags of all but the final "else" filter. Otherwise you will get spurious _grokparsefailures e.g. from A when B or C matches. Source
grok {
  patterns_dir => ["/location/of/patterns"]
  match => { "request" => "%{A}" }
  add_tag => [ "pattern_A" ]
  add_field => { "pattern" => "A" } # another option
  tag_on_failure => [ ] # prevent false failure tags
}
if "pattern_A" not in [tags] {
  grok {
    patterns_dir => ["/location/of/patterns"]
    match => { "request" => "%{B}" }
    add_tag => [ "pattern_B" ]
    tag_on_failure => [ ] # prevent false failure tags
  }
}
if "pattern_A" not in [tags] and "pattern_B" not in [tags] {
  grok {
    patterns_dir => ["/location/of/patterns"]
    match => { "request" => "%{C}" }
    add_tag => [ "pattern_C" ]
  }
}
There may be ways to simplify / tune this, but I'm not an expert (yet!).

:couchbeam.save_doc/2 in Elixir

I've written some code to save a document to CouchDB in Elixir using Couchbeam:
case :couchbeam.open_or_create_db(server, "database", []) do
  { :ok, db } ->
    doc = [
      _id: "auth_#{username}",
      content: 'some text'
    ]
    case :couchbeam.save_doc(db, doc) do
      { :ok, saved_doc } -> saved_doc
      { :error, :econnrefused } -> %{
        :error_code => "save_error",
        :error => "Failure to create account"
      }
    end
  { :error, :econnrefused } ->
    IO.puts "Could not connect to server"
end
This results in an error stating that save_doc/4 is not a matching function clause, when it is clearly defined in my local couchbeam.erl.
save_doc/4 is defined as:
-spec save_doc(Db::db(), doc(), mp_attachments(), Options::list()) ->
    {ok, doc()} | {error, term()}.
save_doc(#db{server=Server, options=Opts}=Db, {Props}=Doc, Atts, Options) ->
The stacktrace stating that no function clause was found:
** (FunctionClauseError) no function clause matching in :couchbeam.save_doc/4
stacktrace:
(couchbeam) src/couchbeam.erl:560: :couchbeam.save_doc({:db, {:server, "http://localhost:5984", [basic_auth: {"admin", "admin"}]}, "database", [basic_auth: {"admin", "admin"}]}, [_id: "auth_hello", content: 'some text'], [], [])
(cs) lib/models/Auth.ex:60: anonymous fn/3 in Server.Model.Auth.signup/3
test/model/auth_test.exs:7
Does save_doc/2 require something that the arguments passed in are missing?
It should be:
doc = {[
  { "_id", "auth_#{username}" },
  { "content", "some text" }
]}
The type doc() is defined as {[{ejson_key(), ejson_term()}]} in Erlang. The keyword list literal syntax you were using needed to be wrapped in a tuple.
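Putting it together with the question's save call (a sketch; db and username are as in the question, and the error handling is kept from there):

doc = {[
  { "_id", "auth_#{username}" },
  { "content", "some text" }
]}

case :couchbeam.save_doc(db, doc) do
  { :ok, saved_doc } -> saved_doc
  { :error, _reason } -> %{
    :error_code => "save_error",
    :error => "Failure to create account"
  }
end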