Ecto - Table entry not getting updated

The code below updates the Product via changeset. I'm also trying to update the ProductShop with a new "price" via changeset2, but it isn't getting updated. I have inspected all the relevant parts: the price has a value, the product_shop has a value, and "Price updated" is printed to the console.
put "/products" do
errors = {}
IO.inspect(conn.body_params)
product = Api.Product |> Api.Repo.get(conn.query_params["p_id"])
shop = Api.Shop |> Api.Repo.get(conn.query_params["s_id"])
params = for key <- ~w(image description), value = conn.body_params[key], into: %{}, do: {key, value}
changeset = Api.Product.changeset(product, params)
case Api.Repo.update(changeset) do
{:ok, product} ->
errors = Tuple.append(errors, "Product updated")
{:error, changeset} ->
errors = Tuple.append(errors, "Product not updated")
end
pid = conn.query_params["p_id"]
sid = conn.query_params["s_id"]
price = Float.parse(conn.body_params["price"])
price1 = elem(price, 0)
IO.inspect(price1)
product_shop = Api.Repo.get_by(ProductShop, s_id: sid, p_id: pid)
IO.inspect(product_shop)
changeset2 = Api.ProductShop.changeset(product_shop, %{price: price1})
case Api.Repo.update(changeset2) do
{:ok, product_shop} ->
errors = Tuple.append(errors, "Price updated")
{:error, changeset2} ->
errors = Tuple.append(errors, "Price not updated")
end
IO.inspect(errors)
conn
|> put_resp_content_type("application/json")
|> send_resp(200, Poison.encode!(%{
successs: "success",
errors: Tuple.to_list(errors)
}))
end
Why does the ProductShop not get updated when price is populated and so is product_shop?
ProductShop.ex
defmodule Api.ProductShop do
  use Ecto.Schema
  import Ecto.Changeset
  import Api.Repo
  import Ecto.Query

  @derive {Poison.Encoder, only: [:s_id, :p_id]}
  schema "product_shops" do
    field :s_id, :integer
    field :p_id, :integer
    field :not_in_shop_count, :integer
    field :price, :float
  end

  def changeset(product_shop, params \\ %{}) do
    product_shop
    |> cast(params, [:s_id, :p_id])
    |> validate_required([:s_id, :p_id])
    |> unique_constraint(:s_id, name: :unique_product_shop)
  end

  def insert_product_shop(conn, product_id, shop_id, price) do
    changeset = Api.ProductShop.changeset(%Api.ProductShop{p_id: product_id, s_id: shop_id, not_in_shop_count: 0, price: price})
    errors = changeset.errors
    valid = changeset.valid?

    case insert(changeset) do
      {:ok, product_shop} ->
        {:ok, product_shop}
      {:error, changeset} ->
        {:error, :failure}
    end
  end

  def delete_all_from_product_shops do
    from(Api.ProductShop) |> delete_all
  end

  def get_product_shops do
    Api.ProductShop |> all
  end
end

You're missing :price and :not_in_shop_count in your cast call. Because cast/3 only picks the permitted keys, the :price param is silently dropped, the changeset ends up with no changes, and Repo.update returns {:ok, ...} without touching the row, which is why "Price updated" is printed anyway. Try this:
def changeset(product_shop, params \\ %{}) do
  product_shop
  |> cast(params, [:s_id, :p_id, :price, :not_in_shop_count])
  |> validate_required([:s_id, :p_id, :price, :not_in_shop_count])
  |> unique_constraint(:s_id, name: :unique_product_shop)
end

Related

In case of Python KeyError, how to return a value of None if a key doesn't exist

In my case, I have an empty dictionary that I want to fill with key/value pairs of stock ticker information. I tested the code below, but I always get a KeyError whenever a key is missing for one of the tickers I loop through. All I want is to define a fixed group of keys for all my tickers (info_data in the code below) and set the value to None whenever a key is missing.
Here is my code:
stocks_info = {}
for symbol in tqdm(sav_set):
    info = yf.Tickers(symbol).tickers[symbol].info
    if info['quoteType'] == 'EQUITY':
        info_data = {'symbol': symbol, 'shortName': info['shortName'], 'country': info['country'],
                     'sector': info['sector'], 'industry': info['industry'], 'marketCap': info['marketCap'],
                     'currentPrice': info['currentPrice'], 'quoteType': info['quoteType'], 'market': info['market']}
Many thanks to @dawg for providing help. This is how I managed to resolve my issue:
import json

stocks_info = {}
for symbol in tqdm(sav_set):
    info = yf.Tickers(symbol).tickers[symbol].info
    # info_keys = {'symbol', 'shortName', 'longName', 'country', 'sector', 'industry', 'marketCap', 'currentPrice',
    #              'navPrice', 'quoteType', 'market'}
    ticker = info.get('symbol', None)
    shortName = info.get('shortName', None)
    longName = info.get('longName', None)
    country = info.get('country', None)
    sector = info.get('sector', None)
    industry = info.get('industry', None)
    marketCap = info.get('marketCap', None)
    currentPrice = info.get('currentPrice', None)
    navPrice = info.get('navPrice', None)
    quoteType = info.get('quoteType', None)
    market = info.get('market', None)
    if info['quoteType'] == 'EQUITY':
        info_data = {'symbol': ticker, 'shortName': shortName, 'longName': longName, 'country': country,
                     'sector': sector, 'industry': industry, 'marketCap': marketCap, 'currentPrice': currentPrice,
                     'quoteType': quoteType, 'market': market}
    else:
        info_data = {'symbol': ticker, 'shortName': shortName, 'longName': longName, 'country': country,
                     'sector': sector, 'industry': industry, 'marketCap': marketCap, 'currentPrice': navPrice,
                     'quoteType': quoteType, 'market': market}
    stocks_info[symbol] = info_data

json_object = json.dumps(stocks_info)
with open("../tickers_data/stocks_info.json", "w") as outfile:
    outfile.write(json_object)
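Side note: since every field is pulled the same way, the per-key lookups above could be collapsed with a dict comprehension over the wanted keys, along the lines of the commented-out info_keys set. A minimal sketch, meant to run inside the loop where info is available:

info_keys = ['symbol', 'shortName', 'longName', 'country', 'sector', 'industry',
             'marketCap', 'currentPrice', 'navPrice', 'quoteType', 'market']

# info.get(key) returns None when the key is absent, so no KeyError is raised
info_data = {key: info.get(key) for key in info_keys}

# mirror the if/else above: non-equity tickers fall back to navPrice as the price
if info_data['quoteType'] != 'EQUITY':
    info_data['currentPrice'] = info_data['navPrice']
del info_data['navPrice']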

Filter user names from a string

I'm trying to extract the usernames that are referenced in a tweet, as in the following example:
Example:
tw = 'TR #uname1, #uname2, #uname3, text1, text2, #uname4, text3, #uname5, RT #uname6'
the desired output will be:
rt_unames = ['uname1', 'uname6']
mt_unames = ['uname2', 'uname3', 'uname4', 'uname5']
I can do something like a for loop that goes over the string like the naïve solution below:
Naïve Solution:
def find_end_idx(tw_part):
    end_space_idx = len(tw)
    try:
        end_space_idx = tw[start_idx:].index(' ')
    except Exception as e:
        pass
    end_dot_idx = len(tw)
    try:
        end_dot_idx = tw[start_idx:].index('.')
    except Exception as e:
        pass
    end_semi_idx = len(tw)
    try:
        end_semi_idx = tw[start_idx:].index(',')
    except Exception as e:
        pass
    return min(end_space_idx, end_dot_idx, end_semi_idx)

tw = 'RT #uname1, #uname2, #uname3, text1, text2, #uname4, text3, #uname5, RT #uname6'
acc = ''
rt_unames = []
mt_unames = []
for i, c in enumerate(tw):
    acc += c
    if acc[::-1][:2][::-1] == 'RT':
        start_idx = i + 2
        end_idx = find_end_idx(tw_part=tw[start_idx:])
        uname = tw[start_idx:start_idx + end_idx]
        if uname not in mt_unames:
            rt_unames.append(uname)
        acc = ''
    elif acc[::-1][:1] == '#':
        start_idx = i
        end_idx = find_end_idx(tw_part=tw[start_idx:])
        uname = tw[start_idx:start_idx + end_idx]
        if uname not in rt_unames:
            mt_unames.append(uname)
        acc = ''

rt_unames, mt_unames
which outputs:
(['#uname1', '#uname6'], ['#uname2', '#uname3', '#uname4', '#uname5'])
Question:
As I need to apply it to every tweet in a pandas.DataFrame, I'm looking for a more elegant and fast solution to get this outcome.
I'd appreciate any suggestions.
Let's try re.findall with a regex pattern:
import re
rt_unames = re.findall(r'(?<=TR |RT )#([^,]+)', tw)
mt_unames = re.findall(r'(?<!TR |RT )#([^,]+)', tw)
In a similar way, you can use the str.findall method on the column of the dataframe:
df['rt_unames'] = df['tweet'].str.findall(r'(?<=TR |RT )#([^,]+)')
df['mt_unames'] = df['tweet'].str.findall(r'(?<!TR |RT )#([^,]+)')
Result:
['uname1', 'uname6']
['uname2', 'uname3', 'uname4', 'uname5']
If the format of the input string is always the same, I would do it like this:
def parse_tags(str_tags):
    rts = []
    others = []
    for tag in [tag.strip() for tag in str_tags.split(',')]:
        if tag.startswith('RT'):
            rts.append(tag[3:])
        elif tag.startswith('#'):
            others.append(tag)
    return rts, others
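For reference, a quick check of parse_tags against the RT variant of the example tweet (a sketch; note that, unlike the regex answer, this variant keeps the leading '#' on the names):

tw = 'RT #uname1, #uname2, #uname3, text1, text2, #uname4, text3, #uname5, RT #uname6'
rt_unames, mt_unames = parse_tags(tw)
print(rt_unames)  # ['#uname1', '#uname6']
print(mt_unames)  # ['#uname2', '#uname3', '#uname4', '#uname5']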
An alternative approach using filter and list comprehensions:
import re

def your_func_name(tw):
    tw_list = [x.strip() for x in tw.split(",")]
    rt_unames_raw = filter(lambda x: "#" in x and x.startswith("RT"), tw_list)
    mt_unames_raw = filter(lambda x: x.startswith("#"), tw_list)
    rt_unames = [re.sub(r"RT|#", "", uname).strip() for uname in rt_unames_raw]
    mt_unames = [re.sub("#", "", uname).strip() for uname in mt_unames_raw]
    return rt_unames, mt_unames

tw = 'RT #uname1, #uname2, #uname3, text1, text2, #uname4, text3, #uname5, RT #uname6'
your_func_name(tw=tw)
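With the example tweet above, the call should return roughly:

(['uname1', 'uname6'], ['uname2', 'uname3', 'uname4', 'uname5'])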
You can use regex patterns with the apply function on the tweet column of your dataframe:
import pandas as pd
import re
pattern1 = r"(RT\s+#[^\,]+)|(TR\s+#[^\,]+)"
pattern2 = r"#[^\,]+"
df = pd.DataFrame(['TR #uname1, #uname2, #uname3, text1, text2, #uname4, text3, #uname5, RT #uname6'], columns=['Tweet'])
df['group1'] = df.Tweet.apply(lambda x: re.findall(pattern1, x))
df['group2'] = df.Tweet.apply(lambda x: re.findall(pattern2, x))
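Note that pattern1 contains two capturing groups (one per alternative), so re.findall returns a tuple per match with one empty slot. If you only want the bare usernames, a small post-processing step can flatten that. A sketch over the columns created above; clean_names is a hypothetical helper, not part of the original answer:

def clean_names(matches):
    # each match is an (RT-group, TR-group) tuple; keep the non-empty part and strip the prefix
    return [re.sub(r'^(RT|TR)\s+#', '', a or b) for a, b in matches]

df['group1_names'] = df['group1'].apply(clean_names)
df['group2_names'] = df['group2'].apply(lambda names: [n.lstrip('#') for n in names])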
This is my second time answering, so I will try to make it as easy as possible.
tw = 'TR #uname1, #uname2, #uname3, text1, text2, #uname4, text3, #uname5, RT #uname6'
res = tw.replace(", ", " ").split()
final = []
k = "#"
for e in res:
    if e[0].lower() == k.lower():
        final.append(e)
stringe = str(final).replace(",", "")
stringe = stringe.replace("[", "")
stringe = stringe.replace("]", "")
stringe = stringe.replace("'", "")
print("Result is :", str(stringe))
From what I can see, you already know Python, so this example should only take you a moment.
Here, I use the replace function to turn the ", " separators into spaces, and the split function to break the string into words. The result is stored in res.
In the next few lines, I use the replace function to strip unwanted characters such as "[", "]" and "'" from the printed list.
Then, I simply print the result.
Hit me up at @Vishma Pratim Das on Twitter if you don't understand something.

Overwrite a field: odoo.exceptions.CacheMiss: ('stock.picking.batch(31,).picking_batch_moves', None)

I'm trying to group all the moves of the stock.picking records in the stock.picking.batch.
It works fine, but I get this error when I want to overwrite the batch_id in stock.picking:
File "/opt/odoo/odoo12/odoo/api.py", line 1051, in get
raise CacheMiss(record, field)
odoo.exceptions.CacheMiss: ('stock.picking.batch(31,).picking_batch_moves', None)
this is my code:
from odoo import api, fields, models
import odoo.addons.decimal_precision as dp


class StockMove(models.Model):
    _inherit = 'stock.move'

    pbm_id = fields.Many2one('stock.picking.batch.move', string='Batche moves')


class StockPickingBatchLine(models.Model):
    _name = 'stock.picking.batch.move'
    _description = 'Opération des mouvement des transfer'

    batch_id = fields.Many2one(
        'stock.picking.batch', string='Picking batch', required=True, ondelete='cascade')
    product_id = fields.Many2one(
        'product.product', string='Produit', readonly=True, required=True)
    product_uom_id = fields.Many2one(
        'uom.uom', string='Unité de mesure', readonly=True, required=True)
    product_uom_qty = fields.Float('A faire', default=0.0, digits=dp.get_precision('Product Unit of Measure'),
                                   readonly=True)
    location_id = fields.Many2one(
        'stock.location', 'From', readonly=True, required=True)
    location_dest_id = fields.Many2one(
        'stock.location', 'To', readonly=True, required=True)
    move_lines = fields.One2many(
        'stock.move', 'pbm_id', string='Movement de stock')


class StockPickingBatch(models.Model):
    _inherit = 'stock.picking.batch'

    picking_batch_moves = fields.One2many('stock.picking.batch.move', 'batch_id', string='Lignes des mouvements',
                                          compute='_compute_picking_get_batch_lines', readonly=False, store=True)

    @api.depends('picking_ids', 'picking_ids.move_lines')
    def _compute_picking_get_batch_lines(self):
        batch_moves_obj = self.env['stock.picking.batch.move']
        linked = self.env['stock.picking.batch.move']
        ml_ids = self.env['stock.picking.batch.move.line']
        for batch in self:
            if isinstance(batch.id, models.NewId):
                continue
            req = """
                SELECT sp.batch_id
                    ,product_id
                    ,product_uom product_uom_id
                    ,sm.location_id
                    ,sm.location_dest_id
                    ,sm.state
                    ,sm.picking_type_id
                    ,sum(product_uom_qty) product_uom_qty
                    ,array_agg(DISTINCT sm.id) moves
                FROM stock_move sm
                JOIN stock_picking sp ON sp.id = sm.picking_id
                WHERE sp.batch_id IN (%s)
                GROUP BY sp.batch_id
                    ,product_id
                    ,product_uom
                    ,sm.location_id
                    ,sm.state
                    ,sm.picking_type_id
                    ,sm.location_dest_id"""
            self.env.cr.execute(req, (batch.id,))
            fetched_lines = self.env.cr.fetchall()
            batch_moves = batch_moves_obj.search([('batch_id', '=', batch.id)])
            linked = batch_moves_obj
            move_lines = []
            for line in fetched_lines:
                # search for existing line to update
                matched = batch_moves.filtered(lambda x: x.product_id.id == line[1] and
                                               x.product_uom_id.id == line[2] and
                                               x.location_id.id == line[3] and
                                               x.location_dest_id.id == line[4] and
                                               x.state == line[5])
                line_data = {
                    'batch_id': batch.id,
                    'product_id': line[1],
                    'product_uom_id': line[2],
                    'location_id': line[3],
                    'location_dest_id': line[4],
                    'state': line[5],
                    'picking_type_id': line[6],
                    'product_uom_qty': line[7],
                    'move_lines': [(6, 0, line[8])],
                }
                move_lines.extend(line[8])
                if matched.exists():
                    matched.with_context(recompute=False).write(line_data)
                    linked += matched
                else:
                    linked += batch_moves_obj.with_context(
                        recompute=False).create(line_data)
            batch.picking_batch_moves = linked or False
Try writing the field through the ORM instead of assigning it directly at the end of the compute:
batch.write({'picking_batch_moves': linked or False})
It should work.

How to create a new column using a condition in a Python DataFrame?

I'm trying to convert the R code below into equivalent Python (pandas) code.
Item_Type - old column name
Item_Type_new - new column name
perishable = c("Breads", "Breakfast", "Dairy", "Fruits and Vegetables", "Meat", "Seafood")
non_perishable = c("Baking Goods", "Canned", "Frozen Foods", "Hard Drinks", "Health and Hygiene", "Household", "Soft Drinks")
# create a new feature 'Item_Type_new'
combi[,Item_Type_new := ifelse(Item_Type %in% perishable, "perishable", ifelse(Item_Type %in% non_perishable, "non_perishable", "not_sure"))]
With a simple function, you can use apply on the pandas dataframe:
def func(x, l1, l2):
    """
    x = input value
    l1 = list of perishables
    l2 = list of non-perishables
    """
    if x in l1:
        return 'perishable'
    elif x in l2:
        return 'non_perishable'
    else:
        return 'not_sure'

perishable = ["Breads", "Breakfast", "Dairy", "Fruits and Vegetables", "Meat", "Seafood"]
non_perishable = ["Baking Goods", "Canned", "Frozen Foods", "Hard Drinks", "Health and Hygiene", "Household", "Soft Drinks"]

combi['Item_Type_new'] = combi['Item_Type'].apply(lambda x: func(x, perishable, non_perishable))
Use np.select():
import numpy as np

perishable = ["Breads", "Breakfast", "Dairy", "Fruits and Vegetables", "Meat", "Seafood"]
non_perishable = ["Baking Goods", "Canned", "Frozen Foods", "Hard Drinks", "Health and Hygiene", "Household", "Soft Drinks"]

conditions = [
    combi['Item_Type'].isin(perishable),
    combi['Item_Type'].isin(non_perishable)]
choices = ['perishable', 'non_perishable']
combi['Item_Type_new'] = np.select(conditions, choices, default='not_sure')
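A quick way to sanity-check either approach (a minimal sketch; the three-row combi frame below is made up for illustration and reuses the perishable/non_perishable lists and numpy import from above):

import pandas as pd

combi = pd.DataFrame({'Item_Type': ['Dairy', 'Canned', 'Others']})
conditions = [combi['Item_Type'].isin(perishable), combi['Item_Type'].isin(non_perishable)]
combi['Item_Type_new'] = np.select(conditions, ['perishable', 'non_perishable'], default='not_sure')
print(combi)  # Dairy -> perishable, Canned -> non_perishable, Others -> not_sure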

copy function does not copy one2many relation field (Odoo 10)

I want to copy all the information stored in the one2many relation when I duplicate the record from the view. All values except the one2many relation are copied. I have even tried to override the copy function to do it by hand, but there is something I must be doing wrong or have not yet understood.
Here is the code of the class:
class EmpleadosProductos(models.Model):
    _name = "employee.as.product"

    collection_lines = {}
    employee_line = fields.One2many(
        'employee.line',
        'order_id',
        string='Employee Lines'
    )
    state = fields.Selection([('creado', 'Creado'),
                              ('confirmado', 'Confirmado'),
                              ('cancelado', 'Cancelado'),
                              ('validado', 'Validado'),
                              ('pagado', 'Pagado')
                              ], string='Status', index=True, readonly=True, track_visibility='onchange', copy=False, default='creado', required=True, help='Estado del parte de empleado')
    companyias = fields.Many2one('res.partner', 'Obra', domain=[('is_company', '=', True)])
    amount_total = fields.Monetary(string='Total', store=True, readonly=True, compute='_calcularTotal')
    journal_entry = fields.Many2one('account.move')
    currency_id = fields.Many2one('res.currency', 'Currency', required=True, default=lambda self: self.env.user.company_id.currency_id.id)
    fecha = fields.Date('Fecha')
    referencia = fields.Char(string='Ref', required=True)
    # referencia = fields.Char(string='Ref', required=True)
    journal_id = fields.Many2one('account.journal', 'Journal')

    _sql_constraints = [
        ('refererecia_constraint', 'unique(referencia)', 'La referencia debe de ser única!'),
    ]

    @api.multi
    def action_confirmar(self):
        self.write({'state': 'confirmado'})

    @api.multi
    def action_cancelar(self):
        self.write({'state': 'cancelado'})

    @api.multi
    def action_validar(self):
        self.write({'state': 'validado'})

    @api.multi
    def action_pagar(self):
        self.write({'state': 'pagado'})

    @api.multi
    def copy(self, default=None):
        _logger.info("DEBBUG:" + " default " + str(default))
        _logger.info("DEBBUG:" + " self.employee_line " + str(self.employee_line.name_get()))
        for line in self.employee_line:
            _logger.info("DEBBUG:" + " self.employee_line " + str(line.name_get()))
        default = dict(default or {})
        default.update({
            'employee_line': self.employee_line,
            'referencia': '',
        })
        # _logger.info("DEBBUG:" + str(vals))
        return super(EmpleadosProductos, self).copy(default)
Try adding copy=True to the employee_line definition:
class EmpleadosProductos(models.Model):
    _name = 'employee.as.product'

    employee_line = fields.One2many(
        'employee.line',
        'order_id',
        string='Employee Lines',
        copy=True,
    )
After you do that, you should not need to extend/override the copy() method.
