copy function not copy one2many relation field(odoo 10) - python-3.x

I want to copy all the information stored in the one2many relation when I double the entry to the view. All values except for the on2many relation are copied. I have even tried to overwrite the copy function to do it by hand but there is something I must be doing wrong or that I have not come to understand.
Here its the code of the class
class EmpleadosProductos(models.Model):
_name = "employee.as.product"
collection_lines = {}
employee_line = fields.One2many(
'employee.line',
'order_id',
string='Employee Lines'
)
state = fields.Selection([('creado', 'Creado'),
('confirmado', 'Confirmado'),
('cancelado', 'Cancelado'),
('validado', 'Validado'),
('pagado', 'Pagado')
], string='Status', index=True, readonly=True, track_visibility='onchange', copy=False, default='creado', required=True, help='Estado del parte de empleado')
companyias = fields.Many2one('res.partner', 'Obra', domain=[('is_company', '=', True)])
amount_total = fields.Monetary(string='Total', store=True, readonly=True, compute='_calcularTotal')
journal_entry = fields.Many2one('account.move')
currency_id = fields.Many2one('res.currency', 'Currency', required=True, default=lambda self: self.env.user.company_id.currency_id.id)
fecha = fields.Date('Fecha')
referencia = fields.Char(string='Ref', required=True)
# referencia = fields.Char(string='Ref', required=True)
journal_id = fields.Many2one('account.journal', 'Journal')
_sql_constraints = [
('refererecia_constraint', 'unique(referencia)', 'La referencia debe de ser única!'),
]
#api.multi
def action_confirmar(self):
self.write({'state': 'confirmado'})
#api.multi
def action_cancelar(self):
self.write({'state': 'cancelado'})
#api.multi
def action_validar(self):
self.write({'state': 'validado'})
#api.multi
def action_pagar(self):
self.write({'state': 'pagado'})
#api.multi
def copy(self, default):
_logger.info("DEBBUG:" + " default " + str(default))
_logger.info("DEBBUG:" + " self.employee_line " + str(self.employee_line.name_get()))
for line in self.employee_line:
_logger.info("DEBBUG:" + " self.employee_line " + str(line.name_get()))
default = dict(default or {})
default.update({
'employee_line' : self.employee_line,
'referencia': '',
})
# _logger.info("DEBBUG:" + str(vals))
return super(EmpleadosProductos, self).copy(default)

Try adding copy=True to employee_line definition:
class EmpleadosProductos(models.Model):
_name = 'employee.as.product'
employee_line = fields.One2many(
'employee.line',
'order_id',
string='Employee Lines',
copy=True,
)
After you do that, you should not need to extend/override the copy() method.

Related

Dataframe becoming empty after calling a method in DataWriter class that deletes records from delta table

How can I prevent the dataframe data from becoming empty after calling the delete_processed_data() method in my DataWriter class that also has a register_processed_data() method which inserts data into a delta table?
I'm not overwriting the dataframe and it's only being used as a condition to check if it has any data with a count.
Here's my complete code (databricks notebook):
from datetime import *
import pandas as pd
from dn.utils import table
import pyspark.sql.functions as F
from delta.tables import *
from pyspark.sql.types import *
import json
import pytz
import calendar
list_countries = (
table.get_silver_table(table_name='stpos_dim_itemticket')
.select('pais')
.distinct()
)
list_countries = [row.pais for row in list_countries.collect()]
# Include "Todos" option
list_countries.insert(0, 'Todos')
dbutils.widgets.removeAll()
dbutils.widgets.text(name='category', defaultValue='Todos', label='Categoria')
dbutils.widgets.text(name='today', defaultValue=str(date.today()), label='Fecha proceso')
dbutils.widgets.dropdown(name="country", defaultValue='Todos', choices=list_countries, label="Pais")
dbutils.widgets.dropdown(name='forced_load', defaultValue='no', choices=['si', 'no'], label='Forzar carga')
dbutils.widgets.dropdown(name="reprocessing", defaultValue='si', choices=['si', 'no'], label="Reproceso")
country = dbutils.widgets.get('country').strip()
category = dbutils.widgets.get("category").strip()
today = datetime.strptime(dbutils.widgets.get('today').strip(), '%Y-%m-%d')
wave_date = today.replace(day=1)
forced_load = dbutils.widgets.get('forced_load').strip()
reprocessing = dbutils.widgets.get('reprocessing').lower().strip()
print(f"Categoria: {category}")
print(f"Fecha proceso: {today.strftime('%Y-%m-%d')}")
print(f"Pais: {country}")
print(f"Forzar carga: {forced_load}")
print(f'Reproceso: {reprocessing}')
print(f"Fecha ola: {wave_date.strftime('%Y-%m-%d')}")
class DataExtractor():
def __init__(self, category, today, country, list_countries, wave_date, reprocessing, forced_load):
self.category = category
self.today = today
self.country = country
self.list_countries = list_countries
self.wave_date = wave_date
self.reprocessing = reprocessing
self.forced_load = forced_load
if self.reprocessing == 'no' or self.forced_load == 'si':
self.days_for_wave = self.get_days_for_wave()
if self.country.lower() == 'todos':
self.country_condition = "lower(pais) = lower(pais)"
else:
self.country_condition = f"lower(pais) = lower('{country}')"
if self.category.lower() == 'todos':
self.category_condition = "lower(categoria) = lower(categoria)"
else:
self.category_condition = f"lower(categoria) = lower('{category}')"
def get_days_for_wave_by_country(self, country, path_file):
days_for_wave = (
spark.read.format("com.crealytics.spark.excel")
.option("header", "true")
.option("treatEmptyValuesAsNulls", "true")
.option("inferSchema", "true")
.load(path_file)
.where(f"fecha_ola = '{self.wave_date}'")
.where(f"lower(pais) = lower('{country}')")
.selectExpr(
"fecha_ola",
"to_date(fecha) as fecha_transaccion",
"pais")
)
if days_for_wave.count() == 0:
# Año y mes deseado
year = self.wave_date.year
month = self.wave_date.month
# Obtener el número de días en el mes especificado
_, num_days = calendar.monthrange(year, month)
# Crear una lista con todos los días del mes
days = [(date(year, month, day),) for day in range(1, num_days+1)]
# Convertir cada fecha a una cadena de texto
days_str = [(day[0].strftime("%Y-%m-%d"),) for day in days]
# Convert list to dataframe
days_for_wave = (
spark.createDataFrame(days_str)
.withColumnRenamed("_1", "fecha_transaccion")
.withColumn("fecha_ola", F.lit(self.wave_date))
.withColumn("pais", F.lit(country))
.selectExpr(
"fecha_ola",
"to_date(fecha_transaccion) AS fecha_transaccion",
"pais")
)
print(f"Loaded {days_for_wave.count()} days for wave {self.wave_date.strftime('%Y-%m-%d')} and country {country}")
return days_for_wave
def get_days_for_wave(self):
"""
Get the days for the wave
"""
# Load dim_dia_ola.xlsx with wave definition
path_file = "dbfs:/mnt/storetrack/transitraw/dichterneira/storelive/dim_dia_ola.xlsx"
print(f'Loading days for wave from file: {path_file}...')
if self.country.lower() == 'todos':
# Get list of countries (excluding 'Todos')
list_of_countries = self.list_countries[1:]
else:
list_of_countries = [self.country]
schema = StructType([
StructField("fecha_ola", TimestampType(), nullable=True),
StructField("fecha_transaccion", DateType(), nullable=True),
StructField("pais", StringType(), nullable=True)
])
# Crear un DataFrame vacío con el esquema especificado
days_for_wave = spark.createDataFrame([], schema=schema)
for country in list_of_countries:
days_for_wave_by_country = self.get_days_for_wave_by_country(country, path_file)
max_day_of_wave = days_for_wave_by_country.agg(F.max("fecha_transaccion")).collect()[0][0]
if self.today.date() > max_day_of_wave and self.forced_load == 'no':
print(f"Today {self.today.strftime('%Y-%m-%d')} is not included in wave days for country {country} and wave {self.wave_date.strftime('%Y-%m-%d')}")
else:
if country == list_of_countries[0]:
days_for_wave = days_for_wave_by_country
else:
days_for_wave = days_for_wave.union(days_for_wave_by_country)
return days_for_wave
def get_data_items(self):
"""
Filter sales by category, wave and country
"""
if self.reprocessing == 'si' and self.forced_load == 'no':
sales_filtered = (
table.get_silver_table(table_name='sl_fact_item_ticket')
.where(f"fecha_ola = '{self.wave_date}'")
.where(self.country_condition)
.where(self.category_condition)
)
else:
sales_filtered = (
table.get_silver_table(table_name='stpos_dim_itemticket')
.drop("fecha_ola")
.where(self.country_condition)
.where(self.category_condition)
.selectExpr("*", "to_date(date) as fecha_transaccion")
.join(self.days_for_wave, ["fecha_transaccion", "pais"], how="inner")
.drop("fecha_transaccion")
)
print(f"{sales_filtered.count()} items loaded. [Get data items]")
return sales_filtered
def get_product_catalog(self):
product_catalog = (
table.get_bronze_table(table_name='brz_catalogo_productos', module_name='catalogo')
.where(self.country_condition)
.selectExpr(
"upc as barcode",
"pais",
"categoria",
"marca",
"submarca",
"fabricante",
"""CASE WHEN lower(split(contenido, ' ')[1]) = 'ml' THEN 'L'
WHEN lower(split(contenido, ' ')[1]) = 'gr' THEN 'Kg'
WHEN lower(split(contenido, ' ')[1]) = 'und' THEN 'Und'
END AS unidad_std""",
"conversion AS contenido_std",
"split(contenido, ' ')[0] AS contenido",
"split(contenido, ' ')[1] AS unidad_medida",
"idref AS id_ref"
)
)
return product_catalog
class DataEnricher():
def __init__(self, reprocessing, forced_load):
self.reprocessing = reprocessing
self.forced_load = forced_load
def rename_fields(self, df_item):
if self.reprocessing == 'no' or self.forced_load == 'si':
print("Renaming fields...")
df_item = (
df_item
.selectExpr(
'CAST(fecha_ola AS DATE) AS fecha_ola',
'pdv AS nombre_pdv',
'marca',
'submarca',
'pais',
'contenido',
'unidad_medida',
'CAST(cantidad AS DOUBLE) as cantidad',
'CAST(precio_local AS DOUBLE) as precio_local',
'barcode',
'date AS fecha_transaccion',
'categoria',
'categoria_name',
'descripcion',
'id_ref',
'posdb_id',
'id_ticket',
'id_item',
'id_pdv',
'venta_usd',
'venta_local',
'precio_usd',
'id_canasto'
)
)
return df_item
def calculate_standard_fields(self, df_item):
if self.reprocessing == 'no' or self.forced_load == 'si':
print("Caculating standard fields...")
df_item = (
df_item
# Add column with converted Ml to L and Gr to Kg
.withColumn("contenido_std",
F.when(F.col("unidad_medida") == "Ml", F.col("contenido") / 1000)
.when(F.col("unidad_medida") == "Gr", F.col("contenido")/1000)
.otherwise(F.col("contenido")))
.withColumn("unidad_std",
F.when(F.col("unidad_medida") == "Ml", F.lit("L"))
.when(F.col("unidad_medida") == "Gr", F.lit("Kg")))
)
return df_item
def calculate_fields(self, df_items):
"""
Set the time zone of the dataframe
"""
if self.reprocessing == 'no' or self.forced_load == 'si':
print("Calulating time zone field...")
# Create dataframe with the time zone
time_zone = [(1, '05:00:00', '09:59:59'),
(2, '10:00:00', '13:59:59'),
(3, '14:00:00', '19:59:59'),
(4, '20:00:00', '23:59:59'),
(4, '00:00:00', '04:59:59')]
time_zone = spark.createDataFrame(time_zone, ['id_franja', 'inicio', 'fin'])
# Convert inicio and fin to datetime
time_zone = (
time_zone
.withColumn("inicio", F.to_timestamp(F.col("inicio"), "HH:mm:ss"))
.withColumn("fin", F.to_timestamp(F.col("fin"), "HH:mm:ss"))
)
df_items = (
df_items
.withColumn("hora_transaccion", F.substring(F.col("fecha_transaccion"), 12, 8))
.withColumn("hora_transaccion", F.to_timestamp(F.col("hora_transaccion"), "HH:mm:ss"))
.join(time_zone, on=F.col("hora_transaccion").between(F.col("inicio"), F.col("fin")), how="left")
.drop("hora_transaccion", "inicio", "fin")
)
return df_items
def update_product_features(self, data, product_catalog):
if data.count() > 0:
print("Updating fields from brz_catalogo_productos")
data = (
data
.drop("categoria", "marca", "submarca", "fabricante", "unidad_std", "contenido_std", "contenido", "unidad_medida", "id_ref")
.join(product_catalog, on=["barcode", "pais"], how="left")
)
return data
class DataWriter():
def __init__(self, wave_date, country, category):
self.wave_date = wave_date
self.country = country
self.category = category
if self.country.lower() == 'todos':
self.country_condition = "lower(pais) = lower(pais)"
else:
self.country_condition = f"lower(pais) = lower('{country}')"
if self.category.lower() == 'todos':
self.category_condition = "lower(categoria) = lower(categoria)"
else:
self.category_condition = f"lower(categoria) = lower('{category}')"
def delete_processed_data(self, datos):
df_categoria_activa = (
table.get_bronze_table(
table_name='sl_configuracion_procesamiento_zona_silver',
module_name='storetrack'
)
.where(f"fecha_ola = '{wave_date}' and lower(trim(procesar)) = 'si'")
.where(self.country_condition)
.where(self.category_condition)
.selectExpr(
"categoria",
"pais",
"fecha_ola"
)
)
if datos.count() > 0:
display(datos.where("categoria = 'Galletas dulces'"))
table_path = table.get_silver_table_path(table_name="sl_fact_item_ticket")
deltaTableToWrite = DeltaTable.forPath(spark, table_path)
print("Deleting old rows...")
deltaTableToWrite.alias('current')\
.merge(
df_categoria_activa.alias('delete'),
'current.pais = delete.pais AND current.categoria = delete.categoria AND current.fecha_ola = delete.fecha_ola')\
.whenMatchedDelete()\
.execute()
display(datos.where("categoria = 'Galletas dulces'"))
def register_processed_data(self, data):
if data.count() > 0:
print("Inserting new rows...")
display(data.where("categoria = 'Galletas dulces'"))
table_path = table.get_silver_table_path(table_name="sl_fact_item_ticket")
deltaTableToWrite = DeltaTable.forPath(spark, table_path)
deltaTableToWrite.alias('current')\
.merge(
data.alias('new'),
'current.id_item = new.id_item AND current.fecha_ola = new.fecha_ola')\
.whenNotMatchedInsert(values =
{
"fecha_ola": "new.fecha_ola",
"marca": "new.marca",
"submarca": "new.submarca",
"pais": "new.pais",
"contenido": "new.contenido",
"unidad_medida": "new.unidad_medida",
"cantidad": "new.cantidad",
"precio_local": "new.precio_local",
"barcode": "new.barcode",
"fecha_transaccion": "new.fecha_transaccion",
"categoria": "new.categoria",
"categoria_name": "new.categoria_name",
"descripcion": "new.descripcion",
"id_ref": "new.id_ref",
"posdb_id": "new.posdb_id",
"id_ticket": "new.id_ticket",
"id_item": "new.id_item",
"id_pdv": "new.id_pdv",
"venta_usd": "new.venta_usd",
"venta_local": "new.venta_local",
"precio_usd": "new.precio_usd",
"nombre_pdv": "new.nombre_pdv",
"contenido_std": "new.contenido_std",
"unidad_std": "new.unidad_std",
"id_canasto": "new.id_canasto",
"id_franja": "new.id_franja"
}
)\
.execute()
display(data.where("categoria = 'Galletas dulces'"))
print(f"{data.count()} items loaded. [Write processed data]")
else:
print("No data to save in silver.sl_fact_item_ticket")
if __name__ == '__main__':
data_extractor = DataExtractor(category, today, country, list_countries, wave_date, reprocessing, forced_load)
data = data_extractor.get_data_items()
product_catalog = data_extractor.get_product_catalog()
cleaner = DataCleaner(wave_date, country, category, reprocessing, forced_load)
data = cleaner.clean_data(data)
data_enricher = DataEnricher(reprocessing, forced_load)
data = data_enricher.rename_fields(data)
data = data_enricher.calculate_standard_fields(data)
data = data_enricher.calculate_fields(data)
data = data_enricher.update_product_features(data, product_catalog)
data_write = DataWriter(wave_date, country, category)
data_write.delete_processed_data(data)
data_write.register_processed_data(data)
The parameters with which I am running the notebook are:
Categoria: Todos
Fecha proceso: 2022-12-01
Pais: Todos
Forzar carga: no
Reproceso: si
Fecha ola: 2022-12-01
The following output is displayed:
993313 items loaded. [Get data items]
62023 items loaded. [Remove blocked categories]
Updating fields from brz_catalogo_productos
[DISLPLAY ROWS DATAFRAME data]
Deleting old rows...
Query returned no results
No data to save in silver.sl_fact_item_ticket
Any insights on why the dataframe is getting cleared would be greatly appreciated.

how to work with foreign key field in django

Hi Everyone i am working work django framework, where i used to upload excel file in Dailytrip table, current i get car_mumber from car table, but now i need to store car_number from Car_team table also team_id, i am storing car_id and team_id in car_team table also i need to store team_id in dailytrip table automaticlly based on car_id(car_number) i am to much confuse how to i work that, pls help me out
models.py
class Car_team(BaseModel):
team = models.ForeignKey(
Team,
models.CASCADE,
verbose_name='Team',
null=True,
)
car=models.ForeignKey(
Car,
models.CASCADE,
verbose_name='Car',
null=True)
city =models.ForeignKey(
City,
models.CASCADE,
verbose_name='City',
)
start_date=models.DateField(null=True, blank=True)
end_date=models.DateField(null=True, blank=True)
views.py
def add_payout_uber_daily_data(request):
if request.method == 'POST':
form = UberPerformanceDataForm(request.POST, request.FILES, request=request)
if form.is_valid():
date = form.cleaned_data['date']
excel_file = request.FILES['file']
df = pd.read_excel(excel_file)
is_na = pd.isna(df['Date']).sum().sum() + pd.isna(df['Name']).sum().sum() + pd.isna(df['UUID']).sum().sum() + pd.isna(df['Net Fare With Toll']).sum().sum() + pd.isna(df['Trips']).sum().sum() + pd.isna(df['Uber KMs']).sum().sum() + pd.isna(df['CashCollected']).sum().sum() + pd.isna(df['UberToll']).sum().sum() + pd.isna(df['Tips']).sum().sum() + pd.isna(df['Hours Online']).sum().sum() + pd.isna(df['Ratings']).sum().sum() + pd.isna(df['Acceptance Rate']).sum().sum() + pd.isna(df['Cancellation Rate']).sum().sum()
error_list = []
if is_na > 0:
error_list.append('Found #N/A or blank values in the sheet. Please correct and re-upload')
context = {'error_list': error_list, 'menu_payout': 'active','submenu_daily_data': 'active','form': form, }
return render(request, 'add_payout_uber_daily_data.html', context=context)
date_match = True
for d in df['Date']:
if str(d.strftime("%Y-%m-%d")) != str(date):
date_match = False
break
if not date_match:
error_list.append('Some dates are not matching in excel')
if len(error_list) > 0:
context = {'error_list': error_list, 'menu_payout': 'active','submenu_daily_data': 'active','form': form, }
return render(request, 'add_payout_uber_daily_data.html', context=context)
DailyTrip.objects.filter(date=date).update(is_active=0)
for i in df.index:
uuid = df['UUID'][i]
driver_id = None
car_id = None
fleet_id = None
manager_id = None
try:
driver = Driver.objects.get(uber_uuid=uuid)
driver_id = driver.id
except Driver.DoesNotExist:
driver_id = None
#replce car code and store car_number,car_id,team_id via car_team only this logic need to change current get car_number direct car table but we need car_number vai foriegn key
try:
car = Car.objects.get(car_number=df["Car Number"][i])
car_id = car.id
manager_id = car.manager_id
except Car.DoesNotExist:
car_id = None
try:
fleet = Fleet.objects.get(name=df["Fleet Name"][i])
fleet_id = fleet.id
except Fleet.DoesNotExist:
fleet_id = None
name = df['Name'][i]
car_number = df['Car Number'][i]
fare_total = df['Net Fare With Toll'][i]
trips = df['Trips'][i]
pool_trips = 0
hours_online = df['Hours Online'][i]
total_km = df['Uber KMs'][i]
cash_collected = abs(df['CashCollected'][i])
toll = df['UberToll'][i]
tip_amount = df['Tips'][i]
fare_avg = float(fare_total)/int(trips)
fare_per_hour_online = float(fare_total)/float(hours_online)
fare_per_km = fare_total/total_km
trips_per_hour = trips/hours_online
km_per_trip = total_km/trips
rating = df['Ratings'][i]
acceptance_rate_perc = float(df['Acceptance Rate'][i])/100
driver_cancellation_rate = float(df['Cancellation Rate'][i])/100
obj, created = DailyTrip.all_objects.update_or_create(
date=date, uuid=uuid,
defaults={
'car_id': car_id,
'manager_id': manager_id,
'car_number': car_number,
'driver_id': driver_id,
'car_id': car_id,
'fleet_id': fleet_id,
'driver_name': name,
'fare_total': fare_total,
'trips': trips,
'pool_trips': pool_trips,
'hours_online': hours_online,
'total_km': total_km,
'cash_collected': cash_collected,
'toll': toll,
'tip_amount': tip_amount,
'fare_avg': fare_avg,
'fare_per_hour_online':fare_per_hour_online,
'fare_per_km':fare_per_km,
'trips_per_hour': trips_per_hour,
'km_per_trip': km_per_trip,
'rating': rating,
'acceptance_rate_perc': acceptance_rate_perc,
'driver_cancellation_rate': driver_cancellation_rate,
'is_active': 1,
'comments': None}
)
if len(error_list) > 0:
DailyTrip.objects.filter(date=date).update(is_active=0)
context = {'error_list': error_list, 'menu_payout': 'active','submenu_daily_data': 'active','form': form, }
return render(request, 'add_payout_uber_daily_data.html', context=context)
else:
messages.success(request, 'Daily Trips added Successfully...')
return redirect('/fleet/payout/daily_data/add/uber')
else:
form = UberPerformanceDataForm(initial={})
context = {
'menu_payout': 'active',
'submenu_daily_data': 'active',
'form': form,
}
return render(request, 'add_payout_uber_daily_data.html', context=context)
You can try that :
to get car_number from car_team -->
car_team = car_team.objects.objects.all().last() # to get the last car_team for example
car_number = car_team.car.car_number # to get the car number from the car_team
try:
car = Car.objects.get(car_number=df["Car Number"][i])
car_id = car.id
car1=Car_team.objects.filter(car_id=car_id)
if car1:
team_id=car1[0].team_id
else:
team_id=None
except Car.DoesNotExist:
car_id = None
team_id= None

Initialize Model Class Variable At Runtime

I am trying to import student data from an Excel workbook. I have to select column_name of the class StudentMasterResource dynamically which is present in the file. I got all column name in constants module has one dictionary which name column_name. When I do it for the first time, it works, then it fails
constants.py
column_name = dict()
resource.py
from common_account import constants
from import_export import widgets, fields, resources
def getClassName(key):
if key in constants.column_name:
return constants.column_name[key]
return key
class StudentMasterResource(resources.ModelResource):
organisation_id = fields.Field(
column_name=getClassName('organisation_id'),
attribute='organisation_id',
widget=widgets.ForeignKeyWidget(OrganisationMaster, 'organisation_name'),
saves_null_values=True
)
name = fields.Field(
column_name=getClassName('Name'),
attribute='name',
saves_null_values=True,
widget=widgets.CharWidget()
)
date_of_birth = fields.Field(
column_name=getClassName('date'),
attribute='date_of_birth',
saves_null_values=True,
widget=widgets.DateWidget()
)
views.py
from common_account import constants
from tablib import Dataset
#api_view(['POST'])
#permission_classes([IsAuthenticated])
def student_import(request):
if request.method == 'POST':
context_data = dict()
data_set = Dataset()
file = request.FILES['myfile']
extension = file.name.split(".")[-1].lower()
column_data = request.data
is_import = column_name['is_import']
constants.valid_data.clear()
constants.invalid_data.clear()
if extension == 'csv':
data = data_set.load(file.read().decode('utf-8'), format=extension)
else:
data = data_set.load(file.read(), format=extension)
constants.column_name = {
'date' : column_data.get('birth'),
'name' : column_data.get('name'),
}
if is_import == 'No':
result = student_resource.import_data(data_set, organisation_id = request.user.organisation_id,
offering_id = offering_id,all_invalid_data = False, dry_run=True, raise_errors=True)
context_data['valid_data'] = constants.valid_data
context_data['invalid_data'] = constants.invalid_data
context_data[constants.RESPONSE_RESULT] = {"Total records":student_resource.total_cnt,
"skip records":len(constants.invalid_data),
"Records imported": len(constants.valid_data),
}
return JsonResponse(context_data)
elif is_import == 'Yes':
result = student_resource.import_data(data_set, organisation_id = request.user.organisation_id,
offering_id = offering_id,all_invalid_data = False, dry_run=False, raise_errors=False)
context_data[constants.RESPONSE_ERROR] = False
context_data[constants.RESPONSE_MESSAGE] = 'Data Imported !!!'
context_data[constants.RESPONSE_RESULT] = {"Total records":student_resource.total_cnt,
"skip records":len(constants.invalid_data),
"Records imported": len(constants.valid_data),
}
return JsonResponse(context_data)

Problem with super() using Multiple Inheritance with 2 Parent Classes and 1 Base Class

The error i'm having is:
print("Cód. Avião:", self.codigo_aviao)
AttributeError: 'Comprar_Bilhete' object has no attribute 'codigo_aviao'
I tried several options with super() but got no luck.
How can the Class "Comprar_Bilhete" grab the attribute 'codigo_aviao'?
I tried to put it in super() as a parameter but it gave me an error too!
class Pessoa():
def __init__(self, nome, apelido, idade, cc, nacionalidade):
self.nome = nome
self.apelido = apelido
self.idade = idade
self.cartaocidadao = cc
self.nacionalidade = nacionalidade
def visualizar_pessoa(self):
print("O", self.nome, "\b", self.apelido, "tem", self.idade, "anos, possui Cartão do Cidadão com o Nº", self.cartaocidadao, "e tem nacionalidade", self.nacionalidade, " \b.")
class Voo():
def __init__(self, companhia, cod_voo, cod_aviao, data_partida, horario_partida, data_chegada, horario_chegada, aeroporto_partida, terminal_aeroporto_partida,
aeroporto_chegada, terminal_aeroporto_chegada, tipo_de_bagagem): # Construtor.
self.companhia_aerea = companhia
self.codigo_aviao = cod_aviao
self.codigo_voo = cod_voo
self.data_voo_partida = data_partida
self.horario_partida = horario_partida
self.data_voo_chegada = data_chegada
self.horario_chegada = horario_chegada
self.aeroporto_partida = aeroporto_partida
self.terminal_aeroporto_partida = terminal_aeroporto_partida
self.aeroporto_chegada = aeroporto_chegada
self.terminal_aeroporto_chegada = terminal_aeroporto_chegada
self.tipo_de_bagagem = tipo_de_bagagem
def visualizar_dados_aviao(self):
print("Companhia:", self.companhia_aerea, "\nCód. Avião:", self.codigo_aviao)
class Comprar_Bilhete(Pessoa, Voo):
def __init__(self, nome, apelido, idade, cc, nacionalidade, companhia, cod_voo, cod_aviao, data_partida, horario_partida, data_chegada, horario_chegada,
aeroporto_partida, terminal_aeroporto_partida, aeroporto_chegada, terminal_aeroporto_chegada, tipo_de_bagagem, preco):
super().__init__(nome, apelido, idade, cc, nacionalidade) # Usou-se o "super()" para tornar o código mais simples, em relação aos Atributos da Classe "Pessoa".
self.preco_bilhete = preco
def visualizar_custo_bilhete(self):
print("O preço do bilhete de avião são", self.preco_bilhete, "euros.")
def visualizar_pessoa(self):
print("O", self.nome, "\b", self.apelido, "tem", self.idade, "anos.")
def visualizar_dados_aviao(self):
print("Cód. Avião:", self.codigo_aviao)
cliente1 = Comprar_Bilhete("Pedro", "Figueiredo", 49, 9876543, "Portuguesa", "Easyjet", "EJ1011", "FT4537", "27-08-2020", "23:05", "28-08-2020", "01:45",
"Humberto Delgado - Lisboa - PT", "Terminal 1", "Stansted - Hertfordshire - UK", "Terminal 3", "Bagagem de Porão + Mala de Mão", 275.48)
cliente1.visualizar_custo_bilhete()
print()
cliente1.visualizar_pessoa()
print()
cliente1.visualizar_dados_aviao()
print()

Overwrite a field: odoo.exceptions.CacheMiss: ('stock.picking.batch(31,).picking_batch_moves', None)

I'm trying to group all moves in the stock.picking in the stock.picking.batch
it work fine, but I got this error when I want to overwrite the batch_id in stock.picking:
File "/opt/odoo/odoo12/odoo/api.py", line 1051, in get
raise CacheMiss(record, field)
odoo.exceptions.CacheMiss: ('stock.picking.batch(31,).picking_batch_moves', None)
this is my code:
class StockMove(models.Model):
_inherit = 'stock.move'
pbm_id = fields.Many2one('stock.picking.batch.move', string='Batche moves')
class StockPickingBatchLine(models.Model):
_name = 'stock.picking.batch.move'
_description = 'Opération des mouvement des transfer'
batch_id = fields.Many2one(
'stock.picking.batch', string='Picking batch', required=True, ondelete='cascade')
product_id = fields.Many2one(
'product.product', string='Produit', readonly=True, required=True)
product_uom_id = fields.Many2one(
'uom.uom', string='Unité de mesure', readonly=True, required=True)
product_uom_qty = fields.Float('A faire', default=0.0, digits=dp.get_precision('Product Unit of Measure'),
readonly=True, )
location_id = fields.Many2one(
'stock.location', 'From', readonly=True, required=True)
location_dest_id = fields.Many2one(
'stock.location', 'To', readonly=True, required=True)
move_lines = fields.One2many(
'stock.move', 'pbm_id', string='Movement de stock')
class StockPickingBatch(models.Model):
_inherit = 'stock.picking.batch'
picking_batch_moves = fields.One2many('stock.picking.batch.move', 'batch_id', string='Lignes des mouvements',
compute='_compute_picking_get_batch_lines', readonly=False, store=True,
)
#api.depends('picking_ids', 'picking_ids.move_lines')
def _compute_picking_get_batch_lines(self):
batch_moves_obj = self.env['stock.picking.batch.move']
linked = self.env['stock.picking.batch.move']
ml_ids = self.env['stock.picking.batch.move.line']
for batch in self:
if isinstance(batch.id, models.NewId):
continue
req = """
SELECT sp.batch_id
,product_id
,product_uom product_uom_id
,sm.location_id
,sm.location_dest_id
,sm.state
,sm.picking_type_id
,sum(product_uom_qty) product_uom_qty
,array_agg(DISTINCT sm.id) moves
FROM stock_move sm
JOIN stock_picking sp ON sp.id = sm.picking_id
WHERE sp.batch_id IN (%s)
GROUP BY sp.batch_id
,product_id
,product_uom
,sm.location_id
,sm.state
,sm.picking_type_id
,sm.location_dest_id"""
self.env.cr.execute(req, (batch.id,))
fetched_lines = self.env.cr.fetchall()
batch_moves = batch_moves_obj.search([('batch_id', '=', batch.id)])
linked = batch_moves_obj
move_lines = []
for line in fetched_lines:
# search for existing line to update
matched = batch_moves.filtered(lambda x: x.product_id.id == line[1] and
x.product_uom_id.id == line[2] and
x.location_id.id == line[3] and
x.location_dest_id.id == line[4] and
x.state == line[5]
)
line_data = {
'batch_id': batch.id,
'product_id': line[1],
'product_uom_id': line[2],
'location_id': line[3],
'location_dest_id': line[4],
'state': line[5],
'picking_type_id': line[6],
'product_uom_qty': line[7],
'move_lines': [(6, 0, line[8])],
}
move_lines.extend(line[8])
if matched.exists():
matched.with_context(recompute=False).write(line_data)
linked += matched
else:
linked += batch_moves_obj.with_context(
recompute=False).create(line_data)
batch.picking_batch_moves = linked or False
Try:
batch.write({'picking_batch_moves': linked or False})
It should work

Resources