how to insert multiple rows into a sqlite3 database - python-3.x

I am trying to select rows from a DB table, collect them in a list, and then insert all of the rows at once into another table, but I got an error.
def paid_or_returned_buyingchecks(self):
    date = datetime.now()
    now = date.strftime('%Y-%m-%d')
    self.tenlistchecks = []
    self.con = sqlite3.connect('car dealership.db')
    self.cursorObj = self.con.cursor()
    self.dashboard_buying_checks_dates = self.cursorObj.execute(
        "select id, paymentdate, paymentvalue, car, sellername from cars_buying_checks where nexttendays=?", (now,))
    self.dashboard_buying_checks_dates_output = self.cursorObj.fetchall()
    self.tenlistchecks.append(self.dashboard_buying_checks_dates_output)
    print(self.tenlistchecks)
    self.dashboard_buying_checks_dates = self.cursorObj.executemany(
        "insert into paid_buying_checks VALUES(?,?,?,?,?)", [self.tenlistchecks])
    self.con.commit()
but I got this error:
[[(120, '21-08-2022', '1112', 'Alfa Romeo', 'james'), (122, '21-08-2022', '465', 'Buick', 'daniel '), (123, '21-08-2022', '789', 'Buick', 'daniel ')]]
self.dashboard_buying_checks_dates = self.cursorObj.executemany(
sqlite3.ProgrammingError: Incorrect number of bindings supplied. The current statement uses 5, and there are 1 supplied.

self.cursorObj.fetchall() returns a list of tuples, which is exactly what executemany expects, so pass it directly:
self.cursorObj.executemany("insert into paid_buying_checks VALUES(?,?,?,?,?)", self.tenlistchecks)
not
self.cursorObj.executemany("insert into paid_buying_checks VALUES(?,?,?,?,?)", [self.tenlistchecks])
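For reference, a minimal sketch of the corrected flow (same table and column names as above): appending the result of fetchall() to another list adds one extra level of nesting, so either pass the fetched rows straight to executemany or use extend instead of append.

rows = self.cursorObj.execute(
    "select id, paymentdate, paymentvalue, car, sellername "
    "from cars_buying_checks where nexttendays=?", (now,)).fetchall()

# rows is already a list of 5-value tuples, one per record
self.cursorObj.executemany(
    "insert into paid_buying_checks VALUES (?,?,?,?,?)", rows)
self.con.commit()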

Related

Python list add variables in rows

I'm trying to add variables to a list that I created from the result of a session.execute.
I've done this:
def machine_id(session, machine_serial):
    stmt_raw = '''
        SELECT
            id
        FROM
            machine
        WHERE
            machine.serial = :machine_serial_arg
    '''
    utc_now = datetime.datetime.utcnow()
    utc_now_iso = pytz.utc.localize(utc_now).isoformat()
    utc_start = datetime.datetime.utcnow() - datetime.timedelta(days=30)
    utc_start_iso = pytz.utc.localize(utc_start).isoformat()
    stmt_args = {
        'machine_serial_arg': machine_serial,
    }
    stmt = text(stmt_raw).columns(
        # ts_insert = ISODateTime
    )
    result = session.execute(stmt, stmt_args)
    ts = utc_now_iso
    ts_start = utc_start_iso
    ID = []
    for row in result:
        ID.append({
            'id': row[0],
            'ts': ts,
            'ts_start': ts_start,
        })
    return ID
I'm trying to get the result over an API like this:
def form_response(response, session):
    result_machine_id = machine_id(session, machine_serial)
    if not result_machine_id:
        response['Error'] = 'Seriennummer nicht vorhanden/gefunden'
        return
    response['id_timerange'] = result_machine_id
Output looks fine.
{
    "id_timerange": [
        {
            "id": 1,
            "ts": "2020-08-13T08:32:25.835055+00:00",
            "ts_start": "2020-07-14T08:32:25.835089+00:00"
        }
    ]
}
Now I only want the id from it, as a parameter for another function. The problem is that I think it's not a plain list: I can't select the first element, and result_machine_id[0] looks just like the posted output. I think in my first function I only add ts & ts_start to the first row? Is it possible to add empty rows and then add 'ts': ts as a value?
Help would be nice
If I have understood your question correctly ...
Your output looks like a dict, so access its id_timerange key, which gives you a list. Access the first element of that list, which gives you another dict. On this dict you have an id key:
result_machine_id["id_timerange"][0]["id"]
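As a minimal sketch (any function name not in the original code is hypothetical): machine_id already returns a list of dicts, so the first id can be taken either from that list directly or from the response dict built from it.

result_machine_id = machine_id(session, machine_serial)

# directly from the returned list ...
first_id = result_machine_id[0]['id']

# ... or from the response dict shown above
response['id_timerange'] = result_machine_id
first_id = response['id_timerange'][0]['id']

another_function(first_id)  # hypothetical downstream call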

write attribute_line_ids in product.template in odoo 12

I am getting field data from an xls file and creating a product.
Everything works fine except the one2many variant field in product.template.
How can I achieve this?
Here is my code:
main_product = self.env["product.template"].create(product_data)
attribute_ids = self.env["product.attribute"].search([])
attrib_id_set = set(ids.name for ids in attribute_ids)
product_attrib_ids = sheet.cell(suits, 13).value.split(",")
attrib_id_list = []
exist_attribute_list = []
for name in product_attrib_ids:
    if name not in attrib_id_set:
        attrib_id = self.env["product.attribute"].create({'name': name})
        attrib_id_list.append(attrib_id)
    else:
        exist_attribute = self.env["product.attribute"].search([('name', '=', name)])
        exist_attribute_list.append(exist_attribute)
union_list = list(set(attrib_id_list).union(exist_attribute_list))
exist_attribute_values = self.env["product.attribute.value"].search([])
exist_attrib_val_list = [attrib_name.name for attrib_name in exist_attribute_values]
product_attrib_id_values = sheet.cell(suits, 14).value.split(",")
for value in product_attrib_id_values:
    if value not in exist_attrib_val_list:
        for ids in union_list:
            attrib_value_id = self.env["product.attribute.value"].create({
                'attribute_id': ids.id,
                'name': value
            })
            main_product.write({
                'attribute_line_ids': [(0, 0, {
                    'attribute_id': ids.id, 'value_ids': (4, attrib_value_id.id)
                })]
            })
product_data is my dictionary of fields like name, sale_ok, type, categ_id, etc.
This works: the product is created, and the attributes and attribute values are even created,
but I cannot write the one2many variant field on product.template.
--EDIT--
values_lst = []
for value in product_attrib_id_values:
    if value not in exist_attrib_val_list:
        for ids in union_list:
            attrib_value_id = self.env["product.attribute.value"].create({
                'attribute_id': ids.id,
                'name': value
            })
    else:
        for ids in exist_attribute_values:
            if value == ids.name:
                attrib_value_id = self.env["product.attribute.value"].browse(ids.id)
    if attrib_value_id not in values_lst:
        values_lst.append(attrib_value_id)
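No accepted fix is shown in the thread, but as a hedged sketch of how attribute lines are usually written on a product.template in Odoo 12: value_ids is itself a *2many field, so it expects a list of command tuples rather than a bare (4, id) pair. The sketch below assumes values_lst holds product.attribute.value records, as in the edit above.

for attribute in union_list:
    # collect the value records created/found above that belong to this attribute
    value_ids = [v.id for v in values_lst if v.attribute_id.id == attribute.id]
    main_product.write({
        'attribute_line_ids': [(0, 0, {
            'attribute_id': attribute.id,
            # value_ids is a *2many field, so it takes a list of command tuples
            'value_ids': [(6, 0, value_ids)],
        })]
    })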

select on sub query peewee

I am wondering if I can do a select() on a sub_query.
I am able to join a sub_query with any peewee.Model. But when I wrote a sub_query and wanted to group by one of its columns, e.g.
sub_query.select(sub_query.c.column_1, fn.COUNT(sub_query.c.column2).alias('col2_count')).group_by(sub_query.c.column_1)
the query was not nested and gave a SQL syntax error.
(Can't reveal the code)
(I have done alias() on sub_query)
Edit
Example:
class Product(Model):
    id = PrimaryKeyField()
    name = CharField()
    created_date = DateField()

class Part(Model):
    id = PrimaryKeyField()
    product = ForeignKeyField(Product)
    name = CharField()

class ProductError(Model):
    id = PrimaryKeyField()
    product = ForeignKeyField(Product)
    note = CharField()

class PartError(Model):
    id = PrimaryKeyField()
    part = ForeignKeyField(Part)
    error = ForeignKeyField(ErrorMaster)
Here a Product can have a general error, and parts can have specific errors which are predefined in ErrorMaster.
I just want to know the count of products which have errors (a product error or an error in any part) against the total products, date-wise.
So the sub_query is something like:
sub_q = (Product.select(
             Product.created_date,
             Product.id.alias('product_id'),
             fn.IF(  # checks if the product has an error
                 ProductError.is_null(True),  # if no product error, check part errors
                 fn.IF(fn.COUNT(PartError.id) == 0, 0, 1),  # error count > 0 means a part has an error
                 1
             ).alias('is_error'))
         .join(Part, on=Product.id == Part.product)
         .join(ProductError, JOIN_LEFT_OUTER, on=Product.id == ProductError.product)
         .join(PartError, JOIN_LEFT_OUTER, on=PartError.part == Part.id)
         .where(Product.created_date.between(from_date, to_date))
         .group_by(Product.id).alias('some_alias'))

# below does not work, but I can do this in SQL
query = (sub_q.select(sub_q.c.created_date,
                      fn.COUNT(sub_q.c.product_id).alias('total_products'),
                      fn.SUM(sub_q.c.is_error).alias('product_with_errors'))
         .group_by(sub_q.c.created_date))
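No answer was posted here, but as a hedged sketch: in peewee 3 the usual way to select from a subquery is to keep the inner query as an aliased select and build the outer query with .from_() plus the .c magic attributes. The field names below come from the models above; the simplified aggregation is an assumption.

sub_q = (Product
         .select(Product.created_date,
                 Product.id.alias('product_id'))
         .where(Product.created_date.between(from_date, to_date))
         .group_by(Product.id)
         .alias('errs'))

# outer query selects FROM the aliased subquery instead of a model table
query = (Product
         .select(sub_q.c.created_date,
                 fn.COUNT(sub_q.c.product_id).alias('total_products'))
         .from_(sub_q)
         .group_by(sub_q.c.created_date))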

Updating multiple line plots dynamically in callback in bokeh

I have a use case where I have multiple line plots (with legends), and I need to update the line plots based on a column condition. Below is an example with two data sets: based on the country, the column data source changes. The issue I am facing is that the number of columns in the data source is not fixed, and even the types can vary. So, when I update the data source in a callback after a new country is selected, I get this error:
Error: attempted to retrieve property array for nonexistent field 'pay_conv_7d.content'.
I am guessing this is because the pay_conv_7d.content column doesn't exist in the new data source, but those lines were already in my plot. I have been trying to fix this issue by various means (making common columns for all country selections, adding the missing column to the data source in the callback), but I still get issues.
Is there any clean way to have multiple line plots updating via a callback, without a lot of hackish workarounds? Any insights or help would be really appreciated. Thanks much in advance! :)
def setup_multiline_plots(x_axis, y_axis, title_text, data_source, plot):
    num_categories = len(data_source.data['categories'])
    legends_list = list(data_source.data['categories'])
    colors_list = Spectral11[0:num_categories]
    # xs = [data_source.data['%s.' % x_axis].values] * num_categories
    # ys = [data_source.data[('%s.%s') % (y_axis, column)] for column in data_source.data['categories']]
    # data_source.data['x_series'] = xs
    # data_source.data['y_series'] = ys
    # plot.multi_line('x_series', 'y_series', line_color=colors_list, legend='categories', line_width=3, source=data_source)
    plot_list = []
    for (colr, leg, column) in zip(colors_list, legends_list, data_source.data['categories']):
        xs, ys = '%s.' % x_axis, ('%s.%s') % (y_axis, column)
        plot.line(xs, ys, source=data_source, color=colr, legend=leg, line_width=3, name=ys)
        plot_list.append(ys)
    data_source.data['plot_names'] = data_source.data.get('plot_names', []) + plot_list
    plot.title.text = title_text

def update_plot(country, timeseries_df, timeseries_source,
                aggregate_df, aggregate_source, category,
                plot_pay_7d, plot_r_pay_90d):
    aggregate_metrics = aggregate_df.loc[aggregate_df.country == country]
    aggregate_metrics = aggregate_metrics.nlargest(10, 'cost')
    category_types = list(aggregate_metrics[category].unique())
    timeseries_df = timeseries_df[timeseries_df[category].isin(category_types)]
    timeseries_multi_line_metrics = get_multiline_column_datasource(timeseries_df, category, country)
    # len_series = len(timeseries_multi_line_metrics.data['time.'])
    # previous_legends = timeseries_source.data['plot_names']
    # current_legends = timeseries_multi_line_metrics.data.keys()
    # common_legends = list(set(previous_legends) & set(current_legends))
    # additional_legends_list = list(set(previous_legends) - set(current_legends))
    # for legend in additional_legends_list:
    #     zeros = pd.Series(np.array([0] * len_series), name=legend)
    #     timeseries_multi_line_metrics.add(zeros, legend)
    # timeseries_multi_line_metrics.data['plot_names'] = previous_legends
    timeseries_source.data = timeseries_multi_line_metrics.data
    aggregate_source.data = aggregate_source.from_df(aggregate_metrics)

def get_multiline_column_datasource(df, category, country):
    df_country = df[df.country == country]
    df_pivoted = pd.DataFrame(df_country.pivot_table(index='time', columns=category, aggfunc=np.sum).reset_index())
    df_pivoted.columns = df_pivoted.columns.to_series().str.join('.')
    categories = list(set([column.split('.')[1] for column in list(df_pivoted.columns)]))[1:]
    data_source = ColumnDataSource(df_pivoted)
    data_source.data['categories'] = categories
    return data_source  # presumably returned, since update_plot uses the result
Recently I had to update data on a Multiline glyph. Check my question if you want to take a look at my algorithm.
I think you can update a ColumnDataSource in at least three ways:
You can create a dataframe to instantiate a new CDS
cds = ColumnDataSource(df_pivoted)
data_source.data = cds.data
You can create a dictionary and assign it to the data attribute directly
d = {
    'xs0': [[7.0, 986.0], [17.0, 6.0], [7.0, 67.0]],
    'ys0': [[79.0, 69.0], [179.0, 169.0], [729.0, 69.0]],
    'xs1': [[17.0, 166.0], [17.0, 116.0], [17.0, 126.0]],
    'ys1': [[179.0, 169.0], [179.0, 1169.0], [1729.0, 169.0]],
    'xs2': [[27.0, 276.0], [27.0, 216.0], [27.0, 226.0]],
    'ys2': [[279.0, 269.0], [279.0, 2619.0], [2579.0, 2569.0]]
}
data_source.data = d
Here, if you need columns of different sizes or empty columns, you can fill the gaps with NaN values in order to keep the column sizes consistent. And I think this is the solution to your question:
import numpy as np
d = {
    'xs0': [[7.0, 986.0], [17.0, 6.0], [7.0, 67.0]],
    'ys0': [[79.0, 69.0], [179.0, 169.0], [729.0, 69.0]],
    'xs1': [[17.0, 166.0], [np.nan], [np.nan]],
    'ys1': [[179.0, 169.0], [np.nan], [np.nan]],
    'xs2': [[np.nan], [np.nan], [np.nan]],
    'ys2': [[np.nan], [np.nan], [np.nan]]
}
data_source.data = d
Or if you only need to modify a few values, you can use the patch method. Check the documentation here.
The following example shows how to patch individual column elements and slices:
source = ColumnDataSource(data=dict(foo=[10, 20, 30], bar=[100, 200, 300]))
patches = {
    'foo': [ (slice(2), [11, 12]) ],
    'bar': [ (0, 101), (2, 301) ],
}
source.patch(patches)
After this operation, the value of the source.data will be:
dict(foo=[11, 12, 30], bar=[101, 200, 301])
NOTE: It is important to make the update in one go to avoid performance issues
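Applied to the question above, a minimal sketch (the helper name and column lookup are illustrative, not from the original code) of padding the new source with NaN-filled columns for any line glyph whose column disappeared after the country switch, so the existing renderers keep a valid field to read:

import numpy as np

def pad_missing_columns(new_data, expected_columns):
    # fill any column the existing line glyphs still reference with NaNs
    length = len(next(iter(new_data.values()))) if new_data else 0
    for col in expected_columns:
        if col not in new_data:
            new_data[col] = [np.nan] * length
    return new_data

# in the callback, before assigning to the source (assumed usage):
# timeseries_source.data = pad_missing_columns(new_dict, previous_column_names)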

Python pyodbc fetchmany() how to select output to update query

I have code using fetchmany() that outputs e.g. 10 records,
and I have added an iterating value 0 1 2 3 4 5 to each print statement. Now I want the user to input 0 or 1 and have that select the corresponding row, so I can update the SQL record for that row.
cur.execute("select events.SERIALNUM, emp.LASTNAME, emp.SSNO,
events.EVENT_TIME_UTC from AccessControl.dbo.emp,
AccessControl.dbo.events where emp.id = events.empid and emp.SSNO=?
order by EVENT_TIME_UTC desc ", empid)
rows = cur.fetchmany(att_date)
n = 0
for row in rows :
event_date = row.EVENT_TIME_UTC
utc = event_date.replace(tzinfo=from_zone)
utc_to_local = utc.astimezone(to_zone)
local_time = utc_to_local.strftime('%H:%M:%S')
att_date = utc_to_local.strftime('%d:%m:%y')
print (n, row.SERIALNUM, row.LASTNAME, row.SSNO, att_date, local_time)
n = n + 1
seri_al = input("Copy And Past the serial number u want to modifiy: ")
This will output the following data:
0 1500448188 FIRST NAME 03249 2017-07-19 17:01:17
1 1500448187 FIRST NAME 03249 2017-07-19 17:01:15
E.g.:
seri_al = input("Copy And Past the serial number u want to modifiy: ")
Instead of copying and pasting '1500448188', I want the user to only enter '0', map that to the serial number, and use it in the where clause of the SQL update query.
It appears that you already know how to use input to prompt for the user's choice. The only piece you are missing is to add items to a dictionary as you loop through the rows. Here is a slightly abstracted example:
rows = [('1500448188',), ('1500448187',)]  # test data
selections = dict()
n = 0
for row in rows:
    selections[n] = row[0]
    print(n, repr(row[0]))
    n += 1
select = input("Enter the index (0, 1, ...) you want to select: ")
selected_key = selections[int(select)]
print("You selected " + repr(selected_key))
which prints
0 '1500448188'
1 '1500448187'
Enter the index (0, 1, ...) you want to select: 1
You selected '1500448187'
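From there, the selected serial number can go straight into a parameterized UPDATE. A minimal sketch, assuming a hypothetical REMARKS column on the events table and a connection object named cnxn, since neither appears in the question:

# hypothetical update using the chosen serial number in the WHERE clause
cur.execute("update AccessControl.dbo.events set REMARKS = ? where SERIALNUM = ?",
            ("paid", selected_key))
cnxn.commit()  # 'cnxn' is assumed to be the pyodbc connection object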

Resources