Argument must be str not tuple - python-3.x

I need to load only the rows from the database that match today's date. The date column in the database is stored as TEXT.
Code to load the data from the database:
def load_database(self):
    today_date = current_date[0:11]
    while self.workingpatient_table.rowCount() > 0:
        self.workingpatient_table.removeRow(0)
    conn = sqlite3.connect(r'mylab.db')
    content = ("SELECT * FROM daily_patients where date=?",(today_date))
    result = conn.execute(content)
    for row_index,row_data in enumerate(result):
        self.workingpatient_table.insertRow(row_index)
        for column_index,column_data in enumerate(row_data):
            self.workingpatient_table.setItem(row_index,column_index,QTableWidgetItem(str(column_data)))
    conn.close()
When I run the program I get the following error:
result = conn.execute(content)
TypeError: argument 1 must be str, not tuple
Any possible solution?

Change these lines from
content = ("SELECT * FROM daily_patients where date=?",(today_date))
result = conn.execute(content)
to
content = ("SELECT * FROM daily_patients where date=?", (today_date,))
result = conn.execute(*content)
conn.execute expects the SQL string as its first argument and the query parameters as a separate sequence, so the tuple has to be unpacked with *. Note also that (today_date) is just today_date in parentheses; the trailing comma in (today_date,) is what makes it a one-element tuple.
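Equivalently, you can skip the intermediate content tuple and pass the query and its parameters to execute directly (a minimal sketch, assuming today_date holds the date string from the question):
query = "SELECT * FROM daily_patients WHERE date = ?"
result = conn.execute(query, (today_date,))  # parameters go in a one-element tuple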

Related

Error while getting user input and using Pandas DataFrame to extract data from LEFT JOIN

I am trying to create an SQLite3 statement in Python 3 to collect data from two tables called FreightCargo & Train, where a train ID is the input value. I want to use Pandas since it's easy to read the tables.
I have created the code below, which works perfectly fine, but it is static and only looks for the one train ID hard-coded in the statement.
import pandas as pd
SQL = '''SELECT F.Cargo_ID, F.Name, F.Weight, T.Train_ID, T.Assembly_date
FROM FreightCargo F LEFT JOIN [Train] T
ON F.Cargo_ID = T.Cargo_ID
WHERE Train_ID = 2;'''
cursor = conn.cursor()
cursor.execute( SQL )
names = [x[0] for x in cursor.description]
rows = cursor.fetchall()
Temp = pd.DataFrame( rows, columns=names)
Temp
I want to be able to drive the query from user input. For example, the user is asked for a train_id, which is a primary key in the table, and everything related to that train will be listed.
I expanded the code, but I am getting an error: ValueError: operation parameter must be str
Train_ID = input('Train ID')
SQL = '''SELECT F.Cargo_ID, F.Name, F.Weight, T.Train_ID, T.Assembly_date
FROM FreightCargo F LEFT JOIN [Train] T
ON F.Cargo_ID = T.Cargo_ID
WHERE Train_ID = ?;''', (Train_ID)
cursor = conn.cursor()
cursor.execute( SQL )
names = [x[0] for x in cursor.description]
rows = cursor.fetchall()
Temp = pd.DataFrame( rows, columns=names)
Temp
The problem lies in your definition of the SQL variable.
You are creating a tuple of two elements. If you print(SQL) you will see something like ('SELECT ... ?;', 'your_users_input'), and type(SQL) is tuple. (Note that (Train_ID) is not a tuple either; a one-element tuple needs a trailing comma, (Train_ID,).)
When you pass this to cursor.execute(sql[, parameters]), it expects a string as the first argument, with the parameters passed separately. Here the parameters are not really optional, since your query contains a ? placeholder; they must be given as a sequence, for example a tuple.
You can unwrap your SQL tuple with cursor.execute(*SQL), which passes each element of SQL as a separate argument, or you can move the parameters into the execute call.
Train_ID = input('Train ID')
SQL = '''SELECT F.Cargo_ID, F.Name, F.Weight, T.Train_ID, T.Assembly_date
FROM FreightCargo F LEFT JOIN [Train] T
ON F.Cargo_ID = T.Cargo_ID
WHERE Train_ID = ?;'''
cursor = conn.cursor()
cursor.execute( SQL, (Train_ID,) )
names = [x[0] for x in cursor.description]
rows = cursor.fetchall()
Temp = pd.DataFrame( rows, columns=names)
Temp
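Since Pandas is already in use here, the parameterized query can also be handed straight to pandas.read_sql_query, which runs it and builds the DataFrame in one step (a minimal sketch, assuming conn is the existing sqlite3 connection):
import pandas as pd
Train_ID = input('Train ID')
SQL = '''SELECT F.Cargo_ID, F.Name, F.Weight, T.Train_ID, T.Assembly_date
FROM FreightCargo F LEFT JOIN [Train] T
ON F.Cargo_ID = T.Cargo_ID
WHERE Train_ID = ?;'''
Temp = pd.read_sql_query(SQL, conn, params=(Train_ID,))  # params fills the ? placeholder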

update statement using loop over tuple of query and data fails in psycopg2

I have created a mini functional pipeline which builds an UPDATE statement that uses regexp_replace and then passes the statement and the data to psycopg2 to execute.
If I copy-paste a single statement outside of the loop it works; if I loop over all statements I get an error.
# Function to create statement
def psycopg2_regex_replace_chars(table, col, regex_chars_old, char_new):
    query = "UPDATE {} SET {} = regexp_replace({}, %s , %s, 'g');".format(table, col, col)
    data = (regex_chars_old, char_new)
    return (query, data)

# Create functions with intelligible names
replace_separators_with_space = partial(psycopg2_regex_replace_chars, regex_chars_old='[.,/[-]]', char_new=' ')
replace_amper_with_and = partial(psycopg2_regex_replace_chars, regex_chars_old='&', char_new='and')

# create funcs_list
funcs_edit = [replace_separators_with_space,
              replace_amper_with_and]
So far, so good.
This works
stmt = "UPDATE persons SET name = regexp_replace(name, %s , %s, 'g');"
data = ('[^a-zA-z0-9]', ' ')
cur.execute(stmt, data)
conn.commit()
This fails
tables = ["persons"]
cols = ["name", "dob"]
for table in tables:
for col in cols:
for func in funcs_edit:
query, data = func(table=table, col=col)
cur.execute(query, data)
conn.commit()
error
<ipython-input-92-c8ba5d469f88> in <module>
      6         for func in funcs_edit:
      7             query, data = func(table=table, col=col)
----> 8             cur.execute(query, data)
      9             conn.commit()

ProgrammingError: function regexp_replace(date, unknown, unknown, unknown) does not exist
LINE 1: UPDATE persons SET dob = regexp_replace(dob, '[.,/[-]]' , ' ...
                                 ^
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
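The HINT points at the real problem: regexp_replace is only defined for text-like arguments, and dob is a date column, so Postgres finds no matching function signature; the loop fails as soon as it reaches dob (the name column works). One way to sidestep this, sketched against the question's own loop, is to restrict the iteration to text columns (or cast explicitly, e.g. dob::text, if a textual rewrite of a date value really is intended):
tables = ["persons"]
text_cols = ["name"]  # dob is a date, so regexp_replace(dob, ...) has no matching overload
for table in tables:
    for col in text_cols:
        for func in funcs_edit:
            query, data = func(table=table, col=col)
            cur.execute(query, data)
            conn.commit()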

Adding count of total rows through Marshmallow with @post_dump?

I need to add the total number of rows returned by this query:
queryPostgres = db.text("""
    SELECT *, COUNT(*) OVER () as RowCount
    FROM (
        SELECT *,
            ( 3958.75 *
              acos(sin(:lat1 / 57.2958) * sin( cast(latitude as double precision) / 57.2958) +
                   cos(:lat1 / 57.2958) * cos( cast(latitude as double precision) / 57.2958) *
                   cos( cast(longitude as double precision) / 57.2958 - :lon1 / 57.2958)))
            as distanceInMiles
        FROM "job" ) zc
    WHERE zc.distanceInMiles < :dst
    ORDER BY zc.distanceInMiles
    LIMIT :per_page
    OFFSET :offset
""")
jobs = cls.query.\
    from_statement(queryPostgres). \
    params(lat1=float(lat1),
           lon1=float(lon1),
           dst=int(dst),
           per_page=int(per_page),
           offset=int(offset))
return jobs
As you can see, I added the RowCount column to carry the total count of rows.
However, since it is not part of my model, what should I do in Marshmallow to expose the number of rows (the RowCount column)?
I thought I could do it with Marshmallow's @post_dump(), but I could not figure out how.
For more clarity, here is my schema.
class JobSchema(ma.ModelSchema):
    def validate_state(state):
        """Validate one of 55 USA states"""
        if state not in states:
            raise ValidationError(INVALID_US_STATE)

    def validate_zipCode(zip):
        if not zipcodes.is_real(zip):
            raise ValidationError(INVALID_ZIP_CODE)

    @pre_load
    def get_longitude_for_zipCode_and_TimeCreated(self, data):
        """ This method will pass valid long, lat and time_created
        values to each job created during a POST request"""
        # Getting zip from the request to obtain lat&lon from DB
        result = modelZipCode.getZipCodeDetails(data['zipCode'])
        print(result)
        if result is None:
            raise ValidationError(INVALID_ZIP_CODE_2)
        schema = ZipCodeSchema(exclude=('id'))
        zip, errors = schema.dump(result)
        if errors:
            raise ValidationError(INVALID_ZIP_CODE_3)
        else:
            data['longitude'] = zip['longitude']
            data['latitude'] = zip['latitude']
            data['time_created'] = str(datetime.datetime.utcnow())

    title = fields.Str(required=True, validate=[validate.Length(min=4, max=80)])
    city = fields.Str(required=True, validate=[validate.Length(min=5, max=100)])
    state = fields.Str(required=True, validate=validate_state)
    zipCode = fields.Str(required=True, validate=validate_zipCode)
    description = fields.Str(required=False, validate=[validate.Length(max=80)])
    narrative = fields.Str(required=False, validate=[validate.Length(max=250)])
    companyLogo = fields.Str(required=False, validate=[validate.Length(max=250)])
    companyName = fields.Str(required=True, validate=[validate.Length(min=5, max=250)])
    companyURL = fields.Str(required=True, validate=[validate.Length(min=4, max=100)])
    latitude = fields.Str(required=True)
    longitude = fields.Str(required=True)
    time_created = fields.DateTime()

    # We add a post_dump hook to add an envelope to responses
    @post_dump(pass_many=True)
    def wrap(self, data, many):
        # import pdb; pdb.set_trace()
        if len(data) >= 1:
            counter = data[0]['RowCount']
            return {
                data,
                counter
            }

    class Meta:
        model = modelJob
The weirdest thing is that my query is indeed correctly returning the RowCount.
Could someone please help me figure out why I cannot capture the RowCount key in the post_dump method?
This needs to be managed by the Marshmallow pre_dump or post_dump method. In the end, though, I decided to use SQLAlchemy's pagination methods instead, since they give me the total number of rows in the response.
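As a rough illustration of that route (a sketch, assuming Flask-SQLAlchemy, where cls is the Job model and page/per_page come from the request; the variable names are placeholders):
pagination = cls.query.paginate(page=page, per_page=per_page, error_out=False)
jobs = pagination.items        # the rows for the current page
total_rows = pagination.total  # total number of matching rows, no window function needed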

Not able to parameterize LIMIT and OFFSET in sqlite3

Why is the following code giving the syntax error "sqlite3.OperationalError: near "?": syntax error"?
import sqlite3
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
table = "device_store"
uuid = "bbebe39e-fe2e-4817-b022-a3ef13bd6283"
page = 1
POSTS_PER_PAGE = 10
query = "SELECT * FROM ? WHERE uuid=? LIMIT ? OFFSET ?"
result = cursor.execute(query, (table, uuid, POSTS_PER_PAGE, 0))
rows = result.fetchall()
connection.close()
print("==>> Printing rows <<==")
print(rows)
The error is caused by the placeholder in FROM ?, not by the others. Table names can't be passed as query parameters; they have to be written into the statement itself.
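A sketch of the usual workaround: put the table name into the string (ideally after checking it against an allowlist, since it cannot be parameterized) and keep placeholders for the values. The ALLOWED_TABLES set here is a hypothetical guard, not part of the original code:
ALLOWED_TABLES = {"device_store"}
if table not in ALLOWED_TABLES:
    raise ValueError("unexpected table name")
query = "SELECT * FROM {} WHERE uuid=? LIMIT ? OFFSET ?".format(table)
result = cursor.execute(query, (uuid, POSTS_PER_PAGE, 0))
rows = result.fetchall()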

How to read and insert bytea columns using psycopg2?

I am working on a Python script to replicate some PostgreSQL tables from one environment to another (it does a little more than pg_dump). It works except when I am copying a table that has a bytea column.
I read the source table data into memory, then I dump it into the target database with concatenated INSERT statements.
Here is my method that produces an insert statement:
def generateInsert(self, argCachedRow):
    colOrd = 0;
    valClauseList = []
    hasBinary = False
    for colData in argCachedRow:
        colOrd += 1
        colName = self.colOrdLookup.get(colOrd)
        col = self.colLookup.get(colName)
        dataType = col.dataType
        insVal = None
        if colData is not None:
            strVal = str(colData)
            if dataType.useQuote:
                if "'" in strVal:
                    strVal = strVal.replace("'", "''")
                insVal = "'%s'" % strVal
            else:
                if dataType.binary:
                    hasBinary = True
                    #insVal = psycopg2.Binary(colData)
                #else:
                insVal = strVal
        else:
            insVal = "NULL"
        valClauseList.append(insVal)
    valClause = ", ".join(valClauseList)
    if hasBinary:
        valClause = psycopg2.Binary(valClause)
    result = "INSERT INTO %s VALUES (%s)" % (self.name, valClause)
    return result
which works with every table that doesn't have binary data.
I also tried (intuitively) wrapping just the binary column data in psycopg2.Binary, which is the commented-out line, rather than the whole row value list, but that didn't work either.
Here is my simple DataType wrapper, which is loaded by reading Postgres' information_schema tables:
class DataType(object):
    def __init__(self, argDispName, argSqlName, argUseQuote, argBin):
        self.dispName = argDispName
        self.sqlName = argSqlName
        self.useQuote = argUseQuote
        self.binary = argBin
How do I read and insert bytea columns using psycopg2?
If you have this database structure:
CREATE TABLE test (a bytea,
b int,
c text)
then inserting binary data into the table can be done like so, without any wrappers:
bin_data = b'bytes object'
db = psycopg2.connect(*args) # DB-API 2.0
c = db.cursor()
c.execute('''INSERT INTO test VALUES (%s, %s, %s)''', (bin_data, 1337, 'foo'))
c.execute('''UPDATE test SET a = %s''', (bin_data + b'1',))
Then, when you query it:
c.execute('''SELECT a FROM test''')
You'll receive a memoryview, which is easily converted back to bytes:
mview = c.fetchone()
new_bin_data = bytes(mview)
print(new_bin_data)
Output: b'bytes object1'
Also, I'd suggest not assembling queries by string formatting. psycopg2's built-in parameter substitution is much more convenient, and you don't have to worry about escaping data to protect against SQL injection.
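For the replication script in the question, one way to follow that advice while still building the statement dynamically is psycopg2's sql module (a sketch, not the original generateInsert; generate_insert, table_name and row are hypothetical names, and c is the cursor from the example above):
from psycopg2 import sql

def generate_insert(table_name, row):
    # sql.Identifier quotes the table name safely; one placeholder per column value.
    stmt = sql.SQL("INSERT INTO {} VALUES ({})").format(
        sql.Identifier(table_name),
        sql.SQL(", ").join([sql.Placeholder()] * len(row)))
    return stmt, row

stmt, params = generate_insert("test", (b'bytes object', 1337, 'foo'))
c.execute(stmt, params)  # psycopg2 adapts bytes to bytea automatically, no manual escaping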
