How to use a passed parameter as the table name in a SELECT query in Python? - python-3.x

I have the following function, which extracts data from a table, but I want to pass the table name to the function as a parameter:

import psycopg2

def extract_data(table):
    try:
        tableName = table
        conn_string = "host='localhost' dbname='Aspentiment' user='postgres' password='pwd'"
        conn = psycopg2.connect(conn_string)
        cursor = conn.cursor()
        cursor.execute("SELECT aspects_name, sentiments FROM ('%s') " % (tableName))
        rows = cursor.fetchall()
        return rows
    finally:
        if conn:
            conn.close()

When I call the function as extract_data(Harpar), where Harpar is the table name, it gives an error that 'Harpar' is not defined. Any help?

Update: As of psycopg2 version 2.7:
You can now use the sql module of psycopg2 to compose dynamic queries of this type:

from psycopg2 import sql

query = sql.SQL("SELECT aspects_name, sentiments FROM {}").format(sql.Identifier(tableName))
cursor.execute(query)

Pre-2.7:
Use the AsIs adapter along these lines:

from psycopg2.extensions import AsIs

cursor.execute("SELECT aspects_name, sentiments FROM %s;", (AsIs(tableName),))

Without the AsIs adapter, psycopg2 would quote the table name as a string literal, which is not valid SQL in that position. Also note that the table name must be passed to the function as a string: calling extract_data(Harpar) instead of extract_data('Harpar') is what triggers the "'Harpar' is not defined" NameError.
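Putting the pieces together, a minimal sketch of the original function rewritten with the sql module (connection string copied from the question):

import psycopg2
from psycopg2 import sql

def extract_data(table):
    conn_string = "host='localhost' dbname='Aspentiment' user='postgres' password='pwd'"
    conn = psycopg2.connect(conn_string)
    try:
        cursor = conn.cursor()
        # sql.Identifier safely quotes the table name instead of
        # splicing it into the query with % string formatting
        query = sql.SQL("SELECT aspects_name, sentiments FROM {}").format(
            sql.Identifier(table))
        cursor.execute(query)
        return cursor.fetchall()
    finally:
        conn.close()

rows = extract_data('Harpar')  # pass the table name as a string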

Related

Using "UPDATE" and "SET" in Python to Update Snowflake Table

For some time now I have been using Python to read and write data to a Snowflake table I have full update rights to, using a Snowflake helper class my colleague found on the internet. Below is the class I have been using, with my personal Snowflake connection information abstracted, and a simple read query that works given you have a 'TEST' table in your schema.
from snowflake.sqlalchemy import URL
from sqlalchemy import create_engine
from sqlalchemy import text
import keyring
import pandas as pd

# Pull the username and password to be used to connect to snowflake
stored_username = keyring.get_password('my_username', 'username')
stored_password = keyring.get_password('my_password', 'password')

class SNOWDBHelper:
    def __init__(self):
        self.user = stored_username
        self.password = stored_password
        self.account = 'account'
        self.authenticator = 'authenticator'
        self.role = stored_username + '_DEV_ROLE'
        self.warehouse = 'warehouse'
        self.database = 'database'
        self.schema = 'schema'

    def __connect__(self):
        self.url = URL(
            user=self.user,
            password=self.password,
            account=self.account,
            authenticator=self.authenticator,
            role=self.role,
            warehouse=self.warehouse,
            database=self.database,
            schema=self.schema
        )
        self.engine = create_engine(self.url)
        self.connection = self.engine.connect()

    def __disconnect__(self):
        self.connection.close()

    def read(self, sql):
        self.__connect__()
        result = pd.read_sql_query(sql, self.engine)
        self.__disconnect__()
        return result

    def write(self, wdf, tablename):
        self.__connect__()
        wdf.to_sql(tablename.lower(), con=self.engine, if_exists='append', index=False)
        self.__disconnect__()

# Instantiate the SNOWDBHelper and run the read query
SNOWDB = SNOWDBHelper()
query = """SELECT * FROM """ + 'TEST'
snow_table = SNOWDB.read(query)
I now need to update an existing Snowflake table, and my colleague suggested I could use the read function to send the query containing the update SQL to my Snowflake table. So I adapted an update query that I use successfully in the Snowflake UI and sent it through the read function. It reports that the relevant rows in the table have been updated, but they have not been. Below is the update query I use to attempt to change the field "field" in the "test" table to "X", along with the success message I get back. I'm not thrilled with this hacky update method overall (where the table update is a side effect of sorts?), but could someone please help with a method to update within this framework?
# Query I actually store in file: '0-Query-Update-Effective-Dating.sql'
UPDATE "Database"."Schema"."Test" AS UP
SET UP.FIELD = 'X'
# Read the query in from file and utilize it
update_test = open('0-Query-Update-Effective-Dating.sql')
update_query = text(update_test.read())
SNOWDB.read(update_query)
# Returns message of updated rows, but no rows updated
   number of rows updated  number of multi-joined rows updated
0                     316                                    0
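One likely culprit, assuming SQLAlchemy 1.4+ semantics: pd.read_sql_query runs the statement on a connection whose transaction is never committed, so Snowflake reports the row counts but the change is rolled back when the connection closes. A hedged sketch of an execute method that could be added to SNOWDBHelper to run DML and commit it explicitly (the method name is my own, not part of the original class):

    def execute(self, sql_statement):
        """Run a DML statement (UPDATE/INSERT/DELETE) and commit it."""
        self.__connect__()
        with self.connection.begin():  # transaction is committed on successful exit
            self.connection.execute(text(sql_statement))
        self.__disconnect__()

# Usage:
# SNOWDB.execute(open('0-Query-Update-Effective-Dating.sql').read())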

Syntax to guard against SQL injection in named identifiers

I'm reading the psycopg2 documentation and wondering how to parametrize SQL identifiers such as table names. Here is an example:

import psycopg2
from psycopg2 import sql

conn = psycopg2.connect()
cursor = conn.cursor()
cursor.execute(
    "SELECT * FROM %(my_table)s LIMIT %(my_limit)s;",
    vars={
        "my_limit": 42,  # parametrizing literals works fine.
        "my_table": sql.Identifier("foo"),  # how to do the same with named identifiers?
    }
)
psycopg2.ProgrammingError: can't adapt type 'Identifier'
I know I could use positional parameters %s or {}, but I would like the query to mix and match identifiers and literals in a single named mapping.
This did it for me:

import psycopg2
from psycopg2 import sql

conn = psycopg2.connect()
cursor = conn.cursor()
cursor.execute(
    sql.SQL("SELECT * FROM {my_table} LIMIT {my_limit};")
    .format(
        my_limit=sql.Literal(42),
        my_table=sql.Identifier("foo"),
    )
    .as_string(conn)
)
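A variant worth noting, sketched under the same setup: the sql module only needs to compose the identifiers, so literals can stay as ordinary named parameters that psycopg2 adapts for you (and .as_string is unnecessary, since execute accepts composed queries directly):

query = sql.SQL("SELECT * FROM {my_table} LIMIT %(my_limit)s;").format(
    my_table=sql.Identifier("foo"),
)
cursor.execute(query, {"my_limit": 42})  # the literal stays a named parameter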

How to convert SQL query output to a list?

I am trying to convert my SQL query output into a list formatted a certain way.
Here is my code:
def get_sf_metadata():
    import sqlite3

    # Tables I want to be dynamically created
    table_names = ['AcceptedEventRelation', 'Asset', 'Book']

    # SQLite connection
    conn = sqlite3.connect('aaa_test.db')
    c = conn.cursor()

    # Select the metadata table records
    c.execute("select name, type from sf_field_metadata1 limit 10")
    print(list(c))

get_sf_metadata()
Here is my output:
[('Id', 'id'), ('RelationId', 'reference'), ('EventId', 'reference')]
Is there any way to make the output look like this:
[Id id, RelationId reference, EventId reference]
You can try
print(["{} {}".format(i[0], i[1]) for i in list(c)])
That will print you
['Id id', 'RelationId reference', 'EventId reference']
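Since sqlite3's cursor.execute returns the cursor itself, the formatting can also be done with f-strings on Python 3.6+; and if you literally need the unquoted rendering from the question, join the strings yourself. A small sketch, reusing c from above:

rows = [f"{name} {type_}" for name, type_ in
        c.execute("select name, type from sf_field_metadata1 limit 10")]
print(rows)                         # ['Id id', 'RelationId reference', 'EventId reference']
print("[" + ", ".join(rows) + "]")  # [Id id, RelationId reference, EventId reference]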

PYODBC - Type Error: the first argument to execute must be a string or unicode query

I've been trying to connect to our ERP's ODBC source using pyodbc. I believe the syntax is correct, but the only error I'm getting at this point is: 'TypeError: the first argument to execute must be a string or unicode query'.
I've tried adding .decode('utf-8').
import pyodbc
import pandas as pd

conn = pyodbc.connect(
    'DRIVER={SQL Server};'
    'SERVER=192.168.1.30;'
    'DATABASE=Datamart;'
    'Trusted_Connection=yes;')
cursor = conn.cursor()

for row in cursor.tables(tableType='TABLE'):
    print(row)

sql = """SELECT * FROM ETL.Dim_FC_UPS_Interface_Detail"""
cursor.execute(row, sql)  # <-- this line raises the TypeError
df = pd.read_sql(sql, conn)
df.head()
I think your ordering of arguments to the pyodbc cursor execute function is off a bit: the first argument must be the query string, but you are passing row (a pyodbc.Row object left over from the loop), which is what raises the TypeError. See the docs.
cursor = conn.cursor()
sql = """SELECT * FROM ETL.Dim_FC_UPS_Interface_Detail"""
cursor.execute(sql)
for row in cursor:
    print(row)
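And if the end goal is a DataFrame anyway, the cursor can be skipped entirely; a minimal sketch reusing conn from the question:

import pandas as pd

df = pd.read_sql("SELECT * FROM ETL.Dim_FC_UPS_Interface_Detail", conn)
print(df.head())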

Querying from Microsoft SQL to a Pandas Dataframe

I am trying to write a program in Python3 that will run a query on a table in Microsoft SQL and put the results into a Pandas DataFrame.
My first try was the code below, but for some reason I don't understand, the columns do not appear in the order I listed them in the query, and the order they appear in (and therefore the labels they are given) changes between runs, stuffing up the rest of my program:
import pandas as pd, pyodbc

result_port_mapl = []

# Use pyodbc to connect to SQL Database
con_string = 'DRIVER={SQL Server};SERVER=' + <server> + ';DATABASE=' + <database>
cnxn = pyodbc.connect(con_string)
cursor = cnxn.cursor()

# Run SQL Query
cursor.execute("""
    SELECT <field1>, <field2>, <field3>
    FROM result
""")

# Put data into a list
for row in cursor.fetchall():
    temp_list = [row[2], row[1], row[0]]
    result_port_mapl.append(temp_list)

# Make list of results into dataframe with column names
## FOR SOME REASON HERE row[1] AND row[0] DO NOT CONSISTENTLY APPEAR IN THE
## SAME ORDER AND SO THEY ARE MISLABELLED
result_port_map = pd.DataFrame(result_port_mapl, columns={'<field1>', '<field2>', '<field3>'})
I have also tried the following code
import pandas as pd, pyodbc

# Use pyodbc to connect to SQL Database
con_string = 'DRIVER={SQL Server};SERVER=' + <server> + ';DATABASE=' + <database>
cnxn = pyodbc.connect(con_string)
cursor = cnxn.cursor()

# Run SQL Query
cursor.execute("""
    SELECT <field1>, <field2>, <field3>
    FROM result
""")

# Put data into DataFrame
# This becomes one column with a list in it with the three columns
# divided by a comma
result_port_map = pd.DataFrame(cursor.fetchall())

# Get column headers
# This gives the error "AttributeError: 'pyodbc.Cursor' object has no
# attribute 'keys'"
result_port_map.columns = cursor.keys()
If anyone could suggest why either of those errors are happening or provide a more efficient way to do it, it would be greatly appreciated.
Thanks
Why not just use read_sql? Like:
import pandas as pd, pyodbc
con_string = 'DRIVER={SQL Server};SERVER='+ <server> +';DATABASE=' + <database>
cnxn = pyodbc.connect(con_string)
query = """
SELECT <field1>, <field2>, <field3>
FROM result
"""
result_port_map = pd.read_sql(query, cnxn)
result_port_map.columns.tolist()
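As for why the first attempt misbehaves: columns={'<field1>', '<field2>', '<field3>'} is a Python set literal, and a set has no defined order (string hashing is even randomized between runs), so the labels land on arbitrary columns. Passing a list preserves the order; note it has to be reversed here to match temp_list = [row[2], row[1], row[0]]:

result_port_map = pd.DataFrame(
    result_port_mapl,
    columns=['<field3>', '<field2>', '<field1>'])  # a list keeps label order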
