Python .execute() not executing Postgresql query - python-3.x

I am trying to update a database from a csv file. I am able to connect to the database, iterate over the lines in the csv, and compose the desired SQL statement, but for some reason it never reaches the PostgreSQL DB. The console and PyCharm show no errors.
Here is the .py
import csv
import psycopg2

filename = r'C:\dev\student_feedback\student_feedback\student_feedback.csv'  # raw string, so the backslashes are not read as escapes
fields = []
rows = []
host = 'localhost'
port = 5432
database = 'ipfeedback'
user = 'postgres'
password = '#########'

def import_student_survey_csv():
    print('Maybe Updating surveys...')

    def doQuery(connection):
        cur = connection.cursor()
        with open(filename, 'r') as csvfile:
            csvreader = csv.reader(csvfile)
            fields = next(csvreader)
            for row in csvreader:
                rows.append(row)
            nrows = rows.pop()  # note: only the last csv row ends up in the INSERT
            qry = (f"INSERT INTO ipfeedbackdb_studentsurvey "
                   f"({fields[0]},{fields[1]},{fields[2]},{fields[3]},{fields[4]},{fields[5]},"
                   f"{fields[6]},{fields[7]},{fields[8]},{fields[9]},{fields[10]},{fields[11]}) "
                   f"VALUES ({nrows[0]},{nrows[1]},{nrows[2]},{nrows[3]},{nrows[4]},{nrows[5]},"
                   f"{nrows[6]},{nrows[7]},{nrows[8]},'{nrows[9]}','{nrows[10]}','{nrows[11]}');")
            print('QRY', qry)
            cur.execute(qry)
        csvfile.close()  # redundant: the with-block already closed the file

    myConnection = psycopg2.connect(host=host, port=port, user=user, password=password, dbname=database)
    doQuery(myConnection)
    myConnection.close()

import_student_survey_csv()
the .csv
added,spid,q1,q2,q3,q4,q5,q6,q7,best_part,improve,comments
21021,0,1,1,1,1,1,1,1,"qwe","qwe","qwe"
21021,0,1,1,1,1,1,1,1,"asd","asd","asd"
21021,0,1,1,1,1,1,1,1,"x","x","x"
21021,12345,1,1,1,1,1,1,1,"x","x","x"
(The table data and the db structure were shown as screenshots in the original post and are not reproduced here.)
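A likely culprit, for what it's worth: psycopg2 does not autocommit, so the INSERT runs inside an implicit transaction that is rolled back when the connection closes without a connection.commit(). A minimal sketch of the fix, plus a parameterized variant that avoids the quoting problems of building the statement with an f-string (all names are taken from the code above):

cur.execute(qry)
connection.commit()  # without this, psycopg2 discards the INSERT when the connection closes

# Safer variant: let psycopg2 quote the values instead of interpolating them
placeholders = ', '.join(['%s'] * len(nrows))
qry = f"INSERT INTO ipfeedbackdb_studentsurvey ({', '.join(fields)}) VALUES ({placeholders});"
cur.execute(qry, nrows)
connection.commit()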

Related

Not able to generate csv file into s3 bucket using aws glue python shell script

import sys
import json
import csv
import boto3
import psycopg2

secret_name = 'xxxx'
region_name = 'us-east-1'
session = boto3.session.Session()
client = session.client(service_name='secretsmanager', region_name=region_name)
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
creds = json.loads(get_secret_value_response['SecretString'])

username = creds['username']
password = creds['password']
host = creds['host']

try:
    connection = psycopg2.connect(user=username, password=password, host=host, port="5412", database="xxxx")
    cursor = connection.cursor()
    # Executing a SQL query
    select_query = """ SELECT * from table"""
    cursor.execute(select_query)
    print("Select Record successfully")
    # Fetch result
    record = cursor.fetchall()
    print("Result ", record)
except Exception as e:
    print(e)  # assumed handler: the question's original except block is not shown

How do I generate the csv file into the s3 bucket?

How to import multiple CSV files into a PostgreSQL table using python?

I have a number of daily csv files to be imported into Postgres. The python code below works for importing a single csv file. How can I import a batch of csv files? Thanks!
import psycopg2
con = psycopg2.connect(
database="xxxx",
user="xxxx",
password="xxxx",
host="xxxx")
cur = con.cursor()
file = open('path/to/directory/client_record_2020-01-01.csv', 'r')
next(file)
cur.copy_from(file, "table_name", columns=('col1', 'col2', 'col3', 'col4', 'col5'), sep=",")
con.commit()
con.close()
Let's try putting the names of our files in a list and then iterate over that list doing one copy_from() at a time. Maybe:
import psycopg2

file_names = [
    'path/to/directory/client_record_2020-01-01.csv'
]

con = psycopg2.connect(database="xxxx", user="xxxx", password="xxxx", host="xxxx")

for file_name in file_names:
    with open(file_name, 'r') as file_in:
        next(file_in)
        with con.cursor() as cur:
            cur.copy_from(file_in, "table_name", columns=('col1', 'col2', 'col3', 'col4', 'col5'), sep=",")
            con.commit()

con.close()
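If the daily files follow the naming pattern in the question, the list does not have to be typed out by hand; a small sketch using glob (the pattern is assumed from the example path):

import glob

file_names = sorted(glob.glob('path/to/directory/client_record_*.csv'))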

how to run the python/django shell as a script

In a tutorial on django we create a simple table, run migrations, and then go into a shell with the command:
python manage.py shell
from there, in the shell we run the following:
from flights.models import Flight
f = Flight(origin="New York", destination="london", duration=415)
f.save()
I'm trying to figure out how to run these commands from a py file so I created test.py:
from flights.models import Flight
f=Flight(origin="New York",destination="London", duration=415)
f.save()
but I get the error "Models aren't loaded." How do I resolve this? I'm definitely a little confused. I am able to update the database from a web-served page with the following in my views.py file:
from django.shortcuts import render
from flights.models import Flight
def index(request):
f=Flight(origin="New York",destination="London", duration=415)
f.save()
return render(request, "flights/index.html", {
})
What I am asking is how to update the database directly on the backend. Do I just use standard python sql commands? For instance:
import sqlite3
from sqlite3 import Error
import csv

def sql_connection(db_file):
    """ create a database connection to the SQLite database
        specified by db_file
    :param db_file: database file
    :return: Connection object or None
    """
    conn = None
    try:
        conn = sqlite3.connect(db_file)
        return conn
    except Error as e:
        print(e)
    return conn

def sql_create(conn, create_table_sql):
    """ create a table from the create_table_sql statement
    :param conn: Connection object
    :param create_table_sql: a CREATE TABLE statement
    :return:
    """
    try:
        c = conn.cursor()
        c.execute(create_table_sql)
    except Error as e:
        print(e)

def sql_insert(conn, sql, data, single):
    cur = conn.cursor()
    if single:
        cur.execute(sql, data)
        rowCount = cur.lastrowid
    else:
        cur.executemany(sql, data)
        rowCount = cur.rowcount
    conn.commit()
    return rowCount

def sql_select(conn, sql, data):
    cur = conn.cursor()
    cur.execute(sql, data)
    rows = cur.fetchall()
    return rows

def sql_update(conn, sql, data):
    cur = conn.cursor()
    cur.execute(sql, data)
    conn.commit()

def sql_delete(conn, sql, mydata):
    print(mydata)
    cur = conn.cursor()
    cur.execute(sql, mydata)
    conn.commit()

def main():
    insert = False
    db_file = r"/home/saltydog/Database/crud.db"

    # create a database connection
    conn = sql_connection(db_file)

    # create tables
    sql_create_price_table = """ CREATE TABLE IF NOT EXISTS prices (
                                     ticker text NOT NULL,
                                     ymd integer,
                                     price real,
                                     PRIMARY KEY(ticker,ymd)
                                 ); """
    sql_create_price2_table = """ CREATE TABLE IF NOT EXISTS prices2 (
                                      ticker text NOT NULL,
                                      ymd integer,
                                      price real,
                                      PRIMARY KEY(ticker,ymd)
                                  ); """

    if conn is not None:
        # create projects table
        sql_create(conn, sql_create_price_table)
        sql_create(conn, sql_create_price2_table)
        conn.commit()
    else:
        print("Error! cannot create the database connection.")

    if insert:
        sql_insert_record = """insert into prices(ticker,ymd,price)
                               values(?, ?, ?)"""
        cnt = 0
        with open('ibm.txt') as f:
            reader = csv.reader(f)
            for row in reader:
                ticker = row[0]
                ymd = row[1]
                price = row[2]
                data = (ticker, ymd, price)
                if cnt != 0:
                    rowid = sql_insert(conn, sql_insert_record, data, True)
                    print(rowid)
                cnt += 1

        sql_insert_records = """insert into prices2(ticker,ymd,price)
                                values(?, ?, ?)"""
        data = []
        cnt = 0
        with open('ibm.txt') as f:
            reader = csv.reader(f)
            for row in reader:
                ticker = row[0]
                ymd = row[1]
                price = row[2]
                if cnt != 0:
                    data.append((ticker, ymd, price))
                cnt += 1
        rowid = sql_insert(conn, sql_insert_records, data, False)
        print(rowid)

    select_records = """select ticker,ymd,price from prices
                        where ticker = ?
                        group by price
                        order by price"""
    data = ('IBM', )
    rows = sql_select(conn, select_records, data)
    for row in rows:
        print(row)

    select_records = """select ticker,ymd,price from prices
                        where ticker = ?
                        and price > ?"""
    data = ('IBM', 100.0)
    rows = sql_select(conn, select_records, data)
    for row in rows:
        print(row)

    select_records = """select ticker,ymd,price from prices
                        where ymd = ?"""
    data = (19990527, )
    rows = sql_select(conn, select_records, data)
    for row in rows:
        print(row)

    sql_update_records = """update prices
                            set price = ?
                            where ymd = ?"""
    data = (200.00, 19990527)
    sql_update(conn, sql_update_records, data)

    sql_delete_record = """delete from prices where ymd = ?"""
    mydata = (19990528, )
    sql_delete(conn, sql_delete_record, mydata)

if __name__ == '__main__':
    main()
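On the actual error ("Models aren't loaded"): a standalone .py file has to configure Django before any model import. A minimal sketch, assuming a hypothetical settings module myproject.settings and that the script runs from the project root:

import os
import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")  # hypothetical; use your project's settings path
django.setup()  # must be called before importing any models

from flights.models import Flight

f = Flight(origin="New York", destination="London", duration=415)
f.save()

Alternatively, piping the file into the Django shell reuses the already-configured environment: python manage.py shell < test.py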

Extracting data in separate sheet excel using Python

I want to write a python script that executes multiple SQL queries and saves their output to Excel.
Suppose I have 4 SQL queries, i.e. Script1, Script2, Script3 & Script4, and I want the generated workbook saved in E:\Test; in that workbook sheet1 contains Script1's output, sheet2 contains Script2's output, sheet3 contains Script3's output, and so on. I have written a script, but it works for only one query.
Using this script I am able to generate an Excel sheet with the name Test, but how do I run the remaining scripts so that their output shows up in the other sheets of the same workbook?
Please Help
import psycopg2
import sys
import pprint
import pandas as pd
import os
import openpyxl.cell

COMMASPACE = ', '

def main():
    conn_string = "dbname='abc' user='qwerty' host='pqr' password='******' port='1234'"
    script1 = """
    select * From something1
    """
    script2 = """
    select * From something2
    """
    script3 = """
    select * From something3
    """
    script4 = """
    select * From something4
    """
    pprint.pprint('Making connection to the Database...')
    con1 = psycopg2.connect(conn_string)
    cur = con1.cursor()
    pprint.pprint('Execution Start')
    cur.execute(script1)
    if not cur.rowcount:
        pprint.pprint('Oops! Error Occured')
    else:
        columns = [desc[0] for desc in cur.description]
        data = cur.fetchall()
        df = pd.DataFrame(list(data), columns=columns)
        df.columns = map(str.upper, df.columns)
        writer = pd.ExcelWriter('E:\\Test.xlsx')
        df.to_excel(writer, sheet_name='Sheet1')

        def hide_column(ws, column_id):
            if isinstance(column_id, int):
                assert column_id >= 1, "Column numbers must be 1 or greater"
                column_id = openpyxl.cell.get_column_letter(column_id)
            column_dimension = ws.column_dimensions[column_id]
            column_dimension.hidden = True

        writer.save()
    print("END of extraction")

if __name__ == "__main__":
    main()
Try using pandas read_sql with SQLAlchemy:
from openpyxl import load_workbook
from sqlalchemy import create_engine
import pandas as pd

# Parameters for SQL Alchemy
ServerName = "your_Server_Name"
Database = "Your_Database"
Driver = "Your_Driver"

# Create the connection
engine = create_engine('mssql+pyodbc://' + ServerName + '/' + Database + "?" + Driver)

# reading in the dataframes
df1 = pd.read_sql_query("select * from somewhere", engine)
df2 = pd.read_sql_query("select * from somewhere_else", engine)

# Using openpyxl to write to excel sheets
file = 'Your_file_path_Here'
book = load_workbook(file)
writer = pd.ExcelWriter(file, engine='openpyxl')
writer.book = book

# now start writing them to sheets
df1.to_excel(writer, index=None, sheet_name='SQL1')
df2.to_excel(writer, index=None, sheet_name='SQL2')  # was df1 in the original answer, which wrote the first frame twice
writer.save()  # persist the workbook to disk
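Since the question targets PostgreSQL rather than MSSQL, a sketch of the same idea against the asker's own database, one sheet per script (the connection details and script1..script4 come from the question; the SQLAlchemy URL is an assumed translation of that conn_string):

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine('postgresql+psycopg2://qwerty:******@pqr:1234/abc')  # assumed from the question's conn_string
queries = {'Sheet1': script1, 'Sheet2': script2, 'Sheet3': script3, 'Sheet4': script4}
writer = pd.ExcelWriter('E:\\Test.xlsx')
for sheet_name, query in queries.items():
    # one query per sheet, all in the same workbook
    pd.read_sql_query(query, engine).to_excel(writer, sheet_name=sheet_name, index=False)
writer.save()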

Trying to import csv into MySQL with pymysql and Python version 3

I want to put a simple csv into a MySQL table in a database. I know there are probably other ways to do this but for the system I have in place, I would like to stick with this method if possible.
I have read every other possible post about this and tried many of the suggestions but I can't get it to work. I can get it to work with python 2 but not in 3.
csv is
test1 test2
43.49 654.32
78.89 294.95
I can't seem to get the syntax correct and get various errors when I try different approaches.
import csv
import pymysql

db = pymysql.connect(host='###.###.##.#', user='test', passwd='test')
cursor = db.cursor()
csv_data = csv.reader('c:/tmp/sample.csv')
next(csv_data)
for row in csv_data:
    cursor.execute('INSERT INTO test.table(test1, test2) VALUES(%s, %s)', row)
db.commit()
cursor.close()
print (Imported)
Any ideas???
Thanks in advance!!
For me, this worked:
import pymysql
import csv

db = pymysql.connect("localhost", "root", "12345678", "data")
cursor = db.cursor()
csv_data = csv.reader(open('test.csv'))
next(csv_data)
for row in csv_data:
    cursor.execute('INSERT INTO PM(col1,col2) VALUES(%s, %s)', row)
db.commit()
cursor.close()
To answer my own question, this worked for me:
import csv
import pymysql

db = pymysql.connect(host='###.###.##.#', user='test', passwd='test')
cursor = db.cursor()
f = csv.reader(open('c:/tmp/sample.csv'))
sql = """INSERT INTO test.table(test1,test2) VALUES(%s,%s)"""
next(f)
for line in f:
    line = [None if cell == '' else cell for cell in line]
    cursor.execute(sql, line)
db.commit()
cursor.close()
print("Imported")
