How to upload pandas dataframe to IBM Db2 database - python-3.x

I am trying to upload a pandas DataFrame to an IBM Db2 database. However, I could not find a method to load the complete dataset at once.
import ibm_db
dsn_driver = "IBM DB2 ODBC DRIVER"
dsn_database = "BLUDB"
dsn_hostname= "dashdb-txn-xxxxx.eu-gb.bluemix.net"
dsn_port="5xx00"
dsn_protocol="TCPIP"
dsn_uid="xxxxx"
dsn_pwd="xxxx"
dsn = (
"DRIVER={{IBM DB2 ODBC DRIVER}};"
"DATABASE={0};"
"HOSTNAME={1};"
"PORT={2};"
"PROTOCOL=TCPIP;"
"UID={3};"
"PWD={4};").format(dsn_database, dsn_hostname, dsn_port, dsn_uid, dsn_pwd)
try:
    conn = ibm_db.connect(dsn, "", "")
    print('Connected')
except:
    print('Unable to connect to database', dsn)
d = {'col1': [1, 2,3,4,5,6,7,8,9,10], 'col2': [3, 4,3,4,5,6,7,8,2,34], 'col3': [1, 2,3,14,5,36,72,8,9,10],}
import pandas as pd
df = pd.DataFrame(data=d)
df
So far I manage to connect successfully to the Db2 database, but the remaining steps for uploading the pandas DataFrame are not clear to me. I tried several options from Google, but none seem to be working.
To make the problem easy, I created a sample pandas DataFrame (df, above). Any help page or documentation is appreciated.
thank you
pooja

The code below worked for me with both Python 3.5.2 and 2.7.12, ibm_db_sa 0.3.4, and Db2 v11.1.4.4.
Adjust the parameters for .to_sql to suit your requirements.
Add exception handling as required.
import ibm_db
import pandas as pd
from sqlalchemy import create_engine
dsn_driver = "IBM DB2 ODBC DRIVER"
dsn_database = "..."
dsn_hostname= "..."
dsn_port="60000"
dsn_protocol="TCPIP"
dsn_uid="..."
dsn_pwd="..."
d = {'col1': [1, 2,3,4,5,6,7,8,9,10], 'col2': [3, 4,3,4,5,6,7,8,2,34], 'col3': [1, 2,3,14,5,36,72,8,9,10],}
df = pd.DataFrame(data=d)
engine = create_engine('ibm_db_sa://' + dsn_uid + ':' + dsn_pwd + '@' + dsn_hostname + ':' + dsn_port + '/' + dsn_database)
df.to_sql('pandas1', engine)
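For reference, a hedged sketch of what adjusting those parameters might look like; the if_exists, index and chunksize values below are illustrative assumptions, not requirements:
df.to_sql(
    'pandas1',
    engine,
    if_exists='append',  # 'fail', 'replace', or 'append' if the table already exists
    index=False,         # do not write the DataFrame index as an extra column
    chunksize=1000       # insert rows in batches rather than all in one call
)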

Related

Python Sqlalchemy insert data into AWS Redshift

I want to load a large Excel table into AWS Redshift. Loading with Python and psycopg2 takes a long time, so I am trying SQLAlchemy, but the redshift-sqlalchemy documentation is confusing, so I want to use the regular SQLAlchemy library. The code below can pull data from AWS Redshift; I don't know how to modify it to INSERT data into Redshift. If possible, I would like to INSERT the data all at once.
import pandas as pd
import psycopg2
from sqlalchemy import create_engine
from sqlalchemy import text
sql = """
SELECT top 10 * FROM pg_user;
"""
redshift_endpoint1 = "YourDBname.cksrxes2iuiu.us-east-1.redshift.amazonaws.com"
redshift_user1 = "YourUserName"
redshift_pass1 = "YourRedshiftPassword"
port1 = 8192  # whatever your Redshift port number is
dbname1 = "YourDBname"
engine_string = "postgresql+psycopg2://%s:%s@%s:%d/%s" \
    % (redshift_user1, redshift_pass1, redshift_endpoint1, port1, dbname1)
engine1 = create_engine(engine_string)
df1 = pd.read_sql_query(text(sql), engine1)
df = pd.DataFrame({ 'id':['444'],'id2':[555]})
df.to_sql('YourTable', con=engine1,if_exists='append',index= False)
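If the plain .to_sql call above is too slow for a large table, a hedged sketch (assuming pandas 0.24 or later, which added the method parameter) that packs many rows into each INSERT statement:
df.to_sql(
    'YourTable',
    con=engine1,
    if_exists='append',
    index=False,
    method='multi',   # send many rows per INSERT statement
    chunksize=1000    # rows per batch; tune to the table width
)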

Python df2gspread.upload() silently failing

I have this code that runs smoothly in PyCharm but doesn't seem to export anything to Google Sheets.
import pandas as pd
import gspread
from df2gspread import df2gspread as d2g
from oauth2client.service_account import ServiceAccountCredentials
d = {'col1': [1, 2], 'col2': [3, 4]}
df = pd.DataFrame(data=d)
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name('credentials_file_here.json', scope)
gc = gspread.authorize(credentials)
spreadsheet_key = 'spread_sheet_key_entered_here'
wks_name = 'Sheet1'
d2g.upload(df, spreadsheet_key, wks_name, credentials=credentials, row_names=True)

gspread worksheet.update error - Worksheet has no attribute 'update'

I am trying to write a dataframe to an open Google Sheet in Google Colab, but am getting the error:
AttributeError: 'Worksheet' object has no attribute 'update'
I documented and tested the parts up to the error.
# General Imports
# Example at https://colab.research.google.com/notebooks/io.ipynb
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
#Import the library, authenticate, and create the interface to Sheets.
from google.colab import auth
auth.authenticate_user()
import gspread
from oauth2client.client import GoogleCredentials
import numpy as np
import pandas as pd
# Load the DataFrame
dataframe = [['A', 'B', 'C'], ['1', '2' ,'3'], ['Mary', 'Mai', 'Kat']]
print(dataframe)
# Open the Google Sheet
# This assumes that you have a spreadsheet called "RS Output" with a sheet "Data" on your Google Drive.
gc = gspread.authorize(GoogleCredentials.get_application_default())
my_workbook = 'RS Output'
my_sheet = "Data"
worksheet = gc.open(my_workbook).worksheet(my_sheet)
list_of_lists = worksheet.get_all_values()
print(list_of_lists)
# update the Google Sheet with the values from the Dataframe
# per gspread documentation at
# https://gspread.readthedocs.io/en/latest/user-guide.html
worksheet.update([dataframe.columns.values.tolist()] + worksheet.values.tolist())
This is the output:
[['A', 'B', 'C'], ['1', '2', '3'], ['Mary', 'Mai', 'Kat']]
[['Testing'], ['This']]
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-132-e085af26b2ed> in <module>()
21 # https://gspread.readthedocs.io/en/latest/user-guide.html
22
---> 23 worksheet.update([dataframe.columns.values.tolist()] + worksheet.values.tolist())
AttributeError: 'Worksheet' object has no attribute 'update'
I can't seem to find a clear example of how to write the dataframe to a Google Sheet.
Thanks
I had the same issue; this was my first time using a colab.research.google.com notebook.
It turned out the default gspread module was version 3.0.
!pip install --upgrade gspread
Upgrading it to version 3.7 fixed the missing .update() problem.
Found existing installation: gspread 3.0.1
Uninstalling gspread-3.0.1:
Successfully uninstalled gspread-3.0.1
Successfully installed gspread-3.7.0
Big thanks to: Shashank Rautela
AttributeError: 'Worksheet' object has no attribute 'update' means that the variable worksheet has no update attribute; you can verify that by adding print(dir(worksheet)) to your code.
If the variable does have the update attribute, dir(worksheet) will list update among the worksheet's methods.
Also, I tried to replicate your code and found some issues:
dataframe = [['A', 'B', 'C'], ['1', '2' ,'3'], ['Mary', 'Mai', 'Kat']] is already a list. dataframe.columns.values.tolist() will give you an error, since the variable dataframe is a list and has no columns attribute. Passing just dataframe to the update method is enough, since it is already a list of lists. An actual DataFrame would be built from data such as {'col1': [1, 2], 'col2': [3, 4]}.
Incorrect worksheet.update() usage. According to this document, the parameters of update() are a range and values (a list of lists if the range contains multiple cells). The call should look like this: worksheet.update("Range", data as a list of lists).
Here is an example on how to use the update() method:
Using List:
Code:
data = [["It" , "works!"]]
worksheet.update("A1:B1", data)
Using a pandas DataFrame:
Code:
df = pd.DataFrame({'Name': ['A', 'B', 'C'], 'Age': [20, 19, 23]})
values = df.columns.values.tolist()
worksheet.update("A1:B1", [values])
Based on how you used the update() method, you want to insert the column names
above the current data of the worksheet. Instead of update, you can use insert_rows():
Code:
df = pd.DataFrame({'Name': ['A', 'B', 'C'], 'Age': [20, 19, 23]})
values = df.columns.values.tolist()
worksheet.insert_rows([values], row=1, value_input_option='RAW')
References:
insert_rows
update
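Putting the pieces together, a minimal sketch that writes a whole DataFrame (header row plus values) with a single update() call, assuming a gspread version recent enough to have update():
df = pd.DataFrame({'Name': ['A', 'B', 'C'], 'Age': [20, 19, 23]})
worksheet.update('A1', [df.columns.values.tolist()] + df.values.tolist())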
I ran into the same issue in a Jupyter notebook running on a server (Ubuntu 18.04), while the same code works fine in PyCharm on my local machine (Ubuntu 20.04).
Meanwhile, here's how I push my pandas dataframe to a google spreadsheet:
import string
import numpy as np
# create a spreadsheet range that matches the size of the df (including 1 row for the column names). It looks like this: 'A1:AA3'
letters = list(string.ascii_uppercase)
col_names_spreadsheet = letters + list(np.array([[X + x for x in letters] for X in letters]).flat)
range_for_df = col_names_spreadsheet[0] + "1" + ":" + col_names_spreadsheet[df.shape[1] - 1] + str(df.shape[0] + 1)
# retrieve the matching cells
cell_list = worksheet.range(range_for_df)
# flatten the df, add the column names at the beginning
cell_values = list(df.columns)+list(df.values.flat)
# set the value of each cell
for i, val in enumerate(cell_values):  # gives us a tuple of an index and value
    cell_list[i].value = val  # use the index on cell_list and the val from cell_values
# update the cells in bulk
worksheet.update_cells(cell_list)
If the df has dates, it may raise this error:
Object of type date is not JSON serializable
In that case I use this:
# turn all datetime columns into strings
import datetime
dt_cols = list(df.columns[[type(df[col].iloc[0]) is datetime.date for col in df.columns]])
for c in dt_cols:
    df[c] = df[c].apply(lambda x: x.isoformat())
credit to this guy for the trick: Python/gspread - how can I update multiple cells with DIFFERENT VALUES at once?
!pip install --upgrade gspread
Upgrade the gspread library with the above command; you will then be able to call the update method.
If you are getting any kind of AttributeError (assuming that you have used the correct syntax and attributes for gspread), it is because you are using gspread's old version 3.0.1. If you haven't used gspread before in Google Colab, this is the version that comes pre-installed. Just run
!pip install --upgrade gspread
At the time of writing, this command upgrades gspread to version 3.7.1.
Happy Coding!

How to iteratively add rows to an initially empty pandas DataFrame?

I have to iteratively add rows to a pandas DataFrame, and I find this quite hard to achieve efficiently. Performance-wise, I am also not sure this is the best approach.
So from time to time, I get data from a server and this new dataset from the server will be a new row in my pandas DataFrame.
import pandas as pd
import datetime
df = pd.DataFrame([], columns=['Timestamp', 'Value'])
# as this df will grow over time, is this a costly copy (df = df.append), or does pandas do some optimization there, or is there a better way to achieve this?
# ignore_index, as I want the index to automatically increment
df = df.append({'Timestamp': datetime.datetime.now()}, ignore_index=True)
print(df)
After one day the DataFrame will be deleted, but during this time, probably 100k times a new row with data will be added.
The goal is still to achieve this in a very efficient way, runtime-wise (memory doesn't matter too much as enough RAM is present).
I tried this to compare the speed of 'append' with that of 'loc':
import timeit
code = """
import pandas as pd
df = pd.DataFrame({'A': range(0, 6), 'B' : range(0,6)})
df= df.append({'A' : 3, 'B' : 4}, ignore_index = True)
"""
code2 = """
import pandas as pd
df = pd.DataFrame({'A': range(0, 6), 'B' : range(0,6)})
df.loc[df.index.max()+1, :] = [3, 4]
"""
elapsed_time1 = timeit.timeit(code, number = 1000)/1000
elapsed_time2 = timeit.timeit(code2, number = 1000)/1000
print('With "append" :',elapsed_time1)
print('With "loc" :' , elapsed_time2)
On my machine, I obtained these results:
With "append" : 0.001502693824000744
With "loc" : 0.0010836279180002747
Using "loc" seems to be faster.

Quandl code is not working

I have just started using Quandl and pandas, and I came across this code.
import quandl
import pandas as pd
api_key=open('quandlapi.txt','r').read()
df = quandl.get("FMAC/HPI_TX", authtoken=api_key)
fiddy_states = pd.read_html('https://simple.wikipedia.org/wiki/List_of_U.S._states')
main_df = pd.DataFrame()
for abbv in fiddy_states[0][0][1:]:
    query = "FMAC/HPI_" + str(abbv)
    df = quandl.get(query, authtoken=api_key)
    if main_df.empty:
        main_df = df
    else:
        main_df = main_df.join(df)
But when I run it I get the following error:
ValueError: columns overlap but no suffix specified: Index(['Value'], dtype='object')
Can anyone tell me what I am doing wrong here?
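For reference, a minimal sketch of one way around the overlap, assuming each frame returned by quandl.get has a single column named 'Value' (so every join collides on that name): rename the column to the state abbreviation before joining, or pass lsuffix/rsuffix to join().
df = quandl.get(query, authtoken=api_key)
df.rename(columns={'Value': str(abbv)}, inplace=True)  # one uniquely named column per state
if main_df.empty:
    main_df = df
else:
    main_df = main_df.join(df)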
