Trouble reading csvs saved in sharefile (citrix) - python-3.x

I wrote the following code to create dataframes from files saved in sharefile. It works perfectly for excel files, but fails for csv files with the error EmptyDataError: No columns to parse from file.
tblname = 'test'
fPth = r'Z:\Favorites\test10 (Group D - Custom EM&V)\8 PII\16 - Project Selection Plan\QC\Data\test.csv'
sht = 'Gross_Data'
shtStart = 0
fType = 'csv'
fitem = sfsession.get_io_version(fPth)
if fitem is None:
print(f'Could not create sharefile item for {fPth}')
else:
try:
if fType == 'csv':
df = pd.read_csv(fitem.io_data, header = shtStart)
elif fType == 'excel':
df = pd.read_excel(fitem.io_data, sheet_name = sht, header = shtStart)
else:
pass
print(f'Data import COMPLETE for {fPth}: {str(datetime.now())}')
except:
print(f'Data import FAILED for {fPth}')
logging.critical(f'Data import FAILED for {fPth}')
If I replace fitem.io_data with fPth in df = pd.read_csv, the code works, but I can't use that as a permanent solution. Any suggestions?
Also sfsession is a sharefile session and get_io_version(fPth) gets the token and downloads all the file properties include its data.
Thanks.

An adaptation of this solution worked for me:
StringIO and pandas read_csv
I added fitem.io_data.seek(0) before the df = ... line
Closing the question.

Related

Python - Load multiple excel files with multiple sheets in it with specific columns

I have a problem scenario where I need to load excel files using Python
Load multiple excel files from a folder - Done
Each excel file has multiple sheets - Done
Need to load only required columns ('Receive Date','Process Date','Process Number','Task Name','Series','Office','Department','Unit Manager','AM'), other columns needs to be ignored/dropped and no error should be raised if the above columns does not exist in some sheets.
Load all the data into single data frame
------ Code -------
import pandas as pd
import os
import glob
def getfilepath():
path = 'C:/Users/Tracking Logs/'
files=(os.listdir(path))
allfiles = glob.glob(path+"*.xlsx")
def getdatafromexcel():
for file in allfiles:
rawdf = pd.read_excel(file,sheet_name=None,na_values='null',keep_default_na=False,dtype=object,date_parser=True)
cols=('Receive Date','Process Date','Process Number','Task Name','Series','Office','Department','Unit Manager','AM/AA/PC')
display(df)
getfilepath()
getdatafromexcel()
I found the solution:
import pandas as pd
import os
import glob
from IPython.display import HTML,display
from openpyxl import load_workbook
path = 'C:/Users/Tracking Logs/'
cols = ['Receive Date','Process Date','Task Name','Series','Office','Department','Unit Manager','AM/AA/PC']
def getfilepath(path):
files=(os.listdir(path))
allfiles = glob.glob(path+"*.xlsx")
#print('Allfiles: ',allfiles)
return allfiles
def getdatafromexcel(cols,allfiles):
for i in range(len(allfiles)):
print('\nCounter: ',i,' \nFilenames: ',allfiles[i])
wb = load_workbook(allfiles[i],read_only=True)
for sheetname in wb.sheetnames:
print('Sheetname: ',sheetname)
try:
df = pd.read_excel(allfiles[i],sheet_name=sheetname,na_values='null',usecols=cols,
keep_default_na=False,dtype=object)
Indexnames = df[(df["Task Name"] == '') & (df["Series"] == '') & (df["Office"] == '')].index
df.drop(Indexnames,inplace=True)
display(df)
fulldf=fulldf.append(df,ignore_index=True)
except Exception as e:
print(e)
finally:
print('this executed')
wb.close()
display(fulldf)
allfiles = getfilepath(path)
getdatafromexcel(cols,allfiles)
One can use pd.ExcelFile and pd.read_excel to get the required results.
def getdatafromexcel():
for file in allfiles:
xl = pd.ExcelFile(file)
res = len(xl.sheet_names)
if res>1:
for i in range(1, res+1):
df = pd.read_excel(file, sheet_name= '%d' %i)
# Do selection, preprocessing what you want here
if i == 1:
df.to_csv(<your_path> + '1.csv')
df_1 = pd.read_csv(<your_path> + '1.csv')
if i > 1:
df_1 = pd.concat([df_1, df])
else:
df_1 = pd.read_excel(file)
# Do selection, preprocessing what you what here
df_1.to_csv(<your_path> + '.csv', index= False)

Loop over excel files' paths under a directory and pass them to data manipulation function in Python

I need to check the excel files under a directory /Users/x/Documents/test/ by DataCheck function from data_check.py, so I can do data manipulation of many excel files, data_check.py has code structure as follows:
import pandas as pd
def DataCheck(filePath):
df = pd.read_excel(filePath)
try:
df = df.dropna(subset=['building', 'floor', 'room'], how = 'all')
...
...
...
df.to_excel(writer, 'Sheet1', index = False)
if __name__ == '__main__':
status = True
while status:
rawPath = input(r"")
filePath = rawPath.strip('\"')
if filePath.strip() == "":
status = False
DataCheck(filePath)
In order to loop all the excel files' paths under a directory, I use:
import os
directory = '/Users/x/Documents/test/'
for filename in os.listdir(directory):
if filename.endswith(".xlsx") or filename.endswith(".xls"):
print(os.path.join(directory, filename))
else:
pass
Out:
/Users/x/Documents/test/test 3.xlsx
/Users/x/Documents/test/test 2.xlsx
/Users/x/Documents/test/test 4.xlsx
/Users/x/Documents/test/test.xlsx
But I don't know how to combine the code above together, to pass the excel files' paths to DataCheck(filePath).
Thanks for your kind help at advance.
Call the function with the names instead of printing them:
import os
directory = '/Users/x/Documents/test/'
for filename in os.listdir(directory):
if filename.endswith(".xlsx") or filename.endswith(".xls"):
fullname = os.path.join(directory, filename)
DataCheck(fullname)

Using pandas pd.Excel File with user input for folder path and filename

I'm using pd.ExcelFile as below to open and parse a file, but currently only with the actual folder path and filename in one string.
wb = pd.ExcelFile(folder_path+filename)
I want to put this into a function, that asks the user to give a path and filename and deals with invalid input. I started something like the below, but it doesn't seem like the error is being generated inside the function anyway, and i'm not sure how to say 'while wb isn't a valid thing' to continue to prompt for a filepath until we get a valid one?
def Load_Parse():
folder_path = input('\nEnter the path to the qry_T spreadsheet here (include slashes at the start and at the end): ')
filename = input('\nEnter the name of the spreadsheet to be used here: ')
sheetname = input('\nEnter the sheet containing the data here, including the extension (e.g. "qry_Trajectory 2019.xlsx": ')
try:
wb = pd.ExcelFile(folder_path+filename)
except FileNotFoundError:
Any ideas?
I'll then parse the file using a similar method i hope:
df = wb.parse('filename')
using Pathlib, os and pandas and a few functions.
one of the key functions you'll need is the while True which keeps executing a block of code until it's true and you initiate a break
feel free to edit to your own spec.
Modules
from pathlib import Path
import os
import pandas as pd
from xlrd import XLRDError
In Action
df = load_parser()
out:
#Hello Umar.Hussain please enter a valid target directory
#C:\Users\UmarH\Files
#1 excels_0
#2 excels_1
#Choose a number between 1 and 2
1
#Your Choice is excels_0.xlsx
#Choose a Sheet - Lists all sheets
'Sheet1'
# returns dataframe
Main Function
def load_parser():
user = os.getlogin()
print(f"Hello {user} please enter a valid target directory")
cmd = input('')
p = file_tester(cmd,file_type='path')
print("Please select a number from the following file")
target_file = create_excel_dict(p)
target_df = enumerate_sheets(target_file)
return target_df
Helper Functions
def file_tester(string_path, file_type="path"):
path = Path(string_path)
while True:
if path.is_dir():
break
else:
cmd = input(f"Please Enter a Valid {file_type}")
path = Path(cmd)
return path
def create_excel_dict(target_path):
xlsx_dict = {i: x for i, x in enumerate(target_path.glob('*.xlsx'), 1)}
for k,v in xlsx_dict.items():
print(k,v.stem)
rng = [i for i in xlsx_dict.keys()]
file_choice = input(f'Choose a number between {rng[0]} and {rng[-1]}')
while True:
try:
file_choice = int(file_choice)
print(f"Your Choice is {xlsx_dict[file_choice]}")
break
except KeyError:
file_choice = input(f'Choose a number between {rng[0]} and {rng[-1]}')
return xlsx_dict[file_choice]
def enumerate_sheets(target_file):
xl = pd.ExcelFile(target_file)
for sheet in xl.sheet_names:
print(sheet)
target_sheet = input("Please Type Your sheet name")
while True:
try:
df = pd.read_excel(xl,sheet_name=target_sheet)
break
except XLRDError:
target_sheet = input("Please enter a sheet from above.")
return df

If file does not exist create a dataframe instead

I want to be able to check if a CSV file exists in my directory. If it does not, I want to create an empty dataframe with certain columns. If it does I simply want to load that file. The problem is the code reads the else statement and triggers the error of file not found.
if path.exists('raw.csv') == False:
columns = ['join','fund_type','line_type','actual_amount','period','org']
revenueMain = pd.DataFrame(columns=columns)
elif path.exists('raw.csv') == True:
revenueMain = pd.read_csv('rev.csv', header=0)
I think you have the logic wrong. There is no need for an elif after the it fails the first if
import os.path as path
if path.exists('raw.csv'):
revenueMain = pd.read_csv('rev.csv', header=0)
else:
columns = ['join','fund_type','line_type','actual_amount','period','org']
revenueMain = pd.DataFrame(columns=columns)
You could also catch the error using:
try:
revenueMain = pd.read_csv('rev.csv', header=0)
except:
columns = ['join','fund_type','line_type','actual_amount','period','org']
revenueMain = pd.DataFrame(columns=columns)

Python file write issue with Pandas

i wrote this python script to search for unseen mail in a mailbox, download xlsx attachment, make some modification on it and then post them to another service.
All is working perfect with just one issue:
In the original xlsx file there is a column named "zona" containing the italian two letter string for the province.
If this value is "NA" (the value of the province of NAPLES) when
saving the resultant xlsx files has blank cell instead of NA.
is NA a reserved word and if yes, there is a way to quote it?
import os,email,imaplib,socket,requests
import pandas as pd
mail_user = os.environ.get('MAIL_USER')
mail_password = os.environ.get('MAIL_PASS')
mail_server = os.environ.get('MAIL_SERVER')
detach_dir = '.'
url=<removed url>
if mail_user is None or mail_password is None or mail_server is None:
print ('VARIABILI DI AMBIENTE NON DEFINITE')
exit(1)
try:
with imaplib.IMAP4_SSL(mail_server) as m:
try:
m.login(mail_user,mail_password)
m.select("INBOX")
resp, items = m.search(None, "UNSEEN")
items = items[0].split()
for emailid in items:
resp, data = m.fetch(emailid, "(RFC822)")
email_body = data[0][1] # getting the mail content
mail = email.message_from_bytes(email_body) # parsing the mail content to get a mail object
if mail.get_content_maintype() != 'multipart':
continue
for part in mail.walk():
if part.get_content_maintype() == 'multipart':
continue
if part.get('Content-Disposition') is None:
continue
filename = part.get_filename()
if filename.endswith('.xlsx'):
att_path = os.path.join(detach_dir, filename)
fp = open(att_path, 'wb')
fp.write(part.get_payload(decode=True))
fp.close()
xl = pd.ExcelFile(att_path)
df1 = xl.parse(sheet_name=0)
df1 = df1.replace({'\'':''}, regex=True)
df1.loc[df1['Prodotto'] == 'SP_TABLETA_SAMSUNG','Cod. ID.'] = 'X'
df1.loc[df1['Prodotto'] == 'AP_TLC','Cod. ID.'] = 'X'
df1.loc[df1['Prodotto'] == 'APDCMB00003','Cod. ID.'] = 'X'
df1.loc[df1['Prodotto'] == 'APDCMB03252','Cod. ID.'] = 'X'
writer = pd.ExcelWriter(att_path, engine='xlsxwriter')
df1.to_excel(writer, sheet_name='Foglio1', index=False)
writer.save()
uf = {'files': open(att_path, 'rb')}
http.client.HTTPConnection.debuglevel = 0
r = requests.post(url, files=uf)
print (r.text)
except imaplib.IMAP4_SSL.error as e:
print (e)
exit(1)
except imaplib.IMAP4.error:
print ("Errore di connessione al server")
exit(1)
It seems that Pandas is treating the NA value as a NaN and therefore, when you write to excel it writes this value as '' by default (see docs).
You can pass na_rep='NA' to the to_excel() function in order to write it out as a string;
df1.to_excel(writer, sheet_name='Foglio1', index=False, na_rep='NA')
But as a precaution keep an eye out as any other NaN values present in your df will also be written to the excel file as 'NA'.
Reading the docs link post by #Matt B. i found this solution:
df1 = xl.parse(sheet_name=0, keep_default_na=False, na_values=['_'])
If i understand well only _ are interpreted as "not avalaible"

Resources