Can we copy one column from excel and convert it to a list in Python? - python-3.x

I use
df = pd.read_clipboard()
list_name = df['column_name'].to_list()
but this method is a bit long for me. I want to copy a column in Excel, switch to Python, and then apply some function so that the copied text is converted to a list.
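For illustration, a minimal helper along these lines (the name clip_to_list is mine, and it assumes you copy the column without its header) would turn the clipboard contents straight into a list:
import pandas as pd

def clip_to_list():
    # read_clipboard parses the copied cells like read_csv;
    # header=None treats the first copied cell as data, not as a column name
    df = pd.read_clipboard(header=None)
    return df[0].tolist()

# copy a column in Excel, then:
# mylist = clip_to_list()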

This will read an Excel column into a list:
import xlrd

book = xlrd.open_workbook('Myfile.xlsx')  # path to your file
sheet = book.sheet_by_name("Sheet1")      # sheet name

def Readlist(Element, Column):
    # start at row 1 to skip the header row
    for row in range(1, sheet.nrows):
        Element.append(str(sheet.row_values(row)[Column]))

column1 = []          # list name
Readlist(column1, 1)  # column number is 1 here
print(column1)
To read a specified column into a list, use the Readlist function and initialize the list variable with [] before calling it.
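Note that xlrd 2.0 and later only reads legacy .xls files, so for an .xlsx workbook a rough equivalent using openpyxl (same assumed file, sheet, and column as above) might be:
from openpyxl import load_workbook

book = load_workbook('Myfile.xlsx', read_only=True)  # path to your file
sheet = book['Sheet1']                               # sheet name

# iterate over column B (the second column), skipping the header row
column1 = [str(value) for (value,) in
           sheet.iter_rows(min_row=2, min_col=2, max_col=2, values_only=True)]
print(column1)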
Using pandas:
import pandas as pd

df = pd.read_excel("path.xlsx", index_col=None, na_values=['NA'], usecols="A")
# select the first (and only) column by position; its label comes from the
# header row, so df[0] would raise a KeyError
mylist = df.iloc[:, 0].tolist()
print(mylist)

Related

Append values to Dataframe in loop and if conditions

Need help please.
I have a loop that reads rows from Excel and appends them to a DataFrame if certain columns exist.
If the columns don't exist in a sheet, I need a second DataFrame that records the file name and sheet name for those sheets and writes them to an Excel file. I also want the values to be unique.
I tried adding to dfErrorList, but it only showed the last sheet name and file name, repeated many times in the output Excel file.
from xlsxwriter import Workbook
import pandas as pd
import openpyxl
import glob
import os

path = 'filestoimport/*.xlsx'
list_of_dfs = []
list_of_dferror = []
dfErrorList = pd.DataFrame()  # create empty df

for filepath in glob.glob(path):
    xl = pd.ExcelFile(filepath)
    # Define an empty list to store individual DataFrames
    for sheet_name in xl.sheet_names:
        df = pd.read_excel(filepath, sheet_name=sheet_name)
        df['sheetname'] = sheet_name
        file_name = os.path.basename(filepath)
        df['sourcefilename'] = file_name
        if "Project ID" in df.columns and "Status" in df.columns:
            print('')
        else:
            dfErrorList['sheetname'] = df['sheetname']  # adds `sheet_name` into the column
            dfErrorList['sourcefilename'] = df['sourcefilename']
            continue
        list_of_dferror.append(dfErrorList)
        df['Status'].fillna('', inplace=True)
        df['Added by'].fillna('', inplace=True)
        list_of_dfs.append(df)

# Combine all DataFrames into one
data = pd.concat(list_of_dfs, ignore_index=True)
dataErrors = pd.concat(list_of_dferror, ignore_index=True)
dataErrors.to_excel(r'error.xlsx', index=False)
# data.to_excel("total_countries.xlsx", index=None)
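A minimal sketch of one way to fix this, assuming the same folder layout and column names as above, is to record one dict per offending (file, sheet) pair and de-duplicate at the end, instead of repeatedly overwriting a shared DataFrame:
import glob
import os
import pandas as pd

path = 'filestoimport/*.xlsx'
error_rows = []    # one dict per sheet that is missing the required columns
list_of_dfs = []

for filepath in glob.glob(path):
    file_name = os.path.basename(filepath)
    for sheet_name, df in pd.read_excel(filepath, sheet_name=None).items():
        if "Project ID" in df.columns and "Status" in df.columns:
            df['sheetname'] = sheet_name
            df['sourcefilename'] = file_name
            list_of_dfs.append(df)
        else:
            # record the file/sheet pair instead of mutating a shared DataFrame
            error_rows.append({'sourcefilename': file_name, 'sheetname': sheet_name})

dataErrors = pd.DataFrame(error_rows).drop_duplicates()
dataErrors.to_excel('error.xlsx', index=False)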

Python for the Comparison of excel column elements and print the matched elements in separate column

I have developed the following code and fetched the matched output using a for loop. I need to print these output elements in a separate column using Python.
Excel file name - Sample_data.xlsx
first column - WBS_CODE
second column - PROJECT_CODE
The first and second columns are compared, and the matches should be printed in a separate column (column F). Please find my code below:
import pandas as pd

A = pd.read_excel(r"D:\python_work\Sample_data.xlsx", sheet_name='31Sep')
code = A['WBS_CODE'].tolist()
B = pd.read_excel(r"D:\python_work\Sample_data.xlsx", sheet_name='4Dec')
code1 = B['PROJECT_CODE'].tolist()

for x in code1:
    if x in code:
        print(x)
    else:
        print("NA")
output:
NA
NA
NA
APP-ACI-PJ-APAC-EMEA-ENG
NA
NA
I have found a way to export the output and print it in a separate column of the Excel sheet. Below is the solution:
import pandas as pd
from openpyxl import load_workbook

# Reading the Excel file columns
A = pd.read_excel(r"D:\python_work\Sample_data.xlsx", sheet_name='4Dec')
code = A['PROJECT_CODE'].tolist()
B = pd.read_excel(r"D:\python_work\Sample_data.xlsx", sheet_name='31Sep')
code1 = B['WBS_CODE'].tolist()

# Comparison of columns
class test:
    def loop(self):
        result = []
        for x in code1:
            if x in code:
                result.append(x)
            else:
                result.append("NA")
        print(result)
        # Printing data into Excel
        # (the writer.book / writer.sheets pattern below targets older pandas;
        # newer pandas manages the workbook itself)
        try:
            book = load_workbook(r'D:\python_work\Aew1.xlsx')
            writer = pd.ExcelWriter(r'D:\python_work\Aew1.xlsx', engine='openpyxl')
            writer.book = book
            writer.sheets = dict((ws.title, ws) for ws in book.worksheets)  # load all worksheets of the opened workbook
            df = pd.DataFrame.from_dict({'Column1': result})
            df.to_excel(writer, sheet_name='Sheet1', startcol=19)
            writer.save()
        except FileNotFoundError:
            print("File not found: check the name or existence of the specified file")
        except PermissionError:
            print("File opened/no access: check whether you have access to the file or it is open elsewhere")

test().loop()
Steps that solved it:
1. Appended the for loop output to a list.
2. Used the openpyxl library to print the output to a column in the Excel worksheet.
Thanks guys for the help and support. Have a good day.
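As a hedged aside, the same comparison can be done without an explicit loop by using pandas' isin and writing the result straight to a new column; this is a sketch under the same sheet and column names, not the author's method, and the output file name is made up:
import pandas as pd

path = r"D:\python_work\Sample_data.xlsx"
A = pd.read_excel(path, sheet_name='4Dec')
B = pd.read_excel(path, sheet_name='31Sep')

# keep the WBS_CODE value where it also appears in PROJECT_CODE, otherwise "NA"
B['Matched'] = B['WBS_CODE'].where(B['WBS_CODE'].isin(A['PROJECT_CODE']), 'NA')

# write the annotated sheet to a new file
B.to_excel(r"D:\python_work\Sample_data_matched.xlsx", index=False)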

How to read an excel file with multiple sheets using for loop in python

This is what I try:
import pandas as pd
from pathlib import Path

loc = Path(r'D:\DataSciSpec\Practice\Forloopindict.xlsx')
dict = pd.read_excel(loc, sheet_name=None)  # dict of {sheet name: DataFrame}
for i in dict.keys():
    print(i)
I get the names of the sheets:
Sheet4
Sheet3
Sheet2
Sheet1
I can also see the sheet content one by one:
for i in dict.keys():
    print(dict[i].head())
But how do I put this data into n data frames (equal to the number of sheets) and then append them to one another?
This will create a single DataFrame (df_full) with the data from all sheets.
import pandas as pd

loc = r'D:\DataSciSpec\Practice\Forloopindict.xlsx'
workbook = pd.read_excel(loc, sheet_name=None)  # dict of {sheet name: DataFrame}

# collect each sheet's DataFrame, then concatenate once
# (DataFrame.append was removed in pandas 2.0, so pd.concat is used instead)
frames = []
for _, sheet in workbook.items():
    frames.append(sheet)

# ignore_index resets the index, or you'll have duplicates
df_full = pd.concat(frames, ignore_index=True)
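Since pd.read_excel(..., sheet_name=None) already returns a dict of DataFrames, the loop can arguably be dropped entirely; a hedged two-liner version:
import pandas as pd

workbook = pd.read_excel(r'D:\DataSciSpec\Practice\Forloopindict.xlsx', sheet_name=None)
df_full = pd.concat(workbook.values(), ignore_index=True)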

Appending Columns from several worksheets Python

I am trying to import certain columns of data from several different sheets inside a workbook. However, when appending, it only seems to append 'q2 survey' to the new workbook. How do I get this to append properly?
import sys, os
import pandas as pd
import xlrd
import xlwt

b = ['q1 survey', 'q2 survey', 'q3 survey']             # sheet names
df_t = pd.DataFrame(columns=["Month", "Date", "Year"])  # column names
xls = "path_to_file/R.xls"
sheet = []
df_b = pd.DataFrame()
pd.read_excel(xls, sheet)

for sheet in b:
    df = pd.read_excel(xls, sheet)
    df.rename(columns=lambda x: x.strip().upper(), inplace=True)
    bill = df_b.append(df[df_t])

bill.to_excel('Survey.xlsx', index=False)
I think if you do:
import pandas as pd

b = ['q1 survey', 'q2 survey', 'q3 survey']  # sheet names
# headers after the .strip().upper() rename below
list_col = ["MONTH", "DATE", "YEAR"]         # column names
xls = "path_to_file/R.xls"

# create the empty df named bill to append to
bill = pd.DataFrame(columns=list_col)
for sheet in b:
    # read the sheet
    df = pd.read_excel(xls, sheet)
    df.rename(columns=lambda x: x.strip().upper(), inplace=True)
    # need to assign bill again (DataFrame.append returns a new frame;
    # it also requires pandas < 2.0, where append still exists)
    bill = bill.append(df[list_col])

# to excel
bill.to_excel('Survey.xlsx', index=False)
it should work and correct the errors in your code, but you can do it a bit differently using pd.concat:
import pandas as pd

list_sheet = ['q1 survey', 'q2 survey', 'q3 survey']  # sheet names
list_col = ["MONTH", "DATE", "YEAR"]                  # headers after the rename below
# read the xls file once and then access each sheet in the loop, should be faster
xls_file = pd.ExcelFile("path_to_file/R.xls")

# create a list to collect the DataFrames
list_df_to_concat = []
for sheet in list_sheet:
    # read the sheet
    df = pd.read_excel(xls_file, sheet)
    df.rename(columns=lambda x: x.strip().upper(), inplace=True)
    # append the df to the list
    list_df_to_concat.append(df[list_col])

# to excel
pd.concat(list_df_to_concat).to_excel('Survey.xlsx', index=False)
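Another option, offered here only as a sketch, is to let read_excel fetch all the sheets in one call: passing a list to sheet_name returns a dict of DataFrames keyed by sheet name.
import pandas as pd

list_sheet = ['q1 survey', 'q2 survey', 'q3 survey']
list_col = ["MONTH", "DATE", "YEAR"]  # assuming the headers are uppercased as above

# one call returns {sheet name: DataFrame}
sheets = pd.read_excel("path_to_file/R.xls", sheet_name=list_sheet)

frames = [df.rename(columns=lambda x: x.strip().upper())[list_col]
          for df in sheets.values()]
pd.concat(frames, ignore_index=True).to_excel('Survey.xlsx', index=False)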

Python3 - using pandas to group rows, where two colums contain values in forward or reverse order: v1,v2 or v2,v1

I'm fairly new to Python and pandas, but I've written code that reads an Excel workbook and groups rows based on the values contained in two columns.
So where Col_1=A and Col_2=B, or Col_1=B and Col_2=A, both rows would be assigned GroupID=1.
[image: sample spreadsheet data, with rows color-coded for ease of visibility]
I've managed to get this working, but I wanted to know if there's a simpler/more efficient/cleaner/less clunky way to do this.
import pandas as pd

df = pd.read_excel('test.xlsx')

# get column values into a list
col_group = df.groupby(['Header_2', 'Header_3'])
original_list = list(col_group.groups)

# parse list to remove 'reverse-duplicates'
new_list = []
for a, b in original_list:
    if (b, a) not in new_list:
        new_list.append((a, b))

# iterate through each row in the DataFrame
# check to see if values in new_list[] exist, in forward or reverse order
for index, row in df.iterrows():
    for a, b in new_list:
        # if the values exist in the forward direction
        if (a in df.loc[index, "Header_2"]) and (b in df.loc[index, "Header_3"]):
            # GroupID value given, where value is the index in new_list[]
            df.loc[index, "GroupID"] = new_list.index((a, b)) + 1
        # else check if the values exist in the reverse direction
        if (b in df.loc[index, "Header_2"]) and (a in df.loc[index, "Header_3"]):
            df.loc[index, "GroupID"] = new_list.index((a, b)) + 1

# Finally write the DataFrame to a new spreadsheet
writer = pd.ExcelWriter('output.xlsx')
df.to_excel(writer, 'Sheet1')
writer.close()  # close the writer so the file is actually saved
I know of the pandas.groupby([columnA, columnB]) option, but I couldn't figure out a way to create groups that contained both (v1, v2) and (v2, v1).
A boolean mask should do the trick:
import pandas as pd

df = pd.read_excel('test.xlsx')

mask = ((df['Header_2'] == 'A') & (df['Header_3'] == 'B') |
        (df['Header_2'] == 'B') & (df['Header_3'] == 'A'))

# Label each row in the original DataFrame with
# 1 if it matches the specified criteria, and
# 0 if it does not.
# This column can now be used in groupby operations.
df.loc[:, 'match_flag'] = mask.astype(int)

# Get rows that match the criteria
df[mask]
# Get rows that do not match the criteria
df[~mask]
EDIT: updated answer to address the groupby requirement.
I would do something like this.
import pandas as pd
df = pd.read_excel('test.xlsx')
#make the ordering consistent
df["group1"] = df[["Header_2","Header_3"]].max(axis=1)
df["group2"] = df[["Header_2","Header_3"]].min(axis=1)
#group them together
df = df.sort_values(by=["group1","group2"])
If you need to deal with more than two columns, I can write up a more general way to do this.
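Building on that answer, here is a hedged sketch of how the normalized pair could be turned into the GroupID column the question asks for, using groupby().ngroup():
import pandas as pd

df = pd.read_excel('test.xlsx')

# normalize the pair so (A, B) and (B, A) look identical
df["group1"] = df[["Header_2", "Header_3"]].max(axis=1)
df["group2"] = df[["Header_2", "Header_3"]].min(axis=1)

# ngroup() numbers each distinct (group1, group2) pair from 0, so add 1
df["GroupID"] = df.groupby(["group1", "group2"]).ngroup() + 1

df.drop(columns=["group1", "group2"]).to_excel('output.xlsx', index=False)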
