How do I convert multiple multiline txt files to excel - ensuring each file is its own line, then each line of text is it own row? Python3 - python-3.x

Using openpyxl and Path I aim to:
Create multiple multiline .txt files,
then insert .txt content into a .xlsx file ensuring file 1 is in column 1 and each line has its own row.
I thought to create a nested list then loop through it to insert the text. I cannot figure how to ensure that all the nested list string is displayed. This is what I have so far which nearly does what I want however it's just a repeat of the first line of text.
from pathlib import Path
import openpyxl
listOfText = []
wb = openpyxl.Workbook() # Create a new workbook to insert the text files
sheet = wb.active
for txtFile in range(5): # create 5 text files
createTextFile = Path('textFile' + str(txtFile) + '.txt')
createTextFile.write_text(f'''Hello, this is a multiple line text file.
My Name is x.
This is text file {txtFile}.''')
readTxtFile = open(createTextFile)
listOfText.append(readTxtFile.readlines()) # nest the list from each text file into a parent list
textFileList = len(listOfText[txtFile]) # get the number of lines of text from the file. They are all 3 as made above
# Each column displays text from each text file
for row in range(1, txtFile + 1):
for col in range(1, textFileList + 1):
sheet.cell(row=row, column=col).value = listOfText[txtFile][0]
wb.save('importedTextFiles.xlsx')
The output is 4 columns/4 rows. All of which say the same 'Hello, this is a multiple line text file.'
Appreciate any help with this!

The problem is in the for loop while writing, change the line sheet.cell(row=row, column=col).value = listOfText[txtFile][0] to sheet.cell(row=col, column=row).value = listOfText[row-1][col-1] and it will work

Related

How to write .xlsx data to a .txt file ensuring each column has its own text file, then each row is a new line?

I believe I am close to cracking this however I can't add multiple lines of text to the .txt files. The column do relate to their own .txt files.
import openpyxl
from pathlib import Path
# create workbook
wb = openpyxl.Workbook()
sheet = wb.active
listOfTextFiles = []
# Create a workbook 5x5 with dummy text
for row in range(1, 6):
for col in range(1, 6):
file = sheet.cell(row=row, column=col).value = f'text row:{row}, col:{col}'
listOfTextFiles.append(file)
print(listOfTextFiles) # for testing
wb.save('testSS.xlsx')
for i in range(row): # create 5 text files
textFile = open(f'ssToTextFile{i}.txt', 'w')
textFile.write(listOfTextFiles[i])
The output for each text file is below. I know it has something to do with the 'textFile.write(listOfTextFiles[i])' and I've tried many ways such as replacing [i] with [j] or [file]. I think I am overwriting the text through each loop.
Current output:
ssToTextFile.txt -> text row:1, col:1
What I want the output to be in each .txt file:
ssToTextFile.txt -> text row:1, col:1
text row:2, col:1
text row:3, col:1
text row:4, col:1
text row:5, col:1
Then, the next .txt file to be:
text row:1, col:2
text row:2, col:2 etc
Would appreciate any feedback and the logic behind it please?
Solved. Using sheet.columns on the outer loop I could use [x-1] as the index.
for x in range(sheet.min_row, sheet.max_row + 1):
textFile = open(f'ssToTextFile{x-1}.txt', 'w')
for y in list(sheet.columns)[x-1]:
textFile.write(str(y.value)+ '\n')
print(y.value)

Change in some character encoding when reding a MS word file using python docx module and saving it back

I am reading a word file (word file is just having a huge table), inserting a blank row after each row in the table and saving it back. After save, the new file is having some characters changed. I am guessing change in the encoding is happening.
Here is my code for reading and saving it.
def insert_row_in_table(table):
empty_row = get_empty_row(table) # this function will return an empty row
for row in table.rows:
tr = row._tr
tr.addnext(copy.deepcopy(empty_row))
def convert(file: str):
doc = docx.Document(file)
row_c = 0
for table in doc.tables:
insert_row_in_table(table)
# save file
file_name = os.path.splitext(file)
new_name = file_name[0] + '_updated' + file_name[1]
doc.save(new_name)
This is how its looking when i am comparing both the files (Left side: Original file, Right side: Updated file)
How to preserve the characters encoding or avoid this issue?

How can I create an excel file with multiple sheets that stores content of a text file using python

I need to create an excel file and each sheet contains the contents of a text file in my directory, for example if I've two text file then I'll have two sheets and each sheet contains the content of the text file.
I've managed to create the excel file but I could only fill it with the contents of the last text file in my directory, howevr, I need to read all my text files and save them into excel.
This is my code so far:
import os
import glob
import xlsxwriter
file_name='WriteExcel.xlsx'
path = 'C:/Users/khouloud.ayari/Desktop/khouloud/python/Readfiles'
txtCounter = len(glob.glob1(path,"*.txt"))
for filename in glob.glob(os.path.join(path, '*.txt')):
f = open(filename, 'r')
content = f.read()
print (len(content))
workbook = xlsxwriter.Workbook(file_name)
ws = workbook.add_worksheet("sheet" + str(i))
ws.set_column(0, 1, 30)
ws.set_column(1, 2, 25)
parametres = (
['file', content],
)
# Start from the first cell. Rows and
# columns are zero indexed.
row = 0
col = 0
# Iterate over the data and write it out row by row.
for name, parametres in (parametres):
ws.write(row, col, name)
ws.write(row, col + 1, parametres)
row += 1
workbook.close()
example:
if I have two text file, the content of the first file is 'hello', the content of the second text file is 'world', in this case I need to create two worksheets, first worksheet needs to store 'hello' and the second worksheet needs to store 'world'.
but my two worksheets contain 'world'.
I recommend to use pandas. It in turn uses xlsxwriter to write data (whole tables) to excel files but makes it much easier - with literally couple lines of code.
import pandas as pd
df_1 = pd.DataFrame({'data': ['Hello']})
sn_1 = 'hello'
df_2 = pd.DataFrame({'data': ['World']})
sn_2 = 'world'
filename_excel = '1.xlsx'
with pd.ExcelWriter(filename_excel) as writer:
for df, sheet_name in zip([df_1, df_2], [sn_1, sn_2]):
df.to_excel(writer, index=False, header=False, sheet_name=sheet_name)

How to create csv file for each line in a text file?

I have a text file price.txt that contains the following rows:
open
high
low
close
I need to create a separate csv files for each row in the text file and name the csv files as price1.csv, price2.csv and so on
I tried the following code
with open('price.txt') as infile, open('outfile.csv','w') as outfile:
for line in infile:
outfile.write(line.replace(' ',','))
I am getting only one csv file that has the following rows
open
high
low
close
How can I create a csv file for each row?
Here the code to obtain a different file called price1.csv, price2.csv etc for every line (with the substitution whitespace - comma that you put in your example) with comments:
### start a counter for the filename number
i = 0
with open('price.txt') as infile:
### open a loop over rows of input file
for line in infile :
### add 1 to counter
i += 1
### create the output filename for the row
newfile_name = "price" + str(i) + ".csv"
### write in the new filename the modified row
with open(newfile_name,'w') as outfile:
outfile.write(line.replace(' ',','))

Read file and output specific fields to CSV file

I'm trying to search for data based on a key word and export that data to an Excel or text file.
When I "print" the variable/list it works no problem. When I try and output the data to a file it only outputs the last entry. I think something is wrong with the iteration, but I can't figure it out.
import xlsxwriter
#Paths
xls_output_path = 'C:\\Data\\'
config = 'C:\\Configs\\filename.txt'
excel_inc = 0 #used to increment the excel columns so not everything
#is written in "A1"
lines = open(config,"r").read().splitlines()
search_term = "ACL"
for i, line in enumerate(lines):
if search_term in line:
split_lines = line.split(' ') #Split lines via a space.
linebefore = lines[i - 1] #Print the line before the search term
linebefore_split = linebefore.split(' ') #Split the line before via
#space
from_obj = linebefore_split[2] #[2] holds the data I need
to_object = split_lines[4] #[4] holds the data I need
print(len(split_lines)) #Prints each found line with no
#problem.
excel_inc = excel_inc + 1 #Increments for column A so not all of
#the data is placed in A1
excel_inc_str = str(excel_inc) #Change type to string so it can
#concatenate.
workbook = xlsxwriter.Workbook(xls_output_path + 'Test.xlsx') #Creates the xls file
worksheet = workbook.add_worksheet()
worksheet.write('A' + excel_inc_str, split_lines[4]) #Write data from
#split_lines[4]
#to column A
workbook.close()
I created this script so it will go and find all lines in the "config" file with the keyword "ACL".
It then has the ability to print the line before and the actual line the data is found. This works great.
My next step is outputting the data to an excel spreadsheet. This is where I get stuck.
The script only prints the very last item in the column A row 10.
I need help figuring out why it'll print the data correctly, but it won't output it to an excel spreadsheet or even a .txt file.
Try this - I moved your workbook and worksheet definitions outside the loop, so it doesn't keep getting redefined.
import xlsxwriter
#Paths
xls_output_path = 'C:\\Data\\'
config = 'C:\\Configs\\filename.txt'
excel_inc = 0 #used to increment the excel columns so not everything
#is written in "A1"
lines = open(config,"r").read().splitlines()
search_term = "ACL"
workbook = xlsxwriter.Workbook(xls_output_path + 'Test.xlsx') #Creates the xls file
worksheet = workbook.add_worksheet()
for i, line in enumerate(lines):
if search_term in line:
split_lines = line.split(' ') #Split lines via a space.
linebefore = lines[i - 1] #Print the line before the search term
linebefore_split = linebefore.split(' ') #Split the line before via
#space
from_obj = linebefore_split[2] #[2] holds the data I need
to_object = split_lines[4] #[4] holds the data I need
print(len(split_lines)) #Prints each found line with no
#problem.
excel_inc = excel_inc + 1 #Increments for column A so not all of
#the data is placed in A1
excel_inc_str = str(excel_inc) #Change type to string so it can
#concatenate.
worksheet.write('A' + excel_inc_str, split_lines[4]) #Write data from
#split_lines[4]
#to column A
workbook.close()

Resources