python function read file from subdirectory - python-3.x

I'm trying to write this function so that I can pass files or folders and read from them using pandas.
import pandas as pd
import os

path = os.getcwd()
path = '..'  # this would be root
revenue_folder = '../Data/Revenue'
random_file = '2017-08-01_Aug.csv'

def csv_reader(csv_file):
    for root, dirs, files in os.walk(path):
        for f in files:
            with open(os.path.join(root, csv_file)) as f1:
                pd.read_csv(f1, sep=';')
                print(f1)

csv_reader(random_file)
FileNotFoundError: [Errno 2] No such file or directory: '../2017-08-01_Aug.csv'
I have since made some changes, and now the problem is that it goes to another subdirectory. What I want is to iterate through all my files and folders, find the desired file, then read it. To be clear, my desired file is in the revenue_folder.
def csv_reader(csv_file):
    for root, dirs, files in os.walk(path):
        for f in files:
            base, ext = os.path.splitext(f)
            if 'csv' in ext:
                print(root)
                with open(os.path.join(root, csv_file)) as f1:
                    pd.read_excel(f1, sep=':')
                    print(f1)

csv_reader(random_file)
FileNotFoundError: [Errno 2] No such file or directory: './Data/Backlog/2017-08-01_Aug.csv'

Well, after the edit the whole scenario of the question changed. The code below searches recursively through the files and folders to find the files that match the criteria:
def get_all_matching_files(root_path, matching_criteria):
    """
    Gets all files that match a string criteria.

    :param root_path: the root directory path from where searching needs to begin
    :param matching_criteria: a string or a tuple of strings that needs to be matched in the file name
    :return: a list of all matching files
    """
    return [os.path.join(root, name)
            for root, dirs, files in os.walk(root_path)
            for name in files
            if name.endswith(matching_criteria)]

def main(root_path):
    """
    The main method to start finding the file.

    :param root_path: the root dir where the search needs to be started
    :return: None
    """
    if len(root_path) < 2:
        raise ValueError('The root path must be at least 2 characters')
    all_matching_files = get_all_matching_files(root_path, '2017-08-01_Aug.csv')
    if not all_matching_files:
        print('No files were found matching that criteria.')
        return
    for matched_file in all_matching_files:
        data_frame = pd.read_csv(matched_file)
        # your code here on what to do with the dataframe
    print('Completed search!')

if __name__ == '__main__':
    root_dir_path = os.getcwd()
    main(root_dir_path)
Notice the endswith() used to match the files; this gives you the flexibility to pass in a file extension ('.csv') and get all such files. endswith() also accepts a tuple, so you can build a tuple of file names or extensions and the method will still work.
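For example, both of these calls work (hypothetical file names, reusing root_dir_path and the function above):

all_csv_files = get_all_matching_files(root_dir_path, '.csv')  # every CSV under the root
some_files = get_all_matching_files(root_dir_path, ('2017-08-01_Aug.csv', '2017-09-01_Sep.csv'))  # tuple of endings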
Other Suggestions:
When trying to read a file using pandas, you don't need code like:
with open(os.path.join(root, csv_file)) as f1:
    pd.read_csv(f1, sep=';')
    print(f1)
Instead, pass the file path to pandas directly:
# set the file path into a variable to make the code readable
filepath = os.path.join(revenue_folder, random_file)
# read the data and store it in a variable of type DataFrame
my_dataframe_from_file = pd.read_csv(filepath, sep=';')

Related

Download every file and every subdirectory from an ftp server [duplicate]

This will not download the contents of sub-directories; how can I do so?
import ftplib
import configparser
import os

directories = []

def add_directory(line):
    if line.startswith('d'):
        bits = line.split()
        dirname = bits[8]
        directories.append(dirname)

def makeDir(archiveTo):
    for dir in directories:
        newDir = os.path.join(archiveTo, dir)
        if os.path.isdir(newDir) == True:
            print("Directory \"" + dir + "\" already exists!")
        else:
            os.mkdir(newDir)

def getFiles(archiveTo, ftp):
    files = ftp.nlst()
    for filename in files:
        try:
            directories.index(filename)
        except:
            ftp.retrbinary('RETR %s' % filename, open(os.path.join(archiveTo, filename), 'wb').write)

def runBackups():
    # Load INI
    filename = 'connections.ini'
    config = configparser.SafeConfigParser()
    config.read(filename)
    connections = config.sections()
    i = 0
    while i < len(connections):
        # Load settings
        uri = config.get(connections[i], "uri")
        username = config.get(connections[i], "username")
        password = config.get(connections[i], "password")
        backupPath = config.get(connections[i], "backuppath")
        archiveTo = config.get(connections[i], "archiveto")
        # Start back-ups
        ftp = ftplib.FTP(uri)
        ftp.login(username, password)
        ftp.cwd(backupPath)
        # Map directory tree
        ftp.retrlines('LIST', add_directory)
        # Make directories locally
        makeDir(archiveTo)
        # Gather files
        getFiles(archiveTo, ftp)
        # End connection and increase counter.
        ftp.quit()
        i += 1
    print()
    print("Back-ups complete.")
    print()
This should do the trick :)
import sys
import ftplib
import os
from ftplib import FTP

ftp = FTP("ftp address")
ftp.login("user", "password")

def downloadFiles(path, destination):
    # path & destination are str of the form "/dir/folder/something/"
    # path should be the abs path to the root FOLDER of the file tree to download
    try:
        ftp.cwd(path)
        # clone path to destination
        os.chdir(destination)
        os.mkdir(destination[0:len(destination)-1] + path)
        print(destination[0:len(destination)-1] + path + " built")
    except OSError:
        # folder already exists at destination
        pass
    except ftplib.error_perm:
        # invalid entry (ensure input form: "/dir/folder/something/")
        print("error: could not change to " + path)
        sys.exit("ending session")
    # list children:
    filelist = ftp.nlst()
    for file in filelist:
        try:
            # this will check if file is folder:
            ftp.cwd(path + file + "/")
            # if so, explore it:
            downloadFiles(path + file + "/", destination)
        except ftplib.error_perm:
            # not a folder with accessible content
            # download & return
            os.chdir(destination[0:len(destination)-1] + path)
            # possibly need a permission exception catch:
            with open(os.path.join(destination, file), "wb") as f:
                ftp.retrbinary("RETR " + file, f.write)
            print(file + " downloaded")
    return

source = "/ftproot/folder_i_want/"
dest = "/systemroot/where_i_want_it/"
downloadFiles(source, dest)
This is a very old question, but I had a similar need that I wanted to satisfy in a very general manner. I ended up writing my own solution that works very well for me. I've placed it on Gist here https://gist.github.com/Jwely/ad8eb800bacef9e34dd775f9b3aad987 and pasted it below in case I ever take the gist offline.
Example usage:
import ftplib
ftp = ftplib.FTP(mysite, username, password)
download_ftp_tree(ftp, remote_dir, local_dir)
The code above will look for a directory called "remote_dir" on the ftp host, and then duplicate the directory and its entire contents into the "local_dir".
It invokes the script below.
import ftplib
import os

def _is_ftp_dir(ftp_handle, name, guess_by_extension=True):
    """ simply determines if an item listed on the ftp server is a valid directory or not """
    # if the name has a "." in the fourth-to-last position, it's probably a file extension
    # this is MUCH faster than trying to set every file to a working directory, and will work 99% of the time.
    if guess_by_extension is True:
        if name[-4] == '.':
            return False

    original_cwd = ftp_handle.pwd()   # remember the current working directory
    try:
        ftp_handle.cwd(name)          # try to set directory to new name
        ftp_handle.cwd(original_cwd)  # set it back to what it was
        return True
    except:
        return False

def _make_parent_dir(fpath):
    """ ensures the parent directory of a filepath exists """
    dirname = os.path.dirname(fpath)
    while not os.path.exists(dirname):
        try:
            os.mkdir(dirname)
            print("created {0}".format(dirname))
        except:
            _make_parent_dir(dirname)

def _download_ftp_file(ftp_handle, name, dest, overwrite):
    """ downloads a single file from an ftp server """
    _make_parent_dir(dest)
    if not os.path.exists(dest) or overwrite is True:
        with open(dest, 'wb') as f:
            ftp_handle.retrbinary("RETR {0}".format(name), f.write)
        print("downloaded: {0}".format(dest))
    else:
        print("already exists: {0}".format(dest))

def _mirror_ftp_dir(ftp_handle, name, overwrite, guess_by_extension):
    """ replicates a directory on an ftp server recursively """
    for item in ftp_handle.nlst(name):
        if _is_ftp_dir(ftp_handle, item, guess_by_extension):
            _mirror_ftp_dir(ftp_handle, item, overwrite, guess_by_extension)
        else:
            _download_ftp_file(ftp_handle, item, item, overwrite)

def download_ftp_tree(ftp_handle, path, destination, overwrite=False, guess_by_extension=True):
    """
    Downloads an entire directory tree from an ftp server to the local destination.

    :param ftp_handle: an authenticated ftplib.FTP instance
    :param path: the folder on the ftp server to download
    :param destination: the local directory to store the copied folder
    :param overwrite: set to True to force re-download of all files, even if they appear to exist already
    :param guess_by_extension: it takes a while to explicitly check if every item is a directory or a file.
        If this flag is set to True, it will assume any file ending with a three-character extension ".???" is
        a file and not a directory. Set to False if some folders may have a "." in the fourth-to-last
        position of their names.
    """
    os.chdir(destination)
    _mirror_ftp_dir(ftp_handle, path, overwrite, guess_by_extension)
This is an alternative: you can try using the ftputil package. You can then use it to walk the remote directories and get your files.
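For illustration, a minimal sketch of that approach (hostname, credentials, and paths below are placeholders; ftputil is a third-party package installed with pip install ftputil):

import os
import ftputil

# FTPHost mirrors the os / os.path API, but against the remote server
with ftputil.FTPHost("ftp.example.com", "user", "password") as host:
    for dirpath, dirnames, filenames in host.walk("/remote/root"):
        # recreate the remote directory structure locally
        local_dir = os.path.join("local_copy", dirpath.lstrip("/"))
        os.makedirs(local_dir, exist_ok=True)
        for name in filenames:
            host.download(host.path.join(dirpath, name), os.path.join(local_dir, name))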
Using ftp.mlsd() instead of ftp.nlst():
import sys
import ftplib
import os
from ftplib import FTP

def fetchFiles(ftp, path, destination, overwrite=True):
    '''Fetch a whole folder from ftp.

    Parameters
    ----------
    ftp : ftplib.FTP object
    path : string ('/dir/folder/')
    destination : string ('D:/dir/folder/') folder where the files will be saved
    overwrite : bool - overwrite file if it already exists
    '''
    try:
        ftp.cwd(path)
        os.mkdir(destination[:-1] + path)
        print('New folder made: ' + destination[:-1] + path)
    except OSError:
        # folder already exists at the destination
        pass
    except ftplib.error_perm:
        # invalid entry (ensure input form: "/dir/folder/")
        print("error: could not change to " + path)
        sys.exit("ending session")

    # list children:
    filelist = [i for i in ftp.mlsd()]
    print('Current folder: ' + filelist.pop(0)[0])

    for file in filelist:
        if file[1]['type'] == 'file':
            fullpath = os.path.join(destination[:-1] + path, file[0])
            if not overwrite and os.path.isfile(fullpath):
                continue
            else:
                with open(fullpath, 'wb') as f:
                    ftp.retrbinary('RETR ' + file[0], f.write)
                print(file[0] + ' downloaded')
        elif file[1]['type'] == 'dir':
            fetchFiles(ftp, path + file[0] + '/', destination, overwrite)
        else:
            print('Unknown type: ' + file[1]['type'])

if __name__ == "__main__":
    ftp = FTP('ftp address')
    ftp.login('user', 'password')

    source = r'/Folder/'
    dest = r'D:/Data/'

    fetchFiles(ftp, source, dest, overwrite=True)

    ftp.quit()
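To see why mlsd() helps here: nlst() returns bare names only, while mlsd() yields (name, facts) pairs where facts is a dict that normally includes a 'type' entry, so no extra round trip is needed to tell files from folders. A quick sketch against a hypothetical server (mlsd() requires the server to support the MLSD command):

from ftplib import FTP

ftp = FTP('ftp.example.com')       # hypothetical host
ftp.login('user', 'password')
print(ftp.nlst())                  # e.g. ['readme.txt', 'data'] - names only
for name, facts in ftp.mlsd():
    print(name, facts.get('type'))  # e.g. "readme.txt file" / "data dir"
ftp.quit()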
Using ftputil, a fast solution could be:
def download(folder):
    for item in ftp.walk(folder):
        print("Creating dir " + item[0])
        os.mkdir(item[0])
        for subdir in item[1]:
            print("Subdirs " + subdir)
        for file in item[2]:
            print(r"Copying File {0} \ {1}".format(item[0], file))
            ftp.download(ftp.path.join(item[0], file), os.path.join(item[0], file))
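Note that ftp here is presumably an ftputil.FTPHost instance rather than a plain ftplib.FTP one, e.g. (placeholder host and credentials):

import ftputil

ftp = ftputil.FTPHost('ftp.example.com', 'user', 'password')
download('/Folder')
ftp.close()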
It is non-trivial, at least. In the simplest case, you assume you only have files and directories. That isn't always so: there are soft links, hard links, and Windows-style shortcuts. Soft links and directory shortcuts are particularly problematic since they make recursive directories possible, which would confuse a naively implemented FTP grabber.
How you handle such recursive directories depends on your needs; you might simply not follow soft links, or you might try to detect recursive links. Detecting recursive links is inherently tricky; you cannot do it reliably.
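One common way to detect cycles, sketched here for a local tree (the same visited-set idea carries over to an FTP grabber that can resolve links):

import os

def walk_without_cycles(top):
    """os.walk that follows symlinks but refuses to visit any real directory twice."""
    visited = set()
    for dirpath, dirnames, filenames in os.walk(top, followlinks=True):
        real = os.path.realpath(dirpath)
        if real in visited:
            dirnames[:] = []  # prune: don't descend into an already-seen directory
            continue
        visited.add(real)
        yield dirpath, dirnames, filenames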

Putting the name of a newly created folder in the path of a csv file

I am trying to create a new csv file in a newly created folder. How can I put the name of the newly created folder into the path of the csv file?
import os

def creat(i, ii):
    # Directory
    directory = "NEW" + str(i)
    # Parent Directory path
    parent_dir = 'C:\\Users\\lap\\Desktop\\parfolder\\'
    path = os.path.join(parent_dir, directory)
    os.mkdir(path)
    print("Directory '% s' created" % directory)
    with open('C:\\Users\\lap\\Desktop\\parfolder\\%s\\MM%s.csv' %directory
              %ii, 'w') as file:
        for i in range(1, 10):
            file.write("{}\n".format(i))

for i in range(1, 4):
    creat(i, i)
Based on your error, you can wrap the os.mkdir(path) call in a try/except construct:
try:
    os.mkdir(path)
except FileExistsError:
    pass
Edit:
Now you changed the code so you'll have to change this, too:
with open('C:\\Users\\lap\\Desktop\\parfolder\\%s\\MM%s.csv' % (directory, i), 'w') as file:
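For what it's worth, a cleaner sketch of the whole function (same folder layout assumed) builds the path with os.path.join and an f-string instead of hand-written %-formatting:

import os

def creat(i, ii):
    parent_dir = 'C:\\Users\\lap\\Desktop\\parfolder'
    directory = "NEW" + str(i)
    path = os.path.join(parent_dir, directory)
    os.makedirs(path, exist_ok=True)              # no error if the folder already exists
    csv_path = os.path.join(path, f'MM{ii}.csv')  # the folder name becomes part of the csv path
    with open(csv_path, 'w') as file:
        for n in range(1, 10):
            file.write(f"{n}\n")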

How can I process images from nested directories and save them into their respective directories in python?

I have been trying to resize all the images contained in nested directories and save each resulting image into a directory tree with the same structure as the original one. I keep getting an error that the directory or file doesn't exist (though it really does exist).
import os
import fnmatch
from PIL import Image

root_path = 'D:/Users/mbeng/OneDrive/Desktop/mass_buildings'

def locate(pattern, root_path):
    for path, dirs, files in os.walk(os.path.abspath(root_path)):
        for filename in fnmatch.filter(files, pattern):
            yield os.path.join(path, filename)

path = [f for f in locate('*.tiff', root_path)]

for file in path:
    i = Image.open(file)
    #fname = file[file.find('mass_buildings\\'):]
    fname = file.replace('D:\\Users\\mbeng\\OneDrive\\Desktop\\mass_buildings', 'D:\\Users\\mbeng\\OneDrive\\Desktop\\resized2')
    #fname = fname.replace('\\', '_')
    fn, fext = os.path.splitext(fname)
    #print(file)
    img = i.resize((300, 300))
    #print(img)
    img.save('{}.tiff'.format(fn), 'TIFF')
When I run the above code, I get this error:
D:\Users\mbeng\Python\PyTorch\python.exe D:/Users/mbeng/Python/FeatureExtract/fils_list.py
Traceback (most recent call last):
File "D:/Users/mbeng/Python/FeatureExtract/fils_list.py", line 68, in <module>
img.save('{}.tiff'.format(fn), 'TIFF')
File "D:\Users\mbeng\Python\PyTorch\lib\site-packages\PIL\Image.py", line 2085, in save
fp = builtins.open(filename, "w+b")
FileNotFoundError: [Errno 2] No such file or directory: 'D:\\Users\\mbeng\\OneDrive\\Desktop\\resized2\\test\\map\\22828930_15.tiff'
Process finished with exit code 1
resized2 is the directory I created to save the processed files in. It contains the directories train, test, and valid, each containing two subdirectories: sat and map. The directory of the original files (mass_buildings) has the same structure as resized2.
How can I make it work?
I figured out a couple of things that were keeping the code from working.
The replace() method wasn't working, so I changed the paths to raw strings and replaced the forward slashes with backslashes.
I had to delete the pre-created directory for saving the processed files, so that the directories are created at runtime using pathlib.Path().mkdir().
import os
import fnmatch
from pathlib import Path

import PIL
from PIL import Image

root_path = 'D:/Users/mbeng/OneDrive/Desktop/mass_buildings'

def locate(pattern, root_path):
    for path, dirs, files in os.walk(os.path.abspath(root_path)):
        for filename in fnmatch.filter(files, pattern):
            yield os.path.join(path, filename)

path = [f for f in locate('*.tiff', root_path)]

for file in path:
    fname = file.replace(r'D:\Users\mbeng\OneDrive\Desktop\mass_buildings', r'D:\\Users\\mbeng\\OneDrive\\Desktop\\resized')
    fp = os.path.split(fname)[:-1][0]
    base = os.path.basename(fname)
    Path(fp).mkdir(parents=True, exist_ok=True)  # create the target directory tree at runtime
    fn, fext = os.path.splitext(base)
    i = Image.open(file)
    img = i.resize((700, 700), PIL.Image.NEAREST)
    img.save(os.path.join(fp, '{}.tiff'.format(fn)))
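An alternative sketch using pathlib (assuming the same source and destination layout); Path.relative_to() maps each input file onto the mirrored output tree without any string replace():

from pathlib import Path
from PIL import Image

src = Path(r'D:\Users\mbeng\OneDrive\Desktop\mass_buildings')
dst = Path(r'D:\Users\mbeng\OneDrive\Desktop\resized')

for tiff in src.rglob('*.tiff'):
    out = dst / tiff.relative_to(src)              # same relative path under the new root
    out.parent.mkdir(parents=True, exist_ok=True)  # create target directories on demand
    Image.open(tiff).resize((700, 700), Image.NEAREST).save(out)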

Searching for a list of words in a text file and printing the first three lines using python

I have a text file in which I have to find particular headings and read the first lines under each heading. I managed to do it for one heading, but I ran into problems with multiple headings.
It works for a single heading:
Data = ['work:']
but I was not able to make it work for this scenario:
Data = ['work:', 'test:', 'ride:']
In the text file the data is like below:
work:
'500'
'ast'
'800'
test:
'tim'
'200'
'300'
ride:
'mic'
'100'
'657'
import math
import os
import glob
import re
import sys

sys.path.append('C:/Documents/tes')

def read(file, Data, res, outputline):
    with open(file, 'r') as f:
        stc_file = os.path.basename(file)
        for line in f:
            if Data in line:
                line = f.readlines()
                return line[outputline]

fls = []
src_dir = r'C:/Documents/tes'
for root, dirs, files in os.walk(src_dir):
    for filename in files:
        if not filename.endswith('.txt'):
            continue
        filepath = os.path.join(root, filename)
        fls.append(filepath)

result = []
Data = ['work:', 'test:', 'ride:']
for file in fls:
    result = read(file, Data, result, 0).split() + read(file, Data, result, 1).split() + read(file, Data, result, 2).split()
The above code works for one heading, but I could not get it to work for multiple headings.
['500','ast','800']
['tim','200','300']
['mic','100','657']
This is the expected output.
This script will do what you asked, if each of the three lines of data you are looking for (not sure if you wanted more, or an arbitrary number?) is surrounded by single quotes, and if I understood your goal correctly...
import os

src_dir = os.getcwd()  # or whatever path you want
keywords = ['work:', 'test:', 'ride:']
result = []

for root, dirs, files in os.walk(src_dir):
    for filename in files:
        if filename.endswith('.txt'):
            path = os.path.join(root, filename)
            try:
                fh = open(path, 'r')
                lines = [l.strip("'") for l in fh.read().splitlines()]
                for i in range(len(lines)):
                    if lines[i] in keywords:
                        result.append(' '.join(lines[i+1:i+4]).split())
            except Exception as e:
                print('Something went wrong.')
                print(e)
                continue

print(result)
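An alternative sketch that groups every value under its heading into a dict (same assumptions about the file layout, i.e. values are quoted and each heading is followed by its lines):

def parse_headings(path, keywords):
    """Map each heading to the list of (unquoted) values that follow it."""
    sections, current = {}, None
    with open(path, 'r') as fh:
        for raw in fh:
            line = raw.strip()
            if line in keywords:
                current = sections.setdefault(line, [])
            elif current is not None and line:
                current.append(line.strip("'"))
    return sections

# e.g. parse_headings('data.txt', {'work:', 'test:', 'ride:'}) ->
# {'work:': ['500', 'ast', '800'], 'test:': ['tim', '200', '300'], 'ride:': ['mic', '100', '657']}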

Using os.walk to create a filelist for each directory

I am attempting to use os.walk to create a list of files per subdirectory, and execute a function to merge all the pdfs in each directory's list. The current script appends subsequent directories to the existing list with each loop, so the pdfs in directory1 are merged successfully, but the list for directory2 includes the pdfs from directory1, etc. I want it to refresh the list of files for each directory. Here is the script I am currently using:
import PyPDF2
import os
import sys

if len(sys.argv) > 1:
    SearchDirectory = sys.argv[1]
    print("I'm looking for PDF's in ", SearchDirectory)
else:
    print("Please tell me the directory to look in")
    sys.exit()

pdfWriter = PyPDF2.PdfFileWriter()

for root, dirs, files in os.walk(SearchDirectory):
    dirs.sort()
    for file in files:
        files.sort()
        pdfFiles = []
        if file.endswith('.pdf') and ((os.path.basename(root)) == "frames"):
            print("Discovered this pdf: ", os.path.join(root, file))
            pdfFiles.append(os.path.join(root, file))
        if pdfFiles:
            for file in pdfFiles:
                pdfFileObj = open(file, 'rb')
                pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
                for pageNum in range(0, pdfReader.numPages):
                    pageObj = pdfReader.getPage(pageNum)
                    pdfWriter.addPage(pageObj)
            pdfOutput = open((os.path.split(os.path.realpath(root))[0]) + ".pdf", "wb")
            pdfWriter.write(pdfOutput)
            pdfOutput.close()
            print("The following pdf has been successfully appended:", os.path.join(root, file))
        else:
            print("No pdfs found in this directory:", root)
The os.walk loop iterates once per directory. So you want to create a new PDFWriter for every directory.
It's also a good idea to use continue to bail out of the loop as soon as possible; this keeps the nesting flat.
Names that start with a capital letter are conventionally used for classes, so it should be searchDirectory, written with a small s.
Finally, take advantage of with blocks for handling I/O - they automatically call .close() for you.
I'm not going to install PyPDF2 just for this question, but this approach looks reasonable:
for root, dirs, files in os.walk(searchDirectory):
    if not os.path.basename(root) == "frames":
        continue
    pdfFiles = [os.path.join(root, file) for file in sorted(files)]
    if not pdfFiles:
        continue
    pdfWriter = PyPDF2.PdfFileWriter()
    outputFile = os.path.split(os.path.realpath(root))[0] + ".pdf"
    for file in pdfFiles:
        print("Discovered this pdf:", file)
        with open(file, 'rb') as pdfInput:
            pdfReader = PyPDF2.PdfFileReader(pdfInput)
            for page in pdfReader.pages:
                pdfWriter.addPage(page)
    with open(outputFile, "wb") as pdfOutput:
        pdfWriter.write(pdfOutput)
    print("%s files appended to %s" % (len(pdfFiles), outputFile))
