Automation via python - python-3.x

Completely new to python so forgive me if this a dumb question.
Part of my working tasks is to upgrade the IOS on various Cisco routers and switches.
The most mind numbing part of this is comparing the pre change config with the post change config.
I use ExamDiff for this but with up to 100 devices each night this gets soul destroying.
Is it possible to get python to open ExamDiff and automatically compare the pre and post checks, saving the differences to a file for each device?
I know I can use the import os command to open ExamDiff but I have no idea how to get ExamDiff to work
Can someone point me in the right direction?
Thanks

I got this..........
Works pretty well
#!/usr/bin/python
import os
path = input("Enter the files location: ")
def nexus():
rootdir = path + os.sep
filelist = os.listdir(rootdir)
if filelist:
for file in filelist:
if 'pre' in file:
prefile = file
postfile = file.replace('pre', 'post')
resultfile = file.replace('pre', 'report')
if postfile in filelist:
prefile = rootdir + prefile
postfile = rootdir + postfile
resultfile = rootdir + resultfile
compare(prefile, postfile, resultfile)
else:
print('No corresponding "post"-file to {0}.'.format(prefile))
else:
print('No files found.')
def compare(file1loc, file2loc, comparefileloc):
with open(file1loc, 'r') as file1:
file1lines = file1.readlines()
file1lines = [x.strip() for x in file1lines] # getting rid of whitespace and breaks
with open(file2loc, 'r') as file2:
file2lines = file2.readlines()
file2lines = [x.strip() for x in file2lines] # getting rid of whitespace and breaks
with open(comparefileloc, 'w') as comparefile:
comparefile.write('===== IN FILE 1 BUT NOT FILE 2 =====\r\n')
for file1line in file1lines:
if not file1line in file2lines:
comparefile.write(file1line + '\r\n')
comparefile.write('\r\n')
comparefile.write('===== IN FILE 2 BUT NOT FILE 1 =====\r\n')
for file2line in file2lines:
if not file2line in file1lines:
comparefile.write(file2line + '\r\n')
if __name__ == '__main__':
nexus()

Related

Rename multiple files with python

I'm trying to create a program to rename multiple files at once. This would be through Python, and I realize I'm recreating the wheel but I'm trying to understand what I'm doing wrong. Any help would be greatly appreciated. Program.......
import os
path = "LOCATION"
dir_list = os.listdir(path)
myList = []
for x in dir_list:
if x.endswith(".mp3"):
f1 = x.split("-")
ln1 = f1[0] # Band Name
ln2 = f1[1] # Album Title
ln3 = f1[2] # Track number
ln4 = f1[3] # Song Name
newname = x.join(ln2 + ln3)
os.rename(x, newname)
print(newname)
Your error:
line 14, in <module> os.rename(x, newname) -> FileNotFoundError: [WinError 2] The system cannot find the file specified:
...Is likely due to the path not being included in your os.rename() call, I suggest changing os.rename(x, newname) to os.rename(path + x, path + newname) which will solve that issue.
I also noticed some funky behavior with the way you were grabbing the song information, so if you have any further issues, here's the code I used to debug your original issue which seems to have the result you're going for:
import os
path = "C:\\Users\\Pepe\\Documents\\StackOverflow\\73430533\\"
dir_list = os.listdir(path)
for x in dir_list:
if x.endswith(".mp3"):
# I ignore the ".mp3" to keep the file names clean
nameDetails = x.split('.mp3')[0].split('-')
bandName = nameDetails[0]
albumTitle = nameDetails[1]
trackNumber = nameDetails[2]
songName = nameDetails[3]
newName = f"{albumName} | {trackName}.mp3"
print(f"Renaming \"{x}\" to \"{newName}\"")
os.rename(path + x, path + newName)

Use glob to iterate through files in dir to select correct extension?

I'm trying to iterate through a dir a select the first file available.
These files look like this:
img_1.png img_2.png img_3.mp4 img_4.png img_5.jpg img_6.mp4
As you can see their names are cohesive but their extensions are different. I'd like the script to iterate through each extension for each number before it moves onto the next, IE:
I assume the best way to go about it is iterating through each file and extention like this: img_1.png img_1.jpg and img_1.mp4, and if neither of the three are available, move to the next file and repeat like img_2.png img_2.jpg and img_2.mp4 until there is an available
Question:
Is it best to iterate through the files and use glob to extend a file path with the extensions? Is there a better method?
This is what I thought would work, but it doesn't:
# Gets number of files in dir
list = os.listdir(folder_path)
number_files = len(list)
# Chooses file from dir
e = 0
for i in range(number_files):
try:
chosen_file = folder_path + "img_" + str(e)
for ext in ('*.jpg', '*.png', '*.mp4'):
full_path = chosen_file.extend(glob(join(chosen_file, ext)))
print (full_path)
#random_file = random.choice(os.listdir(folder_path)) # Chooses random file
except:
e += 1
print ('Hit except')
Are there other files in the folder with different names that you do not want to select or are all the files in the folder of interest? Is all that matters that they have the those 3 extensions or are the names important as well?
If you are only interested in files with those 3 extensions then this code will work
import os
import glob
folder_path = 'test\\'
e = 0
for r,d,f in os.walk(folder_path):
for file in f:
extensions = ['.jpg', '.png', '.mp4']
for ext in extensions:
if file.endswith(ext):
full_path = os.path.join(folder_path, file)
print (full_path)
else:
e += 1
print ('Hit except')
Given:
$ ls /tmp
img_1.png img_1.jpg img_2.png img_4.png img_5.jpg img_3.mp4 img_6.mp4
You can use pathlib and a more targeted glob:
from pathlib import Path
p=Path('/tmp')
for fn in (x for x in p.glob('img_[0-9].*')
if x.suffix in ('.png', '.jpg', '.mp4')):
print(fn)
Prints:
/tmp/img_1.png
/tmp/img_1.jpg
/tmp/img_2.png
/tmp/img_4.png
/tmp/img_5.jpg
/tmp/img_3.mp4
/tmp/img_6.mp4
Answer:
Decided to not use glob and did this instead:
i = 0
for i in range(number_files):
try:
chosen_file = folder_path + "img_" + str(i)
jpg_file = chosen_file + ".jpg"
png_file = chosen_file + ".png"
mp4_file = chosen_file + ".mp4"
if os.path.exists(png_file) == True:
print ('png true')
print (png_file)
break
elif os.path.exists(jpg_file) == True:
print ('jpg true')
print (jpg_file)
break
elif os.path.exists(mp4_file) == True:
print ('mp4 true')
print (mp4_file)
break
except:
i += 1
print ('false')

Running a Python script for files in a folder

There are 15 text files in a folder and I am trying to extract certain parts of each file and output them to a new file.
I am able to extract each file individually by just changing the file name and append each file to the output file but this means copying the same code 15 times and just changing the file name each time.
import glob,os
lst = []
filelist=glob.glob ('/C:/Users/bridaly/Documents/PythonTest/Python_Test_ENdata_3080_v20150914/input/*')
for file in filelist:
if os.path.isfile(file):
for line in filelist:
line = line.strip()
if not (
line.startswith("APPEND") or line.startswith("_") or
line.startswith("SAP") or line.startswith("~") or
line.startswith("INCLUDE") or line.startswith("ABAP")
or line.strip() == "" or line.startswith("Field") or
line.startswith("Short")
) :
y=line.replace(' ',' ')
#print(y)
z = y.replace('X','')
#print(z)
w = "|".join(z.split())
#print(w)
x = w.split("|",3)[:4]
#print(x)
x.insert(0,'./input/01BKPF')
#print(x)
if len(x) >=4:
t = [s.replace('|',' ') for s in x]
#print(t)
print("|".join(t))
lst.append("|".join(t))
#Output Script
output_file = open('Output_Final.txt', 'w')
for l in lst:
output_file.write(l)
output_file.write('\n')
output_file.close()
"""
The output should extract what's written in the code but for each file and append it to the output file. I have gotten the correct output by copying the code 15 times but I just want to use it once as it is more efficient.
files = glob.glob('path')
for file in files:
file_name = os.path.basename(file)
print(file_name)
you can iterate for each file

Change order in filenames in a folder

I need to rename a bunch of files in a specific folder. They all end with date and time, like for example "hello 2019-05-22 1310.txt" and I want the date and time for each file to be first so I can sort them. With my code I get an error and it wont find my dir where all files are located. What is wrong with the code?
import os
import re
import shutil
dir_path = r'C:\Users\Admin\Desktop\Testfiles'
comp = re.compile(r'\d{4}-\d{2}-\d{2}')
for file in os.listdir(dir_path):
if '.' in file:
index = [i for i, v in enumerate(file,0) if v=='.'][-1]
name = file[:index]
ext = file[index+1:]
else:
ext=''
name = file
data = comp.findall(name)
if len(data)!=0:
date= comp.findall(name)[0]
rest_name = ' '.join(comp.split(name)).strip()
new_name = '{} {}{}'.format(date,rest_name,'.'+ext)
print('changing {} to {}'.format(name, new_name))
shutil.move(os.path.join(dir_path,name), os.path.join(dir_path, new_name))
else:
print('file {} is not change'.format(name))

CouchDB views moving

I know that it is possible to backup your data by simply copying them
to some storage (on Linux they are usually installed under
/var/lib/couchdb/*.couch). Also in that same directories are data
indexed by views. I also know that you can make a copy of views by
sending HTTP requests to another database. But what I don't know is, is it
possible to save your views by simply copying them to a storage like
with data? To make my question a little bit clearer: Can you make
a backup of views without another database instance? Also, I am using
CouchDB 1.6 which has different file structure than versions greater
than 2.
Let me post an answer, this is only solution that i could think of...Of course it can be improved or changed but it works for me.
Script needs to be called from terminal (unless you edit it in some IDE) with
sys.argv[1] like python3 viewBackup.py 1 or python3 viewBackup.py 2
(1 is to make backup file, 2 is to upload that backup).Since this script is so simple there's no need for checking if backup file already exists (if there is backup made)
import requests
import json
import sys
# these are credentials of DB which views we want to backup
from_db = 'http://localhost:5984/'
from_user = 'root'
from_pswd = '1234'
from_auths = (from_user, from_pswd)
#`and these are credentials of DB where we want to upload our backuped views`
to_db = 'http://localhost:5984/'
to_user = 'root'
to_pswd = '1234'
to_auths = (from_user, from_pswd)
tf = 'bucket2views.json'
def getViews(source, credS):
global tf
data = _getDBList(source, credS)
temp = []
f = open(tf, 'w')
for d in data:
designs = requests.get(source + d + '/_all_docs?startkey="_design/"&endkey="_desing0"&include_docs=true',auth=credS).json()
designs = designs['rows']
for z in designs:
z = z['doc']['_id'].split('/')
z = z[1]
try:
print("getting views, it might take some time, please be patient")
views = requests.get(source + d + '/_design/' + z, auth=credS).json()['views']
print(" views gathered, start processing them")
views2 = {'views': views}
payload = json.dumps(views2)
temp.append((d, z, payload))
except Exception as e:
pass
f.write(json.dumps(temp))
f.close()
print(' views has been saved')
def _getDBList(server, auth):
response = requests.get(server + '_all_dbs', auth=auth).json()
return response
def uploadViews(srv, cred):
f = open(tf, 'r')
txt = f.read()
f.close()
jt = json.loads(txt)
for item in jt:
d = item[0]
z = item[1]
full_name = item[2]
resp = requests.put(srv+ d +'/_design/'+ z, data=json.dumps(payload), auth=cred).json()
print (json.dumps(resp))
def main():
ans = sys.argv[1]
# ans = 2
if ans in [1, '1', 'getViews']:
getViews(from_db, from_auths)
elif ans in [2, '2', "uploadViews"]:
uploadViews(to_db, to_auths)
else:
print('Possible inputs: 1, getViews, 2, uploadViews')
main()

Resources