Archive/pack a directory with contents as plain-text representation? - linux

Under Linux / bash, how can I obtain a plain-text representation of a directory and its contents? (Note that by "plain-text" here I mean "UTF-8".)
In other words, how could I "pack" or "archive" a directory (with contents - including binary files) as a plain text file - such that I could "unpack" it later, and obtain the same directory with its contents?

I have been interested in this for a while, and I think I finally managed to cook up a script that works in both Python 2.7 and 3.4 -- however, I'd still like to know if there is something else that does the same. Here it is as a Gist (with some more comments):
https://gist.github.com/anonymous/1a68bf2c9134fd5312219c8f68713632
Otherwise, I'm posting a slightly abridged version here (below) for reference.
The usage is: to archive/pack into a .json text file:
python archdir2text-json.py -a /tmp > myarchdir.json
... and to unpack from the .json text file into the current (calling) directory:
python archdir2text-json.py -u myarchdir.json
Binary files are handled as base64.
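To illustrate the idea (not the script's exact code), here is a minimal base64 round-trip: the raw bytes become an ASCII-safe string that can live inside the JSON text, and decoding restores them exactly.
import base64

raw = bytes([0, 1, 2, 255, 254])                 # arbitrary binary content
encoded = base64.b64encode(raw).decode("utf-8")  # ASCII-safe text, JSON-friendly
restored = base64.b64decode(encoded)             # back to the original bytes

assert restored == raw
print(encoded)  # 'AAEC//4='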
Here is the script:
archdir2text-json.py
#!/usr/bin/env python
import pprint, inspect
import argparse
import os
import stat
import errno
import base64
import codecs

class SmartDescriptionFormatter(argparse.RawDescriptionHelpFormatter):
    def _fill_text(self, text, width, indent):
        if text.startswith('R|'):
            paragraphs = text[2:].splitlines()
            rebroken = [argparse._textwrap.wrap(tpar, width) for tpar in paragraphs]
            rebrokenstr = []
            for tlinearr in rebroken:
                if (len(tlinearr) == 0):
                    rebrokenstr.append("")
                else:
                    for tlinepiece in tlinearr:
                        rebrokenstr.append(tlinepiece)
            return '\n'.join(rebrokenstr)
        return argparse.RawDescriptionHelpFormatter._fill_text(self, text, width, indent)

textchars = bytearray({7,8,9,10,12,13,27} | set(range(0x20, 0x100)) - {0x7f})
is_binary_string = lambda bytes: bool(bytes.translate(None, textchars))

cwd = os.getcwd()
if os.name == 'nt':
    import win32api, win32con

def folder_is_hidden(p):
    if os.name == 'nt':
        attribute = win32api.GetFileAttributes(p)
        return attribute & (win32con.FILE_ATTRIBUTE_HIDDEN | win32con.FILE_ATTRIBUTE_SYSTEM)
    else:
        return os.path.basename(p).startswith('.') # linux-osx

def path_hierarchy(path):
    hierarchy = {
        'type': 'folder',
        'name': os.path.basename(path),
        'path': path,
    }
    try:
        cleared_contents = [contents
                            for contents in os.listdir(path)
                            if not(
                                os.path.isdir(os.path.join(path, contents))
                                and
                                folder_is_hidden(os.path.join(path, contents))
                            )]
        hierarchy['children'] = [
            path_hierarchy(os.path.join(path, contents))
            for contents in cleared_contents
        ]
    except OSError as e:
        if e.errno == errno.ENOTDIR:
            hierarchy['type'] = 'file'
        else:
            hierarchy['type'] += " " + str(e)
    if hierarchy['type'] == 'file':
        isfifo = stat.S_ISFIFO(os.stat(hierarchy['path']).st_mode)
        if isfifo:
            ftype = "fifo"
        else:
            try:
                data = open(hierarchy['path'], 'rb').read()
                ftype = "bin" if is_binary_string(data) else "txt"
                if (ftype == "txt"):
                    hierarchy['content'] = data.decode("utf-8")
                else:
                    hierarchy['content'] = base64.b64encode(data).decode("utf-8")
            except Exception as e:
                ftype = str(e)
        hierarchy['ftype'] = ftype
    return hierarchy

def recurse_unpack(inobj, relpath=""):
    if (inobj['type'] == "folder"):
        rpname = relpath + inobj['name']
        sys.stderr.write("folder name: " + rpname + os.linesep)
        os.mkdir(rpname)
        for tchild in inobj['children']:
            recurse_unpack(tchild, relpath=relpath+inobj['name']+os.sep)
    elif (inobj['type'] == "file"):
        rfname = relpath + inobj['name']
        sys.stderr.write("file name: " + rfname + os.linesep)
        if inobj['ftype'] == "txt":
            with codecs.open(rfname, "w", "utf-8") as text_file:
                text_file.write(inobj['content'])
        elif inobj['ftype'] == "bin":
            with open(rfname, "wb") as bin_file:
                bin_file.write(base64.b64decode(inobj['content']))

if __name__ == '__main__':
    import json
    import sys
    parser = argparse.ArgumentParser(formatter_class=SmartDescriptionFormatter, description="""R|Command-line App that packs/archives (and vice-versa) a directory to a plain-text .json file; should work w/ both Python 2.7 and 3.4
see full help text in https://gist.github.com/anonymous/1a68bf2c9134fd5312219c8f68713632""")
    parser.add_argument('input_paths', type=str, nargs='*', default=['.'],
                        help='Paths to files/directories to include in the archive; or path to .json archive file')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-a', '--archive', action='store_true', help="Interpret input_paths as paths to files/directories, and archive them to a .json file (output to stdout)")
    group.add_argument('-u', '--unpack', action='store_true', help="Interpret input_paths as path to an archive .json file, and unpack it in the current directory")
    args = parser.parse_args()
    if (args.archive):
        valid_input_paths = []
        for p in args.input_paths:
            if os.path.isdir(p) or os.path.exists(p):
                valid_input_paths.append(p)
            else:
                sys.stderr.write("Ignoring invalid input path: " + p + os.linesep)
        sys.stderr.write("Encoding input path(s): " + str(valid_input_paths) + os.linesep)
        path_hier_arr = [path_hierarchy(vp) for vp in valid_input_paths]
        outjson = json.dumps(path_hier_arr, indent=2, sort_keys=True, separators=(',', ': '))
        print(outjson)
    elif (args.unpack):
        valid_input_paths = []
        for p in args.input_paths:
            if os.path.isdir(p) or os.path.exists(p):
                valid_input_paths.append(p)
            else:
                sys.stderr.write("Ignoring invalid input path: " + p + os.linesep)
        for vp in valid_input_paths:
            with open(vp) as data_file:
                data = json.load(data_file)
            for datachunk in data:
                recurse_unpack(datachunk)
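Regarding "something else that does the same": a common alternative is to let tar do the directory packing and base64-encode the result, which handles permissions, symlinks and FIFOs for free (at the cost of the per-file readable layout the JSON format gives you). A sketch of that approach in Python; the paths are examples:
import base64
import io
import tarfile

# Pack: tar the directory in memory, then base64 it into a text file.
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w:gz') as tar:
    tar.add('/tmp/mydir', arcname='mydir')
with open('mydir.txt', 'w') as f:
    f.write(base64.b64encode(buf.getvalue()).decode('ascii'))

# Unpack: reverse the two steps.
with open('mydir.txt') as f:
    data = base64.b64decode(f.read())
with tarfile.open(fileobj=io.BytesIO(data), mode='r:gz') as tar:
    tar.extractall('.')
The shell equivalent is roughly tar czf - mydir | base64 > mydir.txt to pack and base64 -d mydir.txt | tar xzf - to unpack (GNU coreutils base64).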

Related

Python3 Decrypt data encrypted with Crypto.Cipher.ARC4 in Python2 (And vice versa?)

I have a script I wrote in Python2 to encrypt files using Crypto.Cipher.ARC4.
Now that Python2 has been EOL for over a year, I've been starting to move everything to Python3.
Is it possible to decrypt files encrypted with my script using Python3 (and vice versa)?
Here is my script:
#!/usr/bin/python
import os
from Crypto.Cipher import ARC4

key = "my_redacted_string"
dir = os.path.realpath(__file__)
dir = os.path.dirname(dir)
# https://stackoverflow.com/questions/4934806
files = os.listdir(dir)
os.chdir(dir)
script_name = __file__
script_name = script_name.split("/")[-1]
proceed = [1, "y", "yes", '1']
for f in files:
    if f == script_name:
        pass
    else:
        string = "Process file? : %s > " % f
        answer = raw_input(string)
        if answer in proceed:
            filo = open(f) # filo == file object, file open
            data = filo.read()
            filo.close()
            e = ARC4.new(key)
            e = e.encrypt(data)
            out_name = "%s.dat" % f
            filo = open(out_name, 'w')
            filo.write(e)
            filo.close()
Here is a script I wrote to decrypt files encrypted with the above script:
#!/usr/bin/python
import os
from Crypto.Cipher import ARC4

key = "my_redacted_string"
dir = os.path.realpath(__file__)
dir = os.path.dirname(dir)
# https://stackoverflow.com/questions/4934806
files = os.listdir(dir)
os.chdir(dir)
script_name = __file__
script_name = script_name.split("/")[-1]
proceed = [1, "y", "yes", '1']
for f in files:
    if f == script_name:
        pass
    else:
        string = "Process file? : %s > " % f
        answer = raw_input(string)
        if answer in proceed:
            filo = open(f) # filo == file object, file open
            data = filo.read()
            filo.close()
            d = ARC4.new(key)
            d = d.decrypt(data)
            out_name = os.path.splitext(f)[0]
            print out_name
            filo = open(out_name, 'w')
            filo.write(d)
            filo.close()
I try to make everything cross-platform and included #!/usr/bin/python out of habit, but I am using 64-bit Windows 10 on my laptop (I have one Linux box, plus VMs and VPSes running Linux, but I use this script client-side, so on Windows).
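For reference, a minimal sketch of what a Python 3 port of the decrypt script might look like (assuming the same PyCrypto/pycryptodome ARC4 module; untested against your files). RC4 is a plain byte-stream cipher, so ciphertext produced by the Python 2 script should decrypt fine in Python 3 as long as the key is encoded to the same bytes and the files are opened in binary mode:
#!/usr/bin/python3
import os
from Crypto.Cipher import ARC4

key = b"my_redacted_string"  # bytes, not str, in Python 3

script_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(script_dir)
script_name = os.path.basename(__file__)

proceed = ["1", "y", "yes"]
for f in os.listdir(script_dir):
    if f == script_name:
        continue
    answer = input("Process file? : %s > " % f)  # raw_input() became input()
    if answer in proceed:
        with open(f, "rb") as filo:  # binary mode: read the raw bytes
            data = filo.read()
        d = ARC4.new(key).decrypt(data)
        out_name = os.path.splitext(f)[0]
        print(out_name)
        with open(out_name, "wb") as filo:  # binary mode again
            filo.write(d)
One caveat: the Python 2 scripts open files in text mode, which on Windows translates line endings, so binary files encrypted there may not round-trip byte-for-byte; opening everything in binary mode avoids that.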

Use glob to iterate through files in dir to select correct extension?

I'm trying to iterate through a dir and select the first file available.
These files look like this:
img_1.png img_2.png img_3.mp4 img_4.png img_5.jpg img_6.mp4
As you can see, their names are cohesive but their extensions differ. I'd like the script to iterate through each extension for each number before it moves on to the next. I assume the best way to go about it is checking each extension for each file like this: img_1.png, img_1.jpg and img_1.mp4, and if none of the three is available, move on and repeat with img_2.png, img_2.jpg and img_2.mp4 until an available file is found.
Question:
Is it best to iterate through the files and use glob to extend a file path with the extensions? Is there a better method?
This is what I thought would work, but it doesn't:
# Gets number of files in dir
list = os.listdir(folder_path)
number_files = len(list)
# Chooses file from dir
e = 0
for i in range(number_files):
    try:
        chosen_file = folder_path + "img_" + str(e)
        for ext in ('*.jpg', '*.png', '*.mp4'):
            full_path = chosen_file.extend(glob(join(chosen_file, ext)))
            print (full_path)
        #random_file = random.choice(os.listdir(folder_path)) # Chooses random file
    except:
        e += 1
        print ('Hit except')
Are there other files in the folder with different names that you do not want to select, or are all the files in the folder of interest? Is all that matters that they have those 3 extensions, or are the names important as well?
If you are only interested in files with those 3 extensions, then this code will work:
import os
import glob

folder_path = 'test\\'
e = 0
for r, d, f in os.walk(folder_path):
    for file in f:
        extensions = ['.jpg', '.png', '.mp4']
        for ext in extensions:
            if file.endswith(ext):
                full_path = os.path.join(folder_path, file)
                print (full_path)
            else:
                e += 1
                print ('Hit except')
Given:
$ ls /tmp
img_1.png img_1.jpg img_2.png img_4.png img_5.jpg img_3.mp4 img_6.mp4
You can use pathlib and a more targeted glob:
from pathlib import Path

p = Path('/tmp')

for fn in (x for x in p.glob('img_[0-9].*')
           if x.suffix in ('.png', '.jpg', '.mp4')):
    print(fn)
Prints:
/tmp/img_1.png
/tmp/img_1.jpg
/tmp/img_2.png
/tmp/img_4.png
/tmp/img_5.jpg
/tmp/img_3.mp4
/tmp/img_6.mp4
Answer:
Decided to not use glob and did this instead:
i = 0
for i in range(number_files):
    try:
        chosen_file = folder_path + "img_" + str(i)
        jpg_file = chosen_file + ".jpg"
        png_file = chosen_file + ".png"
        mp4_file = chosen_file + ".mp4"
        if os.path.exists(png_file) == True:
            print ('png true')
            print (png_file)
            break
        elif os.path.exists(jpg_file) == True:
            print ('jpg true')
            print (jpg_file)
            break
        elif os.path.exists(mp4_file) == True:
            print ('mp4 true')
            print (mp4_file)
            break
    except:
        i += 1
        print ('false')
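As a side note, the same logic can be collapsed into one loop over the extensions, which avoids duplicating the exists/print/break branches; os.path.exists does not raise, so the try/except is unnecessary. A sketch, assuming folder_path and number_files as defined above:
import os

# Try each number in order; for each number, try the extensions in
# priority order and stop at the first file that actually exists.
found = None
for i in range(number_files):
    for ext in ('.png', '.jpg', '.mp4'):
        candidate = os.path.join(folder_path, 'img_' + str(i) + ext)
        if os.path.exists(candidate):
            found = candidate
            break
    if found:
        break

print(found if found else 'no matching file')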

python 3 tab-delimited file adds column file.write

I'm writing string entries to a tab-delimited file in Python 3. The code that I use to save the content is:
savedir = easygui.diropenbox()
savefile = input("Please type the filename (including extension): ")
file = open(os.path.join(savedir, savefile), "w", encoding="utf-8")
file.write("Number of entities not found: " + str(missing_count) + "\n")
sep = "\t"
for entry in entities:
    file.write(entry[0]+"\t")
    for item in entry:
        file.write(sep.join(item[0]))
        file.write("\t")
    file.write("\n")
file.close()
The file saves properly. There are no errors or warnings sent to the terminal. When I open the file, I find an extra column has been saved to the file.
Query | Extra | Name
Abu-Jamal, Mumia | A | Mumia Abu-Jamal
Anderson, Walter | A | Walter Inglis Anderson
Anderson, Walter | A | Walter Inglis Anderson
I've added vertical bars between the tabs for clarity; they don't normally appear there. As well, I have removed a few columns at the end. The column between the vertical bars is not supposed to be there. The document that is saved to file is longer than three lines. On each line, the extra column is the first letter of the Query column. Hence, we have A's in these three examples.
entry[0] corresponds exactly to the value in the Query column.
sep.join(item[0]) corresponds exactly to columns 3+.
Any idea why I would be getting this extra column?
Edit: I'm adding the full code for this short script.
# =============================================================================
# Code to query DBpedia for named entities.
#
# =============================================================================
import requests
import xml.etree.ElementTree as et
import csv
import os
import easygui
import re

# =============================================================================
# Default return type is XML. Others: json.
# Classes are: Resource (general), Place, Person, Work, Species, Organization
# but don't include resource as one of the
# =============================================================================
def urlBuilder(query, queryClass="unknown", returns=10):
    prefix = 'http://lookup.dbpedia.org/api/search/KeywordSearch?'
    #Selects the appropriate QueryClass for the url
    if queryClass == 'place':
        qClass = 'QueryClass=place'
    elif queryClass == 'person':
        qClass = 'QueryClass=person'
    elif queryClass == 'org':
        qClass = 'QueryClass=organization'
    else:
        qClass = 'QueryClass='
    #Sets the QueryString
    qString = "QueryString=" + str(query)
    #sets the number of returns
    qHits = "MaxHits=" + str(returns)
    #full url
    dbpURL = prefix + qClass + "&" + qString + "&" + qHits
    return dbpURL

#takes a xml doc as STRING and returns an array with the name and the URI
def getdbpRecord(xmlpath):
    root = et.fromstring(xmlpath)
    dbpRecord = []
    for child in root:
        temp = []
        temp.append(child[0].text)
        temp.append(child[1].text)
        if child[2].text is None:
            temp.append("Empty")
        else:
            temp.append(findDates(child[2].text))
        dbpRecord.append(temp)
    return dbpRecord

#looks for a date with pattern: 1900-01-01 OR 01 January 1900 OR 1 January 1900
def findDates(x):
    pattern = re.compile('\d{4}-\d{2}-\d{2}|\d{2}\s\w{3,9}\s\d{4}|\d{1}\s\w{3,9}\s\d{4}')
    returns = pattern.findall(x)
    if len(returns) > 0:
        return ";".join(returns)
    else:
        return "None"

#%%
# =============================================================================
# Build and send get requests
# =============================================================================
print("Please select the CSV file that contains your data.")
csvfilename = easygui.fileopenbox("Please select the CSV file that contains your data.")
lookups = []
name_list = csv.reader(open(csvfilename, newline=''), delimiter=",")
for name in name_list:
    lookups.append(name)
#request to get the max number of returns from the user.
temp = input("Specify the maximum number of returns desired: ")
if temp.isdigit():
    maxHits = temp
else:
    maxHits = 10
queries = []
print("Building queries. Please wait.")
for search in lookups:
    if len(search) == 2:
        queries.append([search[0], urlBuilder(query=search[0], queryClass=search[1], returns=maxHits)])
    else:
        queries.append([search, urlBuilder(query=search, returns=maxHits)])
responses = []
print("Gathering responses. Please wait.")
for item in queries:
    response = requests.get(item[1])
    data = response.content.decode("utf-8")
    responses.append([item[0], data])
entities = []
missing_count = 0
for item in responses:
    temp = []
    if len(list(et.fromstring(item[1]))) > 0:
        entities.append([item[0], getdbpRecord(item[1])])
    else:
        missing_count += 1
print("There are " + str(missing_count) + " entities that were not found.")
print("Please select the destination folder for the results of the VIAF lookup.")
savedir = easygui.diropenbox("Please select the destination folder for the results of the VIAF lookup.")
savefile = input("Please type the filename (including extension): ")
file = open(os.path.join(savedir, savefile), "w", encoding="utf-8")
file.write("Number of entities not found: " + str(missing_count) + "\n")
sep = "\t"
for entry in entities:
    file.write(entry[0]+"\t")
    for item in entry:
        file.write(sep.join(item[0]))
        file.write("\t")
    file.write("\n")
file.close()
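For what it's worth, the stray column is consistent with the inner loop iterating over entry itself rather than over the record list: each entry is [query_string, records], so for item in entry first yields the query string, and sep.join(item[0]) on that string is just its first character, which is exactly the extra letter in the output. A sketch of the likely fix, using dummy data that mirrors the script's entities structure (one row per query, every record joined in full):
# Hypothetical reconstruction: each entry is [query_string, list_of_records].
entities = [["Abu-Jamal, Mumia",
             [["Mumia Abu-Jamal", "http://dbpedia.org/resource/Mumia_Abu-Jamal", "1954-04-24"]]]]
sep = "\t"
with open("out.tsv", "w", encoding="utf-8") as file:
    for entry in entities:
        file.write(entry[0] + "\t")       # the query string
        for record in entry[1]:           # iterate the record list, not entry itself
            file.write(sep.join(record))  # join the whole [name, uri, dates] record
            file.write("\t")
        file.write("\n")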

Python: os.walk usage for scanning specified directory and search specified file issue

I want to scan the current folder and deeper ones to search for a specified file.
[~/test]$tree -a
Above is my test environment.
[~/test]$ls
NCRAM955E/ RNCMST954E/ RNCMST957E/ test.py*
Below is my code:
import os, shutil, sys, getopt, re

def GetOption(argv):
    FileDir = ""
    Roptarget = ""
    Dirtarget = []
    try:
        opts, args = getopt.getopt(argv, "hD:F:", ["FileDir=", "Roptarget="])
    except getopt.GetoptError:
        print ('Error arg input -D <FileDir> -F <Roptarget>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            print ('Error arg input -D <FileDir> -F <Roptarget>')
            sys.exit()
        elif opt in ("-D", "--FileDir"):
            FileDir = arg
            Dirtarget = FileDir.split("|")
        elif opt in ("-F", "--Roptarget"):
            Roptarget = arg
    return (Dirtarget, Roptarget)

# The self-defined function below needs updating
def detect_walk(file_dir):
    L_0 = []
    L = []
    DirList, Ropfile = GetOption(sys.argv[1:])
    print("DirList = " + str(DirList))
    print("Ropfile = " + Ropfile)
    for root, dirs, files in os.walk(file_dir):
        for file in files:
            L_0.append(file)
            if " ".join(L_0).find(Ropfile):
                print("target rop file = " + Ropfile)
                L.append(os.path.join(root, Ropfile))
    return (L)

if __name__ == '__main__':
    file_path = "/home/test/"
    List = detect_walk(file_path)
My expected output, for example:
if I type python test.py -D "RNCRAM955E|RNCMST954E" -F "^A20180520.1300+0300-1315+0300*RNCMST954E_statsfile.xml$"
the program will only search the folders RNCRAM955E and RNCMST954E, and when a file matching the specified pattern is found, it will display the full path of the target file.
I am a Python fresher; please give me some advice. Thank you.
I have updated my code; it now copies the specified files from the specified folders to another folder.
Use it like python temp.py -D "RNCMST954E|RNCMST957E|RNCRAM955E" -F "A20180520\.13*", but some of the code could still be improved: as you can see, the filename after -F has to include the escape symbol \. So, how can I improve this?
import os, shutil, sys, getopt, re

def GetOption(argv):
    FileDir = ""
    Roptarget = ""
    Dirtarget = []
    try:
        opts, args = getopt.getopt(argv, "hD:F:", ["FileDir=", "Roptarget="])
    except getopt.GetoptError:
        print ('Error arg input -D <FileDir> -F <Roptarget>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            print ('Error arg input -D <FileDir> -F <Roptarget>')
            sys.exit()
        elif opt in ("-D", "--FileDir"):
            FileDir = arg
            Dirtarget = FileDir.split("|")
        elif opt in ("-F", "--Roptarget"):
            Roptarget = arg
    return (Dirtarget, Roptarget)

def detect_walk(file_dir):
    L = []
    desdir = "/home/ekoopgj/ITK/Task/test_folder/"
    for root, dirs, files in os.walk(file_dir):
        for file in files:
            if re.search(fileIndex, file) is not None:
                L.append(os.path.join(root, file))
                print("start copy " + os.path.join(root, file) + " to " + desdir)
                shutil.copyfile(os.path.join(root, file), desdir + file)

if __name__ == '__main__':
    DirList, fileIndex = GetOption(sys.argv[1:])
    # Using a for loop and passing file_path as a formal parameter can
    # reduce the search time if the target folder contains too many files.
    for dir in DirList:
        file_path = "/home/ekoopgj/ITK/Task/0521/"
        file_path += dir
        print("dir = " + dir)
        List = detect_walk(file_path)
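If the goal is to let -F accept a shell-style wildcard (so A20180520.13* works without hand-escaping the dot), one option is to translate the wildcard into a regular expression with fnmatch.translate before matching; a sketch with an example pattern:
import fnmatch
import re

# fnmatch.translate() turns a shell wildcard into an equivalent regex,
# escaping literal characters such as '.' automatically.
pattern_text = "A20180520.13*"   # as it would be typed after -F
regex = re.compile(fnmatch.translate(pattern_text))

for name in ["A20180520.1300+0300-1315+0300_RNCMST954E_statsfile.xml",
             "A20180521.1300_other.xml"]:
    if regex.match(name):
        print("match:", name)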

write utf-8 content in files / python 3

It's again about UTF-8 issues, for the 1001st time. Please don't mark this question as a duplicate, because I cannot find answers elsewhere.
For some months I have been working successfully with the following small script (which could be improved, I know), which gives me simple database functionality. I wrote it for very simple data storage, like local config and auth data, not for more sophisticated content like what is kept in cookies. It worked for me until I tried to store non-Latin characters for the first time.
In the following script I have already added the import codecs stuff, including the altered lines f = codecs.open(file, 'w', 'utf-8'). I do not know if this is the right approach.
Can somebody show me the trick? Let's say "John Doe" is French, "John Doé"; how do I store it?
The class itself (to be imported)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os, errno
import json
import codecs

class Ddpos:
    def db(self, table, id, col=''):
        table = '/Users/michag/Documents/ddposdb/'+table
        try:
            os.makedirs(table)
            os.chmod(table, 0o755)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        file = table+'/'+id+'.txt'
        if not os.path.isfile(file):
            f = codecs.open(file, 'w', 'utf-8')
            f.write('{}')
            f.close()
        f = codecs.open(file, 'r', 'utf-8')
        r = json.loads(f.readline().strip())
        f.close()
        if isinstance(col, str) and len(col) > 0:
            if col in r:
                return json.dumps(r[col])
            else:
                return ''
        elif isinstance(col, list) and len(col) > 0:
            res = {}
            for el in range(0, len(col)):
                if col[el] in r:
                    res[col[el]] = r[col[el]]
            return json.dumps(res)
        elif isinstance(col, dict) and len(col) > 0:
            for el in col:
                r[el] = col[el]
            f = codecs.open(file, 'w', 'utf-8')
            f.write(json.dumps(r))
            f.close()
            return json.dumps(r)
        else:
            return json.dumps(r)

ddpos = Ddpos()
The call / usage
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from ddpos import *
# set values and return all values as dict
print ('1.: '+ddpos.db('cfg','local',{'admin':'John Doé','email':'johndoe#email.com'}))
# return all values as dict
print ('2.: '+ddpos.db('cfg','local'))
# return one value as string
print ('3.: '+ddpos.db('cfg','local','email'))
# return two or more values as dict
print ('4.: '+ddpos.db('cfg','local',['admin','email']))
It prints and stores this in case of "John Doe"
1.: {"admin": "John Doe", "email": "johndoe#email.com"}
2.: {"admin": "John Doe", "email": "johndoe#email.com"}
3.: "johndoe#email.com"
4.: {"admin": "John Doe", "email": "johndoe#email.com"}
and this in case of french guy "John Doé"
1.: {"email": "johndoe#email.com", "admin": "John Do\u00e9"}
2.: {"email": "johndoe#email.com", "admin": "John Do\u00e9"}
3.: "johndoe#email.com"
4.: {"email": "johndoe#email.com", "admin": "John Do\u00e9"}
For me it is more important to learn and understand how it works and why (or why not) than to know that there are already classes which would do the job for me. Thanks for your support.
Following moderator deceze, I answer my own question, with credit to user mata and Python 3 itself.
Here's the "new" script. The poor French guy is now renamed to "J€hn Doéß", and he's still alive.
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os, errno
import json

class Ddpos:
    def db(self, table, id, col=''):
        table = '/Users/michag/Documents/ddposdb/'+table
        try:
            os.makedirs(table)
            os.chmod(table, 0o755)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        file = table+'/'+id+'.txt'
        if not os.path.isfile(file):
            f = open(file, 'w')
            f.write('{}')
            f.close()
        f = open(file, 'r')
        r = json.loads(f.readline().strip())
        f.close()
        if isinstance(col, str) and len(col) > 0:
            if col in r:
                return r[col]
            else:
                return ''
        elif isinstance(col, list) and len(col) > 0:
            res = {}
            for el in range(0, len(col)):
                if col[el] in r:
                    res[col[el]] = r[col[el]]
            return res
        elif isinstance(col, dict) and len(col) > 0:
            for el in col:
                r[el] = col[el]
            f = open(file, 'w')
            f.write(json.dumps(r))
            f.close()
            return r
        else:
            return r

ddpos = Ddpos()
UPDATE
I made some improvements. Now the stored dict is human-readable (for unbelievers like me) and sorted case-insensitively. This sorting process surely costs a bit of performance, but hey, I will use the read-only path some hundred times more often than the write process.
Now the stored dict looks like this:
{
    "auth_descr": "dev unit, office2",
    "auth_email": "me#myemail.com",
    "auth_key": "550e3 **shortened sha256** d73b1",
    "auth_unit_id": "2.3.1",
    "vier": "44é",      # utf-8 example
    "Vjier": "vier44"   # uppercase example
}
I don't know where I made a mistake in the earlier version(s), but take a look at the UTF-8 example: "é" is now stored as "é" and not as "\u00e9".
The class now looks like this (with minor changes)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os, errno
import json

class Ddpos:
    def db(self, table, id, col=''):
        table = '/ddpos/db/'+table
        try:
            os.makedirs(table)
            os.chmod(table, 0o755)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        file = table+'/'+id+'.txt'
        if not os.path.isfile(file):
            f = open(file, 'w', encoding='utf-8')
            f.write('{}')
            f.close()
        f = open(file, 'r', encoding='utf-8')
        r = json.loads(f.read().strip())
        f.close()
        if isinstance(col, str) and len(col) > 0:
            if col in r:
                return r[col]
            else:
                return ''
        elif isinstance(col, list) and len(col) > 0:
            res = {}
            for el in range(0, len(col)):
                if col[el] in r:
                    res[col[el]] = r[col[el]]
            return res
        elif isinstance(col, dict) and len(col) > 0:
            for el in col:
                r[el] = col[el]
            w = '{\n'
            for key in sorted(r, key=lambda y: y.lower()):
                w += '\t"%s": "%s",\n' % (key, r[key])
            w = w[:-2]+'\n'
            w += '}'
            f = open(file, 'w', encoding='utf-8')
            f.write(w)
            f.close()
            return r
        else:
            return r

ddpos = Ddpos()
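As an aside, the hand-built string only survives values that are themselves plain strings; json.dumps can produce the same human-readable, non-escaped output directly. A sketch of just the write step, with an example dict (ensure_ascii=False is what keeps "é" as "é"):
import json

r = {"Vjier": "vier44", "vier": "44é", "auth_unit_id": "2.3.1"}

# Sort keys case-insensitively ourselves, since sort_keys=True would
# sort case-sensitively; dicts preserve insertion order in Python 3.7+.
ordered = {k: r[k] for k in sorted(r, key=str.lower)}
text = json.dumps(ordered, ensure_ascii=False, indent=4)

with open('/tmp/example.txt', 'w', encoding='utf-8') as f:
    f.write(text)
print(text)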
