How to fix an unused-variable error caused by underscore (_) when using flake8 - flake8

In VSCode
flake8 ignore _ unused error
just like this
# Minimal reproduction: bind the caught exception to `_` and ignore it.
# flake8 then reports F841 ("local variable '_' is assigned to but never used").
try:
    pass
except Exception as _:
    pass
then flake8 show a error '_' unused

delete the /Lib/site-packages/flake8/__pycache__ folder
edit /Lib/site-packages/flake8/plugins/pyflakes.py run function
reopen your py file, edit it and save
before modification
def run(self):
    """Yield one flake8 report per pyflakes message.

    Each report is a (lineno, column, "CODE text", message_class) tuple;
    unknown pyflakes message types fall back to code F999.
    """
    for msg in self.messages:
        # Some pyflakes message types carry no column info; default to 0.
        column = getattr(msg, "col", 0)
        code = FLAKE8_PYFLAKES_CODES.get(type(msg).__name__, "F999")
        text = msg.message % msg.message_args
        yield (msg.lineno, column, "{} {}".format(code, text), msg.__class__)
after modification
def run(self):
    """Yield flake8 reports, suppressing F841 for the throwaway name `_`.

    Yields (lineno, column, "CODE text", message_class) for every pyflakes
    message EXCEPT "local variable '_' is assigned to but never used",
    since `_` is the conventional name for a deliberately unused binding.
    """
    for message in self.messages:
        col = getattr(message, "col", 0)
        # Look the code up once instead of twice per message.
        code = FLAKE8_PYFLAKES_CODES.get(type(message).__name__, "F999")
        # BUG FIX: the original patch yielded ONLY when the message was an
        # F841 about `_`, which silenced every other flake8 error.  The
        # intent is the opposite: skip that one message and yield the rest.
        if 'F841' in code and "'_'" in message.message_args:
            continue
        yield (
            message.lineno,
            col,
            "{} {}".format(code, message.message % message.message_args),
            message.__class__,
        )

Related

My code is showing error during Exception handling

I have written this code by defining an instSet() class. The code inserts elements into a list using the insert() method and performs various operations, such as removing an element with the remove() method after checking whether it is present; if it is not, it raises an exception:
class instSet(object):
    """A set of unique elements backed by a list.

    Duplicates are silently ignored on insert; `__str__` renders the
    elements sorted, comma-separated, inside braces, e.g. '{1,2,3}'.
    """

    def __init__(self):
        # Internal storage; holds each inserted element at most once.
        self.vals = []

    def insert(self, e):
        """Add `e` to the set if it is not already present."""
        if e not in self.vals:  # idiomatic `not in` (was `not e in`)
            self.vals.append(e)

    def remove(self, e):
        """Remove `e`.

        Raises:
            ValueError: "<e> not found" when `e` is absent.  `from None`
                suppresses the implicit chaining of the internal
                list.remove() ValueError, so the caller sees only the
                clean message instead of "list.remove(x): x not in list".
        """
        try:
            self.vals.remove(e)
        except ValueError:  # narrowed from bare `except:` — only "not in list"
            raise ValueError(str(e) + ' not found') from None

    def member(self, e):
        """Return True if `e` is in the set."""
        return e in self.vals

    def __str__(self):
        # NOTE: sorting mutates self.vals in place (kept from the original).
        self.vals.sort()
        # ','.join avoids the quadratic string concatenation loop; an empty
        # set still renders as '{}' exactly like the original slice trick.
        return '{' + ','.join(str(e) for e in self.vals) + '}'
Some expression performed are:
a = instSet()
a.insert(1)
a.remove(3)
print(a)
Main problem is when I am trying to remove an element which is not present in the list it is throwing error like this:
ValueError: list.remove(x): x not in list
Instead it should return:
ValueError: 3 not found
What is wrong in the above code?
The problem is that the original ValueError from list.remove() is chained onto the one you raise, so both tracebacks are shown. Re-raise with `from None` to suppress the chained traceback:
# `from None` suppresses the implicit exception chaining, so the caller
# sees only the new message instead of the original list.remove() error
# followed by "During handling of the above exception, another exception
# occurred".  (Fragment: belongs inside the try of instSet.remove.)
except:
raise Exception(str(e) + ' not found') from None

How to handle exception with imap_unordered in python multiprocessing

I am using pool.imap_unordered to apply a function over different txt files saved locally.
Is it possible to capture the exception and pass?
If my code runs into an exception, it blocks the entire loop.
# Question code: collect (key, output) pairs from a 15-process pool.
pool = Pool(processes=15)
results = {}
files = glob.glob('{}/10K_files/*.txt'.format(path_input))
# total= is required because imap_unordered yields lazily with no len();
# any exception raised inside process_file propagates here and kills the loop.
for key, output in tqdm(pool.imap_unordered(process_file, files),total=len(files)):
results[key] = output
I've tried something like this:
pool = Pool(processes=15)
results = {}
files = glob.glob('{}/10K_files/*.txt'.format(path_input))
# A single try around the whole loop aborts iteration on the first failure —
# imap_unordered offers no way to resume from where it stopped, which is why
# this attempt does not solve the asker's problem.
try:
for key, output in tqdm(pool.imap_unordered(process_file, files), total=len(files)):
results[key] = output
except:
print("error")
but then I want to resume the loop from where I started.
Thanks!
You could catch the exception in process_file and return it. Then test for whether the return value is an exception. Here is an example:
import os
import traceback
import multiprocessing as mp


def main():
    """Fan 20 items out to a pool; failed items come back as Exception values."""
    work_items = list(range(20))
    pool = mp.Pool()
    for result in pool.imap_unordered(process_file_exc, work_items):
        # A returned Exception instance marks an item that failed.
        if isinstance(result, Exception):
            print("Got exception: {}".format(result))
        else:
            print("Got OK result: {}".format(result))


def process_file_exc(work_item):
    """Wrapper: convert any exception raised by process_file into a return value.

    This keeps one bad item from aborting the whole imap_unordered loop;
    the traceback is captured in the worker, where it is still available.
    """
    try:
        return process_file(work_item)
    except Exception:
        return Exception("Err on item {}".format(work_item)
                         + os.linesep + traceback.format_exc())


def process_file(work_item):
    """Demo worker: item 9 deliberately fails with ZeroDivisionError."""
    if work_item == 9:
        # this will raise ZeroDivisionError exception
        return work_item / 0
    return "{} * 2 == {}".format(work_item, work_item * 2)


if __name__ == '__main__':
    main()

Python watchdog module duplicate events (edit: was not a watchdog issue)

I am creating a python script that will identify changes to a log file and print some data from the new logs.
I use watchdog to create an event handler and everything seems to work fine except from that, I get duplicate events every time I modify the file. I checked creation and delete, they both work as expected and trigger one time.
I have read the similar question which explains having a created and a modified event when I save a file but this is not my case. I just get two modification events.
Here is my code:
import os, sys, time
import subprocess
import threading
import win32print
from tkinter import filedialog
from tkinter import *
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class Handler(FileSystemEventHandler):
    """Watchdog event handler that reports changes to one specific file."""

    def __init__(self, observer, filename, dirname):
        # NOTE(review): the base-class __init__ was never called in the
        # original either (the super() call was commented out).
        self.observer = observer
        self.filename = filename
        self.dirname = dirname
        print("Handler filename = " , self.filename)
        print("Handler dirname = " , self.dirname)

    def on_modified(self, event):
        # Guard clause: ignore modifications to any sibling file in the
        # watched directory; only the tracked file matters.
        if self.filename != event.src_path:
            return
        print("The file was modified")
        print (event.src_path)
        # go get the last line and print the data
        # (win32print block kept from the original, still disabled)
        # try:
        #     hJob = win32print.StartDocPrinter (hPrinter, 1, ("test of raw data", None, "RAW"))
        #     try:
        #         win32print.StartPagePrinter (hPrinter)
        #         win32print.WritePrinter (hPrinter, raw_data)
        #         win32print.EndPagePrinter (hPrinter)
        #     finally:
        #         win32print.EndDocPrinter (hPrinter)
        # finally:
        #     win32print.ClosePrinter (hPrinter)

    def on_created(self, event):
        print("A file was created (", event.src_path, ")")

    def on_deleted(self, event):
        print("A file was deleted (", event.src_path, ")")
if __name__ == "__main__":
# Enumerate printers.  Flags=2 presumably selects local printers and
# Level=1 the PRINTER_INFO_1 tuple layout — TODO confirm against the
# pywin32 win32print documentation.
Flags=2
Name=None
Level=1
printers = win32print.EnumPrinters(Flags, Name, Level)
print("\nChoose a printer to use:")
# Present a 1-based menu; p[2] is the printer's display name here.
i=1
for p in printers:
print(i,')' , p[2])
i = i+1
# Python 3 needs bytes for raw printer data; Python 2 used str.
if sys.version_info >= (3,):
raw_data = bytes ("This is a test", "utf-8")
else:
raw_data = "This is a test"
# Read the user's 1-based menu choice and open that printer.
printer = int(input())
printer_name = printers[printer-1][2] #win32print.GetDefaultPrinter ()
print("You chose ", printer_name, "\nI will now print from the specified file with this printer")
hPrinter = win32print.OpenPrinter (printer_name)
# The tkinter file picker is disabled; a hard-coded path stands in for it.
# root = Tk()
# root.filename = filedialog.askopenfilename(initialdir = "/Desktop",title = "Select file",filetypes = (("log files","*.log"),("all files","*.*")))
file_path = "some_file_path" # root.filename
file_directory = os.path.dirname(file_path)
# print (file_path)
print (file_directory)
# Watch the file's directory (non-recursively); Handler filters events
# down to the single file of interest.
observer = Observer()
event_handler = Handler(observer, file_path, file_directory)
observer.schedule(event_handler, path=file_directory, recursive=False)
observer.start()
# Block the main thread forever; the observer thread delivers events.
observer.join()
any ideas would be appreciated
EDIT:
After some debugging I found out that Windows10 is changing the file modification time twice every time I save it.
The proof of concept code is this:
# Proof of concept: busy-poll the file's modification time to show that
# Windows 10 bumps the mtime twice for a single save.
last_seen = os.path.getmtime(file_path)
while True:
    current = os.path.getmtime(file_path)
    if last_seen != current:
        print ("the file was modified, last modification time is: ", current)
        last_seen = current
Final edit:
After testing my code on linux (Debian Stretch to be exact) it worked like a charm. So this combined with the previous edit probably shows that watchdog works fine and it is windows10 that has some issue. Should I post it on a different question or here?

print() outputs to console with delay after exception, breaking logging order

I have the following validation routine:
def validate_schema(self, name, data, schema_filename):
    """Validate `data` against the JSON schema stored at `schema_filename`.

    Logs a progress line first so a validation failure can be attributed
    to `name`; the RefResolver is anchored at the schema's own directory
    so relative `$ref`s to sibling schema files resolve.  Raises
    jsonschema.ValidationError on failure; logs "OK" on success.
    """
    Logging.message("validating {0} against schema".format(name))
    with open(schema_filename) as f:
        schema = json.load(f)
    schema_dir = os.path.abspath(schema_filename.rsplit('/', 1)[0])
    resolver = jsonschema.RefResolver("file:///{0}/".format(schema_dir), schema)
    jsonschema.validate(data, schema, resolver=resolver)
    Logging.ok()
class Logging contains:
# NOTE(review): these live inside `class Logging`; the `@staticmethod`
# decorators were mangled to `#staticmethod` comments in the paste, so the
# defs behave as shown.
def ok():
    """Finish a pending progress line with 'OK'."""
    print("OK")


def message(msg):
    """Start a progress line; end='... ' keeps the cursor on the same line."""
    print("> " + msg, end='... ')
When schema validation fails, I get exception text first, and "validating..." message following it.
Why is this happening and how to keep output order?
Flushing stdout does the trick
# NOTE(review): `@staticmethod` was mangled to a comment in the paste; this
# lives inside `class Logging`.
def message(text):
    """Print the progress prefix and flush immediately.

    Flushing guarantees the message reaches the console before any
    traceback that a subsequent exception writes to stderr.
    """
    print("> " + text, end='... ')
    sys.stdout.flush()

cx_freeze using sys.stdout.flush() and multiprocessing

I am using python 3.4.2 with cx_freeze 4.3.4 (all 64 bit)
the program I have created works fine under python but when frozen, it starts giving me problems with
sys.stdout.flush()
AttributeError: 'NoneType' object has no attribute 'flush'
using methods recommended on here, I have managed to reduce the problem to a Traceback message which flashes on the screen for a few seconds before disappearing. how can I resolve this issue.
Windows Error Screen shot
The stdout.flush is only called when it meets the BREAK command in the multiprocessing section of the code.
Any suggestions to either suppress/redirect the error to my log file or help resolve the source of the problem would be greatly appreciated.
Karl
class vDiags(tk.Tk):
# Main Tk application window for the diagnostics tool (question code).
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
tk.Tk.wm_title(self, "Diagnostics")
# "do stuff" is the asker's elision of the widget-construction code.
do stuff ............
# Kicks off the multiprocessing ping workers defined in start_job().
start_job()
# Worker process body: pulls IP addresses from job_q until it receives the
# None sentinel that start_job() pushes once per worker.
def pinger(job_q, mdic, ping, result_q):
devnull = open(os.devnull, 'w')
logger.info("Starting Pinger")
while True:
ip = job_q.get()
# None is the shutdown sentinel — exit the worker loop.
if ip is None:
logger.info("No IP address, finishing early")
break
test_result = {}
try:
if is_valid_ipv4_address(ip) is True:
# "do more stuff" is the asker's elision of the ping/test logic.
do more stuff.........
def start_job():
    """Spin up a pool of pinger processes, feed them IPs, collect results.

    Workers share a manager dict seeded from Variables.test_dic; each
    worker is shut down by one None sentinel on the jobs queue, and the
    results queue is drained into Variables.test_result_raw at the end.
    """
    logger.info("Starting start_Job")
    pool_size = Variables.poll_size
    logger.info("Pool size %s" % pool_size)
    jobs = multiprocessing.Queue()
    logger.info("Jobs %s" % jobs)
    results = multiprocessing.Queue()
    logger.info("results %s" % results)
    manager = multiprocessing.Manager()
    logger.info("manager %s" % manager)
    manager_test_dict = manager.dict()
    logger.info("manager_test_dict %s" % manager_test_dict)
    # Copy the plain dict into the manager proxy so workers share it.
    for key, val in Variables.test_dic.items():
        manager_test_dict[key] = val
    # Build the worker processes (explicit loop instead of a comprehension).
    pool = []
    for _ in range(pool_size):
        pool.append(multiprocessing.Process(
            target=pinger,
            args=(jobs, manager_test_dict, Variables.ping, results)))
    for proc in pool:
        logger.info("p in pool %s" % proc)
        proc.start()
    for source in Variables.source_ip:
        logger.info("Source IP:> %s" % source)
        jobs.put(source)
    # One None per worker tells each pinger to exit its loop.
    for proc in pool:
        logger.info("p in pool (jobs) %s" % proc)
        jobs.put(None)
    for proc in pool:
        logger.info("p in pool (join) %s" % proc)
        proc.join()
    logger.info("Move Results to new Variable")
    logger.info(results.qsize())
    while not results.empty():
        Variables.test_result_raw = updatetree(Variables.test_result_raw, results.get())
    logger.info("Finished start_Job")
class fakestd(object):
    """Stand-in for sys.stdout/sys.stderr in a frozen (console-less) build.

    cx_freeze GUI builds run with sys.stdout set to None, and
    multiprocessing calls sys.stdout.flush() on process exit; this object
    routes writes to the logger and makes flush a harmless no-op.
    """

    def write(self, string):
        # Forward would-be console output to the log file instead.
        logger.info("write %s" %string)

    def flush(self):
        # No buffer to flush; log the call for diagnostics.
        logger.info("Flush %s " % self )
if __name__ == '__main__':
# ********** Main App **********
# Replace the std streams BEFORE anything else: in a frozen GUI build
# sys.stdout/sys.stderr are None, and multiprocessing children call
# sys.stdout.flush() at exit, which would raise AttributeError.
sys.stdout = fakestd()
sys.stderr = fakestd()
# Required on Windows so frozen child processes bootstrap correctly.
multiprocessing.freeze_support()
logger.info("tkinter Installed Version %s" % tk.TkVersion)
app = vDiags()
app.geometry("1280x720")
app.mainloop()
# ********** Main App **********
I am hitting this issue right now (just migrated to multiprocessing from threading)
It appears it is a questionable bug in the multiprocessing module & the multiprocessing.freeze_support() call.
https://bugs.python.org/issue20607
There are reports it is still present in py35 but that stands a chance to be fixed at the py source level.
From a py34 point of view the ONE file I needed to update was:
C:\Python34\Lib\multiprocessing\process.py
diff -u process_orig.py process.py
--- process_orig.py 2016-12-12 12:42:01.568186800 +0000
+++ process.py 2016-12-12 12:37:28.971929900 +0000
## -270,8 +270,14 ##
traceback.print_exc()
finally:
util.info('process exiting with exitcode %d' % exitcode)
- sys.stdout.flush()
- sys.stderr.flush()
+# ---------- JRB modify
+ #sys.stdout.flush() # jrb edit to fix cx_freeze
+ #sys.stderr.flush() # jrb edit to fix cx_freeze
+ if sys.stdout is not None:
+ sys.stdout.flush()
+ if sys.stderr is not None:
+ sys.stderr.flush()
+# ---------- JRB modify
return exitcode

Resources