Python not printing expected output to screen from given thread - python-3.x

Could someone explain why my code doesn't print the expected output [Thread1 36] from the thread? I am currently using Python 3.7.0 on macOS Catalina 10.15.2.
Here is my code:
import timeit, _thread

def expmod1(a, n, m):
    return (a**n) % m

# avg_time is a higher-order function; its argument is one of the
# variations of the expmod(135, 202, 53) function
def avg_time(thread_name, expmod_function):
    print("Start")
    result = expmod_function(135, 202, 53)
    print(thread_name + " " + str(result), flush=True)
    return result

# analysis of all three functions based on average time cost, using a
# constant function as defined by avg_time
def analysis1():
    try:
        _thread.start_new_thread(avg_time, ("Thread1", expmod1))
    except:
        print("Unable to start thread")

def main():
    analysis1()

if __name__ == "__main__":
    main()
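
A likely explanation, for what it's worth: _thread.start_new_thread returns immediately, and threads started through the low-level _thread module do not keep the interpreter alive, so the process can exit as soon as main() returns, killing the thread before it prints. A minimal sketch of one way to make the output appear, using the higher-level threading module with an explicit join (this is a suggestion, not part of the original post):

import threading

def expmod1(a, n, m):
    return (a**n) % m

def avg_time(thread_name, expmod_function):
    print("Start")
    result = expmod_function(135, 202, 53)
    print(thread_name + " " + str(result), flush=True)
    return result

def main():
    t = threading.Thread(target=avg_time, args=("Thread1", expmod1))
    t.start()
    t.join()  # wait for the worker; without this the process may exit before the print runs

if __name__ == "__main__":
    main()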

Related

Python multiprocessing a child function

I have been trying to learn multiprocessing.
I have a simple function which generates a list of numbers, and I am trying to use multiprocessing to add each number if it is divisible by 10.
My objective is to run the child function in parallel across the available CPUs.
import multiprocessing
import time

def add_multiple_of_10_v0(number):
    number_list = []
    for i in range(1, number):
        x = i**3 + i**2 + i + 1
        number_list.append(x)
    print(number_list)
    pool = multiprocessing.Pool(6)
    result = 0
    for value in pool.map(check_multiple_10_v0, number_list):
        if value > 0:
            result = result + value
        else:
            pass
    pool.close()
    pool.join()
    return result

def check_multiple_10_v0(in_number):
    if in_number % 10 == 0:
        time.sleep(5)
        return in_number
    else:
        return -1
I am getting the below error:

RuntimeError:
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.
I am new to Python and multiprocessing and would appreciate guidance.
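
For reference, the idiom the error message is asking for is to create the Pool only when the script runs as the main program, so that child processes (which are spawned rather than forked on Windows and on recent macOS builds) can re-import the module without re-running the Pool-creating code. A minimal sketch of the code above rearranged along those lines (the call with 100 at the bottom is a made-up example argument):

import multiprocessing
import time

def check_multiple_10_v0(in_number):
    # the worker function must live at module level so child processes can import it
    if in_number % 10 == 0:
        time.sleep(5)
        return in_number
    else:
        return -1

def add_multiple_of_10_v0(number):
    number_list = [i**3 + i**2 + i + 1 for i in range(1, number)]
    result = 0
    pool = multiprocessing.Pool(6)
    for value in pool.map(check_multiple_10_v0, number_list):
        if value > 0:
            result = result + value
    pool.close()
    pool.join()
    return result

if __name__ == '__main__':
    # the guard keeps spawned children from re-executing this call on import
    print(add_multiple_of_10_v0(100))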

Issue with number of arguments in __init__() while using Inheritance

I am a beginner trying to learn inheritance in Python, so I decided to practice the example programs from the book "Data Structures and Algorithms in Python" by Michael T. Goodrich.
Here is the code:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

class Progression:
    def __init__(self, start=0):
        self._current = start

    def _advanced(self):
        self._current += 1

    def __next__(self):
        if self._current is None:
            raise StopIteration()
        else:
            answer = self._current
            self._advanced()
            return answer

    def __iter__(self):
        return self

    def print_progression(self, n):
        print(' '.join(str(next(self)) for j in range(n)))

class ArithmeticProgression:
    def __init__(self, increment=1, start=0):
        super().__init__(start)
        self._increment = increment

    def _advance(self):
        self._current += self._increment

class GeometricProgression(Progression):
    def __init__(self, base=2, start=1):
        super().__init__(start)
        self._base = base

    def _advance(self):
        self._current *= self._base

class FibonacciProgression(Progression):
    def __init__(self, first=0, second=1):
        super().__init__(first)
        self._prev = second - first

    def _advance(self):
        self._prev, self._current = self._current, self._prev + self._current

if __name__ == '__main__':
    print('Default Progression: ')
    Progression().print_progression(10)
    print('Arithmetic progression with increment 5 and start 2:')
    ArithmeticProgression(5, 2).print_progression(10)
    print('Geometric progression with default base:')
    GeometricProgression().print_progression(10)
    print('Geometric progression with increasing it to the power of 2')
    GeometricProgression(3).print_progression(10)
    print('Fibonacci progression with default start progression')
    FibonacciProgression().print_progression(10)
    print('Fibonacci progression with default start progression')
    FibonacciProgression(4, 6).print_progression(10)
Here is the error:

Default Progression:
0 1 2 3 4 5 6 7 8 9
Arithmetic progression with increment 5 and start 2:
Traceback (most recent call last):
  File "some location", line 61, in <module>
    ArithmeticProgression(5,2).print_progression(10)
  File "some location", line 33, in __init__
    super().__init__(start)
TypeError: object.__init__() takes exactly one argument (the instance to initialize)
I am trying to understand the super().__init__(start) call in ArithmeticProgression but am getting very confused by how the arguments are passed through __init__(). Any help will be appreciated; I am a beginner.
ArithmeticProgression does not inherit from Progression like GeometricProgression does. So there's no base class to call with super().
Without a base class, super() resolves to object, and object.__init__() does not accept a start argument. Replace
class ArithmeticProgression:
with
class ArithmeticProgression(Progression):
To make it short: you just forgot (Progression).
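
For completeness, a corrected sketch. Note that the posted base class defines _advanced while the subclasses override _advance; in the book the method is called _advance throughout, so the rename below is an assumption based on that:

class Progression:
    def __init__(self, start=0):
        self._current = start

    def _advance(self):
        # default behaviour: count up by one; subclasses override this
        self._current += 1

    def __next__(self):
        if self._current is None:
            raise StopIteration()
        answer = self._current
        self._advance()
        return answer

    def __iter__(self):
        return self

class ArithmeticProgression(Progression):  # the missing (Progression)
    def __init__(self, increment=1, start=0):
        super().__init__(start)  # now resolves to Progression.__init__
        self._increment = increment

    def _advance(self):
        self._current += self._increment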

Python multiprocessing on for loop

I have a function with two parameters
reqs = [1223, 1456, 1243, 20455]
url = "pass a url"

def crawl(i, url):
    print("%s is %s" % (i, url))
I want to trigger the above function using multiprocessing.
from multiprocessing import Pool

if __name__ == '__main__':
    p = Pool(5)
    print(p.map([crawl(i, url) for i in reqs]))
The above code is not working for me. Can anyone please help me with this?
----- ADDING NEW CODE ---------
from multiprocessing import Pool

reqs = [1223, 1456, 1243, 20455]
url = "pass a url"

def crawl(combined_args):
    print("%s is %s" % (combined_args[0], combined_args[1]))

def main():
    p = Pool(5)
    print(p.map(crawl, [(i, url) for i in reqs]))

if __name__ == '__main__':
    main()
When I try to execute the above code, I get an error.
According to the multiprocessing.Pool.map documentation, this is the function signature:
map(func, iterable[, chunksize])
You are trying to pass map a single iterable instead of (func, iterable).
Please refer to the following example of multiprocessing.pool (source):
import time
from multiprocessing import Pool

work = (["A", 5], ["B", 2], ["C", 1], ["D", 3])

def work_log(work_data):
    print(" Process %s waiting %s seconds" % (work_data[0], work_data[1]))
    time.sleep(int(work_data[1]))
    print(" Process %s Finished." % work_data[0])

def pool_handler():
    p = Pool(2)
    p.map(work_log, work)

if __name__ == '__main__':
    pool_handler()
Please note that it passes a single argument to the work_log function, and inside the function it uses indexing to get to the relevant fields.
Referring to your example:
from multiprocessing import Pool

reqs = [1223, 1456, 1243, 20455]
url = "pass a url"

def crawl(combined_args):
    print("%s is %s" % (combined_args[0], combined_args[1]))

def main():
    p = Pool(5)
    print(p.map(crawl, [(i, url) for i in reqs]))

if __name__ == '__main__':
    main()
Results with:
1223 is pass a url
1456 is pass a url
1243 is pass a url
20455 is pass a url
[None, None, None, None] # This is the output of the map function
Issue resolved. The crawl function should be in a separate module, like below:
crawler.py

def crawl(combined_args):
    print("%s is %s" % (combined_args[0], combined_args[1]))

run.py

from multiprocessing import Pool
import crawler

# reqs and url must be defined here too, since run.py is now the entry script
reqs = [1223, 1456, 1243, 20455]
url = "pass a url"

def main():
    p = Pool(5)
    print(p.map(crawler.crawl, [(i, url) for i in reqs]))

if __name__ == '__main__':
    main()
Then the output will be like below:
1223 is pass a url
1456 is pass a url
1243 is pass a url
20455 is pass a url
[None, None, None, None] # This is the output of the map function
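
As an aside (not part of the original answers): if you would rather keep crawl with two separate parameters instead of indexing into a tuple, Pool.starmap, available since Python 3.3, unpacks each argument tuple for you:

from multiprocessing import Pool

reqs = [1223, 1456, 1243, 20455]
url = "pass a url"

def crawl(i, url):
    print("%s is %s" % (i, url))

def main():
    with Pool(5) as p:
        # starmap calls crawl(i, url) for every tuple in the list
        p.starmap(crawl, [(i, url) for i in reqs])

if __name__ == '__main__':
    main()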

How to handle exception with imap_unordered in python multiprocessing

I am using pool.imap_unordered to apply a function over different txt files saved locally.
Is it possible to capture the exception and pass?
If my code runs into an exception, it blocks the entire loop.
pool = Pool(processes=15)
results = {}
files = glob.glob('{}/10K_files/*.txt'.format(path_input))
for key, output in tqdm(pool.imap_unordered(process_file, files), total=len(files)):
    results[key] = output
I've tried something like this:
pool = Pool(processes=15)
results = {}
files = glob.glob('{}/10K_files/*.txt'.format(path_input))
try:
    for key, output in tqdm(pool.imap_unordered(process_file, files), total=len(files)):
        results[key] = output
except:
    print("error")
But then I want to resume the loop from where it stopped.
Thanks!
You could catch the exception in process_file and return it. Then test for whether the return value is an exception. Here is an example:
import os
import traceback
import multiprocessing as mp

def main():
    work_items = [i for i in range(20)]
    pool = mp.Pool()
    for result in pool.imap_unordered(process_file_exc, work_items):
        if isinstance(result, Exception):
            print("Got exception: {}".format(result))
        else:
            print("Got OK result: {}".format(result))

def process_file_exc(work_item):
    try:
        return process_file(work_item)
    except Exception as ex:
        return Exception("Err on item {}".format(work_item)
                         + os.linesep + traceback.format_exc())

def process_file(work_item):
    if work_item == 9:
        # this will raise ZeroDivisionError exception
        return work_item / 0
    return "{} * 2 == {}".format(work_item, work_item * 2)

if __name__ == '__main__':
    main()

cx_freeze using sys.stdout.flush() and multiprocessing

I am using Python 3.4.2 with cx_Freeze 4.3.4 (all 64-bit).
The program I have created works fine under Python, but when frozen it starts giving me problems with
sys.stdout.flush()
AttributeError: 'NoneType' object has no attribute 'flush'
Using methods recommended on here, I have managed to reduce the problem to a traceback message which flashes on the screen for a few seconds before disappearing. How can I resolve this issue?
[Windows error screenshot]
sys.stdout.flush() is only called when the code hits the break statement in the multiprocessing section.
Any suggestions to either suppress/redirect the error to my log file, or to help resolve the source of the problem, would be greatly appreciated.
Karl
class vDiags(tk.Tk):
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        tk.Tk.wm_title(self, "Diagnostics")
        # do stuff ............
        start_job()

def pinger(job_q, mdic, ping, result_q):
    devnull = open(os.devnull, 'w')
    logger.info("Starting Pinger")
    while True:
        ip = job_q.get()
        if ip is None:
            logger.info("No IP address, finishing early")
            break
        test_result = {}
        try:
            if is_valid_ipv4_address(ip) is True:
                # do more stuff.........

def start_job():
    logger.info("Starting start_Job")
    pool_size = Variables.poll_size
    logger.info("Pool size %s" % pool_size)
    jobs = multiprocessing.Queue()
    logger.info("Jobs %s" % jobs)
    results = multiprocessing.Queue()
    logger.info("results %s" % results)
    manager = multiprocessing.Manager()
    logger.info("manager %s" % manager)
    manager_test_dict = manager.dict()
    logger.info("manager_test_dict %s" % manager_test_dict)
    for key, val in Variables.test_dic.items():
        manager_test_dict[key] = val
    pool = [multiprocessing.Process(target=pinger,
                                    args=(jobs, manager_test_dict, Variables.ping, results))
            for i in range(pool_size)]
    for p in pool:
        logger.info("p in pool %s" % p)
        p.start()
    for i in Variables.source_ip:
        logger.info("Source IP:> %s" % i)
        jobs.put(i)
    for p in pool:
        logger.info("p in pool (jobs) %s" % p)
        jobs.put(None)
    for p in pool:
        logger.info("p in pool (join) %s" % p)
        p.join()
    logger.info("Move Results to new Variable")
    logger.info(results.qsize())
    while not results.empty():
        Variables.test_result_raw = updatetree(Variables.test_result_raw, results.get())
    logger.info("Finished start_Job")

class fakestd(object):
    def write(self, string):
        logger.info("write %s" % string)
        pass

    def flush(self):
        logger.info("Flush %s " % self)
        pass

if __name__ == '__main__':
    # ********** Main App **********
    sys.stdout = fakestd()
    sys.stderr = fakestd()
    multiprocessing.freeze_support()
    logger.info("tkinter Installed Version %s" % tk.TkVersion)
    app = vDiags()
    app.geometry("1280x720")
    app.mainloop()
    # ********** Main App **********
I am hitting this issue right now (I just migrated to multiprocessing from threading).
It appears to be a questionable bug in the multiprocessing module and the multiprocessing.freeze_support() call:
https://bugs.python.org/issue20607
There are reports that it is still present in Python 3.5, but it stands a chance of being fixed at the Python source level.
From a Python 3.4 point of view, the ONE file I needed to update was:
C:\Python34\Lib\multiprocessing\process.py
diff -u process_orig.py process.py

--- process_orig.py     2016-12-12 12:42:01.568186800 +0000
+++ process.py  2016-12-12 12:37:28.971929900 +0000
@@ -270,8 +270,14 @@
                 traceback.print_exc()
         finally:
             util.info('process exiting with exitcode %d' % exitcode)
-            sys.stdout.flush()
-            sys.stderr.flush()
+# ---------- JRB modify
+            #sys.stdout.flush()  # jrb edit to fix cx_freeze
+            #sys.stderr.flush()  # jrb edit to fix cx_freeze
+            if sys.stdout is not None:
+                sys.stdout.flush()
+            if sys.stderr is not None:
+                sys.stderr.flush()
+# ---------- JRB modify
             return exitcode
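
If you would rather not patch the standard library, a workaround often used for frozen windowed apps (a suggestion, not from the answer above) is to install dummy streams at the very top of the entry script, so every process, including multiprocessing children, sees a non-None sys.stdout and sys.stderr before anything tries to flush them:

import os
import sys

# in a frozen GUI build there is no console, so these streams can be None;
# replacing them with a writable dummy avoids the AttributeError on flush()
if sys.stdout is None:
    sys.stdout = open(os.devnull, 'w')
if sys.stderr is None:
    sys.stderr = open(os.devnull, 'w')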
