Using multiprocessing inside a function - python-3.x

I want to take the working code below and put it into a function that I can call whenever I want.
import multiprocessing as mp
def parameters(x,n):
    for i in x:
        yield (i,n)
def power(a):
    x, n = a
    return x**n
if __name__ == '__main__':
    p = [i for i in range(1000)]
    p = parameters(p,2)
    agents = 4
    chunk = 10
    with mp.Pool(processes = agents) as pool:
        o = pool.map(power,p,chunksize = chunk)
    print(o)
I tried doing something like this:
import multiprocessing as mp
def parameters(x,n):
    for i in x:
        yield (i,n)
def power(a):
    x, n = a
    return x**n
def calculate(s,n):
    p = [i for i in range(s)]
    p = parameters(p,n)
    agents = 4
    chunk = 10
    with mp.Pool(processes = agents) as pool:
        o = pool.map(power,p,chunksize = chunk)
    return o
print(calculate(1000,2))
However, this does not work at all. It tells me that another process has started before one has ended. But the code above did work. Is there a way to properly put this code into a function? If not with this setup, then with what setup?

Make sure to protect code that should only run in the main process with if __name__ == '__main__':. This code works:
import multiprocessing as mp
def parameters(x,n):
    for i in x:
        yield (i,n)
def power(a):
    x, n = a
    return x**n
def calculate(s,n):
    p = [i for i in range(s)]
    p = parameters(p,n)
    agents = 4
    chunk = 10
    with mp.Pool(processes = agents) as pool:
        o = pool.map(power,p,chunksize = chunk)
    return o
if __name__ == '__main__':
    print(calculate(1000,2))
Without the if, the following error is raised:
RuntimeError:
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.
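As a side note (my own sketch, not part of the answer above): the parameters generator only exists to pair each x with the same exponent, so functools.partial can serve the same purpose if you prefer to avoid packing and unpacking tuples. A minimal sketch, assuming the same power computation but with x and n as separate arguments:

import multiprocessing as mp
from functools import partial
def power(x, n):
    # same computation, but taking x and n as separate arguments
    return x**n
if __name__ == '__main__':
    with mp.Pool(processes=4) as pool:
        # partial fixes n=2, so map only has to supply the varying x values
        o = pool.map(partial(power, n=2), range(1000), chunksize=10)
    print(o[:5])

The if __name__ == '__main__': guard is still required for exactly the same reason as above.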

Related

Why is serial code faster than concurrent.futures in this case?

I am using the following code to process some pictures for my ML project and I would like to parallelize it.
import multiprocessing as mp
import concurrent.futures
def track_ids(seq):
    '''The func is so big I can not put it here'''
    ood = {}
    for i in seq:
        # I load around 500 images and process them
        ood[i] = some Value
    return ood
seqs = []
for seq in range(1, 10):# len(seqs)+1):
    seq = txt+str(seq)
    seqs.append(seq)
    # serial call of the function
    track_ids(seq)
#parallel call of the function
with concurrent.futures.ProcessPoolExecutor(max_workers=mp.cpu_count()) as ex:
    ood_id = ex.map(track_ids, seqs)
If I run the code serially it takes 3.0 minutes, but in parallel with concurrent.futures it takes 3.5 minutes.
Can someone please explain why that is, and present a way to solve the problem?
By the way, I have 12 cores.
Thanks
Here's a brief example of how one might go about profiling multiprocessing code vs serial execution:
import multiprocessing as mp
import concurrent.futures
from cProfile import Profile
from pstats import Stats
def track_ids(seq):
    '''The func is so big I can not put it here'''
    ood = {}
    for i in seq:
        # I load around 500 images and process them
        ood[i] = some Value
    return ood
def profile_seq():
    p = Profile() # one and only profiler instance
    p.enable()
    seqs = []
    for seq in range(1, 10):# len(seqs)+1):
        seq = txt+str(seq)
        seqs.append(seq)
        # serial call of the function
        track_ids(seq)
    p.disable()
    return Stats(p), seqs
def track_ids_pr(seq):
    p = Profile() # profile the child tasks
    p.enable()
    retval = track_ids(seq)
    p.disable()
    return (Stats(p, stream="dummy"), retval)
def profile_parallel(seqs):
    p = Profile() # profile stuff in the main process
    p.enable()
    with concurrent.futures.ProcessPoolExecutor(max_workers=mp.cpu_count()) as ex:
        retvals = ex.map(track_ids_pr, seqs)
    p.disable()
    s = Stats(p)
    out = []
    for ret in retvals:
        s.add(ret[0])
        out.append(ret[1])
    return s, out
if __name__ == "__main__":
    # run the serial version first so we also have seqs for the parallel run
    serial_stat, seqs = profile_seq()
    stat, retval = profile_parallel(seqs)
    stat.print_stats()
EDIT: Unfortunately, I found out that pstats.Stats objects cannot be used directly with multiprocessing.Queue because they are not picklable (which is needed for the operation of concurrent.futures). Evidently a Stats object normally stores a reference to a file for the purpose of writing statistics to that file, and if none is given, it grabs a reference to sys.stdout by default. We don't actually need that reference until we want to print out the statistics, so we can give it a temporary value to prevent the pickle error, and then restore an appropriate value later. The following example should be copy-paste-able and run just fine, rather than the pseudocode-ish example above.
from multiprocessing import Queue, Process
from cProfile import Profile
from pstats import Stats
import sys
def isprime(x):
    # +1 so the square root itself is tested (e.g. 9, 25, ...)
    for d in range(2, int(x**.5) + 1):
        if x % d == 0:
            return False
    return True
def foo(retq):
    p = Profile()
    p.enable()
    primes = []
    max_n = 2**20
    for n in range(3, max_n):
        if isprime(n):
            primes.append(n)
    p.disable()
    retq.put(Stats(p, stream="dummy")) # Dirty hack: set `stream` to something picklable, then override later
if __name__ == "__main__":
    q = Queue()
    p1 = Process(target=foo, args=(q,))
    p1.start()
    p2 = Process(target=foo, args=(q,))
    p2.start()
    s1 = q.get()
    s1.stream = sys.stdout # restore a real stream
    s2 = q.get()
    # s2.stream: if we are just adding this `Stats` object to another, the `stream` just gets thrown away anyway
    s1.add(s2) # add up the stats from both child processes
    s1.print_stats() # s1.stream gets used here, but not before. If you provide a file to write to instead of sys.stdout, it will write to that file.
    p1.join()
    p2.join()
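For what it's worth, the same stream-override trick should also work with the ProcessPoolExecutor used in the question, since the executor pickles worker return values just like a Queue does. A small sketch of my own (the names count_primes_profiled and the prime-counting workload are stand-ins for the real image processing):

import concurrent.futures
import sys
from cProfile import Profile
from pstats import Stats
def isprime(x):
    for d in range(2, int(x**.5) + 1):
        if x % d == 0:
            return False
    return True
def count_primes_profiled(max_n):
    # profile the work done inside the worker and return a picklable Stats
    p = Profile()
    p.enable()
    count = sum(1 for n in range(3, max_n) if isprime(n))
    p.disable()
    return Stats(p, stream="dummy"), count  # same "dummy" stream hack as above
if __name__ == "__main__":
    with concurrent.futures.ProcessPoolExecutor(max_workers=2) as ex:
        results = list(ex.map(count_primes_profiled, [2**18, 2**18]))
    merged, _ = results[0]
    merged.stream = sys.stdout      # restore a real stream before printing
    for stats, _count in results[1:]:
        merged.add(stats)           # fold in the stats from the other workers
    merged.print_stats()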

How can I speed up this loop using multiprocessing or multithreading?

I am afraid that I'm not doing the multithreading thing the right way, so I came here in search of wisdom. I have two arrays of addresses, and I have to check whether each address of the first array exists in the second array and, in case it doesn't, look for the most similar address in array 2.
The array that has the "official" addresses is called directory, and the array that I need to validate is called look_address.
The code goes as follows:
import pandas as pd
import numpy as np
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
from datetime import datetime,timedelta
import threading
import queue
class myThread(threading.Thread):
    def __init__(self,threadID,name,q):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name=name
        self.q = q
    def run(self):
        print(f"starting {self.name}")
        process_data(self.name,self.q)
        print(f"ending {self.name}")
locs = []
ratios={}
def process_data(threadName,q):
    while not exitFlag:
        queueLock.acquire()
        if not workQueue.empty():
            d = q.get()
            queueLock.release()
            d = d.strip()
            if directory.isin([d]).any():
                locs.append(d)
            else:
                pos = process.extract(d,directory.values,scorer=fuzz.ratio,limit=50)
                ratios[d] = pos
        else:
            queueLock.release()
threadlist = ["T-1","T-2","T-3","T-4","T-5","T-6","T-7","T-8","T-9","T-10"]
nameList = look_address
queueLock = threading.Lock()
workQueue = queue.Queue(len(nameList)+1)
threads=[]
threadID=1
exitFlag=0
for name in threadlist:
    thread = myThread(threadID,name,workQueue)
    thread.start()
    threads.append(thread)
    threadID+=1
queueLock.acquire()
for addr in nameList:
    workQueue.put(addr)
queueLock.release()
total_steps = len(workQueue.queue)
tot_sec = 0
t0 = datetime.now()
while not workQueue.empty():
    total_seconds =(datetime.now()-t0).total_seconds()
    if total_seconds == 0:
        total_seconds = 1e-8
    progress = 1-len(workQueue.queue)/total_steps
    tot_sec+=total_seconds
    print("\rProgress: {pr:.2f}% || Good/Errors: {gb}/{bd}".format(
        pr = progress*100,
        its = 1/total_seconds,
        elap = timedelta(seconds=np.round(tot_sec)),
        gb=len(locs),
        bd=len(errors),
        eta = timedelta(seconds=np.round(total_seconds*(total_steps-len(workQueue.queue))))),end="",flush=True)
exitFlag = 1
for t in threads:
    t.join()
print("\nExiting Main Thread")
Each request in process.extract takes around 25 s (did a %timeit). Now, with the script above it doesn't seem to speed up the data processing. It has been running for about 2 hours and it has progressed by around 4.29%.
My two questions are:
Is the implementation of multithreading correct?
How can I speed up the data processing? Maybe run this on a VPS on amazon or google?
I want to understand why this is so slow and how I can speed things up.
EDIT: Changed from:
if not workQueue.empty():
    d = q.get()
    d = d.strip()
    if directory.isin([d]).any():
        locs.append(d)
    else:
        pos = process.extract(d,directory.values,scorer=fuzz.ratio,limit=50)
        ratios[d] = pos
queueLock.release()
to:
if not workQueue.empty():
    d = q.get()
    queueLock.release()
    d = d.strip()
    if directory.isin([d]).any():
        locs.append(d)
    else:
        pos = process.extract(d,directory.values,scorer=fuzz.ratio,limit=50)
        ratios[d] = pos
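One observation that may matter here (my own note, not taken from the question): process.extract is pure-Python, CPU-bound work, so the threads mostly take turns on the GIL rather than running in parallel. A process pool sidesteps that. A rough sketch under the assumption that directory and look_address can be treated as plain lists of strings (in the question directory is a pandas Series, hence the .isin check); the function name match_address and the placeholder data are mine:

import concurrent.futures
from functools import partial
from fuzzywuzzy import fuzz, process
def match_address(addr, directory):
    # exact hit: return it as-is; otherwise return the 50 closest candidates
    addr = addr.strip()
    if addr in directory:
        return addr, None
    return addr, process.extract(addr, directory, scorer=fuzz.ratio, limit=50)
if __name__ == "__main__":
    directory = ["123 Main St", "456 Oak Ave"]      # placeholder data
    look_address = ["123 Main St", "789 Pine Rd"]   # placeholder data
    locs, ratios = [], {}
    with concurrent.futures.ProcessPoolExecutor() as ex:
        for addr, candidates in ex.map(partial(match_address, directory=directory),
                                       look_address):
            if candidates is None:
                locs.append(addr)
            else:
                ratios[addr] = candidates
    print(locs, list(ratios))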

Why does the thread run only once?

This is my client:
from tkinter import *
import tkinter.simpledialog
import socket
import select
import ssl
import threading
Host = '127.0.0.1'
Port = 87
def create_connection():
    return socket.create_connection((Host, Port))
def gui():
    global e1
    global txt
    root = Tk()
    root.title('Amifre chat')
    root.geometry("700x515")
    txt = Text(root, width=70, height=30)
    txt.config(state=DISABLED)
    e1 = Entry(root, width=93)
    e1.place(x=0, y=487)
    txt.place(x=0)
    t = threading.Thread(target=display_msg())
    t.daemon = True
    root.after(1, t.start())
    root.mainloop()
def display_msg():
    r, w, x = select.select([client_socket], [], [], 0.00001)
    if client_socket in r:
        data = client_socket.recv().decode()
        txt.config(state=NORMAL)
        txt.insert(INSERT, data + '\n')
        txt.config(state=DISABLED)
if __name__ == '__main__':
    start = Tk()
    b = Button(start, text='Click to join the chat', command=create_user_name).grid(row=0)
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    client_socket = create_connection()
    client_socket = context.wrap_socket(client_socket, server_hostname='127.0.0.1')
    start.mainloop()
    gui()
This is a chat client, and the thread in the gui function calls the display_msg function only once. Does anyone have any idea why? (Sending data works well, and it is displayed in a client without a GUI.)
You should pass only the function name to the target option, and call t.start() directly without using after():
t = threading.Thread(target=display_msg, daemon=True)
t.start()
Then, you need to use a while loop inside display_msg() to keep receiving data from the server:
def display_msg():
    while True:
        r, w, x = select.select([client_socket], [], [])
        if client_socket in r:
            data = client_socket.recv(1024).decode()
            txt.config(state=NORMAL)
            txt.insert(INSERT, data + '\n')
            txt.config(state=DISABLED)
This code:
t = threading.Thread(target=display_msg())
is functionally identical to this code:
result = display_msg()
t = threading.Thread(target=result)
And this code:
root.after(1, t.start())
is functionally identical to this code:
result = t.start()
root.after(1, result)
In both threading.Thread and after, the values given to the functions must be references to a function rather than the result of a function (unless the result is itself a reference to a function).
t = threading.Thread(target=display_msg)
...
root.after(1, t.start)
Regardless, the answer to "why does the thread run only once?" is: because that's what threads do. Threads run until they are done. If you need code to run in a loop, you will need to write the loop yourself.
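A related caveat worth flagging (my addition, not part of the answer above): Tkinter widgets are generally only safe to touch from the thread running mainloop, so instead of a background thread you can let the event loop itself poll the socket with after(). A rough sketch, assuming client_socket and the txt widget are reachable from the function as in the question's code, and that root is made accessible (e.g. global) so the callback can re-schedule itself:

def poll_socket():
    # non-blocking check: a timeout of 0 returns immediately
    r, _, _ = select.select([client_socket], [], [], 0)
    if client_socket in r:
        data = client_socket.recv(1024).decode()
        txt.config(state=NORMAL)
        txt.insert(INSERT, data + '\n')
        txt.config(state=DISABLED)
    root.after(100, poll_socket)  # check again in roughly 100 ms

# in gui(), after the widgets are created and before mainloop():
#     root.after(100, poll_socket)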

Python multiprocessing update size of iterable

I have the following:
from multiprocessing import Pool
def process_elements(index_of_data_inputs):
    <process>
    if <condition>:
        # i would like to change the size of data_inputs
if __name__ == '__main__':
    pool = Pool() # Create a multiprocessing Pool
    pool.map(process_elements, range(0, len(data_inputs))) # process data_inputs iterable with pool
How can I change the size of data_inputs, and so change the number of times process_elements is called?
The work that I would like to parallelize is:
i = 0
while i < len(elements):
    new_elems = process_some_elements(x,y)
    if len(new_elems) > 0:
        elements = elements + new_elems
    i += 1
Consider a simple example of communication between processes with the multiprocessing module in Python:
import multiprocessing
import queue
import random
def process_elements(num, comq):
    val = random.random()
    if val > 0.5:
        comq.put(1)
    return num, int(1000 * val)
if __name__ == '__main__':
    # initial data
    numbers = list(range(10))
    # data structure for communication between multiple processes
    m = multiprocessing.Manager()
    q = m.Queue()
    with multiprocessing.Pool(processes=4) as pool:
        # get answer for original data
        ans = pool.starmap(process_elements, [(num, q) for num in numbers])
        print(numbers)
        print(ans)
        # create additional data based on the answer for initial data
        new_numbers = numbers[-1:]
        try:
            while True:
                new_numbers.append(new_numbers[-1] + q.get_nowait())
        except queue.Empty:
            pass
        # get answer for additional data
        new_ans = pool.starmap(process_elements, [(num, q) for num in new_numbers[1:]])
        print(new_numbers)
        print(new_ans)
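A note on the design (my reading of it): a plain multiprocessing.Queue cannot be handed to Pool workers through starmap, which is why the example goes through multiprocessing.Manager(), whose queue proxy is picklable. To mirror the while loop from the question more closely, the same idea can be wrapped in a loop that keeps resubmitting newly discovered elements until nothing new turns up; a sketch of mine, with placeholder work standing in for process_some_elements:

import multiprocessing
import queue
def process_elements(elem, comq):
    # placeholder work: occasionally "discover" a new element and report it
    if elem % 3 == 0:
        comq.put(elem + 100)
    return elem * 2
if __name__ == '__main__':
    m = multiprocessing.Manager()
    q = m.Queue()
    results = []
    pending = list(range(10))
    with multiprocessing.Pool(processes=4) as pool:
        while pending:  # keep going as long as the last round produced new work
            results += pool.starmap(process_elements, [(e, q) for e in pending])
            pending = []
            try:
                while True:
                    pending.append(q.get_nowait())
            except queue.Empty:
                pass
    print(results)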

Python freezes when accessing string value in subprocess

I spent nearly the whole day on this and came to the end of my knowledge:
I want to change a shared multiprocessing.Value string in the subprocess, but Python hangs as soon as the subprocess tries to change the shared value.
Below is example code:
from multiprocessing import Process, Value, freeze_support
from ctypes import c_wchar_p
def test(x):
    with x.get_lock():
        x.value = 'THE TEST WORKED'
    return
if __name__ == "__main__":
    freeze_support()
    value = Value(c_wchar_p, '')
    p = Process(target=test, args = (value,))
    p.start()
    print(p.pid)
    # this try block is to also allow p.run()
    try:
        p.join()
        p.terminate()
    except:
        pass
    print(value.value)
What I tried and does not work:
I tried ctypes c_wchar_p and c_char_p, but both result in the same freezing.
I tried also without x.get_lock()
I tried also without freeze_support()
What works (but does not help):
Using a float as the shared value (value = Value('d',0) and x.value = 1).
Running the Process without starting a subprocess (replace p.start() with p.run() )
I am using Windows 10 64 bit and Python 3.6.4 (Spyder, but also tried outside of Spyder).
Any help welcome!
A shared pointer won't work in another process because the pointer is only valid in the process in which it was created. Instead, use an array:
import multiprocessing as mp
def test(x):
    x.value = b'Test worked!'
if __name__ == "__main__":
    x = mp.Array('c',15)
    p = mp.Process(target=test, args = (x,))
    p.start()
    p.join()
    print(x.value)
Output:
b'Test worked!'
Note that array type 'c' is specialized and returns a SynchronizedString, as opposed to other types, which return a SynchronizedArray. Here's how to use type 'u', for example:
import multiprocessing as mp
from ctypes import *
def test(x):
    x.get_obj().value = 'Test worked!'
if __name__ == "__main__":
    x = mp.Array('u',15)
    p = mp.Process(target=test, args = (x,))
    p.start()
    p.join()
    print(x.get_obj().value)
Output:
Test worked!
Note that operations on the wrapped value that are non-atomic, such as += (which does a read/modify/write), should be protected with a with x.get_lock(): context manager.
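To illustrate that last point, a small sketch of my own (not from the answer above) using an integer Value, where the lock around += actually matters:

import multiprocessing as mp
def bump(counter):
    for _ in range(10000):
        with counter.get_lock():   # += is read/modify/write, so hold the lock
            counter.value += 1
if __name__ == "__main__":
    counter = mp.Value('i', 0)
    procs = [mp.Process(target=bump, args=(counter,)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(counter.value)  # reliably 40000 only because the increments are locked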
