read-only numpy array in threading python - multithreading

I have multiple threads that use a shared numpy array.
import threading
import numpy as np
import time

shared_array = np.ones((5, 5))

def run(shared_array, nb_iters):
    k = shared_array**2
    for i in range(nb_iters):
        k += 2

def multi_thread():
    jobs = []
    for _ in range(5):
        thread = threading.Thread(target=run, args=(shared_array, 1000000))
        jobs.append(thread)
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()

t0 = time.time()
multi_thread()
print(time.time() - t0)
# result: 6.502177000045776

t0 = time.time()
# we used 1000000 iterations for each thread => total nb of iterations = 5 * 1000000
run(shared_array, 1000000 * 5)
print(time.time() - t0)
# result: 6.6372435092926025
The problem is that, even with the numpy array passed in as an argument, the execution time of the 5 parallel threads is equal to that of a sequential execution! So I want to know how to make a program (similar to this one) actually run in parallel.

That's a poor example. Python has an internal lock (the global interpreter lock, GIL), which means only one thread at a time can execute Python bytecode. Work done inside numpy can run in parallel, because numpy can release the GIL while it runs its C loops, but your array is so small that almost no time is spent inside numpy, so you aren't getting any parallelism to speak of.
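One way to see the effect is to make the array big enough that the time is dominated by numpy's C loops instead of Python bytecode. The sketch below is only illustrative: the array shape and iteration counts are made up, and how much the threads actually overlap depends on which operations your numpy build releases the GIL for.
import threading
import time
import numpy as np

# A much larger array, so each iteration spends real time inside numpy
# (which can release the GIL) rather than in Python-level overhead.
big_array = np.ones((2000, 2000))

def run(arr, nb_iters):
    k = arr ** 2
    for _ in range(nb_iters):
        k += 2                      # element-wise work on a large array

def multi_thread(nb_threads=5, nb_iters=100):
    jobs = [threading.Thread(target=run, args=(big_array, nb_iters))
            for _ in range(nb_threads)]
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()

t0 = time.time()
multi_thread()
print('threaded:  ', time.time() - t0)

t0 = time.time()
run(big_array, 5 * 100)             # same total amount of work, sequentially
print('sequential:', time.time() - t0)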

Related

Multiprocessing pool map for a BIG array computation goes much slower than expected

I've run into some difficulties using a multiprocessing Pool in Python 3. I want to do a big array calculation with pool.map. Basically, I have a 3D array on which I need to run a computation 10 times, producing 10 output files sequentially; this is repeated 3 times, so the output is 3*10 = 30 output files (*.txt). I've prepared the following script for a small array calculation (a sample problem). However, when I use this script for a big array calculation, or for arrays coming from a series of files, this piece of code (maybe the pool) eats up the memory and never saves any .txt file to the destination directory. There is no error message when I run the file with the command mpirun python3 sample_prob_func.py.
Can anybody suggest what the problem is in the sample script and how to write the code so it doesn't get stuck? I haven't received any error message, so I don't know where the problem occurs. Any help is appreciated. Thanks!
import numpy as np
import multiprocessing as mp
from scipy import signal
import matplotlib.pyplot as plt
import contextlib
import os, glob, re
import random
import cmath, math
import time
import pdb

# File storing path
save_results_to = 'File saving path'

arr_x = [0, 8.49, 0.0, -8.49, -12.0, -8.49, -0.0, 8.49, 12.0]
arr_y = [0, 8.49, 12.0, 8.49, 0.0, -8.49, -12.0, -8.49, -0.0]
N = len(arr_x)

np.random.seed(12345)
total_rows = 5000
arr  = np.reshape(np.random.rand(total_rows*N), (total_rows, N))
arr1 = np.reshape(np.random.rand(total_rows*N), (total_rows, N))
arr2 = np.reshape(np.random.rand(total_rows*N), (total_rows, N))

# Finding cross spectral density (CSD)
def my_func1(data):
    # Do something here
    return array1

t0 = time.time()
my_data1 = my_func1(arr)
my_data2 = my_func1(arr1)
my_data3 = my_func1(arr2)
print('Time required {} seconds to execute CSD--For loop'.format(time.time()-t0))

mydata_list = [my_data1, my_data3, my_data3]

def my_func2(data2):
    # Do something here
    return from_data2

start_freq = 100
stop_freq = 110
freq_range = np.around(np.linspace(start_freq, stop_freq, 11)/10, decimals=2)
no_of_freq = len(freq_range)

list_arr = []

def my_func3(csd):
    list_csd = []
    for fr_count in range(start_freq, stop_freq):
        csd_single = csd[:, :, fr_count]
        list_csd.append(csd_single)
    print('Shape of list is :', np.array(list_csd).shape)
    return list_csd

def parallel_function(BIG_list_data):
    with contextlib.closing(mp.Pool(processes=10)) as pool:
        dft = pool.map(my_func2, BIG_list_data)
        pool.close()
        pool.join()
    data_arr = np.array(dft)
    print('shape of data :', data_arr.shape)
    return data_arr

count_day = 1
count_hour = 0
for count in range(3):
    count_hour += 1
    list_arr = my_func3(mydata_list[count])  # Load Numpy files
    print('Array shape is :', np.array(arr).shape)
    t0 = time.time()
    data_dft = parallel_function(list_arr)
    print('The hour number={} data is processing... '.format(count_hour))
    print('Time in parallel:', time.time() - t0)
    for i in range(no_of_freq-1):  # (11-1=10)
        jj = freq_range[i]
        #print('The hour_number {} and frequency number {} data is processing... '.format(count_hour, jj))
        dft_1hr_complx = data_dft[i, :, :]
        np.savetxt(save_results_to + f'csd_Day_{count_day}_Hour_{count_hour}_f_{jj}_hz.txt', dft_1hr_complx.view(float))
As @JérômeRichard suggested, to make the job scheduler aware of your resources you need to tell it how many processors the task will use. The following line can help: ncpus = int(os.getenv('SLURM_CPUS_PER_TASK', 1))
Put this line inside your Python script. Also, inside parallel_function use with contextlib.closing(mp.Pool(processes=ncpus)) as pool: instead of with contextlib.closing(mp.Pool(processes=10)) as pool:. Thanks
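For reference, a minimal sketch of that idea, sizing the pool from the SLURM environment variable (the fallback of 1 and the variable name are as above; the worker function here is just a placeholder):
import os
import contextlib
import multiprocessing as mp

def work(x):
    # placeholder for the real per-chunk computation (my_func2 in the question)
    return x * x

if __name__ == '__main__':
    # number of CPUs granted by SLURM; falls back to 1 outside a SLURM job
    ncpus = int(os.getenv('SLURM_CPUS_PER_TASK', 1))
    with contextlib.closing(mp.Pool(processes=ncpus)) as pool:
        results = pool.map(work, range(100))
    print(len(results), 'results computed with', ncpus, 'processes')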

Timing parallelized numpy calculation

I have a simple pi-approximating script like so:
import numpy as np
import matplotlib.pyplot as plt
import time

start = 10
stop = 1000000
step = 100
exactsolution = np.pi

def montecarlopi(N=1000000):
    random_x = np.random.random(size=N)
    random_y = np.random.random(size=N)
    bod = np.array([random_x, random_y]).T
    square_area = N
    quarter_circle_area = np.count_nonzero(np.linalg.norm(bod, axis=1) <= 1)
    pi_approx = 4*quarter_circle_area/square_area
    return pi_approx

if __name__ == '__main__':
    times = []
    results = []
    attemps = np.arange(start=start, stop=stop, step=step)
    for i in attemps:
        start_time = time.time()
        results.append(montecarlopi(i))
        times.append(time.time()-start_time)
    absolute_errors = np.abs(np.array(results)-exactsolution)
and I want to know how long the calculation takes as a function of the number of random attempts used. As you can see, I use a for loop to collect the individual timings, but this defeats the purpose of NumPy and slows my code down a lot. Effectively I'd like to just call montecarlopi() on the whole attemps array, but then I wouldn't get the per-size calculation times.
Is there a way to time each parallelized calculation numpy does?
I used the timing code from the answer provided here:
https://codereview.stackexchange.com/questions/165245/plot-timings-for-a-range-of-inputs
I only had to change labels to codecs in the line:
empty_multi_index = pd.MultiIndex(levels=[[], []], codes=[[], []], names=['func', 'result'])
Then you can run your whole timing experiment using
timings.plot_times([montecarlopi], inputs=np.arange(10, 1000000, 1000), repeats=3)
And get an output like this:
(plot: "Timing linear")
Or, more clearly, using log spacing:
timings.plot_times([montecarlopi], inputs=np.logspace(1, 8, 8, dtype=int), repeats=3)
(plot: "Timing logspace")
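If you'd rather not depend on that helper module, a plain sketch of the same measurement with time.perf_counter is below; it assumes montecarlopi from the question is already defined, and the repeat count and input sizes are arbitrary choices.
import time
import numpy as np

def time_function(func, inputs, repeats=3):
    """Best-of-`repeats` wall-clock time of func(n) for each input size n."""
    best_times = []
    for n in inputs:
        best = float('inf')
        for _ in range(repeats):
            t0 = time.perf_counter()
            func(n)
            best = min(best, time.perf_counter() - t0)
        best_times.append(best)
    return np.array(best_times)

if __name__ == '__main__':
    # montecarlopi is the function defined in the question above
    sizes = np.logspace(1, 7, 7, dtype=int)   # 10 ... 10_000_000
    for n, t in zip(sizes, time_function(montecarlopi, sizes)):
        print(f'N={n:>10d}  {t:.6f} s')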

Python Multiprocessing Scheduling

In Python 3.6, I am running multiple processes in parallel, where each process pings a URL and returns a Pandas dataframe. I want to keep the (2+) processes running continually, so I have created a minimal representative example below.
My questions are:
1) My understanding is that since I have different functions, I cannot use Pool.map_async() and its variants. Is that right? The only examples of these I have seen repeat the same function, like in this answer.
2) What is the best practice to make this setup run perpetually? In my code below, I use a while loop, which I suspect is not suited for this purpose.
3) Is the way I am using Process and Manager optimal? I use multiprocessing.Manager.dict() as the shared dictionary to return the results from the processes. I saw in a comment on this answer that using a Queue would make sense here; however, the Queue object has no .dict() method, so I am not sure how that would work.
I would be grateful for any improvements and suggestions with example code.
import numpy as np
import pandas as pd
import multiprocessing
import time

def worker1(name, t, seed, return_dict):
    '''worker function'''
    print(str(name) + 'is here.')
    time.sleep(t)
    np.random.seed(seed)
    df = pd.DataFrame(np.random.randint(0, 1000, 8).reshape(2, 4), columns=list('ABCD'))
    return_dict[name] = [df.columns.tolist()] + df.values.tolist()

def worker2(name, t, seed, return_dict):
    '''worker function'''
    print(str(name) + 'is here.')
    np.random.seed(seed)
    time.sleep(t)
    df = pd.DataFrame(np.random.randint(0, 1000, 12).reshape(3, 4), columns=list('ABCD'))
    return_dict[name] = [df.columns.tolist()] + df.values.tolist()

if __name__ == '__main__':
    t = 1
    while True:
        start_time = time.time()
        manager = multiprocessing.Manager()
        parallel_dict = manager.dict()
        seed = np.random.randint(0, 1000, 1)  # send seed to worker to return a diff df
        jobs = []
        p1 = multiprocessing.Process(target=worker1, args=('name1', t, seed, parallel_dict))
        p2 = multiprocessing.Process(target=worker2, args=('name2', t, seed+1, parallel_dict))
        jobs.append(p1)
        jobs.append(p2)
        p1.start()
        p2.start()
        for proc in jobs:
            proc.join()
        parallel_end_time = time.time() - start_time
        #print(parallel_dict)
        df1 = pd.DataFrame(parallel_dict['name1'][1:], columns=parallel_dict['name1'][0])
        df2 = pd.DataFrame(parallel_dict['name2'][1:], columns=parallel_dict['name2'][0])
        merged_df = pd.concat([df1, df2], axis=0)
        print(merged_df)
Answer 1 (map on multiple functions)
You're technically right.
With map, map_async and the other variants, you must use a single function.
But this constraint can be bypassed by implementing a dispatcher and passing the function to execute as part of the arguments:
def dispatcher(args):
    return args[0](*args[1:])
So a minimum working example:
import multiprocessing as mp

def function_1(v):
    print("hi %s" % v)
    return 1

def function_2(v):
    print("by %s" % v)
    return 2

def dispatcher(args):
    return args[0](*args[1:])

with mp.Pool(2) as p:
    tasks = [
        (function_1, "A"),
        (function_2, "B")
    ]
    r = p.map_async(dispatcher, tasks)
    r.wait()
    results = r.get()
Answer 2 (Scheduling)
I would remove the while loop from the script and schedule it as a cron job (on GNU/Linux) or an equivalent scheduled task (on Windows), so that the OS is responsible for its execution.
On Linux you can run crontab -e and add the following line to make the script run every 5 minutes.
*/5 * * * * python /path/to/script.py
Answer 3 (Shared Dictionary)
Yes, but no.
To my knowledge, using a Manager is the best way for collection-like data.
For arrays or primitive types (ints, floats, etc.) there are Value and Array, which are faster.
As in the documentation:
A manager object returned by Manager() controls a server process which holds Python objects and allows other processes to manipulate them using proxies.
A manager returned by Manager() will support types list, dict, Namespace, Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Barrier, Queue, Value and Array.
Server process managers are more flexible than using shared memory objects because they can be made to support arbitrary object types. Also, a single manager can be shared by processes on different computers over a network. They are, however, slower than using shared memory.
But here you only need to return a DataFrame, so the shared dictionary isn't needed.
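For completeness, a minimal sketch of the Value/Array route mentioned above; the typecodes, sizes and worker logic are made up purely for illustration.
import multiprocessing as mp

def worker(counter, shared_arr):
    # Both objects live in shared memory; get_lock() guards the read-modify-write.
    with counter.get_lock():
        counter.value += 1
    with shared_arr.get_lock():
        for i in range(len(shared_arr)):
            shared_arr[i] += 1.0

if __name__ == '__main__':
    counter = mp.Value('i', 0)       # shared int ('i' typecode)
    shared_arr = mp.Array('d', 5)    # shared array of 5 doubles, zero-initialised
    procs = [mp.Process(target=worker, args=(counter, shared_arr)) for _ in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(counter.value, list(shared_arr))   # 3 [3.0, 3.0, 3.0, 3.0, 3.0]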
Cleaned Code
Using all the previous ideas the code can be rewritten as:
map version
import numpy as np
import pandas as pd
from time import sleep
import multiprocessing as mp

def worker1(t, seed):
    print('worker1 is here.')
    sleep(t)
    np.random.seed(seed)
    return pd.DataFrame(np.random.randint(0, 1000, 8).reshape(2, 4), columns=list('ABCD'))

def worker2(t, seed):
    print('worker2 is here.')
    sleep(t)
    np.random.seed(seed)
    return pd.DataFrame(np.random.randint(0, 1000, 12).reshape(3, 4), columns=list('ABCD'))

def dispatcher(args):
    return args[0](*args[1:])

def task_generator(sleep_time=1):
    seed = np.random.randint(0, 1000, 1)
    yield worker1, sleep_time, seed
    yield worker2, sleep_time, seed + 1

with mp.Pool(2) as p:
    results = p.map(dispatcher, task_generator())

merged = pd.concat(results, axis=0)
print(merged)
If the concatenation of the DataFrames turns out to be the bottleneck, an approach with imap might be preferable.
imap version
with mp.Pool(2) as p:
    merged = pd.DataFrame()
    for result in p.imap_unordered(dispatcher, task_generator()):
        merged = pd.concat([merged, result], axis=0)

print(merged)
The main difference is that in the map case, the program first waits for all the tasks to finish and then concatenates all the DataFrames, while in the imap_unordered case each DataFrame is concatenated to the current result as soon as its task has finished.

Product of tuples in generator

I have a generator that yields millions of tuples (~100 million), and I need the product (np.prod) of each tuple, then the sum of all those products.
I have the following example code that works fine for a reasonable number of tuples in the generator, but takes a lot of time when the number gets high. I am working on an instance with 64 cores and ~160 GB of RAM, and I am looking for a way to optimize my code if possible.
import random
import numpy as np
import multiprocessing as mp
import time
nprocs = mp.cpu_count()
pool = mp.Pool(processes=nprocs)
x = 1000000
mygen = ((random.randint(0, 100)/100, random.randint(0, 100)/100 ) for k in range(x))
start = time.time()
proba_all = sum(pool.map(np.prod, mygen))
print(proba_all)
end = time.time()
print (end-start)
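Calling np.prod once per two-element tuple creates one pool task and one pickled result per tuple, so most of the time goes into inter-process overhead rather than arithmetic. If the real tuples can be materialised as (chunks of) a NumPy array, a vectorised sketch like the one below avoids that overhead entirely; the random pairs just mirror the example generator, and the sizes are illustrative.
import time
import numpy as np

x = 1000000

# Same kind of values as the generator in the question, but drawn as one
# (x, 2) array so the per-tuple products become a single vectorised call.
rng = np.random.default_rng()
pairs = rng.integers(0, 101, size=(x, 2)) / 100

start = time.time()
proba_all = np.prod(pairs, axis=1).sum()
print(proba_all)
print(time.time() - start)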

Multiprocessing on image in python 3 does not give enough performance

I'm reading a video file with OpenCV and storing its frames in a list, then I pass this list to a face-detection function which stores the face locations in another list. The problem is that when I give the same number of frames to the multiprocessing code and to the single-process code, the performance is not very different. Please check my code and suggest a possible solution. I am using Python 3.5 and have 4 CPU cores. The multiprocessing code is supposed to give almost a 4x speedup, but it only gains a few seconds.
My code:
import cv2, time, dlib, imutils
from multiprocessing import Pool

detector = dlib.get_frontal_face_detector()
vidcap = cv2.VideoCapture(r'/home/deeplearning/PycharmProjects/sjtech/jurassic_park_intro.mp4')

count = 0
frame_list = []

def parallel_detection(f):
    return detector(f, 1)

success, image = vidcap.read()
while success:
    print('Read a new frame: ', success)
    frame_list.append(image)
    count += 1
    success, image = vidcap.read()

del frame_list[-1]
print("out of while")

p = Pool()
t1 = time.time()
# below is my multiprocessing code, on 40 frames it takes 42 seconds
face_location = p.map(parallel_detection, frame_list[900:940])
# below is single processing code, it takes 50 seconds
face_location = [detector(frame_list[x], 1) for x in range(900, 940)]
print(time.time()-t1)
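One diagnostic worth running here: Pool.map has to pickle every frame and push it through a pipe to the workers, so it helps to know how much of the 42 s that serialisation accounts for; if it is small, the bottleneck lies elsewhere (for example in the detector itself). A minimal, self-contained sketch with a made-up frame shape:
import pickle
import time
import numpy as np

# Stand-in for one decoded video frame (e.g. 1080p BGR); to measure the real
# case, replace this with an actual element of frame_list.
frame = np.zeros((1080, 1920, 3), dtype=np.uint8)

t0 = time.time()
for _ in range(40):
    payload = pickle.dumps(frame, protocol=pickle.HIGHEST_PROTOCOL)
print('serialising 40 frames took %.2f s (%.1f MB each)'
      % (time.time() - t0, len(payload) / 1e6))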
