Make multiprocessing Pool use free cores if available - python-3.x

I have the following piece of code that uses a Pool of workers to perform some operations.
import copy
from itertools import repeat
from time import time

import multiprocessing as mp
import numpy as np
import psutil

def my_func( args ):
    low_index = args[0][0]
    up_index = args[0][1]
    params = args[1][0]
    A = args[1][1]
    B = args[1][2]
    print( "PID:", mp.current_process() )
    for k in range( low_index, up_index ):
        a = params[k]
        # what if np.dot uses multi-threading?
        A = a*A + ( np.dot( A, B ) )*( np.dot( B, B ) )
        B = a*B + ( np.dot( B, A ) )*( np.dot( A, A ) )
    return A, B

if __name__ == '__main__':
    ts = time()
    params = np.linspace( 1, 10, 1000 )
    n_dim = 1000
    # the arrays A,B get modified with each call to the worker
    A = np.random.rand( n_dim, n_dim )
    B = np.random.rand( n_dim, n_dim )
    C = np.random.rand( 5*n_dim, 5*n_dim )
    D = np.random.rand( 5*n_dim, 5*n_dim )
    ncpus = psutil.cpu_count( logical=False )
    number_processes = ncpus - 1
    total_items = params.shape[0]
    n_chunck = total_items // number_processes
    intervals = [ [ k*n_chunck, (k+1)*n_chunck ] for k in range( number_processes ) ]
    intervals[-1][-1] = total_items
    objs_ = list( repeat( ( params,
                            copy.deepcopy( A ),
                            copy.deepcopy( B ),
                            ), number_processes - 1 ) )
    objs_.append( ( params,
                    copy.deepcopy( C ),
                    copy.deepcopy( D ),
                    ) )
    args_l = []
    for k in range( number_processes ):
        args_l.append( [ intervals[k], objs_[k] ] )
    pool = mp.Pool( processes=ncpus )
    results = pool.map( my_func, args_l )
    pool.close()
    pool.join()
    print( time() - ts )
The last process (the one involving the larger C and D arrays) takes considerably longer than the rest, so once the other processes are done I would like it to make efficient use of all the cores they free up. However, I am observing that CPU usage stays at around 20% for the last process (on my machine I use 5 cores out of 6), which makes the remaining operations highly inefficient. Is there a good way to fix that?
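One common way to attack this kind of load imbalance is over-decomposition: split the work into many more chunks than there are workers, so that idle workers keep pulling new chunks instead of waiting on one long task. The sketch below (my own, with a placeholder computation and illustrative counts, not a drop-in fix for the code above) shows the pattern; note it only applies where chunks are independent, whereas the A/B recurrence above is sequential in k and would have to be split along a different axis.
import multiprocessing as mp
import numpy as np

def work_on_chunk( args ):
    # hypothetical worker: processes the half-open index range [lo, up)
    lo, up, params = args
    return params[lo:up].sum()  # placeholder computation

if __name__ == '__main__':
    params = np.linspace( 1, 10, 1000 )
    n_workers = 5             # illustrative
    n_chunks = 4 * n_workers  # several small chunks per worker
    bounds = np.linspace( 0, params.shape[0], n_chunks + 1, dtype=int )
    tasks = [ ( bounds[k], bounds[k+1], params ) for k in range( n_chunks ) ]
    with mp.Pool( n_workers ) as pool:
        # chunksize=1 hands tasks out one at a time, so a worker that
        # finishes early immediately picks up the next pending chunk
        partials = pool.map( work_on_chunk, tasks, chunksize=1 )
    print( sum( partials ) )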

Related

python-3 parallelizing my functions issue

I am using python-3.x and I would like to speed up my code by parallelizing my functions with multiprocessing. I applied multiprocessing, but for some reason it didn't work properly, and I am not sure where the problem is.
The following is a small example of what I did. Any suggestions are appreciated.
import numpy as np
import math
import multiprocessing as mp

lower_bound = -500
upper_bound = 500
dimension = 1000
Base_Value = 10
Popula_size = 3000
MinResolution = 8

population_in = np.random.choice( np.linspace( lower_bound, upper_bound, Base_Value ** MinResolution ), size=( Popula_size, dimension ), replace=True )
resolution = np.random.randint( 1, 8, size=( 1, dimension ) )

def Discretiz(lower_bound, upper_bound, DiscPopulation, resolution):
    pop_size = int( len( DiscPopulation ) )
    the_new_population = np.zeros( ( pop_size, dimension ) )
    for i in range( pop_size ):
        for ii in range( dimension ):
            decimal = int( np.round( ( DiscPopulation[i][ii] - lower_bound ) / ( ( upper_bound - lower_bound ) / ( math.pow( Base_Value, resolution[:, ii] ) - 1 ) ) ) )
            the_new_population[i, ii] = ( lower_bound + decimal * ( ( upper_bound - lower_bound ) / ( math.pow( Base_Value, resolution[:, ii] ) - 1 ) ) )
    return the_new_population

# without_parallelizing
# the_new_population = Discretiz(lower_bound, upper_bound, population_in, resolution)

# with_parallelizing
pool = mp.Pool( mp.cpu_count() )
the_new_population = [ pool.apply( Discretiz, args=( lower_bound, upper_bound, population_in, resolution ) ) ]
print( the_new_population )
With:
population_in = np.random.choice ( np.linspace ( lower_bound , upper_bound , Base_Value ** MinResolution ) , size = ( Popula_size , dimension ) , replace = True )
you make a 2-d array of shape (Popula_size, dimension). This is passed as DiscPopulation.
resolution = np.random.randint(1, 8, size = (1, dimension))
The double iteration function can be replaced with one that operates on whole arrays without the slow iteration:
def Discretiz(lower_bound, upper_bound, DiscPopulation, resolution):
    pop_size = DiscPopulation.shape[0] # no need for the 'int'
    num = DiscPopulation - lower_bound
    divisor = (upper_bound - lower_bound) / (Base_Value**resolution - 1)
    decimal = num / divisor
    # this divide does (pop,dimension)/(1,dimension); ok by broadcasting
    decimal = np.round(decimal) # no need for int
    the_new_population = lower_bound + decimal * divisor
    return the_new_population
I wrote this in-place here. It is syntactically correct, but I have not tried to run it.
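Since the rewrite above is untested, here is a quick self-check sketch (my addition; tiny illustrative sizes, names following the answer) that compares it against the original double loop:
import numpy as np

lower_bound, upper_bound = -500, 500
Base_Value = 10
rng = np.random.default_rng( 0 )

pop = rng.uniform( lower_bound, upper_bound, size=( 5, 7 ) )  # tiny illustrative sizes
res = rng.integers( 1, 8, size=( 1, 7 ) )

# vectorized version, as in the answer
divisor = ( upper_bound - lower_bound ) / ( Base_Value**res - 1.0 )
vectorized = lower_bound + np.round( ( pop - lower_bound ) / divisor ) * divisor

# element-wise reference, mirroring the original double loop
reference = np.empty_like( pop )
for i in range( pop.shape[0] ):
    for ii in range( pop.shape[1] ):
        d = ( upper_bound - lower_bound ) / ( Base_Value**res[0, ii] - 1.0 )
        reference[i, ii] = lower_bound + np.round( ( pop[i, ii] - lower_bound ) / d ) * d

print( np.allclose( vectorized, reference ) )  # expect True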
I fixed the code now, but it is still not faster than the old one; it actually takes more time and I am not sure why:
without parallelizing: 25.831339597702026 seconds
with parallelizing: 44.12706518173218 seconds ???!!!
import math
import time
from multiprocessing import Process, Value, Array, Manager, Pool, cpu_count

import numpy as np

lower_bound = -500
upper_bound = 500
dimension = 1000
Base_Value = 10
Popula_size = 2000
MinResolution = 8

population_in = np.random.choice( np.linspace( lower_bound, upper_bound, Base_Value ** MinResolution ), size=( Popula_size, dimension ), replace=True )
resolution = np.random.randint( 1, 8, size=( 1, dimension ) )

def Discretiz1(DiscPopulation, resolution):
    DiscPopulation = np.reshape( DiscPopulation, ( Popula_size, dimension ) )
    resolution = np.reshape( resolution, ( 1, dimension ) )
    the_new_population = np.zeros( ( Popula_size, dimension ) )
    for i in range( Popula_size ):
        for ii in range( dimension ):
            decimal = int( np.round( ( DiscPopulation[i][ii] - lower_bound ) / ( ( upper_bound - lower_bound ) / ( math.pow( Base_Value, resolution[:, ii] ) - 1 ) ) ) )
            the_new_population[i, ii] = ( lower_bound + decimal * ( ( upper_bound - lower_bound ) / ( math.pow( Base_Value, resolution[:, ii] ) - 1 ) ) )
    # print(the_new_population)

if __name__ == '__main__':
    num_cores = cpu_count()
    Pool( processes=num_cores )
    population_in = np.reshape( population_in, ( 1, Popula_size * dimension ) )[0]
    resolution = np.reshape( resolution, ( 1, dimension ) )[0]
    arr1 = Array( 'd', population_in )
    arr2 = Array( 'i', resolution )
    start_time = time.time()
    p = Process( target=Discretiz1, args=( arr1, arr2 ) )
    p.start()
    p.join()
    print( "--- %s seconds ---" % ( time.time() - start_time ) )
This is the old one, without parallelizing:
import math
import time
from multiprocessing import Process, Value, Array, Manager, Pool, cpu_count

import numpy as np

lower_bound = -500
upper_bound = 500
dimension = 1000
Base_Value = 10
Popula_size = 2000
MinResolution = 8

population_in = np.random.choice( np.linspace( lower_bound, upper_bound, Base_Value ** MinResolution ), size=( Popula_size, dimension ), replace=True )
resolution = np.random.randint( 1, 8, size=( 1, dimension ) )
start_time = time.time()

def Discretiz(lower_bound, upper_bound, DiscPopulation, resolution):
    pop_size = int( len( DiscPopulation ) )
    the_new_population = np.zeros( ( pop_size, dimension ) )
    for i in range( pop_size ):
        for ii in range( dimension ):
            decimal = int( np.round( ( DiscPopulation[i][ii] - lower_bound ) / ( ( upper_bound - lower_bound ) / ( math.pow( Base_Value, resolution[:, ii] ) - 1 ) ) ) )
            the_new_population[i, ii] = ( lower_bound + decimal * ( ( upper_bound - lower_bound ) / ( math.pow( Base_Value, resolution[:, ii] ) - 1 ) ) )
    return the_new_population

# without_parallelizing
the_new_population = Discretiz( lower_bound, upper_bound, population_in, resolution )
print( "--- %s seconds ---" % ( time.time() - start_time ) )

Plotting solution 2nd ODE using Euler

I have used the equation of motion (Newton's law) for a simple spring and mass scenario, incorporating it into the given 2nd-order ODE y'' + (k/m)y = 0; y(0) = 3; y'(0) = 0.
Using the Euler method alongside the exact solution, I have been able to run the code and receive some OK results. However, when I plot the results I get a diagonal line across the oscillating curves that I am after.
Current plot output with diagonal line
Can anyone help point out what is causing this issue, and how I can fix it please?
MY CODE:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sympy import Function, dsolve, Eq, Derivative, sin, cos, symbols
from sympy.abc import x, i
import math

# Given is y'' + (k/m)y = 0; y(0) = 3; y'(0) = 0

# Parameters
h = 0.01;  # Step Size
t = 50.0;  # Time (sec)
k = 1;     # Spring Stiffness
m = 1;     # Mass
x0 = 3;
v0 = 0;

# Exact Analytical Solution
x_exact = x0*cos(math.sqrt(k/m)*t);
v_exact = -x0*math.sqrt(k/m)*sin(math.sqrt(k/m)*t);

# Eulers Method
x = np.zeros( int( t/h ) );
v = np.zeros( int( t/h ) );
x[1] = x0;
v[1] = v0;
x_exact = np.zeros( int( t/h ) );
v_exact = np.zeros( int( t/h ) );
te = np.zeros( int( t/h ) );
x_exact[1] = x0;
v_exact[1] = v0;
#print(len(x));

for i in range(1, int(t/h) - 1): # MAIN LOOP
    x[i+1] = x[i] + h*v[i];
    v[i+1] = v[i] - h*k/m*x[i];
    te[i] = i * h
    x_exact[i] = x0*cos(math.sqrt(k/m)*te[i]);
    v_exact[i] = -x0*math.sqrt(k/m)*sin(math.sqrt(k/m)*te[i]);
    # print(x_exact[i], '\t'*2, x[i]);

# plot
%config InlineBackend.figure_format = 'svg'
plt.plot(te, x_exact, te, v_exact)
plt.title("DISPLACEMENT")
plt.xlabel("Time (s)")
plt.ylabel("Displacement (m)")
plt.grid(linewidth=0.3)
A computation that is more direct in some details is
te = np.arange(0, t, h)
N = len(te)
w = (k/m)**0.5
x_exact = x0*np.cos(w*te);
v_exact = -x0*w*np.sin(w*te);
plt.plot(te, x_exact, te, v_exact)
resulting in a clean plot of the oscillating exact solution.
Note that arrays in Python start at index zero, so the initialization and loop should be
x = np.empty(N)
v = np.empty(N)
x[0] = x0;
v[0] = v0;
for i in range(N - 1): # MAIN LOOP
    x[i+1] = x[i] + h*v[i];
    v[i+1] = v[i] - h*k/m*x[i];
plt.plot(te, x, te, v)
then gives the plot with the expected increasing amplitude: the explicit Euler method systematically feeds energy into an undamped oscillator, so the numerical amplitude grows slowly over time.
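As an aside that is not part of the original answer: using the freshly computed x[i+1] in the velocity update turns this into the semi-implicit (symplectic) Euler method, which keeps the numerical amplitude bounded instead of growing. A minimal sketch with the same parameters:
import numpy as np
import matplotlib.pyplot as plt

h, t = 0.01, 50.0
k, m = 1.0, 1.0
x0, v0 = 3.0, 0.0

te = np.arange( 0, t, h )
N = len( te )
x = np.empty( N )
v = np.empty( N )
x[0], v[0] = x0, v0
for i in range( N - 1 ):
    x[i+1] = x[i] + h*v[i]
    v[i+1] = v[i] - h*( k/m )*x[i+1]  # uses x[i+1]: semi-implicit Euler
plt.plot( te, x, te, v )
plt.show()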

Fitting a function f(x,y,z) with a quadratic polynomial

I'm trying to fit a function f(x,y,z) with the following quadratic polynomial:
3d polynomial
Some distorted spherical surface in three dimensions. The problem is related to the calculation of effective masses in solid state physics.
Here is a picture of the data to show that it indeed falls off parabolically in all directions, even though the curvature in the z-direction is rather low:
3d parabolas
I'm interested in the coefficients, which correspond to effective masses. I've got an array of xyz coordinates, which is regular and centered on the maximum:
[[ 0. 0. 0. ]
[ 0. 0. 0.01282017]
[ 0. 0. 0.02564034]
...
[-0.05026321 -0.05026321 -0.03846052]
[-0.05026321 -0.05026321 -0.02564034]
[-0.05026321 -0.05026321 -0.01282017]]
And a corresponding 1D array of scalar values, one for each point. The number of data points around this maximum can range from 100 to 1000.
This is the code I'm currently trying to use for fitting:
def func(data, mxx, mxy, mxz, myy, myz, mzz):
    x = data[:, 0]
    y = data[:, 1]
    z = data[:, 2]
    return (
        (1 / (2 * mxx)) * (x ** 2)
        + (1 / (1 * mxy)) * (x * y)
        + (1 / (1 * mxz)) * (x * z)
        + (1 / (2 * myy)) * (y ** 2)
        + (1 / (1 * myz)) * (y * z)
        + (1 / (2 * mzz)) * (z ** 2)
    ) + f(0, 0, 0)

energy = data[:, 3]
guess = (mxx, mxy, mxz, myy, myz, mzz)
params, pcov = scipy.optimize.curve_fit(
    func, data, energy, p0=guess, method="trf"
)
Where f(0,0,0) is the value of the function at (0, 0, 0), which I retrieve with the scipy.interpolate.griddata function.
For this problem, the masses should be negative and have values between -0.2 and -2, roughly speaking. I'm creating guess values through a finite difference differentiation.
However, I don't get any sensible results from scipy.optimize.curve_fit - typically the coefficients end up as huge numbers (like 1e9). I'm completely lost at this point.
What am I doing wrong :( ?
One of the problems is that you fit 1/m. While this is correct from a physics point of view, it is bad from an algorithmic point of view: if the fitting algorithm needs to change the sign of an m, the corresponding coefficient diverges near zero. It is therefore better to fit mI = 1/m and do the corresponding error propagation afterwards. Here I use leastsq, which requires some additional calculations to get the covariance matrix (as it returns the reduced form). I do the fit with g() and the inverse masses, but you can immediately reproduce your problems by introducing f() and directly fitting the m's.
A second point is that the data has an offset, i.e. at x = y = z = 0 the data is v = -0.0195. This needs to be introduced into the model.
Finally, I'd say that you already have non-parabolic behaviour in your data.
Nevertheless, here is how it looks:
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions( linewidth=300 )
from scipy.optimize import leastsq

data = np.loadtxt( "silicon.csv", delimiter=',' )

def f( x, y, z, mxx, mxy, mxz, myy, myz, mzz, offI ):
    out = 1. / ( 2 * mxx ) * x * x
    out += 1. / ( mxy ) * x * y
    out += 1. / ( mxz ) * x * z
    out += 1. / ( 2 * myy ) * y * y
    out += 1. / ( myz ) * y * z
    out += 1. / ( 2 * mzz ) * z * z
    out += 1. / offI
    return out

def g( x, y, z, mxxI, mxyI, mxzI, myyI, myzI, mzzI, off ):
    out = mxxI / 2 * x * x
    out += mxyI * x * y
    out += mxzI * x * z
    out += myyI / 2 * y * y
    out += myzI * y * z
    out += mzzI / 2 * z * z
    out += off
    return out

def residuals( params, indata ):
    out = list()
    for x, y, z, v in indata:
        out.append( v - g( x, y, z, *params ) )
    return out

sol, cov, info, msg, ier = leastsq( residuals, 7 * [ 0 ], args=( data, ), full_output=True )
s_sq = sum( x**2 for x in residuals( sol, data ) ) / ( len( data ) - len( sol ) )
print( "solution" )
print( sol )
masses = [ 1 / x for x in sol ]
print( "masses:" )
print( masses )
print( "covariance matrix:" )
covMX = cov * s_sq
print( covMX )
print( "sum of residuals" )
print( sum( residuals( sol, data ) ) )

### plotting the cuts
fig = plt.figure( 'cuts' )
ax = dict()
for i in range( 1, 10 ):
    ax[i] = fig.add_subplot( 3, 3, i )
dl = np.linspace( -.2, .2, 25 )

#### xx
xdata = [ [ x, v ] for x, y, z, v in data if ( abs( y ) < 1e-3 and abs( z ) < 1e-3 ) ]
vl = np.fromiter( ( f( x, 0, 0, *masses ) for x in dl ), float )
ax[1].plot( *zip( *sorted( xdata ) ), ls='', marker='o' )
ax[1].plot( dl, vl )

#### xy
xydata = [ [ x, v ] for x, y, z, v in data if ( abs( x - y ) < 1e-2 and abs( z ) < 1e-3 ) ]
vl = np.fromiter( ( f( xy, xy, 0, *masses ) for xy in dl ), float )
ax[2].plot( *zip( *sorted( xydata ) ), ls='', marker='o' )
ax[2].plot( dl, vl )

#### xz
xzdata = [ [ x, v ] for x, y, z, v in data if ( abs( x - z ) < 1e-2 and abs( y ) < 1e-3 ) ]
vl = np.fromiter( ( f( xz, 0, xz, *masses ) for xz in dl ), float )
ax[3].plot( *zip( *sorted( xzdata ) ), ls='', marker='o' )
ax[3].plot( dl, vl )

#### yy
ydata = [ [ y, v ] for x, y, z, v in data if ( abs( x ) < 1e-3 and abs( z ) < 1e-3 ) ]
vl = np.fromiter( ( f( 0, y, 0, *masses ) for y in dl ), float )
ax[5].plot( *zip( *sorted( ydata ) ), ls='', marker='o' )
ax[5].plot( dl, vl )

#### yz
yzdata = [ [ y, v ] for x, y, z, v in data if ( abs( y - z ) < 1e-2 and abs( x ) < 1e-3 ) ]
vl = np.fromiter( ( f( 0, yz, yz, *masses ) for yz in dl ), float )
ax[6].plot( *zip( *sorted( yzdata ) ), ls='', marker='o' )
ax[6].plot( dl, vl )

#### zz
zdata = [ [ z, v ] for x, y, z, v in data if ( abs( x ) < 1e-3 and abs( y ) < 1e-3 ) ]
vl = np.fromiter( ( f( 0, 0, z, *masses ) for z in dl ), float )
ax[9].plot( *zip( *sorted( zdata ) ), ls='', marker='o' )
ax[9].plot( dl, vl )

#### some diag
ddata = [ [ z, v ] for x, y, z, v in data if ( abs( x - y ) < 1e-3 and abs( x - z ) < 1e-3 ) ]
vl = np.fromiter( ( f( d, d, d, *masses ) for d in dl ), float )
ax[7].plot( *zip( *sorted( ddata ) ), ls='', marker='o' )
ax[7].plot( dl, vl )

#### some other diag
ddata = [ [ z, v ] for x, y, z, v in data if ( abs( x - y ) < 1e-3 and abs( x + z ) < 1e-3 ) ]
vl = np.fromiter( ( f( d, d, -d, *masses ) for d in dl ), float )
ax[8].plot( *zip( *sorted( ddata ) ), ls='', marker='o' )
ax[8].plot( dl, vl )

plt.show()
This gives the following output:
solution
[-1.46528595 0.25090717 0.25090717 -1.46528595 0.25090717 -1.46528595 -0.01993436]
masses:
[-0.6824606499739905, 3.985537743156507, 3.9855376943660676, -0.6824606473928339, 3.9855377322848344, -0.6824606467055248, -50.16463861555409]
covariance matrix:
[
[ 4.76417852e-03 -1.46907683e-12 -8.57639600e-12 -2.21281938e-12 -2.38444957e-12 8.42981521e-12 -2.70034183e-05]
[-1.46907683e-12 9.17104397e-04 -7.10573582e-13 1.32125214e-11 7.44553140e-12 1.29909935e-11 -1.11259046e-13]
[-8.57639600e-12 -7.10573582e-13 9.17104389e-04 -8.60004172e-12 -6.14797647e-12 8.27070243e-12 3.11127064e-14]
[-2.21281914e-12 1.32125214e-11 -8.60004172e-12 4.76417860e-03 -4.20477032e-12 9.20893224e-12 -2.70034186e-05]
[-2.38444957e-12 7.44553140e-12 -6.14797647e-12 -4.20477032e-12 9.17104395e-04 1.50963408e-11 -7.28889534e-14]
[ 8.42981530e-12 1.29909935e-11 8.27070243e-12 9.20893175e-12 1.50963408e-11 4.76417849e-03 -2.70034182e-05]
[-2.70034183e-05 -1.11259046e-13 3.11127064e-14 -2.70034186e-05 -7.28889534e-14 -2.70034182e-05 5.77019926e-07]
]
sum of residuals
4.352727352163743e-09
...and here are some 1-d cuts that show significant deviation from parabolic behaviour once one is off the main axes.
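For completeness, a sketch of my own (not part of the original answer): the same inverse-mass reparametrization also works with curve_fit, which returns the covariance matrix directly. The data columns are assumed to be x, y, z, v as above:
import numpy as np
from scipy.optimize import curve_fit

def g_flat( xyz, mxxI, mxyI, mxzI, myyI, myzI, mzzI, off ):
    # quadratic form in the inverse masses, plus the constant offset
    x, y, z = xyz
    return ( mxxI / 2 * x * x + mxyI * x * y + mxzI * x * z
             + myyI / 2 * y * y + myzI * y * z + mzzI / 2 * z * z + off )

data = np.loadtxt( "silicon.csv", delimiter=',' )  # assumed x, y, z, v columns
sol, cov = curve_fit( g_flat, data[:, :3].T, data[:, 3], p0=7 * [ 0.0 ] )
masses = 1.0 / sol  # invert back; the last entry is 1/off, as in the answer's output
print( sol )
print( masses )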

Multi thread in Python search engine crawler causing a hang

I'm trying to teach myself programming and I've run into a wall with multithreading. I'm using it to try to speed up my Google crawler; can someone point me in the right direction?
import re
import requests
from bs4 import BeautifulSoup  # assumed imports (not shown in the post)

# Requires a search string and page numbers to scan
def google(search_string, start):
    temp = []
    url = 'http://www.google.com/search'
    payload = { 'q' : search_string, 'start' : start }
    my_headers = { 'User-agent' : 'Mozilla/11.0' }
    r = requests.get( url, params=payload, headers=my_headers )
    soup = BeautifulSoup( r.text, 'html.parser' )
    h3tags = soup.find_all( 'h3', class_='r' )
    # Prints and writes output of scraped URLs
    with open( "test.txt", "w" ) as out_15:
        for h3 in h3tags:
            try:
                print( re.search( r'url\?q=(.+?)\&sa', h3.a['href'] ).group(1) )
                temp.append( re.search( r'url\?q=(.+?)\&sa', h3.a['href'] ).group(1) )
            except:  # bare except kept from the original; skips tags without a matching link
                continue
    return temp
and
from functools import partial
from multiprocessing.dummy import Pool  # assumed import (not shown in the post); dummy.Pool uses threads
from timeit import default_timer as timer

def main():
    start = timer()
    result = []
    search = input( "Please enter Dork String(<dork - no include inurl:> <extra-terms>):" )
    pages = int( input( "how many URL's would you like?:" ) )
    pages = pages / 10
    processes = int( input( "How many threads (<= 8):" ) )
    make_request = partial( google, search )
    pagelist = [ str( x*10 ) for x in range( 0, int( pages ) ) ]
    # Multithreads ??
    with Pool( processes ) as p:
        tmp = p.map( make_request, pagelist )
    for x in tmp:
        result.extend( x )
    result = list( set( result ) )
    print( *result, sep='\n' )  # the original had sep='/n', a typo
    # stats
    print( '\nTotal URLs Scraped : %s ' % len( result ) )
    print( 'Script Execution Time : %s ' % ( timer() - start ) )
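A hedged suggestion of my own, since no answer is attached here: for I/O-bound work such as HTTP requests, a thread pool like concurrent.futures.ThreadPoolExecutor is often the simpler fit, since nothing has to be pickled between processes. A minimal sketch with a stand-in fetch function and illustrative URLs:
from concurrent.futures import ThreadPoolExecutor
import requests

def fetch(url):
    # stand-in for the google() helper above; returns the page length
    return len( requests.get( url, timeout=10 ).text )

urls = [ 'https://example.com' ] * 4  # illustrative URLs

with ThreadPoolExecutor( max_workers=4 ) as pool:
    sizes = list( pool.map( fetch, urls ) )

print( sizes )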

How can I speed up my python program?

I was trying to make a program in Python 3.4.1 to obtain the prime numbers from 2 to 100,000.
My problem is that it takes too much time to process all the information and it never gives me any result.
I left it running for around half an hour; it slowed down my whole computer and it didn't give me what I want.
I am using the Sieve of Eratosthenes algorithm ("Criba de Eratóstenes").
Here is my code:
from math import *
def primos(num):
    num2 = num + 1
    tnumeros = []  # tnumeros = every number from 2 to num
    npnumeros = [] # npnumeros = every number that is not prime
    pnumeros = []  # pnumeros = every prime number
    for a in range( 2, num2 ):
        tnumeros.append( a )
    for i in range( 2, int( sqrt( num ) ) + 1 ):
        for j in range( i, int( num / i ) + 1 ):
            np = i * j
            npnumeros.append( np )
    npnumeros = list( set( npnumeros ) )
    for e in tnumeros:
        if ( e in npnumeros ):
            continue
        else:
            pnumeros.append( e )
    return ( str( "".join( str( pnumeros ) ) ) )

print( primos( 100000 ) )
Don't use a list for your npnumeros value; use a set instead. You're only interested in looking up whether a number is in that collection, so make it a set from the start:
npnumeros = set()
# ...
for i in range( 2, int( sqrt( num ) ) + 1 ):
    for j in range( i, int( num / i ) + 1 ):
        np = i * j
        npnumeros.add( np )
# npnumeros = list( set( npnumeros ) ) # Remove this line, it's no longer needed
for e in tnumeros:
    if ( e in npnumeros ):
        continue
    else:
        pnumeros.append( e )
The reason your code is slow is that looking up numbers in a list is O(N) time, and doing that inside an O(N) loop is O(N^2) time. But looking up numbers in a set is O(1) time, so you'll have O(N) time inside that loop. Going from O(N^2) to O(N) is going to represent a HUGE difference in processing speed.
If you don't understand the O(N) notation I used, Google "Big O notation" to read more about it.
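A quick way to see this difference is to time a membership test near the end of the collection directly (an illustration of the point above, not code from the answer):
import timeit

nums = list( range( 100000 ) )
nums_set = set( nums )

# membership test for a value near the end: O(N) for the list, O(1) for the set
print( timeit.timeit( '99999 in nums', globals=globals(), number=1000 ) )
print( timeit.timeit( '99999 in nums_set', globals=globals(), number=1000 ) )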
This is a severely truncated answer, since this question should probably be moved to Code Review.
One quick speed-up is simply leaving npnumeros as a set instead of a list. That means the later membership test if ( e in npnumeros ): happens significantly faster.
The modified code:
from math import *

def primos(num):
    num2 = num + 1
    tnumeros = []  # tnumeros = every number from 2 to num
    npnumeros = [] # npnumeros = every number that is not prime
    pnumeros = []  # pnumeros = every prime number
    for a in range( 2, num2 ):
        tnumeros.append( a )
    for i in range( 2, int( sqrt( num ) ) + 1 ):
        for j in range( i, int( num / i ) + 1 ):
            np = i * j
            npnumeros.append( np )
    npnumeros = set( npnumeros )
    for e in tnumeros:
        if ( e in npnumeros ):
            continue
        else:
            pnumeros.append( e )
    return ( str( "".join( str( pnumeros ) ) ) )

print( primos( 100000 ) )
runs ~60 times faster.
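For reference, a classical boolean-array Sieve of Eratosthenes (a sketch of my own, not from either answer) avoids building the collection of composites altogether and is faster still:
def primes_sieve(num):
    # classical Sieve of Eratosthenes over a boolean array
    is_prime = [True] * ( num + 1 )
    is_prime[0] = is_prime[1] = False
    for i in range( 2, int( num**0.5 ) + 1 ):
        if is_prime[i]:
            for multiple in range( i * i, num + 1, i ):
                is_prime[multiple] = False
    return [ n for n, p in enumerate( is_prime ) if p ]

print( len( primes_sieve( 100000 ) ) )  # 9592 primes below 100,000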
