Adaptive Runge-Kutta 4 method - python-3.x

I am trying to find the solution of the ODE dy/dx = 10*e^(-(x-2)^2/(2*0.075^2)) - 0.6y (the exponent factor 1/(2*0.075^2) ≈ 88.8888889).
Function:
import numpy as np

def dydx(x, y):
    return -0.6*y + 10*np.exp(-(x - 2)**2 / (2*0.075**2))
Single Step Solution:
def direct(x, y, h):
    k1 = dydx(x, y)
    k2 = dydx(x + h/2, y + k1*h/2)
    k3 = dydx(x + h/2, y + k2*h/2)
    k4 = dydx(x + h, y + k3*h)
    y = y + (1/6)*(k1 + 2*k2 + 2*k3 + k4)*h
    return y
Double Step Solution:
def halving(x, y, h):
    h = h/2
    xh = np.zeros(3)
    yh = np.zeros(3)
    xh[0] = x
    yh[0] = y
    for j in range(0, 2):
        k1 = dydx(xh[j], yh[j])
        k2 = dydx(xh[j] + h/2, yh[j] + k1*h/2)
        k3 = dydx(xh[j] + h/2, yh[j] + k2*h/2)
        k4 = dydx(xh[j] + h, yh[j] + k3*h)
        yh[j+1] = yh[j] + (1/6)*(k1 + 2*k2 + 2*k3 + k4)*h
        xh[j+1] = xh[j] + h
    return yh[2]
Find the adaptive step size 'h':
def adapt(x, y, h):
    error = abs(direct(x, y, h)) - abs(halving(x, y, h))  # check whether the error falls within the target range
    if error <= 0.01 and error >= 0.001:
        return h
    elif error > 0.01:
        return adapt(x, y, h/2)  # if the error is large, recall adapt with h/2
    elif error < 0.001:
        return adapt(x, y, 2*h)  # if the error is small, recall adapt with 2*h
Main Driver Program:
def rk4(xi, yi, h, xlim):
    nval = int((xlim - xi)/h)
    x = np.zeros(nval + 1)
    y = np.zeros(nval + 1)
    x[0] = xi
    y[0] = yi
    for i in range(0, nval):
        h = adapt(x[i], y[i], h)
        y[i+1] = direct(x[i], y[i], h)
        x[i+1] = x[i] + h
    return x, y
Evaluate using
rk4(0,0.5,.1,4)
I am using a Jupyter notebook. This program crashes, probably in the recursion section. But why? Shouldn't the error have fallen within the range after a few recursive calls?
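For what it's worth, the crash is most likely a RecursionError: error here is the difference of two magnitudes, abs(direct(...)) - abs(halving(...)), not the magnitude of the difference, so it can be negative or bounce between the halving and doubling branches, and the window [0.001, 0.01] may never be hit. Below is a minimal sketch of the usual step-doubling error estimate with an iteration guard; adapt_fixed and max_iter are names introduced here for illustration, not part of the original code:

def adapt_fixed(x, y, h, max_iter=50):
    # step-doubling error estimate: |one full step - two half steps|
    for _ in range(max_iter):
        error = abs(direct(x, y, h) - halving(x, y, h))
        if 0.001 <= error <= 0.01:
            return h
        elif error > 0.01:
            h = h/2   # error too large: shrink the step
        else:
            h = 2*h   # error too small: grow the step
    return h          # give up: factor-of-2 jumps can skip the window entirely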

Related

Why is the result of this division "inf"?

I have the following code to compute a desired quantity:
import numpy as np

N = 2
lamda = 2
mu = 1
a = 0.5
St_Sp = np.arange(-N, N + 1)
Card = St_Sp.shape[0]

#%% Define infinitesimal generator
def In_Ge(x, y):
    if x == N or x == -N:
        re = 0
    elif x - y == -1:
        re = lamda
    elif x - y == 1:
        re = mu
    elif x - y == 0:
        re = -(mu + lamda)
    else:
        re = 0
    return re

x = St_Sp[0]
y = In_Ge(x, x) / (In_Ge(x, x) + np.log(a))
b = -1 / y
print(b)
The result is inf. I checked and saw that the value of y is non-zero, so I could not understand why this phenomenon happens. Could you elaborate on this issue?
@pinegulf's comment solves my question:
Your 'In_Ge(x,x)' returns 0, thus y = 0 and 1/0 is pretty badly defined. Edit: You say it's not, but your x == -2, so the function's first if branch is invoked.
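A quick check (my own addition), reusing the definitions above, shows where the zero comes from and why NumPy reports inf instead of raising ZeroDivisionError:

print(St_Sp[0])                   # -2, i.e. x == -N, so In_Ge's first branch fires
print(In_Ge(St_Sp[0], St_Sp[0]))  # 0
y = In_Ge(St_Sp[0], St_Sp[0]) / (In_Ge(St_Sp[0], St_Sp[0]) + np.log(a))
print(y)                          # -0.0, a NumPy float (0 divided by log(0.5) < 0)
print(-1 / y)                     # inf, with a RuntimeWarning rather than an exception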

PyMC3- Custom theano Op to do numerical integration

I am using PyMC3 for parameter estimation with a particular likelihood function that has to be defined by hand. I googled it and found out that I should use the DensityDist method for implementing user-defined likelihood functions, but it is not working. How do I incorporate a user-defined likelihood function in PyMC3 and find the maximum a posteriori (MAP) estimate for my model? My code is given below. Here L is the analytic form of my likelihood function. I have observational data for the radial velocity (vr) and position (r) of some objects, imported from an Excel file.
# imports inferred from the usage below
import numpy as np
import pandas
import math as m
import random as rm
import decimal as dm
import theano
import theano.tensor as tt
import pymc3 as pm
from scipy.integrate import quad

data_ = np.array(pandas.read_excel('aaa.xlsx', header=None))
gamma = 3.77
G = 4.302*10**-6
rmin = 3.0
R = 95.7
vr = data_[:,1]
r = data_[:,0]
h = np.pi

class integrateOut(theano.Op):
    def __init__(self, f, t, t0, tf, *args, **kwargs):
        super(integrateOut, self).__init__()
        self.f = f
        self.t = t
        self.t0 = t0
        self.tf = tf
    def make_node(self, *inputs):
        self.fvars = list(inputs)
        try:
            self.gradF = tt.grad(self.f, self.fvars)
        except:
            self.gradF = None
        return theano.Apply(self, self.fvars, [tt.dscalar().type()])
    def perform(self, node, inputs, output_storage):
        args = tuple(inputs)
        f = theano.function([self.t] + self.fvars, self.f)
        output_storage[0][0] = quad(f, self.t0, self.tf, args=args)[0]
    def grad(self, inputs, grads):
        return [integrateOut(g, self.t, self.t0, self.tf)(*inputs)*grads[0]
                for g in self.gradF]
basic_model = pm.Model()
with basic_model:
    M = []
    beta = []
    interval = 0.01*10**12
    M = pm.Uniform('M', lower=0.5*10**12, upper=3.50*10**12, transform='interval')
    beta = pm.Uniform('beta', lower=2.001, upper=2.999, transform='interval')
    gamma = 3.77
    logp = []
    arr = []
    vnew = []
    rnew = []
    theta = tt.scalar('theta')
    beta = tt.scalar('beta')
    z = tt.cos(theta)**(2*((gamma/(beta - 2)) - 3/2) + 3)
    intZ = integrateOut(z, theta, -(np.pi)/2, (np.pi)/2)(beta)
    gradIntZ = tt.grad(intZ, [beta])
    funcIntZ = theano.function([beta], intZ)
    funcGradIntZ = theano.function([beta], gradIntZ)
    for j in np.arange(0, 59, 1):
        vnew.append(vr[j] + (0.05*vr[j]*float(dm.Decimal(rm.randrange(1, 20))/10)))
        rnew.append(r[j] + (0.05*r[j]*float(dm.Decimal(rm.randrange(1, 20))/10)))
    vn = np.array(vnew)
    rn = np.array(rnew)
    for beta in np.arange(2.01, 2.99, 0.01):
        for M in np.arange(0.5, 2.50, 0.01):
            i = np.arange(0, 59, 1)
            q = (gamma/(beta - 2)) - 3/2
            B = (G*M*10**12)/((beta - 2)*(R**(3 - beta)))
            K = (gamma - 3)/((rmin**(3 - gamma))*funcIntZ(beta)*m.sqrt(2*B))
            logp = -np.log(K*((1 - ((1/(2*B))*((vn[i]**2)*rn[i]**(beta - 2))))**(q + 1))*(rn[i]**(1 - gamma + (beta/2))))
            arr.append(logp.sum())
    def logp_func(rn, vn):
        return min(np.array(arr))
    logpvar = pm.DensityDist("logpvar", logp_func, observed={"rn": rn, "vn": vn})
    start = pm.find_MAP(model=basic_model)
    step = pm.Metropolis()
    basicmodeltrace = pm.sample(10000, step=step, start=start, random_seed=1, progressbar=True)
    print(pm.summary(basicmodeltrace))

map_estimate = pm.find_MAP(model=basic_model)
print(map_estimate)
I am getting the following error message:
ValueError: Cannot compute test value: input 0 (theta) of Op
Elemwise{cos,no_inplace}(theta) missing default value.
Backtrace when that variable is created:
I am unable to get the output since the numerical integration is not working. I used the custom Theano Op for numerical integration which I got from Custom Theano Op to do numerical integration. The integration works if I run it separately, inputting a particular value of beta, but not within the model.
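For reference, the standalone check mentioned above presumably looks something like the sketch below (my own illustration, reusing the integrateOut Op defined above and an arbitrary fixed beta of 2.5):

import numpy as np
import theano
import theano.tensor as tt

gamma = 3.77
theta = tt.dscalar('theta')
beta = tt.dscalar('beta')
z = tt.cos(theta)**(2*((gamma/(beta - 2)) - 3/2) + 3)
intZ = integrateOut(z, theta, -np.pi/2, np.pi/2)(beta)  # the Op defined in the question
funcIntZ = theano.function([beta], intZ)
print(funcIntZ(2.5))  # evaluates fine for a concrete beta, unlike inside the pm.Model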
I made a few changes to your code; it still does not work, but I hope it is closer to a solution. Please check this thread, as someone is trying to solve essentially the same problem.
class integrateOut(theano.Op):
    def __init__(self, f, t, t0, tf, *args, **kwargs):
        super(integrateOut, self).__init__()
        self.f = f
        self.t = t
        self.t0 = t0
        self.tf = tf
    def make_node(self, *inputs):
        self.fvars = list(inputs)
        try:
            self.gradF = tt.grad(self.f, self.fvars)
        except:
            self.gradF = None
        return theano.Apply(self, self.fvars, [tt.dscalar().type()])
    def perform(self, node, inputs, output_storage):
        args = tuple(inputs)
        f = theano.function([self.t] + self.fvars, self.f)
        output_storage[0][0] = quad(f, self.t0, self.tf, args=args)[0]
    def grad(self, inputs, grads):
        return [integrateOut(g, self.t, self.t0, self.tf)(*inputs)*grads[0]
                for g in self.gradF]

gamma = 3.77
G = 4.302E-6
rmin = 3.0
R = 95.7
vr = data[:,1]
r = data[:,0]
h = np.pi
interval = 1E10

vnew = []
rnew = []
for j in np.arange(0, 59, 1):
    vnew.append(vr[j] + (0.05*vr[j] * float(dm.Decimal(rm.randrange(1, 20))/10)))
    rnew.append(r[j] + (0.05*r[j] * float(dm.Decimal(rm.randrange(1, 20))/10)))
vn = np.array(vnew)
rn = np.array(rnew)

def integ(gamma, beta, theta):
    z = tt.cos(theta)**(2*((gamma/(beta - 2)) - 3/2) + 3)
    return integrateOut(z, theta, -(np.pi)/2, (np.pi)/2)(beta)

with pm.Model() as basic_model:
    M = pm.Uniform('M', lower=0.5*10**12, upper=3.50*10**12)
    beta = pm.Uniform('beta', lower=2.001, upper=2.999)
    theta = pm.Normal('theta', 0, 10**2)

    def logp_func(rn, vn):
        q = (gamma/(beta - 2)) - 3/2
        B = (G*M*1E12) / ((beta - 2)*(R**(3 - beta)))
        K = (gamma - 3) / ((rmin**(3 - gamma)) * integ(gamma, beta, theta) * (2*B)**0.5)
        logp = -np.log(K*((1 - ((1/(2*B))*((vn**2)*rn**(beta - 2))))**(q + 1))*(rn**(1 - gamma + (beta/2))))
        return logp.sum()

    logpvar = pm.DensityDist("logpvar", logp_func, observed={"rn": rn, "vn": vn})
    start = pm.find_MAP()
    #basicmodeltrace = pm.sample()
    print(start)

Better way to solve simultaneous linear equations programmatically in Python

I have the following code that solves simultaneous linear equations by starting with the first equation and finding y when x=0, then putting that y into the second equation and finding x, then putting that x back into the first equation etc...
Obviously, this has the potential to reach infinity, so if it reaches +-inf then it swaps the order of the equations so the spiral/ladder goes the other way.
This seems to work, though I'm not such a good mathematician that I can prove it will always work beyond a hunch, and of course some lines never meet (I know how to use matrices and linear algebra to check straight off whether they will never meet, but I'm not so interested in that at the moment).
Is there a better way to 'spiral' in on the answer? I'm not interested in using math functions or numpy for the whole solution - I want to be able to code the solution. I don't mind using libraries to improve the performance, for instance using some sort of statistical method.
This may be a very naive question from either a coding or maths point of view, but if so I'd like to know why!
My code is as follows:
# A python program to solve 2d simultaneous equations
# by iterating over coefficients in spirals
import numpy as np

def Input(coeff_or_constant, var, lower, upper):
    val = int(input("Let the {} {} be a number between {} and {}: ".format(coeff_or_constant, var, lower, upper)))
    if val >= lower and val <= upper:
        return val
    else:
        print("Invalid input")
        exit(0)

def Equation(equation_array):
    a = Input("coefficient", "a", 0, 10)
    b = Input("coefficient", "b", 0, 10)
    c = Input("constant", "c", 0, 10)
    equation_list = [a, b, c]
    equation_array.append(equation_list)
    return equation_array

def Stringify_Equations(equation_array):
    A = str(equation_array[0][0])
    B = str(equation_array[0][1])
    C = str(equation_array[0][2])
    D = str(equation_array[1][0])
    E = str(equation_array[1][1])
    F = str(equation_array[1][2])
    eq1 = str(A + "y = " + B + "x + " + C)
    eq2 = str(D + "y = " + E + "x + " + F)
    print(eq1)
    print(eq2)
def Spiral(equation_array):
    a = equation_array[0][0]
    b = equation_array[0][1]
    c = equation_array[0][2]
    d = equation_array[1][0]
    e = equation_array[1][1]
    f = equation_array[1][2]
    # start at y when x = 0
    x = 0
    infinity_flag = False
    count = 0
    coords = []
    coords.append([0, 0])
    coords.append([1, 1])
    # solve equation 2 for x when y = START
    while not (coords[0][0] == coords[1][0]):
        try:
            y = ((b * x) + c) / a
        except:
            y = 0
        print(y)
        try:
            x = ((d * y) - f) / e
        except:
            x = 0
        if x >= 100000 or x <= -100000:
            count = count + 1
            if count >= 100000:
                print("It's looking like these linear equations don't intersect!")
                break
        print(x)
        new_coords = [x, y]
        coords.append(new_coords)
        coords.pop(0)
        if not ((x == float("inf") or x == float("-inf")) and (y == float("inf") or y == float("-inf"))):
            pass
        else:
            infinity_flag if False else True
            if infinity_flag == False:
                # if the spiral is divergent this switches the equations around so it converges
                # the infinity_flag is to check if both spirals returned infinity meaning the lines do not intersect
                # I think this would mostly work for linear equations, but for other kinds of equations it might not
                x = 0
                a = equation_array[1][0]
                b = equation_array[1][1]
                c = equation_array[1][2]
                d = equation_array[0][0]
                e = equation_array[0][1]
                f = equation_array[0][2]
                infinity_flag = False
            else:
                print("These linear equations do not intersect")
                break
    y = round(y, 3)
    x = round(x, 3)
    print(x, y)
equation_array = []
print("Specify coefficients a and b, and a constant c for equation 1")
equations = Equation(equation_array)
print("Specify coefficients a and b, and a constant c for equation 2")
equations = Equation(equation_array)
print(equation_array)
Stringify_Equations(equation_array)
Spiral(equation_array)
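As a point of comparison, the 'spiral' described above is a fixed-point iteration: substituting eq. 1 (a*y = b*x + c) into eq. 2 (d*y = e*x + f) gives the update x -> (d*((b*x + c)/a) - f)/e, which converges exactly when |b*d/(a*e)| < 1; swapping the two equations inverts that ratio, which is why reversing the order rescues a divergent run (and when the ratio has magnitude 1, e.g. parallel lines, neither order converges). Below is a minimal sketch of the same idea with the convergence check made explicit; this is my own illustration, not a drop-in replacement for the code above, and it assumes a and e are non-zero:

def spiral_fixed_point(eq1, eq2, x0=0.0, tol=1e-9, max_iter=10000):
    # eq1 = (a, b, c) meaning a*y = b*x + c; eq2 = (d, e, f) meaning d*y = e*x + f
    a, b, c = eq1
    d, e, f = eq2
    # the combined update x -> (d*((b*x + c)/a) - f)/e contracts iff |b*d| < |a*e|;
    # otherwise swap the equations so the ratio is inverted
    if abs(b*d) >= abs(a*e):
        a, b, c, d, e, f = d, e, f, a, b, c
    x = x0
    for _ in range(max_iter):
        y = (b*x + c) / a
        x_new = (d*y - f) / e
        if abs(x_new - x) < tol:
            return round(x_new, 3), round(y, 3)
        x = x_new
    return None  # no convergence: parallel lines, or |b*d| == |a*e|

# example: y = 2x + 1 and y = -x + 4 intersect at (1, 3)
print(spiral_fixed_point((1, 2, 1), (1, -1, 4)))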

optimize.brute: ValueError: array is too big

I need to optimize a non-convex problem (maximum likelihood), and when I try optimization algorithms such as BFGS or Nelder-Mead, they fail to find the extremum; I frequently get a saddle point instead.
You can download data from here.
import numpy as np
import csv
from scipy.stats import norm

f = open('data.csv', 'r')
reader = csv.reader(f)
headers = next(reader)
column = {}
for h in headers:
    column[h] = []
for row in reader:
    for h, v in zip(headers, row):
        column[h].append(float(v))

ini = [-0.0002, -0.01, .002, -0.09, -0.04, 0.01, -0.02, -.0004]
for i in range(0, len(x[0])):
    ini.append(float(x[0][i]))
x_header = list(Coef_headers)
N = 19  # no of observations
I = 4
P = 7
Yobs = np.zeros(N)
Yobs[:] = column['size']
X = np.zeros((N, P))
X[:,0] = column['costTon']
X[:,1] = column['com1']
X[:,2] = column['com3']
X[:,3] = column['com4']
X[:,4] = column['com5']
X[:,5] = column['night']
X[:,6] = 1  # constant

def myfunction(B):
    beta = B[0.299, 18.495, 2.181, 2.754, 3.59, 2.866, -12.846]
    theta = 30
    U = np.zeros((N, I))
    mm = np.zeros(I)
    u = np.zeros((N, I))
    F = np.zeros((N, I))
    G = np.zeros(N)
    l = 0
    s1 = np.expm1(-theta)
    for n in range(0, N):
        m = 0
        U[n,0] = B[0]*column['cost_van'][n] + B[4]*column['cap_van'][n]
        U[n,1] = B[1] + B[5]*column['ex'][n] + B[8]*column['dist'][n] + B[0]*column['cost_t'][n] + B[4]*column['cap_t'][n]
        U[n,2] = B[2] + B[6]*column['ex'][n] + B[9]*column['dist'][n] + B[0]*column['cost_Ht'][n] + B[4]*column['cap_Ht'][n]
        U[n,3] = B[3] + B[7]*column['ex'][n] + B[10]*column['dist'][n] + B[0]*column['cost_tr'][n] + B[4]*column['cap_tr'][n]
        for i in range(0, I):
            mm[i] = np.exp(U[n,i])
        m = sum(mm)
        for i in range(0, I):
            u[n,i] = 1/(1 + np.exp(U[n,i] - np.log(m - np.exp(U[n,i]))))
            F[n,i] = np.expm1(-u[n,i]*theta)
    CDF = np.zeros(N)
    Y = X.dot(beta)
    resid = 0
    for n in range(0, N):
        resid = resid + (np.square(Yobs[n] - Y[n]))
    SSR = resid / N
    dof = N - P - 1
    s2 = resid/dof  # MSE, or variance: the mean squared error of residuals
    for n in range(0, N):
        CDF[n] = norm.cdf((Yobs[n] + 1), SSR, s2) - norm.cdf((Yobs[n] - 1), SSR, s2)
        G[n] = np.expm1(-CDF[n]*theta)
        k = column['Choice_Veh'][n] - 1
        l = l + (np.log10(1 + (F[n,k]*G[n]/s1))/(-theta))
    loglikelihood = np.log10(l)
    return -loglikelihood

rranges = np.repeat(slice(-10, 10, 1), 11, axis=0)
a = rranges
from scipy import optimize
resbrute = optimize.brute(myfunction, rranges, full_output=True, finish=optimize.fmin)
print("# global minimum:", resbrute[0])
print("function value at global minimum :", resbrute[1])
Now I have decided to go for a grid search and tried scipy.optimize.brute, but I get this error. In fact, my real problem has 47 variables; I decreased it to 31 to make it work, but it still doesn't. Please help.
File "C:\...\site-packages\numpy\core\numeric.py", line 1906, in indices
res = empty((N,)+dimensions, dtype=dtype)
ValueError: array is too big.
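The size of the grid explains the error: each slice(-10, 10, 1) covers the 20 integers from -10 to 9, and optimize.brute materialises the full Cartesian product (via the np.indices call shown in the traceback), so even the 11-parameter version above asks for 20**11, roughly 2e14, grid points before a single function evaluation; with 31 or 47 parameters the grid is astronomically larger. A quick size check (my own illustration):

import numpy as np

rranges = [slice(-10, 10, 1)] * 11   # 11 identical parameter ranges, as above
points_per_dim = [len(np.arange(s.start, s.stop, s.step)) for s in rranges]
print(points_per_dim)                  # 20 points per parameter
print(float(np.prod(points_per_dim)))  # 2.048e+14 grid points: far too large to allocate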

How to write cos(1)

I need to find a way to compute cos(1) in Python using a while loop, but I can't use any math functions. Can someone help me out?
For example, I also had to compute the value of exp(1), and I was able to do it by writing:
count = 1
term = 1
expTotal = 0
xx = 1
while abs(term) > 1e-20:
    print("%1d %22.17e" % (count, term))
    expTotal = expTotal + term
    term = term * xx/(count)
    count += 1
I am completely lost as to how to do this with the cos and sin values, though.
Just change your expression to compute the term to:
term = term * (-1 * x * x)/( (2*count) * ((2*count)-1) )
Multiplying the count by 2 could be changed to increment the count by 2, so here is your copypasta:
import math

def cos(x):
    cosTotal = 1
    count = 2
    term = 1
    x = float(x)
    while abs(term) > 1e-20:
        term *= (-x * x)/( count * (count-1) )
        cosTotal += term
        count += 2
        print("%1d %22.17e" % (count, term))
    return cosTotal

print( cos(1) )
print( math.cos(1) )
print( math.cos(1) )
You can calculate cos(1) by using the Taylor expansion of this function: cos(x) = sum over n >= 0 of (-1)^n * x^(2n) / (2n)!.
You can find more details on Wikipedia; see an implementation below:
import math

def factorial(n):
    if n == 0:
        return 1
    else:
        return n * factorial(n-1)

def cos(order):
    a = 0
    for i in range(0, order):
        a += ((-1)**i)/(factorial(2*i)*1.0)
    return a

print(cos(10))
print(math.cos(1))
This gives as output:
0.540302305868
0.540302305868
EDIT: Apparently the cosine is implemented in hardware using the CORDIC algorithm, which uses a lookup table of atan values. See below a Python implementation of the CORDIC algorithm, based on this Google group question:
import math

# atans = [math.atan(2.0**(-i)) for i in range(0,40)]
atans = [0.7853981633974483, 0.4636476090008061, 0.24497866312686414, 0.12435499454676144, 0.06241880999595735, 0.031239833430268277, 0.015623728620476831, 0.007812341060101111, 0.0039062301319669718, 0.0019531225164788188, 0.0009765621895593195, 0.0004882812111948983, 0.00024414062014936177, 0.00012207031189367021, 6.103515617420877e-05, 3.0517578115526096e-05, 1.5258789061315762e-05, 7.62939453110197e-06, 3.814697265606496e-06, 1.907348632810187e-06, 9.536743164059608e-07, 4.7683715820308884e-07, 2.3841857910155797e-07, 1.1920928955078068e-07, 5.960464477539055e-08, 2.9802322387695303e-08, 1.4901161193847655e-08, 7.450580596923828e-09, 3.725290298461914e-09, 1.862645149230957e-09, 9.313225746154785e-10, 4.656612873077393e-10, 2.3283064365386963e-10, 1.1641532182693481e-10, 5.820766091346741e-11, 2.9103830456733704e-11, 1.4551915228366852e-11, 7.275957614183426e-12, 3.637978807091713e-12, 1.8189894035458565e-12]

def cosine_sine_cordic(beta, N=40):
    # in hardware, put this in a table.
    def K_vals(n):
        K = []
        acc = 1.0
        for i in range(0, n):
            acc = acc * (1.0/(1 + 2.0**(-2*i))**0.5)
            K.append(acc)
        return K
    # K = K_vals(N)
    K = 0.6072529350088812561694
    x = 1
    y = 0
    for i in range(0, N):
        d = 1.0
        if beta < 0:
            d = -1.0
        (x, y) = (x - (d*(2.0**(-i))*y), (d*(2.0**(-i))*x) + y)
        # in hardware put the atan values in a table
        beta = beta - (d*atans[i])
    return (K*x, K*y)

if __name__ == '__main__':
    beta = 1
    cos_val, sin_val = cosine_sine_cordic(beta)
    print("Actual cos: " + str(math.cos(beta)))
    print("Cordic cos: " + str(cos_val))
This gives as output:
Actual cos: 0.540302305868
Cordic cos: 0.540302305869
