How to resolve ValueError in SciPy function fmin_tnc? - python-3.x

I am trying to implement the Coursera assignments in Python, using SciPy's optimiser for logistic regression, but I am getting the error below.
Can anyone help?
Note: the cost and gradient functions work fine on their own.
# Sigmoid function
def sigmoid(z):
    h_of_z = np.zeros([z.shape[0]])
    h_of_z = np.divide(1, (1 + np.exp(-z)))
    return h_of_z

def cost(x, y, theta):
    m = y.shape[0]
    h_of_x = sigmoid(np.matmul(x, theta))
    term1 = sum(-1 * y.T @ np.log(h_of_x) - (1 - y.T) @ np.log(1 - h_of_x))
    J = 1/m * term1
    return J

def grad(x, y, theta):
    grad = np.zeros_like(theta)
    m = y.shape[0]
    h_of_x = sigmoid(x @ theta)
    grad = (x.T @ (h_of_x - y)) * (1/m)
    return grad

# add intercept term for X
x = np.hstack([np.ones_like(y), X[:, 0:2]])

# initialise theta
[m, n] = np.shape(x)
initial_theta = np.zeros([n, 1])

# optimising theta from given theta and gradient
result = opt.fmin_tnc(func=cost, x0=initial_theta, args=(x, y))
ValueError: matmul: Input operand 1 has a mismatch in its core dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?) (size 99 is different from 3)

I got it!
The problem is that fmin_tnc expects the objective function to take the parameter 'theta' as its first argument, before the additional arguments x and y.
Since my function 'cost' took x and y first, fmin_tnc passed the values into the wrong slots and raised the ValueError.
Below is the corrected code:
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def cost(theta, x, y):
    J = (-1/m) * np.sum(np.multiply(y, np.log(sigmoid(x @ theta)))
                        + np.multiply((1 - y), np.log(1 - sigmoid(x @ theta))))
    return J

def gradient(theta, x, y):
    h_of_x = sigmoid(x @ theta)
    grad = 1 / m * (x.T @ (h_of_x - y))
    return grad

# initialise theta
init_theta = np.zeros([n + 1, 1])

# optimise theta
from scipy import optimize as op
result = op.fmin_tnc(func=cost,
                     x0=init_theta.flatten(),
                     fprime=gradient,
                     args=(x, y.flatten()))
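For reference, a minimal usage sketch (my addition, assuming x and y are the arrays built above): fmin_tnc returns a tuple whose first element is the optimised parameter vector, so you can unpack it and predict directly.

theta_opt = result[0]                         # optimised theta (flattened)
predictions = sigmoid(x @ theta_opt) >= 0.5   # class predictions at threshold 0.5
accuracy = np.mean(predictions == y.flatten())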

Related

How to use scipy's least_squares

I am trying to implement a simple model estimation in Python.
I have an ARCH model:
logR_t = u + theta_1 * logR_{t-1} + \epsilon_t
where logR_t is my log-returns vector, u and theta_1 are the two parameters to be estimated, and \epsilon_t are my residuals.
In Matlab, I have the following lines to call the optimiser on the function Error_ARCH. The initial guess for the parameters is 1, their lower bounds are -10 and upper bounds are 10.
ARCH.param = lsqnonlin( @(param) Error_ARCH(param, logR), [1 1], [-10 -10], [10 10]);
[ARCH.Error, ARCH.Residuals] = Error_ARCH( ARCH.param, logR);
Where the error to minimise is given as:
function [error, residuals] = Error_ARCH(param, logreturns)
    % Initialisation
    y_hat = zeros(length(logreturns), 1);
    % Parameters
    u = param(1);
    theta1 = param(2);
    % Define model
    ARCH = @(z) u + theta1.*z;
    for i = 2:length(logreturns)
        y_hat(i) = ARCH(logreturns(i-1));
    end
    error = abs(logreturns - y_hat);
    residuals = logreturns - y_hat;
end
I would like to do something similar in Python, but I am stuck because I do not know where to specify the arguments to the least_squares function in SciPy. So far I have:
from scipy.optimize import least_squares

def model(param, z):
    """The model whose parameters we try to estimate"""
    u = param[0]
    theta1 = param[1]
    return u + theta1*z

def residuals_ARCH(param, z):
    return z - model(param, z)
When I call the least_squares optimiser:
guess = [1, 1]
result = least_squares(residuals_ARCH, x0=guess, verbose=1, bounds=(-10, 10))
I get an error:
TypeError: residuals_ARCH() missing 1 required positional argument: 'z'
Thank you for all your help
The least_squares method expects a function with signature fun(x, *args, **kwargs). Hence, you can use a lambda expression similar to your Matlab function handle:
# logR = your log-returns vector
result = least_squares(lambda param: residuals_ARCH(param, logR), x0=guess, verbose=1, bounds=(-10, 10))
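For what it's worth, least_squares also accepts an args tuple that it forwards to the residual function, which avoids the lambda entirely (a sketch of mine, assuming logR is the log-returns array):

# logR = your log-returns vector
result = least_squares(residuals_ARCH, x0=guess, args=(logR,),
                       verbose=1, bounds=(-10, 10))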

unsupported operand type error while evaluating gradient using sympy

I am trying to minimize the eggholder function, -(y+47)*sin(sqrt(abs(x/2 + y + 47))) - x*sin(sqrt(abs(x - (y+47)))), by gradient descent, using the sympy library to evaluate the gradients, but I am getting an unsupported operand type error. My code:
import matplotlib.pyplot as plt
import numpy as np
import math
import sympy as sym

x, y = sym.symbols('x y')

# objective function
def objective(p):
    x = p[0]
    y = p[1]
    return -(y+47)*sym.sin(sym.sqrt(abs((x/2)+y+47))) - x*sym.sin(sym.sqrt(abs(x-(y+47))))

# gradient wrt x
def gradient_1(p):
    g0 = sym.diff(objective([x, y]), x)
    return g0.subs(x, p[0]).subs(y, p[1])

# gradient wrt y
def gradient_2(p):
    g1 = sym.diff(objective([x, y]), y)
    return g1.subs(x, p[0]).subs(y, p[1])

# initial guess
x = [-450, -450]

# learning rate
alpha = 0.135

# initialisation
iter = 0
e = 1000
grad = np.array([])
error = np.array([])

# gradient descent
while e > 0.000000001:
    t = objective(x)
    grad = [gradient_1(x), gradient_2(x)]
    change = np.dot(-alpha, grad)
    x = np.add(x, change)
    e = abs(objective(x) - t)
    error = np.append(error, e)
    iter = iter + 1
    print("ITERATION NUMBER = ", iter)

# printing optimized values
print(x)

# plotting error v/s iteration
i = np.arange(1, iter + 1)
plt.plot(i, error)
plt.xlabel("Iterations")
plt.ylabel("Error")
Inside the while loop, the variable x is no longer of type int but of type list: the assignment x = [-450, -450] overwrote the sympy symbol x, so the call objective([x, y]) inside gradient_1 receives the list instead of the symbol. You can see it if you print the type of the variables x and y inside the function objective():
def objective(p):
    x = p[0]
    y = p[1]
    #print(type(p))
    print("Iteration")
    print("x:", type(x))
    print("y:", type(y))
    return -(y+47)*sym.sin(sym.sqrt(abs((x/2)+y+47))) - x*sym.sin(sym.sqrt(abs(x-(y+47))))
Output:
Iteration
x: <class 'int'>
y: <class 'int'>
Iteration
x: <class 'list'>
y: <class 'sympy.core.symbol.Symbol'>
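One way to avoid the clash (a sketch of my own, not from the original post, reusing objective, gradient_1 and gradient_2 from the question) is to keep the sympy symbols and the numeric iterate under different names, so the initial guess never shadows the symbol x:

x, y = sym.symbols('x y')   # the symbols keep these names for good
p = [-450, -450]            # the numeric iterate gets its own name
alpha = 0.135
e = 1000
while e > 0.000000001:
    t = objective(p)
    grad = [gradient_1(p), gradient_2(p)]
    p = np.add(p, np.dot(-alpha, grad))
    e = abs(objective(p) - t)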

How to calculate backpropagation through tf.while_loop to use as loss function

I want to implement a Fourier Ring Correlation loss for two images to train a GAN. For that I'd like to loop a specific number of times and accumulate the loss. This works fine with a normal Python loop, but to speed up the process I want to use tf.while_loop. Unfortunately I am not able to track the gradients through the while loop. I constructed a dummy example just to calculate gradients during a while loop, but it doesn't work. First, the working Python loop:
x = tf.constant(3.0)
y = tf.constant(2.0)

for i in range(3):
    y = y * x

grad = tf.gradients(y, x)

with tf.Session() as ses:
    print("output : ", ses.run(grad))
This works and gives the output
[54]
If I do the same with a tf.while_loop, it doesn't work:
a = tf.constant(0, dtype=tf.int64)
b = tf.constant(3, dtype=tf.int64)
x = tf.constant(3.0)
y = tf.constant(2.0)

def cond(a, b, x, y):
    return tf.less(a, b)

def body(a, b, x, y):
    y = y * x
    with tf.control_dependencies([y]):
        a = a + 1
    return [a, b, x, y]

results = tf.while_loop(cond, body, [a, b, x, y], back_prop=True)
grad = tf.gradients(y, results[2])

with tf.Session() as ses:
    print("grad : ", ses.run(grad))
The output is:
TypeError: Fetch argument None has invalid type <class 'NoneType'>
So I guess tensorflow is somehow not able to do the backpropagation.
The problem still occurs if you use tf.GradientTape() instead of tf.gradients().
I changed the code so that it now outputs the gradients:
import tensorflow as tf

a = tf.constant(0, dtype=tf.int64)
b = tf.constant(3, dtype=tf.int64)
x = tf.Variable(3.0, tf.float32)
y = tf.Variable(2.0, tf.float32)
dy = tf.Variable(0.0, tf.float32)

def cond(a, b, x, y, dy):
    return tf.less(a, b)

def body(a, b, x, y, dy):
    y = y * x
    dy = tf.gradients(y, x)[0]
    with tf.control_dependencies([y]):
        a = a + 1
    return [a, b, x, y, dy]

init = tf.global_variables_initializer()

with tf.Session() as ses:
    ses.run(init)
    results = ses.run(tf.while_loop(cond, body, [a, b, x, y, dy], back_prop=True))
    print("grad : ", results[-1])
The things I modified:
I made x and y into variables and added their initialisation init.
I added a variable called dy which will contain the gradient of y.
I moved the tf.while_loop inside the session.
I put the evaluation of the gradient inside the body function.
I think the problem before was that when you define grad = tf.gradients(y, results[2]) the loop has not run yet, so y is not a function of x. Therefore, there is no gradient.
Hope this helps.
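An alternative worth noting (my sketch, not part of the answer above): tf.while_loop supports backpropagation through its output tensors, so differentiating the loop's final y with respect to x also works with the original constants and the cond/body from the question:

results = tf.while_loop(cond, body, [a, b, x, y], back_prop=True)
grad = tf.gradients(results[3], x)   # final y = 2*x**3, so dy/dx = 6*x**2

with tf.Session() as ses:
    print("grad : ", ses.run(grad))  # expected: [54.0] at x = 3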

fit() missing 1 required positional argument: 'theta'

I tried to implement a logistic regression model.
I get the error message below.
TypeError: fit() missing 1 required positional argument: 'theta'
This is my code
if __name__ == "__main__":
    # X = feature values, all the columns except the last column
    X = data.iloc[:, :-1]
    # y = target values, last column of the data frame
    y = data.iloc[:, -1]
    # filter out the applicants that got admitted
    admitted = data.loc[y == 1]
    # filter out the applicants that didn't get admission
    not_admitted = data.loc[y == 0]
    X = np.c_[np.ones((X.shape[0], 1)), X]
    y = y[:, np.newaxis]
    theta = np.zeros((X.shape[1], 1))

    def sigmoid(x):
        # Activation function used to map any real value between 0 and 1
        return 1 / (1 + np.exp(-x))

    def net_input(theta, x):
        # Computes the weighted sum of inputs
        return np.dot(x, theta)

    def probability(theta, x):
        # Returns the probability after passing through sigmoid
        return sigmoid(net_input(theta, x))

    def cost_function(self, theta, x, y):
        # Computes the cost function for all the training samples
        m = x.shape[0]
        total_cost = -(1 / m) * np.sum(
            y * np.log(probability(theta, x)) + (1 - y) * np.log(
                1 - probability(theta, x)))
        return total_cost

    def gradient(self, theta, x, y):
        # Computes the gradient of the cost function at the point theta
        m = x.shape[0]
        return (1 / m) * np.dot(x.T, sigmoid(net_input(theta, x)) - y)

    def fit(self, x, y, theta):
        opt_weights = fmin_tnc(func=cost_function, x0=theta,
                               fprime=gradient, args=(x, y.flatten()))
        return opt_weights[0]

    parameters = fit(X, y, theta)
TypeError: fit() missing 1 required positional argument: 'theta'
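A likely cause, as far as I can tell (my reading, not stated in the original post): cost_function, gradient and fit are written with a method-style self parameter but called as plain functions, so in fit(X, y, theta) the array X lands in self, y in x, and theta in y, leaving theta unfilled. A sketch of the fix, reusing sigmoid, net_input and probability from above, is simply to drop self everywhere:

def cost_function(theta, x, y):
    # plain function: theta comes first, as fmin_tnc expects
    m = x.shape[0]
    return -(1 / m) * np.sum(
        y * np.log(probability(theta, x))
        + (1 - y) * np.log(1 - probability(theta, x)))

def gradient(theta, x, y):
    m = x.shape[0]
    return (1 / m) * np.dot(x.T, sigmoid(net_input(theta, x)) - y)

def fit(x, y, theta):
    opt_weights = fmin_tnc(func=cost_function, x0=theta,
                           fprime=gradient, args=(x, y.flatten()))
    return opt_weights[0]

parameters = fit(X, y, theta)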

PyMC3- Custom theano Op to do numerical integration

I am using PyMC3 for parameter estimation with a particular likelihood function which has to be user-defined. I googled it and found that I should use the DensityDist method for implementing user-defined likelihood functions, but it is not working. How do I incorporate a user-defined likelihood function in PyMC3 and find the maximum a posteriori (MAP) estimate for my model? My code is given below. Here L is the analytic form of my likelihood function. I have some observational data for the radial velocity (vr) and position (r) of some objects, imported from an Excel file.
data_ = np.array(pandas.read_excel('aaa.xlsx', header=None))
gamma = 3.77
G = 4.302*10**-6
rmin = 3.0
R = 95.7
vr = data_[:, 1]
r = data_[:, 0]
h = np.pi
class integrateOut(theano.Op):
    def __init__(self, f, t, t0, tf, *args, **kwargs):
        super(integrateOut, self).__init__()
        self.f = f
        self.t = t
        self.t0 = t0
        self.tf = tf

    def make_node(self, *inputs):
        self.fvars = list(inputs)
        try:
            self.gradF = tt.grad(self.f, self.fvars)
        except:
            self.gradF = None
        return theano.Apply(self, self.fvars, [tt.dscalar().type()])

    def perform(self, node, inputs, output_storage):
        args = tuple(inputs)
        f = theano.function([self.t] + self.fvars, self.f)
        output_storage[0][0] = quad(f, self.t0, self.tf, args=args)[0]

    def grad(self, inputs, grads):
        return [integrateOut(g, self.t, self.t0, self.tf)(*inputs)*grads[0]
                for g in self.gradF]
basic_model = pm.Model()

with basic_model:
    M = []
    beta = []
    interval = 0.01*10**12
    M = pm.Uniform('M',
                   lower=0.5*10**12, upper=3.50*10**12, transform='interval')
    beta = pm.Uniform('beta', lower=2.001, upper=2.999, transform='interval')
    gamma = 3.77
    logp = []
    arr = []
    vnew = []
    rnew = []
    theta = tt.scalar('theta')
    beta = tt.scalar('beta')
    z = tt.cos(theta)**(2*((gamma/(beta - 2)) - 3/2) + 3)
    intZ = integrateOut(z, theta, -(np.pi)/2, (np.pi)/2)(beta)
    gradIntZ = tt.grad(intZ, [beta])
    funcIntZ = theano.function([beta], intZ)
    funcGradIntZ = theano.function([beta], gradIntZ)
    for j in np.arange(0, 59, 1):
        vnew.append(vr[j] + (0.05*vr[j]*float(dm.Decimal(rm.randrange(1, 20))/10)))
        rnew.append(r[j] + (0.05*r[j]*float(dm.Decimal(rm.randrange(1, 20))/10)))
    vn = np.array(vnew)
    rn = np.array(rnew)
    for beta in np.arange(2.01, 2.99, 0.01):
        for M in np.arange(0.5, 2.50, 0.01):
            i = np.arange(0, 59, 1)
            q = (gamma/(beta - 2)) - 3/2
            B = (G*M*10**12)/((beta - 2)*(R**(3 - beta)))
            K = (gamma - 3)/((rmin**(3 - gamma))*funcIntZ(beta)*m.sqrt(2*B))
            logp = -np.log(K*((1 - ((1/(2*B))*((vn[i]**2)*rn[i]**(beta - 2))))**(q+1))*(rn[i]**(1 - gamma + (beta/2))))
            arr.append(logp.sum())

    def logp_func(rn, vn):
        return min(np.array(arr))

    logpvar = pm.DensityDist("logpvar", logp_func, observed={"rn": rn, "vn": vn})
    start = pm.find_MAP(model=basic_model)
    step = pm.Metropolis()
    basicmodeltrace = pm.sample(10000, step=step,
                                start=start, random_seed=1, progressbar=True)
    print(pm.summary(basicmodeltrace))

map_estimate = pm.find_MAP(model=basic_model)
print(map_estimate)
I am getting the following error message:
ValueError: Cannot compute test value: input 0 (theta) of Op
Elemwise{cos,no_inplace}(theta) missing default value.
Backtrace when that variable is created:
I am unable to get the output since the numerical integration is not working. I used the custom Theano Op for numerical integration from Custom Theano Op to do numerical integration. The integration works if I run it separately, inputting a particular value of beta, but not within the model.
I made a few changes to your code. This still does not work, but I hope it is closer to a solution. Please check this thread, as someone is trying to solve essentially the same problem.
class integrateOut(theano.Op):
    def __init__(self, f, t, t0, tf, *args, **kwargs):
        super(integrateOut, self).__init__()
        self.f = f
        self.t = t
        self.t0 = t0
        self.tf = tf

    def make_node(self, *inputs):
        self.fvars = list(inputs)
        try:
            self.gradF = tt.grad(self.f, self.fvars)
        except:
            self.gradF = None
        return theano.Apply(self, self.fvars, [tt.dscalar().type()])

    def perform(self, node, inputs, output_storage):
        args = tuple(inputs)
        f = theano.function([self.t] + self.fvars, self.f)
        output_storage[0][0] = quad(f, self.t0, self.tf, args=args)[0]

    def grad(self, inputs, grads):
        return [integrateOut(g, self.t, self.t0, self.tf)(*inputs)*grads[0]
                for g in self.gradF]
gamma = 3.77
G = 4.302E-6
rmin = 3.0
R = 95.7
vr = data[:, 1]
r = data[:, 0]
h = np.pi
interval = 1E10

vnew = []
rnew = []
for j in np.arange(0, 59, 1):
    vnew.append(vr[j] + (0.05*vr[j] * float(dm.Decimal(rm.randrange(1, 20))/10)))
    rnew.append(r[j] + (0.05*r[j] * float(dm.Decimal(rm.randrange(1, 20))/10)))
vn = np.array(vnew)
rn = np.array(rnew)

def integ(gamma, beta, theta):
    z = tt.cos(theta)**(2*((gamma/(beta - 2)) - 3/2) + 3)
    return integrateOut(z, theta, -(np.pi)/2, (np.pi)/2)(beta)

with pm.Model() as basic_model:
    M = pm.Uniform('M', lower=0.5*10**12, upper=3.50*10**12)
    beta = pm.Uniform('beta', lower=2.001, upper=2.999)
    theta = pm.Normal('theta', 0, 10**2)

    def logp_func(rn, vn):
        q = (gamma/(beta - 2)) - 3/2
        B = (G*M*1E12) / ((beta - 2)*(R**(3 - beta)))
        K = (gamma - 3) / ((rmin**(3 - gamma)) * integ(gamma, beta, theta) * (2*B)**0.5)
        logp = -np.log(K*((1 - ((1/(2*B))*((vn**2)*rn**(beta - 2))))**(q+1))
                       * (rn**(1 - gamma + (beta/2))))
        return logp.sum()

    logpvar = pm.DensityDist("logpvar", logp_func, observed={"rn": rn, "vn": vn})
    start = pm.find_MAP()
    #basicmodeltrace = pm.sample()
    print(start)
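As a quick sanity check of the Op outside the model (a sketch of mine, mirroring the standalone test the question reports already works), the integral can be compiled for a fixed beta:

beta_s = tt.dscalar('beta_s')    # hypothetical standalone symbols
theta_s = tt.dscalar('theta_s')
z = tt.cos(theta_s)**(2*((3.77/(beta_s - 2)) - 3/2) + 3)
intZ = integrateOut(z, theta_s, -np.pi/2, np.pi/2)(beta_s)
funcIntZ = theano.function([beta_s], intZ)
print(funcIntZ(2.5))             # should print a finite positive number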
