My code is as follows:
import numpy as np
from math import *
from scipy.optimize import *
import scipy.optimize as opt
from lmfit import Minimizer, Parameters, report_fit
import lmfit as lf
f = open('data.txt','r')
lines=f.readlines()
n1=[]
n2=[]
n=[]
h=[]
for x in lines:
    x = x.strip()  # remove \n before splitting the line
    n1.append(x.split('\t')[0])
    n2.append(x.split('\t')[1])
    n.append(x.split('\t')[2])
    h.append(x.split('\t')[3])
f.close()
n1 = [float(i) for i in n1]
n2 = [float(i) for i in n2]
n = [float(i) for i in n]
h = [float(i) for i in h]
# convert a list into an array
n1 = np.array(n1)
n2 = np.array(n2)
n = np.array(n)
h = np.array(h)
def fith(params, n1, n2, n, h):
    a1 = params['p1']
    b1 = params['p2']
    a2 = params['p3']
    b2 = params['p4']
    model = (a1 + b1*n) * n1 + (a2 + b2*n) * n2
    return model - h
params = Parameters()
params.add('p1',value=1.0)
params.add('p2',value=1.0)
params.add('p3',value=1.0)
params.add('p4',value=1.0)
out = minimize(fith,params,args=(n1,n2,n,h))
print(out)
After running it, I got the following error:
Traceback (most recent call last):
File "E:\new model\calculate_H_v2.py", line 50, in
out = minimize(fith,params,args=(n1,n2,n,h))
File "E:\softwares\python\lib\site-packages\scipy\optimize_minimize.py", line 481, in minimize
return _minimize_bfgs(fun, x0, args, jac, callback, **options)
File "E:\softwares\python\lib\site-packages\scipy\optimize\optimize.py", line 943, in _minimize_bfgs
gfk = myfprime(x0)
File "E:\softwares\python\lib\site-packages\scipy\optimize\optimize.py", line 292, in function_wrapper
return function(*(wrapper_args + args))
File "E:\softwares\python\lib\site-packages\scipy\optimize\optimize.py", line 703, in approx_fprime
return _approx_fprime_helper(xk, f, epsilon, args=args)
File "E:\softwares\python\lib\site-packages\scipy\optimize\optimize.py", line 637, in _approx_fprime_helper
f0 = f(*((xk,) + args))
File "E:\softwares\python\lib\site-packages\scipy\optimize\optimize.py", line 292, in function_wrapper
return function(*(wrapper_args + args))
File "E:\new model\calculate_H_v2.py", line 35, in fith
a1 = params['p1']
IndexError: only integers, slices (:), ellipsis (...), numpy.newaxis (None) and integer or boolean arrays are valid indices
I could not figure out why I am getting this kind of error, even after reading through related questions. Can you help me out?
Thanks in advance.
Jing
I think the basic problem is that you are using scipy.optimize.minimize() instead of lmfit.minimize(). That is, you import * from scipy.optimize, then you import Minimizer from lmfit.
That is, use from lmfit import minimize, Parameters, report_fit, or use
mini = Minimizer(fith,params,args=(n1,n2,n,h))
out = mini.minimize()
should make your script use lmfit.minimize(), which looks to me like it should then work.
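For the first option, a minimal sketch of the changed lines (report_fit is lmfit's helper for printing the fitted parameters and fit statistics):

from lmfit import minimize, Parameters, report_fit

out = minimize(fith, params, args=(n1, n2, n, h))
report_fit(out)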
This is why import * is painful: you have a hard time telling where symbols came from.
As a side comment, you can probably use numpy.loadtxt() to simplify the reading of your data to numpy arrays.
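For example, a sketch assuming data.txt is tab-separated with the four columns in the order n1, n2, n, h:

n1, n2, n, h = np.loadtxt('data.txt', delimiter='\t', unpack=True)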
I am trying to speed up my functions called by scipy minimize. They were originally all lambdas, so I thought I'd replace them with numba @njit functions.
But I get this exception:
File "/blah/opt.py", line 142, in normalise
result = minimize(
File "/blah/venv/lib/python3.8/site-packages/scipy/optimize/_minimize.py", line 631, in minimize
return _minimize_slsqp(fun, x0, args, jac, bounds,
File "/blah/venv/lib/python3.8/site-packages/scipy/optimize/slsqp.py", line 375, in _minimize_slsqp
sf = _prepare_scalar_function(func, x, jac=jac, args=args, epsilon=eps,
File "/blah/venv/lib/python3.8/site-packages/scipy/optimize/optimize.py", line 261, in _prepare_scalar_function
sf = ScalarFunction(fun, x0, args, grad, hess,
File "/blah/venv/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 159, in __init__
self._update_grad()
File "/blah/venv/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 238, in _update_grad
self._update_grad_impl()
File "/blah/venv/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 149, in update_grad
self.g = grad_wrapped(self.x)
File "/blah/venv/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 146, in grad_wrapped
return np.atleast_1d(grad(np.copy(x), *args))
TypeError: <lambda>() takes 1 positional argument but 2 were given
Here is the code used:
@njit(cache=True)
def fn(x, weights):
    return np.sum((x - weights) ** 2)

@njit(cache=True)
def fn_cons(x):
    return np.sum(np.abs(x)) - 1

cons = ({'type': 'eq',
         'fun': fn_cons
         })
class TestSpeedup:
    def normalise(self, weights):
        result = minimize(
            fn,
            np.array(weights),
            args=(weights,),
            jac=lambda x: 2 * (x - weights),
            bounds=[(0, np.infty) for _ in weights],
            constraints=cons
        )
        minimum = result.x
        # return np.max([new_weights, np.zeros(new_weights.size)], axis=0) / np.sum(np.max([new_weights, np.zeros(new_weights.size)], axis=0))
        return minimum / np.sum(np.abs(minimum))
weights = np.array([ 1.04632843e+00, -6.89001783e-02, 2.17089646e-01, -2.52113073e-01, 4.19467585e-03])
test = TestSpeedup()
result = test.normalise(weights)
The functions are outside the class, so the first parameter is not self. So I'm not sure what I am missing here. Any advice?
The Jacobian function is called with the same arguments as the objective function, so you should rewrite the lambda like this, for example:
lambda x, w: 2 * (x - w)
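In context, only the jac argument changes; minimize passes the same args tuple to the Jacobian, so the extra parameter receives weights:

result = minimize(
    fn,
    np.array(weights),
    args=(weights,),
    jac=lambda x, w: 2 * (x - w),
    bounds=[(0, np.infty) for _ in weights],
    constraints=cons
)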
Alternatively, you can rewrite the objective function so that it calculates the Jacobian as well, specifying the parameter jac=True in the call to minimize():
@njit(cache=True)
def fn(x, weights):
    d = x - weights
    err = d @ d
    jac = 2 * d
    return err, jac
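With that version of fn, the call would look like this (a sketch; jac=True tells minimize that the objective returns both the value and its gradient):

result = minimize(
    fn,
    np.array(weights),
    args=(weights,),
    jac=True,
    bounds=[(0, np.infty) for _ in weights],
    constraints=cons
)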
I am trying to perform ZCA whitening on a TensorFlow Dataset. In order to do this, I am trying to extract my data from my Dataset as a Tensor, perform the whitening, then create another Dataset afterwards.
I followed the example here Get data set as numpy array from TFRecordDataset, excluding the point at which the Tensors were evaluated.
get_single_element is throwing this error:
Traceback (most recent call last):
File "/Users/takeoffs/Code/takeoffs_ai/test_pipeline_local.py", line 239, in <module>
validation_steps=val_steps, callbacks=callbacks)
File "/Users/takeoffs/Code/takeoffs_ai/venv/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py", line 780, in fit
steps_name='steps_per_epoch')
File "/Users/takeoffs/Code/takeoffs_ai/venv/lib/python3.7/site-packages/tensorflow/python/keras/engine/training_arrays.py", line 198, in model_iteration
val_iterator = _get_iterator(val_inputs, model._distribution_strategy)
File "/Users/takeoffs/Code/takeoffs_ai/venv/lib/python3.7/site-packages/tensorflow/python/keras/engine/training_arrays.py", line 517, in _get_iterator
return training_utils.get_iterator(inputs)
File "/Users/takeoffs/Code/takeoffs_ai/venv/lib/python3.7/site-packages/tensorflow/python/keras/engine/training_utils.py", line 1315, in get_iterator
initialize_iterator(iterator)
File "/Users/takeoffs/Code/takeoffs_ai/venv/lib/python3.7/site-packages/tensorflow/python/keras/engine/training_utils.py", line 1322, in initialize_iterator
K.get_session((init_op,)).run(init_op)
File "/Users/takeoffs/Code/takeoffs_ai/venv/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 950, in run
run_metadata_ptr)
File "/Users/takeoffs/Code/takeoffs_ai/venv/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1173, in _run
feed_dict_tensor, options, run_metadata)
File "/Users/takeoffs/Code/takeoffs_ai/venv/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1350, in _do_run
run_metadata)
File "/Users/takeoffs/Code/takeoffs_ai/venv/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1370, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Dataset had more than one element.
[[node DatasetToSingleElement_1 (defined at /test_pipeline_local.py:88) ]]
What's strange is that, according to the post linked above, batch() is supposed to return a Dataset with a single element.
Here is the code I'm running. I hard-coded my batch size to 20 for local testing purposes.
def _tfrec_ds(tfrec_path, restore_shape, dtype):
    """Reads in a tf record dataset

    Args:
        tfrec_path (str): Str for path to a tfrecord file
        restore_shape (tuple(int)): shape to transform data to
        dtype (TF type): datatype to cast to

    Returns:
        ds: a dataset
    """
    ds = tf.data.TFRecordDataset(tfrec_path)

    def parse(x):
        result = tf.parse_tensor(x, out_type=dtype)
        result = tf.reshape(result, restore_shape)
        result = tf.cast(result, tf.float32)
        return result

    ds = ds.map(parse, num_parallel_calls=tf.contrib.data.AUTOTUNE)
    return ds
def get_data_zip(in_dir,
                 num_samples_fname,
                 x_shape,
                 y_shape,
                 batch_size=5,
                 dtype=tf.float32,
                 X_fname="X.tfrec",
                 y_fname="y.tfrec",
                 augment=True):
    # Get number of samples
    with FileIO(in_dir + num_samples_fname, "r") as f:
        N = int(f.readlines()[0])

    # Load in TFRecordDatasets
    if in_dir[len(in_dir)-1] != "/":
        in_dir += "/"
    N = 20

    def zca(x):
        '''Returns tf Dataset X with ZCA whitened pixels.'''
        flat_x = tf.reshape(x, (N, (x_shape[0] * x_shape[1] * x_shape[2])))
        sigma = tf.tensordot(tf.transpose(flat_x), flat_x, axes=1) / 20
        u, s, _ = tf.linalg.svd(sigma)
        s_inv = 1. / tf.math.sqrt(s + 1e-6)
        a = tf.tensordot(u, s_inv, axes=1)
        principal_components = tf.tensordot(a, tf.transpose(u), axes=1)
        whitex = flat_x*principal_components
        batch_shape = [N] + list(x_shape)
        x = tf.reshape(whitex, batch_shape)
        return x

    X_path = in_dir + X_fname
    y_path = in_dir + y_fname
    X = _tfrec_ds(X_path, x_shape, dtype)
    y = _tfrec_ds(y_path, y_shape, dtype)

    buffer_size = 500
    shuffle_seed = 8

    # Perform ZCA whitening
    dataset = X.batch(N)
    whole_dataset_tensors = tf.data.experimental.get_single_element(dataset)
    whole_dataset_tensors = zca(whole_dataset_tensors)
    X = tf.data.Dataset.from_tensor_slices(whole_dataset_tensors)

    # Shuffle, repeat and batch
    Xy = tf.data.Dataset.zip((X, y))
    Xy = Xy.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=buffer_size, seed=shuffle_seed))\
        .batch(batch_size).prefetch(tf.contrib.data.AUTOTUNE)
    return Xy, N
I have some code that was working fine in Python 2. I need to translate it to Python 3.
There is one piece of it that I can't understand how to adapt.
Here is some code.
Function with error
def gauss((x, y), x0, y0, intens, sigma):
    return intens*numpy.exp(-(numpy.power(x-x0, 2)+numpy.power(y-y0, 2))/(2.*sigma**2)).ravel()
Caller function
def dofwhm(psfdata):
    x = numpy.arange(psfdata.shape[1])
    y = numpy.arange(psfdata.shape[0])
    x, y = numpy.meshgrid(x, y)
    popt, pcov = opt.curve_fit(gauss, (x, y), psfdata.ravel(), p0=[psfdata.shape[1]/2, psfdata.shape[0]/2, psfdata[psfdata.shape[1]/2, psfdata.shape[0]/2], 5.0])
    return 2.355*abs(popt[3])
The error that I get is
Traceback (most recent call last):
File "catalog.py", line 8, in <module>
import cutPsf
File "/Users/igor/GALPHAT/pypygalphat/preprocessingNew/cutPsf.py", line 9
def gauss((x, y), x0, y0, intens, sigma):
^
SyntaxError: invalid syntax
Can somebody help me adapt it for Python 3?
UPDATE:
Well, @hpaulj's answer seems to be right. I found that there is a routine to convert Python 2 code to Python 3 code. After running 2to3 -w cutPsf.py on the target file, I get the suggested solution from hpaulj. Unfortunately it results in the following error:
Traceback (most recent call last):
File "catalog.py", line 323, in <module>
cutPsf.run(tempDir+galaxy.psffile, outDirFits+galaxy.psffile)
File "/Users/igor/GALPHAT/pypygalphat_p3/preprocessingNew/cutPsf.py", line 63, in run
coeffwhm = dofwhm(newPsf)
File "/Users/igor/GALPHAT/pypygalphat_p3/preprocessingNew/cutPsf.py", line 20, in dofwhm
psfdata.shape[1]/2, psfdata.shape[0]/2, psfdata[psfdata.shape[1]/2, psfdata.shape[0]/2], 5.0])
IndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices
As said before, everything runs perfectly with Python 2...
Do the unpacking later:
def gauss(xy, x0, y0, intens, sigma):
    x, y = xy
    return intens*numpy.exp(-(numpy.power(x-x0, 2)+numpy.power(y-y0, 2))/(2.*sigma**2)).ravel()
I suggested this based on the typical scipy.optimize requirements, where the user-defined function is called with f(x, *args), and x is the variable (possibly an array) that is optimized. But curve_fit is different:
scipy.optimize.curve_fit(f, xdata, ydata, p0=None,...)
Where the f (your gauss?) satisfies:
ydata = f(xdata, *params) + eps
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
I guess my suggestion is still valid if xdata is the (x, y) tuple, or an array made from that, and ydata is psfdata.ravel(). The IndexError in your update is a separate Python 3 change: psfdata.shape[1]/2 is now true division and produces a float, which is not a valid index, so use integer division (//) in the p0 expressions.
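For example, a sketch of the Python 3 caller (cx and cy are just helper names for readability):

def dofwhm(psfdata):
    x = numpy.arange(psfdata.shape[1])
    y = numpy.arange(psfdata.shape[0])
    x, y = numpy.meshgrid(x, y)
    cx = psfdata.shape[1] // 2  # integer division keeps the index an int
    cy = psfdata.shape[0] // 2
    popt, pcov = opt.curve_fit(gauss, (x, y), psfdata.ravel(),
                               p0=[cx, cy, psfdata[cx, cy], 5.0])
    return 2.355*abs(popt[3])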
You can also keep x and y as separate parameters and do the unpacking with the * operator, in a small wrapper passed to curve_fit (note the // integer division so the p0 indices stay integers):

def gauss(x, y, x0, y0, intens, sigma):
    return intens*numpy.exp(-(numpy.power(x-x0, 2)+numpy.power(y-y0, 2))/(2.*sigma**2)).ravel()

def dofwhm(psfdata):
    x = numpy.arange(psfdata.shape[1])
    y = numpy.arange(psfdata.shape[0])
    x, y = numpy.meshgrid(x, y)
    popt, pcov = opt.curve_fit(lambda xy, *params: gauss(*xy, *params),
                               (x, y), psfdata.ravel(),
                               p0=[psfdata.shape[1]//2, psfdata.shape[0]//2,
                                   psfdata[psfdata.shape[1]//2, psfdata.shape[0]//2], 5.0])
    return 2.355*abs(popt[3])
I'm trying to solve a simple chemical network, A->B (reaction rate k1) and A1->B (reaction rate k2), with Bayesian inference. My hope is to get a sensitivity analysis of k1 and k2. If A, A1 and B are my constant variables, the only logical thing would be that if, for example, k1 decreases, k2 should increase by some proportional amount, and vice versa. But I am having some trouble with ODEs in pymc3. So here is my attempt:
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint, solve_ivp
import seaborn
import pymc3 as pm
import theano.tensor as T
from theano.compile.ops import as_op
from sys import exit
time = 10
Nt = 11
tt = np.linspace(0,time, Nt)
y0 = [1,2,0]
k1, k2 = 1, 1
#Actual Solution of the Differential Equation(Used to generate data)
def real(t, c):
    da_dt = -k1*c[0]
    da1_dt = -k2*c[1]
    db_dt = k1*c[0] + k2*c[1]
    return da_dt, da1_dt, db_dt
c_est = solve_ivp(real, t_span = [0,time], t_eval = tt, y0 = y0)
#Method For Solving the ODE
def lv(xdata, k1=1, k2=1):
    def equat(c, t):
        da_dt = -k1*c[0]
        da1_dt = -k2*c[1]
        db_dt = k1*c[0] + k2*c[1]
        return da_dt, da1_dt, db_dt
    Y, dict = odeint(equat, y0, xdata, full_output=True)
    return Y
#Generating Data for Bayesian Inference
k1, k2 = 1, 1
ydata = c_est.y
# Adding some error to the ydata points
yerror = 10*np.random.rand(Nt)
ydata += np.random.normal(0.0, np.sqrt(yerror))
ydata = np.ravel(ydata)
@as_op(itypes=[T.dscalar, T.dscalar], otypes=[T.dvector])
def func(al, be):
    Q = lv(tt, k1=al, k2=be)
    return np.ravel(Q)
# Number of Samples and Initial Conditions
nsample = 5000
y0 = 1.0
sd = 0.2
# Model for Bayesian Inference
model = pm.Model()
with model:
    # Priors for unknown model parameters
    k1 = pm.HalfNormal('k1', sd=sd)
    k2 = pm.HalfNormal('k2', sd=sd)
    # Expected value of outcome
    mu = func(k1, k2)
    # Likelihood (sampling distribution) of observations
    Y_obs = pm.Normal('Y_obs', mu=mu, sd=yerror, observed=y_data)
    trace = pm.sample(nsample, nchains=1)

pm.traceplot(trace)
plt.show()
But it doesn't "loop" through the equat function. Output error:
Traceback (most recent call last):
File "<ipython-input-16-14ca425a8735>", line 1, in <module>
runfile('/folder/code.py', wdir='/folder')
File "/anaconda3/lib/python3.7/site-packages/spyder_kernels/customize/spydercustomize.py", line 786, in runfile
execfile(filename, namespace)
File "/anaconda3/lib/python3.7/site-packages/spyder_kernels/customize/spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "/code.py", line 77, in <module>
mu = func(k1,k2)
File "/anaconda3/lib/python3.7/site-packages/theano/gof/op.py", line 674, in __call__
required = thunk()
File "/anaconda3/lib/python3.7/site-packages/theano/gof/op.py", line 892, in rval
r = p(n, [x[0] for x in i], o)
File "/anaconda3/lib/python3.7/site-packages/theano/compile/ops.py", line 555, in perform
outs = self.__fn(*inputs)
File "/code.py", line 60, in func
Q = lv(tt, k1=al, k2=be)
File "/code.py", line 42, in lv
Y, dict = odeint(equat,y0,xdata,full_output=True)
File "/anaconda3/lib/python3.7/site-packages/scipy/integrate/odepack.py", line 233, in odeint
int(bool(tfirst)))
File "/code.py", line 39, in equat
da1_dt = -k2*c[1]
IndexError: index 1 is out of bounds for axis 0 with size 1
I'm going nuts here. :( I don't even know if I am on the right path.
Edit: corrected that, but now it shows another error.
If anyone else has difficulty here, I solved it!
from scipy.integrate import odeint, solve_ivp
import numpy as np
import matplotlib.pyplot as plt
from theano.compile.ops import as_op
import theano.tensor as T
import pymc3 as pm
import copy
from sys import exit
time = 10
Nt = 11
tt = np.linspace(0,time, Nt+1)
y0 = [1,2,0]
k1, k2 = 1, 1
def real_equat(t, c):
    da_dt = -k1*c[0]
    da1_dt = -k2*c[1]
    db_dt = k1*c[0] + k2*c[1]
    return da_dt, da1_dt, db_dt
z = solve_ivp(real_equat, t_span=[0,time], t_eval= tt, y0 = y0)
def lv(xdata, k1=k1, k2=k2):
    def equat(c, t):
        da_dt = -k1*c[0]
        da1_dt = -k2*c[1]
        db_dt = k1*c[0] + k2*c[1]
        return da_dt, da1_dt, db_dt
    Y, dict = odeint(equat, y0, tt, full_output=True)
    return Y
a = z.y
ydata = copy.copy(a)
yerror = 10*np.random.rand(Nt+1)
ydata += np.random.normal(0.0, np.sqrt(yerror))
ydata = np.ravel(ydata)
@as_op(itypes=[T.dscalar, T.dscalar], otypes=[T.dvector])
def func(al, be):
    Q = lv(tt, k1=al, k2=be)
    return np.ravel(Q)
niter = 10
model = pm.Model()
with model:
    # Priors for unknown model parameters
    k1 = pm.Uniform('k1', upper=1.2, lower=0.8)
    k2 = pm.Uniform('k2', upper=1.2, lower=0.8)
    # Expected value of outcome
    mu = func(k1, k2)
    # Likelihood (sampling distribution) of observations
    Y_obs = pm.Normal('Y_obs', mu=mu, sd=0.2, observed=ydata)
    trace = pm.sample(niter=niter, nchains=4)
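To inspect the resulting posteriors, the plotting calls from the original attempt still apply:

pm.traceplot(trace)
plt.show()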
I cannot calculate the gradient if I use theano.tensor.fft.rfft. Below is a minimal code example which reproduces the problem:
import theano.tensor as tt
import theano.tensor.fft
def fft_real(input):
    a = input.dimshuffle('x', 0)
    spec = tt.fft.rfft(a, norm='ortho')
    return spec[0, :, 0]
inp = tt.vector('real_n', dtype='float64')
f = fft_real(inp)
cost = f.sum()
g = tt.grad(cost, inp)
It gives an error:
File "C:\Anaconda3\envs\theano36\lib\site-packages\theano\tensor\elemwise.py", line 197, in __init__
(input_broadcastable, new_order))
ValueError: ('You cannot drop a non-broadcastable dimension.', ((False, False), [1]))
I am using Theano version 1.0.2.