This is an assignment in the Udacity linear algebra refresher course. A solution is available for the assignment, but I am trying to solve it in my own way. However, I am getting an error while finding the angle between two vectors, v1 and v2.
There are two problems to solve in this program. Even though the "angle" function is the same in both of the programs below, the two programs give different output.
import math
from decimal import Decimal, getcontext

getcontext().prec = 30

class Vector(object):
    def __init__(self, coordinates):
        try:
            if not coordinates:
                raise ValueError
                # if coordinates is not passed then it will raise ValueError
            self.coordinates = tuple([Decimal(x) for x in coordinates])
            # Outside class: Vector.coordinates will print the vector in tuple form
            # Inside class:  self.coordinates will print the vector in tuple form
            self.dimension = len(coordinates)
            # Outside class: Vector.dimension will print the vector's dimension/size
            # Inside class:  self.dimension will print the vector's dimension/size
        except ValueError:
            raise ValueError('The coordinates must be non empty')
        except TypeError:
            raise TypeError('The coordinates must be iterable')

    def __str__(self):
        return 'Vector:{}'.format(self.coordinates)

    def __eq__(self, v):
        return self.coordinates == v.coordinates

    def add(self, v):
        coordinates = []
        for i in range(0, self.dimension):
            i = self.coordinates[i] + v.coordinates[i]
            coordinates.append(i)
        return coordinates

    def mul(self, v):
        coordinates = []
        for i in range(0, self.dimension):
            i = self.coordinates[i] * v.coordinates[i]
            coordinates.append(i)
        return coordinates

    def sub(self, v):
        coordinates = []
        for i in range(0, self.dimension):
            i = self.coordinates[i] - v.coordinates[i]
            coordinates.append(i)
        return coordinates

    def scal_mul(self, s):
        coordinates = []
        for i in self.coordinates:
            i = i * Decimal(s)
            coordinates.append(i)
        return coordinates

    def magnitude(self):
        mag = 0
        for i in self.coordinates:
            i = i * i
            mag = mag + i
        return math.sqrt(mag)

    def magnitude1(self):
        coordinate_square = [i * i for i in self.coordinates]
        return math.sqrt(sum(coordinate_square))

    # def normalize(self):
    #     try:
    #         recip = Decimal(1)/self.magnitude()
    #         return Vector(self.scal_mul(recip))
    #     except ZeroDivisionError:
    #         raise Exception("Can not Normalize Zero Vector")

    def normalize(self):
        try:
            recip = 1 / self.magnitude()
            return self.scal_mul(recip)
        except ZeroDivisionError:
            raise Exception("Can not Normalize Zero Vector")

    def dot_product(self, v):
        mul = self.mul(v)
        return sum(mul)

    def dot_product2(self, v):
        self.mul = [x * y for x, y in zip(self.coordinates, v.coordinates)]
        return sum(self.mul)

    # ----------------- angle function is giving wrong answer -------------
    def angle_rad(self, v):
        norm = self.normalize()
        angle = 1 / math.cos(norm.dot_product2(v))
        return angle

    def angle(self, v, in_degree=False):
        nrm_self = self.normalize()
        nrm_v = v.normalize()
        angle_rad = math.acos(nrm_self.dot_product2(nrm_v))
        if in_degree:
            angle_in_degree = angle_rad * 180. / math.pi
            return angle_in_degree
        else:
            return angle_rad


v1 = Vector([7.887, 4.138])
v2 = Vector([-8.802, 6.776])
print(v1.angle(v2))

v1 = Vector([-7.579, -7.88])
v2 = Vector([22.737, 23.64])
v2.angle(v1)
This code gives the following error:
AttributeError Traceback (most recent call last)
<ipython-input-44-2087e4f0ca26> in <module>()
101 v1 = Vector([7.887,4.138])
102 v2 = Vector([-8.802,6.776])
--> 103 print(v1.angle(v2))
104 v1 = Vector([-7.579,-7.88])
105 v2 = Vector([22.737,23.64])
<ipython-input-44-2087e4f0ca26> in angle(self, v, in_degree)
92 nrm_self = self.normalize()
93 nrm_v = v.normalize()
---> 94 angle_rad = math.acos(nrm_self.dot_product2(nrm_v))
95 if in_degree:
96 angle_in_degree = angle_rad * 180./math.pi
AttributeError: 'list' object has no attribute 'dot_product2'
And here is another program with exactly the same angle function:
import math

class Vector(object):
    def __init__(self, coordinates):
        try:
            if not coordinates:
                raise ValueError
                # if coordinates is not passed then it will raise ValueError
            self.coordinates = tuple(coordinates)
            # Outside class: Vector.coordinates will print the vector in tuple form
            # Inside class:  self.coordinates will print the vector in tuple form
            self.dimension = len(coordinates)
            # Outside class: Vector.dimension will print the vector's dimension/size
            # Inside class:  self.dimension will print the vector's dimension/size
        except ValueError:
            raise ValueError('The coordinates must be non empty')
        except TypeError:
            raise TypeError('The coordinates must be iterable')

    def __str__(self):
        return 'Vector:{}'.format(self.coordinates)

    def __eq__(self, v):
        return self.coordinates == v.coordinates

    def add(self, v):
        coordinates = []
        for i in range(0, self.dimension):
            i = self.coordinates[i] + v.coordinates[i]
            coordinates.append(i)
        return coordinates

    def mul(self, v):
        coordinates = []
        for i in range(0, self.dimension):
            i = self.coordinates[i] * v.coordinates[i]
            coordinates.append(i)
        return coordinates

    def sub(self, v):
        coordinates = []
        for i in range(0, self.dimension):
            i = self.coordinates[i] - v.coordinates[i]
            coordinates.append(i)
        return coordinates

    def scal_mul(self, s):
        coordinates = []
        for i in self.coordinates:
            i = i * s
            coordinates.append(i)
        return coordinates

    def magnitude(self):
        mag = 0
        for i in self.coordinates:
            i = i * i
            mag = mag + i
        return math.sqrt(mag)

    def magnitude1(self):
        coordinate_square = [i * i for i in self.coordinates]
        return math.sqrt(sum(coordinate_square))

    def normalize(self):
        try:
            recip = 1 / self.magnitude()
            return Vector(self.scal_mul(recip))
        except ZeroDivisionError:
            raise Exception("Can not Normalize Zero Vector")

    def dot_product(self, v):
        mul = self.mul(v)
        return sum(mul)

    def dot_product2(self, v):
        self.mul = [x * y for x, y in zip(self.coordinates, v.coordinates)]
        return sum(self.mul)

    # ----------------- angle function is giving wrong answer -------------
    def angle_rad(self, v):
        norm = self.normalize()
        angle = 1 / math.cos(norm.dot_product2(v))
        return angle

    def angle(self, v, in_degree=False):
        nrm_self = self.normalize()
        nrm_v = v.normalize()
        angle_rad = math.acos(nrm_self.dot_product2(nrm_v))
        if in_degree:
            angle_in_degree = angle_rad * 180. / math.pi
            return angle_in_degree
        else:
            return angle_rad


v1 = Vector([7.887, 4.138])
v2 = Vector([-8.802, 6.776])
print(v1.angle(v2))

v1 = Vector([-7.579, -7.88])
v2 = Vector([22.737, 23.64])
v2.angle(v1)
It gives the following output and error:
2.0023426999774925
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-214-2e3bee12967a> in <module>()
95 v1 = Vector([-7.579,-7.88])
96 v2 = Vector([22.737,23.64])
---> 97 v2.angle(v1)
98
<ipython-input-214-2e3bee12967a> in angle(self, v, in_degree)
82 nrm_self = self.normalize()
83 nrm_v = v.normalize()
---> 84 angle_rad = math.acos(nrm_self.dot_product2(nrm_v))
85 if in_degree:
86 angle_in_degree = angle_rad * 180./math.pi
ValueError: math domain error
As you can see, both programs have the same angle function.
The problem with your first program is that the normalize method returns a plain list of coordinates, return self.scal_mul(recip), and doesn't convert that list into a Vector object the way the second program does with return Vector(self.scal_mul(recip)). When you call nrm_self.dot_product2(nrm_v), the object nrm_self is a list, not a Vector, so it has no dot_product2 method. You need to add an explicit call to the constructor Vector(), as in the second program, to be able to invoke methods on nrm_self.
The math domain error is thrown because dot_product2 returns a value whose magnitude is slightly greater than 1 (on the order of 1.0000000000000002), and the inverse cosine acos(x) is only defined for arguments between -1 and 1. This is due to floating-point error in the normalization and can be corrected by clamping the argument of acos to the interval [-1, 1].
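A minimal sketch of both fixes (not the course's reference solution), to be dropped into the first program's Vector class:
    def normalize(self):
        try:
            recip = 1 / self.magnitude()
            return Vector(self.scal_mul(recip))  # wrap in Vector so methods stay available
        except ZeroDivisionError:
            raise Exception("Can not Normalize Zero Vector")

    def angle(self, v, in_degree=False):
        nrm_self = self.normalize()
        nrm_v = v.normalize()
        dot = float(nrm_self.dot_product2(nrm_v))
        dot = max(-1.0, min(1.0, dot))  # clamp against floating-point overshoot such as 1.0000000000000002
        angle_rad = math.acos(dot)
        if in_degree:
            return angle_rad * 180. / math.pi
        return angle_rad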
I am trying to implement entmax-alpha as described here.
Here is the code.
import jax
import jax.numpy as jnp
from jax import custom_jvp
from jax import jit
from jax import lax
from jax import vmap


@jax.partial(jit, static_argnums=(2,))
def p_tau(z, tau, alpha=1.5):
    return jnp.clip((alpha - 1) * z - tau, a_min=0) ** (1 / (alpha - 1))


@jit
def get_tau(tau, tau_max, tau_min, z_value):
    return lax.cond(z_value < 1,
                    lambda _: (tau, tau_min),
                    lambda _: (tau_max, tau),
                    operand=None)


@jit
def body(kwargs, x):
    tau_min = kwargs['tau_min']
    tau_max = kwargs['tau_max']
    z = kwargs['z']
    alpha = kwargs['alpha']

    tau = (tau_min + tau_max) / 2
    z_value = p_tau(z, tau, alpha).sum()
    taus = get_tau(tau, tau_max, tau_min, z_value)
    tau_max, tau_min = taus[0], taus[1]
    return {'tau_min': tau_min, 'tau_max': tau_max, 'z': z, 'alpha': alpha}, None


@jax.partial(jit, static_argnums=(1, 2,))
def map_row(z_input, alpha, T):
    z = (alpha - 1) * z_input
    tau_min, tau_max = jnp.min(z) - 1, jnp.max(z) - z.shape[0] ** (1 - alpha)
    result, _ = lax.scan(body, {'tau_min': tau_min, 'tau_max': tau_max, 'z': z, 'alpha': alpha}, xs=None,
                         length=T)
    tau = (result['tau_max'] + result['tau_min']) / 2
    result = p_tau(z, tau, alpha)
    return result / result.sum()


@jax.partial(custom_jvp, nondiff_argnums=(1, 2, 3,))
def entmax(input, axis=-1, alpha=1.5, T=10):
    reduce_length = input.shape[axis]
    input = jnp.swapaxes(input, -1, axis)
    input = input.reshape(input.size / reduce_length, reduce_length)
    result = vmap(jax.partial(map_row, alpha=alpha, T=T), 0)(input)
    return jnp.swapaxes(result, -1, axis)


@jax.partial(jit, static_argnums=(1, 2,))
def _entmax_jvp_impl(axis, alpha, T, primals, tangents):
    input = primals[0]
    Y = entmax(input, axis, alpha, T)

    gppr = Y ** (2 - alpha)
    grad_output = tangents[0]
    dX = grad_output * gppr
    q = dX.sum(axis=axis) / gppr.sum(axis=axis)
    q = jnp.expand_dims(q, axis=axis)
    dX -= q * gppr
    return Y, dX


@entmax.defjvp
def entmax_jvp(axis, alpha, T, primals, tangents):
    return _entmax_jvp_impl(axis, alpha, T, primals, tangents)
When I call it with the following code:
import numpy as np
from jax import value_and_grad

input = jnp.array(np.random.randn(64, 10))
weight = jnp.array(np.random.randn(64, 10))

def toy(input, weight):
    return (weight * entmax(input, axis=-1, alpha=1.5, T=20)).sum()

value_and_grad(toy)(input, weight)
I got the following error.
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-3a62e54c67d2> in <module>()
7 return (weight*entmax(input, axis=-1, alpha=1.5, T=20)).sum()
8
----> 9 value_and_grad(toy)(input, weight)
35 frames
<ipython-input-1-d85b1daec668> in entmax(input, axis, alpha, T)
49 #jax.partial(custom_jvp, nondiff_argnums=(1, 2, 3,))
50 def entmax(input, axis=-1, alpha=1.5, T=10):
---> 51 reduce_length = input.shape[axis]
52 input = jnp.swapaxes(input, -1, axis)
53 input = input.reshape(input.size / reduce_length, reduce_length)
TypeError: tuple indices must be integers or slices, not DynamicJaxprTracer
It seems to be always connected to the reshape operations. I am not sure why this happens, and any help will be really appreciated.
To recreate the problem, here is the colab notebook
Thanks a lot.
The error comes from the fact that you are attempting to index a Python tuple with a traced quantity, axis. You can fix this error by making axis a static argument:
@jax.partial(jit, static_argnums=(0, 1, 2,))
def _entmax_jvp_impl(axis, alpha, T, primals, tangents):
    ...
Unfortunately, this uncovers another problem: p_tau declares that the alpha parameter is static, but body() calls this with a traced quantity. This quantity cannot be easily marked static in body because it is passed within a dictionary of parameters that contains the input that is being traced.
To fix this, you'll have to rewrite your function signatures, carefully marking in each one which inputs are static and which are not, and making sure the two do not mix across the layers of function calls.
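The rule at work can be shown in isolation (a minimal illustration, not the full entmax rewrite; the jax.partial used above is just functools.partial re-exported by older JAX versions):
import jax
import jax.numpy as jnp
from functools import partial

@partial(jax.jit, static_argnums=(1,))
def mean_along_axis(x, axis):
    # axis is a static Python int here, so using it to index the Python tuple
    # x.shape is fine; as a traced quantity it would raise the same
    # "tuple indices must be integers ... not DynamicJaxprTracer" error.
    n = x.shape[axis]
    return x.sum(axis=axis) / n

print(mean_along_axis(jnp.ones((64, 10)), 1))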
I am training a neural network with three different output predictions. To compute the loss for one of the outputs I need one of the inputs that is passed into the network. I am not able to access it because the training data is fed into the network by a Keras data generator object. Is there any workaround for this problem?
This is the generator class that feeds data into the model:
class DataGenerator(tf.keras.utils.Sequence):
    def __init__(self, list_ID, centers, sizes, batch_size=2, dims=(512, 512), n_channels=3, n_classes=10, shuffle=True) -> None:
        assert len(list_ID) == len(centers)
        self.dims = dims
        self.batch_size = batch_size
        self.list_ID = list_ID
        self.centers = centers
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.sizes = sizes
        self.on_epoch_end()
        self.mask = None

    def __len__(self):
        return int(np.floor(len(self.list_ID) / self.batch_size))

    def on_epoch_end(self):
        self.indexes = np.arange(len(self.list_ID))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __getitem__(self, index):
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        list_ID_temp = [self.list_ID[k] for k in indexes]
        centers_temp = [self.centers[k] for k in indexes]
        sizes_temp = [self.sizes[k] for k in indexes]
        X, y = self.__datageneration(list_ID_temp, centers_temp, sizes_temp)
        return X, y

    def __datageneration(self, list_ID_temp, centers_temp, sizes_temp):
        X = np.empty((self.batch_size, *self.dims, self.n_channels))
        Y_center = np.empty((self.batch_size, 128, 128, 1))
        Y_dimension = np.empty((self.batch_size, 128, 128, 2))
        Y_offset = np.empty((self.batch_size, 128, 128, 2))
        self.mask = np.empty((self.batch_size, 128, 128, 1))
        for i, ID in enumerate(list_ID_temp):
            image = cv2.imread(path + '/' + ID) / 255.0
            heat_center, self.mask[i,] = gaussian_2d(centers_temp[i], image.shape)
            '''Here I tried to save the mask, which is what I need,
            as an attribute of the data generator, but when accessed by the loss function
            the value is just None, which is what I initialized it to in __init__'''
            heat_size, heat_off = size_off_heatmap(sizes_temp[i], centers_temp[i], image.shape)
            image = cv2.resize(image, (512, 512))
            X[i,] = image
            Y_center[i,] = heat_center
            Y_dimension[i,] = heat_size
            Y_offset[i,] = heat_off
        return (X, {'center_output': Y_center, 'size_output': Y_dimension, 'offset_output': Y_offset})
This is the generator class I implemented. I need the mask, which I tried to store as an attribute of the data generator object (see the comment in the code above), but it did not work. For reference I will also include the function that returns the mask and the loss function that requires the mask.
Function returning mask
def gaussian_2d(centers, img_shape):
    heatmap = []
    y_index = np.tile(np.arange(128), (128, 1))
    mask = np.zeros((128, 128, 1))
    width = img_shape[1]
    height = img_shape[0]
    for x_o, y_o in centers:
        x = int(x_o / width * 128)
        y = int(y_o / height * 128)
        mask[y, x] = 1
        gauss = np.exp(-((y_index.T - y) ** 2 + (y_index - x) ** 2) / 2 * 0.2 ** 2)
        heatmap.append(gauss)
    if len(heatmap) > 1:
        heatmap = np.stack(heatmap)
        heatmap = np.max(heatmap, axis=0)
    else:
        heatmap = np.array(heatmap)
        heatmap = heatmap.reshape((128, 128, 1))
    return heatmap, mask
Loss function
def final_loss(mask):
    def l1_loss(y_true, y_pred):
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(y_pred, tf.float32)
        n = tf.reduce_sum(tf.cast(tf.equal(mask, 1.0), dtype=tf.float32))
        tot_loss = tf.reduce_sum(tf.abs(y_pred - y_true))
        if tf.greater(n, 0):
            loss = tot_loss / (n)
        else:
            loss = tot_loss
        return loss
    return l1_loss
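For completeness, this is roughly how I wire the loss up when compiling (simplified; the optimizer and the other two losses are placeholders). The mask is captured once at compile time, when it is still the None set in __init__:
gen = DataGenerator(list_ID, centers, sizes)
model.compile(optimizer='adam',
              loss={'center_output': 'mse',
                    'size_output': final_loss(gen.mask),    # gen.mask is still None here
                    'offset_output': final_loss(gen.mask)})
model.fit(gen, epochs=10)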
The error shown is below:
Epoch 1/10
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-27-74a28b075f52> in <module>()
----> 1 model.fit(gen,epochs=10,verbose=1,callbacks=Callback(patience=4))
9 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
975 except Exception as e: # pylint:disable=broad-except
976 if hasattr(e, "ag_error_metadata"):
--> 977 raise e.ag_error_metadata.to_exception(e)
978 else:
979 raise
ValueError: in user code:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:805 train_function *
return step_function(self, iterator)
<ipython-input-24-c45fe131feb7>:5 l1_loss *
n = tf.reduce_sum(tf.cast(tf.equal(mask, 1.0),dtype=tf.float32))
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper **
return target(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:1679 equal
return gen_math_ops.equal(x, y, name=name)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_math_ops.py:3179 equal
name=name)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:540 _apply_op_helper
(input_name, err))
ValueError: Tried to convert 'x' to a tensor and failed. Error: None values not supported.
I have a task to encrypt a dataset X. I created a class that creates a matrix and then uses it for multiplication, but I'd also like to check the invertibility of this matrix. My code is:
class Preparer(TransformerMixin, BaseEstimator):
    def fit(self, X, y=None, random_state=None):
        if random_state:
            np.random.seed(random_state)
        X_d = X.shape[1]
        Z = np.random.normal(size=(X_d, X_d))
        self.Z = Z
        self.X = X
        return self

    def check(self):
        ch = self.Z
        Z_rev = np.linalg.inv(ch)
        if np.allclose(np.dot(ch, Z_rev), np.eye(ch.shape[0])) is True:
            ans = "Correct, it's alive"
        else:
            ans = "Incorrect"
        return ans

    def transform(self, X, y=None):
        return X.dot(self.Z)
Then I use it, but I get an error:
X_new = Preparer().fit_transform(X, random_state=12345)
print(Preparer().check())
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-49-970f16a02444> in <module>
1 X_new = Preparer().fit_transform(X, random_state=12345)
----> 2 check_m = Preparer().check()
3 print(Preparer().check())
4 display(X.shape, X_new.shape)
<ipython-input-48-325b53b325ec> in check(self, y)
15
16 def check(self, y=None):
---> 17 ch = self.Z
18 Z_rev = np.linalg.inv(ch)
19 if np.allclose(np.dot(ch, Z_rev), np.eye(ch.shape[0])) is True:
AttributeError: 'Preparer' object has no attribute 'Z'
So could you tell me how I can implement the check operation? I'm just starting to learn about classes and I can't work out the right call inside the class.
You need to define the class attributes inside the __init__ method.
For more details check this link.
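A minimal sketch of that suggestion (the None sentinel and the message strings are mine). Note also that Preparer().check() builds a brand-new, unfitted object, so check() has to be called on the same instance that was fitted:
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin

class Preparer(TransformerMixin, BaseEstimator):
    def __init__(self):
        self.Z = None  # defined in __init__ so the attribute always exists

    def fit(self, X, y=None, random_state=None):
        if random_state:
            np.random.seed(random_state)
        self.Z = np.random.normal(size=(X.shape[1], X.shape[1]))
        return self

    def check(self):
        if self.Z is None:
            return "Call fit() before check()"
        Z_rev = np.linalg.inv(self.Z)
        return "Correct, it's alive" if np.allclose(self.Z @ Z_rev, np.eye(self.Z.shape[0])) else "Incorrect"

    def transform(self, X, y=None):
        return X.dot(self.Z)

X = np.random.rand(100, 4)  # stand-in for the real dataset
prep = Preparer()
X_new = prep.fit_transform(X, random_state=12345)
print(prep.check())  # called on the fitted instance, so self.Z exists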
I am trying to generate the solutions to PCA weights for some (yield-curve) market data. However, I am getting an error from my scipy.optimize.minimize call.
The main problem is that the arguments seem to be read into the minimization function (error_sum) in the wrong order.
I looked up the generic form here, but it doesn't work when I apply it to my code:
Scipy Minimize - Unable to minimize objective function
import scipy as sc
import scipy.optimize as optimize
from scipy.optimize import minimize

w1 = 1.0
w2 = 1.0
w3 = 1.0

row_C = np.zeros(len(df_.columns))  # initialize current row as zero
row_T = df_.iloc[-1].values  # get the target row, which we have set as the last row of the pandas dataframe

row_c = np.array([-0.35865725, 0.52793819, 0.70654759, -0.28909144, 1.08467752, 0.91287324])
row_t = np.array([1.7971, 2.5756, 2.2005, 1.4966, 1.45, 1.8022])

def error_sum(row_c, row_t, params):  # row_c is estimated and row_t is target
    w1 = params[0]
    w2 = params[1]
    w2 = params[2]
    if len(row_c) != len(row_t):
        return print('error where x and y points are not same length')
    for cnt in range(len(row_c)):
        row_c[cnt] = w1 * row1[cnt] + w2 * row2[cnt] + w3 * row3[cnt]
    return np.sum(np.abs(row_c - row_t))

for cnt in range(len(df_.columns)):  # loop to calculate the PCA-based moves
    row_c[cnt] = w1 * row1[cnt] + w2 * row2[cnt] + w3 * row3[cnt]

print(np.sum(np.abs(row_c - row_t)))  # this is to get the sum of absolute difference errors
print(error_sum(row_c, row_t, x0))

x0 = np.array([1.0, 1.0, 1.0])  # parameters to optimize
bnds = ((-10.0, 10.0), (-10.0, 10.0), (-10.0, 10.0))  # boundary conditions of x0 parameter set
options = {'maxiter': 100}

res = minimize(error_sum, x0, (row_c, row_t), bounds=bnds, method='nelder-mead', options={'xtol': 1e-8, 'disp': True})
The error message is below:
error where x and y points are not same length
TypeError Traceback (most recent call last)
<ipython-input-158-8c50b421e58a> in <module>()
32 options = {'maxiter': 100}
33
---> 34 res = minimize(error_sum, x0 ,(row_c, row_t), bounds = bnds, method='nelder-mead', options={'xtol': 1e-8, 'disp': True})
C:\ProgramData\Anaconda3\lib\site-packages\scipy\optimize\_minimize.py in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
473 callback=callback, **options)
474 elif meth == 'nelder-mead':
--> 475 return _minimize_neldermead(fun, x0, args, callback, **options)
476 elif meth == 'powell':
477 return _minimize_powell(fun, x0, args, callback, **options)
C:\ProgramData\Anaconda3\lib\site-packages\scipy\optimize\optimize.py in _minimize_neldermead(func, x0, args, callback, maxiter, maxfev, disp, return_all, initial_simplex, xatol, fatol, **unknown_options)
549 doshrink = 0
550
--> 551 if fxr < fsim[0]:
552 xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
553 fxe = func(xe)
TypeError: '>' not supported between instances of 'float' and 'NoneType'
Try changing the order of the arguments in the definition of error_sum to
def error_sum(params, row_c, row_t)
if you want params to be the quantity that is optimized, and call the function like this:
minimize(error_sum, x0, args=(row_c, row_t), bounds=bnds, method='nelder-mead', options={'xtol': 1e-8, 'disp': True})
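A hedged sketch of that change (row1, row2, row3 stand for the three PCA component rows from your notebook, which are not defined in the snippet above; note the original body also assigns w2 twice where w3 was presumably intended):
def error_sum(params, row_c, row_t):
    # params comes first so that minimize optimizes it; the data rows arrive via args
    # (row_c is kept in the signature only to mirror the original call)
    w1, w2, w3 = params                       # the original set w2 = params[2] by mistake
    est = w1 * row1 + w2 * row2 + w3 * row3   # row1/row2/row3: PCA rows from the notebook
    return np.sum(np.abs(est - row_t))

x0 = np.array([1.0, 1.0, 1.0])
res = minimize(error_sum, x0, args=(row_c, row_t),
               method='nelder-mead', options={'xtol': 1e-8, 'disp': True})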
I am a newbie with pyMC and I am still not able to construct the structure of my MCMC with pyMC. I would like to establish a chain, and I am confused about how to define my parameters and log-likelihood function together. My chi-squared function is given by:
\chi^2 = \sum_i (d_i - m_i(\theta))^2 / \sigma_i^2
where d_i and \sigma_i are the observational data and corresponding errors respectively, and m is the model with four free parameters; the parameters are non-linear.
The priors for X and Y are uniform, like:
import pymc as pm
import numpy as np
import math
import random

@pm.stochastic(dtype=np.float, observed=False, trace=True)
def Xpos(value=1900, x_l=1851, x_h=1962):
    """The probable region of the position of halo centre"""
    def logp(value, x_l, x_h):
        if (value > x_h) or (value < x_l):
            return -np.inf
        else:
            return -np.log(x_h - x_l + 1)
    def random(x_l, x_h):
        return np.round((x_h - x_l) * random.random()) + x_l

@pm.stochastic(dtype=np.float, observed=False, trace=True)
def Ypos(value=1900, y_l=1851, y_h=1962):
    """The probable region of the position of halo centre"""
    def logp(value, y_l, y_h):
        if (value > y_h) or (value < y_l):
            return -np.inf
        else:
            return -np.log(y_h - y_l + 1)
    def random(y_l, y_h):
        return np.round((y_h - y_l) * random.random()) + y_l
but the priors for M and C are given as follows: M has an exponential prior with scale 10^15, and C has a lognormal prior whose mean is computed via
\bar{c} = \frac{5.26}{1 + z_h} \left(\frac{M_{200}}{10^{14}}\right)^{-0.1}
(based on Neto et al. 2007). For M and C, the priors should look like this:
M = math.pow(10, 15) * pm.Exponential('mass', beta=math.pow(10, 15))

@pm.stochastic(dtype=np.float, observed=False, trace=True)
def concentration(value=4, zh, M200):
    """logp for concentration parameter"""
    def logp(value=4., zh, M200):
        if value > 0:
            x = np.linspace(math.pow(10, 13), math.pow(10, 16), 200)
            prob = expon.pdf(x, loc=0, scale=math.pow(10, 15))
            conc = [5.26 / (1. + zh) * math.pow(x[i] / math.pow(10, 14), -0.1) for i in range(len(x))]
            mu_c = 0
            for i in range(len(x)):
                mu_c += prob[i] * conc[i] / sum(prob)
            if M200 < pow(10, 15):
                tau = 1. / (0.09 * 0.09)
            else:
                tau = 1. / (0.06 * 0.06)
            return pm.lognormal_like(value, mu_c, tau)
        else:
            return -np.inf
    def random(mu_c, tau):
        return np.random.lognormal(mu_c, tau, 1)
The parameter z is also a constant in the C prior. I am wondering how I should define my likelihood; should it be a @deterministic variable? Have I defined M and C as prior information in the correct way?
I would be grateful if somebody could give me some tips on how to combine these parameters with the given priors.
# priors
@pm.stochastic(dtype=np.float, observed=False, trace=True)
def Xpos(value=1900, x_l=1800, x_h=1950):
    """The probable region of the position of halo centre"""
    if (value > x_h) or (value < x_l):
        return -np.inf
    else:
        return -np.log(x_h - x_l + 1)

@pm.stochastic(dtype=np.float, observed=False, trace=True)
def Ypos(value=1750, y_l=1200, y_h=2000):
    """The probable region of the position of halo centre"""
    def logp(value, y_l, y_h):
        if (value > y_h) or (value < y_l):
            return -np.inf
        else:
            return -np.log(y_h - y_l + 1)

M = math.pow(10, 15) * pm.Exponential('mass', beta=math.pow(10, 15))

@deterministic
def sigma(value=1, M=M):
    if M < 10**15:
        return .09
    else:
        return .06

cExpected = 5.26 / (1 + z) * (M / math.pow(10, 14))**(-.1)  # based on Neto et al. 2007
concentration = Lognormal("concentration", cExpected, sigma)

# model
@pm.deterministic(name='reduced_shear', dtype=np.float, observed=False, trace=True)
def reduced_shear(x=Xpos, y=Ypos, mass=M, conc=concentration):
    nfw = NFWHalo(mass, conc, [x, y], zh=0.128)
    g1tot = 0; g2tot = 0
    for i in range(len(z)):
        g1, g2, magnification = nfw.getLensing(gal_pos, z[i])
        g1tot += g1 * redshift_pdf[i] / sum(redshift_pdf)
        g2tot += g2 * redshift_pdf[i] / sum(redshift_pdf)
    theta = arctan2(gal_ypos - Ypos, gal_xpos - Xpos)
    value = -g1tot * cos(2 * theta) - g2tot * sin(2 * theta)  # tangential shear
    return value

@pm.deterministic(name='tau_shear', dtype=np.float, observed=False, trace=True)
def tau_shear(Xpos, Ypos, M, concentration):
    nfw = NFWHalo(M, concentration, [Xpos, Ypos], zh=0.128)
    g1tot = 0; g2tot = 0
    for i in range(len(z)):
        g1, g2, magnification = nfw.getLensing(gal_pos, z[i])
        g1tot += g1 * redshift_pdf[i] / sum(redshift_pdf)
        g2tot += g2 * redshift_pdf[i] / sum(redshift_pdf)
    theta = arctan2(gal_ypos - Ypos, gal_xpos - Xpos)
    gt = -g1tot * cos(2 * theta) - g2tot * sin(2 * theta)
    g_squared = g1tot**2 + g2tot**2
    delta_abse = sqrt(delta_e1**2 + delta_e2**2)
    value = (1 - g_squared) * delta_abse
    return value

tau = pm.Normal('tau', tau_shear, 0.2)

# likelihood
obs = pm.Normal("obs", mu=reduced_shear, tau=tau, value=data, observed=True)
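A hedged sketch (not part of the original post) of how these pieces are typically assembled and sampled in PyMC2; names such as data, z, gal_pos, and redshift_pdf are assumed to come from the surrounding notebook:
model_nodes = [Xpos, Ypos, M, concentration, reduced_shear, tau_shear, obs]
mcmc = pm.MCMC(model_nodes)          # build the sampler from the stochastics/deterministics above
mcmc.sample(iter=20000, burn=5000)   # run the chain
print(mcmc.trace('mass')[:].mean())  # e.g. posterior mean of the 'mass' stochastic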