I am a little puzzled. I wanted to check the Christoffel symbol and Riemann tensor calculations using the Schwarzschild metric, but I do not get the expected results. Where is my error, or is SymPy missing something? I am missing Gamma^t_tr, Gamma^r_rr and Gamma^r_tt.
from sympy.diffgeom import Manifold, Patch, CoordSystem, TensorProduct, metric_to_Riemann_components, metric_to_Christoffel_1st, metric_to_Christoffel_2nd
import sympy as sym
from sympy import sin,cos,sinh,cosh, acos, atan2, exp, asin, acot
sym.init_printing(num_columns=200)
TP = TensorProduct
t,x,y,z = sym.symbols("t x y z")
tau, r, theta, phi = sym.symbols("tau r theta phi")
rs, M = sym.symbols("rs M")
m = Manifold("M",4)
p = Patch("P",m)
term_r = 1 - 2*M/r
relation_dict = {
    ('cartesian', 'schwarz'): [(t, x, y, z), (t, (x**2 + y**2 + z**2)**(0.5), atan2(y, x), acos(z/(x*x + y*y + z*z)**0.5))],
    ('schwarz', 'cartesian'): [(tau, r, phi, theta), (tau, r*cos(phi)*sin(theta), r*sin(phi)*sin(theta), r*cos(theta))]
}
cartesian = CoordSystem('cartesian', p, (t, x, y, z), relation_dict)
schwarz = CoordSystem('schwarz', p, (tau, r, phi, theta), relation_dict)
tau, r, phi, theta = schwarz.coord_functions()
g00 = -term_r
g01 = 0
g02 = 0
g03 = 0
g10 = 0
g11 = 1/term_r
g12 = 0
g13 = 0
g20 = 0
g21 = 0
g22 = r**2*sin(theta)**2
g23 = 0
g30 = 0
g31 = 0
g32 = 0
g33 = r**2
g = sym.Matrix([[g00, g01, g02, g03],
                [g10, g11, g12, g13],
                [g20, g21, g22, g23],
                [g30, g31, g32, g33]])
diff_forms = schwarz.base_oneforms()
metric_diff_form = sum([TensorProduct(di, dj)*g[i, j]
                        for i, di in enumerate(diff_forms)
                        for j, dj in enumerate(diff_forms)])
print(metric_diff_form)
chris2 = metric_to_Christoffel_2nd(metric_diff_form)
print(chris2)
Rie = metric_to_Riemann_components(metric_diff_form)
print(Rie)
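For what it's worth, a likely culprit: term_r is built from the plain Symbol r, before coord_functions() rebinds r to the coordinate's base scalar field. SymPy's diffgeom then treats g00 and g11 as constants on the manifold, so their derivatives along r vanish, which would wipe out exactly Gamma^t_tr, Gamma^r_rr and Gamma^r_tt. A minimal sketch of the reordering, with everything else unchanged:

tau, r, phi, theta = schwarz.coord_functions()
term_r = 1 - 2*M/r  # r is now the coordinate base scalar field, so d/dr no longer vanishes
g00 = -term_r
g11 = 1/term_r
# ... rebuild g and metric_diff_form exactly as above ...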
I have written two algorithms for creating unique mazes, one using depth-first search (DFS) and the other using Kruskal's algorithm. The DFS version performs as expected; however, Kruskal's runs marginally slower than DFS, and I do not know why.
Both are written in Python.
I suspect the random.choice() call is the underlying problem. The difference in runtime becomes noticeable once r, c > 30.
Here is the code for Kruskal's algorithm:
import random
import time
import numpy

# Create a list of all possible edges
def create_edges(r, c):
    edges = []
    for y in range(r):
        for x in range(c):
            i = (y, x)
            for d in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                p = tuple(map(sum, zip(d, i)))
                py = p[0]
                px = p[1]
                if px in range(c) and py in range(r):
                    edges.append([i, p])
    return edges
def kruskal(r, c, sz):
    path = []
    # Create a list of parent root nodes
    roots = {(y, x): [(y, x)] for y in range(r) for x in range(c)}
    edges = create_edges(r, c)
    while edges:
        # Choose a random edge
        edge = random.choice(edges)
        parent = edge[0]
        child = edge[1]
        parent_set = get_set(roots, parent)
        child_set = get_set(roots, child)
        # Check if the parent / child are already in the same set
        if parent_set == child_set:
            # note: list.reverse() reverses in place and returns None,
            # so rev_edge is always None and remove(edge) drops the
            # now-reversed edge instead
            rev_edge = edge.reverse()
            if rev_edge in edges:
                edges.remove(rev_edge)
            edges.remove(edge)
            continue
        roots[parent_set] += roots[child_set]
        roots.pop(child_set)
        path.extend((parent, child))
        rev_edge = edge.reverse()
        if rev_edge in edges:
            edges.remove(rev_edge)
        edges.remove(edge)
    return path
def get_set(roots, member):
    # Linear scan over every set to find the one containing `member`
    s = None
    for parent, children in roots.items():
        if member in children:
            s = parent
    return s
def create_maze(t, r, c, sz):
    maze = [['|_' for _ in range(c)] for _ in range(r)]
    for cell in maze: cell.append('| ')
    wd = {'DOWN' : ( 1, 0),
          'UP'   : (-1, 0),
          'LEFT' : ( 0, -1),
          'RIGHT': ( 0, 1)}
    for n in range(len(t) - 1):
        a = n
        b = n + 1
        p1 = t[a]
        p2 = t[b]
        ay, ax = p1[0], p1[1]
        by, bx = p2[0], p2[1]
        w = tuple(numpy.array(p2) - numpy.array(p1))
        if w in wd.values():
            k = list(wd.keys())[list(wd.values()).index(w)]
            if k == 'DOWN':  maze[ay][ax] = maze[ay][ax].replace('_', ' ')
            if k == 'UP':    maze[by][bx] = maze[by][bx].replace('_', ' ')
            if k == 'LEFT':  maze[ay][ax] = maze[ay][ax].replace('|', ' ')
            if k == 'RIGHT': maze[by][bx] = maze[by][bx].replace('|', ' ')
    return maze
def print_maze(maze, r, c, delay = 0):
    # note: the delay parameter is immediately overwritten below
    s, l = min((r, c)), max((r, c))
    a = 1 / (4 * r * c)
    e = (1 / (s * l)) ** 2
    delay = (a * 2.718 ** (-1 * e)) ** 0.5
    time.sleep(delay)
    print(' _' * c)
    for iy in range(r):
        for ix in range(c + 1):
            print(maze[iy][ix], end = '')
        print('')
    print('')
def main():
    r = 30
    c = 30
    sz = r * c
    path = kruskal(r, c, sz)
    maze = create_maze(path, r, c, sz)
    print_maze(maze, r, c)

if __name__ == "__main__":
    main()
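For comparison (this is not the approach in the code above): Kruskal's is usually paired with a disjoint-set union (union-find) structure, which makes each set lookup near-constant time instead of the linear scan in get_set. A minimal sketch with path compression and union by size:

def make_sets(r, c):
    # one singleton set per cell
    parent = {(y, x): (y, x) for y in range(r) for x in range(c)}
    size = {cell: 1 for cell in parent}
    return parent, size

def find(parent, v):
    # walk to the root, flattening the chain as we go (path compression)
    while parent[v] != v:
        parent[v] = parent[parent[v]]
        v = parent[v]
    return v

def union(parent, size, a, b):
    ra, rb = find(parent, a), find(parent, b)
    if ra == rb:
        return False  # same tree: the edge would close a loop
    if size[ra] < size[rb]:
        ra, rb = rb, ra
    parent[rb] = ra  # union by size keeps the trees shallow
    size[ra] += size[rb]
    return True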
I am trying to train a very basic linear regression model to fit the linear equation Y = m*X + c.
The weight parameter optimizes towards 5, but the bias parameter is stuck at 0. Am I doing something wrong?
import numpy as np

X = np.array(range(1, 1000))
Y = 5 * X + 7

def forward(W, X, b):
    return W * X + b

def getcost(Y, y):
    return np.sum((Y - y)**2) / 1000

def backward(W, b, X, Y, y, lr):
    dW = -2 * np.dot((Y - y).T, X) / 1000
    db = -2 * np.sum(Y - y) / 1000
    W -= lr * dW
    b -= lr * db
    return W, b

W = 0.0
b = 0.0
for i in range(80):
    y = forward(W, X, b)
    cost = getcost(Y, y)
    W, b = backward(W, b, X, Y, y, lr=0.000001)
print(int(cost), W, b)
The range of X is too extensive; since X and Y have a linear relationship, the model can be trained on a small range of values. The learning rate is also very small, so convergence will take much longer with such a large input set. If you really want to use the same data, you can normalize X.
import numpy as np
import matplotlib.pyplot as plt

X = np.array(range(1, 30))
Y = 5 * X + 7

# Normalize the X values
#X = (X - np.mean(X)) / np.std(X)

N = len(Y)
learning_rate = 0.001

# Initialize the model parameters
m, b = 0.0, 0.0
errors = []
for p in range(8000):
    hyp = m * X + b
    error = Y - hyp
    m_gradient = -(2/N) * np.sum(X * error)
    b_gradient = -(2/N) * np.sum(error)
    m = m - learning_rate * m_gradient
    b = b - learning_rate * b_gradient
    errors.append(np.mean(error ** 2))
    if p % 400 == 0:
        print(f'm={m} b={b}')

# prediction for x = 200, y should be 5*200+7 = 1007
print(m*200 + b)

plt.plot(errors)
plt.xlabel('Iteration')
plt.ylabel('Error')
plt.show()
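As a quick sanity check on any of these training loops, the closed-form least-squares fit recovers the target directly:

import numpy as np
X = np.array(range(1, 30), dtype=float)
Y = 5 * X + 7
m_hat, b_hat = np.polyfit(X, Y, 1)  # degree-1 least-squares fit
print(m_hat, b_hat)  # ~5.0, ~7.0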
I agree with @Ahsan Nawaz.
The only changes I made to your code are:
Scaled your features (otherwise, increasing the learning_rate gave NaNs)
Increased the learning rate
Increased the number of epochs
Here is your code, modified:
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.array(range(1, 1000))
scaler = StandardScaler()
scaler.fit(X.reshape(-1, 1))
X = scaler.transform(X.reshape(-1, 1)).reshape(-1)
Y = 5 * X + 7

def forward(W, X, b):
    return W * X + b

def getcost(Y, y):
    return np.sum((Y - y)**2) / 1000

def backward(W, b, X, Y, y, lr):
    dW = -2 * np.dot((Y - y).T, X) / 1000
    db = -2 * np.sum(Y - y) / 1000
    W -= lr * dW
    b -= lr * db
    return W, b

W = 0.0
b = 0.0
for i in range(8000):
    y = forward(W, X, b)
    cost = getcost(Y, y)
    W, b = backward(W, b, X, Y, y, lr=0.001)
print(int(cost), W, b)
Here is the final output -
0 4.999999437318114 6.999999212245364
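One caveat: because Y here is generated from the already-standardized X, the fitted (W, b) land directly on (5, 7). If Y had instead come from the unscaled X, you would map the fit back to the original scale; a sketch, using the scaler fitted above:

# y = W*(x - mu)/sigma + b  =>  slope = W/sigma, intercept = b - W*mu/sigma
mu, sigma = scaler.mean_[0], scaler.scale_[0]
slope = W / sigma
intercept = b - W * mu / sigma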
I am trying to solve the equation of motion of a charged particle in a planetary magnetic field, to trace the particle's path, using the forward Euler and RK4 methods in Python (as an exercise in learning numerical methods). I encounter two problems:
The 'for loop' in the RK4 method does not update the new values; it gives the values of the first iteration for every iteration.
When I change the sign of β = charge/mass, the expected change in the particle's path does not occur. The path seems unaffected by the nature (sign) of the particle. What does this mean physically or mathematically?
The code is adapted from:
python two coupled second order ODEs Runge Kutta 4th order
and
Applying Forward Euler Method to a Three-Box Model System of ODEs
I would be immensely grateful if anyone could explain to me what is wrong in the code. Thank you.
The code is as follows:
import numpy as np
import matplotlib.pyplot as plt
from math import sin, cos
from scipy.integrate import odeint

scales = np.array([1e7, 0.1, 1, 1e-5, 10, 1e-5])

def LzForce(t, p):
    # assigning each ODE to a vector element
    r, x, θ, y, ϕ, z = p*scales
    # constants
    R = 60268e3  # metre
    g_20 = 1583e-9
    Ω = 9.74e-3  # degree/second
    B_θ = (R/r)**4*g_20*cos(θ)*sin(θ)
    B_r = 2*(R/r)**4*g_20*(0.5*(3*cos(θ)**2 - 1))
    β = +9.36e10
    # defining the ODEs
    drdt = x
    dxdt = r*(y**2 + (z + Ω)**2*sin(θ)**2 - β*z*sin(θ)*B_θ)
    dθdt = y
    dydt = (-2*x*y + r*(z + Ω)**2*sin(θ)*cos(θ) + β*r*z*sin(θ)*B_r)/r
    dϕdt = z
    dzdt = (-2*x*(z + Ω)*sin(θ) - 2*r*y*(z + Ω)*cos(θ) + β*(x*B_θ - r*y*B_r))/(r*sin(θ))
    return np.array([drdt, dxdt, dθdt, dydt, dϕdt, dzdt])/scales
def ForwardEuler(fun, t0, p0, tf, dt):
    # note: the p0 argument is overwritten by the hard-coded initial conditions below
    r0 = 6.6e+07
    x0 = 0.
    θ0 = 88.
    y0 = 0.
    ϕ0 = 0.
    z0 = 22e-3
    p0 = np.array([r0, x0, θ0, y0, ϕ0, z0])
    t = np.arange(t0, tf + dt, dt)
    p = np.zeros([len(t), len(p0)])
    p[0] = p0
    for i in range(len(t) - 1):
        p[i+1, :] = p[i, :] + fun(t[i], p[i, :]) * dt
    return t, p
def rk4(fun, t0, p0, tf, dt):
    # initial conditions
    r0 = 6.6e+07
    x0 = 0.
    θ0 = 88.
    y0 = 0.
    ϕ0 = 0.
    z0 = 22e-3
    p0 = np.array([r0, x0, θ0, y0, ϕ0, z0])
    t = np.arange(t0, tf + dt, dt)
    p = np.zeros([len(t), len(p0)])
    p[0] = p0
    for i in range(len(t) - 1):
        k1 = dt * fun(t[i], p[i])
        k2 = dt * fun(t[i] + 0.5*dt, p[i] + 0.5 * k1)
        k3 = dt * fun(t[i] + 0.5*dt, p[i] + 0.5 * k2)
        k4 = dt * fun(t[i] + dt, p[i] + k3)
        p[i+1] = p[i] + (k1 + 2*(k2 + k3) + k4)/6
        return t, p
dt = 0.5
tf = 1000
p0 = [6.6e+07,0.0,88.0,0.0,0.0,22e-3]
t0 = 0
#Solution with Forward Euler
t,p_Euler = ForwardEuler(LzForce,t0,p0,tf,dt)
#Solution with RK4
t ,p_RK4 = rk4(LzForce,t0, p0 ,tf,dt)
print(t,p_Euler)
print(t,p_RK4)
# Plot Solutions
r, x, θ, y, ϕ, z = p_Euler.T
fig, ax = plt.subplots(2, 3, figsize=(8, 4))
plt.xlabel('time in sec')
plt.ylabel('parameters')
for a, s in zip(ax.flatten(), [r, x, θ, y, ϕ, z]):
    a.plot(t, s); a.grid()
plt.title("Forward Euler", loc='left')
plt.tight_layout(); plt.show()

r, x, θ, y, ϕ, z = p_RK4.T
fig, ax = plt.subplots(2, 3, figsize=(8, 4))
plt.xlabel('time in sec')
plt.ylabel('parameters')
for a, q in zip(ax.flatten(), [r, x, θ, y, ϕ, z]):
    a.plot(t, q); a.grid()
plt.title("RK4", loc='left')
plt.tight_layout(); plt.show()
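Incidentally, odeint is imported above but never used; it makes a handy independent cross-check on both integrators. A sketch, assuming the same LzForce, t0, tf, dt and p0 as above (tfirst=True because LzForce takes (t, p) while odeint defaults to (p, t)):

t_chk = np.arange(t0, tf + dt, dt)
p_chk = odeint(LzForce, np.array(p0, dtype=float), t_chk, tfirst=True)
print(p_chk.shape)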
[RK4 solution plot][1]
[Euler solution plot][2]
RK4 does not give iterated values. The path is unaffected by the change of sign, which is expected as it is under the Lorentz force.
[1]: https://i.stack.imgur.com/bZdIw.png
[2]: https://i.stack.imgur.com/tuNDp.png
You are not iterating more than once inside the for loop in rk4 because it returns after the first iteration.
for i in range(len(t)-1):
    k1 = dt * fun(t[i], p[i])
    k2 = dt * fun(t[i] + 0.5*dt, p[i] + 0.5 * k1)
    k3 = dt * fun(t[i] + 0.5*dt, p[i] + 0.5 * k2)
    k4 = dt * fun(t[i] + dt, p[i] + k3)
    p[i+1] = p[i] + (k1 + 2*(k2 + k3) + k4)/6
    # This is the problem line: the return was tabbed in to be inside the
    # for block, so the loop body executed once and returned.
    return t, p
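The fix is simply to dedent the return so it runs only after the loop has filled every step:

for i in range(len(t)-1):
    k1 = dt * fun(t[i], p[i])
    k2 = dt * fun(t[i] + 0.5*dt, p[i] + 0.5 * k1)
    k3 = dt * fun(t[i] + 0.5*dt, p[i] + 0.5 * k2)
    k4 = dt * fun(t[i] + dt, p[i] + k3)
    p[i+1] = p[i] + (k1 + 2*(k2 + k3) + k4)/6
return t, p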
For physics questions please try a different forum.
I am trying to port the NumPy code below to TensorFlow, but when I run the session I get the error shown further down.
Here is the code:
import numpy as np
import tensorflow as tf

def cal_anchors():
    # Output:
    # anchors: (w, l, 2, 7)  x y z h w l r
    x = np.linspace(0, 48, 240)    # 48/.2
    y = np.linspace(20, -20, 200)  # 40/.2
    cx, cy = np.meshgrid(x, y)
    # all are (w, l, 2)
    cx = np.tile(cx[..., np.newaxis], 2)
    cy = np.tile(cy[..., np.newaxis], 2)
    cz = np.ones_like(cx) * (-1.465)
    w = np.ones_like(cx) * 0.6
    l = np.ones_like(cx) * 0.8
    h = np.ones_like(cx) * 1.73
    r = np.ones_like(cx)  # (200, 240, 2)
    print(r.shape)
    r[..., 0] = 0                 # 0 degrees
    r[..., 1] = 90 / 180 * np.pi  # 90 degrees
    # 7*(w, l, 2) -> (w, l, 2, 7)
    anchors = np.stack([cx, cy, cz, h, w, l, r], axis=-1)
    return anchors

print(cal_anchors().shape)  # (200, 240, 2, 7)
# tensorflow code
def anchors():
    x = tf.linspace(0.0, 48.0, 240)
    y = tf.linspace(-20.0, 20.0, 200)
    cx, cy = tf.meshgrid(x, y)
    cx = tf.expand_dims(cx, 2)
    cy = tf.expand_dims(cy, 2)
    cx = tf.tile(cx, 2)
    cy = tf.tile(cy, 2)
    cz = tf.ones_like(cx)
    w = tf.ones_like(cx)
    l = tf.ones_like(cx)
    h = tf.ones_like(cx)
    r = ones_like(cx)
    r[:, :, 0] = 0
    pi = tf.constant(3.141592)
    r[:, :, 1] = tf.math.divide(90, tf.math.multiply(180, pi))
    anchros = tf.stack([cx, cy, cz, h, w, l, r], axis=-1, name="anchors")
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    result = sess.run(anchros)
    print(result.shape)

anchors()
I am getting the below error:
File "anchros.py", line 39, in anchors
cx = tf.tile(cx,2)
File "/home/surendra/venv/tensorflow2/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 11450, in tile
_ops.raise_from_not_ok_status(e, name)
File "/home/surendra/venv/tensorflow2/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 6862, in raise_from_not_ok_status
six.raise_from(core._status_to_exception(e.code, message), None)
File "", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: Expected multiples to be 1-D, but got shape [] [Op:Tile]
Can anyone help me with this?
Thanks in advance
There are a couple of issues there: tf.tile expects a 1-D list of multiples, one per dimension (hence the InvalidArgumentError); ones_like is missing its tf. prefix; and TensorFlow tensors do not support item assignment, so r has to be a tf.Variable updated via .assign. This should work:
def anchors():
    x = tf.linspace(0.0, 48.0, 240)
    y = tf.linspace(-20.0, 20.0, 200)
    cx, cy = tf.meshgrid(x, y)
    cx = tf.expand_dims(cx, 2)
    cy = tf.expand_dims(cy, 2)
    cx = tf.tile(cx, [1, 1, 2])
    cy = tf.tile(cy, [1, 1, 2])
    cz = tf.ones_like(cx)
    w = tf.ones_like(cx)
    l = tf.ones_like(cx)
    h = tf.ones_like(cx)
    r = tf.Variable(initial_value=tf.ones_like(cx))
    r[:, :, 0].assign(tf.zeros(r.shape[:2]))
    pi = tf.constant(3.141592)
    # 90/180 * pi, matching the NumPy version above
    r[:, :, 1].assign(tf.multiply(tf.math.multiply(tf.math.divide(90., 180.), pi), tf.ones(r.shape[:2])))
    anchros = tf.stack([cx, cy, cz, h, w, l, r], axis=-1, name="anchors")
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    result = sess.run(anchros)
    print(result.shape)
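If the Variable/assign dance feels heavy, r can also be built without any assignment, since its two channels are just the constants 0 and 90/180*pi. A sketch, assuming the same cx of shape (200, 240, 2) as above:

zeros = tf.zeros_like(cx[..., 0])        # the 0-radian channel
halfpi = zeros + 90. / 180. * 3.141592   # the pi/2-radian channel
r = tf.stack([zeros, halfpi], axis=-1)   # shape (200, 240, 2), no Variable needed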
I need to optimize a non-convex problem (maximum likelihood), and when I try local optimization algorithms such as BFGS or Nelder-Mead, they fail to find the extremum; I frequently get a saddle point instead.
You can download data from here.
import numpy as np
import csv
from scipy.stats import norm

f = open('data.csv', 'r')
reader = csv.reader(f)
headers = next(reader)
column = {}
for h in headers:
    column[h] = []
for row in reader:
    for h, v in zip(headers, row):
        column[h].append(float(v))

# note: x and Coef_headers are presumably defined elsewhere in the full script
ini = [-0.0002, -0.01, .002, -0.09, -0.04, 0.01, -0.02, -.0004]
for i in range(0, len(x[0])):
    ini.append(float(x[0][i]))
x_header = list(Coef_headers)

N = 19  # no of observations
I = 4
P = 7
Yobs = np.zeros(N)
Yobs[:] = column['size']
X = np.zeros((N, P))
X[:, 0] = column['costTon']
X[:, 1] = column['com1']
X[:, 2] = column['com3']
X[:, 3] = column['com4']
X[:, 4] = column['com5']
X[:, 5] = column['night']
X[:, 6] = 1  # constant
def myfunction(B):
    # fixed coefficient vector (the original B[0.299, ...] float indexing is invalid)
    beta = np.array([0.299, 18.495, 2.181, 2.754, 3.59, 2.866, -12.846])
    theta = 30
    U = np.zeros((N, I))
    mm = np.zeros(I)
    u = np.zeros((N, I))
    F = np.zeros((N, I))
    G = np.zeros(N)
    l = 0
    s1 = np.expm1(-theta)
    for n in range(0, N):
        m = 0
        U[n, 0] = B[0]*column['cost_van'][n] + B[4]*column['cap_van'][n]
        U[n, 1] = B[1] + B[5]*column['ex'][n] + B[8]*column['dist'][n] + B[0]*column['cost_t'][n] + B[4]*column['cap_t'][n]
        U[n, 2] = B[2] + B[6]*column['ex'][n] + B[9]*column['dist'][n] + B[0]*column['cost_Ht'][n] + B[4]*column['cap_Ht'][n]
        U[n, 3] = B[3] + B[7]*column['ex'][n] + B[10]*column['dist'][n] + B[0]*column['cost_tr'][n] + B[4]*column['cap_tr'][n]
        for i in range(0, I):
            mm[i] = np.exp(U[n, i])
        m = sum(mm)
        for i in range(0, I):
            u[n, i] = 1/(1 + np.exp(U[n, i] - np.log(m - np.exp(U[n, i]))))
            F[n, i] = np.expm1(-u[n, i]*theta)
    CDF = np.zeros(N)
    Y = X.dot(beta)
    resid = 0
    for n in range(0, N):
        resid = resid + (np.square(Yobs[n] - Y[n]))
    SSR = resid / N
    dof = N - P - 1
    s2 = resid/dof  # MSE, or variance: the mean squared error of residuals
    for n in range(0, N):
        CDF[n] = norm.cdf((Yobs[n] + 1), SSR, s2) - norm.cdf((Yobs[n] - 1), SSR, s2)
        G[n] = np.expm1(-CDF[n]*theta)
        k = column['Choice_Veh'][n] - 1
        l = l + (np.log10(1 + (F[n, k]*G[n]/s1))/(-theta))
    loglikelihood = np.log10(l)
    return -loglikelihood
rranges = np.repeat(slice(-10, 10, 1), 11, axis=0)
a = rranges

from scipy import optimize
resbrute = optimize.brute(myfunction, rranges, full_output=True, finish=optimize.fmin)
print("# global minimum:", resbrute[0])
print("function value at global minimum:", resbrute[1])
Now I have decided to go for a grid search and tried scipy.optimize.brute, but I get the error below. In fact, my real problem has 47 variables; I reduced it to 31 to make it work, but it still doesn't. Please help.
File "C:\...\site-packages\numpy\core\numeric.py", line 1906, in indices
res = empty((N,)+dimensions, dtype=dtype)
ValueError: array is too big.
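For context on the error: optimize.brute materializes the full grid (via numpy.indices, as the traceback shows), so the point count is Ns**ndim. Each slice(-10, 10, 1) contributes 20 steps, and 20**31 points (let alone 20**47) is far more than memory allows, hence "array is too big". A global optimizer that samples the space instead of enumerating it scales far better; a minimal sketch, assuming the same myfunction and (-10, 10) bounds per variable:

from scipy.optimize import differential_evolution
bounds = [(-10, 10)] * 11  # one (low, high) pair per variable
result = differential_evolution(myfunction, bounds, seed=0)
print("best x:", result.x)
print("best value:", result.fun)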