Solving cars moving in multiple lanes simulation problem - python-3.x

I am trying to simulate cars moving in multiple lanes in Python. The problem is as follows:
The number of cars, the road length, the probability p and vmax are all input values.
Rules:
1. If vi < vmax, increase the velocity vi of car i by one unit, that is, vi → vi + 1. This models acceleration towards the maximum velocity.
2. Compute the distance to the next car in the same lane and the distances to the next cars in the adjacent lanes (one or two, depending on which lane the car is in). Let d = max([d1, d2, d3]). If vi ≥ d, reduce the velocity to vi = d − 1 to prevent crashes and switch to the lane in which the distance to the next car is d (if several lanes tie, pick one of them, at random or however you like). Otherwise (there is at least one lane, possibly the car's own, with d > vi), move into that lane without changing the car's velocity; if more than one lane qualifies, pick one at random.
3. With probability p, reduce the velocity of a moving car by one unit: vi → vi − 1. Only do this when vi > 0, to avoid negative velocities.
4. Update the position xi of car i so that xi(t + 1) = xi(t) + vi.
The road is circular, so every car always has cars both in front of it and behind it. (A minimal single-lane sketch of these rules follows right after this list.)
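Here is that sketch. It drops the lane-changing part of rule 2 (with a single lane, rule 2 reduces to braking to the gap) and assumes at least two cars at distinct positions, so it is only meant to make rules 1, 3 and 4 concrete; all names are illustrative.

import random

def single_lane_step(positions, velocities, road_length, vmax, p):
    """One update of a single-lane, circular-road version of the rules above."""
    n = len(positions)
    order = sorted(range(n), key=lambda i: positions[i])
    for idx, i in enumerate(order):
        ahead = order[(idx + 1) % n]                      # next car around the circular road
        gap = (positions[ahead] - positions[i]) % road_length
        if velocities[i] < vmax:                          # rule 1: accelerate
            velocities[i] += 1
        if velocities[i] >= gap:                          # rule 2 (single lane): brake to the gap
            velocities[i] = gap - 1
        if velocities[i] > 0 and random.random() < p:     # rule 3: random slowdown
            velocities[i] -= 1
    for i in range(n):                                    # rule 4: move every car
        positions[i] = (positions[i] + velocities[i]) % road_length

Calling single_lane_step repeatedly on the same position and velocity lists advances the simulation one time step at a time.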
Below is my attempt to solve the full multi-lane problem. Don't be confused by the variables theta and r: theta is just the position and r is the lane.
My attempt:
from matplotlib import pyplot as plt
import random
import math
from matplotlib import animation
import numpy as np
from operator import attrgetter

roadLength = 100
numFrames = 200
nlanes = 3
numCars = 20
posss = []
theta = []
r = []
color = []
probability = 0.5
vmax = 1
cars = []


class Car:
    def __init__(self, position, velocity, lane):
        self.position = position
        self.velocity = velocity
        self.lane = lane


def pos(car, k):
    rand = random.uniform(0, 1)
    if car[k].velocity < vmax:
        car[k].velocity += 1
    dist = 0
    if car[k].lane == 1:
        temp_lanes_between = [0, 1]
    if car[k].lane == nlanes and nlanes != 1:
        temp_lanes_between = [-1, 0]
    if 1 < car[k].lane < nlanes:
        temp_lanes_between = [-1, 0, 1]
    iterator = []
    for p in range(k + 1, numCars):
        iterator.append(p)
    # if car[k+1].position - car[k].position <= car[k].velocity and car[k].lane == car[k+1].lane:
    for p in range(k):
        iterator.append(p)
    for s in iterator:
        if car[s].lane - car[k].lane in temp_lanes_between:
            temp_lanes_between.remove(car[s].lane - car[k].lane)
            distance = min([abs((car[s].position - car[k].position) % roadLength),
                            roadLength - abs((car[s].position - car[k].position) % roadLength)])
            if dist < distance:
                dist = distance
                l = car[s].lane
        if dist <= car[k].velocity:
            break
    if temp_lanes_between:
        j = random.randrange(0, len(temp_lanes_between))
        car[k].lane += temp_lanes_between[j]
    if temp_lanes_between == [] and dist <= car[k].velocity:
        car[k].velocity = dist - 1
        car[k].lane = l
    if rand < probability and car[k].velocity > 0:
        car[k].velocity = car[k].velocity - 1
    car[k].position = car[k].position + car[k].velocity
    return car[k].position


for i in range(numCars):
    cars.append(Car(i, 0, 1))
    theta.append(0)
    r.append(1)
    color.append(i)
    posss.append(i)

fig = plt.figure()
ax = fig.add_subplot(111)
point, = ax.plot(posss, r, 'o')
ax.set_xlim(-10, 1.2*numFrames)
ax.set_ylim(-2, nlanes + 3)


def animate(frameNr):
    sort_cars = sorted(cars, key=attrgetter("position"))
    for i in range(numCars):
        pos(sort_cars, i)
    for k in range(numCars):
        theta[k] = cars[k].position
        r[k] = cars[k].lane
    print(theta)
    print(r)
    point.set_data(theta, r)
    return point,


def simulate():
    anim = animation.FuncAnimation(fig, animate,
                                   frames=numFrames, interval=100, blit=True, repeat=False)
    plt.show()


simulate()
I get an error saying "local variable 'l' referenced before assignment" on the line car[k].lane = l. I understand that this means l has no value at that point, but I don't see how that is possible: every time pos() runs it should go through the line l = car[s].lane, where it gets assigned. Maybe there are more errors in the code above, but I have really given it my best shot and I don't know what to do.
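For context, this exception comes from Python treating any name assigned inside a function as local; if no assignment has actually run by the time the name is read, Python raises UnboundLocalError. A tiny standalone example, nothing to do with the simulation code above:

def largest_above(values, threshold):
    # 'best' is only assigned inside the if-branch; if the condition is never
    # true, the return line raises UnboundLocalError ("local variable 'best'
    # referenced before assignment" on older Python 3 versions).
    for v in values:
        if v > threshold:
            best = v
    return best

largest_above([1, 2, 3], threshold=10)  # raises the error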
Thanks in advance!

Related

tkinter: how do I calculate the normal vector and the conservation of kinetic energy of all particles in Python?

I was trying to calculate the normal vector n and the tangential vector t of two particles p1 and p2, so that I can enforce conservation of kinetic energy when they collide, but I don't really know where and how in the code to implement this.
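For reference, the usual equal-mass elastic collision response, which is presumably what the missing formula images showed, exchanges the velocity components along the line of centres (the normal direction n) and leaves the tangential components unchanged; this conserves both momentum and kinetic energy. A sketch with hypothetical names, not yet wired into the code below:

from math import hypot

def elastic_collision(xi, yi, vxi, vyi, xj, yj, vxj, vyj):
    """Equal-mass 2D elastic collision: swap the normal velocity components,
    keep the tangential ones. Returns the new velocities of both particles."""
    dx, dy = xj - xi, yj - yi
    dist = hypot(dx, dy)
    nx, ny = dx / dist, dy / dist          # unit normal along the line of centres
    vi_n = vxi * nx + vyi * ny             # normal component of particle i
    vj_n = vxj * nx + vyj * ny             # normal component of particle j
    vxi_new = vxi + (vj_n - vi_n) * nx
    vyi_new = vyi + (vj_n - vi_n) * ny
    vxj_new = vxj + (vi_n - vj_n) * nx
    vyj_new = vyj + (vi_n - vj_n) * ny
    return vxi_new, vyi_new, vxj_new, vyj_new

In the code below, something like this would go in the collision branch, where the velocity components are currently just multiplied by -1.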
from tkinter import *
from random import *
from math import *

myHeight = 250  # 400
myWidth = 400   # 800
mySpeed = 20    # 100

col = randint(0, 255)
radius = randint(0, 50)
print(col)
# x = 60

global particules
particules = []

def initialiseParticule(dx, dy, radius, color):
    x, y = randint(0, myWidth), randint(0, myHeight)  # 100
    radius = randint(0, 10)
    # color = randint(0,255)
    # col1 = str(color)
    k = myCanvas.create_oval(x-radius, y-radius,
                             x+radius, y+radius,
                             width=2, fill=color)
    b = [x, y, dx, dy, radius]
    particules.append(b)
    # print(k)

def updateParticules():
    N = len(particules)
    for i in range(N):
        # update displacement
        particules[i][0] += particules[i][2]
        particules[i][1] += particules[i][3]
        # xi += vxi
        # yi += vyi
        # collision with walls
        if particules[i][0] < particules[i][4] or particules[i][0] >= myWidth - particules[i][4]:
            particules[i][2] *= -1
        if particules[i][1] < particules[i][4] or particules[i][1] >= myHeight - particules[i][4]:
            particules[i][3] *= -1
        # collision with other particles
        for j in range(N):
            if i != j:
                xi, yi = particules[i][0], particules[i][1]
                vxi, vyi = particules[i][2], particules[i][3]
                xj, yj = particules[j][0], particules[j][1]
                vxj, vyj = particules[j][2], particules[j][3]
                dij = sqrt((xi-xj)**2 + (yi-yj)**2)
                # print(dij)
                # collision !!!
                if dij <= particules[i][4] + particules[j][4]:
                    particules[i][2] *= -1
                    particules[j][2] *= -1
                    particules[i][3] *= -1
                    particules[j][3] *= -1
        r = particules[i][4]
        myCanvas.coords(i+1, particules[i][0]-r, particules[i][1]-r,
                        particules[i][0]+r, particules[i][1]+r)

def animation():
    updateParticules()
    myCanvas.after(mySpeed, animation)

mainWindow = Tk()
mainWindow.title('Pong')
# mainWindow.geometry(str(myWidth)+'x'+str(myHeight+100))
myCanvas = Canvas(mainWindow, bg='dark grey', height=myHeight, width=myWidth)
myCanvas.pack(side=TOP)

N = 3
for n in range(N):
    # initialiseParticule(-1, -1, radius, 'randint(0,10)')
    initialiseParticule(-1, -1, radius, 'pink')

animation()

# bou = Button(mainWindow, text="Leave", command=mainWindow.destroy)
# bou.pack()

mainWindow.mainloop()

Speed Up a for Loop - Python

I have a code that works perfectly well but I wish to speed up the time it takes to converge. A snippet of the code is shown below:
import time
import numpy as np
from numpy.linalg import norm

def myfunction(x, i):
    y = x + (min(0, target[i] - data[i, :] @ x)) * data[i] / (norm(data[i])**2)
    return y

rows, columns = data.shape

start = time.time()
iterate = 0
iterate_count = []
norm_count = []

res = 5
x_not = np.ones(columns)
norm_count.append(norm(x_not))
iterate_count.append(0)

while res > 1e-8:
    for row in range(rows):
        y = myfunction(x_not, row)
        x_not = y
    iterate += 1
    iterate_count.append(iterate)
    norm_count.append(norm(x_not))
    res = abs(norm_count[-1] - norm_count[-2])

print('Converge at {} iterations'.format(iterate))
print('Duration: {:.4f} seconds'.format(time.time() - start))
I am relatively new to Python and would appreciate any hint or assistance.
Ax = b is the problem we wish to solve. Here, 'A' is the 'data' and 'b' is the 'target'.
Ugh! After spending a while on this, I don't think it can be done the way you've set up your problem: in each iteration over the rows you modify x_not and then pass the updated result on to the next row, and that kind of sequential update can't be vectorized easily. You can still learn the thought process of vectorization from the failed attempt, so I'm including it in this answer. I'm also including a different iterative method for solving linear systems of equations, in a vectorized version (where the solution is updated using matrix multiplication and vector addition) and a loopy version (where the solution is updated using a for loop), to demonstrate what kind of speedup you can expect.
1. The failed attempt
Let's take a look at what you're doing here.
def myfunction(x, i):
    y = x + (min(0, target[i] - data[i, :] @ x)) * (data[i] / (norm(data[i])**2))
    return y
You take the dot product of the ith row of data with x_not, subtract it from the ith element of target, and clip the result at zero (the min keeps only non-positive values).
You multiply this scalar by the ith row of data divided by the squared norm of that row; let's call the divided row part2.
Then you add the product to x_not.
Now let's look at the shapes of the matrices.
data is (M, N).
target is (M, ).
x_not is (N, )
Instead of doing these operations rowwise, you can operate on the entire matrix!
1.1. Simplifying the dot product.
Instead of doing data[i, :] @ x, you can do data @ x_not and this gives an array with the ith element giving the dot product of the ith row with x_not. So now we have data @ x_not with shape (M, ).
Then, you can subtract this from the entire target array, so target - (data @ x_not) has shape (M, ).
So far, we have
part1 = target - (data @ x_not)
Next, if anything is greater than zero, set it to zero.
part1[part1 > 0] = 0
1.2. Finding rowwise norms.
Finally, you want to multiply this by the row of data, and divide by the square of the L2-norm of that row. To get the norm of each row of a matrix, you do
rownorms = np.linalg.norm(data, axis=1)
This is a (M, ) array, so we need to convert it to a (M, 1) array so we can divide each row. rownorms[:, None] does this. Then divide data by this.
part2 = data / (rownorms[:, None]**2)
1.3. Add to x_not
Finally, we sum these per-row corrections, part1[:, None] * part2, over the rows and add the result to the original x_not, returning
result = x_not + (part1[:, None] * part2).sum(axis=0)
Here's where we get stuck. In your approach, each call to myfunction() computes its correction from the x_not that was updated by the previous call, so the row updates are inherently sequential and can't all be evaluated from the same x_not in one shot.
2. Why vectorize?
Using numpy's inbuilt methods instead of looping allows it to offload the calculation to its C backend, so it runs faster. If your numpy is linked against a BLAS backend, you can extract even more speed, because BLAS makes use of your processor's SIMD registers.
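As a quick standalone illustration of that effect (the array size here is arbitrary), compare a pure-Python dot product with numpy's:

import numpy as np
import timeit

v = np.random.random(100_000)
w = np.random.random(100_000)

def loop_dot():                    # element-by-element loop in pure Python
    return sum(a * b for a, b in zip(v, w))

def vect_dot():                    # one call, handled by numpy's C/BLAS backend
    return float(v @ w)

print(timeit.timeit(loop_dot, number=10))   # typically orders of magnitude slower
print(timeit.timeit(vect_dot, number=10))   # than this on the same machine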
The conjugate gradient method is a simple iterative method to solve certain systems of equations. There are other more complex algorithms that can solve general systems well, but this should do for the purposes of our demo. Again, the purpose is not to have an iterative algorithm that will perfectly solve any linear system of equations, but to show what kind of speedup you can expect if you vectorize your code.
Given your system
data @ x_not = target
Let's define some variables:
A = data.T @ data
b = data.T @ target
And we'll solve the system A @ x = b
x = np.zeros((columns,))  # Initial guess. Can be anything
resid = b - A @ x
p = resid
while (np.abs(resid) > tolerance).any():
    Ap = A @ p
    alpha = (resid.T @ resid) / (p.T @ Ap)
    x = x + alpha * p
    resid_new = resid - alpha * Ap
    beta = (resid_new.T @ resid_new) / (resid.T @ resid)
    p = resid_new + beta * p
    resid = resid_new + 0
To contrast the fully vectorized approach with one that uses iterations to update the rows of x and resid_new, let's define another implementation of the CG solver that does this.
def solve_loopy(data, target, itermax=100, tolerance=1e-8):
    A = data.T @ data
    b = data.T @ target
    rows, columns = data.shape

    x = np.zeros((columns,))  # Initial guess. Can be anything
    resid = b - A @ x
    resid_new = b - A @ x
    p = resid
    niter = 0
    while (np.abs(resid) > tolerance).any() and niter < itermax:
        Ap = A @ p
        alpha = (resid.T @ resid) / (p.T @ Ap)
        for i in range(len(x)):
            x[i] = x[i] + alpha * p[i]
            resid_new[i] = resid[i] - alpha * Ap[i]
        # resid_new = resid - alpha * A @ p
        beta = (resid_new.T @ resid_new) / (resid.T @ resid)
        p = resid_new + beta * p
        resid = resid_new + 0
        niter += 1
    return x
And our original vector method:
def solve_vect(data, target, itermax=100, tolerance=1e-8):
    A = data.T @ data
    b = data.T @ target
    rows, columns = data.shape

    x = np.zeros((columns,))  # Initial guess. Can be anything
    resid = b - A @ x
    resid_new = b - A @ x
    p = resid
    niter = 0
    while (np.abs(resid) > tolerance).any() and niter < itermax:
        Ap = A @ p
        alpha = (resid.T @ resid) / (p.T @ Ap)
        x = x + alpha * p
        resid_new = resid - alpha * Ap
        beta = (resid_new.T @ resid_new) / (resid.T @ resid)
        p = resid_new + beta * p
        resid = resid_new + 0
        niter += 1
    return x
Let's solve a simple system to see if this works first:
2x1 + x2 = -5
−x1 + x2 = -2
should give a solution of [-1, -3]
data = np.array([[ 2, 1],
                 [-1, 1]])
target = np.array([-5, -2])
print(solve_loopy(data, target))
print(solve_vect(data, target))
Both give the correct solution [-1, -3], yay! Now on to bigger things:
data = np.random.random((100, 100))
target = np.random.random((100, ))
Let's ensure the solution is still correct:
sol1 = solve_loopy(data, target)
np.allclose(data @ sol1, target)
# Output: False
sol2 = solve_vect(data, target)
np.allclose(data @ sol2, target)
# Output: False
Hmm, looks like the CG method doesn't work for the badly conditioned random matrices we created. Well, at least both give the same result.
np.allclose(sol1, sol2)
# Output: True
But let's not get discouraged! We don't really care if it works perfectly, the point of this is to demonstrate how amazing vectorization is. So let's time this:
import timeit
timeit.timeit('solve_loopy(data, target)', number=10, setup='from __main__ import solve_loopy, data, target')
# Output: 0.25586539999994784
timeit.timeit('solve_vect(data, target)', number=10, setup='from __main__ import solve_vect, data, target')
# Output: 0.12008900000000722
Nice! A ~2x speedup simply by avoiding a loop while updating our solution!
For larger systems, this will be even better.
for N in [10, 50, 100, 500, 1000]:
    data = np.random.random((N, N))
    target = np.random.random((N, ))
    t_loopy = timeit.timeit('solve_loopy(data, target)', number=10, setup='from __main__ import solve_loopy, data, target')
    t_vect = timeit.timeit('solve_vect(data, target)', number=10, setup='from __main__ import solve_vect, data, target')
    print(N, t_loopy, t_vect, t_loopy/t_vect)
This gives us:
N t_loopy t_vect speedup
00010 0.002823 0.002099 1.345390
00050 0.051209 0.014486 3.535048
00100 0.260348 0.114601 2.271773
00500 0.980453 0.240151 4.082644
01000 1.769959 0.508197 3.482822

How could I set the starting and ending points randomly in a grid that generates random obstacles?

I built a grid that generates random obstacles for pathfinding algorithm, but with fixed starting and ending points as shown in my snippet below:
import random
import numpy as np

# grid format
# 0 = navigable space
# 1 = occupied space

x = [[random.uniform(0, 1) for i in range(50)] for j in range(50)]
grid = np.array([[0 for i in range(len(x[0]))] for j in range(len(x))])

for i in range(len(x)):
    for j in range(len(x[0])):
        if x[i][j] <= 0.7:
            grid[i][j] = 0
        else:
            grid[i][j] = 1

init = [5, 5]    # Start location
goal = [45, 45]  # Our goal

# clear starting and end point of potential obstacles
def clear_grid(grid, x, y):
    if x != 0 and y != 0:
        grid[x-1:x+2, y-1:y+2] = 0
    elif x == 0 and y != 0:
        grid[x:x+2, y-1:y+2] = 0
    elif x != 0 and y == 0:
        grid[x-1:x+2, y:y+2] = 0
    elif x == 0 and y == 0:
        grid[x:x+2, y:y+2] = 0

clear_grid(grid, init[0], init[1])
clear_grid(grid, goal[0], goal[1])
I also need to generate the starting and ending points randomly every time I run the code, instead of keeping them fixed. How could I do that? Any assistance, please?
Replace,
init = [5,5] #Start location
goal = [45,45] #Our goal
with,
init = np.random.randint(0, high=50, size=2)
goal = np.random.randint(0, high=50, size=2)
assuming your grid is indexed 0-49 on each axis (the high bound of np.random.randint is exclusive). Personally I would add grid size variables, i_length and j_length.
EDIT #1
i_length = 50
j_length = 50
x = [[random.uniform(0,1) for i in range(i_length)]for j in range(j_length)]
grid = np.array([[0 for i in range(i_length)]for j in range(j_length)])
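Putting the two together, a sketch that draws random start and goal cells inside an i_length by j_length grid and re-draws the goal if it happens to coincide with the start (again, the upper bound of np.random.randint is exclusive):

import numpy as np

i_length = 50
j_length = 50

init = [np.random.randint(0, i_length), np.random.randint(0, j_length)]  # random start cell
goal = [np.random.randint(0, i_length), np.random.randint(0, j_length)]  # random goal cell
while goal == init:  # re-draw the goal if it lands on the start cell
    goal = [np.random.randint(0, i_length), np.random.randint(0, j_length)]

clear_grid(grid, init[0], init[1]) and clear_grid(grid, goal[0], goal[1]) can then be called exactly as in the question.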

ValueError: operands could not be broadcast together with shapes (3,) (0,)

My aim is to make image1 move along the ring from its current position up to 180 degrees. I have been trying different things but nothing seems to work. My final aim is to move both images along the ring in different directions and finally merge them and make them disappear. I keep getting the error above. Can you please help? Also, can you tell me how I can go about this problem?
from visual import *
import numpy as np

x = 3
y = 0
z = 0
i = pi/3
c = 0.120239  # A.U/minute
r = 1

for theta in arange(0, 2*pi, 0.1):  # range of theta values; 0 to
    xunit = r * sin(theta)*cos(i) + x
    yunit = r * sin(theta)*sin(i) + y
    zunit = r*cos(theta) + z

ring = curve(color=color.white)  # creates a curve
for theta in arange(0, 2*pi, 0.01):
    ring.append(pos=(sin(theta)*cos(i) + x, sin(theta)*sin(i) + y, cos(theta) + z))

image1 = sphere(pos=(2.5, -0.866, 0), radius=0.02, color=color.yellow)
image2 = sphere(pos=(2.5, -0.866, 0), radius=0.02, color=color.yellow)
earth = sphere(pos=(-3, 0, -0.4), color=color.yellow, radius=0.3, material=materials.earth)  # creates the observer

d_c_p = pow((x-xunit)**2 + (y-yunit)**2 + (z-zunit)**2, 0.5)  # calculates the distance between the center and points on ring
d_n_p = abs(yunit + 0.4998112152755791)  # calculates the distance to the nearest point
t1 = (d_c_p + d_n_p)/c
t0 = d_c_p/c
t = t1 - t0  # calculates the time it takes from one point to another

theta = []
t = []
dtheta = np.diff(theta)  # calculates the difference in theta
dt = np.diff(t)  # calculates the difference in t
speed = r*dtheta/dt  # hence this calculates the speed

deltat = 0.005
t2 = 0
while True:
    rate(5)
    image2.pos = image2.pos + speed*deltat  # increments the position of the image1
    t2 = t2 + deltat
Your problem is that image2.pos is a vector (that's the (3,) in the error message), but speed*deltat is not: speed comes out as an empty array (that's the (0,)), because theta and t are reset to empty lists just before np.diff is taken. You can't add those together. Instead of a scalar "speed" you need a vector velocity. There also seem to be some errors in indentation in the program you posted, so there is some possibility I've misinterpreted what you're trying to do.
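For example, a minimal sketch of moving image2 along the ring with an explicit angle; omega is a made-up angular speed, and i, x, y, z are the ring parameters from the question:

omega = 0.5          # hypothetical angular speed, radians per unit time
phi = 0.0            # current angle of image2 on the ring
deltat = 0.005

while True:
    rate(100)
    phi = phi + omega * deltat
    # recompute the position on the tilted ring instead of adding a scalar to a vector
    image2.pos = vector(sin(phi) * cos(i) + x,
                        sin(phi) * sin(i) + y,
                        cos(phi) + z)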
For VPython questions it's better to post to the VPython forum, where there are many more VPython users who will see your question than if you post to stackoverflow:
https://groups.google.com/forum/?fromgroups&hl=en#!forum/vpython-users

matplotlib.pyplot imshow() now shows a solid blue colour, no longer the colour rendering?

Further to my previous, helpfully addressed, question here:
How to centre the origin in the centre of an imshow() plot
After some fiddling about with some parameters, Spyder now consistently shows a blank blue output. It is baffling!
I've forced the dtype to be uint8 (I read on a related question that this may be the cause), but to no avail.
EDIT: (Thanks to the rapid responses) here is the relevant code (from a larger program for modelling diffraction through a square aperture):
import numpy as np
import matplotlib.pyplot as plt

def expo(x, y, z, xp, yp, k):
    """
    Function of the integrand in Eq. 5
    """
    return np.exp((1j*k/(2*z))*(((x-xp)**2) + ((y-yp)**2)))

def square_2dsimpson_eval(a, b, n):
    simp_eval = np.zeros((n+1, n+1))
    deltap = (b-a)/n
    xp = 0
    yp = 0
    w = np.zeros((n+1, n+1))
    x = 0
    y = 0
    for h in range(n+1):  # the first two for loops produce the 2d Simpson matrix of coefficients
        if h == 0 or h == n:
            w[0, h] = 1
        elif h % 2 != 0:
            w[0, h] = 4
        elif h % 2 == 0:
            w[0, h] = 2
    for g in range(n+1):
        if g == 0 or g == n:
            w[g, 0] = 1
        elif g % 2 != 0:
            w[g, 0] = 4
        elif g % 2 == 0:
            w[g, 0] = 2
    for h in range(1, n+1):
        for g in range(1, n+1):
            w[h, g] = w[0, h]*w[g, 0]
    for h in range(0, n+1):
        xp = h*deltap
        for g in range(0, n+1):
            yp = g*deltap
            simp_eval[h, g] = expo(x, y, z, xp, yp, k)  # the integrand
    return (k/(2*np.pi*z))*((deltap**2)/9)*(np.sum(simp_eval*w))

n = 3.3
# this loop checks that the user's N is even as required for Simpson's rule
while n % 2 != 0:
    n = int(input("Type an even N value: "))
    if n % 2 == 0:
        break
    else:
        print("n must be even you noob!")

lam = float(input("Type light wavelength in mm: "))
k = (2*np.pi)/lam
z = float(input("Type screen distance, z in mm: "))
rho = float(input("Type rho in mm: "))

delta = 2/n
intensity = np.zeros((n+1, n+1), dtype='uint8')

for i in range(n+1):
    x = -1 + (i*delta)
    for j in range(n+1):
        y = -1 + (j*delta)
        intensity[i, j] = (abs(square_2dsimpson_eval(-rho/2, rho/2, n)))**2

print(intensity.dtype)

plt.imshow(intensity)
plt.show()
The plot has gone from the expected colour rendering (first screenshot, not reproduced here) to a uniform solid blue image (second screenshot, not reproduced here).
Thanks in advance.
Without even knowing the code that produces either image, I can only say that the second image seems to be a cutout of the first image in a region where there is no data, or where the data is close to or equal to the minimum value.
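One quick way to check that (a sketch, using the intensity array from the question) is to print the data range and add a colour bar so you can see what range the colours actually span:

print(intensity.min(), intensity.max())  # if these are (almost) equal, every pixel maps to one colour

plt.imshow(intensity.astype(float))
plt.colorbar()                           # shows the value range behind the colour map
plt.show()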
