Mayavi: setting pipeline.tube radius - python-3.x

I'm plotting a 3D network using Mayavi:
edge_size = 0.2
pts = mlab.points3d(x, y, z,
                    scale_mode='none',
                    scale_factor=0.1)
pts.mlab_source.dataset.lines = np.array(graph.edges())
tube = mlab.pipeline.tube(pts, tube_radius=edge_size)
I want to change the edge/tube radius, so I tried
tube = mlab.pipeline.tube(pts, tube_radius=listofedgeradius)
but I get an error that says:
traits.trait_errors.TraitError: The 'tube_radius' trait of a TubeFactory instance must be a float
From the error, I understand that a list cannot be assigned to tube_radius. In that case, I am not sure how to assign a different radius to each edge.
Any suggestions on how to assign edge weights/edge radii would be helpful.
EDIT: Complete working example
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from mayavi import mlab
def main(edge_color=(0.8, 0.8, 0.8), edge_size=0.02):
    t = [1, 2, 3, 4, 5]
    h = [2, 3, 4, 5, 6]
    ed_ls = [(x, y) for x, y in zip(t, h)]
    G = nx.OrderedGraph()
    G.add_edges_from(ed_ls)
    nx.draw(G)
    plt.show()
    graph_pos = nx.spring_layout(G, dim=3)
    # numpy array of x,y,z positions in sorted node order
    xyz = np.array([graph_pos[v] for v in sorted(G)])
    mlab.figure(1)
    mlab.clf()
    pts = mlab.points3d(xyz[:, 0], xyz[:, 1], xyz[:, 2])
    pts.mlab_source.dataset.lines = np.array(G.edges())
    tube = mlab.pipeline.tube(pts, tube_radius=edge_size)
    mlab.pipeline.surface(tube, color=edge_color)
    mlab.show()  # interactive window
main()
These are the new edge weights/radii to be used in the expected output:
listofedgeradius = [1, 2, 3, 4, 5]
tube = mlab.pipeline.tube(pts, tube_radius=listofedgeradius)

It seems to me that you can't plot multiple tubes with different diameters at once.
So one solution is to plot them one after another:
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from mayavi import mlab
def main(edge_color=(0.8, 0.8, 0.8)):
    t = [1, 2, 4, 4, 5, 3, 5]
    h = [2, 3, 6, 5, 6, 4, 1]
    ed_ls = [(x, y) for x, y in zip(t, h)]
    G = nx.OrderedGraph()
    G.add_edges_from(ed_ls)
    graph_pos = nx.spring_layout(G, dim=3)
    print(graph_pos)
    # numpy array of x,y,z positions in sorted node order
    xyz = np.array([graph_pos[v] for v in sorted(G)])
    listofedgeradius = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]) * 0.1
    for i, e in enumerate(G.edges()):
        # node numbers of the edge
        i1, i2 = e
        # graph_pos is a dictionary
        c1 = graph_pos[i1]
        c2 = graph_pos[i2]
        edge_xyz = np.vstack((c1, c2))
        pts = mlab.points3d(edge_xyz[:, 0], edge_xyz[:, 1], edge_xyz[:, 2])
        # pts.mlab_source.dataset.lines = np.array(G.edges())
        # always connect the first and second point
        pts.mlab_source.dataset.lines = np.array([[0, 1]])
        tube = mlab.pipeline.tube(pts, tube_radius=listofedgeradius[i])
        mlab.pipeline.surface(tube, color=edge_color)
    mlab.gcf().scene.parallel_projection = True
    mlab.show()  # interactive window
main()
Here is a larger example with 100 edges (image below), and one caveat of this solution becomes obvious: the for loop is slow.
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from mayavi import mlab
def main(edge_color=(0.8, 0.8, 0.8)):
    n = 100
    t = np.random.randint(100, size=n)
    h = np.random.randint(100, size=n)
    ed_ls = [(x, y) for x, y in zip(t, h)]
    G = nx.OrderedGraph()
    G.add_edges_from(ed_ls)
    graph_pos = nx.spring_layout(G, dim=3)
    print(graph_pos)
    # numpy array of x,y,z positions in sorted node order
    xyz = np.array([graph_pos[v] for v in sorted(G)])
    listofedgeradius = np.random.rand(n) * 0.01
    for i, e in enumerate(G.edges()):
        print(i)
        # node numbers of the edge
        i1, i2 = e
        # graph_pos is a dictionary
        c1 = graph_pos[i1]
        c2 = graph_pos[i2]
        edge_xyz = np.vstack((c1, c2))
        pts = mlab.points3d(edge_xyz[:, 0], edge_xyz[:, 1], edge_xyz[:, 2])
        # pts.mlab_source.dataset.lines = np.array(G.edges())
        # always connect the first and second point
        pts.mlab_source.dataset.lines = np.array([[0, 1]])
        tube = mlab.pipeline.tube(pts, tube_radius=listofedgeradius[i])
        mlab.pipeline.surface(tube, color=edge_color)
    mlab.gcf().scene.parallel_projection = True
    mlab.show()  # interactive window
main()
Inspired by this, this, and this, I put together a first example that works well for large graphs (I tried up to 5000 edges). There is still a for loop, but it is not used for plotting, only for gathering the data in numpy arrays, so it's not that bad.
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from mayavi import mlab
def main(edge_color=(0.8, 0.8, 0.8)):
    n = 5000
    t = np.random.randint(100, size=n)
    h = np.random.randint(100, size=n)
    ed_ls = [(x, y) for x, y in zip(t, h)]
    G = nx.OrderedGraph()
    G.add_edges_from(ed_ls)
    graph_pos = nx.spring_layout(G, dim=3)
    print(graph_pos)
    listofedgeradius = np.random.rand(n) * 0.01
    # We create a list of positions and connections, each describing a line.
    # We will collapse them in one array before plotting.
    x = list()
    y = list()
    z = list()
    s = list()
    connections = list()
    N = 2  # every edge brings two nodes
    # The index of the current point in the total amount of points
    index = 0
    for i, e in enumerate(G.edges()):
        # node numbers of the edge
        i1, i2 = e
        # graph_pos is a dictionary
        c1 = graph_pos[i1]
        c2 = graph_pos[i2]
        edge_xyz = np.vstack((c1, c2))
        x.append(edge_xyz[:, 0])
        y.append(edge_xyz[:, 1])
        z.append(edge_xyz[:, 2])
        s.append(listofedgeradius[i])
        s.append(listofedgeradius[i])
        # This is the tricky part: in a line, each point is connected
        # to the one following it. We have to express this with the indices
        # of the final set of points once all lines have been combined
        # together; this is why we need to keep track of the total number
        # of points already created (index).
        ics = np.vstack(
            [np.arange(index, index + N - 1.5),
             np.arange(index + 1, index + N - .5)]
        ).T
        connections.append(ics)
        index += N
    # Now collapse all positions, scalars and connections in big arrays
    x = np.hstack(x)
    y = np.hstack(y)
    z = np.hstack(z)
    s = np.hstack(s)
    connections = np.vstack(connections)
    # Alternative sources that were tried:
    # src = mlab.points3d(x, y, z, s)
    # src = mlab.pipeline.scalar_scatter(x, y, z, s)
    src = mlab.plot3d(x, y, z, s)
    print(src)
    print(src.parent)
    print(src.parent.parent)
    # Let the scalar value (the per-edge radius) drive the tube radius
    src.parent.parent.filter.vary_radius = 'vary_radius_by_absolute_scalar'
    # Connect them
    src.mlab_source.dataset.lines = connections
    # The stripper filter cleans up connected lines
    lines = mlab.pipeline.stripper(src)
    # Alternative: an explicit tube filter instead of plot3d
    # tube = mlab.pipeline.tube(src, tube_radius=0.01)
    # tube.filter.radius_factor = 1
    # tube.filter.vary_radius = 'vary_radius_by_scalar'
    # surf = mlab.pipeline.surface(tube, opacity=0.6, color=(0.8, 0.8, 0))
    mlab.gcf().scene.parallel_projection = True
    # And choose a nice view
    mlab.view(33.6, 106, 5.5, [0, 0, .05])
    mlab.roll(125)
    mlab.show()
main()
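As a side note, the core of this approach can also be written with mlab.pipeline.scalar_scatter and an explicit tube filter, as in the Mayavi protein example. Here is a minimal sketch, not taken from the code above; it assumes the scalar array s stores one radius value per point:

import numpy as np
from mayavi import mlab

# Two separate line segments: points 0-1 and points 2-3
x = np.array([0., 1., 0., 1.])
y = np.array([0., 0., 1., 1.])
z = np.zeros(4)
s = np.array([0.02, 0.02, 0.08, 0.08])  # per-point radii, stored as scalars
connections = np.array([[0, 1], [2, 3]])

src = mlab.pipeline.scalar_scatter(x, y, z, s)
src.mlab_source.dataset.lines = connections
src.update()
tube = mlab.pipeline.tube(src, tube_radius=0.01)
# Let the scalar drive the radius instead of the single tube_radius float
tube.filter.vary_radius = 'vary_radius_by_absolute_scalar'
mlab.pipeline.surface(tube, color=(0.8, 0.8, 0.8))
mlab.show()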

Related

When I compile YOLO v3 I get warnings

"""YOLO v3 output
"""
import numpy as np
import keras.backend as K
from keras.models import load_model
import os
class YOLO:
def __init__(self, obj_threshold, nms_threshold):
"""Init.
# Arguments
obj_threshold: Integer, threshold for object.
nms_threshold: Integer, threshold for box.
"""
self._t1 = obj_threshold
self._t2 = nms_threshold
self._yolo = load_model('data/yolo.h5')
def _process_feats(self, out, anchors, mask):
"""process output features.
# Arguments
out: Tensor (N, N, 3, 4 + 1 +80), output feature map of yolo.
anchors: List, anchors for box.
mask: List, mask for anchors.
# Returns
boxes: ndarray (N, N, 3, 4), x,y,w,h for per box.
box_confidence: ndarray (N, N, 3, 1), confidence for per box.
box_class_probs: ndarray (N, N, 3, 80), class probs for per box.
"""
grid_h, grid_w, num_boxes = map(int, out.shape[1: 4])
anchors = [anchors[i] for i in mask]
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(K.variable(anchors),
[1, 1,len(anchors), 2])
out = out[0]
box_xy = K.get_value(K.sigmoid(out[..., :2]))
box_wh = K.get_value(K.exp(out[..., 2:4]) * anchors_tensor)
box_confidence = K.get_value(K.sigmoid(out[..., 4]))
box_confidence = np.expand_dims(box_confidence, axis=-1)
box_class_probs = K.get_value(K.sigmoid(out[..., 5:]))
col = np.tile(np.arange(0, grid_w), grid_w).reshape(-1, grid_w)
row = np.tile(np.arange(0, grid_h).reshape(-1, 1), grid_h)
col = col.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2)
row = row.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2)
grid = np.concatenate((col, row), axis=-1)
box_xy += grid
box_xy /= (grid_w, grid_h)
box_wh /= (416, 416)
box_xy -= (box_wh / 2.)
boxes = np.concatenate((box_xy, box_wh), axis=-1)
return boxes, box_confidence, box_class_probs
def _filter_boxes(self, boxes, box_confidences, box_class_probs):
"""Filter boxes with object threshold.
# Arguments
boxes: ndarray, boxes of objects.
box_confidences: ndarray, confidences of objects.
box_class_probs: ndarray, class_probs of objects.
# Returns
boxes: ndarray, filtered boxes.
classes: ndarray, classes for boxes.
scores: ndarray, scores for boxes.
"""
box_scores = box_confidences * box_class_probs
box_classes = np.argmax(box_scores, axis=-1)
box_class_scores = np.max(box_scores, axis=-1)
pos = np.where(box_class_scores >= self._t1)
boxes = boxes[pos]
classes = box_classes[pos]
scores = box_class_scores[pos]
return boxes, classes, scores
def _nms_boxes(self, boxes, scores):
"""Suppress non-maximal boxes.
# Arguments
boxes: ndarray, boxes of objects.
scores: ndarray, scores of objects.
# Returns
keep: ndarray, index of effective boxes.
"""
x = boxes[:, 0]
y = boxes[:, 1]
w = boxes[:, 2]
h = boxes[:, 3]
areas = w * h
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x[i], x[order[1:]])
yy1 = np.maximum(y[i], y[order[1:]])
xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
w1 = np.maximum(0.0, xx2 - xx1 + 1)
h1 = np.maximum(0.0, yy2 - yy1 + 1)
inter = w1 * h1
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= self._t2)[0]
order = order[inds + 1]
keep = np.array(keep)
return keep
def _yolo_out(self, outs, shape):
"""Process output of yolo base net.
# Argument:
outs: output of yolo base net.
shape: shape of original image.
# Returns:
boxes: ndarray, boxes of objects.
classes: ndarray, classes of objects.
scores: ndarray, scores of objects.
"""
masks = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
[59, 119], [116, 90], [156, 198], [373, 326]]
boxes, classes, scores = [], [], []
for out, mask in zip(outs, masks):
b, c, s = self._process_feats(out, anchors, mask)
b, c, s = self._filter_boxes(b, c, s)
boxes.append(b)
classes.append(c)
scores.append(s)
boxes = np.concatenate(boxes)
classes = np.concatenate(classes)
scores = np.concatenate(scores)
# Scale boxes back to original image shape.
width, height = shape[1], shape[0]
image_dims = [width, height, width, height]
boxes = boxes * image_dims
nboxes, nclasses, nscores = [], [], []
for c in set(classes):
inds = np.where(classes == c)
b = boxes[inds]
c = classes[inds]
s = scores[inds]
keep = self._nms_boxes(b, s)
nboxes.append(b[keep])
nclasses.append(c[keep])
nscores.append(s[keep])
if not nclasses and not nscores:
return None, None, None
boxes = np.concatenate(nboxes)
classes = np.concatenate(nclasses)
scores = np.concatenate(nscores)
return boxes, classes, scores
def predict(self, image, shape):
"""Detect the objects with yolo.
# Arguments
image: ndarray, processed input image.
shape: shape of original image.
# Returns
boxes: ndarray, boxes of objects.
classes: ndarray, classes of objects.
scores: ndarray, scores of objects.
"""
outs = self._yolo.predict(image)
boxes, classes, scores = self._yolo_out(outs, shape)
return boxes, classes, scores
This is the YOLO v3 code, and when I run the main program I get this error:
InvalidArgumentError: Incompatible shapes: [13,13,2] vs. [1,1,3,2] [Op:Mul]
The main part is:
import cv2
import numpy as np
from yolo_model import YOLO
yolo = YOLO(0.6, 0.5)
file = "data/coco_classes.txt"
with open(file) as f:
    class_name = f.readlines()
all_classes = [c.strip() for c in class_name]
print("A")
f = "dog_cat.jpg"
path = "images/" + f
image = cv2.imread(path)
cv2.imshow("image", image)
pimage = cv2.resize(image, (416, 416))
pimage = np.array(pimage, dtype="float32")
pimage /= 255.0
pimage = np.expand_dims(pimage, axis=0)
# yolo
boxes, classes, scores = yolo.predict(pimage, image.shape)
for box, score, cl in zip(boxes, scores, classes):
    x, y, w, h = box
    top = max(0, np.floor(x + 0.5).astype(int))
    left = max(0, np.floor(y + 0.5).astype(int))
    right = max(0, np.floor(x + w + 0.5).astype(int))
    bottom = max(0, np.floor(y + h + 0.5).astype(int))
    cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)
    cv2.putText(image, "{} {}".format(all_classes[cl], score), (top, left - 6),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1, cv2.LINE_AA)
cv2.imshow("yolo", image)
The problem occurs at box_wh = K.get_value(K.exp(out[..., 2:4]) * anchors_tensor). Is the multiplication necessary? And what does box_wh do?
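As a side note on the error itself: shapes [13,13,2] and [1,1,3,2] are simply not broadcastable, while [13,13,3,2] would be. A small NumPy sketch with hypothetical arrays, only to illustrate the shapes:

import numpy as np

out_wh = np.zeros((13, 13, 2))    # what out[..., 2:4] apparently has
anchors = np.zeros((1, 1, 3, 2))  # anchors_tensor's shape

try:
    out_wh * anchors              # fails: 13 and 3 cannot be aligned
except ValueError as err:
    print(err)

# With an explicit per-anchor axis the product broadcasts fine:
out_wh = np.zeros((13, 13, 3, 2))  # grid_h, grid_w, num_anchors, 2
print((out_wh * anchors).shape)    # (13, 13, 3, 2)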

How to sum up a networkx graph's edge weights?

I'm working on a networkx graph project. One of my tasks is to find the shortest path between two nodes and sum up the weights along it. I can locate the shortest path, as you may see. However, I have no idea how to sum up the weights of each edge in the shortest path. I thought there would be a simple function in the package, but I found none.
import matplotlib.pyplot as plt
import networkx as nx
def graph(path):
    G = nx.Graph()
    for line in open(path):
        strlist = line.split(',')
        n1 = strlist[0].replace('.0', '')
        n2 = strlist[1].replace('.0', '')
        weight = round(float(strlist[2]), 2)
        G.add_weighted_edges_from([(n1, n2, weight)])
    # labels = dict((i, i) for i in G.nodes())
    # nx.draw_networkx_labels(G, pos=nx.spring_layout(G), labels=labels)
    # plt.savefig(filename)
    elarge = [(u, v) for (u, v, d) in G.edges(data=True) if d["weight"] > 0.5]
    esmall = [(u, v) for (u, v, d) in G.edges(data=True) if d["weight"] <= 0.5]
    pos = nx.spring_layout(G)
    # nodes
    nx.draw_networkx_nodes(G, pos, node_size=200, node_color='#ff0000', alpha=0.9)
    # edges
    nx.draw_networkx_edges(G, pos, edgelist=elarge, width=1, edge_color='b', alpha=0.05)
    nx.draw_networkx_edges(G, pos, edgelist=esmall, width=1, edge_color='b', alpha=0.3)
    edge_labels = nx.get_edge_attributes(G, 'weight')
    nx.draw_networkx_labels(G, pos, font_size=10, font_family="sans-serif")
    nx.draw_networkx_edge_labels(G, pos, edge_labels, font_size=8, font_family='sans-serif')
    plt.axis("off")
    plt.show()
    return G
G = graph('graph.csv')
shortest_value = nx.shortest_path_length(G, source='210', target='2771')
print(shortest_value)
shortest_path = nx.shortest_path(G, source='210', target='2771', method='dijkstra')
print('shortest path:{}'.format(shortest_path))
print('distance:{}'.format(shortest_value))
Use networkx.Graph.size.
From the examples:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge("a", "b", weight=2)
>>> G.add_edge("b", "c", weight=4)
>>> G.size()
2
>>> G.size(weight="weight")
6.0
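Note that G.size(weight="weight") sums the weights of all edges in the graph. If you only want the total weight along the shortest path, the shortest-path functions accept a weight argument directly; for example, with the small graph above:

import networkx as nx

G = nx.Graph()
G.add_edge("a", "b", weight=2)
G.add_edge("b", "c", weight=4)

# Weighted shortest-path length sums the edge weights along the path
print(nx.shortest_path_length(G, "a", "c", weight="weight"))  # 6

# Or sum the weights of an explicit path manually
path = nx.shortest_path(G, "a", "c", weight="weight")
total = sum(G[u][v]["weight"] for u, v in zip(path, path[1:]))
print(total)  # 6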

Using colormap in cycle (python)

How do I edit the for loops under #ax5 and #ax6 to plot the graphs in the same fashion? Currently, the lower figure has no colour transition, as opposed to the upper one. The colour transition appears in the lower figure after increasing the dpi, but some unwanted artefacts appear as well. Is there a scaling problem? Thank you.
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.gridspec import GridSpec
import math
fig, ax = plt.subplots()
plt.rcParams["figure.figsize"] = [8, 8]
# Function for plotting parallels to curves
def get_parallels(length=.1):
    px, py = [], []
    for idx in range(len(x) - 1):
        x0, y0, xa, ya = x[idx], y[idx], x[idx + 1], y[idx + 1]
        dx, dy = xa - x0, ya - y0
        norm = math.hypot(dx, dy) * 1 / length
        dx /= norm
        dy /= norm
        px.append(x0 - dy)
        py.append(y0 + dx)
    return px, py

def offset(x, y, o):
    """ Offset coordinates given by array x,y by o """
    X = np.c_[x, y].T
    m = np.array([[0, -1], [1, 0]])
    R = np.zeros_like(X)
    S = X[:, 2:] - X[:, :-2]
    R[:, 1:-1] = np.dot(m, S)
    R[:, 0] = np.dot(m, X[:, 1] - X[:, 0])
    R[:, -1] = np.dot(m, X[:, -1] - X[:, -2])
    On = R / np.sqrt(R[0, :]**2 + R[1, :]**2) * o
    Out = On + X
    return Out[0, :], Out[1, :]

dpi = 20

def offset_curve(ax, x, y, o):
    """ Offset array x,y in data coordinates
    by o in points """
    trans = ax.transData.transform
    inv = ax.transData.inverted().transform
    X = np.c_[x, y]
    Xt = trans(X)
    xto, yto = offset(Xt[:, 0], Xt[:, 1], o * dpi / 72.)
    Xto = np.c_[xto, yto]
    Xo = inv(Xto)
    return Xo[:, 0], Xo[:, 1]

fig = plt.figure(constrained_layout=True)
gs = GridSpec(3, 6, figure=fig)
ax5 = fig.add_subplot(gs[1, 3:6])
ax6 = fig.add_subplot(gs[2, :3])
ax7 = fig.add_subplot(gs[2, 3:6])
cmap = plt.get_cmap('Greys_r')

# ax5
x = np.linspace(-1, 1, 100)
y = -x**2
ax5.set_ylim(-1.02, 0.3)
width_l = ax5.get_ylim()[1] - ax5.get_ylim()[0]
for t in np.linspace(0, 1, 40):
    length = -0.1 * width_l * t
    ax5.plot(*get_parallels(length=length), color=cmap(t / 2 + 0.25))

# ax6
x = np.linspace(-3, 3, 100)
y = -(1 / 4 * x**4 - 1.6 * x**2)
ax6.plot(x, y)
ax6.set_xlim(ax6.get_xlim()[0] - 0.5, ax6.get_xlim()[1] + 0.5)
ax6.scatter(1 / 2 * (ax6.get_xlim()[0] + ax6.get_xlim()[1]), 1.2,
            marker='o', s=900, facecolors='none')
lines = []
width_l = ax6.get_ylim()[1] - ax6.get_ylim()[0]
for t in np.linspace(0, 1, 40):
    l, = ax6.plot(x, y - t * 0.1 * width_l, color=cmap(t / 2 + 0.25))
    lines.append(l)

def plot_rainbow(event=None):
    x0 = x
    y0 = y
    for i in range(len(lines)):
        xx, yy = offset_curve(ax, x0, y0, -width_l)
        lines[i].set_data(xx, yy)
        lines[i].set_linewidth(1.1 * width_l)
        x0 = xx
        y0 = yy

plot_rainbow()
fig.canvas.mpl_connect("resize_event", plot_rainbow)
fig.canvas.mpl_connect("button_release_event", plot_rainbow)
plt.savefig('fig.pdf')

Solving coordinate state estimation using particle filter in python

I have a pickle file which contains 300 coordinates of my subject's location over time. There are some missing values in the middle, for which I am using a particle filter to estimate them. At the end, I get some predictions (not completely accurate), but in a somewhat drifted form.
The position of my subject is, in fact, the position of my subject's nose. I take a total of 300 frames, and each frame contains a coordinate for the nose. Some frames have the value (0,0), meaning the values are missing. In order to fill them in, I am implementing the particle filter. I am new to particle filters, so it is possible that I have messed up the code. The results I get are predictions for 300 frames with drifted values. You can get a clear idea from the image.
My measurement value is the distance from four landmarks, and I provide the orientation angle to the next point and the distance to the next point as additional measurements.
from filterpy.monte_carlo import systematic_resample
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import norm
from numpy.random import randn, random, uniform
import scipy.stats
import pickle
from math import *

#####################################################
def create_uniform_particles(x_range, y_range, hdg_range, N):
    particles = np.empty((N, 3))
    particles[:, 0] = uniform(x_range[0], x_range[1], size=N)
    particles[:, 1] = uniform(y_range[0], y_range[1], size=N)
    particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=N)
    particles[:, 2] %= 2 * np.pi
    return particles

def create_gaussian_particles(mean, std, N):
    particles = np.empty((N, 3))
    particles[:, 0] = mean[0] + (randn(N) * std[0])
    particles[:, 1] = mean[1] + (randn(N) * std[1])
    particles[:, 2] = mean[2] + (randn(N) * std[2])
    particles[:, 2] %= 2 * np.pi
    return particles

#####################################################
def predict(particles, u, std):
    # move according to control input u (heading change, velocity)
    # with noise Q (std heading change, std velocity)
    N = len(particles)
    # update heading
    # particles[:, 2] += u[0] + (randn(N) * std[0])
    # particles[:, 2] %= 2 * np.pi
    # u[0] += (randn(N) * std[0])
    u[0] %= 2 * np.pi
    # move in the (noisy) commanded direction
    dist = (u[1])  # + (randn(N) * std[1])
    particles[:, 0] += np.cos(u[0]) * dist
    particles[:, 1] += np.sin(u[0]) * dist

#####################################################
def update(particles, weights, z, R, landmarks):
    for i, landmark in enumerate(landmarks):
        distance = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
        weights *= scipy.stats.norm(distance, R).pdf(z[i])
    weights += 1.e-300       # avoid round-off to zero
    weights /= sum(weights)  # normalize

#####################################################
def estimate(particles, weights):
    # returns mean and variance of the weighted particles
    pos = particles[:, 0:2]
    mean = np.average(pos, weights=weights, axis=0)
    var = np.average((pos - mean)**2, weights=weights, axis=0)
    return mean, var

#####################################################
def simple_resample(particles, weights):
    N = len(particles)
    cumulative_sum = np.cumsum(weights)
    cumulative_sum[-1] = 1.  # avoid round-off error
    indexes = np.searchsorted(cumulative_sum, random(N))
    # resample according to indexes
    particles[:] = particles[indexes]
    weights.fill(1.0 / N)

#####################################################
def neff(weights):
    return 1. / np.sum(np.square(weights))

#####################################################
def resample_from_index(particles, weights, indexes):
    particles[:] = particles[indexes]
    weights[:] = weights[indexes]
    weights.fill(1.0 / len(weights))

#####################################################
def read_pickle(pkl_file, f, j):
    with open(pkl_file, 'rb') as res:
        dets = pickle.load(res, encoding='latin1')
    all_keyps = dets['all_keyps']
    keyps_t = np.array(all_keyps[1])
    keyps = np.zeros((keyps_t.shape[0], 4, 17))
    for k in range(keyps.shape[0]):
        if keyps_t[k] != []:
            keyps[k] = keyps_t[k][0]
    keyps = keyps[:, :2, :]
    for i in range(keyps.shape[0]):
        keyps[i][0] = keyps[i][0] / 480 * 256
        keyps[i][1] = keyps[i][1] / 640 * 256
    x0 = keyps[f][0][j]
    y0 = keyps[f][1][j]
    x1 = keyps[f + 1][0][j]
    y1 = keyps[f + 1][1][j]
    cord = np.array([x0, y0])
    orientation = atan2((y1 - y0), (x1 - x0))
    dist = sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)
    u = np.array([orientation, dist])
    return (cord, u)

#####################################################
def run_pf1(N, iters=298, sensor_std_err=.1,
            do_plot=True, plot_particles=False,
            xlim=(-256, 256), ylim=(-256, 256),
            initial_x=None):
    landmarks = np.array([[0, 0], [0, 256], [256, 0], [256, 256]])
    NL = len(landmarks)
    plt.figure()
    # create particles and weights
    if initial_x is not None:
        particles = create_gaussian_particles(
            mean=initial_x, std=(5, 5, np.pi / 4), N=N)
    else:
        particles = create_uniform_particles((0, 20), (0, 20), (0, 6.28), N)
    weights = np.ones(N) / N
    if plot_particles:
        alpha = .20
        if N > 5000:
            alpha *= np.sqrt(5000) / np.sqrt(N)
        plt.scatter(particles[:, 0], particles[:, 1],
                    alpha=alpha, color='g')
    xs = []
    # robot_pos, u = read_pickle('.pkl', 1, 0)
    for x in range(iters):
        robot_pos, uv = read_pickle('.pkl', x, 0)
        print("original: ", robot_pos)
        # distance from robot to each landmark
        zs = (norm(landmarks - robot_pos, axis=1) +
              (randn(NL) * sensor_std_err))
        # move diagonally forward to (x+1, x+1)
        predict(particles, u=uv, std=(0, .0))
        # incorporate measurements
        update(particles, weights, z=zs, R=sensor_std_err,
               landmarks=landmarks)
        # resample if too few effective particles
        if neff(weights) < N / 2:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)
            assert np.allclose(weights, 1 / N)
        mu, var = estimate(particles, weights)
        # mu += (120, 10)
        xs.append(mu)
        print("expected: ", mu)
        if plot_particles:
            plt.scatter(particles[:, 0], particles[:, 1],
                        color='k', marker=',', s=1)
        p1 = plt.scatter(robot_pos[0], robot_pos[1], marker='+',
                         color='k', s=180, lw=3)
        p2 = plt.scatter(mu[0], mu[1], marker='s', color='r')
        print(p2)
    xs = np.array(xs)
    # plt.plot(xs[:, 0], xs[:, 1])
    plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
    plt.xlim(*xlim)
    plt.ylim(*ylim)
    print('final position error, variance:\n\t', mu - np.array([iters, iters]), var)
    plt.show()
    return (p2)

###############################
run_pf1(N=5000)
I expect a set of 300 (estimated) coordinate values as the result of the particle filter, so I can replace the missing values in the original file with these predicted ones.
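For that replacement step itself, a small sketch may help; it assumes coords is the original 300x2 array with (0, 0) marking missing frames and estimates is the aligned filter output (both names are mine, not from the code above):

import numpy as np

def fill_missing(coords, estimates):
    # coords: (300, 2) original positions, (0, 0) marks a missing frame
    # estimates: per-frame PF estimates aligned with coords
    filled = coords.copy()
    missing = np.all(coords == 0, axis=1)  # frames where both x and y are 0
    filled[missing] = estimates[missing]
    return filled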

Cellular automata - repeated simulation imshow()

I have a problem with repeating the animation. I want to use matplotlib to visualise each simulation step on the matrix. Here is the code for the simulation:
import numpy as np
from matplotlib import pyplot as plt
s = np.array([[1, 1, 1], [1, 10, 1], [1, 1, 1]], dtype=np.int8)  # Matrix of weights
e = np.zeros((19,), dtype=np.int8)  # Vector of rules
e[3] = 1
e[12] = 1
e[13] = 1
mama = np.array([[1, 0, 1], [0, 1, 0], [0, 0, 1]], dtype=np.int8)  # The matrix to be tested

def simulation(ma):  # Simulation on a given matrix
    n, m = ma.shape
    # Creates an extended matrix, avoiding conflicts at the edges of the
    # initial matrix. Here I construct a torus.
    p = np.zeros((n + 2, m + 2), dtype=np.int8)
    p[1:-1, 1:-1] = ma       # middle
    p[0, 1:-1] = ma[n - 1]   # the first row of p, the last of ma
    p[-1, 1:-1] = ma[0]      # the last row of p, the first of ma
    p[1:-1, 0] = ma[0:, -1]  # left column of p, right of ma
    p[1:-1, -1] = ma[0:, 0]  # right column of p, left of ma
    p[-1, 0] = ma[0, -1]     # bottom left corner
    p[-1, -1] = ma[0, 0]     # bottom right corner
    p[0, 0] = ma[-1, -1]     # top left corner
    p[0, -1] = ma[-1, 0]     # top right corner
    new = np.zeros(ma.shape, dtype=np.int8)
    v, c = p.shape  # rows and columns
    for i in range(1, v):
        for j in range(1, c):
            if p[i-1:i+2, j-1:j+2].shape == (3, 3):
                new[i-1, j-1] = e[np.sum(p[i-1:i+2, j-1:j+2] * s)]
    return new
However, I want to run the simulation for a specified number of repetitions and visualise each step, so I tried this code:
def rep(fun, mac, ti):  # function, matrix, repetitions (time)
    if ti == 1:
        plt.imshow(fun(mac))
        plt.title("Cellular automaton")
        plt.show()
    else:
        f = fun(rep(fun, mac, ti-1))
        plt.imshow(f)
        plt.title("Cellular automaton")
        plt.show()
I get an error:
n, m = ma.shape
AttributeError: 'NoneType' object has no attribute 'shape'
Please, could you help me? I am getting really tired of my inability to visualise my work.
ADDITIONALLY:
I have substituted rep with:
def shoow(fig):
    plt.imshow(fig)
    plt.title("Cellular automaton")
    plt.show()

def repet(fun, mac, ti):
    c1 = mac
    for i in range(ti):
        f = fun(c1)
        shoow(f)
        c1 = f
However, it creates a new figure each time. How can I get a continuous simulation?
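One common way to get a single, continuously updating window is matplotlib's interactive redraw with plt.pause, creating the image artist once and updating it in place instead of opening a new figure per step. A rough sketch (the animate name is mine), reusing the simulation function above:

import numpy as np
from matplotlib import pyplot as plt

def animate(fun, mac, ti, delay=0.2):
    current = mac
    fig, ax = plt.subplots()
    im = ax.imshow(current)   # create the image artist once
    ax.set_title("Cellular automaton")
    for _ in range(ti):
        current = fun(current)
        im.set_data(current)  # update the existing image in place
        plt.pause(delay)      # redraw and process GUI events
    plt.show()

# e.g. animate(simulation, mama, 10)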
