"""YOLO v3 output
"""
import numpy as np
import keras.backend as K
from keras.models import load_model
import os
class YOLO:
def __init__(self, obj_threshold, nms_threshold):
"""Init.
# Arguments
obj_threshold: Float, confidence threshold for objects.
nms_threshold: Float, IoU threshold for non-maximum suppression.
"""
self._t1 = obj_threshold
self._t2 = nms_threshold
self._yolo = load_model('data/yolo.h5')
def _process_feats(self, out, anchors, mask):
"""process output features.
# Arguments
out: Tensor (N, N, 3, 4 + 1 + 80), output feature map of yolo.
anchors: List, anchors for box.
mask: List, mask for anchors.
# Returns
boxes: ndarray (N, N, 3, 4), x, y, w, h for each box.
box_confidence: ndarray (N, N, 3, 1), confidence for each box.
box_class_probs: ndarray (N, N, 3, 80), class probs for each box.
"""
grid_h, grid_w, num_boxes = map(int, out.shape[1: 4])
anchors = [anchors[i] for i in mask]
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(K.variable(anchors),
[1, 1, len(anchors), 2])
out = out[0]
box_xy = K.get_value(K.sigmoid(out[..., :2]))
box_wh = K.get_value(K.exp(out[..., 2:4]) * anchors_tensor)
box_confidence = K.get_value(K.sigmoid(out[..., 4]))
box_confidence = np.expand_dims(box_confidence, axis=-1)
box_class_probs = K.get_value(K.sigmoid(out[..., 5:]))
col = np.tile(np.arange(0, grid_w), grid_w).reshape(-1, grid_w)
row = np.tile(np.arange(0, grid_h).reshape(-1, 1), grid_h)
col = col.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2)
row = row.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2)
grid = np.concatenate((col, row), axis=-1)
box_xy += grid
box_xy /= (grid_w, grid_h)
box_wh /= (416, 416)
box_xy -= (box_wh / 2.)
boxes = np.concatenate((box_xy, box_wh), axis=-1)
return boxes, box_confidence, box_class_probs
def _filter_boxes(self, boxes, box_confidences, box_class_probs):
"""Filter boxes with object threshold.
# Arguments
boxes: ndarray, boxes of objects.
box_confidences: ndarray, confidences of objects.
box_class_probs: ndarray, class_probs of objects.
# Returns
boxes: ndarray, filtered boxes.
classes: ndarray, classes for boxes.
scores: ndarray, scores for boxes.
"""
box_scores = box_confidences * box_class_probs
box_classes = np.argmax(box_scores, axis=-1)
box_class_scores = np.max(box_scores, axis=-1)
pos = np.where(box_class_scores >= self._t1)
boxes = boxes[pos]
classes = box_classes[pos]
scores = box_class_scores[pos]
return boxes, classes, scores
def _nms_boxes(self, boxes, scores):
"""Suppress non-maximal boxes.
# Arguments
boxes: ndarray, boxes of objects.
scores: ndarray, scores of objects.
# Returns
keep: ndarray, indexes of the boxes to keep.
"""
x = boxes[:, 0]
y = boxes[:, 1]
w = boxes[:, 2]
h = boxes[:, 3]
areas = w * h
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x[i], x[order[1:]])
yy1 = np.maximum(y[i], y[order[1:]])
xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
w1 = np.maximum(0.0, xx2 - xx1 + 1)
h1 = np.maximum(0.0, yy2 - yy1 + 1)
inter = w1 * h1
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= self._t2)[0]
order = order[inds + 1]
keep = np.array(keep)
return keep
def _yolo_out(self, outs, shape):
"""Process output of yolo base net.
# Arguments
outs: output of yolo base net.
shape: shape of original image.
# Returns
boxes: ndarray, boxes of objects.
classes: ndarray, classes of objects.
scores: ndarray, scores of objects.
"""
masks = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
[59, 119], [116, 90], [156, 198], [373, 326]]
boxes, classes, scores = [], [], []
for out, mask in zip(outs, masks):
b, c, s = self._process_feats(out, anchors, mask)
b, c, s = self._filter_boxes(b, c, s)
boxes.append(b)
classes.append(c)
scores.append(s)
boxes = np.concatenate(boxes)
classes = np.concatenate(classes)
scores = np.concatenate(scores)
# Scale boxes back to original image shape.
width, height = shape[1], shape[0]
image_dims = [width, height, width, height]
boxes = boxes * image_dims
nboxes, nclasses, nscores = [], [], []
for c in set(classes):
inds = np.where(classes == c)
b = boxes[inds]
c = classes[inds]
s = scores[inds]
keep = self._nms_boxes(b, s)
nboxes.append(b[keep])
nclasses.append(c[keep])
nscores.append(s[keep])
if not nclasses and not nscores:
return None, None, None
boxes = np.concatenate(nboxes)
classes = np.concatenate(nclasses)
scores = np.concatenate(nscores)
return boxes, classes, scores
def predict(self, image, shape):
"""Detect the objects with yolo.
# Arguments
image: ndarray, processed input image.
shape: shape of original image.
# Returns
boxes: ndarray, boxes of objects.
classes: ndarray, classes of objects.
scores: ndarray, scores of objects.
"""
outs = self._yolo.predict(image)
boxes, classes, scores = self._yolo_out(outs, shape)
return boxes, classes, scores
This is my YOLO v3 code. When I run the main program I get this error:
InvalidArgumentError: Incompatible shapes: [13,13,2] vs. [1,1,3,2] [Op:Mul]
The main script is:
import cv2
import numpy as np
from yolo_model import YOLO
yolo = YOLO(0.6, 0.5)
file = "data/coco_classes.txt"
with open(file) as f:
class_name = f.readlines()
all_classes = [c.strip() for c in class_name]
print("A")
f = "dog_cat.jpg"
path = "images/"+f
image = cv2.imread(path)
cv2.imshow("image",image)
pimage = cv2.resize(image, (416,416))
pimage = np.array(pimage, dtype = "float32")
pimage /= 255.0
pimage = np.expand_dims(pimage, axis = 0)
# yolo
boxes, classes, scores = yolo.predict(pimage, image.shape)
for box, score, cl in zip(boxes, scores, classes):
x,y,w,h = box
top = max(0, np.floor(x + 0.5).astype(int))
left = max(0, np.floor(y + 0.5).astype(int))
right = max(0, np.floor(x + w + 0.5).astype(int))
bottom = max(0, np.floor(y + h + 0.5).astype(int))
cv2.rectangle(image, (top,left), (right, bottom),(255,0,0),2)
cv2.putText(image, "{} {}".format(all_classes[cl],score),(top,left-6),cv2.FONT_HERSHEY_SIMPLEX,0.6, (0,0,255),1,cv2.LINE_AA)
cv2.imshow("yolo",image)
The problem occurs in box_wh = K.get_value(K.exp(out[..., 2:4]) * anchors_tensor). Is the multiplication necessary, and what does box_wh represent?
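A note that is not part of the original post: in YOLO v3 the network predicts log-scale size offsets, and the real box size is recovered as width = anchor_w * exp(t_w), height = anchor_h * exp(t_h), so the multiplication by the anchor tensor is necessary; box_wh is the decoded width/height of every box (later normalized by the 416x416 input size). The error shapes suggest out still has its 255 channels flattened, i.e. (13, 13, 255) after out[0], instead of (13, 13, 3, 85), so it cannot broadcast against the (1, 1, 3, 2) anchor tensor. A minimal, self-contained sketch of the missing reshape, using NumPy stand-ins for the question's variables:

import numpy as np

# stand-ins: raw net output for one 13x13 scale and its three anchors
out = np.random.rand(1, 13, 13, 255).astype("float32")
anchors = np.array([[116, 90], [156, 198], [373, 326]], dtype="float32")
anchors_tensor = anchors.reshape(1, 1, 3, 2)

grid_h, grid_w = out.shape[1:3]
feats = out[0].reshape(grid_h, grid_w, 3, -1)      # (13, 13, 3, 85): anchors get their own axis
box_wh = np.exp(feats[..., 2:4]) * anchors_tensor  # (13, 13, 3, 2) * (1, 1, 3, 2) broadcasts fine

In _process_feats the equivalent change would be reshaping out to (grid_h, grid_w, 3, -1) before the sigmoid/exp decoding.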
I have this dataloader, inspired by this example:
import torch
from torch.utils.data import Dataset, DataLoader
class CustomTextDataset(Dataset):
def __init__(self, X, y):
self.X = X
self.y = y
def __len__(self):
return len(self.y)
def __getitem__(self, idx):
data = self.X[idx]
label = self.y[idx]
return data, label
# define data and class labels
X = [1, 2, 3, 4, 5]
y = [0, 0, 1, 0, 1]
# define data set object
TD = CustomTextDataset(X, y)
# define dataloader
ddl = DataLoader(TD, batch_size=2)
for sample, target in ddl:
print(sample)
In each batch (here of size 2), the print outputs 1, 2; then 3, 4; and then 5. But how can I get the first batch to be 1, 2; then 2, 3; then 3, 4; and last 4, 5? That is, each batch repeats the last element of the previous one.
Can this be generalized so that the next batch reuses the last n elements of the previous batch, and so that the label is taken not from position idx but from idx + m? Thanks.
I don't think this is possible using a PyTorch DataLoader. However, you could add a custom method to CustomTextDataset that retrieves a custom batch, something like:
def get_custom_batch(self, k, n=1, batch_size=2):
    # gather batch_size consecutive items, starting n elements before index k
    # (note: assumes __getitem__ returns tensors rather than plain Python numbers)
    tmp_data = tuple([self[k - n + ii] for ii in range(batch_size)])
    X = torch.cat([x[0][None, ...] for x in tmp_data], dim=0)
    y = torch.cat([x[1][None, ...] for x in tmp_data], dim=0)
    return (X, y)
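An alternative that does work with a plain DataLoader (a sketch, not part of the original answer; the class name and the size/label_offset parameters are made up for illustration): make each dataset item a window of consecutive elements, so the overlap is built into __getitem__ and label_offset plays the role of the m from the question.

import torch
from torch.utils.data import Dataset, DataLoader

class OverlappingWindowDataset(Dataset):
    # item idx is the window X[idx : idx + size], so consecutive items
    # share size - 1 elements: [1, 2], [2, 3], [3, 4], [4, 5]
    def __init__(self, X, y, size=2, label_offset=0):
        self.X, self.y = X, y
        self.size = size
        self.label_offset = label_offset  # label taken from idx + m
    def __len__(self):
        return len(self.X) - self.size + 1
    def __getitem__(self, idx):
        window = torch.tensor(self.X[idx:idx + self.size])
        label = self.y[min(idx + self.label_offset, len(self.y) - 1)]
        return window, label

ds = OverlappingWindowDataset([1, 2, 3, 4, 5], [0, 0, 1, 0, 1])
for sample, target in DataLoader(ds, batch_size=1):
    print(sample)  # tensor([[1, 2]]), tensor([[2, 3]]), tensor([[3, 4]]), tensor([[4, 5]])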
PyTorch: how to use torchvision.transforms.AugMix with torch.float32?
I am trying to apply data augmentation to an image dataset using torchvision.transforms.AugMix, but I get the following error: TypeError: Only torch.uint8 image tensors are supported, but found torch.float32.
I tried converting the tensor to int, but I got another error.
My code, where I am trying to use the AugMix function:
import json
import os

import torch
import torchvision
from PIL import Image

transform = torchvision.transforms.Compose(
[
torchvision.transforms.Resize((224, 224)), # resize to 224*224
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), # normalization
torchvision.transforms.AugMix()
]
)
to_tensor = torchvision.transforms.ToTensor()
Image.MAX_IMAGE_PIXELS = None
class BreastDataset(torch.utils.data.Dataset):
def __init__(self, json_path, data_dir_path='./dataset', clinical_data_path=None, is_preloading=True):
self.data_dir_path = data_dir_path
self.is_preloading = is_preloading
with open(json_path) as f:
print(f"load data from {json_path}")
self.json_data = json.load(f)
def __len__(self):
return len(self.json_data)
def __getitem__(self, index):
label = int(self.json_data[index]["label"])
patient_id = self.json_data[index]["id"]
patch_paths = self.json_data[index]["patch_paths"]
data = {}
if self.is_preloading:
data["bag_tensor"] = self.bag_tensor_list[index]
else:
data["bag_tensor"] = self.load_bag_tensor([os.path.join(self.data_dir_path, p_path) for p_path in patch_paths])
data["label"] = label
data["patient_id"] = patient_id
data["patch_paths"] = patch_paths
return data
def load_bag_tensor(self, patch_paths):
"""Load a bag data as tensor with shape [N, C, H, W]"""
patch_tensor_list = []
for p_path in patch_paths:
patch = Image.open(p_path).convert("RGB")
patch_tensor = transform(patch) # [C, H, W]
patch_tensor = torch.unsqueeze(patch_tensor, dim=0) # [1, C, H, W]
patch_tensor_list.append(patch_tensor)
bag_tensor = torch.cat(patch_tensor_list, dim=0) # [N, C, H, W]
return bag_tensor
Any help is appreciated! Thank you in advance!
For me, applying AugMix first and then ToTensor() worked:
transformation = transforms.Compose([
transforms.AugMix(severity= 6,mixture_width=2),
transforms.ToTensor(),
transforms.RandomErasing(),
transforms.RandomGrayscale(p = 0.35)
])
torchvision.transforms.AugMix takes images in uint8 format. That means every pixel is 1 (grayscale) or 3 (RGB) numbers between 0 and 255, the classic image format.
torch.Tensor.type(torch.float32) casts a uint8 tensor to float32, but the cast is unlikely to be the only transformation that was applied to your image. float32 images are usually normalized to the range [-1, 1] or [0, 1]. The common ways to do so are:
img = img.type(torch.float32) / 128.0 - 1.0 # [-1, 1]
img = img.type(torch.float32) / 255.0 # [0, 1]
Once you know which case applies, you can cast back to uint8:
img = (img + 1.0) * 128.0 # case [-1, 1]
img = img * 255.0 # case [0, 1]
img = torch.clip(img, 0.0, 255.0)
img = img.type(torch.uint8)
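Applied to the pipeline from the question, the ordering fix would look roughly like this (a sketch, not tested against the asker's dataset: AugMix runs on the PIL/uint8 input, ToTensor then converts to float32 in [0, 1], and Normalize comes last):

transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((224, 224)),  # resize to 224*224
    torchvision.transforms.AugMix(),            # operates on PIL / uint8 images
    torchvision.transforms.ToTensor(),          # uint8 -> float32 in [0, 1]
    torchvision.transforms.Normalize((0.485, 0.456, 0.406),
                                     (0.229, 0.224, 0.225)),  # normalization
])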
I have the following code, which runs well under Visual Studio Code with python 3.9.10, opencv 4.5.5 and numpy 1.22.1.
I would like to migrate this code to the Spyder IDE (version 5, another notebook), with python 3.8, opencv 4.5.1 and numpy 1.22.2.
In Spyder, I get the error message TypeError: only integer scalar arrays can be converted to a scalar index in the line output_layers = [layer_names[i-1]...] (in get_output_layers in the code below).
I have already checked other answers on this site, such as
TypeError when indexing a list with a NumPy array: only integer scalar arrays can be converted to a scalar index
which suggest a list comprehension, but to my understanding I have already implemented that.
What is the reason it runs correctly in one environment but not in the other?
import cv2
import numpy as np
def get_output_layers(net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]
return output_layers
def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
label = str(classes[class_id])
color = COLORS[class_id]
cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), color, 2)
cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
image = cv2.imread('horses.jpg')
Width = image.shape[1]
Height = image.shape[0]
scale = 0.00392
classes = None
with open(r'yolov3.txt', 'r') as f:
classes = [line.strip() for line in f.readlines()]
COLORS = np.random.uniform(0, 255, size=(len(classes), 3))
net = cv2.dnn.readNet('yolov3.weights','yolov3.cfg')
blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
net.setInput(blob)
outs = net.forward(get_output_layers(net))
class_ids = []
confidences = []
boxes = []
conf_threshold = 0.5
nms_threshold = 0.4
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.5:
center_x = int(detection[0] * Width)
center_y = int(detection[1] * Height)
w = int(detection[2] * Width)
h = int(detection[3] * Height)
x = center_x - w / 2
y = center_y - h / 2
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([x, y, w, h])
indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
for i in indices:
box = boxes[i]
x = box[0]
y = box[1]
w = box[2]
h = box[3]
draw_prediction(image, class_ids[i], confidences[i], round(x), round(y),
round(x+w), round(y+h))
cv2.imshow("object detection", image)
cv2.waitKey()
cv2.imwrite("object-detection.jpg", image)
cv2.destroyAllWindows()
There were subtle, recent API changes w.r.t. handling std::vector in Python
(getUnconnectedOutLayers still returns a 2d array in 4.5.1, but a 1d one in 4.5.5).
To avoid the whole trouble, please simply use:
output_layers = net.getUnconnectedOutLayersNames()
(like it is done in the sample)
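If you need to keep the index-based version working across both OpenCV versions, here is a small sketch that tolerates either return shape (flattening makes the old (N, 1) array and the new (N,) array look the same):

import numpy as np

def get_output_layers(net):
    layer_names = net.getLayerNames()
    # flatten() copes with both the 2d (4.5.1) and 1d (4.5.5) return shape
    ids = np.asarray(net.getUnconnectedOutLayers()).flatten()
    return [layer_names[i - 1] for i in ids]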
I am diving into 3D graphics with PyOpenGL and am thoroughly overwhelmed. I want to be able to implement shadows in my scenes and prevent GL from drawing objects through each other. This is the way I see it:
The best way I can see is a custom shader.
According to the PyOpenGL tutorials on SourceForge, I need a whole bunch of libraries. I want to do this with just PyOpenGL if possible.
I need this shader to be PyQt5 compatible.
As much as possible, I would like the rendering to happen on the GPU, so the CPU can focus on data handling and the rest of my project, making full use of all resources.
If I make a custom shader, I would like to include a basic function for drawing lines and faces, something like what I normally do with glVertex3f.
How do I go about this? I looked at this tutorial, but I cannot sort out what is bare necessity and what is not.
This is what I am using right now, but GL draws shapes on top of each other, and there is no shadow rendering at all.
# File structure is as follows:
# imports
# exceptions
# shape classes
# main shader
#---------- imports ----------#
from OpenGL.GL import (
glLoadIdentity, glTranslatef, glRotatef,
glClear, glBegin, glEnd,
glColor3fv, glVertex3fv,
GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT,
GL_QUADS, GL_LINES
)
from OpenGL.GLU import gluPerspective
#---------- exceptions ----------#
class shapeNotFound(Exception):
pass
#---------- shape classes ----------#
class cube():
render = True
def __init__(self, location = (0, 0, 0), size = 0.1, drawWires = True, drawFaces = False, color = (1, 1, 1)):
self.location = location
self.size = size
self.drawWires = drawWires
self.drawFaces = drawFaces
self.color = color
self.compute()
def compute(self):
x, y, z = self.location
s = self.size / 2
self.vertices = [ #8 corner points calculated in reference to the supplied center point
(-s + x, s + y, -s + z), (s + x, s + y, -s + z),
(s + x, -s + y, -s + z), (-s + x, -s + y, -s + z),
(-s + x, s + y, s + z), (s + x, s + y, s + z),
(s + x, -s + y, s + z), (-s + x, -s + y, s + z)
]
self.wires = [ #12 tuples referencing the corner points
(0,1), (0,3), (0,4), (2,1), (2,3), (2,6),
(7,3), (7,4), (7,6), (5,1), (5,4), (5,6)
]
self.facets = [ #6 tuples referencing the corner points
(0, 1, 2, 3), (0, 1, 6, 5), (0, 3, 7, 4),
(6, 5, 1, 2), (6, 7, 4, 5), (6, 7, 3, 2)
]
def show(self):
self.render = True
def hide(self):
self.render = False
def move(self, location):
self.location = location
self.compute()
def recolor(self, col):
if type(col) is tuple:
self.color = col
class mesh():
vertices = []
facets = []
wires = []
render = True
def __init__(self, drawWires = True, drawFaces = False, color = (1, 1, 1)):
self.drawWires = drawWires
self.drawFaces = drawFaces
self.color = color
self.vertices = []
self.facets = []
self.wires = []
self.render = True
def addFacet(self, coords): #takes a tuple of three location tuples.
addr = len(self.vertices)
addrs = [None, None, None]
for i in range(3):
c = coords[i]
if not c in self.vertices:
self.vertices.append(c)
addrs[i] = self.vertices.index(c)
self.facets.append((addrs[0], addrs[1], addrs[2]))
self.wires.append((addrs[0], addrs[1]))
self.wires.append((addrs[2], addrs[1]))
self.wires.append((addrs[2], addrs[0]))
#---------- main shader ----------#
class shader():
#variables
parent = None
shapes = []
shapeTypes = [cube, mesh] #the classes themselves; type(cube) would give the metaclass, not cube
#functions
def __init__(self, parent = None):
self.parent = parent
print('Initiated new shader as child of {}.'.format(self.parent))
def resize(self, newSize):
self.sizeX, self.sizeY = newSize
def addShape(self, shapeIn):
if type(shapeIn) not in self.shapeTypes:
raise shapeNotFound("Shape {} not found.".format(shapeIn))
self.shapes.append(shapeIn)
def paintGL(self):
#This function uses shape objects, such as cube() or mesh(). Shape objects require the following:
#a list named 'vertices' - This list is a list of points, from which edges and faces are drawn.
#a list named 'wires' - This list is a list of tuples which refer to vertices, dictating where to draw wires.
#a list named 'facets' - This list is a list of tuples which refer to vertices, dictating where to draw facets.
#a bool named 'render' - This bool is used to dictate whether or not to draw the shape.
#a bool named 'drawWires' - This bool is used to dictate whether wires should be drawn.
#a bool named 'drawFaces' - This bool is used to dictate whether facets should be drawn.
glLoadIdentity()
gluPerspective(45, self.sizeX / self.sizeY, 0.1, 110.0) #set perspective?
glTranslatef(0, 0, self.zoomLevel) #I used -10 instead of -2 in the PyGame version.
glRotatef(self.rotateDegreeV, 1, 0, 0) #I used 2 instead of 1 in the PyGame version.
glRotatef(self.rotateDegreeH, 0, 0, 1)
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
if len(self.shapes) != 0:
glBegin(GL_LINES)
for s in self.shapes:
glColor3fv(s.color)
if s.render and s.drawWires:
for w in s.wires:
for v in w:
glVertex3fv(s.vertices[v])
glEnd()
glBegin(GL_QUADS)
for s in self.shapes:
glColor3fv(s.color)
if s.render and s.drawFaces:
for f in s.facets:
for v in f:
glVertex3fv(s.vertices[v])
glEnd()
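One observation that is not part of the original post: the code clears GL_DEPTH_BUFFER_BIT but never enables depth testing, which is the usual reason fixed-function GL draws shapes through each other. A minimal sketch, assuming a Qt-style initializeGL hook on whatever widget owns this shader (shadows are a separate topic and need a technique such as shadow mapping):

from OpenGL.GL import glEnable, glDepthFunc, GL_DEPTH_TEST, GL_LESS

def initializeGL(self):
    # run once when the GL context is created (e.g. QOpenGLWidget.initializeGL)
    glEnable(GL_DEPTH_TEST)  # reject fragments behind already-drawn geometry
    glDepthFunc(GL_LESS)     # keep the fragment with the smaller depth value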
I have a pickle file which contains 300 coordinates of my subject's location over time. There are some missing values in the middle, and I am using a particle filter to estimate them. In the end I do get predictions, but they are not completely accurate and come out slightly drifted.
The position of my subject is, in fact, the position of my subject's nose. I take a total of 300 frames, and each frame contains a coordinate for the nose. Some frames have the value (0, 0), meaning the value is missing. To fill those in, I am implementing the particle filter. I am a newbie with particle filters, so I may well have messed up the code. The result I get is a prediction for the 300 frames, but with drifted values; you can get a clear idea from the image.
My measurement is the distance from four landmarks, and I provide the orientation angle to the next point and the distance to the next point as the control input.
from filterpy.monte_carlo import systematic_resample
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import norm
from numpy.random import randn
import scipy.stats
from numpy.random import uniform, random # random is used by simple_resample
import pickle
from math import *
#####################################################
def create_uniform_particles(x_range, y_range, hdg_range, N):
particles = np.empty((N, 3))
particles[:, 0] = uniform(x_range[0], x_range[1], size=N)
particles[:, 1] = uniform(y_range[0], y_range[1], size=N)
particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=N)
particles[:, 2] %= 2 * np.pi
return particles
def create_gaussian_particles(mean, std, N):
particles = np.empty((N, 3))
particles[:, 0] = mean[0] + (randn(N) * std[0])
particles[:, 1] = mean[1] + (randn(N) * std[1])
particles[:, 2] = mean[2] + (randn(N) * std[2])
particles[:, 2] %= 2 * np.pi
return particles
#####################################################
def predict(particles, u, std):
# move according to control input u (heading change, velocity)
#with noise Q (std heading change, std velocity)
N = len(particles)
# update heading
#particles[:, 2] += u[0] + (randn(N) * std[0])
#particles[:, 2] %= 2 * np.pi
#u[0] += (randn(N) * std[0])
u[0] %= 2 * np.pi
# move in the (noisy) commanded direction
dist = (u[1]) #+ (randn(N) * std[1])
particles[:, 0] += np.cos(u[0]) * dist
particles[:, 1] += np.sin(u[0]) * dist
#####################################################
def update(particles, weights, z, R, landmarks):
for i, landmark in enumerate(landmarks):
distance = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
weights *= scipy.stats.norm(distance, R).pdf(z[i])
weights += 1.e-300 # avoid round-off to zero
weights /= sum(weights) # normalize
#####################################################
def estimate(particles, weights):
#returns mean and variance of the weighted particles
pos = particles[:, 0:2]
mean = np.average(pos, weights=weights, axis=0)
var = np.average((pos - mean)**2, weights=weights, axis=0)
return mean, var
#####################################################
def simple_resample(particles, weights):
N = len(particles)
cumulative_sum = np.cumsum(weights)
cumulative_sum[-1] = 1. # avoid round-off error
indexes = np.searchsorted(cumulative_sum, random(N))
# resample according to indexes
particles[:] = particles[indexes]
weights.fill(1.0 / N)
#####################################################
def neff(weights):
return 1. / np.sum(np.square(weights))
#####################################################
def resample_from_index(particles, weights, indexes):
particles[:] = particles[indexes]
weights[:] = weights[indexes]
weights.fill(1.0 / len(weights))
#####################################################
def read_pickle(pkl_file, f,j):
with open(pkl_file, 'rb') as res:
dets = pickle.load(res, encoding = 'latin1')
all_keyps = dets['all_keyps']
keyps_t = np.array(all_keyps[1])
keyps = np.zeros((keyps_t.shape[0], 4, 17))
for k in range(keyps.shape[0]):
if keyps_t[k]!=[]:
keyps[k] = keyps_t[k][0]
keyps = keyps[:,:2,:]
for i in range(keyps.shape[0]):
keyps[i][0] = keyps[i][0]/480*256
keyps[i][1] = keyps[i][1]/640*256
x0=keyps[f][0][j]
y0=keyps[f][1][j]
x1=keyps[f+1][0][j]
y1=keyps[f+1][1][j]
cord = np.array([x0,y0])
orientation = atan2((y1 - y0),(x1 - x0))
dist= sqrt((x1-x0) ** 2 + (y1-y0) ** 2)
u = np.array([orientation,dist])
return (cord, u)
#####################################################
def run_pf1(N, iters=298, sensor_std_err=.1,
do_plot=True, plot_particles=False,
xlim=(-256, 256), ylim=(-256, 256),
initial_x=None):
landmarks = np.array([[0, 0], [0, 256], [256,0], [256,256]])
NL = len(landmarks)
plt.figure()
# create particles and weights
if initial_x is not None:
particles = create_gaussian_particles(
mean=initial_x, std=(5, 5, np.pi/4), N=N)
else:
particles = create_uniform_particles((0,20), (0,20), (0, 6.28), N)
weights = np.ones(N) / N
if plot_particles:
alpha = .20
if N > 5000:
alpha *= np.sqrt(5000)/np.sqrt(N)
plt.scatter(particles[:, 0], particles[:, 1],
alpha=alpha, color='g')
xs = []
#robot_pos, u = read_pickle('.pkl',1,0)
for x in range(iters):
robot_pos, uv = read_pickle('.pkl',x,0)
print("orignal: ", robot_pos,)
# distance from robot to each landmark
zs = (norm(landmarks - robot_pos, axis=1) +
(randn(NL) * sensor_std_err))
# move diagonally forward to (x+1, x+1)
predict(particles, u=uv, std=(0, .0))
# incorporate measurements
update(particles, weights, z=zs, R=sensor_std_err,
landmarks=landmarks)
# resample if too few effective particles
if neff(weights) < N/2:
indexes = systematic_resample(weights)
resample_from_index(particles, weights, indexes)
assert np.allclose(weights, 1/N)
mu, var = estimate(particles, weights)
#mu +=(120,10)
xs.append(mu)
print("expected: ",mu)
if plot_particles:
plt.scatter(particles[:, 0], particles[:, 1],
color='k', marker=',', s=1)
p1 = plt.scatter(robot_pos[0], robot_pos[1], marker='+',
color='k', s=180, lw=3)
p2 = plt.scatter(mu[0], mu[1], marker='s', color='r')
print(p2)
xs = np.array(xs)
#plt.plot(xs[:, 0], xs[:, 1])
plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
plt.xlim(*xlim)
plt.ylim(*ylim)
# note: comparing the final position to (iters, iters) is left over from the
# example this code is based on, where the target moves diagonally to (x+1, x+1)
print('final position error, variance:\n\t', mu - np.array([iters, iters]), var)
plt.show()
return(p2)
###############################
run_pf1(N=5000)
I expect a set of 300 (estimated) coordinate values as the result of the particle filter, so I can replace the missing values in the original file with the predicted ones.
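A closing note that is not part of the original post: as written, run_pf1 returns p2, which is the matplotlib scatter handle, not the estimates; the xs array collected in the loop is what actually holds the 298 estimated positions. A sketch of splicing those estimates back into the original track, where track_coords is a hypothetical (300, 2) array of nose positions with (0, 0) marking missing frames, and run_pf1 is assumed to end with return np.array(xs):

xs = run_pf1(N=5000)                           # assumed to return np.array(xs)
track = np.asarray(track_coords, dtype=float)  # hypothetical (300, 2) nose track
for f in range(len(xs)):                       # xs covers frames 0 .. iters-1
    if np.all(track[f] == 0):                  # (0, 0) marks a missing frame
        track[f] = xs[f]                       # substitute the filter estimate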