np.mean() gives wrong mean?

I'm having trouble calculating the mean value of each column of two 2D numpy arrays. The columns of both arrays are the x, y coordinates of an ellipse. I want to calculate the centers of these ellipses so that I can center them. I use np.mean() to do so, but I get wrong values. If I plot the ellipses and the centers, the ellipses are indeed ellipse-shaped, but the points are not (at all) at the centers. So it is not due to bad ellipse points.
def acquire(self, qt, it, lapNumber):
    if self.stream is None:
        self.stream = {'qt': qt, 'it': it}
        self.mean = {'qt': np.zeros(qt.shape), 'it': np.zeros(it.shape)}
    else:
        self.stream['qt'] = np.vstack((self.stream['qt'], qt - self.mean['qt']))
        self.stream['it'] = np.vstack((self.stream['it'], it - self.mean['it']))
    if self.stream['qt'].shape[0] > 950:
        self.mean['qt'] = np.mean(self.stream['qt'], axis=0)
        self.mean['it'] = np.mean(self.stream['it'], axis=0)
        self.stream['qt'] -= self.mean['qt']
        self.stream['it'] -= self.mean['it']
        self.centered = True

I solved it while creating a minimal reproducible example. The ellipse points are actually not uniformly distributed in space, so the mean is not the center and is in fact far from it.
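To illustrate (a hedged sketch of my own, with made-up numbers): when the sample angles are clustered, the column mean drifts towards the cluster, while the midpoint of the per-axis extremes stays near the true center.
import numpy as np

# Hypothetical ellipse centered at (3, -2), sampled non-uniformly:
# angles drawn from a von Mises distribution cluster near angle 0.
t = np.random.vonmises(0.0, 2.0, 1000)
pts = np.column_stack((3 + 5 * np.cos(t), -2 + 2 * np.sin(t)))

print(np.mean(pts, axis=0))                      # biased towards the cluster
print((pts.min(axis=0) + pts.max(axis=0)) / 2)   # close to (3, -2)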

Related

Pyqtgraph ROI Mirrors the Displayed Text

I want to display some text close to the handles of a crosshair ROI. The text is mirrored and I don't know why or how to fix it.
The following code runs, where the class CrossHair is a slight modification of the CrosshairROI given at https://pyqtgraph.readthedocs.io/en/latest/_modules/pyqtgraph/graphicsItems/ROI.html#ROI. More precisely, all I did was set lockAspect to False and add another handle to deal with another direction.
import pyqtgraph as pg
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *

class MainWindow(pg.GraphicsLayoutWidget):
    def __init__(self):
        super().__init__()
        layout = self.addLayout()
        self.viewbox = layout.addViewBox(lockAspect=True)
        self.viewbox.setLimits(minXRange=200, minYRange=200, maxXRange=200, maxYRange=200)
        self.crosshair = CrossHair()
        self.crosshair.setPen(pg.mkPen("w", width=5))
        self.viewbox.addItem(self.crosshair)

class CrossHair(pg.graphicsItems.ROI.ROI):
    def __init__(self, pos=None, size=None, **kargs):
        if size is None:
            size = [50, 50]
        if pos is None:
            pos = [0, 0]
        self._shape = None
        pg.graphicsItems.ROI.ROI.__init__(self, pos, size, **kargs)
        self.sigRegionChanged.connect(self.invalidate)
        self.addScaleRotateHandle(pos=pg.Point(1, 0), center=pg.Point(0, 0))
        self.addScaleRotateHandle(pos=pg.Point(0, 1), center=pg.Point(0, 0))

    def invalidate(self):
        self._shape = None
        self.prepareGeometryChange()

    def boundingRect(self):
        return self.shape().boundingRect()

    def shape(self):
        if self._shape is None:
            x_radius, y_radius = self.getState()['size'][0], self.getState()['size'][1]
            p = QPainterPath()
            p.moveTo(pg.Point(-x_radius, 0))
            p.lineTo(pg.Point(x_radius, 0))
            p.moveTo(pg.Point(0, -y_radius))
            p.lineTo(pg.Point(0, y_radius))
            p = self.mapToDevice(p)
            stroker = QPainterPathStroker()
            stroker.setWidth(10)
            outline = stroker.createStroke(p)
            self._shape = self.mapFromDevice(outline)
        return self._shape

    def paint(self, p, *args):
        x_radius, y_radius = self.getState()['size'][0], self.getState()['size'][1]
        p.setRenderHint(QPainter.RenderHint.Antialiasing)
        p.setPen(self.currentPen)
        p.drawLine(pg.Point(0, -y_radius), pg.Point(0, y_radius))
        p.drawLine(pg.Point(-x_radius, 0), pg.Point(x_radius, 0))
        x_pos, y_pos = self.handles[0]['item'].pos(), self.handles[1]['item'].pos()
        x_length, y_length = 2*x_radius, 2*y_radius
        x_text, y_text = str(round(x_length, 2)) + "TEXT", str(round(y_length, 2)) + "TEXT"
        p.drawText(QRectF(x_pos.x()-50, x_pos.y()-50, 100, 100), Qt.AlignmentFlag.AlignLeft, x_text)
        p.drawText(QRectF(y_pos.x()-50, y_pos.y()-50, 100, 100), Qt.AlignmentFlag.AlignBottom, y_text)

if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    main = MainWindow()
    main.show()
    app.exec()
Running it, we see that the text next to each handle is drawn mirrored.
The objective is to fix the above code such that:
It displays text dependent on the length of each line (2*radius) close to its handle, without reflection.
The text is aligned close to the handle such that no matter how the user rotates the handle, the text is readable (i.e. not upside down).
I am having a great deal of trouble with the first part. The second part can probably be fixed by changing alignment policies, but I don't know which one to choose.
The reason for the inversion is that the coordinate system of pyqtgraph is always vertically inverted: following the standard convention of computer coordinates, the reference point in Qt is always at the top left, with y > 0 going down instead of up.
While this is fine for general computer-based imaging, it clearly doesn't work well for data imaging, which is commonly based on standard Cartesian references (positive values of y are always "above"). And that's what pyqtgraph does by default.
The result is that, for obvious reasons, basic drawing done directly on an active QPainter will always be vertically inverted ("mirrored"). What you show in the image is the result of a composition of vertical mirroring and rotation, which is exactly the same as horizontal mirroring.
To simplify: when p is vertically mirrored, it becomes b, which, when rotated by 180°, results in q.
There's also another issue: all pyqtgraph items are actually QGraphicsItem subclasses, and one of the most important aspects of QGraphicsItems is that their painting is and shall always be restricted by their boundingRect():
[...] all painting must be restricted to inside an item's bounding rect. QGraphicsView uses this to determine whether the item requires redrawing.
If you try to move the handles very fast, you'll probably see some drawing artifacts ("ghosts") in the text caused by the painting buffer that is used to improve drawing performance, and that's because you didn't consider those elements in the boundingRect() override: the painting engine didn't know that the bounding rect was actually bigger, and didn't consider that the previously drawn regions required repainting in order to "clear up" the previous content.
Now, since those are text displaying objects, I doubt that you're actually interested in having them always aligned to their respective axis (which is not impossible, but much more difficult). You will probably want to always display the values of those handles to the user in an easy, readable way: horizontally.
Considering the above, the preferred solution is to use child items for the text instead of manually drawing it. While, at first sight, it might seem a risk for performance and further complication, it actually ensures 2 aspects:
the text items will always be properly repainted, and without any "ghost residue" caused by the wrong bounding rect;
the performance loss is practically little to none, since item management (including painting) is completely done on the C++ side;
For that, I'd suggest the pg.TextItem class, which will also completely ignore any kind of transformation, ensuring that the text will always be visible no matter of the scale factor.
Note that "mirroring" is actually the result of a transformation matrix that uses negative scaling: a scaling of (0, -1) means that the coordinates are vertically mirrored. If you think about it, it's quite obvious: if you have a positive y value in a cartesian system (shown "above" the horizontal axis) and multiply it by -1, that result is then shown "below".
Given the above, what you need to do is to add the two "labels" as children of the handle items, and just worry about painting the two crosshair lines.
Finally, due to the general performance requirements of pyqtgraph (and QGraphicsView in general), in the following example I took the liberty of making some modifications to the original code in order to improve responsiveness:
class CrossHair(pg.graphicsItems.ROI.ROI):
    _shape = None

    def __init__(self, pos=None, size=None, **kargs):
        if size is None:
            size = [50, 50]
        if pos is None:
            pos = [0, 0]
        super().__init__(pos, size, **kargs)
        self.sigRegionChanged.connect(self.invalidate)
        font = QFont()
        font.setPointSize(font.pointSize() * 2)
        self.handleLabels = []
        for refPoint in (QPoint(1, 0), QPoint(0, 1)):
            handle = self.addScaleRotateHandle(pos=refPoint, center=pg.Point())
            handle.xChanged.connect(self.updateHandleLabels)
            handle.yChanged.connect(self.updateHandleLabels)
            handleLabel = pg.TextItem(color=self.currentPen.color())
            handleLabel.setParentItem(handle)
            handleLabel.setFont(font)
            self.handleLabels.append(handleLabel)
        self.updateHandleLabels()

    def updateHandleLabels(self):
        for label, value in zip(self.handleLabels, self.state['size']):
            label.setText(format(value * 2, '.2f'))

    def invalidate(self):
        self._shape = None
        self.prepareGeometryChange()

    def boundingRect(self):
        return self.shape().boundingRect()

    def shape(self):
        if self._shape is None:
            x_radius, y_radius = self.state['size']
            p = QPainterPath(QPointF(-x_radius, 0))
            p.lineTo(QPointF(x_radius, 0))
            p.moveTo(QPointF(0, -y_radius))
            p.lineTo(QPointF(0, y_radius))
            p = self.mapToDevice(p)
            stroker = QPainterPathStroker()
            stroker.setWidth(10)
            outline = stroker.createStroke(p)
            self._shape = self.mapFromDevice(outline)
        return self._shape

    def paint(self, p, *args):
        p.setRenderHint(QPainter.Antialiasing)
        p.setPen(self.currentPen)
        x_radius, y_radius = self.state['size']
        p.drawLine(QPointF(0, -y_radius), QPointF(0, y_radius))
        p.drawLine(QPointF(-x_radius, 0), QPointF(x_radius, 0))
Notes:
pg.Point is actually a subclass of QPointF; unlike helper functions like mkColor(), which can be necessary for pg objects and are effective in their simplicity/readability, there is really no point (pun intended) in using those subclasses for basic Qt functions, as you're doing in paint(); just use the basic class;
considering the point above, always try to leave object conversion to the C++ side; QPainterPath's moveTo and lineTo always accept floating point values (they are overloaded functions that internally transform the values to QPointF objects); on the other hand, QPainter functions like drawLine only accept individual numeric values as integers (that's why I used QPointF in paint()), so in that case you cannot directly use the coordinate values; always look for the C++ implementation and the accepted value types;
self.getState()['size'] is already a two-item tuple (width and height), so retrieving it twice is unnecessary; also, since getState() actually recalls the internal self.state dict, you can avoid the function call (as I did above) as long as getState() is not overridden by a custom subclass.

How to match a geometric template of 2D boxes to fit another set of 2D boxes

I'm trying to find a match between a set of 2D boxes with coordinates (A) (from a template with known sizes and distances between boxes) and another set of 2D boxes with coordinates (B) (which may contain more boxes than A). They should match in the sense that each box from A corresponds to a single box in B. The boxes in A together form a "stamp" which is asymmetrical in at least one dimension.
Illustration of the problem
Explanation: "Stanz" in the illustration is a box from set A.
One might even think of set A as only 2D points (the center point of each box) to make it simpler.
The end result will be to know which A box corresponds to which B box.
I can only think of very specific ways of doing this, tailored to a specific layout of boxes. Are there any known generic ways of dealing with these kinds of matching/search problems, and what are they called?
Edit: Possible solution
I have come up with one possible solution, looking for all the possible rotations at each possible B center position for a single box from set A. Here all of the points in A would be rotated and compared against the distance to B centers. Not sure if this is a good way.
Looking for the possible rotations at each B centerpoint - solution
In your example, the transformation between the template and its presence in B can be entirely defined (actually, over-defined) by two matching points.
So here's a simple approach which is kind of performant. First, put all the points in B into a kD-tree. Now, pick a canonical "first" point in A, and hypothesize matching it to each of the points in B. To check whether it matches a particular point in B, pick a canonical "second" point in A and measure its distance to the "first" point. Then, use a standard kD proximity-bounding query to find all the points in B which are roughly that distance from your hypothesized matched "first" point in B. For each of those, determine the transformation between A and B and, for each of the other points in A, determine whether there's a point in B at roughly the right place (again, using the kD-tree), early-outing at the first unmatched point.
The worst-case performance there can get quite bad with pathological cases (O(n^3 log n), I think) but in general I would expect roughly O(n log n) for well-behaved data with a low threshold. Note that the thresholding is a bit rough-and-ready, and the results can depend on your choice of "first" and "second" points.
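For reference, here is a minimal sketch of this approach using scipy.spatial.cKDTree; the function name and tolerance are mine, and the final check is done with a single vectorized query rather than a literal per-point early-out:
import numpy as np
from scipy.spatial import cKDTree

def match_template(A, B, tol=0.5):
    """Try to map template points A rigidly onto a subset of B; returns indices into B."""
    A, B = np.asarray(A, float), np.asarray(B, float)
    tree = cKDTree(B)
    a0, a1 = A[0], A[1]                      # canonical "first" and "second" points
    d01 = np.linalg.norm(a1 - a0)
    for b0 in B:                             # hypothesize A[0] -> b0
        # points in B roughly at distance d01 from b0
        for j in tree.query_ball_point(b0, d01 + tol):
            b1 = B[j]
            if abs(np.linalg.norm(b1 - b0) - d01) > tol:
                continue
            # rigid transform defined by the two matched point pairs
            ang = (np.arctan2(b1[1] - b0[1], b1[0] - b0[0])
                   - np.arctan2(a1[1] - a0[1], a1[0] - a0[0]))
            c, s = np.cos(ang), np.sin(ang)
            R = np.array(((c, -s), (s, c)))
            mapped = (A - a0) @ R.T + b0
            dists, idx = tree.query(mapped)
            if np.all(dists < tol):          # every A point found a B point
                return idx
    return None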
This is more of an idea than an answer, but it's too long for a comment. I asked some additional questions in a comment above, but the answers may not be particularly relevant, so I'll go ahead and offer some thoughts in the meantime.
As you may know, point matching is its own problem domain, and if you search for 'point matching algorithm', you'll find various articles, papers, and other resources. It seems though that an ad hoc solution might be appropriate here (one that's simpler than more generic algorithms that are available).
I'll assume that the input point set can only be rotated, and not also flipped. If this idea were to work though, it should also work with flipping - you'd just have to run the algorithm separately for each flipped configuration.
In your example image, you've matched a point from set A with a point from set B so that they're coincident. Call this shared point the 'anchor' point. You'd need to do this for every combination of a point from set A and a point from set B until you found a match or exhausted the possibilities. The problem then is to determine if a match can be made given one of these matched point pairs.
It seems that for a given anchor point, a necessary but not sufficient condition for a match is that a point from set A and a point from set B can be found that are approximately the same distance from the anchor point. (What 'approximately' means would depend on the input, and would need to be tuned appropriately given that you're using integers.) This condition is met in your example image in that the center point of each point set is (approximately) the same distance from the anchor point. (Note that there could be multiple pairs of points that meet this condition, in which case you'd have to examine each such pair in turn.)
Once you have such a pair - the center points in your example - you can use some simple trigonometry and linear algebra to rotate set A so that the points in the pair coincide, after which the two point sets are locked together at two points and not just one. In your image that would involve rotating set A about 135 degrees clockwise. Then you check to see if every point in set B has a point in set A with which it's coincident, to within some threshold. If so, you have a match.
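A small sketch of that rotation step (my code; angles via atan2, with the anchor as the pivot):
import numpy as np

def rotation_to_align(anchor, a_point, b_point):
    # Angle that rotates a_point about the anchor onto the ray towards b_point.
    va, vb = np.subtract(a_point, anchor), np.subtract(b_point, anchor)
    return np.arctan2(vb[1], vb[0]) - np.arctan2(va[1], va[0])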
In your example, this fails of course, because the rotation is not actually a match. Eventually though, if there's a match, you'll find the anchor point pair for which the test succeeds.
I realize this would be easier to explain with some diagrams, but I'm afraid this written explanation will have to suffice for the moment. I'm not positive this would work - it's just an idea. And maybe a more generic algorithm would be preferable. But, if this did work, it might have the advantage of being fairly straightforward to implement.
[Edit: Perhaps I should add that this is similar to your solution, except for the additional step to allow for only testing a subset of the possible rotations.]
[Edit: I think a further refinement may be possible here. If, after choosing an anchor point, matching is possible via rotation, it should be the case that for every point p in A there's a point in B that's (approximately) the same distance from the anchor point as p is. Again, it's a necessary but not sufficient condition, but it allows you to quickly eliminate cases where a match isn't possible via rotation; a sketch of this pre-check follows.]
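A rough sketch of that pre-check (my own code; I check the A-to-B direction since B may contain extra boxes, and the tolerance is illustrative):
import numpy as np

def distances_compatible(A, a_anchor, B, b_anchor, tol=0.5):
    """Necessary condition: each anchor distance in A appears (approximately) in B."""
    dA = np.sort(np.linalg.norm(np.asarray(A, float) - a_anchor, axis=1))
    dB = np.sort(np.linalg.norm(np.asarray(B, float) - b_anchor, axis=1))
    idx = np.clip(np.searchsorted(dB, dA), 1, len(dB) - 1)
    nearest = np.minimum(np.abs(dB[idx] - dA), np.abs(dB[idx - 1] - dA))
    return bool(np.all(nearest < tol))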
Below follows a finished solution in Python, without a kD-tree and without early-outing candidates. A better way is to implement it yourself following Sneftel's answer, but if you need something quick that comes with a plot, this might be useful.
The plot shows the different steps: it starts off with just the template as a collection of connected lines, then the template is translated to the point in B where the distances between A and B points fit best, and finally it is rotated.
In this example it was important to also match up which of the template positions was matched to which bounding-box position, so there's an extra step at the end. There might be some deviations in the code compared to the outline above.
import numpy as np
import random
import math
import matplotlib.pyplot as plt

def to_polar(pos_array):
    x = pos_array[:, 0]
    y = pos_array[:, 1]
    length = np.sqrt(x ** 2 + y ** 2)
    t = np.arctan2(y, x)
    zip_list = list(zip(length, t))
    array_polar = np.array(zip_list)
    return array_polar

def to_cartesian(pos):
    # first element radius
    # second is angle(theta)
    # Converting polar to cartesian coordinates
    radius = pos[0]
    theta = pos[1]
    x = radius * math.cos(theta)
    y = radius * math.sin(theta)
    return x, y

def calculate_distance_points(p1, p2):
    return np.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)

def find_closest_point_inx(point, neighbour_set):
    shortest_dist = None
    closest_index = -1
    # Find the point in the secondary array that is the closest
    for index, curr_neighbour in enumerate(neighbour_set):
        distance = calculate_distance_points(point, curr_neighbour)
        if shortest_dist is None or distance < shortest_dist:
            shortest_dist = distance
            closest_index = index
    return closest_index

# Find the sum of distances between each point in primary to the closest one in secondary
def calculate_agg_distance_arrs(primary, secondary):
    total_distance = 0
    for point in primary:
        closest_inx = find_closest_point_inx(point, secondary)
        dist = calculate_distance_points(point, secondary[closest_inx])
        total_distance += dist
    return total_distance

# returns a set of <primary_index, neighbour_index>
def pair_neighbours_by_distance(primary_set, neighbour_set, distance_limit):
    pairs = {}
    for num, point in enumerate(primary_set):
        closest_inx = find_closest_point_inx(point, neighbour_set)
        if calculate_distance_points(neighbour_set[closest_inx], point) > distance_limit:
            closest_inx = None
        pairs[num] = closest_inx
    return pairs

def rotate_array(array, angle, rot_origin=None):
    if rot_origin is not None:
        array = np.subtract(array, rot_origin)
    # clockwise rotation
    theta = np.radians(angle)
    c, s = np.cos(theta), np.sin(theta)
    R = np.array(((c, -s), (s, c)))
    rotated = np.matmul(array, R)
    if rot_origin is not None:
        rotated = np.add(rotated, rot_origin)
    return rotated

# Finds a point in B_set and a rotation where the points in A_set align best with B_set.
def find_stamp_rotation(A_set, B_set):
    # Step 1
    anchor_point_A = A_set[0]
    # Step 2. Convert all points to polar coordinates with anchor as origin
    A_anchor_origin = A_set - anchor_point_A
    anchor_A_polar = to_polar(A_anchor_origin)
    print(anchor_A_polar)
    # Step 3: for each point in B
    score_tuples = []
    for num_anchor, B_anchor_point_try in enumerate(B_set):
        # Step 3.1
        B_origin_rel_point = B_set - B_anchor_point_try
        B_polar_rp_origin = to_polar(B_origin_rel_point)
        # Step 3.3 select arbitrary point q from Ap
        point_Aq = anchor_A_polar[1]
        # Step 3.4 test each rotation, where point_Aq is rotated to each B-point (except the B anchor point)
        for try_rot_point_B in [B_rot_point for num_rot, B_rot_point in enumerate(B_polar_rp_origin) if num_rot != num_anchor]:
            # positive rotation is clockwise
            # Step 4.1 Rotate Ap by the angle between q and n
            angle_to_try = try_rot_point_B[1] - point_Aq[1]
            rot_try_arr = np.copy(anchor_A_polar)
            rot_try_arr[:, 1] += angle_to_try
            cart_rot_try_arr = [to_cartesian(e) for e in rot_try_arr]
            cart_B_rp_origin = [to_cartesian(e) for e in B_polar_rp_origin]
            distance_score = calculate_agg_distance_arrs(cart_rot_try_arr, cart_B_rp_origin)
            score_tuples.append((B_anchor_point_try, angle_to_try, distance_score))
    # Step 4.3
    lowest = None
    for b_point, angle, distance in score_tuples:
        print("point:{} angle(rad):{} distance(sum):{}".format(b_point, 360 * (angle / (2 * math.pi)), distance))
        if lowest is None or distance < lowest[2]:
            lowest = b_point, 360 * angle / (2 * math.pi), distance
    return lowest

def test_example():
    ax = plt.subplot()
    ax.grid(True)
    plt.title('Fit Template to BBoxes by translation and rotation')
    plt.xlim(-20, 20)
    plt.ylim(-20, 20)
    ax.set_xticks(range(-20, 20), minor=True)
    ax.set_yticks(range(-20, 20), minor=True)
    template = np.array([[-10, -10], [-10, 10], [0, 0], [10, -10], [10, 10], [0, 20]])
    # Test Bboxes are rotated 40 degrees, translated (2, 2)
    rotated = rotate_array(template, 40)
    rotated = np.subtract(rotated, [2, 2])
    # Add some extra bounding boxes as noise
    for i in range(8):
        rotated = np.append(rotated, [[random.randrange(-20, 20), random.randrange(-20, 20)]], axis=0)
    # Scramble entries in array and record the position change.
    rnd_rotated = rotated.copy()
    np.random.shuffle(rnd_rotated)
    element_positions = []
    # After shuffling, look at which index each "A"-mark ended up at, for later comparison
    # to check that the algorithm found the correct answer.
    # This represents the actual case, where I will get a bunch of unordered bboxes.
    rnd_map = {}
    indexes_translation = [num2 for num, point in enumerate(rnd_rotated) for num2, point2 in enumerate(rotated) if point[0] == point2[0] and point[1] == point2[1]]
    for num, inx in enumerate(indexes_translation):
        rnd_map[num] = inx
    # algo part 1/3
    b_point, angle, _ = find_stamp_rotation(template, rnd_rotated)
    # Plot for visualization
    legend_list = np.empty((0, 2))
    leg_template = plt.plot(template[:, 0], template[:, 1], c='r')
    legend_list = np.append(legend_list, [[leg_template[0], '1. template-pattern']], axis=0)
    leg_bboxes = plt.scatter(rnd_rotated[:, 0], rnd_rotated[:, 1], c='b', label="scatter")
    legend_list = np.append(legend_list, [[leg_bboxes, '2. bounding boxes']], axis=0)
    leg_anchor = plt.scatter(b_point[0], b_point[1], c='y')
    legend_list = np.append(legend_list, [[leg_anchor, '3. Discovered bbox anchor point']], axis=0)
    # algo part 2/3
    # Superimpose A onto B by A[0] to b_point
    offset = b_point - template[0]
    super_imposed_A = template + offset
    # Plot superimposed, but not yet rotated
    leg_s_imposed = plt.plot(super_imposed_A[:, 0], super_imposed_A[:, 1], c='k')
    #plt.legend(rubberduckz, "superimposed template on anchor")
    legend_list = np.append(legend_list, [[leg_s_imposed[0], '4. Templ superimposed on Bbox']], axis=0)
    print("Superimposed A on B by A[0] to {}".format(b_point))
    print(super_imposed_A)
    # Rotate, now the template should match pattern of bboxes
    # algo part 3/4
    super_imposed_rotated_A = rotate_array(super_imposed_A, -angle, rot_origin=super_imposed_A[0])
    # Show the final match in a last plot
    leg_s_imp_rot = plt.plot(super_imposed_rotated_A[:, 0], super_imposed_rotated_A[:, 1], c='g')
    legend_list = np.append(legend_list, [[leg_s_imp_rot[0], '5. final fit']], axis=0)
    plt.legend(legend_list[:, 0], legend_list[:, 1], loc="upper left")
    plt.show()
    # algo part 4/4
    pairs = pair_neighbours_by_distance(super_imposed_rotated_A, rnd_rotated, 10)
    print(pairs)
    for inx in range(len(pairs)):
        bbox_num = pairs[inx]
        print("template id:{}".format(inx))
        print("bbox#id:{}".format(bbox_num))
        #print("original_bbox:{}".format(rnd_map[bbox_num]))

if __name__ == "__main__":
    test_example()
Result on an actual image with bounding boxes. Here it can be seen that the scaling is incorrect, which makes the template a bit off, but it will still be able to pair up, and that's the desired end result in my case.

using Geopandas, How to randomly select in each polygon 5 Points by sampling method

I want to select 5 points in each polygon using a random sampling method, and I need the coordinates (lat, long) of those 5 points in each polygon to identify which crop is grown.
Any ideas how to do this using geopandas?
Many thanks.
My suggestion involves sampling random x and y coordinates within the shape's bounding box and then checking whether the sampled point is actually within the shape. If the sampled point is within the shape then return it, otherwise repeat until a point within the shape is found. For sampling, we can use the uniform distribution, such that all points in the shape have the same probability of being sampled. Here is the function:
import numpy as np
from shapely.geometry import Point

def random_point_in_shp(shp):
    within = False
    while not within:
        x = np.random.uniform(shp.bounds[0], shp.bounds[2])
        y = np.random.uniform(shp.bounds[1], shp.bounds[3])
        within = shp.contains(Point(x, y))
    return Point(x, y)
and here's an example of how to apply this function to an example GeoDataFrame called geo_df to get 5 random points for each entry:
for num in range(5):
    geo_df['Point{}'.format(num)] = geo_df['geometry'].apply(random_point_in_shp)
There might be more efficient ways to do this, but depending on your application the algorithm could be sufficiently fast. With my test file, which contains ~2300 entries, generating five random points for each entry took around 15 seconds on my machine.
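As an aside (an assumption about your environment, not part of the answer above): if your GeoPandas version is recent enough (0.12+ ships GeoSeries.sample_points), the loop can likely be replaced with a single vectorized call.
# Hypothetical one-liner for GeoPandas >= 0.12; returns one MultiPoint per polygon
points = geo_df.geometry.sample_points(5)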

mutual visibility of nodes on a grid

I have a simple grid, and I need to check two nodes for mutual visibility. All wall and node coordinates are known.
I have tried using vectors, but I didn't get an acceptable result. The algorithm works, but it fits badly into my program: because of it, I must transform my data to get an acceptable result.
I used this code to check nodes for mutual visibility:
import math

def finding_vector_grid(start, goal):
    distance = [start[0] - goal[0], start[1] - goal[1]]
    norm = math.sqrt(distance[0] ** 2 + distance[1] ** 2)
    if norm == 0:
        return [1, 1]
    direction = [(distance[0] / norm), (distance[1] / norm)]
    return direction

def finding_vector_path(start, goal):
    path = [start]
    direction = finding_vector_grid((start[0] * cell_width, start[1] * cell_height),
                                    (goal[0] * cell_width, goal[1] * cell_height))
    x, y = start[0] * cell_width, start[1] * cell_height
    point = start
    while True:
        if point not in path and in_map(point):
            path.append(point)
        elif not in_map(point):
            break
        x -= direction[0]
        y -= direction[1]
        point = (x // cell_width, y // cell_height)
    return path

def vector_obstacles_clean(path, obstacles):
    result = []
    for node in path:
        if node in obstacles:
            result.append(node)
            break
        result.append(node)
    return result
for example:
path = finding_vector_path((0, 0), (0, 5))
path = vector_obstacles_clean(path, [(0, 3)])
in_map - checks that a point is not beyond the map frontiers;
start, goal - tuples with x and y coords;
cell_width, cell_height - int variables with the node width and height in pixels (I use pygame for graph visualization).
I don't have any problems with this method as such, but it doesn't work with the graph; it works "by itself", which is not quite what I need. I am not good at English, please forgive me :)
The code you posted seems perfectly nice, and your question doesn't clarify what needs improving.
Rather than doing FP arithmetic on vectors, you might prefer to increment an integer X or Y pointer one pixel at a time. Consider using Bresenham's line algorithm, which enumerates pixels in the line of sight between start and goal. The key observation is that for a given slope it notices whether X or Y will increment faster, and loops on that index.
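As a hedged sketch (my code, with an obstacle set standing in for your walls), a Bresenham-based visibility check could look like this:
def bresenham_line(start, goal):
    # Yield the grid cells on the line from start to goal (inclusive).
    (x0, y0), (x1, y1) = start, goal
    dx, dy = abs(x1 - x0), -abs(y1 - y0)
    sx = 1 if x0 < x1 else -1
    sy = 1 if y0 < y1 else -1
    err = dx + dy
    while True:
        yield (x0, y0)
        if (x0, y0) == (x1, y1):
            break
        e2 = 2 * err
        if e2 >= dy:   # step in x
            err += dy
            x0 += sx
        if e2 <= dx:   # step in y
            err += dx
            y0 += sy

def mutually_visible(start, goal, obstacles):
    # Two nodes see each other if no obstacle cell lies on the line between them.
    return not any(cell in obstacles for cell in bresenham_line(start, goal))

print(mutually_visible((0, 0), (0, 5), {(0, 3)}))  # False: (0, 3) blocks the line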

Filtering signal: how to restrict the filter so that the last point of the output equals the last point of the input

Please forgive my poor knowledge of signal processing.
I want to smooth some data. Here is my code:
import numpy as np
from scipy.signal import butter, filtfilt

def testButterworth(nyf, x, y):
    b, a = butter(4, 1.5/nyf)
    fl = filtfilt(b, a, y)
    return fl

if __name__ == '__main__':
    positions_recorded = np.loadtxt('original_positions.txt', delimiter='\n')
    number_of_points = len(positions_recorded)
    end = 10
    dt = end/float(number_of_points)
    nyf = 0.5/dt
    x = np.linspace(0, end, number_of_points)
    y = positions_recorded
    fl = testButterworth(nyf, x, y)
I am pretty satisfied with the results except for one point:
it is absolutely crucial to me that the start and end points of the returned values equal the start and end points of the input. How can I introduce this restriction?
UPD 15-Dec-14 12:04:
my original data looks like this:
Applying the filter and zooming into the last part of the graph gives the following result:
So, at the moment I just care about the last point, which must equal the original point. I tried appending a copy of the data to the end of the original list this way:
the result is, as expected, even worse.
Then I tried appending the data this way:
And the slice where one period ends and the next one begins looks like this:
To do this, you're always going to have to cheat somehow, since the true filter applied to the true data doesn't behave the way you require.
One of the best ways to cheat with your data is to assume it's periodic. This has the advantages that: 1) it's consistent with the data you actually have, and all you're changing is to append data to the region you don't know about (so assuming it's periodic is as reasonable as anything else, although it may violate some unstated or implicit assumptions); 2) the result will be consistent with your filter.
You can usually get by with this by appending copies of your data to the beginning and end of your real data, or just small pieces, depending on your filter; as sketched below, scipy's filtfilt can also do a version of this padding for you.
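A minimal sketch (my addition, not the OP's code): filtfilt's padtype/padlen arguments extend the signal at both ends before filtering and trim afterwards, which reduces edge transients, though they don't force exact endpoint equality.
import numpy as np
from scipy.signal import butter, filtfilt

b, a = butter(4, 0.1)
y = np.sin(np.linspace(0, 3, 200))
smoothed = filtfilt(b, a, y, padtype='even', padlen=50)  # mirror-pad both ends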
Since the FFT assumes that the data is periodic anyway, that's often a quick and easy approach, and is fully accurate (whereas concatenating the data is an estimation of an infinitely periodic waveform). Here's an example of the FFT approach for a step filter.
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(0, 128)
y = (np.sin(.22*(x+10)) > 0).astype(float)

# filter
y2 = np.fft.fft(y)
f0 = np.fft.fftfreq(len(x))
y2[(f0 < -.25) | (f0 > .25)] = 0
y3 = abs(np.fft.ifft(y2))

plt.plot(x, y)
plt.plot(x, y3)
plt.xlim(-10, 140)
plt.ylim(-.1, 1.1)
plt.show()
Note how the end points bend towards each other at either end, even though this is not consistent with the periodicity of the waveform (since the segments at either end are very truncated). This can also be seen by adjusting the waveform so that the ends are the same (here I used x+30 instead of x+10); then the ends don't need to bend to match up, so they stay level with the end of the data.
Note, also, that to have the endpoints be exactly equal you would have to extend this plot by one point (at either end), since it is periodic with exactly the wavelength of the original waveform. Doing this is not ad hoc though, and the result will be entirely consistent with your analysis, just representing one extra point of what was assumed to be infinite repeats all along.
Finally, this FFT trick works best with waveforms of length 2^n. Other lengths may be zero-padded in the FFT. In that case, just doing concatenations at either end as I mentioned at first might be the best way to go.
The question is how to filter data while requiring that the left endpoint of the filtered result matches the left endpoint of the data, and likewise for the right endpoint. (That is, in general, the filtered result should be close to most of the data points, but not necessarily exactly match any of them; but what if you need a match at both endpoints?)
To make the filtered result exactly match the endpoints of a curve, one could add a padding of points at either end of the curve and adjust the y-position of this padding so that the endpoints of the valid part of the filter exactly match the endpoints of the original data (without the padding).
In general, this can be done either by iterating towards a solution, adjusting the padding y-position until the ends line up, or by calculating a few values and then interpolating to determine the y-positions that would be required for the matched endpoints. I'll take the second approach.
Here's the code I used, where I simulated the data as a sine wave with two flat pieces on either side (note that these flat pieces are not the padding; I'm just trying to make data that looks a bit like the OP's).
import numpy as np
from scipy.signal import butter, filtfilt
import matplotlib.pyplot as plt

#### op's code
def testButterworth(nyf, x, y):
    #b, a = butter(4, 1.5/nyf)
    b, a = butter(4, 1.5/nyf)
    fl = filtfilt(b, a, y)
    return fl

def do_fit(data):
    positions_recorded = data
    #positions_recorded = np.loadtxt('original_positions.txt', delimiter='\n')
    number_of_points = len(positions_recorded)
    end = 10
    dt = end/float(number_of_points)
    nyf = 0.5/dt
    x = np.linspace(0, end, number_of_points)
    y = positions_recorded
    fx = testButterworth(nyf, x, y)
    return fx

### simulate some data (op should have done this too!)
def sim_data():
    t = np.linspace(.1*np.pi, (2.-.1)*np.pi, 100)
    y = np.sin(t)
    c = np.ones(10, dtype=float)
    z = np.concatenate((c*y[0], y, c*y[-1]))
    return z

### code to find the required offset padding
def fit_with_pads(v, data, n=1):
    c = np.ones(n, dtype=float)
    z = np.concatenate((c*v[0], data, c*v[1]))
    fx = do_fit(z)
    return fx

def get_errors(data, fx):
    n = (len(fx)-len(data))//2
    return np.array((fx[n]-data[0], fx[-n]-data[-1]))

def vary_padding(data, span=.005, n=100):
    errors = np.zeros((4, n))  # Lpad, Rpad, Lerror, Rerror
    offsets = np.linspace(-span, span, n)
    for i in range(n):
        vL, vR = data[0]+offsets[i], data[-1]+offsets[i]
        fx = fit_with_pads((vL, vR), data, n=1)
        errs = get_errors(data, fx)
        errors[:,i] = np.array((vL, vR, errs[0], errs[1]))
    return errors

if __name__ == '__main__':
    data = sim_data()
    fx = do_fit(data)
    errors = vary_padding(data)
    plt.plot(errors[0], errors[2], 'x-')
    plt.plot(errors[1], errors[3], 'o-')
    oR = -0.30958
    oL = 0.30887
    fp = fit_with_pads((oL, oR), data, n=1)[1:-1]
    plt.figure()
    plt.plot(data, 'b')
    plt.plot(fx, 'g')
    plt.plot(fp, 'r')
    plt.show()
Here, for the padding I only used a single point on either side (n=1). Then I calculated the error for a range of values, shifting the padding up and down from the first and last data points.
For the plots:
First I plot the offset vs. the error (between the fit and the desired data value). To find the offset to use, I just zoomed in on the two lines to find the x-value of the y zero crossing, but to do this more accurately one could calculate the zero crossing from this data:
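For instance, a sketch of that calculation (my addition, assuming the error varies monotonically with the offset so np.interp applies):
oL = np.interp(0.0, errors[2], errors[0])  # left offset where the left error crosses zero
oR = np.interp(0.0, errors[3], errors[1])  # right offset where the right error crosses zero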
Here's the plot of the original "data", the fit (green) and the adjusted fit (red):
and zoomed in on the RHS:
The important point here is that the red (adjusted fit) and blue (original data) endpoints match, even though the pure fit doesn't.
Is this a valid approach? Of the various options, this seems the most reasonable, since one isn't making any claims about data that isn't being shown, and the shown region has an accurately applied filter. For example, FFTs usually assume the data is zero or periodic beyond the boundaries. Certainly, though, to be precise one should explain what was done.
