I have been trying to get fewer boxes with MSER, since I get many boxes created repeatedly on the same element with very little pixel difference between them. My code is as below:
## Get mser, and set parameters
_delta = 10
_min_area = 250
_max_area = 800
_max_variation = 10.0
_min_diversity = 30.0
_max_evolution = 10
_area_threshold = 12.0
_min_margin = 2.9
_edge_blur_size = 3
mser = cv2.MSER_create(_delta,_min_area, _max_area, _max_variation,
_min_diversity,_max_evolution, _area_threshold, _min_margin, _edge_blur_size)
and then
## Do MSER detection, get the coordinates and bboxes on the original image
gray = cv2.cvtColor(final, cv2.COLOR_BGR2GRAY)
coordinates, bboxes = mser.detectRegions(gray)
After this, I see 26K boxes created. Which of the parameters can be tuned to get a smaller number of regions (since they overlap a lot)? Kindly help?
_delta is the most important parameter for reducing the number of boxes. Try raising it to 25. The higher the _delta, the fewer blobs you will get.
_min_area - The smallest blob
_max_area - The largest blob
_min_diversity - Raise to reduce the number of overlapping blobs
_max_variation - Raise to reduce areas with high variance
After that, I would check the bboxes to filter out overlapping blobs.
Code Example
import cv2
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
img = cv2.imread('input_img.png')
iou_th = 0.95
mser = cv2.MSER_create(_delta=10, _min_area=1000, _max_area=int(0.1 * np.pi * (img.shape[0] /2)**2), _max_variation=0.1)
regions, bboxes = mser.detectRegions(img)
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
# Debug plot
img_ = img.copy()
cv2.polylines(img_, hulls, 1, (255, 0, 0), thickness=1)
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(img_)
ax.set_title('MSER with overlapping regions')
size_dict = {k: len(region) for k, region in enumerate(regions)}
# Cull overlapping blobs
graph = nx.Graph()
graph.add_nodes_from(range(len(hulls)))
for i in range(len(hulls)):
    for j in range(len(hulls)):
        if i >= j:
            continue
        # Intersection-over-union (IoU) of the two bounding boxes
        box_i = bboxes[i]
        box_j = bboxes[j]
        tl_i = box_i[:2]
        tl_j = box_j[:2]
        br_i = tl_i + box_i[2:]
        br_j = tl_j + box_j[2:]
        tl = np.maximum(tl_i, tl_j)
        br = np.minimum(br_i, br_j)
        intersected_rect = br - tl
        intersection = np.prod(intersected_rect) if intersected_rect[0] > 0 and intersected_rect[1] > 0 else 0
        union = np.prod(box_i[2:]) + np.prod(box_j[2:]) - intersection
        iou = intersection / union
        if iou > iou_th:
            graph.add_edge(i, j, iou=iou)
# Make a list of unique regions - pick the smallest region of each overlapping group
# (connected_component_subgraphs was removed in networkx 2.4; use connected_components instead)
components = list(nx.connected_components(graph))
unique_blobs = []
for component in components:
    # Choose the smallest region in the group
    smallest_idx = None
    smallest_blob = np.inf
    for node in component:
        if size_dict[node] < smallest_blob:
            smallest_blob = size_dict[node]
            smallest_idx = node
    unique_blobs.append(smallest_idx)
hulls = [hulls[k] for k in unique_blobs]
regions = [regions[k] for k in unique_blobs]
bboxes = [bboxes[k] for k in unique_blobs]
size_dict = {k: len(region) for k, region in enumerate(regions)}
# debug plot
img_ = img.copy()
cv2.polylines(img_, hulls, 1, (255, 0, 0), thickness=1)
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(img_)
ax.set_title('MSER with unique regions')
plt.show()
I have the following code, which runs well under Visual Studio Code with python 3.9.10, opencv 4.5.5 and numpy 1.22.1.
I would like to migrate this code to the Spyder IDE (version 5, on another notebook), with python 3.8, opencv 4.5.1 and numpy 1.22.2.
In Spyder, I get the error message TypeError: only integer scalar arrays can be converted to a scalar index in the line output_layers = [layer_names[i-1]...] (the marked line further down in the code section).
I have already checked other answers on this site such as
TypeError when indexing a list with a NumPy array: only integer scalar arrays can be converted to a scalar index
which suggests a list comprehension, but as far as I understand I have already implemented this.
What is the reason the code runs correctly in one environment but not in the other?
import cv2
import numpy as np
def get_output_layers(net):
    layer_names = net.getLayerNames()
    output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]  # <-- marked line: error occurs here in Spyder
    return output_layers

def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
    label = str(classes[class_id])
    color = COLORS[class_id]
    cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), color, 2)
    cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
image = cv2.imread('horses.jpg')
Width = image.shape[1]
Height = image.shape[0]
scale = 0.00392
classes = None
with open(r'yolov3.txt', 'r') as f:
    classes = [line.strip() for line in f.readlines()]
COLORS = np.random.uniform(0, 255, size=(len(classes), 3))
net = cv2.dnn.readNet('yolov3.weights','yolov3.cfg')
blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
net.setInput(blob)
outs = net.forward(get_output_layers(net))
class_ids = []
confidences = []
boxes = []
conf_threshold = 0.5
nms_threshold = 0.4
for out in outs:
    for detection in out:
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        if confidence > 0.5:
            center_x = int(detection[0] * Width)
            center_y = int(detection[1] * Height)
            w = int(detection[2] * Width)
            h = int(detection[3] * Height)
            x = center_x - w / 2
            y = center_y - h / 2
            class_ids.append(class_id)
            confidences.append(float(confidence))
            boxes.append([x, y, w, h])
indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
for i in indices:
    box = boxes[i]
    x = box[0]
    y = box[1]
    w = box[2]
    h = box[3]
    draw_prediction(image, class_ids[i], confidences[i], round(x), round(y),
                    round(x+w), round(y+h))
cv2.imshow("object detection", image)
cv2.waitKey()
cv2.imwrite("object-detection.jpg", image)
cv2.destroyAllWindows()
There were subtle, recent API changes in how std::vector is handled in Python (4.5.1 still returns a 2D array from getUnconnectedOutLayers(), but it is 1D in 4.5.5).
To avoid the whole problem, simply use:
output_layers = net.getUnconnectedOutLayersNames()
(as is done in the sample).
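If you want to keep the index-based helper from the question (for example, to run the same file on both machines), one workaround (an assumption on my part, not the official recommendation) is to flatten whatever getUnconnectedOutLayers() returns before indexing, so both the 1D and the (N, 1) shapes work:
import numpy as np

def get_output_layers(net):
    layer_names = net.getLayerNames()
    # flatten() copes with both the 1D (4.5.5) and the 2D (4.5.1) return shapes
    out_idx = np.asarray(net.getUnconnectedOutLayers()).flatten()
    return [layer_names[i - 1] for i in out_idx]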
I'm currently trying to create a detector of aphids (green and rose) on plants, but using only "classic" image processing techniques (no neural network).
Here is an image I'm working on:
'aphids.jpg'
I'm working on some code (see below). If you apply it to the image, you should get the plants alone. My problem is that I want to isolate the aphids that can be seen on the plants. There are a lot of them, but I just want to detect the biggest or most obvious ones.
In the code there is an "edge_detec" function I'm currently working on. One of the problems I have is that I can detect some of the aphids as contours, but it also picks up simple lines... (see the shape-based filtering sketch after the code below).
I tried to drop those lines using the contour hierarchy, but it seems those lines have inner contours, so I can't easily delete them.
I also tried adjust_gamma and adjust_contrast, but they don't give much of a result.
I'm looking for more ideas. What would you try?
Thank you in advance!
Here is the code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
def adjust_gamma(image, gamma=1.0):
    # build a lookup table mapping the pixel values [0, 255] to
    # their adjusted gamma values
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    # apply gamma correction using the lookup table
    return cv2.LUT(image, table)

def adjust_contrast(image, alpha=1.0, beta=0):
    new = np.zeros(image.shape, image.dtype)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            for c in range(image.shape[2]):
                new[y, x, c] = np.clip(alpha * image[y, x, c] + beta, 0, 255)
    return new
def img_process(img):
    (h1, w1) = img.shape[:2]
    center = (w1 / 2, h1 / 2)
    blur = cv2.GaussianBlur(img.copy(), (5, 5), 0)
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    #image = img.copy()
    # Boundaries to separate plants from the image
    l_bound = np.array([20, 0, 0])
    h_bound = np.array([90, 250, 170])  # green
    mask = cv2.inRange(hsv, l_bound, h_bound)
    res = cv2.bitwise_and(img, img, mask=mask)
    # Find plant contours
    cnt, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    sort_cnt = sorted(cnt, key=cv2.contourArea, reverse=True)
    cnt = [sort_cnt[i] for i in range(len(sort_cnt)) if cv2.contourArea(sort_cnt[i]) > 300]
    cv2.drawContours(res, cnt, -1, (0, 255, 0), -1)
    # Invert the mask to keep only the plant in the image
    mask2 = cv2.inRange(res, np.array([0, 0, 0]), np.array([250, 250, 250]))
    mask2 = cv2.bitwise_not(mask2)
    res2 = cv2.bitwise_and(img, img, mask=mask2)
    # Increase brightness/contrast
    res2 = res2 * 1.45
    res2 = res2.astype('uint8')
    # Crop
    res2 = res2[:-50, int(center[0] - 300):int(center[0] + 550)]
    return res2
def edge_detec(img):
    (h1, w1) = img.shape[:2]
    center = (w1 / 2, h1 / 2)
    blur = cv2.GaussianBlur(img.copy(), (5, 5), 0)
    gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 30, 70, apertureSize=3)
    edges = edges[:-50, int(center[0] - 300):int(center[0] + 550)]
    #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #edges = cv2.morphologyEx(edges, cv2.MORPH_GRADIENT, kernel)
    cnt, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = sorted(cnt, key=cv2.contourArea, reverse=True)
    listArea = list(map(cv2.contourArea, cnt))
    sort_cnt = [x for x in cnt if cv2.contourArea(x) > 10]
    cv2.drawContours(edges, sort_cnt, -1, (0, 255, 0), -1)
    return edges, center, img
### Start of the program
img = cv2.imread('051.jpg')
while True:
    ## Put processing function here
    img_mod = img_process(img)
    cv2.imshow('img', img_mod)
    if cv2.waitKey(1) & 0xFF == 27:
        break
cv2.destroyAllWindows()
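One possible direction for the line-versus-aphid problem, sketched below as a rough, untested idea: filter the Canny contours by shape so that compact, blob-like contours are kept and long, thin ones are dropped. The function name and the thresholds here are hypothetical and would need tuning on the real images.
import cv2
import numpy as np

def filter_blob_like(contours, min_area=10, min_circularity=0.3, min_fill=0.3):
    '''Keep compact, blob-like contours; drop thin, line-like ones.'''
    kept = []
    for c in contours:
        area = cv2.contourArea(c)
        perimeter = cv2.arcLength(c, True)
        if area < min_area or perimeter == 0:
            continue
        # Circularity is ~1 for a disc and close to 0 for a thin line
        circularity = 4 * np.pi * area / (perimeter ** 2)
        # The fill ratio of the minimum enclosing circle is also low for elongated shapes
        (_, _), radius = cv2.minEnclosingCircle(c)
        fill = area / (np.pi * radius ** 2 + 1e-6)
        if circularity >= min_circularity and fill >= min_fill:
            kept.append(c)
    return kept
Such a filter could be applied to sort_cnt inside edge_detec before drawContours, e.g. sort_cnt = filter_blob_like(sort_cnt).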
I have code that shows the label for each point in a matplotlib scatterplot using mplcursors, similar to this example. I want to know how, from a list of values, to make a certain point stand out. For example, if I have a graph of the points y = -x^2, then when I go near the peak it should show 0 rather than 0.001, without my needing to find the exact mouse placement of the top. I can't solve for each point in the graph, as I don't have a specific function.
Supposing the points in the scatter plot are ordered, we can investigate whether an extreme in a nearby window is also an extreme in a somewhat larger window. If so, we can report that extreme with its x and y coordinates.
The code below only shows the annotation when we're close to a local maximum or minimum. It also temporarily shows a horizontal and vertical line to indicate the exact spot. The code can be a starting point for many variations.
import matplotlib.pyplot as plt
import mplcursors
import numpy as np
near_window = 10 # the width of the nearby window
far_window = 20 # the width of the far window
def show_annotation(sel):
    ind = sel.target.index
    near_start_index = max(0, ind - near_window)
    y_near = y[near_start_index: min(N, ind + near_window)]
    y_far = y[max(0, ind - far_window): min(N, ind + far_window)]
    near_max = y_near.max()
    far_max = y_far.max()
    annotation_str = ''
    if near_max == far_max:
        near_argmax = y_near.argmax()
        annotation_str = f'local max:\nx:{x[near_start_index + near_argmax]:.3f}\ny:{near_max:.3f}'
        maxline = plt.axhline(near_max, color='crimson', ls=':')
        maxline_x = plt.axvline(x[near_start_index + near_argmax], color='grey', ls=':')
        sel.extras.append(maxline)
        sel.extras.append(maxline_x)
    else:
        near_min = y_near.min()
        far_min = y_far.min()
        if near_min == far_min:
            near_argmin = y_near.argmin()
            annotation_str = f'local min:\nx:{x[near_start_index + near_argmin]:.3f}\ny:{near_min:.3f}'
            minline = plt.axhline(near_min, color='limegreen', ls=':')
            minline_x = plt.axvline(x[near_start_index + near_argmin], color='grey', ls=':')
            sel.extras.append(minline)
            sel.extras.append(minline_x)
    if len(annotation_str) > 0:
        sel.annotation.set_text(annotation_str)
    else:
        sel.annotation.set_visible(False)  # hide the annotation
        # sel.annotation.set_text(f'x:{sel.target[0]:.3f}\n y:{sel.target[1]:.3f}')
N = 500
x = np.linspace(0, 100, 500)
y = np.cumsum(np.random.normal(0, 0.1, N))
box = np.ones(20) / 20
y = np.convolve(y, box, mode='same')
scat = plt.scatter(x, y, s=1)
cursor = mplcursors.cursor(scat, hover=True)
cursor.connect('add', show_annotation)
plt.show()
I'm using image-segmentation on some images, and sometimes it would be nice to be able to plot the borders of the segments.
I have a 2D NumPy array that I plot with Matplotlib, and the closest I've gotten is using contour plotting.
This cuts the corners rather than following the pixel edges, but is otherwise perfect.
Can Matplotlib's contour-function be made to only plot vertical/horizontal lines, or is there some other way to do this?
An example can be seen here:
import matplotlib.pyplot as plt
import numpy as np
array = np.zeros((20, 20))
array[4:7, 3:8] = 1
array[4:7, 12:15] = 1
array[7:15, 7:15] = 1
array[12:14, 13:14] = 0
plt.imshow(array, cmap='binary')
plt.contour(array, levels=[0.5], colors='g')
plt.show()
I wrote some functions to achieve this some time ago, but I would be glad to figure out how it can be done quicker.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
def get_all_edges(bool_img):
    """
    Get a list of all edges (where the value changes from True to False) in the 2D boolean image.
    The returned array edges has the dimension (n, 2, 2).
    Edge i connects the pixels edges[i, 0, :] and edges[i, 1, :].
    Note that the indices of a pixel also denote the coordinates of its lower left corner.
    """
    edges = []
    ii, jj = np.nonzero(bool_img)
    for i, j in zip(ii, jj):
        # North
        if j == bool_img.shape[1] - 1 or not bool_img[i, j + 1]:
            edges.append(np.array([[i, j + 1],
                                   [i + 1, j + 1]]))
        # East
        if i == bool_img.shape[0] - 1 or not bool_img[i + 1, j]:
            edges.append(np.array([[i + 1, j],
                                   [i + 1, j + 1]]))
        # South
        if j == 0 or not bool_img[i, j - 1]:
            edges.append(np.array([[i, j],
                                   [i + 1, j]]))
        # West
        if i == 0 or not bool_img[i - 1, j]:
            edges.append(np.array([[i, j],
                                   [i, j + 1]]))

    if not edges:
        return np.zeros((0, 2, 2))
    else:
        return np.array(edges)
def close_loop_edges(edges):
    """
    Combine the edges defined by 'get_all_edges' into closed loops around objects.
    If there are multiple disconnected objects, a list of closed loops is returned.
    Note that it's expected that all the edges are part of exactly one loop (but not necessarily the same one).
    """
    loop_list = []
    while edges.size != 0:
        loop = [edges[0, 0], edges[0, 1]]  # Start with the first edge
        edges = np.delete(edges, 0, axis=0)

        while edges.size != 0:
            # Get the next edge (= the edge with a common node)
            ij = np.nonzero((edges == loop[-1]).all(axis=2))
            if ij[0].size > 0:
                i = ij[0][0]
                j = ij[1][0]
            else:
                loop.append(loop[0])
                # Uncomment to make the start of the loop invisible when plotting
                # loop.append(loop[1])
                break

            loop.append(edges[i, (j + 1) % 2, :])
            edges = np.delete(edges, i, axis=0)

        loop_list.append(np.array(loop))

    return loop_list
def plot_outlines(bool_img, ax=None, **kwargs):
    if ax is None:
        ax = plt.gca()

    edges = get_all_edges(bool_img=bool_img)
    edges = edges - 0.5  # convert indices to coordinates; TODO adjust according to image extent
    outlines = close_loop_edges(edges=edges)
    cl = LineCollection(outlines, **kwargs)
    ax.add_collection(cl)
array = np.zeros((20, 20))
array[4:7, 3:8] = 1
array[4:7, 12:15] = 1
array[7:15, 7:15] = 1
array[12:14, 13:14] = 0
plt.figure()
plt.imshow(array, cmap='binary')
plot_outlines(array.T, lw=5, color='r')
plt.show()
This is for a class and I would really appreciate your help! I made some changes based on a comment I received, but now I get another error.
I need to modify an existing function that implements the mean-shift algorithm, but instead of initializing all the points as the first set of centroids, the function creates a grid of centroids with the grid based on the radius. I also need to delete the centroids that don't contain any data points. My issue is that I don't understand how to fix the error I get!
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-7-de18ffed728f> in <module>()
49 centroids = initialize_centroids(x)
50
---> 51 new_centroids = update_centroids(x, centroids, r = 1)
52
53 print(len(centroids))
<ipython-input-7-de18ffed728f> in update_centroids(data, centroids, r)
26 #print(len(centroids))
27 #print(range(len(centroids)))
---> 28 centroid = centroids[i]
29 for data_point in data:
30 if np.linalg.norm(data_point - centroid) < r:
IndexError: index 2 is out of bounds for axis 0 with size 2
I tried using the range of the input dataset as boundaries for a grid, with the points separated by the radius.
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt
def initialize_centroids(data, r = 1):
    '''Creates a grid of centroids with grid based on radius'''
    data = np.array(data)
    xi, yi = min(range(len(data))), max(range(len(data)))
    mx = np.arange(xi, yi, r)
    x, y = np.meshgrid(mx, mx)
    centroids = np.vstack([x.ravel(), y.ravel()])
    return centroids
# update centroids based on mean of points that fall within a specified radius of each centroid
def update_centroids(data, centroids, r = 1):
    new_centroids = []
    for i in centroids:
        in_radius = []
        centroid = centroids[i]  # this is where the error occurs
        for data_point in data:
            if np.linalg.norm(data_point - centroid) < radius:
                in_radius.append(data_point)  # this list is appended by adding the new centroid to it if the above condition is satisfied
        new_centroid = np.mean(in_radius, axis=0)
        # maybe another way to do the next part
        new_centroids.append(tuple(new_centroid))
    unique_centroids = sorted(list(set(new_centroids)))  # for element in in_radius, if element in set skip else set.append(element(in_rad)). append does not work with set.
    new_centroids = {i: np.array(unique_centroids[i]) for i in range(len(unique_centroids))}
    return new_centroids
#test function on:
x, y = datasets.make_blobs(n_samples=300, n_features = 2, centers=[[0, 7], [0, -7], [5,7], [5, 0]])
centroids = initialize_centroids(x)
new_centroids = update_centroids(x, centroids, radius = 2)
print(len(centroids))
print()
print(len(new_centroids))
#code for plotting initially:
plt.scatter(x[:,0], x[:,1], color = 'k')
for i in range(len(new_centroids)):
    plt.scatter(new_centroids[i][0], new_centroids[i][1], s=200, color = 'r', marker = "*")
#code for plotting updated centroids:
new_centroids = update_centroids(x, new_centroids, radius = 2)
plt.scatter(x[:,0], x[:,1], color = 'k')
for i in range(len(new_centroids)):
    plt.scatter(new_centroids[i][0], new_centroids[i][1], s=200, color = 'r', marker = "*")
#code for iterations:
def iterate_to_conv(data, max_iter=100):
    centroids = initialize_centroids(data)
    iter_count = 0
    while iter_count <= max_iter:
        new_centroids = update_centroids(data, centroids, radius = 2)
        centroids = new_centroids
        iter_count += 1
    return centroids
centroids = iterate_to_conv(x)
plt.scatter(x[:,0], x[:,1], color = 'k')
for i in range(len(centroids)):
    plt.scatter(centroids[i][0], centroids[i][1], s=200, color = 'r', marker = "*")
The function needs to return the number of final centroids. I haven't gotten far enough ahead to know how the entire mean-shift implementation would work with this function.
When you run that loop, for i in centroids, the i that iterates through centroids isn't a number, it is a vector, which is why an error pops up. For example, the first value of i might be equal to [0 1 2 0 1 2 0 1 2], so taking an index with it doesn't make sense. What your code is effectively asking for is centroid = centroids[[n1, n2, ..., nk]]. To fix it, you really need to change how your initialize_centroids function works. Also, meshgrid as written won't create an N-dimensional grid, so your grid might work for 2 dimensions but not N. I hope that helps.
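To illustrate that point, here is one possible sketch (an assumption-based rework, not the solution the class expects) in which centroids is an (M, 2) array of points, the grid spans the actual data range, and the loop iterates over rows instead of using them as indices:
import numpy as np

def initialize_centroids(data, r=1.0):
    '''Sketch: grid of candidate centroids spanning the data range, spaced by r.'''
    data = np.asarray(data)
    (x_min, y_min), (x_max, y_max) = data.min(axis=0), data.max(axis=0)
    gx, gy = np.meshgrid(np.arange(x_min, x_max + r, r),
                         np.arange(y_min, y_max + r, r))
    return np.column_stack([gx.ravel(), gy.ravel()])  # shape (M, 2): one centroid per row

def update_centroids(data, centroids, r=1.0):
    '''Sketch: move each centroid to the mean of the points within radius r,
    dropping grid centroids that have no data points nearby.'''
    data = np.asarray(data)
    new_centroids = []
    for centroid in centroids:  # iterate over the rows (points), not over indices
        dists = np.linalg.norm(data - centroid, axis=1)
        in_radius = data[dists < r]
        if len(in_radius) > 0:  # discard centroids with no nearby points
            new_centroids.append(tuple(np.mean(in_radius, axis=0)))
    return np.array(sorted(set(new_centroids)))  # merge centroids that shifted to the same spot
Because the result stays a plain (K, 2) array, len(new_centroids) directly gives the number of centroids, which is what the function is ultimately supposed to report.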