KMeans clustering with labels data - scikit-learn

I have an RGB image of shape (587, 987, 3). # height, width, num_channels
I also have label data (pixel locations) for each of 7 classes.
I want to apply the KMeans clustering algorithm to segment the given image into 7 classes.
While applying KMeans clustering, I want to utilize the label data, i.e., the pixel locations.
How can I utilize label data?
What I have tried so far is as follows.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

img = np.random.randint(low=1, high=99, size=(587, 987, 3))
im = img.reshape(img.shape[0] * img.shape[1], img.shape[2])
im = StandardScaler().fit_transform(im)
clusters = KMeans(n_clusters=7, n_init=100, max_iter=100, n_jobs=-1).fit(im)
kmeans_labels = clusters.labels_.reshape(img.shape[0], img.shape[1])
plt.imshow(kmeans_labels)
plt.show()
I'm looking to propagate some annotations to the remaining segments (superpixels).

As clarified in the comments of the question, you could treat the clusters as superpixels and propagate labels from a few samples to the remaining data using some semi-supervised classifier [1].
Creating an image to run the example:
import numpy as np
from skimage.data import binary_blobs
import cv2
from pyift.shortestpath import seed_competition
from scipy import sparse, spatial
import matplotlib.pyplot as plt
# creating noisy image
size = 256
image = np.empty((size, size, 3))
image[:, :, 0] = binary_blobs(size, seed=0)
image[:, :, 1] = binary_blobs(size, seed=0)
image[:, :, 2] = binary_blobs(size, seed=1)
image += np.random.randn(*image.shape) / 10
image -= image.min()
image /= image.max()
plt.axis(False)
plt.imshow(image)
plt.show()
Computing superpixels:
def grid_seeds(image, rows=15, cols=15):
    seeds = np.zeros(image.shape[:2], dtype=int)
    v_step, h_step = image.shape[0] // rows, image.shape[1] // cols
    count = 1
    for i in range(rows):
        y = v_step // 2 + i * v_step
        for j in range(cols):
            x = h_step // 2 + j * h_step
            seeds[y, x] = count
            count += 1
    return seeds
seeds = grid_seeds(image)
_, _, _, superpixels = seed_competition(seeds, image=image)
superpixels -= 1 # shifting labels to zero
contours, _ = cv2.findContours(superpixels, cv2.RETR_FLOODFILL, cv2.CHAIN_APPROX_SIMPLE)
im_w_contours = image.copy()
cv2.drawContours(im_w_contours, contours, -1, (255, 0, 0))
plt.axis(False)
plt.imshow(im_w_contours)
plt.show()
Propagating labels from 4 arbitrary nodes, one for each class (color) and coloring the resulting labels with the expected color.
def create_graph(image, labels):
    n_nodes = labels.max() + 1
    h, w, d = image.shape
    avg = np.zeros((n_nodes, d))
    for i in range(h):
        for j in range(w):
            avg[labels[i, j]] += image[i, j]
    avg[:] /= np.bincount(labels.flat)[:, np.newaxis]  # ignore label 0
    graph = spatial.distance_matrix(avg, avg)
    return sparse.csr_matrix(graph)
graph = create_graph(image, superpixels)
graph_seeds = np.zeros(graph.shape[0], dtype=int)
graph_seeds[1] = 1 # blue training sample
graph_seeds[3] = 2 # yellow training sample
graph_seeds[13] = 3 # white training sample
graph_seeds[14] = 4 # black training sample
label_colors = {1: (0, 0, 255),
                2: (255, 255, 0),
                3: (255, 255, 255),
                4: (0, 0, 0)}
_, _, _, labels = seed_competition(graph_seeds, graph=graph)
result = np.empty_like(image)
for i, lb in enumerate(labels):
    result[superpixels == i] = label_colors[lb]
plt.axis(False)
plt.imshow(result)
plt.show()
For this example, I used the distance between the average colors of the superpixels as the arc weights. However, in a real problem, a more elaborate feature vector will be necessary.
Also, here the labeled data is a subset of the image superpixels, but this is not strictly necessary; you can add artificial nodes when modeling your graph, especially as the seed nodes.
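As a minimal sketch of that last idea (assuming the per-superpixel feature matrix, e.g. the avg array computed inside create_graph, is kept around as feats; the prototype color and class index below are made up for illustration):
# sketch only: append an artificial seed node to the superpixel graph
artificial = np.array([[0.9, 0.9, 0.1]])    # hypothetical prototype color for one class
feats_ext = np.vstack([feats, artificial])  # the artificial node becomes the last graph node
graph = sparse.csr_matrix(spatial.distance_matrix(feats_ext, feats_ext))
graph_seeds = np.zeros(graph.shape[0], dtype=int)
graph_seeds[-1] = 2                         # the artificial node seeds class 2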
This approach is commonly used in remote sensing; this article might be relevant [2].
[1] Amorim, W. P., Falcão, A. X., Papa, J. P., & Carvalho, M. H. (2016). Improving semi-supervised learning through optimum connectivity. Pattern Recognition, 60, 72-85.
[2] Vargas, John E., et al. "Superpixel-based interactive classification of very high resolution images." 2014 27th SIBGRAPI Conference on Graphics, Patterns, and Images. IEEE, 2014.

Related

Using the linear_model perceptron module from sklearn to separate points

I am trying to use this sklearn module for a binary classification problem, and my data is clearly linearly separable.
What I don't understand is why the green area of my plot does not include the five red circles.
I have tried to vary the iterations parameter (max_iter) from 100 to 10000; however, it does not make any difference.
Here is my code:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Perceptron
def learn_and_display_Perceptron(datafile):
    # taking data reading this from the above functions
    data = np.loadtxt(datafile)
    n, d = data.shape
    x = data[:, 0:2]
    y = data[:, 2]
    clf = Perceptron(max_iter=10000)
    clf.fit(x, y)
    sv = np.zeros(n, dtype=bool)  ## all False array
    notsv = np.logical_not(sv)    # all True array
    # Determine the x1- and x2- limits of the plot
    x1min = min(x[:, 0]) - 1
    x1max = max(x[:, 0]) + 1
    x2min = min(x[:, 1]) - 1
    x2max = max(x[:, 1]) + 1
    plt.xlim(x1min, x1max)
    plt.ylim(x2min, x2max)
    # Plot the data points, enlarging those that are support vectors
    plt.plot(x[(y == 1) * notsv, 0], x[(y == 1) * notsv, 1], 'ro')
    plt.plot(x[(y == 1) * sv, 0], x[(y == 1) * sv, 1], 'ro', markersize=10)
    plt.plot(x[(y == -1) * notsv, 0], x[(y == -1) * notsv, 1], 'k^')
    plt.plot(x[(y == -1) * sv, 0], x[(y == -1) * sv, 1], 'k^', markersize=10)
    # Construct a grid of points and evaluate classifier at each grid point
    grid_spacing = 0.05
    xx1, xx2 = np.meshgrid(np.arange(x1min, x1max, grid_spacing), np.arange(x2min, x2max, grid_spacing))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    Z = clf.predict(grid)
    # Quantize the values to -1, -0.5, 0, 0.5, 1 for display purposes
    for i in range(len(Z)):
        Z[i] = min(Z[i], 1.0)
        Z[i] = max(Z[i], -1.0)
        if (Z[i] > 0.0) and (Z[i] < 1.0):
            Z[i] = 0.5
        if (Z[i] < 0.0) and (Z[i] > -1.0):
            Z[i] = -0.5
    # Show boundary and margin using a color plot
    Z = Z.reshape(xx1.shape)
    plt.pcolormesh(xx1, xx2, Z, cmap=plt.cm.PRGn, vmin=-2, vmax=2, shading='auto')
    plt.show()
My datafile, data_1.txt, can be found here: https://github.com/bluetail14/MyCourserapractice/tree/main/Edx
What can I change in my code to adjust the green/purple borderline to include the five red circles?
Nice code. You need to change the eta0 value (the constant by which the perceptron updates are multiplied), e.g.:
clf = Perceptron(max_iter=10000, eta0=0.1)

How can i get the inner contour points without redundancy in OpenCV - Python

I'm new to OpenCV, and the thing is that I need to get all the contour points. This is easy by setting the cv2.RETR_TREE mode in the findContours method. The problem is that, this way, it returns redundant coordinates. So, for example, in this polygon, I don't want to get the contour points like this:
But like this:
According to the first image, the green color marks the contours detected with RETR_TREE mode, and points 1-2, 3-5, 4-6, ... are redundant because they are so close to each other. I need to merge those redundant points into one and append it to the customContours array.
For the moment, I only have the code corresponding to the first picture, setting up the distance between the points and the point coordinates:
def getContours(img, minArea=20000, cThr=[100, 100]):
    font = cv2.FONT_HERSHEY_COMPLEX
    imgColor = img
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, cThr[0], cThr[1])
    kernel = np.ones((5, 5))
    imgDial = cv2.dilate(imgCanny, kernel, iterations=3)
    imgThre = cv2.erode(imgDial, kernel, iterations=2)
    cv2.imshow('threshold', imgThre)
    contours, hierachy = cv2.findContours(imgThre, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    customContours = []
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > minArea:
            peri = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.009 * peri, True)
            bbox = cv2.boundingRect(approx)
            customContours.append([len(approx), area, approx, bbox, cnt])
            print('points: ', len(approx))
            n = approx.ravel()
            i = 0
            for j in n:
                if i % 2 == 0:
                    x = n[i]
                    y = n[i + 1]
                    string = str(x) + " " + str(y)
                    cv2.putText(imgColor, str(i // 2 + 1) + ': ' + string, (x, y), font, 2, (0, 0, 0), 2)
                i = i + 1
    customContours = sorted(customContours, key=lambda x: x[1], reverse=True)
    for cnt in customContours:
        cv2.drawContours(imgColor, [cnt[2]], 0, (0, 0, 255), 5)
    return imgColor, customContours
Could you help me get the real points, as in the second picture?
(EDIT 01/07/21)
I want a generic solution, because the image could be more complex, such as the following picture:
NOTE: notice that the middle arrow (points 17 and 18) doesn't have a closed area, so it isn't a polygon to study. Therefore, that region is not of interest for obtaining its points. Also, notice that the order of the points isn't important, but if the input is the whole image, it should recognize that there are 4 polygons, so each polygon's points start with 0, then 1, etc.
Here's my approach. It is mainly morphology-based. It involves convolving the image with a special kernel. This convolution identifies the end-points of the triangle as well as the intersection points where the middle line is present. This will result in a points mask containing the pixels that match the points you are looking for. After that, we can apply a little bit of morphology to join possibly duplicated points. What remains is to get a list of the coordinates of these points for further processing.
These are the steps:
Get a binary image of the input via Otsu's thresholding
Get the skeleton of the binary image
Define the special kernel and convolve the skeleton image
Apply a morphological dilate to join possible duplicated points
Get the centroids of the points and store them in a list
Here's the code:
# Imports:
import numpy as np
import cv2
# image path
path = "D://opencvImages//"
fileName = "triangle.png"
# Reading an image in default mode:
inputImage = cv2.imread(path + fileName)
# Prepare a deep copy for results:
inputImageCopy = inputImage.copy()
# Convert BGR to Grayscale
grayImage = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
# Threshold via Otsu:
_, binaryImage = cv2.threshold(grayImage, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
The first bit computes the binary image. Very straightforward. I'm using this image as base, which is just a cleaned-up version of what you posted without the annotations. This is the resulting binary image:
Now, to perform the convolution we must first get the image "skeleton". The skeleton is a version of the binary image where lines have been normalized to have a width of 1 pixel. This is useful because we can then convolve the image with a 3 x 3 kernel and look for specific pixel patterns. Let's compute the skeleton using OpenCV's extended image processing module:
# Get image skeleton:
skeleton = cv2.ximgproc.thinning(binaryImage, None, 1)
This is the image obtained:
We can now apply the convolution. The approach is based on Mark Setchell's info on this post. The post mainly shows the method for finding end-points of a shape, but I extended it to also identify line intersections, such as the middle portion of the triangle. The main idea is that the convolution yields a very specific value where patterns of black and white pixels are found in the input image. Refer to the post for the theory behind this idea, but here, we are looking for two values: 110 and 40. The first one occurs when an end-point has been found. The second one occurs when a line intersection is found. Let's set up the convolution:
# Threshold the skeleton so that line (white) pixels get a value of 10 and
# background pixels a value of 0:
_, binaryImage = cv2.threshold(skeleton, 128, 10, cv2.THRESH_BINARY)
# Set the convolution kernel:
h = np.array([[1, 1, 1],
[1, 10, 1],
[1, 1, 1]])
# Convolve the image with the kernel:
imgFiltered = cv2.filter2D(binaryImage, -1, h)
# Create list of thresholds:
thresh = [110, 40]
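As a quick sanity check on those two values: a skeleton pixel is set to 10 and the kernel's center weight is 10, so an end-point (a skeleton pixel with exactly one skeleton neighbor) produces 10 x 10 + 1 x 10 = 110, while 40 can only come from a background pixel (0 x 10) with exactly four skeleton pixels in its 3 x 3 neighborhood, the pattern used here to flag the line intersection.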
The first part is done. We are going to detect end-points and intersections in two separate steps. Each step will produce a partial result, and we can OR both results to get the final mask:
# Prepare the final mask of points:
(height, width) = binaryImage.shape
pointsMask = np.zeros((height, width, 1), np.uint8)
# Perform convolution and create points mask:
for t in range(len(thresh)):
    # Get current threshold:
    currentThresh = thresh[t]
    # Locate the threshold in the filtered image:
    tempMat = np.where(imgFiltered == currentThresh, 255, 0)
    # Convert and shape the image to a uint8 height x width x channels
    # numpy array:
    tempMat = tempMat.astype(np.uint8)
    tempMat = tempMat.reshape(height, width, 1)
    # Accumulate mask:
    pointsMask = cv2.bitwise_or(pointsMask, tempMat)
This is the final mask of points:
Note that the white pixels are the locations that matched our target patterns. Those are the points we are looking for. As the shape is not a perfect triangle, some points could be duplicated. We can "merge" neighboring blobs by applying a morphological dilation:
# Set kernel (structuring element) size:
kernelSize = 7
# Set operation iterations:
opIterations = 3
# Get the structuring element:
morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernelSize, kernelSize))
# Perform Dilate:
morphoImage = cv2.morphologyEx(pointsMask, cv2.MORPH_DILATE, morphKernel, None, None, opIterations, cv2.BORDER_REFLECT101)
This is the result:
Very nice, we have now big clusters of pixels (or blobs). To get their coordinates, one possible approach would be to get the bounding rectangles of these contours and compute their centroids:
# Look for the outer contours (no children):
contours, _ = cv2.findContours(morphoImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Store the points here:
pointsList = []
# Loop through the contours:
for i, c in enumerate(contours):
    # Get the contour's bounding rectangle:
    boundRect = cv2.boundingRect(c)
    # Get the centroid of the rectangle:
    cx = int(boundRect[0] + 0.5 * boundRect[2])
    cy = int(boundRect[1] + 0.5 * boundRect[3])
    # Store centroid into list:
    pointsList.append((cx, cy))
    # Set centroid circle and text:
    color = (0, 0, 255)
    cv2.circle(inputImageCopy, (cx, cy), 3, color, -1)
    font = cv2.FONT_HERSHEY_COMPLEX
    string = str(cx) + ", " + str(cy)
    cv2.putText(inputImageCopy, str(i) + ':' + string, (cx, cy), font, 0.5, (255, 0, 0), 1)
# Show image:
cv2.imshow("Circles", inputImageCopy)
cv2.waitKey(0)
These are the points located in the original input:
Note also that I've stored their coordinates in the pointsList list:
# Print the list of points:
print(pointsList)
This prints the centroids as the tuple (centroidX, centroidY):
[(717, 971), (22, 960), (183, 587), (568, 586), (388, 98)]

Creating gaussians of fixed width and std

I am trying to make every point above 25.2 a Gaussian peak with a width of 2 on the x axis.
I'm not so sure how to generate the Gaussian curves in Python.
Here is a full example of how to generate a Gaussian distribution, for an arbitrary number of axes and center locations. It requires the packages matplotlib, scipy and numpy.
The module can be controlled by:
dim for the number of dimensions (axes).
fwhm for the full width at half maximum (controls the width of the Gaussian distribution; see the note on its relation to the standard deviation right after this list).
centers for an np.array or list of the indices that are the center(s) of the Gaussian distribution.
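Since the title asks about a fixed width and std: for a Gaussian, the full width at half maximum and the standard deviation are related by FWHM = 2 * sqrt(2 * ln 2) * sigma (about 2.355 * sigma), so you can convert between the two, for example:
import numpy as np
# convert a desired standard deviation into the fwhm used below (and back)
sigma = 1.0
fwhm = 2 * np.sqrt(2 * np.log(2)) * sigma          # ~ 2.355 * sigma
sigma_back = fwhm / (2 * np.sqrt(2 * np.log(2)))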
import matplotlib.cm as mpl_cm
import matplotlib.colors as mpl_colors
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.distance import cdist
class Gaussian:
    def __init__(self, size):
        self.size = size
        self.center = np.array(self.size) / 2
        self.axis = self._calculate_axis()

    def _calculate_axis(self):
        """
        Generate a list of rows, columns over multiple axis.

        Example:
            Input: size=(5, 3)
            Output: [array([0, 1, 2, 3, 4]), array([[0], [1], [2]])]
        """
        axis = [np.arange(size).reshape(-1, *np.ones(idx, dtype=np.uint8))
                for idx, size in enumerate(self.size)]
        return axis

    def update_size(self, size):
        """ Update the size and calculate new centers and axis. """
        self.size = size
        self.center = np.array(self.size) / 2
        self.axis = self._calculate_axis()

    def create(self, dim=1, fwhm=3, center=None):
        """ Generate a gaussian distribution on the center of a certain width. """
        center = center if center is not None else self.center[:dim]
        distance = sum((ax - ax_center) ** 2 for ax_center, ax in zip(center, self.axis))
        distribution = np.exp(-4 * np.log(2) * distance / fwhm ** 2)
        return distribution

    def creates(self, dim=2, fwhm=3, centers: np.ndarray = (None,)):
        """ Combines multiple gaussian distributions based on multiple centers. """
        centers = np.array(centers).T
        indices = np.indices(self.size).reshape(dim, -1).T
        distance = np.min(cdist(indices, centers, metric='euclidean'), axis=1)
        distance = np.power(distance.reshape(self.size), 2)
        distribution = np.exp(-4 * np.log(2) * distance / fwhm ** 2)
        return distribution

    @staticmethod
    def plot(distribution, show=True):
        """ Plotter, in case you do not know the dimensions of your distribution, or want the same interface. """
        if len(distribution.shape) == 1:
            return Gaussian.plot1d(distribution, show)
        if len(distribution.shape) == 2:
            return Gaussian.plot2d(distribution, show)
        if len(distribution.shape) == 3:
            return Gaussian.plot3d(distribution, show)
        raise ValueError(f"Trying to plot {len(distribution.shape)}-dimensional data, "
                         f"Only 1D, 2D, and 3D distributions are valid.")

    @staticmethod
    def plot1d(distribution, show=True, vmin=None, vmax=None, cmap=None):
        norm = mpl_colors.Normalize(
            vmin=vmin if vmin is not None else distribution.min(),
            vmax=vmax if vmax is not None else distribution.max()
        )
        cmap = mpl_cm.ScalarMappable(norm=norm, cmap=cmap or mpl_cm.get_cmap('jet'))
        cmap.set_array(distribution)
        c = [cmap.to_rgba(value) for value in distribution]  # defines the color
        fig, ax = plt.subplots()
        ax.scatter(np.arange(len(distribution)), distribution, c=c)
        fig.colorbar(cmap)
        if show: plt.show()
        return fig

    @staticmethod
    def plot2d(distribution, show=True):
        fig, ax = plt.subplots()
        img = ax.imshow(distribution, cmap='jet')
        fig.colorbar(img)
        if show: plt.show()
        return fig

    @staticmethod
    def plot3d(distribution, show=True):
        m, n, c = distribution.shape
        x, y, z = np.mgrid[:m, :n, :c]
        out = np.column_stack((x.ravel(), y.ravel(), z.ravel(), distribution.ravel()))
        x, y, z, values = np.array(list(zip(*out)))
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # Standalone colorbar, directly creating colorbar on fig results in strange artifacts.
        img = ax.scatter([0, 0], [0, 0], [0, 0], c=[0, 1], cmap=mpl_cm.get_cmap('jet'))
        img.set_visible(False)
        fig.colorbar(img)
        ax.scatter(x, y, z, c=values, cmap=mpl_cm.get_cmap('jet'))
        if show: plt.show()
        return fig
Example
gaussian = Gaussian(size=(20,))
dist = gaussian.create(dim=1, center=(1,))
gaussian.plot1d(dist, show=True)
Your problem
In order to get a solution that fits your question, the following code would work:
import numpy as np
arr = np.random.randint(0, 28, (25,))
gaussian = Gaussian(size=arr.shape)
centers = np.where(arr > 25.2)
distribution = gaussian.creates(dim=len(arr.shape), fwhm=2, centers=centers)
gaussian.plot(distribution, show=True)
For this, the centers are determined by the condition arr > 25.2. Note that if there are no values above the threshold, the next lines will crash. In order to get a width of 2, the value fwhm is set to 2, but you can alter this until you get the width that you want, or use Finding the full width half maximum of a peak.
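A small guard against the empty-centers case mentioned above might look like this (a sketch that reuses the arr and gaussian objects from the snippet above and simply substitutes an all-zero distribution when nothing exceeds the threshold):
centers = np.where(arr > 25.2)
if centers[0].size == 0:
    # nothing above the threshold: fall back to an all-zero "distribution"
    distribution = np.zeros(arr.shape)
else:
    distribution = gaussian.creates(dim=len(arr.shape), fwhm=2, centers=centers)
gaussian.plot(distribution, show=True)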

How can i count segments in an image in python?

I am new to image processing and Python. You might've seen my amateur code on this site in the last couple of days.
I am trying to count the number of trees using aerial images. This is my code:
from PIL import Image
import cv2
import numpy as np
from skimage import io, filters, measure
from scipy import ndimage
img = Image.open("D:\\Texture analysis\\K-2.jpg")
row, col = img.size
hsvimg = img.convert('HSV')
hsvimg.mode = 'RGB'
hsvimg.save('newImage2.jpg')
npHSI = np.asarray(hsvimg) #Convert HSI Image to np image
blur = cv2.GaussianBlur(npHSI, (45, 45), 5)
assert isinstance(blur, np.ndarray) ##############################
assert len(blur.shape) == 3 #Convert np Image to HSI Image
assert blur.shape[2] == 3 ##############################
hsiBlur = Image.fromarray(blur, 'RGB')
hsiBlur.save('hsiBlur.jpg') #Save the blurred image
## Read
img = cv2.imread("D:\\Texture analysis\\hsiBlur.jpg")
## convert to hsv
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#Threshold the image and segment the trees
mask = cv2.inRange(hsv, (36, 25, 25), (70, 255,255))
imask = mask>0
green = np.zeros_like(img, np.uint8)
green[imask] = img[imask]
## save
cv2.imwrite("green.png", green)
#Count the number of trees
im = io.imread('green.png', as_grey=True)
val = filters.threshold_otsu(im)
drops = ndimage.binary_fill_holes(im < val)
labels = measure.label(drops)
print(labels.max())
Original image:
HSI image with gaussian filter:
Segmented image:
The last part of the code returns 7, which is the wrong output. The value should be above 50. How can I properly count the number of green segments in the final segmented image?
EDIT
I converted green.png to binary and applied erosion with a 3x3 filter and iterated it 7 times to remove the noise.
This is what I did at the end. I followed this stackoverflow link
##save
cv2.imwrite("green.png", green)
#Convert to grayscale
gray = np.dot(green[...,:3], [0.299, 0.587, 0.114])
cv2.imwrite("grayScale.jpg", gray)
#Binarize the grayscale image
ret,bin_img = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
cv2.imwrite("bin_img.jpg", bin_img)
#Erosion to remove the noise
kernel = np.ones((3, 3),np.uint8)
erosion = cv2.erode(gray, kernel, iterations = 7)
cv2.imwrite("erosion.jpg", erosion)
#Count the number of trees
finalImage = cv2.imread('erosion.jpg')
finalImage = cv2.cvtColor(finalImage, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(finalImage, 127, 255, 1)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    cv2.drawContours(finalImage, [cnt], 0, (0, 0, 255), 1)
Saurav mentioned in his answer that "... size of 'contours' will give you the count." But print(contour.size()) gives an error and print(contour) just prints a long 2D array. How can I get the size of contours?
PS: I didn't upload the grayscale, binary, and eroded images because I felt they were already taking too much space; I can still upload them if anyone wants.
I've found 52 trees with that script:
from PIL import Image, ImageDraw, ImageFont
image = Image.open('04uX3.jpg')
pixels = image.load()
size = image.size
draw = ImageDraw.Draw(image)
font = ImageFont.truetype('arial', 60)
i = 1
for x in range(0, size[0], 100):
    for y in range(0, size[1], 100):
        if pixels[x, y][1] > 200:
            draw.text((x, y), str(i), (255, 0, 0), font=font)
            i += 1
image.save('result.png')
You can see that some trees weren't detected and some non-trees were detected, so this is a very rough calculation:

How do I discriminate two different type of abnormalities in curvature of the object?

I have been working on a project that requires finding defects in onions. The second image that's attached shows an abnormal onion. You can see that the onion is made up of two smaller onion twins. What's interesting is that the human eye can easily detect what's wrong with the structure.
One can do a structural analysis and observe that a normal onion has an almost smooth curvature while an abnormal one doesn't. Thus, quite simply, I want to build a classification algorithm based on the edges of the object.
However, there are times when the skin of the onion makes the curve irregular. See the image: there's a small part of skin that's outside the actual curvature. I want to discriminate the bulge caused by the skin from the deformities produced at the point where the two subsections meet, and then reconstruct the contour of the object for further analysis.
Is there a mathematical tool that would help me here, given that I have the majority of the points that make up the outer edge of the onion, including the two irregularities?
See the code below:
import cv2
import numpy as np
import sys
cv2.ocl.setUseOpenCL(False)
cv2.namedWindow('test', cv2.WINDOW_NORMAL)
cv2.namedWindow('orig', cv2.WINDOW_NORMAL)
cv2.resizeWindow('test', 600,600)
cv2.resizeWindow('orig', 600,600)
image = cv2.imread('./buffer/crp'+str(sys.argv[1])+'.JPG')
tim = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
hsv_image = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
frame_threshed = cv2.inRange(hsv_image, np.array([70,0,0],np.uint8),
np.array([140,255,255],np.uint8))
canvas = np.zeros(image.shape, np.uint8)
framhreshed=cv2.threshold(frame_threshed,10,255,cv2.THRESH_BINARY_INV)
kernel = np.ones((3,3),np.uint8)
frame_threshed = cv2.erode(frame_threshed,kernel,iterations = 1)
kernel = np.ones((5,5),np.uint8)
frame_threshed = cv2.erode(frame_threshed,kernel,iterations = 1)
kernel = np.ones((7,7),np.uint8)
frame_threshed = cv2.erode(frame_threshed,kernel,iterations = 1)
_, cnts, hierarchy = cv2.findContours(frame_threshed.copy(),
cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts= sorted(cnts, key=cv2.contourArea, reverse=True)
big_contours = [c for c in cnts if cv2.contourArea(c) > 100000]
for cnt in big_contours:
    perimeter = cv2.arcLength(cnt, True)
    epsilon = 0.0015 * cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, epsilon, True)
    # print(len(approx))
    hull = cv2.convexHull(cnt, returnPoints=False)
    # try:
    defects = cv2.convexityDefects(cnt, hull)
    for i in range(defects.shape[0]):
        s, e, f, d = defects[i, 0]
        start = tuple(cnt[s][0])
        end = tuple(cnt[e][0])
        far = tuple(cnt[f][0])
        cv2.line(canvas, start, end, [255, 0, 0], 2)
        cv2.circle(canvas, far, 5, [255, 255, 255], -1)
    cv2.drawContours(image, [approx], -1, (0, 0, 255), 5)
    cv2.drawContours(canvas, [approx], -1, (0, 0, 255), 5)
cv2.imshow('orig',image)
cv2.imshow('test',canvas)
cv2.waitKey(0)
cv2.destroyAllWindows()
I would suggest you try Hu Moments, since you have already extracted the shape of your objects. They allow you to calculate a distance between two shapes, so basically between your abnormal onion and a reference onion.
The Hu Moments shape descriptor is available for Python using OpenCV. If the image is binary, you can use it like this:
# Reference image
shapeArray1 = cv2.HuMoments(cv2.moments(image1)).flatten()
# Abnormal image
shapeArray2 = cv2.HuMoments(cv2.moments(image2)).flatten()
# Calculation of distance between both arrays
# Threshold based on the distance
# Classification as abnormal or normal
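One possible way to fill in those last three placeholder steps (a sketch, not from the original answer; the log scaling and the threshold value are assumptions to be tuned on known good/bad onions):
import numpy as np
# Sketch only: compare the two Hu moment arrays on a log scale, since the
# raw moments span several orders of magnitude.
eps = 1e-30
log1 = -np.sign(shapeArray1) * np.log10(np.abs(shapeArray1) + eps)
log2 = -np.sign(shapeArray2) * np.log10(np.abs(shapeArray2) + eps)
distance = np.linalg.norm(log1 - log2)
threshold = 1.0   # hypothetical value; tune it on labeled examples
label = "abnormal" if distance > threshold else "normal"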
matchShapes could do the job too. It takes two binary images or contours and returns a float that evaluates the distance between both.
Python: cv.MatchShapes(object1, object2, method, parameter=0) → float
More details
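With the modern cv2 bindings this looks roughly as follows (a sketch; contour1 and contour2 are assumed to be contours obtained from cv2.findContours, as in the question):
import cv2
# smaller return value means more similar shapes
distance = cv2.matchShapes(contour1, contour2, cv2.CONTOURS_MATCH_I1, 0.0)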
So when an onion shape is detected as abnormal, you would have to fill this shape and apply some binary morphology to erase the imperfection and extract the shape without the imperfection (a rough sketch follows the steps below):
Fill your shape
Apply an opening (erosion followed by dilation) with a disk structuring element to get rid of the irregularities
Extract the contours again
You should have a shape without your irregularities. If not, go back to step 2 and change the size of the structuring element
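A rough sketch of those steps (assumptions: mask is a binary mask of the onion, cnt is its largest contour, the structuring-element size is a guess to tune, and the OpenCV 4 findContours return signature is used):
import cv2
import numpy as np
# 1. fill the detected shape into a clean binary mask
filled = np.zeros(mask.shape, dtype=np.uint8)
cv2.drawContours(filled, [cnt], -1, 255, thickness=cv2.FILLED)
# 2. opening with an elliptical (disk) structuring element removes small bulges
disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (25, 25))
opened = cv2.morphologyEx(filled, cv2.MORPH_OPEN, disk)
# 3. extract the contour of the cleaned shape again
contours, _ = cv2.findContours(opened, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
clean_cnt = max(contours, key=cv2.contourArea)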
OK, so if you look at the first two pictures of your onions, you can see that they have a circular shape (except the peel peaks), while the "defect" one has more of an oval shape. What you could try is to find your contour (after you apply image transformations, of course) and determine its center point. Then you could measure the distance from the center of the contour to each point of the contour. You can do it using scipy (spatial.cKDTree() and tree.query()) or simply with the mathematical formula for the distance between two points, sqrt((x2-x1)^2 + (y2-y1)^2). Then you can say that if some number of points are out of bounds, it is still an OK onion, but if there are a lot of points out of bounds, then it is a defective onion. I drew two example images just for the sake of demonstration.
Example in code:
import cv2
import numpy as np
import scipy
from scipy import spatial
img = cv2.imread('oniond.png')
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray_image,180,255,cv2.THRESH_BINARY_INV)
im2, cnts, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(cnts, key=cv2.contourArea)
list_distance = []
points_minmax = []
M = cv2.moments(cnt)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
center = (cX, cY)
for i in cnt:
    tree = spatial.cKDTree(i)
    mindist, minid = tree.query(center)
    list_distance.append(mindist)
    if float(mindist) < 100:
        points_minmax.append(i)
    elif float(mindist) > 140:
        points_minmax.append(i)
    else:
        pass
reshape = np.reshape(list_distance, (-1,1))
under_min = [i for i in list_distance if i < 100]
over_max = [i for i in list_distance if i > 140]
for i in points_minmax:
    cv2.line(img, center, (i[0, 0], i[0, 1]), (0, 0, 255), 2)
if len(over_max) > 50:
    print('defect')
    print('distances over maximum: ', len(over_max))
    print('distances over minimum: ', len(under_min))
elif len(under_min) > 50:
    print('defect')
    print('distances over maximum: ', len(over_max))
    print('distances over minimum: ', len(under_min))
else:
    print('OK')
    print('distances over maximum: ', len(over_max))
    print('distances over minimum: ', len(under_min))
cv2.imshow('img', img)
Result:
OK
distances over maximum: 37
distance over minimum: 0
The output shows that there are 37 points out of bounds (red color) but the onion is still OK.
Result 2:
defect
distances over maximum: 553
distances over minimum: 13
And here you can see that there are more points out of bounds (red color) and the onion is not OK.
Hope this gives at least an idea on how to solve your problem. Cheers!
