I'm trying to use the cv2 module to obtain the pixel coordinates of relatively dark regions in an image.
First I divide the image into super-pixels using the cv2.ximgproc.createSuperpixelSLIC() method.
Then I'd like to treat each super-pixel as an ROI and threshold it based on its intensity, so that darker regions (i.e., where the intensity is lower than some preconfigured threshold) become 1, and regions where the intensity is relatively high (i.e., greater than this threshold) become 0.
I tried the following code, but the problem is that it highlights the background (as it is obviously also dark).
import cv2
import numpy as np
# Parameters
IMG_FILE_PATH = 'PATH TO THE IMAGE'
REGION_SIZE = 200
RULER = 20
N = 10
SAMPLE_SIZE = 5
INTENSITY_TH = 100
# ---
# 1) Load the image
img = cv2.imread(IMG_FILE_PATH, cv2.IMREAD_GRAYSCALE)
# 2) Compute the superpixels
slic = cv2.ximgproc.createSuperpixelSLIC(img, region_size=REGION_SIZE, ruler=RULER)
# 3) Get the characteristics of the superpixels calculated
slic.iterate(N)
slic.enforceLabelConnectivity()
lbls = slic.getLabels()
num_slic = slic.getNumberOfSuperpixels()
# 4) Sample some of the superpixels
sample_idxs = np.random.choice(np.arange(num_slic), size=SAMPLE_SIZE, replace=False)
for idx in sample_idxs:
    img_super_pixel = np.uint8(img * (lbls == idx).astype(np.int16))
    ret, mask_fg = cv2.threshold(img_super_pixel, INTENSITY_TH, 255, cv2.THRESH_BINARY)
    img_super_pixel_th = cv2.bitwise_and(img_super_pixel, img_super_pixel, mask=mask_fg)
    cv2.imshow('Super-pixel', img_super_pixel)
    cv2.imshow('Super-pixel - thresholded', img_super_pixel_th)
    cv2.waitKey()
cv2.destroyAllWindows()
Here is a sample image:
Current Output Example:
So, as can be seen, the background comes out as 1, obviously because it is below the threshold. What I need is for only the dark spots inside the super-pixel to be white, while the background, as well as the pixels inside the super-pixel area that exceed the threshold, are black.
Is there a way to apply the threshold only to the ROI, i.e. the super-pixel, and not to the background?
Thanks in advance.
I was able to solve this by manually checking which pixels in the region are below the threshold, as shown in the following code:
import cv2
import numpy as np
import pandas as pd
from pathlib import Path
# Parameters
IMG_FILE_PATH = 'PATH_TO_IMAGE'
OUTDIR = Path('OUTPUT_FOLDER')
REGION_SIZE = 200
RULER = 20
N = 10
SAMPLE_SIZE = 5
INTENSITY_TH = 100
# ---
# 1) Load the image
img = cv2.imread(IMG_FILE_PATH, cv2.IMREAD_GRAYSCALE)
# 2) Compute the superpixels
slic = cv2.ximgproc.createSuperpixelSLIC(img, region_size=REGION_SIZE, ruler=RULER)
# 3) Get the characteristics of the superpixels calculated
slic.iterate(N)
slic.enforceLabelConnectivity()
mask_slic = slic.getLabelContourMask()
lbls = slic.getLabels()
num_slic = slic.getNumberOfSuperpixels()
# 4) Sample some of the superpixels
sample_idxs = np.random.choice(np.arange(num_slic), size=SAMPLE_SIZE, replace=False)
for idx in sample_idxs:
    # 4.1) Create a pandas.DataFrame to store the points and their validity based on the threshold
    sp_pixels_df = pd.DataFrame(columns=['x', 'y', 'relevant'])
    # 4.2) Get the current super-pixel
    img_super_pixel = np.uint8(img * (lbls == idx).astype(np.int16))
    # 4.3) Find the coordinates of the pixels inside the current super-pixel
    img_super_pixel_idx = np.argwhere(lbls == idx)
    # 4.4) Separate the x and y coordinates of the points located inside the super-pixel
    xs, ys = np.array([t[0] for t in img_super_pixel_idx]), np.array([t[1] for t in img_super_pixel_idx])
    # 4.5) Find the pixels inside the super-pixel whose intensity is below the threshold
    low_intensity_pixels = img_super_pixel[tuple([xs, ys])] < INTENSITY_TH
    # 4.6) Populate the pandas.DataFrame
    sp_pixels_df['x'] = xs
    sp_pixels_df['y'] = ys
    sp_pixels_df['relevant'] = low_intensity_pixels
    # 4.7) Get the valid pixel coordinates
    relevant_points = sp_pixels_df.loc[sp_pixels_df.relevant, ['x', 'y']].values
    # 4.8) Separate the x and y coordinates of the relevant points located inside the super-pixel
    relevant_xs, relevant_ys = np.array([t[0] for t in relevant_points]), np.array([t[1] for t in relevant_points])
    # 4.9) Convert the gray-scale image to BGR to be able to mark the relevant pixels in red
    img_super_pixel_highlighted = cv2.cvtColor(img_super_pixel, cv2.COLOR_GRAY2BGR)
    # 4.10) Highlight the relevant pixels
    img_super_pixel_highlighted[tuple([relevant_xs, relevant_ys])] = (0, 0, 255)
    cv2.imshow('Original Superpixels', img_super_pixel)
    cv2.imshow('Relevant pixels highlighted', img_super_pixel_highlighted)
    cv2.waitKey()
cv2.destroyAllWindows()
Output:
Original:
Highlighted:
Cheers!
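For reference, the same per-super-pixel selection can also be written without the DataFrame, by combining the label mask with the intensity condition directly. This is only a minimal sketch, assuming the img, lbls, idx and INTENSITY_TH variables defined in the code above:
# Minimal sketch (assumes img, lbls, idx and INTENSITY_TH from the code above).
# Keep only pixels that are inside the super-pixel AND below the threshold.
sp_mask = (lbls == idx)                        # boolean mask of the current super-pixel
dark_mask = sp_mask & (img < INTENSITY_TH)     # dark pixels inside the super-pixel only
rows, cols = np.nonzero(dark_mask)             # their pixel coordinates

img_highlighted = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
img_highlighted[dark_mask] = (0, 0, 255)       # mark the dark in-region pixels in red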
Related
Following this example of K-means clustering I want to recreate the same result, only I'm very keen for the final image to contain just the quantized colours (+ white background). As it is, the colour bars get smooshed together into a line of blended colours.
Whilst they look very similar, the image (top half) is what I've got from cv2; it contains 38 colours in total.
The lower image only has 10 colours and is what I'm after.
Let's look at a bit of that with 6 times magnification:
I've tried :
# OpenCV and Python K-Means Color Clustering
# build a histogram of clusters and then create a figure
# representing the number of pixels labeled to each color
hist = colour_utils.centroid_histogram(clt)
bar = colour_utils.plot_colors(hist, clt.cluster_centers_)
bar = cv2.resize(bar, (460, 345), 0, 0, interpolation = cv2.INTER_NEAREST)
However, the resize seems to have no effect, and neither does changing the interpolation type. I don't know what controls the initial image size either.
Confused.
Any ideas?
I recommend you to show the image using cv2.imshow, instead of using matplotlib.
cv2.imshow shows the image "pixel to pixel" by default, while matplotlib.pyplot matches the image dimensions to the size of the axes.
bar_bgr = cv2.cvtColor(bar, cv2.COLOR_RGB2BGR) # Convert RGB to BGR
cv2.imshow('bar', bar_bgr)
cv2.waitKey()
cv2.destroyAllWindows()
In case you want to use matplotlib, take a look at: Display image with a zoom = 1 with Matplotlib imshow() (how to?).
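For completeness, here is a minimal sketch of that matplotlib route (assuming a bar RGB array as built in the snippets here; the dpi value is an arbitrary choice): size the figure so that one array element maps to one screen pixel, i.e. zoom = 1.
import matplotlib.pyplot as plt

dpi = 100                                   # arbitrary choice
h, w = bar.shape[:2]
fig = plt.figure(figsize=(w / dpi, h / dpi), dpi=dpi)
fig.add_axes([0, 0, 1, 1])                  # axes span the whole figure, no margins
plt.axis("off")
plt.imshow(bar)                             # one array element per screen pixel
plt.show()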
Code used for testing:
# import the necessary packages
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import argparse
#import utils
import cv2
def centroid_histogram(clt):
    # grab the number of different clusters and create a histogram
    # based on the number of pixels assigned to each cluster
    numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
    (hist, _) = np.histogram(clt.labels_, bins=numLabels)
    # normalize the histogram, such that it sums to one
    hist = hist.astype("float")
    hist /= hist.sum()
    # return the histogram
    return hist

def plot_colors(hist, centroids):
    # initialize the bar chart representing the relative frequency
    # of each of the colors
    bar = np.zeros((50, 300, 3), dtype="uint8")
    startX = 0
    # loop over the percentage of each cluster and the color of
    # each cluster
    for (percent, color) in zip(hist, centroids):
        # plot the relative percentage of each cluster
        endX = startX + (percent * 300)
        cv2.rectangle(bar, (int(startX), 0), (int(endX), 50),
                      color.astype("uint8").tolist(), -1)
        startX = endX
    # return the bar chart
    return bar
# load the image and convert it from BGR to RGB so that
# we can display it with matplotlib
image = cv2.imread('chelsea.png')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# show our image
plt.figure()
plt.axis("off")
plt.imshow(image)
# reshape the image to be a list of pixels
image = image.reshape((image.shape[0] * image.shape[1], 3))
# cluster the pixel intensities
clt = KMeans(n_clusters = 5)
clt.fit(image)
# build a histogram of clusters and then create a figure
# representing the number of pixels labeled to each color
hist = centroid_histogram(clt)
bar = plot_colors(hist, clt.cluster_centers_)
# show our color bart
#plt.figure()
#plt.axis("off")
#plt.imshow(bar)
#plt.show()
bar = cv2.resize(bar, (460, 345), 0, 0, interpolation = cv2.INTER_NEAREST)
bar_bgr = cv2.cvtColor(bar, cv2.COLOR_RGB2BGR) # Convert RGB to BGR
cv2.imshow('bar', bar_bgr)
cv2.waitKey()
cv2.destroyAllWindows()
I have the following NumPy array of a running man, which you can download here:
https://drive.google.com/file/d/1SfIEqGsBV_vA7iP4UjLdklLJlLdDzozL/view?usp=sharing
To display it, use this code:
import numpy as np
import matplotlib.pyplot as plt
# load data
data = np.load('running_man.npy')
# plot data
plt.imshow(data)
As you can see there is a lot of noise (freckles) in the image. I would like to get rid of it and retrieve a clean image of the runner. Any idea of how to do it?
This is what I have done so far:
from scipy import ndimage
from skimage import measure
# Find contours at a constant value of 1
contours = measure.find_contours(data, 1, fully_connected='high')
# Select the largest contiguous contour
contour = sorted(contours, key=lambda x: len(x))[-1]
# Create an empty image to store the masked array
r_mask = np.zeros_like(data, dtype='bool')
# Create a contour image by using the contour coordinates rounded to their nearest integer value
r_mask[np.round(contour[:, 0]).astype('int'), np.round(contour[:, 1]).astype('int')] = 1
# Fill in the hole created by the contour boundary
r_mask = ndimage.binary_fill_holes(r_mask)
# Invert the mask since one wants pixels outside of the region
r_mask = ~r_mask
plt.imshow(r_mask)
... but as you can see, the outline is very rough!
What works well is to upload the image to an online JPG-to-SVG converter -> this makes the lines super smooth. ... but I want to be able to do it in Python.
Idea:
I am looking for something that can keep the sharp corners, maybe something that detects the gradient along the edge and only keeps the point where the gradient is above a certain threshold...
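One possible direction (not from the original post, just a sketch): simplify the rough contour with cv2.approxPolyDP, which drops points along nearly straight segments while keeping the sharp corners. It assumes the contour and data variables from the code above, and the 1% tolerance is a guess.
import cv2
import numpy as np

# find_contours returns (row, col) float coordinates; convert to integer (x, y) points
cnt = np.round(contour[:, ::-1]).astype(np.int32).reshape(-1, 1, 2)
eps = 0.01 * cv2.arcLength(cnt, True)        # tolerance: 1% of the contour length (a guess)
approx = cv2.approxPolyDP(cnt, eps, True)    # simplified polygon that keeps the corners

smooth_mask = np.zeros(data.shape, dtype=np.uint8)
cv2.fillPoly(smooth_mask, [approx], 1)       # filled, smoother silhouette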
For this specific image you can just use numpy:
import numpy as np
import matplotlib.pyplot as plt
data = np.load('running_man.npy')
data[data > 1] = 0
plt.xticks([])
plt.yticks([])
plt.imshow(data)
For a method that preserves the corners better, we can use median filters while forcing the pixels of the original mask to be kept.
Masked Image
Mask after filtering
Recolored
import cv2
import numpy as np
# load image
img = cv2.imread("run.png");
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY);
# make mask
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU);
# median filter
med = cv2.medianBlur(thresh, 11);
med[thresh == 255] = 255;
# inverse filter
mask = cv2.bitwise_not(med);
med = cv2.medianBlur(mask, 3);
med[mask == 255] = 255;
# recolor
color = np.zeros_like(img);
color[med == 0] = (66, 239, 245);
color[med == 255] = (92, 15, 75);
# show
cv2.imshow("colored", color);
cv2.waitKey(0);
I am new to OpenCV and trying to see if I can find a way to detect vertical text for the image attached.
In this case on row 3 , I would like to get the bounding box around Original Cost and the amount below ($200,000.00).
Similarly I would like to get the bounding box around Amount Existing Liens and the associated amount below. I would then use this data to send to an OCR engine to read the text. Traditional OCR engines go line by line, extract the text, and lose the context.
Here is what I have tried so far -
import cv2
import numpy as np
img = cv2.imread('Test3.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,100,100,apertureSize = 3)
cv2.imshow('edges',edges)
cv2.waitKey(0)
minLineLength = 20
maxLineGap = 10
lines = cv2.HoughLinesP(edges,1,np.pi/180,15,minLineLength=minLineLength,maxLineGap=maxLineGap)
for x in range(0, len(lines)):
    for x1, y1, x2, y2 in lines[x]:
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imshow('hough',img)
cv2.waitKey(0)
Here is my solution, based on Kanan Vyas and Adrian Rosebrock.
It's probably not as "canonical" as you'd wish.
But it seems to work (more or less...) with the image you provided.
Just a word of CAUTION: the code looks, within the directory from which it is run, for a folder named "Cropped" where the cropped images will be stored. So, don't run it in a directory which already contains a folder named "Cropped", because it deletes everything in this folder at each run. Understood? If you're unsure, run it in a separate folder.
The code:
# Import required packages
import cv2
import numpy as np
import pathlib
###################################################################################################################################
# https://www.pyimagesearch.com/2015/04/20/sorting-contours-using-python-and-opencv/
###################################################################################################################################
def sort_contours(cnts, method="left-to-right"):
    # initialize the reverse flag and sort index
    reverse = False
    i = 0
    # handle if we need to sort in reverse
    if method == "right-to-left" or method == "bottom-to-top":
        reverse = True
    # handle if we are sorting against the y-coordinate rather than
    # the x-coordinate of the bounding box
    if method == "top-to-bottom" or method == "bottom-to-top":
        i = 1
    # construct the list of bounding boxes and sort them from top to
    # bottom
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: b[1][i], reverse=reverse))
    # return the list of sorted contours and bounding boxes
    return (cnts, boundingBoxes)
###################################################################################################################################
# https://medium.com/coinmonks/a-box-detection-algorithm-for-any-image-containing-boxes-756c15d7ed26 (with a few modifications)
###################################################################################################################################
def box_extraction(img_for_box_extraction_path, cropped_dir_path):
    img = cv2.imread(img_for_box_extraction_path, 0)  # Read the image
    (thresh, img_bin) = cv2.threshold(img, 128, 255,
                                      cv2.THRESH_BINARY | cv2.THRESH_OTSU)  # Threshold the image
    img_bin = 255 - img_bin  # Invert the image
    # cv2.imwrite("Image_bin.jpg", img_bin)
    # Defining a kernel length
    kernel_length = np.array(img).shape[1] // 200
    # A vertical kernel of (1 x kernel_length), which will detect all the vertical lines in the image.
    verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_length))
    # A horizontal kernel of (kernel_length x 1), which will help to detect all the horizontal lines in the image.
    hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
    # A kernel of (3 x 3) ones.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # Morphological operation to detect vertical lines in the image
    img_temp1 = cv2.erode(img_bin, verticle_kernel, iterations=3)
    verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=3)
    # cv2.imwrite("verticle_lines.jpg", verticle_lines_img)
    # Morphological operation to detect horizontal lines in the image
    img_temp2 = cv2.erode(img_bin, hori_kernel, iterations=3)
    horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=3)
    # cv2.imwrite("horizontal_lines.jpg", horizontal_lines_img)
    # Weighting parameters; these decide how much of each image is added to make the new image.
    alpha = 0.5
    beta = 1.0 - alpha
    # Add the two images with the given weights to get a third image as their weighted sum.
    img_final_bin = cv2.addWeighted(verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)
    img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
    (thresh, img_final_bin) = cv2.threshold(img_final_bin, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # For debugging: enable this line to see the vertical and horizontal lines used to find boxes
    # cv2.imwrite("img_final_bin.jpg", img_final_bin)
    # Find contours in the image, which will detect all the boxes
    contours, hierarchy = cv2.findContours(
        img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Sort all the contours from top to bottom.
    (contours, boundingBoxes) = sort_contours(contours, method="top-to-bottom")
    idx = 0
    for c in contours:
        # Returns the location and width/height for every contour
        x, y, w, h = cv2.boundingRect(c)
        # Only save a box to the "cropped/" folder if its width is > 50 and its height is > 20.
        if (w > 50 and h > 20):  # and w > 3*h:
            idx += 1
            new_img = img[y:y + h, x:x + w]
            cv2.imwrite(cropped_dir_path + str(x) + '_' + str(y) + '.png', new_img)
###########################################################################################################################################################
def prepare_cropped_folder():
    p = pathlib.Path('./Cropped')
    if p.exists():  # The Cropped folder already exists. Let's clean it up
        files = [x for x in p.glob('*.*') if x.is_file()]
        for f in files:
            f.unlink()
    else:
        p.mkdir()
###########################################################################################################################################################
# MAIN
###########################################################################################################################################################
prepare_cropped_folder()
# Read image from which text needs to be extracted
img = cv2.imread("dkesg.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Performing OTSU threshold
ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
thresh1=255-thresh1
bin_y=np.zeros(thresh1.shape[0])
for x in range(0, len(bin_y)):
    bin_y[x] = sum(thresh1[x, :])
bin_y=bin_y/max(bin_y)
ry=np.where(bin_y>0.995)[0]
for i in range(0, len(ry)):
    cv2.line(img, (0, ry[i]), (thresh1.shape[1], ry[i]), (0, 0, 0), 1)
# We need to draw a box around the picture with a white border in order for box_extraction to work
cv2.line(img,(0,0),(0,img.shape[0]-1),(255,255,255),2)
cv2.line(img,(img.shape[1]-1,0),(img.shape[1]-1,img.shape[0]-1),(255,255,255),2)
cv2.line(img,(0,0),(img.shape[1]-1,0),(255,255,255),2)
cv2.line(img,(0,img.shape[0]-1),(img.shape[1]-1,img.shape[0]-1),(255,255,255),2)
cv2.line(img,(0,0),(0,img.shape[0]-1),(0,0,0),1)
cv2.line(img,(img.shape[1]-3,0),(img.shape[1]-3,img.shape[0]-1),(0,0,0),1)
cv2.line(img,(0,0),(img.shape[1]-1,0),(0,0,0),1)
cv2.line(img,(0,img.shape[0]-2),(img.shape[1]-1,img.shape[0]-2),(0,0,0),1)
cv2.imwrite('out.png',img)
box_extraction("out.png", "./Cropped/")
Now... it puts the cropped regions in the Cropped folder. They are named x_y.png, with (x, y) being the position in the original image.
Here are two examples of the outputs
and
Then, in a terminal, I used pytesseract on these two images.
The results are the following:
1)
Original Cost
$200,000.00
2)
Amount Existing Liens
$494,215.00
As you can see, pytesseract got the amount wrong in the second case... So, be careful.
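For reference, the same step can be scripted from Python instead of the terminal. This is only a minimal sketch, assuming pytesseract and the Tesseract binary are installed and that box_extraction() has already populated ./Cropped/ as above:
import pathlib
import pytesseract
from PIL import Image

# Run OCR on every cropped cell produced by box_extraction()
for crop in sorted(pathlib.Path('./Cropped').glob('*.png')):
    text = pytesseract.image_to_string(Image.open(crop))
    print(crop.name, '->', text.strip())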
Best regards,
Stéphane
I assume the bounding box is fixed (a rectangle able to fit "Original Amount" and the amount below it). You can use text detection to detect "Original Amount" and "Amount Existing Liens" using OCR, then crop out the image based on the detected locations for further OCR on the amounts. You can refer to this link for text detection.
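A rough sketch of that idea (the file name, anchor word and crop size are assumptions for illustration, and pytesseract is assumed to be installed): locate the label with OCR word boxes, then crop a region around/below it for a second OCR pass.
import cv2
import pytesseract

img = cv2.imread('Test3.png')
data = pytesseract.image_to_data(img, output_type=pytesseract.Output.DICT)
for i, word in enumerate(data['text']):
    if word.strip() == 'Original':                     # hypothetical anchor word
        x, y, w, h = (data['left'][i], data['top'][i],
                      data['width'][i], data['height'][i])
        roi = img[y:y + 4 * h, x:x + 6 * w]            # band below/around the label (a guess)
        print(pytesseract.image_to_string(roi))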
Try to divide the image into different cells using the lines in the image.
For example, first divide the input into rows by detecting the horizontal lines. This can be done by using cv.HoughLinesP and checking for each line if the difference between y-coordinate of the begin and end point is smaller than a certain threshold abs(y2 - y1) < 10. If you have a horizontal line, it's a separator for a new row. You can use the y-coordinates of this line to split the input horizontally.
Next, for the row you're interested in, divide the region into columns using the same technique, but now make sure the difference between the x-coordinates of the begin and end point are smaller than a certain threshold, since you're now looking for the vertical lines.
You can now crop the image to different cells using the y-coordinates of the horizontal lines and the x-coordinates of the vertical lines. Pass these cropped regions one by one to the OCR engine and you'll have for each cell the corresponding text.
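A minimal sketch of that row/column splitting (the thresholds, Canny parameters and file name are assumptions, not taken from the answer above):
import cv2
import numpy as np

img = cv2.imread('Test3.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 100, 100, apertureSize=3)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 15, minLineLength=20, maxLineGap=10)

row_ys, col_xs = [], []
for x1, y1, x2, y2 in lines[:, 0]:
    if abs(y2 - y1) < 10:            # (nearly) horizontal line -> row separator
        row_ys.append(min(y1, y2))
    elif abs(x2 - x1) < 10:          # (nearly) vertical line -> column separator
        col_xs.append(min(x1, x2))

row_ys = sorted(set(row_ys))
col_xs = sorted(set(col_xs))
# Each cell is bounded by consecutive separators; crop it and pass it to the OCR engine.
for r0, r1 in zip(row_ys, row_ys[1:]):
    for c0, c1 in zip(col_xs, col_xs[1:]):
        cell = img[r0:r1, c0:c1]
        # ... run OCR on `cell` here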
I am new to image processing and python. You might've seen my amateur codes on this site in the last couple of days.
I am trying to count the number of trees using aerial images. This is my code:
from PIL import Image
import cv2
import numpy as np
from skimage import io, filters, measure
from scipy import ndimage
img = Image.open("D:\\Texture analysis\\K-2.jpg")
row, col = img.size
hsvimg = img.convert('HSV')
hsvimg.mode = 'RGB'
hsvimg.save('newImage2.jpg')
npHSI = np.asarray(hsvimg) #Convert HSI Image to np image
blur = cv2.GaussianBlur(npHSI, (45, 45), 5)
assert isinstance(blur, np.ndarray) ##############################
assert len(blur.shape) == 3 #Convert np Image to HSI Image
assert blur.shape[2] == 3 ##############################
hsiBlur = Image.fromarray(blur, 'RGB')
hsiBlur.save('hsiBlur.jpg') #Save the blurred image
## Read
img = cv2.imread("D:\\Texture analysis\\hsiBlur.jpg")
## convert to hsv
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#Threshold the image and segment the trees
mask = cv2.inRange(hsv, (36, 25, 25), (70, 255,255))
imask = mask>0
green = np.zeros_like(img, np.uint8)
green[imask] = img[imask]
## save
cv2.imwrite("green.png", green)
#Count the number of trees
im = io.imread('green.png', as_grey=True)
val = filters.threshold_otsu(im)
drops = ndimage.binary_fill_holes(im < val)
labels = measure.label(drops)
print(labels.max())
Original image:
HSI image with gaussian filter:
Segmented image:
The last part of the code returns 7, which is a wrong output. The value should be above 50. How can I properly count the number of green segments in the final segmented image?
EDIT
I converted green.png to binary and applied erosion with a 3x3 filter and iterated it 7 times to remove the noise.
This is what I did at the end. I followed this stackoverflow link
##save
cv2.imwrite("green.png", green)
#Convert to grayscale
gray = np.dot(green[...,:3], [0.299, 0.587, 0.114])
cv2.imwrite("grayScale.jpg", gray)
#Binarize the grayscale image
ret,bin_img = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
cv2.imwrite("bin_img.jpg", bin_img)
#Erosion to remove the noise
kernel = np.ones((3, 3),np.uint8)
erosion = cv2.erode(gray, kernel, iterations = 7)
cv2.imwrite("erosion.jpg", erosion)
#Count the number of trees
finalImage = cv2.imread('erosion.jpg')
finalImage = cv2.cvtColor(finalImage, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(finalImage, 127, 255, 1)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    cv2.drawContours(finalImage, [cnt], 0, (0, 0, 255), 1)
Saurav mentioned in his answer that ... the size of "contours" will give you the count. But print(contour.size()) gives an error, and print(contour) just prints a long 2D array. How can I get the size of contour?
PS. I didn't upload the grayscale, binary and eroded image because i felt that the images were already taking too much space, I can still upload them if anyone wants to.
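(For what it's worth, a minimal sketch, assuming the contours list from the code above: the count is simply the length of the list, optionally after filtering out tiny specks; the 50-pixel area cutoff below is an arbitrary value.)
tree_like = [c for c in contours if cv2.contourArea(c) > 50]   # 50 px is an arbitrary cutoff
print('number of segments:', len(tree_like))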
I've found 52 trees with that script:
from PIL import Image, ImageDraw, ImageFont
image = Image.open('04uX3.jpg')
pixels = image.load()
size = image.size
draw = ImageDraw.Draw(image)
font = ImageFont.truetype('arial', 60)
i = 1
for x in range(0, size[0], 100):
    for y in range(0, size[1], 100):
        if pixels[x, y][1] > 200:
            draw.text((x, y), str(i), (255, 0, 0), font=font)
            i += 1
image.save('result.png')
You can see that some trees weren't detected and some non-trees were detected, so this is a very rough calculation:
I am seriously struggling here. I'm using OpenCV (cv2) and Python 3. The question tracking multiple objects of the same color is exactly the one I'm asking, but the pages are out of date, the links don't work anymore, and I can't find anything else online about it. I can track multiple colors (a red object, a green object, a blue object, etc.), however I cannot for the life of me figure out how to track two red objects.
# import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
help="max buffer size")
args = vars(ap.parse_args())
# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])
# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
camera = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
camera = cv2.VideoCapture(args["video"])
# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()
    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break
    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
I figured that in the line above, which reads c = max(cnts, key=cv2.contourArea), I could simply find the second-largest contour and use that one as well, but once again I couldn't find anything online about how to do this (see the sketch after the code below).
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                       (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
    # update the points queue
    pts.appendleft(center)
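A minimal sketch of tracking the two largest same-colour blobs instead of only the biggest one (it assumes the cnts, frame and pts variables from the loop above; this is not code from the original post, just an illustration of the idea):
# Replacement for the block that starts at `c = max(cnts, key=cv2.contourArea)`:
# take the two largest contours instead of only the largest one.
for c in sorted(cnts, key=cv2.contourArea, reverse=True)[:2]:
    ((x, y), radius) = cv2.minEnclosingCircle(c)
    M = cv2.moments(c)
    if M["m00"] == 0 or radius <= 10:   # skip degenerate or tiny blobs
        continue
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
    cv2.circle(frame, center, 5, (0, 0, 255), -1)
    pts.appendleft(center)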