How to find the dimensions of an object using RealSense (L515 camera) - python-3.x

I have a RealSense L515 camera. I want to find the size of an object in the scene. In my case, I am running Darknet to detect a dummy object. Once the object is detected, I want to use the depth frame and the color frame to calculate the length and breadth of the object (roughly). For example, an apple: the bounding box is drawn around the apple. Now, how do I use this bounding box data along with the color frame and depth frame to find the dimensions of the apple? As the bounding box is roughly the size of the apple, I want to convert the pixel coordinates of the bounding box into the approximate real-life dimensions of the apple. I read online about using point clouds, but I am new to this, so I am quite unclear how to proceed.
import darknet
import cv2
import numpy as np
import pyrealsense2 as rs
"""##############. Function definitions. ##################"""
#Define the detection function
def image_detection(image, network, class_names, class_colors, thresh):
    # Darknet doesn't accept numpy images.
    # Create one with image we reuse for each detect
    width = darknet.network_width(network)
    height = darknet.network_height(network)
    darknet_image = darknet.make_image(width, height, 3)
    #image = cv2.imread(image_path)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_resized = cv2.resize(image_rgb, (width, height), interpolation=cv2.INTER_LINEAR)
    darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())
    detections = darknet.detect_image(network, class_names, darknet_image, thresh=thresh)
    darknet.free_image(darknet_image)
    image = darknet.draw_boxes(detections, image_resized, class_colors)
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB), detections
# Initialize and declare the neural network along with data files, config files etc
quantity_apples = []
config_file = "/home/jetson/Desktop/pano_l515/yolov4.cfg"
data_file = "/home/jetson/Desktop/pano_l515/coco.data"
weights = "/home/jetson/Desktop/pano_l515/yolov4.weights"
network, class_names, class_colors = darknet.load_network(
    config_file,
    data_file,
    weights,
    batch_size=1
)
## Realsense from align-depth2color.py
# Create a pipeline
pipeline = rs.pipeline()
# Create a config and configure the pipeline to stream
# different resolutions of color and depth streams
config = rs.config()
# Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))
config.enable_stream(rs.stream.depth, 1024, 768, rs.format.z16, 30)
if device_product_line == 'L500':
    print(device_product_line)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
else:
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
profile = pipeline.start(config)
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: " , depth_scale)
# We will be removing the background of objects more than
# clipping_distance_in_meters meters away
clipping_distance_in_meters = 1 #1 meter
clipping_distance = clipping_distance_in_meters / depth_scale
# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
# Streaming loop
try:
    for i in range(0, 2):
        # Get frameset of color and depth
        frames = pipeline.wait_for_frames()
        # frames.get_depth_frame() is a 640x360 depth image
        # Align the depth frame to color frame
        aligned_frames = align.process(frames)
        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame()  # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()
        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue
        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        dn_frame_width = 416
        dn_frame_height = 416
        frame_width = color_image.shape[1]
        frame_height = color_image.shape[0]
        #### Passing the image to darknet
        image, detections = image_detection(color_image, network, class_names, class_colors, thresh=0.05)
        for i in range(len(detections)):
            xc_percent = detections[i][2][0]/dn_frame_width
            yc_percent = detections[i][2][1]/dn_frame_height
            w_percent = detections[i][2][2]/dn_frame_width
            h_percent = detections[i][2][3]/dn_frame_height
            xc = xc_percent*frame_width
            yc = yc_percent*frame_height
            w = w_percent*frame_width
            h = h_percent*frame_height
            xmin = xc - w/2.0
            ymin = yc - h/2.0
            xmax = xc + w/2.0
            ymax = yc + h/2.0
            # If object is detected, increase the count of the object in the frame
            if detections[i][0] == "apple":
                cv2.rectangle(color_image, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (0,0,255), 2)
                cv2.putText(color_image, "apple", (int(xmin), int(ymin-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,255), 2)
                #cv2.imwrite(output_path, frame)
        # Render images:
        #   depth align to color on left
        #   depth on right
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        images = np.hstack((color_image, depth_colormap))
        cv2.imwrite("test_images.jpg", color_image)
        #cv2.namedWindow('Align Example', cv2.WINDOW_NORMAL)
        #cv2.imshow('Align Example', images)
        key = cv2.waitKey(1)
        # Press esc or 'q' to close the image window
        #if key & 0xFF == ord('q') or key == 27:
        cv2.destroyAllWindows()
        #break
finally:
    pipeline.stop()
This is the output image so far. How do I proceed?
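One way to proceed, sketched under a few assumptions (the box corners xmin/ymin/xmax/ymax and aligned_depth_frame are the ones computed above, and the depth returns at the box edges actually hit the apple rather than the background): deproject pixels from the aligned depth frame into 3D camera coordinates with rs2_deproject_pixel_to_point, then take the distance between the deprojected midpoints of opposite box edges as a rough width and height.
import numpy as np
import pyrealsense2 as rs

def estimate_box_dimensions(aligned_depth_frame, xmin, ymin, xmax, ymax):
    """Rough width/height (in metres) of the object inside a bounding box."""
    intrin = aligned_depth_frame.profile.as_video_stream_profile().intrinsics
    xc, yc = int((xmin + xmax) / 2), int((ymin + ymax) / 2)

    def deproject(px, py):
        depth = aligned_depth_frame.get_distance(px, py)  # depth in metres at that pixel
        return np.array(rs.rs2_deproject_pixel_to_point(intrin, [px, py], depth))

    # midpoints of the left/right and top/bottom edges of the box
    left, right = deproject(int(xmin), yc), deproject(int(xmax), yc)
    top, bottom = deproject(xc, int(ymin)), deproject(xc, int(ymax))

    width_m = np.linalg.norm(right - left)    # breadth of the object
    height_m = np.linalg.norm(bottom - top)   # length of the object
    return width_m, height_m
This could be called in the detection loop right after the apple's rectangle is drawn, e.g. w_m, h_m = estimate_box_dimensions(aligned_depth_frame, xmin, ymin, xmax, ymax). If get_distance returns 0 at an edge pixel (no depth return), or the edge pixel falls on the background, the estimate degrades, so sampling several pixels along each edge and taking the median is a common refinement.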

Related

How to find the location of a point in an image in millimeters using camera matrix?

I am using a standard 640x480 webcam. I have done camera calibration in OpenCV in Python 3. This is the code I am using. The code is working and gives me the camera matrix and distortion coefficients successfully.
Now, how can I find how many millimeters there are in 640 pixels in my scene image? The webcam is mounted horizontally above a table, and a robotic arm is placed on the table. Using the camera I am finding the centroid of an object. Using the camera matrix, my goal is to convert the location of that object (e.g. 300x200 pixels) into millimeters so that I can give the millimeter coordinates to the robotic arm to pick up that object.
I have searched but could not find any relevant information.
Is there any equation or method for this? Thanks a lot!
import numpy as np
import cv2
import yaml
import os
# Parameters
#TODO : Read from file
n_row=4 #Checkerboard Rows
n_col=6 #Checkerboard Columns
n_min_img = 10 # number of images needed for calibration
square_size = 40 # size of each individual box on Checkerboard in mm
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) # termination criteria
corner_accuracy = (11,11)
result_file = "./calibration.yaml" # Output file having camera matrix
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(n_row-1,n_col-1,0)
objp = np.zeros((n_row*n_col,3), np.float32)
objp[:,:2] = np.mgrid[0:n_row,0:n_col].T.reshape(-1,2) * square_size
# Intialize camera and window
camera = cv2.VideoCapture(0) #Supposed to be the only camera
if not camera.isOpened():
    print("Camera not found!")
    quit()
width = int(camera.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT))
cv2.namedWindow("Calibration")
# Usage
def usage():
    print("Press on displayed window : \n")
    print("[space] : take picture")
    print("[c] : compute calibration")
    print("[r] : reset program")
    print("[ESC] : quit")
usage()
Initialization = True
while True:
    if Initialization:
        print("Initialize data structures ..")
        objpoints = []  # 3d point in real world space
        imgpoints = []  # 2d points in image plane.
        n_img = 0
        Initialization = False
        tot_error = 0
    # Read from camera and display on windows
    ret, img = camera.read()
    cv2.imshow("Calibration", img)
    if not ret:
        print("Cannot read camera frame, exit from program!")
        camera.release()
        cv2.destroyAllWindows()
        break
    # Wait for instruction
    k = cv2.waitKey(50)
    # SPACE pressed to take picture
    if k%256 == 32:
        print("Adding image for calibration...")
        imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(imgGray, (n_row, n_col), None)
        # If found, add object points, image points (after refining them)
        if not ret:
            print("Cannot found Chessboard corners!")
        else:
            print("Chessboard corners successfully found.")
            objpoints.append(objp)
            n_img += 1
            corners2 = cv2.cornerSubPix(imgGray, corners, corner_accuracy, (-1, -1), criteria)
            imgpoints.append(corners2)
            # Draw and display the corners
            imgAugmnt = cv2.drawChessboardCorners(img, (n_row, n_col), corners2, ret)
            cv2.imshow('Calibration', imgAugmnt)
            cv2.waitKey(500)
    # "c" pressed to compute calibration
    elif k%256 == 99:
        if n_img <= n_min_img:
            print("Only ", n_img, " captured, ", " at least ", n_min_img, " images are needed")
        else:
            print("Computing calibration ...")
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (width, height), None, None)
            if not ret:
                print("Cannot compute calibration!")
            else:
                print("Camera calibration successfully computed")
                # Compute reprojection errors
                for i in range(len(objpoints)):
                    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
                    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)
                    tot_error += error
                print("Camera matrix: ", mtx)
                print("Distortion coeffs: ", dist)
                print("Total error: ", tot_error)
                print("Mean error: ", np.mean(error))
                # Saving calibration matrix
                try:
                    os.remove(result_file)  # Delete old file first
                except Exception as e:
                    #print(e)
                    pass
                print("Saving camera matrix .. in ", result_file)
                data = {"camera_matrix": mtx.tolist(), "dist_coeff": dist.tolist()}
                with open(result_file, "w") as f:
                    yaml.dump(data, f, default_flow_style=False)
    # ESC pressed to quit
    elif k%256 == 27:
        print("Escape hit, closing...")
        camera.release()
        cv2.destroyAllWindows()
        break
    # "r" pressed to reset
    elif k%256 == 114:
        print("Reset program...")
        Initialization = True
This is the camera matrix:
818.6 0 324.4
0 819.1 237.9
0 0 1
Distortion coeffs:
0.34 -5.7 0 0 33.45
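Those numbers can also be read as the pinhole-model parameters: fx ≈ 818.6 px, fy ≈ 819.1 px and principal point (cx, cy) ≈ (324.4, 237.9). As a sketch (the 500 mm distance below is an assumed value that would have to be measured), if the table is parallel to the image plane at a known distance Z, the camera matrix alone converts a pixel location to millimetres:
# pinhole model for a plane parallel to the image at a known distance Z (all lengths in mm)
fx, fy = 818.6, 819.1      # focal lengths from the camera matrix above
cx, cy = 324.4, 237.9      # principal point from the camera matrix above
Z_mm = 500.0               # assumed camera-to-table distance; measure it for the real setup

def pixel_to_mm(u, v, Z=Z_mm):
    X = (u - cx) * Z / fx  # mm to the right of the optical axis
    Y = (v - cy) * Z / fy  # mm below the optical axis
    return X, Y

print(pixel_to_mm(300, 200))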
I was actually thinking that you should be able to solve your problem in a simple way:
mm_per_pixel = real_mm_width : 640px
Assuming that the camera stays parallel to the plane containing the object to pick [i.e. at a fixed distance], real_mm_width can be found by measuring the physical distance corresponding to those 640 pixels of your picture. For the sake of an example, say you find that real_mm_width = 32 cm = 320 mm, so you get mm_per_pixel = 0.5 mm/px. With a fixed distance this ratio doesn't change.
This also seems to be the suggestion from the official documentation:
This consideration helps us to find only X,Y values. Now for X,Y
values, we can simply pass the points as (0,0), (1,0), (2,0), ...
which denotes the location of points. In this case, the results we get
will be in the scale of size of chess board square. But if we know the
square size, (say 30 mm), we can pass the values as (0,0), (30,0),
(60,0), ... . Thus, we get the results in mm
Then you just need to convert the centroid coordinates in pixels [e.g. (pixel_x_centroid, pixel_y_centroid) = (300px, 200px)] to mm using:
mm_x_centroid = pixel_x_centroid * mm_per_pixel
mm_y_centroid = pixel_y_centroid * mm_per_pixel
which would give you the final answer:
(mm_x_centroid, mm_y_centroid) = (150mm, 100mm)
Another way to see the same thing is this proportion where the first member is the measurable/known ratio:
real_mm_width : 640px = mm_x_centroid : pixel_x_centroid = mm_y_centroid : pixel_y_centroid
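A quick sketch of that proportion in code, reusing the 320 mm figure assumed in the example above:
# measured once, with the camera at its fixed working distance
real_mm_width = 320.0                  # physical width (mm) spanned by the full 640 px image
mm_per_pixel = real_mm_width / 640.0   # 0.5 mm/px in this example
# centroid found in pixels, e.g. from cv2.moments
pixel_x_centroid, pixel_y_centroid = 300, 200
mm_x_centroid = pixel_x_centroid * mm_per_pixel   # 150.0 mm
mm_y_centroid = pixel_y_centroid * mm_per_pixel   # 100.0 mm
print(mm_x_centroid, mm_y_centroid)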

How can I display multiple windows concatenated with cv2 in array form?

I am capturing frames in real time from several cameras. The output of each camera is displayed in a window with cv2.imshow. What I want to achieve now is for all the windows to be concatenated into a grid/matrix; for example, if I have 4 cameras, the windows should be shown concatenated in a 2x2 grid.
Below I attach the code I have, which allows me to capture the frames from the different cameras, but I have not been able to do the above.
cap = []
ret = []
frame = []
final = ""
i = 0
cap.append(cv2.VideoCapture(0))
cap.append(cv2.VideoCapture(1))
cap.append(cv2.VideoCapture(2))
number_cameras = len(cap)
# I initialize values
for x in range(number_cameras):
    ret.append(x)
    frame.append(x)
while(True):
    # I capture frame by frame from each camera
    ret[i], frame[i] = cap[i].read()
    if i == number_cameras-1:
        final = cv2.hconcat([cv2.resize(frame[x], (400, 400)) for x in range(number_cameras)])
        cv2.namedWindow('frame')
        cv2.moveWindow('frame', 0, 0)
        # I show the concatenated outputs
        cv2.imshow('frame', final)
        i = 0
    else:
        i = i + 1
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# I release the cameras
for x in range(number_cameras):
    cap[x].release()
cv2.destroyAllWindows()
You can construct a frame out of 4 frames by resizing the frames to half their width and height. Then create an empty frame and fill it according to the resized width and height.
Since I don't currently have 4 cameras, I am simulating this using a single camera.
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
while(True):
    ret, frame = cap.read()
    # find half of the width
    w = int(frame.shape[1] * 50 / 100)
    # find half of the height
    h = int(frame.shape[0] * 50 / 100)
    dim = (w, h)
    # resize the frame to half its size
    frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    # create an empty output frame
    output = np.zeros((h*2, w*2, 3), np.uint8)
    # place a frame at the top left
    output[0:h, 0:w] = frame
    # place a frame at the top right
    output[0:h, w:w*2] = frame
    # place a frame at the bottom right
    output[h:h*2, w:w*2] = frame
    # place a frame at the bottom left
    output[h:h*2, 0:w] = frame
    cv2.imshow("Output", output)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
cv2.destroyAllWindows()
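To tile the frames of several real cameras instead of one simulated camera, a small helper along these lines could build the grid for an arbitrary list of frames (a sketch; tile_frames and the 2x2 layout here are illustrative, not from the original answer):
import numpy as np
import cv2

def tile_frames(frames, rows, cols, cell_size=(400, 300)):
    """Resize each frame to cell_size and arrange the frames in a rows x cols grid."""
    w, h = cell_size
    cells = [cv2.resize(f, (w, h)) for f in frames]
    # pad with black cells if there are fewer frames than grid positions
    cells += [np.zeros((h, w, 3), np.uint8)] * (rows * cols - len(cells))
    grid_rows = [cv2.hconcat(cells[r * cols:(r + 1) * cols]) for r in range(rows)]
    return cv2.vconcat(grid_rows)

# e.g. with four captures: grid = tile_frames([f0, f1, f2, f3], 2, 2); cv2.imshow("frame", grid)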

How to clear numbers from the image using openCV?

I'm trying to remove numbers which are lying inside the circular part of the image; the numbers are black and the background varies between red, yellow, blue and green.
I am using OpenCV to remove those numbers. I used a mask which extracts the numbers from the image, and with the help of cv2.inpaint I tried to remove those numbers from the images.
For my further analysis I need a clean image, but my current approach gives a distorted image and the numbers are not completely removed.
I tried changing the threshold values; lowering them misses the numbers in dark-shaded areas such as the green and red regions.
import cv2
img = cv2.imread('scan_1.jpg')
mask = cv2.threshold(img,50,255,cv2.THRESH_BINARY_INV)[1][:,:,0]
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.destroyAllWindows()
dst = cv2.inpaint(img, mask, 5, cv2.INPAINT_TELEA)
cv2.imshow('dst',dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('ost_1.jpg',dst)
Input images: a) scan_1.jpg
b) scan_2.jpg
Output images: a) ost_1.jpg
b) ost_2.jpg
Expected Image: Circles can ignored, but something similar to it is required.
Here is my attempt; a better/easier solution might be possible if you do not care about preserving text outside of your circle.
import cv2
import numpy as np
# connectivity method used for finding connected components, 4 vs 8
CONNECTIVITY = 4
# HSV threshold for finding black pixels
H_THRESHOLD = 179
S_THRESHOLD = 255
V_THRESHOLD = 150
# read image
img = cv2.imread("a1.jpg")
img_height = img.shape[0]
img_width = img.shape[1]
# save a copy for creating resulting image
result = img.copy()
# convert image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# found the circle in the image
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.7, minDist= 100, param1 = 48, param2 = 100, minRadius=70, maxRadius=100)
# draw found circle, for visual only
circle_output = img.copy()
# check if we found exactly 1 circle
num_circles = len(circles)
print("Number of found circles:{}".format(num_circles))
if (num_circles != 1):
    print("invalid number of circles found ({}), should be 1".format(num_circles))
    exit(0)
# save center position and radius of found circle
circle_x = 0
circle_y = 0
circle_radius = 0
if circles is not None:
    # convert the (x, y) coordinates and radius of the circles to integers
    circles = np.round(circles[0, :]).astype("int")
    for (x, y, radius) in circles:
        circle_x, circle_y, circle_radius = (x, y, radius)
        cv2.circle(circle_output, (circle_x, circle_y), circle_radius, (255, 0, 0), 4)
        print("circle center:({},{}), radius:{}".format(x, y, radius))
# keep a median filtered version of image, will be used later
median_filtered = cv2.medianBlur(img, 21)
# Convert BGR to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# define range of black color in HSV
lower_val = np.array([0,0,0])
upper_val = np.array([H_THRESHOLD,S_THRESHOLD,V_THRESHOLD])
# Threshold the HSV image to get only black colors
mask = cv2.inRange(hsv, lower_val, upper_val)
# find connected components
components = cv2.connectedComponentsWithStats(mask, CONNECTIVITY, cv2.CV_32S)
# apply median filtering to found components
#centers = components[3]
num_components = components[0]
print("Number of found connected components:{}".format(num_components))
labels = components[1]
stats = components[2]
for i in range(1, num_components):
    left = stats[i, cv2.CC_STAT_LEFT] - 10
    top = stats[i, cv2.CC_STAT_TOP] - 10
    width = stats[i, cv2.CC_STAT_WIDTH] + 10
    height = stats[i, cv2.CC_STAT_HEIGHT] + 10
    # iterate each pixel and replace them if
    # they are inside circle
    for row in range(top, top+height+1):
        for col in range(left, left+width+1):
            dx = col - circle_x
            dy = row - circle_y
            if (dx*dx + dy*dy <= circle_radius * circle_radius):
                result[row, col] = median_filtered[row, col]
# smooth the image, may be necessary?
#result = cv2.blur(result, (3,3))
# display image(s)
cv2.imshow("img", img)
cv2.imshow("gray", gray)
cv2.imshow("found circle:", circle_output)
cv2.imshow("mask", mask)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result for a1:

Template Matching: efficient way to create mask for minMaxLoc?

Template matching in OpenCV is great. And you can pass a mask to cv2.minMaxLoc so that you only search (sort of) in part of the image for the template you want. You can also use a mask at the matchTemplate operation, but this only masks the template.
I want to find a template and I want to be assured that this template is within some other region of my image.
Calculating the mask for minMaxLoc seems kind of heavy. That is, calculating an accurate mask feels heavy. If you calculate a mask the easy way, it ignores the size of the template.
Examples are in order. My input images are shown below. They're a bit contrived. I want to find the candy bar, but only if it's completely inside the white circle of the clock face.
clock1
clock2
template
In clock1, the candy bar is inside the circular clock face and it's a "PASS". But in clock2, the candy bar is only partially inside the face and I want it to be a "FAIL". Here's a code sample for doing it the easy way. I use cv.HoughCircles to find the clock face.
import numpy as np
import cv2
img = cv2.imread('clock1.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
template = cv2.imread('template.png')
t_h, t_w = template.shape[0:2] # template height and width
# find circle in gray image using Hough transform
circles = cv2.HoughCircles(gray, method = cv2.HOUGH_GRADIENT, dp = 1,
                           minDist = 150, param1 = 50, param2 = 70,
                           minRadius = 131, maxRadius = 200)
i = circles[0,0]
x0 = i[0]
y0 = i[1]
r = i[2]
# display circle on color image
cv2.circle(img,(x0, y0), r,(0,255,0),2)
# do the template match
result = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
# finally, here is the part that gets tricky. we want to find highest
# rated match inside circle and we'd like to use minMaxLoc
# make mask by drawing circle on zero array
mask = np.zeros(result.shape, dtype = np.uint8) # minMaxLoc will throw
# error w/o np.uint8
cv2.circle(mask, (x0, y0), r, color = 1, thickness = -1)
# call minMaxLoc
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result, mask = mask)
# draw found rectangle on img
if max_val > 0.4: # use 0.4 as threshold for finding candy bar
    cv2.rectangle(img, max_loc, (max_loc[0]+t_w, max_loc[1]+t_h), (0,255,0), 4)
cv2.imwrite('output.jpg', img)
output using clock1
output using clock2
finds candy bar even
though part of it is outside circle
So to properly make a mask, I use a bunch of NumPy operations. I make four separate masks (one for each corner of the template bounding box) and then AND them together. I'm not aware of any convenience functions in OpenCV that would do the mask for me. I'm a little nervous that all of the array operations will be expensive. Is there a better way to do this?
h, w = result.shape[0:2]
# make arrays that hold x,y coords
grid = np.indices((h, w))
x = grid[1]
y = grid[0]
top_left_mask = np.hypot(x - x0, y - y0) - r < 0
top_right_mask = np.hypot(x + t_w - x0, y - y0) - r < 0
bot_left_mask = np.hypot(x - x0, y + t_h - y0) - r < 0
bot_right_mask = np.hypot(x + t_w - x0, y + t_h - y0) - r < 0
mask = np.logical_and.reduce((top_left_mask, top_right_mask,
                              bot_left_mask, bot_right_mask))
mask = mask.astype(np.uint8)
cv2.imwrite('mask.png', mask*255)
Here's what the "fancy" mask looks like:
Seems about right. It cannot be circular because of the template shape. If I run clock2.jpg with this mask I get:
It works. No candy bars are identified. But I wish I could do it in fewer lines of code...
EDIT:
I've done some profiling. I ran 100 cycles of the "easy" way and the "accurate" way and calculated frames per second (fps):
easy way: 12.7 fps
accurate way: 7.8 fps
so there is some price to pay for making the mask with NumPy. These tests were done on a relatively powerful workstation. It could get uglier on more modest hardware...
Method 1: 'mask' image before cv2.matchTemplate
Just for kicks, I tried to make my own mask of the image that I pass to cv2.matchTemplate to see what kind of performance I can achieve. To be clear, this isn't a proper mask -- I set all of the pixels to ignore to one color (black or white). This is to get around the fact that only TM_SQDIFF and TM_CCORR_NORMED support a proper mask.
@Alexander Reynolds makes a very good point in the comments that some care must be taken if the template image (the thing we're trying to find) has lots of black or lots of white. For many problems, we will know a priori what the template looks like and we can specify a white background or black background.
I use cv2.multiply, which seems to be faster than numpy.multiply. cv2.multiply has the added advantage that it automatically clips the results to the range 0 to 255.
import numpy as np
import cv2
import time
img = cv2.imread('clock1.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
template = cv2.imread('target.jpg')
t_h, t_w = template.shape[0:2] # template height and width
mask_background = 'WHITE'
start_time = time.time()
for i in range(100): # do 100 cycles for timing
    # find circle in gray image using Hough transform
    circles = cv2.HoughCircles(gray, method = cv2.HOUGH_GRADIENT, dp = 1,
                               minDist = 150, param1 = 50, param2 = 70,
                               minRadius = 131, maxRadius = 200)
    i = circles[0,0]
    x0 = i[0]
    y0 = i[1]
    r = i[2]
    # display circle on color image
    cv2.circle(img, (x0, y0), r, (0,255,0), 2)
    if mask_background == 'BLACK':  # black = 0, white = 255 on grayscale
        mask = np.zeros(img.shape, dtype = np.uint8)
    elif mask_background == 'WHITE':
        mask = 255*np.ones(img.shape, dtype = np.uint8)
    cv2.circle(mask, (x0, y0), r, color = (1,1,1), thickness = -1)
    img2 = cv2.multiply(img, mask)  # element wise multiplication
                                    # values > 255 are truncated at 255
    # do the template match
    result = cv2.matchTemplate(img2, template, cv2.TM_CCOEFF_NORMED)
    # call minMaxLoc
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    # draw found rectangle on img
    if max_val > 0.4:
        cv2.rectangle(img, max_loc, (max_loc[0]+t_w, max_loc[1]+t_h), (0,255,0), 4)
fps = 100/(time.time()-start_time)
print('fps ', fps)
cv2.imwrite('output.jpg', img)
Profiling results:
BLACK background 12.3 fps
WHITE background 12.1 fps
Using this method has very little performance hit relative to the 12.7 fps in the original question. However, it has the drawback that it will still find templates that stick over the edge a little bit. Depending on the exact nature of the problem, this may be acceptable in many applications.
Method 2: use cv2.boxFilter to create mask for minMaxLoc
In this technique, we start with a circular mask (as in OP), but then modify it with cv2.boxFilter. We change the anchor from the default center of the kernel to the top-left corner (0, 0).
import numpy as np
import cv2
import time
img = cv2.imread('clock1.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
template = cv2.imread('target.jpg')
t_h, t_w = template.shape[0:2] # template height and width
print('t_h, t_w ', t_h, ' ', t_w)
start_time = time.time()
for i in range(100):
    # find circle in gray image using Hough transform
    circles = cv2.HoughCircles(gray, method = cv2.HOUGH_GRADIENT, dp = 1,
                               minDist = 150, param1 = 50, param2 = 70,
                               minRadius = 131, maxRadius = 200)
    i = circles[0,0]
    x0 = i[0]
    y0 = i[1]
    r = i[2]
    # display circle on color image
    cv2.circle(img, (x0, y0), r, (0,255,0), 2)
    # do the template match
    result = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
    # finally, here is the part that gets tricky. we want to find highest
    # rated match inside circle and we'd like to use minMaxLoc
    # start to make mask by drawing circle on zero array
    mask = np.zeros(result.shape, dtype = np.float)
    cv2.circle(mask, (x0, y0), r, color = 1, thickness = -1)
    mask = cv2.boxFilter(mask,
                         ddepth = -1,
                         ksize = (t_w, t_h),
                         anchor = (0,0),
                         normalize = True,
                         borderType = cv2.BORDER_ISOLATED)
    # mask now contains values from zero to 1. we want to make anything
    # less than 1 equal to zero
    _, mask = cv2.threshold(mask, thresh = 0.9999,
                            maxval = 1.0, type = cv2.THRESH_BINARY)
    mask = mask.astype(np.uint8)
    # call minMaxLoc
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result, mask = mask)
    # draw found rectangle on img
    if max_val > 0.4:
        cv2.rectangle(img, max_loc, (max_loc[0]+t_w, max_loc[1]+t_h), (0,255,0), 4)
fps = 100/(time.time()-start_time)
print('fps ', fps)
cv2.imwrite('output.jpg', img)
This code gives a mask identical to OP, but at 11.89 fps. This technique gives us more accuracy with slightly more performance hit than Method 1.

how to measure distance between center of objects - openCV?

I have the following code that extracts and "tracks" black objects within a video file or webcam stream. I am new to Python and CV and am struggling to understand how to extract the center points of the tracked objects for comparison.
Code:
import argparse
import cv2
import numpy as np
from imutils import contours   # assuming imutils supplies sort_contours, used below

centre = []   # centre points of the tracked objects (appended to at the bottom of the loop)

# for this demo the only colour we are interested in is black or near black
# remember OpenCV uses BGR not RGB
groupColorLower = np.array([0,0,0], dtype = "uint8")
groupColorUpper = np.array([179,179,179], dtype = "uint8")

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True,
                help="path to the input video file")
args = vars(ap.parse_args())

######### Reading input video or opening webcam #########
# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
# otherwise, load the video
else:
    camera = cv2.VideoCapture(args["video"])
######### Finished reading input video or opening webcam #########

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()
    # if we are viewing a video and we did not grab a
    # frame, then we have reached the end of the video
    if args.get("video") and not grabbed:
        break
    # determine which pixels fall within the black boundaries
    # and then blur the binary image
    blue = cv2.inRange(frame, groupColorLower, groupColorUpper)
    blue = cv2.GaussianBlur(blue, (3, 3), 0)
    # find contours in the image
    (_, contours1, _) = cv2.findContours(blue.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
    # lets try to sort contours1 left to right
    (contours1, _) = contours.sort_contours(contours1)
    if len(contours1) > 0:
        # now lets extract the center point of the left most contour
        fixedPoint = cv2.moments(contours1[0])
        fixedPointX = int(fixedPoint["m10"] / fixedPoint["m00"])
        fixedPointY = int(fixedPoint["m01"] / fixedPoint["m00"])
        # lets draw a white dot in the left most object
        cv2.circle(frame, (fixedPointX, fixedPointY), 7, (255,255,255), -1)
    # lets use the nearest to fixedPoint as anchor (left most [1] where [0] is fixedPoint)
    if len(contours1) > 1:
        # sort the contours so we can iterate over them
        cnt2 = sorted([(c, cv2.boundingRect(c)[0]) for c in contours1], key = lambda x: x[1], reverse = True)
        # draw boxes around all objects we are tracking
        for (c, _) in cnt2:
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 4)
            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # draw a dot in the center of the black object on the image
            cv2.circle(frame, (cX, cY), 7, (100, 100, 100), -1)
            centre.append((int(M["m10"]/M["m00"]), int(M["m01"]/M["m00"])))
What I would like to be able to do is print the distance (in pixels) between the objects as they are found but I am not sure how to access this data.
The fixed point calculation works as I assume the left most object can be hard coded, but I need to get the coords for any other objects located.
The code currently loops over each black object and places a dot in the center of it.
Thanks for any help offered:
andy
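A minimal sketch of one way to get those distances from the code above (assuming the centroids collected in centre and the fixed point computed for each frame): the pixel distance between two centres is just the Euclidean norm of their difference.
import math

# inside the while loop, after the centroids have been appended to `centre`
for (cX, cY) in centre:
    # Euclidean distance in pixels between the left-most (fixed) object and this one
    dist_px = math.hypot(cX - fixedPointX, cY - fixedPointY)
    print("distance from fixed point: {:.1f} px".format(dist_px))
centre.clear()  # reset for the next frame so the distances stay per-frame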
