I would like to count how many circles stay inside static rectangles for more than 3 seconds. The circles represent the centers of objects recognized by the camera, and the static rectangles are areas of interest where I would like to count the total number of circles that remain inside for more than 3 seconds. Currently I am able to recognize objects in real time, find the center of each object, and draw the static rectangles, but I don't know how to do the rest. Below is my current while loop. Any help would be greatly appreciated.
while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=720)
    # the two static areas of interest
    box_one = cv2.rectangle(frame, (30, 30), (330, 330), color, 2)
    box_two = cv2.rectangle(frame, (350, 30), (630, 330), color, 2)
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)
    net.setInput(blob)
    detections = net.forward()
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > args["confidence"]:
            idx = int(detections[0, 0, i, 1])
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            label = "{}: {:.2f}%".format(CLASSES[idx],
                                         confidence * 100)
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                          COLORS[idx], 2)
            # center of the detected object as integer pixel coordinates
            center = (int((startX + endX) / 2), int((startY + endY) / 2))
            cv2.circle(frame, center, 5, (255, 255, 255), -1)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
The output looks like this
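One way to handle the "more than 3 seconds" part (a sketch of my own, not code from the post): give each center a persistent ID by matching it to the nearest tracked center from the previous frame, start a timer when a tracked center enters a rectangle, reset it when the center leaves, and count the centers whose timers exceed 3 seconds. A minimal version of that bookkeeping, assuming the two ROI coordinates above (a real centroid tracker would be more robust than this nearest-neighbour matching):

import math
import time

ROIS = [((30, 30), (330, 330)), ((350, 30), (630, 330))]  # the two static boxes
MATCH_DIST = 50       # max pixel distance to treat a center as the same object
DWELL_SECONDS = 3.0

tracks = {}           # object_id -> {"center": (x, y), "entered": {roi_idx: t0}}
next_id = 0

def point_in_roi(pt, roi):
    (x1, y1), (x2, y2) = roi
    return x1 <= pt[0] <= x2 and y1 <= pt[1] <= y2

def update_tracks(centers, now):
    # Match this frame's centers to existing tracks, create new tracks for
    # unmatched centers, and start/reset the per-ROI entry timers.
    global next_id, tracks
    updated = {}
    for c in centers:
        best_id, best_d = None, MATCH_DIST
        for tid, tr in tracks.items():
            d = math.hypot(c[0] - tr["center"][0], c[1] - tr["center"][1])
            if d < best_d:
                best_id, best_d = tid, d
        if best_id is None:
            best_id = next_id
            next_id += 1
            tracks[best_id] = {"center": c, "entered": {}}
        tr = tracks[best_id]
        tr["center"] = c
        for idx, roi in enumerate(ROIS):
            if point_in_roi(c, roi):
                tr["entered"].setdefault(idx, now)   # start the timer on entry
            else:
                tr["entered"].pop(idx, None)         # reset the timer on exit
        updated[best_id] = tr
    tracks = updated                                 # drop tracks that vanished

def count_dwellers(now):
    # Number of tracked centers inside each ROI for more than DWELL_SECONDS.
    counts = [0] * len(ROIS)
    for tr in tracks.values():
        for idx, t0 in tr["entered"].items():
            if now - t0 > DWELL_SECONDS:
                counts[idx] += 1
    return counts

In the while loop you would collect every center into a list as you draw it, call update_tracks(centers, time.time()) once per frame, and draw the numbers returned by count_dwellers(time.time()) next to each rectangle.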
I used YOLOv3 to detect windows on a building. Each window is shown with a bounding box around it, and I also extracted the coordinates of each window [top_left, bottom_left, top_right, bottom_right]. Now I want to find the free space [wall] between the windows. I put the coordinates of each window in a dict.
When I set the values manually, it works for each window. For instance:
p1 = points_list[0][2]  # top_right of window 0
p2 = points_list[1][1]  # bottom_left of window 1
cv2.rectangle(img, p1, p2, (255, 0, 255), -1)
But how can I make it find the walls automatically?
Here is my first output image; the detected wall is shown in pink.
Here I have also added a sample of my code.
def bb_to_rect(x, y, w, h):
    top_left = (x, y)
    top_right = (x + w, y)
    bottom_left = (x, y + h)
    bottom_right = (x + w, y + h)
    return top_left, bottom_left, top_right, bottom_right

def draw_bounding_box(img, font, boxes, confidences, colors):
    indices = cv2.dnn.NMSBoxes(boxes, confidences, CONF_THRESHOLD, NMS_THRESHOLD)
    points_list = {}
    count = 0
    for i in range(len(boxes)):
        if i in indices:
            (x, y, w, h) = boxes[i]
            label = "{}:{:.2f}%".format(classes[class_ids[i]], confidences[i] * 100)
            color = colors[class_ids[i]]
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
            cv2.putText(img, str(i), (x, y), cv2.FONT_HERSHEY_TRIPLEX,
                        .5, (255, 255, 0), 1, cv2.LINE_AA)
            top_left, bottom_left, top_right, bottom_right = bb_to_rect(x, y, w, h)
            points_list[count] = [top_left, bottom_left, top_right, bottom_right]
            count += 1
            cv2.circle(img, (x + w, y), 3, (50, 100, 0), -1)       # top right
            cv2.circle(img, (x + w, y + h), 3, (0, 0, 255), -1)    # bottom right
            cv2.circle(img, (x, y), 3, (0, 255, 100), -1)          # top left
            cv2.circle(img, (x, y + h), 3, (255, 0, 255), -1)      # bottom left
    return img, points_list

if __name__ == "__main__":
    net, output_layers, colors, classes = load_yolo_model()
    img, height, width = load_img(IMAGES_PATH)
    outs = create_blob(img, net, output_layers)
    boxes, class_ids, confidences, centroids = detect_obj(outs, height, width, img)
    img, points_list = draw_bounding_box(img, FONT, boxes, confidences, colors)
    p1 = points_list[0][2]    # wall between window 0 and window 1
    p2 = points_list[1][1]
    p2_1 = points_list[1][2]  # wall between window 1 and window 2
    p2_2 = points_list[2][1]
    p3_1 = points_list[3][2]  # wall between window 3 and window 5
    p5_2 = points_list[5][1]
    cv2.rectangle(img, p1, p2, (255, 0, 255), -1)
    cv2.rectangle(img, p2_1, p2_2, (255, 0, 255), -1)
    cv2.rectangle(img, p3_1, p5_2, (255, 0, 255), -1)
    cv2.imshow('out', img)
    cv2.waitKey(0)
Also, if you look at the photo you will see that the windows are detected in random order, so the ID number of each window is not in order. How can I solve this problem? Thanks in advance for helping. :)
Have you tried image segmentation to find the objects on the wall? Then, with a simple trick like finding the contours, you can measure the distance between the objects, like the windows, as you said.
2nd trick:
I've got another easy way which may help you:
First of all, find the edges using Canny edge detection, then fill the space between the windows' vertical edges with the dilate function, and then with Hough line detection you can find the vertical lines of the windows and take the distance between the vertical lines of the wall. A rough sketch of this second trick is below.
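A rough sketch of that second trick (my own code; the file name building.jpg and every threshold here are assumptions you would tune for your image):

import cv2
import numpy as np

img = cv2.imread("building.jpg")           # hypothetical input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150)

# dilate with a tall, thin kernel so broken vertical edges join up
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 25))
dilated = cv2.dilate(edges, kernel, iterations=1)

lines = cv2.HoughLinesP(dilated, 1, np.pi / 180, threshold=80,
                        minLineLength=60, maxLineGap=10)
xs = []
if lines is not None:
    for x1, y1, x2, y2 in lines[:, 0]:
        if abs(x1 - x2) < 5:               # keep near-vertical lines only
            xs.append((x1 + x2) // 2)
            cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

# gaps between neighbouring vertical lines are candidate wall widths
xs = sorted(set(xs))
print([b - a for a, b in zip(xs, xs[1:])])
cv2.imshow("vertical lines", img)
cv2.waitKey(0)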
I am detecting text from various passport images with OpenCV. The task is to crop the text fields present on passports, such as Name, DOB, and Nationality. The current code is given below:
image = cv2.imread(image)  # read image
image = imutils.resize(image, height=600)
original = image.copy()    # assumed: keep a copy of the resized image to crop the ROIs from
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 5))
sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 21))
blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKernel)
gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradX = np.absolute(gradX)
(minVal, maxVal) = (np.min(gradX), np.max(gradX))
gradX = (255 * ((gradX - minVal) / (maxVal - minVal))).astype("uint8")
gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)
thresh = cv2.threshold(gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
if do_erosion:  # do erosion only if the flag is True
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, sqKernel)
    thresh = cv2.erode(thresh, None, iterations=4)
p = int(image.shape[1] * 0.05)
thresh[:, 0:p] = 0
thresh[:, image.shape[1] - p:] = 0  # thresholded image shown below
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)  # finding contours in the threshold image
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
rois = []
# loop over the contours
for c in cnts:
    # compute the bounding box of the contour and use it to compute the
    # aspect ratio and the ratio of the box width to the image width
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)
    crWidth = w / float(gray.shape[1])
    # check to see if the aspect ratio and coverage width are within
    # acceptable criteria
    if ar > 2 and crWidth > 0.05:
        # pad the bounding box since we applied erosions and now need
        # to re-grow it
        pX = int((x + w) * 0.03)
        pY = int((y + h) * 0.03)
        (x, y) = (x - pX, y - pY)
        (w, h) = (w + (pX * 2), h + (pY * 2))
        # extract the ROI from the image and draw a bounding box
        # around it (interested in finding all ROIs having text)
        roi = original[y:y + h, x:x + w].copy()
        rois.append(roi)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
I am detecting some textual values correctly, but the solution is not generic. For example, it sometimes extracts only the year of the DOB but not the full date (sometimes it detects the month and day as well, but not in the same ROI). The original and thresholded images are shown below:
As you can see in the "ROIs found" image, some fields, like the DOB and date of expiry, are not detected as single ROIs. Any help in extracting all the text regions from the passport will be highly appreciated. Thanks!
You need to tweak the aspect ratio and coverage ratio thresholds to obtain all the desired bounding boxes.
When I run your code, the value of ar for "05" is 1.541 and the value of crWidth is 0.03. Since these values are less than the thresholds you specified, those contours get filtered out. This is why some of the words do not have bounding boxes in the final image.
But since you want to get the entire DOB in a single bounding box, you can apply a dilation operation just before the line thresh[:, 0:p] = 0:
thresh = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, np.ones((1, 30), np.uint8))
This will dilate the pixels and combine blobs that are nearby horizontally. The resultant image after preprocessing is as follows -
The final image -
Below is the code I am trying:
def mouse_drawing(event, x, y, flags, params):
    global point1, point2, drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        if drawing is False:
            drawing = True
            point1 = (x, y)
        else:
            drawing = False
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing is True:
            point2 = (x, y)

point1, point2, drawing = None, None, False   # assumed initial state for the callback

cap = cv2.VideoCapture("new2.asf")
cv2.namedWindow("App", cv2.WINDOW_FREERATIO)
cv2.setMouseCallback("App", mouse_drawing)
fgbg = cv2.createBackgroundSubtractorMOG2()
kernel = np.ones((5, 5), np.uint8)

while True:
    ret, frame = cap.read()
    frame = cv2.resize(frame, None, fx=scaling_factorx, fy=scaling_factory,
                       interpolation=cv2.INTER_AREA)
    imgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask1 = cv2.GaussianBlur(imgray, (7, 7), 0)
    fgmask = fgbg.apply(fgmask1)
    if point1 and point2:
        cv2.line(frame, point1, point2, (0, 255, 0), 3)
    contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    try:
        hierarchy = hierarchy[0]
    except:
        hierarchy = []
    for contour, hier in zip(contours, hierarchy):
        (x, y, w, h) = cv2.boundingRect(contour)
        if w > 80 and h > 80:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("App", frame)
How can I write an image of a vehicle with cv2.imwrite once it has reached the line that was drawn manually? The vehicles get rectangular boxes, which is fine, but some vehicles have more than one box; one vehicle should have only one rectangular box. And the image should be saved only when the vehicle reaches the line; the rest of the vehicles should not be saved. Please let me know the solution.
First, you need to group intersecting rectangles into one.
You do so by checking the intersection area between each pair of rectangles.
If the intersection area is larger than a pre-defined heuristic ratio of the smaller rectangle's area, then the smaller rectangle should be removed. For example: intersection_area / smaller_rect_area > 0.75
Please check this answer for the rectangles intersection.
Second, to check that a rectangle has passed the line:
Use your points to find the parameters for the general line formula: ax + by + c = 0
For each frame, plug the rectangle's center coordinates into the formula and keep track of the sign of the result.
If the sign of the result changes, the rectangle's center has passed the line. A sketch of both steps is below.
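A sketch of both steps (the helper names are mine, and the 0.75 ratio is the heuristic from above):

def intersection_area(a, b):
    # overlap area of two (x, y, w, h) rectangles, 0 if they don't intersect
    ax, ay, aw, ah = a
    bx, by, bw, bh = b
    dx = min(ax + aw, bx + bw) - max(ax, bx)
    dy = min(ay + ah, by + bh) - max(ay, by)
    return dx * dy if dx > 0 and dy > 0 else 0

def merge_overlapping(rects, ratio=0.75):
    # keep bigger rectangles first; drop any rectangle whose overlap with an
    # already-kept one exceeds `ratio` of its own (smaller) area
    rects = sorted(rects, key=lambda r: r[2] * r[3], reverse=True)
    kept = []
    for r in rects:
        area = float(r[2] * r[3])
        if all(intersection_area(r, k) / area <= ratio for k in kept):
            kept.append(r)
    return kept

def line_side(p, p1, p2):
    # sign of ax + by + c for point p, where the line passes through p1 and
    # p2; the sign flips when p crosses the line
    a = p2[1] - p1[1]
    b = p1[0] - p2[0]
    c = -(a * p1[0] + b * p1[1])
    v = a * p[0] + b * p[1] + c
    return (v > 0) - (v < 0)

In the main loop you would run merge_overlapping on each frame's bounding boxes, remember the previous line_side value of each box's center, and call cv2.imwrite for a vehicle the first time its sign changes.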
What I want to do is draw a grid on an image that has smaller squares at the top and larger ones near the bottom, but I am lost on how to do this. I figured out how to draw a uniform grid on the image using OpenCV, but I don't know how to fine-tune it to get the results I want.
What I will be using this for is to find where a bounding box point is for a detected object. I will need a smaller bounding box at the top and a larger one closer to the bottom of the image.
This is what I want to do:
But I cannot figure out how to get the curves in the code.
This is what I have so far:
And this is the code that I am using.
'''
This method draws a simple grid over the image based on the passed step.
The pxstep controls the size of the grid.
'''
def drawBasicGrid(image, pxstep, midX, midY):
    x = pxstep
    y = pxstep
    # Draw all vertical (x) lines
    while x < image.shape[1]:
        cv2.line(image, (x, 0), (x, image.shape[0]), color=(255, 0, 255), thickness=1)
        x += pxstep
    # Draw all horizontal (y) lines
    while y < image.shape[0]:
        cv2.line(image, (0, y), (image.shape[1], y), color=(255, 0, 255), thickness=1)
        y += pxstep
This draws the basic grid. And this creates the bounding boxes that I use for detecting the bounding points of a detected object:
def makeBoundingBox(h, w, step):
    # BBox is a 100*100 square, or whatever step is defined
    y = 0
    bbox = []
    while y < h:
        x = 0
        while x < w:
            bbox.append([(x, y), (x + step, y + step)])
            x += step
        y += step
    return bbox
And this is how I am using it.
# Prepare images
img = cv2.imread(image)
#img = imutils.resize(img, width=300)
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(H, W) = img.shape[:2]

# Create bounding boxes for the image
'''
Creates the boxes for people detection.
It will look for which bounding boxes the person is in and then
draw a box around their feet.
'''
# People detection
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 0.007843, (300, 300), 127.5)
net.setInput(blob)
detections = net.forward()

# Get the center point of the image
midY = H // 2
midX = W // 2
print("Middle Y pixel", midY)

# Draw center lines
cv2.line(img, (0, midY), (W, midY), color=green, thickness=2)
cv2.line(img, (midX, 0), (midX, H), color=green, thickness=2)

# Visual grid drawn
drawBasicGrid(img, step, midX, midY)
bbox = makeBoundingBox(H, W, step)  # used for finding where the endX and startX BBOX points are
Any help on this will be appreciated.
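One way to get that effect (my own sketch, not from the post, and every number in it is an assumption to tune): grow the gap between horizontal lines as y increases, and draw the "vertical" lines as rays from a vanishing point above the image, so the cells get wider toward the bottom:

import cv2
import numpy as np

img = np.zeros((480, 640, 3), np.uint8)    # stand-in for the real image
h, w = img.shape[:2]

# horizontal lines: each gap is `growth` times the previous one
y, gap, growth = 0.0, 8.0, 1.25
while y < h:
    cv2.line(img, (0, int(y)), (w, int(y)), (255, 0, 255), 1)
    y += gap
    gap *= growth

# "vertical" lines: rays from a vanishing point above the image, spaced
# evenly along the bottom edge (OpenCV clips the lines to the image)
vp = (w // 2, -300)                        # assumed vanishing point
for x in range(0, w + 1, 40):
    cv2.line(img, vp, (x, h), (255, 0, 255), 1)

cv2.imshow("perspective grid", img)
cv2.waitKey(0)

To find which cell a detected bounding-box point falls into, you would test the point against this same row/ray geometry instead of the fixed step used in makeBoundingBox.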
I am trying to read the values on my electricity meter's LCD display using OpenCV. From my picture I am able to find the meter using the HoughCircles method, and I am able to find the LCD display on the meter using contours. The LCD display isn't very clear, so I search for contours again to extract the digits from the display. Now I am unable to read the values on the display using tesseract or ssocr; how can I read them? I just started using OpenCV (beginner) and don't know the right way to go from here, or whether my approach is correct; I would appreciate any help. Below is my code snippet, and the meter image links are in the comments.
def process_image(path, index):
    img = cv2.imread(path)
    img = cv2.resize(img, (0, 0), fx=2.0, fy=2.0)
    height, width, depth = img.shape
    print("\n---------------------------------------------\n")
    print("In Process Image Path is %s height is %d Width is %d depth is %d" % (path, height, width, depth))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.medianBlur(gray, 15)
    circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, 1.2, 100)
    # ensure at least one circle is found, which is our meter
    if circles is not None:
        circles = np.uint16(np.around(circles))
        print("Meter Found")
        for i in circles[0]:
            CenterX = i[0]
            CenterY = i[1]
            Radius = i[2]
            # mask everything outside the meter circle
            circle_img = np.zeros((height, width), np.uint8)
            cv2.circle(circle_img, (CenterX, CenterY), Radius, 1, thickness=-1)
            masked_data = cv2.bitwise_and(img, img, mask=circle_img)
            output = masked_data.copy()
            cv2.circle(output, (i[0], i[1]), i[2], (0, 255, 0), 2)
            cv2.circle(output, (i[0], i[1]), 2, (0, 0, 255), 3)
            cv2.imwrite("output_" + str(index) + ".jpg", output)
            break
        gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 1)
        edged = cv2.Canny(blurred, 5, 10, 200)
        cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        displayCnt = None
        contour_list = []
        # loop over the contours
        for c in cnts:
            # approximate the contour
            peri = 0.02 * cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, peri, True)
            # if the contour has four vertices, then we have found
            # the meter display
            if len(approx) == 4:
                contour_list.append(c)
                displayCnt = approx
                break
        warped = four_point_transform(gray, displayCnt.reshape(4, 2))
        output = four_point_transform(output, displayCnt.reshape(4, 2))
        thresh = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 31, 2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        digitCnts = []
        # loop over the digit area candidates
        for c in cnts:
            # compute the bounding box of the contour
            (x, y, w, h) = cv2.boundingRect(c)
            # if the contour is sufficiently large, it must be a digit
            if (w > 5 and w < 100) and (h >= 15 and h <= 150):
                digitCnts.append(c)
        # sort the contours from left-to-right
        digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]
        mask = np.zeros(thresh.shape, np.uint8)
        cv2.drawContours(mask, digitCnts, -1, (255, 255, 255), -1)  # -1 draws all contours
        mask = cv2.bitwise_not(mask)
        mask = cv2.resize(mask, (0, 0), fx=2.0, fy=2.0)
        cv2.imwrite("Mask.jpg", mask)  # ssocr reads the mask from disk
        result = os.popen('/usr/local/bin/ssocr --number-digits=-1 -t 10 Mask.jpg')
        output = result.read()
        print("Output is " + output)
        output = output[2:8]
        return str(round(float(output) * 0.1, 1))
    else:
        print("Circle not Found")
        print("\n---------------------------------------------\n")
        return None
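If ssocr and tesseract keep failing, one alternative (a sketch of mine, not tested on your meter) is to classify each digit ROI directly as a seven-segment pattern: split the ROI into the seven segment regions and check which ones are mostly lit. This assumes the thresholded digits are white on black (invert the ROI if they are not), and the segment proportions are assumptions to tune:

import cv2
import numpy as np

DIGITS_LOOKUP = {
    (1, 1, 1, 0, 1, 1, 1): 0, (0, 0, 1, 0, 0, 1, 0): 1,
    (1, 0, 1, 1, 1, 0, 1): 2, (1, 0, 1, 1, 0, 1, 1): 3,
    (0, 1, 1, 1, 0, 1, 0): 4, (1, 1, 0, 1, 0, 1, 1): 5,
    (1, 1, 0, 1, 1, 1, 1): 6, (1, 0, 1, 0, 0, 1, 0): 7,
    (1, 1, 1, 1, 1, 1, 1): 8, (1, 1, 1, 1, 0, 1, 1): 9,
}

def read_digit(roi):
    # roi: thresholded single-digit image, white segments on black
    h, w = roi.shape
    dw, dh = int(w * 0.25), int(h * 0.15)        # assumed segment thickness
    dhc = max(1, int(h * 0.05))                  # half-height of the middle bar
    segments = [
        ((0, 0), (w, dh)),                       # top
        ((0, 0), (dw, h // 2)),                  # top-left
        ((w - dw, 0), (w, h // 2)),              # top-right
        ((0, h // 2 - dhc), (w, h // 2 + dhc)),  # center
        ((0, h // 2), (dw, h)),                  # bottom-left
        ((w - dw, h // 2), (w, h)),              # bottom-right
        ((0, h - dh), (w, h)),                   # bottom
    ]
    on = []
    for (xa, ya), (xb, yb) in segments:
        seg = roi[ya:yb, xa:xb]
        # a segment counts as lit if more than half of its pixels are white
        lit = seg.size > 0 and cv2.countNonZero(seg) / float(seg.size) > 0.5
        on.append(1 if lit else 0)
    return DIGITS_LOOKUP.get(tuple(on))          # None if the pattern is unknown

You would loop over digitCnts, crop roi = thresh[y:y + h, x:x + w] for each bounding box, and call read_digit on it; the 0.5 fill ratio and the segment proportions are starting points to tune against your display.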