How to count the vehicles if boundingRect touch the line - python-3.x

Below is the code i am trying:
def mouse_drawing(event, x, y, flags, params):
    """Mouse callback: one left-click starts the counting line, the next ends it.

    While `drawing` is active the free end of the line follows the cursor.
    Operates on the module-level state `point1`, `point2`, `drawing`.
    """
    global point1, point2, drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        if drawing is False:
            # First click: anchor the line and start tracking the cursor.
            drawing = True
            point1 = (x, y)
        else:
            # Second click: stop tracking, the line is fixed.
            drawing = False
    elif event == cv2.EVENT_MOUSEMOVE and drawing is True:
        # Keep the free end of the line under the cursor.
        point2 = (x, y)
# --- capture / UI setup -------------------------------------------------
cap = cv2.VideoCapture("new2.asf")
cv2.namedWindow("App", cv2.WINDOW_FREERATIO)
cv2.setMouseCallback("App", mouse_drawing)
fgbg = cv2.createBackgroundSubtractorMOG2()
kernel = np.ones((5, 5), np.uint8)

while True:
    ret, frame = cap.read()
    # BUG FIX: cap.read() returns (False, None) at end of stream; without
    # this guard cv2.resize crashes on the None frame.
    if not ret:
        break
    frame = cv2.resize(frame, None, fx=scaling_factorx, fy=scaling_factory,
                       interpolation=cv2.INTER_AREA)

    # Foreground mask: grayscale -> blur -> background subtraction.
    imgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask1 = cv2.GaussianBlur(imgray, (7, 7), 0)
    fgmask = fgbg.apply(fgmask1)

    # Counting line drawn by the user via the mouse callback.
    if point1 and point2:
        cv2.line(frame, point1, point2, (0, 255, 0), 3)

    contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    # BUG FIX: narrowed the bare `except:`; hierarchy is None (TypeError)
    # when no contours are found, and [0] can raise IndexError.
    try:
        hierarchy = hierarchy[0]
    except (TypeError, IndexError):
        hierarchy = []

    for contour, hier in zip(contours, hierarchy):
        (x, y, w, h) = cv2.boundingRect(contour)
        # Keep only blobs large enough to plausibly be vehicles.
        if w > 80 and h > 80:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("App", frame)
How can I save an image of a vehicle with cv2.imwrite once it reaches the manually drawn line? The vehicles already get rectangular boxes, which is fine, but some vehicles end up with more than one box — each vehicle should have exactly one rectangular box. Only a vehicle that reaches the line should be saved; the rest should not be. Please let me know the solution.

First, you need to group intersecting rectangles into one.
You do so by checking the intersection area between each pair of rectangles.
If the intersection area is larger than a pre-defined heuristic ratio of the small rectangle area then the smaller rectangle should be removed. For example, intersection_area / smaller_rect_area > 0.75
Please check this answer for the rectangles intersection.
Second, to check that a rectangle has passed the line:
Use your points to find the parameters for the general line formula: ax + by + c = 0
For each frame, plug the rectangle center coordinates in the formula and keep track of the sign of the result.
If the sign of the result changes that means that the rectangle center has passed the line.

Related

Find coordinates of isosceles triangle with maximum area bounded by ellipse

"Redirected" here from math overflow:
https://mathoverflow.net/questions/372704/find-coordinates-of-isosceles-triangle-with-maximum-area-bounded-by-ellipse
I have a window with an ellipse inscribed inside it. The ellipse's radii are screen_width / 2 and screen_height / 2. I want to find the coordinates of the maximum isosceles triangle that will fit in the ellipse without overflowing.
The direction of the triangle's tip is an enum parameter (i.e., N, E, S, W). From what I've read, there is not a unique solution, but the maximum area is a simple formula and there is a way to find a triangle that solves the problem. That way, however, is merely hinted at, and probably involves using linear algebra to normalize the ellipse and isosceles triangle to a unit circle and an equilateral triangle, but no such formula seems to exist online.
An equilateral triangle inscribed in a circle is the triangle that covers the max area of the circle (some theorem that you should look up).
An ellipse is a "squished" circle, therefore, if we squish a circle with an inscribed equilateral triangle, providing we do that along a line of symmetry, we end up with a max area isosceles triangle (two sides get resized by a common factor, the 3rd side gets stretched by another factor).
The angles follow the inscribed angle theorem and complementary angle theorem
Considering your screen is wider than it is high, the coordinates of the 3 apex of the triangle are as follows (in screen coordinates, with the origin at top left)
top: (w/2, 0) # this one does not change
bot_left = (w/2 - w*cos(pi/6)/2, h/2 + h*sin(pi/6)/2)
bot_right = (w/2 + w*cos(pi/6)/2, h/2 + h*sin(pi/6)/2)
Adding to #Reblochon's answer, Here is a complete example. I attempted it, so why not share it :)
import pygame
from math import sin, cos, pi

pygame.init()

SW = 600
SH = 600
WIN = pygame.display
D = WIN.set_mode((SW, SH))

radiiX = SW / 2
radiiY = SH / 2


def ellipse(center, rx, ry):
    """Plot an ellipse point-by-point around *center* with radii rx / ry.

    BUG FIX: the original ignored its rx/ry parameters and always used the
    global radii; the parameters are now honored (same result for the
    existing call site, which passes the globals).
    """
    # Center marker — hoisted out of the loop; it only needs drawing once
    # per call instead of once per plotted point.
    pygame.draw.circle(D, (255, 255, 0), (int(center[0]), int(center[1])), 2)
    angle = 0
    while angle < 6.28:  # ~2*pi: one full revolution
        angle += 0.0005
        x = center[0] + sin(angle) * rx
        y = center[1] + cos(angle) * ry
        D.set_at((int(x), int(y)), (255, 255, 0))


# Vertices of the max-area isosceles triangle inscribed in the ellipse
# (tip pointing up); see the accompanying derivation.
top = (SW / 2, 0)  # this one does not change
bot_left = (SW / 2 - SW * cos(pi / 6) / 2, SH / 2 + SH * sin(pi / 6) / 2)
bot_right = (SW / 2 + SW * cos(pi / 6) / 2, SH / 2 + SH * sin(pi / 6) / 2)
points = [top, bot_left, bot_right]

while True:
    D.fill((0, 0, 0))
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            # BUG FIX: the original called pygame.quit() but stayed in the
            # loop, so the next drawing call raised an error; exit cleanly.
            pygame.quit()
            raise SystemExit
    ellipse([radiiX, radiiY], radiiX, radiiY)
    pygame.draw.lines(D, (255, 255, 0), True, points)
    pygame.display.flip()
based on notes from Reblochon Masque
def inner_rect(self):
    """Return (x, y, w, h): the bounding rect of the max-area triangle
    inscribed in this widget's ellipse, honoring the child's orientation.
    """
    rect = self.outer_rect()                  # bounding box of the ellipse
    r = self.child.orientation.radians()      # direction of the triangle tip
    pts = inscribe_polygon(3, r)              # unit triangle, rotated
    pts = graphics_affines(pts)               # from cartesian to screen coords
    pts = scale_points(pts, rect)             # scale points to ellipse dims
    # BUG FIX: removed the dead `x, y, w, h = rect` unpack — those names
    # were never used.
    o, r = bounding_rect(pts)
    xmin, ymin = o
    dx, dy = r
    return (xmin, ymin, dx, dy)

How to recognize and count circles in a rectangle?

I would like to count how many circles are in static rectangles for more than 3 seconds. The circles represent the center of objects recognized by the camera and static rectangles are areas of interest where I would like to count the total amount of circles that are inside the area of interest for more than 3 seconds. Currently I am able to recognize objects in real-time, find the center of each object and draw static rectangles, but I don't know how to do the rest. Below is my current while loop. Any help would be greatly appreciated.
while True:
    # Grab the next frame and shrink it for speed.
    frame = vs.read()
    frame = imutils.resize(frame, width=720)

    # Static areas of interest where circles are to be counted.
    cv2.rectangle(frame, (30, 30), (330, 330), color, 2)
    cv2.rectangle(frame, (350, 30), (630, 330), color, 2)

    (h, w) = frame.shape[:2]

    # Run the SSD detector on a 300x300 blob of the frame.
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)
    net.setInput(blob)
    detections = net.forward()

    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence <= args["confidence"]:
            continue  # weak detection — skip

        idx = int(detections[0, 0, i, 1])
        # Scale the normalized box back to frame coordinates.
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
        cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)

        # White dot at the center of the detection box.
        coordinates = (int((startX + endX) / 2), int((startY + endY) / 2))
        cv2.circle(frame, coordinates, 5, (255, 255, 255), -1)

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
The output looks like this

Opencv3 - How to give ID to the people I count based on contours and rectangles?

I want to give ID to the contourareas that I draw rectangle on them. Now my code tracks moving object on the screen and put a rectangle around them. I want to give an ID to the each of rectangles. I know how to count how many rectangles on the screen but I don't know how to give rectangles an exact ID that doesn't change when another rectangle joins the screen.
The code I use to draw rectangles:
video_path = 'C:\\Users\\MONSTER\\Desktop\\video.avi'

cv2.ocl.setUseOpenCL(False)

version = cv2.__version__.split('.')[0]
print(version)

# Read video file.
cap = cv2.VideoCapture(video_path)

# Pick the MOG2 constructor matching the installed OpenCV major version.
if version == '2':
    fgbg = cv2.BackgroundSubtractorMOG2()
if version == '3':
    fgbg = cv2.createBackgroundSubtractorMOG2()

# BUG FIX: the original tested `cap.isOpened` — the bound method object,
# which is always truthy — instead of calling it.
while cap.isOpened():
    # If ret is True there was no error reading the frame.
    ret, frame = cap.read()
    if ret:
        # Apply background subtraction, then binarize the mask.
        fgmask = fgbg.apply(frame)
        ret1, th1 = cv2.threshold(fgmask, 150, 200, cv2.THRESH_BINARY)

        # findContours has a different return arity in OpenCV 2 vs 3.
        if version == '2':
            (contours, hierarchy) = cv2.findContours(
                th1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if version == '3':
            (im2, contours, hierarchy) = cv2.findContours(
                th1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        for c in contours:
            # Ignore small blobs (noise).
            if cv2.contourArea(c) < 200:
                continue
            # Get bounding box from contour and draw it.
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        cv2.imshow('foreground and background', th1)
        cv2.imshow('rgb', frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    else:
        # BUG FIX: without this the loop spun forever once the video ended.
        break

cap.release()
cv2.destroyAllWindows()
And I changed the code above to the code below to put text on rectangles but the text changes when another rectangle joins.
# Hoisted: the font is a constant, no need to look it up per contour.
font = cv2.FONT_HERSHEY_SIMPLEX
# BUG FIX: the original started i at 1 and incremented *before* putText,
# so the first rectangle was labeled "2"; labels now start at 1.
i = 0
for c in contours:
    if cv2.contourArea(c) < 200:
        continue
    # Get bounding box from contour and draw it.
    (x, y, w, h) = cv2.boundingRect(c)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    i = i + 1
    cv2.putText(frame, str(i), (x, y), font, 2, (255, 255, 255), 2, cv2.LINE_AA)
Do you have any idea to give rectangles exact ID's.
Your code does not do tracking, only detection from background subtraction. To make your code natively do the tracking, I'm afraid it's not that simple. Tracking will only happen if you have a perfectly connected object and there is only one object. That rarely happens, as failed detections occur all the time, so multiple rects are created.
The ID will keep changes for different frames when you enter the scene and there are multiple detections like the image below. I tried it before. Each time the bounding rect will change from object to object. Especially when you use a simple method like bgfg, this lost track or lost id happens almost every frame.
The proper way is to use real tracking algorithm to constantly update the object. e.g
https://www.youtube.com/watch?v=qvcyK4ZMKbM
The input to TLD tracker is from the Rect obtained by
(x, y, w, h) = cv2.boundingRect(c)
The source code is on GitHub; feel free to test it:
https://github.com/gnebehay/TLD
Follow the installation to get it up and integrate into your current detection routing.
https://github.com/zk00006/OpenTLD/wiki/Installation
You need to track multiple objects.
You need to check their intersections and stop tracking when they collide.
https://www.pyimagesearch.com/2018/08/06/tracking-multiple-objects-with-opencv/

Drawing Scaled grid system using openCV and python

What I want to do is draw a grid on an image that has smaller squares at the top and larger ones near the bottom. I am lost on how to do this, though. I figured out how to draw a grid on the image using OpenCV but don't know how to fine-tune it to get the results I want.
What I will be using this for is to find where a bounding box point is for a detected object. I will need a smaller bounding box at the top and a larger one closer to the bottom of the image.
This is what I want to do:
But I cannot figure out how to get the curves in the code.
This is what I have so far:
And this the is code that I am using.
'''
#This method draws a simple grid over the image based on the passed step
#The pxstep controls the size of the grid
'''
def drawBasicGrid(image, pxstep, midX, midY):
    """Draw a uniform magenta grid over *image* with cell size *pxstep*.

    midX / midY are accepted for interface compatibility but unused here.
    BUG FIX: the original ignored the *image* parameter and drew on the
    global `img`; it now draws on the image it was given.
    """
    # Vertical lines every pxstep pixels.
    x = pxstep
    while x < image.shape[1]:
        cv2.line(image, (x, 0), (x, image.shape[0]),
                 color=(255, 0, 255), thickness=1)
        x += pxstep
    # Horizontal lines every pxstep pixels.
    y = pxstep
    while y < image.shape[0]:
        cv2.line(image, (0, y), (image.shape[1], y),
                 color=(255, 0, 255), thickness=1)
        y += pxstep
This draws the basic grid.
And this creates the bounding boxes that I use for detection of the bounding points of a detected object.
def makeBoundingBox(h, w, step):
    """Tile an h x w frame into square cells of side *step*.

    Returns a list of [(x0, y0), (x1, y1)] corner pairs, scanning
    left-to-right, top-to-bottom. Cells on the right/bottom edge may
    extend past the frame, matching the original behavior.
    """
    return [
        [(x, y), (x + step, y + step)]
        for y in range(0, h, step)
        for x in range(0, w, step)
    ]
And this is how I am using it.
# Load the frame to annotate.
img = cv2.imread(image)
(H, W) = img.shape[:2]

# People detection: run the pre-loaded SSD network on a 300x300 blob.
# We later look up which bounding boxes the person falls in and draw a
# box around their feet.
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)),
                             0.007843, (300, 300), 127.5)
net.setInput(blob)
detections = net.forward()

# Center point of the image.
midY = H // 2
midX = W // 2
print("Middle Y pixel", midY)

# Draw the center cross-hair lines.
cv2.line(img, (0, midY), (W, midY), color=green, thickness=2)
cv2.line(img, (midX, 0), (midX, H), color=green, thickness=2)

# Visual grid, plus the boxes used to locate the startX/endX bbox points.
drawBasicGrid(img, step, midX, midY)
bbox = makeBoundingBox(H, W, step)
Any help on this will be appreciated.

how to measure distance between center of objects - openCV?

I have the following code that extracts and "tracks" black objects within a video file or webcam stream. I am new to Python and CV and am struggling to understand how to extract the center points of the tracked objects for comparison.
Code:
# For this demo the only colour we are interested in is black or near
# black. Remember: OpenCV uses BGR, not RGB.
groupColorLower = np.array([0, 0, 0], dtype="uint8")
groupColorUpper = np.array([179, 179, 179], dtype="uint8")

# Construct the argument parser and parse the arguments.
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True,
                help="path to the input video file")
args = vars(ap.parse_args())

######### Reading input video or opening webcam #########
# If a video path was not supplied, grab the reference to the webcam;
# otherwise, load the video.
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(args["video"])
######### Finished reading input video or opening webcam #########

# BUG FIX: `centre` was appended to but never initialized (NameError).
# Collected object centers, one (x, y) per tracked contour.
centre = []

# Keep looping.
while True:
    # Grab the current frame.
    (grabbed, frame) = camera.read()

    # If we are viewing a video and did not grab a frame, we reached
    # the end of the video.
    if args.get("video") and not grabbed:
        break

    # Determine which pixels fall within the black boundaries and then
    # blur the binary image.
    blue = cv2.inRange(frame, groupColorLower, groupColorUpper)
    blue = cv2.GaussianBlur(blue, (3, 3), 0)

    # Find contours in the mask (OpenCV 3 return arity).
    (_, contours1, _) = cv2.findContours(blue.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)

    # Sort contours1 left-to-right (imutils.contours module).
    (contours1, _) = contours.sort_contours(contours1)

    if len(contours1) > 0:
        # Extract the center point of the left-most contour and mark it.
        fixedPoint = cv2.moments(contours1[0])
        # BUG FIX: m00 is 0 for degenerate (zero-area) contours — guard
        # the division instead of crashing.
        if fixedPoint["m00"] != 0:
            fixedPointX = int(fixedPoint["m10"] / fixedPoint["m00"])
            fixedPointY = int(fixedPoint["m01"] / fixedPoint["m00"])
            # White dot in the left-most object.
            cv2.circle(frame, (fixedPointX, fixedPointY), 7,
                       (255, 255, 255), -1)

        # Use the nearest contour to fixedPoint as the anchor
        # (left-most is [1], where [0] is fixedPoint).
        if len(contours1) > 1:
            # Sort by x coordinate so we can iterate over them.
            cnt2 = sorted([(c, cv2.boundingRect(c)[0]) for c in contours1],
                          key=lambda x: x[1], reverse=True)
            # Draw boxes around all objects we are tracking.
            for (c, _) in cnt2:
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame, (x, y), (x + w, y + h),
                              (255, 255, 0), 4)
                M = cv2.moments(c)
                if M["m00"] == 0:
                    continue  # skip zero-area contours (see BUG FIX above)
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                # Dot in the center of the black object on the image.
                cv2.circle(frame, (cX, cY), 7, (100, 100, 100), -1)
                centre.append((cX, cY))
What I would like to be able to do is print the distance (in pixels) between the objects as they are found but I am not sure how to access this data.
The fixed point calculation works as I assume the left most object can be hard coded, but I need to get the coords for any other objects located.
The code currently loops over each black object and places a dot in the center of it.
Thanks for any help offered:
andy

Resources