Distinct People Counting using OpenCV Python

I have code that detects cars in a video and counts them, but there's an issue: if the same car appears again in later frames, it gets counted again. I tried different resources but did not find a valid answer.
My Code
import cv2

backsub = cv2.bgsegm.createBackgroundSubtractorMOG()
capture = cv2.VideoCapture("C:/Users/chr04/Desktop/People and car counting/Car Counting/car video/video2.mp4")
best_id = 0
i = 0
car = 0

if capture:
    while True:
        ret, frame = capture.read()
        if ret:
            fgmask = backsub.apply(frame, None, 0.01)
            contours, hierarchy = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_NONE)
            try: hierarchy = hierarchy[0]
            except: hierarchy = []
            for contour, hier in zip(contours, hierarchy):
                (x, y, w, h) = cv2.boundingRect(contour)
                if w > 20 and h > 20:
                    best_id += 1
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                    cv2.putText(frame, str(best_id), (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (255, 0, 0), 2)
                    print(best_id)
            cv2.imshow("Track", frame)
            cv2.imshow("background sub", fgmask)
        key = cv2.waitKey(10)
        if key == ord('q'):
            break

You need to reset the counter variable on every iteration of the loop, since you receive the images frame by frame. That seems to be the issue here.
From your code, best_id is your counter variable, so just initialize it inside the loop (best_id = 0 inside the while loop, right after the if ret statement) and your code shall work fine. For every frame you receive from the feed, you need to reset this counter so that it starts counting from 0 again, as done below.
if ret:
    # initializing best_id variable
    best_id = 0
    fgmask = backsub.apply(frame, None, 0.01)
    contours, hierarchy = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)

Related

AttributeError: 'Model' object has no attribute '_call_spec'

I am trying to write code that predicts age, gender, and emotion using a CNN, but I keep getting the above error.
I have a function that takes webcam input and then passes each detected frame to another function, which is the one raising the error.
Here is the code for face detection:
def detect_face(img):
    # detector, sex_model, age_model, emotion_model, np, Image and rgb2gray
    # are defined/imported elsewhere in my script
    mt_res = detector.detect_faces(img)
    return_res = []
    for face in mt_res:
        x, y, width, height = face['box']
        center = [x + (width / 2), y + (height / 2)]
        max_border = max(width, height)
        # center alignment
        left = max(int(center[0] - (max_border / 2)), 0)
        right = max(int(center[0] + (max_border / 2)), 0)
        top = max(int(center[1] - (max_border / 2)), 0)
        bottom = max(int(center[1] + (max_border / 2)), 0)
        # crop the face
        center_img_k = img[top:top + max_border,
                           left:left + max_border, :]
        center_img = np.array(Image.fromarray(center_img_k).resize([224, 224]))
        print(center_img)
        # create predictions
        sex_preds = sex_model.predict(center_img.reshape(1, 224, 224, 3))[0][0]
        age_preds = age_model.predict(center_img.reshape(1, 224, 224, 3))[0][0]
        # convert to grey scale then predict using the emotion model
        grey_img = np.array(Image.fromarray(center_img_k).resize([48, 48]))
        emotion_preds = emotion_model.predict(rgb2gray(grey_img).reshape(1, 48, 48, 1))
        # output to the cv2
        return_res.append([top, right, bottom, left, sex_preds, age_preds, emotion_preds])
    return return_res
Here is the code for the webcam input:
# Get a reference to webcam
video_capture = cv2.VideoCapture(0)

emotion_dict = {
    0: 'Surprise',
    1: 'Happy',
    2: 'Disgust',
    3: 'Anger',
    4: 'Sadness',
    5: 'Fear',
    6: 'Contempt'
}

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Convert the image from BGR color (which OpenCV uses) to RGB color
    rgb_frame = frame[:, :, ::-1]
    # Find all the faces in the current frame of video
    face_locations = detect_face(rgb_frame)
    # Display the results
    for top, right, bottom, left, sex_preds, age_preds, emotion_preds in face_locations:
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        sex_text = 'Female' if sex_preds > 0.5 else 'Male'
        cv2.putText(frame, 'Sex: {}({:.3f})'.format(sex_text, sex_preds), (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36, 255, 12), 1)
        cv2.putText(frame, 'Age: {:.3f}'.format(age_preds), (left, top - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36, 255, 12), 1)
        cv2.putText(frame, 'Emotion: {}({:.3f})'.format(emotion_dict[np.argmax(emotion_preds)], np.max(emotion_preds)), (left, top - 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36, 255, 12), 1)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
Versions:
TensorFlow & Keras == 2.10.0

Delay in drawing an element with mouse click if user input is requested in OpenCV Python

I wanted to draw points with labels at mouse clicks on an image. In the following code, the first 3 left mouse clicks draw blue points, and clicks after that draw red points. It works fine so far. However, if I enable the user-input part marked by <---, it doesn't show the 3rd blue point until I give the user input. What I was expecting was that the user input request comes after it draws the 3rd blue point. Any help is appreciated. Thanks!
Here's the code. Please comment out the <--- marked line, which calls for the user input, and it works fine.
import cv2

img_path = r'test.jpg'
count = 0
red_count = 0
user_inp = None

def click_event(event, x, y, flags, params):
    global count, red_count, user_inp
    font_scale, point_radius = 0.4, 2
    font = cv2.FONT_HERSHEY_SIMPLEX
    if event == cv2.EVENT_LBUTTONDOWN:
        if count < 3:
            text = f"blue{count}"
            cv2.putText(img, text, (x + 2, y), font, font_scale, (255, 0, 0), 1)
            cv2.circle(img, (x, y), radius=point_radius, color=(255, 0, 0), thickness=-1)
            cv2.imshow('image', img)
            if count == 2:
                print("blue index 2")
                user_inp = input('input anything: ')  # <------------ check here
        if count >= 3:
            text = f"red{red_count}"
            cv2.putText(img, text, (x + 2, y), font, font_scale, (0, 0, 255), 1)
            cv2.circle(img, (x, y), radius=point_radius, color=(0, 0, 255), thickness=-1)
            cv2.imshow('image', img)
            red_count += 1
        count += 1

if __name__ == "__main__":
    img = cv2.imread(img_path, 1)
    cv2.imshow('image', img)
    cv2.setMouseCallback('image', click_event)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    print(user_inp)

My code runs continuously in Jupyter Notebook and the asterisk sign remains on the cell

import numpy as np
import cv2
import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf

model = tf.keras.models.load_model(r"C:\Users\ASUS\best_model_dataflair3.h5")

background = None
accumulated_weight = 0.5

ROI_top = 100
ROI_bottom = 300
ROI_right = 150
ROI_left = 350

def cal_accum_avg(frame, accumulated_weight):
    global background
    if background is None:
        background = frame.copy().astype("float")
        return None
    cv2.accumulateWeighted(frame, background, accumulated_weight)

def segment_hand(frame, threshold=25):
    global background
    diff = cv2.absdiff(background.astype("uint8"), frame)
    _, thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)
    # Fetching contours in the frame (these contours can be of the hand or any other object in the foreground)...
    # note: the 3-value return is the OpenCV 3.x signature; OpenCV 4.x returns (contours, hierarchy)
    image, contours, hierarchy = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # If the contours list is empty, we didn't get any contours...
    if len(contours) == 0:
        return None
    else:
        # The largest external contour should be the hand
        hand_segment_max_cont = max(contours, key=cv2.contourArea)
        # Returning the hand segment (max contour) and the thresholded image of the hand...
        return (thresholded, hand_segment_max_cont)

cam = cv2.VideoCapture(0)
num_frames = 0

while True:
    ret, frame = cam.read()
    # flipping the frame to prevent an inverted image of the captured frame...
    frame = cv2.flip(frame, 1)
    frame_copy = frame.copy()
    # ROI from the frame
    roi = frame[ROI_top:ROI_bottom, ROI_right:ROI_left]
    gray_frame = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    gray_frame = cv2.GaussianBlur(gray_frame, (9, 9), 0)
    if num_frames < 70:
        cal_accum_avg(gray_frame, accumulated_weight)
        cv2.putText(frame_copy, "FETCHING BACKGROUND...PLEASE WAIT", (80, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
    else:
        # segmenting the hand region
        hand = segment_hand(gray_frame)
        # Checking if we are able to detect the hand...
        if hand is not None:
            thresholded, hand_segment = hand
            # Drawing contours around the hand segment
            cv2.drawContours(frame_copy, [hand_segment + (ROI_right, ROI_top)], -1, (255, 0, 0), 1)
            cv2.imshow("Thresholded Hand Image", thresholded)
            thresholded = cv2.resize(thresholded, (64, 64))
            thresholded = cv2.cvtColor(thresholded, cv2.COLOR_GRAY2RGB)
            thresholded = np.reshape(thresholded, (1, thresholded.shape[0], thresholded.shape[1], 3))
            pred = model.predict(thresholded)
            # word_dict (the label map) is assumed to be defined elsewhere in the notebook
            cv2.putText(frame_copy, word_dict[np.argmax(pred)], (170, 45), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    # Draw ROI on frame_copy
    cv2.rectangle(frame_copy, (ROI_left, ROI_top), (ROI_right, ROI_bottom), (255, 128, 0), 3)
    # incrementing the number of frames for tracking
    num_frames += 1
    # Display the frame with the segmented hand
    cv2.putText(frame_copy, "DataFlair hand sign recognition_ _ _", (10, 20), cv2.FONT_ITALIC, 0.5, (51, 255, 51), 1)
    cv2.imshow("Sign Detection", frame_copy)
    # Close windows with Esc
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break

cam.release()
cv2.destroyAllWindows()
This is the code for predicting the hand gesture, but the camera is not opening. The code runs continuously without showing any error. Please, can anyone resolve this issue?
The only output I am getting in the Anaconda prompt is:
This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:

Python OpenCv2 place image over face found

I am loading several images that will go over my face, and I am having difficulty getting the image to go over the square created for the face. I have looked at many resources, but for some reason I receive an error when attempting to follow their method.
Every time I do so, I receive the error
ValueError: could not broadcast input array from shape (334,334,3) into shape (234,234,3)
I think the images might be too large; however, I tried resizing them to see if they would fit, to no avail.
Here is my code:
import cv2
import sys
import logging as log
import datetime as dt
from time import sleep
import os
import random
from timeit import default_timer as timer

cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
#log.basicConfig(filename='webcam.log',level=log.INFO)

video_capture = cv2.VideoCapture(0)
anterior = 0
#s_img = cv2.imread("my.jpg")
increment = 0

for filename in os.listdir("Faces/"):
    if filename.endswith(".png"):
        FullFile = (os.path.join("Faces/", filename))
        #ret, frame = video_capture.read()
        frame = cv2.imread(FullFile, cv2.IMREAD_UNCHANGED)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
        edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
        for (x, y, w, h) in faces:
            roi_color = frame[y:y + h, x:x + w]
            status = cv2.imwrite('export/faces_detected' + str(increment) + '.png', roi_color)
            increment = increment + 1
    else:
        continue

masks = []
for filename in os.listdir("export/"):
    if filename.endswith(".png"):
        FullFile = (os.path.join("export/", filename))
        s_img = cv2.imread(FullFile)
        masks.append(s_img)

Start = timer()
End = timer()
MasksSize = len(masks)
nrand = random.randint(0, MasksSize - 1)
increment = 0

while True:
    if not video_capture.isOpened():
        print('Unable to load camera.')
        sleep(5)
        pass
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )
    edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        if (End - Start) > 3:
            Start = timer()
            End = timer()
            nrand = random.randint(0, MasksSize - 1)
        # -75 and +20 added to fit my face
        cv2.rectangle(frame, (x, y - 75), (x + w, y + h + 20), (0, 255, 0), 2)
        s_img = masks[nrand]
        increment = increment + 1
        #maskresize = cv2.resize(s_img, (150, 150))
        #frame[y:y+s_img.shape[0], x:x+s_img.shape[1]] = s_img  # problem occurs here with
        # ValueError: could not broadcast input array from shape (334,334,3) into shape (234,234,3)
        # I assume I am inserting something too big?
        End = timer()
    if anterior != len(faces):
        anterior = len(faces)
        #log.info("faces: "+str(len(faces))+" at "+str(dt.datetime.now()))
    # Display the resulting frame
    cv2.imshow('Video', frame)
    #cv2.imshow('Video', cartoon)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
In the following line,
frame[y:y+s_img.shape[0], x:x+s_img.shape[1]] = s_img
you are attempting to assign s_img to frame[y:y+s_img.shape[0], x:x+s_img.shape[1]], and the two have different shapes (the destination slice gets clipped when it extends past the frame border, which is why the shapes differ in the error).
You can check the two shapes by printing them (they will be the same as the shapes mentioned in the error).
Try resizing s_img to the same shape and then assign it.
Refer to this link: https://www.geeksforgeeks.org/image-resizing-using-opencv-python/
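For example, a minimal sketch of that direct fix, forcing the overlay to the (w, h) face rectangle from detectMultiScale (this may distort the aspect ratio, and it assumes the rectangle lies fully inside the frame):

s_img = cv2.resize(masks[nrand], (w, h))  # cv2.resize takes (width, height)
frame[y:y + h, x:x + w] = s_img           # shapes now match, so the assignment works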
To resize the image to scale without distortion, I used this function:
def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    # initialize the dimensions of the image to be resized and
    # grab the image size
    dim = None
    (h, w) = image.shape[:2]
    # if both the width and height are None, then return the
    # original image
    if width is None and height is None:
        return image
    # check to see if the width is None
    if width is None:
        # calculate the ratio of the height and construct the
        # dimensions
        r = height / float(h)
        dim = (int(w * r), height)
    # otherwise, the height is None
    else:
        # calculate the ratio of the width and construct the
        # dimensions
        r = width / float(w)
        dim = (width, int(h * r))
    # resize the image
    resized = cv2.resize(image, dim, interpolation=inter)
    # return the resized image
    return resized
Then later on I called:
r = image_resize(s_img, height=h, width=w)
frame[y:y + r.shape[0], x:x + r.shape[1]] = r
Answer also taken from here: Resize an image without distortion OpenCV

How to count the vehicles if boundingRect touches the line

Below is the code I am trying:
import cv2
import numpy as np

# drawing, point1/point2 and the scaling factors are used below but were
# missing from the posted snippet; they are assumed to be initialized like this
drawing = False
point1 = ()
point2 = ()
scaling_factorx = 0.5
scaling_factory = 0.5

def mouse_drawing(event, x, y, flags, params):
    global point1, point2, drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        if drawing is False:
            drawing = True
            point1 = (x, y)
        else:
            drawing = False
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing is True:
            point2 = (x, y)

cap = cv2.VideoCapture("new2.asf")
cv2.namedWindow("App", cv2.WINDOW_FREERATIO)
cv2.setMouseCallback("App", mouse_drawing)
fgbg = cv2.createBackgroundSubtractorMOG2()
kernel = np.ones((5, 5), np.uint8)

while True:
    ret, frame = cap.read()
    frame = cv2.resize(frame, None, fx=scaling_factorx, fy=scaling_factory, interpolation=cv2.INTER_AREA)
    imgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask1 = cv2.GaussianBlur(imgray, (7, 7), 0)
    fgmask = fgbg.apply(fgmask1)
    if point1 and point2:
        cv2.line(frame, point1, point2, (0, 255, 0), 3)
    contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    try:
        hierarchy = hierarchy[0]
    except:
        hierarchy = []
    for contour, hier in zip(contours, hierarchy):
        (x, y, w, h) = cv2.boundingRect(contour)
        if w > 80 and h > 80:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("App", frame)
    # a waitKey call is assumed here so the window can refresh, e.g.:
    if cv2.waitKey(30) & 0xFF == 27:
        break
How do I write an image of a vehicle with cv2.imwrite when it reaches the line that was drawn manually? The vehicles get rectangular boxes, which is fine, but some vehicles have more than one box, and one vehicle should have only one rectangular box. A vehicle should be saved only when it reaches the line; the rest of the vehicles should not be saved. Please let me know the solution.
First, you need to group intersecting rectangles into one.
You do so by checking the intersection area between each pair of rectangles.
If the intersection area is larger than a pre-defined heuristic ratio of the smaller rectangle's area, the smaller rectangle should be removed, for example when intersection_area / smaller_rect_area > 0.75.
Please check this answer for the rectangle intersection.
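A minimal sketch of that grouping step (the (x, y, w, h) box format matches boundingRect in the code above; the 0.75 ratio is the heuristic suggested here):

def intersection_area(a, b):
    # a and b are (x, y, w, h) boxes; returns the overlapping area
    dx = min(a[0] + a[2], b[0] + b[2]) - max(a[0], b[0])
    dy = min(a[1] + a[3], b[1] + b[3]) - max(a[1], b[1])
    return dx * dy if dx > 0 and dy > 0 else 0

def group_rects(rects, ratio=0.75):
    # drop any rectangle whose area is mostly covered by a larger one
    keep = []
    for i, r in enumerate(rects):
        r_area = r[2] * r[3]
        covered = any(
            j != i and rects[j][2] * rects[j][3] > r_area
            and intersection_area(r, rects[j]) / r_area > ratio
            for j in range(len(rects))
        )
        if not covered:
            keep.append(r)
    return keep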
Second, to check that a rectangle has passed the line:
Use your two points to find the parameters of the general line formula ax + by + c = 0.
For each frame, plug the rectangle's center coordinates into the formula and keep track of the sign of the result.
If the sign of the result changes, the rectangle's center has passed the line.
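A sketch of that sign test, assuming point1 and point2 are the endpoints of the drawn line; prev_sign is a hypothetical per-vehicle value that you would carry over between frames:

def line_side(p1, p2, cx, cy):
    # build ax + by + c = 0 from the two endpoints, then evaluate at (cx, cy)
    a = p2[1] - p1[1]
    b = p1[0] - p2[0]
    c = -(a * p1[0] + b * p1[1])
    v = a * cx + b * cy + c
    return 1 if v > 0 else (-1 if v < 0 else 0)

# inside the per-frame loop, for a tracked box (x, y, w, h):
sign = line_side(point1, point2, x + w // 2, y + h // 2)
if prev_sign is not None and sign != 0 and sign != prev_sign:
    # the center crossed the line: save this vehicle once
    cv2.imwrite('vehicle.png', frame[y:y + h, x:x + w])
prev_sign = sign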
