I wrote a program to detect faces in real-time and display them. The issue is that sometimes the code breaks and I am presented with the following error:
File "Removed_For_Privacy_Issues/main.py", line 24, in <module>
cv2.imshow('frame',detectedface)
cv2.error: OpenCV(4.3.0) C:\projects\opencv-python\opencv\modules\highgui\src\window.cpp:376: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'
After further testing, the code only breaks when there is a sudden change in the video (like when I move my face quickly or cover the camera). Does anyone know why?
Code:
import numpy as np
import cv2
import PIL.Image
import PIL.ImageDraw
import time

cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier('Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml')
time.sleep(3)

def FacialDetection(image):
    boxes = face_cascade.detectMultiScale(image, 1.1, 3)
    for (x, y, w, h) in boxes:
        cv2.rectangle(image, (x, y), (x+w, y+h), (255, 255, 0), 2)
    return image

while cap.isOpened():
    ret, frame = cap.read()
    if ret == True:
        detectedface = FacialDetection(frame)
        cv2.imshow('frame', detectedface)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
cv2.destroyAllWindows()
This will solve it: a simple try/except will do the work.
import numpy as np
import cv2
import PIL.Image
import PIL.ImageDraw
import time

cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier('Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml')
time.sleep(3)

def FacialDetection(image):
    boxes = face_cascade.detectMultiScale(image, 1.1, 3)
    for (x, y, w, h) in boxes:
        cv2.rectangle(image, (x, y), (x+w, y+h), (255, 255, 0), 2)
    return image

while cap.isOpened():
    try:
        ret, frame = cap.read()
        if ret == True:
            detectedface = FacialDetection(frame)
            cv2.imshow('frame', detectedface)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    except cv2.error:  # raised when a grab fails and the frame is empty
        print("face not detected")

cap.release()
cv2.destroyAllWindows()
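Alternatively, you can avoid the exception entirely by validating the frame before handing it to imshow: cap.read() returns ret=False (and an empty frame) when a grab fails, which is exactly what happens on a sudden feed interruption. A minimal sketch of that loop, reusing the FacialDetection function above:

while cap.isOpened():
    ret, frame = cap.read()
    # skip frames the camera failed to deliver instead of crashing imshow
    if not ret or frame is None or frame.size == 0:
        continue
    cv2.imshow('frame', FacialDetection(frame))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break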
So I'm trying out code from this website: https://towardsdatascience.com/real-time-age-gender-and-emotion-prediction-from-webcam-with-keras-and-opencv-bde6220d60a. I'm only interested in the real-time emotion prediction bit, and I use the emotion prediction model provided by the author. Following the setup and cutting out the code I don't need (all of the code was provided in the link), I'm left with this:
import cv2
from PIL import Image
import numpy as np
from mtcnn import MTCNN
import pickle

# load face detector
detector = MTCNN()

# load the model
emotion_model = pickle.load(open('emotion-model-final.pkl', 'rb'))

def rgb2gray(rgb):
    r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
    gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
    return gray

def detect_face(img):
    mt_res = detector.detect_faces(img)
    return_res = []
    for face in mt_res:
        x, y, width, height = face['box']
        center = [x+(width/2), y+(height/2)]
        max_border = max(width, height)

        # center alignment
        left = max(int(center[0]-(max_border/2)), 0)
        right = max(int(center[0]+(max_border/2)), 0)
        top = max(int(center[1]-(max_border/2)), 0)
        bottom = max(int(center[1]+(max_border/2)), 0)

        # crop the face
        center_img_k = img[top:top+max_border,
                           left:left+max_border, :]

        # convert to grey scale then predict using the emotion model
        grey_img = np.array(Image.fromarray(center_img_k).resize([48, 48]))
        emotion_preds = emotion_model.predict(rgb2gray(grey_img).reshape(1, 48, 48, 1))

        # output to the cv2 (the tutorial's sex/age predictions are cut out here)
        return_res.append([top, right, bottom, left, emotion_preds])
    return return_res

# Get a reference to webcam
video_capture = cv2.VideoCapture(0)

emotion_dict = {
    0: 'Surprise',
    1: 'Happy',
    2: 'Disgust',
    3: 'Anger',
    4: 'Sadness',
    5: 'Fear',
    6: 'Contempt'
}

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Convert the image from BGR color (which OpenCV uses) to RGB color
    rgb_frame = frame[:, :, ::-1]

    # Find all the faces in the current frame of video
    face_locations = detect_face(rgb_frame)

    # Display the results
    for top, right, bottom, left, emotion_preds in face_locations:
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.putText(frame, 'Emotion: {}({:.3f})'.format(emotion_dict[np.argmax(emotion_preds)], np.max(emotion_preds)),
                    (left, top-40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36,255,12), 1)

    # Display the resulting image
    cv2.imshow('Video', frame)

    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
Running the code doesn't work for me because I get the following error message:
File "C:\Users\<my name>\source\repos\webcamtest\webcamtest\webcamtest.py", line 43, in detect_face
emotion_preds = emotion_model.predict(rgb2gray(grey_img).reshape(1, 48, 48, 1))
File "C:\Users\<my name>\miniconda3\lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\<my name>\miniconda3\lib\site-packages\keras\engine\training.py", line 3555, in _check_call_args
fullargspec = self._call_spec.full_argspec
AttributeError: 'Model' object has no attribute '_call_spec'
So it seems like the error leads back to keras\engine\training.py, which I've done nothing to other than install. I'm using Keras 2.11.0, tensorflow 2.11.0 and mtcnn 0.1.1. What could be the problem?
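(For what it's worth, this symptom is consistent with a Keras version mismatch: pickle restores the model object exactly as it was serialized, so a model pickled under an older Keras can lack internal attributes such as _call_spec that Keras 2.11 expects. A minimal sketch of the supported save/load route, assuming the model can be re-exported under the environment that created it; the .h5 filename below is hypothetical:)

# In the environment whose Keras version matches the pickle, re-export
# with Keras's own serializer (filename is hypothetical):
#     emotion_model.save('emotion-model-final.h5')
# Then, under Keras/TF 2.11, load it with the supported API:
from tensorflow import keras
emotion_model = keras.models.load_model('emotion-model-final.h5')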
import numpy as np
import cv2

########################
#       function       #
########################
def draw_circle(event, x, y, flags, params):
    if event == cv2.EVENT_LBUTTONDOWN:
        cv2.circle(img, (x, y), 100, (0, 0, 255), -1)
    elif event == cv2.EVENT_RBUTTONDOWN:
        cv2.circle(img, (x, y), 100, (0, 255, 0), -1)
    else:
        return

cv2.namedWindow(winname="output")
cv2.setMouseCallback("output", draw_circle)

##########################
#     showing images     #
##########################
img = np.zeros((512, 512, 3), dtype=np.int8)  # ----------- problem here

while True:
    cv2.imshow("output", img)
    if cv2.waitKey(20) & 0xFF == 27:
        break

cv2.destroyAllWindows()
I am new to OpenCV in Python. The above is my code, and I am unable to figure out the issue. When dtype was np.int8 I was able to draw circles, but when I changed it to np.int16 I could not.
If you really need to draw into a 16-bit image, then you need to use 16-bit color.
As Dan noted, imshow will scale the 16-bit values down to 8-bit for display, so (0, 255, 0) becomes (0, 0, 0); that's why you get only a black image.
Use (0, 255*256, 0) instead.
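A minimal sketch of the corrected setup (note the unsigned dtype as well; with a signed np.int16 canvas, half the intensity range would be negative):

import numpy as np
import cv2

# 16-bit canvas: valid intensities run from 0 to 65535, not 0 to 255
img = np.zeros((512, 512, 3), dtype=np.uint16)

def draw_circle(event, x, y, flags, params):
    if event == cv2.EVENT_LBUTTONDOWN:
        # 255*256 = 65280, which imshow scales back down to ~255 on screen
        cv2.circle(img, (x, y), 100, (0, 255 * 256, 0), -1)

cv2.namedWindow("output")
cv2.setMouseCallback("output", draw_circle)

while True:
    cv2.imshow("output", img)
    if cv2.waitKey(20) & 0xFF == 27:  # Esc to quit
        break

cv2.destroyAllWindows()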
import cv2            # For image processing
import numpy as np    # For converting images to a numerical array
import os             # To handle directories
from PIL import Image # Pillow lib for handling images

labels = ["Harmeet", "Niti"]

face_cascade = cv2.CascadeClassifier(r'/home/niti/Downloads/facerasp/facerecog/haarcascade_frontalface_default.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read(r"/home/niti/Downloads/facerasp/facerecog/face-trainner.yml")

cap = cv2.VideoCapture(0)  # Get video feed from the camera

while(True):
    ret, img = cap.read()  # Break video into frames
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert video frame to greyscale
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)  # Recog. faces
    for (x, y, w, h) in faces:
        roi_gray = gray[y:y+h, x:x+w]  # Crop the face region in greyscale
        id_, conf = recognizer.predict(roi_gray)  # Recognize the face
        if conf >= 80:
            font = cv2.FONT_HERSHEY_SIMPLEX  # Font style for the name
            name = labels[id_]  # Get the name from the list using the ID number
            cv2.putText(img, name, (x, y), font, 1, (0, 0, 255), 2)
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
    cv2.imshow('Preview', img)  # Display the video
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
It seems to give the following error.
error                                     Traceback (most recent call last)
<ipython-input> in <module>
     10 face_cascade = cv2.CascadeClassifier(r'/home/niti/Downloads/facerasp/facerecog/haarcascade_frontalface_default.xml')
     11 recognizer = cv2.face.LBPHFaceRecognizer_create()
---> 12 recognizer.read(r"/home/niti/Downloads/facerasp/facerecog/face-trainner.yml")
     13
     14 cap = cv2.VideoCapture(0)  # Get video feed from the camera

error: OpenCV(4.1.2) /io/opencv/modules/core/src/persistence.cpp:2068: error: (-215:Assertion failed) isMap() in function 'operator[]'
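(That assertion fires when face-trainner.yml is not a valid FileStorage map; in practice the file is usually empty, truncated, or was never actually written by the training step. A minimal sketch of regenerating it, assuming the training images still exist; the training/ directory layout and numeric label IDs here are hypothetical:)

import os
import cv2
import numpy as np

recognizer = cv2.face.LBPHFaceRecognizer_create()

faces, ids = [], []
# Hypothetical layout: training/<label_id>/<image>.jpg
for label_id in os.listdir('training'):
    for fname in os.listdir(os.path.join('training', label_id)):
        img = cv2.imread(os.path.join('training', label_id, fname), cv2.IMREAD_GRAYSCALE)
        if img is not None:
            faces.append(img)
            ids.append(int(label_id))

recognizer.train(faces, np.array(ids))
recognizer.write('face-trainner.yml')  # writes a valid FileStorage map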
I am trying to create a small program for personal use with the picamera and rpi3. I have been trying for a while to implement real-time face detection using opencv. It never works. The error code I get is
"error: (-215) scn == 3 || scn == 4 in function cvtColor"
The code I am trying to use is:
import numpy as np
import cv2
cam = cv2.VideoCapture(0)
name = 'detect'
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE)
while True:
s, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
cv2.imshow(name, img)
k = cv2.waitKey(0)
if k == 27:
cv2.destroyWindow("Detect")
break
Disclaimer: the code I have posted here is not mine. I intended to copy-paste-edit the code for PERSONAL use. I do not claim to have created it; I just need it to work.
Many thanks in advance
I have found a solution. Instead of using the code I posted above, I have stolen some new code and edited it to my liking. For some reason, it only works when launched from the terminal, but that is acceptable.
The command needed (on Linux) is:
cd (path_to_script) && python3 (script_name) --picamera 1
Thanks to #beaker for helping me
from imutils.video import VideoStream
from imutils import face_utils
import datetime
import argparse
import imutils
import time
import dlib
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-r", "--picamera", type=int, default=-1,
                help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

print("[INFO] camera sensor warming up...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)

faceCascade = cv2.CascadeClassifier('/home/pi/Documents/Python Projects/Camera/haarcascade_frontalface_default.xml')

while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 10)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

cv2.destroyAllWindows()
vs.stop()
Disclaimer: the code I have posted here is not mine. I intended to copy-paste-edit the code for PERSONAL use. I do not claim to have created it.
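(For reference, the original "scn == 3 || scn == 4" assertion means cvtColor received an image without 3 or 4 channels; with VideoCapture this almost always means cam.read() failed and returned an empty frame. A minimal guard, assuming that failure mode:)

import cv2

cam = cv2.VideoCapture(0)
while True:
    s, img = cam.read()
    # a failed grab returns s == False and an empty image; passing that
    # to cvtColor is what triggers the channel-count assertion
    if not s or img is None:
        continue  # skip the bad frame instead of crashing
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cv2.imshow('detect', img)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc to quit
        break

cam.release()
cv2.destroyAllWindows()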
I'm trying to implement line detection in Python using a Raspberry Pi 3 and the Pi camera.
import picamera
import picamera.array
import time
import cv2
import numpy as np

# Initialize camera
camera = picamera.PiCamera()
camera.resolution = (640, 480)
rawCapture = picamera.array.PiRGBArray(camera)

# Let camera warm up
time.sleep(0.1)

# Capture image
camera.capture(rawCapture, format="bgr")
img = rawCapture.array

# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Blur image to reduce noise
blurred = cv2.GaussianBlur(gray, (9, 9), 0)

# Perform Canny edge-detection
edged = cv2.Canny(blurred, 50, 150)

# Perform probabilistic Hough line transform
lines = cv2.HoughLinesP(edged, 1, np.pi/180, 10, 80, 1)

# Draw lines on input image
if(lines != None):
    for x1, y1, x2, y2 in lines[0]:
        cv2.line(resized, (x1, y1), (x2, y2), (0, 255, 0), 2)

cv2.imshow("line detect test", img)
cv2.waitKey(0)
The following error is displayed:
Traceback (most recent call last):
  File "/home/pi/picam lane detection.py", line 33, in <module>
    cv2.line(resized,(x1,y1),(x2,y2),(0,255,0),2)
NameError: name 'resized' is not defined
Please help me figure out the issue.
I think the problem is that resized is not defined. You can change resized to img:
import picamera
import picamera.array
import time
import cv2
import numpy as np

# Initialize camera
camera = picamera.PiCamera()
camera.resolution = (640, 480)
rawCapture = picamera.array.PiRGBArray(camera)

# Let camera warm up
time.sleep(0.1)

# Capture image
camera.capture(rawCapture, format="bgr")
img = rawCapture.array

# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Blur image to reduce noise
blurred = cv2.GaussianBlur(gray, (9, 9), 0)

# Perform Canny edge-detection
edged = cv2.Canny(blurred, 50, 150)

# Perform probabilistic Hough line transform.
# Note: the fifth positional argument of HoughLinesP is the optional
# output array, so minLineLength and maxLineGap go in as keywords
lines = cv2.HoughLinesP(edged, 1, np.pi/180, 10, minLineLength=80, maxLineGap=1)

# Draw lines on the input image (use "is not None" here: comparing a
# NumPy array against None with != does not yield a single boolean)
if lines is not None:
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

cv2.imshow("line detect test", img)
cv2.waitKey(0)