How to do real-time face detection with OpenCV and Python 3 on a Raspberry Pi 3

I am trying to create a small program for personal use with the picamera and a Raspberry Pi 3. I have been trying for a while to implement real-time face detection using OpenCV, but it never works. The error I get is:
error: (-215) scn == 3 || scn == 4 in function cvtColor
The code I am trying to use is:
import numpy as np
import cv2

cam = cv2.VideoCapture(0)
name = 'detect'
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE)

while True:
    s, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
    cv2.imshow(name, img)
    k = cv2.waitKey(0)
    if k == 27:
        cv2.destroyWindow("Detect")
        break
Disclaimer: the code I have posted here is not mine. I intended to copy, paste, and edit it for personal use; I do not claim to have created it, I just need it to work.
Many thanks in advance
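For reference, this particular assertion usually means cvtColor received an empty image: cam.read() returns s == False and img is None when no frame could be grabbed (e.g. no camera at index 0, or the Pi camera's V4L2 driver not loaded). A minimal guard inside the loop, assuming the same capture setup as the code above:

while True:
    s, img = cam.read()
    if not s or img is None:
        # read() delivered no frame; skip it instead of handing an
        # empty image to cvtColor
        continue
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # ... rest of the detection loop unchanged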

I have found a solution. Instead of using the code I posted above, I have stolen some new code and edited it to my liking. For some reason it only works when launched from the terminal, but that is acceptable.
The command needed (on Linux) is:
cd (path_to_script) && python3 (script_name) --picamera 1
Thanks to @beaker for helping me.
from imutils.video import VideoStream
from imutils import face_utils
import datetime
import argparse
import imutils
import time
import dlib
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-r", "--picamera", type=int, default=-1,
                help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

print("[INFO] camera sensor warming up...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)

faceCascade = cv2.CascadeClassifier('/home/pi/Documents/Python Projects/Camera/haarcascade_frontalface_default.xml')

while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 10)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

cv2.destroyAllWindows()
vs.stop()
Disclaimer: the code I have posted here is not mine. I intended to copy, paste, and edit it for personal use; I do not claim to have created it.
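A likely reason the script only works from the terminal is the --picamera flag: launched without arguments (e.g. from an IDE), args["picamera"] stays at its default of -1, so usePiCamera evaluates to False and the USB-camera path is used. If the Pi camera is the only camera you use, a hard-coded variant (my assumption, not part of the original answer) avoids the flag entirely:

# assumes the Pi camera is always the source, so no CLI flag is needed
vs = VideoStream(usePiCamera=True).start()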

Related

OpenCV Face Detection breaks when I move my face too quickly

I wrote a program to detect faces in real time and display them. The issue is that sometimes the code breaks and I am presented with the following error:
File "Removed_For_Privacy_Issues/main.py", line 24, in <module>
cv2.imshow('frame',detectedface)
cv2.error: OpenCV(4.3.0) C:\projects\opencv-python\opencv\modules\highgui\src\window.cpp:376: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'
After further testing, the code only breaks when there is a sudden change in the video (like when I move my face quickly or cover the camera). Does anyone know why?
Code:
import numpy as np
import cv2
import PIL.Image
import PIL.ImageDraw
import time

cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier('Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml')
time.sleep(3)

def FacialDetection(image):
    boxes = face_cascade.detectMultiScale(image, 1.1, 3)
    for (x, y, w, h) in boxes:
        cv2.rectangle(image, (x, y), (x+w, y+h), (255, 255, 0), 2)
    return image

while(cap.isOpened()):
    ret, frame = cap.read()
    if ret == True:
        detectedface = FacialDetection(frame)
        cv2.imshow('frame', detectedface)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
cv2.destroyAllWindows()
This will solve it; a simple try/except will do the work:
import numpy as np
import cv2
import PIL.Image
import PIL.ImageDraw
import time

cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier('Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml')
time.sleep(3)

def FacialDetection(image):
    boxes = face_cascade.detectMultiScale(image, 1.1, 3)
    for (x, y, w, h) in boxes:
        cv2.rectangle(image, (x, y), (x+w, y+h), (255, 255, 0), 2)
    return image

while(cap.isOpened()):
    try:
        ret, frame = cap.read()
        if ret == True:
            detectedface = FacialDetection(frame)
            cv2.imshow('frame', detectedface)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    except:
        print("face not detected")

cap.release()
cv2.destroyAllWindows()
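A narrower alternative to the bare except (my reading of the root cause, not from the original answer): the imshow assertion complains about an empty image rather than a failed detection, so validating the frame before using it avoids swallowing unrelated errors:

while cap.isOpened():
    ret, frame = cap.read()
    if not ret or frame is None or frame.size == 0:
        # skip frames that failed to decode instead of crashing in imshow
        continue
    detectedface = FacialDetection(frame)
    cv2.imshow('frame', detectedface)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break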

Background removal from webcam, OpenCV, Python

I'm creating a script that will read the state of a supermarket shelf and tell me if there are products missing.
For example, in the image below there are some places where products are missing. I'm using the FAST method to find all the corners in the frame, but sometimes the script detects the floor's corners. What I want to do is remove the floor from the frame before I find the corners.
import cv2
import numpy as np

image = cv2.imread('gondola_imagem.jpeg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

fast = cv2.FastFeatureDetector_create()

# Obtain key points; by default non-max suppression is on
# (to turn it off, set fast.setBool('nonmaxSuppression', False))
keypoints = fast.detect(gray, None)
print("Number of keypoints Detected: ", len(keypoints))

image = cv2.drawKeypoints(image, keypoints, None,
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('Feature Method - FAST', image)
cv2.waitKey()
cv2.destroyAllWindows()
You can use a mask to remove the areas you are not interested in. For example, with the following image as a mask you can get the results below.
Mask
Result
The code is as follows:
import numpy as np
import cv2

image = cv2.imread('test.jpg')
mask = cv2.imread('mask.jpg', 0)

cv2.imshow('Original', image)
cv2.imshow('Mask', mask)

res = cv2.bitwise_and(image, image, mask=mask)
gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)

fast = cv2.FastFeatureDetector_create()

# Obtain key points; by default non-max suppression is on
# (to turn it off, set fast.setBool('nonmaxSuppression', False))
keypoints = fast.detect(gray, None)
print("Number of keypoints Detected: ", len(keypoints))

image = cv2.drawKeypoints(image, keypoints, None,
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite('result.jpg', image)
cv2.imshow('Feature Method - FAST', image)
cv2.waitKey()
cv2.destroyAllWindows()
Edit:
If you want to do this in real time (video from a webcam), you just need to do it for every frame you get from the camera. As long as the camera is not moving, you should be able to use the same mask for all the frames. You could make the code above a function and then call it with an image as a parameter, as in the following code:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # The following function would be created from the previous code
    CallFunctionToPreviousCode(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
The code above was taken from the OpenCV Python tutorials, which are a good place to learn OpenCV for the Python programming language.
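A sketch of what that helper could look like, combining the masked-FAST code from the answer with the capture loop (the function name is illustrative, and it assumes mask.jpg matches the webcam resolution):

import numpy as np
import cv2

mask = cv2.imread('mask.jpg', 0)  # load the mask once, not per frame
fast = cv2.FastFeatureDetector_create()

def detect_corners_masked(frame):
    # black out the masked-off area (e.g. the floor), then run FAST on the rest
    res = cv2.bitwise_and(frame, frame, mask=mask)
    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    keypoints = fast.detect(gray, None)
    return cv2.drawKeypoints(frame, keypoints, None,
                             flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('Feature Method - FAST', detect_corners_masked(frame))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()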

OpenCV giving the following error as output

import cv2                # For image processing
import numpy as np        # For converting images to numerical arrays
import os                 # To handle directories
from PIL import Image     # Pillow lib for handling images

labels = ["Harmeet", "Niti"]

face_cascade = cv2.CascadeClassifier(r'/home/niti/Downloads/facerasp/facerecog/haarcascade_frontalface_default.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read(r"/home/niti/Downloads/facerasp/facerecog/face-trainner.yml")

cap = cv2.VideoCapture(0)  # Get video feed from the camera

while(True):
    ret, img = cap.read()  # Break video into frames
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert video frame to grayscale
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)  # Detect faces
    for (x, y, w, h) in faces:
        roi_gray = gray[y:y+h, x:x+w]  # Face region in grayscale
        id_, conf = recognizer.predict(roi_gray)  # Recognize the face
        if conf >= 80:
            font = cv2.FONT_HERSHEY_SIMPLEX  # Font style for the name
            name = labels[id_]  # Get the name from the list using the ID number
            cv2.putText(img, name, (x, y), font, 1, (0, 0, 255), 2)
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
    cv2.imshow('Preview', img)  # Display the video
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
It seems to give the following error:

error                               Traceback (most recent call last)
<ipython-input> in <module>
     10 face_cascade = cv2.CascadeClassifier(r'/home/niti/Downloads/facerasp/facerecog/haarcascade_frontalface_default.xml')
     11 recognizer = cv2.face.LBPHFaceRecognizer_create()
---> 12 recognizer.read(r"/home/niti/Downloads/facerasp/facerecog/face-trainner.yml")
     13
     14 cap = cv2.VideoCapture(0)  # Get video feed from the camera

error: OpenCV(4.1.2) /io/opencv/modules/core/src/persistence.cpp:2068: error: (-215:Assertion failed) isMap() in function 'operator[]'
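This persistence.cpp assertion usually means face-trainner.yml is missing, empty, or not a model file the LBPH recognizer can parse as a map, so read() fails before the camera loop ever runs. A minimal sanity check before loading (my addition, not from the original post):

import os

model_path = r"/home/niti/Downloads/facerasp/facerecog/face-trainner.yml"
if not os.path.isfile(model_path) or os.path.getsize(model_path) == 0:
    # the training step must have written this file; bail out early if it didn't
    raise SystemExit("Model file missing or empty; re-run the training step first")
recognizer.read(model_path)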

Python shows the error "name 'resized' is not defined" when running the following code

I'm trying to implement line detection in Python using a Raspberry Pi 3 and the picam:
import picamera
import picamera.array
import time
import cv2
import numpy as np

# Initialize camera
camera = picamera.PiCamera()
camera.resolution = (640, 480)
rawCapture = picamera.array.PiRGBArray(camera)

# Let camera warm up
time.sleep(0.1)

# Capture image
camera.capture(rawCapture, format="bgr")
img = rawCapture.array

# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Blur image to reduce noise
blurred = cv2.GaussianBlur(gray, (9, 9), 0)

# Perform Canny edge detection
edged = cv2.Canny(blurred, 50, 150)

# Perform probabilistic Hough line transform
lines = cv2.HoughLinesP(edged, 1, np.pi/180, 10, 80, 1)

# Draw lines on input image
if(lines != None):
    for x1, y1, x2, y2 in lines[0]:
        cv2.line(resized, (x1, y1), (x2, y2), (0, 255, 0), 2)

cv2.imshow("line detect test", img)
cv2.waitKey(0)
The following error is displayed:

Traceback (most recent call last):
  File "/home/pi/picam lane detection.py", line 33, in <module>
    cv2.line(resized, (x1, y1), (x2, y2), (0, 255, 0), 2)
NameError: name 'resized' is not defined
Please help me figure out the issue.
I think the problem is that resized is not defined. You can change resized to img:
import picamera
import picamera.array
import time
import cv2
import numpy as np

# Initialize camera
camera = picamera.PiCamera()
camera.resolution = (640, 480)
rawCapture = picamera.array.PiRGBArray(camera)

# Let camera warm up
time.sleep(0.1)

# Capture image
camera.capture(rawCapture, format="bgr")
img = rawCapture.array

# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Blur image to reduce noise
blurred = cv2.GaussianBlur(gray, (9, 9), 0)

# Perform Canny edge detection
edged = cv2.Canny(blurred, 50, 150)

# Perform probabilistic Hough line transform
lines = cv2.HoughLinesP(edged, 1, np.pi/180, 10, 80, 1)

# Draw lines on the input image ("is not None" avoids the ambiguous
# truth value raised by comparing a NumPy array with != None)
if lines is not None:
    for x1, y1, x2, y2 in lines[0]:
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

cv2.imshow("line detect test", img)
cv2.waitKey(0)
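One detail worth noting: cv2.HoughLinesP returns an array of shape (N, 1, 4), so the loop above iterates only over lines[0] and draws just the first detected line. A variant that draws every detected line (same variables as above) would be:

if lines is not None:
    for line in lines:
        # each entry has shape (1, 4): unpack the single row of endpoints
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)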

Regionprops (skimage.measure) for Video in Python

I got a piece of code on the internet that labels image regions ('Label image regions') and tried to run it over a video, but all I get is the first frame, and then an error after closing the first frame's window: "max() arg is an empty sequence", from the line plt.tight_layout() of my code. I am trying to get labels for all the frames in my video instead of the single-image example shown in the linked example. Basically, the code should display/plot all the frames with labels.
Any help would be really useful. Please find my code below:
import cv2
import numpy as np
from matplotlib import pyplot as plt
import time
import matplotlib.patches as mpatches
from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.morphology import closing, square
from skimage.color import label2rgb

cap = cv2.VideoCapture('test3.mp4')
fig, ax = plt.subplots(figsize=(10, 6))

while(1):
    t = time.time()
    ret, frame2 = cap.read()
    image = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))
    # remove artifacts connected to image border
    cleared = clear_border(bw)
    # label image regions
    label_image = label(cleared)
    image_label_overlay = label2rgb(label_image, image=frame2)
    x = regionprops(label_image)
    area2 = [r.area for r in x]
    print(area2)
    ax.imshow(image_label_overlay)
    for region in regionprops(label_image):
        # take regions with large enough areas
        if region.area >= 100:
            # draw rectangle around segmented coins
            minr, minc, maxr, maxc = region.bbox
            rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      fill=False, edgecolor='red', linewidth=2)
            ax.add_patch(rect)
    ax.set_axis_off()
    plt.tight_layout()
    plt.show()
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Voila!
And the solution is:
1.) Error rectification: the "max() arg is an empty sequence" error from plt.tight_layout() can be removed by using fig.tight_layout rather than plt.tight_layout. After I closed the first frame of the video (which was not updating; that is another problem I am still pondering), the figure was empty, and tight_layout raised an exception when run on an empty figure.
2.) Running the 'Label image regions' code on a video is possible if you replace the lines

rect = mpatches.Rectangle((minc, minr), maxc - minc + 50, maxr - minr + 50,
                          fill=False, edgecolor='red', linewidth=2)
ax.add_patch(rect)
ax.set_axis_off()
plt.tight_layout()
plt.show()

with

cv2.rectangle(frame2, (minc, minr), (minc + maxc - minc, minr + maxr - minr), (0, 255, 0), 2)
cv2.imshow('ObjectTrack', frame2)  # this line goes outside the for loop

Basically, display the video the way the simple 'Capture Video from Camera' example in the OpenCV Python tutorials does.
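Putting the two fixes together, the per-frame part of the loop might look like this (a sketch reusing the variables from the question's code; note that minc + maxc - minc simplifies to maxc, and likewise for maxr):

while True:
    ret, frame2 = cap.read()
    if not ret:
        break
    image = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))
    label_image = label(clear_border(bw))
    for region in regionprops(label_image):
        if region.area >= 100:
            minr, minc, maxr, maxc = region.bbox
            # draw with OpenCV instead of matplotlib so the window updates per frame
            cv2.rectangle(frame2, (minc, minr), (maxc, maxr), (0, 255, 0), 2)
    cv2.imshow('ObjectTrack', frame2)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break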
