Adding a GUI to a Python script - python-3.x

I've tried to add a GUI (tkinter) to my script, but to no avail. If anyone can help me, I would be so grateful. I'm using Python 3.6 and, I think, the latest OpenCV.
I only started programming two weeks ago, so I'm kind of new to all this. Basically, I want to create a window that just picks an image from my folder and then runs it through the script, so that whenever I want to use another image I don't have to change the script. I hope that makes sense.
This is the script that I took from Chris Dahms on YouTube and managed to change to what I want.
import cv2
import numpy as np
import os

import DetectChars
import DetectPlates
import PossiblePlate

SCALAR_BLACK = (0.0, 0.0, 0.0)
SCALAR_WHITE = (255.0, 255.0, 255.0)
SCALAR_YELLOW = (0.0, 255.0, 255.0)
SCALAR_GREEN = (0.0, 255.0, 0.0)
SCALAR_CYAN = (255.0, 255.0, 0.0)
SCALAR_RED = (0.0, 0.0, 255.0)  # needed by drawRedRectangleAroundPlate below

showSteps = False

def main():
    blnKNNTrainingSuccessful = DetectChars.loadKNNDataAndTrainKNN()
    if not blnKNNTrainingSuccessful:
        print("\nerror: KNN training was not successful\n")
        return

    imgOriginalScene = cv2.imread("CAR/Malaysia/22.jpg")
    if imgOriginalScene is None:
        print("\nerror: image not read from file \n\n")
        os.system("pause")  # Windows-only pause
        return

    listOfPossiblePlates = DetectPlates.detectPlatesInScene(imgOriginalScene)
    listOfPossiblePlates = DetectChars.detectCharsInPlates(listOfPossiblePlates)

    cv2.imshow("imgOriginalScene", imgOriginalScene)

    if len(listOfPossiblePlates) == 0:
        print("\nno license plates were detected\n")
    else:
        # sort plates by number of recognized characters, most first, and take the best one
        listOfPossiblePlates.sort(key=lambda possiblePlate: len(possiblePlate.strChars), reverse=True)
        licPlate = listOfPossiblePlates[0]
        cv2.imshow("Image Plate", licPlate.imgPlate)
        cv2.imshow("Image Threshold", licPlate.imgThresh)

        if len(licPlate.strChars) == 0:
            print("\nno characters were detected\n\n")
            return

        drawRedRectangleAroundPlate(imgOriginalScene, licPlate)
        print("\nlicense plate read from image = " + licPlate.strChars + "\n")
        print("----------------------------------------")
        writeLicensePlateCharsOnImage(imgOriginalScene, licPlate)
        cv2.imshow("imgOriginalScene", imgOriginalScene)
        cv2.imwrite("imgOriginalScene.png", imgOriginalScene)

    cv2.waitKey(0)
    return

def drawRedRectangleAroundPlate(imgOriginalScene, licPlate):
    p2fRectPoints = cv2.boxPoints(licPlate.rrLocationOfPlateInScene)
    cv2.line(imgOriginalScene, tuple(p2fRectPoints[0]), tuple(p2fRectPoints[1]), SCALAR_RED, 2)
    cv2.line(imgOriginalScene, tuple(p2fRectPoints[1]), tuple(p2fRectPoints[2]), SCALAR_RED, 2)
    cv2.line(imgOriginalScene, tuple(p2fRectPoints[2]), tuple(p2fRectPoints[3]), SCALAR_RED, 2)
    cv2.line(imgOriginalScene, tuple(p2fRectPoints[3]), tuple(p2fRectPoints[0]), SCALAR_RED, 2)

def writeLicensePlateCharsOnImage(imgOriginalScene, licPlate):
    sceneHeight, sceneWidth, sceneNumChannels = imgOriginalScene.shape
    plateHeight, plateWidth, plateNumChannels = licPlate.imgPlate.shape

    intFontFace = cv2.FONT_HERSHEY_SIMPLEX
    fltFontScale = float(plateHeight) / 30.0
    intFontThickness = int(round(fltFontScale * 2))

    textSize, baseline = cv2.getTextSize(licPlate.strChars, intFontFace, fltFontScale, intFontThickness)

    ((intPlateCenterX, intPlateCenterY), (intPlateWidth, intPlateHeight), fltCorrectionAngleInDeg) = licPlate.rrLocationOfPlateInScene
    intPlateCenterX = int(intPlateCenterX)
    intPlateCenterY = int(intPlateCenterY)
    ptCenterOfTextAreaX = int(intPlateCenterX)

    # write the characters below the plate, unless the plate is in the lower quarter of the scene
    if intPlateCenterY < (sceneHeight * 0.75):
        ptCenterOfTextAreaY = int(round(intPlateCenterY)) + int(round(plateHeight * 1.6))
    else:
        ptCenterOfTextAreaY = int(round(intPlateCenterY)) - int(round(plateHeight * 1.6))

    textSizeWidth, textSizeHeight = textSize
    ptLowerLeftTextOriginX = int(ptCenterOfTextAreaX - (textSizeWidth / 2))
    ptLowerLeftTextOriginY = int(ptCenterOfTextAreaY + (textSizeHeight / 2))

    cv2.putText(imgOriginalScene, licPlate.strChars, (ptLowerLeftTextOriginX, ptLowerLeftTextOriginY), intFontFace, fltFontScale, SCALAR_CYAN, intFontThickness)

if __name__ == "__main__":
    main()
    cv2.waitKey()
    cv2.destroyAllWindows()
Pre-processing stage
# Preprocess.py
import cv2
import numpy as np
import math

# module level variables ##########################################################################
GAUSSIAN_SMOOTH_FILTER_SIZE = (5, 5)
ADAPTIVE_THRESH_BLOCK_SIZE = 19
ADAPTIVE_THRESH_WEIGHT = 9

def preprocess(imgOriginal):
    imgGrayscale = extractValue(imgOriginal)
    imgMaxContrastGrayscale = maximizeContrast(imgGrayscale)  # computed but not used below

    grayscaled = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2GRAY)
    # cv2.threshold returns (threshold value, thresholded image)
    thresholdValue, imgOtsu = cv2.threshold(grayscaled, 125, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    imgThresh = cv2.medianBlur(imgOtsu, 5)
    return imgGrayscale, imgThresh
# end function

def extractValue(imgOriginal):
    imgHSV = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2HSV)
    imgHue, imgSaturation, imgValue = cv2.split(imgHSV)
    return imgValue
# end function

def maximizeContrast(imgGrayscale):
    structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    imgTopHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_TOPHAT, structuringElement)
    imgBlackHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_BLACKHAT, structuringElement)
    imgGrayscalePlusTopHat = cv2.add(imgGrayscale, imgTopHat)
    imgGrayscalePlusTopHatMinusBlackHat = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)
    return imgGrayscalePlusTopHatMinusBlackHat
# end function
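
To sanity-check the pre-processing on its own, something like this should work; a minimal sketch, assuming Preprocess.py sits next to the script and reusing the same test image path as above:

import cv2
import Preprocess

img = cv2.imread("CAR/Malaysia/22.jpg")  # any BGR test image
if img is None:
    raise SystemExit("could not read the test image")
imgGrayscale, imgThresh = Preprocess.preprocess(img)
cv2.imshow("grayscale", imgGrayscale)
cv2.imshow("threshold", imgThresh)
cv2.waitKey(0)
cv2.destroyAllWindows()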

If all you are wanting is a window to select a file, then this should work. (Since you are on Python 3.6, the module names are tkinter and tkinter.filedialog; Tkinter/tkSimpleDialog/tkFileDialog are the Python 2 spellings.)
import tkinter as tk
from tkinter.filedialog import askopenfilename

master = tk.Tk()
master.withdraw()  # hide the empty root window; we only want the dialog
my_file = askopenfilename()
print(my_file)  # full path of the chosen file, or '' if cancelled
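
To wire that into the plate-recognition script above, you could replace the hard-coded cv2.imread path with a small helper along these lines; a sketch, assuming the DetectChars/DetectPlates modules from the question are importable (the helper name and the file-type filter are mine):

import tkinter as tk
from tkinter.filedialog import askopenfilename
import cv2

def pick_and_read_image():
    root = tk.Tk()
    root.withdraw()  # suppress the empty root window
    path = askopenfilename(title="Choose a car image",
                           filetypes=[("Images", "*.jpg *.jpeg *.png"), ("All files", "*.*")])
    root.destroy()
    if not path:  # user pressed Cancel
        return None
    return cv2.imread(path)

# then, inside main(), instead of the hard-coded path:
# imgOriginalScene = pick_and_read_image()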

I recommend Gtk3 for your GUI.
Here's a simple Gtk window with a button:
#!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

class Window(Gtk.Window):
    def __init__(self):
        Gtk.Window.__init__(self)
        self.connect('destroy', lambda q: Gtk.main_quit())
        button = Gtk.Button("Gtk.Button")
        button.connect("clicked", self.on_button_clicked)
        grid = Gtk.Grid()
        grid.attach(button, 0, 0, 1, 1)
        self.add(grid)
        self.show_all()

    def on_button_clicked(self, button):
        print("Gtk.Button was clicked")

w = Window()
Gtk.main()
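
Since the goal here is picking an image file, a Gtk.FileChooserDialog may be closer to what the question asks for; a minimal sketch (the function name and dialog title are mine):

#!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

def choose_image(parent=None):
    dialog = Gtk.FileChooserDialog(title="Pick a car image", parent=parent,
                                   action=Gtk.FileChooserAction.OPEN)
    dialog.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                       Gtk.STOCK_OPEN, Gtk.ResponseType.OK)
    response = dialog.run()
    path = dialog.get_filename() if response == Gtk.ResponseType.OK else None
    dialog.destroy()
    return path  # None if the user cancelled

if __name__ == '__main__':
    print(choose_image())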

Related

New FigureCanvasTkAgg always keeps a black border

I want to show some figures using tkinter, but a new FigureCanvasTkAgg always keeps a black border. For example, I want to build two figures with red borders, but the newest one gets a black border.
But when the display window is not active, the black border disappears. How can I solve this problem? Thank you!
Here's the code:
import tkinter as tk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure

class display_window:
    def __init__(self, width=1024, height=768):
        self.figure_dict = {}
        self.display_level = tk.Tk()
        screen_width = self.display_level.winfo_screenwidth()
        screen_height = self.display_level.winfo_screenheight()
        init_position_x = int((screen_width - width) / 2)
        init_position_y = int((screen_height - height) / 2)
        position = str(width) + 'x' + str(height) + '+' + str(init_position_x) + '+' + str(init_position_y)
        self.display_level.geometry(position)
        self.x_offset = 120
        self.y_offset = 10
        self.figures_interval = 10
        new_figure_button = tk.Button(self.display_level, text='new figure', command=self.new_figure_callback)
        new_figure_button.place(x=5, y=5)

    def new_figure_callback(self):
        fig = Figure(figsize=(3, 2), dpi=100)
        fig_plot = fig.add_subplot(111)
        fig_plot.grid()
        figure_canvas = FigureCanvasTkAgg(fig, self.display_level)
        figure_widget = figure_canvas.get_tk_widget()
        figure_widget.config(highlightthickness=2, highlightbackground="red", cursor='cross')
        self.figure_dict[figure_widget] = {
            "fig": fig,
            "fig_plot": fig_plot,
            "figure_canvas": figure_canvas,
        }
        self.arrange_figures(self.x_offset, self.y_offset, self.figures_interval)

    def arrange_figures(self, x_offset, y_offset, figures_interval):
        figures_area_width = self.display_level.winfo_width() - x_offset - figures_interval
        figures_area_height = self.display_level.winfo_height() - y_offset - figures_interval
        figure_count = len(self.figure_dict)
        figure_width = figures_area_width
        figure_height = (figures_area_height - figures_interval * (figure_count - 1)) / figure_count
        for i, it in enumerate(self.figure_dict.keys()):
            it.config(height=figure_height, width=figure_width)
            it.place(x=x_offset, y=y_offset + i * (figure_height + figures_interval))

if __name__ == '__main__':
    display_window()
    tk.mainloop()
I want all the figures' borders to display as set in the config call above.
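
The black border is most likely Tk's focus highlight: highlightbackground only controls the border while the widget does not have keyboard focus, while the separate highlightcolor option (black by default) is used while it does, and the newest canvas grabs focus; that would also explain why the black border vanishes when the window is inactive. A sketch of the config call above with both options set (assuming that is indeed the cause):

figure_widget.config(highlightthickness=2,
                     highlightbackground="red",  # border color without focus
                     highlightcolor="red",       # border color with focus
                     cursor='cross')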

Using OpenCV to open the webcam and take a picture with it every five seconds

I tried to use the webcam and take pictures every 5 seconds via OpenCV, but the camera itself didn't work and kept raising the error below.
I also tried changing the index in cv2.VideoCapture() to -1 and 1, but that didn't work either.
This is the error:
[ WARN:0] global /io/opencv/modules/videoio/src/cap_v4l.cpp (802) open VIDEOIO ERROR: V4L: can't open camera by index 0
Traceback (most recent call last):
  File "webcam_detect.py", line 176, in <module>
    raise IOError("Cannot open webcam")
OSError: Cannot open webcam
import colorsys
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import time
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import image_preporcess

class YOLO(object):
    _defaults = {
        #"model_path": 'logs/trained_weights_final.h5',
        "model_path": 'model_data/yolo_weights.h5',
        "anchors_path": 'model_data/yolo_anchors.txt',
        "classes_path": 'model_data/coco_classes.txt',
        "score": 0.3,
        "iou": 0.45,
        "model_image_size": (416, 416),
        "text_size": 1,
    }

    @classmethod
    def get_defaults(cls, n):
        if n in cls._defaults:
            return cls._defaults[n]
        else:
            return "Unrecognized attribute name '" + n + "'"

    def __init__(self, **kwargs):
        self.__dict__.update(self._defaults)  # set up default values
        self.__dict__.update(kwargs)  # and update with user overrides
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()
        self.sess = K.get_session()
        self.boxes, self.scores, self.classes = self.generate()

    def _get_class(self):
        classes_path = os.path.expanduser(self.classes_path)
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names

    def _get_anchors(self):
        anchors_path = os.path.expanduser(self.anchors_path)
        with open(anchors_path) as f:
            anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        return np.array(anchors).reshape(-1, 2)

    def generate(self):
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
        # Load model, or construct model and load weights.
        num_anchors = len(self.anchors)
        num_classes = len(self.class_names)
        is_tiny_version = num_anchors == 6  # default setting
        try:
            self.yolo_model = load_model(model_path, compile=False)
        except:
            self.yolo_model = tiny_yolo_body(Input(shape=(None, None, 3)), num_anchors//2, num_classes) \
                if is_tiny_version else yolo_body(Input(shape=(None, None, 3)), num_anchors//3, num_classes)
            self.yolo_model.load_weights(self.model_path)  # make sure model, anchors and classes match
        else:
            assert self.yolo_model.layers[-1].output_shape[-1] == \
                num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
                'Mismatch between model and given anchor and class sizes'
        print('{} model, anchors, and classes loaded.'.format(model_path))

        # Generate colors for drawing bounding boxes.
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.

        # Generate output tensor targets for filtered bounding boxes.
        self.input_image_shape = K.placeholder(shape=(2, ))
        boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                           len(self.class_names), self.input_image_shape,
                                           score_threshold=self.score, iou_threshold=self.iou)
        return boxes, scores, classes

    def detect_image(self, image):
        if self.model_image_size != (None, None):
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = image_preporcess(np.copy(image), tuple(reversed(self.model_image_size)))
            image_data = boxed_image

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.shape[0], image.shape[1]],  # [image.size[1], image.size[0]]
                K.learning_phase(): 0
            })

        #print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
        thickness = (image.shape[0] + image.shape[1]) // 600
        fontScale = 1
        ObjectsList = []
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]
            label = '{} {:.2f}'.format(predicted_class, score)
            #label = '{}'.format(predicted_class)
            scores = '{:.2f}'.format(score)
            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.shape[0], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.shape[1], np.floor(right + 0.5).astype('int32'))
            mid_h = (bottom - top)/2 + top
            mid_v = (right - left)/2 + left
            # put object rectangle
            cv2.rectangle(image, (left, top), (right, bottom), self.colors[c], thickness)
            # get text size
            (test_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, thickness/self.text_size, 1)
            # put text rectangle
            cv2.rectangle(image, (left, top), (left + test_width, top - text_height - baseline), self.colors[c], thickness=cv2.FILLED)
            # put text above rectangle
            cv2.putText(image, label, (left, top - 2), cv2.FONT_HERSHEY_SIMPLEX, thickness/self.text_size, (0, 0, 0), 1)
            # add everything to list
            ObjectsList.append([top, left, bottom, right, mid_v, mid_h, label, scores])
        return image, ObjectsList

    def close_session(self):
        self.sess.close()

    def detect_img(self, image):
        #image = cv2.imread(image, cv2.IMREAD_COLOR)
        original_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        original_image_color = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
        r_image, ObjectsList = self.detect_image(original_image_color)
        return r_image, ObjectsList

if __name__ == "__main__":
    yolo = YOLO()
    # we create the video capture object cap
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise IOError("Cannot open webcam")
    cap.set(3, 640)  # frame width
    cap.set(4, 480)  # frame height
    img_counter = 0
    frame_set = []
    start_time = time.time()
    while True:
        ret, frame = cap.read()
        frame = cv2.resize(frame, None, fx=1.0, fy=1.0,
                           interpolation=cv2.INTER_AREA)
        r_image, ObjectsList = yolo.detect_img(frame)
        cv2.imshow('Web cam input', r_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if time.time() - start_time >= 5:  # <---- check if 5 sec passed
            img_name = "opencv_frame_{}.png".format(img_counter)
            cv2.imwrite(img_name, frame)
            print("{} written!".format(img_counter))
            start_time = time.time()
            img_counter += 1
    cap.release()
    cv2.destroyAllWindows()
    yolo.close_session()
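
Since the V4L error says no camera can be opened at index 0, the webcam itself (or its driver/permissions) is the first thing to check; it can also help to probe a few device indices before giving up. A small sketch of that idea (the helper name and the range of indices tried are my choices):

import cv2

def find_working_camera(max_index=4):
    # try index 0 .. max_index-1 and return the first one that opens
    # and actually delivers a frame
    for index in range(max_index):
        cap = cv2.VideoCapture(index)
        if cap.isOpened():
            ret, frame = cap.read()
            cap.release()
            if ret:
                return index
    return None

print(find_working_camera())  # None means no usable camera was found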

OpenCV Python HoughLines transformation: get the rectangle points to crop the original image

Is there any way to get the rectangle corner points from the HoughLines transformation results and apply them as crop points to the original image, to get the cropped image? I have copied the code from the documentation. The idea is to extract the document from an image. Below is the result of the HoughLines transformation, and I need the intersection points to crop the image.
"""
#file hough_lines.py
#brief This program demonstrates line finding with the Hough transform
"""
import sys
import math
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
def main(argv=[]):
default_file = "/Users/apple/Downloads/Unknown-4"
filename = argv[0] if len(argv) > 0 else default_file
# Loads an image
src = cv.imread(cv.samples.findFile(filename), cv.IMREAD_GRAYSCALE)
# Check if image is loaded fine
if src is None:
print ('Error opening image!')
print ('Usage: hough_lines.py [image_name -- default ' + default_file + '] \n')
return -1
dst = cv.Canny(src, 50, 200, None, 3)
# Copy edges to the images that will display the results in BGR
cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR)
cdstP = np.copy(cdst)
lines = cv.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0)
if lines is not None:
for i in range(0, len(lines)):
rho = lines[i][0][0]
theta = lines[i][0][1]
a = math.cos(theta)
b = math.sin(theta)
x0 = a * rho
y0 = b * rho
pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a)))
pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a)))
cv.line(cdst, pt1, pt2, (0,0,255), 3, cv.LINE_AA)
linesP = cv.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10)
if linesP is not None:
for i in range(0, len(linesP)):
l = linesP[i][0]
cv.line(cdstP, (l[0], l[1]), (l[2], l[3]), (0,0,255), 3, cv.LINE_AA)
#cv.imshow("Source", src)
#plt.imshow(src)
plt.imshow(cdstP)
#plt.imshow(cdstP)
if __name__ == "__main__":
main()
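
Regarding the actual question: each (rho, theta) pair returned by cv.HoughLines describes the line x*cos(theta) + y*sin(theta) = rho, so each corner you want is the solution of a 2x2 linear system for a pair of lines. A sketch of that idea (the helper name and the choice of which lines to intersect are mine, not part of the tutorial):

import numpy as np

def intersection(line1, line2):
    # each line is a (rho, theta) pair; solve
    #   [cos t1  sin t1] [x]   [r1]
    #   [cos t2  sin t2] [y] = [r2]
    rho1, theta1 = line1
    rho2, theta2 = line2
    A = np.array([[np.cos(theta1), np.sin(theta1)],
                  [np.cos(theta2), np.sin(theta2)]])
    b = np.array([rho1, rho2])
    if abs(np.linalg.det(A)) < 1e-10:
        return None  # (nearly) parallel lines do not intersect
    x, y = np.linalg.solve(A, b)
    return int(round(x)), int(round(y))

# with lines from cv.HoughLines (shape (N, 1, 2)), intersect the roughly
# horizontal document edges with the roughly vertical ones, then crop:
# corners = [intersection(l1[0], l2[0]) for l1 in horizontal for l2 in vertical]
# xs = [p[0] for p in corners if p]; ys = [p[1] for p in corners if p]
# cropped = src[min(ys):max(ys), min(xs):max(xs)]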

How to change the opacity of chosen scatter plot points

I want to create an interactive scatter plot, so the user can select points with the cursor; the chosen points are highlighted and the rest are faded.
Right now it only works if the color is changed. How can I change the opacity and keep the original colors?
import numpy as np
from numpy.random import rand
from matplotlib.widgets import LassoSelector
from matplotlib.path import Path
import matplotlib.pyplot as plt

class SelectFromCollection(object):
    def __init__(self, ax, collection, c, alpha_other=0.3):
        self.canvas = ax.figure.canvas
        self.collection = collection
        self.alpha_other = alpha_other
        self.xys = collection.get_offsets()
        self.Npts = len(self.xys)
        self.c = c
        # Ensure that we have separate colors for each object
        self.fc = collection.get_facecolors()
        if len(self.fc) == 0:
            raise ValueError('Collection must have a facecolor')
        elif len(self.fc) == 1:
            self.fc = np.tile(self.fc, (self.Npts, 1))
        self.lasso = LassoSelector(ax, onselect=self.onselect)
        self.ind = []

    def onselect(self, verts):
        path = Path(verts)
        self.ind = np.nonzero(path.contains_points(self.xys))[0]
        self.fc[:, -1] = self.alpha_other
        self.fc[self.ind, -1] = 1
        self.collection.set_facecolors(self.fc)
        self.canvas.draw_idle()

    def disconnect(self):
        self.lasso.disconnect_events()
        self.fc[:, -1] = 1
        self.collection.set_facecolors(self.fc)
        self.canvas.draw_idle()

np.random.seed(1)
x, y, c = rand(3, 100)
subplot_kw = dict(xlim=(0, 1), ylim=(0, 1), autoscale_on=False)
fig, ax = plt.subplots(subplot_kw=subplot_kw)
pts = ax.scatter(x, y, c=c, s=100)
selector = SelectFromCollection(ax, pts, c)
plt.show()
Solved: I used self.collection.get_facecolors() to get the RGBA array, then changed the alpha value (column index 3) for the chosen indices, like this:
fc = self.collection.get_facecolors()
others = np.setdiff1d(np.arange(len(fc)), self.ind)  # the points outside the lasso
fc[self.ind, 3] = 1
fc[others, 3] = self.alpha_other
self.collection.set_facecolors(fc)
cheers

How to get the length of eyes and mouth using dlib

I am working on a yawn-detection project; I am using dlib and OpenCV to detect the face and landmarks on a video.
I want to get the length (opening) of the eyes and mouth.
This is what I have done so far:
import sys
import os
import dlib
import glob
from skimage import io
import cv2
import time

if len(sys.argv) != 3:
    print("Usage: python this_script.py shape_predictor_path faces_folder_path")
    exit()
predictor_path = sys.argv[1]
faces_folder_path = sys.argv[2]
vidcap = cv2.VideoCapture('video.avi')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
win = dlib.image_window()
while vidcap.isOpened():
    success, image = vidcap.read()
    if success:
        win.clear_overlay()
        win.set_image(image)
        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(image, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))
            # Get the landmarks/parts for the face in box d.
            shape = predictor(image, d)
            print(shape)
            print("Part 0: {}, Part 1: {}, Part 2: {} ...".format(shape.part(0), shape.part(1), shape.part(2)))
            # Draw the face landmarks on the screen.
            win.add_overlay(shape)
        win.add_overlay(dets)
        time.sleep(0.01)
    else:
        break  # stop when the video ends
cv2.destroyAllWindows()
vidcap.release()
Please help me work out how to get the length of the open eyes and mouth, using the standard 68-point landmark layout.
(The referenced figure of the 68 dlib facial landmark points is not shown.)
import Paths
import globals
from globals import ClassifierFiles
import numpy as np
import cv2
import time
import dlib
import math
import eyeCoordinates
import mouthCoordinates
from globals import Threshold
from globals import yawnFolder
import os
import openface

VIDEO_PATHS = []

def readVideo(video):
    global no, yes
    video_capture = cv2.VideoCapture(video)
    detector = dlib.get_frontal_face_detector()  # Face detector
    predictor = dlib.shape_predictor(ClassifierFiles.shapePredicter)  # Landmark identifier
    face_aligner = openface.AlignDlib(ClassifierFiles.shapePredicter)
    u = 0
    while True:
        ret, frame = video_capture.read()
        if frame is not None:  # "frame != None" raises an error with numpy arrays
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
            # clahe_image = clahe.apply(gray)
            detections = detector(frame, 1)  # Detect the faces in the image
            for k, d in enumerate(detections):  # For each detected face
                shape = predictor(frame, d)  # Get coordinates
                vec = np.empty([68, 2], dtype=int)
                coor = []
                # note: starting at 1 means coor[35] holds landmark point 36;
                # the eye/mouth helpers below rely on that offset
                for i in range(1, 68):  # There are 68 landmark points on each face
                    #cv2.circle(frame, (shape.part(i).x, shape.part(i).y), 1, (0,0,255), thickness=1)
                    coor.append([shape.part(i).x, shape.part(i).y])
                    vec[i][0] = shape.part(i).x
                    vec[i][1] = shape.part(i).y
                # RightEye and LeftEye coordinates
                rightEye = eyeCoordinates.distanceRightEye(coor)
                leftEye = eyeCoordinates.distanceLeftEye(coor)
                eyes = (rightEye + leftEye)/2
                # Mouth coordinates
                mouth = mouthCoordinates.distanceBetweenMouth(coor)
                print(eyes, mouth)
                # prints both eyes' average distance and the mouth distance
                break  # only handle the first detected face per frame
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

if __name__ == '__main__':
    VIDEO_PATHS = Paths.videosPaths()
    readVideo('v.avi')  # test video of faces
eyeCoordinates File
import distanceFormulaCalculator

def distanceRightEye(c):
    # c was built starting from landmark 1, so c[35] is landmark point 36, etc.
    eR_37 = c[36]
    eR_38 = c[37]
    eR_40 = c[39]
    eR_41 = c[40]
    x1 = distanceFormulaCalculator.distanceFormula(eR_37, eR_41)
    x2 = distanceFormulaCalculator.distanceFormula(eR_38, eR_40)
    return (x1 + x2) / 2

def distanceLeftEye(c):
    eL_43 = c[42]
    eL_44 = c[43]
    eL_46 = c[45]
    eL_47 = c[46]
    x1 = distanceFormulaCalculator.distanceFormula(eL_43, eL_47)
    x2 = distanceFormulaCalculator.distanceFormula(eL_44, eL_46)
    return (x1 + x2) / 2

def eyePoints():
    return [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]
Mouth Coordinates File
import distanceFormulaCalculator

def distanceBetweenMouth(c):
    # vertical distances across the inner mouth (landmark points 61-67,
    # offset by one in c as above)
    m_61 = c[60]
    m_62 = c[61]
    m_63 = c[62]
    m_65 = c[64]
    m_66 = c[65]
    m_67 = c[66]
    x1 = distanceFormulaCalculator.distanceFormula(m_61, m_67)
    x2 = distanceFormulaCalculator.distanceFormula(m_62, m_66)
    x3 = distanceFormulaCalculator.distanceFormula(m_63, m_65)
    return (x1 + x2 + x3) / 3

def mouthPoints():
    return [60, 61, 62, 63, 64, 65, 66, 67]
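
distanceFormulaCalculator isn't shown in the post; presumably it is just the Euclidean distance between two (x, y) landmark points. A minimal sketch of what it would contain (an assumption, since the file isn't included):

# distanceFormulaCalculator.py (assumed implementation)
import math

def distanceFormula(p1, p2):
    # Euclidean distance between two (x, y) points
    return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)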
