OpenCV3 matrix transform on KeyPoint fails - python-3.5

I'm using Python 3.5.2 and OpenCV 3.1.0. I'm trying to warp some keypoints from a query image with a transformation matrix I generated with cv2.getAffineTransform() (see the code below). Whatever I pass to the transform function, it always throws this error:
cv2.error: D:\opencv\sources\modules\core\src\matmul.cpp:1947: error: (-215) scn == m.cols || scn + 1 == m.cols in function cv::transform
How do I have to pass the keypoints to make cv2.transform() work?
import cv2
import numpy as np
import random
queryImage_path = "C:\tmp\query.jpg"
trainImage_path = "C:\tmp\train.jpg"
queryImage = cv2.imread(queryImage_path, cv2.IMREAD_COLOR)
trainImage = cv2.imread(trainImage_path, cv2.IMREAD_COLOR)
surf = cv2.xfeatures2d.SURF_create()
queryImage_keypoints = surf.detect(queryImage,None)
trainImage_keypoints = surf.detect(trainImage, None)
queryImage_keypoints, queryImage_descriptors = surf.compute(queryImage, queryImage_keypoints)
trainImage_keypoints, trainImage_descriptors = surf.compute(trainImage, trainImage_keypoints)
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
matches = bf.match(queryImage_descriptors, trainImage_descriptors)
# get three random match indices which are not the same
match_index_a = random.randint(0, len(matches) - 1)
match_index_b = random.randint(0, len(matches) - 1)
match_index_c = random.randint(0, len(matches) - 1)
# get Keypoints from match indices
# queryImage- keypoints
queryImage_keypoint_a = queryImage_keypoints[matches[match_index_a].queryIdx]
queryImage_keypoint_b = queryImage_keypoints[matches[match_index_b].queryIdx]
queryImage_keypoint_c = queryImage_keypoints[matches[match_index_c].queryIdx]
# trainImage-keypoints
trainImage_keypoint_a = trainImage_keypoints[matches[match_index_a].trainIdx]
trainImage_keypoint_b = trainImage_keypoints[matches[match_index_b].trainIdx]
trainImage_keypoint_c = trainImage_keypoints[matches[match_index_c].trainIdx]
# get affine transformation matrix from these 6 keypoints
trainImage_points = np.float32([[trainImage_keypoint_a.pt[0], trainImage_keypoint_a.pt[1]],
[trainImage_keypoint_b.pt[0], trainImage_keypoint_b.pt[1]],
[trainImage_keypoint_c.pt[0], trainImage_keypoint_c.pt[1]]])
queryImage_points = np.float32([[queryImage_keypoint_a.pt[0], queryImage_keypoint_a.pt[1]],
[queryImage_keypoint_b.pt[0], queryImage_keypoint_b.pt[1]],
[queryImage_keypoint_c.pt[0], queryImage_keypoint_c.pt[1]]])
# get transformation matrix for current points
currentMatrix = cv2.getAffineTransform(queryImage_points, trainImage_points)
queryImage_keypoint = queryImage_keypoints[matches[0].queryIdx]
keypoint_asArray = np.array([[queryImage_keypoint.pt[0]], [queryImage_keypoint.pt[1]], [1]])
#queryImage_warped_keypoint = currentMatrix.dot(keypoint_asArray)
queryImage_warped_keypoint = cv2.transform(keypoint_asArray,currentMatrix)

Use
keypoint_asArray = np.array([[[queryImage_keypoint.pt[0], queryImage_keypoint.pt[1], 1]]])
instead of
keypoint_asArray = np.array([[queryImage_keypoint.pt[0]], [queryImage_keypoint.pt[1]], [1]])
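cv2.transform treats the last axis of the input array as the "channel" dimension, and the assertion scn == m.cols || scn + 1 == m.cols requires that channel count to match the 2x3 affine matrix: either 3 channels ([x, y, 1]) or 2 channels ([x, y]). The original array has shape (3, 1), i.e. a single channel, which is why it fails. As a rough, untested sketch of warping all matched query keypoints at once (reusing the variable names from the question):
query_pts = np.float32([queryImage_keypoints[m.queryIdx].pt for m in matches])
query_pts = query_pts.reshape(-1, 1, 2)               # shape (N, 1, 2): two channels per point
warped_pts = cv2.transform(query_pts, currentMatrix)  # result also has shape (N, 1, 2)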

Related

module 'cv2.cv2' has no attribute 'xfeatures2d' (OpenCV 3.4.2.17)

Hello, can someone help me resolve this error?
I am working on an image stitching project and have installed OpenCV and opencv-contrib version 3.4.2.17, but I am still getting this error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-18-243a893afb39> in <module>
3 (result, matched_points) = panaroma.image_stitch([images[0], images[1]], match_status=True)
4 else:
----> 5 (result, matched_points) = panaroma.image_stitch([images[no_of_images-2], images[no_of_images-1]], match_status=True)
6 for i in range(no_of_images - 2):
7 (result, matched_points) = panaroma.image_stitch([images[no_of_images-i-3],result], match_status=True)
<ipython-input-14-2f5bd4f316a7> in image_stitch(self, images, lowe_ratio, max_Threshold, match_status)
5 #detect the features and keypoints from SIFT
6 (imageB, imageA) = images
----> 7 (KeypointsA, features_of_A) = self.Detect_Feature_And_KeyPoints(imageA)
8 (KeypointsB, features_of_B) = self.Detect_Feature_And_KeyPoints(imageB)
9
<ipython-input-14-2f5bd4f316a7> in Detect_Feature_And_KeyPoints(self, image)
37
38 # detect and extract features from the image
---> 39 descriptors = cv2.xfeatures2d.SIFT_create()
40 (Keypoints, features) = descriptors.detectAndCompute(image, None)
41
AttributeError: module 'cv2.cv2' has no attribute 'xfeatures2d'
while running this block of code:
panaroma = Panaroma()
if no_of_images == 2:
    (result, matched_points) = panaroma.image_stitch([images[0], images[1]], match_status=True)
else:
    (result, matched_points) = panaroma.image_stitch([images[no_of_images-2], images[no_of_images-1]], match_status=True)
    for i in range(no_of_images - 2):
        (result, matched_points) = panaroma.image_stitch([images[no_of_images-i-3], result], match_status=True)
The class is this:
class Panaroma:
    def image_stitch(self, images, lowe_ratio=0.75, max_Threshold=4.0, match_status=False):
        # detect the features and keypoints from SIFT
        (imageB, imageA) = images
        (KeypointsA, features_of_A) = self.Detect_Feature_And_KeyPoints(imageA)
        (KeypointsB, features_of_B) = self.Detect_Feature_And_KeyPoints(imageB)
        # got the valid matched points
        Values = self.matchKeypoints(KeypointsA, KeypointsB, features_of_A, features_of_B, lowe_ratio, max_Threshold)
        if Values is None:
            return None
        # to get perspective of image using computed homography
        (matches, Homography, status) = Values
        result_image = self.getwarp_perspective(imageA, imageB, Homography)
        result_image[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
        # check to see if the keypoint matches should be visualized
        if match_status:
            vis = self.draw_Matches(imageA, imageB, KeypointsA, KeypointsB, matches, status)
            return (result_image, vis)
        return result_image

    def getwarp_perspective(self, imageA, imageB, Homography):
        val = imageA.shape[1] + imageB.shape[1]
        result_image = cv2.warpPerspective(imageA, Homography, (val, imageA.shape[0]))
        return result_image

    def Detect_Feature_And_KeyPoints(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # detect and extract features from the image
        descriptors = cv2.xfeatures2d.SIFT_create()
        (Keypoints, features) = descriptors.detectAndCompute(image, None)
        Keypoints = np.float32([i.pt for i in Keypoints])
        return (Keypoints, features)

    def get_Allpossible_Match(self, featuresA, featuresB):
        # compute all matches using euclidean distance; opencv provides the
        # DescriptorMatcher_create() function for that
        match_instance = cv2.DescriptorMatcher_create("BruteForce")
        All_Matches = match_instance.knnMatch(featuresA, featuresB, 2)
        return All_Matches

    def All_validmatches(self, AllMatches, lowe_ratio):
        # to get all valid matches according to lowe concept..
        valid_matches = []
        for val in AllMatches:
            if len(val) == 2 and val[0].distance < val[1].distance * lowe_ratio:
                valid_matches.append((val[0].trainIdx, val[0].queryIdx))
        return valid_matches

    def Compute_Homography(self, pointsA, pointsB, max_Threshold):
        # to compute homography using points in both images
        (H, status) = cv2.findHomography(pointsA, pointsB, cv2.RANSAC, max_Threshold)
        return (H, status)

    def matchKeypoints(self, KeypointsA, KeypointsB, featuresA, featuresB, lowe_ratio, max_Threshold):
        AllMatches = self.get_Allpossible_Match(featuresA, featuresB)
        valid_matches = self.All_validmatches(AllMatches, lowe_ratio)
        if len(valid_matches) > 4:
            # construct the two sets of points
            pointsA = np.float32([KeypointsA[i] for (_, i) in valid_matches])
            pointsB = np.float32([KeypointsB[i] for (i, _) in valid_matches])
            (Homograpgy, status) = self.Compute_Homography(pointsA, pointsB, max_Threshold)
            return (valid_matches, Homograpgy, status)
        else:
            return None

    def get_image_dimension(self, image):
        (h, w) = image.shape[:2]
        return (h, w)

    def get_points(self, imageA, imageB):
        (hA, wA) = self.get_image_dimension(imageA)
        (hB, wB) = self.get_image_dimension(imageB)
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB
        return vis

    def draw_Matches(self, imageA, imageB, KeypointsA, KeypointsB, matches, status):
        (hA, wA) = self.get_image_dimension(imageA)
        vis = self.get_points(imageA, imageB)
        # loop over the matches
        for ((trainIdx, queryIdx), s) in zip(matches, status):
            if s == 1:
                ptA = (int(KeypointsA[queryIdx][0]), int(KeypointsA[queryIdx][1]))
                ptB = (int(KeypointsB[trainIdx][0]) + wA, int(KeypointsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
        return vis
Note: I am using opencv version 3.4.2.17 and opencv-contrib 3.4.2.17 in a Kaggle notebook. The link to my notebook is:
https://www.kaggle.com/deepzsenu/image-stitching/
Thank you.
Hi everyone, I have solved the above error.
Don't downgrade your OpenCV package directly. First use
!pip uninstall opencv-python -y
then install only the OpenCV contrib package with
!pip install -U opencv-contrib-python==3.4.2.17
Alternatively, just use cv2.SIFT_create(); try that out.
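For background: in OpenCV 4.4.0 and later the SIFT patent has expired and SIFT_create lives in the main cv2 module, while older contrib builds expose it under cv2.xfeatures2d. A small, untested sketch of Detect_Feature_And_KeyPoints that works in either case:
try:
    descriptors = cv2.SIFT_create()              # OpenCV >= 4.4: SIFT is in the main module
except AttributeError:
    descriptors = cv2.xfeatures2d.SIFT_create()  # older builds with opencv-contrib installed
(Keypoints, features) = descriptors.detectAndCompute(image, None)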

3D-reconstruction using structure-from-motion

I want to do 3D reconstruction using a structure-from-motion algorithm. I am using OpenCV to do this in Python, but somehow the obtained point cloud breaks into two halves. My input images are:
Image 1
Image 2
Image 3.
I am matching every two consecutive images, i.e. image 1 with image 2 and image 2 with image 3. I tried different feature detectors such as SIFT, KAZE and SURF. With the obtained points I compute the essential matrix. I got the camera intrinsics from OpenCV's camera calibration; they are stored in the variables 'mtx' and 'dist' in the code below.
file = os.listdir('Path_to _images')
file.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
path = os.path.join(os.getcwd(), 'Path_to_images/')
for i in range(0, len(file)-1):
    if i == len(file) - 1:
        break
    path1 = cv2.imread(path + file[i], 0)
    path1 = cv2.equalizeHist(path1)
    path2 = cv2.imread(path + file[i+1], 0)
    path2 = cv2.equalizeHist(path2)

    # Feature Detection #
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(path1, None)
    kp2, des2 = sift.detectAndCompute(path2, None)

    # Feature Matching #
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    good = []
    pts1 = []
    pts2 = []
    for j, (m, n) in enumerate(matches):
        if m.distance < 0.8*n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    pts1 = np.array([pts1], dtype=np.float32)
    pts2 = np.array([pts2], dtype=np.float32)

    # UNDISTORTING POINTS #
    pts1_norm = cv2.undistortPoints(pts1, mtx, dist)
    pts2_norm = cv2.undistortPoints(pts2, mtx, dist)

    # COMPUTE FUNDAMENTAL MATRIX #
    F, mask = cv2.findFundamentalMat(pts1_norm, pts2_norm, cv2.FM_LMEDS)

    # COMPUTE ESSENTIAL MATRIX #
    E, mask = cv2.findEssentialMat(pts1_norm, pts2_norm, focal=55.474, pp=(33.516, 16.630), method=cv2.FM_LMEDS, prob=0.999, threshold=3.0)

    # POSE RECOVERY #
    points, R, t, mask = cv2.recoverPose(E, pts1_norm, pts2_norm)
    anglesBetweenImages = rotationMatrixToEulerAngles(R)
    sys.stdout = open('path_to_folder/angles.txt', 'a')
    print(anglesBetweenImages)

    # COMPOSE PROJECTION MATRIX OF R, t #
    matrix_1 = np.hstack((R, t))
    matrix_2 = np.hstack((np.eye(3, 3), np.zeros((3, 1))))
    projMat_1 = np.dot(mtx, matrix_1)
    projMat_2 = np.dot(mtx, matrix_2)

    # TRIANGULATE POINTS #
    point_4d_hom = cv2.triangulatePoints(projMat_1[:3], projMat_2[:3], pts1[:2].T, pts2[:2].T)

    # HOMOGENIZE THE 4D RESULT TO 3D #
    point_4d = point_4d_hom
    point_3d = point_4d[:3, :].T  # Obtains 3D points
    np.savetxt('/path_to_folder/' + file[i] + '.txt', point_3d)
After cv2.triangulatePoints, I expected to obtain one point cloud, but the result I got has two surfaces, as shown in the image below.
Result 1.
I would really appreciate it if anyone can tell me what is going wrong with my algorithm. Thanks!
You need to do this iteratively, like this:
cv::Mat pointsMat1(2, 1, CV_64F);
cv::Mat pointsMat2(2, 1, CV_64F);
int size0 = m_history.getHistorySize();
for (int i = 0; i < size0; i++) {
    cv::Point pt1 = m_history.getOriginalPoint(0, i);
    cv::Point pt2 = m_history.getOriginalPoint(1, i);
    pointsMat1.at<double>(0, 0) = pt1.x;
    pointsMat1.at<double>(1, 0) = pt1.y;
    pointsMat2.at<double>(0, 0) = pt2.x;
    pointsMat2.at<double>(1, 0) = pt2.y;
    cv::Mat pnts3D(4, 1, CV_64F);
    cv::triangulatePoints(m_projectionMat1, m_projectionMat2, pointsMat1, pointsMat2, pnts3D);
}
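A roughly equivalent per-point loop in Python, reusing the question's projMat_1, projMat_2, pts1 and pts2 names, might look like the sketch below; it is untested and illustrates the per-correspondence triangulation idea rather than being a drop-in fix:
# Triangulate one correspondence at a time (sketch, assuming pts1/pts2 hold Nx2 point arrays
# and projMat_1/projMat_2 are the 3x4 projection matrices from the question):
points_3d = []
for p1, p2 in zip(pts1.reshape(-1, 2), pts2.reshape(-1, 2)):
    p1 = np.asarray(p1, dtype=np.float64).reshape(2, 1)
    p2 = np.asarray(p2, dtype=np.float64).reshape(2, 1)
    X_hom = cv2.triangulatePoints(projMat_1, projMat_2, p1, p2)  # 4x1 homogeneous point
    points_3d.append((X_hom[:3] / X_hom[3]).ravel())             # divide by w to homogenize
points_3d = np.array(points_3d)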

image dilation with python

I'm trying to execute a piece of code I found online and it is giving me the following error.
I'm new to opencv so please help me.
error:
<ipython-input-1-7fe9c579ec14> in image_masking(filepath)
15 gray = cv2.imread(filepath,0)
16 edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
---> 17 edges = cv2.dilate(edges,None)
18 edges = cv2.erode(edges, None)
19
error: OpenCV(3.4.1) C:\Miniconda3\conda-bld\opencv-suite_1533128839831\work\modules\core\src\matrix.cpp:760: error: (-215) dims <= 2 && step[0] > 0 in function cv::Mat::locateROI
code:
import cv2
import numpy as np
def image_masking(filepath):
    BLUR = 21
    CANNY_THRESH_1 = 100
    CANNY_THRESH_2 = 100
    MASK_DILATE_ITER = 10
    MASK_ERODE_ITER = 10
    MASK_COLOR = (0.0, 0.0, 0.0)  # In BGR format

    gray = cv2.imread(filepath, 0)
    edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
    edges = cv2.dilate(edges, None)
    edges = cv2.erode(edges, None)

    contour_info = []
    _, contours, __ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    for c in contours:
        contour_info.append((c, cv2.isContourConvex(c), cv2.contourArea(c),))
    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
    max_contour = contour_info[0]

    for c in contour_info:
        cv2.fillConvexPoly(mask, c[0], (255))
    mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)
    mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)
    mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)
    mask_stack = np.dstack([mask]*3)

    mask_stack = mask_stack.astype('float32') / 255.0
    img = img.astype('float32') / 255.0
    masked = (mask_stack * img) + ((1 - mask_stack) * MASK_COLOR)
    masked = (masked * 255).astype('uint8')

    fileName, fileExtension = filepath.split('.')
    fileName += '-masked.'
    filepath = fileName + fileExtension
    print(filepath)
    cv2.imwrite(filepath, masked)

if __name__ == '__main__':
    filepath = 'C:\\Users\\HP\\Downloads\\test3.jpg'
    image_masking(filepath)
I tried replacing None in the dilate function with a kernel, but it gives me the same error.
The second argument to cv2.dilate and cv2.erode should be the kernel with which you want to perform the dilation/erosion, as shown in the docs: opencv documentation
For example, you can try to do it like that:
kernel = np.ones((3, 3), np.uint8)
edges = cv2.dilate(edges, kernel)
edges = cv2.erode(edges, kernel)
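If you want a kernel with a particular shape rather than a plain all-ones block, cv2.getStructuringElement is another option (a small illustrative extra, not part of the original answer):
# Elliptical 5x5 structuring element; cv2.MORPH_RECT and cv2.MORPH_CROSS also exist.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
edges = cv2.dilate(edges, kernel, iterations=1)
edges = cv2.erode(edges, kernel, iterations=1)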
Good luck with further opencv exploration!

samples.cols == var_count && samples.type() == 5 in function 'cv::ml::SVMImpl::predict' error on svm.predict method

I'm creating an object classifier in OpenCV Python using an SVM. The training dataset consists of 200 positive and 200 negative images. For the positive images I cropped the target object from each of the 200 images and resized the crops to (64, 128) for HOG calculation. For the negative images I first created an image pyramid, then applied a 64x128 sliding window, and then calculated HOG for the positive images as well as for all windows of the negative images, with labels 1 and 0 respectively. I trained the SVM model on the HOG features.
I am getting the error "cv2.error: OpenCV(3.4.2) C:\projects\opencv-python\opencv\modules\ml\src\svm.cpp:2010: error: (-215:Assertion failed) samples.cols == var_count && samples.type() == 5 in function 'cv::ml::SVMImpl::predict'" when I call the predict function with res = svm.predict(samples[0]).ravel().
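That assertion generally means SVM::predict expects a 2-D float32 (CV_32F) array whose column count equals the number of features used for training, so a single sample has to be passed as one row. A rough, untested sketch of that reshape, reusing the question's variable names:
# Reshape a single HOG vector to a (1, n_features) float32 row before predicting
sample = samples[0].reshape(1, -1).astype(np.float32)
res = svm.predict(sample)[1].ravel()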
import cv2
import os
import time
import numpy as np
import imutils

positive_path = 'C:\\Users\\Admin\\3D Objects\\datqaet with hog and svm\\ROI images'
negative_path = 'C:\\Users\\Admin\\3D Objects\\datqaet with hog and svm\\Negative images'

def pyramid(img):  # Create image pyramid
    minSize = (30, 30)
    imgarr = []
    while True:
        scale = 2
        imgarr.append(img)
        w = int(img.shape[1] / scale)
        img = imutils.resize(img, width=w)
        if img.shape[0] < minSize[1] or img.shape[1] < minSize[0]:
            break
    return imgarr

def sliding_window(image, stepSize, windowSize):  # Sliding window for negative images
    sliding = []
    for y in range(0, image.shape[0], stepSize):
        for x in range(0, image.shape[1], stepSize):
            sliding.append((x, y, image[y:y + windowSize[1], x:x + windowSize[0]]))
    return sliding

def get_hog():
    winSize = (64, 128)
    blockSize = (16, 16)
    blockStride = (16, 16)
    cellSize = (8, 8)
    nbins = 9
    derivAperture = 1
    winSigma = 4.
    histogramNormType = 0
    L2HysThreshold = 0.2
    gammaCorrection = 0
    nlevels = 64
    signedGradient = True
    hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins, derivAperture, winSigma, histogramNormType, L2HysThreshold, gammaCorrection, nlevels, signedGradient)
    return hog

samples = []
labels = []
sam = []
hog = get_hog()

for filename in os.listdir(positive_path):
    img = cv2.imread(os.path.join(positive_path, filename), 0)  # grayscale image
    img = cv2.resize(img, (64, 128))
    img = np.array(img)
    hist = hog.compute(img)
    hist = cv2.normalize(hist, None)
    sam.append(img)
    samples.append(hist)
    labels.append(1)

i = 0
for filename in os.listdir(negative_path):
    img = cv2.imread(os.path.join(negative_path, filename), 0)
    (winW, winH) = (64, 128)
    pyr = pyramid(img)
    for resized in pyr:
        sliding = sliding_window(resized, stepSize=32, windowSize=(winW, winH))
        for (x, y, window) in sliding:
            if window.shape[0] != winH or window.shape[1] != winW:
                continue
            hist = hog.compute(window)
            hist = cv2.normalize(hist, None)
            sam.append(window)
            samples.append(hist)
            labels.append(0)
    print(i)
    i = i + 1

samples = np.array(samples, dtype=np.float32)
labels = np.array(labels, dtype=int)
samples = np.squeeze(samples)
print(len(samples))
print(samples.shape)

rand = np.random.RandomState(10)
shuffle = rand.permutation(len(samples))
sam = samples[shuffle]
samples = sam[shuffle]
labels = labels[shuffle]

svm = cv2.ml.SVM_create()
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setType(cv2.ml.SVM_C_SVC)
svm.setC(2.67)
svm.setGamma(5.383)
svm_params = dict(kernel_type=cv2.ml.SVM_LINEAR,
                  svm_type=cv2.ml.SVM_C_SVC,
                  C=2.67, gamma=5.383)
svm.train(samples, cv2.ml.ROW_SAMPLE, labels)
print("trained")
res = svm.predict(samples[0]).ravel()
print(res)

cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    img = cv2.resize(img, (400, 400))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    (winW, winH) = (64, 128)
    pyr = pyramid(img)
    for resized in pyr:
        sliding = sliding_window(resized, stepSize=32, windowSize=(winW, winH))
        for (x, y, window) in sliding:
            if window.shape[0] != winH or window.shape[1] != winW:
                continue
            hist = hog.compute(window)
            hist = cv2.normalize(hist, None)
            hist = np.reshape(hist, (1, hist.shape[0]))
            res = svm.predict(hist)[1].ravel()
            if res == 1:
                print("found")
    cv2.imshow('img', img)
    cv2.waitKey(10)

how to get the length of eyes and mouth using dlib

I am working on a yawn detection project. I am using dlib and OpenCV to detect the face and landmarks in a video.
I want to get the length of the eyes and the mouth.
This is what I have done till now:
import sys
import os
import dlib
import glob
from skimage import io
import cv2
import time

if len(sys.argv) != 3:
    print("")
    exit()

predictor_path = sys.argv[1]
faces_folder_path = sys.argv[2]
vidcap = cv2.VideoCapture('video.avi')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
win = dlib.image_window()

while vidcap.isOpened():
    success, image = vidcap.read()
    if success:
        win.clear_overlay()
        win.set_image(image)
        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(image, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))
            # Get the landmarks/parts for the face in box d.
            shape = predictor(image, d)
            print(shape)
            print("Part 0: {}, Part 1: {}, Part 2: {} ...".format(shape.part(0), shape.part(1), shape.part(2)))
            # Draw the face landmarks on the screen.
            win.add_overlay(shape)
        win.add_overlay(dets)
    time.sleep(0.01)

cv2.destroyAllWindows()
vidcap.release()
Please help me: how do I get the length of the open eyes and mouth?
From this figure (the standard 68-point facial landmark layout) you can use the landmark indices directly:
import Paths
import globals
from globals import ClassifierFiles
import numpy as np
import cv2
import time
import dlib
import math
import eyeCoordinates
import mouthCoordinates
from globals import Threshold
from globals import yawnFolder
import os
import openface

VIDEO_PATHS = []
readVideo('v.avi')  # test video of faces

def readVideo(video):
    global no, yes
    video_capture = cv2.VideoCapture(video)
    detector = dlib.get_frontal_face_detector()  # Face detector
    predictor = dlib.shape_predictor(ClassifierFiles.shapePredicter)  # Landmark identifier
    face_aligner = openface.AlignDlib(ClassifierFiles.shapePredicter)
    u = 0
    while True:
        ret, frame = video_capture.read()
        if frame != None:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
            # clahe_image = clahe.apply(gray)
            detections = detector(frame, 1)  # Detect the faces in the image
            for k, d in enumerate(detections):  # For each detected face
                shape = predictor(frame, d)  # Get coordinates
                vec = np.empty([68, 2], dtype=int)
                coor = []
                for i in range(1, 68):  # There are 68 landmark points on each face
                    # cv2.circle(frame, (shape.part(i).x, shape.part(i).y), 1, (0,0,255), thickness=1)
                    coor.append([shape.part(i).x, shape.part(i).y])
                    vec[i][0] = shape.part(i).x
                    vec[i][1] = shape.part(i).y
                # RightEye and LeftEye coordinates
                rightEye = eyeCoordinates.distanceRightEye(coor)
                leftEye = eyeCoordinates.distanceLeftEye(coor)
                eyes = (rightEye + leftEye)/2
                # Mouth coordinates
                mouth = mouthCoordinates.distanceBetweenMouth(coor)
                print(eyes, mouth)
                # prints both eyes average distance
                # prints mouth distance
                break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

if __name__ == '__main__':
    VIDEO_PATHS = Paths.videosPaths()
    init()
eyeCoordinates File
import distanceFormulaCalculator

def distanceRightEye(c):
    eR_36, eR_37, eR_38, eR_39, eR_40, eR_41 = 0, 0, 0, 0, 0, 0
    eR_36 = c[35]
    eR_37 = c[36]
    eR_38 = c[37]
    eR_39 = c[38]
    eR_40 = c[39]
    eR_41 = c[40]
    x1 = distanceFormulaCalculator.distanceFormula(eR_37, eR_41)
    x2 = distanceFormulaCalculator.distanceFormula(eR_38, eR_40)
    return ((x1 + x2) / 2)

def distanceLeftEye(c):
    eL_42, eL_43, eL_44, eL_45, eL_46, eL_47 = 0, 0, 0, 0, 0, 0
    eL_42 = c[41]
    eL_43 = c[42]
    eL_44 = c[43]
    eL_45 = c[44]
    eL_46 = c[45]
    eL_47 = c[46]
    x1 = distanceFormulaCalculator.distanceFormula(eL_43, eL_47)
    x2 = distanceFormulaCalculator.distanceFormula(eL_44, eL_46)
    return ((x1 + x2) / 2)

def eyePoints():
    return [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]
Mouth Coordinates File
import distanceFormulaCalculator

def distanceBetweenMouth(c):
    m_60, m_61, m_62, m_63, m_64, m_65, m_66, m_67 = 0, 0, 0, 0, 0, 0, 0, 0
    m_60 = c[59]
    m_61 = c[60]
    m_62 = c[61]
    m_63 = c[62]
    m_64 = c[63]
    m_65 = c[64]
    m_66 = c[65]
    m_67 = c[66]
    x1 = distanceFormulaCalculator.distanceFormula(m_61, m_67)
    x2 = distanceFormulaCalculator.distanceFormula(m_62, m_66)
    x3 = distanceFormulaCalculator.distanceFormula(m_63, m_65)
    return ((x1 + x2 + x3) / 3)

def mouthPoints():
    return [60, 61, 62, 63, 64, 65, 66, 67]
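The distanceFormulaCalculator module is not shown above; presumably distanceFormula is just the Euclidean distance between two (x, y) landmark points. A minimal sketch, assuming that interpretation:
import math

def distanceFormula(p1, p2):
    # Euclidean distance between two (x, y) landmark points
    return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)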
