Facial landmarks in my YOLO face detection - python-3.x

I want to use facial landmarks to align the faces extracted by my face detection code (using YOLOv3).
This is my code:
import argparse
import sys
import os
import cv2           # explicit imports (also pulled in via utils, but clearer here)
import numpy as np
from utils import *
#####################################################################
parser = argparse.ArgumentParser()
parser.add_argument('--model-cfg', type=str, default='./cfg/yolov3-face.cfg',
                    help='path to config file')
parser.add_argument('--model-weights', type=str,
                    default='./model-weights/yolov3-wider_16000.weights',
                    help='path to weights of model')
parser.add_argument('--image', type=str, default='',
                    help='path to image file')
parser.add_argument('--video', type=str, default='',
                    help='path to video file')
parser.add_argument('--src', type=int, default=0,
                    help='source of the camera')
parser.add_argument('--output-dir', type=str, default='outputs/',
                    help='path to the output directory')
args = parser.parse_args()
#####################################################################
# print the arguments
print('----- info -----')
print('[i] The config file: ', args.model_cfg)
print('[i] The weights of model file: ', args.model_weights)
print('[i] Path to image file: ', args.image)
print('[i] Path to video file: ', args.video)
print('###########################################################\n')

# check outputs directory
if not os.path.exists(args.output_dir):
    print('==> Creating the {} directory...'.format(args.output_dir))
    os.makedirs(args.output_dir)
else:
    print('==> Skipping create the {} directory...'.format(args.output_dir))

# Give the configuration and weight files for the model and load the network
# using them.
net = cv2.dnn.readNetFromDarknet(args.model_cfg, args.model_weights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)


def _main():
    wind_name = 'face detection using YOLOv3'
    cv2.namedWindow(wind_name, cv2.WINDOW_NORMAL)
    output_file = ''
    if args.image:
        if not os.path.isfile(args.image):
            print("[!] ==> Input image file {} doesn't exist".format(args.image))
            sys.exit(1)
        cap = cv2.VideoCapture(args.image)
        output_file = args.image[:-4].rsplit('/')[-1] + '_yoloface.jpg'
    elif args.video:
        if not os.path.isfile(args.video):
            print("[!] ==> Input video file {} doesn't exist".format(args.video))
            sys.exit(1)
        cap = cv2.VideoCapture(args.video)
        output_file = args.video[:-4].rsplit('/')[-1] + '_yoloface.avi'
    else:
        # Get data from the camera
        cap = cv2.VideoCapture(args.src)

    # Get the video writer initialized to save the output video
    if not args.image:
        video_writer = cv2.VideoWriter(os.path.join(args.output_dir, output_file),
                                       cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                       cap.get(cv2.CAP_PROP_FPS),
                                       (round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                                        round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))

    while True:
        has_frame, frame = cap.read()
        # Stop the program if reached end of video
        if not has_frame:
            print('[i] ==> Done processing!!!')
            print('[i] ==> Output file is stored at',
                  os.path.join(args.output_dir, output_file))
            cv2.waitKey(1000)
            break

        # Create a 4D blob from a frame.
        blob = cv2.dnn.blobFromImage(frame, 1 / 255, (IMG_WIDTH, IMG_HEIGHT),
                                     [0, 0, 0], 1, crop=False)

        # Sets the input to the network
        net.setInput(blob)

        # Runs the forward pass to get output of the output layers
        outs = net.forward(get_outputs_names(net))

        # Remove the bounding boxes with low confidence
        faces = post_process(frame, outs, CONF_THRESHOLD, NMS_THRESHOLD)
        print('[i] ==> # detected faces: {}'.format(len(faces)))
        print('#' * 60)

        # initialize the set of information we'll be displaying on the frame
        info = [
            ('number of faces detected', '{}'.format(len(faces)))
        ]
        for (i, (txt, val)) in enumerate(info):
            text = '{}: {}'.format(txt, val)
            cv2.putText(frame, text, (10, (i * 20) + 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, COLOR_RED, 2)

        # Save the output image/video to file
        if args.image:
            cv2.imwrite(os.path.join(args.output_dir, output_file),
                        frame.astype(np.uint8))
        else:
            video_writer.write(frame.astype(np.uint8))

        cv2.imshow(wind_name, frame)
        key = cv2.waitKey(1)
        if key == 27 or key == ord('q'):
            print('[i] ==> Interrupted by user!')
            break

    cap.release()
    cv2.destroyAllWindows()
    print('==> All done!')
    print('***********************************************************')


if __name__ == '__main__':
    _main()
My problem is that I want to use facial landmarks to align the faces extracted by the yoloface detector, but I don't know how to add facial landmarking to my code.
Please help me.
Thanks.

You can use dlib and imutils for that. The model can be downloaded here: http://dlib.net/files/
import dlib
from imutils import face_utils
import cv2

shape_predictor = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')
fa = face_utils.facealigner.FaceAligner(shape_predictor, desiredLeftEye=(0.2, 0.2))

# 'frame' is your image and 'box' one (left, top, right, bottom) face box
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aligned_face = fa.align(frame, gray, dlib.rectangle(left=box[0], top=box[1], right=box[2], bottom=box[3]))
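To wire this into the YOLO loop above, here is a minimal sketch, assuming (as in the yoloface repo) that post_process returns each face box as [left, top, width, height]; adjust the conversion if your boxes differ:

# Hedged sketch: align every face detected in the current frame.
# dlib.rectangle wants integer (left, top, right, bottom) corners.
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aligned_faces = []
for (left, top, w, h) in faces:
    rect = dlib.rectangle(left=int(left), top=int(top),
                          right=int(left + w), bottom=int(top + h))
    aligned_faces.append(fa.align(frame, gray, rect))

Each entry of aligned_faces is then a rotated, cropped face image you can pass on to a recognizer.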

Related

How to load an image dataset in python with a manually defined MasterImage class?

I have a folder, Dataset, with Train and Test subfolders containing images.
For example, in the Train folder I have three subfolders, Cat, Car and Man, each containing about 1700 images.
I am using the class definition below to load the images in a format suitable for a deep learning model.
It gives me the wrong shape, shape=(0, 200, 200, 3), dtype=float64, for my train images, while I expect shape=(1700, 200, 200, 3), dtype=float64.
My code:
import os
import pickle
import numpy as np

path_train = "C:\\Users\\me\\Jupiter_Notebooks\\Dataset\\Train"
path_test = "C:\\Users\\me\\Jupiter_Notebooks\\Dataset\\Test"

# defining the class
class MasterImage(object):

    def __init__(self, PATH='', IMAGE_SIZE=100):
        self.PATH = PATH
        self.IMAGE_SIZE = IMAGE_SIZE
        self.image_data = []
        self.x_data = []
        self.y_data = []
        self.CATEGORIES = []
        # This will get List of categories
        self.list_categories = []

    def get_categories(self):
        for path in os.listdir(self.PATH):
            if '.DS_Store' in path:
                pass
            else:
                self.list_categories.append(path)
        print("Found Categories ", self.list_categories, '\n')
        return self.list_categories

    def Process_Image(self):
        try:
            """
            Return Numpy array of image
            :return: X_Data, Y_Data
            """
            self.CATEGORIES = self.get_categories()
            for categories in self.CATEGORIES:                               # Iterate over categories
                train_folder_path = os.path.join(self.PATH, categories)      # Folder Path
                class_index = self.CATEGORIES.index(categories)              # this will get index for classification
                for img in os.listdir(train_folder_path):                    # This will iterate in the Folder
                    new_path = os.path.join(train_folder_path, img)          # image Path
                    try:  # if any image is corrupted
                        image_data_temp = cv2.imread(new_path)               # Read Image as numbers
                        image_temp_resize = cv2.resize(image_data_temp, (self.IMAGE_SIZE, self.IMAGE_SIZE))
                        self.image_data.append([image_temp_resize, class_index])
                        random.shuffle(self.image_data)
                    except:
                        pass
            data = np.asanyarray(self.image_data)
            # Iterate over the Data
            for x in data:
                self.x_data.append(x[0])  # Get the X_Data
                self.y_data.append(x[1])  # get the label
            X_Data = np.asarray(self.x_data) / (255.0)  # Normalize Data
            Y_Data = np.asarray(self.y_data)
            # reshape x_Data
            X_Data = X_Data.reshape(-1, self.IMAGE_SIZE, self.IMAGE_SIZE, 3)
            return X_Data, Y_Data
        except:
            print("Failed to run Function Process Image ")

    def pickle_image(self):
        """
        :return: None Creates a Pickle Object of DataSet
        """
        # Call the Function and Get the Data
        X_Data, Y_Data = self.Process_Image()
        # Write the Entire Data into a Pickle File
        pickle_out = open('X_Data', 'wb')
        pickle.dump(X_Data, pickle_out)
        pickle_out.close()
        # Write the Y Label Data
        pickle_out = open('Y_Data', 'wb')
        pickle.dump(Y_Data, pickle_out)
        pickle_out.close()
        print("Pickled Image Successfully ")
        return X_Data, Y_Data

    def load_dataset(self):
        try:
            # Read the Data from Pickle Object
            X_Temp = open('X_Data', 'rb')
            X_Data = pickle.load(X_Temp)
            Y_Temp = open('Y_Data', 'rb')
            Y_Data = pickle.load(Y_Temp)
            print('Reading Dataset from Pickle Object')
            return X_Data, Y_Data
        except:
            print('Could not Found Pickle File ')
            print('Loading File and Dataset ..........')
            X_Data, Y_Data = self.pickle_image()
            return X_Data, Y_Data

# loading data
dstrain = MasterImage(PATH=path_train, IMAGE_SIZE=100)
dstrain
(train_images, train_labels) = dstrain.load_dataset()
print('Train: X_images=%s, y_labels=%s' % (train_images.shape, train_labels.shape))
Train: X_images=(0, 100, 100, 3), y_labels=(0,)
What can I change to get the correct shape? Thank you very much.
My data is similar to the training set from here:
https://github.com/soumilshah1995/Smart-Library-to-load-image-Dataset-for-Convolution-Neural-Network-Tensorflow-Keras-/tree/master/training_set
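A likely cause, offered only as a guess since just this snippet is shown: Process_Image calls cv2.imread, cv2.resize and random.shuffle, yet the snippet never imports cv2 or random, and the bare except: blocks silently swallow the resulting NameError for every single image, leaving image_data empty and X_Data with a zero first dimension. A minimal sketch of the change:

import os
import pickle
import random   # needed by random.shuffle in Process_Image
import cv2      # needed by cv2.imread / cv2.resize
import numpy as np

Narrowing the bare except: clauses to except Exception as e: print(e) while debugging would also surface such errors instead of silently returning an empty array.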

JPEG parameter struct mismatch: library thinks size is 584, caller expects 728 for python3 in Jetson

I am trying to run the darknet_video.py script for YOLO from here on Jetson boards (Nano and Xavier NX). The code runs fine on one Nano but not on another Nano or on the NX. The script is run using the following command on Ubuntu 18.04 (JetPack):
python3 darknet_video.py --input test.mp4 --out_filename out1.txt --weights yolov3-tiny.weights --ext_output --config_file yolov3-tiny.cfg --data_file coco.data --thresh 0.2
I am getting the following errors:
JPEG parameter struct mismatch: library thinks size is 584, caller expects 728
pure virtual method called
terminate called without an active exception
Aborted (core dumped)
Since it runs fine on one Nano, it could be a dependency issue. Here is the code in darknet_video.py:
from ctypes import *
import random
import os
import cv2
import time
import darknet
import argparse
from threading import Thread, enumerate
from queue import Queue


def parser():
    parser = argparse.ArgumentParser(description="YOLO Object Detection")
    parser.add_argument("--input", type=str, default=0,
                        help="video source. If empty, uses webcam 0 stream")
    parser.add_argument("--out_filename", type=str, default="",
                        help="inference video name. Not saved if empty")
    parser.add_argument("--weights", default="yolov4.weights",
                        help="yolo weights path")
    parser.add_argument("--dont_show", action='store_true',
                        help="window inference display. For headless systems")
    parser.add_argument("--ext_output", action='store_true',
                        help="display bbox coordinates of detected objects")
    parser.add_argument("--config_file", default="./cfg/yolov4.cfg",
                        help="path to config file")
    parser.add_argument("--data_file", default="./cfg/coco.data",
                        help="path to data file")
    parser.add_argument("--thresh", type=float, default=.25,
                        help="remove detections with confidence below this value")
    return parser.parse_args()


def str2int(video_path):
    """
    argparse returns a string although webcam uses int (0, 1 ...).
    Cast to int if needed.
    """
    try:
        return int(video_path)
    except ValueError:
        return video_path


def check_arguments_errors(args):
    assert 0 < args.thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
    if not os.path.exists(args.config_file):
        raise(ValueError("Invalid config path {}".format(os.path.abspath(args.config_file))))
    if not os.path.exists(args.weights):
        raise(ValueError("Invalid weight path {}".format(os.path.abspath(args.weights))))
    if not os.path.exists(args.data_file):
        raise(ValueError("Invalid data file path {}".format(os.path.abspath(args.data_file))))
    if str2int(args.input) == str and not os.path.exists(args.input):
        raise(ValueError("Invalid video path {}".format(os.path.abspath(args.input))))


def set_saved_video(input_video, output_video, size):
    fourcc = cv2.VideoWriter_fourcc(*"MJPG")
    fps = int(input_video.get(cv2.CAP_PROP_FPS))
    video = cv2.VideoWriter(output_video, fourcc, fps, size)
    return video


def video_capture(frame_queue, darknet_image_queue):
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb, (width, height),
                                   interpolation=cv2.INTER_LINEAR)
        frame_queue.put(frame_resized)
        img_for_detect = darknet.make_image(width, height, 3)
        darknet.copy_image_from_bytes(img_for_detect, frame_resized.tobytes())
        darknet_image_queue.put(img_for_detect)
    cap.release()


def inference(darknet_image_queue, detections_queue, fps_queue):
    while cap.isOpened():
        darknet_image = darknet_image_queue.get()
        prev_time = time.time()
        detections = darknet.detect_image(network, class_names, darknet_image, thresh=args.thresh)
        detections_queue.put(detections)
        fps = int(1/(time.time() - prev_time))
        fps_queue.put(fps)
        print("FPS: {}".format(fps))
        darknet.print_detections(detections, args.ext_output)
        darknet.free_image(darknet_image)
    cap.release()


def drawing(frame_queue, detections_queue, fps_queue):
    random.seed(3)  # deterministic bbox colors
    video = set_saved_video(cap, args.out_filename, (width, height))
    while cap.isOpened():
        frame_resized = frame_queue.get()
        detections = detections_queue.get()
        fps = fps_queue.get()
        if frame_resized is not None:
            image = darknet.draw_boxes(detections, frame_resized, class_colors)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if args.out_filename is not None:
                video.write(image)
            if not args.dont_show:
                cv2.imshow('Inference', image)
            if cv2.waitKey(fps) == 27:
                break
    cap.release()
    video.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    frame_queue = Queue()
    darknet_image_queue = Queue(maxsize=1)
    detections_queue = Queue(maxsize=1)
    fps_queue = Queue(maxsize=1)
    args = parser()
    check_arguments_errors(args)
    network, class_names, class_colors = darknet.load_network(
        args.config_file,
        args.data_file,
        args.weights,
        batch_size=1
    )
    width = darknet.network_width(network)
    height = darknet.network_height(network)
    input_path = str2int(args.input)
    cap = cv2.VideoCapture(input_path)
    Thread(target=video_capture, args=(frame_queue, darknet_image_queue)).start()
    Thread(target=inference, args=(darknet_image_queue, detections_queue, fps_queue)).start()
    Thread(target=drawing, args=(frame_queue, detections_queue, fps_queue)).start()
Any ideas would be appreciated.
JPEG parameter struct mismatch: library thinks size is 584, caller expects 728
This is about the jpeglib.h header that is used by the app and by the low-level library.
The app was compiled against one version of jpeglib.h and the low-level library against a different one, so the struct in question (here j_decompress_ptr) has a different size in the two headers.
Make sure that the low-level lib (could be libjpeg-8b) and its client are using the same jpeglib.h.
Remove all the installed libjpeg packages, install only the latest one, and try again.
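As a hedged debugging aid (not a fix), you can also check which JPEG library each Jetson's OpenCV build was linked against and compare the two machines:

import cv2

# Print the JPEG-related lines of OpenCV's build configuration.
for line in cv2.getBuildInformation().splitlines():
    if 'JPEG' in line:
        print(line)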

'bool' object not iterable

I am working with Python 3 and OpenCV 3.4, and I am using Microsoft Azure's Face API function CF.face.detect().
As far as I know, a for loop needs an iterable object to run on, such as a list, and a plain boolean is not iterable. Yet although 'res1' is a list, I get this error:
TypeError: 'bool' object not iterable
Please help, thanks in advance.
Here is the code:
import unittest
import cognitive_face as CF
from PIL import Image, ImageFont, ImageDraw
import time
import cv2
from time import strftime

CF.Key.set('')
#print(CF.Key.get())
CF.BaseUrl.set('https://southeastasia.api.cognitive.microsoft.com/face/v1.0/')
#print(CF.BaseUrl.get())

"""Setup Person and Person Group related data."""
person_group_id = ''  # id from training terminal

"""Unittest for `face.detect`."""
cap = cv2.VideoCapture('1.mp4')
while cap.isOpened():
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    print("\n\n ##########.... LOOKING FOR FACES ....########## \n\n")
    res1 = []
    print(type(res1))
    res1 = CF.face.detect(cap)
    print('\n This is the res1: ', res1)
    c = len(res1)
    print('\nTOTAL FACES FOUND:', c)
    detect_id = []  # error was here so put exception
    for i in range(c):
        print("\n\n ##########.... DETECTING FACES ....########## \n\n")
        print('\n This is i in range c', i, c)
        detect_id.append(res1[i]['faceId'])
        #print('\n\n detected faces id ', detect_id[i])
        width = res1[i]['faceRectangle']['width']
        height = res1[i]['faceRectangle']['height']
        x = res1[i]['faceRectangle']['left']
        y = res1[i]['faceRectangle']['top']
    ################## IF ENDS #########################################################################
    cv2.imshow('image', img)
    k = cv2.waitKey(100) & 0xff
    if k == 27:
        break
################ WHILE ENDS ####################################
cap.release()
cv2.destroyAllWindows()
@Jonasz is right, you should be detecting faces on images, meaning on frames from your mp4 file.
The method CF.face.detect expects a URI, so in the following code we'll write each frame to disk before passing it on to CF.face.detect:
cap = cv2.VideoCapture('1.mp4')
count = 0  # <--
while cap.isOpened():
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    filename = "frame%d.jpg" % count  # <--
    cv2.imwrite(filename, img)        # <--
    count += 1                        # <--
    print("\n\n ##########.... LOOKING FOR FACES ....########## \n\n")
    res1 = []
    print(type(res1))
    res1 = CF.face.detect(filename)   # <--
Shouldn't you use CF.face.detect on your captured image, not on the cap variable?
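Either way, a defensive check makes the original TypeError easier to catch: verify that the detect call actually returned a list before calling len() on it or indexing it. A small hedged sketch reusing the names from the question:

# Guard: CF.face.detect should return a list of face dicts; anything else
# (e.g. a bool resulting from a bad argument) means the call failed.
res1 = CF.face.detect(filename)
if not isinstance(res1, list):
    print('Unexpected detect() result:', res1)
else:
    for face in res1:
        print(face['faceId'], face['faceRectangle'])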

Combining face detection and face training in OpenCV Python

I want to combine these two pieces of code so they run as one: when I run save_detected_face.py, the code saves faces to jpg files as they are detected by the OpenCV CascadeClassifier; after the faces are saved as jpg files, train_save_face.py should run automatically once the detection process is finished.
Here is my sample code: save_detected_face.py
import os
import urllib.request
import cv2
import numpy as np
from PIL import Image

path = 'dataset'
recognizer = cv2.face.LBPHFaceRecognizer_create()

def main():
    #cap = cv2.VideoCapture("../index.htm?clientIpAddr=192.168.1.12&IsRemote=0")
    a = 0
    cap = "http://192.168.1.43:8080/shot.jpg"
    id = input('enter user id: ')
    faceDetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    sampleNum = 0
    while True:
        a = a + 1
        imgResp = urllib.request.urlopen(cap)
        imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
        img = cv2.imdecode(imgNp, -1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceDetect.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            sampleNum = sampleNum + 1
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # roi_gray = gray[y:y+h, x:x+w]
            roi_color = img[y:y+h, x:x+w]
            eyes = eye_cascade.detectMultiScale(roi_color)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
            cv2.imwrite("dataSet/User." + str(id) + "." + str(sampleNum) + ".jpg", gray[y:y+h, x:x+w])
        cv2.imshow('frame', img)

        # Training code
        def getImagesWithID(path):
            imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
            faces = []
            IDs = []
            for imagePath in imagePaths:
                faceImg = Image.open(imagePath).convert('L')
                faceNp = np.array(faceImg, 'uint8')
                ID = int(os.path.split(imagePath)[-1].split('.')[1])
                faces.append(faceNp)
                IDs.append(ID)
                cv2.imshow('training', faceNp)
                cv2.waitKey(10)
            return np.array(IDs), faces

        Ids, faces = getImagesWithID(path)
        recognizer.train(faces, Ids)
        recognizer.save('recognizer/trainingData.yml')

        if cv2.waitKey(100) & 0xff == ord('q'):
            break
        elif sampleNum > 20:
            break
    exit(0)
#

if __name__ == '__main__':
    main()
Code for train_save_face.py:
import os
import cv2
import numpy as np
from PIL import Image

recognizer = cv2.face.LBPHFaceRecognizer_create()
path = 'dataset'

def getImagesWithID(path):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faces = []
    IDs = []
    for imagePath in imagePaths:
        faceImg = Image.open(imagePath).convert('L')
        faceNp = np.array(faceImg, 'uint8')
        ID = int(os.path.split(imagePath)[-1].split('.')[1])
        faces.append(faceNp)
        IDs.append(ID)
        cv2.imshow('training', faceNp)
        cv2.waitKey(10)
    return np.array(IDs), faces

Ids, faces = getImagesWithID(path)
recognizer.train(faces, Ids)
recognizer.save('recognizer/trainingData.yml')
exit(0)
print('done training')
save_detected_face.py works fine on its own, but when I want to train on the faces it does not work.
Although I wasn't sure whether this is correct, it's running now:
# -*- coding: cp1252 -*-
import os
import urllib.request
import cv2
import numpy as np
from PIL import Image

recognizer = cv2.face.LBPHFaceRecognizer_create()
path = 'dataset'

def main():
    #cap = cv2.VideoCapture("../index.htm?clientIpAddr=192.168.1.12&IsRemote=0")
    a = 0
    cap = "http://192.168.1.43:8080/shot.jpg"
    id = input('enter user id: ')
    faceDetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    sampleNum = 0
    while True:
        a = a + 1
        imgResp = urllib.request.urlopen(cap)
        imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
        img = cv2.imdecode(imgNp, -1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceDetect.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            sampleNum = sampleNum + 1
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # roi_gray = gray[y:y+h, x:x+w]
            roi_color = img[y:y+h, x:x+w]
            eyes = eye_cascade.detectMultiScale(roi_color)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
            cv2.imwrite("dataSet/User." + str(id) + "." + str(sampleNum) + ".jpg", gray[y:y+h, x:x+w])
        cv2.imshow('frame', img)
        if cv2.waitKey(100) & 0xff == ord('q'):
            break
        elif sampleNum > 20:
            break
    exit(0)

def getImagesWithID(path):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faces = []
    IDs = []
    for imagePath in imagePaths:
        faceImg = Image.open(imagePath).convert('L')
        faceNp = np.array(faceImg, 'uint8')
        ID = int(os.path.split(imagePath)[-1].split('.')[1])
        faces.append(faceNp)
        IDs.append(ID)
        cv2.imshow('training', faceNp)
        cv2.waitKey(10)
    return np.array(IDs), faces

Ids, faces = getImagesWithID(path)
recognizer.train(faces, Ids)
recognizer.save('recognizer/trainingData.yml')
exit(0)
print('done training')
#

if __name__ == '__main__':
    main()
    getImagesWithID(path)
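If the goal is simply "train automatically once sampling is finished", a hedged restructuring sketch (reusing the names above, and assuming the module-level training lines and the exit(0) calls are removed) is to call the training step explicitly after main() returns:

def train(path='dataset'):
    # Train the LBPH recognizer on the saved samples, then persist it.
    ids, faces = getImagesWithID(path)
    recognizer.train(faces, ids)
    recognizer.save('recognizer/trainingData.yml')
    print('done training')

if __name__ == '__main__':
    main()       # capture and save face samples first
    train(path)  # then train once detection has finished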

Error with OpenCV2

I am new to coding. Using this script:
from PIL import Image
from PIL.ImageChops import subtract
import numpy, math, time, glob, sys, os, logging, requests, random

def GreenScreen(infile, inbg, outfile='output.png', keyColor=None, tolerance=None):
    """
    http://gc-films.com/chromakey.html
    http://www.cs.utah.edu/~michael/chroma/
    :param infile: Greenscreen image location
    :param inbg: Background image location
    :param outfile: Output file location
    :param keyColor: greenscreen color; it can be any singular color
    :param tolerance: tolerance of cleaning
    :return:
    """
    if not keyColor:
        keyColor = [151, 44, 21]  # Y, Cb, and Cr values of the greenscreen
    if not tolerance:
        tolerance = [100, 130]  # Allowed Distance from Values
    # open files
    inDataFG = Image.open('/home/leonardo/Scrivania/in/KVfnt.png').convert('YCbCr')
    Path = '/home/leonardo/Scrivania/background/'
    FullPath = os.path.join(Path, random.choice(os.listdir(Path)))
    BG = Image.open(FullPath).convert('RGB')
    [Y_key, Cb_key, Cr_key] = keyColor
    [tola, tolb] = tolerance
    (x, y) = inDataFG.size  # get dimensions
    foreground = numpy.array(inDataFG.getdata())  # make array from image
    maskgen = numpy.vectorize(colorclose)  # vectorize masking function
    alphaMask = maskgen(foreground[:, 1], foreground[:, 2], Cb_key, Cr_key, tola, tolb)  # generate mask
    alphaMask.shape = (y, x)  # make mask dimensions of original image
    imMask = Image.fromarray(numpy.uint8(alphaMask))  # convert array to image
    invertMask = Image.fromarray(numpy.uint8(255 - 255 * (alphaMask / 255)))  # create inverted mask with extremes
    # create images for color mask
    colorMask = Image.new('RGB', (x, y), tuple([0, 0, 0]))
    allgreen = Image.new('YCbCr', (x, y), tuple(keyColor))
    colorMask.paste(allgreen, invertMask)  # make color mask green in green values on image
    inDataFG = inDataFG.convert('RGB')  # convert input image to RGB for ease of working with
    cleaned = subtract(inDataFG, colorMask)  # subtract greens from input
    BG.paste(cleaned, imMask)  # paste masked foreground over background
    # BG.show()  # display cleaned image
    BG.save(outfile, "JPEG")  # save cleaned image

def colorclose(Cb_p, Cr_p, Cb_key, Cr_key, tola, tolb):
    temp = math.sqrt((Cb_key - Cb_p) ** 2 + (Cr_key - Cr_p) ** 2)
    if temp < tola:
        z = 0.0
    elif temp < tolb:
        z = ((temp - tola) / (tolb - tola))
    else:
        z = 1.0
    return 255.0 * z

def check_folders(logger):
    if not os.path.exists('out/'):
        os.mkdir('out/')
    if not os.path.exists('background/'):
        os.mkdir('background/')
        logger.error("Place background images in background/")
        sys.exit()
    if not os.path.exists('in/'):
        os.mkdir('in/')
        logger.error("Place input files in in/")
        sys.exit()

def begin_greenbox(logger):
    """
    For all backgrounds loop through all input files into the out file
    """
    for bg in glob.glob('background/*'):
        bg_name = bg.split('/')[-1].lower().strip('.jpg').strip('.png').strip('.jpeg')
        for picture in glob.glob('in/*'):
            pic_name = picture.split('/')[-1].lower().strip('.JPG').strip('.png').strip('.jpeg')
            output_file = 'out/' + bg_name + ' ' + pic_name + '.jpg'
            one_pic = time.time()
            GreenScreen(infile=picture, inbg=bg, outfile=output_file)
            one_pic_time_done = time.time()
            time_arr.append(one_pic_time_done - one_pic)
            logger.info(time_arr)
            logger.info('done : %s' % pic_name)

def start_logging():
    logging.basicConfig()
    logger = logging.getLogger('greenbox')
    logger.setLevel(logging.INFO)
    return logger

if __name__ == '__main__':
    time_start = time.time()
    time_arr = []
    logger = start_logging()
    logger.info("Start time: %s" % time_start)
    check_folders(logger)
    begin_greenbox(logger)
    time_end = time.time()
    logger.info("End time: %s" % time_end)
Everything is okay and the image is saved in the /out folder. Using this code:
from cv2 import *

# initialize the camera
cam = VideoCapture(0)  # 0 -> index of camera
s, img = cam.read()
if s:  # frame captured without any errors
    namedWindow("cam-test", WINDOW_AUTOSIZE)
    imwrite('/home/leonardo/Scrivania/in/KVfnt.png', img)  # save image
Everything is okay: the image is captured from the camera and saved in the /in folder. If I add the second code to the first one:
from PIL import Image
from PIL.ImageChops import subtract
import numpy, math, time, glob, sys, os, logging, requests, random
from cv2 import *

# initialize the camera
cam = VideoCapture(0)  # 0 -> index of camera
s, img = cam.read()
if s:  # frame captured without any errors
    namedWindow("cam-test", WINDOW_AUTOSIZE)
    imwrite('/home/leonardo/Scrivania/in/KVfnt.png', img)  # save image

def GreenScreen(infile, inbg, outfile='output.png', keyColor=None, tolerance=None):
    """
    http://gc-films.com/chromakey.html
    http://www.cs.utah.edu/~michael/chroma/
    :param infile: Greenscreen image location
    :param inbg: Background image location
    :param outfile: Output file location
    :param keyColor: greenscreen color; it can be any singular color
    :param tolerance: tolerance of cleaning
    :return:
    """
    if not keyColor:
        keyColor = [151, 44, 21]  # Y, Cb, and Cr values of the greenscreen
    if not tolerance:
        tolerance = [100, 130]  # Allowed Distance from Values
    # open files
    inDataFG = Image.open('/home/leonardo/Scrivania/in/KVfnt.png').convert('YCbCr')
    Path = '/home/leonardo/Scrivania/background/'
    FullPath = os.path.join(Path, random.choice(os.listdir(Path)))
    BG = Image.open(FullPath).convert('RGB')
    [Y_key, Cb_key, Cr_key] = keyColor
    [tola, tolb] = tolerance
    (x, y) = inDataFG.size  # get dimensions
    foreground = numpy.array(inDataFG.getdata())  # make array from image
    maskgen = numpy.vectorize(colorclose)  # vectorize masking function
    alphaMask = maskgen(foreground[:, 1], foreground[:, 2], Cb_key, Cr_key, tola, tolb)  # generate mask
    alphaMask.shape = (y, x)  # make mask dimensions of original image
    imMask = Image.fromarray(numpy.uint8(alphaMask))  # convert array to image
    invertMask = Image.fromarray(numpy.uint8(255 - 255 * (alphaMask / 255)))  # create inverted mask with extremes
    # create images for color mask
    colorMask = Image.new('RGB', (x, y), tuple([0, 0, 0]))
    allgreen = Image.new('YCbCr', (x, y), tuple(keyColor))
    colorMask.paste(allgreen, invertMask)  # make color mask green in green values on image
    inDataFG = inDataFG.convert('RGB')  # convert input image to RGB for ease of working with
    cleaned = subtract(inDataFG, colorMask)  # subtract greens from input
    BG.paste(cleaned, imMask)  # paste masked foreground over background
    # BG.show()  # display cleaned image
    BG.save(outfile, "JPEG")  # save cleaned image

def colorclose(Cb_p, Cr_p, Cb_key, Cr_key, tola, tolb):
    temp = math.sqrt((Cb_key - Cb_p) ** 2 + (Cr_key - Cr_p) ** 2)
    if temp < tola:
        z = 0.0
    elif temp < tolb:
        z = ((temp - tola) / (tolb - tola))
    else:
        z = 1.0
    return 255.0 * z

def check_folders(logger):
    if not os.path.exists('out/'):
        os.mkdir('out/')
    if not os.path.exists('background/'):
        os.mkdir('background/')
        logger.error("Place background images in background/")
        sys.exit()
    if not os.path.exists('in/'):
        os.mkdir('in/')
        logger.error("Place input files in in/")
        sys.exit()

def begin_greenbox(logger):
    """
    For all backgrounds loop through all input files into the out file
    """
    for bg in glob.glob('background/*'):
        bg_name = bg.split('/')[-1].lower().strip('.jpg').strip('.png').strip('.jpeg')
        for picture in glob.glob('in/*'):
            pic_name = picture.split('/')[-1].lower().strip('.JPG').strip('.png').strip('.jpeg')
            output_file = 'out/' + bg_name + ' ' + pic_name + '.jpg'
            one_pic = time.time()
            GreenScreen(infile=picture, inbg=bg, outfile=output_file)
            one_pic_time_done = time.time()
            time_arr.append(one_pic_time_done - one_pic)
            logger.info(time_arr)
            logger.info('done : %s' % pic_name)

def start_logging():
    logging.basicConfig()
    logger = logging.getLogger('greenbox')
    logger.setLevel(logging.INFO)
    return logger

if __name__ == '__main__':
    time_start = time.time()
    time_arr = []
    logger = start_logging()
    logger.info("Start time: %s" % time_start)
    check_folders(logger)
    begin_greenbox(logger)
    time_end = time.time()
    logger.info("End time: %s" % time_end)
I obtain this error:
File "chromakey+upload.py", line 116, in <module>
begin_greenbox(logger)
File "chromakey+upload.py", line 97, in begin_greenbox
GreenScreen(infile=picture ,inbg=bg, outfile=output_file)
File "chromakey+upload.py", line 56, in GreenScreen
cleaned = subtract(inDataFG,colorMask) #subtract greens from input
TypeError: src1 is not a numpy array, neither a scalar
What is the problem? Thank you for your answers.
As the error says:
src1 is not a numpy array, neither a scalar
Perhaps you should try:
cleaned = subtract(numpy.array(inDataFG.getdata()),numpy.array(colorMask.getdata()))
Edit
There is a 'conflict' on subtract:
from PIL.ImageChops import subtract # first subtract
from cv2 import * # OpenCV has a subtract too
This is one of the reasons to use namespaces in your calls.
If your main image library is PIL, you should probably do import cv2 and use cv2.* where needed.
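A minimal sketch of that namespaced approach, keeping PIL's subtract and pushing every OpenCV call behind the cv2 prefix:

import cv2                           # no star import: OpenCV's subtract stays cv2.subtract
from PIL import Image
from PIL.ImageChops import subtract  # 'subtract' is now unambiguously PIL's

# capture one frame and save it, with explicit cv2.* calls
cam = cv2.VideoCapture(0)
s, img = cam.read()
if s:
    cv2.imwrite('/home/leonardo/Scrivania/in/KVfnt.png', img)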
