I want to combine these two scripts so they run as one: when I run save_detected_face.py, it saves each face detected by the OpenCV CascadeClassifier into a jpg file, and once detection is finished, train_save_face.py should run automatically.
Here is my sample code, save_detected_face.py:
import os
import urllib.request
import cv2
import numpy as np
from PIL import Image

path = 'dataset'
recognizer = cv2.face.LBPHFaceRecognizer_create()

def main():
    # cap = cv2.VideoCapture("../index.htm?clientIpAddr=192.168.1.12&IsRemote=0")
    a = 0
    cap = "http://192.168.1.43:8080/shot.jpg"
    id = input('enter user id: ')
    faceDetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    sampleNum = 0
    while True:
        a = a + 1
        imgResp = urllib.request.urlopen(cap)
        imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
        img = cv2.imdecode(imgNp, -1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceDetect.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            sampleNum = sampleNum + 1
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # roi_gray = gray[y:y+h, x:x+w]
            roi_color = img[y:y+h, x:x+w]
            eyes = eye_cascade.detectMultiScale(roi_color)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            cv2.imwrite("dataSet/User." + str(id) + "." + str(sampleNum) + ".jpg", gray[y:y+h, x:x+w])
        cv2.imshow('frame', img)

        # Training code
        def getImagesWithID(path):
            imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
            faces = []
            IDs = []
            for imagePath in imagePaths:
                faceImg = Image.open(imagePath).convert('L')
                faceNp = np.array(faceImg, 'uint8')
                ID = int(os.path.split(imagePath)[-1].split('.')[1])
                faces.append(faceNp)
                IDs.append(ID)
                cv2.imshow('training', faceNp)
                cv2.waitKey(10)
            return np.array(IDs), faces

        Ids, faces = getImagesWithID(path)
        recognizer.train(faces, Ids)
        recognizer.save('recognizer/trainingData.yml')

        if cv2.waitKey(100) & 0xff == ord('q'):
            break
        elif sampleNum > 20:
            break
    exit(0)

#
if __name__ == '__main__':
    main()
Code for train_save_face.py:
import os
import cv2
import numpy as np
from PIL import Image

recognizer = cv2.face.LBPHFaceRecognizer_create()
path = 'dataset'

def getImagesWithID(path):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faces = []
    IDs = []
    for imagePath in imagePaths:
        faceImg = Image.open(imagePath).convert('L')
        faceNp = np.array(faceImg, 'uint8')
        ID = int(os.path.split(imagePath)[-1].split('.')[1])
        faces.append(faceNp)
        IDs.append(ID)
        cv2.imshow('training', faceNp)
        cv2.waitKey(10)
    return np.array(IDs), faces

Ids, faces = getImagesWithID(path)
recognizer.train(faces, Ids)
recognizer.save('recognizer/trainingData.yml')
exit(0)
print('done training')
save_detected_face.py works fine on its own, but the training step never works.
I wasn't sure if this was correct, but the version below runs now:
# -*- coding: cp1252 -*-
import os
import urllib.request
import cv2
import numpy as np
from PIL import Image

recognizer = cv2.face.LBPHFaceRecognizer_create()
path = 'dataset'

def main():
    # cap = cv2.VideoCapture("../index.htm?clientIpAddr=192.168.1.12&IsRemote=0")
    a = 0
    cap = "http://192.168.1.43:8080/shot.jpg"
    id = input('enter user id: ')
    faceDetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    sampleNum = 0
    while True:
        a = a + 1
        imgResp = urllib.request.urlopen(cap)
        imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
        img = cv2.imdecode(imgNp, -1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceDetect.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            sampleNum = sampleNum + 1
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # roi_gray = gray[y:y+h, x:x+w]
            roi_color = img[y:y+h, x:x+w]
            eyes = eye_cascade.detectMultiScale(roi_color)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            cv2.imwrite("dataSet/User." + str(id) + "." + str(sampleNum) + ".jpg", gray[y:y+h, x:x+w])
        cv2.imshow('frame', img)
        if cv2.waitKey(100) & 0xff == ord('q'):
            break
        elif sampleNum > 20:
            break
    exit(0)

def getImagesWithID(path):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faces = []
    IDs = []
    for imagePath in imagePaths:
        faceImg = Image.open(imagePath).convert('L')
        faceNp = np.array(faceImg, 'uint8')
        ID = int(os.path.split(imagePath)[-1].split('.')[1])
        faces.append(faceNp)
        IDs.append(ID)
        cv2.imshow('training', faceNp)
        cv2.waitKey(10)
    return np.array(IDs), faces
    Ids, faces = getImagesWithID(path)
    recognizer.train(faces, Ids)
    recognizer.save('recognizer/trainingData.yml')
    exit(0)
    print('done training')

#
if __name__ == '__main__':
    main()
    getImagesWithID(path)
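A minimal sketch of the flow the question asks for, keeping capture and training as two functions that run in sequence so training starts only after the capture loop finishes. Note two problems in the posted attempt: samples are written to dataSet/ while path='dataset' points the training step at a different directory, and exit(0) placed before print('done training') means that line can never run. The sketch below reuses recognizer and getImagesWithID from above and assumes the same IP camera URL:

def capture_faces(user_id, url, out_dir='dataSet', max_samples=20):
    face_detect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    sample_num = 0
    while sample_num <= max_samples:
        img_resp = urllib.request.urlopen(url)
        img = cv2.imdecode(np.frombuffer(img_resp.read(), np.uint8), -1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        for (x, y, w, h) in face_detect.detectMultiScale(gray, 1.3, 5):
            sample_num += 1
            cv2.imwrite('%s/User.%s.%d.jpg' % (out_dir, user_id, sample_num),
                        gray[y:y + h, x:x + w])
        cv2.imshow('frame', img)
        if cv2.waitKey(100) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

def train_faces(data_dir='dataSet', model_path='recognizer/trainingData.yml'):
    ids, faces = getImagesWithID(data_dir)  # reuses the helper defined above
    recognizer.train(faces, ids)
    recognizer.save(model_path)

if __name__ == '__main__':
    capture_faces(input('enter user id: '), 'http://192.168.1.43:8080/shot.jpg')
    train_faces()  # runs only once capturing has finished
    print('done training')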
import numpy
import cv2
import time as t
from PIL import ImageGrab
import numpy as np

print("\n\n----------------\n\nPress 'Q' to Quit\n\n----------------\n\n")

def draw_line(img, lines):
    try:
        for line in lines:
            coords = line[0]
            cv2.line(img, (coords[0]), (coords[1]), (coords[2]), (coords[3]), [255, 0, 255], 5)
    except:
        pass

def process_img(o_img):
    pro_img = cv2.cvtColor(o_img, cv2.COLOR_BGR2GRAY)
    pro_img = cv2.Canny(pro_img, threshold1=100, threshold2=300)
    lines = cv2.HoughLinesP(pro_img, 1, np.pi/180, 180, 3, 15)
    draw_line(pro_img, lines)
    return pro_img

while True:
    screen = np.array(ImageGrab.grab(bbox=(80, 130, 990, 630)))
    new_scr = process_img(screen)
    cv2.imshow("window", new_scr)
    # cv2.imshow('window 2', cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
I tried the draw function and it didn't work; I'm not sure if I need to do something else to the image or if the function isn't placed correctly.
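The likely culprit is the cv2.line call: each coordinate is being passed as a separate argument, while OpenCV expects two (x, y) points, and the bare except silently swallows the resulting error. cv2.HoughLinesP returns None when nothing is found, otherwise an (N, 1, 4) array of x1, y1, x2, y2 rows. A corrected sketch:

def draw_line(img, lines):
    # HoughLinesP returns None when no lines are found
    if lines is None:
        return
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), [255, 0, 255], 5)

Note also that pro_img is single-channel after Canny, so the magenta color collapses to a gray value; draw on the original BGR frame if you want colored lines.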
I'm adding a watermark to a photo. How can I watermark several photos at once, and how do I save multiple photos at once? What should the loop look like, and in which part of the code should it go?
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageOps

def watermark_text(input_image_path,
                   output_image_path,
                   text, pos):
    photo = Image.open(input_image_path)
    drawing = ImageDraw.Draw(photo)
    white = (3, 8, 12)
    font = ImageFont.truetype("/Roboto-Regular.ttf", 150)
    drawing.text(pos, text, fill=white, font=font)
    photo.show()
    photo.save(output_image_path)

if __name__ == '__main__':
    img = 'new7093.JPG'
    watermark_text(img, '11112.JPG',
                   text='sportves.ru',
                   pos=(300, 500))
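In outline, what the question needs is a loop over the filenames that calls watermark_text once per image, saving each result separately. A minimal sketch, assuming hypothetical photos/ input and watermarked/ output folders:

import os

input_dir = 'photos'         # hypothetical source folder
output_dir = 'watermarked'   # hypothetical destination folder
os.makedirs(output_dir, exist_ok=True)

for name in os.listdir(input_dir):
    watermark_text(os.path.join(input_dir, name),
                   os.path.join(output_dir, name),
                   text='sportves.ru', pos=(300, 500))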
How about using multiprocessing?
from concurrent.futures import ProcessPoolExecutor
import os

# watermark_text as defined in the question
img_dir = 'path-to-image'
img_list = [os.path.join(img_dir, f) for f in os.listdir(img_dir)]
output_img_list = [i + '.output.jpg' for i in img_list]
text = 'sportves.ru'
pos = (300, 500)

with ProcessPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(watermark_text, input_image_path, output_image_path, text, pos)
               for input_image_path, output_image_path
               in zip(img_list, output_img_list)]
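A ProcessPoolExecutor sidesteps the GIL, which suits the CPU-bound decode/draw/encode work; for mostly I/O-bound jobs, ThreadPoolExecutor from the same module is a drop-in alternative. Note that a process pool has to pickle the callable, so watermark_text must be defined at module top level, not inside if __name__ == '__main__'.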
This should work. Replace the save path with your own; the watermarked photos are collected in a list and appended into one file at the end.
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont

def watermark_text(input_image_path, text, pos):
    photo = Image.open(input_image_path)
    drawing = ImageDraw.Draw(photo)
    white = (3, 8, 12)
    font = ImageFont.truetype("/Roboto-Regular.ttf", 150)
    drawing.text(pos, text, fill=white, font=font)
    photo.show()
    new_images.append(photo)

if __name__ == '__main__':
    images = ['new7093.JPG', 'something.png']  # list of photos without watermark
    new_images = []  # list of photos with watermark
    for img in images:
        watermark_text(img, text='sportves.ru', pos=(300, 500))
    new_images[0].save(r'C:\Users\You\Desktop\watermarked.pdf', save_all=True,
                       append_images=new_images[1:])  # replace with your path
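One caveat: save_all with append_images produces a single multi-page file, and only formats with multi-frame support (PDF, TIFF, GIF) accept it; JPEG does not, which is why the sketch above saves to a .pdf. To end up with separate JPEGs, save each photo inside the loop instead.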
I want to use facial landmarks to align the faces extracted by my face detection code (using YOLOv3).
This is my code:
import argparse
import sys
import os

from utils import *

#####################################################################
parser = argparse.ArgumentParser()
parser.add_argument('--model-cfg', type=str, default='./cfg/yolov3-face.cfg',
                    help='path to config file')
parser.add_argument('--model-weights', type=str,
                    default='./model-weights/yolov3-wider_16000.weights',
                    help='path to weights of model')
parser.add_argument('--image', type=str, default='',
                    help='path to image file')
parser.add_argument('--video', type=str, default='',
                    help='path to video file')
parser.add_argument('--src', type=int, default=0,
                    help='source of the camera')
parser.add_argument('--output-dir', type=str, default='outputs/',
                    help='path to the output directory')
args = parser.parse_args()

#####################################################################
# print the arguments
print('----- info -----')
print('[i] The config file: ', args.model_cfg)
print('[i] The weights of model file: ', args.model_weights)
print('[i] Path to image file: ', args.image)
print('[i] Path to video file: ', args.video)
print('###########################################################\n')

# check outputs directory
if not os.path.exists(args.output_dir):
    print('==> Creating the {} directory...'.format(args.output_dir))
    os.makedirs(args.output_dir)
else:
    print('==> Skipping create the {} directory...'.format(args.output_dir))

# Give the configuration and weight files for the model and load the network
# using them.
net = cv2.dnn.readNetFromDarknet(args.model_cfg, args.model_weights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)


def _main():
    wind_name = 'face detection using YOLOv3'
    cv2.namedWindow(wind_name, cv2.WINDOW_NORMAL)
    output_file = ''
    if args.image:
        if not os.path.isfile(args.image):
            print("[!] ==> Input image file {} doesn't exist".format(args.image))
            sys.exit(1)
        cap = cv2.VideoCapture(args.image)
        output_file = args.image[:-4].rsplit('/')[-1] + '_yoloface.jpg'
    elif args.video:
        if not os.path.isfile(args.video):
            print("[!] ==> Input video file {} doesn't exist".format(args.video))
            sys.exit(1)
        cap = cv2.VideoCapture(args.video)
        output_file = args.video[:-4].rsplit('/')[-1] + '_yoloface.avi'
    else:
        # Get data from the camera
        cap = cv2.VideoCapture(args.src)

    # Get the video writer initialized to save the output video
    if not args.image:
        video_writer = cv2.VideoWriter(
            os.path.join(args.output_dir, output_file),
            cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
            cap.get(cv2.CAP_PROP_FPS),
            (round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
             round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))

    while True:
        has_frame, frame = cap.read()
        # Stop the program if reached end of video
        if not has_frame:
            print('[i] ==> Done processing!!!')
            print('[i] ==> Output file is stored at',
                  os.path.join(args.output_dir, output_file))
            cv2.waitKey(1000)
            break

        # Create a 4D blob from a frame.
        blob = cv2.dnn.blobFromImage(frame, 1 / 255, (IMG_WIDTH, IMG_HEIGHT),
                                     [0, 0, 0], 1, crop=False)

        # Sets the input to the network
        net.setInput(blob)

        # Runs the forward pass to get output of the output layers
        outs = net.forward(get_outputs_names(net))

        # Remove the bounding boxes with low confidence
        faces = post_process(frame, outs, CONF_THRESHOLD, NMS_THRESHOLD)
        print('[i] ==> # detected faces: {}'.format(len(faces)))
        print('#' * 60)

        # initialize the set of information we'll be displaying on the frame
        info = [
            ('number of faces detected', '{}'.format(len(faces)))
        ]
        for (i, (txt, val)) in enumerate(info):
            text = '{}: {}'.format(txt, val)
            cv2.putText(frame, text, (10, (i * 20) + 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, COLOR_RED, 2)

        # Save the output video to file
        if args.image:
            cv2.imwrite(os.path.join(args.output_dir, output_file),
                        frame.astype(np.uint8))
        else:
            video_writer.write(frame.astype(np.uint8))

        cv2.imshow(wind_name, frame)
        key = cv2.waitKey(1)
        if key == 27 or key == ord('q'):
            print('[i] ==> Interrupted by user!')
            break

    cap.release()
    cv2.destroyAllWindows()
    print('==> All done!')
    print('***********************************************************')


if __name__ == '__main__':
    _main()
My problem is that I want to use facial landmarks to align the faces extracted by the yoloface detector, but I don't know how to add facial landmarking to my code.
Please help me.
Thanks.
You can use dlib and imutils for that. The model can be downloaded here: http://dlib.net/files/
import dlib
from imutils import face_utils
import cv2

# load the 5-point landmark model (shape_predictor_5_face_landmarks.dat from dlib.net/files)
shape_predictor = dlib.shape_predictor('shape_predictor_5_face_landmarks.dat')
fa = face_utils.facealigner.FaceAligner(shape_predictor, desiredLeftEye=(0.2, 0.2))

# `frame` is a BGR image and `box` a (left, top, right, bottom) face box from your detector
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aligned_face = fa.align(frame, gray, dlib.rectangle(left=box[0], top=box[1],
                                                    right=box[2], bottom=box[3]))
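To wire this into the detection loop from the question, something like the following could go right after faces = post_process(...). It assumes the boxes come back as (left, top, width, height), which is worth verifying against your utils.py:

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
for n, (left, top, width, height) in enumerate(faces):
    rect = dlib.rectangle(left=int(left), top=int(top),
                          right=int(left + width), bottom=int(top + height))
    aligned = fa.align(frame, gray, rect)
    cv2.imwrite(os.path.join(args.output_dir, 'aligned_{}.jpg'.format(n)), aligned)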
Can we store our own training dataset in XML format using pickle?
import numpy as np
import random
import pickle
import matplotlib

gui_env = [i for i in matplotlib.rcsetup.interactive_bk]
non_gui_backends = matplotlib.rcsetup.non_interactive_bk
print("Non Gui backends are:", non_gui_backends)
print("Gui backends I will test for", gui_env)
for gui in gui_env:
    print("testing", gui)
    try:
        matplotlib.use(gui, warn=False, force=True)
        from matplotlib import pyplot as plt
        print(" ", gui, "Is Available")
        plt.plot([1.5, 2.0, 2.5])
        fig = plt.gcf()
        fig.suptitle(gui)
        print("Using ..... ", matplotlib.get_backend())
    except:
        print(" ", gui, "Not found")

import os
import cv2
from tqdm import tqdm

DATADIR = "Datasets/PetImages"
CATEGORIES = ["Dog", "Cat"]
training_data = []
IMG_SIZE = 50

def create_training_data():
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for img in tqdm(os.listdir(path)):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array, class_num])
            except Exception as e:
                pass
    random.shuffle(training_data)
    X = []
    y = []
    for features, label in training_data:
        X.append(features)
        y.append(label)
    print(X[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))
    X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
    pickle_out = open("X.xml", "wb")
    pickle.dump(X, pickle_out)
    pickle_out.close()
    pickle_out = open("y.xml", "wb")
    pickle.dump(y, pickle_out)
    pickle_out.close()

create_training_data()
print(len(training_data))
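No: pickle always writes its own binary serialization, so naming the file X.xml changes only the extension, not the format, and the result is not valid XML. If actual XML is required, OpenCV's cv2.FileStorage can write arrays. A minimal sketch, using a 2-D stand-in array since FileStorage stores cv::Mat-style matrices (reshape higher-dimensional data first):

import cv2
import numpy as np

# stand-in for the real data: each 50x50 image flattened to one row
X = np.zeros((10, 2500), dtype=np.uint8)

fs = cv2.FileStorage('X.xml', cv2.FILE_STORAGE_WRITE)
fs.write('X', X)  # written as a matrix node in real XML
fs.release()

# reading it back
fs = cv2.FileStorage('X.xml', cv2.FILE_STORAGE_READ)
X_back = fs.getNode('X').mat()
fs.release()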
I have been trying to resolve this issue for hours, but no matter what I do, I can't get it to work.
My project tracks live data and provides an endpoint for other services to get the latest(ish) measurement, but queue.get() always comes back empty.
Here is my code:
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
from flask import Flask
from multiprocessing import Queue
import threading
import Queue as Q

app = Flask(__name__)

class ImageParser(object):
    def dosmth(self, q):
        ap = argparse.ArgumentParser()
        ap.add_argument("-v", "--video", help="path to the (optional) video file")
        ap.add_argument("-b", "--buffer", type=int, default=14, help="max buffer size")
        args = vars(ap.parse_args())
        greenLower = [(86, 61, 128)]
        greenUpper = [(148, 183, 196)]
        pts1 = deque(maxlen=args["buffer"])
        pts2 = deque(maxlen=args["buffer"])
        if not args.get("video", False):
            camera = cv2.VideoCapture(0)
        else:
            camera = cv2.VideoCapture(args["video"])
        while True:
            (grabbed, frame) = camera.read()
            if args.get("video") and not grabbed:
                break
            frame = imutils.resize(frame, width=1200)
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            for j in range(len(greenLower)):
                upper = greenUpper[j]
                lower = greenLower[j]
                mask = cv2.inRange(hsv, lower, upper)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                for i in range(len(cnts)):
                    center = None
                    if len(cnts) > 0:
                        c = max(cnts, key=cv2.contourArea)
                        ((x, y), radius) = cv2.minEnclosingCircle(c)
                        M = cv2.moments(c)
                        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                        if radius > 10:
                            q.put(center)
                            cv2.circle(frame, (int(x), int(y)), int(radius),
                                       (0, 255, 255), 2)
                            cv2.circle(frame, center, 5, (0, 0, 255), -1)
                if j == 0:
                    pts1.appendleft(center)
                    for i in xrange(1, len(pts1)):
                        if pts1[i - 1] is None or pts1[i] is None:
                            continue
                        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
                        cv2.line(frame, pts1[i - 1], pts1[i], (255, 0, 0), thickness)
                if j == 1:
                    pts2.appendleft(center)
                    for i in xrange(1, len(pts2)):
                        if pts2[i - 1] is None or pts2[i] is None:
                            continue
                        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
                        cv2.line(frame, pts2[i - 1], pts2[i], (51, 153, 255), thickness)
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
        camera.release()
        cv2.destroyAllWindows()

imgPar = ImageParser()
q = Queue()
scp = threading.Thread(target=imgPar.dosmth, args=(q,))
scp.start()

def getVal():
    try:
        (x, y) = q.get_nowait()
    except Q.Empty:
        return -1, -1
    return (x, y)

@app.route('/', methods=['GET'])
def doMain():
    x, y = getVal()
    print x, y
    return '{},{}'.format(x, y)

app.run(debug=True, host='10.21.8.52')
As I really do not have any other clue what I should do, any help would be appreciated.
Everything is running on Python 2.7.15 in an Anaconda environment, if that helps in any way.
I took the liberty of stripping out the cv2 code, since I don't have a camera, replacing the queue filler with a pair of random numbers every 0.5 seconds, and PEP8-ing the code a bit; this way it works:
import random
import time

from flask import Flask
import threading
from multiprocessing import Queue
from Queue import Empty as QueueEmpty

app = Flask(__name__)

class ImageParser(object):
    def __init__(self, queue):
        self.queue = queue
        self.source = random.random
        self.pause = 0.5

    def run(self):
        while True:
            value = (self.source(), self.source())
            self.queue.put(value)
            time.sleep(self.pause)

queue = Queue()
image_parser = ImageParser(queue)
image_thread = threading.Thread(target=image_parser.run)

@app.route('/', methods=['GET'])
def do_main():
    try:
        value = queue.get_nowait()
    except QueueEmpty:
        value = None
    print(value)
    return str(value)

if __name__ == '__main__':
    image_thread.start()
    app.run(debug=True, host='127.0.0.1')
Under http://127.0.0.1:5000/ I now get pairs of random numbers, and the occasional None when I reload too fast.
I therefore conclude that the problem probably lies with the image processing part. Specifically, I noticed that only contours with an enclosing radius > 10 get put into the queue; maybe that path of the code just never gets executed. Are you quite sure that any values get put into the queue at all? A print x, y, radius before the if radius > 10 should shed some light. (And why put center into the queue instead of x and y?)
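For reference, the suggested instrumentation would sit here in the question's loop (Python 2 syntax to match):

M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
print x, y, radius  # does any contour ever clear the radius gate?
if radius > 10:
    q.put(center)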