'bool' object not iterable - python-3.x

I am working with Python 3 and OpenCV 3.4, using Microsoft Azure's Face API function CF.face.detect().
As far as I know, a for loop needs an iterable object to run on, such as a list, and a plain boolean is not iterable. Even though res1 is a list, I get this error:
TypeError: 'bool' object is not iterable
Please help. Thanks in advance.
Here is the code:
import unittest
import cognitive_face as CF
from PIL import Image, ImageFont, ImageDraw
import time
import cv2
from time import strftime
CF.Key.set('')
#print(CF.Key.get())
CF.BaseUrl.set('https://southeastasia.api.cognitive.microsoft.com/face/v1.0/')
#print(CF.BaseUrl.get())
"""Setup Person and Person Group related data."""
person_group_id = '' #id from training terminal
"""Unittest for `face.detect`."""
cap = cv2.VideoCapture('1.mp4')
while(cap.isOpened()):
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    print("\n\n ##########.... LOOKING FOR FACES ....########## \n\n")
    res1 = []
    print(type(res1))
    res1 = CF.face.detect(cap)
    print('\n This is the res1: ', res1)
    c = len(res1)
    print('\nTOTAL FACES FOUND:', c)
    detect_id = [] ##error was here so put exception
    for i in range(c):
        print("\n\n ##########.... DETECTING FACES ....########## \n\n")
        print('\n This is i in range c', i, c)
        detect_id.append(res1[i]['faceId'])
        #print('\n\n detected faces id ', detect_id[i])
        width = res1[i]['faceRectangle']['width']
        height = res1[i]['faceRectangle']['height']
        x = res1[i]['faceRectangle']['left']
        y = res1[i]['faceRectangle']['top']
    ################## IF ENDS ################
    cv2.imshow('image', img)
    k = cv2.waitKey(100) & 0xff
    if k == 27:
        break
################ WHILE ENDS ####################
cap.release()
cv2.destroyAllWindows()

Jonasz is right, you should be detecting faces on images, meaning on frames from your mp4 file.
The method CF.face.detect expects a URI, so in the following code we'll write each frame to disk before passing it to CF.face.detect:
cap = cv2.VideoCapture('1.mp4')
count = 0 # <--
while(cap.isOpened()):
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    filename = "frame%d.jpg" % count # <--
    cv2.imwrite(filename, img) # <--
    count += 1 # <--
    print("\n\n ##########.... LOOKING FOR FACES ....########## \n\n")
    res1 = []
    print(type(res1))
    res1 = CF.face.detect(filename) # <--

Shouldn't you use CF.face.detect on your captured image rather than on the cap variable?
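For completeness: the cognitive_face SDK is documented to accept a URL, a file path, or a file-like object, so if you want to avoid writing every frame to disk, something like the sketch below may work (the in-memory cv2.imencode/io.BytesIO approach is my own assumption, not something the answers above tested):

# Hedged sketch: pass an in-memory JPEG to CF.face.detect instead of a filename.
# Verify that your cognitive_face version accepts file-like objects.
import io
import cv2
import cognitive_face as CF

cap = cv2.VideoCapture('1.mp4')
ret, img = cap.read()
ok, buf = cv2.imencode('.jpg', img)     # encode the frame as JPEG in memory
if ok:
    res1 = CF.face.detect(io.BytesIO(buf.tobytes()))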

Related

Python cv2 ORB detectandcompute returning "invalid number of channels in input image"

I'm trying to extract and match features from two different images, but for some reason the detectAndCompute method doesn't work on my orb object:
orb = cv2.ORB_create()
kp, corners = orb.detectAndCompute(image, None)
I am passing a single grayscale image (the return of np.float32(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))). For some reason, the program returns the following error:
Traceback (most recent call last):
  File "C:\Users\levxr\Desktop\Visual-positioning-bot-main\alloverlay.py", line 37, in <module>
    cv2.imshow("camera "+str(i), corn1.updateanddisplay())
  File "C:\Users\levxr\Desktop\Visual-positioning-bot-main\features.py", line 33, in updateanddisplay
    dst = self.update(image=self.image)
  File "C:\Users\levxr\Desktop\Visual-positioning-bot-main\features.py", line 23, in update
    kp, corners = orb.detectAndCompute(image,None)
cv2.error: OpenCV(4.4.0) c:\users\appveyor\appdata\local\temp\1\pip-req-build-95hbg2jt\opencv\modules\imgproc\src\color.simd_helpers.hpp:92: error: (-2:Unspecified error) in function '__cdecl cv::impl::`anonymous-namespace'::CvtHelper<struct cv::impl::`anonymous-namespace'::Set<3,4,-1>,struct cv::impl::A0x2980c61a::Set<1,-1,-1>,struct cv::impl::A0x2980c61a::Set<0,2,5>,2>::CvtHelper(const class cv::_InputArray &,const class cv::_OutputArray &,int)'
Invalid number of channels in input image:
    'VScn::contains(scn)'
where
    'scn' is 1
The program is split into 3 files. alloverlay.py (the main file):
import sys
import cv2
import numpy as np
import features as corn
import camera as cali

cv2.ocl.setUseOpenCL(False)

#videoname = input("enter input")
videoname = "camera10001-0200.mkv"
try:
    videoname = int(videoname)
    cap = cv2.VideoCapture(videoname)
except:
    cap = cv2.VideoCapture(videoname)
videoname2 = "camera 20000-0200.mkv"
try:
    videoname = int(videoname)
    cap2 = cv2.VideoCapture(videoname)
except:
    cap2 = cv2.VideoCapture(videoname)

if cap.isOpened() and cap2.isOpened():
    ret1, image1 = cap.read()
    ret2, image2 = cap2.read()
    ret = [ret1, ret2]
    image = [np.float32(cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)), np.float32(cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY))]
    cali1 = cali.Calibrator()
    corn1 = corn.Corner_detector(image)
    while cap.isOpened() and cap2.isOpened():
        ret[0], image[0] = cap.read()
        ret[1], image[1] = cap2.read()
        if ret:
            backupimg = image
            for i, img in enumerate(image):
                if cali1.calibrated:
                    backupimg[i] = corn1.image = cali1.undistort(np.float32(cv2.cvtColor(image[i], cv2.COLOR_BGR2GRAY)), cali1.mtx, cali1.dist)
                else:
                    backupimg[i] = corn1.image = np.float32(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
                cv2.imshow("camera "+str(i), corn1.updateanddisplay())
            image = backupimg
            print(ret, image)
            #cv2.imshow("test", image)
            key = cv2.waitKey(1)
            if key == ord("c"):
                cali1.calibrate(cali1.image)
            if cv2.waitKey(25) & 0xFF == ord("q"):
                break
        else:
            print("capture not reading")
            break
cap.release()
camera.py (the module that calibrates and undistorts the camera and triangulates the relative position of a point; a different part of this project, irrelevant to this issue):
import sys
import cv2
#import glob
import numpy as np

cv2.ocl.setUseOpenCL(False)

class Missing_calibration_data_error(Exception):
    def __init__():
        pass

class Calibrator():
    def __init__(self, image=None, mtx=None, dist=None, camera_data={"pixelsize":None, "matrixsize":None, "baseline":None, "lens_distance":None}, criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001), calibrated=False):
        self.criteria = criteria
        self.objpoints = []
        self.imgpoints = []
        self.objp = np.zeros((6*7,3), np.float32)
        self.objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
        self.image = image
        self.mtx = mtx
        self.dist = dist
        self.calibrated = calibrated
        self.pixelsize = camera_data["pixelsize"]
        self.matrixsize = camera_data["matrixsize"]
        self.baseline = camera_data["baseline"]
        self.lens_distance = camera_data["lens_distance"]

    def calibrate(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (7,6), None)
        if ret == True:
            self.objpoints.append(self.objp)
            corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), self.criteria)
            self.imgpoints.append(corners2)
        h, w = image.shape[:2]
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
        self.mtx = mtx
        self.dist = dist
        self.calibrated = True
        return mtx, dist

    def undistort(self, image, mtx, dist):
        if dist == None or mtx == None or image == None:
            raise Missing_calibration_data_error
        h, w = image.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
        dst = cv2.undistort(image, mtx, dist, None, newcameramtx)
        x, y, w, h = roi
        dst = dst[y:y+h, x:x+w]
        return image

    def calculate_point_relative_position(self, point_location2d):
        angle = self.baseline/(point_location2d[left][x]-point_location2d[right][x])
        x = angle * (point_location2d[left][x]-self.matrixsize[0]/2)
        y = angle * (point_location2d[left][y]-self.matrixsize[1]/2)
        z = self.lens_distance * (1-angle/self.pixelsize)
        return [x, y, z]
And features.py (the module that detects and matches the features, apparently where the issue happens):
import sys
import cv2
import numpy as np

cv2.ocl.setUseOpenCL(False)

class Unknown_algorythm_error(Exception):
    def __init__(self):
        pass

class No_image_passed_error(Exception):
    def __int__(self):
        pass

class Corner_detector():
    def __init__(self, image, detectortype="ORB", corners=[]):
        self.corners = corners
        self.image = image
        self.detectortype = detectortype

    def update(self, image=None):
        if self.detectortype == "Harris":
            self.corners = cv2.cornerHarris(image, 3, 3, 0, 1)
        elif self.detectortype == "Shi-Tomasi":
            self.corners = cv2.goodFeaturesToTrack(image, 3, 3, 0, 1)
        elif self.detectortype == "ORB":
            orb = cv2.ORB_create()
            kp, corners = orb.detectAndCompute(image, None)
        elif self.detectortype == "SURF":
            minHessian = 400
            detector = cv2.features2d_SURF(hessianThreshold=minHessian)
            keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
            keypoints2, descriptors2 = detector.detectAndCompute(img2, None)
        else:
            raise Unknown_algoryth_error
        return self.corners

    def updateanddisplay(self):
        dst = self.update(image=self.image)
        self.image[dst>0.01*dst.max()] = 0
        return self.image

class Feature_matcher():
    def __init__(self, matcher=cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)):
        self.matcher = matcher
Does anyone know how to fix this? I've been looking for the answer for quite a while, but I only find answers about converting the image to grayscale, and that doesn't work for me.
It's hard to follow, but I think I identified the issue:
You are passing orb.detectAndCompute an image of type np.float32, and orb.detectAndCompute does not support images of type np.float32.
Reproducing the problem:
The following simple test reproduces the problem by passing a black (zeros) image to orb.detectAndCompute.
This code passes without an exception (the image type is np.uint8):
# image type is uint8:
image = np.zeros((100, 100), np.uint8)
orb = cv2.ORB_create()
kp, corners = orb.detectAndCompute(image, None)
The following code raises an exception because the image type is np.float32:
# image type is float32:
image = np.float32(np.zeros((100, 100), np.uint8))
orb = cv2.ORB_create()
kp, corners = orb.detectAndCompute(image, None)
It raises the exception:
Invalid number of channels in input image:
Solution:
Try to avoid the np.float32 conversion.
You may also convert the image to uint8 as follows:
kp, corners = orb.detectAndCompute(image.astype(np.uint8), None)
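A slightly more defensive variant of that conversion (my own addition, not part of the answer's tests) checks the dtype first and clips before casting, so out-of-range float values don't wrap around:

# Hedged sketch: convert to uint8 only when needed, clipping to the valid range.
import cv2
import numpy as np

image = np.float32(np.zeros((100, 100)))          # hypothetical float32 input
if image.dtype != np.uint8:
    image = np.clip(image, 0, 255).astype(np.uint8)

orb = cv2.ORB_create()
kp, corners = orb.detectAndCompute(image, None)   # no exception now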

How to detect objects which have almost similar color with their background?

(Images from the question: the original image, the image after k-means clustering, and the resulting image.)
I am working on malaria parasite detection using thick blood smear microscopy images. I have tried to segment the parasite objects, but it is difficult since they have almost the same color as the background. I have used cv2.kmeans() to cluster the parasite and non-parasite regions.
import os
import csv as csv
import cv2
import numpy as np
import matplotlib.pyplot as plt

def smooth(img):
    dest = cv2.medianBlur(img, 7)
    #dest = cv2.GaussianBlur(img, (7,7), 0)
    return dest

def process(path, img):
    image = cv2.imread(path+img, 1)
    image = smooth(image)
    return image

def kmeans(img, name):
    output = []
    image = img.reshape(img.shape[0]*img.shape[1], 3)
    image = np.float32(image)
    nclusters = 5
    criteria = (cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    attempts = 10
    flags = cv2.KMEANS_RANDOM_CENTERS
    compactness, labels, centers = cv2.kmeans(image, nclusters, None, criteria, attempts, flags)
    centers = np.uint8(centers)
    res = centers[labels.flatten()]
    res2 = res.reshape((img.shape))
    cv2.imwrite(dest+name[:-4]+'.png', res2)
    im_color = cv2.imread(dest+name[:-4]+'.png', cv2.IMREAD_COLOR)
    im_gray = cv2.cvtColor(im_color, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(im_gray, thresh=100, maxval=255, type=cv2.THRESH_BINARY_INV)
    mask3 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) # 3 channel mask
    im_thresh_color = cv2.bitwise_and(img, mask3)
    cv2.imwrite("C:\\Users\\user\\Desktop\\lbim2\\"+name[:-4]+".png", im_thresh_color)

def preprocess(path):
    images = []
    j = 0
    print("Median Blur")
    for i in os.listdir(path):
        print(i)
        images.append(process(path, i))
        print(images[j].shape)
        #print(images[1].shape)
        images[j] = kmeans(images[j], i)
        j += 1
        print(i)

dest = '../output1/'
print("Preprocess")
preprocess('../input1/')
I get an image with all pixel values 0 (black output).
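No answer was posted for this one, but within the pipeline shown above, one hedged way to debug the all-black output is to check the gray-level range of the clustered image before applying the fixed threshold of 100: if every pixel is above 100, THRESH_BINARY_INV produces an all-zero mask, and cv2.bitwise_and then returns a black image. The file path below is hypothetical:

# Hedged diagnostic sketch for the all-black result.
import cv2

im_color = cv2.imread('clustered.png', cv2.IMREAD_COLOR)  # hypothetical path
im_gray = cv2.cvtColor(im_color, cv2.COLOR_BGR2GRAY)
print(im_gray.min(), im_gray.max())   # if min() > 100, the inverse mask is empty
_, mask = cv2.threshold(im_gray, 100, 255, cv2.THRESH_BINARY_INV)
print(cv2.countNonZero(mask))         # 0 means nothing survived the threshold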

Python multiprocessing queue is empty although it is filled in a different thread

I have now tried to resolve this issue for multiple hours, but no matter what I do, I never get it to work.
My project tracks live data and provides an endpoint for other services to get the latest(ish) measurement. But no matter what I do, queue.get() always returns nothing.
Here is my code:
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
from flask import Flask
from multiprocessing import Queue
import threading
import Queue as Q

app = Flask(__name__)

class ImageParser(object):
    def dosmth(self, q):
        ap = argparse.ArgumentParser()
        ap.add_argument("-v", "--video", help="path to the (optional) video file")
        ap.add_argument("-b", "--buffer", type=int, default=14, help="max buffer size")
        args = vars(ap.parse_args())
        greenLower = [(86, 61, 128)]
        greenUpper = [(148, 183, 196)]
        pts1 = deque(maxlen=args["buffer"])
        pts2 = deque(maxlen=args["buffer"])
        if not args.get("video", False):
            camera = cv2.VideoCapture(0)
        else:
            camera = cv2.VideoCapture(args["video"])
        while True:
            (grabbed, frame) = camera.read()
            if args.get("video") and not grabbed:
                break
            frame = imutils.resize(frame, width=1200)
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            for j in range(len(greenLower)):
                upper = greenUpper[j]
                lower = greenLower[j]
                mask = cv2.inRange(hsv, lower, upper)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                for i in range(len(cnts)):
                    center = None
                    if len(cnts) > 0:
                        c = max(cnts, key=cv2.contourArea)
                        ((x, y), radius) = cv2.minEnclosingCircle(c)
                        M = cv2.moments(c)
                        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                        if radius > 10:
                            q.put(center)
                            cv2.circle(frame, (int(x), int(y)), int(radius),
                                       (0, 255, 255), 2)
                            cv2.circle(frame, center, 5, (0, 0, 255), -1)
                    if j == 0:
                        pts1.appendleft(center)
                        for i in xrange(1, len(pts1)):
                            if pts1[i - 1] is None or pts1[i] is None:
                                continue
                            thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
                            cv2.line(frame, pts1[i - 1], pts1[i], (255, 0, 0), thickness)
                    if j == 1:
                        pts2.appendleft(center)
                        for i in xrange(1, len(pts2)):
                            if pts2[i - 1] is None or pts2[i] is None:
                                continue
                            thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
                            cv2.line(frame, pts2[i - 1], pts2[i], (51, 153, 255), thickness)
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
        camera.release()
        cv2.destroyAllWindows()

imgPar = ImageParser()
q = Queue()
scp = threading.Thread(target=imgPar.dosmth, args=(q,))
scp.start()

def getVal():
    try:
        (x, y) = q.get_nowait()
    except Q.Empty:
        return -1, -1
    return (x, y)

@app.route('/', methods=['GET'])
def doMain():
    x, y = getVal()
    print x, y
    return '{},{}'.format(x, y)

app.run(debug=True, host='10.21.8.52')
As I really do not have any other clue what I should do, any help would be appreciated.
Everything is running on python 2.7.15 in an anaconda environment if that helps in any way.
I took the liberty of stripping out the CV2 code as I don't have a camera, replacing the queue filler with a pair of random numbers every 0.5 seconds, and PEP8-ing the code a bit, and this way it works:
import random
import time
from flask import Flask
import threading
from multiprocessing import Queue
from Queue import Empty as QueueEmpty

app = Flask(__name__)

class ImageParser(object):
    def __init__(self, queue):
        self.queue = queue
        self.source = random.random
        self.pause = 0.5

    def run(self):
        while True:
            value = (self.source(), self.source())
            self.queue.put(value)
            time.sleep(self.pause)

queue = Queue()
image_parser = ImageParser(queue)
image_thread = threading.Thread(target=image_parser.run)

@app.route('/', methods=['GET'])
def do_main():
    try:
        value = queue.get_nowait()
    except QueueEmpty:
        value = None
    print(value)
    return str(value)

if __name__ == '__main__':
    image_thread.start()
    app.run(debug=True, host='127.0.0.1')
Under http://127.0.0.1:5000/ I now get pairs of random numbers, and the occasional None when I reload too fast.
I therefore conclude that the problem probably lies with the image processing part. Specifically, I noticed that only contours with an enclosing radius > 10 get put into the queue. Maybe that path of the code just never gets executed. Are you quite sure that any values get put into the queue at all? Maybe a print x, y, radius before the if radius > 10 will shed some light. (And why put center instead of x and y?)
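A sketch of that suggestion, spliced into the question's inner loop (Python 2 print statements to match the question's environment; x, y, radius, center, and q all come from the original code):

# Hedged debugging fragment for the question's contour loop.
print x, y, radius        # confirms this branch runs and shows typical radii
if radius > 10:
    q.put(center)         # only centers with radius > 10 ever enter the queue
else:
    print 'dropped: radius <= 10'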

Assertion failed (m.dims >= 2) in Mat

Here is the code for image thresholding. I am getting the error at line 22, which is:
ret,thresh2 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)
In this code I want to capture image frames from the video camera and then perform various kinds of thresholding operations on the captured frames.
I have stored image frames at various instants of time. My objective is to segment the moving objects in the video, hence I am applying the thresholding operation.
Does anybody have any idea how to do it?
Thanks in advance.
import cv2
import numpy as np
import time
from matplotlib import pyplot as plt
import sys

cam = cv2.VideoCapture(0)
while(cam.isOpened()):
    ret, frame = cam.read() #Keep on capturing the frames continuously
    while (ret==True):
        #img = cv2.imread('/home/shrikrishna/Detection&Tracking/OpenCV-Tutorial',6)
        cv2.imwrite('At time'+ str(time.clock()) + '.jpg', frame)
        img2 = cv2.imread('At time'+ str(time.clock()) + '.jpg',6)
        t = str(time.clock())
        cv2.imshow('Orignal',frame)
        k = cv2.waitKey(0) & 0xffff
        if(k==27):
            #img = cv2.imread('At time'+ str(time.clock()) + '.jpg',6)
            break
        if(k==ord('q')):
            sys.exit(0)
            break
    #cv2.imwrite('At time'+ t + '.jpg', frame)
    img = cv2.imread('At time'+ t + '.jpg',6)
    ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
    ret,thresh2 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)
    ret,thresh3 = cv2.threshold(img,127,255,cv2.THRESH_TRUNC)
    ret,thresh4 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO)
    ret,thresh5 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO_INV)
    titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
    images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
    for i in xrange(6):
        plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
        plt.title(titles[i])
        plt.xticks([]),plt.yticks([])
    plt.show()
    cv2.waitKey(0)
cv2.destroyAllWindows()
In the following line, you read in an image as a colour image (based on the second parameter, flags):
img = cv2.imread('At time'+ t + '.jpg',6)
This means that img contains 3 channels, which in Python is represented by a 3-dimensional array.
You immediately use this image as the source for thresholding:
ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
According to the documentation, the first parameter to threshold() is:
src – input array (single-channel, 8-bit or 32-bit floating point).
That means you need a single channel image, e.g. a grayscale image:
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh1 = cv2.threshold(img_gray,127,255,cv2.THRESH_BINARY)
# ...
Another option would be to just read the image as grayscale in the first place:
img_gray = cv2.imread('At time'+ t + '.jpg',0)
# ...
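A quick way to verify which case you are in is to print the array shape before thresholding; a minimal sketch (the frame filename is hypothetical):

# threshold() needs a single-channel array, so check .shape first.
import cv2

img = cv2.imread('frame.jpg')    # hypothetical frame; 3-channel BGR by default
print(img.shape)                 # (h, w, 3) -> colour, must convert first
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print(gray.shape)                # (h, w)    -> single channel, OK
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)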

error with openCV2

I am new to coding. Using this script:
from PIL import Image
from PIL.ImageChops import subtract
import numpy, math, time, glob, sys, os, logging, requests, random

def GreenScreen(infile, inbg, outfile='output.png', keyColor=None, tolerance=None):
    """
    http://gc-films.com/chromakey.html
    http://www.cs.utah.edu/~michael/chroma/
    :param infile: Greenscreen image location
    :param inbg: Background image location
    :param outfile: Output file location
    :param keyColor: greenscreen color; it can be any singular color
    :param tolerance: tolerance of cleaning
    :return:
    """
    if not keyColor:
        keyColor = [151,44,21] #Y, Cb, and Cr values of the greenscreen
    if not tolerance:
        tolerance = [100,130] #Allowed distance from values
    #open files
    inDataFG = Image.open('/home/leonardo/Scrivania/in/KVfnt.png').convert('YCbCr')
    Path = '/home/leonardo/Scrivania/background/'
    FullPath = os.path.join(Path, random.choice(os.listdir(Path)))
    BG = Image.open(FullPath).convert('RGB')
    [Y_key, Cb_key, Cr_key] = keyColor
    [tola, tolb] = tolerance
    (x,y) = inDataFG.size #get dimensions
    foreground = numpy.array(inDataFG.getdata()) #make array from image
    maskgen = numpy.vectorize(colorclose) #vectorize masking function
    alphaMask = maskgen(foreground[:,1], foreground[:,2], Cb_key, Cr_key, tola, tolb) #generate mask
    alphaMask.shape = (y,x) #make mask dimensions of original image
    imMask = Image.fromarray(numpy.uint8(alphaMask)) #convert array to image
    invertMask = Image.fromarray(numpy.uint8(255-255*(alphaMask/255))) #create inverted mask with extremes
    #create images for color mask
    colorMask = Image.new('RGB', (x,y), tuple([0,0,0]))
    allgreen = Image.new('YCbCr', (x,y), tuple(keyColor))
    colorMask.paste(allgreen, invertMask) #make color mask green in green values on image
    inDataFG = inDataFG.convert('RGB') #convert input image to RGB for ease of working with
    cleaned = subtract(inDataFG, colorMask) #subtract greens from input
    BG.paste(cleaned, imMask) #paste masked foreground over background
    # BG.show() #display cleaned image
    BG.save(outfile, "JPEG") #save cleaned image

def colorclose(Cb_p, Cr_p, Cb_key, Cr_key, tola, tolb):
    temp = math.sqrt((Cb_key-Cb_p)**2+(Cr_key-Cr_p)**2)
    if temp < tola:
        z = 0.0
    elif temp < tolb:
        z = ((temp-tola)/(tolb-tola))
    else:
        z = 1.0
    return 255.0*z

def check_folders(logger):
    if not os.path.exists('out/'):
        os.mkdir('out/')
    if not os.path.exists('background/'):
        os.mkdir('background/')
        logger.error("Place background images in background/")
        sys.exit()
    if not os.path.exists('in/'):
        os.mkdir('in/')
        logger.error("Place input files in in/")
        sys.exit()

def begin_greenbox(logger):
    """
    For all backgrounds loop through all input files into the out file
    """
    for bg in glob.glob('background/*'):
        continue
    bg_name = bg.split('/')[-1].lower().strip('.jpg').strip('.png').strip('.jpeg')
    for picture in glob.glob('in/*'):
        continue
    pic_name = picture.split('/')[-1].lower().strip('.JPG').strip('.png').strip('.jpeg')
    output_file = 'out/' + bg_name + ' ' + pic_name + '.jpg'
    one_pic = time.time()
    GreenScreen(infile=picture, inbg=bg, outfile=output_file)
    one_pic_time_done = time.time()
    time_arr.append(one_pic_time_done-one_pic)
    logger.info(time_arr)
    logger.info('done : %s' % pic_name)

def start_logging():
    logging.basicConfig()
    logger = logging.getLogger('greenbox')
    logger.setLevel(logging.INFO)
    return logger

if __name__ == '__main__':
    time_start = time.time()
    time_arr = []
    logger = start_logging()
    logger.info("Start time: %s" % time_start)
    check_folders(logger)
    begin_greenbox(logger)
    time_end = time.time()
    logger.info("End time: %s" % time_end)
Everything is okay and the image is saved in the /out folder. Using this code:
from cv2 import *

# initialize the camera
cam = VideoCapture(0)   # 0 -> index of camera
s, img = cam.read()
if s:    # frame captured without any errors
    namedWindow("cam-test", WINDOW_AUTOSIZE)
    imwrite('/home/leonardo/Scrivania/in/KVfnt.png', img) #save image
Everything is okay and the image is captured from the camera and saved in the /in folder. If I add the second code to the first one:
from PIL import Image
from PIL.ImageChops import subtract
import numpy, math, time, glob, sys, os, logging, requests, random
from cv2 import *

# initialize the camera
cam = VideoCapture(0)   # 0 -> index of camera
s, img = cam.read()
if s:    # frame captured without any errors
    namedWindow("cam-test", WINDOW_AUTOSIZE)
    imwrite('/home/leonardo/Scrivania/in/KVfnt.png', img) #save image

def GreenScreen(infile, inbg, outfile='output.png', keyColor=None, tolerance=None):
    """
    http://gc-films.com/chromakey.html
    http://www.cs.utah.edu/~michael/chroma/
    :param infile: Greenscreen image location
    :param inbg: Background image location
    :param outfile: Output file location
    :param keyColor: greenscreen color; it can be any singular color
    :param tolerance: tolerance of cleaning
    :return:
    """
    if not keyColor:
        keyColor = [151,44,21] #Y, Cb, and Cr values of the greenscreen
    if not tolerance:
        tolerance = [100,130] #Allowed distance from values
    #open files
    inDataFG = Image.open('/home/leonardo/Scrivania/in/KVfnt.png').convert('YCbCr')
    Path = '/home/leonardo/Scrivania/background/'
    FullPath = os.path.join(Path, random.choice(os.listdir(Path)))
    BG = Image.open(FullPath).convert('RGB')
    [Y_key, Cb_key, Cr_key] = keyColor
    [tola, tolb] = tolerance
    (x,y) = inDataFG.size #get dimensions
    foreground = numpy.array(inDataFG.getdata()) #make array from image
    maskgen = numpy.vectorize(colorclose) #vectorize masking function
    alphaMask = maskgen(foreground[:,1], foreground[:,2], Cb_key, Cr_key, tola, tolb) #generate mask
    alphaMask.shape = (y,x) #make mask dimensions of original image
    imMask = Image.fromarray(numpy.uint8(alphaMask)) #convert array to image
    invertMask = Image.fromarray(numpy.uint8(255-255*(alphaMask/255))) #create inverted mask with extremes
    #create images for color mask
    colorMask = Image.new('RGB', (x,y), tuple([0,0,0]))
    allgreen = Image.new('YCbCr', (x,y), tuple(keyColor))
    colorMask.paste(allgreen, invertMask) #make color mask green in green values on image
    inDataFG = inDataFG.convert('RGB') #convert input image to RGB for ease of working with
    cleaned = subtract(inDataFG, colorMask) #subtract greens from input
    BG.paste(cleaned, imMask) #paste masked foreground over background
    # BG.show() #display cleaned image
    BG.save(outfile, "JPEG") #save cleaned image

def colorclose(Cb_p, Cr_p, Cb_key, Cr_key, tola, tolb):
    temp = math.sqrt((Cb_key-Cb_p)**2+(Cr_key-Cr_p)**2)
    if temp < tola:
        z = 0.0
    elif temp < tolb:
        z = ((temp-tola)/(tolb-tola))
    else:
        z = 1.0
    return 255.0*z

def check_folders(logger):
    if not os.path.exists('out/'):
        os.mkdir('out/')
    if not os.path.exists('background/'):
        os.mkdir('background/')
        logger.error("Place background images in background/")
        sys.exit()
    if not os.path.exists('in/'):
        os.mkdir('in/')
        logger.error("Place input files in in/")
        sys.exit()

def begin_greenbox(logger):
    """
    For all backgrounds loop through all input files into the out file
    """
    for bg in glob.glob('background/*'):
        continue
    bg_name = bg.split('/')[-1].lower().strip('.jpg').strip('.png').strip('.jpeg')
    for picture in glob.glob('in/*'):
        continue
    pic_name = picture.split('/')[-1].lower().strip('.JPG').strip('.png').strip('.jpeg')
    output_file = 'out/' + bg_name + ' ' + pic_name + '.jpg'
    one_pic = time.time()
    GreenScreen(infile=picture, inbg=bg, outfile=output_file)
    one_pic_time_done = time.time()
    time_arr.append(one_pic_time_done-one_pic)
    logger.info(time_arr)
    logger.info('done : %s' % pic_name)

def start_logging():
    logging.basicConfig()
    logger = logging.getLogger('greenbox')
    logger.setLevel(logging.INFO)
    return logger

if __name__ == '__main__':
    time_start = time.time()
    time_arr = []
    logger = start_logging()
    logger.info("Start time: %s" % time_start)
    check_folders(logger)
    begin_greenbox(logger)
    time_end = time.time()
    logger.info("End time: %s" % time_end)
I obtain this error:
File "chromakey+upload.py", line 116, in <module>
begin_greenbox(logger)
File "chromakey+upload.py", line 97, in begin_greenbox
GreenScreen(infile=picture ,inbg=bg, outfile=output_file)
File "chromakey+upload.py", line 56, in GreenScreen
cleaned = subtract(inDataFG,colorMask) #subtract greens from input
TypeError: src1 is not a numpy array, neither a scalar
What is the problem? Thank you for your answers.
As the error says:
src1 is not a numpy array, neither a scalar
Perhaps you should try:
cleaned = subtract(numpy.array(inDataFG.getdata()),numpy.array(colorMask.getdata()))
Edit
There is a 'conflict' on subtract:
from PIL.ImageChops import subtract # first subtract
from cv2 import * # OpenCV has a subtract too
This is one of the reasons to use namespaces on your calls.
If your main image lib is PIL, maybe you should do import cv2 and use cv2.* when needed.
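A minimal sketch of that namespacing style, assuming the same file locations as the question (the black mask image here is a stand-in for the real colorMask):

# Keep PIL's subtract unshadowed by importing cv2 as a namespace.
from PIL import Image
from PIL.ImageChops import subtract   # PIL version: operates on PIL Images
import cv2                            # no `from cv2 import *`, so nothing is shadowed

cam = cv2.VideoCapture(0)             # explicit cv2.* calls
s, img = cam.read()
if s:
    cv2.imwrite('/home/leonardo/Scrivania/in/KVfnt.png', img)

fg = Image.open('/home/leonardo/Scrivania/in/KVfnt.png').convert('RGB')
mask = Image.new('RGB', fg.size, (0, 0, 0))   # stand-in for the real colorMask
cleaned = subtract(fg, mask)          # unambiguously PIL.ImageChops.subtract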
