Issues with a CNN making predictions in video frames - python-3.x

import cv2
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model("oneptwoside.model")
CATEGORIES = ["front", "back"]

cap = cv2.VideoCapture(0)
if cap.isOpened():
    ret, frame = cap.read()
else:
    ret = False

while ret:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', gray)

    IMG_SIZE = 200
    img_array = cv2.imread(frame, cv2.IMREAD_GRAYSCALE)
    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    prediction = model.predict([img_array])
    print(prediction)
    print(CATEGORIES[int(prediction[0][0])])

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
I can't make predictions with my webcam. The same code works fine on a single image with a little bit of tweaking, but with video it either fails to read the frame or runs into a resizing issue.

import time

# timers for the FPS counter and the prediction throttle below
prevTime = time.time()
last_recorded_time = time.time()

if cap.isOpened():
    ret, frame = cap.read()
else:
    ret = False

while ret:
    curr_time = time.time()
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    curTime = time.time()
    sec = curTime - prevTime
    prevTime = curTime
    fps = 1 / sec
    str = "FPS : %0.1f" % fps
    cv2.putText(frame, str, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 150, 0))

    cv2.imshow('frame', gray)
    cv2.imshow('color', frame)

    if curr_time - last_recorded_time >= 0.0001:
        last_recorded_time = curr_time
        IMG_SIZE = 200
        #img_array = cv2.imread('frame', cv2.IMREAD_GRAYSCALE)
        frame = cv2.resize(frame, None, fx=0.5, fy=0.5)
        frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))
        #frame = frame[np.newaxis, ...]
        frame = frame.reshape((-1, 200, 200, 1))
        cv2.rectangle(frame, (200, 0), (150, 128), (0, 255, 0), 3)
        prediction = model.predict([frame])
        #prediction = model.predict([frame])
        print(prediction)
        print(CATEGORIES[int(prediction[0][0])])

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
The snippet above is a solution; it's not ideal, but it's working.
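
For reference, here is a cleaner sketch of the prediction loop. It is only a sketch under assumptions not confirmed by the post: that the model was trained on 200x200 grayscale images scaled to [0, 1], and that its output indexes CATEGORIES the same way the question does. The main changes are resizing the frame returned by cap.read() directly instead of passing it to cv2.imread (which expects a file path), and reshaping to the (batch, height, width, channels) layout Keras expects.

import cv2
import numpy as np
import tensorflow as tf

IMG_SIZE = 200
CATEGORIES = ["front", "back"]
model = tf.keras.models.load_model("oneptwoside.model")

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # Preprocess the captured frame the way the training data is assumed to
    # have been prepared: grayscale, 200x200, then add batch/channel dimensions.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    resized = cv2.resize(gray, (IMG_SIZE, IMG_SIZE))
    batch = resized.reshape(1, IMG_SIZE, IMG_SIZE, 1).astype(np.float32) / 255.0  # drop /255.0 if training did not scale

    prediction = model.predict(batch)
    print(prediction)
    print(CATEGORIES[int(prediction[0][0])])  # same indexing as in the question

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()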

Related

How to do OpenCV Laser shot detection with Python on light backgrounds?

I'm trying to build a laser shot detection game in Python3 using OpenCV. I have a proof of concept working which will highlight detected momentary "shots" of the laser on the background.
The problem I'm having is that the laser signature doesn't get detected on lighter backgrounds, or if there is a very white / bright item near the shot. I'm sure this is because of the way I'm using binary thresholds to detect the laser as the brightest thing in the frame; with light elements, the laser gets washed out.
My question is how can I alter my approach to handle this situation, or perhaps "calibrate" the background / other items so that the laser can be detected? Ultimately I'm trying to detect laser shots on a computer screen, where the background where shots are landing is a video with its own high lights and the screen puts out its own light.
Any guidance is appreciated.
main.py
import cv2
from camera import VideoCamera
from detection import LaserDetector

debug = False
radius_min = float(1)
radius_max = float(10)
shot_size = 5
color_blue = (255, 0, 0)

cam = VideoCamera(640, 480)
shots = []

try:
    while True:
        frame = cam.get_frame()
        if frame is not False:
            shot = LaserDetector(frame, debug).detect()
            if shot:
                x, y, radius = shot
                if radius >= radius_min and radius <= radius_max:
                    shots.append(shot)
                    if debug:
                        print(f"(x:{int(x)}, y:{int(y)}, r:{int(radius)})")
            for s in shots:
                cv2.circle(frame, (int(s[0]), int(s[1])), shot_size, color_blue, 1)
            cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
except KeyboardInterrupt:
    pass
finally:
    print("\nClosing video capture...")
    cam.release()
    cv2.destroyAllWindows()
camera.py
import cv2

class VideoCamera(object):
    def __init__(self, cam_width, cam_height):
        self.capture = cv2.VideoCapture(0)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, cam_width)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, cam_height)

    def release(self):
        self.capture.release()

    def __del__(self):
        self.capture.release()

    def get_frame(self):
        success, frame = self.capture.read()
        if success:
            return frame
        else:
            return False

    def frame_to_jpeg(self, frame):
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
detection.py
import cv2
import numpy as np
import decimal

class LaserDetector(object):
    def __init__(self, frame, debug=False):
        self.debug = debug
        self.frame = frame

    def get_contour_points(self, mask):
        countours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        if len(countours) > 0:
            # find the largest contour in the mask, then use it to compute
            # the minimum enclosing circle and centroid
            c = max(countours, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            moments = cv2.moments(c)
            if moments["m00"] > 0:
                # set the center
                (x, y) = int(moments["m10"] / moments["m00"]), \
                         int(moments["m01"] / moments["m00"])
            radius = round(decimal.Decimal(radius), 3)
            return (int(x), int(y), radius)
        return False

    def get_hsv_threshold_mask(self, frame):
        hsv_image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(hsv_image)
        ret, h_frame = cv2.threshold(h, 125, 160, cv2.THRESH_BINARY)
        ret, v_frame = cv2.threshold(v, 250, 256, cv2.THRESH_BINARY)
        output = cv2.bitwise_and(h_frame, v_frame, frame)
        if self.debug:
            indiv_output = np.concatenate((h_frame, v_frame, output), axis=1)
            cv2.imshow("threshold", indiv_output)
        return output

    def detect(self):
        mask = self.get_hsv_threshold_mask(self.frame)
        return self.get_contour_points(mask)
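
No answer is included in this thread. One direction worth exploring, offered here only as a hedged sketch (the class name DiffLaserDetector and the threshold value 40 are illustrative, not from the post): key on frame-to-frame brightness change instead of absolute brightness, so a momentary laser dot stands out even against static bright regions of the background.

import cv2

class DiffLaserDetector(object):
    """Sketch of a temporal-difference mask; values need tuning per setup."""
    def __init__(self):
        self.prev_gray = None

    def get_mask(self, frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        if self.prev_gray is None:
            self.prev_gray = gray
            return None
        # Keep only pixels that got noticeably brighter since the last frame.
        diff = cv2.subtract(gray, self.prev_gray)
        self.prev_gray = gray
        _, mask = cv2.threshold(diff, 40, 255, cv2.THRESH_BINARY)
        return mask

The resulting mask could be fed to the existing get_contour_points; the blur size and difference threshold would need tuning against the screen's own content changes.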

Why is this attribute error showing up, and what is the solution to it?

I'm running some code and it's giving this attribute error; please help.
The error shows up while running the code.
The code I am using:
import cv2
from deepface import DeepFace

faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

cap = cv2.VideoCapture(0)
if not cap.isOpened():
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise IOError("Cannot open webcam")

while True:
    ret, frame = cap.read()
    result = DeepFace.analyze(frame, actions = ["emotion"])
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText('video', frame)
    if cv2.waitKey(2) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
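
The traceback is not shown above, so this is only a guess rather than a confirmed fix, but one visible problem in the loop is that cv2.putText is called with cv2.imshow-style arguments; putText requires an image, the text string, an origin point, a font, a scale, and a colour. If the intent was simply to display the annotated frame, the call would look like this:

# Display the annotated frame in a window named 'video'.
cv2.imshow('video', frame)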

OpenCV does not want to stop video recording with cv2.COLOR_BGR2GRAY

When I run this code:
from cv2 import *

image = cv2.VideoCapture(0, cv2.CAP_DSHOW)
fourcc_cod = cv2.VideoWriter_fourcc(*"XVID")
name = input()
video = cv2.VideoWriter(f"{name}.AVI", fourcc_cod, 60, (640, 480))

while (True):
    check, frame = image.read()
    frame1 = cvtColor(frame, cv2.COLOR_BGR2GRAY)
    video.write(frame1)
    cv2.imshow('myimage', frame1)
    if waitKey(1) == ord('q'):
        cv2.destroyAllWindows()
        video.release()
        image.release()
it does not stop recording the video even if I press "q".
Try this snippet.
import cv2

cap = cv2.VideoCapture(0)
name = input()
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(f'{name}.avi', fourcc, 20.0, (640, 480))

while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 0)
    # uncomment the line below for grayscale
    # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # write the flipped frame
    out.write(frame)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release everything once the job is finished
cap.release()
out.release()  # video file writer
cv2.destroyAllWindows()
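
One caveat to add to the snippet above (my addition, not part of the original answer): if the grayscale line is uncommented, the VideoWriter also needs its isColor flag set to False, because a writer opened for colour frames will not accept single-channel images.

# Open the writer for grayscale frames; the trailing False is the isColor flag.
out = cv2.VideoWriter(f'{name}.avi', fourcc, 20.0, (640, 480), False)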

AttributeError: module 'cv2.cv2' has no attribute 'rectange'

I am using the following code on Python 3.7:
import cv2
import numpy as np

face_cascade = cv2.CascadeClassifier('D:\\ET\\haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('D:\\ET\\haarcascade_eye.xml')
cap = cv2.VideoCapture(0)

while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectange(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xFF
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
The following is installed:
opencv-contrib-python-3.4.4.19
The error is telling you that cv2 doesn't have anything called rectange, but it does have something called rectangle. You misspelled it:
cv2.rectange(img,(x,y),(x+w,y+h),(255,0,0),2)
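The corrected line:

cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)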

How can I combine a graph, webcam capture (image) and buttons into the same window?

My code is designed to take an image off the webcam, display it, read the RGB colors from multiple pixels with OpenCV, and then plot those points in a graph (matplotlib). It all works, but I want to have the image, the graph, and the buttons all in the same window instead of three separate windows. Can anyone help?
Here is my code.
import cv2
import numpy as np
import time
from tkinter import *
from matplotlib import pyplot as plt

def closeWindow():
    exit()

#def newSample():

def nextSample():
    ramp_frames = 30
    cv2.destroyAllWindows()
    cap = cv2.VideoCapture(0)
    s, im = cap.read()
    cv2.imshow("test11.bmp", im)
    cv2.imwrite("test11.bmp", im)
    cv2.destroyAllWindows()
    cap.release()
    img = cv2.imread('test11.bmp', cv2.IMREAD_COLOR)
    #img1 = PhotoImage(file="C:\Users\Isaac Erickson\Documents\Python\test.bmp")
    plt.gcf().clear()
    px1 = im[90,100]
    px2 = im[90,150]
    px3 = im[90,200]
    px4 = im[90,250]
    px5 = im[90,300]
    px6 = im[90,350]
    px7 = im[90,400]
    px8 = im[90,550] #max 480, 640
    plt.plot(["300","400","450","500","550", "600","650","700"], [px1,px2,px3,px4,px5,px6,px7,px8]) #x and y coordinates
    cv2.imshow('image',im)
    plt.show()

def reaction():
    ramp_frames = 30
    cv2.destroyAllWindows()
    cap = cv2.VideoCapture(0)
    s, im = cap.read()
    cv2.imshow("test11.bmp", im)
    cv2.imwrite("test11.bmp", im)
    cv2.destroyAllWindows()
    cap.release()
    img = cv2.imread('test11.bmp', cv2.IMREAD_COLOR)
    plt.gcf().clear()
    px1 = im[90,100]
    px2 = im[90,150]
    px3 = im[90,200]
    px4 = im[90,250]
    px5 = im[90,300]
    px6 = im[90,350]
    px7 = im[90,400]
    px8 = im[90,550] #max 480, 640
    plt.plot(["1","2","3","4","5", "6","7","8"], [px1,px2,px3,px4,px5,px6,px7,px8]) #x and y coordinates
    cv2.imshow('image',im)
    plt.show()

ramp_frames = 30
cap = cv2.VideoCapture(0)
s, im = cap.read()
cv2.imshow("test11.bmp", im)
cv2.imwrite("test11.bmp", im)
cv2.destroyAllWindows()
cap.release()

#Analysis of image
root = Tk()
img = cv2.imread('test11.bmp', cv2.IMREAD_COLOR)
px1 = im[90,100]
px2 = im[90,150]
px3 = im[90,200]
px4 = im[90,250]
px5 = im[90,300]
px6 = im[90,350]
px7 = im[90,400]
px8 = im[90,550] #max 480, 640
plt.plot(["300","400","450","500","550", "600","650","700"], [px1,px2,px3,px4,px5,px6,px7,px8]) #x and y coordinates

title = Label(root, text= "VISIBLE COLORS")
p1 = Label(root, text=px1)
p2 = Label(root, text=px2)
p3 = Label(root, text=px3)
p4 = Label(root, text=px4)
p5 = Label(root, text=px5)
p6 = Label(root, text=px6)
p7 = Label(root, text=px7)
p8 = Label(root, text=px8)
title.pack()
p1.pack()
p2.pack()
p3.pack()
p4.pack()
p5.pack()
p6.pack()
p7.pack()
p8.pack()

button1 = Button(root, text="CLOSE", command=closeWindow)
button2 = Button(root, text="Next Sample", command=nextSample)
button3 = Button(root, text="reaction", command=reaction)
button1.pack()
button2.pack()
button3.pack()

print(px1)
print(px2)
print(px3)
print(px4)
print(px5)
print(px6)
print(px7)
print(px8)

cv2.imshow('image',im)
plt.show()
root.mainloop()

cv2.waitKey(0)
cap.release()
#out.release()
cv2.destroyAllWindows()
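
No answer is included above. As a hedged sketch of one common approach (it assumes Pillow is installed; the layout choices are illustrative, not the only way): embed the matplotlib figure in the Tk window with FigureCanvasTkAgg, and show the OpenCV frame in a Tk Label via ImageTk, so the image, the plot, and the buttons all live in the single root window.

import cv2
import tkinter as tk
from PIL import Image, ImageTk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

root = tk.Tk()

# Grab one frame from the webcam, as in the question.
cap = cv2.VideoCapture(0)
ok, im = cap.read()
cap.release()

# Show the OpenCV image inside the Tk window instead of cv2.imshow.
rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)        # Tk/PIL expect RGB ordering
photo = ImageTk.PhotoImage(Image.fromarray(rgb))
tk.Label(root, image=photo).pack()

# Embed the matplotlib plot in the same window instead of calling plt.show().
fig = Figure(figsize=(4, 3))
ax = fig.add_subplot(111)
pixels = [im[90, x] for x in (100, 150, 200, 250, 300, 350, 400, 550)]
ax.plot(["300", "400", "450", "500", "550", "600", "650", "700"],
        [p.mean() for p in pixels])              # one value per sampled pixel
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()
canvas.get_tk_widget().pack()

# Buttons go in the same window as before.
tk.Button(root, text="CLOSE", command=root.destroy).pack()

root.mainloop()

The same idea extends to the Next Sample and reaction buttons: have their callbacks update the Label's image and redraw the embedded canvas rather than calling cv2.imshow and plt.show each time.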
