PyQt QTimer.singleShot in a QEventLoop

@QtCore.pyqtSlot()
def startVideo(self):
    global image
    run_video = True
    while run_video:
        ret, image = self.camera.read()
        color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        qt_image1 = QtGui.QImage(color_swapped_image.data,
                                 self.width,
                                 self.height,
                                 color_swapped_image.strides[0],
                                 QtGui.QImage.Format_RGB888)
        self.VideoSignal1.emit(qt_image1)

        if self.flag:
            img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            img_canny = cv2.Canny(img_gray, 50, 100)
            qt_image2 = QtGui.QImage(img_canny.data,
                                     self.width,
                                     self.height,
                                     img_canny.strides[0],
                                     QtGui.QImage.Format_Grayscale8)
            self.VideoSignal2.emit(qt_image2)

        loop = QtCore.QEventLoop()
        QtCore.QTimer.singleShot(25, loop.quit)  # 25 ms
        loop.exec_()
How does this loop work? Inside the while loop, singleShot fires loop.quit after 25 ms, but then what does loop.exec_() do? I can't understand how the event loop and singleShot interact in this code.
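For anyone else puzzled by this pattern: the last three lines are a non-blocking 25 ms pause. singleShot schedules loop.quit to run 25 ms in the future, and loop.exec_() then blocks the while loop, while still processing Qt events such as repaints and signal deliveries, until that quit call fires. Here is a minimal standalone sketch of the same pattern; the helper name wait_ms is mine, not from the code above:

import sys
from PyQt5 import QtCore, QtWidgets

def wait_ms(msec):
    # Pause the caller for `msec` milliseconds WITHOUT freezing the GUI:
    # the local event loop keeps processing paint/input events meanwhile.
    loop = QtCore.QEventLoop()
    QtCore.QTimer.singleShot(msec, loop.quit)  # schedule quit() for later
    loop.exec_()                               # blocks here until quit() runs

app = QtWidgets.QApplication(sys.argv)
print("before")
wait_ms(25)   # behaves like time.sleep(0.025), but the UI stays responsive
print("after")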

Related

How to use Qt.QNetworkAccessManager() for an array of pictures

I get a list of picture links from the Internet and need to display them without downloading them to the device. How do I use Qt.QNetworkAccessManager() for an array of pictures?
def createCellWidget(self, pix, btn):
    layout = QGridLayout()
    frame = QFrame()
    frame.setLayout(layout)
    self.picture = QLabel()
    pixmap = QPixmap(pix)
    self.b(pix)
    self.picture.setFixedSize(250, 370)
    layout.addWidget(self.picture, 0, 0)
    self.lbl_checkbox = QCheckBox(self.picture)
    self.lbl_checkbox.setText(btn)
    return frame

def b(self, h):
    self.nam = Qt.QNetworkAccessManager()
    print("Load image")
    url = h
    self.nam.get(Qt.QNetworkRequest(Qt.QUrl(url)))
    self.nam.finished.connect(self.finish_request)

def finish_request(self, reply):
    img = Qt.QPixmap()
    img.loadFromData(reply.readAll())
    self.picture.setPixmap(img)
What am I doing wrong?
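One likely problem in the code above: every call to createCellWidget rebinds self.picture, so all finished downloads land on the last-created label, and b() also recreates the manager on every call. A hedged sketch of the usual fix, keeping one QNetworkAccessManager and routing each reply to the label that requested it; the property key "target_label" and the method names are my own choices, not from the original code:

from PyQt5 import QtCore, QtGui, QtWidgets, QtNetwork

class Gallery(QtWidgets.QWidget):
    def __init__(self):
        super().__init__()
        # One manager for the whole widget, created once.
        self.nam = QtNetwork.QNetworkAccessManager(self)
        self.nam.finished.connect(self.finish_request)

    def load_image(self, url, label):
        reply = self.nam.get(QtNetwork.QNetworkRequest(QtCore.QUrl(url)))
        # Remember which label this download belongs to.
        reply.setProperty("target_label", label)

    def finish_request(self, reply):
        label = reply.property("target_label")
        pixmap = QtGui.QPixmap()
        pixmap.loadFromData(reply.readAll())
        label.setPixmap(pixmap)
        reply.deleteLater()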

PhotoImage Tkinter problem: Button not working and not displaying

I'm using Proxlight Designer to create a drag-and-drop GUI. It is an application that works with opencv-python, but a Button is not displaying. It seems as if OpenCV is the problem, because if you remove it, the Button displays and works properly. Here is the code for the GUI:
import cv2
from tkinter import *
from PIL import Image, ImageTk

cap = cv2.VideoCapture(0)

window = Tk()
window.geometry("700x800")
window.configure(bg="#ffffff")

canvas = Canvas(
    window,
    bg="#ffffff",
    height=800,
    width=700,
    bd=0,
    highlightthickness=0,
    relief="ridge")
canvas.place(x=0, y=0)

l1 = Label(bg="black")
l1.place(x=100, y=150, width=500, height=500)

img0 = PhotoImage(file="RES/img1.png")
b0 = Button(
    image=img0,
    borderwidth=0,
    highlightthickness=0,
    command=save_face,      # defined elsewhere in the application
    relief="flat")
b0.place(
    x=250, y=693,
    width=200,
    height=75)

img1 = PhotoImage(file="RES/img2.png")
b1 = Button(
    image=img1,
    borderwidth=0,
    highlightthickness=0,
    command=encryptPass,    # defined elsewhere in the application
    relief="flat")
b1.place(
    x=480, y=693,
    width=200,
    height=75)

img2 = PhotoImage(file="RES/img3.png")
b2 = Button(
    image=img2,
    borderwidth=0,
    highlightthickness=0,
    command=generate_key,   # defined elsewhere in the application
    relief="flat")
b2.place(
    x=20, y=693,
    width=200,
    height=75)

window.resizable(False, False)

while True:
    img = cap.read()[1]
    img1 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # note: rebinds img1, which held b1's button image
    img = ImageTk.PhotoImage(Image.fromarray(img1))
    l1["image"] = img
    window.update()
Thanks to the comments of @Matiiss and @acw1668 (thanks a lot, by the way) I got it to work. Basically, the while loop was the problem. I fixed it by using this instead of the while loop:
import cv2
from tkinter import *
from PIL import Image, ImageTk

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

def show_frame():
    _, frame = cap.read()
    global cv2image
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(cv2image)
    imgtk = ImageTk.PhotoImage(image=img)
    l1.imgtk = imgtk          # keep a reference so the image is not garbage-collected
    l1.configure(image=imgtk)
    l1.after(10, show_frame)  # schedule the next frame; mainloop stays in control

window = Tk()
window.geometry("700x800")
window.configure(bg="#ffffff")
l1 = Label(bg="black")
l1.place(x=100, y=150, width=500, height=500)
window.resizable(False, False)
show_frame()
window.mainloop()
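A note on why this works: l1.after(10, show_frame) schedules the next frame and immediately returns control to Tk's mainloop, so the Button widgets receive their normal redraw and click events. A while True loop that only calls window.update() never lets mainloop run, which is what starved the buttons in the original code.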

Issues with a CNN making predictions on video frames

import cv2
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model("oneptwoside.model")
CATEGORIES = ["front", "back"]

cap = cv2.VideoCapture(0)
if cap.isOpened():
    ret, frame = cap.read()
else:
    ret = False

while ret:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', gray)

    IMG_SIZE = 200
    img_array = cv2.imread(frame, cv2.IMREAD_GRAYSCALE)
    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    prediction = model.predict([img_array])
    print(prediction)
    print(CATEGORIES[int(prediction[0][0])])

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
I can't make predictions with my webcam. The same code works fine on a still image with a little tweaking, but with video it always either fails to read the frame or hits a resizing issue.
import time

prevTime = time.time()
last_recorded_time = time.time()

if cap.isOpened():
    ret, frame = cap.read()
else:
    ret = False

while ret:
    curr_time = time.time()
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # FPS overlay
    curTime = time.time()
    sec = curTime - prevTime
    prevTime = curTime
    fps = 1 / sec
    fps_text = "FPS : %0.1f" % fps
    cv2.putText(frame, fps_text, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 150, 0))
    cv2.rectangle(frame, (200, 0), (150, 128), (0, 255, 0), 3)

    cv2.imshow('frame', gray)
    cv2.imshow('color', frame)

    if curr_time - last_recorded_time >= 0.0001:
        last_recorded_time = curr_time
        IMG_SIZE = 200
        # the model expects a grayscale (batch, 200, 200, 1) tensor,
        # so resize and reshape the already-captured frame
        model_input = cv2.resize(gray, (IMG_SIZE, IMG_SIZE))
        model_input = model_input.reshape((-1, IMG_SIZE, IMG_SIZE, 1))
        prediction = model.predict(model_input)
        print(prediction)
        print(CATEGORIES[int(prediction[0][0])])

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
The code above is my solution; it's not ideal, but it's working.
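For anyone hitting the same problem, the core issue is the preprocessing: cv2.imread expects a file path, not a frame that is already a NumPy array, and the model needs a (batch, height, width, channels) tensor. A minimal sketch of a per-frame prediction helper under those assumptions; the function name prepare_frame and the division by 255.0 are my choices, not from the original post:

import cv2
import numpy as np

IMG_SIZE = 200

def prepare_frame(frame):
    # frame is already an array from cap.read(); no cv2.imread needed
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    resized = cv2.resize(gray, (IMG_SIZE, IMG_SIZE))
    normalized = resized / 255.0  # match training-time scaling, if any
    return normalized.reshape(-1, IMG_SIZE, IMG_SIZE, 1)  # (batch, h, w, c)

# usage inside the capture loop:
# prediction = model.predict(prepare_frame(frame))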

How can I combine a graph, webcam capture (image) and buttons into the same window?

My code is designed to take an image off of the webcam, display the image, read the RGB colors of multiple pixels with OpenCV, and then plot those points in a graph (matplotlib). It all works, but I want to have the image, the graph and the buttons all in the same window, instead of three separate windows. Can anyone help?
Here is my code.
import cv2
import numpy as np
import time
from tkinter import *
from matplotlib import pyplot as plt

def closeWindow():
    exit()

#def newSample():

def nextSample():
    ramp_frames = 30
    cv2.destroyAllWindows()
    cap = cv2.VideoCapture(0)
    s, im = cap.read()
    cv2.imshow("test11.bmp", im)
    cv2.imwrite("test11.bmp", im)
    cv2.destroyAllWindows()
    cap.release()
    img = cv2.imread('test11.bmp', cv2.IMREAD_COLOR)
    #img1 = PhotoImage(file="C:\Users\Isaac Erickson\Documents\Python\test.bmp")
    plt.gcf().clear()
    px1 = im[90, 100]
    px2 = im[90, 150]
    px3 = im[90, 200]
    px4 = im[90, 250]
    px5 = im[90, 300]
    px6 = im[90, 350]
    px7 = im[90, 400]
    px8 = im[90, 550]  # max 480, 640
    plt.plot(["300", "400", "450", "500", "550", "600", "650", "700"],
             [px1, px2, px3, px4, px5, px6, px7, px8])  # x and y coordinates
    cv2.imshow('image', im)
    plt.show()

def reaction():
    ramp_frames = 30
    cv2.destroyAllWindows()
    cap = cv2.VideoCapture(0)
    s, im = cap.read()
    cv2.imshow("test11.bmp", im)
    cv2.imwrite("test11.bmp", im)
    cv2.destroyAllWindows()
    cap.release()
    img = cv2.imread('test11.bmp', cv2.IMREAD_COLOR)
    plt.gcf().clear()
    px1 = im[90, 100]
    px2 = im[90, 150]
    px3 = im[90, 200]
    px4 = im[90, 250]
    px5 = im[90, 300]
    px6 = im[90, 350]
    px7 = im[90, 400]
    px8 = im[90, 550]  # max 480, 640
    plt.plot(["1", "2", "3", "4", "5", "6", "7", "8"],
             [px1, px2, px3, px4, px5, px6, px7, px8])  # x and y coordinates
    cv2.imshow('image', im)
    plt.show()

ramp_frames = 30
cap = cv2.VideoCapture(0)
s, im = cap.read()
cv2.imshow("test11.bmp", im)
cv2.imwrite("test11.bmp", im)
cv2.destroyAllWindows()
cap.release()

# Analysis of image
root = Tk()
img = cv2.imread('test11.bmp', cv2.IMREAD_COLOR)
px1 = im[90, 100]
px2 = im[90, 150]
px3 = im[90, 200]
px4 = im[90, 250]
px5 = im[90, 300]
px6 = im[90, 350]
px7 = im[90, 400]
px8 = im[90, 550]  # max 480, 640
plt.plot(["300", "400", "450", "500", "550", "600", "650", "700"],
         [px1, px2, px3, px4, px5, px6, px7, px8])  # x and y coordinates

title = Label(root, text="VISIBLE COLORS")
p1 = Label(root, text=px1)
p2 = Label(root, text=px2)
p3 = Label(root, text=px3)
p4 = Label(root, text=px4)
p5 = Label(root, text=px5)
p6 = Label(root, text=px6)
p7 = Label(root, text=px7)
p8 = Label(root, text=px8)
title.pack()
p1.pack()
p2.pack()
p3.pack()
p4.pack()
p5.pack()
p6.pack()
p7.pack()
p8.pack()

button1 = Button(root, text="CLOSE", command=closeWindow)
button2 = Button(root, text="Next Sample", command=nextSample)
button3 = Button(root, text="reaction", command=reaction)
button1.pack()
button2.pack()
button3.pack()

print(px1)
print(px2)
print(px3)
print(px4)
print(px5)
print(px6)
print(px7)
print(px8)

cv2.imshow('image', im)
plt.show()
root.mainloop()
cv2.waitKey(0)
cap.release()
#out.release()
cv2.destroyAllWindows()
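No answer is recorded above, but the standard approach is to embed both the matplotlib figure and the camera image in the Tk window itself, instead of calling plt.show() and cv2.imshow(), which each open their own window. A minimal sketch of that idea using matplotlib's FigureCanvasTkAgg and a PIL-backed Label; the layout and names are illustrative, not from the original code:

import cv2
from tkinter import Tk, Label, Button
from PIL import Image, ImageTk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

root = Tk()

# capture one frame and show it in a Label instead of cv2.imshow
cap = cv2.VideoCapture(0)
_, im = cap.read()
cap.release()
rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
photo = ImageTk.PhotoImage(Image.fromarray(rgb))
Label(root, image=photo).pack()

# embed the plot in the same window instead of plt.show()
fig = Figure(figsize=(4, 3))
ax = fig.add_subplot(111)
pixels = [im[90, x] for x in (100, 150, 200, 250, 300, 350, 400, 550)]
ax.plot(range(len(pixels)), pixels)  # one line per BGR channel
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()
canvas.get_tk_widget().pack()

Button(root, text="CLOSE", command=root.destroy).pack()
root.mainloop()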

PyQt4 video player crashes when moving window

I've written a simple PyQt4 GUI that plays an OpenCV VideoCapture. This requires converting frames from numpy arrays to QImages. I'm using OpenCV so that I can detect circles using my findCircles method.
However, when I pass my frames to findCircles, the program crashes when the window is moved. This problem does not occur when I don't search for circles. I don't understand why this is happening, as I'm under the impression that the work is being done on a different thread than the GUI since I call findCircles from the run method of a QThread.
Note that I don't receive a normal error message in the console; Python itself simply crashes.
Here is the video file I've been using to test my player. I'm running Python 2.7.6 on Windows 8.1.
import sys
import cv2.cv as cv, cv2
from PyQt4.Qt import *
import time

def numpyArrayToQImage(array):
    if array is not None:
        height, width, bytesPerComponent = array.shape
        bytesPerLine = bytesPerComponent * width
        cv2.cvtColor(array, cv.CV_BGR2RGB, array)
        return QImage(array.data, width, height, bytesPerLine, QImage.Format_RGB888)
    return None

def findCircles(frame):
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurredFrame = cv2.medianBlur(grayFrame, 3)
    circles = cv2.HoughCircles(blurredFrame, cv.CV_HOUGH_GRADIENT, 1, 30,
                               param1=50, param2=30, minRadius=30, maxRadius=35)
    if circles is not None:
        for i in circles[0]:
            cv2.circle(frame, (i[0], i[1]), i[2], (255, 0, 0), 1)  # Perimeter
            cv2.circle(frame, (i[0], i[1]), 3, (0, 255, 0), -1)    # Center

class VideoThread(QThread):
    frameProcessed = pyqtSignal(QImage)

    def __init__(self, video, videoLabel):
        QThread.__init__(self)
        self.video = video
        self.fps = self.video.get(cv.CV_CAP_PROP_FPS)
        self.frameCount = self.video.get(cv.CV_CAP_PROP_FRAME_COUNT)
        self.startingSecond = 0
        self.videoLabel = videoLabel

    def run(self):
        clockAtStart = time.clock()
        while True:
            runtime = self.startingSecond + (time.clock() - clockAtStart)
            currentFrame = int(runtime * self.fps)
            if currentFrame < self.frameCount - 1:
                self.video.set(cv.CV_CAP_PROP_POS_FRAMES, currentFrame)
                frame = self.video.read()[1]
                findCircles(frame)  # Removing this line removes the issue
                self.frameProcessed.emit(numpyArrayToQImage(frame))
                time.sleep(.02)
            else:
                break

class MainWindow(QMainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.initUI()

    @pyqtSlot(QImage)
    def updateVideoLabel(self, image):
        self.videoLabel.setPixmap(QPixmap.fromImage(image))
        self.videoLabel.update()

    def initUI(self):
        self.setGeometry(300, 300, 500, 375)
        self.setMinimumHeight(250)
        self.createWidgets()
        self.addWidgets()

    def startNewVideo(self):
        self.video = cv2.VideoCapture(unicode(QFileDialog.getOpenFileName(self, "Open video").toUtf8(), encoding="UTF-8"))
        self.videoThread = VideoThread(self.video, self.videoLabel)
        self.videoThread.frameProcessed.connect(self.updateVideoLabel)
        self.playVideoFrom(0)

    def playVideoFrom(self, frame):
        self.videoThread.startingSecond = frame / self.videoThread.fps
        self.videoThread.start()

    def createWidgets(self):
        self.populateMenuBar()
        self.videoLabel = QLabel()
        self.videoLabel.setStyleSheet('background-color : black;')

    def populateMenuBar(self):
        self.menuBar = self.menuBar()
        fileMenu = QMenu('File', self)
        openAction = QAction('Open video...', self)
        openAction.triggered.connect(self.startNewVideo)
        fileMenu.addAction(openAction)
        self.menuBar.addMenu(fileMenu)

    def addWidgets(self):
        mainLayout = QVBoxLayout()
        mainLayout.addWidget(self.videoLabel, 1)
        centralWidget = QWidget()
        self.setCentralWidget(centralWidget)
        centralWidget.setLayout(mainLayout)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    player = MainWindow()
    player.show()
    sys.exit(app.exec_())
I've tested your program, and it crashes when it finds no circles as indicated in the error message:
Traceback (most recent call last):
  File "test_opencv_tkinter.py", line 53, in run
    findCircles(frame) # Removing this line removes the issue
  File "test_opencv_tkinter.py", line 26, in findCircles
    if len(circles) > 0:
TypeError: object of type 'NoneType' has no len()
I've made some changes to the findCircles(frame) function, as follows, and it now runs without error, even when I move the window around on the screen.
def findCircles(frame):
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurredFrame = cv2.medianBlur(grayFrame, 3)
    circles = cv2.HoughCircles(grayFrame, cv.CV_HOUGH_GRADIENT, 1, 20,
                               param1=50, param2=30, minRadius=0, maxRadius=0)
    if circles is None:
        print "no circles found"
        return
    if len(circles) > 0:
        print "found circles ", len(circles[0])
        for i in circles[0]:
            cv2.circle(frame, (i[0], i[1]), i[2], (255, 0, 0), 1)  # Perimeter
            cv2.circle(frame, (i[0], i[1]), 3, (0, 255, 0), -1)    # Center
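A usage note: with current NumPy, comparing an array to None with == is an elementwise operation, so the None check should always be written with is/is not. A sketch of the same guard against the modern cv2 API (cv2.HOUGH_GRADIENT replaces cv.CV_HOUGH_GRADIENT, and Python 3 print), for anyone porting this code; this is my adaptation, not the original answer:

import cv2

def find_circles(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.medianBlur(gray, 3)
    circles = cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=50, param2=30, minRadius=0, maxRadius=0)
    if circles is None:  # HoughCircles returns None when nothing is found
        print("no circles found")
        return
    for x, y, r in circles[0]:
        cv2.circle(frame, (int(x), int(y)), int(r), (255, 0, 0), 1)  # perimeter
        cv2.circle(frame, (int(x), int(y)), 3, (0, 255, 0), -1)      # center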
