Plot line over pyqtgraph.ImageView() - python-3.x

I am using PyQt5 to plot some medical images (NumPy arrays) in three different widgets. Now, I want to plot a line over an image displayed using pg.ImageView. Has someone already done this?
Thanks!

You can access the ViewBox of the pg.ImageView widget using .getView(). From there you can add any items to it that you like using viewbox.addItem(). Below is a modified version of the ImageView example which draws a line plot on top of the ImageView.
# -*- coding: utf-8 -*-
"""
This example demonstrates the use of ImageView with 3-color image stacks.
ImageView is a high-level widget for displaying and analyzing 2D and 3D data.
ImageView provides:
1. A zoomable region (ViewBox) for displaying the image
2. A combination histogram and gradient editor (HistogramLUTItem) for
controlling the visual appearance of the image
3. A timeline for selecting the currently displayed frame (for 3D data only).
4. Tools for very basic analysis of image data (see ROI and Norm buttons)
"""
## Add path to library (just for examples; you do not need this)
import initExample
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
# Interpret image data as row-major instead of col-major
pg.setConfigOptions(imageAxisOrder='row-major')
app = pg.mkQApp("ImageView Example")
## Create window with ImageView widget
win = QtGui.QMainWindow()
win.resize(800,800)
imv = pg.ImageView()
win.setCentralWidget(imv)
win.show()
win.setWindowTitle('pyqtgraph example: ImageView')
## Create random 3D data set with time varying signals
dataRed = np.ones((100, 200, 200)) * np.linspace(90, 150, 100)[:, np.newaxis, np.newaxis]
dataRed += pg.gaussianFilter(np.random.normal(size=(200, 200)), (5, 5)) * 100
dataGrn = np.ones((100, 200, 200)) * np.linspace(90, 180, 100)[:, np.newaxis, np.newaxis]
dataGrn += pg.gaussianFilter(np.random.normal(size=(200, 200)), (5, 5)) * 100
dataBlu = np.ones((100, 200, 200)) * np.linspace(180, 90, 100)[:, np.newaxis, np.newaxis]
dataBlu += pg.gaussianFilter(np.random.normal(size=(200, 200)), (5, 5)) * 100
data = np.concatenate(
    (dataRed[:, :, :, np.newaxis], dataGrn[:, :, :, np.newaxis], dataBlu[:, :, :, np.newaxis]), axis=3
)
## Display the data and assign each frame a time value from 1.0 to 3.0
imv.setImage(data, xvals=np.linspace(1., 3., data.shape[0]))
## ADDED CODE: draw a line plot on top of the image
imv_v = imv.getView()
pci = pg.PlotCurveItem(x=[1, 50, 100, 150, 200], y=[1, 50, 100, 150, 200])
imv_v.addItem(pci)
## END ADDED CODE
## Set a custom color map
colors = [
    (0, 0, 0),
    (45, 5, 61),
    (84, 42, 55),
    (150, 87, 60),
    (208, 171, 141),
    (255, 255, 255)
]
cmap = pg.ColorMap(pos=np.linspace(0.0, 1.0, 6), color=colors)
imv.setColorMap(cmap)
# Start up with an ROI
imv.ui.roiBtn.setChecked(True)
imv.roiClicked()
if __name__ == '__main__':
    pg.exec()
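If you only need a straight guide line rather than an arbitrary curve, pyqtgraph's InfiniteLine can be added the same way. A minimal sketch, assuming the imv widget from the example above:
# InfiniteLine draws a straight line across the entire view.
line = pg.InfiniteLine(pos=(100, 100), angle=45, pen=pg.mkPen('r', width=2))
imv.getView().addItem(line)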

Related

AttributeError: 'Model' object has no attribute '_call_spec' (presumably an error in Keras)

So I'm trying out code from this website: https://towardsdatascience.com/real-time-age-gender-and-emotion-prediction-from-webcam-with-keras-and-opencv-bde6220d60a. I'm only interested in the real-time emotion prediction part, and I use the emotion prediction model provided by the author. After following the setup and cutting out the code I don't need (all of the code is provided at the link), I'm left with this:
import cv2
from PIL import Image
import numpy as np
from mtcnn import MTCNN
import pickle
# load face detector
detector = MTCNN()
# load the model
emotion_model = pickle.load(open('emotion-model-final.pkl', 'rb'))
def rgb2gray(rgb):
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
    return gray
def detect_face(img):
    mt_res = detector.detect_faces(img)
    return_res = []
    for face in mt_res:
        x, y, width, height = face['box']
        center = [x+(width/2), y+(height/2)]
        max_border = max(width, height)
        # center alignment
        left = max(int(center[0]-(max_border/2)), 0)
        right = max(int(center[0]+(max_border/2)), 0)
        top = max(int(center[1]-(max_border/2)), 0)
        bottom = max(int(center[1]+(max_border/2)), 0)
        # crop the face
        center_img_k = img[top:top+max_border,
                           left:left+max_border, :]
        center_img = np.array(Image.fromarray(center_img_k).resize([224, 224]))
        # convert to grey scale then predict using the emotion model
        grey_img = np.array(Image.fromarray(center_img_k).resize([48, 48]))
        emotion_preds = emotion_model.predict(rgb2gray(grey_img).reshape(1, 48, 48, 1))
        # collect the box coordinates and prediction for drawing with cv2
        return_res.append([top, right, bottom, left, emotion_preds])
    return return_res
# Get a reference to webcam
video_capture = cv2.VideoCapture(0)
emotion_dict = {
    0: 'Surprise',
    1: 'Happy',
    2: 'Disgust',
    3: 'Anger',
    4: 'Sadness',
    5: 'Fear',
    6: 'Contempt'
}
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Convert the image from BGR color (which OpenCV uses) to RGB color
    rgb_frame = frame[:, :, ::-1]
    # Find all the faces in the current frame of video
    face_locations = detect_face(rgb_frame)
    # Display the results
    for top, right, bottom, left, emotion_preds in face_locations:
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.putText(frame, 'Emotion: {}({:.3f})'.format(emotion_dict[np.argmax(emotion_preds)], np.max(emotion_preds)), (left, top-40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36, 255, 12), 1)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
Running the code doesn't work for me because I get the following error message:
File "C:\Users\<my name>\source\repos\webcamtest\webcamtest\webcamtest.py", line 43, in detect_face
emotion_preds = emotion_model.predict(rgb2gray(grey_img).reshape(1, 48, 48, 1))
File "C:\Users\<my name>\miniconda3\lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\<my name>\miniconda3\lib\site-packages\keras\engine\training.py", line 3555, in _check_call_args
fullargspec = self._call_spec.full_argspec
AttributeError: 'Model' object has no attribute '_call_spec'
So it seems like the error leads to keras\engine\training.py. I've done nothing to it other than install it. I'm using Keras 2.11.0, tensorflow 2.11.0 and mtcnn 0.1.1. What could be the problem?
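One possible cause (an assumption, not confirmed in this thread): pickled Keras models are generally not portable across Keras versions, and _call_spec only exists in newer Keras, so unpickling a model created under an older version can raise exactly this error. If a natively saved copy of the model is available, Keras's own loader sidesteps pickle entirely; a sketch, using a hypothetical .h5 filename:
# Hedged sketch: load a natively saved Keras model instead of unpickling one.
# 'emotion-model-final.h5' is a hypothetical filename for such a copy.
from tensorflow import keras
emotion_model = keras.models.load_model('emotion-model-final.h5')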

Create line from list of points while ignoring outliers

I have a list of points that almost form a straight line (but they are not perfectly aligned on that line). I want to create a line that best describes those points.
For example, for points:
points = [(150, 250),(180, 220), (200, 195), (225, 180), (250, 150), (275, 115), (300, 100)]
I want to create a line similar to this:
The problem is that sometimes there are points that are very far from that line (outliers). I want to ignore those outliers while creating the line:
How can I create this line?
P.S. This is the Colab code to generate the points:
import numpy as np
import cv2
from google.colab.patches import cv2_imshow
img = np.zeros([400,500,3],dtype=np.uint8)
points = [(150, 250),(180, 225), (200, 200), (225, 100), (250, 150), (275, 115), (300, 100)]
#points = [(150, 250),(180, 220), (200, 195), (225, 180), (250, 150), (275, 115), (300, 100)]
for idx, p in enumerate(points):
    img = cv2.circle(img, p, radius=0, color=(0, 0, 255), thickness=10)
    text_x, text_y = p
    p = round(text_x-20), round(text_y+5)
    img = cv2.putText(img=img, text=str(idx), fontFace=cv2.FONT_HERSHEY_SCRIPT_COMPLEX, org=p, fontScale=0.5, color=(0, 255, 0))
image = cv2.line(img, points[0], points[-1], (255, 0, 255), 1)
cv2_imshow(img)
In my code, I generate the line between the first and last elements of the list of points, so of course if the last point is an outlier, the whole line is disrupted:
Thanks to @Christoph Rackwitz's answer, I followed sklearn's documentation for RANSAC and created a simple script to compute the RANSAC fit (of course it still needs to be polished):
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
"""
Add points:
"""
points = [(150, 250),(175, 225), (200, 200), (225, 175), (250, 150), (275, 115), (300, 150)]
Y = []
X = []
for x, y in points:
    Y.append(y)
    X.append(x)
Y = np.array(Y)
X = np.array(X)
lr = linear_model.LinearRegression()
lr.fit(X.reshape(-1, 1), Y)
# Robustly fit linear model with RANSAC algorithm
ransac = linear_model.RANSACRegressor()
ransac.fit(X.reshape(-1, 1), Y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(X.min(), X.max())[:, np.newaxis]
line_y = lr.predict(line_X)
line_y_ransac = ransac.predict(line_X)
# Compare estimated coefficients
print("Estimated coefficients (true, linear regression, RANSAC):")
print(coef, lr.coef_, ransac.estimator_.coef_)
lw = 2
plt.gca().invert_yaxis() # Mirror points
plt.scatter(
    X[inlier_mask], Y[inlier_mask], color="yellowgreen", marker=".", label="Inliers"
)
plt.scatter(
    X[outlier_mask], Y[outlier_mask], color="gold", marker=".", label="Outliers"
)
plt.plot(line_X, line_y, color="navy", linewidth=lw, label="Linear regressor")
plt.plot(
    line_X,
    line_y_ransac,
    color="cornflowerblue",
    linewidth=lw,
    label="RANSAC regressor",
)
plt.legend(loc="lower right")
plt.xlabel("Input")
plt.ylabel("Response")
plt.show()
And I got the following image (which looks great):
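As a small follow-up sketch (assuming the ransac object from the script above), the equation of the fitted line can be read directly off the underlying estimator:
# Read the fitted line parameters from the RANSAC base estimator.
slope = ransac.estimator_.coef_[0]
intercept = ransac.estimator_.intercept_
print(f"fitted line: y = {slope:.3f} * x + {intercept:.3f}")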

Quiver plot with optical flow?

Recently I have been working on cloud motion tracking using images, but many of the examples used in video implementations show a quiver plot that moves according to the tracked object.
The quiver documentation principally takes four arguments ([X, Y], U, V), where X and Y are the starting points and U and V the directions. On the other hand, optical flow based on this example returns p1 (the displacements) with a shape (m, n, l) for an image of shape (200, 200). My confusion is about how to order the parameters, because goodFeaturesToTrack also returns the same kind of array as p1.
How can I join both components to plot a quiver of the cloud motion?
I found a pretty good solution. I explain my full example here using the Hamburg taxi sequence:
Download the taxi sequence.
$ curl -O ftp://ftp.ira.uka.de/pub/vid-text/image_sequences/taxi/taxi.zip
$ unzip -q taxi.zip
Get all images and pick two random frames
from pathlib import Path
import numpy as np
import cv2 as cv
from PIL import Image
import matplotlib.pyplot as plt
taxis_fnames = sorted(Path('taxi').iterdir())
rand_idx = np.random.randint(0, len(taxis_fnames) - 4)  # pick a random frame, leaving room for the second one
taxi1 = Image.open(taxis_fnames[rand_idx])
taxi2 = Image.open(taxis_fnames[rand_idx + 4])
Compute the optical flow
flow = cv.calcOpticalFlowFarneback(np.array(taxi1),
                                   np.array(taxi2),
                                   None, 0.5, 3, 15, 3, 5, 1.2, 0)
Plot the quiver
step = 3
plt.quiver(np.arange(0, flow.shape[1], step), np.arange(flow.shape[0], -1, -step),
           flow[::step, ::step, 0], flow[::step, ::step, 1])
The step is there to downsample the number of optical flow vectors plotted. The x positions go from 0 to the image width, while the y positions are reversed (otherwise the optical flow would be upside down), from the image height down to 0. On some occasions, you will have to change the step so that the height and width are divisible by it.
The resulting image:
Here is a general method for plotting a quiver field easily and accurately.
def plot_quiver(ax, flow, spacing, margin=0, **kwargs):
    """Plots less dense quiver field.

    Args:
        ax: Matplotlib axis
        flow: motion vectors
        spacing: space (px) between each arrow in grid
        margin: width (px) of enclosing region without arrows
        kwargs: quiver kwargs (default: angles="xy", scale_units="xy")
    """
    h, w, *_ = flow.shape
    nx = int((w - 2 * margin) / spacing)
    ny = int((h - 2 * margin) / spacing)
    x = np.linspace(margin, w - margin - 1, nx, dtype=np.int64)
    y = np.linspace(margin, h - margin - 1, ny, dtype=np.int64)
    flow = flow[np.ix_(y, x)]
    u = flow[:, :, 0]
    v = flow[:, :, 1]
    kwargs = {**dict(angles="xy", scale_units="xy"), **kwargs}
    ax.quiver(x, y, u, v, **kwargs)
    ax.set_ylim(sorted(ax.get_ylim(), reverse=True))
    ax.set_aspect("equal")
Example usage:
flow = cv2.calcOpticalFlowFarneback(
    frame_1, frame_2, None, 0.5, 3, 15, 3, 5, 1.2, 0
)
fig, ax = plt.subplots()
plot_quiver(ax, flow, spacing=10, scale=1, color="#ff44ff")
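With angles="xy" and scale_units="xy", each vector (u, v) is drawn from (x, y) toward (x + u, y + v) in data coordinates, so scale=1 shows true pixel displacements. A minimal end-to-end sketch combining the two answers above (the .pgm filenames are assumptions about how the taxi archive unpacks):
# Hedged end-to-end sketch; the frame filenames are assumptions.
import cv2
import matplotlib.pyplot as plt
frame_1 = cv2.imread('taxi/taxi01.pgm', cv2.IMREAD_GRAYSCALE)
frame_2 = cv2.imread('taxi/taxi05.pgm', cv2.IMREAD_GRAYSCALE)
flow = cv2.calcOpticalFlowFarneback(
    frame_1, frame_2, None, 0.5, 3, 15, 3, 5, 1.2, 0
)
fig, ax = plt.subplots()
ax.imshow(frame_1, cmap='gray')  # draw the arrows over the first frame
plot_quiver(ax, flow, spacing=10, scale=1, color='#ff44ff')  # from the answer above
plt.show()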

How to set white pixels to transparent using OpenCV

My code so far is below. I want to make the white in the image transparent.
import cv2
import numpy as np
image = cv2.imread('myimage.jpg')
image2 = np.ones((255, 255, 4))
for i in range(255):
    for j in range(255):
        if image[i,j,0] == 255:
            image2[i, j, :] = np.append(image[i, j, :], 1)
        else:
            image2[i, j, :] = np.append(image[i, j, :], 1)
cv2.imwrite('image2.png', image2)
But it just produces an empty plot.
This should be enough:
The background of this website is white, so right-click the output image and choose "open image in new tab" and you'll see that it is transparent :)
import cv2
import numpy as np
# read the image
image_bgr = cv2.imread('image_bgr.png')
# get the image dimensions (height, width and channels)
h, w, c = image_bgr.shape
# append Alpha channel -- required for BGRA (Blue, Green, Red, Alpha)
image_bgra = np.concatenate([image_bgr, np.full((h, w, 1), 255, dtype=np.uint8)], axis=-1)
# create a mask where white pixels ([255, 255, 255]) are True
white = np.all(image_bgr == [255, 255, 255], axis=-1)
# change the values of Alpha to 0 for all the white pixels
image_bgra[white, -1] = 0
# save the image
cv2.imwrite('image_bgra.png', image_bgra)
Input:
Output ("right click" >> "open in new tab"):

Multiple Same Color object tracking (Python3 Opencv)

I am seriously struggling here. I'm using OpenCV (cv2) and Python 3. This question on tracking multiple objects of the same color is exactly the one I'm asking, but the pages are out of date, the links no longer work, and I can't find anything else online about it. I can track multiple colors (a red object, a green object, a blue object, etc.); however, I cannot for the life of me figure out how to track two red objects.
# import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
help="max buffer size")
args = vars(ap.parse_args())
# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])
# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
camera = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
camera = cv2.VideoCapture(args["video"])
# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()
    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break
    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
I figured that in the line just above this one, which reads c = max(cnts, key=cv2.contourArea), I could simply find the second-largest contour and use that one too, but once again I couldn't find anything online about how to do this.
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                       (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
    # update the points queue
    pts.appendleft(center)
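One way to approach the two-red-objects case (a sketch, not from the original thread): instead of taking only the largest contour, sort the contours by area and process the top two. Assuming the cnts, frame, and pts variables from the loop above:
# Hedged sketch: handle the two largest contours instead of only the largest.
for c in sorted(cnts, key=cv2.contourArea, reverse=True)[:2]:
    ((x, y), radius) = cv2.minEnclosingCircle(c)
    if radius > 10:
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
        cv2.circle(frame, center, 5, (0, 0, 255), -1)
        pts.appendleft(center)
Note that a single pts deque will interleave the centers of both objects; for clean trails you would keep one deque per tracked object.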
