extracting a quadrilateral image to a rectangle - graphics

BOUNTY UPDATE
Following Denis's link, this is how to use the threeblindmiceandamonkey code:
// the destination rect is our 'in' quad
int dw = 300, dh = 250;
double in[4][2] = {{0,0},{dw,0},{dw,dh},{0,dh}};
// the quad in the source image is our 'out'
double out[4][2] = {{171,72},{331,93},{333,188},{177,210}};
double homo[3][3];
const int ret = mapQuadToQuad(in,out,homo);
// homo can be used for calculating the x,y of any destination point
// in the source, e.g.
for(int i=0; i<4; i++) {
    double p1[3] = {out[i][0],out[i][1],1};
    double p2[3];
    transformMatrix(p1,p2,homo);
    p2[0] /= p2[2]; // x
    p2[1] /= p2[2]; // y
    printf("\t%2.2f\t%2.2f\n",p2[0],p2[1]);
}
This provides a transform for converting points in destination to the source - you can of course do it the other way around, but it's tidy to be able to do this for the mixing:
for(int y=0; y<dh; y++) {
    for(int x=0; x<dw; x++) {
        // calc the four corners in source for this
        // destination pixel, and mix
    }
}
For the mixing, I'm using super-sampling with random points; it works very well, even when there is a big disparity between the source and destination areas.
BACKGROUND QUESTION
In the image at the top, the sign on the side of the van is not face-on to the camera. I want to calculate, as best I can with the pixels I have, what it'd look like face on.
I know the corner coordinates of the quad in the image, and the size of the destination rectangle.
I imagine that this is some kind of loop through the x and y axes, doing something like a Bresenham's line on both dimensions at once, with some kind of mixing as pixels in the source and destination images overlap - some sub-pixel mixing of some sort?
What approaches are there, and how do you mix the pixels?
Is there a standard approach for this?

What you want is called planar rectification, and it's not all that simple, I'm afraid. What you need to do is recover the homography that maps this oblique view of the van side onto the front-facing view. Photoshop / etc. have tools to do this for you given some control points; if you want to implement it for yourself you'll have to start delving into computer vision.
Edit - OK, here you go: a Python script to do the warping, using the OpenCV library which has convenient functions to calculate the homography and warp the image for you:
import cv

def warpImage(image, corners, target):
    mat = cv.CreateMat(3, 3, cv.CV_32F)
    cv.GetPerspectiveTransform(corners, target, mat)
    out = cv.CreateMat(height, width, cv.CV_8UC3)
    cv.WarpPerspective(image, out, mat, cv.CV_INTER_CUBIC)
    return out

if __name__ == '__main__':
    width, height = 400, 250
    corners = [(171,72),(331,93),(333,188),(177,210)]
    target = [(0,0),(width,0),(width,height),(0,height)]
    image = cv.LoadImageM('fries.jpg')
    out = warpImage(image, corners, target)
    cv.SaveImage('fries_warped.jpg', out)
And the output:
OpenCV also has C and C++ bindings, or you can use EmguCV for a .NET wrapper; the API is fairly consistent across all languages so you can replicate this in whichever language suits your fancy.

Look up "quad to quad" transform, e.g.
threeblindmiceandamonkey.
A 3x3 transform on 2d homogeneous coordinates can transform any 4 points (a quad)
to any other quad;
conversely, any fromquad and toquad, such as the corners of your truck and a target rectangle,
give a 3 x 3 transform.
Qt has quadToQuad
and can transform pixmaps with it, but I guess you don't have Qt ?
Added 10Jun:
from labs.trolltech.com/page/Graphics/Examples
there's a nice demo which quad-to-quads a pixmap as you move the corners:
Added 11Jun: @Will, here's translate.h in Python (which you know a bit ?
""" ...""" are multiline comments.)
perstrans() is the key; hope that makes sense, if not ask.
By the way, you could map the pixels one by one with mapQuadToQuad( target rect, orig quad ), but without pixel interpolation it'll look terrible; OpenCV does it all. (A minimal per-pixel sketch follows after the listing below.)
#!/usr/bin/env python
""" square <-> quad maps
    from http://threeblindmiceandamonkey.com/?p=16 matrix.h
"""
from __future__ import division
import numpy as np

__date__ = "2010-06-11 jun denis"

def det2(a, b, c, d):
    return a*d - b*c

def mapSquareToQuad( quad ):  # [4][2]
    SQ = np.zeros((3,3))
    px = quad[0,0] - quad[1,0] + quad[2,0] - quad[3,0]
    py = quad[0,1] - quad[1,1] + quad[2,1] - quad[3,1]
    if abs(px) < 1e-10 and abs(py) < 1e-10:
        SQ[0,0] = quad[1,0] - quad[0,0]
        SQ[1,0] = quad[2,0] - quad[1,0]
        SQ[2,0] = quad[0,0]
        SQ[0,1] = quad[1,1] - quad[0,1]
        SQ[1,1] = quad[2,1] - quad[1,1]
        SQ[2,1] = quad[0,1]
        SQ[0,2] = 0.
        SQ[1,2] = 0.
        SQ[2,2] = 1.
        return SQ
    else:
        dx1 = quad[1,0] - quad[2,0]
        dx2 = quad[3,0] - quad[2,0]
        dy1 = quad[1,1] - quad[2,1]
        dy2 = quad[3,1] - quad[2,1]
        det = det2(dx1,dx2, dy1,dy2)
        if det == 0.:
            return None
        SQ[0,2] = det2(px,dx2, py,dy2) / det
        SQ[1,2] = det2(dx1,px, dy1,py) / det
        SQ[2,2] = 1.
        SQ[0,0] = quad[1,0] - quad[0,0] + SQ[0,2]*quad[1,0]
        SQ[1,0] = quad[3,0] - quad[0,0] + SQ[1,2]*quad[3,0]
        SQ[2,0] = quad[0,0]
        SQ[0,1] = quad[1,1] - quad[0,1] + SQ[0,2]*quad[1,1]
        SQ[1,1] = quad[3,1] - quad[0,1] + SQ[1,2]*quad[3,1]
        SQ[2,1] = quad[0,1]
        return SQ

#...............................................................................
def mapQuadToSquare( quad ):
    return np.linalg.inv( mapSquareToQuad( quad ))

def mapQuadToQuad( a, b ):
    return np.dot( mapQuadToSquare(a), mapSquareToQuad(b) )

def perstrans( X, t ):
    """ perspective transform X Nx2, t 3x3:
        [x0 y0 1] t = [a0 b0 w0] -> [a0/w0 b0/w0]
        [x1 y1 1] t = [a1 b1 w1] -> [a1/w1 b1/w1]
        ...
    """
    x1 = np.vstack(( X.T, np.ones(len(X)) ))
    y = np.dot( t.T, x1 )
    return (y[:-1] / y[-1]) .T

#...............................................................................
if __name__ == "__main__":
    np.set_printoptions( 2, threshold=100, suppress=True )  # .2f
    sq = np.array([[0,0], [1,0], [1,1], [0,1]])
    quad = np.array([[171, 72], [331, 93], [333, 188], [177, 210]])
    print "quad:", quad
    print "square to quad:", perstrans( sq, mapSquareToQuad(quad) )
    print "quad to square:", perstrans( quad, mapQuadToSquare(quad) )
    dw, dh = 300, 250
    rect = np.array([[0, 0], [dw, 0], [dw, dh], [0, dh]])
    quadquad = mapQuadToQuad( quad, rect )
    print "quad to quad transform:", quadquad
    print "quad to rect:", perstrans( quad, quadquad )

"""
quad: [[171  72]
       [331  93]
       [333 188]
       [177 210]]
square to quad: [[ 171.   72.]
                 [ 331.   93.]
                 [ 333.  188.]
                 [ 177.  210.]]
quad to square: [[-0.  0.]
                 [ 1.  0.]
                 [ 1.  1.]
                 [ 0.  1.]]
quad to quad transform: [[   1.29   -0.23   -0.  ]
                         [  -0.06    1.79   -0.  ]
                         [-217.24  -88.54    1.34]]
quad to rect: [[   0.    0.]
               [ 300.    0.]
               [ 300.  250.]
               [   0.  250.]]
"""

I think what you need is a perspective (projective) transformation rather than a plain affine one - an affine transform can't map a general quad onto a rectangle - and it can be accomplished using matrix math.

And in modern times, in Python with cv2:
import cv2
import numpy as np
source_image = cv2.imread('french fries in Europe.jpeg')
source_corners = np.array([(171, 72), (331, 93), (333, 188), (177, 210)])
width, height = 400, 250
target_corners = np.array([(0, 0), (width, 0), (width, height), (0, height)])
# Get matrix H that maps source_corners to target_corners
H, _ = cv2.findHomography(source_corners, target_corners)
# Apply matrix H to the source image.
transformed_image = cv2.warpPerspective(
    source_image, H, (source_image.shape[1], source_image.shape[0]))
cv2.imwrite('transformed_image.jpg', transformed_image)


Mapping RGB data to values in legend

This is a follow-up to my previous question here
I've been trying to convert the color data in a heatmap to RGB values.
source image
In the image below, the left side is a subplot from panel D of the source image; it has 6 x 6 cells (6 rows and 6 columns). On the right we see the binarized image, with the clicked cell highlighted in white after running the code below. The input for running the code is the image below. The output (mean = [ 27.72 26.83 144.17]) is the mean BGR color of the cell that is highlighted in white in the right image.
A really nice solution that was provided as an answer to my previous question is the following (ref)
import cv2
import numpy as np

# print pixel value on click
def mouse_callback(event, x, y, flags, params):
    if event == cv2.EVENT_LBUTTONDOWN:
        # get specified color
        row = y
        column = x
        color = image[row, column]
        print('color = ', color)
        # calculate range
        thr = 20  # ± color range
        up_thr = color + thr
        up_thr[up_thr < color] = 255
        down_thr = color - thr
        down_thr[down_thr > color] = 0
        # find points in range
        img_thr = cv2.inRange(image, down_thr, up_thr)  # accepted range
        height, width, _ = image.shape
        left_bound = x - (x % round(width/6))
        right_bound = left_bound + round(width/6)
        up_bound = y - (y % round(height/6))
        down_bound = up_bound + round(height/6)
        img_rect = np.zeros((height, width), np.uint8)  # bounded by rectangle
        cv2.rectangle(img_rect, (left_bound, up_bound), (right_bound, down_bound), (255,255,255), -1)
        img_thr = cv2.bitwise_and(img_thr, img_rect)
        # get points around specified point
        img_spec = np.zeros((height, width), np.uint8)  # specified mask
        last_img_spec = np.copy(img_spec)
        img_spec[row, column] = 255
        kernel = np.ones((3,3), np.uint8)  # dilation structuring element
        while cv2.bitwise_xor(img_spec, last_img_spec).any():
            last_img_spec = np.copy(img_spec)
            img_spec = cv2.dilate(img_spec, kernel)
            img_spec = cv2.bitwise_and(img_spec, img_thr)
            cv2.imshow('mask', img_spec)
            cv2.waitKey(10)
        avg = cv2.mean(image, img_spec)[:3]
        mean.append(np.around(np.array(avg), 2))
        print('mean = ', np.around(np.array(avg), 2))
        # print(mean)  # appends data to variable mean

if __name__ == '__main__':
    mean = []  # np.zeros((6, 6))
    # create window and callback
    winname = 'img'
    cv2.namedWindow(winname)
    cv2.setMouseCallback(winname, mouse_callback)
    # read & display image
    image = cv2.imread('ip2.png', 1)
    # image = image[3:62, 2:118]  # crop the image to 6x6 cells
    # ---- resize image --------------------------------------------------
    # appended this to the original code
    print('Original Dimensions : ', image.shape)
    scale_percent = 220  # percent of original size
    width = int(image.shape[1] * scale_percent / 100)
    height = int(image.shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize image
    image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
    # ----------------------------------------------------------------------
    cv2.imshow(winname, image)
    cv2.waitKey()  # press any key to exit
    cv2.destroyAllWindows()
What do I want to do next?
The mean of the RGB values thus obtained has to be mapped to the values in the legend provided in the source image.
I would like to ask for suggestions on how to map the RGB data to the values in the legend.
Note: In my previous post it has been suggested that one could
fit the RGB values into an equation which gives continuous results.
Any suggestions in this direction will also be helpful.
EDIT:
Answering the comment below
I did the following to measure the RGB values of the legend.
Input image:
This image is 8 cells wide (columns) and 1 cell high (rows).
Changed these lines of code:
left_bound = x - (x % round(width/8)) # 6 replaced with 8
right_bound = left_bound + round(width/8) # 6 replaced with 8
up_bound = y - (y % round(height/1)) # 6 replaced with 1
down_bound = up_bound + round(height/1) # 6 replaced with 1
Mean obtained for each cell/ each color in legend from left to right:
mean = [ 82.15 174.95 33.66]
mean = [45.55 87.01 17.51]
mean = [8.88 8.61 5.97]
mean = [16.79 17.96 74.46]
mean = [ 35.59 30.53 167.14]
mean = [ 37.9 32.39 233.74]
mean = [120.29 118. 240.34]
mean = [238.33 239.56 248.04]
You can try a piecewise approach: make pairwise transitions between neighbouring colors:
c[i->i+1](t)=t*(R[i+1],G[i+1],B[i+1])+(1-t)*(R[i],G[i],B[i])
Do the same for the values:
val[i->i+1](t)=t*val[i+1]+(1-t)*val[i]
where i is the index of the color in the legend scale and t is a parameter in the [0:1] range.
So you have a continuous mapping between the two quantities, and you just need to find the color parameters i and t closest to your sample and read the value from the mapping.
Update:
To find the color parameters, you can think of every pair of neighbouring legend colors as a pair of 3D points, and your queried color as an external 3D point. Now you just need to find the length of the perpendicular from the external point to the line through each pair; iterating over the legend color pairs, find the shortest perpendicular (now you have i).
Then find the intersection point of the perpendicular and the line. This point is located at distance A from the line start, and if the line length is L then the parameter value is t = A/L.
Update2:
A simple brute-force solution to illustrate the piecewise approach:
#include "opencv2/opencv.hpp"
#include <string>
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc, char* argv[])
{
Mat Image=cv::Mat::zeros(100,250,CV_32FC3);
std::vector<cv::Scalar> Legend;
Legend.push_back(cv::Scalar(82.15,174.95,33.66));
Legend.push_back(cv::Scalar(45.55, 87.01, 17.51));
Legend.push_back(cv::Scalar(8.88, 8.61, 5.97));
Legend.push_back(cv::Scalar(16.79, 17.96, 74.46));
Legend.push_back(cv::Scalar(35.59, 30.53, 167.14));
Legend.push_back(cv::Scalar(37.9, 32.39, 233.74));
Legend.push_back(cv::Scalar(120.29, 118., 240.34));
Legend.push_back(cv::Scalar(238.33, 239.56, 248.04));
std::vector<float> Values;
Values.push_back(-4);
Values.push_back(-2);
Values.push_back(0);
Values.push_back(2);
Values.push_back(4);
Values.push_back(8);
Values.push_back(16);
Values.push_back(32);
int w = 30;
int h = 10;
for (int i = 0; i < Legend.size(); ++i)
{
cv::rectangle(Image, Rect(i * w, 0, w, h), Legend[i]/255, -1);
}
std::vector<cv::Scalar> Smooth_Legend;
std::vector<float> Smooth_Values;
for (int i = 0; i < Legend.size()-1; ++i)
{
cv::Scalar c1 = Legend[i];
cv::Scalar c2 = Legend[i + 1];
float v1 = Values[i];
float v2 = Values[i+1];
for (int j = 0; j < w; ++j)
{
float t = (float)j / (float)w;
Scalar c = c2 * t + c1 * (1 - t);
float v = v2 * t + v1 * (1 - t);
float x = i * w + j;
line(Image, Point(x, h), Point(x, h + h), c/255, 1);
Smooth_Values.push_back(v);
Smooth_Legend.push_back(c);
}
}
Scalar qp = cv::Scalar(5, 0, 200);
float d_min = FLT_MAX;
int ind = -1;
for (int i = 0; i < Smooth_Legend.size(); ++i)
{
float d = cv::norm(qp- Smooth_Legend[i]);
if (d < d_min)
{
ind = i;
d_min = d;
}
}
std::cout << Smooth_Values[ind] << std::endl;
line(Image, Point(ind, 3 * h), Point(ind, 4 * h), Scalar::all(255), 2);
circle(Image, Point(ind, 4 * h), 3, qp/255,-1);
putText(Image, std::to_string(Smooth_Values[ind]), Point(ind, 70), FONT_HERSHEY_DUPLEX, 1, Scalar(0, 0.5, 0.5), 0.002);
cv::imshow("Legend", Image);
cv::imwrite("result.png", Image*255);
cv::waitKey();
}
The result:
Python:
import cv2
import numpy as np

height = 100
width = 250
Image = np.zeros((height, width, 3), np.float32)
legend = np.array([(82.15, 174.95, 33.66),
                   (45.55, 87.01, 17.51),
                   (8.88, 8.61, 5.97),
                   (16.79, 17.96, 74.46),
                   (35.59, 30.53, 167.14),
                   (37.9, 32.39, 233.74),
                   (120.29, 118., 240.34),
                   (238.33, 239.56, 248.04)], np.float32)
values = np.array([-4, -2, 0, 2, 4, 8, 16, 32], np.float32)
# width of cell, also defines number
# of one-segment transition subdivisions.
# Larger values will give more accuracy, but will work slower.
w = 30
# Only for displaying purposes. Height of bars in result image.
h = 10
# Plot legend cells (to check correctness only)
for i in range(len(legend)):
    col = legend[i]
    cv2.rectangle(Image, (i * w, 0, w, h), col/255, -1)
# Build smoothed scales for color and corresponding values
Smooth_Legend = []
Smooth_Values = []
for i in range(len(legend)-1):  # iterate known knots
    c1 = legend[i]      # start color point
    c2 = legend[i + 1]  # end color point
    v1 = values[i]      # start value
    v2 = values[i+1]    # end value
    for j in range(w):  # slide inside [start:end] interval.
        t = float(j) / float(w)    # map it to [0:1] interval
        c = c2 * t + c1 * (1 - t)  # transition between c1 and c2
        v = v2 * t + v1 * (1 - t)  # transition between v1 and v2
        x = i * w + j              # global scale coordinate (for drawing)
        cv2.line(Image, (x, h), (x, h + h), c/255, 1)  # draw one tick of smoothed scale
        Smooth_Values.append(v)    # append smoothed values for next step
        Smooth_Legend.append(c)    # append smoothed color for next step
# queried color
qp = np.array([5, 0, 200])
# initial value for minimal distance set to a large value
d_min = 1e7
# index for color search
ind = -1
# search for minimal distance from queried color to smoothed scale color
for i in range(len(Smooth_Legend)):
    # distance
    d = cv2.norm(qp - Smooth_Legend[i])
    if d < d_min:
        ind = i
        d_min = d
# ind contains index of the closest color in the smoothed scale,
# and now we can extract the corresponding value from the smoothed values scale
print(Smooth_Values[ind])  # value mapped to queried color.
# plot pointer (to check ourselves)
cv2.line(Image, (ind, 3 * h), (ind, 4 * h), (255, 255, 255), 2)
cv2.circle(Image, (ind, 4 * h), 3, qp/255, -1)
cv2.putText(Image, str(Smooth_Values[ind]), (ind, 70), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0.5, 0.5), 1)
# show window
cv2.imshow("Legend", Image)
# save to file
cv2.imwrite("result.png", Image*255)
cv2.waitKey()

Magnetic dipole in python

I looked at this code:
import numpy as np
from matplotlib import pyplot as plt

def dipole(m, r, r0):
    """
    Calculation of field B at point r. B is created by a dipole moment m located at r0.
    """
    # R = r - r0 - subtraction of elements of vectors r and r0, transposition of array
    R = np.subtract(np.transpose(r), r0).T
    # Spatial components of r are the outermost axis
    norm_R = np.sqrt(np.einsum("i...,i...", R, R))  # einsum - Einstein summation
    # Dot product of R and m
    m_dot_R = np.tensordot(m, R, axes=1)
    # Computation of B
    B = 3 * m_dot_R * R / norm_R**5 - np.tensordot(m, 1 / norm_R**3, axes=0)
    B *= 1e-7  # i.e. B = B * 1e-7, the factor mu_0/(4*pi) with vacuum permeability mu_0 = 4*pi*1e-7
    # The result is the magnetic field B
    return B

X = np.linspace(-1, 1)
Y = np.linspace(-1, 1)
Bx, By = dipole(m=[0, 1], r=np.meshgrid(X, Y), r0=[-0.2, 0.8])
plt.figure(figsize=(8, 8))
plt.streamplot(X, Y, Bx, By)
plt.margins(0, 0)
plt.show()
It shows the following figure:
Is it possible to get coordinates of one line of force? I don't understand how it is plotted.
The streamplot returns a container object 'StreamplotSet' with two parts:
lines: a LineCollection of the streamlines
arrows: a PatchCollection containing FancyArrowPatch objects (these are the triangular arrows)
c.lines.get_paths() gives all the segments. Iterating through these segments, their vertices can be examined. When a segment starts where the previous ended, both belong to the same curve. Note that each segment is a short straight line; many segments are used together to form a streamline curve.
The code below demonstrates iterating through the segments. To show what's happening, each segment is converted to an array of 2D points suitable for plt.plot. By default, plt.plot colors each curve with a new color (repeating every 10). The dots show where each of the short straight segments is located.
To find one particular curve, you could hover with the mouse over the starting point, and note the x coordinate of that point. And then test for that coordinate in the code. As an example, the curve that starts near x=0.48 is drawn in a special way.
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import patches

def dipole(m, r, r0):
    R = np.subtract(np.transpose(r), r0).T
    norm_R = np.sqrt(np.einsum("i...,i...", R, R))
    m_dot_R = np.tensordot(m, R, axes=1)
    B = 3 * m_dot_R * R / norm_R**5 - np.tensordot(m, 1 / norm_R**3, axes=0)
    B *= 1e-7
    return B

X = np.linspace(-1, 1)
Y = np.linspace(-1, 1)
Bx, By = dipole(m=[0, 1], r=np.meshgrid(X, Y), r0=[-0.2, 0.8])

plt.figure(figsize=(8, 8))
c = plt.streamplot(X, Y, Bx, By)
c.lines.set_visible(False)
paths = c.lines.get_paths()

# find where each curve starts: a new curve begins whenever a segment
# does not start where the previous segment ended
prev_end = None
start_indices = []
for index, segment in enumerate(paths):
    if not np.array_equal(prev_end, segment.vertices[0]):  # new curve
        start_indices.append(index)
    prev_end = segment.vertices[-1]

special_x_coord = 0.48
for i0, i1 in zip(start_indices, start_indices[1:] + [len(paths)]):
    # get all the points of the curve that starts at index i0
    curve = np.array([paths[i].vertices[0] for i in range(i0, i1)] + [paths[i1 - 1].vertices[-1]])
    if abs(curve[0, 0] - special_x_coord) < 0.01:  # draw one curve in a special way
        plt.plot(curve[:, 0], curve[:, 1], '-', lw=10, alpha=0.3)
    else:
        plt.plot(curve[:, 0], curve[:, 1], '.', ls='-')
plt.margins(0, 0)
plt.show()

Rectangle/Rectangle Collision Detection

I am trying to detect when two rectangles intersect/overlap each other. When this happens, I want to know whether the intersection is True or False. I found a solution, however it is written in C or C++. I want to write this code in Python.
Here is the source: http://www.jeffreythompson.org/collision-detection/rect-rect.php
This is literally the first line of python code I've ever written (I do know C++ however)
def rectRect(r1x, r1y, r1w, r1h, r2x, r2y, r2w, r2h):
    # are the sides of one rectangle touching the other?
    return (r1x + r1w >= r2x and  # r1 right edge past r2 left
            r1x <= r2x + r2w and  # r1 left edge past r2 right
            r1y + r1h >= r2y and  # r1 top edge past r2 bottom
            r1y <= r2y + r2h)     # r1 bottom edge past r2 top
IMHO rectRect is a really bad name for the function, I kept it from the linked code however.
Following is a simple class that can perform both rectangle-rectangle intersection as well as point-to-rectangle intersection tests. The difference from the earlier solution is that the following snippet also allows detection of rotated rectangles.
import numpy as np
import matplotlib.pyplot as plt

class Rectangle:
    def __init__(self, center: np.ndarray, dims: np.ndarray, angle: float):
        self.corners = self.get_rect_points(center, dims, angle)
        self.area = dims[0] * dims[1]

    @staticmethod
    def get_rect_points(center: np.ndarray, dims: np.ndarray, angle: float):
        """
        returns four corners of the rectangle.
        bottom left is the first corner, from there it goes
        counter clockwise.
        """
        center = np.asarray(center)
        length, breadth = dims
        angle = np.deg2rad(angle)
        corners = np.array([[-length/2, -breadth/2],
                            [length/2, -breadth/2],
                            [length/2, breadth/2],
                            [-length/2, breadth/2]])
        rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
        corners = rot.dot(corners.T) + center[:, None]
        return corners.T

    def is_point_in_collision(self, p: np.ndarray):
        """
        check if a point is in collision with the rectangle.
        """
        def area_triangle(a, b, c):
            return abs((b[0] * a[1] - a[0] * b[1]) + (c[0] * b[1] - b[0] * c[1]) + (a[0] * c[1] - c[0] * a[1])) / 2
        area = 0
        area += area_triangle(self.corners[0], p, self.corners[3])
        area += area_triangle(self.corners[3], p, self.corners[2])
        area += area_triangle(self.corners[2], p, self.corners[1])
        area += area_triangle(self.corners[1], p, self.corners[0])
        return area > self.area

    def is_intersect(self, rect_2: 'Rectangle'):
        """
        check if any of the four corners of the
        rectangle is in collision
        """
        if not np.all([self.is_point_in_collision(c) for c in rect_2.corners]):
            return True
        return False

def plot_rect(p1, p2, p3, p4, color='r'):
    ax.plot([p1[0], p2[0]], [p1[1], p2[1]], color)
    ax.plot([p2[0], p3[0]], [p2[1], p3[1]], color)
    ax.plot([p3[0], p4[0]], [p3[1], p4[1]], color)
    ax.plot([p4[0], p1[0]], [p4[1], p1[1]], color)
    mid_point = 0.5 * (p1 + p3)
    plt.scatter(mid_point[0], mid_point[1], marker='*')
    plt.xlim([-1, 1])
    plt.ylim([-1, 1])
Following are two samples:
Sample 1:
ax = plt.subplot(111)
st = Rectangle((0.067, 0.476),(0.61, 0.41), 90)
gripper = Rectangle((-0.367, 0.476),(0.21,0.16), 45)
plot_rect(*st.corners)
plot_rect(*gripper.corners)
plt.show()
print(f"gripper and rectangle intersect: {st.is_intersect(gripper)}")
Sample 2:
ax = plt.subplot(111)
st = Rectangle((0.067, 0.476),(0.61, 0.41), 90)
gripper = Rectangle((-0.167, 0.476),(0.21,0.16), 45)
plot_rect(*st.corners)
plot_rect(*gripper.corners)
plt.show()
print(f"gripper and rectangle intersect: {st.is_intersect(gripper)}")

Text Recognize using tesseract

Hello, I'm trying to recognize text from an image using Tesseract but I'm unable to get a result.
I'm using the EAST technique to detect the text. I have one more question: how can I extend the padding of the box? cv2.putText does not work in this case.
original code for text detection: https://github.com/opencv/opencv/blob/master/samples/dnn/text_detection.cpp
import cv2
import numpy as np
import argparse
import time
import math
import matplotlib.pyplot as plt
import skimage.io as io
import os
from imutils.object_detection import non_max_suppression
import pytesseract

print(np.__version__)

def decode_predictions(scores, geometry):
    # grab the number of rows and columns from the scores volume, then
    # initialize our set of bounding box rectangles and corresponding
    # confidence scores
    (numRows, numCols) = scores.shape[2:4]
    boxes = []
    confidences = []
    # loop over the number of rows
    for y in range(0, numRows):
        # extract the scores (probabilities), followed by the geometrical
        # data used to derive potential bounding box coordinates that
        # surround text
        scoresData = scores[0, 0, y]
        xData0 = geometry[0, 0, y]
        xData1 = geometry[0, 1, y]
        xData2 = geometry[0, 2, y]
        xData3 = geometry[0, 3, y]
        anglesData = geometry[0, 4, y]
        # loop over the number of columns
        for x in range(0, numCols):
            # if our score does not have sufficient probability, ignore it
            if scoresData[x] < args["min_confidence"]:
                continue
            # compute the offset factor as our resulting feature maps will
            # be 4x smaller than the input image
            (offsetX, offsetY) = (x * 4.0, y * 4.0)
            # extract the rotation angle for the prediction and then
            # compute the sin and cosine
            angle = anglesData[x]
            cos = np.cos(angle)
            sin = np.sin(angle)
            # use the geometry volume to derive the width and height of
            # the bounding box
            h = xData0[x] + xData2[x]
            w = xData1[x] + xData3[x]
            # compute the rotated rect for
            # the text prediction bounding box
            offset = (offsetX + (cos * xData1[x]) + (sin * xData2[x]), offsetY - (sin * xData1[x]) + (cos * xData2[x]))
            p1 = (-sin * h + offset[0], -cos * h + offset[1])
            p3 = (-cos * w + offset[0], sin * w + offset[1])
            center = (0.5*(p1[0]+p3[0]), 0.5*(p1[1]+p3[1]))
            # add the bounding box coordinates and probability score to
            # our respective lists
            boxes.append((center, (w, h), -angle * 180.0 / math.pi))
            confidences.append(float(scoresData[x]))
    return (boxes, confidences)

args = {
    "image": "C:\\Users\\ckunwar\\Test_Images\\licence_plate1\\52.jpg",
    "east": "frozen_east_text_detection.pb",
    "min_confidence": 0.25,
    "nms_thresh": 0.24,
    "width": 480,
    "height": 320,
    "padding": 0.0
}

# load the input image and grab the image dimensions
image = cv2.imread(args["image"])
orig = image.copy()
(H, W) = image.shape[:2]
#print(H,W)

# set the new width and height and then determine the ratio in change
# for both the width and height
(newW, newH) = (args["width"], args["height"])
rW = W / float(newW)
rH = H / float(newH)

# resize the image and grab the new image dimensions
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]

# define the two output layer names for the EAST detector model that
# we are interested in -- the first is the output probabilities and the
# second can be used to derive the bounding box coordinates of text
layerNames = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]

# load the pre-trained EAST text detector
print("[INFO] loading EAST text detector...")
net = cv2.dnn.readNet(args["east"])

# construct a blob from the image and then perform a forward pass of
# the model to obtain the two output layer sets
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False)
start = time.time()
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
end = time.time()

# show timing information on text prediction
print("[INFO] text detection took {:.6f} seconds".format(end - start))

(boxes, confidences) = decode_predictions(scores, geometry)

# apply non-maxima suppression to suppress weak, overlapping bounding boxes
indices = cv2.dnn.NMSBoxesRotated(boxes, confidences, args["min_confidence"], args["nms_thresh"])

results = []
# loop over the bounding boxes
for i in indices:
    # get 4 corners of the rotated rect
    vertices = cv2.boxPoints(boxes[i[0]])
    # scale the bounding box coordinates based on the respective ratios
    for j in [0, 1, 2, 3]:
        vertices[j][0] *= rW
        vertices[j][1] *= rH
    # draw the bounding box on the image
    for j in [0, 1, 2, 3]:
        p1 = (vertices[j][0], vertices[j][1])
        p2 = (vertices[(j + 1) % 4][0], vertices[(j + 1) % 4][1])
    config = ("-l eng --oem 3 --psm 11")
    text = pytesseract.image_to_string(orig, config=config)
    results.append(((p1, p2), text))

results = sorted(results, key=lambda r: r[0][1])
output = orig.copy()
for ((p1, p2), text) in results:
    print("OCR TEXT")
    print("========")
    print("{}\n".format(text))
    text = "".join([c if ord(c) < 128 else "" for c in text]).strip()
    cv2.line(output, p1, p2, (0, 255, 0), 2)
    #cv2.rectangle(output, p1, p2, (0, 255, 0), 2)
    cv2.putText(output, text, cv2.FONT_HERSHEY_TRIPLEX, 0.8, (0, 0, 255), 2)

# show the output image
#orig = cv2.cvtColor(orig, cv2.COLOR_BGR2RGB)
cv2.imshow("Text Detection", output)
cv2.waitKey(0)

Template Matching: efficient way to create mask for minMaxLoc?

Template matching in OpenCV is great. And you can pass a mask to cv2.minMaxLoc so that you only search (sort of) in part of the image for the template you want. You can also use a mask at the matchTemplate operation, but this only masks the template.
I want to find a template and I want to be assured that this template is within some other region of my image.
Calculating the mask for minMaxLoc seems kind of heavy. That is, calculating an accurate mask feels heavy. If you calculate a mask the easy way, it ignores the size of the template.
Examples are in order. My input images are shown below. They're a bit contrived. I want to find the candy bar, but only if it's completely inside the white circle of the clock face.
clock1
clock2
template
In clock1, the candy bar is inside the circular clock face and it's a "PASS". But in clock2, the candy bar is only partially inside the face and I want it to be a "FAIL". Here's a code sample for doing it the easy way. I use cv2.HoughCircles to find the clock face.
import numpy as np
import cv2
img = cv2.imread('clock1.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
template = cv2.imread('template.png')
t_h, t_w = template.shape[0:2] # template height and width
# find circle in gray image using Hough transform
circles = cv2.HoughCircles(gray, method = cv2.HOUGH_GRADIENT, dp = 1,
                           minDist = 150, param1 = 50, param2 = 70,
                           minRadius = 131, maxRadius = 200)
i = circles[0,0]
x0 = i[0]
y0 = i[1]
r = i[2]
# display circle on color image
cv2.circle(img,(x0, y0), r,(0,255,0),2)
# do the template match
result = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
# finally, here is the part that gets tricky. we want to find highest
# rated match inside circle and we'd like to use minMaxLoc
# make mask by drawing circle on zero array
mask = np.zeros(result.shape, dtype = np.uint8) # minMaxLoc will throw
# error w/o np.uint8
cv2.circle(mask, (x0, y0), r, color = 1, thickness = -1)
# call minMaxLoc
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result, mask = mask)
# draw found rectangle on img
if max_val > 0.4: # use 0.4 as threshold for finding candy bar
cv2.rectangle(img, max_loc, (max_loc[0]+t_w, max_loc[1]+t_h), (0,255,0), 4)
cv2.imwrite('output.jpg', img)
output using clock1
output using clock2
finds candy bar even though part of it is outside circle
So to properly make a mask, I use a bunch of NumPy operations. I make four separate masks (one for each corner of the template bounding box) and then AND them together. I'm not aware of any convenience functions in OpenCV that would do the mask for me. I'm a little nervous that all of the array operations will be expensive. Is there a better way to do this?
h, w = result.shape[0:2]
# make arrays that hold x,y coords
grid = np.indices((h, w))
x = grid[1]
y = grid[0]
top_left_mask = np.hypot(x - x0, y - y0) - r < 0
top_right_mask = np.hypot(x + t_w - x0, y - y0) - r < 0
bot_left_mask = np.hypot(x - x0, y + t_h - y0) - r < 0
bot_right_mask = np.hypot(x + t_w - x0, y + t_h - y0) - r < 0
mask = np.logical_and.reduce((top_left_mask, top_right_mask,
                              bot_left_mask, bot_right_mask))
mask = mask.astype(np.uint8)
cv2.imwrite('mask.png', mask*255)
Here's what the "fancy" mask looks like:
Seems about right. It cannot be circular because of the template shape. If I run clock2.jpg with this mask I get:
It works. No candy bars are identified. But I wish I could do it in fewer lines of code...
EDIT:
I've done some profiling. I ran 100 cycles of the "easy" way and the "accurate" way and calculated frames per second (fps):
easy way: 12.7 fps
accurate way: 7.8 fps
so there is some price to pay for making the mask with NumPy. These tests were done on a relatively powerful workstation. It could get uglier on more modest hardware...
Method 1: 'mask' image before cv2.matchTemplate
Just for kicks, I tried to make my own mask of the image that I pass to cv2.matchTemplate to see what kind of performance I can achieve. To be clear, this isn't a proper mask -- I set all of the pixels to ignore to one color (black or white). This is to get around the fact that only TM_SQDIFF and TM_CCORR_NORMED support a proper mask.
@Alexander Reynolds makes a very good point in the comments that some care must be taken if the template image (the thing we're trying to find) has lots of black or lots of white. For many problems, we will know a priori what the template looks like and we can specify a white background or black background.
I use cv2.multiply, which seems to be faster than numpy.multiply. cv2.multiply has the added advantage that it automatically clips the results to the range 0 to 255.
import numpy as np
import cv2
import time

img = cv2.imread('clock1.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
template = cv2.imread('target.jpg')
t_h, t_w = template.shape[0:2]  # template height and width
mask_background = 'WHITE'
start_time = time.time()
for i in range(100):  # do 100 cycles for timing
    # find circle in gray image using Hough transform
    circles = cv2.HoughCircles(gray, method = cv2.HOUGH_GRADIENT, dp = 1,
                               minDist = 150, param1 = 50, param2 = 70,
                               minRadius = 131, maxRadius = 200)
    i = circles[0,0]
    x0 = i[0]
    y0 = i[1]
    r = i[2]
    # display circle on color image
    cv2.circle(img, (x0, y0), r, (0,255,0), 2)
    if mask_background == 'BLACK':  # black = 0, white = 255 on grayscale
        mask = np.zeros(img.shape, dtype = np.uint8)
    elif mask_background == 'WHITE':
        mask = 255*np.ones(img.shape, dtype = np.uint8)
    cv2.circle(mask, (x0, y0), r, color = (1,1,1), thickness = -1)
    img2 = cv2.multiply(img, mask)  # element wise multiplication
                                    # values > 255 are truncated at 255
    # do the template match
    result = cv2.matchTemplate(img2, template, cv2.TM_CCOEFF_NORMED)
    # call minMaxLoc
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    # draw found rectangle on img
    if max_val > 0.4:
        cv2.rectangle(img, max_loc, (max_loc[0]+t_w, max_loc[1]+t_h), (0,255,0), 4)
fps = 100/(time.time()-start_time)
print('fps ', fps)
cv2.imwrite('output.jpg', img)
Profiling results:
BLACK background 12.3 fps
WHITE background 12.1 fps
Using this method has very little performance hit relative to the 12.7 fps in the original question. However, it has the drawback that it will still find templates that stick over the edge a little bit. Depending on the exact nature of the problem, this may be acceptable in many applications.
Method 2: use cv2.boxFilter to create mask for minMaxLoc
In this technique, we start with a circular mask (as in the OP), but then modify it with cv2.boxFilter. We change the anchor from the default (the center of the kernel) to the top-left corner (0, 0).
import numpy as np
import cv2
import time

img = cv2.imread('clock1.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
template = cv2.imread('target.jpg')
t_h, t_w = template.shape[0:2]  # template height and width
print('t_h, t_w ', t_h, ' ', t_w)
start_time = time.time()
for i in range(100):
    # find circle in gray image using Hough transform
    circles = cv2.HoughCircles(gray, method = cv2.HOUGH_GRADIENT, dp = 1,
                               minDist = 150, param1 = 50, param2 = 70,
                               minRadius = 131, maxRadius = 200)
    i = circles[0,0]
    x0 = i[0]
    y0 = i[1]
    r = i[2]
    # display circle on color image
    cv2.circle(img, (x0, y0), r, (0,255,0), 2)
    # do the template match
    result = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
    # finally, here is the part that gets tricky. we want to find highest
    # rated match inside circle and we'd like to use minMaxLoc
    # start to make mask by drawing circle on zero array
    mask = np.zeros(result.shape, dtype = np.float32)
    cv2.circle(mask, (x0, y0), r, color = 1, thickness = -1)
    mask = cv2.boxFilter(mask,
                         ddepth = -1,
                         ksize = (t_w, t_h),
                         anchor = (0,0),
                         normalize = True,
                         borderType = cv2.BORDER_ISOLATED)
    # mask now contains values from zero to 1. we want to make anything
    # less than 1 equal to zero
    _, mask = cv2.threshold(mask, thresh = 0.9999,
                            maxval = 1.0, type = cv2.THRESH_BINARY)
    mask = mask.astype(np.uint8)
    # call minMaxLoc
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result, mask = mask)
    # draw found rectangle on img
    if max_val > 0.4:
        cv2.rectangle(img, max_loc, (max_loc[0]+t_w, max_loc[1]+t_h), (0,255,0), 4)
fps = 100/(time.time()-start_time)
print('fps ', fps)
cv2.imwrite('output.jpg', img)
This code gives a mask identical to the OP's, but at 11.89 fps. This technique gives us more accuracy with a slightly larger performance hit than Method 1.
