How do I detect vertical text with OpenCV for extraction - python-3.x

I am new to OpenCV and trying to see if I can find a way to detect vertical text for the image attached.
In this case, on row 3, I would like to get the bounding box around Original Cost and the amount below it ($200,000.00).
Similarly, I would like to get the bounding box around Amount Existing Liens and the associated amount below it. I would then send this data to an OCR engine to read the text. Traditional OCR engines go line by line, extracting text and losing this context.
Here is what I have tried so far -
import cv2
import numpy as np
img = cv2.imread('Test3.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,100,100,apertureSize = 3)
cv2.imshow('edges',edges)
cv2.waitKey(0)
minLineLength = 20
maxLineGap = 10
lines = cv2.HoughLinesP(edges,1,np.pi/180,15,minLineLength=minLineLength,maxLineGap=maxLineGap)
for x in range(0, len(lines)):
    for x1,y1,x2,y2 in lines[x]:
        cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
cv2.imshow('hough',img)
cv2.waitKey(0)

Here is my solution, based on Kanan Vyas and Adrian Rosebrock.
It's probably not as "canonical" as you'd wish.
But it seems to work (more or less...) with the image you provided.
Just a word of CAUTION: the code looks, within the directory from which it is run, for a folder named "Cropped" where the cropped images will be stored. So, don't run it in a directory which already contains a folder named "Cropped", because it deletes everything in this folder at each run. Understood? If you're unsure, run it in a separate folder.
The code:
# Import required packages
import cv2
import numpy as np
import pathlib
###################################################################################################################################
# https://www.pyimagesearch.com/2015/04/20/sorting-contours-using-python-and-opencv/
###################################################################################################################################
def sort_contours(cnts, method="left-to-right"):
    # initialize the reverse flag and sort index
    reverse = False
    i = 0
    # handle if we need to sort in reverse
    if method == "right-to-left" or method == "bottom-to-top":
        reverse = True
    # handle if we are sorting against the y-coordinate rather than
    # the x-coordinate of the bounding box
    if method == "top-to-bottom" or method == "bottom-to-top":
        i = 1
    # construct the list of bounding boxes and sort them from top to
    # bottom
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: b[1][i], reverse=reverse))
    # return the list of sorted contours and bounding boxes
    return (cnts, boundingBoxes)
###################################################################################################################################
# https://medium.com/coinmonks/a-box-detection-algorithm-for-any-image-containing-boxes-756c15d7ed26 (with a few modifications)
###################################################################################################################################
def box_extraction(img_for_box_extraction_path, cropped_dir_path):
    img = cv2.imread(img_for_box_extraction_path, 0)  # Read the image
    (thresh, img_bin) = cv2.threshold(img, 128, 255,
                                      cv2.THRESH_BINARY | cv2.THRESH_OTSU)  # Threshold the image
    img_bin = 255 - img_bin  # Invert the image
    # cv2.imwrite("Image_bin.jpg", img_bin)
    # Defining a kernel length
    kernel_length = np.array(img).shape[1]//200
    # A vertical kernel of (1 X kernel_length), which will detect all the vertical lines in the image.
    verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_length))
    # A horizontal kernel of (kernel_length X 1), which will help to detect all the horizontal lines in the image.
    hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
    # A kernel of (3 X 3) ones.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # Morphological operation to detect vertical lines in the image
    img_temp1 = cv2.erode(img_bin, verticle_kernel, iterations=3)
    verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=3)
    # cv2.imwrite("verticle_lines.jpg", verticle_lines_img)
    # Morphological operation to detect horizontal lines in the image
    img_temp2 = cv2.erode(img_bin, hori_kernel, iterations=3)
    horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=3)
    # cv2.imwrite("horizontal_lines.jpg", horizontal_lines_img)
    # Weighting parameters; these decide how much of each image is added to make the new image.
    alpha = 0.5
    beta = 1.0 - alpha
    # Add the two images with the given weights to get a third image as the combination of both.
    img_final_bin = cv2.addWeighted(verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)
    img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
    (thresh, img_final_bin) = cv2.threshold(img_final_bin, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # For debugging: enable this line to see the vertical and horizontal lines used to find boxes
    # cv2.imwrite("img_final_bin.jpg", img_final_bin)
    # Find contours in the image, which will detect all the boxes
    contours, hierarchy = cv2.findContours(
        img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Sort all the contours from top to bottom.
    (contours, boundingBoxes) = sort_contours(contours, method="top-to-bottom")
    idx = 0
    for c in contours:
        # Returns the location and width, height for every contour
        x, y, w, h = cv2.boundingRect(c)
        # Only save the box into the "Cropped/" folder if its width is > 50 and its height is > 20.
        if (w > 50 and h > 20):  # and w > 3*h:
            idx += 1
            new_img = img[y:y+h, x:x+w]
            cv2.imwrite(cropped_dir_path+str(x)+'_'+str(y) + '.png', new_img)
###########################################################################################################################################################
def prepare_cropped_folder():
    p = pathlib.Path('./Cropped')
    if p.exists():  # Cropped folder exists and may not be empty. Let's clean up
        files = [x for x in p.glob('*.*') if x.is_file()]
        for f in files:
            f.unlink()
    else:
        p.mkdir()
###########################################################################################################################################################
# MAIN
###########################################################################################################################################################
prepare_cropped_folder()
# Read image from which text needs to be extracted
img = cv2.imread("dkesg.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Performing OTSU threshold
ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
thresh1=255-thresh1
bin_y=np.zeros(thresh1.shape[0])
for x in range(0, len(bin_y)):
    bin_y[x] = sum(thresh1[x, :])
bin_y = bin_y / max(bin_y)
ry = np.where(bin_y > 0.995)[0]
for i in range(0, len(ry)):
    cv2.line(img, (0, ry[i]), (thresh1.shape[1], ry[i]), (0, 0, 0), 1)
# We need to draw a box around the picture with a white border in order for box_extraction to work
cv2.line(img,(0,0),(0,img.shape[0]-1),(255,255,255),2)
cv2.line(img,(img.shape[1]-1,0),(img.shape[1]-1,img.shape[0]-1),(255,255,255),2)
cv2.line(img,(0,0),(img.shape[1]-1,0),(255,255,255),2)
cv2.line(img,(0,img.shape[0]-1),(img.shape[1]-1,img.shape[0]-1),(255,255,255),2)
cv2.line(img,(0,0),(0,img.shape[0]-1),(0,0,0),1)
cv2.line(img,(img.shape[1]-3,0),(img.shape[1]-3,img.shape[0]-1),(0,0,0),1)
cv2.line(img,(0,0),(img.shape[1]-1,0),(0,0,0),1)
cv2.line(img,(0,img.shape[0]-2),(img.shape[1]-1,img.shape[0]-2),(0,0,0),1)
cv2.imwrite('out.png',img)
box_extraction("out.png", "./Cropped/")
Now... it puts the cropped regions in the Cropped folder. They are named x_y.png, with (x, y) the position of the crop in the original image.
Here are two examples of the outputs
and
Now, in a terminal, I ran pytesseract on these two images.
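For reference, the same step in Python would look something like this minimal sketch (assuming the pytesseract package is installed; the cropped file name is just an example):
import cv2
import pytesseract

# OCR one of the cropped cells produced above (file name is an example)
cell = cv2.imread('./Cropped/10_150.png')
print(pytesseract.image_to_string(cell))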
The results are the following:
1)
Original Cost
$200,000.00
2)
Amount Existing Liens
$494,215.00
As you can see, pytesseract got the amount wrong in the second case... So, be careful.
Best regards,
Stéphane

I assume the bounding box is fixed (a rectangle able to fit "Original Cost" and the amount below it). You can use text detection to locate "Original Cost" and "Amount Existing Liens" with OCR, then crop the image based on the detected locations for further OCR on the amounts. You can refer to this link for text detection.
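A minimal sketch of that idea (assuming the pytesseract package; the label word, padding and crop size are example values you would tune for your layout):
import cv2
import pytesseract
from pytesseract import Output

img = cv2.imread('Test3.png')
data = pytesseract.image_to_data(img, output_type=Output.DICT)

# Locate the label word, then crop a rough region covering it and the amount below it
for i, word in enumerate(data['text']):
    if word.strip() == 'Original':
        x, y, w, h = data['left'][i], data['top'][i], data['width'][i], data['height'][i]
        crop = img[y:y + 3 * h, x:x + 6 * w]   # example padding; tune for your layout
        print(pytesseract.image_to_string(crop))
        break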

Try to divide the image into different cells using the lines in the image.
For example, first divide the input into rows by detecting the horizontal lines. This can be done by using cv2.HoughLinesP and checking, for each line, whether the difference between the y-coordinates of its begin and end points is smaller than a certain threshold, e.g. abs(y2 - y1) < 10. If you have a horizontal line, it's a separator for a new row. You can use the y-coordinates of these lines to split the input horizontally.
Next, for the row you're interested in, divide the region into columns using the same technique, but now make sure the difference between the x-coordinates of the begin and end points is smaller than a certain threshold, since you're now looking for the vertical lines.
You can now crop the image into different cells using the y-coordinates of the horizontal lines and the x-coordinates of the vertical lines. Pass these cropped regions one by one to the OCR engine and you'll have the corresponding text for each cell; a rough sketch of the row-splitting step follows.
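Something like this, assuming lines are found (the thresholds are example values to tune):
import cv2
import numpy as np

img = cv2.imread('Test3.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 100, 200, apertureSize=3)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50, minLineLength=100, maxLineGap=10)

# Keep the y-coordinates of (nearly) horizontal lines, i.e. abs(y2 - y1) < 10
row_ys = sorted({y1 for line in lines for x1, y1, x2, y2 in line if abs(y2 - y1) < 10})

# Cut the image into rows between consecutive horizontal lines
rows = [img[y_top:y_bot] for y_top, y_bot in zip(row_ys, row_ys[1:]) if y_bot - y_top > 5]
# Repeat the same idea per row with abs(x2 - x1) < 10 to split each row into columns.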

Related

Finding points within a contour using OpenCV

For any general contour, I was wondering if there is a method that can find an arbitrary number of different points within it using OpenCV.
sample random contour
I'd like to be able to find an arbitrary number of different points within the white region. Any help is appreciated.
I don't think there's a built in way to do that. Here's a potential solution:
This works only if there are just 2 contour levels AND the entire contour line is within the image (as is the case with the picture you linked).
Suppose that you're working with a black/white image (the pixels in the white contour lines are simply 1, and all other pixels are 0).
And let region 0 be the part of the image outside the white line and region 1 be the part inside.
import numpy as np
import cv2
# Open the image
img = cv2.imread('<file_path_to_image>', cv2.IMREAD_GRAYSCALE)
# Get its dimensions
height, width = img.shape
# Create an array of zeros with the same dimensions as the image...
levels = np.zeros((height, width))
for i in range(height):
    current_region = 0
    for j in range(width):
        # If this pixel is white (it's a contour line), you're transitioning either from region 0 to 1, or the other way around
        if img[i][j]:
            current_region = 1 if current_region == 0 else 0
        # But if the pixel is black, just record its region into the new array
        else:
            levels[i][j] = current_region
# So now the array 'levels' has all the info you need.
# The pixels inside the contour are 1; the ones outside are 0.

How can i get the inner contour points without redundancy in OpenCV - Python

I'm new to OpenCV and the thing is that I need to get all the contour points. This is easy by setting the cv2.RETR_TREE mode in the findContours method. The thing is that this way returns redundant coordinates. So, for example, in this polygon, I don't want to get the contour points like this:
But like this:
So, according to the first image, the green colour shows the contours detected with RETR_TREE mode, and points 1-2, 3-5, 4-6, ... are redundant because they are so close to each other. I need to merge those redundant points into one and append it to the customContours array.
For the moment, I only have the code corresponding to the first picture, setting up the distance between the points and the point coordinates:
import cv2
import numpy as np

def getContours(img, minArea=20000, cThr=[100, 100]):
    font = cv2.FONT_HERSHEY_COMPLEX
    imgColor = img
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, cThr[0], cThr[1])
    kernel = np.ones((5, 5))
    imgDial = cv2.dilate(imgCanny, kernel, iterations=3)
    imgThre = cv2.erode(imgDial, kernel, iterations=2)
    cv2.imshow('threshold', imgThre)
    contours, hierachy = cv2.findContours(imgThre, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    customContours = []
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > minArea:
            peri = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.009*peri, True)
            bbox = cv2.boundingRect(approx)
            customContours.append([len(approx), area, approx, bbox, cnt])
            print('points: ', len(approx))
            n = approx.ravel()
            i = 0
            for j in n:
                if i % 2 == 0:
                    x = n[i]
                    y = n[i + 1]
                    string = str(x) + " " + str(y)
                    cv2.putText(imgColor, str(i//2+1) + ': ' + string, (x, y), font, 2, (0, 0, 0), 2)
                i = i + 1
    customContours = sorted(customContours, key=lambda x: x[1], reverse=True)
    for cnt in customContours:
        cv2.drawContours(imgColor, [cnt[2]], 0, (0, 0, 255), 5)
    return imgColor, customContours
Could you help me to get the real points, as in the second picture?
(EDIT 01/07/21)
I want a generic solution, because the image could be more complex, such as the following picture:
NOTE: notice that the middle arrow (points 17 and 18) doesn't enclose an area, so it isn't a polygon to study and we are not interested in obtaining its points. Also, notice that the order of the points isn't important, but if the input is the whole image, it should recognise that there are 4 polygons, so for each polygon the points start with 0, then 1, etc.
Here's my approach. It is mainly morphology-based. It involves convolving the image with a special kernel. This convolution identifies the end-points of the triangle as well as the intersection points where the middle line is present. This will result in a points mask containing the pixels that match the points you are looking for. After that, we can apply a little bit of morphology to join possible duplicated points. What remains is to get a list of the coordinates of these points for further processing.
These are the steps:
Get a binary image of the input via Otsu's thresholding
Get the skeleton of the binary image
Define the special kernel and convolve the skeleton image
Apply a morphological dilate to join possible duplicated points
Get the centroids of the points and store them in a list
Here's the code:
# Imports:
import numpy as np
import cv2
# image path
path = "D://opencvImages//"
fileName = "triangle.png"
# Reading an image in default mode:
inputImage = cv2.imread(path + fileName)
# Prepare a deep copy for results:
inputImageCopy = inputImage.copy()
# Convert BGR to Grayscale
grayImage = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
# Threshold via Otsu:
_, binaryImage = cv2.threshold(grayImage, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
The first bit computes the binary image. Very straightforward. I'm using this image as base, which is just a cleaned-up version of what you posted without the annotations. This is the resulting binary image:
Now, to perform the convolution we must first get the image "skeleton". The skeleton is a version of the binary image where lines have been normalized to have a width of 1 pixel. This is useful because we can then convolve the image with a 3 x 3 kernel and look for specific pixel patterns. Let's compute the skeleton using OpenCV's extended image processing module:
# Get image skeleton:
skeleton = cv2.ximgproc.thinning(binaryImage, None, 1)
This is the image obtained:
We can now apply the convolution. The approach is based on Mark Setchell's info on this post. The post mainly shows the method for finding end-points of a shape, but I extended it to also identify line intersections, such as the middle portion of the triangle. The main idea is that the convolution yields a very specific value where patterns of black and white pixels are found in the input image. Refer to the post for the theory behind this idea, but here we are looking for two values: 110 and 40. The first one occurs when an end-point has been found. The second one occurs when a line intersection is found. Let's set up the convolution:
# Threshold the image so that white pixels get a value of 0 and
# black pixels a value of 10:
_, binaryImage = cv2.threshold(skeleton, 128, 10, cv2.THRESH_BINARY)
# Set the convolution kernel:
h = np.array([[1, 1, 1],
              [1, 10, 1],
              [1, 1, 1]])
# Convolve the image with the kernel:
imgFiltered = cv2.filter2D(binaryImage, -1, h)
# Create list of thresholds:
thresh = [110, 40]
The first part is done. We are going to detect end-points and intersections in two separated steps. Each step will produce a partial result, we can OR both results to get a final mask:
# Prepare the final mask of points:
(height, width) = binaryImage.shape
pointsMask = np.zeros((height, width, 1), np.uint8)
# Perform convolution and create points mask:
for t in range(len(thresh)):
    # Get current threshold:
    currentThresh = thresh[t]
    # Locate the threshold in the filtered image:
    tempMat = np.where(imgFiltered == currentThresh, 255, 0)
    # Convert and shape the image to a uint8 height x width x channels
    # numpy array:
    tempMat = tempMat.astype(np.uint8)
    tempMat = tempMat.reshape(height, width, 1)
    # Accumulate mask:
    pointsMask = cv2.bitwise_or(pointsMask, tempMat)
This is the final mask of points:
Note that the white pixels are the locations that matched our target patterns. Those are the points we are looking for. As the shape is not a perfect triangle, some points could be duplicated. We can "merge" neighboring blobs by applying a morphological dilation:
# Set kernel (structuring element) size:
kernelSize = 7
# Set operation iterations:
opIterations = 3
# Get the structuring element:
morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernelSize, kernelSize))
# Perform Dilate:
morphoImage = cv2.morphologyEx(pointsMask, cv2.MORPH_DILATE, morphKernel, None, None, opIterations, cv2.BORDER_REFLECT101)
This is the result:
Very nice, we now have big clusters of pixels (or blobs). To get their coordinates, one possible approach would be to get the bounding rectangles of these contours and compute their centroids:
# Look for the outer contours (no children):
contours, _ = cv2.findContours(morphoImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Store the points here:
pointsList = []
# Loop through the contours:
for i, c in enumerate(contours):
    # Get the contour's bounding rectangle:
    boundRect = cv2.boundingRect(c)
    # Get the centroid of the rectangle:
    cx = int(boundRect[0] + 0.5 * boundRect[2])
    cy = int(boundRect[1] + 0.5 * boundRect[3])
    # Store centroid into list:
    pointsList.append((cx, cy))
    # Set centroid circle and text:
    color = (0, 0, 255)
    cv2.circle(inputImageCopy, (cx, cy), 3, color, -1)
    font = cv2.FONT_HERSHEY_COMPLEX
    string = str(cx) + ", " + str(cy)
    cv2.putText(inputImageCopy, str(i) + ':' + string, (cx, cy), font, 0.5, (255, 0, 0), 1)
# Show image:
cv2.imshow("Circles", inputImageCopy)
cv2.waitKey(0)
These are the points located in the original input:
Note also that I've stored their coordinates in the pointsList list:
# Print the list of points:
print(pointsList)
This prints the centroids as the tuple (centroidX, centroidY):
[(717, 971), (22, 960), (183, 587), (568, 586), (388, 98)]

Paste an image to another image at two given co-ordinates with altered opacity using PIL or OpenCV in Python

I have two images with given points, one point in each image, that need to be aligned so that the resulting image is a combination of both, with image 2 pasted on image 1 at 40% opacity. I have taken this question into consideration, but our case does not exactly match, as the image coordinates are supplied by the user and the images can have a wide range of sizes.
Image 1:
Image2:
Final result(desired output):
For this I have tried the img.paste() function of PIL and replacing values in the numpy arrays of the images in cv2, both giving results that are far from the desired output.
I made two input images with ImageMagick like this:
magick -size 300x400 xc:"rgb(1,204,255)" -fill red -draw "point 280,250" 1.png
magick -size 250x80 xc:"rgb(150,203,0)" -fill red -draw "point 12,25" 2.png
Then ran the following code:
#!/usr/bin/env python3
"""
Paste one image on top of another such that given points in each are coincident.
"""
from PIL import Image
# Open images and ensure RGB
im1 = Image.open('1.png').convert('RGB')
im2 = Image.open('2.png').convert('RGB')
# x,y coordinates of point in each image
p1x, p1y = 280, 250
p2x, p2y = 12, 25
# Work out how many pixels of space we need left, right, above, below common point in new image
pL = max(p1x, p2x)
pR = max(im1.width-p1x, im2.width-p2x)
pT = max(p1y, p2y)
pB = max(im1.height-p1y, im2.height-p2y)
# Create background in solid white
bg = Image.new('RGB', (pL+pR, pT+pB),'white')
bg.save('DEBUG-bg.png')
# Paste im1 onto background
bg.paste(im1, (pL-p1x, pT-p1y))
bg.save('DEBUG-bg+im1.png')
# Make 40% opacity mask for im2
alpha = Image.new('L', (im2.width,im2.height), int(40*255/100))
alpha.save('DEBUG-alpha.png')
# Paste im2 over background with alpha
bg.paste(im2, (pL-p2x, pT-p2y), alpha)
bg.save('result.png')
The result is this:
The lines that save images with names starting with "DEBUG-" are just for easy debugging and can be removed. I can easily view them all to see what is going on with the code, and I can easily delete them all by removing "DEBUG*png".
Without any more details, I will try to answer the question as best as I can and will name all the extra assumptions that I made (and how to handle them if you can't make them).
Since there were no provided images, I created a blue and green image with a black dot as merging coordinate, using the following code:
import numpy as np
from PIL import Image, ImageDraw
def create_image_with_point(name, color, x, y, width=3):
    image = np.full((400, 400, 3), color, dtype=np.uint8)
    image[y - width:y + width, x - width:x + width] = (0, 0, 0)
    image = Image.fromarray(image, mode='RGB')
    ImageDraw.Draw(image).text((x - 15, y - 20), 'Point', (0, 0, 0))
    image.save(name)
    return image
blue = create_image_with_point('blue.png', color=(50, 50, 255), x=300, y=100)
green = create_image_with_point('green.png', color=(50, 255, 50), x=50, y=50)
This results in the following images:
Now I will make the assumption that the images do not contain an alpha layer yet (as I created them without). Therefore I will load the image and add an alpha layer to them:
import numpy as np
from PIL import Image
blue = Image.open('blue.png')
blue.putalpha(255)
green = Image.open('green.png')
green.putalpha(255)
My following assumption is that you know the merge coordinates beforehand:
# Assuming x, y coordinates.
point_blue = (300, 100)
point_green = (50, 50)
Then you can create an empty image, that can hold both of the images easily:
new_image = np.zeros((1000, 1000, 4), dtype=np.uint8)
This is a big assumption if you do not know the image sizes beforehand; in that case you will have to calculate the combined size of the two images.
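If not, a minimal sketch of how that size could be computed (a hypothetical helper, same idea as the pL/pR/pT/pB computation in the previous answer):
def combined_canvas_size(im1, p1, im2, p2):
    # Space needed on each side of the common point (PIL images, (x, y) points)
    left = max(p1[0], p2[0])
    right = max(im1.width - p1[0], im2.width - p2[0])
    top = max(p1[1], p2[1])
    bottom = max(im1.height - p1[1], im2.height - p2[1])
    # Returns (canvas width, canvas height) and where the common point lands
    return (left + right, top + bottom), (left, top)

# Example: canvas_size, common_point = combined_canvas_size(blue, point_blue, green, point_green)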
Then you can place each image's dot at the center of the newly created image (in my case (500, 500)). For this you use the merging points as offsets. And you can perform alpha blending (in any case: np.uint8(img_1*alpha + img_2*(1-alpha))) to merge the images with different opacities.
Which is in code:
def place_image(image: Image, point_xy: tuple[int, int], dest: np.ndarray, alpha: float = 1.) -> np.ndarray:
    # Place the merging dot on (500, 500).
    offset_x, offset_y = 500 - point_xy[0], 500 - point_xy[1]
    # Calculate the location of the image and perform alpha blending.
    destination = dest[offset_y:offset_y + image.height, offset_x:offset_x + image.width]
    destination = np.uint8(destination * (1 - alpha) + np.array(image) * alpha)
    # Copy the 'merged' image to the destination location.
    dest[offset_y:offset_y + image.height, offset_x:offset_x + image.width] = destination
    return dest
# Add the background image blue with alpha 1
new_image = place_image(blue, point_blue, dest=new_image, alpha=1)
# Add the second image with 40% opacity
new_image = place_image(green, point_green, dest=new_image, alpha=0.4)
# Store the resulting image.
image = Image.fromarray(new_image)
image.save('result.png')
The final result will be a bigger image of the combined images; again, you can calculate the correct bounding box so you don't have these huge areas of 'nothing' sticking out. The final result will look like this:
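If you want to trim that surrounding empty area, a minimal sketch (my addition, using the alpha channel as a mask of where anything was placed):
# Rows/columns where something was pasted have a non-zero alpha channel
ys, xs = np.nonzero(new_image[:, :, 3])
cropped = new_image[ys.min():ys.max() + 1, xs.min():xs.max() + 1]
Image.fromarray(cropped).save('result_cropped.png')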

How to find the document edges in various coloured backgrounds using opencv python? [Document Scanning in various backgrounds]

I currently have a document that needs to be smart-scanned.
For that, I need to find proper contours of the document in any background so that I can do a warped perspective projection and detection with that image.
The main issue faced while doing this is that the document edge detection picks up any kind of background.
So far I have tried using the function HoughLinesP and finding contours on the grayscale blurred image passed through Canny edge detection.
MORPH = 9
CANNY = 84
HOUGH = 25
IM_HEIGHT, IM_WIDTH, _ = rescaled_image.shape
# convert the image to grayscale and blur it slightly
gray = cv2.cvtColor(rescaled_image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7,7), 0)
#dilate helps to remove potential holes between edge segments
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(MORPH,MORPH))
dilated = cv2.dilate(gray, kernel)
# find edges and mark them in the output map using the Canny algorithm
edged = cv2.Canny(dilated, 0, CANNY)
test_corners = self.get_corners(edged)
approx_contours = []
(_, cnts, hierarchy) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
# loop over the contours
for c in cnts:
    # approximate the contour
    approx = cv2.approxPolyDP(c, 80, True)
    if self.is_valid_contour(approx, IM_WIDTH, IM_HEIGHT):
        approx_contours.append(approx)
        break
How can I find a proper bounding box around the document via OpenCV?
Any help will be much appreciated.
(The document is taken from the camera in any angle and any coloured background.)
The following code might help you detect/segment the page in the image...
import cv2
import matplotlib.pyplot as plt
import numpy as np
image = cv2.imread('test_p.jpg')
print(image.shape)
ori = image.copy()
image = cv2.resize(image, (image.shape[1]//10,image.shape[0]//10))
Resized the image to make the operations faster so that we can work in real time.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (11,11), 0)
edged = cv2.Canny(gray, 75, 200)
print("STEP 1: Edge Detection")
plt.imshow(edged)
plt.show()
cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts[1], key = cv2.contourArea, reverse = True)[:5]
Here we will consider only the first 5 contours from the sorted list based on area.
Here the size of the Gaussian blur is a bit sensitive, so choose it accordingly based on the image size.
After the above operations image may look like..
for c in cnts:
    ### Approximating the contour
    # Calculates a contour perimeter or a curve length
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.01 * peri, True)
    # if our approximated contour has four points, then we
    # can assume that we have found our screen
    screenCnt = approx
    if len(approx) == 4:
        screenCnt = approx
        break
# show the contour (outline)
print("STEP 2: Finding Boundary")
cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
image_e = cv2.resize(image,(image.shape[1],image.shape[0]))
cv2.imwrite('image_edge.jpg',image_e)
plt.imshow(image_e)
plt.show()
Final Image may look like...
The rest (for example the perspective warp mentioned in the question) can be handled after getting the final image; a rough sketch of that step follows.
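A possible sketch of the warp, continuing the script above (not from the referenced repository; the order_points helper and the x10 scale-back to the full-resolution image ori are my assumptions based on the resize used earlier):
def order_points(pts):
    # Order corners as top-left, top-right, bottom-right, bottom-left
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    d = np.diff(pts, axis=1)
    rect[0] = pts[np.argmin(s)]  # top-left: smallest x + y
    rect[2] = pts[np.argmax(s)]  # bottom-right: largest x + y
    rect[1] = pts[np.argmin(d)]  # top-right: smallest y - x
    rect[3] = pts[np.argmax(d)]  # bottom-left: largest y - x
    return rect

# Scale the detected corners back to the full-resolution image ('ori' above)
corners = order_points(screenCnt.reshape(4, 2).astype("float32")) * 10
(tl, tr, br, bl) = corners
w = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
h = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))
dst = np.array([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]], dtype="float32")
M = cv2.getPerspectiveTransform(corners, dst)
warped = cv2.warpPerspective(ori, M, (w, h))
cv2.imwrite('warped.jpg', warped)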
Code Reference :- Git Repository
I guess this answer would be helpful...
There is a similar problem, which is called orthographic projection.
Orthographic approaches
Rather than doing Gaussian blur + morphological operations to get the edge of the document, try to do an orthographic projection first and then find contours via your method.
For finding a proper bounding box, try some preset values or a reference letter, after which an orthographic projection will allow you to compute the height and hence the dimensions of the bounding box.

Removing noise after doing some preprocessing, so how come it fails? (OpenCV with Python)

So after some image preprocessing I have gotten an image which holds 5 contours
(The image was resized for posting here in stackoverflow):
I'd like to remove all "islands" except for the actual letter.
At first I tried using cv2.erode and cv2.dilate with all kinds of kernel sizes and it didn't do the job, so I decided to remove the noise by masking out all contours except the largest one, like this:
_, cnts, _ = cv2.findContours(original, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
I would expect, according to the given image, that there would be 5 contours.
areas = []
for contour in cnts:
    area = cv2.contourArea(contour)
    areas.append(area)
relevant_indexes = list(range(1, len(cnts)))
relevant_indexes.remove(areas.index(max(areas)))
mask = numpy.zeros(eroded.shape).astype(eroded.dtype)
color = 255
for i in relevant_indexes:
    cv2.fillPoly(mask, cnts[i], color)
cv2.imwrite("mask.png", mask)
# Trying to mask out the noise
result = cv2.bitwise_xor(original, mask)
cv2.imwrite("result.png", result)
But the mask I get is:
It's not what I would expect, and the bottom-left contour is missing.
Can someone PLEASE explain to me what I am missing here? And what would be the correct approach for getting rid of those "isolated islands"?
Thank you all!
p.s
The original photo I'm working on:
Solution:
It sounds like you want to mask out the largest connected component (cv-speak for "island").
Here's an opencv/python script to do that:
#!/usr/bin/env python
import cv2
import numpy as np
# load image in grayscale
img = cv2.imread("img.png", 0)
# get all connected components
_, output, stats, _ = cv2.connectedComponentsWithStats(img, connectivity=4)
# get a list of areas for each group label
group_areas = stats[:, cv2.CC_STAT_AREA]
# get the id of the group with the largest area (ignoring 0, which is the background id)
max_group_id = np.argmax(group_areas[1:]) + 1
# get max_group_id mask and save it as an image
max_group_id_mask = (output == max_group_id).astype(np.uint8) * 255
cv2.imwrite("output.png", max_group_id_mask)
Result:
Here's the result of the above script on your sample image:
