Detect text in passport images with OpenCV - python-3.x

I am detecting text from various passport images with OpenCV. The task is to get the cropped text present on passports like Name, DOB, Nationality, etc. The current code is given below:
import cv2
import imutils
import numpy as np

image = cv2.imread(image)  # read image ("image" initially holds the file path)
image = imutils.resize(image, height=600)
original = image.copy()  # assumed: keep a copy of the resized image for ROI extraction below
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 5))
sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 21))
blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKernel)
gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradX = np.absolute(gradX)
(minVal, maxVal) = (np.min(gradX), np.max(gradX))
gradX = (255 * ((gradX - minVal) / (maxVal - minVal))).astype("uint8")
gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)
thresh = cv2.threshold(gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
do_erosion = True  # assumed flag: whether to apply the extra closing + erosion step
if do_erosion:  # do erosion only if the flag is True
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, sqKernel)
    thresh = cv2.erode(thresh, None, iterations=4)
p = int(image.shape[1] * 0.05)  # zero out a 5% border on the left and right
thresh[:, 0:p] = 0
thresh[:, image.shape[1] - p:] = 0  # thresholded image shown below
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)  # find contours in the thresholded image
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
rois = []
# loop over the contours
for c in cnts:
    # compute the bounding box of the contour and use it to compute the
    # aspect ratio and the ratio of the box width to the image width
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)
    crWidth = w / float(gray.shape[1])
    # check to see if the aspect ratio and coverage width are within
    # acceptable criteria
    if ar > 2 and crWidth > 0.05:
        # pad the bounding box since we applied erosions and now need to re-grow it
        pX = int((x + w) * 0.03)
        pY = int((y + h) * 0.03)
        (x, y) = (x - pX, y - pY)
        (w, h) = (w + (pX * 2), h + (pY * 2))
        # extract the ROI from the image and draw a bounding box around it
        roi = original[y:y + h, x:x + w].copy()  # interested in finding all ROIs having text
        rois.append(roi)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
I am detecting some textual values correctly, but the solution is not generic. It sometimes extracts only the year of the DOB but not the full date (sometimes it detects the month and day as well, but not in the same ROI). The original and thresholded images are shown below:
As you can see in the "ROIs found" image, some regions such as the DOB and date of expiry are not detected as single ROIs. Any help in extracting all the text regions from the passport would be highly appreciated. Thanks!

You need to tweak the aspect ratio and coverage ratio thresholds to obtain all the desired bounding boxes.
When I run your code, for the text "05", the value of ar is 1.541 and the value of crWidth is 0.03. Since these values are below the thresholds you specified, that region gets filtered out. This is why some of the words do not have bounding boxes in the final image.
But since you want to get the entire DOB in a single bounding box, just before the line thresh[:, 0:p] = 0 you can apply a dilation operation:
thresh = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, np.ones((1, 30), np.uint8))
This will dilate the white pixels and merge blobs that are close together horizontally. The resultant image after preprocessing is as follows -
The final image -
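For reference, a minimal sketch of where that extra dilation fits in your pipeline, continuing with the variable names from the question (the (1, 30) kernel width is an assumption and may need tuning for other passport layouts):

# ... after Otsu thresholding (and the optional erosion step) ...
# dilate horizontally so that characters of the same field (e.g. day, month and
# year of the DOB) merge into a single blob before the borders are zeroed out
thresh = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, np.ones((1, 30), np.uint8))
p = int(image.shape[1] * 0.05)
thresh[:, 0:p] = 0
thresh[:, image.shape[1] - p:] = 0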

Related

Is there any way to calculate the white distance from the image using python opencv

As you can see in the image, the size of the image is 2000 x 2000. We have to find the distance from the image content to all four edges. I am trying with OpenCV contours, but on some other images it is not working.
image = cv2.imread(image_full_path)
ori_h, ori_w, _ = image.shape
print("Original height, width-->", ori_h, ori_w)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # grayscale
_, thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY_INV) # threshold
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
dilated = cv2.dilate(thresh, kernel, iterations=13) # dilate
contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # get contours
idx = 0
# for each contour found, draw a rectangle around it on original image
# print('length', len(contours))
for contour in contours:
    idx += 1
    # get rectangle bounding contour
    [x, y, w, h] = cv2.boundingRect(contour)
    remaining_height_bottom = ori_h - (y + h)
    remaining_width_right_side = ori_w - (x + w)
    print("Upper Height, Right Width-->", y, x)
    print("Bottom Height, Left Width-->", remaining_height_bottom, remaining_width_right_side)
    print("Image Height, Image Width-->", h, w)
Thanks in advance. If anyone thinks the way of asking this question is wrong, please ignore that. I really need help.
Please find the code below; it is working for me:
import cv2
import numpy as np

img = cv2.imread(file_full_path)  # Read in the image and convert to grayscale
ori_h, ori_w, _ = img.shape
scrollbarright = 20
img = img[:, :-scrollbarright]  # drop a 20 px scrollbar strip on the right
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = 255 * (gray < 128).astype(np.uint8)  # binary mask of dark (non-white) pixels
coords = cv2.findNonZero(gray)              # coordinates of all non-white pixels
x, y, w, h = cv2.boundingRect(coords)       # bounding box of the image content
print("I am ",x,y,w,h)
remaining_height_bottom = ori_h - (y + h)
remaining_width_right_side = ori_w - (x + w)
print("Upper Height, Right Width-->", y, x)
print("Bottom Height, Left Width-->", remaining_height_bottom, remaining_width_right_side)
print("Image Height, Image Width-->", h, w)
print("Original Image Height,Original Image Width-->", ori_h, ori_w)

What is the best way to extract text contained within a table in a pdf using python?

I'm constructing a program to extract text from a pdf, put it in a structured format, and send it off to a database. I have roughly 1,400 individual pdfs that all follow a similar format, but nuances in the verbiage and plan designs that the documents summarize make it tricky.
I've played around with a couple of different PDF readers in Python, including tabula-py and pdfminer, but none of them is quite getting to what I'd like to do. Tabula reads in all of the text very well; however, it pulls everything exactly as it lies horizontally, ignoring the fact that some of the text is wrapped in a box. For example, if you open up the sample SBC I have attached where it reads "What is the overall deductible?", Tabula will read in "What is the overall $500/Individual or...", missing the fact that the word "deductible" is really part of the first sentence. (Note: the files I'm working with are PDFs, but I've attached a JPEG because I couldn't figure out how to attach a PDF.)
import tabula
df = tabula.read_pdf(filepath, pandas_options={'header': None})
print(df.iloc[0][0])
print(df)
In the end, I'd really like to be able to parse out the text within each box so that I can better identify which values belong to deductible, out-of-pocket limits, copays/coinsurance, etc. I thought possibly some sort of OCR would allow me to recognize which parts of the PDF are contained in the blue rectangles and then pull the string from there, but I really don't know where to start with that. Sample SBC
@jpnadas In this case the code you copied from my answer in this post isn't really suitable because it addresses the case when a table doesn't have a surrounding grid. That algorithm looks for repeating blocks of text and tries to find a pattern that resembles a table heuristically.
But in this particular case the table does have a grid, and by taking advantage of that we can achieve a much more accurate result.
The strategy is the following:
1. Increase the image gamma to make the grid darker.
2. Get rid of colour and apply Otsu thresholding.
3. Find long vertical and horizontal lines in the image and create a mask from them using the erode and dilate functions.
4. Find the cell blocks in the mask using the findContours function.
5. Find the table objects:
5.1 The rest can be done as in the post about finding a table without a grid: find the table structure heuristically.
5.2 An alternative approach could be to use the hierarchy returned by the findContours function. This approach is even more accurate and allows finding multiple tables in a single image.
6. Having the cell coordinates, it is easy to extract a certain cell image from the original image:
cell_image = image[cell_y:cell_y + cell_h, cell_x:cell_x + cell_w]
7. Apply OCR to each cell_image (a minimal sketch is shown below).
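As an illustrative sketch of that OCR step (assuming pytesseract and the Tesseract engine are installed; the --psm 6 setting is an assumption that often works for single-line cells):

import cv2
import pytesseract

def ocr_cell(image, box):
    # box is one of the (x, y, w, h) tuples produced for a table cell
    x, y, w, h = box
    cell_image = image[y:y + h, x:x + w]
    gray = cv2.cvtColor(cell_image, cv2.COLOR_BGR2GRAY)
    # --psm 6 assumes a single uniform block of text; tune as needed
    return pytesseract.image_to_string(gray, config="--psm 6").strip()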
BUT! I consider the OpenCV approach a last resort, for when you're not able to read the PDF's contents directly: for instance, when a PDF contains a raster image inside.
If it's a vector-based PDF and its contents are readable, it makes more sense to find the table inside the contents and just read the text from it instead of doing heavy 'OCR lifting'.
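For illustration, a minimal sketch of the "read the contents directly" route with pdfminer.six (assuming the PDF contains real text rather than a scanned image; "document.pdf" is a placeholder path):

from pdfminer.high_level import extract_text

text = extract_text("document.pdf")
print(text)

You would still need to reconstruct the table structure from the extracted layout, but no OCR is involved.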
Here's the code for reference for more accurate table recognition:
import os
import imutils
import numpy as np
import argparse
import cv2

def gamma_correction(image, gamma=1.0):
    look_up_table = np.empty((1, 256), np.uint8)
    for i in range(256):
        look_up_table[0, i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
    result = cv2.LUT(image, look_up_table)
    return result

def pre_process_image(image):
    # Applying gamma to make the table lines darker
    gamma = gamma_correction(image, 2)
    # Getting rid of color
    gray = cv2.cvtColor(gamma, cv2.COLOR_BGR2GRAY)
    # Then apply Otsu threshold to reveal important areas
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # Inverting the thresholded image
    return ~thresh

def get_horizontal_lines_mask(image, horizontal_size=100):
    horizontal = image.copy()
    horizontal_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))
    horizontal = cv2.erode(horizontal, horizontal_structure, anchor=(-1, -1), iterations=1)
    horizontal = cv2.dilate(horizontal, horizontal_structure, anchor=(-1, -1), iterations=1)
    return horizontal

def get_vertical_lines_mask(image, vertical_size=100):
    vertical = image.copy()
    vertical_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, vertical_size))
    vertical = cv2.erode(vertical, vertical_structure, anchor=(-1, -1), iterations=1)
    vertical = cv2.dilate(vertical, vertical_structure, anchor=(-1, -1), iterations=1)
    return vertical

def make_lines_mask(preprocessed, min_horizontal_line_size=100, min_vertical_line_size=100):
    hor = get_horizontal_lines_mask(preprocessed, min_horizontal_line_size)
    ver = get_vertical_lines_mask(preprocessed, min_vertical_line_size)
    mask = np.zeros((preprocessed.shape[0], preprocessed.shape[1], 1), dtype=np.uint8)
    mask = cv2.bitwise_or(mask, hor)
    mask = cv2.bitwise_or(mask, ver)
    return ~mask

def find_cell_boxes(mask):
    # Looking for the text spots contours
    # OpenCV 3
    # img, contours, hierarchy = cv2.findContours(pre, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 4
    contours = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    image_width = mask.shape[1]
    # Getting the text bounding boxes based on the text size assumptions
    boxes = []
    for contour in contours:
        box = cv2.boundingRect(contour)
        w = box[2]
        # Excluding the page box shape but adding smaller boxes
        if w < 0.95 * image_width:
            boxes.append(box)
    return boxes

def find_table_in_boxes(boxes, cell_threshold=10, min_columns=2):
    rows = {}
    cols = {}
    # Clustering the bounding boxes by their positions
    for box in boxes:
        (x, y, w, h) = box
        col_key = x // cell_threshold
        row_key = y // cell_threshold
        cols[col_key] = [box] if col_key not in cols else cols[col_key] + [box]
        rows[row_key] = [box] if row_key not in rows else rows[row_key] + [box]
    # Filtering out the clusters having fewer than min_columns columns
    table_cells = list(filter(lambda r: len(r) >= min_columns, rows.values()))
    # Sorting the row cells by x coord
    table_cells = [list(sorted(tb)) for tb in table_cells]
    # Sorting rows by the y coord
    table_cells = list(sorted(table_cells, key=lambda r: r[0][1]))
    return table_cells

def build_vertical_lines(table_cells):
    if table_cells is None or len(table_cells) <= 0:
        return [], []
    max_last_col_width_row = max(table_cells, key=lambda b: b[-1][2])
    max_x = max_last_col_width_row[-1][0] + max_last_col_width_row[-1][2]
    max_last_row_height_box = max(table_cells[-1], key=lambda b: b[3])
    max_y = max_last_row_height_box[1] + max_last_row_height_box[3]
    hor_lines = []
    ver_lines = []
    for box in table_cells:
        x = box[0][0]
        y = box[0][1]
        hor_lines.append((x, y, max_x, y))
    for box in table_cells[0]:
        x = box[0]
        y = box[1]
        ver_lines.append((x, y, x, max_y))
    (x, y, w, h) = table_cells[0][-1]
    ver_lines.append((max_x, y, max_x, max_y))
    (x, y, w, h) = table_cells[0][0]
    hor_lines.append((x, max_y, max_x, max_y))
    return hor_lines, ver_lines

if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="path to images directory")
    args = vars(ap.parse_args())
    in_file = args["image"]
    filename_base = in_file.replace(os.path.splitext(in_file)[1], "")
    img = cv2.imread(in_file)
    pre_processed = pre_process_image(img)
    # Visualizing pre-processed image
    cv2.imwrite(filename_base + ".pre.png", pre_processed)
    lines_mask = make_lines_mask(pre_processed, min_horizontal_line_size=1800, min_vertical_line_size=500)
    # Visualizing table lines mask
    cv2.imwrite(filename_base + ".mask.png", lines_mask)
    cell_boxes = find_cell_boxes(lines_mask)
    cells = find_table_in_boxes(cell_boxes)
    # apply OCR to each cell rect here
    # the cells array contains cell coordinates in tuples (x, y, w, h)
    hor_lines, ver_lines = build_vertical_lines(cells)
    # Visualize the table lines
    vis = img.copy()
    for line in hor_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
    for line in ver_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
    cv2.imwrite(filename_base + ".result.png", vis)
Some parameters are hard-coded:
page size threshold - 0.95
min horizontal line size - 1800 px
min vertical line size - 500 px
You can provide them as configurable parameters or make them relative to image size.
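For example, a rough way to derive them from the image size instead of hard-coding them (the 0.6 and 0.4 fractions are assumptions to be tuned):

h, w = img.shape[:2]
lines_mask = make_lines_mask(
    pre_processed,
    min_horizontal_line_size=int(0.6 * w),  # a grid line should span most of the page width
    min_vertical_line_size=int(0.4 * h))    # a column line should span a good part of the height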
Results:
I think that the best way to do what you need is to find and isolate the cells in the file and then apply OCR to each individual cell.
There are a number of solutions on SO for that. I got the code from this answer and played around a little with the parameters to get the output below (not perfect yet, but you can tweak it a bit yourself).
import os
import cv2
import imutils

# This only works if there's only one table on a page
# Important parameters:
# - morph_size
# - min_text_height_limit
# - max_text_height_limit
# - cell_threshold
# - min_columns

def pre_process_image(img, save_in_file, morph_size=(23, 23)):
    # get rid of the color
    pre = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Otsu threshold
    pre = cv2.threshold(pre, 250, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # dilate the text to make it into solid spots
    cpy = pre.copy()
    struct = cv2.getStructuringElement(cv2.MORPH_RECT, morph_size)
    cpy = cv2.dilate(~cpy, struct, anchor=(-1, -1), iterations=1)
    pre = ~cpy
    if save_in_file is not None:
        cv2.imwrite(save_in_file, pre)
    return pre

def find_text_boxes(pre, min_text_height_limit=20, max_text_height_limit=120):
    # Looking for the text spots contours
    contours, _ = cv2.findContours(pre, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Getting the text bounding boxes based on the text size assumptions
    boxes = []
    for contour in contours:
        box = cv2.boundingRect(contour)
        h = box[3]
        if min_text_height_limit < h < max_text_height_limit:
            boxes.append(box)
    return boxes

def find_table_in_boxes(boxes, cell_threshold=100, min_columns=3):
    rows = {}
    cols = {}
    # Clustering the bounding boxes by their positions
    for box in boxes:
        (x, y, w, h) = box
        col_key = x // cell_threshold
        row_key = y // cell_threshold
        cols[col_key] = [box] if col_key not in cols else cols[col_key] + [box]
        rows[row_key] = [box] if row_key not in rows else rows[row_key] + [box]
    # Filtering out the clusters having fewer than min_columns columns
    table_cells = list(filter(lambda r: len(r) >= min_columns, rows.values()))
    # Sorting the row cells by x coord
    table_cells = [list(sorted(tb)) for tb in table_cells]
    # Sorting rows by the y coord
    table_cells = list(sorted(table_cells, key=lambda r: r[0][1]))
    return table_cells

def build_lines(table_cells):
    if table_cells is None or len(table_cells) <= 0:
        return [], []
    max_last_col_width_row = max(table_cells, key=lambda b: b[-1][2])
    max_x = max_last_col_width_row[-1][0] + max_last_col_width_row[-1][2]
    max_last_row_height_box = max(table_cells[-1], key=lambda b: b[3])
    max_y = max_last_row_height_box[1] + max_last_row_height_box[3]
    hor_lines = []
    ver_lines = []
    for box in table_cells:
        x = box[0][0]
        y = box[0][1]
        hor_lines.append((x, y, max_x, y))
    for box in table_cells[0]:
        x = box[0]
        y = box[1]
        ver_lines.append((x, y, x, max_y))
    (x, y, w, h) = table_cells[0][-1]
    ver_lines.append((max_x, y, max_x, max_y))
    (x, y, w, h) = table_cells[0][0]
    hor_lines.append((x, max_y, max_x, max_y))
    return hor_lines, ver_lines

if __name__ == "__main__":
    in_file = os.path.join(".", "test.jpg")
    pre_file = os.path.join(".", "pre.png")
    out_file = os.path.join(".", "out.png")
    img = cv2.imread(os.path.join(in_file))
    pre_processed = pre_process_image(img, pre_file)
    text_boxes = find_text_boxes(pre_processed)
    cells = find_table_in_boxes(text_boxes)
    hor_lines, ver_lines = build_lines(cells)
    # Visualize the result
    vis = img.copy()
    # for box in text_boxes:
    #     (x, y, w, h) = box
    #     cv2.rectangle(vis, (x, y), (x + w - 2, y + h - 2), (0, 255, 0), 1)
    for line in hor_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
    for line in ver_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
    cv2.imwrite(out_file, vis)

OpenCV: Segment each digit from the given image. Digits are written in each cell of a row matrix. Each cell is bounded by margins

I have been trying to recognise handwritten characters (digits/alphabet) from a form document. As is known, form documents have 1-D rows of cells, where the applicant has to fill in their information within those bounded cells. However, I'm unable to segment the digits (currently my input consists only of digits) from the bounding boxes.
I went through the following steps:
Reading the image (as a grayscale image) via the "imread" method of OpenCV. Initial image size: 19 x 209 (in pixels).
pic = "crop/cropped000.jpg"
newImg = cv2.imread(pic, 0)
Resizing the image to 200% of its original size via the "resize" method of OpenCV. I used INTER_AREA interpolation. Resized image size: 38 x 418 (in pixels).
h,w = newImg.shape
resizedImg = cv2.resize(newImg, (2*w,2*h), interpolation=cv2.INTER_AREA)
Applied Canny edge detection.
v = np.median(resizedImg)
sigma = 0.33
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edgedImg = cv2.Canny(resizedImg, lower, upper)
Cropped the contours and saved them as images in the 'BB' directory.
im2, contours, hierarchy = cv2.findContours(edgedImg.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
num = 0
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    num += 1
    new_img = resizedImg[y:y+h, x:x+w]
    cv2.imwrite('BB/' + str(num).zfill(3) + '.jpg', new_img)
Entire code in summary:
pic = "crop/cropped000.jpg"
newImg = cv2.imread(pic, 0)
h,w = newImg.shape
print(newImg.shape)
resizedImg = cv2.resize(newImg, (2*w,2*h), interpolation=cv2.INTER_AREA)
print(resizedImg.shape)
v = np.median(resizedImg)
sigma = 0.33
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edgedImg = cv2.Canny(resizedImg, lower, upper)
im2, contours, hierarchy = cv2.findContours(edgedImg.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
num = 0
for c in contours:
x, y, w, h = cv2.boundingRect(c)
num += 1
new_img = resizedImg[y:y+h, x:x+w]
cv2.imwrite('BB/'+str(num).zfill(3) + '.jpg', new_img)
Images produced are posted here:
https://imgur.com/a/GStIcdj
I had to double the image size because Canny edge detection was producing double edges for an object (however, it still does). I have also played with other OpenCV functionality like thresholding, Gaussian blur, dilation and erosion, but all in vain.
# we need one more parameter for the date cell width, as this could differ between banks
def crop_image_data_from_date_field(image, new_start_h, new_end_h, new_start_w, new_end_w, cell_width):
    # for the date, each cell has the same height and width (here width: 25 px),
    # so the coordinates are advanced based on the cell width
    cropped_image_list = []
    starting_width = new_start_w
    for i in range(1, 9):  # the date has only 8 cells: DD/MM/YYYY
        cropped_img = image[new_start_h:new_end_h, new_start_w + 1:new_start_w + 22]
        new_start_w = starting_width + (i * cell_width)
        cropped_img = cv2.resize(cropped_img, (28, 28))
        image_name = 'cropped_date/cropped_' + str(i) + '.png'
        cv2.imwrite(image_name, cropped_img)
        cropped_image_list.append(image_name)
    # print('cropped_image_list : ', cropped_image_list, len(cropped_image_list))
    # rec_value = handwritten_digit_recog.recog_digits(cropped_image_list)
    recvd_value = custom_predict.predict_digit(cropped_image_list)
    # print('recvd val : ', recvd_value)
    return recvd_value
You need to specify each cell's width and its x, y, w, h. I think this will help you.
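For illustration, a hypothetical call (the coordinates, the 25 px cell width, the file name and the custom_predict module are placeholders standing in for your own values and recognition model):

img = cv2.imread("form.png")  # placeholder path to the form image
# assumed pixel bounds of the date row on the form
value = crop_image_data_from_date_field(img, new_start_h=120, new_end_h=150,
                                        new_start_w=300, new_end_w=500, cell_width=25)
print(value)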

Crop Binary Image on Object Height and Width

I have this image:
character
I have successfully isolated the character, and now I want to crop the image to bounds of the binary object. Here is the cleaned up image that I want to crop: edited
I can't seem to figure out the code. Here is what I've tried:
inv = cv2.bitwise_not(img)
mask = np.zeros(img.shape, np.uint8)
_, cnts, hier = cv2.findContours(img.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
    if cv2.contourArea(c) > 100:
        cv2.drawContours(mask, [c], 0, 255, -1)
        x, y, h, w = cv2.boundingRect(c)
        roi = mask[y:y + h, x:x + w]
        crop = img[y:y + h, x:x + w]
        final = crop * (roi / 255)
You can simply use max to find the largest contour and cv2.boundingRect to find the coordinates of the largest contour after which you can crop it.
c = max(contours, key = cv2.contourArea)
x,y,w,h = cv2.boundingRect(c)
It would be better to use cv2.RETR_EXTERNAL instead of cv2.RETR_LIST when calling cv2.findContours in this case, as you are looking only for the boundary of the object and not for internal details.
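Putting those pieces together, a minimal sketch (assuming img is an 8-bit binary image with a white character on a black background, and OpenCV 4's two-value findContours return; the file names are placeholders):

import cv2

img = cv2.imread("character.png", cv2.IMREAD_GRAYSCALE)  # placeholder path
contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
c = max(contours, key=cv2.contourArea)  # largest blob = the character
x, y, w, h = cv2.boundingRect(c)
crop = img[y:y + h, x:x + w]            # crop to the character's bounds
cv2.imwrite("character_cropped.png", crop)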
If you want to crop an image with the points (x1, y1) and (x2, y2):
(x1,y1)+-------+
| |
| |
+-------+(x2,y2)
## mask is a numpy array (an image)
cropped_image = mask[y1:y2, x1:x2]  ## this will return a cropped image
But for your question you want the bounding rectangle to crop the binary image, I guess.
see : https://docs.opencv.org/3.1.0/dd/d49/tutorial_py_contour_features.html
_, contours, _ = cv2.findContours( mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE )
contour = max(contours, key = cv2.contourArea)
x, y, w, h = cv2.boundingRect(contour)
x1, y1, x2, y2 = x, y, x+w, y+h
Now use these points to crop the image.
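That is, reusing the slicing shown above:

cropped_image = mask[y1:y2, x1:x2]
cv2.imwrite("cropped.png", cropped_image)  # save or reuse the crop as needed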

Reading a Meter using OpenCV

I am trying to read the values on my electricity meter's LCD display using OpenCV. From my picture I am able to find the meter using the HoughCircles method, and I am able to find the LCD display on the meter using contours. The LCD display isn't very clear, so I search for contours again to extract the digits from the display. Now I am unable to read the values on the display using tesseract or ssocr; how can I read the values on the LCD display? I just started using OpenCV (beginner), so I don't know the right way to go from here or whether my approach is correct; I would appreciate any help. Below is my code snippet; the meter image links are in the comments.
import os
import cv2
import imutils
import numpy as np
from imutils import contours
from imutils.perspective import four_point_transform

def process_image(path, index):
    img = cv2.imread(path)
    img = cv2.resize(img, (0, 0), fx=2.0, fy=2.0)
    height, width, depth = img.shape
    print("\n---------------------------------------------\n")
    print("In Process Image Path is %s height is %d Width is %d depth is %d" % (path, height, width, depth))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.medianBlur(gray, 15)
    circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, 1.2, 100)
    # ensure at least one circle is found, which is our meter
    if circles is not None:
        circles = np.uint16(np.around(circles))
        print("Meter Found")
        for i in circles[0]:
            CenterX = i[0]
            CenterY = i[1]
            Radius = i[2]
            circle_img = np.zeros((height, width), np.uint8)
            cv2.circle(circle_img, (CenterX, CenterY), Radius, 1, thickness=-1)
            masked_data = cv2.bitwise_and(img, img, mask=circle_img)
            output = masked_data.copy()
            cv2.circle(output, (i[0], i[1]), i[2], (0, 255, 0), 2)
            cv2.circle(output, (i[0], i[1]), 2, (0, 0, 255), 3)
            cv2.imwrite("output_" + str(index) + ".jpg", output)
            break
        gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 1)
        edged = cv2.Canny(blurred, 5, 10, 200)
        cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        displayCnt = None
        contour_list = []
        # loop over the contours
        for c in cnts:
            # approximate the contour
            peri = 0.02 * cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, peri, True)
            # if the contour has four vertices, then we have found the meter display
            if len(approx) == 4:
                contour_list.append(c)
                cv2.contourArea(c)
                displayCnt = approx
                break
        warped = four_point_transform(gray, displayCnt.reshape(4, 2))
        output = four_point_transform(output, displayCnt.reshape(4, 2))
        thresh = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        digitCnts = []
        # loop over the digit area candidates
        for c in cnts:
            # compute the bounding box of the contour
            (x, y, w, h) = cv2.boundingRect(c)
            # if the contour is sufficiently large, it must be a digit
            if (w > 5 and w < 100) and (h >= 15 and h <= 150):
                digitCnts.append(c)
        # sort the contours from left-to-right, then initialize the
        # actual digits themselves
        digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]
        mask = np.zeros(thresh.shape, np.uint8)
        cv2.drawContours(mask, digitCnts, -80, (255, 255, 255), -1)
        mask = cv2.bitwise_not(mask)
        mask = cv2.resize(mask, (0, 0), fx=2.0, fy=2.0)
        cv2.imwrite("Mask.jpg", mask)  # save the mask so ssocr can read it from disk
        result = os.popen('/usr/local/bin/ssocr --number-digits=-1 -t 10 Mask.jpg')
        output = result.read()
        print("Output is " + output)
        output = output[2:8]
        return str(round(float(output) * 0.1, 1))
    else:
        print("Circle not Found")
        print("\n---------------------------------------------\n")
        return None
