As you can see in the image, the image is 2000×2000 pixels. I need to find the distance of the image content from all four edges. I am trying with OpenCV contours, but it does not work on some other images.
import cv2

image = cv2.imread(image_full_path)
ori_h, ori_w, _ = image.shape
print("Original height, width-->", ori_h, ori_w)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # grayscale
_, thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY_INV)  # threshold
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
dilated = cv2.dilate(thresh, kernel, iterations=13)  # dilate
contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # get contours
# for each contour found, compute its bounding rectangle and the margins
# between that rectangle and the image borders
for contour in contours:
    x, y, w, h = cv2.boundingRect(contour)
    remaining_height_bottom = ori_h - (y + h)
    remaining_width_right_side = ori_w - (x + w)
    print("Upper Height, Left Width-->", y, x)
    print("Bottom Height, Right Width-->", remaining_height_bottom, remaining_width_right_side)
    print("Image Height, Image Width-->", h, w)
Thanks in advance. If anyone thinks the way I have asked this question is wrong, please ignore it; I really need help.
Please find the code below; it is working for me:
import cv2
import numpy as np

img = cv2.imread(file_full_path)  # read in the image
ori_h, ori_w, _ = img.shape
scrollbarright = 20
img = img[:, :-scrollbarright]  # crop off the scrollbar on the right
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = 255 * (gray < 128).astype(np.uint8)  # dark pixels become foreground
coords = cv2.findNonZero(gray)  # coordinates of all foreground pixels
x, y, w, h = cv2.boundingRect(coords)  # bounding box of the content
print("Content bounding box:", x, y, w, h)
remaining_height_bottom = ori_h - (y + h)
remaining_width_right_side = ori_w - (x + w)
print("Upper Height, Left Width-->", y, x)
print("Bottom Height, Right Width-->", remaining_height_bottom, remaining_width_right_side)
print("Image Height, Image Width-->", h, w)
print("Original Image Height, Original Image Width-->", ori_h, ori_w)
I am detecting text from various passport images with OpenCV. The task is to crop out the text regions present on passports, such as name, DOB, nationality, etc. The current code is given below:
import cv2
import imutils
import numpy as np

image = cv2.imread(image)  # read image
image = imutils.resize(image, height=600)
original = image.copy()  # keep an unmodified copy to crop ROIs from
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 5))
sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 21))
blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKernel)
gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradX = np.absolute(gradX)
(minVal, maxVal) = (np.min(gradX), np.max(gradX))
gradX = (255 * ((gradX - minVal) / (maxVal - minVal))).astype("uint8")
gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)
thresh = cv2.threshold(gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
if do_erosion:  # flag defined elsewhere; erode only when True
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, sqKernel)
    thresh = cv2.erode(thresh, None, iterations=4)
p = int(image.shape[1] * 0.05)  # zero out a 5% border on the left and right
thresh[:, 0:p] = 0
thresh[:, image.shape[1] - p:] = 0  # thresholded image shown below
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)  # finding contours in the threshold image
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
rois = []
# loop over the contours
for c in cnts:
    # compute the bounding box of the contour and use it to compute the
    # aspect ratio and the ratio of the box width to the image width
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)
    crWidth = w / float(gray.shape[1])
    # check to see if the aspect ratio and coverage width are within
    # acceptable criteria
    if ar > 2 and crWidth > 0.05:
        # pad the bounding box since we applied erosions and now need
        # to re-grow it
        pX = int((x + w) * 0.03)
        pY = int((y + h) * 0.03)
        (x, y) = (x - pX, y - pY)
        (w, h) = (w + (pX * 2), h + (pY * 2))
        # extract the ROI from the image and draw a bounding box
        # around it; interested in finding all ROIs having text
        roi = original[y:y + h, x:x + w].copy()
        rois.append(roi)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
I am detecting some textual values correctly, but the solution is not generic. For example, it sometimes extracts only the year of the DOB but not the full date (sometimes it detects the month and day as well, but not in the same ROI). The original and thresholded images are shown below:
As you can see, in the "ROIs found" image some regions, such as the DOB and the date of expiry, are not detected as single ROIs. Any help in extracting all the text regions from the passport will be highly appreciated. Thanks!
You need to tweak the Aspect Ratio and Coverage ratio thresholds to obtain all the desired bounding boxes.
When I run your code on the "05" field, the value of ar is 1.541 and the value of crWidth is 0.03. Since these values are below the thresholds you specified, those contours get filtered out. This is why some of the words do not have bounding boxes in the final image.
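For example (illustrative values only, not from the original code), relaxing the condition inside the contour loop keeps shorter fields such as "05":

# drop-in change to the filter in the loop above; tune on your own images
MIN_AR = 1.2          # was 2
MIN_CR_WIDTH = 0.02   # was 0.05
if ar > MIN_AR and crWidth > MIN_CR_WIDTH:
    rois.append(original[y:y + h, x:x + w].copy())  # same extraction as before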
But, since you want to get the entire DOB in a single bounding box, you can apply a dilation operation just before the line thresh[:, 0:p] = 0:

thresh = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, np.ones((1, 30), np.uint8))
This will dilate the white pixels and combine blobs that are close together horizontally. The resultant image after preprocessing is as follows -
The final image -
I'm constructing a program to extract text from a PDF, put it in a structured format, and send it off to a database. I have roughly 1,400 individual PDFs that all follow a similar format, but nuances in the verbiage and in the plan designs that the documents summarize make it tricky.
I've played around with a couple of different PDF readers in Python, including tabula-py and pdfminer, but none of them quite does what I'd like. Tabula reads in all of the text very well; however, it pulls everything exactly as it is laid out horizontally, ignoring the fact that some of the text is wrapped in a box. For example, if you open the sample SBC I have attached where it reads "What is the overall deductible?", Tabula will read in "What is the overall $500/Individual or...", missing the fact that the word "deductible" is really part of the first sentence. (Note: the files I'm working with are PDFs, but I've attached a JPEG because I couldn't figure out how to attach a PDF.)
import tabula

df = tabula.read_pdf(filepath, pandas_options={'header': None})
print(df.iloc[0][0])
print(df)
In the end, I'd really like to be able to parse out the text within each box, so that I can better identify which values belong to the deductible, out-of-pocket limits, copays/coinsurance, etc. I thought possibly some sort of OCR would let me recognize which parts of the PDF are contained in the blue rectangles and then pull the strings from there, but I really don't know where to start. Sample SBC
@jpnadas In this case the code you copied from my answer in this post isn't really suitable, because it addresses the case when a table doesn't have a surrounding grid. That algorithm looks for repeating blocks of text and tries to find a pattern that resembles a table heuristically.
But in this particular case the table does have a grid, and by taking advantage of that we can achieve a much more accurate result.
The strategy is the following:

1. Increase the image gamma to make the grid darker.
2. Get rid of colour and apply Otsu thresholding.
3. Find long vertical and horizontal lines in the image and create a mask from them using the erode and dilate functions.
4. Find the cell blocks in the mask using the findContours function.
5. Find the table objects:
5.1. The rest can be done as in the post about finding a table without a grid: find the table structure heuristically.
5.2. An alternative approach is to use the hierarchy returned by the findContours function. This approach is even more accurate and allows multiple tables to be found in a single image.
6. Having the cell coordinates, it's easy to extract a certain cell image from the original image:
cell_image = image[cell_y:cell_y + cell_h, cell_x:cell_x + cell_w]
7. Apply OCR to each cell_image (a minimal sketch follows).
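For the OCR step, a minimal sketch using pytesseract (my choice of wrapper; any OCR engine works here):

import pytesseract

# treat each cell as a single uniform block of text
text = pytesseract.image_to_string(cell_image, config="--psm 6")
print(text.strip())

--psm 6 tells Tesseract to assume a single uniform block of text, which usually suits table cells better than the default page segmentation.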
BUT! I consider the OpenCV approach a last resort, for when you're not able to read the PDF's contents: for instance, when a PDF contains a raster image inside.
If it's a vector-based PDF and its contents are readable, it makes more sense to find the table inside the contents and just read the text from it instead of doing heavy "OCR lifting".
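For instance, a minimal sketch with pdfminer.six (assuming the PDF really is vector-based; the file name is a placeholder) that reads each text element together with its coordinates, so text can be matched to cells without any OCR:

from pdfminer.high_level import extract_pages
from pdfminer.layout import LTTextContainer

for page_layout in extract_pages("sample_sbc.pdf"):  # placeholder path
    for element in page_layout:
        if isinstance(element, LTTextContainer):
            # bbox is (x0, y0, x1, y1) in PDF coordinates
            print(element.bbox, element.get_text().strip())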
For reference, here's the code for the more accurate table recognition:
import os
import imutils
import numpy as np
import argparse
import cv2

def gamma_correction(image, gamma=1.0):
    look_up_table = np.empty((1, 256), np.uint8)
    for i in range(256):
        look_up_table[0, i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
    result = cv2.LUT(image, look_up_table)
    return result

def pre_process_image(image):
    # Applying gamma to make the table lines darker
    gamma = gamma_correction(image, 2)
    # Getting rid of color
    gray = cv2.cvtColor(gamma, cv2.COLOR_BGR2GRAY)
    # Then apply Otsu threshold to reveal the important areas
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # Inverting the thresholded image
    return ~thresh

def get_horizontal_lines_mask(image, horizontal_size=100):
    horizontal = image.copy()
    horizontal_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))
    horizontal = cv2.erode(horizontal, horizontal_structure, anchor=(-1, -1), iterations=1)
    horizontal = cv2.dilate(horizontal, horizontal_structure, anchor=(-1, -1), iterations=1)
    return horizontal

def get_vertical_lines_mask(image, vertical_size=100):
    vertical = image.copy()
    vertical_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, vertical_size))
    vertical = cv2.erode(vertical, vertical_structure, anchor=(-1, -1), iterations=1)
    vertical = cv2.dilate(vertical, vertical_structure, anchor=(-1, -1), iterations=1)
    return vertical

def make_lines_mask(preprocessed, min_horizontal_line_size=100, min_vertical_line_size=100):
    hor = get_horizontal_lines_mask(preprocessed, min_horizontal_line_size)
    ver = get_vertical_lines_mask(preprocessed, min_vertical_line_size)
    mask = np.zeros((preprocessed.shape[0], preprocessed.shape[1]), dtype=np.uint8)
    mask = cv2.bitwise_or(mask, hor)
    mask = cv2.bitwise_or(mask, ver)
    return ~mask

def find_cell_boxes(mask):
    # Looking for the cell contours
    # OpenCV 3: img, contours, hierarchy = cv2.findContours(...)
    # OpenCV 4: contours, hierarchy = cv2.findContours(...)
    contours = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    image_width = mask.shape[1]
    # Getting the bounding boxes, excluding the page-sized box
    boxes = []
    for contour in contours:
        box = cv2.boundingRect(contour)
        w = box[2]
        if w < 0.95 * image_width:
            boxes.append(box)
    return boxes

def find_table_in_boxes(boxes, cell_threshold=10, min_columns=2):
    rows = {}
    cols = {}
    # Clustering the bounding boxes by their positions
    for box in boxes:
        (x, y, w, h) = box
        col_key = x // cell_threshold
        row_key = y // cell_threshold
        cols[col_key] = [box] if col_key not in cols else cols[col_key] + [box]
        rows[row_key] = [box] if row_key not in rows else rows[row_key] + [box]
    # Filtering out the clusters having fewer than min_columns columns
    table_cells = list(filter(lambda r: len(r) >= min_columns, rows.values()))
    # Sorting the row cells by x coord
    table_cells = [list(sorted(tb)) for tb in table_cells]
    # Sorting rows by the y coord
    table_cells = list(sorted(table_cells, key=lambda r: r[0][1]))
    return table_cells

def build_vertical_lines(table_cells):
    if table_cells is None or len(table_cells) <= 0:
        return [], []
    max_last_col_width_row = max(table_cells, key=lambda b: b[-1][2])
    max_x = max_last_col_width_row[-1][0] + max_last_col_width_row[-1][2]
    max_last_row_height_box = max(table_cells[-1], key=lambda b: b[3])
    max_y = max_last_row_height_box[1] + max_last_row_height_box[3]
    hor_lines = []
    ver_lines = []
    for box in table_cells:
        x = box[0][0]
        y = box[0][1]
        hor_lines.append((x, y, max_x, y))
    for box in table_cells[0]:
        x = box[0]
        y = box[1]
        ver_lines.append((x, y, x, max_y))
    (x, y, w, h) = table_cells[0][-1]
    ver_lines.append((max_x, y, max_x, max_y))
    (x, y, w, h) = table_cells[0][0]
    hor_lines.append((x, max_y, max_x, max_y))
    return hor_lines, ver_lines

if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="path to the input image")
    args = vars(ap.parse_args())
    in_file = args["image"]
    filename_base = in_file.replace(os.path.splitext(in_file)[1], "")
    img = cv2.imread(in_file)
    pre_processed = pre_process_image(img)
    # Visualizing the pre-processed image
    cv2.imwrite(filename_base + ".pre.png", pre_processed)
    lines_mask = make_lines_mask(pre_processed, min_horizontal_line_size=1800, min_vertical_line_size=500)
    # Visualizing the table lines mask
    cv2.imwrite(filename_base + ".mask.png", lines_mask)
    cell_boxes = find_cell_boxes(lines_mask)
    cells = find_table_in_boxes(cell_boxes)
    # apply OCR to each cell rect here;
    # the cells array contains cell coordinates as (x, y, w, h) tuples
    hor_lines, ver_lines = build_vertical_lines(cells)
    # Visualize the table lines
    vis = img.copy()
    for line in hor_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
    for line in ver_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
    cv2.imwrite(filename_base + ".result.png", vis)
Some parameters are hard-coded:

- page size threshold: 0.95
- min horizontal line size: 1800 px
- min vertical line size: 500 px

You can provide them as configurable parameters or make them relative to the image size, as sketched below.
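For example, a hedged sketch of deriving them from the image size (the ratios are assumptions you would tune per document type):

h, w = img.shape[:2]
min_horizontal_line_size = int(w * 0.75)  # a grid line spans most of the page width
min_vertical_line_size = int(h * 0.30)    # vertical lines are shorter
page_box_threshold = 0.95                 # already relative to the page width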
Results:
I think the best way to do what you need is to find and isolate the cells in the file and then apply OCR to each individual cell.
There are a number of solutions on SO for that. I got the code from this answer and played around a little with the parameters to get the output below (not perfect yet, but you can tweak it a bit yourself).
import os
import cv2
import imutils

# This only works if there's only one table on a page
# Important parameters:
#  - morph_size
#  - min_text_height_limit
#  - max_text_height_limit
#  - cell_threshold
#  - min_columns

def pre_process_image(img, save_in_file, morph_size=(23, 23)):
    # get rid of the color
    pre = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Otsu threshold
    pre = cv2.threshold(pre, 250, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # dilate the text to make it a solid spot
    cpy = pre.copy()
    struct = cv2.getStructuringElement(cv2.MORPH_RECT, morph_size)
    cpy = cv2.dilate(~cpy, struct, anchor=(-1, -1), iterations=1)
    pre = ~cpy
    if save_in_file is not None:
        cv2.imwrite(save_in_file, pre)
    return pre

def find_text_boxes(pre, min_text_height_limit=20, max_text_height_limit=120):
    # Looking for the text spot contours
    contours, _ = cv2.findContours(pre, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Getting the text bounding boxes based on the text size assumptions
    boxes = []
    for contour in contours:
        box = cv2.boundingRect(contour)
        h = box[3]
        if min_text_height_limit < h < max_text_height_limit:
            boxes.append(box)
    return boxes

def find_table_in_boxes(boxes, cell_threshold=100, min_columns=3):
    rows = {}
    cols = {}
    # Clustering the bounding boxes by their positions
    for box in boxes:
        (x, y, w, h) = box
        col_key = x // cell_threshold
        row_key = y // cell_threshold
        cols[col_key] = [box] if col_key not in cols else cols[col_key] + [box]
        rows[row_key] = [box] if row_key not in rows else rows[row_key] + [box]
    # Filtering out the clusters having fewer than min_columns columns
    table_cells = list(filter(lambda r: len(r) >= min_columns, rows.values()))
    # Sorting the row cells by x coord
    table_cells = [list(sorted(tb)) for tb in table_cells]
    # Sorting rows by the y coord
    table_cells = list(sorted(table_cells, key=lambda r: r[0][1]))
    return table_cells

def build_lines(table_cells):
    if table_cells is None or len(table_cells) <= 0:
        return [], []
    max_last_col_width_row = max(table_cells, key=lambda b: b[-1][2])
    max_x = max_last_col_width_row[-1][0] + max_last_col_width_row[-1][2]
    max_last_row_height_box = max(table_cells[-1], key=lambda b: b[3])
    max_y = max_last_row_height_box[1] + max_last_row_height_box[3]
    hor_lines = []
    ver_lines = []
    for box in table_cells:
        x = box[0][0]
        y = box[0][1]
        hor_lines.append((x, y, max_x, y))
    for box in table_cells[0]:
        x = box[0]
        y = box[1]
        ver_lines.append((x, y, x, max_y))
    (x, y, w, h) = table_cells[0][-1]
    ver_lines.append((max_x, y, max_x, max_y))
    (x, y, w, h) = table_cells[0][0]
    hor_lines.append((x, max_y, max_x, max_y))
    return hor_lines, ver_lines

if __name__ == "__main__":
    in_file = os.path.join(".", "test.jpg")
    pre_file = os.path.join(".", "pre.png")
    out_file = os.path.join(".", "out.png")
    img = cv2.imread(in_file)
    pre_processed = pre_process_image(img, pre_file)
    text_boxes = find_text_boxes(pre_processed)
    cells = find_table_in_boxes(text_boxes)
    hor_lines, ver_lines = build_lines(cells)
    # Visualize the result
    vis = img.copy()
    # for box in text_boxes:
    #     (x, y, w, h) = box
    #     cv2.rectangle(vis, (x, y), (x + w - 2, y + h - 2), (0, 255, 0), 1)
    for line in hor_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
    for line in ver_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)
    cv2.imwrite(out_file, vis)
What I want to do is draw a grid on an image that has smaller squares at the top and larger ones near the bottom, but I am lost on how to do this. I figured out how to draw a uniform grid on the image using OpenCV, but I don't know how to fine-tune it to get the result I want.
What I will be using this for is to find where a bounding-box point of a detected object lies. I will need smaller bounding boxes at the top and larger ones closer to the bottom of the image.
This is what I want to do:
But I cannot figure out how to get the curves in the code.
This is what I have so far:
And this is the code that I am using:
def drawBasicGrid(image, pxstep, midX, midY):
    '''Draw a simple grid over the image; pxstep controls the grid spacing.'''
    x = pxstep
    y = pxstep
    # Draw all vertical lines
    while x < image.shape[1]:
        cv2.line(image, (x, 0), (x, image.shape[0]), color=(255, 0, 255), thickness=1)
        x += pxstep
    # Draw all horizontal lines
    while y < image.shape[0]:
        cv2.line(image, (0, y), (image.shape[1], y), color=(255, 0, 255), thickness=1)
        y += pxstep
This draws the basic grid.
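A minimal variation of the same loop can produce the growing spacing; this is only a sketch (the growth factor is an assumption to tune), and it only varies the row spacing; truly curved lines would need a perspective transform on top:

def drawPerspectiveGrid(image, start_step=10, growth=1.25):
    '''Draw horizontal lines that get farther apart toward the bottom.'''
    h, w = image.shape[:2]
    y = float(start_step)
    step = float(start_step)
    while y < h:
        cv2.line(image, (0, int(y)), (w, int(y)), color=(255, 0, 255), thickness=1)
        step *= growth  # each row is `growth` times taller than the last
        y += step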
And this creates the bounding boxes that I use for finding the bounding points of a detected object.
def makeBoundingBox(h, w, step):
    # each box is a step*step square (100*100 or whatever step is defined)
    y = 0
    bbox = []
    while y < h:
        x = 0
        while x < w:
            bbox.append([(x, y), (x + step, y + step)])
            x += step
        y += step
    return bbox
And this is how I am using it.
# Prepare the image (net, green and step are defined elsewhere in the script)
img = cv2.imread(image)
# img = imutils.resize(img, width=300)
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(H, W) = img.shape[:2]

# People detection: look for which bounding boxes the person is in,
# then draw a box around their feet
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 0.007843, (300, 300), 127.5)
net.setInput(blob)
detections = net.forward()

# Get the center point of the image
midY = H // 2
midX = W // 2
print("Middle Y pixel", midY)

# Draw the center lines
cv2.line(img, (0, midY), (W, midY), color=green, thickness=2)
cv2.line(img, (midX, 0), (midX, H), color=green, thickness=2)

# Draw the visual grid
drawBasicGrid(img, step, midX, midY)
bbox = makeBoundingBox(H, W, step)  # used for finding the start and end bbox points
Any help on this will be appreciated.
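One hedged idea for the boxes themselves (the function name and growth factor are my own, not from the post): generate rows whose height grows toward the bottom, mirroring the grid sketch above.

def makeScaledBoundingBox(h, w, start_step=20, growth=1.25):
    # rows get taller (and boxes bigger) the farther down the image they are
    bbox = []
    y = 0.0
    step = float(start_step)
    while y < h:
        x = 0.0
        while x < w:
            bbox.append([(int(x), int(y)), (int(x + step), int(y + step))])
            x += step
        y += step
        step *= growth
    return bbox

Each entry keeps the same [(startX, startY), (endX, endY)] format as makeBoundingBox, so the rest of the detection code can stay unchanged.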
I am trying to read the values on my electricity meter's LCD display using OpenCV. From my picture I am able to find the meter using the HoughCircles method, and I am able to find the LCD display on the meter using contours. The LCD display isn't very clear, so I search for contours again to extract the digits from the display. Now I am unable to read the values on the display using Tesseract or ssocr. How can I read the values on the LCD display? I just started using OpenCV (beginner), so I don't know the right way to go from here, or whether my approach is correct; I would appreciate any help. Below is my code snippet; the meter image links are in the comments.
import os
import cv2
import imutils
import numpy as np
from imutils import contours
from imutils.perspective import four_point_transform

def process_image(path, index):
    img = cv2.imread(path)
    img = cv2.resize(img, (0, 0), fx=2.0, fy=2.0)
    height, width, depth = img.shape
    print("\n---------------------------------------------\n")
    print("In Process Image Path is %s height is %d Width is %d depth is %d" % (path, height, width, depth))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.medianBlur(gray, 15)
    circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, 1.2, 100)
    # ensure at least one circle was found, which is our meter
    if circles is not None:
        circles = np.uint16(np.around(circles))
        print("Meter Found")
        for i in circles[0]:
            CenterX = i[0]
            CenterY = i[1]
            Radius = i[2]
            # mask out everything outside the meter circle
            circle_img = np.zeros((height, width), np.uint8)
            cv2.circle(circle_img, (CenterX, CenterY), Radius, 1, thickness=-1)
            masked_data = cv2.bitwise_and(img, img, mask=circle_img)
            output = masked_data.copy()
            cv2.circle(output, (i[0], i[1]), i[2], (0, 255, 0), 2)
            cv2.circle(output, (i[0], i[1]), 2, (0, 0, 255), 3)
            cv2.imwrite("output_" + str(index) + ".jpg", output)
            break
        gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 1)
        edged = cv2.Canny(blurred, 5, 10, 200)
        cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)  # version-safe contour unpacking
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        displayCnt = None
        contour_list = []
        # loop over the contours
        for c in cnts:
            # approximate the contour
            peri = 0.02 * cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, peri, True)
            # if the contour has four vertices, then we have found
            # the meter display
            if len(approx) == 4:
                contour_list.append(c)
                displayCnt = approx
                break
        warped = four_point_transform(gray, displayCnt.reshape(4, 2))
        output = four_point_transform(output, displayCnt.reshape(4, 2))
        thresh = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        digitCnts = []
        # loop over the digit area candidates
        for c in cnts:
            # compute the bounding box of the contour
            (x, y, w, h) = cv2.boundingRect(c)
            # if the contour is sufficiently large, it must be a digit
            if (w > 5 and w < 100) and (h >= 15 and h <= 150):
                digitCnts.append(c)
        # sort the contours from left-to-right
        digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]
        mask = np.zeros(thresh.shape, np.uint8)
        cv2.drawContours(mask, digitCnts, -1, (255, 255, 255), -1)  # draw all digit contours
        mask = cv2.bitwise_not(mask)
        mask = cv2.resize(mask, (0, 0), fx=2.0, fy=2.0)
        cv2.imwrite("Mask.jpg", mask)  # ssocr reads the mask from disk
        result = os.popen('/usr/local/bin/ssocr --number-digits=-1 -t 10 Mask.jpg')
        output = result.read()
        print("Output is " + output)
        output = output[2:8]
        return str(round(float(output) * 0.1, 1))
    else:
        print("Circle not Found")
        print("\n---------------------------------------------\n")
        return None
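If ssocr keeps failing, a hedged alternative is the well-known seven-segment lookup approach (not from the original code; the segment geometry ratios are assumptions to tune): binarize each digit ROI so the digit is white on black, then check which of the seven segments are lit:

DIGITS_LOOKUP = {
    (1, 1, 1, 0, 1, 1, 1): 0, (0, 0, 1, 0, 0, 1, 0): 1,
    (1, 0, 1, 1, 1, 0, 1): 2, (1, 0, 1, 1, 0, 1, 1): 3,
    (0, 1, 1, 1, 0, 1, 0): 4, (1, 1, 0, 1, 0, 1, 1): 5,
    (1, 1, 0, 1, 1, 1, 1): 6, (1, 0, 1, 0, 0, 1, 0): 7,
    (1, 1, 1, 1, 1, 1, 1): 8, (1, 1, 1, 1, 0, 1, 1): 9,
}

def classify_digit(roi):
    # roi: binarized digit image, white digit on a black background
    h, w = roi.shape
    dw, dh = int(w * 0.25), int(h * 0.15)
    dhc = int(h * 0.05)
    segments = [
        ((0, 0), (w, dh)),                       # top
        ((0, 0), (dw, h // 2)),                  # top-left
        ((w - dw, 0), (w, h // 2)),              # top-right
        ((0, h // 2 - dhc), (w, h // 2 + dhc)),  # center
        ((0, h // 2), (dw, h)),                  # bottom-left
        ((w - dw, h // 2), (w, h)),              # bottom-right
        ((0, h - dh), (w, h)),                   # bottom
    ]
    on = []
    for (xa, ya), (xb, yb) in segments:
        seg = roi[ya:yb, xa:xb]
        # a segment counts as lit if more than half of its area is white
        on.append(1 if cv2.countNonZero(seg) > 0.5 * seg.size else 0)
    return DIGITS_LOOKUP.get(tuple(on))  # None if the pattern is unknown

Each contour in digitCnts can be cropped from thresh with cv2.boundingRect and passed to classify_digit; since the adaptive threshold above produces black digits on white, you would apply cv2.bitwise_not first.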