Python ROI not being cropped properly from contour - python-3.x

Hello everybody, before you close this question: I have already searched here, here too, and also here.
I am using Python code to detect the leaf in an image, by finding contours and then picking the largest one. That part works well, but then I want only the leaf part of the image and to skip the rest, to avoid unnecessary content in the resulting output. Some of the methods in the links suggest a bounding box, but that still includes extra content, because the shape is not rectangular but irregular; a sample is attached.
The code is as follows:
import cv2
import numpy as np
img = cv2.imread("blob.jpg", -1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 101, 3)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
blob = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))
blob = 255 - cv2.morphologyEx(blob, cv2.MORPH_CLOSE, kernel)
cnts = cv2.findContours(blob, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) == 2:
    cnts = cnts[0]
else:
    cnts = cnts[1]
big_contour = max(cnts, key = cv2.contourArea)
blob_area_thresh = 1000
blob_area = cv2.contourArea(big_contour)
if blob_area < blob_area_thresh:
    print("Leaf is Too Small")
else:
    # problem starts from here; I tested big_contour and it is just perfect when drawn on the actual image
    mask = np.zeros_like(img)
    cv2.drawContours(mask, big_contour, -1, 255, -1)
    out = np.zeros_like(img)
    out[mask == 255] = img[mask == 255]
    cv2.imwrite('output.jpg', out)
Now the problem is that the resulting image comes out completely black; nothing is cropped, only black pixels.
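For reference, one common masking pattern is to fill the contour on a single-channel mask (note that cv2.drawContours expects a list of contours), copy the pixels through cv2.bitwise_and, and optionally crop to the contour's bounding box. A minimal sketch, assuming img and big_contour from the code above:
# Minimal sketch, assuming img and big_contour from the question's code above.
mask = np.zeros(img.shape[:2], dtype=np.uint8)       # single-channel mask
cv2.drawContours(mask, [big_contour], -1, 255, -1)   # note the list around big_contour
leaf_only = cv2.bitwise_and(img, img, mask=mask)     # keep only pixels inside the contour

x, y, w, h = cv2.boundingRect(big_contour)           # optional: crop to the leaf's bounding box
leaf_crop = leaf_only[y:y + h, x:x + w]
cv2.imwrite('leaf_only.jpg', leaf_crop)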

There is a problem with your contour: it does not enclose the leaf, because the end of the leaf on the right is outside the image.
You can see this when I try to fill the contour to create a mask using
cv2.fillPoly(mask, pts=[big_contour], color=(255,255,255))
it doesn't fill the leaf.
However, I tried something that, although not perfect and leaving some background behind, crops the leaf to some extent.
import cv2
import numpy as np
img = cv2.imread("96Klg.jpg", -1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 101, 3)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
blob = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))
#blob = 255 - cv2.morphologyEx(blob, cv2.MORPH_CLOSE, kernel)
cnts = cv2.findContours(blob, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) == 2:
    cnts = cnts[0]
else:
    cnts = cnts[1]
big_contour = max(cnts, key = cv2.contourArea)
blob_area_thresh = 1000
blob_area = cv2.contourArea(big_contour)
if blob_area < blob_area_thresh:
    print("Leaf is Too Small")
else:
    # problem starts from here; I tested big_contour and it is just perfect when drawn on the actual image
    mask = np.ones_like(img)
    mask.fill(255)
    cv2.fillPoly(mask, pts=[big_contour], color=(0,0,0))
    #cv2.drawContours(mask, big_contour, -1, (255,255,255), 1)
    out = np.zeros_like(img)
    out[mask == 255] = img[mask == 255]
    width = int(gray.shape[1] * 0.25)
    height = int(gray.shape[0] * 0.25)
    dim = (width, height)
    # resize image
    resized = cv2.resize(out, dim, interpolation=cv2.INTER_AREA)
    resizedmask = cv2.resize(mask, dim, interpolation=cv2.INTER_AREA)
    cv2.imshow('gray', resized)
    cv2.imshow('out', resizedmask)
    cv2.waitKey(0)  # keep the windows open until a key is pressed
Output

Related

How to reduce noise in contours using Python OpenCV

I am trying to detect the orange beads in the image below.
To detect these, I first cropped the area from the original image and then set low and high HSV values to detect orange. This seems to be working fine. Below is the detection result:
Below is the code:
import cv2
import numpy as np
win_name = "Image"
cv2.namedWindow(win_name)
img = cv2.imread('image.png')
orangeImg = img[420:510, 457:953]
hsv = cv2.cvtColor(orangeImg, cv2.COLOR_BGR2HSV)
lower_bound = np.array([0, 80, 80])
upper_bound = np.array([20, 255, 255])
origMask = cv2.inRange(hsv, lower_bound, upper_bound)
contours, hierarchy = cv2.findContours(origMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for _, c in enumerate(contours):
    areas = [cv2.contourArea(c) for c in contours]
    for area in areas:
        if area >= 20.0:
            boundRect = cv2.boundingRect(c)
            rectX = boundRect[0]
            rectY = boundRect[1]
            rectWidth = boundRect[2]
            rectHeight = boundRect[3]
            color = (0, 0, 255)
            cv2.rectangle(orangeImg, (int(rectX), int(rectY)), (int(rectX + rectWidth), int(rectY + rectHeight)), color, 2)
cv2.imshow(win_name, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
In the output image, you can notice that there is still some noise around the created bounding boxes. Is there a better way to reduce the noise? Also, is there a way to count the detected contours in the image?
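A small sketch of one possible way to clean the mask with a morphological opening and closing before finding contours, and to count the surviving detections; it reuses origMask and orangeImg from the code above, and the kernel size and area threshold are only guesses:
# Sketch only: clean the HSV mask, then count and draw the surviving contours.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
cleanMask = cv2.morphologyEx(origMask, cv2.MORPH_OPEN, kernel)    # remove small specks
cleanMask = cv2.morphologyEx(cleanMask, cv2.MORPH_CLOSE, kernel)  # fill small holes
contours, _ = cv2.findContours(cleanMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
detections = [c for c in contours if cv2.contourArea(c) >= 20.0]
print("Detected contours:", len(detections))
for c in detections:
    x, y, rw, rh = cv2.boundingRect(c)
    cv2.rectangle(orangeImg, (x, y), (x + rw, y + rh), (0, 0, 255), 2)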

Opencv detect side column and text

I am working on an OpenCV project where I need to detect the names column and any black border present around the ROI. I am quite new to image processing, so I am unable to figure out how to do this.
This is one of the sample images, from which I wish to remove the column on the right (the one containing all the details). But not all images contain this column, so I wish to detect the column and remove it from the image.
Here is the expected output.
EDIT
Here is the code that I have tried (I tried detecting the largest rectangles in the region):
import cv2
from cv2 import dilate
from cv2 import findContours
import imutils
import numpy as np
image_name = 'test2.jpg'
og_plan = cv2.imread('test_images/{}'.format(image_name))
res = og_plan.copy()
img_height, img_width, img_channel = og_plan.shape
img_area = img_width * img_height
if og_plan.shape[0] > 800:
    res = imutils.resize(res, height=720)
    img_height, img_width, img_channel = res.shape
    img_area = img_width * img_height
print(res.shape)
print(img_area)
hsv_plan = cv2.cvtColor(res, cv2.COLOR_BGR2HSV)
grey_plan = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
blue_min = np.array([14,100,76])
blue_max = np.array([130,255,255])
bluemask = cv2.inRange(hsv_plan,blue_min,blue_max)
blue_output = cv2.bitwise_and(hsv_plan, hsv_plan, mask=bluemask)
grey_mask = cv2.cvtColor(blue_output, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(grey_mask, 100, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
ret2, thresh2 = cv2.threshold(grey_plan, 160, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
kernel = np.ones((3,3), np.uint8)
dil = dilate(thresh, kernel, iterations=2)
dil_grey = dilate(thresh2, kernel, iterations=2)
cont,hier = findContours(dil, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cont1,hier1 = findContours(dil_grey, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
def max_rect(cntrs):
    ar = {}
    for cnt in cntrs:
        x,y,w,h = cv2.boundingRect(cnt)
        area = w*h
        ar[area] = (x,y,w,h)
    # ar = sorted(ar, key=ar.keys, reverse=True)
    return ar
area_dict = max_rect(cont1)
roi_area = []
for area in area_dict:
    if area >= img_area*0.1 and area < img_area:
        print(area)
        roi_area.append(area)

plan_no = 1
for a in roi_area:
    plan = area_dict[a]
    # del area_dict[a]
    x,y,w,h = plan
    roi = res[y:y+h, x:x+w]
    print(plan)
    cv2.rectangle(res, (x-5,y-5), (x+w+5, y+h+5), (255,255,0), 2)
    cv2.imshow('ROI-{}'.format(image_name), roi)
    cv2.imwrite('./result/{}_plan-{}.png'.format(image_name, plan_no), roi)
    cv2.waitKey(0)
    plan_no += 1
'''plan1 = area_dict[max(area_dict)]
del area_dict[max(area_dict)]
plan2 = area_dict[max(area_dict)]
x,y,w,h = plan1
x1,y1,w1,h1 = plan2
roi1 = res[y:y+h, x:x+w]
roi2 = res[y1:y1+h1, x1:x1+w1]
print(plan1, plan2)
cv2.rectangle(res, (x-5,y-5), (x+w+5, y+h+5), (255,255,0), 2)
cv2.rectangle(res, (x1-5,y1-5), (x1+w1+5, y1+h1+20), (255,255,0), 2)'''
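One possible direction (a rough sketch, not verified on these drawings) is to threshold the page, look for a contour that is nearly as tall as the page and sits in its right portion, and crop the image just before it. The file path and the height/position heuristics below are assumptions:
import cv2

# Rough sketch: drop a tall column detected on the right side of the page.
img = cv2.imread('test_images/test2.jpg')        # hypothetical path
h, w = img.shape[:2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 160, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cut_x = w                                        # default: keep the full width
for c in contours:
    x, y, cw, ch = cv2.boundingRect(c)
    if ch > 0.8 * h and x > 0.6 * w:             # tall block in the right third of the page
        cut_x = min(cut_x, x)
cropped = img[:, :cut_x]
cv2.imwrite('without_column.png', cropped)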

Extracting contours from image to local folder

I wanted to extract contours from an image to a local folder (the image contains circle-shaped contours in a tray).
I have done the thresholding and drawing of contours, but I am not able to extract the detected contours into a folder because they are very close together in the tray (like a basket holding a number of balls).
example image: https://www.shutterstock.com/image-photo/donut-balls-on-metal-tray-white-1307503597?src=xfk0YUuijFGMUXsPIcYEPA-1-49
count = 0
for file in sorted(glob.glob(image_path)):
    img = cv2.imread(file)
    #img = imutils.resize(img, width=500)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 127, 255, 0)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnts = imutils.grab_contours(cnts)
    seg_c = 0
    for c in cnts:
        seg_c = seg_c + 1
        x,y,w,h = cv2.boundingRect(c)
        roi = img[y:y+h, x:x+w]
        cv2.imwrite(cropped_path + '/frame%d_crop.jpg', roi)
        # cv2.rectangle(img,(x,y),(x+w,y+h),(200,0,0),2)
    count += 1
I expected all the detected circle-shaped contours to be extracted to the folder.
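One likely reason nothing useful ends up in the folder is that the cv2.imwrite call above passes a literal '%d' in the filename, so every ROI overwrites the same file. A small sketch of saving each crop under its own name; img, cnts, count and cropped_path are assumed to come from the loop above:
import os
import cv2

# Sketch: save each detected contour's ROI under a unique filename.
for seg_c, c in enumerate(cnts):
    x, y, w, h = cv2.boundingRect(c)
    roi = img[y:y + h, x:x + w]
    out_name = os.path.join(cropped_path, 'frame{}_crop{}.jpg'.format(count, seg_c))
    cv2.imwrite(out_name, roi)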

How do I crop the solar panels captured by drone?

I am currently working on cropping solar panels from the images taken by the drone (sample image attached). I have tried using contours, but there wasn't a proper outcome: it was not detecting all the solar panels in the image, and some of them were missing. I am stuck right here. How do I proceed further? Please help me with this problem.
Thank you,
Sample Code:
import cv2
import numpy as np
img = cv2.imread('D:\\SolarPanel Images\\solarpanel.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
edges = cv2.Canny(blur,100,200)
th3 = cv2.adaptiveThreshold(edges,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
im2, contours, hierarchy = cv2.findContours(th3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print("Len of contours", len(contours))
try: hierarchy = hierarchy[0]
except: hierarchy = []
height, width = edges.shape
min_x, min_y = width, height
max_x = max_y = 0
# computes the bounding box for the contour, and draws it on the image
for contour, hier in zip(contours, hierarchy):
    area = cv2.contourArea(contour)
    if area > 10000 and area < 250000:
        (x,y,w,h) = cv2.boundingRect(contour)
        min_x, max_x = min(x, min_x), max(x+w, max_x)
        min_y, max_y = min(y, min_y), max(y+h, max_y)
        if w > 80 and h > 80:
            cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)
cv2.imshow('cont imge', img)
cv2.waitKey(0)
To find contours in images where the object of importance is clearly distinguishable from the background, you can always try converting the image to HSV format and then finding contours. I did the following:
import cv2
import numpy as np
img = cv2.imread('panel.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
ret,thresh1 = cv2.threshold(hsv[:,:,0],100,255,cv2.THRESH_BINARY)
im2, contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
try: hierarchy = hierarchy[0]
except: hierarchy = []
for contour, hier in zip(contours, hierarchy):
    area = cv2.contourArea(contour)
    if area > 10000 and area < 250000:
        rect = cv2.minAreaRect(contour)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(img, [box], 0, (0,0,255), 2)
cv2.imshow('cont imge', img)
cv2.waitKey(0)
cv2.imwrite("result.jpg", img)
Result:

Reading a Meter using OpenCV

I am trying to read the values on my electricity meter's LCD display using OpenCV. From my picture I am able to find the meter using the HoughCircles method, and I am able to find the LCD display on the meter using contours. The LCD display isn't very clear, so I search for contours again to extract the digits from the display. Now I am unable to read the values on the display using tesseract or ssocr. How can I read the values on the LCD display? I have just started using OpenCV (beginner), so I don't know the right way to go from here or whether my approach is correct; I would appreciate any help. Below is my code snippet; the meter image links are in the comments.
def process_image(path, index):
    img = cv2.imread(path)
    img = cv2.resize(img, (0,0), fx=2.0, fy=2.0)
    height, width, depth = img.shape
    print("\n---------------------------------------------\n")
    print("In Process Image Path is %s height is %d Width is %d depth is %d" % (path, height, width, depth))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.medianBlur(gray, 15)
    circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, 1.2, 100)
    # ensure at least one circles is found, which is our meter
    if circles is not None:
        circles = np.uint16(np.around(circles))
        print("Meter Found")
        for i in circles[0]:
            CenterX = i[0]
            CenterY = i[1]
            Radius = i[2]
            circle_img = np.zeros((height, width), np.uint8)
            cv2.circle(circle_img, (CenterX, CenterY), Radius, 1, thickness=-1)
            masked_data = cv2.bitwise_and(img, img, mask=circle_img)
            output = masked_data.copy()
            cv2.circle(output, (i[0], i[1]), i[2], (0, 255, 0), 2)
            cv2.circle(output, (i[0], i[1]), 2, (0, 0, 255), 3)
            cv2.imwrite("output_" + str(index) + ".jpg", output)
            break
        gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5,5), 1)
        edged = cv2.Canny(blurred, 5, 10, 200)
        cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        displayCnt = None
        contour_list = []
        # loop over the contours
        for c in cnts:
            # approximate the contour
            peri = 0.02 * cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, peri, True)
            # if the contour has four vertices, then we have found
            # the meter display
            if len(approx) == 4:
                contour_list.append(c)
                cv2.contourArea(c)
                displayCnt = approx
                break
        warped = four_point_transform(gray, displayCnt.reshape(4, 2))
        output = four_point_transform(output, displayCnt.reshape(4, 2))
        thresh = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        digitCnts = []
        # loop over the digit area candidates
        for c in cnts:
            # compute the bounding box of the contour
            (x, y, w, h) = cv2.boundingRect(c)
            # if the contour is sufficiently large, it must be a digit
            if (w > 5 and w < 100) and (h >= 15 and h <= 150):
                digitCnts.append(c)
        # sort the contours from left-to-right, then initialize the
        # actual digits themselves
        digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]
        mask = np.zeros(thresh.shape, np.uint8)
        cv2.drawContours(mask, digitCnts, -80, (255, 255, 255), -1)
        mask = cv2.bitwise_not(mask)
        mask = cv2.resize(mask, (0, 0), fx=2.0, fy=2.0)
        result = os.popen('/usr/local/bin/ssocr --number-digits=-1 -t 10 Mask.jpg')
        output = result.read()
        print("Output is " + output)
        output = output[2:8]
        return str(round(float(output) * 0.1, 1))
    else:
        print("Circle not Found")
        print("\n---------------------------------------------\n")
        return None
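If ssocr keeps failing, one alternative (a sketch, not verified on these meter images) is to hand the thresholded digit mask to Tesseract through pytesseract, restricted to digits on a single text line; 'digit_mask.jpg' is a placeholder filename for the mask built above:
import cv2
import pytesseract

# Sketch: OCR the prepared digit mask with Tesseract limited to digits.
mask = cv2.imread('digit_mask.jpg', cv2.IMREAD_GRAYSCALE)   # placeholder for the mask image
config = '--psm 7 -c tessedit_char_whitelist=0123456789'    # single text line, digits only
text = pytesseract.image_to_string(mask, config=config)
print('Tesseract read:', text.strip())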
