Extracting contours from image to local folder - python-3.x

I want to extract contours from an image to a local folder (the image contains circle-shaped contours in a tray).
I have done the thresholding and drawing of contours, but I am not able to extract the detected contours into a folder because they sit very close together in the tray (like a basket holding a number of balls).
example image: https://www.shutterstock.com/image-photo/donut-balls-on-metal-tray-white-1307503597?src=xfk0YUuijFGMUXsPIcYEPA-1-49
import glob

import cv2
import imutils

# image_path and cropped_path come from elsewhere in the original script
count = 0
for file in sorted(glob.glob(image_path)):
    img = cv2.imread(file)
    #img = imutils.resize(img, width=500)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # img is RGB at this point
    ret, thresh = cv2.threshold(gray, 127, 255, 0)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnts = imutils.grab_contours(cnts)
    seg_c = 0
    for c in cnts:
        seg_c = seg_c + 1
        x, y, w, h = cv2.boundingRect(c)
        roi = img[y:y+h, x:x+w]
        # give every crop a unique name, otherwise each ROI overwrites the previous file
        cv2.imwrite(cropped_path + '/frame%d_crop%d.jpg' % (count, seg_c), roi)
        # cv2.rectangle(img, (x, y), (x+w, y+h), (200, 0, 0), 2)
    count += 1
I expected all of the detected circle-shaped contours to be extracted to the folder.
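One way to keep only the circle-shaped blobs before cropping is to filter each contour by area and circularity. The snippet below is only a sketch that reuses cnts, img and cropped_path from the code above; the min_area value and the circularity cutoff are assumptions to tune for the tray image.

import math

# Sketch: keep only reasonably large, roughly circular contours before saving crops.
min_area = 500  # assumption: tune for the image resolution
for i, c in enumerate(cnts):
    area = cv2.contourArea(c)
    perimeter = cv2.arcLength(c, True)
    if area < min_area or perimeter == 0:
        continue
    circularity = 4 * math.pi * area / (perimeter * perimeter)  # 1.0 for a perfect circle
    if circularity > 0.7:
        x, y, w, h = cv2.boundingRect(c)
        cv2.imwrite(cropped_path + '/ball_%d.jpg' % i, img[y:y+h, x:x+w])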

Related

How to reduce noise in contours using Python OpenCV

I am trying to detect the orange beats in the image below.
To detect these, I first cropped the area from the original image and then set high and low HSV values to detect orange. This seems to be working fine. Below is the detected image:
Below is the code:
import cv2
import numpy as np

win_name = "Image"
cv2.namedWindow(win_name)
img = cv2.imread('image.png')
orangeImg = img[420:510, 457:953]
hsv = cv2.cvtColor(orangeImg, cv2.COLOR_BGR2HSV)
lower_bound = np.array([0, 80, 80])
upper_bound = np.array([20, 255, 255])
origMask = cv2.inRange(hsv, lower_bound, upper_bound)
contours, hierarchy = cv2.findContours(origMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    # filter each contour by its own area instead of looping over all areas again
    area = cv2.contourArea(c)
    if area >= 20.0:
        rectX, rectY, rectWidth, rectHeight = cv2.boundingRect(c)
        color = (0, 0, 255)
        cv2.rectangle(orangeImg, (int(rectX), int(rectY)), (int(rectX + rectWidth), int(rectY + rectHeight)), color, 2)
cv2.imshow(win_name, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
In the output image, you can notice that there is still some noise around the created bounding boxes. Is there a better way to reduce the noise? Also, is there a way to count the detected contours in the image?
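One common approach, shown as a sketch only and assuming the leftover noise comes from small specks in the HSV mask: apply a morphological opening to the mask before finding contours, then count the contours that pass the area filter. The kernel size and the area cutoff below are illustrative values to tune.

# Sketch: clean origMask with a morphological opening, then count the surviving contours.
# The 3x3 kernel and the 20-pixel area cutoff are assumptions to tune for the real image.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
cleanMask = cv2.morphologyEx(origMask, cv2.MORPH_OPEN, kernel)
contours, hierarchy = cv2.findContours(cleanMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
kept = [c for c in contours if cv2.contourArea(c) >= 20.0]
print("Detected contours:", len(kept))
for c in kept:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(orangeImg, (x, y), (x + w, y + h), (0, 0, 255), 2)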

Python ROI not being cropped properly from contour

Hello everybody, before you close this question: I have already searched here, here too and also here.
I am using Python code to detect the leaf in an image: I find contours and then pick the largest contour, and that part works well. But then I want only the leaf part of the image and want to skip the rest to avoid unnecessary content in the resultant output. Some of the methods in the links suggest a bounding box, but that still includes extra content because the shape is not rectangular, it is irregular. A sample is attached.
The code is the following:
import cv2
import numpy as np

img = cv2.imread("blob.jpg", -1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 101, 3)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
blob = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
blob = 255 - cv2.morphologyEx(blob, cv2.MORPH_CLOSE, kernel)
cnts = cv2.findContours(blob, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) == 2:
    cnts = cnts[0]
else:
    cnts = cnts[1]
big_contour = max(cnts, key=cv2.contourArea)
blob_area_thresh = 1000
blob_area = cv2.contourArea(big_contour)
if blob_area < blob_area_thresh:
    print("Leaf is Too Small")
else:
    # problem starts from here; I tested that big_contour is fine by drawing it on the actual image
    mask = np.zeros_like(img)
    cv2.drawContours(mask, big_contour, -1, 255, -1)
    out = np.zeros_like(img)
    out[mask == 255] = img[mask == 255]
    cv2.imwrite('output.jpg', out)
Now the problem is that the resultant image comes out completely black: nothing is cropped, just black pixels.
There is a problem with your contour: it does not go all the way around the leaf, because the tip of the leaf on the right is outside the image.
You can see this when I try to fill the contour to create a mask using
cv2.fillPoly(mask, pts=[big_contour], color=(255,255,255))
It does not fill the leaf.
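A quick way to check this yourself (not part of the original code, just a small diagnostic sketch): see whether the bounding rectangle of big_contour reaches the image border, i.e. whether the leaf is cut off at the edge.

# Sketch: does big_contour touch the image border? If so, the leaf is cut off at the edge.
x, y, w, h = cv2.boundingRect(big_contour)
img_h, img_w = img.shape[:2]
touches_border = x <= 0 or y <= 0 or x + w >= img_w or y + h >= img_h
print("Contour touches image border:", touches_border)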
However, I tried something that is not perfect and leaves some background, but it crops the leaf to some extent.
import cv2
import numpy as np

img = cv2.imread("96Klg.jpg", -1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 101, 3)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
blob = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
#blob = 255 - cv2.morphologyEx(blob, cv2.MORPH_CLOSE, kernel)
cnts = cv2.findContours(blob, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) == 2:
    cnts = cnts[0]
else:
    cnts = cnts[1]
big_contour = max(cnts, key=cv2.contourArea)
blob_area_thresh = 1000
blob_area = cv2.contourArea(big_contour)
if blob_area < blob_area_thresh:
    print("Leaf is Too Small")
else:
    # fill the leaf polygon with black on a white mask, then copy the white (background) region
    mask = np.ones_like(img)
    mask.fill(255)
    cv2.fillPoly(mask, pts=[big_contour], color=(0, 0, 0))
    #cv2.drawContours(mask, big_contour, -1, (255, 255, 255), 1)
    out = np.zeros_like(img)
    out[mask == 255] = img[mask == 255]
    width = int(gray.shape[1] * 0.25)
    height = int(gray.shape[0] * 0.25)
    dim = (width, height)
    # resize image
    resized = cv2.resize(out, dim, interpolation=cv2.INTER_AREA)
    resizedmask = cv2.resize(mask, dim, interpolation=cv2.INTER_AREA)
    cv2.imshow('gray', resized)
    cv2.imshow('out', resizedmask)
    cv2.waitKey(0)  # keep the preview windows open
Output

cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgproc/src/shapedescr.cpp:272

This is my python script:
while True:
    text = ""
    img = cam.read()[1]
    img = cv2.flip(img, 1)
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([imgHSV], [0, 1], hist, [0, 180, 0, 256], 1)
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    cv2.filter2D(dst, -1, disc, dst)
    blur = cv2.GaussianBlur(dst, (11, 11), 0)
    blur = cv2.medianBlur(blur, 15)
    thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    thresh = cv2.merge((thresh, thresh, thresh))
    thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
    thresh = thresh[y:y+h, x:x+w]
    contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]
    if len(contours) > 0:
        contour = max(contours, key=cv2.contourArea)
        if cv2.contourArea(contour) > 10000:
            x1, y1, w1, h1 = cv2.boundingRect(contour)
            save_img = thresh[y1:y1+h1, x1:x1+w1]
This code works properly on another system, but when I run it on my system it shows the following error:
cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgproc/src/shapedescr.cpp:272: error: (-215:Assertion failed) npoints >= 0 && (depth == CV_32F || depth == CV_32S) in function 'contourArea'
It is caused by the following line:
contour = max(contours, key = cv2.contourArea)
I am using Ubuntu 18.02 and OpenCV 4.0.
This is part of our project; please help.
This problem occurs because the return value of cv2.findContours changed between OpenCV 3.x and 4.0.
In 3.x it used to be
findContours(image, mode, method[, contours[, hierarchy[, offset]]]) -> image, contours, hierarchy
with three objects returned, and in 4.0 it is
findContours(image, mode, method[, contours[, hierarchy[, offset]]]) -> contours, hierarchy
with two objects returned.
So your code should be
contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[0]
if you intend to get the contours.
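If the script has to run on both OpenCV 3.x and 4.x, a small version-agnostic sketch of the unpacking (this is also what the imutils.grab_contours helper used in the first question on this page does):

# Sketch: unpack cv2.findContours so it works on both OpenCV 3.x and 4.x.
ret = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours = ret[0] if len(ret) == 2 else ret[1]  # 4.x returns two values, 3.x returns three
if len(contours) > 0:
    contour = max(contours, key=cv2.contourArea)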

How do I crop the solar panels captured by drone?

I am currently working on cropping solar panels from images taken by a drone (sample image attached). I have tried using contours, but there wasn't a proper outcome: it was not detecting all the solar panels in the image, and some of them were missing. I am stuck here. How do I proceed further? Please help me with this problem.
Thank you,
Sample Code:
import cv2
import numpy as np

img = cv2.imread('D:\\SolarPanel Images\\solarpanel.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
edges = cv2.Canny(blur, 100, 200)
th3 = cv2.adaptiveThreshold(edges, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
im2, contours, hierarchy = cv2.findContours(th3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print("Len of contours", len(contours))
try:
    hierarchy = hierarchy[0]
except:
    hierarchy = []
height, width = edges.shape
min_x, min_y = width, height
max_x = max_y = 0
# compute the bounding box for each contour and draw it on the image
for contour, hier in zip(contours, hierarchy):
    area = cv2.contourArea(contour)
    if area > 10000 and area < 250000:
        (x, y, w, h) = cv2.boundingRect(contour)
        min_x, max_x = min(x, min_x), max(x+w, max_x)
        min_y, max_y = min(y, min_y), max(y+h, max_y)
        if w > 80 and h > 80:
            cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
cv2.imshow('cont imge', img)
cv2.waitKey(0)
To find contours in images where the object of interest is clearly distinguishable from the background, you can always try converting the image to HSV format and then finding contours. I did the following:
import cv2
import numpy as np

img = cv2.imread('panel.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
ret, thresh1 = cv2.threshold(hsv[:, :, 0], 100, 255, cv2.THRESH_BINARY)
im2, contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
try:
    hierarchy = hierarchy[0]
except:
    hierarchy = []
for contour, hier in zip(contours, hierarchy):
    area = cv2.contourArea(contour)
    if area > 10000 and area < 250000:
        rect = cv2.minAreaRect(contour)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
cv2.imshow('cont imge', img)
cv2.waitKey(0)
cv2.imwrite("result.jpg", img)
Result:
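Since the question asks for cropping rather than just drawing boxes, here is a small follow-up sketch (not part of the answer above) that saves each detected panel region to its own file. It reloads a clean copy of the image so the red boxes drawn above do not end up in the crops, and the output filenames are only illustrative.

# Sketch: crop each detected panel with its upright bounding box and save it.
clean = cv2.imread('panel.jpg')  # reload so the drawn rectangles are not in the crops
panel_idx = 0
for contour, hier in zip(contours, hierarchy):
    if 10000 < cv2.contourArea(contour) < 250000:
        x, y, w, h = cv2.boundingRect(contour)
        cv2.imwrite("panel_%d.jpg" % panel_idx, clean[y:y+h, x:x+w])
        panel_idx += 1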

Reading a Meter using OpenCV

I am trying to read the values on my electricity meter's LCD display using OpenCV. From my picture I am able to find the meter using the HoughCircles method, and I am able to find the LCD display on the meter using contours. The LCD display isn't very clear, so I search for contours again to extract the digits from the display. Now I am unable to read the values on the display using tesseract or ssocr. How can I read the values on the LCD display? I just started using OpenCV (beginner) and don't know the right way to go from here, or whether my approach is correct; I would appreciate any help. Below is my code snippet; the meter image links are in the comments.
import os

import cv2
import imutils
import numpy as np
from imutils import contours
from imutils.perspective import four_point_transform

def process_image(path, index):
    img = cv2.imread(path)
    img = cv2.resize(img, (0, 0), fx=2.0, fy=2.0)
    height, width, depth = img.shape
    print("\n---------------------------------------------\n")
    print("In Process Image Path is %s height is %d Width is %d depth is %d" % (path, height, width, depth))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.medianBlur(gray, 15)
    circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, 1.2, 100)
    # ensure at least one circle is found, which is our meter
    if circles is not None:
        circles = np.uint16(np.around(circles))
        print("Meter Found")
        for i in circles[0]:
            CenterX = i[0]
            CenterY = i[1]
            Radius = i[2]
            # mask out everything outside the detected meter circle
            circle_img = np.zeros((height, width), np.uint8)
            cv2.circle(circle_img, (CenterX, CenterY), Radius, 1, thickness=-1)
            masked_data = cv2.bitwise_and(img, img, mask=circle_img)
            output = masked_data.copy()
            cv2.circle(output, (i[0], i[1]), i[2], (0, 255, 0), 2)
            cv2.circle(output, (i[0], i[1]), 2, (0, 0, 255), 3)
            cv2.imwrite("output_" + str(index) + ".jpg", output)
            break
        gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 1)
        edged = cv2.Canny(blurred, 5, 10, 200)
        cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        displayCnt = None
        contour_list = []
        # loop over the contours
        for c in cnts:
            # approximate the contour
            peri = 0.02 * cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, peri, True)
            # if the contour has four vertices, then we have found the meter display
            if len(approx) == 4:
                contour_list.append(c)
                cv2.contourArea(c)
                displayCnt = approx
                break
        warped = four_point_transform(gray, displayCnt.reshape(4, 2))
        output = four_point_transform(output, displayCnt.reshape(4, 2))
        thresh = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        digitCnts = []
        # loop over the digit area candidates
        for c in cnts:
            # compute the bounding box of the contour
            (x, y, w, h) = cv2.boundingRect(c)
            # if the contour is sufficiently large, it must be a digit
            if (w > 5 and w < 100) and (h >= 15 and h <= 150):
                digitCnts.append(c)
        # sort the contours from left-to-right, then initialize the actual digits themselves
        digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]
        mask = np.zeros(thresh.shape, np.uint8)
        cv2.drawContours(mask, digitCnts, -80, (255, 255, 255), -1)  # a negative index draws all contours
        mask = cv2.bitwise_not(mask)
        mask = cv2.resize(mask, (0, 0), fx=2.0, fy=2.0)
        cv2.imwrite("Mask.jpg", mask)  # write the mask to disk so ssocr can read it
        result = os.popen('/usr/local/bin/ssocr --number-digits=-1 -t 10 Mask.jpg')
        output = result.read()
        print("Output is " + output)
        output = output[2:8]
        return str(round(float(output) * 0.1, 1))
    else:
        print("Circle not Found")
        print("\n---------------------------------------------\n")
        return None
