How to remove the background from a picture in OpenCV Python

I am new to computer vision. I would also like to ask how I can delete the whole background of this image and keep only the pills untouched. I tried different things, like changing the background color, but there are still some small edges and also noise.
Or, if it's possible, make all of the white background a neutral color, without the line of the circle.

Here is one way in Python/OpenCV. Threshold the image on white. Then apply some morphology to clean it up a bit. Then invert it to make a mask. Then apply the mask to the input. I note that your pills overlap the ring, so this method does not remove the ring.
Input:
import cv2
import numpy as np
# Read image
img = cv2.imread('pills.jpg')
hh, ww = img.shape[:2]
# threshold on white
# Define lower and upper limits
lower = np.array([200, 200, 200])
upper = np.array([255, 255, 255])
# Create mask that selects the white background
thresh = cv2.inRange(img, lower, upper)
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20,20))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# invert morph image
mask = 255 - morph
# apply mask to image
result = cv2.bitwise_and(img, img, mask=mask)
# save results
cv2.imwrite('pills_thresh.jpg', thresh)
cv2.imwrite('pills_morph.jpg', morph)
cv2.imwrite('pills_mask.jpg', mask)
cv2.imwrite('pills_result.jpg', result)
cv2.imshow('thresh', thresh)
cv2.imshow('morph', morph)
cv2.imshow('mask', mask)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Morphology cleaned image:
Mask image:
Result:
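The question also asked whether the white background could become a neutral color instead. One option (not part of the original answer) is to put the mask into an alpha channel and save as PNG, which makes the background transparent rather than black. A minimal sketch reusing the mask computed above:
import cv2
import numpy as np
# read input and rebuild the background mask exactly as in the answer above
img = cv2.imread('pills.jpg')
lower = np.array([200, 200, 200])
upper = np.array([255, 255, 255])
thresh = cv2.inRange(img, lower, upper)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20,20))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
mask = 255 - morph
# put the mask into the alpha channel so the white background becomes transparent
# (PNG supports alpha; JPG does not)
bgra = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
bgra[:, :, 3] = mask
cv2.imwrite('pills_transparent.png', bgra)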

Here is another way to do that in Python/OpenCV, removing the ring. But it will also remove parts of the pills that overlap the ring.
Read the input
Threshold on white
Apply morphology close to remove the center strip
Get the contours
Draw the contours as white filled on black background
Get the convex hull of the white filled contours
Fit an ellipse to the convex hull
Print the ellipse shape to make sure it is close to a circle
Draw the convex hull outline in red on the input to check if it fits the white region
Draw a circle using the average ellipse radii and center as white filled on black background
Erode the circle a little to avoid leaving a partial white ring
Combine the inverted morph image and the circle image to make a final mask
Apply the final mask to the input
Save the results
import cv2
import numpy as np
# Read image
img = cv2.imread('pills.jpg')
hh, ww = img.shape[:2]
# threshold on white
# Define lower and upper limits
lower = np.array([200, 200, 200])
upper = np.array([255, 255, 255])
# Create mask that selects the white background
thresh = cv2.inRange(img, lower, upper)
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20,20))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# get contours
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
# draw white contours on black background as mask
mask = np.zeros((hh,ww), dtype=np.uint8)
for cntr in contours:
    cv2.drawContours(mask, [cntr], 0, (255,255,255), -1)
# get convex hull
points = np.column_stack(np.where(thresh.transpose() > 0))
hullpts = cv2.convexHull(points)
((centx,centy), (width,height), angle) = cv2.fitEllipse(hullpts)
print("center x,y:",centx,centy)
print("diameters:",width,height)
print("orientation angle:",angle)
# draw convex hull on image
hull = img.copy()
cv2.polylines(hull, [hullpts], True, (0,0,255), 1)
# create new circle mask from ellipse
circle = np.zeros((hh,ww), dtype=np.uint8)
cx = int(centx)
cy = int(centy)
radius = (width+height)/4
cv2.circle(circle, (cx,cy), int(radius), 255, -1)
# erode circle a bit to avoid a white ring
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (6,6))
circle = cv2.morphologyEx(circle, cv2.MORPH_ERODE, kernel)
# combine inverted morph and circle
mask2 = cv2.bitwise_and(255-morph, 255-morph, mask=circle)
# apply mask to image
result = cv2.bitwise_and(img, img, mask=mask2)
# save results
cv2.imwrite('pills_thresh2.jpg', thresh)
cv2.imwrite('pills_morph2.jpg', morph)
cv2.imwrite('pills_mask2.jpg', mask)
cv2.imwrite('pills_hull2.jpg', hull)
cv2.imwrite('pills_circle.jpg', circle)
cv2.imwrite('pills_result2.jpg', result)
cv2.imshow('thresh', thresh)
cv2.imshow('morph', morph)
cv2.imshow('mask', mask)
cv2.imshow('hull', hull)
cv2.imshow('circle', circle)
cv2.imshow('mask2', mask2)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Morphology image:
Filled contours image:
Convex hull on input:
Circle image:
Final mask image:
Result:
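As a possible simplification (not part of the original answer), cv2.minEnclosingCircle could replace the convex hull + ellipse fit when the ring really is circular. Note that it encloses all of the hull points, so the circle it returns is at least as large as the fitted one and may need a little more erosion. A sketch reusing hullpts, hh, and ww from the code above:
# fit a minimum enclosing circle to the hull points instead of an ellipse
(ecx, ecy), eradius = cv2.minEnclosingCircle(hullpts)
circle = np.zeros((hh, ww), dtype=np.uint8)
cv2.circle(circle, (int(ecx), int(ecy)), int(eradius), 255, -1)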

Related

apply heat map to the opencv rectangle

I am trying to apply a heat map to a selected portion [people's faces] of my picture. Here is what I have done so far:
The rectangle will be applied to the face.
The face will be cropped.
A heat map will be applied to the cropped image.
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
    crop_img = image[y:y+h, x:x+w]
    # Cropping Area
    # Color Mapping Area
    images = cv2.imread(crop_img, 0)
    colormap = plt.get_cmap('inferno')
    heatmap = (colormap(images) * 2**16).astype(np.uint16)[:,:,:3]
    heatmap = cv2.cvtColor(heatmap, cv2.COLOR_RGB2BGR)
    # Saving Color Map
    img_names = "heatimage{}.png".format(i)
    cv2.imwrite(img_names, heatmap)
    print("{} written!".format(img_names))
    img = cv2.imread(img_names,0)
    cv2.imshow('heatmap{}'.format(i), heatmap)
I was able to save the cropped image and the rectangle marking the faces separately, but I need:
1. the rectangle area to become a heat map, without cropping separately, in my original image;
2. the other parts of the image to stay normal.
EDITED
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
    crop_img = image[y:y+h, x:x+w]
    sample = cv2.imread("sample.jpg", cv2.COLOR_BGR2GRAY)
    colormap = cm.get_cmap('inferno', 256)
    cmp = cm.ScalarMappable(cmap='inferno')
    # create 1D float gradient from 0 to 1 with 256 increments
    # convert to rgba in range 0 to 255 (via bytes=True)
    # remove alpha channel and reshape to 256x1 3 channel from (256, 4)
    # convert rgb to bgr
    cmap = np.linspace(0, 1, 256, endpoint=True)
    cmap = cmp.to_rgba(cmap, bytes=True)
    cmap = cmap[:, 0:-1].reshape((256, 1, 3))
    cmap = cv2.cvtColor(cmap, cv2.COLOR_RGB2BGR)
    # apply color map to crop
    crop_mapped = cv2.applyColorMap(crop_img, cmap)
    # put color mapped crop back into input
    result = sample.copy()
    result = cv2.cvtColor(result, cv2.COLOR_GRAY2BGR)
    result[y:y+h, x:x+w] = crop_mapped
    # save result
    cv2.imwrite('IRimage.jpg', result)
    # show result
    cv2.imshow("result", result)
    i += 1
cv2.imshow("Faces found", image)
If I have more than one face, how can I apply the color filter to all of the faces?
If I understand correctly, you're trying to display the portion of a heat map that's on the face of a person. Try this:
alpha = 0.5
image[y:y+h, x:x+w] = alpha * image[y:y+h, x:x+w] + (1 - alpha) * heatmap[y:y+h, x:x+w]
cv2.imshow("preview", image)
I believe that you will have to crop the image, apply the colormap to the cropped image, and then put the color-mapped crop back into your original image. I do not think there is a way to apply a colormap directly to a portion of an image.
Here is how I do the above in Python/OpenCV.
Read the input as grayscale
Crop the image where you want it to be color mapped
Load the colormap from Matplotlib and convert it to a BGR image
Apply the colormap to the cropped image
Convert the input to 3 channel gray and insert the color mapped cropped image back in the correct location.
Save the results
Input:
import cv2
import numpy as np
import matplotlib.cm as cm
# read image as grayscale (use an IMREAD flag here, not a cvtColor code)
img = cv2.imread('redhat_gray.jpg', cv2.IMREAD_GRAYSCALE)
# crop image
crop = img[140:240, 70:170]
# get colormap from matplotlib and normalize
colormap = cm.get_cmap('inferno', 256)
cmp = cm.ScalarMappable(cmap='inferno')
# create 1D float gradient from 0 to 1 with 256 increments
# convert to rgba in range 0 to 255 (via bytes=True)
# remove alpha channel and reshape to 256x1 3 channel from (256, 4)
# convert rgb to bgr
cmap = np.linspace(0, 1, 256, endpoint=True)
cmap = cmp.to_rgba(cmap, bytes=True)
cmap = cmap[:, 0:-1].reshape((256, 1, 3))
cmap = cv2.cvtColor(cmap, cv2.COLOR_RGB2BGR)
#print(cmap)
# apply color map to crop
crop_mapped = cv2.applyColorMap(crop, cmap)
# put color mapped crop back into input
result = img.copy()
result = cv2.cvtColor(result, cv2.COLOR_GRAY2BGR)
result[140:240, 70:170] = crop_mapped
# save result
cv2.imwrite('redhat_gray_rectangle_inferno.jpg', result)
# show result
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:

Bounding boxes for individual contours except one color

I have an image as below
I want to add bounding boxes for each of the regions as shown in the pic below using OpenCV & Python
I know how to find contours if the region is one colour. However, here I want to find contours for all non-black regions. I am just not able to figure it out. Can anyone help?
Regarding some regions being non-continuous (the 2 vertical lines on the left), you can ignore that; I will dilate and make them continuous.
If I understand what you want, here is one way in Python/OpenCV.
Read the input
Convert to gray
Threshold to black and white
Find external contours and their bounding boxes
Draw the bounding box rectangles on a copy of the input
Save the results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('white_green.png')
# convert to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
# get contour bounding boxes and draw on copy of input
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours = contours[0] if len(contours) == 2 else contours[1]
result = img.copy()
for c in contours:
    x,y,w,h = cv2.boundingRect(c)
    cv2.rectangle(result, (x, y), (x+w-1, y+h-1), (0, 0, 255), 1)
# view result
cv2.imshow("threshold", thresh)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save result
cv2.imwrite("white_green_thresh.jpg", thresh)
cv2.imwrite("white_green_bboxes.jpg", result)
Thresholded image:
Bounding Boxes:
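The asker mentioned dilating the broken vertical lines to make them continuous; that step would slot in between the threshold and cv2.findContours. A minimal sketch (the kernel size is a guess and would need tuning to the gap size):
# optional: dilate the threshold image so broken regions merge into one contour
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
dilated = cv2.dilate(thresh, kernel, iterations=1)
# then pass `dilated` instead of `thresh` to cv2.findContours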

How do I find corners of a paper when there are printed corners/lines on paper itself?

I'm using openCV in Python to find the corners of a sheet of paper to unwarp it.
img = cv2.imread(images[i])
corners = cv2.goodFeaturesToTrack(cv2.cvtColor(img,cv2.COLOR_BGR2GRAY),4,.01,1000,useHarrisDetector=True,k=.04)
corners = np.float32(corners)
print(corners)
ratio = 1.6
cardH = math.sqrt((corners[2][0][0] - corners[1][0][0]) * (corners[2][0][0] - corners[1][0][0]) +
                  (corners[2][0][1] - corners[1][0][1]) * (corners[2][0][1] - corners[1][0][1]))
cardW = ratio * cardH
pts2 = np.float32([[corners[0][0][0], corners[0][0][1]],
                   [corners[0][0][0] + cardW, corners[0][0][1]],
                   [corners[0][0][0] + cardW, corners[0][0][1] + cardH],
                   [corners[0][0][0], corners[0][0][1] + cardH]])
M = cv2.getPerspectiveTransform(corners, pts2)
offsetSize = 500
transformed = np.zeros((int(cardW + offsetSize), int(cardH + offsetSize)), dtype=np.uint8)
dst = cv2.warpPerspective(img, M, transformed.shape)
Before:
https://imgur.com/a/H7HjFro
After:
https://imgur.com/a/OA6Iscq
As you can see with these images, they're detecting edges inside the paper itself, rather than the corner of the paper. Should I consider using a different algorithm entirely? I'm quite lost.
I've tried increasing the minimum euclidean distance to 1000, but that really didn't do anything.
Please note: this is no one's real information; it is a fake dataset found on Kaggle.
The Kaggle dataset can be found at https://www.kaggle.com/mcvishnu1/fake-w2-us-tax-form-dataset
Here is one way to do that in Python/OpenCV.
Note that the found corners are listed counter-clockwise from the top-most corner.
Read the input
Convert to gray
Gaussian blur
Otsu threshold
Morphology open/close to clean up the threshold
Get largest contour
Approximate a polygon from the contour
Get the corners
Draw the polygon on the input
Compute side lengths
Compute output corresponding corners
Get perspective transformation matrix from corresponding corner points
Warp the input image according to the matrix
Save the results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread("efile.jpg")
# convert img to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# blur image
blur = cv2.GaussianBlur(gray, (3,3), 0)
# do otsu threshold on gray image
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# apply morphology
kernel = np.ones((7,7), np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
morph = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel)
# get largest contour
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
area_thresh = 0
for c in contours:
    area = cv2.contourArea(c)
    if area > area_thresh:
        area_thresh = area
        big_contour = c
# draw white filled largest contour on black just as a check to see it got the correct region
page = np.zeros_like(img)
cv2.drawContours(page, [big_contour], 0, (255,255,255), -1)
# get perimeter and approximate a polygon
peri = cv2.arcLength(big_contour, True)
corners = cv2.approxPolyDP(big_contour, 0.04 * peri, True)
# draw polygon on input image from detected corners
polygon = img.copy()
cv2.polylines(polygon, [corners], True, (0,0,255), 1, cv2.LINE_AA)
# Alternate: cv2.drawContours(page,[corners],0,(0,0,255),1)
# print the number of found corners and the corner coordinates
# They seem to be listed counter-clockwise from the top most corner
print(len(corners))
print(corners)
# for simplicity get average of top/bottom side widths and average of left/right side heights
# note: probably better to get average of horizontal lengths and of vertical lengths
width = 0.5*( (corners[0][0][0] - corners[1][0][0]) + (corners[3][0][0] - corners[2][0][0]) )
height = 0.5*( (corners[2][0][1] - corners[1][0][1]) + (corners[3][0][1] - corners[0][0][1]) )
width = np.int0(width)
height = np.int0(height)
# reformat input corners to x,y list
icorners = []
for corner in corners:
    pt = [corner[0][0], corner[0][1]]
    icorners.append(pt)
icorners = np.float32(icorners)
# get corresponding output corners from width and height
ocorners = [ [width,0], [0,0], [0,height], [width,height] ]
ocorners = np.float32(ocorners)
# get perspective transformation matrix
M = cv2.getPerspectiveTransform(icorners, ocorners)
# do perspective
warped = cv2.warpPerspective(img, M, (width, height))
# write results
cv2.imwrite("efile_thresh.jpg", thresh)
cv2.imwrite("efile_morph.jpg", morph)
cv2.imwrite("efile_polygon.jpg", polygon)
cv2.imwrite("efile_warped.jpg", warped)
# display it
cv2.imshow("efile_thresh", thresh)
cv2.imshow("efile_morph", morph)
cv2.imshow("efile_page", page)
cv2.imshow("efile_polygon", polygon)
cv2.imshow("efile_warped", warped)
cv2.waitKey(0)
Thresholded image:
Morphology cleaned image:
Polygon drawn on input:
Extracted Corners (counterclockwise from top right corner)
4
[[[693 67]]
[[ 23 85]]
[[ 62 924]]
[[698 918]]]
Warped Result:
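Note that the width/height arithmetic above relies on the counter-clockwise corner order that cv2.approxPolyDP happened to return for this image. If other inputs come back in a different order, a common trick (not part of the original answer) is to sort the four corners into a fixed order first, for example:
import numpy as np
def order_corners(pts):
    # sort 4 points into top-left, top-right, bottom-right, bottom-left order
    pts = pts.reshape(4, 2).astype(np.float32)
    s = pts.sum(axis=1)        # x + y: smallest at top-left, largest at bottom-right
    d = np.diff(pts, axis=1)   # y - x: smallest at top-right, largest at bottom-left
    return np.float32([pts[np.argmin(s)], pts[np.argmin(d)],
                       pts[np.argmax(s)], pts[np.argmax(d)]])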

Crop the rectangular paper from the image

From the discussion: Crop exactly document paper from image
I'm trying to get the white paper from the image, and I'm using the following code, which does not crop an exact rectangle.
def crop_image(image):
    image = cv2.imread(image)
    # convert to grayscale image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # threshold
    thresh = cv2.threshold(gray, 190, 255, cv2.THRESH_BINARY)[1]
    # apply morphology
    kernel = np.ones((7, 7), np.uint8)
    morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    kernel = np.ones((9, 9), np.uint8)
    morph = cv2.morphologyEx(morph, cv2.MORPH_ERODE, kernel)
    # Get Largest contour
    contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    contours = contours[0] if len(contours) == 2 else contours[1]
    area_thresh = 0
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > area_thresh:
            area_thresh = area
            big_contour = cnt
    # get bounding box
    x, y, w, h = cv2.boundingRect(big_contour)
    # draw filled contour on black background
    mask = np.zeros_like(gray)
    mask = cv2.merge([mask, mask, mask])
    cv2.drawContours(mask, [big_contour], -1, (255, 255, 255), cv2.FILLED)
    # apply mask to input
    result = image.copy()
    result = cv2.bitwise_and(result, mask)
    # crop result
    img_result = result[y:y+h, x:x+w]
    filename = generate_filename()
    cv2.imwrite(filename, img_result)
    logger.info('Successfully saved cropped file : %s' % filename)
    return img_result, filename
I'm able to get the desired result, but not a rectangular image.
Here I'm attaching the input, and here is what I'm getting after cropping the image.
I want a rectangular image of the paper.
Please help me with this.
Thanks in advance
The first problem I can see is that the threshold value is not low enough, so the bottom part of the paper is not correctly captured (it's too dark to be caught by the threshold).
The second problem, as far as I can understand, is fitting the rectangle to the image. What you need to do is warp the perspective.
To do that, you can find more information in this amazing PyImageSearch post.
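A minimal sketch of both fixes, assuming the paper is the largest contour and approximates to four corners; the filename and the threshold value 150 are guesses that would need tuning (and the corner order from cv2.approxPolyDP is not guaranteed, so in practice you would sort the corners into a fixed order first, as in the ordering sketch earlier):
import cv2
import numpy as np
img = cv2.imread('paper.jpg')  # hypothetical filename
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# lower threshold than 190 so the darker bottom edge of the paper is still captured
thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)[1]
# largest contour, then approximate it to 4 corners
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
big = max(contours, key=cv2.contourArea)
peri = cv2.arcLength(big, True)
corners = cv2.approxPolyDP(big, 0.02 * peri, True)
if len(corners) == 4:
    # warp the quadrilateral to an upright rectangle
    pts = corners.reshape(4, 2).astype(np.float32)
    w = int(max(np.linalg.norm(pts[0] - pts[1]), np.linalg.norm(pts[2] - pts[3])))
    h = int(max(np.linalg.norm(pts[1] - pts[2]), np.linalg.norm(pts[3] - pts[0])))
    dst = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    M = cv2.getPerspectiveTransform(pts, dst)
    result = cv2.warpPerspective(img, M, (w, h))
    cv2.imwrite('paper_warped.jpg', result)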

How to find the document edges in various coloured backgrounds using opencv python? [Document Scanning in various backgrounds]

I currently have a document that needs to be smart-scanned.
For that, I need to find proper contours of the document in any background so that I can do a warped perspective projection and detection with that image.
The main issue faced while doing this is that the document edge detects any kind of background.
So far I have tried the function HoughLinesP and tried to find contours on the grayscale blurred image passed through Canny edge detection.
MORPH = 9
CANNY = 84
HOUGH = 25
IM_HEIGHT, IM_WIDTH, _ = rescaled_image.shape
# convert the image to grayscale and blur it slightly
gray = cv2.cvtColor(rescaled_image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7,7), 0)
#dilate helps to remove potential holes between edge segments
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(MORPH,MORPH))
dilated = cv2.dilate(gray, kernel)
# find edges and mark them in the output map using the Canny algorithm
edged = cv2.Canny(dilated, 0, CANNY)
test_corners = self.get_corners(edged)
approx_contours = []
(_, cnts, hierarchy) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
# loop over the contours
for c in cnts:
    # approximate the contour
    approx = cv2.approxPolyDP(c, 80, True)
    if self.is_valid_contour(approx, IM_WIDTH, IM_HEIGHT):
        approx_contours.append(approx)
        break
How can I find a proper bounding box around the document via OpenCV?
Any help will be much appreciated.
(The document is taken from the camera in any angle and any coloured background.)
The following code might help you to detect/segment the page in the image...
import cv2
import matplotlib.pyplot as plt
import numpy as np
image = cv2.imread('test_p.jpg')
print(image.shape)
ori = image.copy()
image = cv2.resize(image, (image.shape[1]//10,image.shape[0]//10))
Resized the image to make the operations faster so that we can work in real time.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (11,11), 0)
edged = cv2.Canny(gray, 75, 200)
print("STEP 1: Edge Detection")
plt.imshow(edged)
plt.show()
cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]  # handle OpenCV 3 vs 4 return values
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
Here we will consider only the first 5 contours from the list, sorted by area.
The size of the Gaussian blur is a bit sensitive, so choose it according to the image size.
After the above operations, the image may look like this:
for c in cnts:
    ### Approximating the contour
    # Calculates a contour perimeter or a curve length
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.01 * peri, True)
    # if our approximated contour has four points, then we
    # can assume that we have found our screen
    screenCnt = approx
    if len(approx) == 4:
        screenCnt = approx
        break
# show the contour (outline)
print("STEP 2: Finding Boundary")
cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
image_e = cv2.resize(image,(image.shape[1],image.shape[0]))
cv2.imwrite('image_edge.jpg',image_e)
plt.imshow(image_e)
plt.show()
The final image may look like this:
The rest can be handled after getting the final image.
Code reference: Git Repository
I guess this answer will be helpful...
There is a similar problem, which is addressed by orthographic projection.
Orthographic approaches
Rather than doing a Gaussian blur + morphological operation to get the edge of the document, try doing an orthographic projection first and then find contours with your method.
For finding a proper bounding box, try some preset values or a reference letter, after which an orthographic projection will allow you to compute the height and hence the dimensions of the bounding box.
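A minimal sketch of the reference-object idea (all numbers are made-up placeholders): once one object of known physical size is measured in pixels in the rectified image, every other bounding box converts to physical units:
# hypothetical: a reference letter known to be 10 mm tall measures 40 px
# tall in the rectified (orthographic) image
ref_height_mm = 10.0
ref_height_px = 40.0
px_per_mm = ref_height_px / ref_height_mm
# any other bounding box in the same rectified image converts directly
x, y, w, h = 120, 80, 200, 300  # placeholder box from cv2.boundingRect
print('document size: %.1f x %.1f mm' % (w / px_per_mm, h / px_per_mm))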
