I have the following NumPy array of a running man, which you can download here:
https://drive.google.com/file/d/1SfIEqGsBV_vA7iP4UjLdklLJlLdDzozL/view?usp=sharing
To display it, use this code:
import numpy as np
import matplotlib.pyplot as plt
# load data
data = np.load('running_man.npy')
# plot data
plt.imshow(data)
As you can see there is a lot of noise (freckles) in the image. I would like to get rid of it and retrieve a clean image of the runner. Any idea of how to do it?
This is what I have done so far:
from scipy import ndimage
from skimage import measure
# Find contours at a constant value of 1
contours = measure.find_contours(data, 1, fully_connected='high')
# Select the largest contiguous contour
contour = sorted(contours, key=lambda x: len(x))[-1]
# Create an empty image to store the masked array
r_mask = np.zeros_like(data, dtype='bool')
# Create a contour image by using the contour coordinates rounded to their nearest integer value
r_mask[np.round(contour[:, 0]).astype('int'), np.round(contour[:, 1]).astype('int')] = 1
# Fill in the hole created by the contour boundary
r_mask = ndimage.binary_fill_holes(r_mask)
# Invert the mask since one wants pixels outside of the region
r_mask = ~r_mask
plt.imshow(r_mask)
...but as you can see, the outline is very rough!
What works well is to upload the image to an online JPG-to-SVG converter, which makes the lines super smooth, but I want to be able to do it in Python.
Idea:
I am looking for something that can keep the sharp corners, maybe something that detects the gradient along the edge and only keeps the points where the gradient is above a certain threshold...
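One way to prototype exactly that idea (a sketch, not from the answers below, assuming the same running_man.npy and the contour extraction attempted above; the tolerance value is a guess) is Douglas-Peucker polygon simplification, which keeps high-curvature points such as sharp corners and drops the jagged points in between. skimage ships it as measure.approximate_polygon:
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure

data = np.load('running_man.npy')
# same contour extraction as in the attempt above
contours = measure.find_contours(data, 1, fully_connected='high')
contour = max(contours, key=len)
# Douglas-Peucker: points whose removal would move the outline by more
# than `tolerance` pixels are kept, so sharp corners survive
smooth = measure.approximate_polygon(contour, tolerance=2)
plt.plot(smooth[:, 1], smooth[:, 0])
plt.gca().invert_yaxis()  # match image coordinates
plt.show()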
For this specific image you can just use numpy:
import numpy as np
import matplotlib.pyplot as plt

data = np.load('running_man.npy')
# in this particular array the noise speckles have values greater than 1,
# so zeroing everything above 1 removes them
data[data > 1] = 0
plt.xticks([])
plt.yticks([])
plt.imshow(data)
plt.show()
For a method that preserves the corners better, we can use a median filter to smooth the mask, but force the original foreground pixels to stay set so the corners are not rounded away.
Results (left to right): masked image, mask after filtering, recolored.
import cv2
import numpy as np

# load image
img = cv2.imread("run.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# make mask
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# median filter, but force the original foreground pixels to stay white
med = cv2.medianBlur(thresh, 11)
med[thresh == 255] = 255

# repeat on the inverted mask to smooth the other side of the edge
mask = cv2.bitwise_not(med)
med = cv2.medianBlur(mask, 3)
med[mask == 255] = 255

# recolor
color = np.zeros_like(img)
color[med == 0] = (66, 239, 245)
color[med == 255] = (92, 15, 75)

# show
cv2.imshow("colored", color)
cv2.waitKey(0)
Related
Following this example of K-means clustering, I want to recreate the same, only I'm very keen for the final image to contain just the quantized colours (plus a white background). As it is, the colour bars get smooshed together into a line of blended colours.
Whilst they look very similar, the top half of the image is what I've got from cv2; it contains 38 colours in total.
The lower image only has 10 colours and is what I'm after.
Let's look at a bit of that with 6 times magnification:
I've tried:
# OpenCV and Python K-Means Color Clustering
# build a histogram of clusters and then create a figure
# representing the number of pixels labeled to each color
hist = colour_utils.centroid_histogram(clt)
bar = colour_utils.plot_colors(hist, clt.cluster_centers_)
bar = cv2.resize(bar, (460, 345), 0, 0, interpolation = cv2.INTER_NEAREST)
However, the resize seems to have no effect on the displayed size, nor does changing the interpolation type. I don't know what controls the initial image size either.
Confused.
Any ideas?
I recommend showing the image using cv2.imshow instead of matplotlib.
cv2.imshow shows the image "pixel to pixel" by default, while matplotlib.pyplot matches the image dimensions to the size of the axes.
bar_bgr = cv2.cvtColor(bar, cv2.COLOR_RGB2BGR) # Convert RGB to BGR
cv2.imshow('bar', bar_bgr)
cv2.waitKey()
cv2.destroyAllWindows()
In case you want to use matplotlib, take a look at: Display image with a zoom = 1 with Matplotlib imshow() (how to?).
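For reference, a minimal sketch of that matplotlib route (the helper name and the dpi value are my own; the idea is that the figure size in inches times the dpi must equal the image size in pixels):
import matplotlib.pyplot as plt

def imshow_pixel_perfect(img, dpi=100):
    # size the figure so one array element maps to one screen pixel
    h, w = img.shape[:2]
    fig = plt.figure(figsize=(w / dpi, h / dpi), dpi=dpi)
    ax = fig.add_axes([0, 0, 1, 1])  # axes fill the figure, no margins
    ax.axis('off')
    ax.imshow(img, interpolation='nearest')
    plt.show()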
Code used for testing:
# import the necessary packages
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import argparse
#import utils
import cv2
def centroid_histogram(clt):
    # grab the number of different clusters and create a histogram
    # based on the number of pixels assigned to each cluster
    numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
    (hist, _) = np.histogram(clt.labels_, bins=numLabels)

    # normalize the histogram, such that it sums to one
    hist = hist.astype("float")
    hist /= hist.sum()

    # return the histogram
    return hist

def plot_colors(hist, centroids):
    # initialize the bar chart representing the relative frequency
    # of each of the colors
    bar = np.zeros((50, 300, 3), dtype="uint8")
    startX = 0

    # loop over the percentage of each cluster and the color of
    # each cluster
    for (percent, color) in zip(hist, centroids):
        # plot the relative percentage of each cluster
        endX = startX + (percent * 300)
        cv2.rectangle(bar, (int(startX), 0), (int(endX), 50),
                      color.astype("uint8").tolist(), -1)
        startX = endX

    # return the bar chart
    return bar
# load the image and convert it from BGR to RGB so that
# we can display it with matplotlib
image = cv2.imread('chelsea.png')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# show our image
plt.figure()
plt.axis("off")
plt.imshow(image)
# reshape the image to be a list of pixels
image = image.reshape((image.shape[0] * image.shape[1], 3))
# cluster the pixel intensities
clt = KMeans(n_clusters = 5)
clt.fit(image)
# build a histogram of clusters and then create a figure
# representing the number of pixels labeled to each color
hist = centroid_histogram(clt)
bar = plot_colors(hist, clt.cluster_centers_)
# show our color bar
#plt.figure()
#plt.axis("off")
#plt.imshow(bar)
#plt.show()
bar = cv2.resize(bar, (460, 345), 0, 0, interpolation = cv2.INTER_NEAREST)
bar_bgr = cv2.cvtColor(bar, cv2.COLOR_RGB2BGR) # Convert RGB to BGR
cv2.imshow('bar', bar_bgr)
cv2.waitKey()
cv2.destroyAllWindows()
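As for the original goal of having the final image itself contain just the quantized colours, the bar-chart code above doesn't do that, but a sketch could map every pixel to its cluster centre (chelsea.png and n_clusters = 5 are just the test values from above):
import cv2
from sklearn.cluster import KMeans

image = cv2.imread('chelsea.png')
h, w = image.shape[:2]
pixels = image.reshape((-1, 3))

clt = KMeans(n_clusters=5)
labels = clt.fit_predict(pixels)

# replace every pixel by its cluster centre: the result contains
# exactly n_clusters distinct colours
quantized = clt.cluster_centers_.astype('uint8')[labels].reshape((h, w, 3))

cv2.imshow('quantized', quantized)
cv2.waitKey()
cv2.destroyAllWindows()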
I have a matrix, e.g., generated as follows
x = np.random.randint(10,size=(20,20))
How can I visualize the matrix with respect to the distribution of a given value, e.g. 6?
In other words, how can I show the matrix as an image where the pixels whose matrix entries equal 6 are shown as white, while all other pixels are shown as black?
The simplest way to display the distribution of a given value through a black and white image is using a boolean array like x == 6. If you wish to improve visualization by replacing black and white with custom colors, NumPy's where will come in handy:
import numpy as np
import matplotlib.pyplot as plt
x = np.random.randint(10, size=(20, 20))
value = 6
foreground = [255, 0, 0] # red
background = [0, 0, 255] # blue
bw = x == value  # boolean mask: True where the entry equals the value
rgb = np.where(bw[:, :, None], foreground, background)  # broadcast the mask against the two RGB colors
fig, ax = plt.subplots(1, 2)
ax[0].imshow(bw, cmap='gray')
ax[0].set_title('Black & white')
ax[1].imshow(rgb)
ax[1].set_title('RGB')
plt.show()
I think you want this:
from PIL import Image
import numpy as np
# Initialise data
x = np.random.randint(10,size=(20,20),dtype=np.uint8)
# Make all pixels with value 6 into white and all else black
x[x==6] = 255
x[x!=255] = 0
# Make PIL Image from Numpy array
pi = Image.fromarray(x)
# Display image
pi.show()
# Save PIL Image
pi.save('result.png')
Images: the result after applying the mask, and the original image.
import cv2
import dlib
import numpy as np
img = cv2.imread("Aayush.jpg")
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
msk = np.zeros_like(img_gray)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
faces = detector(img_gray)
for face in faces:
    landmarks = predictor(img_gray, face)
    lp = []
    for n in range(0, 68):
        x = landmarks.part(n).x
        y = landmarks.part(n).y
        lp.append((x, y))
    p = np.array(lp, np.int32)
    #cv2.circle(img, (x, y), 3, (0, 0, 255), -1)
    convexhull = cv2.convexHull(p)
    #cv2.polylines(img, [convexhull], True, (255, 0, 0), 3)
    cv2.fillConvexPoly(msk, convexhull, 255)

img1 = cv2.bitwise_and(img, img, mask=msk)
img1 contains a completely black image with the face cut out from img; I just require the pixel values of the face portion, not the complete image.
As the original image and mask have not been provided in the question itself, I am assuming a simple input image and a mask image with a circular cavity.
The mask here is a single-channel matrix with a value of 255 in the central cavity. To get the pixel info inside the cavity only, you can use the following NumPy operation:
pixel_info = original_image[mask == 255]
# You may need to convert the numpy array to Python list.
pixel_info_list = pixel_info.tolist()
Now you may serialize the list to any format you want (CSV in this case).
Full code:
import cv2
import numpy as np
original_image = cv2.imread("/path/to/lena.png")
mask = np.zeros(original_image.shape[:2], dtype=original_image.dtype)
mask = cv2.circle(mask, (256, 256), 100, [255], -1)
pixel_info = original_image[mask == 255]
pixel_info_list = pixel_info.tolist()
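The CSV step mentioned above could then look like this sketch (the file name and column header are my own choices; note that OpenCV stores pixels in BGR order):
import csv

with open('pixels.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['blue', 'green', 'red'])  # OpenCV pixel order is BGR
    writer.writerows(pixel_info_list)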
I got a segmented image as input to my program; the goal is to split the regions into two images, one containing the external contours (regions) and the other containing the internal contours (regions).
The program is in Python 3.7 with OpenCV.
I tried some morphological operations (closing) and a smoothing filter (median), then applied binary and Otsu thresholding and Canny edge detection to get a better version of the contours with the findContours function.
First I extract the external contours with cv2.RETR_EXTERNAL, but this is what I get:
def function(image):
    # preprocessing
    im = cv2.imread(image, 0)
    _Kernel = 3
    iteration__ = 5
    im = Pretraitement.pretraitement.lissage_median(im, _Kernel, iteration__)
    kernel = (3, 3)
    im = cv2.morphologyEx(im, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_CROSS, kernel))
    high_thresh, im = cv2.threshold(im, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    lowThresh = 0.5 * high_thresh
    cv2.rectangle(im, (0, 0), (im.shape[1], im.shape[0]), 0, 3)
    contour, hier = cv2.findContours(
        cv2.Canny(im.copy(), lowThresh, high_thresh),
        cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    MaskExtern = np.zeros((im.shape[0], im.shape[1], 3), dtype=np.uint8)
    MaskRegion = np.zeros((im.shape[0], im.shape[1], 3), dtype=np.uint8)
    MaskContour = np.zeros(im.shape, dtype=np.uint8)
    for i in range(len(contour)):
        for j in range(len(contour)):
            # check if contour j is inside contour i
            if BoundaryBasedDescriptors.Contours.pointInContour3(contour[i], contour[j]):
                pass
            else:
                cv2.drawContours(MaskExtern, contour, j, (0, 255, 255), 1)
        cv2.drawContours(MaskContour, contour, i, 255, 1)
        cv2.drawContours(MaskRegion, contour, i, (255, i * 10, 255 - i * 10), -1)
    cv2.imwrite('_external.jpg', MaskExtern)
    cv2.imwrite('_contour.bmp', MaskContour)
    cv2.imwrite('_colore.jpg', MaskRegion)
The linked image shows the segmented input, and this is what I get when I draw all contours with thickness -1.
I expect to get the right external contours (regions), but I also get some regions that are internal.
This is the error in your code:
cv2.rectangle(im, (0, 0), (im.shape[1], im.shape[0]), 0, 3)
The result is assigned to nothing, so it does nothing. If you add im = in front you'll get the behavior you're expecting.
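That is, the line becomes:
im = cv2.rectangle(im, (0, 0), (im.shape[1], im.shape[0]), 0, 3)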
If your purpose is to separate the internal and the external white areas, you could also try this approach. First invert the image. Then find the external outline of the black area (black in the original, white in the inverted image), which you can then use as a mask to separate the areas. If necessary, you can use the masked interior image to find the smaller contours.
Result:
Code:
import cv2
import numpy as np
# load image as grayscale
img = cv2.imread('cSxN8.png',0)
# threshold to create binary image
tr, img = cv2.threshold(img,50,255,cv2.THRESH_BINARY)
# invert image
img_inv = cv2.bitwise_not(img)
# find external contours
contours, hier = cv2.findContours(img_inv, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
# draw contours in gray
for cnt in contours:
cv2.drawContours(img,[cnt],0,(127),5)
# display image
cv2.imshow('Result', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
This is the complete solution (the approach was proposed by J.D., thanks to you):
import cv2
import numpy as np

# load image as grayscale
img = cv2.imread('cSxN8.png', 0)

# threshold to create binary image
tr, thresh = cv2.threshold(img, 50, 255, cv2.THRESH_BINARY)
img = thresh.copy()

# invert image
img_inv = cv2.bitwise_not(img)

# find external contours
contours, hier = cv2.findContours(img_inv, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    cv2.drawContours(img, [cnt], 0, (127), 1)

# extract the edges of the external contour
img = cv2.bitwise_xor(img, img_inv)

# binarise the external contour
_, img = cv2.threshold(img, 126, 255, cv2.THRESH_BINARY)

# now fill the external regions
contours, hier = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
mask_externe = np.zeros(img.shape, dtype=np.uint8)
for cnt in contours:
    cv2.drawContours(mask_externe, [cnt], -1, 255, -1)

# get the internal region
mask_interne = cv2.bitwise_xor(img_inv, mask_externe)
I have the images as below; I need to extract just the white strip portion from all the images.
I have tried using PIL to extract the rectangular portion by manually specifying the pixel values. Is there an automated way to get this done, where just feeding in the image gives back the rectangular portion?
Below is my snipped code:
from PIL import Image
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = Image.open('C:/Users/ShAgarwal/Documents/image_dataset/pic9.jpg')
half_the_width = img.size[0] / 2
half_the_height = img.size[1] / 2
img4 = img.crop(
    (
        half_the_width - 1632,
        half_the_height - 440,
        half_the_width + 1632,
        half_the_height + 80
    )
)
sample image
import cv2
import numpy as np
from matplotlib import pyplot as plt

image = 'IMG_3134.JPG'

# read image
imgc = cv2.imread(image)
img = cv2.resize(imgc, None, fx=0.25, fy=0.25)  # resize since image is huge

# cropping the strip dimensions
#crop_img = img[1010:1650,140:1099723]

blurred = cv2.blur(img, (3, 3))
canny = cv2.Canny(blurred, 50, 200)

# mark coordinates through automatic detection using Canny's algorithm:
# find the non-zero min-max coords of canny
pts = np.argwhere(canny > 0)
y1, x1 = pts.min(axis=0)
y2, x2 = pts.max(axis=0)

# crop the region
cropped = img[y1:y2, x1:x2]
cv2.imwrite("cropped.png", cropped)

# select the bounded area around the white boundary
tagged = cv2.rectangle(img.copy(), (x1, y1), (x2, y2), (0, 255, 0), 3, cv2.LINE_AA)
r = cv2.selectROI(tagged)
imCrop = img[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]

# bounded area
cv2.imwrite("taggd2.png", imCrop)
cv2.waitKey()
Results from above code