I'm trying to learn OpenCV. Online I found that, with OpenCV, I can obtain the contours of an image, so I tried that. Here is the script:
import cv2
import numpy as np

def getC(imagine):
    global imgContour
    c,h = cv2.findContours(imagine,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    for cnt in c:
        a = cv2.contourArea(cnt)
        print(area)
        if area>500:
            cv2.drawContour(imgContour,cnt,-1,(255,0,0),3)
img = cv2.imread("a3.jpg")
imgContour = img.copy()
imgG = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgB = cv2.GaussianBlur(imgG,(7,7),1)
imgC = cv2.Canny(imgB,50,50)
getC(imgContour)
cv2.imshow("",img)
cv2.imshow("g",imgG)
cv2.imshow("b",imgB)
cv2.imshow("l",imgContour)
cv2.waitKey(0)
I think there is a problem with the global variables, but also with the format. a3.jpg is this image:
I don't know what to do now or how to resolve the issue.
Thanks for the help.
You saved the area in the variable a but used it under the name area. You can fix this by renaming a to area:
area = cv2.contourArea(cnt)
There is a typo: cv2.drawContour should be written cv2.drawContours.
The cv2.drawContours method expects a list of contours, so wrap the single contour in a list and call it like this:
cv2.drawContours(imgContour,[cnt],-1,(255,0,0),3)
When you passed the image to the getC method, you gave it the raw image without pre-processing it into a binary edge image with Canny, so you need to call it like this:
getC(imgC)
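(Side note, not part of the fix above: findContours works on any single-channel binary image, so a plain threshold would also do as pre-processing. A minimal sketch, with 127 as an arbitrary example cutoff you would need to tune:)

# Hypothetical alternative pre-processing: binarize the grayscale image
# instead of running Canny; 127 is just an example threshold value.
_, imgT = cv2.threshold(imgG, 127, 255, cv2.THRESH_BINARY)
getC(imgT)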
The Final Script
import cv2
import numpy as np

def getC(imagine):
    global imgContour
    print(imgContour.shape)
    # findContours expects a single-channel binary image (here: Canny edges)
    c,h = cv2.findContours(imagine,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    for cnt in c:
        area = cv2.contourArea(cnt)
        print(area)
        if area>500:
            # draw only contours larger than 500 px^2 onto the colour copy
            cv2.drawContours(imgContour,[cnt],-1,(255,0,0),3)
img = cv2.imread("./a3.jpg")
imgContour = img.copy()
imgG = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgB = cv2.GaussianBlur(imgG,(7,7),1)
imgC = cv2.Canny(imgB,50,50)
getC(imgC)
cv2.imshow("",img)
cv2.imshow("g",imgG)
cv2.imshow("b",imgB)
cv2.imshow("l",imgContour)
cv2.waitKey(0)
I need to reduce the noise in images like the one below, i.e. fill the holes in the white object. I tried something with OpenCV, but it ended up removing part of the object, as you can see. Is there a better way to do this without losing the object itself? Any help is appreciated!
Here's what I have so far:
import numpy as np
import cv2

def remove_noise(gray, num):
    Y, X = gray.shape
    # majority filter: replace each pixel with the most frequent value
    # (mode) found in its surrounding (2*num)-wide window
    nearest_neigbours = [[
        np.argmax(
            np.bincount(
                gray[max(i - num, 0):min(i + num, Y), max(j - num, 0):min(j + num, X)].ravel()))
        for j in range(X)] for i in range(Y)]
    result = np.array(nearest_neigbours, dtype=np.uint8)
    cv2.imwrite('result.png', result)
    return result

img = cv2.imread('img.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
remove_noise(gray, 10)
Input image:
Output image:
Following @JeruLuke's suggestion, I used cv.morphologyEx(img, cv.MORPH_CLOSE, kernel) and got the result I wanted with the following code snippet.
import cv2
import numpy as np
image = cv2.imread('image.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
kernel_size = (7, 7)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
closing = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel)
cv2.imwrite('result.png', closing)
Output image:
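A note beyond the snippet above: closing (dilation followed by erosion) fills dark holes inside white regions. If an image also carries small white specks outside the object, an opening pass (erosion followed by dilation) before the closing can remove those first. A minimal sketch under that assumption, reusing the same kernel:

import cv2

image = cv2.imread('image.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
# opening removes small white specks, closing then fills the dark holes
opened = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel)
cv2.imwrite('result_open_close.png', closed)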
After applying a mask to the original image:
import cv2
import dlib
import numpy as np

img = cv2.imread("Aayush.jpg")
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
msk = np.zeros_like(img_gray)

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

faces = detector(img_gray)
for face in faces:
    landmarks = predictor(img_gray, face)
    lp = []
    for n in range(0,68):
        x = landmarks.part(n).x
        y = landmarks.part(n).y
        lp.append((x,y))
        #cv2.circle(img, (x,y), 3, (0, 0, 255), -1)
    p = np.array(lp, np.int32)
    convexhull = cv2.convexHull(p)
    #cv2.polylines(img, [convexhull], True, (255,0,0), 3)
    cv2.fillConvexPoly(msk, convexhull, 255)

img1 = cv2.bitwise_and(img, img, mask = msk)
img1 contains a completely black image with the face cut out from img. I just require the pixel values of the face portion, not the complete image.
As the original image and mask have not been provided in the question itself, I am assuming a simple input image and a mask image with a circular cavity:
The mask here is a single-channel matrix with a value of 255 in the central cavity. To get the pixel info inside the cavity only, you can use the following NumPy operation:
pixel_info = original_image[mask == 255]
# You may need to convert the numpy array to Python list.
pixel_info_list = pixel_info.tolist()
Now you may serialize the list to any format you want (CSV in this case).
Full code:
import cv2
import numpy as np
original_image = cv2.imread("/path/to/lena.png")
mask = np.zeros(original_image.shape[:2], dtype=original_image.dtype)
mask = cv2.circle(mask, (256, 256), 100, [255], -1)
pixel_info = original_image[mask == 255]
pixel_info_list = pixel_info.tolist()
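Since the answer mentions CSV, here is a minimal sketch of that serialization step (the result.csv filename is just an example), writing one B, G, R row per masked pixel:

import csv

with open("result.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["B", "G", "R"])  # OpenCV stores pixels in BGR order
    writer.writerows(pixel_info_list)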
I'm trying to add an image that was processed by scikit-image and scipy to a tkinter GUI. To add it to the canvas, it needs to be either saved as a PNG or converted to a PIL image. However, when I try to use PIL's Image.fromarray(), it distorts the image a lot. I would prefer not to save it as a PNG, because it's just an intermediate step for generating data labels.
I tried checking the shapes of the arrays, and they're the same. I tried printing out the images: filled_objects is the correct image, while im is distorted. So it's not a problem in the Tkinter GUI. Also, if I don't use np.asarray(), it produces the same output.
def generateCanny(imageName):
    # imageName should be a path to the image, created with os.path.join
    img = skimage.io.imread(imageName)
    print('orig {}'.format(img.shape))
    # converts the image to grayscale
    gray = np.sqrt((img*img).sum(-1))
    edges = skimage.feature.canny(gray, sigma=3)
    fill = scipy.ndimage.binary_fill_holes(edges)
    return fill

imageName = os.path.join(imagePath, imageStr)
filled_objects = generateCanny(imageName)
a = np.asarray(filled_objects)
im = PIL.Image.fromarray(a)
Here are the two images; im is on the left and filled_objects is on the right:
I would think that you could just convert it easily because filled_objects is just an array, but Image.fromarray() must be doing some processing.
The problem is that fromarray isn't interpreting the boolean array a correctly. If you convert a back to RGB with:
# Extend the array into 3 dimensions, repeating the data:
a = np.repeat(a[...,None],3,axis=2).astype(np.uint8)
# Scale to 0-255:
a = 255*a
im = PIL.Image.fromarray(a)
then im.show() will display the correct image.
Converting the result to NumPy's uint8 will do the trick:
from skimage import data, color, feature, util
import tkinter as tk
import numpy as np
from PIL import ImageTk, Image
from scipy.ndimage import binary_fill_holes
rgb = data.hubble_deep_field()
gray = color.rgb2grey(rgb)
edges = feature.canny(gray, sigma=3)
filled_objects = binary_fill_holes(edges)
img_bool = Image.fromarray(filled_objects)
img_uint8 = Image.fromarray(util.img_as_ubyte(filled_objects))
root = tk.Tk()
photo_bool = ImageTk.PhotoImage(img_bool)
photo_uint8 = ImageTk.PhotoImage(img_uint8)
label_bool = tk.Label(root, image=photo_bool).grid(row=1, column=1)
label_uint8 = tk.Label(root, image=photo_uint8).grid(row=1, column=2)
root.mainloop()
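One caveat worth adding (general Tkinter behaviour, not something from the answer above): if the PhotoImage is created inside a function, keep a reference to it on the widget, otherwise it gets garbage-collected and the label shows up blank:

label = tk.Label(root, image=photo_uint8)
label.image = photo_uint8  # keep a reference so the image isn't garbage-collected
label.grid(row=1, column=2)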
I'm trying to display the image located here: http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?ra=118.70299999999999&dec=45.721000000000004&width=10&height=10&scale=0.6
The image looks like this:
I use this code:
import matplotlib.pyplot as plt
import numpy as np
import urllib.request
import cv2

url = 'http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?ra=118.70299999999999&dec=45.721000000000004&width=10&height=10&scale=0.6'

def url_to_image(url):
    resp = urllib.request.urlopen(url)
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    return image

img = url_to_image(url)
plt.imshow(img)
And it displays this:
Where all colours are too blue. I have tried various values in place of cv2.IMREAD_COLOR, found in the manual, on Stack Overflow, or elsewhere on the net, like -1, 0, 1, cv2.COLOR_BGR2RGB, ... but I haven't been able to get the right colours. I have tried cv2.COLOR_BGR2GRAY; it didn't even show in grayscale. I even tried this answer, but cv2.CV_LOAD_IMAGE_COLOR doesn't seem to exist anymore...
Is there a correct cv2.imdecode() flag, or a special colormap for plt.imshow(), that would give me the original colours?
Thanks to Mark Setchell, it now works. I quote him:
matplotlib requires RGB ordering whereas OpenCV (perversely) uses BGR
Therefore, the correct code is
def url_to_image(url):
    resp = urllib.request.urlopen(url)
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    imageBGR = cv2.imdecode(image, cv2.IMREAD_COLOR)
    # OpenCV decodes to BGR; swap to RGB before handing off to matplotlib
    imageRGB = cv2.cvtColor(imageBGR, cv2.COLOR_BGR2RGB)
    return imageRGB
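For completeness, a short usage sketch of the corrected function (same URL as in the question):

img = url_to_image(url)
plt.imshow(img)  # matplotlib now receives RGB data and renders the true colours
plt.show()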
I have the images as below; I need to extract just the white strip portion from all the images.
I have tried using PIL to extract the rectangular portion by manually specifying the pixel values. Is there an automated way to get this done, where just feeding in the image gives back the rectangular portion?
Below is my code snippet:
from PIL import Image
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = Image.open('C:/Users/ShAgarwal/Documents/image_dataset/pic9.jpg')
half_the_width = img.size[0] / 2
half_the_height = img.size[1] / 2
img4 = img.crop(
    (
        half_the_width - 1632,
        half_the_height - 440,
        half_the_width + 1632,
        half_the_height + 80
    )
)
Sample image:
import cv2
import numpy as np
from matplotlib import pyplot as plt
image='IMG_3134.JPG'
# read image
imgc = cv2.imread(image)
img = cv2.resize(imgc, None, fx=0.25, fy=0.25) # resize since image is huge
#cropping the strip dimensions
#crop_img = img[1010:1650,140:1099723]
blurred = cv2.blur(img, (3,3))
canny = cv2.Canny(blurred, 50, 200)
Marking coordinates through automatic image detection using Canny's algorithm:
## find the non-zero min-max coords of canny
pts = np.argwhere(canny > 0)
y1, x1 = pts.min(axis=0)
y2, x2 = pts.max(axis=0)

## crop the region
cropped = img[y1:y2, x1:x2]
cv2.imwrite("cropped.png", cropped)

# Select the bounded area around the white boundary
tagged = cv2.rectangle(img.copy(), (x1, y1), (x2, y2), (0, 255, 0), 3, cv2.LINE_AA)
r = cv2.selectROI(tagged)
imCrop = img[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]

# Bounded area
cv2.imwrite("taggd2.png", imCrop)
cv2.waitKey()
Results from the above code:
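As a side note (an alternative to the argwhere min/max step above, not part of the original answer): OpenCV can produce the same bounding box directly from the edge pixels:

## alternative: bounding box straight from the Canny edge pixels
pts = cv2.findNonZero(canny)
x, y, w, h = cv2.boundingRect(pts)
cropped = img[y:y+h, x:x+w]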