I want to draw an ellipse contour around the figure appended below. I am not getting the correct result, since the figure consists of two lines.
I have tried the following:
1. Read the image.
2. Convert BGR to HSV.
3. Define the range of the color blue.
4. Create an inRange mask to capture the values between the lower and upper blue bounds.
5. Find the contours and draw the fitted ellipse.
Here is the source code:
import cv2
import numpy as np

image = cv2.imread('./source image.jpg')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

# Range of blue in HSV
lower_blue = np.array([75, 0, 0])
upper_blue = np.array([105, 255, 255])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
res = cv2.bitwise_and(image, image, mask=mask)

# OpenCV 3.x returns three values here; the original passed an undefined
# variable `close` to findContours, the inRange mask is what should be searched
_, contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

# Fit an ellipse to the largest contour and draw it
ellipse = cv2.fitEllipse(max(contours, key=cv2.contourArea))
cv2.ellipse(image, ellipse, (0, 255, 0), 2)

cv2.imshow('mask', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
The images below show the expected and actual output.
[Image: expected and actual output]
[Image: source image]
[File: output contour array]
I tried to run your code in C++ and added erosion, dilation, and a convex hull over the resulting contours:
auto DetectEllipse = [](cv::Mat rgbImg, cv::Mat hsvImg, cv::Scalar fromColor, cv::Scalar toColor)
{
    cv::Mat threshImg;
    cv::inRange(hsvImg, fromColor, toColor, threshImg);

    // Clean up the mask: erode, then dilate (a morphological opening)
    cv::erode(threshImg, threshImg, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)), cv::Point(-1, -1), 2);
    cv::dilate(threshImg, threshImg, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)), cv::Point(-1, -1), 2);

    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(threshImg, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);

    // Merge the points of all sufficiently large contours into one point set
    int areaThreshold = (rgbImg.cols * rgbImg.rows) / 100;
    std::vector<cv::Point> allContours;
    allContours.reserve(10 * areaThreshold);
    for (size_t i = 0; i < contours.size(); i++)
    {
        if (contours[i].size() > 4)
        {
            auto area = cv::contourArea(contours[i]);
            if (area > areaThreshold)
            {
                allContours.insert(allContours.end(), contours[i].begin(), contours[i].end());
            }
        }
    }

    // Fit the ellipse to the convex hull of the merged points
    if (allContours.size() > 4)
    {
        std::vector<cv::Point> hull;
        cv::convexHull(allContours, hull, false);
        cv::ellipse(rgbImg, cv::fitEllipse(hull), cv::Scalar(255, 0, 255), 2);
    }
};
cv::Mat rgbImg = cv::imread("h8gx3.jpg", cv::IMREAD_COLOR);
cv::Mat hsvImg;
cv::cvtColor(rgbImg, hsvImg, cv::COLOR_BGR2HSV);
DetectEllipse(rgbImg, hsvImg, cv::Scalar(75, 0, 0), cv::Scalar(105, 255, 255));
DetectEllipse(rgbImg, hsvImg, cv::Scalar(10, 100, 20), cv::Scalar(25, 255, 255));
cv::imshow("rgbImg", rgbImg);
cv::waitKey(0);
The result looks correct.
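For reference, the same idea can be sketched back in Python (an untested sketch; the blue and orange HSV ranges and the file name are taken from the question and the C++ code above):

import cv2
import numpy as np

def detect_ellipse(rgb_img, hsv_img, from_color, to_color):
    thresh = cv2.inRange(hsv_img, from_color, to_color)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    thresh = cv2.erode(thresh, kernel, iterations=2)
    thresh = cv2.dilate(thresh, kernel, iterations=2)
    # [-2] picks the contour list under both OpenCV 3.x and 4.x
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
    area_threshold = rgb_img.shape[0] * rgb_img.shape[1] / 100
    points = [c.reshape(-1, 2) for c in contours
              if len(c) > 4 and cv2.contourArea(c) > area_threshold]
    if points:
        # Merge all large contours, take the hull, and fit the ellipse to it
        hull = cv2.convexHull(np.vstack(points))
        if len(hull) > 4:
            cv2.ellipse(rgb_img, cv2.fitEllipse(hull), (255, 0, 255), 2)

image = cv2.imread('./source image.jpg')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
detect_ellipse(image, hsv, np.array([75, 0, 0]), np.array([105, 255, 255]))
detect_ellipse(image, hsv, np.array([10, 100, 20]), np.array([25, 255, 255]))
cv2.imshow('result', image)
cv2.waitKey(0)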
I am trying to detect the orange beads in the image below.
To detect them, I first cropped the area from the original image and then set low and high HSV values to detect orange. This seems to be working fine. Below is the detected image:
Below is the code:
import cv2
import numpy as np

win_name = "Image"
cv2.namedWindow(win_name)

img = cv2.imread('image.png')
orangeImg = img[420:510, 457:953]
hsv = cv2.cvtColor(orangeImg, cv2.COLOR_BGR2HSV)
lower_bound = np.array([0, 80, 80])
upper_bound = np.array([20, 255, 255])
origMask = cv2.inRange(hsv, lower_bound, upper_bound)

contours, hierarchy = cv2.findContours(origMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Iterate each contour once; the original nested loops shadowed `c`
# and drew the same bounding box once per contour in the list
for c in contours:
    if cv2.contourArea(c) >= 20.0:
        rectX, rectY, rectWidth, rectHeight = cv2.boundingRect(c)
        color = (0, 0, 255)
        cv2.rectangle(orangeImg, (rectX, rectY), (rectX + rectWidth, rectY + rectHeight), color, 2)

cv2.imshow(win_name, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
In the output image, you can see that there is still some noise around the created bounding boxes. Is there a better way to reduce this noise? Also, is there a way to count the detected contours in the image?
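One common way to reduce that kind of noise is a morphological opening on the mask before finding contours; counting is then just the length of the filtered contour list. A minimal sketch (the crop, HSV bounds, and area threshold are taken from the code above; kernel size is a tuning assumption):

import cv2
import numpy as np

img = cv2.imread('image.png')
orangeImg = img[420:510, 457:953]
hsv = cv2.cvtColor(orangeImg, cv2.COLOR_BGR2HSV)
origMask = cv2.inRange(hsv, np.array([0, 80, 80]), np.array([20, 255, 255]))

# Opening (erosion followed by dilation) removes small speckles from the mask
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
cleanMask = cv2.morphologyEx(origMask, cv2.MORPH_OPEN, kernel)

contours, hierarchy = cv2.findContours(cleanMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
detected = [c for c in contours if cv2.contourArea(c) >= 20.0]
print('detected contours:', len(detected))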
I tried to remove the black color from the image, but when I do, the text is removed as well.
import cv2

img = cv2.imread('test.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)

thresh1 = cv2.threshold(s, 100, 255, cv2.THRESH_BINARY)[1]   # saturated (colored) pixels
thresh2 = cv2.threshold(v, 150, 255, cv2.THRESH_BINARY)[1]
thresh2 = 255 - thresh2                                      # dark pixels

# cv2.add saturates at 255, so the combined mask only holds 0 or 255;
# the original test `mask == 128` could never match
mask = cv2.add(thresh1, thresh2)
result = img.copy()
result[mask == 255] = (255, 255, 255)
cv2.imwrite('clear.png', result)
Please give me some ideas for extracting the text from that image.
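One idea, sketched below under a big assumption (that the text strokes are small connected components while the black background forms one large blob): threshold with Otsu, then keep only character-sized components and drop everything else.

import cv2
import numpy as np

img = cv2.imread('test.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Otsu picks the threshold automatically; INV makes dark strokes white
binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

# Keep components of plausible character size; drop the large background blob
n, labels, stats, centroids = cv2.connectedComponentsWithStats(binary)
result = np.full_like(gray, 255)
for i in range(1, n):  # label 0 is the background
    if stats[i, cv2.CC_STAT_AREA] < 5000:  # tuning value, an assumption
        result[labels == i] = 0
cv2.imwrite('text_only.png', result)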
Let's say I have two images, and the artery (the red arrow) is in about the same position in both:
Now I need to show the two images in one figure, and I use vtkImageBlend for this purpose. My code is:
import vtk
img1 = vtk.vtkDICOMImageReader()
img1.SetFileName('C:\\Users\\MLoong\\Desktop\\dicom_data\\Chang Cheng\\TOF\\IM_0198')
img1.Update()
print('img1: ', img1.GetOutput().GetSpacing())
print('img1: ', img1.GetOutput().GetExtent())
img2 = vtk.vtkDICOMImageReader()
img2.SetFileName('C:\\Users\\MLoong\\Desktop\\dicom_data\\Chang Cheng\\SNAP\\IM_0502')
img2.Update()
print('img2: ', img2.GetOutput().GetSpacing())
print('img2: ', img2.GetOutput().GetExtent())
image_blender = vtk.vtkImageBlend()
image_blender.AddInputConnection(img1.GetOutputPort())
image_blender.AddInputConnection(img2.GetOutputPort())
image_blender.SetOpacity(0, 0.1)
image_blender.SetOpacity(1, 0.9)
image_blender.Update()
imageActor = vtk.vtkImageActor()
windowLevel = vtk.vtkImageMapToWindowLevelColors()
imageActor.GetMapper().SetInputConnection(windowLevel.GetOutputPort())
ren = vtk.vtkRenderer()
ren.AddActor(imageActor)
ren.SetBackground(0.1, 0.2, 0.4)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(400, 400)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
windowLevel.SetInputData(image_blender.GetOutput())
windowLevel.Update()
renWin.Render()
iren.Start()
And the result is:
In the figure above, img2 covers only about half of img1.
However, the printed information is:
img1: (0.3571428656578064, 0.3571428656578064, 1.399999976158142)
img1: (0, 559, 0, 559, 0, 0)
img2: (0.5, 0.5, 1.0)
img2: (0, 319, 0, 319, 0, 0)
For img1, the extent is 560 x 560 and the spacing is (0.357, 0.357), so the FOV is 0.357 * 560 ≈ 200 mm, while the FOV of img2 is 0.5 * 320 = 160 mm. Thus, I think the blended figure may be wrong.
Moreover, RadiAnt also provides a fusion figure:
In the RadiAnt fusion figure, the arteries of the two images overlap, which is what I want.
Is there anything wrong with my vtkImageBlend code?
From the Detailed Description of vtkImageBlend:
"blend images together using alpha or opacity
vtkImageBlend takes L, LA, RGB, or RGBA images as input and blends them according to the alpha values and/or the opacity setting for each input.
The spacing, origin, extent, and number of components of the output are the same as those for the first input."
So you need to make sure both inputs have the same spacing, extent, and origin.
One way to solve it is to create two new vtkImageData objects with the same properties and copy your existing data into the new images.
Here is how I handle the extent (C#):
private static void AdjustImage(ref vtkImageData oldImage, int[] newDimensions)
{
    vtkImageData newImage = new vtkImageData();
    vtkInformation info = new vtkInformation();
    int[] oldDimensions = oldImage.GetDimensions();
    double[] spacing = oldImage.GetSpacing();

    newImage.SetDimensions(newDimensions[0], newDimensions[1], newDimensions[2]);
    vtkImageData.SetScalarType(4 /* VTK_SHORT */, info);
    vtkImageData.SetNumberOfScalarComponents(1, info); // one scalar component per voxel
    newImage.AllocateScalars(info); // allocate the voxel buffer; all values default to 0
    newImage.SetSpacing(spacing[0], spacing[1], spacing[2]);

    vtkImageData data = oldImage;
    Parallel.For(0, newDimensions[0], i =>
    {
        if (i < oldDimensions[0])
        {
            Parallel.For(0, newDimensions[1], j =>
            {
                if (j < oldDimensions[1])
                {
                    Parallel.For(0, newDimensions[2], k =>
                    {
                        if (k < oldDimensions[2])
                        {
                            // Copy the voxel (the z index is flipped on both sides)
                            newImage.SetScalarComponentFromDouble(i, j,
                                newDimensions[2] - 1 - k, 0,
                                data.GetScalarComponentAsDouble(i, j,
                                    oldDimensions[2] - 1 - k, 0));
                        }
                        else
                        {
                            SetImageToDefault(newImage, newDimensions, i, j, k);
                        }
                    });
                }
                else
                {
                    SetImageToDefault(newImage, newDimensions, i, j);
                }
            });
        }
        else
        {
            SetImageToDefault(newImage, newDimensions, i);
        }
    });

    oldImage.Dispose();
    oldImage = newImage;
}
private static void SetImageToDefault(vtkImageData img, int[] imageDimensions, int i, int j, int k)
{
    const double transparentHu = -1000; // air in Hounsfield units
    img.SetScalarComponentFromDouble(i, j, imageDimensions[2] - 1 - k, 0, transparentHu);
}

// The two- and one-index overloads used above were not shown in the original;
// these reconstructions simply fill the remaining dimensions with the default.
private static void SetImageToDefault(vtkImageData img, int[] imageDimensions, int i, int j)
{
    for (int k = 0; k < imageDimensions[2]; k++)
        SetImageToDefault(img, imageDimensions, i, j, k);
}

private static void SetImageToDefault(vtkImageData img, int[] imageDimensions, int i)
{
    for (int j = 0; j < imageDimensions[1]; j++)
        SetImageToDefault(img, imageDimensions, i, j);
}
Afterwards, you will need to translate the second image by the difference between the two images' origins.
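In Python (to match the question's code), the resampling can also be done in one step with vtkImageReslice, which can copy the spacing, origin, and extent of a reference image. A minimal sketch, assuming the img1 and img2 readers from the question:

import vtk

# Resample img2 onto img1's grid so both blend inputs match
reslice = vtk.vtkImageReslice()
reslice.SetInputConnection(img2.GetOutputPort())
reslice.SetInformationInput(img1.GetOutput())  # take spacing/origin/extent from img1
reslice.SetInterpolationModeToLinear()
reslice.Update()

image_blender = vtk.vtkImageBlend()
image_blender.AddInputConnection(img1.GetOutputPort())
image_blender.AddInputConnection(reslice.GetOutputPort())
image_blender.SetOpacity(0, 0.1)
image_blender.SetOpacity(1, 0.9)
image_blender.Update()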
In my application, I use Hough line detection to detect lines inside an image. What I'm trying to do is retrieve only the lines that compose the borders and corners of each square of the chessboard. How can I apply filters to obtain a clear view of the lines?
My idea is to filter the lines by the angle between them (90 degrees) or by distance, to keep only the lines that count; a sketch of this filtering follows the code below. The final goal is to obtain the intersections between these lines to get the coordinates of each square.
Code:
import math
import cv2
import numpy as np

chessBoard = cv2.imread('img.png')
gray = cv2.cvtColor(chessBoard, cv2.COLOR_BGR2GRAY)
dst = cv2.Canny(gray, 50, 200)
lines = cv2.HoughLines(dst, 1, math.pi / 180.0, 100, np.array([]), 0, 0)

for i in range(lines.shape[0]):
    rho = lines[i][0][0]
    theta = lines[i][0][1]
    a = math.cos(theta)
    b = math.sin(theta)
    x0, y0 = a * rho, b * rho
    # Draw a long segment through (x0, y0), perpendicular to the line normal
    pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))
    pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))
    cv2.line(chessBoard, pt1, pt2, (0, 255, 0), 2, cv2.LINE_AA)
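Here is a sketch of the filtering idea described above (an illustration operating on the `lines` array from the code, not tested on the actual image): keep only near-vertical and near-horizontal lines, then solve the 2x2 system x*cos(theta) + y*sin(theta) = rho for each horizontal/vertical pair to get the corner coordinates.

import math
import numpy as np

def filter_lines(lines, tol=math.radians(5)):
    """Split Hough lines into near-horizontal and near-vertical sets."""
    horizontal, vertical = [], []
    for line in lines:
        rho, theta = line[0]
        if theta < tol or theta > math.pi - tol:
            vertical.append((rho, theta))      # theta ~ 0: vertical line
        elif abs(theta - math.pi / 2) < tol:
            horizontal.append((rho, theta))    # theta ~ 90 deg: horizontal line
    return horizontal, vertical

def intersection(line1, line2):
    """Solve x*cos(t) + y*sin(t) = rho for the two lines."""
    rho1, theta1 = line1
    rho2, theta2 = line2
    A = np.array([[math.cos(theta1), math.sin(theta1)],
                  [math.cos(theta2), math.sin(theta2)]])
    b = np.array([rho1, rho2])
    x, y = np.linalg.solve(A, b)
    return int(round(x)), int(round(y))

horizontal, vertical = filter_lines(lines)
corners = [intersection(h, v) for h in horizontal for v in vertical]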
I ran into a problem: I cannot draw lines on the image where the color was detected, and I also cannot find the distance to that place. Please help me make it look like the image below:
My code:
import cv2
import numpy as np
from PIL import ImageGrab

while True:
    screen = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
    # Note: ImageGrab returns RGB, so this swap actually yields BGR order
    rgb_screen = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
    lower = np.array([72, 160, 160])
    upper = np.array([112, 249, 249])
    mask = cv2.inRange(rgb_screen, lower, upper)
    output = cv2.bitwise_and(rgb_screen, rgb_screen, mask=mask)
    cv2.imshow('window', output)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
I can't help much because I do not have your original image, but you could read my code and maybe get an idea. As for distance, I do not know what you mean, so I made an example of how to get the distance from the top-left corner to the bottom-left one. You can apply other points, or use it as a ratio, depending on your needs.
import cv2
import numpy as np

img = cv2.imread('untitled.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)

# OpenCV 3.x returns three values here
im, contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

# Largest contour and its minimum-area bounding rectangle
area = sorted(contours, key=cv2.contourArea, reverse=True)
c = area[0]
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
print(box)
box = np.int0(box)
cv2.drawContours(img, [box], 0, (0, 0, 255), 2)

# Extreme points of the contour (not used further below)
extreme_left = tuple(c[c[:, :, 0].argmin()][0])
extreme_top = tuple(c[c[:, :, 1].argmin()][0])

# Distance between two corners of the box (top-left to bottom-left)
x1, y1 = box[1, 0], box[1, 1]
x2, y2 = box[0, 0], box[0, 1]
distance = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)

font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'Distance: ' + str(distance), (1, 300), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
cv2.circle(img, (x2, y2), 5, (255, 0, 0), -1)
cv2.circle(img, (x1, y1), 5, (255, 0, 0), -1)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()