I am working on a project that predicts whether an MRI image contains a tumor. The next step is to draw a bounding rectangle around the tumor. I was able to extract the tumor from the MRI; now I want to find the opposite corners of the rectangle that bounds the tumor in the original figure.
EDIT:
For some of the MRI images I cannot separate the tumor from the MRI. I calculated the threshold using Otsu's method separately, but it is not working properly.
Thank you!
Computing threshold:
path=r"ImageProc\Y54.jpg"
img = cv.imread(path,0)
blur = cv.GaussianBlur(img,(5,5),0)
# find normalized_histogram, and its cumulative distribution function
hist = cv.calcHist([blur],[0],None,[256],[0,256])
hist_norm = hist.ravel()/hist.sum()
Q = hist_norm.cumsum()
bins = np.arange(256)
fn_min = np.inf
thresh = -1
for i in range(1,256):
p1,p2 = np.hsplit(hist_norm,[i]) # probabilities
q1,q2 = Q[i],Q[255]-Q[i] # cum sum of classes
if q1 < 1.e-6 or q2 < 1.e-6:
continue
b1,b2 = np.hsplit(bins,[i]) # weights
# finding means and variances
m1,m2 = np.sum(p1*b1)/q1, np.sum(p2*b2)/q2
v1,v2 = np.sum(((b1-m1)**2)*p1)/q1,np.sum(((b2-m2)**2)*p2)/q2
# calculates the minimization function
fn = v1*q1 + v2*q2
if fn < fn_min:
fn_min = fn
thresh = i
# find otsu's threshold value with OpenCV function
ret, otsu = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
print( "{} {}".format(thresh,ret) )
My progress so far through the code is:
import cv2
import matplotlib.pyplot as plt
def show_image(title, image):
    cv2.imshow(title, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def show_image_plt(title, image, cmap=None):
    plt.figure(title)
    plt.imshow(image, cmap=cmap)
    plt.axis('off')
    plt.show()
def cvt_image_colorspace(image, colorspace=cv2.COLOR_BGR2GRAY):
    return cv2.cvtColor(image, colorspace)

def median_filtering(image, kernel_size=3):
    '''
    :param image: grayscale image
    :param kernel_size: kernel size, should be an odd number
    :return: blurred image
    '''
    return cv2.medianBlur(image, kernel_size)
def apply_threshold(image, **kwargs):
    '''
    :param image: image object
    :param kwargs: threshold parameters - dictionary
    :return: thresholded image
    '''
    threshold_method = kwargs['threshold_method']
    max_value = kwargs['pixel_value']
    threshold_flag = kwargs.get('threshold_flag', None)
    if threshold_flag is not None:
        # note: cv2.adaptiveThreshold returns only the thresholded image, not a (ret, image) pair
        thresh1 = cv2.adaptiveThreshold(image, max_value, threshold_method, cv2.THRESH_BINARY,
                                        kwargs['block_size'], kwargs['const'])
    else:
        ret, thresh1 = cv2.threshold(image, kwargs['threshold'], max_value, threshold_method)
    return thresh1
def sobel_filter(img, x, y, kernel_size=3):
    return cv2.Sobel(img, cv2.CV_8U, x, y, ksize=kernel_size)
path=r"Imageproc\Y54.jpg"
image = cv2.imread(path, 1)
show_image('Original image', image)
#Step one - grayscale the image
grayscale_img = cvt_image_colorspace(image)
#show_image('Grayscaled image', grayscale_img)
#Step two - filter out image
median_filtered = median_filtering(grayscale_img,5)
#show_image('Median filtered', median_filtered)
#testing threshold function
bin_image = apply_threshold(median_filtered, **{"threshold" : 93,
"pixel_value" : 255,
"threshold_method" : cv2.THRESH_BINARY})
otsu_image = apply_threshold(median_filtered, **{"threshold" : 93,
"pixel_value" : 255,
"threshold_method" : cv2.THRESH_BINARY +
cv2.THRESH_OTSU})
#Step 3a - apply Sobel filter
img_sobelx = sobel_filter(median_filtered, 1, 0)
img_sobely = sobel_filter(median_filtered, 0, 1)
# Add the Sobel gradients back onto the grayscale image
# (note: '+' on uint8 arrays wraps on overflow; cv2.add would saturate instead)
img_sobel = img_sobelx + img_sobely + grayscale_img
#show_image('Sobel filter applied', img_sobel)
#Step 4 - apply threshold
# Set threshold and maxValue
threshold = 160
maxValue = 255
# Threshold the pixel values
thresh = apply_threshold(img_sobel, **{"threshold" : 93,
"pixel_value" : 255,
"threshold_method" : cv2.THRESH_BINARY})
#show_image("Thresholded", thresh)
#Step 3b - apply erosion + dilation
#apply erosion and dilation to show only the part of the image having more intensity - tumor region
#that we want to extract
kernel=cv2.getStructuringElement(cv2.MORPH_RECT,(9,9))
erosion = cv2.morphologyEx(median_filtered, cv2.MORPH_ERODE, kernel)
#show_image('Eroded image', erosion)
dilation = cv2.morphologyEx(erosion, cv2.MORPH_DILATE, kernel)
#show_image('Dilatated image', dilation)
#Step 4 - apply thresholding
threshold = 160
maxValue = 255
# apply thresholding
new_thresholding = apply_threshold(dilation, **{"threshold" : 93,
"pixel_value" : 255,
"threshold_method" : cv2.THRESH_BINARY})
show_image('Threshold image after erosion + dilation', new_thresholding)
The output image for the given MRI is:
I think the best way is to find where the pixels are not black:
pts = np.argwhere(new_thresholding>0)
y1,x1 = pts.min(axis=0)
y2,x2 = pts.max(axis=0)
new_thresholding_rect= cv2.rectangle(new_thresholding,(x1,y1),(x2,y2),(255,0,0),2)
show_image('Threshold image after erosion + dilation + Rectangle',new_thresholding_rect)
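Alternatively, cv2.findNonZero and cv2.boundingRect compute the same corners directly; a minimal sketch, assuming the same new_thresholding mask as above (before the rectangle is drawn on it):

# bounding box of all non-zero pixels via OpenCV
pts = cv2.findNonZero(new_thresholding)
x, y, w, h = cv2.boundingRect(pts)
new_thresholding_rect = cv2.rectangle(new_thresholding, (x, y), (x + w, y + h), (255, 0, 0), 2)

The same (x, y, w, h) can then be drawn on the original image to bound the tumor in the original figure.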
I am trying to do some dynamic template matching with cv2.matchTemplate. The template is:
and the image I am matching it to is:
I am adjusting the size and angle of the template, then grabbing maxVal and maxLoc from cv2.matchTemplate and storing them in a dict keyed by maxVal. I then take max(data.keys()) as the "best" match from result = cv2.matchTemplate(image, resized_template, cv2.TM_CCORR_NORMED). When I loop through all of the keys and plot them on the image, some of them find the Mickey head much better than max(data.keys()).
This is the region for "best" match:
#Find best match from scores
best_match = max(scores.keys())
#Output image
image_copy = color_image.copy()
#Create rectangle around best match
cv2.rectangle(image_copy, (data[best_match][1][0], data[best_match][1][1]), (data[best_match][1][0] + data[best_match][0][0], data[best_match][1][1] + data[best_match][0][1]), (255, 255, 255), 2)
plt.imshow(image_copy)
But this region appears to locate it much more accurately:
image_copy = color_image.copy()
for n in list(data.keys())[69:70:1]:
    #Create rectangle around best match
    cv2.rectangle(image_copy, (data[n][1][0], data[n][1][1]), (data[n][1][0] + data[n][0][0], data[n][1][1] + data[n][0][1]), (255, 255, 255), 2)
cv2.imwrite('output_1.jpg', image_copy)
plt.imshow(image_copy)
Am I misinterpreting what the maxVal from cv2.matchTemplate actually represents? If so, how can I select the actual "best" region based on some value or criteria? Here's the full code:
#Import packages
import numpy as np
import matplotlib.pyplot as plt
import cv2
import imutils
import os
#Import template (templates_dir is assumed to be defined elsewhere, pointing at the template folder)
template = cv2.imread(templates_dir + '\\' + os.listdir(templates_dir)[3])
gray_template = cv2.cvtColor(template, cv2.COLOR_RGB2GRAY)
#Import image (images_dir is assumed to be defined elsewhere as well)
image = cv2.imread(images_dir + '\\' + os.listdir(images_dir)[5])
color_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
#Image dims (note: OpenCV shape is (rows, cols), i.e. (height, width))
image_h = image.shape[1]
image_w = image.shape[0]
#Processing loop scaling the template
#Scores dict
scores = {}
#Data dict of structure (maxVal: (template dims, locs, angle))
data = {}
#Edge images
template = cv2.Canny(gray_template, 80, 200)
image = cv2.Canny(gray_image, 80, 200)
#Loop through different template scales
for scale in np.linspace(0.2, 1.5, 15):
    #Loop through different template rotations
    for angle in np.linspace(0, 360, 25)[:-1]:
        #Rotate template
        template_rotated = imutils.rotate(template, angle)
        #Resize template
        resized_template = imutils.resize(template_rotated, width = int(template_rotated.shape[0] * scale))
        #Dims of resized template
        template_h = resized_template.shape[1]
        template_w = resized_template.shape[0]
        #Break from loop if the template becomes bigger than the image
        if template_h > image_h or template_w > image_w:
            break
        #Run template through image
        result = cv2.matchTemplate(image, resized_template, cv2.TM_CCORR_NORMED)
        #Get matching score and location
        (_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)
        #Add correlation value and result to scores
        scores[str(maxVal)] = result
        #Add info to data
        data[str(maxVal)] = ((template_h, template_w), (maxLoc[0], maxLoc[1]), angle)
#Find best match from scores
best_match = max(scores.keys())
#Output image
image_copy = color_image.copy()
#Create rectangle around best match
cv2.rectangle(image_copy, (data[best_match][1][0], data[best_match][1][1]), (data[best_match][1][0] + data[best_match][0][0], data[best_match][1][1] + data[best_match][0][1]), (0, 0, 255), 2)
plt.imshow(image_copy)
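One possible culprit worth checking (hedged, since it may not be the whole story): the scores are stored with str(maxVal) as dictionary keys, so max(data.keys()) compares strings lexicographically, not numerically. A small score rendered in scientific notation, such as '9.8e-05', sorts above '0.95' as a string. A minimal sketch of the issue and a numeric selection instead:

#demonstration: string max is lexicographic, not numeric
print(max(['0.95', '9.8e-05']))             # '9.8e-05' -- not the best score
print(max(['0.95', '9.8e-05'], key=float))  # '0.95'    -- numeric comparison

#sketch: track the best match numerically inside the scale/angle loops instead
#best_val, best_data = -1.0, None
#if maxVal > best_val:
#    best_val = maxVal
#    best_data = ((template_h, template_w), maxLoc, angle)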
I am loading several images that will go over my face, and I am having difficulty getting an image to fit over the square created for the face. I have looked at many resources, but for some reason I receive an error when attempting to follow their method.
Every time I do so, I receive the error:
ValueError: could not broadcast input array from shape (334,334,3) into shape (234,234,3)
I think the images might be too large; however, I tried to resize them to see if they would fit, to no avail.
Here is my code:
import cv2
import sys
import logging as log
import datetime as dt
from time import sleep
import os
import random
from timeit import default_timer as timer
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
#log.basicConfig(filename='webcam.log',level=log.INFO)
video_capture = cv2.VideoCapture(0)
anterior = 0
#s_img = cv2.imread("my.jpg")
increment = 0
for filename in os.listdir("Faces/"):
if filename.endswith(".png"):
FullFile = (os.path.join("Faces/", filename))
#ret, frame = video_capture.read()
frame = cv2.imread(FullFile, cv2.IMREAD_UNCHANGED)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale( gray,scaleFactor=1.1, minNeighbors=5, minSize=(30, 30) )
edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
for (x, y, w, h) in faces:
roi_color = frame[y:( y ) + ( h ), x:x + w]
status = cv2.imwrite('export/faces_detected'+ str( increment ) +'.png', roi_color)
increment = increment + 1
else:
continue
masks = []
for filename in os.listdir("export/"):
    if filename.endswith(".png"):
        FullFile = (os.path.join("export/", filename))
        s_img = cv2.imread(FullFile)
        masks.append(s_img)
Start = timer()
End = timer()
MasksSize = len(masks)
nrand = random.randint(0, MasksSize -1 )
increment = 0
while True:
    if not video_capture.isOpened():
        print('Unable to load camera.')
        sleep(5)
        pass

    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )
    edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        if (End - Start) > 3:
            Start = timer()
            End = timer()
            nrand = random.randint(0, MasksSize - 1)
        # -75 and +20 added to fit my face
        cv2.rectangle(frame, (x, y - 75), (x + w, y + h + 20), (0, 255, 0), 2)
        s_img = masks[nrand]
        increment = increment + 1
        #maskresize = cv2.resize(s_img, (150, 150))
        #frame[y:y+s_img.shape[0], x:x+s_img.shape[1]] = s_img # problem occurs here with
        # ValueError: could not broadcast input array from shape (334,334,3) into shape (234,234,3)
        # I assume I am inserting something too big?
        End = timer()

    if anterior != len(faces):
        anterior = len(faces)
        #log.info("faces: "+str(len(faces))+" at "+str(dt.datetime.now()))

    # Display the resulting frame
    cv2.imshow('Video', frame)
    #cv2.imshow('Video', cartoon)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    # Display the resulting frame
    cv2.imshow('Video', frame)
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
In the following line,
frame[y:y+s_img.shape[0], x:x+s_img.shape[1]] = s_img
you are attempting to assign s_img to frame[y:y+s_img.shape[0], x:x+s_img.shape[1]], but the two have different shapes (for example, when the slice is clipped at the frame border).
You can check the shapes of the two by printing them (they will be the same as the shapes mentioned in the error).
Try resizing s_img to the same shape and then assign.
Refer to this link: https://www.geeksforgeeks.org/image-resizing-using-opencv-python/
I used this function to resize the image to scale.
def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    # initialize the dimensions of the image to be resized and
    # grab the image size
    dim = None
    (h, w) = image.shape[:2]

    # if both the width and height are None, then return the
    # original image
    if width is None and height is None:
        return image

    # check to see if the width is None
    if width is None:
        # calculate the ratio of the height and construct the
        # dimensions
        r = height / float(h)
        dim = (int(w * r), height)
    # otherwise, the height is None
    else:
        # calculate the ratio of the width and construct the
        # dimensions
        r = width / float(w)
        dim = (width, int(h * r))

    # resize the image
    resized = cv2.resize(image, dim, interpolation=inter)

    # return the resized image
    return resized
Then later on I called:
r= image_resize(s_img, height = h, width=w)
frame[y:y+r.shape[0] , x:x+r.shape[1]] = r
The answer is also taken from here:
Resize an image without distortion OpenCV
I am using the same code that is provided by the OpenCV tutorial. It was working a few weeks ago, but today when I try to run it, it says that the name gray is not defined! Can someone find the error?
import numpy as np
#import matplotlib.pyplot as plt
import cv2
import glob
import os
def draw(img, corners, imgpts):
    corner = tuple(corners[0].ravel())
    img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)
    img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)
    img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)
    return img
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((7*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:7].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
img_dir = "C:\\Hungary\\Biblography\\Rotating Solitary Wave\\My Work\\Final Work\\Experiment1111 \\Camera Calibration\\Image Processing\\chess"
data_path = os.path.join(img_dir,'*bmp')
images = glob.glob(data_path)
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (7,7), None)
    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
        imgpoints.append(corners2)
        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (7,7), corners2, ret)
        cv2.imshow('img', img)
        cv2.waitKey(500)
cv2.destroyAllWindows()
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print('Rotation Vector, or the Angles For Each Photo: ', rvecs, '\n')
R = cv2.Rodrigues(rvecs[0])
print('The Rotation Matrix is: ', R)
print('Translation Vector: ', tvecs, '\n')
print(mtx, '\n')
print('Distortion Coefficients ', dist, '\n')
img = cv2.imread('00000274.bmp')
h, w = img.shape[:2]
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))
print('Camera Matrix', newcameramtx, '\n')
# undistort
dst = cv2.undistort(img, mtx, dist) #, None, newcameramtx)
p = np.ones_like(dst)
# crop the image
x,y,w,h = roi
dst = dst[y:y+h, x:x+w]
# undistort
mapx,mapy = cv2.initUndistortRectifyMap(mtx,dist,None,newcameramtx,(w,h),5)
dst = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR)
# crop the image
x,y,w,h = roi
dst = dst[y:y+h, x:x+w]
cv2.imwrite('calibresult.png',dst)
mean_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
    mean_error += error
print("total error: ", mean_error/len(objpoints))
If you read the OpenCV documentation you will find that I made only small changes to the code, and it was working; but today it is raising this error about the name gray not being defined!
Check your path, and see whether images is an empty list. In that case, the for loop is never executed, and that loop is where the gray variable is defined.
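A minimal sketch of that check, assuming the same img_dir and glob pattern as in your code above:

data_path = os.path.join(img_dir, '*bmp')
images = glob.glob(data_path)
# if the path is wrong, glob returns an empty list and the
# calibration loop (which defines gray) never runs
if not images:
    raise FileNotFoundError("no .bmp images found under: " + img_dir)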
I'm trying to remove numbers which lie inside the circular part of an image. The numbers are black, and the background varies between red, yellow, blue and green.
I am using OpenCV to remove those numbers: I build a mask which extracts the numbers from the image, then try to remove them with cv2.inpaint.
For my further analysis I need a clean image, but my current approach gives a distorted image and the numbers are not completely removed.
I tried changing the threshold values; lowering them misses the numbers in dark shaded areas, such as on green and red.
import cv2
img = cv2.imread('scan_1.jpg')
mask = cv2.threshold(img,50,255,cv2.THRESH_BINARY_INV)[1][:,:,0]
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.destroyAllWindows()
dst = cv2.inpaint(img, mask, 5, cv2.INPAINT_TELEA)
cv2.imshow('dst',dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('ost_1.jpg',dst)
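One common tweak worth trying (a hedged suggestion, not a guaranteed fix): dilate the mask slightly before inpainting, so the inpainted region also covers the anti-aliased edges of the digits:

import cv2
import numpy as np

img = cv2.imread('scan_1.jpg')
mask = cv2.threshold(img, 50, 255, cv2.THRESH_BINARY_INV)[1][:, :, 0]
# grow the mask a little so inpainting covers the digits' soft edges too
mask = cv2.dilate(mask, np.ones((3, 3), np.uint8), iterations=1)
dst = cv2.inpaint(img, mask, 5, cv2.INPAINT_TELEA)
cv2.imwrite('ost_1.jpg', dst)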
Input images: a) scan_1.jpg
b) scan_2.jpg
Output images: a) ost_1.jpg
b) ost_2.jpg
Expected image: the circles can be ignored, but something similar to this is required.
Here is my attempt; a better/easier solution might be possible if you do not care about preserving the text outside of your circle.
import cv2
import numpy as np
# connectivity method used for finding connected components, 4 vs 8
CONNECTIVITY = 4
# HSV threshold for finding black pixels
H_THRESHOLD = 179
S_THRESHOLD = 255
V_THRESHOLD = 150
# read image
img = cv2.imread("a1.jpg")
img_height = img.shape[0]
img_width = img.shape[1]
# save a copy for creating resulting image
result = img.copy()
# convert image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# found the circle in the image
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.7, minDist= 100, param1 = 48, param2 = 100, minRadius=70, maxRadius=100)
# draw found circle, for visual only
circle_output = img.copy()
# check that we found exactly 1 circle
# (HoughCircles returns None when nothing is found, and an array of
#  shape (1, N, 3) otherwise, so count the circles along axis 1)
num_circles = 0 if circles is None else circles.shape[1]
print("Number of found circles:{}".format(num_circles))
if num_circles != 1:
    print("invalid number of circles found ({}), should be 1".format(num_circles))
    exit(0)
# save center position and radius of found circle
circle_x = 0
circle_y = 0
circle_radius = 0
if circles is not None:
    # convert the (x, y) coordinates and radius of the circles to integers
    circles = np.round(circles[0, :]).astype("int")
    for (x, y, radius) in circles:
        circle_x, circle_y, circle_radius = (x, y, radius)
        cv2.circle(circle_output, (circle_x, circle_y), circle_radius, (255, 0, 0), 4)
        print("circle center:({},{}), radius:{}".format(x, y, radius))
# keep a median filtered version of image, will be used later
median_filtered = cv2.medianBlur(img, 21)
# Convert BGR to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# define range of black color in HSV
lower_val = np.array([0,0,0])
upper_val = np.array([H_THRESHOLD,S_THRESHOLD,V_THRESHOLD])
# Threshold the HSV image to get only black colors
mask = cv2.inRange(hsv, lower_val, upper_val)
# find connected components
components = cv2.connectedComponentsWithStats(mask, CONNECTIVITY, cv2.CV_32S)
# apply median filtering to found components
#centers = components[3]
num_components = components[0]
print("Number of found connected components:{}".format(num_components))
labels = components[1]
stats = components[2]
for i in range(1, num_components):
    # pad the component's bounding box a little, clamped to the image bounds
    left = max(stats[i, cv2.CC_STAT_LEFT] - 10, 0)
    top = max(stats[i, cv2.CC_STAT_TOP] - 10, 0)
    width = stats[i, cv2.CC_STAT_WIDTH] + 10
    height = stats[i, cv2.CC_STAT_HEIGHT] + 10

    # iterate over each pixel and replace it if it lies inside the circle
    for row in range(top, min(top + height + 1, img_height)):
        for col in range(left, min(left + width + 1, img_width)):
            dx = col - circle_x
            dy = row - circle_y
            if dx*dx + dy*dy <= circle_radius * circle_radius:
                result[row, col] = median_filtered[row, col]
# smooth the image, may be necessary?
#result = cv2.blur(result, (3,3))
# display image(s)
cv2.imshow("img", img)
cv2.imshow("gray", gray)
cv2.imshow("found circle:", circle_output)
cv2.imshow("mask", mask)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
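As an aside, the per-pixel loops above can be replaced with a vectorized NumPy mask. A minimal sketch, reusing the same circle_x, circle_y, circle_radius, mask, median_filtered and result as above; note it replaces every masked pixel inside the circle rather than padded component bounding boxes, which I assume is acceptable here:

# distance-from-center grid over all pixel coordinates
ys, xs = np.ogrid[:img_height, :img_width]
inside_circle = (xs - circle_x) ** 2 + (ys - circle_y) ** 2 <= circle_radius ** 2

# replace the black (masked) pixels that fall inside the circle in one shot
replace = inside_circle & (mask > 0)
result[replace] = median_filtered[replace]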
Result for a1:
I am currently trying to get the contours of a collection of images, but cv2 is unable to return the full contours for various images, as shown below.
Hence, I am looking for a way to either approximate the moments of the list of contours, make cv2 return a single contour when calling findContours(...), or merge the returned list of contours into a single contour if possible.
My code (updated) currently consists of:
import cv2
import numpy as np

def find_if_close(cnt1, cnt2):
    row1, row2 = cnt1.shape[0], cnt2.shape[0]
    for i in range(row1):
        for j in range(row2):
            dist = np.linalg.norm(cnt1[i] - cnt2[j])
            if abs(dist) < 50:
                return True
            elif i == row1 - 1 and j == row2 - 1:
                return False
def thresh_callback(thresh, img, gray, blur):
    edges = cv2.Canny(blur, thresh, thresh*2)
    drawing = np.zeros(img.shape, np.uint8)  # Image to draw the contours
    # OpenCV 3.x returns (image, contours, hierarchy); in 4.x drop the first value
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    LENGTH = len(contours)
    status = np.zeros((LENGTH, 1))

    for i, cnt1 in enumerate(contours):
        x = i
        if i != LENGTH - 1:
            for j, cnt2 in enumerate(contours[i+1:]):
                x = x + 1
                dist = find_if_close(cnt1, cnt2)
                if dist == True:
                    val = min(status[i], status[x])
                    status[x] = status[i] = val
                else:
                    if status[x] == status[i]:
                        status[x] = i + 1

    unified = []
    maximum = int(status.max()) + 1
    for i in range(maximum):
        pos = np.where(status == i)[0]
        if pos.size != 0:
            cont = np.vstack([contours[i] for i in pos])
            unified.append(cont)

    cv2.drawContours(img, unified, -1, (0, 255, 0), 2)
    cv2.drawContours(drawing, unified, -1, 255, -1)
    cv2.imshow('output', img)
    cv2.imshow('input', drawing)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # the original code returned moments, cx, cy, count without defining them;
    # compute them from the merged contours so the function is runnable
    merged = np.vstack(unified)
    moments = cv2.moments(merged)
    cx = int(moments['m10'] / moments['m00'])
    cy = int(moments['m01'] / moments['m00'])
    count = len(unified)
    return moments, cx, cy, count
def alter_image(img, blur):
    ret, thresh1 = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY)
    bitwise = cv2.bitwise_not(thresh1)
    erosion = cv2.erode(bitwise, np.ones((2, 2), np.uint8), iterations=1)  #15
    dilation = cv2.dilate(erosion, np.ones((3, 3), np.uint8), iterations=1)  #45
    return dilation
imgs = cv2.imread('./images/'+str(num)+'.jpg')  # num is the image index, assumed defined elsewhere
img_grey = cv2.cvtColor(imgs,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(img_grey,(5,5),0)
imgs = alter_image(imgs, blur)
thresh = 255
max_thresh = 255
moments, cx, cy, count = thresh_callback(thresh, imgs, img_grey, blur)
As shown in the code, I tried to alter the image through changes such as erosion, dilation, and bitwise operations, but the results were still the same. The original image can be found below.
Image Output (Updated):
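A hedged suggestion (not verified on these particular images): morphologically closing the Canny edge map before findContours often bridges small gaps in an outline, so cv2 returns fewer, more complete contours; taking the convex hull of the merged points then yields a single closed contour. A minimal sketch:

import cv2
import numpy as np

img = cv2.imread('./images/1.jpg')  # example path, adjust as needed
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
edges = cv2.Canny(blur, 255, 255 * 2)

# bridge small gaps in the edge map so broken outlines reconnect
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
closed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)

# [-2:] keeps the last two return values, so this works on OpenCV 3.x and 4.x
contours, hierarchy = cv2.findContours(closed, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)[-2:]
# merge whatever remains into one contour via the convex hull
hull = cv2.convexHull(np.vstack(contours))
cv2.drawContours(img, [hull], -1, (0, 255, 0), 2)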