I have run into a problem: I cannot draw lines on the image where the color was detected, nor find the distance to that spot. Please help me make it look like the image below:
My code:
import cv2
import numpy as np
from PIL import ImageGrab
while True:
    screen = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
    rgb_screen = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
    lower = np.array([72, 160, 160])
    upper = np.array([112, 249, 249])
    mask = cv2.inRange(rgb_screen, lower, upper)
    output = cv2.bitwise_and(rgb_screen, rgb_screen, mask=mask)
    cv2.imshow('window', output)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
I can't help much because I do not have your original image, but you could read my code and maybe get an idea. As for distance, I am not sure what you mean, so I made an example of how to get the distance from the top-left corner to the bottom-left. You can apply it to other points, or use it as a ratio, depending on your needs.
import cv2
import numpy as np
img = cv2.imread('untitled.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(gray,150,255,cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]  # [-2:] works on both OpenCV 3.x and 4.x
area = sorted(contours, key=cv2.contourArea, reverse=True)
c = area[0]
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
print(box)
box = np.int0(box)
cv2.drawContours(img,[box],0,(0,0,255),2)
extreme_left = tuple(c[c[:, :, 0].argmin()][0])
extreme_top = tuple(c[c[:, :, 1].argmin()][0])
x1 = box[1,0]
y1 = box[1,1]
x2 = box[0,0]
y2 = box[0,1]
distance = np.sqrt( (x1 - x2)**2 + (y1 - y2)**2 )
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'Distance: '+str(distance),(1,300), font, 0.5,(255,255,255),2,cv2.LINE_AA)
cv2.circle(img, (x2,y2), 5, (255, 0, 0), -1)
cv2.circle(img, (x1,y1), 5, (255, 0, 0), -1)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
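If you want to apply this to the live screen capture from your question, here is a rough sketch (untested, since I do not have your setup; it assumes the same capture region and color bounds from your code). Each frame it finds the largest blob in the color mask, draws its rotated bounding box, and prints the length of one box edge in pixels:
import cv2
import numpy as np
from PIL import ImageGrab

while True:
    screen = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
    rgb_screen = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
    mask = cv2.inRange(rgb_screen, np.array([72, 160, 160]), np.array([112, 249, 249]))
    # [-2:] keeps this working on both OpenCV 3.x (3 return values) and 4.x (2 return values)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    if contours:
        c = max(contours, key=cv2.contourArea)  # largest detected blob
        box = cv2.boxPoints(cv2.minAreaRect(c)).astype(np.int32)
        cv2.drawContours(rgb_screen, [box], 0, (0, 0, 255), 2)
        distance = np.linalg.norm(box[0] - box[1])  # length of one box edge in pixels
        cv2.putText(rgb_screen, 'Distance: %.1f' % distance, (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow('window', rgb_screen)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break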
I have separate code for face detection and for the GrabCut algorithm. I want to run them as one: first detect the face and store the coordinates of the rectangle, then apply GrabCut to crop the face using that rectangle. Please help me.
import cv2
import numpy as np
#---loading haarcascade detector---
face_detector=cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
#---Loading the image-----
img = cv2.imread('/home/mongoose/Videos/Projects/backgroundremoval/detectface/1665410456980.JPEG')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
    padding = 50
    cv2.rectangle(img, (x - padding, y - padding), (x + w + padding, y + h + padding), (0, 255, 0), 2)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2
import numpy as np
drawn = False
startx, starty = -1, -1
rectangle = (0, 0, 0, 0)
def load_and_resize(path):
    image = cv2.imread(path)
    new_size = (800, 800)
    resized_image = cv2.resize(image, new_size, interpolation=cv2.INTER_AREA)
    return resized_image

def select_roi(event, newx, newy, flags, params):
    global startx, starty, drawn, rectangle
    if event == cv2.EVENT_LBUTTONDOWN:
        startx, starty = newx, newy
        cv2.circle(image, (startx, starty), 4, (255, 255, 120), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawn = True
        rectangle = (startx, starty, newx - startx, newy - starty)
        print("\nROI Selected Successfully")

def extract_foreground(image):
    global drawn
    cv2.namedWindow(winname='BG Subractor')
    cv2.setMouseCallback('BG Subractor', select_roi)
    print("\nSelect ROI from mouse pointer.")
    black_mask = np.zeros(image.shape[:2], np.uint8)
    background = np.zeros((1, 65), np.float64)
    foreground = np.zeros((1, 65), np.float64)
    while True:
        if drawn:
            print("\nPerforming Background Subtraction")
            cv2.grabCut(image, black_mask, rectangle, background, foreground, 50, cv2.GC_INIT_WITH_RECT)
            mask2 = np.where((black_mask == 2) | (black_mask == 0), 0, 1).astype('uint8')
            image = image * mask2[:, :, np.newaxis]
            drawn = False
            print("\nExtraction complete")
        cv2.imshow('BG Subractor', image)
        cv2.imwrite('/home/mongoose/Videos/Projects/backgroundremoval/bg_grabcut-20221014T120042Z-001/bg_grabcut/new/1.jpg', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
Please help me run the code by combining both scripts.
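Since I do not have your images, here is a minimal sketch of how the two scripts could be combined: the padded rectangle from the Haar detector is clamped to the image borders and passed straight to cv2.grabCut with GC_INIT_WITH_RECT, so no mouse-selected ROI is needed. The input path is a placeholder; substitute your own.
import cv2
import numpy as np

# --- face detection, as in the first script ---
face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
img = cv2.imread('input.jpg')  # placeholder path; use your own image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray, 1.3, 5)

for (x, y, w, h) in faces:
    padding = 50
    # clamp the padded rectangle so it stays inside the image
    x0, y0 = max(x - padding, 0), max(y - padding, 0)
    x1 = min(x + w + padding, img.shape[1] - 1)
    y1 = min(y + h + padding, img.shape[0] - 1)

    # --- GrabCut initialized with that rectangle, as in the second script ---
    rect = (x0, y0, x1 - x0, y1 - y0)  # (x, y, width, height)
    mask = np.zeros(img.shape[:2], np.uint8)
    background = np.zeros((1, 65), np.float64)
    foreground = np.zeros((1, 65), np.float64)
    cv2.grabCut(img, mask, rect, background, foreground, 5, cv2.GC_INIT_WITH_RECT)

    # keep only the pixels GrabCut marked as (probable) foreground
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    result = img * mask2[:, :, np.newaxis]
    cv2.imshow('Cropped face', result[y0:y1, x0:x1])

cv2.waitKey(0)
cv2.destroyAllWindows()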
I am trying to detect the orange beats in the image below.
To detect them, I first cropped the area from the original image and then set low and high HSV bounds to detect orange. This seems to be working fine. Below is the detected image:
Below is the code:
import cv2
import numpy as np
win_name = "Image"
cv2.namedWindow(win_name)
img = cv2.imread('image.png')
orangeImg = img[420:510, 457:953]
hsv = cv2.cvtColor(orangeImg, cv2.COLOR_BGR2HSV)
lower_bound = np.array([0, 80, 80])
upper_bound = np.array([20, 255, 255])
origMask = cv2.inRange(hsv, lower_bound, upper_bound)
contours, hierarchy = cv2.findContours(origMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    # filter each contour by its own area, then draw its bounding box
    area = cv2.contourArea(c)
    if area >= 20.0:
        rectX, rectY, rectWidth, rectHeight = cv2.boundingRect(c)
        color = (0, 0, 255)
        cv2.rectangle(orangeImg, (rectX, rectY), (rectX + rectWidth, rectY + rectHeight), color, 2)
cv2.imshow(win_name, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
In the output image, you can notice that there is still some noise around the bounding boxes. Is there a better way to reduce the noise? Also, is there a way to count the detected contours in the image?
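One way to approach both (a sketch, not tuned for your image; it reuses the origMask and orangeImg variables from the code above) is to clean the mask with a morphological opening before finding contours, then filter by area once and count the survivors:
# remove small speckles from the mask before contouring
kernel = np.ones((3, 3), np.uint8)
cleanMask = cv2.morphologyEx(origMask, cv2.MORPH_OPEN, kernel, iterations=1)

contours, hierarchy = cv2.findContours(cleanMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
detected = [c for c in contours if cv2.contourArea(c) >= 20.0]
print("Detected contours:", len(detected))  # count of the filtered detections

for c in detected:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(orangeImg, (x, y), (x + w, y + h), (0, 0, 255), 2)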
I am trying to display an image over another image at a particular co-ordinates. I have detected the aruco markers using the webcam and I want to display another image over the aruco marker. The aruco marker can be moved and the overlaying image should move along with the marker.
There are various functions to draw on an image and to put text into it. I have tried image overlay and image homography.
I can obtain the co-ordinates for the corners.
Is there any function to insert the image at those co-ordinates?
import cv2
import cv2.aruco as aruco
import numpy as np
import glob

markerLength = 0.25
cap = cv2.VideoCapture(0)

criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((6*7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
objpoints = []
imgpoints = []
images = glob.glob('calib_images/*.jpg')
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)
    if ret == True:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)
        img = cv2.drawChessboardCorners(img, (7, 6), corners2, ret)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)

calibrationFile = "calibrationFileName.xml"
calibrationParams = cv2.FileStorage(calibrationFile, cv2.FILE_STORAGE_READ)
camera_matrix = calibrationParams.getNode("cameraMatrix").mat()
dist_coeffs = calibrationParams.getNode("distCoeffs").mat()

while(True):
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    arucoParameters = aruco.DetectorParameters_create()
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=arucoParameters)
    if np.all(ids != None):
        rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, markerLength, mtx, dist)
        axis = aruco.drawAxis(frame, mtx, dist, rvec, tvec, 0.3)
        print(ids)
        display = aruco.drawDetectedMarkers(axis, corners)
        display = np.array(display)
    else:
        display = frame
    cv2.imshow('Display', display)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
To replace a part of an image:
import cv2
import numpy as np
img1 = cv2.imread('Desert.jpg')
img2 = cv2.imread('Penguins.jpg')
img3 = img1.copy()
# replace values at coordinates (100, 100) to (399, 399) of img3 with region of img2
img3[100:400,100:400,:] = img2[100:400,100:400,:]
cv2.imshow('Result1', img3)
To alpha blend two images:
alpha = 0.5
img3 = np.uint8(img1*alpha + img2*(1-alpha))
cv2.imshow('Result2', img3)
@user8190410's answer works fine. Just to give a complete answer, in order to alpha blend two images of different sizes at a particular position, you can do the following:
alpha = 0.7
img1_mod = img1.copy()
# blend img2 into the top-left corner of img1; the blended region must match img2's size
pos_x, pos_y = img2.shape[0], img2.shape[1]
img1_mod[:pos_x, :pos_y, :] = img1[:pos_x, :pos_y, :]*alpha + img2*(1-alpha)
cv2.imshow('Image1Mod', img1_mod)
Actually, I found that image homography can be used to do it.
Here is the updated code.
import numpy as np
import cv2
import cv2.aruco as aruco
cap = cv2.VideoCapture(0)
while(True):
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    arucoParameters = aruco.DetectorParameters_create()
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=arucoParameters)
    if np.all(ids != None):
        display = aruco.drawDetectedMarkers(frame, corners)
        x1 = (corners[0][0][0][0], corners[0][0][0][1])
        x2 = (corners[0][0][1][0], corners[0][0][1][1])
        x3 = (corners[0][0][2][0], corners[0][0][2][1])
        x4 = (corners[0][0][3][0], corners[0][0][3][1])
        im_dst = frame
        im_src = cv2.imread("mask.jpg")
        size = im_src.shape
        pts_dst = np.array([x1, x2, x3, x4])
        pts_src = np.array(
            [
                [0, 0],
                [size[1] - 1, 0],
                [size[1] - 1, size[0] - 1],
                [0, size[0] - 1]
            ], dtype=float
        )
        h, status = cv2.findHomography(pts_src, pts_dst)
        temp = cv2.warpPerspective(im_src, h, (im_dst.shape[1], im_dst.shape[0]))
        cv2.fillConvexPoly(im_dst, pts_dst.astype(int), 0, 16)
        im_dst = im_dst + temp
        cv2.imshow('Display', im_dst)
    else:
        display = frame
        cv2.imshow('Display', display)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
I am currently working on cropping solar panels out of images taken by a drone (sample image attached). I have tried using contours, but there wasn't a proper outcome: not all of the solar panels in the image were detected; some were missing. I am stuck at this point. How do I proceed further? Please help me with this problem.
Thank you,
Sample Code:
import cv2
import numpy as np
img = cv2.imread('D:\\SolarPanel Images\\solarpanel.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
edges = cv2.Canny(blur,100,200)
th3 = cv2.adaptiveThreshold(edges,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
im2, contours, hierarchy = cv2.findContours(th3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print("Len of contours",len(contours)
try: hierarchy = hierarchy[0]
except: hierarchy = []
height, width, = edges.shape
min_x, min_y = width, height
max_x = max_y = 0
# computes the bounding box for the contour, and draws it on the image,
for contour, hier in zip(contours, hierarchy):
area = cv2.contourArea(contour)
if area > 10000 and area < 250000:
(x,y,w,h) = cv2.boundingRect(contour)
min_x, max_x = min(x, min_x), max(x+w, max_x)
min_y, max_y = min(y, min_y), max(y+h, max_y)
if w > 80 and h > 80:
cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)
cv2.imshow('cont imge', img)
cv2.waitKey(0)
To find contours in images where the object of interest is clearly distinguishable from the background, you can always try converting the image to HSV format and thresholding before finding contours. I did the following:
import cv2
import numpy as np
img = cv2.imread('panel.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
ret, thresh1 = cv2.threshold(hsv[:, :, 0], 100, 255, cv2.THRESH_BINARY)
im2, contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
try:
    hierarchy = hierarchy[0]
except:
    hierarchy = []
for contour, hier in zip(contours, hierarchy):
    area = cv2.contourArea(contour)
    if area > 10000 and area < 250000:
        rect = cv2.minAreaRect(contour)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
cv2.imshow('cont imge', img)
cv2.waitKey(0)
cv2.imwrite("result.jpg", img)
Result:
Image used --
My code:
# multiple programs
import cv2
import numpy as np
img = cv2.imread('Dodo.jpg', 0)
ret, thresh = cv2.threshold(img, 127, 255, 0)
img2, contours, hierarchy = cv2.findContours(thresh, 1, 2)
cnt = contours[0]
M = cv2.moments(cnt)
print(M)
cx = int(M['m10']/ M['m00'])
cy = int(M['m01']/ M['m00'])
print("Cx:", cx, "Cy:", cy)
area = cv2.contourArea(cnt)
print("Area:", area)
perimeter = cv2.arcLength(cnt, True)
print("Perimeter:", perimeter)
epsilon = 0.1*cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt,epsilon,True)
imgapprox = cv2.drawContours(img,[approx],0,(0,0,255),2)
hull = cv2.convexHull(cnt)
imghull =cv2.drawContours(img,[hull],0,(0,0,255),2)
k = cv2.isContourConvex(cnt)
print(k)
x,y,w,h = cv2.boundingRect(cnt)
rectst = cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
rectrt =cv2.drawContours(img,[box],0,(0,0,255),2)
cv2.imshow('StraightRect', rectst)
cv2.imshow('RotatedRect', rectrt)
cv2.imshow('Approx', imgapprox)
cv2.imshow('hull', imghull)
cv2.waitKey()
cv2.destroyAllWindows()
OpenCV-Python version 3.4.1
So I am trying to learn the contour section in OpenCV (Link below)
Link : https://docs.opencv.org/3.4.1/dd/d49/tutorial_py_contour_features.html
Now the output is the same for all the features, i.e. the same output for every cv2.imshow call here.
Why? What is the error?
If it is overwriting the previous feature, then how do I display every feature?
Please help. Thanks :)
You are making the change to the same image each time.
Use img.copy() instead, as in cv2.drawContours(img.copy(), ...) and cv2.rectangle(img.copy(), ...).
Because of that, the windows seem to be showing the same features, but they aren't.
Also, since the background is black, you are not able to see the rectangles and contours properly.
Try this:
import cv2
import numpy as np
img = cv2.imread('Dodo.jpg')
f1 = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
f1 = cv2.threshold(f1, 120,255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
img2, contours, hierarchy = cv2.findContours(f1, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
#ret, thresh = cv2.threshold(img, 127, 255, 0)
#img2, contours, hierarchy = cv2.findContours(thresh, 1, 2)
cnt = contours[0]
M = cv2.moments(cnt)
print(M)
cx = int(M['m10']/ M['m00'])
cy = int(M['m01']/ M['m00'])
print("Cx:", cx, "Cy:", cy)
area = cv2.contourArea(cnt)
print("Area:", area)
perimeter = cv2.arcLength(cnt, True)
print("Perimeter:", perimeter)
epsilon = 0.1*cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt,epsilon,True)
imgapprox = cv2.drawContours(img.copy(),[approx],0,(0,0,255),2)
hull = cv2.convexHull(cnt)
imghull =cv2.drawContours(img.copy(),[hull],0,(0,0,255),2)
k = cv2.isContourConvex(cnt)
print(k)
x,y,w,h = cv2.boundingRect(cnt)
rectst = cv2.rectangle(img.copy(),(x,y),(x+w,y+h),(0,255,0),2)
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
rectrt =cv2.drawContours(img.copy(),[box],0,(0,0,255),2)
cv2.imshow('StraightRect', rectst)
cv2.imshow('RotatedRect', rectrt)
cv2.imshow('Approx', imgapprox)
cv2.imshow('hull', imghull)
cv2.waitKey()
cv2.destroyAllWindows()
This is the result I get after executing the above code.