How to detect different colors - python-3.x

I am learning image filtering with OpenCV. I wrote some code, but it can only detect red objects. How can I detect objects of other colors?
I tried different NumPy array values, but I'm still not satisfied with the output:
import cv2
import numpy as np

# frame is a BGR image, e.g. one returned by cv2.VideoCapture(...).read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_blue = np.array([150, 150, 0])
upper_blue = np.array([255, 255, 225])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
res = cv2.bitwise_and(frame, frame, mask=mask)
cv2.imshow('res', res)

You just need to change the boundary values (in your case lower_blue and upper_blue) to different values. In OpenCV's HSV space the values range as follows: [0 <= H <= 179], [0 <= S <= 255], [0 <= V <= 255]. You can see it better from the picture.
Good luck!
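For example, to pick out blue instead of red, a commonly used starting range looks like the sketch below. The exact bounds depend on your lighting and camera, so treat these numbers as assumptions to tune, and 'frame.png' as a placeholder path:
import cv2
import numpy as np

frame = cv2.imread('frame.png')  # any BGR image
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

# rough HSV range for blue; raise/lower the S and V bounds for your scene
lower_blue = np.array([100, 150, 50])
upper_blue = np.array([130, 255, 255])

mask = cv2.inRange(hsv, lower_blue, upper_blue)
res = cv2.bitwise_and(frame, frame, mask=mask)
cv2.imshow('blue only', res)
cv2.waitKey(0)
cv2.destroyAllWindows()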

First, the range of H should be from 0 to 179. To get a feeling for which combination of HSV values produces which color, here is a small piece of code. The code below creates trackbars for H, S, and V. Adjust the trackbars to segment the different colors.
import cv2
import numpy as np

def nothing(x):
    pass

cap = cv2.VideoCapture(0)

# Create a window
cv2.namedWindow('image', cv2.WINDOW_NORMAL)

# create trackbars for color change
cv2.createTrackbar('lowH', 'image', 0, 179, nothing)
cv2.createTrackbar('highH', 'image', 179, 179, nothing)
cv2.createTrackbar('lowS', 'image', 0, 255, nothing)
cv2.createTrackbar('highS', 'image', 255, 255, nothing)
cv2.createTrackbar('lowV', 'image', 0, 255, nothing)
cv2.createTrackbar('highV', 'image', 255, 255, nothing)

while(True):
    ret, frame = cap.read()
    # get current positions of the trackbars
    ilowH = cv2.getTrackbarPos('lowH', 'image')
    ihighH = cv2.getTrackbarPos('highH', 'image')
    ilowS = cv2.getTrackbarPos('lowS', 'image')
    ihighS = cv2.getTrackbarPos('highS', 'image')
    ilowV = cv2.getTrackbarPos('lowV', 'image')
    ihighV = cv2.getTrackbarPos('highV', 'image')

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_hsv = np.array([ilowH, ilowS, ilowV])
    higher_hsv = np.array([ihighH, ihighS, ihighV])
    mask = cv2.inRange(hsv, lower_hsv, higher_hsv)
    frame = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('image', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
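One caveat worth knowing while you experiment with the trackbars: red wraps around both ends of OpenCV's hue axis (near 0 and near 179), so a single inRange call cannot capture it cleanly. A minimal sketch (the exact bounds are approximate assumptions) combines two masks:
import cv2
import numpy as np

frame = cv2.imread('frame.png')  # placeholder path
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

# red occupies both ends of the hue range in OpenCV (0-179)
mask_low = cv2.inRange(hsv, np.array([0, 120, 70]), np.array([10, 255, 255]))
mask_high = cv2.inRange(hsv, np.array([170, 120, 70]), np.array([179, 255, 255]))
mask = cv2.bitwise_or(mask_low, mask_high)

res = cv2.bitwise_and(frame, frame, mask=mask)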

Related

How to get rid of image noise to improve results with PyTesseract?

I'm trying to read the text "P1" and "P2" from the upper-left corner of a video (screenshots "P1" and "P2").
I take a frame, crop it down to the images "P1 Crop" and "P2 Crop", and then apply the image processing described in "use pytesseract to recognize text from image".
While this works on cropped still images that I edited manually in an image editor, it doesn't work on frames taken from the video with cv2.
I'm not sure why, but I suspect it has something to do with the black and white background, as in the picture "P1 post image manipulation"; I don't know how to get rid of it without also removing the text.
Here's my code:
import cv2
import pytesseract
import re
from difflib import SequenceMatcher

def determineWinner(video):
    winnerRect = [(70, 95), (146, 152)]
    cap = cv2.VideoCapture(video)
    if cap.isOpened() == False:
        print("No dice")
        return
    fps = cap.get(cv2.CAP_PROP_FPS)
    frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    print(fps)
    print(frames)
    desiredSeek = frames - int(fps * 9)
    print(desiredSeek)
    seconds = desiredSeek / fps
    print(seconds)
    minutes = seconds / 60
    print(minutes)
    partial = minutes - int(minutes)
    print(partial)
    seconds = partial * 60
    print(seconds)
    print(str(int(minutes)) + ":" + str(seconds))
    cap.set(cv2.CAP_PROP_POS_FRAMES, desiredSeek)
    ret, img = cap.read()
    winTxt = []
    p1Count = 0
    p2Count = 0
    cv2.namedWindow("", cv2.WINDOW_NORMAL)
    ret, img = cap.read()
    while ret:
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        if key == ord('e'):
            ret, img = cap.read()
            if ret:
                winROI = img[winnerRect[0][1]:winnerRect[1][1], winnerRect[0][0]:winnerRect[1][0]]
                gray = cv2.cvtColor(winROI, cv2.COLOR_BGR2GRAY)
                blur = cv2.GaussianBlur(gray, (3, 3), 0)
                thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
                # Morph open to remove noise and invert image
                kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
                opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)
                invert = 255 - opening
                invert = cv2.resize(invert, None, fx=2, fy=2)
                wConfig = '-l eng --oem 1 --psm 10 -c tessedit_char_whitelist=P12'
                winTxt = pytesseract.image_to_string(invert, config=wConfig)
                cv2.rectangle(img, winnerRect[0], winnerRect[1], (255, 0, 0), 2)
                cv2.imshow("winroi", invert)
                cv2.imshow("", img)
                cv2.resizeWindow("", 800, 600)
                print(winTxt)
                desiredSeek += 1
                seconds = desiredSeek / fps
                minutes = seconds / 60
                partial = minutes - int(minutes)
                seconds = partial * 60
                print(str(int(minutes)) + ":" + str(seconds))
            else:
                break
    cap.release()
    cv2.destroyAllWindows()
This code works as a testing script. I only extracted the parameters for the image containing P1. To apply the filter to a new image, just reset the predefined threshold values as follows:
From:
low_blue, low_green, low_red, upper_blue, upper_green, upper_red = (115, 0, 0, 255, 178, 255)
To:
low_blue, low_green, low_red, upper_blue, upper_green, upper_red = (0, 0, 0, 255, 255, 255)
Then start modifying the parameters as described below. After determining the parameters, press Esc to exit the program, take the parameters shown in the console, and paste them into the thresholds tuple.
How to use it:
Very important: for this to work properly, you have to left-click the window created by cv2.imshow() (in this case Original image or Binary image) so that it receives the key presses.
q increases and w decreases the lower blue threshold
a increases and s decreases the lower green threshold
... and so on for the remaining lower and upper (BGR) thresholds
import numpy as np
import cv2

low_blue, low_green, low_red, upper_blue, upper_green, upper_red = (115, 0, 0, 255, 178, 255)

# Get picture
path = "C:\\Users\\asd\\asd\\P1.png"
frame = cv2.imread(path)

while 1:
    lower_color = np.array((low_blue, low_green, low_red))
    upper_color = np.array((upper_blue, upper_green, upper_red))
    # extract binary image with active blue regions
    binary_image = cv2.inRange(frame, lower_color, upper_color)
    cv2.imshow('Original image', binary_image)
    # erode so the little white contours disappear
    binary_image = cv2.erode(binary_image, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
    binary_image = cv2.dilate(binary_image, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
    cv2.imshow('Binary image', binary_image)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
    if k == ord('q'):
        low_blue += 1
    if k == ord('w'):
        low_blue -= 1
    if k == ord('a'):
        low_green += 1
    if k == ord('s'):
        low_green -= 1
    if k == ord('z'):
        low_red += 1
    if k == ord('x'):
        low_red -= 1
    if k == ord('e'):
        upper_blue += 1
    if k == ord('r'):
        upper_blue -= 1
    if k == ord('d'):
        upper_green += 1
    if k == ord('f'):
        upper_green -= 1
    if k == ord('c'):
        upper_red += 1
    if k == ord('v'):
        upper_red -= 1
    print("low_blue=", low_blue, "low_green=", low_green, "low_red=", low_red,
          "upper_blue=", upper_blue, "upper_green=", upper_green, "upper_red=", upper_red)

cv2.destroyAllWindows()
The results: the "From" and "To" screenshots (omitted here) show the image before and after filtering.
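To close the loop with the question's OCR code, the tuned thresholds could be plugged into a helper like the following. This is a sketch: the function name and the idea of replacing the Otsu step with inRange are mine, the threshold tuple is the one the tool printed for the P1 crop, and whether the mask needs inverting before OCR depends on which region it selects:
import cv2
import numpy as np
import pytesseract

def ocr_roi_with_thresholds(roi_bgr, config):
    # binarize the BGR ROI with the tuned BGR thresholds from the tool above
    lower = np.array((115, 0, 0))      # low_blue, low_green, low_red
    upper = np.array((255, 178, 255))  # upper_blue, upper_green, upper_red
    binary = cv2.inRange(roi_bgr, lower, upper)
    # same cleanup as in the tool: erode then dilate to drop small specks
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    binary = cv2.erode(binary, kernel)
    binary = cv2.dilate(binary, kernel)
    return pytesseract.image_to_string(binary, config=config)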

Detection of small objects - aphids on plants

I'm currently trying to create a detector for aphids (green and rose) on plants, using only "classic" image processing techniques (no neural network).
Here is an image I'm working on:
'aphids.jpg'
I'm working on the code below. If you apply it to the image, you should get the plants alone. My problem is that I want to isolate the aphids that can be seen on the plants. There are a lot of them, but I just want to detect the biggest or the most obvious ones.
In the code there is an edge_detec function I'm currently working on. One of the problems I have is that I can detect some of the aphids as contours, but it also picks up simple lines...
I tried to drop those lines using the contour hierarchy, but it seems those lines have inner contours, so I can't easily delete them.
I also tried adjust_gamma and adjust_contrast, but they don't help much.
I'm looking for more ideas. What would you try?
Thank you in advance!
Here is the code:
import cv2
import numpy as np
import matplotlib.pyplot as plt

def adjust_gamma(image, gamma=1.0):
    # build a lookup table mapping the pixel values [0, 255] to
    # their adjusted gamma values
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    # apply gamma correction using the lookup table
    return cv2.LUT(image, table)

def adjust_contrast(image, alpha=1.0, beta=0):
    new = np.zeros(image.shape, image.dtype)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            for c in range(image.shape[2]):
                new[y, x, c] = np.clip(alpha * image[y, x, c] + beta, 0, 255)
    return new

def img_process(img):
    (h1, w1) = img.shape[:2]
    center = (w1 / 2, h1 / 2)
    blur = cv2.GaussianBlur(img.copy(), (5, 5), 0)
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    #image = img.copy()
    # Boundaries to separate plants from the image
    l_bound = np.array([20, 0, 0])
    h_bound = np.array([90, 250, 170])  # green
    mask = cv2.inRange(hsv, l_bound, h_bound)
    res = cv2.bitwise_and(img, img, mask=mask)
    # Find plant contours
    cnt, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    sort_cnt = sorted(cnt, key=cv2.contourArea, reverse=True)
    cnt = [sort_cnt[i] for i in range(len(sort_cnt)) if cv2.contourArea(sort_cnt[i]) > 300]
    cv2.drawContours(res, cnt, -1, (0, 255, 0), -1)
    # Invert mask to keep only the plant in the image
    mask2 = cv2.inRange(res, np.array([0, 0, 0]), np.array([250, 250, 250]))
    mask2 = cv2.bitwise_not(mask2)
    res2 = cv2.bitwise_and(img, img, mask=mask2)
    # Increase brightness/contrast
    res2 = res2 * 1.45
    res2 = res2.astype('uint8')
    # Crop
    res2 = res2[:-50, int(center[0] - 300):int(center[0] + 550)]
    return res2

def edge_detec(img):
    (h1, w1) = img.shape[:2]
    center = (w1 / 2, h1 / 2)
    blur = cv2.GaussianBlur(img.copy(), (5, 5), 0)
    gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 30, 70, apertureSize=3)
    edges = edges[:-50, int(center[0] - 300):int(center[0] + 550)]
    #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    #edges = cv2.morphologyEx(edges, cv2.MORPH_GRADIENT, kernel)
    cnt, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = sorted(cnt, key=cv2.contourArea, reverse=True)
    listArea = list(map(cv2.contourArea, cnt))
    sort_cnt = [x for x in cnt if cv2.contourArea(x) > 10]
    cv2.drawContours(edges, sort_cnt, -1, (0, 255, 0), -1)
    return edges, center, img

### Start of the program
img = cv2.imread('051.jpg')
while True:
    ## Put processing function here
    img_mod = img_process(img)
    cv2.imshow('img', img_mod)
    if cv2.waitKey(1) & 0xFF == 27:
        break
cv2.destroyAllWindows()
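One idea to try (a hedged sketch, not a tested answer): since the stray lines are thin and elongated while aphids are compact blobs, filtering contours by shape rather than by hierarchy may separate them. Circularity is 4*pi*area/perimeter^2, which is 1.0 for a perfect circle and near 0 for a line; the thresholds below are guesses to tune:
import cv2
import numpy as np

def filter_blob_contours(mask, min_area=10, min_circularity=0.4):
    # keep compact, blob-like contours; discard thin, line-like ones
    cnts, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    blobs = []
    for c in cnts:
        area = cv2.contourArea(c)
        if area < min_area:
            continue
        perimeter = cv2.arcLength(c, True)
        if perimeter == 0:
            continue
        circularity = 4 * np.pi * area / (perimeter * perimeter)
        if circularity >= min_circularity:
            blobs.append(c)
    return blobs
Applied to the edge image from edge_detec, this should keep the rounded aphid contours and drop most of the straight stem lines.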

How to clear numbers from the image using OpenCV?

I'm trying to remove numbers lying inside the circular part of the image. The numbers are black, and the background varies between red, yellow, blue, and green.
I am using OpenCV to remove those numbers: I build a mask that extracts the numbers from the image, and with the help of cv2.inpaint I try to remove them.
For my further analysis I need a clean image, but my current approach gives a distorted image and the numbers are not completely removed.
I tried changing the threshold values; lowering them misses numbers in dark-shaded areas such as green and red.
import cv2
img = cv2.imread('scan_1.jpg')
mask = cv2.threshold(img,50,255,cv2.THRESH_BINARY_INV)[1][:,:,0]
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.destroyAllWindows()
dst = cv2.inpaint(img, mask, 5, cv2.INPAINT_TELEA)
cv2.imshow('dst',dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('ost_1.jpg',dst)
Input images: a) scan_1.jpg
b) scan_2.jpg
Output images: a) ost_1.jpg
b) ost_2.jpg
Expected image: the circles can be ignored, but something similar to this is required.
Here is my attempt; a better/easier solution might be possible if you do not care about preserving the text outside of your circle.
import cv2
import numpy as np

# connectivity method used for finding connected components, 4 vs 8
CONNECTIVITY = 4

# HSV threshold for finding black pixels
H_THRESHOLD = 179
S_THRESHOLD = 255
V_THRESHOLD = 150

# read image
img = cv2.imread("a1.jpg")
img_height = img.shape[0]
img_width = img.shape[1]

# save a copy for creating resulting image
result = img.copy()

# convert image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find the circle in the image
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.7, minDist=100,
                           param1=48, param2=100, minRadius=70, maxRadius=100)

# draw found circle, for visual only
circle_output = img.copy()

# check if we found exactly 1 circle
# (HoughCircles returns None when nothing is found, so guard before counting;
# the circle coordinates are stored in circles[0], hence shape[1])
num_circles = 0 if circles is None else circles.shape[1]
print("Number of found circles:{}".format(num_circles))
if num_circles != 1:
    print("invalid number of circles found ({}), should be 1".format(num_circles))
    exit(0)

# save center position and radius of found circle
circle_x = 0
circle_y = 0
circle_radius = 0
if circles is not None:
    # convert the (x, y) coordinates and radius of the circles to integers
    circles = np.round(circles[0, :]).astype("int")
    for (x, y, radius) in circles:
        circle_x, circle_y, circle_radius = (x, y, radius)
        cv2.circle(circle_output, (circle_x, circle_y), circle_radius, (255, 0, 0), 4)
        print("circle center:({},{}), radius:{}".format(x, y, radius))

# keep a median filtered version of image, will be used later
median_filtered = cv2.medianBlur(img, 21)

# Convert BGR to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

# define range of black color in HSV
lower_val = np.array([0, 0, 0])
upper_val = np.array([H_THRESHOLD, S_THRESHOLD, V_THRESHOLD])

# Threshold the HSV image to get only black colors
mask = cv2.inRange(hsv, lower_val, upper_val)

# find connected components
components = cv2.connectedComponentsWithStats(mask, CONNECTIVITY, cv2.CV_32S)

# apply median filtering to found components
#centers = components[3]
num_components = components[0]
print("Number of found connected components:{}".format(num_components))
labels = components[1]
stats = components[2]
for i in range(1, num_components):
    left = stats[i, cv2.CC_STAT_LEFT] - 10
    top = stats[i, cv2.CC_STAT_TOP] - 10
    width = stats[i, cv2.CC_STAT_WIDTH] + 10
    height = stats[i, cv2.CC_STAT_HEIGHT] + 10
    # iterate each pixel and replace them if
    # they are inside the circle
    for row in range(top, top + height + 1):
        for col in range(left, left + width + 1):
            dx = col - circle_x
            dy = row - circle_y
            if dx * dx + dy * dy <= circle_radius * circle_radius:
                result[row, col] = median_filtered[row, col]

# smooth the image, may be necessary?
#result = cv2.blur(result, (3,3))

# display image(s)
cv2.imshow("img", img)
cv2.imshow("gray", gray)
cv2.imshow("found circle:", circle_output)
cv2.imshow("mask", mask)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result for a1:
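An alternative worth trying (a sketch, untested on the actual scans): combine the questioner's inpainting with the circle found above, so only the digit pixels inside the circle are inpainted and text outside is left untouched. The helper name is mine; circle_x, circle_y, and circle_radius are the values computed by the code above:
import cv2
import numpy as np

def inpaint_inside_circle(img, circle_x, circle_y, circle_radius):
    # the question's threshold-based digit mask
    mask = cv2.threshold(img, 50, 255, cv2.THRESH_BINARY_INV)[1][:, :, 0]
    # keep only the part of the mask that lies inside the detected circle
    circle_mask = np.zeros(mask.shape, dtype=np.uint8)
    cv2.circle(circle_mask, (circle_x, circle_y), circle_radius, 255, -1)
    mask = cv2.bitwise_and(mask, circle_mask)
    return cv2.inpaint(img, mask, 5, cv2.INPAINT_TELEA)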

How do I save the image result from this code?

I'm trying to write code that identifies the eyes of parakeets. So far, I have managed to use code that identifies the circles' edges, and I got great results at a certain threshold. But I can't save the result image.
I've tried using imwrite('result.png', clone) to save the result at the end of the code, but when I run it I get TypeError: Expected cv::UMat for argument 'img'.
I need the clone image to be colored too, but I have no idea where to start.
import cv2
import numpy as np
import imutils

def nothing(x):
    pass

# Load an image
img = cv2.imread('sample.png')

# Resize the image
if img.shape[1] > 600:
    img = imutils.resize(img, width=600)

# Create a window
cv2.namedWindow('Treshed')

# create a trackbar for threshold change
cv2.createTrackbar('Treshold', 'Treshed', 0, 255, nothing)

while(1):
    # Clone original image to not overlap drawings
    clone = img.copy()
    # Convert to gray
    gray = cv2.cvtColor(clone, cv2.COLOR_BGR2GRAY)
    # get current position of the trackbar
    r = cv2.getTrackbarPos('Treshold', 'Treshed')
    # Threshold the gray image
    ret, gray_threshed = cv2.threshold(gray, r, 255, cv2.THRESH_BINARY)
    # Blur the image
    bilateral_filtered_image = cv2.bilateralFilter(gray_threshed, 5, 175, 175)
    # Detect edges
    edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200)
    # Find contours
    contours, _ = cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contour_list = []
    for contour in contours:
        # approximate for circles
        approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)
        area = cv2.contourArea(contour)
        if (len(approx) > 8) & (area > 30):
            contour_list.append(contour)
    # Draw contours on the original image
    cv2.drawContours(clone, contour_list, -1, (255, 0, 0), 2)
    # there is an outer and an inner boundary for each edge, so contours double
    print('Number of found circles: {}'.format(int(len(contour_list) / 2)))
    # Display the results
    cv2.imshow('Objects Detected', clone)
    cv2.imshow("Treshed", gray_threshed)
    # ESC to break
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break

# close all open windows
cv2.destroyAllWindows()
I just tried this modification and it works flawlessly.
# ESC to break
k = cv2.waitKey(1) & 0xFF
if k == 27:
    cv2.imwrite('result.png', clone)
    break
There's either a misconception about when/where to call imwrite, or something is messy with your Python/OpenCV version. I ran it in PyCharm using Python 3.6.8 and opencv-python 4.0.0.21.
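A small variation on the same idea (a sketch; the 's' key binding and the filename are arbitrary choices of mine) lets you save a snapshot at any point without quitting:
# inside the while loop, after drawing the contours on clone
k = cv2.waitKey(1) & 0xFF
if k == ord('s'):
    # save the current annotated (colored) frame and keep running
    cv2.imwrite('result.png', clone)
if k == 27:  # ESC still exits
    break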

Lucas-Kanade loses tracked points - OpenCV Python

I'm new to Python and OpenCV. I played a little bit with the Lucas-Kanade example you can see below. I took a photo of my Raspberry Pi box, where this code detects 5 points (see figure 1, "Lucas-Kanade tracked points"). In the second image, I slowly moved a piece of paper from left to right in front of the box. There I saw that my 5 tracked points could be dragged along by this piece of paper: they were now attached to the long side of the paper (see figure 2, "moved tracked points"). How is that possible? Why can I move these points? In my opinion, they should be lost when I move the piece of paper over them. Can somebody help me, please?
Best regards,
Hanz
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

# params for ShiTomasi corner detection
feature_params = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)

# Parameters for lucas kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# Create some random colors
color = np.random.randint(0, 255, (100, 3))

# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)

while(1):
    ret, frame = cap.read()
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
        mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
        frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
    img = cv2.add(frame, mask)
    cv2.imshow('frame', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)

cv2.destroyAllWindows()
cap.release()
I have found a solution: check the previous points against the back-tracked points, as in this example, which can be found here: https://github.com/opencv/opencv/blob/master/samples/python/lk_homography.py
Here is the relevant code:
def checkedTrace(img0, img1, p0, back_threshold=1.0):
    # note: the OpenCV sample imports cv2 as cv
    # track forward, then track the result backward to the first image
    p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
    p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
    # a point is only kept if its back-tracked position lands close to the original
    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
    status = d < back_threshold
    return p1, status
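For reference, in the main loop of the question's script this would replace the single forward call (a sketch, assuming cv is cv2 and lk_params is the dict defined above):
# inside the while loop, instead of the plain calcOpticalFlowPyrLK call:
p1, status = checkedTrace(old_gray, frame_gray, p0)

# points occluded by the moving paper fail the forward-backward
# consistency check and are dropped here
good_new = p1[status]
good_old = p0[status]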
