Here is my code for image thresholding. I am getting an error at this line:
ret,thresh2 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)
In this code I want to capture image frames from the video camera and then perform various kinds of thresholding operations on the captured frames. I have stored image frames at various instants of time. My objective is to segment the moving objects in the video, which is why I am applying the thresholding operation.
Does anybody have an idea how to do it?
Thanks in advance.
import cv2
import numpy as np
import time
from matplotlib import pyplot as plt
import sys
cam = cv2.VideoCapture(0)
while cam.isOpened():
    ret, frame = cam.read()  # Keep on capturing the frames continuously
    while ret == True:
        #img = cv2.imread('/home/shrikrishna/Detection&Tracking/OpenCV-Tutorial',6)
        cv2.imwrite('At time' + str(time.clock()) + '.jpg', frame)
        img2 = cv2.imread('At time' + str(time.clock()) + '.jpg', 6)
        t = str(time.clock())
        cv2.imshow('Orignal', frame)
        k = cv2.waitKey(0) & 0xffff
        if k == 27:
            #img = cv2.imread('At time'+ str(time.clock()) + '.jpg',6)
            break
        if k == ord('q'):
            sys.exit(0)
            break
    #cv2.imwrite('At time'+ t + '.jpg', frame)
    img = cv2.imread('At time' + t + '.jpg', 6)
    ret, thresh1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    ret, thresh2 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
    ret, thresh3 = cv2.threshold(img, 127, 255, cv2.THRESH_TRUNC)
    ret, thresh4 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO)
    ret, thresh5 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO_INV)
    titles = ['Original Image', 'BINARY', 'BINARY_INV', 'TRUNC', 'TOZERO', 'TOZERO_INV']
    images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
    for i in xrange(6):
        plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
        plt.title(titles[i])
        plt.xticks([]), plt.yticks([])
    plt.show()
    cv2.waitKey(0)
cv2.destroyAllWindows()
In the following line, you read in an image as a colour image (based on the second parameter, flags):
img = cv2.imread('At time'+ t + '.jpg',6)
This means that img contains 3 channels, which in Python is represented by a 3-dimensional array.
You immediately use this image as the source for thresholding:
ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
According to the documentation, the first parameter to threshold() is:
src – input array (single-channel, 8-bit or 32-bit floating point).
That means you need a single channel image, e.g. a grayscale image:
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh1 = cv2.threshold(img_gray,127,255,cv2.THRESH_BINARY)
# ...
Another option would be to just read the image as grayscale in the first place:
img_gray = cv2.imread('At time'+ t + '.jpg',0)
# ...
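Putting the fix together with the capture loop, a minimal sketch of how the whole thing could look (assuming you threshold the live frame directly instead of writing it to disk first; the window name is illustrative):
import cv2

cam = cv2.VideoCapture(0)
while cam.isOpened():
    ret, frame = cam.read()
    if not ret:
        break
    # Convert the frame to a single-channel image before thresholding
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
    cv2.imshow('Thresholded', thresh)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()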
I have tried most of the solutions on the site to extract data from images. Only this script worked, with the *.tif format, and gave me correct data:
'''
from PIL import Image
import glob
import pytesseract

image_list = []
for filename in glob.glob(my_image):
    im = Image.open(filename)
    image_list.append(im)

pytesseract.pytesseract.tesseract_cmd = "C:\\Program Files\\Tesseract-OCR\\tesseract.exe"
texts = [pytesseract.image_to_string(img, lang='eng') for img in image_list]
'''
However, this does not work with *.png and *.jpg files. I tried the following:
'''
import cv2
import numpy as np
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
sharpen_kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
sharpen = cv2.filter2D(gray, -1, sharpen_kernel)
thresh = cv2.threshold(sharpen, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=1)
result = 255 - close
'''
And like,
'''
import os
from PIL import Image
import cv2
import pytesseract
import ftfy
import uuid
filename = img
image = cv2.imread(os.path.join(filename))
gray = cv2.threshold(image, 200, 255, cv2.THRESH_BINARY)[1]
gray = cv2.resize(gray, (0, 0), fx=3, fy=3)
gray = cv2.medianBlur(gray, 9)
filename = str(uuid.uuid4())+".jpg"
cv2.imwrite(os.path.join(filename), gray)
config = ("-l eng --oem 3 --psm 11")
text = pytesseract.image_to_string(Image.open(os.path.join(filename)), config=config)
text = ftfy.fix_text(text)
text = ftfy.fix_encoding(text)
text = text.replace('-\n', '')
print(text)
'''
and so on, but none of them gave me the data. How can I extract text from an image like an invoice?
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
print(pytesseract.image_to_string(r'D:\examplepdf2image.png'))
def escape(html):
    """Returns the given HTML with ampersands, quotes and carets encoded."""
    return mark_safe(force_unicode(html).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
This is a sample snippet. Instead of trying to print the text through many different variables, first try to print the image itself, then work on improving the result from there. That also lets Python run without errors, which makes the code easier to understand. The second piece of code, with def escape, shows how to encode an HTML file into which you can put your pieces of code and change it to your liking.
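As a starting point, a minimal sketch along those lines (the Tesseract path matches the question; the invoice filename is illustrative): load the image, display it to confirm it reads correctly, then run pytesseract on it.
import cv2
import pytesseract

pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'

# First make sure the image actually loads and displays
image = cv2.imread('invoice.jpg')  # illustrative filename
cv2.imshow('input', image)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Then run OCR on it
text = pytesseract.image_to_string(image, config='--oem 3 --psm 11')
print(text)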
I am creating a script that takes 2 images. The first is the background image and the second is the overlay image, to be displayed on top of the first image but with almost 90% transparency.
I have the following code:
from PIL import Image
img = Image.open('C:\\Users\\USER\\Desktop\\web\\2.jpg', 'r')
img_w, img_h = img.size
img.putalpha(200)
background = Image.open('C:\\Users\\USER\\Desktop\\web\\email.jpg', 'r')
bg_w, bg_h = background.size
offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
background.paste(img, offset)
background.save('C:\\Users\\USER\\Desktop\\out.png')
Now, the issue is that img.putalpha(200) does not seem to do anything at all, even though it is supposed to make the image transparent.
How can I place the overlay image and then modify its transparency in Python?
Thank you.
You need to pass a third parameter to paste() to act as the mask.
I used these 2 images:
Here's the complete code:
#!/usr/bin/env python3
from PIL import Image
# Open overlay image
img = Image.open('good.jpg')
img_w, img_h = img.size
img.putalpha(128)
background = Image.open('paddington.jpg')
bg_w, bg_h = background.size
offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
background.paste(img, offset, img)
background.save('result.png')
And if I revert to the original code, without the mask:
background.paste(img, offset)
the alpha channel is ignored and the overlay is pasted fully opaque.
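If you want finer control over the blend, an alternative sketch is to composite with an explicit alpha (assuming both images are converted to RGBA first; the filenames are taken from the question):
from PIL import Image

background = Image.open('email.jpg').convert('RGBA')
overlay = Image.open('2.jpg').convert('RGBA')
overlay.putalpha(25)  # ~90% transparent (alpha 25 out of 255)

# Put the overlay on a transparent canvas the size of the background,
# centred, then composite the two RGBA images
canvas = Image.new('RGBA', background.size, (0, 0, 0, 0))
offset = ((background.width - overlay.width) // 2,
          (background.height - overlay.height) // 2)
canvas.paste(overlay, offset)
Image.alpha_composite(background, canvas).save('out.png')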
I am trying to use OpenCV with Python 3 to create an image containing a QR code and read that code back.
Here is some relevant code:
def make_qr_code(self, data):
    qr = qrcode.QRCode(
        version=2,
        error_correction=qrcode.constants.ERROR_CORRECT_H,
        box_size=10,
        border=4,
    )
    qr.add_data(data)
    return numpy.array(qr.make_image().get_image())

# // DEBUG
img = numpy.ones([380, 380, 3]) * 255
index = self.make_qr_code('Hello StackOverflow!')
img[:index.shape[0], :index.shape[1]][index] = [0, 0, 255]
frame = img
# // DEBUG

self.show_image_in_canvas(0, frame)
frame_mono = cv.cvtColor(numpy.uint8(frame), cv.COLOR_BGR2GRAY)
self.show_image_in_canvas(1, frame_mono)
qr_detector = cv.QRCodeDetector()
data, bbox, rectifiedImage = qr_detector.detectAndDecode(frame_mono)
if len(data) > 0:
    print("Decoded Data : {}".format(data))
    self.show_image_in_canvas(2, rectifiedImage)
else:
    print("QR Code not detected")
(the calls to show_image_in_canvas are just for showing the images in my GUI so I can see what is going on).
When inspecting frame and frame_mono visually, they look OK to me.
However, the QR code detector doesn't return anything (it falls through to the else branch: "QR Code not detected").
There is literally nothing else in the frame than the QR code I just generated. What do I need to configure about cv.QRCodeDetector, or what additional preprocessing do I need to do on my frame, to make it find the QR code?
OP here; solved the problem by having a good look at the generated QR code and comparing it to some other sources.
The problem was not in the detection, but in the generation of the QR codes.
Apparently the array that qrcode.QRCode returns has False (or maybe it was 0 and I assumed it was a boolean) in the grid squares that are part of the code, and True (or non-zero) in the squares that are not.
So when I did img[:index.shape[0], :index.shape[1]][index] = [0, 0, 255] I was actually creating a negative image of the QR code.
When I inverted the index array the QR code changed from the image on the left to the image on the right and the detection succeeded.
In addition, I decided to switch to the ZBar library because it's much better at detecting these codes under less-than-perfect circumstances (like from a webcam image); a minimal sketch of using it follows.
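For reference, a minimal sketch of decoding with ZBar through the pyzbar bindings (assuming pyzbar is installed; the filename is illustrative):
import cv2
from pyzbar import pyzbar

img = cv2.imread('qr.png')
for code in pyzbar.decode(img):
    # Each decoded symbol carries its payload, type and bounding rectangle
    print(code.type, code.data.decode('utf-8'), code.rect)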
import cv2
import sys

filename = sys.argv[1]
# Or you can point at a file directly like this:
# filename = f'images/filename.jpg' where images is the folder of files you are trying to read

# Read the QR code image.
# In case the QR code is not black/white it is better to convert it to grayscale;
# the flag 0 means grayscale.
img = cv2.imread(filename, 0)
img_origin = cv2.imread(filename)

# initialize the cv2 QRCode detector
detector = cv2.QRCodeDetector()

# detect and decode
data, bbox, straight_qrcode = detector.detectAndDecode(img)

# if there is a QR code
if bbox is not None:
    print(f"QRCode data:\n{data}")
    # Draw the bounding box on the original image.
    # bbox has shape [[[float, float], ...]], so convert the floats to int
    # and loop over the points in its first element.
    n_lines = len(bbox[0])
    bbox1 = bbox.astype(int)  # float-to-int conversion
    for i in range(n_lines):
        # draw all lines
        point1 = tuple(bbox1[0, i])
        point2 = tuple(bbox1[0, (i + 1) % n_lines])
        cv2.line(img_origin, point1, point2, color=(255, 0, 0), thickness=2)
    # display the result (the lines were drawn on img_origin, so show that)
    cv2.imshow("img", img_origin)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("QR code not detected")
To re-state the accepted answer: the background of the QR code must be white and the foreground must be black. So if the generated QR code has a white foreground, you must invert the colors, e.g.:
import cv2
img = cv2.imread('C:/Users/N/qrcode.jpg')
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Invert colors so foreground is black
img_invert = cv2.bitwise_not(img_gray)
cv2.imshow('gray', img_gray)
cv2.imshow('inverted', img_invert)
cv2.waitKey(1)
qr_detector = cv2.QRCodeDetector()
text, _, _ = qr_detector.detectAndDecode(img_invert)
print(text)
[Images: the original image, the image after k-means clustering, and the image I get as the result]
I am working on malaria parasite detection using thick blood smear microscopy images. I have tried to segment the parasite objects, but it is difficult since they have almost the same color as the background. I have used cv2.kmeans() to cluster the parasite and non-parasite regions.
import os
import csv
import cv2
import numpy as np
import matplotlib.pyplot as plt

def smooth(img):
    dest = cv2.medianBlur(img, 7)
    #dest = cv2.GaussianBlur(img, (7, 7), 0)
    return dest

def process(path, img):
    image = cv2.imread(path + img, 1)
    image = smooth(image)
    return image

def kmeans(img, name):
    output = []
    image = img.reshape(img.shape[0] * img.shape[1], 3)
    image = np.float32(image)
    nclusters = 5
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    attempts = 10
    flags = cv2.KMEANS_RANDOM_CENTERS
    compactness, labels, centers = cv2.kmeans(image, nclusters, None, criteria, attempts, flags)
    centers = np.uint8(centers)
    res = centers[labels.flatten()]
    res2 = res.reshape((img.shape))
    cv2.imwrite(dest + name[:-4] + '.png', res2)
    im_color = cv2.imread(dest + name[:-4] + '.png', cv2.IMREAD_COLOR)
    im_gray = cv2.cvtColor(im_color, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(im_gray, thresh=100, maxval=255, type=cv2.THRESH_BINARY_INV)
    mask3 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)  # 3-channel mask
    im_thresh_color = cv2.bitwise_and(img, mask3)
    cv2.imwrite("C:\\Users\\user\\Desktop\\lbim2\\" + name[:-4] + ".png", im_thresh_color)

def preprocess(path):
    images = []
    j = 0
    print("Median Blur")
    for i in os.listdir(path):
        print(i)
        images.append(process(path, i))
        print(images[j].shape)
        #print(images[1].shape)
        images[j] = kmeans(images[j], i)
        j += 1
        print(i)

dest = '../output1/'
print("Preprocess")
preprocess('../input1/')
I get an image with all pixel values 0 (a black output).
I am following this example:
OpenCV Aruco example with image
And the following is the code snippet I am using to detect the markers. I am unable to understand why the example is not working for me.
import numpy as np
import cv2
import cv2.aruco as aruco
import os

im_names = filter(lambda x: x.endswith('.png'),
                  [f for f in os.listdir('local_vids_ims')])
for imn in im_names:
    image = cv2.imread('local_vids_ims/' + imn)
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        image, aruco_dict, parameters=parameters)
    print(corners, ids, rejectedImgPoints)
    # aruco.drawDetectedMarkers(image, corners)
    aruco.drawDetectedMarkers(image, rejectedImgPoints)
    cv2.imshow('gray_im', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Interesting. There's nothing wrong with your program. I tried the same thing in Python and C++ and got the same result as you. So I tried with a different image and was successful.
Here's my program. It's basically the same as yours but note that I'm using a different dictionary.
import numpy as np
import cv2
import cv2.aruco as aruco
image = cv2.imread("52814747.png")
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
parameters = aruco.DetectorParameters_create()
corners, ids, rejectedImgPoints = aruco.detectMarkers(
image, aruco_dict, parameters=parameters)
print(corners, ids, rejectedImgPoints)
aruco.drawDetectedMarkers(image, corners, ids)
aruco.drawDetectedMarkers(image, rejectedImgPoints, borderColor=(100, 0, 240))
cv2.imshow('so52814747', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
I don't know if the problem is with the 6x6 dictionary itself or with the source image not having enough resolution to work with the 6x6 dictionary, but there's definitely something wrong with that tutorial. I've reported the issue on GitHub.
Here's the image I used.
And here's the result. (Found markers have green borders. Rejected candidates have red borders.)
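If you want to rule out the source image, a minimal sketch for generating a test marker with the same old-style aruco API (the marker id and pixel size here are arbitrary):
import cv2
import cv2.aruco as aruco

# Generate a 4x4 marker (id 23, 200x200 px) from the dictionary the detector expects
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
marker = aruco.drawMarker(aruco_dict, 23, 200)
cv2.imwrite('marker23.png', marker)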
I encountered the same problem and solved it by flipping the input image with the function cv::flip.
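In Python that is a one-liner; a flip code of 1 mirrors the image horizontally (0 flips vertically, -1 does both). The filename is illustrative:
import cv2

image = cv2.imread('marker.png')  # illustrative filename
flipped = cv2.flip(image, 1)      # mirror horizontally before running detection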
You have only created a method to detect the aruco markers and their respective IDs. If you want to detect a marker and augment it with an image, you have to do something like this:
def augment_marker(bbox, ids, img, img_aug, draw_id=True):
    tl = bbox[0][0][0], bbox[0][0][1]  # top left
    tr = bbox[0][1][0], bbox[0][1][1]  # top right
    br = bbox[0][2][0], bbox[0][2][1]  # bottom right
    bl = bbox[0][3][0], bbox[0][3][1]  # bottom left
    h, w, c = img_aug.shape
    pts1 = np.array([tl, tr, br, bl])
    pts2 = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    matrix, _ = cv2.findHomography(pts2, pts1)
    imgout = cv2.warpPerspective(img_aug, matrix,
                                 (img.shape[1], img.shape[0]))
    # The warp above places the augment image exactly where the marker is
    # and leaves the rest of the frame completely black, so overlay only
    # the marker area and keep the real environment:
    # step 1: black out the marker area in the original image
    cv2.fillConvexPoly(img, pts1.astype(int), (0, 0, 0))
    # step 2: add the warped image onto the blacked-out area
    imgout = img + imgout
    return imgout
where:
bbox is what you get from aruco.detectMarkers()
img is the image containing the aruco marker
img_aug is the image you want to augment over the marker
draw_id is just a flag I added to draw the ID over the detected marker
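As a hedged usage sketch (assuming augment_marker returns imgout, as above, and that the corners come from aruco.detectMarkers() as in the earlier answers; the filenames are illustrative):
import cv2
import cv2.aruco as aruco
import numpy as np

img = cv2.imread('scene_with_marker.png')       # illustrative
img_aug = cv2.imread('picture_to_overlay.png')  # illustrative

aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
parameters = aruco.DetectorParameters_create()
corners, ids, rejected = aruco.detectMarkers(img, aruco_dict, parameters=parameters)

if ids is not None:
    # Augment the first detected marker
    result = augment_marker(corners[0], ids[0], img, img_aug)
    cv2.imshow('augmented', result)
    cv2.waitKey(0)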