Reading a barcode using OpenCV QRCodeDetector - python-3.x

I am trying to use OpenCV on Python3 to create an image with a QR code and read that code back.
Here is some relevant code:
def make_qr_code(self, data):
    qr = qrcode.QRCode(
        version=2,
        error_correction=qrcode.constants.ERROR_CORRECT_H,
        box_size=10,
        border=4,
    )
    qr.add_data(data)
    return numpy.array(qr.make_image().get_image())
# // DEBUG
img = numpy.ones([380, 380, 3]) * 255
index = self.make_qr_code('Hello StackOverflow!')
img[:index.shape[0], :index.shape[1]][index] = [0, 0, 255]
frame = img
# // DEBUG
self.show_image_in_canvas(0, frame)

frame_mono = cv.cvtColor(numpy.uint8(frame), cv.COLOR_BGR2GRAY)
self.show_image_in_canvas(1, frame_mono)
qr_detector = cv.QRCodeDetector()
data, bbox, rectifiedImage = qr_detector.detectAndDecode(frame_mono)
if len(data) > 0:
    print("Decoded Data : {}".format(data))
    self.show_image_in_canvas(2, rectifiedImage)
else:
    print("QR Code not detected")
(the calls to show_image_in_canvas are just for showing the images in my GUI so I can see what is going on).
When inspecting frame and frame_mono visually, both look fine to me.
However, the QR code detector doesn't return anything (it goes into the else branch: "QR Code not detected").
There is literally nothing in the frame but the QR code I just generated. What do I need to configure on cv.QRCodeDetector, or what additional preprocessing do I need to apply to my frame, to make it find the QR code?

OP here; I solved the problem by taking a good look at the generated QR code and comparing it to codes from other sources.
The problem was not in the detection, but in the generation of the QR codes.
Apparently the array that qrcode.QRCode returns has False (or maybe it was 0 and I assumed it was a boolean) in the grid squares that are part of the code, and True (or non-zero) in the squares that are not.
So when I did img[:index.shape[0], :index.shape[1]][index] = [0, 0, 255] I was actually creating a negative image of the QR code.
When I inverted the index array the QR code changed from the image on the left to the image on the right and the detection succeeded.
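For reference, a minimal sketch of the fix, assuming index is the boolean array returned by make_qr_code above (~ inverts the mask so the code squares, not the background, are painted):
# Invert the boolean mask so the QR modules are drawn dark on the white canvas
img[:index.shape[0], :index.shape[1]][~index] = [0, 0, 255]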
In addition I decided to switch to the ZBar library because it's much better at detecting these codes under less perfect circumstances (like from a webcam image).
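A minimal sketch of the ZBar route, assuming the pyzbar bindings (the specific binding is an assumption; only the ZBar library itself is named above):
import cv2
from pyzbar import pyzbar

img = cv2.imread('qrcode.png')  # hypothetical file name
for code in pyzbar.decode(img):
    print(code.type, code.data.decode('utf-8'))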

import cv2
import sys

filename = sys.argv[1]
# Or you can hard-code the file path directly, e.g.:
# filename = 'images/filename.jpg'  # where 'images' is the folder holding the files you want to read

# Read the QR code image.
# If the QR code is not pure black/white, it is better to convert it to grayscale;
# the flag 0 means "load as grayscale".
img = cv2.imread(filename, 0)
img_origin = cv2.imread(filename)

# Initialize the cv2 QRCode detector
detector = cv2.QRCodeDetector()

# Detect and decode
data, bbox, straight_qrcode = detector.detectAndDecode(img)

# If a QR code was found
if bbox is not None:
    print(f"QRCode data:\n{data}")
    # Draw the bounding box on the original image.
    # bbox has shape (1, 4, 2) and holds floats, so convert it to int
    # and loop over the four corner points in its first element.
    n_lines = len(bbox[0])
    bbox1 = bbox.astype(int)  # float-to-int conversion
    for i in range(n_lines):
        # draw all lines
        point1 = tuple(bbox1[0, i])
        point2 = tuple(bbox1[0, (i + 1) % n_lines])
        cv2.line(img_origin, point1, point2, color=(255, 0, 0), thickness=2)
    # Display the result (the lines were drawn on img_origin, so show that)
    cv2.imshow("img", img_origin)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("QR code not detected")

To restate the accepted answer: the background of the QR code must be white and the foreground must be black. So if the generated QR code has a white foreground, you must invert the colors, e.g.:
import cv2

img = cv2.imread('C:/Users/N/qrcode.jpg')
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Invert the colors so the foreground is black
img_invert = cv2.bitwise_not(img_gray)
cv2.imshow('gray', img_gray)
cv2.imshow('inverted', img_invert)
cv2.waitKey(1)

qr_detector = cv2.QRCodeDetector()
text, _, _ = qr_detector.detectAndDecode(img_invert)
print(text)

Related

Python3 and Pillow (PIL) - Add an image on top of other image with transparency

I am creating a script that takes two images: the first is the background image and the second is the overlay image to be displayed on top of the first, but with almost 90% transparency.
I have the following code:
from PIL import Image
img = Image.open('C:\\Users\\USER\\Desktop\\web\\2.jpg', 'r')
img_w, img_h = img.size
img.putalpha(200)
background = Image.open('C:\\Users\\USER\\Desktop\\web\\email.jpg', 'r')
bg_w, bg_h = background.size
offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
background.paste(img, offset)
background.save('C:\\Users\\USER\\Desktop\\out.png')
Now the issue is that img.putalpha(200) does not seem to do anything at all, even though it is supposed to make the image transparent.
How can I place the overlay image and then adjust its transparency in Python?
Thank you.
You need to pass the image itself as the third parameter to paste(), where it serves as the mask.
I used these 2 images:
Here's the complete code:
#!/usr/bin/env python3
from PIL import Image
# Open overlay image
img = Image.open('good.jpg')
img_w, img_h = img.size
img.putalpha(128)
background = Image.open('paddington.jpg')
bg_w, bg_h = background.size
offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
background.paste(img, offset, img)
background.save('result.png')
And if I revert to the original code:
background.paste(img, offset)
the overlay is pasted fully opaque, because without the mask parameter the alpha channel is ignored.
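An alternative sketch, assuming both images are converted to RGBA first, is to let Pillow composite them directly:
#!/usr/bin/env python3
from PIL import Image

# Same hypothetical file names as above
background = Image.open('paddington.jpg').convert('RGBA')
overlay = Image.open('good.jpg').convert('RGBA')
overlay.putalpha(128)  # roughly 50% opacity

# Center the overlay on the background
offset = ((background.width - overlay.width) // 2,
          (background.height - overlay.height) // 2)

# alpha_composite respects the overlay's alpha channel
background.alpha_composite(overlay, offset)
background.convert('RGB').save('result.jpg')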

How to detect objects which have almost the same color as their background?

[Images: the original image, the image after k-means clustering, and the result I get]
I am working on malaria parasite detection using thick blood smear microscopy images. I have tried to segment the parasite objects, but it is difficult since they have almost the same color as the background. I have used cv2.kmeans() to cluster parasite and non-parasite pixels.
import os
import csv
import cv2
import numpy as np
import matplotlib.pyplot as plt

def smooth(img):
    dest = cv2.medianBlur(img, 7)
    #dest = cv2.GaussianBlur(img, (7, 7), 0)
    return dest

def process(path, img):
    image = cv2.imread(path + img, 1)
    image = smooth(image)
    return image

def kmeans(img, name):
    image = img.reshape(img.shape[0] * img.shape[1], 3)
    image = np.float32(image)
    nclusters = 5
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    attempts = 10
    flags = cv2.KMEANS_RANDOM_CENTERS
    compactness, labels, centers = cv2.kmeans(image, nclusters, None, criteria, attempts, flags)
    centers = np.uint8(centers)
    res = centers[labels.flatten()]
    res2 = res.reshape(img.shape)
    cv2.imwrite(dest + name[:-4] + '.png', res2)
    im_color = cv2.imread(dest + name[:-4] + '.png', cv2.IMREAD_COLOR)
    im_gray = cv2.cvtColor(im_color, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(im_gray, thresh=100, maxval=255, type=cv2.THRESH_BINARY_INV)
    mask3 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)  # 3-channel mask
    im_thresh_color = cv2.bitwise_and(img, mask3)
    cv2.imwrite("C:\\Users\\user\\Desktop\\lbim2\\" + name[:-4] + ".png", im_thresh_color)

def preprocess(path):
    images = []
    j = 0
    print("Median Blur")
    for i in os.listdir(path):
        print(i)
        images.append(process(path, i))
        print(images[j].shape)
        images[j] = kmeans(images[j], i)
        j += 1
        print(i)

dest = '../output1/'
print("Preprocess")
preprocess('../input1/')
The image I get has all pixel values 0 (a black output).
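A quick diagnostic sketch (my addition, not from the original post): check how many pixels the mask actually keeps before the bitwise_and, since an all-zero mask would explain an all-black result:
# Hypothetical check inside kmeans(), right after the threshold call
print('mask keeps', cv2.countNonZero(mask), 'of', mask.size, 'pixels')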

Aruco OpenCV example, all markers rejected

I am following this example.
OpenCV Aruco example with image
The following is the code snippet I am using to detect the markers. I am unable to understand why the example is not working for me.
import numpy as np
import cv2
import cv2.aruco as aruco
import os

im_names = filter(lambda x: x.endswith('.png'),
                  [f for f in os.listdir('local_vids_ims')])
for imn in im_names:
    image = cv2.imread('local_vids_ims/' + imn)
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        image, aruco_dict, parameters=parameters)
    print(corners, ids, rejectedImgPoints)
    # aruco.drawDetectedMarkers(image, corners)
    aruco.drawDetectedMarkers(image, rejectedImgPoints)
    cv2.imshow('gray_im', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Interesting. There's nothing wrong with your program. I tried the same thing in Python and C++ and got the same result as you. So I tried with a different image and was successful.
Here's my program. It's basically the same as yours but note that I'm using a different dictionary.
import numpy as np
import cv2
import cv2.aruco as aruco
image = cv2.imread("52814747.png")
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
parameters = aruco.DetectorParameters_create()
corners, ids, rejectedImgPoints = aruco.detectMarkers(
    image, aruco_dict, parameters=parameters)
print(corners, ids, rejectedImgPoints)
aruco.drawDetectedMarkers(image, corners, ids)
aruco.drawDetectedMarkers(image, rejectedImgPoints, borderColor=(100, 0, 240))
cv2.imshow('so52814747', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
I don't know if the problem is the 6x6 dictionary itself or whether the source image doesn't have enough resolution to work with the 6x6 dictionary. But there's definitely something wrong with that tutorial; I've reported the issue on GitHub.
Here's the image I used.
And here's the result. (Found markers have green borders. Rejected candidates have red borders.)
I encountered the same problem. I solved it by flipping the input image with the function cv::flip.
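In Python that would be a one-liner (a sketch; which flip you need depends on your capture setup):
image = cv2.flip(image, 1)  # 1 = horizontal flip, 0 = vertical, -1 = both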
You have only created a method to detect the ArUco markers and their respective IDs. If you want to augment an image over the detected marker, you can do something like this:
def augment_marker(bbox, ids, img, img_aug, draw_id=True):
    tl = bbox[0][0][0], bbox[0][0][1]  # top left
    tr = bbox[0][1][0], bbox[0][1][1]  # top right
    br = bbox[0][2][0], bbox[0][2][1]  # bottom right
    bl = bbox[0][3][0], bbox[0][3][1]  # bottom left
    h, w, c = img_aug.shape
    pts1 = np.array([tl, tr, br, bl])
    pts2 = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    matrix, _ = cv2.findHomography(pts2, pts1)
    imgout = cv2.warpPerspective(img_aug, matrix,
                                 (img.shape[1], img.shape[0]))
    # At this point imgout contains only the warped image on a fully
    # black background, so overlay just the marker area and keep the
    # real environment around it:
    # step 1: black out the marker area in the original image
    cv2.fillConvexPoly(img, pts1.astype(int), (0, 0, 0))
    imgout = img + imgout
    return imgout
where:
bbox is what you get from aruco.detectMarkers()
img is the image containing the ArUco marker
img_aug is the image you want to augment over the marker
draw_id is just a flag I added to draw the ID over the detected marker
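A minimal usage sketch (hypothetical file names; assumes the legacy aruco API used in the answers above, and that augment_marker returns imgout as shown):
import cv2
import cv2.aruco as aruco
import numpy as np

frame = cv2.imread('scene.png')      # hypothetical image containing a marker
overlay = cv2.imread('overlay.png')  # hypothetical image to place on the marker

aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters_create()
corners, ids, rejected = aruco.detectMarkers(frame, aruco_dict, parameters=parameters)

if ids is not None:
    # corners[0] has shape (1, 4, 2), matching the bbox indexing above
    out = augment_marker(corners[0], ids[0], frame, overlay)
    cv2.imshow('augmented', out)
    cv2.waitKey(0)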

Text written on the frame also turns gray when video frames turned into grayscale

I am building a motion detector application. For the motion detection algorithm to work, I converted the frames to grayscale, so now the application is able to detect motion. But when I try to put text on the frame, posting a message like "MOVING", even the text turns gray and is hardly visible. How do I draw colored text on a video frame?
Below is my motion detection application code
import cv2
import numpy as np
from skimage.measure import compare_ssim
from twilio.rest import Client

# We can compare two images using Structural Similarity (SSIM),
# so a small change in pixel values won't prompt this method to call both images dissimilar.
# The closer the value is to 1, the more similar the two images are.
def ssim(A, B):
    return compare_ssim(A, B, data_range=A.max() - A.min())

# Capture a video, either from a file or a live video stream
cap = cv2.VideoCapture(0)
first_frame = True
prev_frame = None
current_frame = None
# We keep a count of the frames
frame_counter = 0
while True:
    if frame_counter == 0:
        # prev_frame will always trail behind the current_frame
        prev_frame = current_frame
    # Get a frame from the video
    ret, current_frame = cap.read()
    # If we reach the end of the video (in case of a video file), stop reading
    if current_frame is None:
        break
    # Convert the image to grayscale
    current_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
    if first_frame:
        # The first time around, prev_frame and current_frame are the same
        prev_frame = current_frame
        first_frame = False
    if frame_counter == 9:
        # Compare the two images based on SSIM
        ssim_val = ssim(current_frame, prev_frame)
        print(ssim_val)
        # A major drop in the SSIM value means an object has been detected
        if ssim_val < 0.8:
            # Here I want to put colored text on the screen
            cv2.putText(current_frame, "MOVING", (100, 300),
                        cv2.FONT_HERSHEY_TRIPLEX, 4, (255, 0, 0))
        frame_counter = -1
    # Show the video as a series of frames
    cv2.imshow("Motion Detection", current_frame)  # (name of the window, image)
    frame_counter += 1
    key = cv2.waitKey(1) & 0xFF  # mask the return value with 0xFF to get the char value
    if key == ord('q'):  # ASCII value of 'q'
        break

# Release the resources allocated to the video file or video stream
cap.release()
# Destroy all the windows
cv2.destroyAllWindows()
I searched online and found this piece of code, which basically suggests converting the grayscale frame back to BGR:
backtorgb = cv2.cvtColor(current_frame, cv2.COLOR_GRAY2RGB)
But this didn't work. I even took a copy of the current frame before converting it to grayscale and then tried to write on the copied color frame, but the text still comes out gray, not colored. What should I do?
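For what it's worth, a minimal sketch of the usual approach: keep the grayscale frame for the SSIM computation, but convert a copy back to BGR for display and draw the colored text on that copy (names match the question's code):
display_frame = cv2.cvtColor(current_frame, cv2.COLOR_GRAY2BGR)
cv2.putText(display_frame, "MOVING", (100, 300),
            cv2.FONT_HERSHEY_TRIPLEX, 4, (255, 0, 0))  # blue in BGR
cv2.imshow("Motion Detection", display_frame)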

Assertion failed (m.dims >= 2) in Mat

Here is my code for image thresholding. I am getting the error at line 22, which is:
ret,thresh2 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)
In this code I want to capture image frames from the video camera and then perform various kinds of thresholding operations on the captured frames.
I have stored image frames at various instants of time. My objective is to segment the moving objects in the video, hence I am applying the thresholding operations.
Does anybody have any idea, how to do it?
Thanks in advance.
import cv2
import numpy as np
import time
from matplotlib import pyplot as plt
import sys

cam = cv2.VideoCapture(0)
while cam.isOpened():
    ret, frame = cam.read()  # keep capturing frames continuously
    while ret == True:
        #img = cv2.imread('/home/shrikrishna/Detection&Tracking/OpenCV-Tutorial', 6)
        cv2.imwrite('At time' + str(time.clock()) + '.jpg', frame)
        img2 = cv2.imread('At time' + str(time.clock()) + '.jpg', 6)
        t = str(time.clock())
        cv2.imshow('Orignal', frame)
        k = cv2.waitKey(0) & 0xffff
        if k == 27:
            #img = cv2.imread('At time' + str(time.clock()) + '.jpg', 6)
            break
        if k == ord('q'):
            sys.exit(0)
    #cv2.imwrite('At time' + t + '.jpg', frame)
    img = cv2.imread('At time' + t + '.jpg', 6)
    ret, thresh1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    ret, thresh2 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)  # line 22: the failing call
    ret, thresh3 = cv2.threshold(img, 127, 255, cv2.THRESH_TRUNC)
    ret, thresh4 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO)
    ret, thresh5 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO_INV)
    titles = ['Original Image', 'BINARY', 'BINARY_INV', 'TRUNC', 'TOZERO', 'TOZERO_INV']
    images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
    for i in range(6):
        plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
        plt.title(titles[i])
        plt.xticks([]), plt.yticks([])
    plt.show()

cv2.waitKey(0)
cv2.destroyAllWindows()
In the following line, you read in the image as a colour image (based on the second parameter, flags):
img = cv2.imread('At time'+ t + '.jpg',6)
This means that img contains 3 channels, which in Python is represented by a 3-dimensional array.
You immediately use this image as the source for thresholding:
ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
According to the documentation, the first parameter to threshold() is:
src – input array (single-channel, 8-bit or 32-bit floating point).
That means you need a single channel image, e.g. a grayscale image:
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh1 = cv2.threshold(img_gray,127,255,cv2.THRESH_BINARY)
# ...
Another option would be to just read the image as grayscale in first place:
img_gray = cv2.imread('At time'+ t + '.jpg',0)
# ...
