Datamatrix with raspberry - python-3.x

I am trying to read a Data Matrix code on a Raspberry Pi using Python.
I'm using pylibdmtx to read the code, but it only works on my notebook. When I run the same code on the Raspberry Pi, it can't read the code; at the moment the Raspberry Pi reads only QR codes and barcodes.
I have two Raspberry Pis, one running Raspbian and the other Ubuntu Core, and neither worked.
Example code below:
import cv2
import time
from pylibdmtx.pylibdmtx import decode

data = None
video = cv2.VideoCapture(0)
video.set(cv2.CAP_PROP_FPS, 19)

while video.isOpened():
    time.sleep(1/9)
    ret, frame = video.read()
    if ret is False:
        break
    decodeObjects = decode(frame,
                           timeout=1000,
                           max_count=1,
                           corrections=3)
    for obj in decodeObjects:
        if obj.data:
            data = obj
    if data:
        break

video.release()
cv2.destroyAllWindows()
print(data)

pylibdmtx is just a wrapper for libdmtx. To make it work, you have to install the native library first.
On Windows, the .whl file already contains the required DLL.
On macOS and Linux, you can install the library via command-line tools.
macOS:
brew install libdmtx
Linux:
sudo apt-get install libdmtx0a
There is probably no pre-built library for Raspberry Pi, so you can build it yourself. Here is the source code:
https://github.com/dmtx/libdmtx
Take three steps to build and install the libdmtx library (from a fresh git checkout you may first need to generate the configure script, e.g. by running its bundled autogen.sh, if present):
$ ./configure
$ make
$ sudo make install
After installing the libdmtx library, your Python code should work.
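To sanity-check the installation, you can ask the dynamic loader whether it can see the library at all; a minimal sketch (this is roughly how pylibdmtx locates the shared library on Linux):

import ctypes.util

# If this prints None, the dynamic loader cannot find libdmtx,
# and pylibdmtx will fail when it tries to load the library.
print(ctypes.util.find_library('dmtx'))

If it prints None right after make install, the library probably landed in /usr/local/lib; running sudo ldconfig refreshes the loader cache.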

If decoding the live frame still fails, one workaround is to save each frame to disk and decode the reloaded image instead:

import cv2
import time
from pylibdmtx.pylibdmtx import decode

data = None
video = cv2.VideoCapture(0)
video.set(cv2.CAP_PROP_FPS, 19)
# Add
saveFilename = "./liveImage.jpg"

while video.isOpened():
    time.sleep(1/9)
    ret, frame = video.read()
    if ret is False:
        break
    # Add - save live image
    liveImage = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imwrite(saveFilename, liveImage)
    # Add - open file
    loadImage = cv2.imread(saveFilename)
    # Modify
    decodeObjects = decode(loadImage,
                           timeout=1000,
                           max_count=1,
                           corrections=3)
    for obj in decodeObjects:
        if obj.data:
            data = obj
    if data:
        break

video.release()
cv2.destroyAllWindows()
print(data)

If it still doesn't decode, also try removing the timeout argument, so the decoder is not stopped after one second:

import cv2
import time
from pylibdmtx.pylibdmtx import decode

data = None
video = cv2.VideoCapture(0)
video.set(cv2.CAP_PROP_FPS, 19)
# Add
saveFilename = "./liveImage.jpg"

while video.isOpened():
    time.sleep(1/9)
    ret, frame = video.read()
    if ret is False:
        break
    # Add - save live image
    liveImage = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imwrite(saveFilename, liveImage)
    # Add - open file
    loadImage = cv2.imread(saveFilename)
    # Modify
    decodeObjects = decode(loadImage,
                           # Delete timeout=1000,
                           max_count=1,
                           corrections=3)
    for obj in decodeObjects:
        if obj.data:
            data = obj
    if data:
        break

video.release()
cv2.destroyAllWindows()
print(data)

Related

JPEG parameter struct mismatch: library thinks size is 584, caller expects 728 for python3 in Jetson

I am trying to run the darknet_video.py script for YOLO from here
on Jetson devices (Nano and Xavier NX). The code runs fine on one Nano but not on another Nano or on the NX. The script is run using the following command on Ubuntu 18.04 (JetPack):
python3 darknet_video.py --input test.mp4 --out_filename out1.txt --weights yolov3-tiny.weights --ext_output --config_file yolov3-tiny.cfg --data_file coco.data --thresh 0.2
I am getting the following errors:
JPEG parameter struct mismatch: library thinks size is 584, caller expects 728
pure virtual method called
terminate called without an active exception
Aborted (core dumped)
Since it runs fine on one Nano, it could be a dependency issue. Here is the code in darknet_video.py:
from ctypes import *
import random
import os
import cv2
import time
import darknet
import argparse
from threading import Thread, enumerate
from queue import Queue


def parser():
    parser = argparse.ArgumentParser(description="YOLO Object Detection")
    parser.add_argument("--input", type=str, default=0,
                        help="video source. If empty, uses webcam 0 stream")
    parser.add_argument("--out_filename", type=str, default="",
                        help="inference video name. Not saved if empty")
    parser.add_argument("--weights", default="yolov4.weights",
                        help="yolo weights path")
    parser.add_argument("--dont_show", action='store_true',
                        help="window inference display. For headless systems")
    parser.add_argument("--ext_output", action='store_true',
                        help="display bbox coordinates of detected objects")
    parser.add_argument("--config_file", default="./cfg/yolov4.cfg",
                        help="path to config file")
    parser.add_argument("--data_file", default="./cfg/coco.data",
                        help="path to data file")
    parser.add_argument("--thresh", type=float, default=.25,
                        help="remove detections with confidence below this value")
    return parser.parse_args()


def str2int(video_path):
    """
    argparse returns a string although webcam uses int (0, 1 ...).
    Cast to int if needed.
    """
    try:
        return int(video_path)
    except ValueError:
        return video_path


def check_arguments_errors(args):
    assert 0 < args.thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
    if not os.path.exists(args.config_file):
        raise(ValueError("Invalid config path {}".format(os.path.abspath(args.config_file))))
    if not os.path.exists(args.weights):
        raise(ValueError("Invalid weight path {}".format(os.path.abspath(args.weights))))
    if not os.path.exists(args.data_file):
        raise(ValueError("Invalid data file path {}".format(os.path.abspath(args.data_file))))
    if isinstance(str2int(args.input), str) and not os.path.exists(args.input):
        raise(ValueError("Invalid video path {}".format(os.path.abspath(args.input))))


def set_saved_video(input_video, output_video, size):
    fourcc = cv2.VideoWriter_fourcc(*"MJPG")
    fps = int(input_video.get(cv2.CAP_PROP_FPS))
    video = cv2.VideoWriter(output_video, fourcc, fps, size)
    return video


def video_capture(frame_queue, darknet_image_queue):
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb, (width, height),
                                   interpolation=cv2.INTER_LINEAR)
        frame_queue.put(frame_resized)
        img_for_detect = darknet.make_image(width, height, 3)
        darknet.copy_image_from_bytes(img_for_detect, frame_resized.tobytes())
        darknet_image_queue.put(img_for_detect)
    cap.release()


def inference(darknet_image_queue, detections_queue, fps_queue):
    while cap.isOpened():
        darknet_image = darknet_image_queue.get()
        prev_time = time.time()
        detections = darknet.detect_image(network, class_names, darknet_image, thresh=args.thresh)
        detections_queue.put(detections)
        fps = int(1/(time.time() - prev_time))
        fps_queue.put(fps)
        print("FPS: {}".format(fps))
        darknet.print_detections(detections, args.ext_output)
        darknet.free_image(darknet_image)
    cap.release()


def drawing(frame_queue, detections_queue, fps_queue):
    random.seed(3)  # deterministic bbox colors
    video = set_saved_video(cap, args.out_filename, (width, height))
    while cap.isOpened():
        frame_resized = frame_queue.get()
        detections = detections_queue.get()
        fps = fps_queue.get()
        if frame_resized is not None:
            image = darknet.draw_boxes(detections, frame_resized, class_colors)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if args.out_filename is not None:
                video.write(image)
            if not args.dont_show:
                cv2.imshow('Inference', image)
            if cv2.waitKey(fps) == 27:
                break
    cap.release()
    video.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    frame_queue = Queue()
    darknet_image_queue = Queue(maxsize=1)
    detections_queue = Queue(maxsize=1)
    fps_queue = Queue(maxsize=1)
    args = parser()
    check_arguments_errors(args)
    network, class_names, class_colors = darknet.load_network(
        args.config_file,
        args.data_file,
        args.weights,
        batch_size=1
    )
    width = darknet.network_width(network)
    height = darknet.network_height(network)
    input_path = str2int(args.input)
    cap = cv2.VideoCapture(input_path)
    Thread(target=video_capture, args=(frame_queue, darknet_image_queue)).start()
    Thread(target=inference, args=(darknet_image_queue, detections_queue, fps_queue)).start()
    Thread(target=drawing, args=(frame_queue, detections_queue, fps_queue)).start()
Any ideas will be appreciated.
JPEG parameter struct mismatch: library thinks size is 584, caller expects 728
This is about the jpeglib.h header that is used by both the application and the low-level library.
The application was compiled against one version of jpeglib.h and the low-level library against another, and the structure in question (here the one behind j_decompress_ptr) has a different size in the two versions of the header.
Make sure that the low-level library (could be libjpeg-8b) and its client are using the same jpeglib.h.
Remove all the installed libjpeg packages, install only the latest one, and try again.
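As a quick diagnostic (not specific to darknet), you can check which JPEG library your OpenCV build was compiled against from its build information:

import cv2

# Print the JPEG-related lines of OpenCV's build configuration.
# The "Media I/O" section reports the JPEG library and version
# this particular cv2 build links against.
for line in cv2.getBuildInformation().splitlines():
    if "JPEG" in line:
        print(line.strip())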

How to set the pygame icon to an online image

So let's say I have a URL that stores an image, say "https://www.image.site/img.png".
I want to set the pygame icon to that image without downloading anything to my computer.
I am already familiar with the code to set the icon from a file on my computer:

img = pygame.image.load('image')
pygame.display.set_icon(img)

But how can I set it to an online image? Would passing the URL straight to set_icon work, or do I have to do something more complicated?
Another way:
Use the requests library to download the image bytes
Use BytesIO to create a file stream in memory
Create a PIL.Image from the byte file stream
Use pygame.image.fromstring to convert the PIL image to a pygame image
Here's the code:
import pygame
from io import BytesIO
import requests
from PIL import Image

# create image from URL
rsp = requests.get('https://www.pygame.org/docs/_static/pygame_tiny.png')
pilimage = Image.open(BytesIO(rsp.content)).convert("RGBA")
pgimg = pygame.image.fromstring(pilimage.tobytes(), pilimage.size, pilimage.mode)

# show image
pygame.init()
display = pygame.display.set_mode((250, 150))
display.fill((255, 255, 255))
display.blit(pgimg, ((250 - pgimg.get_rect().width) / 2, (150 - pgimg.get_rect().height) / 2))
pygame.display.update()

while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit()
Output: the downloaded image is displayed centered in the pygame window.
I don't know why you'd do that, but I cooked something up for you.
It downloads an image's data and then loads an image from that data.
import requests
import io
import pygame

def surf_from_url(url):
    data = io.BytesIO()
    response = requests.get(url, stream=True)
    if not response.ok:
        print(response)
    for block in response.iter_content(1024):
        if not block:
            break
        data.write(block)
    data.seek(0)
    return pygame.image.load(data)

# demo
surf = surf_from_url("https://www.pygame.org/images/logo_lofi.png")
surf2 = surf_from_url("https://yt3.ggpht.com/a/AATXAJxybFq6Y8SFOuyvWqldJPcXbCps-gR_Qp2z4jKt=s100-c-k-c0xffffffff-no-rj-mo")

screen = pygame.display.set_mode([300, 300])
pygame.display.set_icon(surf)
screen.fill((255, 255, 255))
screen.blit(surf, (0, 0))
screen.blit(surf2, (20, 100))
pygame.display.flip()

Python3 OpenCV - LIBTDB ERROR: data is not tagged properly

I'm getting this error when running this Python script (converted into an .exe) that I found on GitHub, on my Acer tablet with Windows 8.1:
LIBTDB ERROR: data is not tagged properly
(the script continues after printing the error)
import cv2
import numpy as np
import socket
import struct
from io import BytesIO

IP = '192.168.1.8'

# Capture frame
cap = cv2.VideoCapture(0)  # here the error
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect((IP, 8080))

while cap.isOpened():
    _, frame = cap.read()
    memfile = BytesIO()
    np.save(memfile, frame)
    memfile.seek(0)
    data = memfile.read()
    # Send as byte array: frame size + frame content
    client_socket.sendall(struct.pack("L", len(data)) + data)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
It works fine on my Windows 10 PC, but I wanted to try it on two different devices.
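For reference, a receiving end for this length-prefixed protocol might look like the sketch below (hypothetical, just to illustrate the framing). Note that the struct format "L" uses the native size, 4 or 8 bytes depending on the platform, so sender and receiver must agree; a fixed-size format such as "!Q" on both ends would be safer across devices.

import socket
import struct
from io import BytesIO

import numpy as np

HEADER_SIZE = struct.calcsize("L")  # native size: must match the sender

def recv_exact(sock, n):
    """Read exactly n bytes from the socket."""
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("socket closed")
        buf += chunk
    return buf

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("", 8080))
server.listen(1)
conn, _ = server.accept()

while True:
    # Read the frame size, then the numpy-serialized frame itself.
    (size,) = struct.unpack("L", recv_exact(conn, HEADER_SIZE))
    frame = np.load(BytesIO(recv_exact(conn, size)))
    print(frame.shape)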

Python face_recognition and saving file with cv2

This is my first time posting a question, so pardon any mistakes. I'm trying to write a script that does face recognition and saves the video file at the same time, and I'm running into what I think are latency issues. When there isn't a face to detect, it saves the video file fine. When there is a face, though, it seems to save only every other frame. I suspect that's because the face-detection computation keeps it from saving the next frame in time. Is there a way around this, maybe threading or multiprocessing?
import face_recognition as fr
import os
import face_recognition
import numpy as np
import cv2

def get_encoded_faces():
    encoded = {}
    for dirpath, dnames, fnames in os.walk("./faces"):
        for f in fnames:
            if f.endswith(".jpg") or f.endswith(".png"):
                face = fr.load_image_file("faces/" + f)
                encoding = fr.face_encodings(face)[0]
                encoded[f.split(".")[0]] = encoding
    return encoded

def unknown_image_encoded(img):
    face = fr.load_image_file("faces/" + img)
    encoding = fr.face_encodings(face)[0]
    return encoding

faces = get_encoded_faces()
faces_encoded = list(faces.values())
known_face_names = list(faces.keys())

def FindFace(img):
    face_locations = face_recognition.face_locations(img)
    unknown_face_encodings = face_recognition.face_encodings(img, face_locations)
    face_names = []
    for face_encoding in unknown_face_encodings:
        matches = face_recognition.compare_faces(faces_encoded, face_encoding)
        name = "Unknown"
        face_distances = face_recognition.face_distance(faces_encoded, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]
        face_names.append(name)
    #cv2.imwrite('final_image.png', img)

video_capture = cv2.VideoCapture(1)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 20.0, (640, 480))
if not video_capture.isOpened():
    raise Exception("Could not open video device")

while video_capture.isOpened():
    ret, frame = video_capture.read()
    out.write(frame)
    #cv2.imshow('Video', frame)
    FindFace(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
out.release()
cv2.destroyAllWindows()
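Picking up the question's own suggestion: one way to keep the writer loop from stalling is to run recognition in a background thread fed through a small queue, dropping frames for recognition whenever it is busy. A hedged sketch, reusing FindFace and the video_capture/out objects from the script above:

import queue
import threading

import cv2

# A one-slot queue: the writer loop never blocks on recognition;
# frames that arrive while recognition is busy are simply skipped.
frame_queue = queue.Queue(maxsize=1)

def recognition_worker():
    while True:
        img = frame_queue.get()
        if img is None:  # sentinel: stop the worker
            break
        FindFace(img)

worker = threading.Thread(target=recognition_worker, daemon=True)
worker.start()

while video_capture.isOpened():
    ret, frame = video_capture.read()
    if not ret:
        break
    out.write(frame)  # save every frame, regardless of recognition
    try:
        frame_queue.put_nowait(frame)
    except queue.Full:
        pass  # recognition still busy; skip this frame
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

frame_queue.put(None)
worker.join()
video_capture.release()
out.release()
cv2.destroyAllWindows()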

AttributeError with OpenCV in Python

Traceback (most recent call last):
  File "test.py", line 10, in <module>
    tracker = cv2.Tracker_create("MIL")
AttributeError: module 'cv2.cv2' has no attribute 'Tracker_create'
I get the above error when I try to run:
import cv2
import sys

if __name__ == '__main__':

    # Set up tracker.
    # Instead of MIL, you can also use
    # BOOSTING, KCF, TLD, MEDIANFLOW or GOTURN
    tracker = cv2.Tracker_create("MIL")

    # Read video
    video = cv2.VideoCapture(0)

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Define an initial bounding box
    bbox = (287, 23, 86, 320)

    # Uncomment the line below to select a different bounding box
    # bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)

    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Draw bounding box
        if ok:
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (0, 0, 255))

        # Display result
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
I found an answer here: How to add "Tracker" in openCV python 2.7
But this confused me more. I'm on macOS and just getting started with OpenCV, and I'm not really sure how to recompile OpenCV with the correct modules.
Thanks in advance, and sorry if I'm missing something obvious.
So it wasn't an installation problem; the constructor name had changed.
tracker = cv2.Tracker_create("MIL")
Should be:
tracker = cv2.TrackerMIL_create()
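Note that the constructor location has kept moving between OpenCV releases (in 4.5+ several trackers live under cv2.legacy, and some require the opencv-contrib-python package). A defensive sketch that tries the common locations:

import cv2

# Try the MIL tracker constructor in the places it has lived across
# OpenCV versions; raise a clear error if none of them exists.
if hasattr(cv2, "TrackerMIL_create"):
    tracker = cv2.TrackerMIL_create()
elif hasattr(cv2, "legacy") and hasattr(cv2.legacy, "TrackerMIL_create"):
    tracker = cv2.legacy.TrackerMIL_create()
else:
    raise RuntimeError(
        "No MIL tracker found; try installing opencv-contrib-python")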
