Hi, I'm new to opencv and I'm trying to decode a byte array
On the sending side, I need to send a message in bytes format, and I'm using this code:
image_bytes = cv2.imencode('.jpg', imageRGB)[1].tobytes()
And on the receiving side, I am receiving a message with the following type: <class 'str'>
with this content: /9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAIBAQEBAQIBAQECAgICAgQDAgICAgUEBAMEBgUGBgYFBgYGBwkIBgcJBwYGCAsICQoK ...
I tried the following: (x['other']['contentBytes'] is where the bytes are)
nparr = np.fromstring(x['other']['contentBytes'], np.uint8)
This returns a ( <class 'numpy.ndarray'> ) with the following shape: (40672,)
And when I try to
newFrame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
I get a <class 'NoneType'> type.
Your image is base64-encoded so you need to decode it first:
from base64 import b64decode
import numpy as np
import cv2
# Extract JPEG-encoded image from base64-encoded string
JPEG = b64decode(YOURDATA)
# Decode JPEG back into Numpy array
na = cv2.imdecode(np.frombuffer(JPEG,dtype=np.uint8), cv2.IMREAD_COLOR)
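Tying that back to the question, a minimal sketch of the receiving side might look like this (decode_received_frame is just a hypothetical helper name; the assumption is that x['other']['contentBytes'] holds the base64 string shown above):
from base64 import b64decode
import cv2
import numpy as np

def decode_received_frame(content_b64):
    """Turn the received base64 string back into an OpenCV image."""
    jpeg_bytes = b64decode(content_b64)                  # base64 text -> raw JPEG bytes
    nparr = np.frombuffer(jpeg_bytes, dtype=np.uint8)    # bytes -> 1-D uint8 array
    return cv2.imdecode(nparr, cv2.IMREAD_COLOR)         # JPEG -> H x W x 3 BGR array

# e.g. newFrame = decode_received_frame(x['other']['contentBytes'])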
Here is my tensor:
import torch
from torchvision import transforms
content:
tensor([[[[0.8939, 0.8700, 0.8458, ..., 0.7610, 0.7093, 0.6909],
...
[0.4880, 0.5192, 0.5957, ..., 0.8569, 0.9148, 0.9186]]]])
I want to convert this torch.Tensor to base64. I've tried the following, but then I cannot convert the PIL image to base64 either.
img = transforms.ToPILImage(content)
How can I do this?
You should first convert your image to bytes, and then encode those bytes to base64:
import torch
from torchvision import transforms
import base64
tensor = torch.rand(3, 512, 512)  # float values in [0, 1], which is what ToPILImage expects
transform = transforms.ToPILImage()
pil_image = transform(tensor)
# Convert the PIL image to raw pixel bytes (no file header, so the receiver
# must know the image size and mode to reconstruct it)
image_bytes = pil_image.tobytes()
# Encode the bytes to base64
base64_string = base64.b64encode(image_bytes).decode()
print(base64_string)
Output:
00Hbhc4yZEZnw/VlIyHF6OH2VRAXeRdYiH82tBgSBoocDAB+cycknk3c5G9ZmdbHNu90whgDK49cjELZ6uJCzqYxVclXbJRc0Nb2AgN9XWULFOK+ubWJAyNbAgNZRvmwocwJN4603
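Note that pil_image.tobytes() produces raw pixel data with no header, so whoever decodes the base64 string also needs the image size and mode to rebuild it. A minimal alternative sketch, assuming you want a self-describing payload: save the PIL image as PNG into an in-memory buffer before base64-encoding it:
import base64
import io
import torch
from torchvision import transforms
from PIL import Image

tensor = torch.rand(3, 512, 512)                   # values in [0, 1], as ToPILImage expects
pil_image = transforms.ToPILImage()(tensor)

# Encode the image as PNG in memory, then base64-encode the PNG bytes
buffer = io.BytesIO()
pil_image.save(buffer, format="PNG")
base64_string = base64.b64encode(buffer.getvalue()).decode()

# Decoding works with Image.open because the payload is a real PNG file
restored = Image.open(io.BytesIO(base64.b64decode(base64_string)))
print(restored.size, restored.mode)                # (512, 512) RGB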
I want to decompress a bunch of .nii.gz files in Python so that they can be processed in sitk later on. When I decompress a single file manually by right-clicking it and choosing 'Extract..', the file is then correctly interpreted by sitk (I do sitk.ReadImage(unzipped)). But when I try to decompress it in Python using the following code:
with gzip.open(segmentation_zipped, "rb") as f:
    bindata = f.read()
segmentation_unzipped = os.path.join(segmentation_zipped.replace(".gz", ""))
with gzip.open(segmentation_unzipped, "wb") as f:
    f.write(bindata)
I get an error when sitk tries to read the file:
RuntimeError: Exception thrown in SimpleITK ReadImage: C:\d\VS14-Win64-pkg\SimpleITK\Code\IO\src\sitkImageReaderBase.cxx:82:
sitk::ERROR: Unable to determine ImageIO reader for "E:\BraTS19_2013_10_1_seg.nii"
Also when trying to do it a little differently:
input = gzip.GzipFile(segmentation_zipped, 'rb')
s = input.read()
input.close()
segmentation_unzipped = os.path.join(segmentation_zipped.replace(".gz", ""))
output = open(segmentation_unzipped, 'wb')
output.write(s)
output.close()
I get:
RuntimeError: Exception thrown in SimpleITK ReadImage: C:\d\VS14-Win64-pkg\SimpleITK-build\ITK\Modules\IO\PNG\src\itkPNGImageIO.cxx:101:
itk::ERROR: PNGImageIO(0000022E3AF2C0C0): PNGImageIO failed to read header for file:
Reason: fread read only 0 instead of 8
Can anyone help?
As a side note, your code fails because you also open the output file with gzip.open(..., "wb"), so the decompressed data is immediately re-compressed before it is written. But there is actually no need to unzip the NIfTI images at all: libraries such as Nibabel can handle .nii.gz files without decompression.
#==================================
import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt
#==================================
# load image (4D) [X, Y, Z_slice, time]
nii_img = nib.load('path_to_file.nii.gz')
nii_data = nii_img.get_fdata()
# derive the grid dimensions from the data shape
number_of_slices = nii_data.shape[2]
number_of_frames = nii_data.shape[3]
fig, ax = plt.subplots(number_of_frames, number_of_slices, constrained_layout=True)
fig.canvas.manager.set_window_title('4D Nifti Image')
fig.suptitle('4D_Nifti 10 slices 30 time Frames', fontsize=16)
#-------------------------------------------------------------------------------
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
for slice_idx in range(number_of_slices):
    # if your data is 4D; otherwise remove the inner loop
    for frame in range(number_of_frames):
        ax[frame, slice_idx].imshow(nii_data[:, :, slice_idx, frame], cmap='gray', interpolation=None)
        ax[frame, slice_idx].set_title("layer {} / frame {}".format(slice_idx, frame))
        ax[frame, slice_idx].axis('off')
plt.show()
Or you can use SimpleITK as follows:
import SimpleITK as sitk
import numpy as np
# A path to a T1-weighted brain .nii image (SimpleITK can also read .nii.gz directly):
t1_fn = 'path_to_file.nii.gz'
# Read the .nii image containing the volume with SimpleITK:
sitk_t1 = sitk.ReadImage(t1_fn)
# and access the numpy array:
t1 = sitk.GetArrayFromImage(sitk_t1)
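If you do need the decompressed .nii file on disk (as in your original attempt), the fix is to read through gzip but write with the built-in open(). A minimal sketch, using the path from your error message as an example:
import gzip
import shutil

segmentation_zipped = "E:/BraTS19_2013_10_1_seg.nii.gz"      # example path from the question
segmentation_unzipped = segmentation_zipped.replace(".gz", "")

# Read the compressed stream, but write with plain open(),
# otherwise the data is gzip-compressed all over again.
with gzip.open(segmentation_zipped, "rb") as f_in, open(segmentation_unzipped, "wb") as f_out:
    shutil.copyfileobj(f_in, f_out)

# sitk.ReadImage(segmentation_unzipped) should now work.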
When I try to convert a base64 string into an image, I am getting None from cv2.
Following is my code:
import cv2
import numpy as np
def bsae62toimage(imgvalue):
    imge = "R0lGODlhEAAQAMQAAORHHOVSKudfOulrSOp3WOyDZu6QdvCchPGolfO0o/XBs/fNwfjZ0frl3/zy7////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAkAABAALAAAAAAQABAAAAVVICSOZGlCQAosJ6mu7fiyZeKqNKToQGDsM8hBADgUXoGAiqhSvp5QAnQKGIgUhwFUYLCVDFCrKUE1lBavAViFIDlTImbKC5Gm2hB0SlBCBMQiB0UjIQA7"
    nparr = np.fromstring(imge, np.uint8)
    print(nparr.shape)
    img1 = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)
    print(img1.shape)
It is printing the NumPy array, but cv2.imdecode is returning None. Can you help me find the problem in my code?
My findings: np.fromstring returns a 1D array. I assume it should return a 3D array, but I may be wrong.
Since it is unclear where you obtained the imge = "R0lGODlhEA..." variable, I will present an ideal flow for converting an OpenCV image to a base64 string and back to an OpenCV image:
import cv2
import base64
import numpy as np
def to_base64(img):
    _, buf = cv2.imencode(".png", img)
    return base64.b64encode(buf)

def from_base64(buf):
    buf_decode = base64.b64decode(buf)
    buf_arr = np.frombuffer(buf_decode, dtype=np.uint8)
    return cv2.imdecode(buf_arr, cv2.IMREAD_UNCHANGED)
img = cv2.imread("/path/to/img.png")
img_base64 = to_base64(img)
img_decoded = from_base64(img_base64)
print(img_decoded.shape)
Also the documentation states that:
If the buffer is too short or contains invalid data, the empty
matrix/image is returned.
It seems to me that imge = "R0lGODlhEA..." is invalid.
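As a side note on the specific imge string in the question: after base64-decoding, the bytes start with a GIF89a header, and OpenCV's imdecode does not support GIF, so it would still return None even after decoding the base64. A minimal sketch, assuming Pillow is available, decodes it via PIL and converts it to an OpenCV-style array:
import base64
import io
import cv2
import numpy as np
from PIL import Image

imge = "R0lGODlhEAAQAMQAAORHHOVSKudfOulrSOp3WOyDZu6QdvCchPGolfO0o/XBs/fNwfjZ0frl3/zy7////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAkAABAALAAAAAAQABAAAAVVICSOZGlCQAosJ6mu7fiyZeKqNKToQGDsM8hBADgUXoGAiqhSvp5QAnQKGIgUhwFUYLCVDFCrKUE1lBavAViFIDlTImbKC5Gm2hB0SlBCBMQiB0UjIQA7"

gif_bytes = base64.b64decode(imge)              # base64 text -> actual GIF bytes
pil_img = Image.open(io.BytesIO(gif_bytes))     # Pillow can decode GIFs
rgb = np.array(pil_img.convert("RGB"))          # H x W x 3 RGB array
bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)      # OpenCV convention is BGR
print(bgr.shape)                                # e.g. (16, 16, 3) for this 16x16 icon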
I am trying to convert byte-type data (a stream from an RPi over a socket) to a string or any usable form (as required by OpenCV); however, I'm unable to do so! I've already tried decoding the received data via io.BytesIO() using .decode(), but it comes back saying JpegImageFile has no attribute 'decode'. This code works great for PIL but not for OpenCV, since the socket returns byte-type data in Python 3 and OpenCV requires a NumPy array (from what I've read so far). I am using Python 3.5.5 and OpenCV 3.4.1... Any help is greatly appreciated!
import io
import socket
import struct
from PIL import Image
import cv2
server_socket = socket.socket()
server_socket.bind(('10.0.0.214', 8025))
server_socket.listen(0)
connection = server_socket.accept()[0].makefile('rb')
try:
    while True:
        image_len = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]
        if not image_len:
            break
        image_stream = io.BytesIO()
        image_stream.write(connection.read(image_len))
        image_stream.seek(0)
        image = Image.open(image_stream)
        string_data = image.decode()
        gray = cv2.cvtColor(string_data, cv2.COLOR_BGR2GRAY)
        print('Image is %dx%d' % image.size)
        image.verify()
        print('Image is verified')
finally:
    connection.close()
    server_socket.close()
Traceback (most recent call last):
\test\TestingStream.py", line 32, in <module>
string_data = image.decode()
AttributeError: 'JpegImageFile' object has no attribute 'decode'
[Finished in 11.9s]
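For what it's worth, the JpegImageFile returned by Image.open is already a decoded image object rather than raw bytes, which is why it has no .decode() method. A minimal sketch of the inside of the while loop (connection and image_len come from the code above), going straight from the socket bytes to an OpenCV array:
import cv2
import numpy as np

# Inside the while loop, after unpacking image_len:
jpeg_bytes = connection.read(image_len)                      # raw JPEG payload from the socket
frame = cv2.imdecode(np.frombuffer(jpeg_bytes, np.uint8),    # bytes -> BGR numpy array
                     cv2.IMREAD_COLOR)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
print('Image is %dx%d' % (frame.shape[1], frame.shape[0]))
# If you prefer to keep PIL: frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)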
I have an image that is obtained from an OpenCV video capture object as such:
import cv2
import base64
from PIL import Image
import io
cap = cv2.VideoCapture(0)
# capture frame by frame
ret, frame = cap.read()
How can I encode and decode the image (i.e. go from raw pixels to bytes and back to raw pixels)?
So far I have been trying the following:
encoded_string = base64.b64encode(frame)
decoded_string = base64.b64decode(encoded_string)
img = Image.open(io.BytesIO(decoded_string))
img.show()
This is giving me an error:
File "/usr/lib/python3/dist-packages/PIL/Image.py", line 2295, in open
% (filename if filename else fp))
OSError: cannot identify image file <_io.BytesIO object at 0x7efddbb78e08>
The original attempt fails because base64.b64encode(frame) encodes the raw pixel buffer, not an image file, so PIL cannot identify it when decoding. The correct way of encoding and subsequently decoding the raw frame with base64 turns out to be as follows:
import numpy as np
import cv2
import base64
cap = cv2.VideoCapture(0)
# capture frame by frame
ret, frame = cap.read()
# encode frame
encoded_string = base64.b64encode(frame)
# decode frame
decoded_string = base64.b64decode(encoded_string)
decoded_img = np.frombuffer(decoded_string, dtype=np.uint8)
decoded_img = decoded_img.reshape(frame.shape)
# show decoded frame
cv2.imshow("decoded", decoded_img)
cv2.waitKey(0)
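Note that this round-trip only works because frame.shape is still available at decode time. A hedged alternative sketch: compress the frame with cv2.imencode first, so the base64 payload carries its own dimensions and can be decoded anywhere with cv2.imdecode:
import base64
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
cap.release()

if ret:
    # encode frame as JPEG, then base64-encode the JPEG bytes
    ok, jpeg = cv2.imencode(".jpg", frame)
    encoded_string = base64.b64encode(jpeg.tobytes())

    # decode without needing to know frame.shape in advance
    jpeg_bytes = base64.b64decode(encoded_string)
    decoded_img = cv2.imdecode(np.frombuffer(jpeg_bytes, np.uint8), cv2.IMREAD_COLOR)

    cv2.imshow("decoded (JPEG round-trip)", decoded_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()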