IndexError: list index out of range at line 25 - python-3.x

import numpy as np
import cv2, os
import sys
import pandas as pd
import tensorflow as tf
from model import Tensorflow_Model
class dl_model():
EXT_TRAIN_DATA = 'D:\Diabetic_Retinopathy_Detection-master\data\train'
EXT_TEST_DATA = 'D:\Diabetic_Retinopathy_Detection-master\data\test'
EXT_TRAIN_CSV = 'D:\Diabetic_Retinopathy_Detection-master\data\trainLabels.csv'
IMAGE_WIDTH = 512 #1536
IMAGE_HEIGHT = 340 #1024
N_CHANNELS = 3
GENERATOR_BATCH_SIZE = 100
NB_EPOCH_PER_BATCH = 2
NB_EPOCH = 5
def __init__(self, argv):
self.argv = argv
self.BASE_PATH = argv[0]
self.dims_image = {'width': self.IMAGE_WIDTH, 'height': self.IMAGE_HEIGHT, 'channel': self.N_CHANNELS}
self.dims_output = 5
def get_image_name_list(self, path, train_or_not):
if train_or_not:
training_csv = pd.read_csv(path)
headers = training_csv.columns
return np.array([training_csv[headers[0]], training_csv[headers[1]]])
else:
return np.array([os.listdir(path)])
def get_image_names(self):
self.train_image_names_with_labels = self.get_image_name_list(os.path.join(self.BASE_PATH, self.EXT_TRAIN_CSV), 1) # returns a tuple
self.test_image_names = self.get_image_name_list(os.path.join(self.BASE_PATH, self.EXT_TEST_DATA), 0) # returns just names
print('Number of training images: {}\nNumber of testing images: {}'.format(len(self.train_image_names_with_labels[0]), len(self.test_image_names[0])))
def image_transformation(self, image_path):
img = cv2.imread(image_path)
img = cv2.resize(img, (self.IMAGE_WIDTH, self.IMAGE_HEIGHT))
return np.array(img).reshape((self.IMAGE_HEIGHT, self.IMAGE_WIDTH, self.N_CHANNELS))
def image_batch_generator(self, array, batch_size, ext):
path = os.path.join(self.BASE_PATH, ext)
for i in range(0, len(array[0]), batch_size):
batch = array[0][i: i+batch_size]
data_batch = []
for j, image_name in enumerate(batch):
try:
if ext == self.EXT_TRAIN_DATA:
image_path = '{}.jpeg'.format(os.path.join(path, image_name))
data_batch.append((self.image_transformation(image_path), array[1][i+j]))
else:
image_path = '{}'.format(os.path.join(path, image_name))
data_batch.append(self.image_transformation(image_path))
except:
print('Error reading: {}'.format(image_path))
yield(np.array(data_batch))
def execute(self):
with tf.device('/cpu:0'):
self.get_image_names()
training_batch_generator = self.image_batch_generator(self.train_image_names_with_labels, self.GENERATOR_BATCH_SIZE, self.EXT_TRAIN_DATA)
tf_model = Tensorflow_Model(self.dims_image, self.dims_output) # CALCULATE dims_output
# TRAINING PHASE
for i, training_batch in enumerate(training_batch_generator):
if not i > self.NB_EPOCH:
tf_model.train(training_batch)
else:
break
# test_batch_generator = self.image_batch_generator(self.test_image_names, self.BATCH_SIZE, self.EXT_TEST_DATA)
if __name__ == '__main__':
dl_model(sys.argv[1:]).execute()

Related

Multiprocessing in python for a function

I'm new to python and I'm trying to run multiprocessing:
I'm trying to code a program to convert a tiffs file to dax file present in a directory.
This is my original code:
import os
import datawriter
import datareader
from time import time
from tqdm import tqdm
dataroot = input("Enter the folder location:")
count_for_number_of_files = 0
count_for_frames_in_filelist = 0
for subdir, dir, files in os.walk(dataroot):
for file in files:
if file.endswith(".tif"):
print(f"file {count_for_number_of_files + 1} = {file}")
count_for_number_of_files += 1
print("Total number of files:", count_for_number_of_files)
frame_list = [None] * count_for_number_of_files
for i in range(0, len(frame_list)):
frame_list[i] = input(f"Enter number of frames for file {i + 1}: ")
print("Frames in each file:", frame_list)
start_time = time()
for subdir, dir, files in os.walk(dataroot):
for file in sorted(files):
if file.endswith(".tif"):
dax_file = datawriter.DaxWriter("{}.dax".format(file[0:-4]))
print(f"Processing {frame_list[count_for_frames_in_filelist]} frames for {file}")
for i in tqdm(range(int(frame_list[count_for_frames_in_filelist]))):
data = datareader.TifReader("{}".format(file)).loadAFrame(i)
dax_file.addFrame(data)
count_for_frames_in_filelist += 1
dax_file.close()
print(f"Conversion completed for {count_for_number_of_files} files", '\n',
"Total time taken:", time() - start_time, "seconds")
The new code using multiprocessing is:
import multiprocessing as mp
from multiprocessing import Process, Lock
import numpy as np
import pandas as pd
import os, logging
import originpro as op
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.ticker import PercentFormatter
import datawriter
import datareader
from time import time
from tqdm import tqdm
import tifffile
import hashlib
import re
import threading
dataroot = input("Enter the folder location:")
class Multi:
def f(x):
count_for_number_of_files = 0
count_for_frames_in_filelist = 0
for subdir, dir, files in os.walk(x):
for file in files:
if file.endswith(".tif"):
print(f"file {count_for_number_of_files + 1} = {file}")
count_for_number_of_files += 1
print("Total number of files:", count_for_number_of_files)
frame_list = [None] * count_for_number_of_files
for i in range(0, len(frame_list)):
frame_list[i] = input(f"Enter number of frames for file {i + 1}: ")
print("Frames in each file:", frame_list)
start_time = time()
for subdir, dir, files in os.walk(dataroot):
for file in sorted(files):
if file.endswith(".tif"):
dax_file = datawriter.DaxWriter("{}.dax".format(file[0:-4]))
print(f"Processing {frame_list[count_for_frames_in_filelist]} frames for {file}")
for i in tqdm(range(int(frame_list[count_for_frames_in_filelist]))):
data = datareader.TifReader("{}".format(file)).loadAFrame(i)
dax_file.addFrame(data)
count_for_frames_in_filelist += 1
dax_file.close()
print(f"Conversion completed for {count_for_number_of_files} files", '\n',
"Total time taken:", time() - start_time, "seconds")
my_object=Multi
if __name__ == '__main__':
ctx = mp.get_context('spawn')
q = ctx.Queue()
p = ctx.Process(Multi.f(dataroot))
p.start()
print(q.get())
p.join()
The thing is runtime still remains the same and it hasn't changed which I hoped it'll get faster.
datareader & datawriter are another python files which I'm calling into this function:
datareader.py
#!/usr/bin/env python
"""
Classes that handles reading STORM movie files. Currently this
is limited to the dax, fits, spe and tif formats.
Hazen 06/13
"""
import hashlib
import numpy
import os
import re
import tifffile
# Avoid making astropy mandatory for everybody.
try:
from astropy.io import fits
except ImportError:
pass
def inferReader(filename, verbose=False):
"""
Given a file name this will try to return the appropriate
reader based on the file extension.
"""
ext = os.path.splitext(filename)[1]
if (ext == ".dax"):
return DaxReader(filename, verbose=verbose)
elif (ext == ".fits"):
return FITSReader(filename, verbose=verbose)
elif (ext == ".spe"):
return SpeReader(filename, verbose=verbose)
elif (ext == ".tif") or (ext == ".tiff"):
return TifReader(filename, verbose=verbose)
else:
print(ext, "is not a recognized file type")
raise IOError("only .dax, .spe and .tif are supported (case sensitive..)")
class Reader(object):
"""
The superclass containing those functions that
are common to reading a STORM movie file.
Subclasses should implement:
1. __init__(self, filename, verbose = False)
This function should open the file and extract the
various key bits of meta-data such as the size in XY
and the length of the movie.
2. loadAFrame(self, frame_number)
Load the requested frame and return it as numpy array.
"""
def __init__(self, filename, verbose=False):
super(Reader, self).__init__()
self.filename = filename
self.fileptr = None
self.verbose = verbose
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, etype, value, traceback):
self.close()
def averageFrames(self, start=None, end=None):
"""
Average multiple frames in a movie.
"""
length = 0
average = numpy.zeros((self.image_height, self.image_width), numpy.float)
for [i, frame] in self.frameIterator(start, end):
if self.verbose and ((i % 10) == 0):
print(" processing frame:", i, " of", self.number_frames)
length += 1
average += frame
if (length > 0):
average = average / float(length)
return average
def close(self):
if self.fileptr is not None:
self.fileptr.close()
self.fileptr = None
def filmFilename(self):
"""
Returns the film name.
"""
return self.filename
def filmSize(self):
"""
Returns the film size.
"""
return [self.image_width, self.image_height, self.number_frames]
def filmLocation(self):
"""
Returns the picture x,y location, if available.
"""
if hasattr(self, "stage_x"):
return [self.stage_x, self.stage_y]
else:
return [0.0, 0.0]
def filmScale(self):
"""
Returns the scale used to display the film when
the picture was taken.
"""
if hasattr(self, "scalemin") and hasattr(self, "scalemax"):
return [self.scalemin, self.scalemax]
else:
return [100, 2000]
def frameIterator(self, start=None, end=None):
"""
Iterator for going through the frames of a movie.
"""
if start is None:
start = 0
if end is None:
end = self.number_frames
for i in range(start, end):
yield [i, self.loadAFrame(i)]
def hashID(self):
"""
A (hopefully) unique string that identifies this movie.
"""
return hashlib.md5(self.loadAFrame(0).tostring()).hexdigest()
def loadAFrame(self, frame_number):
assert frame_number >= 0, "Frame_number must be greater than or equal to 0, it is " + str(frame_number)
assert frame_number < self.number_frames, "Frame number must be less than " + str(self.number_frames)
def lockTarget(self):
"""
Returns the film focus lock target.
"""
if hasattr(self, "lock_target"):
return self.lock_target
else:
return 0.0
class DaxReader(Reader):
"""
Dax reader class. This is a Zhuang lab custom format.
"""
def __init__(self, filename, verbose=False):
super(DaxReader, self).__init__(filename, verbose=verbose)
# save the filenames
dirname = os.path.dirname(filename)
if (len(dirname) > 0):
dirname = dirname + "/"
self.inf_filename = dirname + os.path.splitext(os.path.basename(filename))[0] + ".inf"
# defaults
self.image_height = None
self.image_width = None
# extract the movie information from the associated inf file
size_re = re.compile(r'frame dimensions = ([\d]+) x ([\d]+)')
length_re = re.compile(r'number of frames = ([\d]+)')
endian_re = re.compile(r' (big|little) endian')
stagex_re = re.compile(r'Stage X = ([\d\.\-]+)')
stagey_re = re.compile(r'Stage Y = ([\d\.\-]+)')
lock_target_re = re.compile(r'Lock Target = ([\d\.\-]+)')
scalemax_re = re.compile(r'scalemax = ([\d\.\-]+)')
scalemin_re = re.compile(r'scalemin = ([\d\.\-]+)')
inf_file = open(self.inf_filename, "r")
while 1:
line = inf_file.readline()
if not line: break
m = size_re.match(line)
if m:
self.image_height = int(m.group(2))
self.image_width = int(m.group(1))
m = length_re.match(line)
if m:
self.number_frames = int(m.group(1))
m = endian_re.search(line)
if m:
if m.group(1) == "big":
self.bigendian = 1
else:
self.bigendian = 0
m = stagex_re.match(line)
if m:
self.stage_x = float(m.group(1))
m = stagey_re.match(line)
if m:
self.stage_y = float(m.group(1))
m = lock_target_re.match(line)
if m:
self.lock_target = float(m.group(1))
m = scalemax_re.match(line)
if m:
self.scalemax = int(m.group(1))
m = scalemin_re.match(line)
if m:
self.scalemin = int(m.group(1))
inf_file.close()
# set defaults, probably correct, but warn the user
# that they couldn't be determined from the inf file.
if not self.image_height:
print("Could not determine image size, assuming 256x256.")
self.image_height = 256
self.image_width = 256
# open the dax file
if os.path.exists(filename):
self.fileptr = open(filename, "rb")
else:
if self.verbose:
print("dax data not found", filename)
def loadAFrame(self, frame_number):
"""
Load a frame & return it as a numpy array.
"""
super(DaxReader, self).loadAFrame(frame_number)
self.fileptr.seek(frame_number * self.image_height * self.image_width * 2)
image_data = numpy.fromfile(self.fileptr, dtype='uint16', count=self.image_height * self.image_width)
image_data = numpy.reshape(image_data, [self.image_height, self.image_width])
if self.bigendian:
image_data.byteswap(True)
return image_data
class FITSReader(Reader):
"""
FITS file reader class.
FIXME: This depends on internals of astropy.io.fits that I'm sure
we are not supposed to be messing with. The problem is that
astropy.io.fits does not support memmap'd images when the
image is scaled (which is pretty much always the case?). To
get around this we set _ImageBaseHDU._do_not_scale_image_data
to True, then do the image scaling ourselves.
We want memmap = True as generally it won't make sense to
load the entire movie into memory.
Another consequence of this is that we only support
'pseudo unsigned' 16 bit FITS format files.
"""
def __init__(self, filename, verbose=False):
super(FITSReader, self).__init__(filename, verbose=verbose)
self.hdul = fits.open(filename, memmap=True)
hdr = self.hdul[0].header
# We only handle 16 bit FITS files.
assert ((hdr['BITPIX'] == 16) and (hdr['bscale'] == 1) and (hdr['bzero'] == 32768)), \
"Only 16 bit pseudo-unsigned FITS format is currently supported!"
# Get image size. We're assuming that the film is a data cube in
# the first / primary HDU.
#
self.image_height = hdr['naxis2']
self.image_width = hdr['naxis1']
if (hdr['naxis'] == 3):
self.number_frames = hdr['naxis3']
else:
self.number_frames = 1
self.hdu0 = self.hdul[0]
# Hack, override astropy.io.fits internal so that we can load
# data with memmap = True.
#
self.hdu0._do_not_scale_image_data = True
def close(self):
pass
def loadAFrame(self, frame_number):
super(FITSReader, self).loadAFrame(frame_number)
frame = self.hdu0.data[frame_number, :, :].astype(numpy.uint16)
frame -= 32768
return frame
class SpeReader(Reader):
"""
SPE (Roper Scientific) reader class.
"""
def __init__(self, filename, verbose=False):
super(SpeReader, self).__init__(filename, verbose=verbose)
# open the file & read the header
self.header_size = 4100
self.fileptr = open(filename, "rb")
self.fileptr.seek(42)
self.image_width = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
self.fileptr.seek(656)
self.image_height = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
self.fileptr.seek(1446)
self.number_frames = int(numpy.fromfile(self.fileptr, numpy.uint32, 1)[0])
self.fileptr.seek(108)
image_mode = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
if (image_mode == 0):
self.image_size = 4 * self.image_width * self.image_height
self.image_mode = numpy.float32
elif (image_mode == 1):
self.image_size = 4 * self.image_width * self.image_height
self.image_mode = numpy.uint32
elif (image_mode == 2):
self.image_size = 2 * self.image_width * self.image_height
self.image_mode = numpy.int16
elif (image_mode == 3):
self.image_size = 2 * self.image_width * self.image_height
self.image_mode = numpy.uint16
else:
print("unrecognized spe image format: ", image_mode)
def loadAFrame(self, frame_number, cast_to_int16=True):
"""
Load a frame & return it as a numpy array.
"""
super(SpeReader, self).loadAFrame(frame_number)
self.fileptr.seek(self.header_size + frame_number * self.image_size)
image_data = numpy.fromfile(self.fileptr, dtype=self.image_mode, count=self.image_height * self.image_width)
if cast_to_int16:
image_data = image_data.astype(numpy.uint16)
image_data = numpy.reshape(image_data, [self.image_height, self.image_width])
return image_data
class TifReader(Reader):
"""
TIF reader class.
This is supposed to handle the following:
1. A normal Tiff file with one frame/image per page.
2. Tiff files with multiple frames on a single page.
3. Tiff files with multiple frames on multiple pages.
"""
def __init__(self, filename, verbose=False):
super(TifReader, self).__init__(filename, verbose)
self.page_data = None
self.page_number = -1
# Save the filename
self.fileptr = tifffile.TiffFile(filename)
number_pages = len(self.fileptr.pages)
# Single page Tiff file, which might be a "ImageJ Tiff"
# with many frames on a page.
#
if (number_pages == 1):
# Determines the size without loading the entire file.
isize = self.fileptr.series[0].shape
# Check if this is actually just a single frame tiff, if
# it is we'll just load it into memory.
#
if (len(isize) == 2):
self.frames_per_page = 1
self.number_frames = 1
self.image_height = isize[0]
self.image_width = isize[1]
self.page_data = self.fileptr.asarray()
# Otherwise we'll memmap it in case it is really large.
#
else:
self.frames_per_page = isize[0]
self.number_frames = isize[0]
self.image_height = isize[1]
self.image_width = isize[2]
self.page_data = self.fileptr.asarray(out='memmap')
# Multiple page Tiff file.
#
else:
isize = self.fileptr.asarray(key=0).shape
# Check for one frame per page.
if (len(isize) == 2):
self.frames_per_page = 1
self.number_frames = number_pages
self.image_height = isize[0]
self.image_width = isize[1]
# Multiple frames per page.
#
# FIXME: No unit test for this kind of file.
#
else:
self.frames_per_page = isize[0]
self.number_frames = number_pages * isize[0]
self.image_height = isize[1]
self.image_width = isize[2]
if self.verbose:
print("{0:0d} frames per page, {1:0d} pages".format(self.frames_per_page, number_pages))
def loadAFrame(self, frame_number, cast_to_int16=True):
super(TifReader, self).loadAFrame(frame_number)
# All the data is on a single page.
if self.number_frames == self.frames_per_page:
if (self.number_frames == 1):
image_data = self.page_data
else:
image_data = self.page_data[frame_number, :, :]
# Multiple frames of data on multiple pages.
elif (self.frames_per_page > 1):
page = int(frame_number / self.frames_per_page)
frame = frame_number % self.frames_per_page
# This is an optimization for files with a large number of frames
# per page. In this case tifffile will keep loading the entire
# page over and over again, which really slows everything down.
# Ideally tifffile would let us specify which frame on the page
# we wanted.
#
# Since it was going to load the whole thing anyway we'll have
# memory overflow either way, so not much we can do about that
# except hope for small file sizes.
#
if (page != self.page_number):
self.page_data = self.fileptr.asarray(key=page)
self.page_number = page
image_data = self.page_data[frame, :, :]
# One frame on each page.
else:
image_data = self.fileptr.asarray(key=frame_number)
assert (len(image_data.shape) == 2), "Not a monochrome tif image! " + str(image_data.shape)
if cast_to_int16:
image_data = image_data.astype(numpy.uint16)
return image_data
if (__name__ == "__main__"):
import sys
if (len(sys.argv) != 2):
print("usage: <movie>")
exit()
movie = inferReader(sys.argv[1], verbose=True)
print("Movie size is", movie.filmSize())
frame = movie.loadAFrame(0)
print(frame.shape, type(frame), frame.dtype)
#
# The MIT License
#
# Copyright (c) 2013 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
datawriter.py
#!/usr/bin/env python
"""
Writes dax files or tiff files. This is mostly used
by the simulator.
We try and follow a convention were the first dimension (slow
axis) is the image height and the second dimension (fast axis)
is the image width, so image.shape = [height, width]
Hazen 1/18
"""
import numpy
import os
import tifffile
# Import here to avoid making astropy mandatory for everybody.
try:
from astropy.io import fits
except ImportError:
pass
def inferWriter(filename, width = None, height = None):
"""
Given a file name this will try to return the appropriate
writer based on the file extension.
"""
ext = os.path.splitext(filename)[1]
if (ext == ".dax"):
return DaxWriter(filename, width = width, height = height)
elif (ext == ".fits"):
return FITSWriter(filename, width = width, height = height)
elif (ext == ".tif") or (ext == ".tiff"):
return TiffWriter(filename, width = width, height = height)
else:
print(ext, "is not a recognized file type")
raise IOError("only .dax and .tif are supported (case sensitive..)")
def dummyDaxFile(name, x_size, y_size):
ddax = DaxWriter(name, width = x_size, height = y_size)
frame = numpy.ones((x_size, y_size))
ddax.addFrame(frame)
ddax.close()
def singleFrameDax(name, frame):
[fx, fy] = frame.shape
dax_file = DaxWriter(name, width = fy, height = fx)
dax_file.addFrame(frame)
dax_file.close()
class Writer(object):
def __init__(self, width = None, height = None, **kwds):
super(Writer, self).__init__(**kwds)
self.w = width
self.h = height
def frameToU16(self, frame):
frame = frame.copy()
frame[(frame < 0)] = 0
frame[(frame > 65535)] = 65535
return numpy.round(frame).astype(numpy.uint16)
class DaxWriter(Writer):
def __init__(self, name, **kwds):
super(DaxWriter, self).__init__(**kwds)
self.name = name
if len(os.path.dirname(name)) > 0:
self.root_name = os.path.dirname(name) + "/" + os.path.splitext(os.path.basename(name))[0]
else:
self.root_name = os.path.splitext(os.path.basename(name))[0]
self.fp = open(self.name, "wb")
self.l = 0
def addFrame(self, frame):
frame = self.frameToU16(frame)
if (self.w is None) or (self.h is None):
[self.h, self.w] = frame.shape
else:
assert(self.h == frame.shape[0])
assert(self.w == frame.shape[1])
frame.tofile(self.fp)
self.l += 1
def close(self):
self.fp.close()
self.w = int(self.w)
self.h = int(self.h)
inf_fp = open(self.root_name + ".inf", "w")
inf_fp.write("binning = 1 x 1\n")
inf_fp.write("data type = 16 bit integers (binary, little endian)\n")
inf_fp.write("frame dimensions = " + str(self.w) + " x " + str(self.h) + "\n")
inf_fp.write("number of frames = " + str(self.l) + "\n")
inf_fp.write("Lock Target = 0.0\n")
if True:
inf_fp.write("x_start = 1\n")
inf_fp.write("x_end = " + str(self.w) + "\n")
inf_fp.write("y_start = 1\n")
inf_fp.write("y_end = " + str(self.h) + "\n")
inf_fp.close()
class FITSWriter(Writer):
"""
This is mostly for testing. It will store all the movie data in
memory, then dump it when the file is closed.
"""
def __init__(self, filename, **kwds):
super(FITSWriter, self).__init__(**kwds)
self.filename = filename
self.frames = []
def addFrame(self, frame):
frame = self.frameToU16(frame)
if (self.w is None) or (self.h is None):
[self.h, self.w] = frame.shape
else:
assert(self.h == frame.shape[0])
assert(self.w == frame.shape[1])
self.frames.append(frame)
def close(self):
# Remove old file, if any.
if os.path.exists(self.filename):
os.remove(self.filename)
data = numpy.zeros((len(self.frames), self.h, self.w), dtype = numpy.uint16)
for i in range(len(self.frames)):
data[i,:,:] = self.frames[i]
hdu = fits.PrimaryHDU(data)
hdu.writeto(self.filename)
class TiffWriter(Writer):
def __init__(self, filename, **kwds):
super(TiffWriter, self).__init__(**kwds)
self.tif_fp = tifffile.TiffWriter(filename)
def addFrame(self, frame):
frame = self.frameToU16(frame)
# Enforce that all the frames are the same size.
if (self.h is None) or (self.w is None):
[self.h, self.w] = frame.shape
else:
assert(self.h == frame.shape[0])
assert(self.w == frame.shape[1])
self.tif_fp.save(frame)
def close(self):
self.tif_fp.close()
Any suggestions for making my code faster shall be welcomed.

Not getting the Multithreaading to work for GTK 3 python

I am trying to get the themes from the gnome-look.org and trying to create widgets by scraping the website.
I wanted to show the window first while updating the GtkWidgets necessary in the background via another Thread.
Here is my code
[code]
#!/usr/bin/python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GdkPixbuf
import requests
import sys
import gi
import shutil
from bs4 import BeautifulSoup
import dryscrape
import json
import urllib.parse
import concurrent.futures
import threading
class ReadGnomeLook:
def format_bytes(self, size):
# 2**10 = 1024
power = 2**10
n = 0
power_labels = {0 : '', 1: 'KB', 2: 'MB', 3: 'GB'}
while size > power:
size /= power
n += 1
return str("{:.2f}".format(size)) +' ' + str(power_labels[n])
def getDownloadLinks(self, childURL):
#childURL = "https://www.gnome-look.org/s/Gnome/p/1519633"
childURL = childURL+"#files-panel"
session = dryscrape.Session()
session.set_attribute('auto_load_images', False)
session.visit(childURL)
response = session.body()
soup = BeautifulSoup(response, features='lxml')
downloadlink = []
allscripts = soup.find_all('script', {"src":False})
for each_script in range(len(allscripts)):
content = str(allscripts[each_script]).split("var")
for indx in content:
if 'filesJson' in str(indx):
content = indx.replace('filesJson = ','').replace(';','')
content = json.loads(content)
links = []
for each_item in content:
if each_item['active'] == '1':
links.append({'name':each_item['name'],'type':each_item['type'],'size':format_bytes(int(each_item['size'])),'md5sum':each_item['md5sum'],'title':each_item['title'],'description':each_item['description'],'url':urllib.parse.unquote(each_item['url'])})
for each in links:
downloadlink.append(each)
return downloadlink
def readWebpage(self, URL):
myProducts = []
baseURL="https://www.gnome-look.org"
#URL = "https://www.gnome-look.org/browse/cat/132/order/latest/"
session = dryscrape.Session()
session.set_header('Host','www.gnome-look.org')
session.visit(URL)
response = session.body()
soup = BeautifulSoup(response, features='lxml')
#print(soup)
#soup.find(class="product-browse-item-info")
mydivs = soup.find_all("div", {"class": "product-browse-item picture"})
for mydiv in mydivs:
myProducts.append([
{'name' : mydiv.div.a.findAll("div",{"class":"product-browse-item-info"})[0].h2.text},
{'category' : mydiv.div.a.findAll("div",{"class":"product-browse-item-info"})[0].findAll("span")[0].text},
{'author' : mydiv.div.a.findAll("div",{"class":"product-browse-item-info"})[0].findAll("span")[1].b.text},
{'img' : mydiv.div.a.div.img['src']},
{'href' : mydiv.div.a['href']}
])
productCatalog = []
for elements in myProducts:
productCatalog.append([
{
'Name':elements[0]['name'],
'Category': elements[1]['category'],
'Author': elements[2]['author'],
'Image': elements[3]['img'],
'Link': baseURL + elements[4]['href'],
#'DownloadLinks': getDownloadLinks(baseURL + elements[4]['href'])
}
])
return productCatalog
class AppicationWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Themes Manager")
# Main Application Window
# self.set_title("Themes Manager v1.0")
#self.set_default_size(400, 400)
self.set_position(Gtk.WindowPosition.CENTER)
self.connect("destroy",Gtk.main_quit)
# Create Image and Title Grid
#self.image = self.getImageFromWeb('https://media.wired.com/photos/592697678d4ebc5ab806acf7/master/w_2560%2Cc_limit/GooglePlay.jpg')
#self.image.set_from_file("android-download.png")
#image.set_size(200,200)
print("Before 1st Show all")
self.show_all()
z = threading.Thread(target=self.doProcessing(),daemon=True)
z.start()
print("Started Z thread")
def doProcessing(self):
# Grid for Full Icon Themes
self.gridfulliconthemes = Gtk.FlowBox(valign = Gtk.Align.START)
self.gridfulliconthemesscroll = Gtk.ScrolledWindow(hexpand=True, vexpand=True) # Create scroll window
self.gridfulliconthemesscroll.add(self.gridfulliconthemes) # Adds the TreeView to the scroll container
self.getProductCatalog()
## Start
self.URLs = []
self.labels = []
self.images = []
self.threads = []
for each_item in self.productCatalog:
image = Gtk.Image()
image.new_from_file('/tmp/82682596e6c89475b2f21221d5dc61927887.png')
self.images.append(image)
self.labels.append(Gtk.Label("loading"))
for each_item in range(0,len(self.productCatalog)):
#print(each_item[0]['Name'])
self.URLs.append(self.productCatalog[each_item][0]['Image'])
vertical_box = Gtk.Box()
vertical_box.set_homogeneous(True)
vertical_items = Gtk.FlowBox(valign = Gtk.Align.START)
vertical_items.set_max_children_per_line(1)
label = Gtk.Label()
label.set_text(self.productCatalog[each_item][0]['Name'])
label.set_line_wrap(True)
label.set_max_width_chars(10)
label.set_hexpand(True)
self.labels.append(label)
#image = Gtk.Image()
#self.images.append(image)
vertical_items.add(self.images[each_item])
vertical_items.add(self.labels[each_item])
vertical_box.add(vertical_items)
vertical_box.connect("button-press-event", self.do_anything)
self.gridfulliconthemes.add(vertical_box)
## End
# Create Notebook to add to the Window
self.notebook = Gtk.Notebook()
self.add(self.notebook)
self.fullicontheme = Gtk.Label()
self.fullicontheme.set_text("Full Icon Themes")
self.gtkthemes = Gtk.Label()
self.gtkthemes.set_text("Gtk 3/4 Themes")
self.gnomeshellthemes = Gtk.Label()
self.gnomeshellthemes.set_text("Gnome Shell Themes")
self.fulliconthemepage = Gtk.Label()
self.fulliconthemepage.set_text("Full Icon Themes Page")
self.gtkthemespage = Gtk.Label()
self.gtkthemespage.set_text("GTK themes Page")
self.gnomeshellthemespage = Gtk.Label()
self.gnomeshellthemespage.set_text("Gnome Shell Themes Page")
#notebook.append_page(fullicontheme, Gtk.Label("Icon Page"))
self.notebook.append_page(self.gridfulliconthemesscroll, self.fulliconthemepage)
self.notebook.append_page(self.gtkthemes, self.gtkthemespage)
self.notebook.append_page(self.gnomeshellthemes, self.gnomeshellthemespage)
self.notebook.set_tab_reorderable(self.gridfulliconthemesscroll, True)
#self.add(hb)
#self.show_all()
#threadtemp = threading.Thread(target=self.getImageFromWeb(each_item[0]['Image']))
#self.threads.append(threadtemp)
#self.getAllImages()
x = threading.Thread(target=self.getAllImages(),daemon=True)
x.start()
self.show_all()
def getProductCatalog(self):
# Download Links from GnomeLook.org
URL = "https://www.gnome-look.org/s/Gnome/browse/cat/132/page/2/ord/latest/"
readgnomelook = ReadGnomeLook()
self.productCatalog = readgnomelook.readWebpage(URL)
#print(json.dumps(productCatalog, sort_keys=False, indent=4))
def getAllImages(self):
for i in range(0,len(self.productCatalog)):
#self.images.append(self.getImageFromWeb(self.productCatalog[i][0]['Image']))
#self.images[i] = self.getImageFromWeb(self.productCatalog[i][0]['Image'],self.images[i])
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
future = executor.submit(self.getImageFromWeb, self.productCatalog[i][0]['Image'], self.images[i])
# #self.images.append(future.result())
self.images[i]= future.result()
# #print(type(self.images[i]))
def do_anything(self):
print("clicked on box")
def getImageFromWeb(self, URL,image):
filename = '/tmp/'+URL.split("/")[-1]
try:
f = open(filename)
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=filename,
width=100,
height=100,
preserve_aspect_ratio=False)
Gtk.Image.set_from_pixbuf(image, pixbuf)
#image.set_from_file(filename)
#print("Got the image : " + filename)
#del r
return image
except IOError:
#print("File not accessible")
r = requests.get(URL,stream=True)
if r.status_code == 200:
with open(filename,'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=filename,
width=200,
height=200,
preserve_aspect_ratio=False)
Gtk.Image.set_from_pixbuf(image, pixbuf)
#image.set_from_file(filename)
#print("Got the image : " + filename)
return image
else:
#print("Failed to get the image : " + filename)
return None
del r
window = AppicationWindow()
#window.connect("destroy",Gtk.main_quit)
#window.show_all()
Gtk.main()
[/code]
Code works fine. But in below code, the thread doProcessing() is getting completed and then I am seeing the "Started Z thread"
print("Before 1st Show all")
self.show_all()
z = threading.Thread(target=self.doProcessing(),daemon=True)
z.start()
print("Started Z thread")
As I see, doProcessing should start in background and "Started Z thread" should be printed immediately but that's not happening.
Am I missing anything here ? Any help is appreciated.
Thanks, Debasish
z = threading.Thread(target=self.doProcessing(),daemon=True)
threading.Thread wants a function not the result of a function, thus try:
z = threading.Thread(target=self.doProcessing,daemon=True)

Python cv2 ORB detectandcompute returning "invalid number of channels in input image"

I'm trying to extract and match features from two different images but for some reason the "detectAndCompute" method doesn¡t work on my orb object:
orb = cv2.ORB_create()
kp, corners = orb.detectAndCompute(image,None
I am passing a single grayscale image (the return of the function np.float32(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))). For some reason, the program returns the following error:
Traceback (most recent call last):
File "C:\Users\levxr\Desktop\Visual-positioning-bot-main\alloverlay.py", line 37, in
cv2.imshow("camera "+str(i), corn1.updateanddisplay())
File "C:\Users\levxr\Desktop\Visual-positioning-bot-main\features.py", line 33, in updateanddisplay
dst = self.update(image=self.image)
File "C:\Users\levxr\Desktop\Visual-positioning-bot-main\features.py", line 23, in update
kp, corners = orb.detectAndCompute(image,None)
cv2.error: OpenCV(4.4.0) c:\users\appveyor\appdata\local\temp\1\pip-req-build-95hbg2jt\opencv\modules\imgproc\src\color.simd_helpers.hpp:92: error: (-2:Unspecified error) in function '__cdecl cv::impl::anonymous-namespace'::CvtHelper<struct cv::impl::anonymous namespace'::Set<3,4,-1>,struct cv::impl::A0x2980c61a::Set<1,-1,-1>,struct cv::impl::A0x2980c61a::Set<0,2,5>,2>::CvtHelper(const class cv::_InputArray &,const class cv::_OutputArray &,int)'
Invalid number of channels in input image:
'VScn::contains(scn)'
where
'scn' is 1
The program is split in 3 files, alloverlay.py(the main file):
import sys
import cv2
import numpy as np
import features as corn
import camera as cali
cv2.ocl.setUseOpenCL(False)
#videoname = input("enter input")
videoname = "camera10001-0200.mkv"
try:
videoname = int(videoname)
cap = cv2.VideoCapture(videoname)
except:
cap = cv2.VideoCapture(videoname)
videoname2 = "camera 20000-0200.mkv"
try:
videoname = int(videoname)
cap2 = cv2.VideoCapture(videoname)
except:
cap2 = cv2.VideoCapture(videoname)
if cap.isOpened()and cap2.isOpened():
ret1, image1 = cap.read()
ret2, image2 = cap2.read()
ret = [ret1, ret2]
image = [np.float32(cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)), np.float32(cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY))]
cali1 = cali.Calibrator()
corn1 = corn.Corner_detector(image)
while cap.isOpened() and cap2.isOpened():
ret[0], image[0] = cap.read()
ret[1], image[1] = cap2.read()
if ret:
backupimg = image
for i, img in enumerate(image):
if cali1.calibrated:
backupimg[i] = corn1.image = cali1.undistort(np.float32(cv2.cvtColor(image[i], cv2.COLOR_BGR2GRAY)), cali1.mtx, cali1.dist)
else:
backupimg[i] = corn1.image = np.float32(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
cv2.imshow("camera "+str(i), corn1.updateanddisplay())
image = backupimg
print(ret, image)
#cv2.imshow("test", image)
key = cv2.waitKey(1)
if key == ord("c"):
cali1.calibrate(cali1.image)
if cv2.waitKey(25) & 0xFF == ord("q"):
break
else:
print("capture not reading")
break
cap.release()
, camera.py(module to calibrate and undistort the camera and triangulate the relative position of the point (a different part of this project, irrelevant to this issue)):
import sys
import cv2
#import glob
import numpy as np
cv2.ocl.setUseOpenCL(False)
class Missing_calibration_data_error(Exception):
def __init__():
pass
class Calibrator():
def __init__(self, image=None, mtx=None, dist=None, camera_data={"pixelsize":None, "matrixsize":None, "baseline":None, "lens_distance":None}, criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001), calibrated = False):
self.criteria = criteria
self.objpoints = []
self.imgpoints = []
self.objp = np.zeros((6*7,3), np.float32)
self.objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
self.image = image
self.mtx = mtx
self.dist = dist
self.calibrated = calibrated
self.pixelsize = camera_data["pixelsize"]
self.matrixsize = camera_data["matrixsize"]
self.baseline = camera_data["baseline"]
self.lens_distance = camera_data["lens_distance"]
def calibrate(self, image):
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (7,6),None)
if ret == True:
self.objpoints.append(self.objp)
corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),self.criteria)
self.imgpoints.append(corners2)
h, w = image.shape[:2]
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
self.mtx = mtx
self.dist = dist
self.calibrated = True
return mtx, dist
def undistort(self, image, mtx, dist):
if dist == None or mtx == None or image == None:
raise Missing_calibration_data_error
h, w = image.shape[:2]
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))
dst = cv2.undistort(image, mtx, dist, None, newcameramtx)
x,y,w,h = roi
dst = dst[y:y+h, x:x+w]
return image
def calculate_point_relative_position(self, point_location2d):
angle = self.baseline/(point_location2d[left][x]-point_location2d[right][x])
x = angle * (point_location2d[left][x]-self.matrixsize[0]/2)
y = angle * (point_location2d[left][y]-self.matrixsize[1]/2)
z = self.lens_distance * (1-angle/self.pixelsize)
return [x, y, z]
´´´
, and features.py(module to detect and match the features, aparently where the issue happens):
´´´
import sys
import cv2
import numpy as np
cv2.ocl.setUseOpenCL(False)
class Unknown_algorythm_error(Exception):
def __init__(self):
pass
class No_image_passed_error(Exception):
def __int__ (self):
pass
class Corner_detector():
def __init__(self, image, detectortype="ORB", corners=[]):
self.corners = corners
self.image = image
self.detectortype = detectortype
def update(self, image=None):
if self.detectortype == "Harris":
self.corners = cv2.cornerHarris(image, 3, 3, 0, 1)
elif self.detectortype == "Shi-Tomasi":
self.corners = cv2.goodFeaturesToTrack(image, 3, 3, 0, 1)
elif self.detectortype == "ORB":
orb = cv2.ORB_create()
kp, corners = orb.detectAndCompute(image,None)
elif self.detectortype == "SURF":
minHessian = 400
detector = cv2.features2d_SURF(hessianThreshold=minHessian)
keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
keypoints2, descriptors2 = detector.detectAndCompute(img2, None)
else:
raise Unknown_algoryth_error
return self.corners
def updateanddisplay(self):
dst = self.update(image=self.image)
self.image[dst>0.01*dst.max()] = 0
return self.image
class Feature_matcher():
def __init__(self, matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)):
self.matcher = matcher
´´´
Does anyone know how to fix this? I've been looking for the answer for quite a while but i only find the answer for when you're converting the image to grayscale and it doesnt work for me.
It's hard to follow, but I think I identified the issue:
You are passing orb.detectAndCompute an image of type np.float32.
orb.detectAndCompute does not support image of type np.float32.
Reproducing the problem:
The following "simple test" reproduces the problem:
The code sample passes a black (zeros) image to orb.detectAndCompute:
The following code passes without an exception (image type is np.uint8):
# image type is uint8:
image = np.zeros((100, 100), np.uint8)
orb = cv2.ORB_create()
kp, corners = orb.detectAndCompute(image, None)
The following code raises an exception because image type is np.float32:
# image type is float32:
image = np.float32(np.zeros((100, 100), np.uint8))
orb = cv2.ORB_create()
kp, corners = orb.detectAndCompute(image, None)
Rises an exception:
Invalid number of channels in input image:
Solution:
Try to avoid the np.float32 conversion.
You may also convert image to uint8 as follows:
kp, corners = orb.detectAndCompute(image.astype(np.uint8), None)

AttributeError: type object 'FooEnv' has no attribute 'reset'

I am new in Python and I faced with a problem in my code. I try to build my custom environment for a Deep Q-Network program. The name of my environment is "FooEnv".But when I run the main code, I faced with this error in line FooEnv.reset()
type object 'FooEnv' has no attribute 'reset'
This is my main code, That I call "FooEnv" here:
import json
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import sgd
from FooEnv import FooEnv
class ExperienceReplay(object):
def __init__(self, max_memory=100, discount=.9):
self.max_memory = max_memory
self.memory = list()
self.discount = discount
def remember(self, states, game_over):
# memory[i] = [[state_t, action_t, reward_t, state_t+1], game_over?]
self.memory.append([states, game_over])
if len(self.memory) > self.max_memory:
del self.memory[0]
def get_batch(self, model, batch_size=10):
len_memory = len(self.memory)
num_actions = model.output_shape[-1]
# env_dim = self.memory[0][0][0].shape[1]
env_dim = self.memory[0][0][0].shape[1]
inputs = np.zeros((min(len_memory, batch_size), env_dim))
targets = np.zeros((inputs.shape[0], num_actions))
for i, idx in enumerate(np.random.randint(0, len_memory,
size=inputs.shape[0])):
state_t, action_t, reward_t, state_tp1 = self.memory[idx][0]
game_over = self.memory[idx][1]
inputs[i:i+1] = state_t
# There should be no target values for actions not taken.
# Thou shalt not correct actions not taken #deep
targets[i] = model.predict(state_t)[0]
Q_sa = np.max(model.predict(state_tp1)[0])
if game_over: # if game_over is True
targets[i, action_t] = reward_t
else:
# reward_t + gamma * max_a' Q(s', a')
targets[i, action_t] = reward_t + self.discount * Q_sa
return inputs, targets
if __name__ == "__main__":
# parameters
epsilon = .1
num_actions = 2
epoch = 1000
max_memory = 500
hidden_size = 100
batch_size = 50
input_size = 2
f_c=[2.4*10**9]
eta_Los=[1]
eta_NLos=[2]
x_threshold = [5]
model = Sequential()
model.add(Dense(hidden_size, input_shape=(2, ), activation='relu'))
model.add(Dense(hidden_size, activation='relu'))
model.add(Dense(num_actions))
model.compile(sgd(lr=.2), "mse")
# Define environment/game
env = FooEnv(f_c, eta_Los, eta_NLos)
# Initialize experience replay object
exp_replay = ExperienceReplay(max_memory=max_memory)
FooEnv.reset()
And this is my FooEnv code:
import numpy as np
import math
class FooEnv(object):
def __init__(self, f_c, eta_Los, eta_NLos):
self.f_c = f_c
self.eta_Los = eta_Los
self.eta_NLos = eta_NLos
self.num_actions = 2
def reset(self):
state=self.state
E_Consumtion, Average_Delay_UAV, Average_DeLay_FAP = state
E_Consumtion=0
Average_Delay_UAV=0
Average_DeLay_FAP=0
self.state = np.append(E_Consumtion,Average_Delay_UAV,Average_DeLay_FAP)
self.steps_beyond_done = None
return np.array(self.state)
I would greatly appreciated it if you could help me with this.
FooEnv is a class and env is an object of that class. You want to reset the object, not the class.

How do you parse the bin file from INT8 Calibration of TensorRT?

I have created a python script for calibrating(INT8) the dynamic scales of the activation of TinyYOLO V2 using TensorRT. The script gave me a file called calibration_cache.bin. How do I parse the .bin file ? What do the values inside the .bin file mean ?
calibrator.py
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
from PIL import Image
import ctypes
import tensorrt as trt
import os
CHANNEL = 3
HEIGHT = 416
WIDTH = 416
class PythonEntropyCalibrator(trt.IInt8EntropyCalibrator):
def __init__(self, input_layers, stream):
trt.IInt8EntropyCalibrator.__init__(self)
self.input_layers = input_layers
self.stream = stream
self.d_input = cuda.mem_alloc(self.stream.calibration_data.nbytes)
stream.reset()
def get_batch_size(self):
return self.stream.batch_size
def get_batch(self, bindings, names):
batch = self.stream.next_batch()
if not batch.size:
return None
cuda.memcpy_htod(self.d_input, batch)
for i in self.input_layers[0]:
assert names[0] != i
bindings[0] = int(self.d_input)
return bindings
def read_calibration_cache(self, length=0):
if os.path.exists('calibration_cache.bin'):
with open('calibration_cache.bin', 'rb') as f:
return f.read()
return None
def write_calibration_cache(self, cache, size=0):
with open('calibration_cache.bin', 'wb') as f:
f.write(cache)
return None
class ImageBatchStream():
def __init__(self, batch_size, calibration_files, preprocessor):
self.batch_size = batch_size
self.max_batches = (len(calibration_files) // batch_size) + \
(1 if (len(calibration_files) % batch_size) \
else 0)
self.files = calibration_files
self.calibration_data = np.zeros((batch_size, CHANNEL, HEIGHT, WIDTH), \
dtype=np.float32)
self.batch = 0
self.preprocessor = preprocessor
#staticmethod
def read_image_chw(path):
img = Image.open(path).resize((WIDTH,HEIGHT), Image.NEAREST)
im = np.array(img, dtype=np.float32, order='C')
im = im[:,:,::-1]
im = im.transpose((2,0,1))
return im
def reset(self):
self.batch = 0
def next_batch(self):
if self.batch < self.max_batches:
imgs = []
files_for_batch = self.files[self.batch_size * self.batch : \
self.batch_size * (self.batch + 1)]
for f in files_for_batch:
self.batch_size * (self.batch + 1)]
for f in files_for_batch:
print("[ImageBatchStream] Processing ", f)
img = ImageBatchStream.read_image_chw(f)
img = self.preprocessor(img)
imgs.append(img)
for i in range(len(imgs)):
self.calibration_data[i] = imgs[i]
self.batch += 1
return np.ascontiguousarray(self.calibration_data, dtype=np.float32)
else:
return np.array([])
test.py
from random import shuffle
from PIL import Image
import glob
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import os
from calibrator import *
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
model_file = './tiny_yolov2/Model.onnx'
dataset_loc = './Dataset/*.jpg'
def normalize(data):
data /= 255.0
return data
def create_calibration_dataset():
calibration_files = glob.glob(dataset_loc)
shuffle(calibration_files)
return calibration_files[:20]
calibration_files = create_calibration_dataset()
NUM_IMAGES_PER_BATCH = 5
batchstream = ImageBatchStream(NUM_IMAGES_PER_BATCH, calibration_files, normalize)
Int8_calibrator = PythonEntropyCalibrator(["conv2d_91_input"], batchstream)
builder = trt.Builder(TRT_LOGGER)
builder.int8_calibrator = Int8_calibrator
builder.refittable = True
builder.int8_mode = True
network = builder.create_network()
parser = trt.OnnxParser(network, TRT_LOGGER)
print(builder.int8_mode, builder.platform_has_fast_int8,builder.refittable)
with open(model_file, 'rb') as model:
parser.parse(model.read())
print('Done reading ONNX File\n')
engine = builder.build_cuda_engine(network)
print(engine, TRT_LOGGER)
with open("model.trt", "wb") as f:
f.write(engine.serialize())
print("Done converting the ONNX to TRT\n")
tinyolo_fitter = trt.Refitter(engine, TRT_LOGGER)
print(tinyolo_fitter.refit_cuda_engine())
print(tinyolo_fitter.get_tensors_with_dynamic_range())
calibration_cache.bin
TRT-5105-EntropyCalibration
image: 3c010a14
scalerPreprocessor_scaled: 38018ba0
image2: 38018ba0
batchnormalization_1_output: 3d07b31d
leakyrelu_1_output: 3c98a317
maxpooling2d_1_output: 3c1e5b30
batchnormalization_2_output: 3ca6aa67
leakyrelu_2_output: 3ca6aa67
maxpooling2d_2_output: 3c82cf7d
batchnormalization_3_output: 3ce07ce8
leakyrelu_3_output: 3ce52236
maxpooling2d_3_output: 3cc8ed6f
batchnormalization_4_output: 3d3df55f
leakyrelu_4_output: 3c651727
maxpooling2d_4_output: 3cec84fc
batchnormalization_5_output: 3d0f51e3
leakyrelu_5_output: 3cb52377
maxpooling2d_5_output: 3d026049
batchnormalization_6_output: 3d387291
leakyrelu_6_output: 3ccc009a
maxpooling2d_6_output: 3c8d0f0c
batchnormalization_7_output: 3e0de3d2
leakyrelu_7_output: 3d7b4397
batchnormalization_8_output: 3cc459d6
leakyrelu_8_output: 3cbd9562
grid: 3ddc32dc
def read_calibration_cache(self, length=0):
if os.path.exists('calibration_cache.bin'):
with open('calibration_cache.bin', 'rb') as f:
return f.read()
return None
This does the work i guess. If there is a calibration_cache.bin file in your dir, calibrator parses it instead of calibrating again.

Resources