How to compute local standard deviation efficiently - python-3.x

I need to compute the standard deviation in a neighborhood of a point for each point in a large (10M pixel) image. The end goal is twofold:
Get a feel for what the standard deviation is for sharp vs. blurred images
Produce a mask for a new image that shows the blurred and sharp areas of an image
At first I tried this approach, but it is very slow:
b_arr, g_arr, r_arr = [], [], []
pad = 3

def compute_std(img, h, w):
    arr = [0, 0, 0]
    for c in range(len(arr)):
        arr[c] = np.std(img[h-pad:h+pad+1, w-pad:w+pad+1, c])
    return arr

img = cv2.imread('image_path.png')

for h in range(pad, img.shape[0] - pad):
    for w in range(pad, img.shape[1] - pad):
        b, g, r = compute_std(img, h, w)
        b_arr.append(b)
        g_arr.append(g)
        r_arr.append(r)
This approach takes minutes to produce the result.
What is the right way to compute the standard deviations for each pixel of an image quickly?
Alternatively, would it be faster to run the first several layers of a CNN, turn the activations into a vector for each point, and then use some sort of decision tree to classify each pixel as "sharp" or "blurred"?

You can either use numpy.lib.stride_tricks.sliding_window_view or scipy.signal.convolve2d (see https://stackoverflow.com/a/25912169/13636407):
def std_windowed(img, win_size):
    win_h, win_w = win_size
    win_view = sliding_window_view(img, (win_h, win_w), axis=(0, 1))
    return win_view.std(axis=(-2, -1))

def std_convoluted(img, win_size):
    img = np.moveaxis(img, -1, 0)  # HWC -> CHW
    img2 = img**2
    kernel = np.ones(win_size)
    kernel = kernel / kernel.size
    conv = lambda x: convolve2d(x, kernel, mode="valid")
    img_mean = np.stack([conv(band) for band in img], axis=-1)
    img2_mean = np.stack([conv(band) for band in img2], axis=-1)
    return np.sqrt(np.clip(img2_mean - img_mean**2, 0, None))
The convolution-based version uses the identity var(X) = E[X²] - (E[X])²: it convolves the image and its square with an averaging kernel, then takes the square root of the difference of the two local means. It is ~4x faster on a 10M pixel image:
# Inputs
img = get_image()
win_size = 7, 7
# Compute both ways
std_win = std_windowed(img, win_size)
std_conv = std_convoluted(img, win_size)
# Maximum absolute error
print(np.abs(std_win - std_conv).max())
# Performance
%timeit std_windowed(img, win_size)
%timeit std_convoluted(img, win_size)
1.2651919518872833e-05
13.7 s ± 213 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
2.92 s ± 90.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
Plots:
fig, (ax_orig, ax_std1, ax_std2) = plt.subplots(ncols=3)
ax_orig.set_title("Original")
ax_orig.imshow(minmax_scale(img))
ax_std1.set_title("std_windowed")
ax_std1.imshow(minmax_scale(std_win))
ax_std2.set_title("std_convoluted")
ax_std2.imshow(minmax_scale(std_conv))
plt.show()
Complete code for reference:
from io import BytesIO
import matplotlib.pyplot as plt
import numpy as np
import requests
from numpy.lib.stride_tricks import sliding_window_view
from PIL import Image
from scipy.signal import convolve2d
def std_windowed(img, win_size):
    win_h, win_w = win_size
    win_view = sliding_window_view(img, (win_h, win_w), axis=(0, 1))
    return win_view.std(axis=(-2, -1))

def std_convoluted(img, win_size):
    img = np.moveaxis(img, -1, 0)  # HWC -> CHW
    img2 = img**2
    kernel = np.ones(win_size)
    kernel = kernel / kernel.size
    conv = lambda x: convolve2d(x, kernel, mode="valid")
    img_mean = np.stack([conv(band) for band in img], axis=-1)
    img2_mean = np.stack([conv(band) for band in img2], axis=-1)
    return np.sqrt(np.clip(img2_mean - img_mean**2, 0, None))

def get_image():
    url = "https://upload.wikimedia.org/wikipedia/commons/thumb/7/7a/Firefox_brand_logo%2C_2019.svg/2048px-Firefox_brand_logo%2C_2019.svg.png"
    response = requests.get(url)
    img = Image.open(BytesIO(response.content))
    return np.array(img)[..., :3].astype(float)

def minmax_scale(arr, *, vmin=0, vmax=1):
    amin, amax = arr.min(), arr.max()
    arr_std = (arr - amin) / (amax - amin)
    return arr_std * (vmax - vmin) + vmin
# Inputs
img = get_image()
win_size = 7, 7
# Compute both ways
std_win = std_windowed(img, win_size)
std_conv = std_convoluted(img, win_size)
# Maximum absolute error
print(np.abs(std_win - std_conv).max())
# Plot results
fig, (ax_orig, ax_std1, ax_std2) = plt.subplots(ncols=3)
ax_orig.set_title("Original")
ax_orig.imshow(minmax_scale(img))
ax_std1.set_title("std_windowed")
ax_std1.imshow(minmax_scale(std_win))
ax_std2.set_title("std_convoluted")
ax_std2.imshow(minmax_scale(std_conv))
plt.show()
# Performance
%timeit std_windowed(img, win_size)
%timeit std_convoluted(img, win_size)

You can try to do it in the following way:
import skimage

img = cv2.imread('image_path.png')
pad = 3
c = 3

# get the image as blocks with shape (pad, pad, c)
blks = skimage.util.view_as_windows(img, (pad, pad, c))
blks = np.squeeze(blks)  # squeeze the single dimension

# get std along height and width of each blk
std_view = blks.std(axis=(-3, -2))
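If OpenCV is already a dependency, a similar result can be obtained with box filters, using the same E[X²] - (E[X])² identity as the convolution answer above. This is only a sketch, not part of the original answers; cv2.boxFilter averages each channel independently and keeps the output the same size as the input:
import cv2
import numpy as np

img = cv2.imread('image_path.png').astype(np.float64)
ksize = (7, 7)

mean = cv2.boxFilter(img, -1, ksize)           # local mean per channel
mean_sq = cv2.boxFilter(img * img, -1, ksize)  # local mean of the squared image
local_std = np.sqrt(np.clip(mean_sq - mean * mean, 0, None))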

Related

How to concatenate gathered data using mpi4py library in python

I append data to a list with mpi4py and try to save the data sequentially at the source (root == 0) node.
As suggested by Alan22, I've modified the code and it runs, but the script does not concatenate properly, so I get the output file shown in the attached figure 01.
Can anybody help me fix the error? In addition, the Python script I've written [shown below] is probably not the best way to solve the problem.
Is there any way to solve this type of problem efficiently? Any help is highly appreciated.
The Python script is as follows:
import numpy as np
from scipy import signal
from mpi4py import MPI
import random
import cmath, math
import matplotlib.pyplot as plt
import time

# File storing path
save_results_to = 'File storing path'
count_day = 1
count_hour = 1

arr_x = [0, 8.49, 0.0, -8.49, -12.0, -8.49, -0.0, 8.49, 12.0]
arr_y = [0, 8.49, 12.0, 8.49, 0.0, -8.49, -12.0, -8.49, -0.0]
M = len(arr_x)
N = len(arr_y)

np.random.seed(12345)
total_rows = 50000
raw_data = np.reshape(np.random.rand(total_rows * N), (total_rows, N))

# Function of CSD:: Using For Loop
fs = 500  # Sampling frequency

def csdMat(data):
    dat, cols = data.shape  # For 2D data
    total_csd = []
    for i in range(cols):
        col_csd = []
        for j in range(cols):
            freq, Pxy = signal.csd(data[:, i], data[:, j], fs=fs, window='hann', nperseg=100, noverlap=70, nfft=5000)
            col_csd.append(Pxy)
        total_csd.append(col_csd)
    pxy = np.array(total_csd)
    return freq, pxy

# Finding cross spectral density (CSD)
t0 = time.time()
freq, csd = csdMat(raw_data)
print('The shape of the csd data', csd.shape)
print('Time required {} seconds to execute CSD--For loop'.format(time.time() - t0))

kf = 1 * 2 * np.pi / 10
resolution = 50  # This is important:: the HIGHER the Resolution, the higher the execution time!!!
grid_size = N * resolution
kx = np.linspace(-kf, kf, )  # space vector
ky = np.linspace(-kf, kf, grid_size)  # space vector

def DFT2D(data):
    P = len(kx)
    Q = len(ky)
    dft2d = np.zeros((P, Q), dtype=complex)
    for k in range(P):
        for l in range(Q):
            sum_log = []
            mat2d = np.zeros((M, N))
            sum_matrix = 0.0
            for m in range(M):
                for n in range(N):
                    e = cmath.exp(-1j * ((((dx[m] - dx[n]) * kx[l]) / 1) + (((dy[m] - dy[n]) * ky[k]) / 1)))
                    sum_matrix += data[m, n] * e
            dft2d[k, l] = sum_matrix
    return dft2d

dx = arr_x[:]; dy = arr_y[:]

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

data = []
start_freq = 100
end_freq = 109
freq_range = np.arange(start_freq, end_freq)
no_of_freq = len(freq_range)

for fr_count in range(start_freq, end_freq):
    if fr_count % size == rank:
        dft = np.zeros((grid_size, grid_size))
        spec_csd = csd[:, :, fr_count]
        dft = DFT2D(spec_csd)  # Call the DFT2D function
        spec = np.array(np.real(dft))  # Spectrum or 2D_DFT of data[real part]
        print('Shape of spec', spec.shape)
        data.append(spec)
        # data = np.append(data, spec)
        np.seterr(invalid='ignore')

data = comm.gather(data, root=0)
# comm.Allreduce(MPI.IN_PLACE, data, op=MPI.MAX)
print("Rank: ", rank, ". Spectrum shape is:\n", spec.shape)

if rank == 0:
    output_data = np.concatenate(data, axis=0)
    # output_data = np.c_(data, axis=0)
    dft_tot = np.array((output_data), dtype='object')
    res = np.zeros((grid_size, grid_size))
    for k in range(size):
        for i in range(no_of_freq):
            jj = np.around(freq[freq_range[i]], decimals=2)
            # print('The shape of data after indexing', data1.shape)
            # data_final = data1.reshape(data1.shape[0]*data1.shape[1], data1.shape[2])
            res[i * size + k] = dft_tot[k][i]  # np.array(data[k])
            data = np.array(res)
            # print('The shape of the dft at root node', data.shape)
            np.savetxt(save_results_to + f'Day_{count_day}_hour_{count_hour}_f_{jj}_hz.txt', data.view(float))
I use the following bash script (my_file.sh) to run the code, and submit the job with sbatch my_file.sh:
#! /bin/bash -l
#SBATCH -J testmvapich2
#SBATCH -N 1 ## Maximum 04 nodes
#SBATCH --ntasks=10
#SBATCH --cpus-per-task=1 # cpu-cores per task
#SBATCH --mem-per-cpu=3000MB
#SBATCH --time=00:20:00
#SBATCH -p para
#SBATCH --output="stdout.txt"
#SBATCH --error="stderr.txt"
#SBATCH -A camk
##SBATCH --mail-type=ALL
##SBATCH --chdir=/work/cluster_computer/my_name/data_work/MMC331/
eval "$(conda shell.bash hook)"
conda activate myenv
#conda activate fast-mpi4py
cd $SLURM_SUBMIT_DIR
#module purge
#module add mpi/mvapich2-2.2-x86_64
mpirun python3 mpi_test.py
You can try this after data = comm.gather(data, root=0):
if rank == 0:
    print('Type of data:', type(data))
    dft_tot = np.array((data))  # , dtype='object'
    print('shape of DATA array:', dft_tot.shape)
    # print('Type of dft array:', type(dft_tot))
    res = np.zeros((450, 450))
    for k in range(size):
        # for i in range(len(data[rank])):
        for i in range(no_of_freq):
            jj = np.around(freq[freq_range[k]], decimals=2)
            # data1 = np.array(dft_tot[k])
            res[i * size + k] = data[k]
            data = np.array(res)  # .reshape(data1.shape[0]*data1.shape[1], data1.shape[2])
            print('The shape of the dft at root node', data.shape)
            np.savetxt(save_results_to + f'Day_{count_day}_hour_{count_hour}_f_{jj}_hz.txt', data.view(float))
Here is a link that may help: mpi4py on HPC: comm.gather
As mentioned in the comments, there are two typos in the code:
Typo #1: The indices for the arrays kx and ky have been swapped in the line where the variable e is calculated in the function DFT2D(data) (a corrected version of that line is sketched after this list).
Typo #2: The code is meant to run on 10 MPI processes, one for each frequency fr_count from start_freq = 100 to end_freq = 109. For this, the loop and the arange must be written as for fr_count in range(start_freq, end_freq + 1) and freq_range = np.arange(start_freq, end_freq + 1), since neither is end-point inclusive.
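A sketch of the corrected line for typo #1, indexing kx with k and ky with l so the indices match the loop bounds P = len(kx) and Q = len(ky):
e = cmath.exp(-1j*((((dx[m]-dx[n])*kx[k])/1) + (((dy[m]-dy[n])*ky[l])/1)))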
The data = comm.gather(data, root=0) and subsequent output_data = np.concatenate(data, axis=0) operations are performing as they should and as such, the question detracts from the actual issue in the code.
A major issue is that in the line res[i * size + k] = dft_tot[k][i], arrays of disparate sizes are being assigned to each other:
Shape of res: 450 x 450
Shape of dft_tot: 10 x 50 x 450
The value of i*size + k ranges from 0 to 110. I think the user expects dft_tot to have the shape 450 x 450, probably due to the indexing confusion mentioned in typo#2 above. Properly done concatenation would yield dft_tot with shape 500 x 450 (since there are 10 arrays of size 50 x 450).
Currently the gather operation returns a list of lists, each containing a NumPy array of size 50 x 450. Technically, it should return a list of NumPy arrays each of size 50 x 450. Adding the line data = data[0] (since data has only one element anyway in each process) before performing data = comm.gather(data, root=0) will achieve this result.
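A minimal sketch of that fix, using the variable names from the question:
data = data[0]                    # each rank holds exactly one 50 x 450 array
data = comm.gather(data, root=0)  # root now receives a list of arrays, not a list of lists
if rank == 0:
    output_data = np.concatenate(data, axis=0)  # shape (500, 450)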
But this whole process seems redundant.
There are 10 frequencies considered here, and for each frequency there is a data set of size 50 x 450. There are 10 MPI processes, each handling one of the 10 frequencies, and finally 10 files are written, one per frequency. This makes the whole gather operation redundant, as each MPI process can directly write the file for its own frequency.
If instead the dft_tot array were being written as-is by rank 0, then the gather operation would make sense. But splitting the array back into the constituent frequencies defeats the point.
This achieves the same result without the gather operation:
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

start_freq = 100
end_freq = 109
freq_range = np.arange(start_freq, end_freq + 1)
no_of_freq = len(freq_range)

for fr_count in range(start_freq, end_freq + 1):
    if fr_count % size == rank:
        dft = np.zeros((grid_size, grid_size))
        spec_csd = csd[:, :, fr_count]
        dft = DFT2D(spec_csd)  # Call the DFT2D function
        spec = np.array(np.real(dft))  # Spectrum or 2D_DFT of data[real part]
        print('Shape of spec', spec.shape)
        jj = np.around(freq[freq_range[rank]], decimals=2)
        np.savetxt(f'Day_{count_day}_hour_{count_hour}_f_{jj}_hz.txt', spec.view(float))

Only integer scalar arrays can be converted to a scalar index not running under Spyder

I have the following code, which runs well under Visual Studio Code with Python 3.9.10, OpenCV 4.5.5 and NumPy 1.22.1.
I would like to migrate this code to the Spyder IDE (version 5, another notebook) with Python 3.8, OpenCV 4.5.1 and NumPy 1.22.2.
In Spyder, I get the error TypeError: only integer scalar arrays can be converted to a scalar index in the line output_layers = [layer_names[i-1]...] (marked in the code section below).
I have already checked other answers on this site, such as
TypeError when indexing a list with a NumPy array: only integer scalar arrays can be converted to a scalar index
which suggest a list comprehension, but to my understanding I have already implemented this.
What is the reason it runs correctly in one environment but not in the other?
import cv2
import numpy as np

def get_output_layers(net):
    layer_names = net.getLayerNames()
    output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]  # <- TypeError raised here in Spyder
    return output_layers

def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
    label = str(classes[class_id])
    color = COLORS[class_id]
    cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
    cv2.putText(img, label, (x-10, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

image = cv2.imread('horses.jpg')

Width = image.shape[1]
Height = image.shape[0]
scale = 0.00392

classes = None
with open(r'yolov3.txt', 'r') as f:
    classes = [line.strip() for line in f.readlines()]

COLORS = np.random.uniform(0, 255, size=(len(classes), 3))
net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')
blob = cv2.dnn.blobFromImage(image, scale, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(get_output_layers(net))

class_ids = []
confidences = []
boxes = []
conf_threshold = 0.5
nms_threshold = 0.4

for out in outs:
    for detection in out:
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        if confidence > 0.5:
            center_x = int(detection[0] * Width)
            center_y = int(detection[1] * Height)
            w = int(detection[2] * Width)
            h = int(detection[3] * Height)
            x = center_x - w / 2
            y = center_y - h / 2
            class_ids.append(class_id)
            confidences.append(float(confidence))
            boxes.append([x, y, w, h])

indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)

for i in indices:
    box = boxes[i]
    x = box[0]
    y = box[1]
    w = box[2]
    h = box[3]
    draw_prediction(image, class_ids[i], confidences[i], round(x), round(y),
                    round(x + w), round(y + h))

cv2.imshow("object detection", image)
cv2.waitKey()
cv2.imwrite("object-detection.jpg", image)
cv2.destroyAllWindows()
There were subtle, recent API changes w.r.t. handling std::vector in Python: 4.5.1 still returns a 2D array from getUnconnectedOutLayers(), but it is 1D in 4.5.5.
To avoid the whole trouble, simply use:
output_layers = net.getUnconnectedOutLayersNames()
(as is done in the sample)
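If you want to keep the index-based approach instead, a version-agnostic sketch (my own workaround, not from the original answer) is to flatten the result before indexing, which works for both the 2D and the 1D return shape:
def get_output_layers(net):
    layer_names = net.getLayerNames()
    # flatten() handles both the 2D (OpenCV 4.5.1) and 1D (OpenCV 4.5.5) return shapes
    out_indices = np.array(net.getUnconnectedOutLayers()).flatten()
    return [layer_names[i - 1] for i in out_indices]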

No performance increase when looping over FFTs in Cython

I'm writing a script that tracks the shifts of a sample by estimating the displacement of an ensemble of particles. The first implementation, in Python, works all right, but it takes too long for a large number of samples. To combat this, I tried rewriting the method in Cython, but as this was my first time ever using it, I can't seem to get any performance increase. I know 3D FFTs exist and are often faster than looped 2D FFTs, but for this instance they take too much memory and/or are slower than for loops.
Python function:
import numpy as np
from scipy.fft import fftshift
import pyfftw

def python_corr(frame_a, frame_b):
    DTYPEf = 'float32'
    DTYPEc = 'complex64'

    k = frame_a.shape[0]
    m = frame_a.shape[1]  # size y of 2d sample
    n = frame_a.shape[2]  # size x of 2d sample

    fs = [m, n]          # sample shape
    bs = [m, n//2 + 1]   # rfft sample shape

    corr = np.zeros([k, m, n], DTYPEf)  # out

    fft_forward = pyfftw.builders.rfft2(
        pyfftw.empty_aligned(fs, dtype=DTYPEf),
        axes=[-2, -1],
    )
    fft_backward = pyfftw.builders.irfft2(
        pyfftw.empty_aligned(bs, dtype=DTYPEc),
        axes=[-2, -1],
    )

    for ind in range(k):  # looping over 2D samples
        window_a = frame_a[ind, :, :]
        window_b = frame_b[ind, :, :]
        corr[ind, :, :] = fftshift(  # cross correlation via FFT algorithm
            np.real(fft_backward(
                np.conj(fft_forward(window_a)) * fft_forward(window_b)
            )),
            axes=[-2, -1]
        )
    return corr
Cython function:
import numpy as np
from scipy.fft import fftshift
import pyfftw

cimport numpy as np
np.import_array()
cimport cython

DTYPEf = np.float32
ctypedef np.float32_t DTYPEf_t

DTYPEc = np.complex64
ctypedef np.complex64_t DTYPEc_t

@cython.boundscheck(False)
@cython.nonecheck(False)
def cython_corr(
    np.ndarray[DTYPEf_t, ndim=3] frame_a,
    np.ndarray[DTYPEf_t, ndim=3] frame_b,
):
    cdef int ind, k, m, n

    k = frame_a.shape[0]
    m = frame_a.shape[1]  # size y of sample
    n = frame_a.shape[2]  # size x of sample

    cdef DTYPEf_t[:,:] window_a = pyfftw.empty_aligned([m, n], dtype=DTYPEf)  # sample a
    window_a[:,:] = 0.
    cdef DTYPEf_t[:,:] window_b = pyfftw.empty_aligned([m, n], dtype=DTYPEf)  # sample b
    window_b[:,:] = 0.
    cdef DTYPEf_t[:,:] corr = pyfftw.empty_aligned([m, n], dtype=DTYPEf)  # cross-corr matrix
    corr[:,:] = 0.
    cdef DTYPEf_t[:,:,:] out = pyfftw.empty_aligned([k, m, n], dtype=DTYPEf)  # out
    out[:,:,:] = 0.

    cdef object fft_forward
    cdef object fft_backward

    cdef DTYPEc_t[:,:] f2a = pyfftw.empty_aligned([m, n//2 + 1], dtype=DTYPEc)  # rfft out of sample a
    f2a[:,:] = 0. + 0.j
    cdef DTYPEc_t[:,:] f2b = pyfftw.empty_aligned([m, n//2 + 1], dtype=DTYPEc)  # rfft out of sample b
    f2b[:,:] = 0. + 0.j
    cdef DTYPEc_t[:,:] r = pyfftw.empty_aligned([m, n//2 + 1], dtype=DTYPEc)  # power spectrum of sample a and b
    r[:,:] = 0. + 0.j

    fft_forward = pyfftw.builders.rfft2(
        pyfftw.empty_aligned([m, n], dtype=DTYPEf),
        axes=[0, 1],
    )
    fft_backward = pyfftw.builders.irfft2(
        pyfftw.empty_aligned([m, n//2 + 1], dtype=DTYPEc),
        axes=[0, 1],
    )

    for ind in range(k):
        window_a = frame_a[ind, :, :]
        window_b = frame_b[ind, :, :]
        r = np.conj(fft_forward(window_a)) * fft_forward(window_b)  # power spectrum of sample a and b
        corr = fft_backward(r).real  # cross correlation
        corr = fftshift(corr, axes=[0, 1])  # shift Q1 --> Q3, Q2 --> Q4
        # the fftshift could be moved out of the loop, but let's use that as a last resort :)
        out[ind, :, :] = corr
    return out
Test for methods:
import time
aa = bb = np.empty([14000, 24, 24]).astype('float32')  # a small test with 14000 24x24 px samples
print(f'Number of samples: {aa.shape[0]}')
start = time.time()
corr = python_corr(aa, bb)
print(f'Time for Python: {time.time() - start}')
del corr
start = time.time()
corr = cython_corr(aa, bb)
print(f'Time for Cython: {time.time() - start}')
del corr

Tensorflow: How to use a generator for fit() which runs in parallel with multiple processes

I am trying to train a model on a data set which does not fit in my RAM.
Therefore I am using a data generator which inherits from tensorflow.keras.utils.Sequence, as shown below.
This works. However, because I am doing processing on the images, my training is CPU bound. When looking in GPU-Z my GPU is only at 10-20%, but one of my CPU cores is at its maximum.
To solve this I am trying to run the generator in parallel on all my 16 cores. However, when I set use_multiprocessing=True in the fit() function, the program freezes. And using workers=8 does not speed up the process, it just produces batches at uneven intervals.
For example: batches 1-8 are processed immediately, then there is some delay, and then batches 9-16 are processed.
The code below shows what I am trying to do.
# read the dataset
x, o_y = reader.read_dataset_whole(ETLCharacterGroups.kanji)

# split data into train/test parts
percentage = round(len(x) / 100 * 80)

x_train = x[:percentage]
x_test = x[percentage:]

y_train = o_y[:percentage]
y_test = o_y[percentage:]

def distort_sample(img : Image) -> (Image, [int], [int]):
    """
    Distort the given image randomly.

    Randomly applies the transformations:
        - rotation
        - shear
        - scale
        - translate
        - sharpen
        - blur

    Returns the distorted image.
    """
    offset, scale = (0, 0), (64, 64)
    t = random.choice(["sine"])  # "rotate", "shear", "scale",
    f = random.choice(["blur", "sharpen", "smooth"])

    # randomly apply transformations...
    # rotate image
    if("rotate" in t):
        img = img.rotate(random.uniform(-30, 30))

    # shear image
    if("shear" in t):
        y_shear = random.uniform(-0.2, 0.2)
        x_shear = random.uniform(-0.2, 0.2)
        img = img.transform(img.size, PImage.AFFINE, (1, x_shear, 0, y_shear, 1, 0))

    # scale and translate image
    if("scale" in t):
        # scale the image
        size_x = random.randrange(20, 63)
        size_y = random.randrange(20, 63)
        scale = (size_x, size_y)
        offset = (math.ceil((64 - size_x) / 2), math.ceil((64 - size_y) / 2))
        img = img.resize(scale)

        # put it again on a black background (translated)
        background = PImage.new('L', (64, 64))
        trans_x = random.randrange(0, math.floor((64 - size_x)))
        trans_y = random.randrange(0, math.floor((64 - size_y)))
        offset = (trans_x, trans_y)
        background.paste(img, offset)
        img = background

    if("sine" in t):
        t_img = np.array(img)
        A = t_img.shape[0] / 3.0
        w = 2.0 / t_img.shape[1]
        shift = lambda x: random.uniform(0.15, 0.2) * A * np.sin(-2*np.pi*x * w)
        for i in range(t_img.shape[0]):
            t_img[:,i] = np.roll(t_img[:,i], int(shift(i)))
        img = PImage.fromarray(t_img)

    # blur
    if("blur" in f):
        img = img.filter(ImageFilter.GaussianBlur(radius=random.uniform(0.5, 1.2)))

    # sharpen
    if("sharpen" in f):
        img = img.filter(ImageFilter.SHARPEN)

    # smooth
    if("smooth" in f):
        img = img.filter(ImageFilter.SMOOTH)

    return img, offset, scale

class DataGenerator(tf.keras.utils.Sequence):

    def __init__(self, x_col, y_col, batch_size, mode="training", shuffle=True):
        self.batch_size = batch_size
        self.undistorted_images = batch_size // 2
        self.shuffle = shuffle
        self.indices = len(x_col)
        self.x_col = x_col
        self.y_col = y_col

    def __len__(self):
        return self.indices // self.batch_size

    def on_epoch_end(self):
        if(False):
            rng_state = np.random.get_state()
            np.random.shuffle(x)
            np.random.set_state(rng_state)
            np.random.shuffle(o_y)

    def __getitem__(self, index):
        X, Y = [], []
        for i in range(index * self.undistorted_images, (index+1) * self.undistorted_images):
            base_img = self.x_col[i]
            img = PImage.fromarray(np.uint8(base_img.reshape(64, 64) * 255))
            # distort_sample() creates random variations of an image
            img, *unused = distort_sample(img)
            # add transformed image
            X.append(np.array(img).reshape(64, 64, 1))
            Y.append(self.y_col[i])
            # add base image
            X.append(base_img)
            Y.append(self.y_col[i])
        return np.array(X), np.array(Y)

# instantiate generators
training_generator = DataGenerator(x_col=x_train, y_col=y_train, batch_size=256)
validation_generator = DataGenerator(x_col=x_test, y_col=y_test, batch_size=256)

# train the model
hist = model.fit(
    x=training_generator,
    epochs=100,
    validation_data=training_generator,
    max_queue_size=50,
    workers=8,
    #use_multiprocessing=True <- this freezes the program
)
In the end I needed to make the data generator use multiprocessing. To do this, the arrays need to be stored in shared memory and then used in the subprocesses.
import multiprocessing as mp
import numpy as np
from PIL import Image as PImage
from PIL import ImageFilter
import random
import math
import tensorflow as tf

shared_dict = {}

def distort_sample(img : PImage) -> (PImage, [int], [int]):
    """
    Distort the given image randomly.

    Randomly applies the transformations:
        rotation, shear, scale, translate
    Randomly applies the filter:
        sharpen, blur, smooth

    Returns the distorted image.
    """
    offset, scale = (0, 0), (64, 64)
    t = random.choice(["sine", "rotate", "shear", "scale"])
    f = random.choice(["blur", "sharpen", "smooth"])

    # randomly apply transformations...
    # rotate image
    if("rotate" in t):
        img = img.rotate(random.uniform(-15, 15))

    # shear image
    if("shear" in t):
        y_shear = random.uniform(-0.2, 0.2)
        x_shear = random.uniform(-0.2, 0.2)
        img = img.transform(img.size, PImage.AFFINE, (1, x_shear, 0, y_shear, 1, 0))

    # scale and translate image
    if("scale" in t):
        # scale the image
        size_x = random.randrange(25, 63)
        size_y = random.randrange(25, 63)
        scale = (size_x, size_y)
        offset = (math.ceil((64 - size_x) / 2), math.ceil((64 - size_y) / 2))
        img = img.resize(scale)

        # put it again on a black background (translated)
        background = PImage.new('L', (64, 64))
        trans_x = random.randrange(0, math.floor((64 - size_x)))
        trans_y = random.randrange(0, math.floor((64 - size_y)))
        offset = (trans_x, trans_y)
        background.paste(img, offset)
        img = background

    if("sine" in t):
        t_img = np.array(img)
        A = t_img.shape[0] / 3.0
        w = 2.0 / t_img.shape[1]
        shift_factor = random.choice([-1, 1]) * random.uniform(0.15, 0.2)
        shift = lambda x: shift_factor * A * np.sin(-2*np.pi*x * w)
        for i in range(t_img.shape[0]):
            t_img[:,i] = np.roll(t_img[:,i], int(shift(i)))
        img = PImage.fromarray(t_img)

    # blur
    if("blur" in f):
        img = img.filter(ImageFilter.GaussianBlur(radius=random.uniform(0.5, 1.2)))

    # sharpen
    if("sharpen" in f):
        img = img.filter(ImageFilter.SHARPEN)

    # smooth
    if("smooth" in f):
        img = img.filter(ImageFilter.SMOOTH)

    return img, offset, scale

def generator_func(start_index, end_index, x_shape, y_shape):
    X, Y = [], []
    # view the shared buffers as NumPy arrays (no copy)
    x_loc = np.frombuffer(shared_dict["x"], dtype="float16").reshape(x_shape)
    y_loc = np.frombuffer(shared_dict["y"], dtype="b").reshape(y_shape)
    for i in range(start_index, end_index):
        base_img = x_loc[i]
        img = PImage.fromarray(np.uint8(base_img.reshape(64, 64) * 255))
        img, *unused = distort_sample(img)
        # add transformed image
        X.append(np.array(img).reshape(64, 64, 1))
        Y.append(y_loc[i])
        X.append(np.array(img).reshape(64, 64, 1))
        Y.append(y_loc[i])
        # add base image
        #X.append(base_img)
        #Y.append(y_loc[i])
    return X, Y

def generator_initializer(_x_shared, _y_shared):
    # make the shared buffers visible to the worker processes
    shared_dict["x"] = _x_shared
    shared_dict["y"] = _y_shared
class DataGenerator(tf.keras.utils.Sequence):

    def __init__(self, num_samples, batch_size,
                 percentage, mode,
                 x_shared, y_shared,
                 x_np_shape, y_np_shape,
                 processes, shuffle=True):
        self.num_samples = num_samples
        # 50% original images + 50% augmented images
        self.batch_size = batch_size // 2
        self.percentage = percentage
        # an offset to divide the data set into test and train
        self.start_index = 0
        if(mode == "testing"):
            self.start_index = num_samples - (num_samples // 100 * percentage)
        # is this a train or a test generator
        self.mode = mode
        # how many processes should be used for this generator
        self.processes = processes
        # should the arrays be shuffled after each epoch
        self.shuffle = shuffle
        self.x_np_shape = x_np_shape
        self.y_np_shape = y_np_shape
        # a pool of processes for generating augmented data
        self.pool = mp.Pool(processes=self.processes,
                            initializer=generator_initializer,
                            initargs=(x_shared, y_shared))

    def __len__(self):
        return (self.num_samples // 100 * self.percentage) // self.batch_size

    def on_epoch_end(self):
        if(False):
            rng_state = np.random.get_state()
            np.random.shuffle(x_np)
            np.random.set_state(rng_state)
            np.random.shuffle(y_np)

    def __getitem__(self, index):
        arguments = []
        slice_size = self.batch_size // self.processes
        current_batch = index * self.batch_size
        for i in range(self.processes):
            slice_start = self.start_index + (current_batch + i * slice_size)
            slice_end = self.start_index + (current_batch + (i+1) * slice_size)
            arguments.append([slice_start, slice_end, self.x_np_shape, self.y_np_shape])
        return_values = self.pool.starmap(generator_func, arguments)
        X, Y = [], []
        for imgs, labels in return_values:
            X.append(imgs)
            Y.append(labels)
        return np.concatenate(X).astype(np.float16), np.concatenate(Y).astype(np.float16)
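For completeness, the answer does not show how x_shared and y_shared are created. A minimal sketch of one way to set them up, assuming the images are stored as float16 and the labels as int8 (the names x_train and y_train come from the question; everything else here is a hypothetical illustration, not part of the original answer):

import ctypes

# copy the data set into raw shared-memory buffers once, so the worker
# processes can read it without pickling the whole array for every batch
x_np = x_train.astype(np.float16)
y_np = y_train.astype(np.int8)

x_shared = mp.RawArray(ctypes.c_char, x_np.nbytes)
np.frombuffer(x_shared, dtype=np.float16)[:] = x_np.ravel()

y_shared = mp.RawArray(ctypes.c_char, y_np.nbytes)
np.frombuffer(y_shared, dtype=np.int8)[:] = y_np.ravel()

training_generator = DataGenerator(
    num_samples=len(x_np), batch_size=256, percentage=80, mode="training",
    x_shared=x_shared, y_shared=y_shared,
    x_np_shape=x_np.shape, y_np_shape=y_np.shape,
    processes=8,
)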

How to reduce the number of boxes/regions created in MSER

I have been trying to get fewer boxes with MSER, since too many boxes are created on the same element repeatedly with very small pixel differences. My code is below:
## Get mser, and set parameters
_delta = 10
_min_area = 250
_max_area = 800
_max_variation = 10.0
_min_diversity = 30.0
_max_evolution = 10
_area_threshold = 12.0
_min_margin = 2.9
_edge_blur_size = 3
mser = cv2.MSER_create(_delta, _min_area, _max_area, _max_variation,
                       _min_diversity, _max_evolution, _area_threshold, _min_margin, _edge_blur_size)
and then
## Do mser detection, get the coordinates and bboxes on the original image
gray = cv2.cvtColor(final, cv2.COLOR_BGR2GRAY)
coordinates, bboxes = mser.detectRegions(gray)
After this, I see there are 26K boxes created. Which of the parameters can be tuned to get fewer regions (since they overlap a lot)? Kindly help.
_delta is the most important parameter for reducing the number of boxes. Try raising it to 25. The higher the _delta, the fewer blobs you will get.
_min_area - The smallest blob
_max_area - The largest blob
_min_diversity - Raise to reduce the number of overlapping blobs
_max_variation - Raise to reduce areas with high variance
For more information
After that, I would check the bboxes to filter out overlapping blobs.
Code Example
import cv2
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt

img = cv2.imread('input_img.png')
iou_th = 0.95

mser = cv2.MSER_create(_delta=10, _min_area=1000, _max_area=int(0.1 * np.pi * (img.shape[0] / 2)**2), _max_variation=0.1)
regions, bboxes = mser.detectRegions(img)
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]

# Debug plot
img_ = img.copy()
cv2.polylines(img_, hulls, 1, (255, 0, 0), thickness=1)
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(img_)
ax.set_title('MSER with overlapping regions')

size_dict = {k: len(region) for k, region in enumerate(regions)}

# Cull overlapping blobs: connect boxes whose IoU exceeds the threshold
graph = nx.Graph()
graph.add_nodes_from(range(len(hulls)))
for i in range(len(hulls)):
    for j in range(len(hulls)):
        if i >= j:
            continue
        box_i = bboxes[i]
        box_j = bboxes[j]
        tl_i = box_i[:2]
        tl_j = box_j[:2]
        br_i = tl_i + box_i[2:]
        br_j = tl_j + box_j[2:]
        tl = np.maximum(tl_i, tl_j)
        br = np.minimum(br_i, br_j)
        intersected_rect = br - tl
        intersection = np.prod(intersected_rect) if intersected_rect[0] > 0 and intersected_rect[1] > 0 else 0
        union = np.prod(box_i[2:]) + np.prod(box_j[2:]) - intersection
        iou = intersection / union
        if iou > iou_th:
            graph.add_edge(i, j, iou=iou)

# make list of unique regions - pick the smallest region of each connected component
# (connected_component_subgraphs requires networkx < 2.4; on newer versions use
#  (graph.subgraph(c) for c in nx.connected_components(graph)))
trees = list(nx.connected_component_subgraphs(graph))
unique_blobs = []
for tree in trees:
    # Choose the smallest region
    smallest_idx = None
    smallest_blob = np.inf
    for node in tree.nodes():
        if size_dict[node] < smallest_blob:
            smallest_blob = size_dict[node]
            smallest_idx = node
    unique_blobs.append(smallest_idx)

hulls = [hulls[k] for k in unique_blobs]
regions = [regions[k] for k in unique_blobs]
bboxes = [bboxes[k] for k in unique_blobs]
size_dict = {k: len(region) for k, region in enumerate(regions)}

# debug plot
img_ = img.copy()
cv2.polylines(img_, hulls, 1, (255, 0, 0), thickness=1)
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(img_)
ax.set_title('MSER with unique regions')
