Keras layer asks for different shape than in the summary - keras

I'm writing a U-Net CNN in Keras and trying to use fit_generator for training. For this to work, I wrote a generator script that feeds the images and labels to my network (the plain fit function works, but I want to train on a big dataset that cannot fit into memory).
My problem is that the model summary correctly says the output layer has shape (None, 288, 512, 4):
https://i.imgur.com/69xG8pO.jpg
but when I try actual training I get this error:
https://i.imgur.com/j7H6sHX.jpg
I don't understand why Keras wants (288, 512, 1) when the summary expects (288, 512, 4).
I tried it with my own U-Net code and also copied working code from GitHub, but both have the exact same problem, which leads me to believe that my generator script is the weak link. Below is the code I used (the image and label array functions used here already worked when I used them with "fit" in a previous CNN):
def generator(img_path, label_path, batch_size, height, width, num_classes):
    input_pairs = get_pairs(img_path, label_path)  # rewrite if param name changes
    random.shuffle(input_pairs)
    iterate_pairs = itertools.cycle(input_pairs)
    while True:
        X = []
        Y = []
        for _ in range(batch_size):
            im, lab = next(iterate_pairs)
            appended_im = next(iter(im))
            appended_lab = next(iter(lab))
            X.append(input_image_array(appended_im, width, height))
            Y.append(input_label_array(appended_lab, width, height, num_classes, palette))
        yield (np.array(X), np.array(Y))
I tried the generator out and the batches it provides have the following shapes (for a batch size of 15):
(15, 288, 512, 3)
(15, 288, 512, 4)
So I really do not know what could be the problem here.
EDIT: Here is the model code I used:
def conv_block(input_tensor, n_filter, kernel=(3, 3), padding='same', initializer="he_normal"):
    x = Conv2D(n_filter, kernel, padding=padding, kernel_initializer=initializer)(input_tensor)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(n_filter, kernel, padding=padding, kernel_initializer=initializer)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x
def deconv_block(input_tensor, residual, n_filter, kernel=(3, 3), strides=(2, 2), padding='same'):
    y = Conv2DTranspose(n_filter, kernel, strides, padding)(input_tensor)
    y = concatenate([y, residual], axis=3)
    y = conv_block(y, n_filter)
    return y
# NETWORK - n_classes is the desired number of classes, filters are fixed
def Unet(input_height, input_width, n_classes=4, filters=64):
    # Downsampling
    input_layer = Input(shape=(input_height, input_width, 3), name='input')
    conv_1 = conv_block(input_layer, filters)
    conv_1_out = MaxPooling2D(pool_size=(2, 2))(conv_1)
    conv_2 = conv_block(conv_1_out, filters*2)
    conv_2_out = MaxPooling2D(pool_size=(2, 2))(conv_2)
    conv_3 = conv_block(conv_2_out, filters*4)
    conv_3_out = MaxPooling2D(pool_size=(2, 2))(conv_3)
    conv_4 = conv_block(conv_3_out, filters*8)
    conv_4_out = MaxPooling2D(pool_size=(2, 2))(conv_4)
    conv_4_drop = Dropout(0.5)(conv_4_out)
    conv_5 = conv_block(conv_4_drop, filters*16)
    conv_5_drop = Dropout(0.5)(conv_5)
    # Upsampling
    deconv_1 = deconv_block(conv_5_drop, conv_4, filters*8)
    deconv_1_drop = Dropout(0.5)(deconv_1)
    deconv_2 = deconv_block(deconv_1_drop, conv_3, filters*4)
    deconv_2_drop = Dropout(0.5)(deconv_2)
    deconv_3 = deconv_block(deconv_2_drop, conv_2, filters*2)
    deconv_3 = deconv_block(deconv_3, conv_1, filters)
    # Output - mapping each 64-component feature vector to number of classes
    output = Conv2D(n_classes, (1, 1))(deconv_3)
    output = BatchNormalization()(output)
    output = Activation("softmax")(output)
    # embed into functional API
    model = Model(inputs=input_layer, outputs=output, name="Unet")
    return model

Change your loss to categorical_crossentropy.
When using the sparse_categorical_crossentropy loss, your targets
should be integer targets.
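A minimal sketch of the two options (the optimizer below is only a placeholder; keep whatever you already compile with):

# Option 1: keep the one-hot (288, 512, 4) masks and switch the loss
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Option 2: keep sparse_categorical_crossentropy and yield integer masks instead,
# e.g. in the generator right before yielding the batch:
# Y = np.argmax(np.array(Y), axis=-1)   # (batch, 288, 512) integer class ids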

Related

Converting TensorFlow Keras model API to model subclassing

For a simple TF2 Object detection CNN architecture defined using Keras's functional API as follows:
input_ = Input(shape = (144, 144, 3), name = 'image')
# name - An optional name string for the Input layer. Should be unique in
# a model (do not reuse the same name twice). It will be autogenerated if it isn't provided.
# Here 'image' is the Python3 dict's key used to map the data to one of the layer in the model.
x = input_
# Define a conv block-
x = Conv2D(filters = 64, kernel_size = 3, activation = 'relu')(x)
x = BatchNormalization()(x)
x = MaxPool2D(pool_size = 2)(x)
x = Flatten()(x) # flatten the last pooling layer's output volume
x = Dense(256, activation='relu')(x)
# We are using a data generator which yields dictionaries. Using 'name' argument makes it
# possible to map the correct data generator's output to the appropriate layer
class_out = Dense(units = 9, activation = 'softmax', name = 'class_out')(x) # classification output
box_out = Dense(units = 2, activation = 'linear', name = 'box_out')(x) # regression output
# Define the CNN model-
model = tf.keras.models.Model(input_, [class_out, box_out]) # since we have 2 outputs, we use a list
I am attempting to define it using Model sub-classing as:
class OD(Model):
    def __init__(self):
        super(OD, self).__init__()
        self.conv1 = Conv2D(filters = 64, kernel_size = 3, activation = None)
        self.bn = BatchNormalization()
        self.pool = MaxPool2D(pool_size = 2)
        self.flatten = Flatten()
        self.dense = Dense(256, activation = None)
        self.class_out = Dense(units = 9, activation = None, name = 'class_out')
        self.box_out = Dense(units = 2, activation = 'linear', name = 'box_out')

    def call(self, x):
        x = tf.nn.relu(self.bn(self.conv1(x)))
        x = self.pool(x)
        x = self.flatten(x)
        x = tf.nn.relu(self.dense(x))
        x = [tf.nn.softmax(self.class_out(x)), self.box_out(x)]
        return x
A batch of training data is obtained as:
example, label = next(data_generator(batch_size = 32))
example.keys()
# dict_keys(['image'])
image = example['image']
image.shape
# (32, 144, 144, 3)
label.keys()
# dict_keys(['class_out', 'box_out'])
label['class_out'].shape, label['box_out'].shape
# ((32, 9), (32, 2))
Is my Model sub-classing architecture equivalent to Keras's functional API?
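One quick sanity check (not a full proof of equivalence) is to build the subclassed model on a dummy batch and compare its output shapes and parameter count against the functional model above; the zero tensor below is purely a placeholder:

subclassed = OD()
dummy = tf.zeros((1, 144, 144, 3))        # placeholder batch of one image
class_pred, box_pred = subclassed(dummy)  # calling the model builds its weights
print(class_pred.shape, box_pred.shape)   # expect (1, 9) and (1, 2)
subclassed.summary()                      # compare parameter count with model.summary()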

How do I test a complex CNN model on a new image?

I am learning CNNs and I have found a script online that classifies building rooftops from satellite images. The script works just fine, but I am not able to figure out a way to test it on a new single image. I show the code briefly and then what I have tried:
seq = iaa.Sequential([
    iaa.imgcorruptlike.Fog(severity=1),
    iaa.imgcorruptlike.Spatter(severity=1),
])
batch_size = 16
size = 512
epochs =50
version = 1 # version 2 for MobilV2unet
data_augmentation = True
model_type = 'UNet%d' % (version)
translearn = True
from tensorflow.keras.applications import MobileNetV2
def m_u_net(input_shape):
    inputs = Input(shape=input_shape, name="input_image")
    encoder = MobileNetV2(input_tensor=inputs, weights="imagenet", include_top=False, alpha=1.3)
    #encoder.trainable=False
    skip_connection_names = ["input_image", "block_1_expand_relu", "block_3_expand_relu", "block_6_expand_relu"]
    encoder_output = encoder.get_layer("block_13_expand_relu").output
    f = [16, 32, 48, 64]
    x = encoder_output
    for i in range(1, len(skip_connection_names)+1, 1):
        x_skip = encoder.get_layer(skip_connection_names[-i]).output
        x = UpSampling2D((2, 2))(x)
        x = Concatenate()([x, x_skip])
        x = Conv2D(f[-i], (3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2D(f[-i], (3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
    x = Conv2D(1, (1, 1), padding="same")(x)
    x = Activation("sigmoid")(x)
    model = Model(inputs, x)
    return model
def load_rasters_simple(path, pathX, pathY):  # Subset from original raster with extent and upper-left coord
    """Load training data pairs (two high resolution images and two low resolution images)"""
    pathXabs = os.path.join(path, pathX)
    pathYabs = os.path.join(path, pathY)
    le = len(os.listdir(pathXabs))
    stackX = []
    stackY = []
    for i in range(0, le):
        fileX = os.path.join(pathXabs, os.listdir(pathXabs)[i])
        fileY = os.path.join(pathYabs, os.listdir(pathXabs)[i])
        dataX = gdal_array.LoadFile(fileX)  # .astype(np.int), ysize=extent[1], xsize=extent[0]
        stackX.append(dataX)
        dataY = gdal_array.LoadFile(fileY)  # .astype(np.int), ysize=extent[1], xsize=extent[0]
        stackY.append(dataY)
    stackX = np.array(stackX)
    stackY = np.array(stackY)
    return stackX, stackY
X, Y= load_rasters_simple('/Users/vaibhavsaxena/Desktop/segmentation/Classification/Satellite dataset ó± (global cities)','image','label')
def slice(arr, size, inputsize, stride):
    result = []
    if stride is None:
        stride = size
    for i in range(0, (inputsize-size)+1, stride):
        for j in range(0, (inputsize-size)+1, stride):
            s = arr[i:(i+size), j:(j+size), ]
            result.append(s)
    result = np.array(result)
    return result
def batchslice(arr, size, inputsize, stride, num_img):
    result = []
    for i in range(0, num_img):
        s = slice(arr[i,], size, inputsize, stride)
        result.append(s)
    result = np.array(result)
    result = result.reshape(result.shape[0]*result.shape[1], result.shape[2], result.shape[3], -1)
    return result
Y=batchslice(Y, size, Y.shape[1], size, Y.shape[0]).squeeze()
X_cl =batchslice(X_cl, size, X_cl.shape[1], size, X_cl.shape[0])
X_train = X_cl[:int(X_cl.shape[0]*0.8),]
Y_train = Y[:int(Y.shape[0]*0.8),]
X_test = X_cl[int(X_cl.shape[0]*0.8)+1:,]
Y_test = Y[int(Y.shape[0]*0.8)+1:,]
Then comes the big U-Net model architecture. The whole script can be found here.
This model works just fine with the dataset. I am trying to test it with my own out-of-dataset image, and this is what I have tried:
model = load_model('no_aug_unet_model.h5', custom_objects=dependencies)
model.compile(loss='binary_crossentropy', metrics=[iou],
              optimizer=Adam(learning_rate=lr_schedule(0)))
from keras.preprocessing import image
test_image= image.load_img('bangkok_noi_2.jpg', target_size = (2000, 2000))
test_image = image.img_to_array(test_image)
test_image1 = test_image.reshape((1,2000,2000,3))
testpre = model.predict(test_image1)
img = Image.fromarray(test_image, 'RGB')
img.show()
The original shape of my test image is (1852, 3312, 3).
The predicted image I get is strange and makes no sense, contrary to my expectations. I believe I am doing the wrong preprocessing on my test image. Any help would be greatly appreciated.
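Since the training pipeline above slices the rasters into size = 512 tiles, one guess (an assumption, not a verified fix) is that the test image needs the same preprocessing the training tiles got, e.g. a 512x512 resize or tiling plus the same scaling, instead of a (1, 2000, 2000, 3) reshape:

# Sketch only: assumes the model expects 512x512x3 inputs scaled to [0, 1].
test_image = image.load_img('bangkok_noi_2.jpg', target_size=(512, 512))
test_image = image.img_to_array(test_image) / 255.0           # the scaling is an assumption
test_batch = np.expand_dims(test_image, axis=0)               # shape (1, 512, 512, 3)
pred_mask = model.predict(test_batch)                         # single-channel sigmoid mask
binary_mask = (pred_mask[0, ..., 0] > 0.5).astype(np.uint8)   # the threshold is an assumption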

Keras OOM when allocating tensor with shape

I have been scratching my head over this OOM error for days, and I am new to Keras. I have tried sampling down my data, lowering the batch size, and removing layers from the 3D U-Net, but nothing works for me. I am using the LIDC-IDRI dataset of CT scans of 1010 patients. After preprocessing I save 64x64x64 volumes to disk, extracted from resampled 256x256x256 whole CT scans (at first I tried to train on whole CT scans, but after getting OOM I decided to go with 64-cubed volumes). Each patient has 64 volumes of 64x64x64, which in total makes 64,640 samples on which I have to train my 3D U-Net.
Here’s my Keras code for the model:
im_width = 64
im_height = 64
im_depth = 64
path_train = 'D:/LIDC-IDRI-Dataset/'
def npz_volume_generator(inputPath, bs, mode="train", aug=None):
    batch_start_index = 0
    patients = os.listdir(inputPath + "images")
    # loop indefinitely
    while True:
        # initialize our batches of scans and masks
        scan_pixels = []
        mask_pixels = []
        # keep looping until we reach our batch size
        for id_ in range(batch_start_index, batch_start_index+bs):
            # attempt to read the next sample from path
            scan_pixel = np.zeros((im_depth, im_width, im_height))
            scan_pixel = np.load(inputPath + 'images/' + patients[id_])['arr_0']
            mask_pixel = np.zeros((im_depth, im_width, im_height))
            mask_pixel = np.load(inputPath + 'masks/' + patients[id_])['arr_0']
            # check to see if we have reached the end of our samples
            if(batch_start_index >= len(patients)):
                # reset the batch start index to the beginning of our samples
                batch_start_index -= len(patients)
                # if we are evaluating we should now break from our
                # loop to ensure we don't continue to fill up the
                # batch from samples from the beginning
                if mode == "eval":
                    break
            # update our corresponding batch lists
            scan_pixels.append(scan_pixel)
            mask_pixels.append(mask_pixel)
        batch_start_index += bs
        if(batch_start_index >= len(patients)):
            batch_start_index -= len(patients)
        # if the data augmentation object is not None, apply it
        if aug is not None:
            (scan_pixels, mask_pixels) = next(aug.flow(np.array(scan_pixels), np.array(mask_pixels), batch_size=bs))
        # Re-shaping and adding a channel dimension (5D Tensor)
        # batch_size, length, breadth, height, channel [None, im_width, im_height, im_depth, 1]
        # yield the batch to the calling function
        yield (np.array(expand_dims(scan_pixels, axis=4)), np.array(expand_dims(mask_pixels, axis=4)))
def conv3d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    # first layer
    x = Conv3D(filters=n_filters, kernel_size=(kernel_size, kernel_size, kernel_size), kernel_initializer="he_normal",
               padding="same")(input_tensor)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    # second layer
    x = Conv3D(filters=n_filters, kernel_size=(kernel_size, kernel_size, kernel_size), kernel_initializer="he_normal",
               padding="same")(x)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x
def get_unet(input_img, n_filters=16, dropout=0.5, batchnorm=True):
    # contracting path
    c1 = conv3d_block(input_img, n_filters=n_filters*1, kernel_size=3, batchnorm=batchnorm)
    p1 = MaxPooling3D((2, 2, 2))(c1)
    p1 = Dropout(dropout*0.5)(p1)
    c2 = conv3d_block(p1, n_filters=n_filters*2, kernel_size=3, batchnorm=batchnorm)
    p2 = MaxPooling3D((2, 2, 2))(c2)
    p2 = Dropout(dropout)(p2)
    c3 = conv3d_block(p2, n_filters=n_filters*4, kernel_size=3, batchnorm=batchnorm)
    p3 = MaxPooling3D((2, 2, 2))(c3)
    p3 = Dropout(dropout)(p3)
    c4 = conv3d_block(p3, n_filters=n_filters*16, kernel_size=3, batchnorm=batchnorm)
    # expansive path
    u5 = Conv3DTranspose(n_filters*8, (3, 3, 3), strides=(2, 2, 2), padding='same')(c4)
    u5 = concatenate([u5, c3])
    u5 = Dropout(dropout)(u5)
    c5 = conv3d_block(u5, n_filters=n_filters*8, kernel_size=3, batchnorm=batchnorm)
    u6 = Conv3DTranspose(n_filters*4, (3, 3, 3), strides=(2, 2, 2), padding='same')(c5)
    u6 = concatenate([u6, c2])
    u6 = Dropout(dropout)(u6)
    c6 = conv3d_block(u6, n_filters=n_filters*4, kernel_size=3, batchnorm=batchnorm)
    u7 = Conv3DTranspose(n_filters*2, (3, 3, 3), strides=(2, 2, 2), padding='same')(c6)
    u7 = concatenate([u7, c1])
    u7 = Dropout(dropout)(u7)
    c7 = conv3d_block(u7, n_filters=n_filters*2, kernel_size=3, batchnorm=batchnorm)
    outputs = Conv3D(1, (1, 1, 1), activation='sigmoid')(c7)
    model = Model(inputs=[input_img], outputs=[outputs])
    return model
# initialize the number of epochs to train for and batch size
NUM_EPOCHS = 50
BS = 8
# initialize the total number of training and testing image
NUM_TRAIN_IMAGES = len(os.listdir(path_train+ 'images/'))
NUM_TEST_IMAGES = len(os.listdir(path_train+ 'test/'))
# construct the training image generator for data augmentation
aug = ImageDataGenerator(rotation_range=20, zoom_range=0.15,
                         width_shift_range=0.2, height_shift_range=0.2, shear_range=0.15,
                         horizontal_flip=True, fill_mode="nearest")
# initialize both the training and testing image generators
trainGen = npz_volume_generator(path_train, BS, mode="train", aug=aug)
testGen = npz_volume_generator(path_train, BS, mode="train", aug=None)
# initialize our Keras model and compile it
model = get_unet(Input((im_depth, im_width, im_height, 1)), n_filters=16, dropout=0.05, batchnorm=True)
print(model.summary())
model.compile(optimizer=Adam(), loss="binary_crossentropy", metrics=["accuracy"])
# train the network
print("[INFO] training w/ generator...")
H = model.fit_generator(trainGen, steps_per_epoch=NUM_TRAIN_IMAGES // BS,
                        validation_data=testGen, validation_steps=NUM_TEST_IMAGES // BS,
                        epochs=NUM_EPOCHS)
There are two issues with the output I get. The first warning I get is this:
\Anaconda3\lib\site-packages\keras_preprocessing\image\numpy_array_iterator.py:127: UserWarning: NumpyArrayIterator is set to use the data format convention "channels_last" (channels on axis 3), i.e. expected either 1, 3, or 4 channels on axis 3. However, it was passed an array with shape (8, 64, 64, 64) (64 channels). str(self.x.shape[channels_axis]) + ' channels).')
It states that the shape passed to Keras was (8, 64, 64, 64) (64 channels). However, the input shape I declared in Keras's Input() function is (64, 64, 64, 1), with 1 being the channel on the last axis (the batch size, 8 in my case, is not declared there). Yet Keras says the shape passed to it has 64 channels, ignoring the last dimension I gave it.
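A possible reading of the warning (an assumption based on the generator code above, since ImageDataGenerator is written for 2-D images): aug.flow() receives the volumes before the channel axis is added, so it sees a 4-D array and treats its last axis as channels:

volumes = np.zeros((8, 64, 64, 64))              # a batch of 3-D volumes, no channel axis yet
print(volumes.shape[3])                          # 64 -> reported as "64 channels" by aug.flow()
with_channels = np.expand_dims(volumes, axis=4)  # (8, 64, 64, 64, 1), what Input() expects
print(with_channels.shape)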
The second error that I get is as following:
ResourceExhaustedError: OOM when allocating tensor with shape[8,32,64,64,64] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
[[{{node conv3d_transpose_3/conv3d_transpose}}]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
[[{{node loss/mul}}]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
Again I have a problem with the shape here, the tensor shape. My shape should be (8, 64, 64, 64, 1), but what it reports is (8, 32, 64, 64, 64): not only is the number of channels huge, I also have no idea where that 32 came from. Is there a different interpretation of the tensor shape? I think something is wrong with my input shapes (they are unknowingly being set very large), and that is causing the OOM error.
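For what it's worth, a shape like [8, 32, 64, 64, 64] looks more like an intermediate activation than the input (the decoder above does use n_filters*2 = 32 filters at full 64x64x64 resolution), and even one float32 tensor of that shape is sizable; a rough back-of-the-envelope check:

elements = 8 * 32 * 64 * 64 * 64   # the element count of the shape reported in the error
print(elements * 4 / 1024**2)      # ~256 MB for a single float32 tensor of that shape
# Training keeps many such activations (plus gradients) in GPU memory at once,
# which is why a smaller batch size or fewer filters usually relieves the OOM.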

Keras : How to merge layers sequentially not use "concatenate"

I'm trying to build a model that combines a CNN and an LSTM.
I want to feed multiple inputs into the CNN and pass its outputs sequentially into the input of the LSTM. However, there is a problem when merging the CNN outputs: if I use concatenate, they are stretched along axis=-1 as shown. But since I will put them into the LSTM structure, I'd like to stack them sequentially instead, and I didn't find any merge function other than concatenate. The shape I want is (None, 6, 1904), as in the image below. What can I do?
Below is my build code.
def build_model():
    in_layers, out_layers = [], []
    for i in range(in_len):
        inputs = Input(shape=(row, col, channel))
        conv1 = Conv2D(4, (12, 12), activation='relu')(inputs)
        pool1 = pooling.MaxPooling2D(pool_size=(4, 4))(conv1)
        conv2 = Conv2D(4, (7, 7), activation='relu')(pool1)
        pool2 = pooling.MaxPooling2D(pool_size=(3, 3))(conv2)
        conv3 = Conv2D(8, (5, 5), activation='relu')(pool2)
        pool3 = pooling.MaxPooling2D(pool_size=(2, 2))(conv3)
        flat = Flatten()(pool3)
        # store layers
        in_layers.append(inputs)
        out_layers.append(flat)
        print(type(flat))
    merged = concatenate(out_layers)
    model = Model(inputs=in_layers, outputs=merged)
    plot_model(model, show_shapes=True, to_file='cnn_lstm_real.png')
    return model
What you want is still concatenation, but on a different, new axis. The concatenate layer and function allow you to specify the axis, so you can do it like this:
def build_model():
    in_layers, out_layers = [], []
    for i in range(in_len):
        inputs = Input(shape=(row, col, channel))
        conv1 = Conv2D(4, (12, 12), activation='relu')(inputs)
        pool1 = pooling.MaxPooling2D(pool_size=(4, 4))(conv1)
        conv2 = Conv2D(4, (7, 7), activation='relu')(pool1)
        pool2 = pooling.MaxPooling2D(pool_size=(3, 3))(conv2)
        conv3 = Conv2D(8, (5, 5), activation='relu')(pool2)
        pool3 = pooling.MaxPooling2D(pool_size=(2, 2))(conv3)
        flat = Flatten()(pool3)
        flat = Reshape((1, -1))(flat)
        # store layers
        in_layers.append(inputs)
        out_layers.append(flat)
    merged = concatenate(out_layers, axis=1)
    model = Model(inputs=in_layers, outputs=merged)
    plot_model(model, show_shapes=True, to_file='cnn_lstm_real.png')
    return model
The only big difference is that you need to add the new axis explicitly in the output of each branch (hence the Reshape layer), in order to allow for concatenation to happen along that axis.
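Since the goal is to feed this into an LSTM, the merged (None, 6, 1904) tensor can then be consumed directly as a sequence of 6 timesteps. A sketch replacing the end of build_model above (the LSTM width and the final Dense head are arbitrary placeholders):

    merged = concatenate(out_layers, axis=1)  # (None, 6, 1904)
    lstm_out = LSTM(64)(merged)               # 64 units is an arbitrary choice
    output = Dense(1)(lstm_out)               # replace with whatever head the task needs
    model = Model(inputs=in_layers, outputs=output)
    return model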

Extracting Activation maps from trained neural network

I have a trained CNN model. I am trying to extract the output of each convolutional layer and plot the results to explore which regions of the image have high activations. Any ideas on how to do this?
Below is the network I have trained.
input_shape = (3,227,227)
x_input = Input(input_shape)
# Conv Layer 1
x = Convolution2D(96, 7, 7, subsample=(4, 4), activation='relu',
                  name='conv_1', init='he_normal')(x_input)
x = MaxPooling2D((3, 3), strides=(2,2), name='maxpool')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((2,2))(x)
# Conv Layer 2
x = Convolution2D(256, 5,5,activation='relu',name='conv_2', init='he_normal')(x)
x = MaxPooling2D((3, 3), strides=(2,2),name='maxpool2')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((2,2))(x)
# Conv Layer 3
x = Convolution2D(384, 3, 3, activation='relu',
                  name='conv_3', init='he_normal')(x)
x = MaxPooling2D((3, 3), strides=(2,2),name='maxpool3')(x)
x = Flatten()(x)
x = Dense(512, activation = "relu")(x)
x = Dropout(0.5)(x)
x = Dense(512, activation ="relu")(x)
x = Dropout(0.5)(x)
predictions = Dense(2, activation="softmax")(x)
model = Model(inputs = x_input, outputs = predictions)
Thanks!
Look at this GitHub issue and the FAQ entry "How can I obtain the output of an intermediate layer?". It seems the easiest way to do this is to define new models with the outputs that you want. For example:
input_shape = (3,227,227)
x_input = Input(input_shape)
# Conv Layer 1
# Save layer in a variable
conv1 = Convolution2D(96, 7, 7, subsample=(4, 4), activation='relu',
                      name='conv_1', init='he_normal')(x_input)
x = conv1
x = MaxPooling2D(...)(x)
# ...
conv2 = Convolution2D(...)(x)
x = conv2
# ...
conv3 = Convolution2D(...)(x)
x = conv3
# ...
predictions = Dense(2, activation="softmax")(x)
# Main model
model = Model(inputs=x_input, outputs=predictions)
# Intermediate evaluation model
conv_layers_model = Model(inputs=x_input, outputs=[conv1, conv2, conv3])
# After training is done, retrieve intermediate evaluations for data
conv1_val, conv2_val, conv3_val = conv_layers_model.predict(data)
Note that since you are using the same objects in both models the weights are automatically shared between them.
A more complete example of activation visualization can be found here. In that case they use the K.function approach.
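To then see which regions of the image light up, a minimal sketch (assuming matplotlib is available and data holds at least one image) is to plot a few channels of one of the returned feature maps:

import matplotlib.pyplot as plt

conv1_val, conv2_val, conv3_val = conv_layers_model.predict(data)
fmap = conv1_val[0]                      # feature maps for the first image
# With a channels-first input like (3, 227, 227), the channel axis of fmap is axis 0.
for i in range(4):                       # plot the first 4 channels
    plt.subplot(1, 4, i + 1)
    plt.imshow(fmap[i], cmap='viridis')
    plt.axis('off')
plt.show()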
