Related: Creating Deeper Bottleneck ResNet from Scratch using Tensorflow
Hi everyone, recently I've been learning how to create ResNet50 using tf.keras, following the link given above. My question is: to train this model, should I assign the 'input' variable (line 75) to the dataset I wish to train on? If so, how should I do that?
#import the needed libraries
from tensorflow.keras.utils import model_to_dot
from IPython.display import SVG
import pydot
import graphviz
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization
from tensorflow.keras.layers import MaxPool2D, GlobalAvgPool2D
from tensorflow.keras.layers import Add, ReLU, Dense
from tensorflow.keras import Model
#import tensorflow
#print(tensorflow.__version__)
#Conv-BatchNorm-ReLU block
def conv_batchnorm_relu(x, filters, kernel_size, strides=1):
    x = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    return x
#identity block
def identity_block(tensor, filters):
    x = conv_batchnorm_relu(tensor, filters=filters, kernel_size=1, strides=1)
    x = conv_batchnorm_relu(x, filters=filters, kernel_size=3, strides=1)
    x = Conv2D(filters=4*filters, kernel_size=1, strides=1)(x)
    x = BatchNormalization()(x)
    x = Add()([tensor, x]) #skip connection
    x = ReLU()(x)
    return x
#Projection block
def projection_block(tensor, filters, strides):
    #left stream
    x = conv_batchnorm_relu(tensor, filters=filters, kernel_size=1, strides=strides)
    x = conv_batchnorm_relu(x, filters=filters, kernel_size=3, strides=1)
    x = Conv2D(filters=4*filters, kernel_size=1, strides=1)(x)
    x = BatchNormalization()(x)
    #right stream
    shortcut = Conv2D(filters=4*filters, kernel_size=1, strides=strides)(tensor)
    shortcut = BatchNormalization()(shortcut)
    x = Add()([shortcut, x]) #skip connection
    x = ReLU()(x)
    return x
#ResNet block
#The first block is always a projection block; the rest are identity blocks
def resnet_block(x, filters, reps, strides):
    x = projection_block(x, filters, strides)
    for _ in range(reps - 1):
        x = identity_block(x, filters)
    return x
#Create Model
input = Input(shape=(224, 224, 3)) #Should I assign this input variable with the dataset? If so, how do I do that?
#################
x = conv_batchnorm_relu(input, filters=64, kernel_size=7, strides=2)
x = MaxPool2D(pool_size=3, strides=2)(x)
x = resnet_block(x, filters=64, reps=3, strides=1)
x = resnet_block(x, filters=128, reps=4, strides=2)
x = resnet_block(x, filters=256, reps=6, strides=2)
x = resnet_block(x, filters=512, reps=3, strides=2)
x = GlobalAvgPool2D()(x)
#################
output = Dense(1000, activation = 'softmax')(x)
model = Model(inputs=input, outputs=output)
model.summary()
SVG(model_to_dot(model, show_shapes=True, show_layer_names=True, rankdir='TB',
                 expand_nested=False, dpi=60, subgraph=False).create(prog='dot', format='svg'))
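For reference, the Input layer only defines a symbolic placeholder that fixes the expected tensor shape; the dataset itself is supplied later, to model.fit. A minimal training sketch (x_train and y_train are hypothetical arrays, not part of the original code):

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# x_train: hypothetical float array of shape (num_samples, 224, 224, 3)
# y_train: hypothetical one-hot labels of shape (num_samples, 1000)
model.fit(x_train, y_train, batch_size=32, epochs=10, validation_split=0.1)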
I am learning CNNs and I found a script online that classifies building rooftops from satellite images. The script works just fine, but I am not able to figure out a way to test it on a new single image. I am showing the code briefly, and then I will show what I have tried:
seq = iaa.Sequential([
    iaa.imgcorruptlike.Fog(severity=1),
    iaa.imgcorruptlike.Spatter(severity=1),
])
batch_size = 16
size = 512
epochs =50
version = 1 # version 2 for the MobileNetV2 U-Net
data_augmentation = True
model_type = 'UNet%d' % (version)
translearn = True
from tensorflow.keras.applications import MobileNetV2
def m_u_net(input_shape):
    inputs = Input(shape=input_shape, name="input_image")
    encoder = MobileNetV2(input_tensor=inputs, weights="imagenet", include_top=False, alpha=1.3)
    #encoder.trainable = False
    skip_connection_names = ["input_image", "block_1_expand_relu", "block_3_expand_relu", "block_6_expand_relu"]
    encoder_output = encoder.get_layer("block_13_expand_relu").output
    f = [16, 32, 48, 64]
    x = encoder_output
    for i in range(1, len(skip_connection_names) + 1, 1):
        x_skip = encoder.get_layer(skip_connection_names[-i]).output
        x = UpSampling2D((2, 2))(x)
        x = Concatenate()([x, x_skip])
        x = Conv2D(f[-i], (3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2D(f[-i], (3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
    x = Conv2D(1, (1, 1), padding="same")(x)
    x = Activation("sigmoid")(x)
    model = Model(inputs, x)
    return model
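# Usage sketch (not from the original script): the MobileNetV2 encoder is cut at
# block_13_expand_relu (1/16 of the input resolution), and the four UpSampling2D
# steps bring the sigmoid mask back to full resolution, e.g.:
# model = m_u_net((512, 512, 3))   # hypothetical call; output shape (512, 512, 1)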
def load_rasters_simple(path, pathX, pathY): # Subset from original raster with extent and upper-left coord
    """Load training data pairs (X images and their matching Y labels)."""
    pathXabs = os.path.join(path, pathX)
    pathYabs = os.path.join(path, pathY)
    le = len(os.listdir(pathXabs))
    stackX = []
    stackY = []
    for i in range(0, le):
        fileX = os.path.join(pathXabs, os.listdir(pathXabs)[i])
        fileY = os.path.join(pathYabs, os.listdir(pathXabs)[i]) # assumes identical filenames in both folders
        dataX = gdal_array.LoadFile(fileX)
        stackX.append(dataX)
        dataY = gdal_array.LoadFile(fileY)
        stackY.append(dataY)
    stackX = np.array(stackX)
    stackY = np.array(stackY)
    return stackX, stackY
X, Y = load_rasters_simple('/Users/vaibhavsaxena/Desktop/segmentation/Classification/Satellite dataset Ⅰ (global cities)', 'image', 'label')
def slice(arr, size, inputsize, stride):
    result = []
    if stride is None:
        stride = size
    for i in range(0, (inputsize - size) + 1, stride):
        for j in range(0, (inputsize - size) + 1, stride):
            s = arr[i:(i + size), j:(j + size), ]
            result.append(s)
    result = np.array(result)
    return result
def batchslice(arr, size, inputsize, stride, num_img):
    result = []
    for i in range(0, num_img):
        s = slice(arr[i, ], size, inputsize, stride)
        result.append(s)
    result = np.array(result)
    result = result.reshape(result.shape[0] * result.shape[1], result.shape[2], result.shape[3], -1)
    return result
Y = batchslice(Y, size, Y.shape[1], size, Y.shape[0]).squeeze()
X_cl = batchslice(X_cl, size, X_cl.shape[1], size, X_cl.shape[0])
X_train = X_cl[:int(X_cl.shape[0] * 0.8), ]
Y_train = Y[:int(Y.shape[0] * 0.8), ]
X_test = X_cl[int(X_cl.shape[0] * 0.8) + 1:, ]
Y_test = Y[int(Y.shape[0] * 0.8) + 1:, ]
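As a sanity check of the tiling logic: with the slice/batchslice definitions above, a made-up stack of two single-band 1024x1024 rasters should break into eight non-overlapping 512x512 patches (the array here is hypothetical):

import numpy as np

arr = np.zeros((2, 1024, 1024))               # hypothetical: 2 images, 1024x1024, one band
patches = batchslice(arr, 512, 1024, 512, 2)
print(patches.shape)                          # (8, 512, 512, 1): 4 patches per image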
Then comes the big U-Net model architecture. The whole script can be found here.
This model works just fine with the dataset. I am trying to test it on my own out-of-dataset image, and this is what I have tried:
model = load_model('no_aug_unet_model.h5', custom_objects=dependencies)
model.compile(loss='binary_crossentropy', metrics=[iou],
              optimizer=Adam(learning_rate=lr_schedule(0)))
from keras.preprocessing import image
test_image= image.load_img('bangkok_noi_2.jpg', target_size = (2000, 2000))
test_image = image.img_to_array(test_image)
test_image1 = test_image.reshape((1,2000,2000,3))
testpre = model.predict(test_image1)
img = Image.fromarray(test_image, 'RGB')
img.show()
The original shape of my test image is (1852, 3312, 3).
I am getting a weirdly predicted image that makes no sense, not at all what I expected. I believe I am doing the wrong preprocessing on my test image. Any help would be greatly appreciated.
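One preprocessing sketch that mirrors the training pipeline, reusing the model loaded above (the 512x512 target size, the [0, 1] scaling, and the choice to resize rather than tile the image are assumptions, not tested):

import numpy as np
from tensorflow.keras.preprocessing import image

# Feed the image at the size the model was trained on (512x512 tiles),
# not at an arbitrary 2000x2000.
test_image = image.load_img('bangkok_noi_2.jpg', target_size=(512, 512))
test_image = image.img_to_array(test_image)
test_image = test_image / 255.0                    # assumption: training inputs were scaled to [0, 1]
test_batch = np.expand_dims(test_image, axis=0)    # shape: (1, 512, 512, 3)
pred_mask = model.predict(test_batch)              # shape: (1, 512, 512, 1), sigmoid probabilities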
Let's say we have a custom layer in Keras like this:
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import Layer
class Custom_Layer(Layer):
    def __init__(self, **kwargs):
        super(Custom_Layer, self).__init__(**kwargs)
        self.params_1 = 0
        self.params_2 = 0
    def build(self, input_shape):
        self.params_1 = K.variable(np.zeros(shape=input_shape[1::]))
        self.params_2 = K.variable(np.zeros(shape=input_shape[1::]))
        super(Custom_Layer, self).build(input_shape)
    def call(self, x, training=None):
        # DO SOMETHING
How could I access the values of the parameters (params_1, params_2) during the training process? I tried model.get_layer('Name of Custom Layer').params_1, but that way I cannot access the values of the parameters.
Here is the model architecture:
def get_model(img_height, img_width):
    input_layer = Input(shape=(img_height, img_width, 3))
    x = Conv2D(32, (3, 3), padding='same', name='conv2d_1', activation='relu')(input_layer)
    x = Custom_Layer()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.25)(x)
    x = Conv2D(64, kernel_size=(3, 3), name='conv2d_2', activation='relu')(x)
    x = Conv2D(64, (3, 3), name='conv2d_4', activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.25)(x)
    x = Flatten()(x)
    x = Dense(512)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(10)(x)
    x = Activation('softmax')(x)
    model = Model(inputs=[input_layer], outputs=[x])
    model.summary()
    return model
Note that params_1 and params_2 are TensorFlow tensors. To get their values, you have to run them within a tf.Session. You could do something along the lines of:
from keras import backend as K
# ... train model
sess = K.get_session()
params_1 = model.get_layer('Name of Custom Layer').params_1
values_1 = sess.run(params_1)
print(values_1)
NOTE: Not tested.
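On TensorFlow 2.x, where eager execution is the default, there is no session to run; the values can be read directly. A sketch, equally untested:

from tensorflow.keras import backend as K

layer = model.get_layer('Name of Custom Layer')
values_1 = K.get_value(layer.params_1)   # or layer.params_1.numpy() in eager mode
print(values_1)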
I'm writing a U-Net CNN in Keras and trying to use fit_generator for training. To make this work, I wrote a generator script that feeds the images and labels to my network (the plain fit function works, but I want to train on a big dataset that cannot fit into memory).
My problem is that the model summary correctly says the output layer has shape (None, 288, 512, 4):
https://i.imgur.com/69xG8pO.jpg
but when I try the actual training I get this error:
https://i.imgur.com/j7H6sHX.jpg
I don't get why Keras wants (288, 512, 1) when the summary expects (288, 512, 4).
I tried it with my own U-Net code and also copied a working implementation from GitHub, but both have exactly the same problem, which leads me to believe that my generator script is the weak link. Below is the code I used (the image and label array functions used here already worked when I used them with fit in a previous CNN):
def generator(img_path, label_path, batch_size, height, width, num_classes):
    input_pairs = get_pairs(img_path, label_path) # rewrite if param name changes
    random.shuffle(input_pairs)
    iterate_pairs = itertools.cycle(input_pairs)
    while True:
        X = []
        Y = []
        for _ in range(batch_size):
            im, lab = next(iterate_pairs)
            appended_im = next(iter(im))
            appended_lab = next(iter(lab))
            X.append(input_image_array(appended_im, width, height))
            Y.append(input_label_array(appended_lab, width, height, num_classes, palette))
        yield (np.array(X), np.array(Y))
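For reference, a sketch of how such a generator is typically wired into training with fit_generator (num_train_pairs is a hypothetical count of image/label pairs, not from the original code):

train_gen = generator(img_path, label_path, batch_size=15,
                      height=288, width=512, num_classes=4)
model.fit_generator(train_gen,
                    steps_per_epoch=num_train_pairs // 15,  # num_train_pairs is hypothetical
                    epochs=50)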
I tried the generator out and the batches it provides have the following shapes (for a batch size of 15):
(15, 288, 512, 3)
(15, 288, 512, 4)
So I really do not know what could be the problem here.
EDIT: Here is the model code I used:
def conv_block(input_tensor, n_filter, kernel=(3, 3), padding='same', initializer="he_normal"):
    x = Conv2D(n_filter, kernel, padding=padding, kernel_initializer=initializer)(input_tensor)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(n_filter, kernel, padding=padding, kernel_initializer=initializer)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x

def deconv_block(input_tensor, residual, n_filter, kernel=(3, 3), strides=(2, 2), padding='same'):
    y = Conv2DTranspose(n_filter, kernel, strides, padding)(input_tensor)
    y = concatenate([y, residual], axis=3)
    y = conv_block(y, n_filter)
    return y
# NETWORK - n_classes is the desired number of classes, filters are fixed
def Unet(input_height, input_width, n_classes=4, filters=64):
    # Downsampling
    input_layer = Input(shape=(input_height, input_width, 3), name='input')
    conv_1 = conv_block(input_layer, filters)
    conv_1_out = MaxPooling2D(pool_size=(2, 2))(conv_1)
    conv_2 = conv_block(conv_1_out, filters*2)
    conv_2_out = MaxPooling2D(pool_size=(2, 2))(conv_2)
    conv_3 = conv_block(conv_2_out, filters*4)
    conv_3_out = MaxPooling2D(pool_size=(2, 2))(conv_3)
    conv_4 = conv_block(conv_3_out, filters*8)
    conv_4_out = MaxPooling2D(pool_size=(2, 2))(conv_4)
    conv_4_drop = Dropout(0.5)(conv_4_out)
    conv_5 = conv_block(conv_4_drop, filters*16)
    conv_5_drop = Dropout(0.5)(conv_5)
    # Upsampling
    deconv_1 = deconv_block(conv_5_drop, conv_4, filters*8)
    deconv_1_drop = Dropout(0.5)(deconv_1)
    deconv_2 = deconv_block(deconv_1_drop, conv_3, filters*4)
    deconv_2_drop = Dropout(0.5)(deconv_2)
    deconv_3 = deconv_block(deconv_2_drop, conv_2, filters*2)
    deconv_3 = deconv_block(deconv_3, conv_1, filters)
    # Output - mapping each 64-component feature vector to the number of classes
    output = Conv2D(n_classes, (1, 1))(deconv_3)
    output = BatchNormalization()(output)
    output = Activation("softmax")(output)
    # embed into functional API
    model = Model(inputs=input_layer, outputs=output, name="Unet")
    return model
Change your loss to categorical_crossentropy. When using the sparse_categorical_crossentropy loss, your targets should be integer targets: Keras then expects masks of shape (288, 512, 1), which is why the error asks for that shape. Your generator yields one-hot labels of shape (288, 512, 4), and those match categorical_crossentropy.
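Put differently, the loss must match the label encoding. A quick sketch of the two valid pairings (one_hot_masks is a hypothetical batch shaped like your generator output):

import numpy as np

# Option 1: keep the one-hot masks of shape (batch, 288, 512, 4)
model.compile(optimizer='adam', loss='categorical_crossentropy')

# Option 2: keep sparse_categorical_crossentropy and feed integer masks of
# shape (batch, 288, 512, 1); one-hot labels collapse via argmax
integer_masks = np.argmax(one_hot_masks, axis=-1)[..., np.newaxis]
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')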
I have a trained CNN model. I am trying to extract the output of each convolutional layer and plot the results, to explore which regions of the image produce high activations. Any ideas on how to do this?
Below is the network I have trained.
input_shape = (3, 227, 227)
x_input = Input(input_shape)
# Conv Layer 1
x = Convolution2D(96, 7, 7, subsample=(4, 4), activation='relu',
                  name='conv_1', init='he_normal')(x_input)
x = MaxPooling2D((3, 3), strides=(2, 2), name='maxpool')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((2, 2))(x)
# Conv Layer 2
x = Convolution2D(256, 5, 5, activation='relu', name='conv_2', init='he_normal')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), name='maxpool2')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((2, 2))(x)
# Conv Layer 3
x = Convolution2D(384, 3, 3, activation='relu',
                  name='conv_3', init='he_normal')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), name='maxpool3')(x)
x = Flatten()(x)
x = Dense(512, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(512, activation="relu")(x)
x = Dropout(0.5)(x)
predictions = Dense(2, activation="softmax")(x)
model = Model(inputs=x_input, outputs=predictions)
Thanks!
Take a look at this GitHub issue and the FAQ entry How can I obtain the output of an intermediate layer?. It seems the easiest way to do this is to define a new model with the outputs that you want. For example:
input_shape = (3, 227, 227)
x_input = Input(input_shape)
# Conv Layer 1
# Save the layer output in a variable
conv1 = Convolution2D(96, 7, 7, subsample=(4, 4), activation='relu',
                      name='conv_1', init='he_normal')(x_input)
x = conv1
x = MaxPooling2D(...)(x)
# ...
conv2 = Convolution2D(...)(x)
x = conv2
# ...
conv3 = Convolution2D(...)(x)
x = conv3
# ...
predictions = Dense(2, activation="softmax")(x)
# Main model
model = Model(inputs=x_input, outputs=predictions)
# Intermediate evaluation model
conv_layers_model = Model(inputs=x_input, outputs=[conv1, conv2, conv3])
# After training is done, retrieve intermediate evaluations for data
conv1_val, conv2_val, conv3_val = conv_layers_model.predict(data)
Note that, since you are using the same layer objects in both models, the weights are automatically shared between them.
A more complete example of activation visualization can be found here. In that case they use the K.function approach.
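For completeness, a sketch of that K.function approach, reusing the names from the model above (untested):

from keras import backend as K

# Map the model input (plus the learning phase flag, so layers like Dropout
# run in inference mode) to the three intermediate conv outputs.
get_conv_outputs = K.function([x_input, K.learning_phase()],
                              [conv1, conv2, conv3])
conv1_val, conv2_val, conv3_val = get_conv_outputs([data, 0])  # 0 = test phase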