I am using a Conv-6 CNN in TensorFlow 2.5 and Python 3. The objective is to selectively set certain weights within any trainable layer to zero and keep them frozen during training. The Conv-6 CNN model definition is as follows:
def conv6_cnn():
"""
Function to define the architecture of a neural network model
following the Conv-6 architecture for the CIFAR-10 dataset, using
the provided parameters to prune the model.
Conv-6 architecture-
64, 64, pool -- convolutional layers
128, 128, pool -- convolutional layers
256, 256, pool -- convolutional layers
256, 256, 10 -- fully connected layers
Output: Returns designed and compiled neural network model
"""
l = tf.keras.layers
model = Sequential()
model.add(
Conv2D(
filters = 64, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotNormal(),
strides = (1, 1), padding = 'same',
input_shape=(32, 32, 3)
)
)
model.add(
Conv2D(
filters = 64, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotNormal(),
strides = (1, 1), padding = 'same'
)
)
model.add(
MaxPooling2D(
pool_size = (2, 2),
strides = (2, 2)
)
)
model.add(
Conv2D(
filters = 128, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotNormal(),
strides = (1, 1), padding = 'same'
)
)
model.add(
Conv2D(
filters = 128, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotNormal(),
strides = (1, 1), padding = 'same'
)
)
model.add(
MaxPooling2D(
pool_size = (2, 2),
strides = (2, 2)
)
)
model.add(
Conv2D(
filters = 256, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotNormal(),
strides = (1, 1), padding = 'same'
)
)
model.add(
Conv2D(
filters = 256, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotNormal(),
strides = (1, 1), padding = 'same'
)
)
model.add(
MaxPooling2D(
pool_size = (2, 2),
strides = (2, 2)
)
)
model.add(Flatten())
model.add(
Dense(
units = 256, activation='relu',
kernel_initializer = tf.initializers.GlorotNormal()
)
)
model.add(
Dense(
units = 256, activation='relu',
kernel_initializer = tf.initializers.GlorotNormal()
)
)
model.add(
Dense(
units = 10, activation='softmax'
)
)
'''
# Compile CNN-
model.compile(
loss=tf.keras.losses.categorical_crossentropy,
# optimizer='adam',
optimizer=tf.keras.optimizers.Adam(learning_rate = 0.0003),
metrics=['accuracy']
)
'''
return model
# Load trained model from before-
best_model = conv6_cnn()
best_model.load_weights("best_weights.h5")
I came across a GitHub answer about freezing certain weights during training. On its basis, I coded the following to freeze weights in the first and sixth conv layers:
conv1 = pruned_model.trainable_weights[0]
# Find all weights less than a threshold (0.1) and set them to zero-
conv1 = tf.where(conv1 < 0.1, 0, conv1)
# For all weights set to zero, stop training them-
conv1 = tf.where(conv1 == 0, tf.stop_gradient(conv1), conv1)
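# (Note: tf.stop_gradient here acts on a temporary tensor, not on the
# Variable itself, so once the values are assigned back, nothing marks
# these entries as frozen for the optimizer.)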
# Sanity check: number of non-zero parameters remaining-
tf.math.count_nonzero(conv1, axis = None).numpy()
# 133
# Original number of non-zero parameters-
tf.math.count_nonzero(best_model.trainable_weights[0], axis = None).numpy()
# 1728
# Assign conv layer1 back to pruned model-
pruned_model.trainable_weights[0].assign(conv1)
# Sanity check-
tf.math.count_nonzero(pruned_model.trainable_weights[0], axis = None).numpy()
# 133
# conv layer 6-
conv6 = pruned_model.trainable_weights[10]
# Find all weights less than a threshold (0.1) and set them to zero-
conv6 = tf.where(conv6 < 0.1, 0, conv6)
# For all weights set to zero, stop training them-
conv6 = tf.where(conv6 == 0, tf.stop_gradient(conv6), conv6)
# Sanity check: number of non-zero parameters remaining-
tf.math.count_nonzero(conv6, axis = None).numpy()
# 5369
# Original number of non-zero parameters-
tf.math.count_nonzero(best_model.trainable_weights[10], axis = None).numpy()
# 589824
# Assign conv layer6 back to pruned model-
pruned_model.trainable_weights[10].assign(conv6)
# Sanity check-
tf.math.count_nonzero(pruned_model.trainable_weights[10], axis = None).numpy()
# 5369
# Train model for 10 epochs for testing:
# Compile CNN-
pruned_model.compile(
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False),
optimizer=tf.keras.optimizers.Adam(learning_rate = 0.01),
metrics=['accuracy']
)
history = pruned_model.fit(
x = X_train, y = y_train,
epochs = 10, validation_data = (X_test, y_test)
)
However, after training when I check the number of non-zero weights:
# first conv layer-
tf.math.count_nonzero(pruned_model.trainable_weights[0], axis = None).numpy()
# sixth conv layer-
tf.math.count_nonzero(pruned_model.trainable_weights[10], axis = None).numpy()
The number of non-zero weights has increased again. The counts should still be 133 and 5369, but they are not.
Help?
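For reference, one way to actually keep pruned entries at zero is to reapply a 0/1 mask after every gradient update, since stop_gradient alone does not freeze a Variable. Below is a minimal sketch assuming masks built from the thresholded tensors above; the callback class and the masks dict are illustrative, not part of the original code:
class MaskWeightsCallback(tf.keras.callbacks.Callback):
    # Hypothetical helper: re-zero the pruned weights after every batch.
    def __init__(self, masks):
        super().__init__()
        self.masks = masks  # dict: trainable-weight index -> 0/1 mask tensor
    def on_train_batch_end(self, batch, logs=None):
        for idx, mask in self.masks.items():
            w = self.model.trainable_weights[idx]
            w.assign(w * mask)
# Usage sketch:
# masks = {0: tf.cast(conv1 != 0, tf.float32), 10: tf.cast(conv6 != 0, tf.float32)}
# pruned_model.fit(..., callbacks = [MaskWeightsCallback(masks)])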
I am using the Cats vs. Dogs dataset, which contains 2000 images in 2 categories and is divided into train and validation directories; it can be downloaded here.
I am trying to use real-time image augmentation, fed to a CNN model via train and validation generators. I am using Python 3.8 and TF 2.5. The code is as follows:
path_to_imgs = "cats_and_dogs_filtered\\"
# Define the train and validation directory-
train_dir = os.path.join(path_to_imgs, 'train')
val_dir = os.path.join(path_to_imgs, 'validation')
batch_size = 64
IMG_HEIGHT, IMG_WIDTH = 150, 150
def plotImages(images_arr):
# function to plot 5 images together-
fig, axes = plt.subplots(1, 5, figsize=(20,20))
axes = axes.flatten()
for img, ax in zip( images_arr, axes):
ax.imshow(img)
ax.axis('off')
plt.tight_layout()
plt.show()
return None
# Use image augmentation for training dataset-
image_generator = ImageDataGenerator(
rescale = 1./255, rotation_range = 135)
train_data_gen = image_generator.flow_from_directory(
directory = train_dir, batch_size = batch_size,
shuffle = True, target_size = (IMG_HEIGHT, IMG_WIDTH),
class_mode = 'sparse'
)
# Found 2000 images belonging to 2 classes.
# Validation images need no augmentations-
val_data_gen = tf.keras.preprocessing.image_dataset_from_directory(
val_dir, image_size = (IMG_HEIGHT, IMG_WIDTH),
batch_size = batch_size)
# Found 1000 files belonging to 2 classes.
# Configure the dataset for performance-
# AUTOTUNE = tf.data.AUTOTUNE
# val_data_gen = val_data_gen.cache().prefetch(buffer_size = AUTOTUNE)
val_data_gen = val_data_gen.take(batch_size).cache().repeat()
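# Note: repeat() makes this dataset infinite; model.fit() will then need
# validation_steps to know when a validation pass is over.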
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
# Get a batch of training images and labels-
x, y = next(iter(train_data_gen))
# Get a batch of validation images and labels-
x_t, y_t = next(iter(val_data_gen))
x.shape, y.shape
# ((64, 150, 150, 3), (64,))
x_t.shape, y_t.shape
# (TensorShape([64, 150, 150, 3]), TensorShape([64]))
weight_decay = 0.0005
model = Sequential()
model.add(
Conv2D(
filters = 64, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.he_normal(),
strides = (1, 1), padding = 'same', kernel_regularizer = regularizers.l2(weight_decay),
input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)
)
)
model.add(
Conv2D(
filters = 64, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.he_normal(),
strides = (1, 1), padding = 'same', kernel_regularizer = regularizers.l2(weight_decay)
)
)
model.add(
# AveragePooling2D(
MaxPooling2D(
pool_size=(2, 2), strides=(2, 2)
)
)
model.add(
Conv2D(
filters = 128, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.he_normal(),
strides = (1, 1), padding = 'same', kernel_regularizer = regularizers.l2(weight_decay)
)
)
model.add(
Conv2D(
filters = 128, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.he_normal(),
strides = (1, 1), padding = 'same', kernel_regularizer = regularizers.l2(weight_decay)
)
)
model.add(
# AveragePooling2D(
MaxPooling2D(
pool_size=(2, 2), strides=(2, 2)
)
)
model.add(
Conv2D(
filters = 256, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.he_normal(),
strides = (1, 1), padding = 'same', kernel_regularizer = regularizers.l2(weight_decay)
)
)
model.add(
Conv2D(
filters = 256, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.he_normal(),
strides = (1, 1), padding = 'same', kernel_regularizer = regularizers.l2(weight_decay)
)
)
model.add(
AveragePooling2D(
# MaxPooling2D(
pool_size=(2, 2), strides=(2, 2)
)
)
model.add(Flatten())
model.add(
Dense(
units = 2, activation = 'sigmoid'
)
)
# Compile defined model-
model.compile(
optimizer = tf.keras.optimizers.Adam(learning_rate = 0.001),
# loss=tf.losses.SparseCategoricalCrossentropy(from_logits = True),
# loss = tf.losses.SparseCategoricalCrossentropy(),
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy']
)
model(x).shape
# TensorShape([64, 2])
model.predict(x).shape
# (64, 2)
'''
# This is deprecated in favor of model.fit()-
model.fit_generator(
generator = train_data_gen, steps_per_epoch = len(train_data_gen),
epochs = 5
)
'''
model.fit(train_data_gen, val_data_gen, batch_size = batch_size, epochs = 5)
Using "model.fit()" gives the error:
ValueError: y argument is not supported when using
keras.utils.Sequence as input.
What am I doing wrong?
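For what it's worth, a minimal sketch of the call I would expect to work, given the generators above: fit() takes y as its second positional argument, so val_data_gen must be passed via the validation_data keyword instead, and batch_size should be omitted because the generator already yields batches. (The validation_steps value assumes the 1000 validation images reported earlier; note also that the validation images are not rescaled by 1/255 the way the training images are, which is worth aligning.)
history = model.fit(
    train_data_gen,                         # Keras iterator yielding (x, y) batches
    validation_data = val_data_gen,         # a tf.data.Dataset is accepted here
    validation_steps = 1000 // batch_size,  # needed since repeat() made it infinite
    epochs = 5
)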
I am trying to implement a VGG-19 CNN on the CIFAR-10 dataset, where the images have dimensions (32, 32, 3). The training set has 50000 images, while the testing set has 10000 images.
I am using Python 3.7 and TensorFlow 2.0. I have preprocessed the dataset by normalizing them-
# Normalize the training and testing datasets-
X_train /= 255.0
X_test /= 255.0
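(One caveat: the in-place division requires a float dtype; a minimal sketch assuming the data comes from tf.keras.datasets.cifar10, whose arrays are uint8:)
from tensorflow.keras.datasets import cifar10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# Cast to float before normalizing, since uint8 arrays reject /= 255.0-
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0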
I have then designed a CNN-
def vgg_19():
"""
Function to define the architecture of a convolutional neural network
model following VGG-19 architecture for CIFAR-10 dataset.
VGG-19 architecture-
64, 64, pool -- convolutional layers
128, 128, pool -- convolutional layers
256, 256, 256, 256, max-pool -- convolutional layers
512, 512, 512, 512, max-pool -- convolutional layers
512, 512, 512, 512, avg-pool -- convolutional layers
256, 256, 10 -- fully connected layers
Output: Returns designed and compiled convolutional neural network model
"""
l = tf.keras.layers
model = Sequential()
model.add(
Conv2D(
filters = 64, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same',
input_shape=(32, 32, 3)
)
)
model.add(
Conv2D(
filters = 64, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
MaxPooling2D(
pool_size = (2, 2),
strides = (2, 2)
)
)
model.add(
Conv2D(
filters = 128, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
Conv2D(
filters = 128, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
MaxPooling2D(
pool_size = (2, 2),
strides = (2, 2)
)
)
model.add(
Conv2D(
filters = 256, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
Conv2D(
filters = 256, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
Conv2D(
filters = 256, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
Conv2D(
filters = 256, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
MaxPooling2D(
pool_size = (2, 2),
strides = (2, 2)
)
)
model.add(
Conv2D(
filters = 512, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
Conv2D(
filters = 512, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
Conv2D(
filters = 512, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
Conv2D(
filters = 512, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
MaxPooling2D(
pool_size = (2, 2),
strides = (2, 2)
)
)
model.add(
Conv2D(
filters = 512, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
Conv2D(
filters = 512, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
Conv2D(
filters = 512, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
Conv2D(
filters = 512, kernel_size = (3, 3),
activation='relu', kernel_initializer = tf.initializers.GlorotUniform(),
strides = (1, 1), padding = 'same'
)
)
model.add(
AveragePooling2D(
pool_size=(2, 2), strides=(2, 2)
)
)
'''
model.add(
MaxPooling2D(
pool_size = (2, 2),
strides = (2, 2)
)
)
'''
model.add(Flatten())
model.add(
Dense(
units = 256, activation='relu'
)
)
model.add(
Dense(
units = 256, activation='relu'
)
)
'''
model.add(
Dense(
units = 1000, activation='relu'
)
)
'''
model.add(
Dense(
units = 10, activation='softmax'
)
)
# Compile CNN-
model.compile(
loss=tf.keras.losses.categorical_crossentropy,
# optimizer='adam',
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
metrics=['accuracy']
)
return model
However, when I try to train it-
history = orig_model.fit(
x = X_train, y = y_train,
batch_size = batch_size,
epochs = num_epochs,
verbose = 1,
# callbacks = callback,
validation_data = (X_test, y_test),
shuffle = True
)
The designed CNN gives a validation accuracy of about 9%.
What's going wrong?
The abysmally low validation accuracy is due to the Glorot initializer. After changing it to 'he_normal', the VGG-19 CNN starts learning and reaches about 77-79% validation accuracy.
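Concretely, the fix amounts to swapping the initializer in each Conv2D/Dense call; a one-layer sketch of the change described above:
model.add(
    Conv2D(
        filters = 64, kernel_size = (3, 3),
        activation='relu',
        kernel_initializer = tf.initializers.he_normal(),  # was GlorotUniform()
        strides = (1, 1), padding = 'same',
        input_shape=(32, 32, 3)
    )
)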
I am trying to make a CycleGAN for unpaired image-to-image translation, following this reference. When I try to compile the combined model, the following error occurs. I don't know why, as I have used the same configuration as the reference. Attached is my code. Please review it if any of you can solve my problem. Thanks in advance. Sorry for my bad English.
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
img_rows, img_columns, channels = 256, 256, 1
img_shape = (img_rows, img_columns, channels)
def Generator():
inputs = Input(img_shape)
conv1 = Conv2D(64, (4, 4), strides=2, padding='same')(inputs) # 128
conv1 = Activation(LeakyReLU(alpha=0.2))(conv1)
conv1 = InstanceNormalization()(conv1)
conv2 = Conv2D(128, (4, 4), strides=2, padding='same')(conv1) # 64
conv2 = Activation(LeakyReLU(alpha=0.2))(conv2)
conv2 = InstanceNormalization()(conv2)
conv3 = Conv2D(256, (4, 4), strides=2, padding='same')(conv2) # 32
conv3 = Activation(LeakyReLU(alpha=0.2))(conv3)
conv3 = InstanceNormalization()(conv3)
Deconv3 = concatenate([Conv2DTranspose(256, (4, 4), strides=2, padding='same')(conv3), conv2], axis=-1) # 64
Deconv3 = InstanceNormalization()(Deconv3)
Deconv3 = Dropout(0.2)(Deconv3)
Deconv3 = Activation('relu')(Deconv3)
Deconv2 = concatenate([Conv2DTranspose(128, (4, 4), strides=2, padding='same')(Deconv3), conv1], axis=-1) # 128
Deconv2 = InstanceNormalization()(Deconv2)
Deconv2 = Dropout(0.2)(Deconv2)
Deconv2 = Activation('relu')(Deconv2)
Deconv1 = UpSampling2D(size=(2, 2))(Deconv2) # 256
Deconv1 = Conv2D(1, (4, 4), strides=1, padding='same')(Deconv1)
outputs = Activation('tanh')(Deconv1)
return Model(inputs=inputs, outputs=outputs, name='Generator')
def Discriminator():
inputs = Input(img_shape)
conv1 = Conv2D(64, (4, 4), strides=2, padding='same')(inputs) # 128
conv1 = Activation(LeakyReLU(alpha=0.2))(conv1)
conv1 = InstanceNormalization()(conv1)
conv2 = Conv2D(128, (4, 4), strides=2, padding='same')(conv1) # 64
conv2 = Activation(LeakyReLU(alpha=0.2))(conv2)
conv2 = InstanceNormalization()(conv2)
conv3 = Conv2D(256, (4, 4), strides=2, padding='same')(conv2) # 32
conv3 = Activation(LeakyReLU(alpha=0.2))(conv3)
conv3 = InstanceNormalization()(conv3)
conv4 = Conv2D(256, (4, 4), strides=2, padding='same')(conv3) # 16
conv4 = Activation(LeakyReLU(alpha=0.2))(conv4)
conv4 = InstanceNormalization()(conv4)
conv5 = Conv2D(512, (4, 4), strides=2, padding='same')(conv4) # 8
conv5 = Activation(LeakyReLU(alpha=0.2))(conv5)
conv5 = InstanceNormalization()(conv5)
conv6 = Conv2D(512, (4, 4), strides=2, padding='same')(conv5) # 4
conv6 = Activation(LeakyReLU(alpha=0.2))(conv6)
conv6 = InstanceNormalization()(conv6)
outputs = Conv2D(1, (4, 4), strides=1, padding='same')(conv6) # 4
return Model(inputs=inputs, outputs=outputs, name='Discriminator')
# Calculate output shape of D (PatchGAN)
patch = int(img_rows / 2**6)
disc_patch = (patch, patch, 1)
# Loss weights
lambda_cycle = 10.0 # Cycle-consistency loss
lambda_id = 0.1 * lambda_cycle # Identity loss
optimizer = Adam(0.0002, 0.5)
# Build and compile the discriminators
d_A = Discriminator()
d_B = Discriminator()
d_A.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
d_B.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
# Build the generators
g_AB = Generator()
g_BA = Generator()
# Input images from both domains
img_A = Input(shape=img_shape)
img_B = Input(shape=img_shape)
# Translate images to the other domain
fake_B = g_AB(img_A)
fake_A = g_BA(img_B)
# Translate images back to original domain
reconstr_A = g_BA(fake_B)
reconstr_B = g_AB(fake_A)
# Identity mapping of images
img_A_id = g_BA(img_A)
img_B_id = g_AB(img_B)
# For the combined model we will only train the generators
d_A.trainable = False
d_B.trainable = False
# Discriminators determines validity of translated images
valid_A = d_A(fake_A)
valid_B = d_B(fake_B)
# Combined model trains generators to fool discriminators
combined = Model(inputs=[img_A, img_B], outputs=[ valid_A, valid_B, reconstr_A, reconstr_B, img_A_id, img_B_id ])
combined.compile(loss=['mse', 'mse', 'mae', 'mae', 'mae', 'mae'],loss_weights=[ 1, 1, lambda_cycle, lambda_cycle, lambda_id, lambda_id ], optimizer=optimizer)
and the error is
The name "Generator" is used 2 times in the model. All layer names should be unique.
These lines in the Generator and Discriminator functions are the cause of the problem: each function is invoked twice, which causes the duplicate-name issue. Generate a unique name on every invocation, or don't provide the name argument.
return Model(inputs=inputs, outputs=outputs, name='Generator')
return Model(inputs=inputs, outputs=outputs, name='Discriminator')
One possible solution:
return Model(inputs=inputs, outputs=outputs)
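Alternatively, a small sketch that keeps descriptive names by passing a unique name on each call (the name parameter and the suffixes are illustrative):
def Generator(name=None):
    # ... body unchanged ...
    return Model(inputs=inputs, outputs=outputs, name=name)
g_AB = Generator(name='Generator_AB')
g_BA = Generator(name='Generator_BA')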
I would like to train a deep learning model where the input image shape is (224, 224, 3), and feed the images into a U-Net model.
When training, I get the error: Error when checking target: expected conv2d_29 to have 4 dimensions, but got array with shape (1255, 12)
I'm confused, since I'm sure the image array and labels have no issue. Is the issue within the model? How should I resolve it?
The model is as below:
#def unet(pretrained_weights = None, input_size = (224,224,3)):
concat_axis = 3
input_size= Input((224,224,3))
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(input_size)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
#flat1 = Flatten()(pool1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up_conv5 = UpSampling2D(size=(2, 2), data_format="channels_last")(conv5)
ch, cw = get_crop_shape(conv4, up_conv5)
crop_conv4 = Cropping2D(cropping=(ch,cw), data_format="channels_last")(conv4)
up6 = concatenate([up_conv5, crop_conv4], axis=concat_axis)
conv6 = Conv2D(256, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(up6)
conv6 = Conv2D(256, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(conv6)
up_conv6 = UpSampling2D(size=(2, 2), data_format="channels_last")(conv6)
ch, cw = get_crop_shape(conv3, up_conv6)
crop_conv3 = Cropping2D(cropping=(ch,cw), data_format="channels_last")(conv3)
up7 = concatenate([up_conv6, crop_conv3], axis=concat_axis)
conv7 = Conv2D(128, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(up7)
conv7 = Conv2D(128, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(conv7)
up_conv7 = UpSampling2D(size=(2, 2), data_format="channels_last")(conv7)
ch, cw = get_crop_shape(conv2, up_conv7)
crop_conv2 = Cropping2D(cropping=(ch,cw), data_format="channels_last")(conv2)
up8 = concatenate([up_conv7, crop_conv2], axis=concat_axis)
conv8 = Conv2D(64, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(up8)
conv8 = Conv2D(64, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(conv8)
up_conv8 = UpSampling2D(size=(2, 2), data_format="channels_last")(conv8)
ch, cw = get_crop_shape(conv1, up_conv8)
crop_conv1 = Cropping2D(cropping=(ch,cw), data_format="channels_last")(conv1)
up9 = concatenate([up_conv8, crop_conv1], axis=concat_axis)
conv9 = Conv2D(32, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(up9)
conv9 = Conv2D(32, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(conv9)
model = Model(inputs = input_size, outputs = conv9)
Since the model's output layer is a conv layer, its output has 4 dimensions: (batch_size, height, width, channels). But you are feeding a target array of shape (1255, 12). If the target labels have shape (batch_size, num_features), then the last layer's output should have shape (None, 12), i.e. (batch_size, 12).
You have two options to deal with this situation:
1. Use a dense layer after flattening the output of the conv layer.
2. Reshape the output of the conv layer to the desired shape.
The choice depends on the problem you're dealing with. If the problem is classification, option 1 can be used to add a softmax activation. With option 1, the modification to the code would be:
conv9 = Conv2D(32, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(conv9)
flatten1 = Flatten()(conv9)
dense1 = Dense(12, activation="softmax")(flatten1) # The choice of the activation depends on the problem you are dealing with.
model = Model(inputs = input_size, outputs = dense1)
With option 2, the modification would be:
conv9 = Conv2D(32, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(conv9)
reshape1 = Reshape((12,))(conv9)  # reshape the conv output to (batch_size, 12)
model = Model(inputs = input_size, outputs = reshape1)
N.B.: When the Reshape layer is used to reshape the tensor to shape (None, 12), be sure that the number of elements per sample in the previous layer's output equals 12.
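A tiny sketch of that constraint with hypothetical shapes:
from tensorflow.keras.layers import Input, Reshape
x = Input(shape=(2, 2, 3))  # 2 * 2 * 3 = 12 elements per sample
y = Reshape((12,))(x)       # valid: output shape (None, 12)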
I once used Keras 1 (maybe 1.0.5) for multi-category classification. My CNN input is (n, 1, 24, 113), where 113 is the number of channels, and the kernel size is (1, 5).
The code looks like:
X_train = X_train.reshape((-1, 1, SLIDING_WINDOW_LENGTH, NUM_SENSOR_CHANNELS))
X_test = X_test.reshape((-1, 1, SLIDING_WINDOW_LENGTH, NUM_SENSOR_CHANNELS))
# network
inputs = Input(shape=(1, SLIDING_WINDOW_LENGTH, NUM_SENSOR_CHANNELS))
conv1 = ELU()(Convolution2D(NUM_FILTERS, FILTER_SIZE, 1, border_mode='valid', init='normal', activation='relu')(inputs))
conv2 = ELU()(Convolution2D(NUM_FILTERS, FILTER_SIZE, 1, border_mode='valid', init='normal', activation='relu')(conv1))
conv3 = ELU()(Convolution2D(NUM_FILTERS, FILTER_SIZE, 1, border_mode='valid', init='normal', activation='relu')(conv2))
conv4 = ELU()(Convolution2D(NUM_FILTERS, FILTER_SIZE, 1, border_mode='valid', init='normal', activation='relu')(conv3))
reshape1 = Reshape((8, NUM_FILTERS * NUM_SENSOR_CHANNELS))(conv4)
gru1 = GRU(NUM_UNITS_LSTM, return_sequences=True, consume_less='mem')(reshape1)
gru2 = GRU(NUM_UNITS_LSTM, return_sequences=False, consume_less='mem')(gru1)
outputs = Dense(NUM_CLASSES, activation='softmax')(gru2)
# Hardcoded number of sensor channels employed in the OPPORTUNITY challenge
NUM_SENSOR_CHANNELS = 113
# Hardcoded number of classes in the gesture recognition problem
NUM_CLASSES = 18
# Hardcoded length of the sliding window mechanism employed to segment the data
SLIDING_WINDOW_LENGTH = 24
# Length of the input sequence after convolutional operations
FINAL_SEQUENCE_LENGTH = 8
# Hardcoded step of the sliding window mechanism employed to segment the data
SLIDING_WINDOW_STEP = 12
# Batch Size
BATCH_SIZE = 100
# Number filters convolutional layers
NUM_FILTERS = 64
# Size filters convolutional layers
FILTER_SIZE = 5
# Number of unit in the long short-term recurrent layers
NUM_UNITS_LSTM = 128
These days I switched to Keras 2, and the network did not change. My code looks like:
X_train = X_train.reshape((-1, 1, SLIDING_WINDOW_LENGTH, NUM_SENSOR_CHANNELS))
X_test = X_test.reshape((-1, 1, SLIDING_WINDOW_LENGTH, NUM_SENSOR_CHANNELS))
# network
inputs = Input(shape=(1, SLIDING_WINDOW_LENGTH, NUM_SENSOR_CHANNELS))
conv1 = ELU()(
Conv2D(filters=NUM_FILTERS, kernel_size=(1, FILTER_SIZE), strides=(1, 1), padding='valid', activation='relu',
kernel_initializer='normal', data_format='channels_last')(inputs))
conv2 = ELU()(
Conv2D(filters=NUM_FILTERS, kernel_size=(1, FILTER_SIZE), strides=(1, 1), padding='valid', activation='relu',
kernel_initializer='normal', data_format='channels_last')(conv1))
conv3 = ELU()(
Conv2D(filters=NUM_FILTERS, kernel_size=(1, FILTER_SIZE), strides=(1, 1), padding='valid', activation='relu',
kernel_initializer='normal', data_format='channels_last')(conv2))
conv4 = ELU()(
Conv2D(filters=NUM_FILTERS, kernel_size=(1, FILTER_SIZE), strides=(1, 1), padding='valid', activation='relu',
kernel_initializer='normal', data_format='channels_last')(conv3))
# permute1 = Permute((2, 1, 3))(conv4)
reshape1 = Reshape((SLIDING_WINDOW_LENGTH - (FILTER_SIZE - 1) * 4, NUM_FILTERS * 1))(conv4) # 4 for 4 convs
gru1 = GRU(NUM_UNITS_LSTM, return_sequences=True, implementation=0)(reshape1)
gru2 = GRU(NUM_UNITS_LSTM, return_sequences=False, implementation=0)(gru1) # implementation=2 for GPU
outputs = Dense(NUM_CLASSES, activation='softmax')(gru2)
The speed seems faster, but the output shape is strange, since I don't know where my channels are. Is there anything wrong with my code? Could someone help? Thanks.
It seems that Keras handles the channel dimension itself.
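To make that concrete, a small check one can run (hedged, based only on the shapes in the post, and using tf.keras here): with data_format='channels_last', the input (1, 24, 113) is read as height 1, width 24 and 113 channels, and the channel axis is absorbed by the first convolution:
import tensorflow as tf
from tensorflow.keras import backend as K
print(K.image_data_format())   # usually 'channels_last'
x = tf.zeros((1, 1, 24, 113))  # (batch, height, width, channels)
conv = tf.keras.layers.Conv2D(64, (1, 5), padding='valid')
print(conv(x).shape)           # (1, 1, 20, 64): the 113 channels were consumed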