I am getting an error while running the following code in Keras:
Traceback (most recent call last):
File "my_conv_ae.py", line 74, in <module>
validation_steps = nb_validation_samples // batch_size)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python35\lib\site-packages\keras\legacy\interfaces.py", line 88, in wrapper
return func(*args, **kwargs)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python35\lib\site-packages\keras\engine\training.py", line 1890, in fit_generator
class_weight=class_weight)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python35\lib\site-packages\keras\engine\training.py", line 1627, in train_on_batch
check_batch_axis=True)
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python35\lib\site-packages\keras\engine\training.py", line 1309, in _standardize_user_data
exception_prefix='target')
File "C:\Users\Administrator\AppData\Local\Programs\Python\Python35\lib\site-packages\keras\engine\training.py", line 127, in _standardize_input_data
str(array.shape))
ValueError: Error when checking target: expected conv2d_transpose_8 to have 4 dimensions, but got array with shape (20, 1)
The code is:
import keras
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose
from keras.models import Model
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
input_img = Input(shape=(512, 512, 1))
nb_train_samples = 1700
nb_validation_samples = 420
epochs = 10
batch_size = 20
x = Conv2D(64, (11, 11), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(input_img)
x = Conv2D(64, (11, 11), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(128, (7, 7), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
x = Conv2D(128, (5, 5), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(256, (5, 5), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
x = Conv2D(256, (3, 3), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(512, (3, 3), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
x = Conv2D(512, (3, 3), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
encoded = MaxPooling2D((2, 2))(x)
print (K.int_shape(encoded))
# at this point the representation is (26, 26, 512)
x = UpSampling2D((2, 2))(encoded)
x = Conv2DTranspose(512, (3, 3), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
x = Conv2DTranspose(512, (3, 3), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2DTranspose(256, (3, 3), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
x = Conv2DTranspose(256, (5, 5), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2DTranspose(128, (5, 5), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
x = Conv2DTranspose(128, (7, 7), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2DTranspose(64, (11, 11), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
decoded = Conv2DTranspose(1, (11, 11), activation='relu', strides=1, padding='valid', kernel_initializer='glorot_uniform')(x)
print (K.int_shape(decoded))
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer = 'adadelta', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
x_train = train_datagen.flow_from_directory(
'data/train',
target_size = (512, 512), color_mode = 'grayscale',
batch_size = batch_size,
class_mode = 'binary')
x_test = test_datagen.flow_from_directory(
'data/validation',
target_size = (512, 512), color_mode = 'grayscale',
batch_size = batch_size,
class_mode = 'binary')
autoencoder.fit_generator(
x_train,
steps_per_epoch = nb_train_samples // batch_size,
epochs = epochs,
validation_data = x_test,
validation_steps = nb_validation_samples // batch_size)
decoded_imgs = autoencoder.predict(x_test)
The model summary is as follows:
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 512, 512, 1) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 502, 502, 64) 7808
_________________________________________________________________
conv2d_2 (Conv2D) (None, 492, 492, 64) 495680
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 246, 246, 64) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 240, 240, 128) 401536
_________________________________________________________________
conv2d_4 (Conv2D) (None, 236, 236, 128) 409728
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 118, 118, 128) 0
_________________________________________________________________
conv2d_5 (Conv2D) (None, 114, 114, 256) 819456
_________________________________________________________________
conv2d_6 (Conv2D) (None, 112, 112, 256) 590080
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 56, 56, 256) 0
_________________________________________________________________
conv2d_7 (Conv2D) (None, 54, 54, 512) 1180160
_________________________________________________________________
conv2d_8 (Conv2D) (None, 52, 52, 512) 2359808
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 26, 26, 512) 0
_________________________________________________________________
up_sampling2d_1 (UpSampling2 (None, 52, 52, 512) 0
_________________________________________________________________
conv2d_transpose_1 (Conv2DTr (None, 54, 54, 512) 2359808
_________________________________________________________________
conv2d_transpose_2 (Conv2DTr (None, 56, 56, 512) 2359808
_________________________________________________________________
up_sampling2d_2 (UpSampling2 (None, 112, 112, 512) 0
_________________________________________________________________
conv2d_transpose_3 (Conv2DTr (None, 114, 114, 256) 1179904
_________________________________________________________________
conv2d_transpose_4 (Conv2DTr (None, 118, 118, 256) 1638656
_________________________________________________________________
up_sampling2d_3 (UpSampling2 (None, 236, 236, 256) 0
_________________________________________________________________
conv2d_transpose_5 (Conv2DTr (None, 240, 240, 128) 819328
_________________________________________________________________
conv2d_transpose_6 (Conv2DTr (None, 246, 246, 128) 802944
_________________________________________________________________
up_sampling2d_4 (UpSampling2 (None, 492, 492, 128) 0
_________________________________________________________________
conv2d_transpose_7 (Conv2DTr (None, 502, 502, 64) 991296
_________________________________________________________________
conv2d_transpose_8 (Conv2DTr (None, 512, 512, 1) 7745
=================================================================
Total params: 16,423,745
Trainable params: 16,423,745
Non-trainable params: 0
_________________________________________________________________
Please help me. Is this because of the Conv2DTranspose() layers I used for decoding?
It's definitely not a problem with the model architecture itself (it works on my side). The problem seems to be with your ground-truth data: the target must have the same dimensions as your input image, but flow_from_directory with class_mode='binary' yields class labels of shape (batch_size, 1) instead, which is exactly the (20, 1) array in the error. You need a generator that returns the images themselves as targets, i.e. your own custom data generator.
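For an autoencoder, newer Keras versions support class_mode='input' in flow_from_directory, which yields the image itself as the target; on versions without it you can wrap the existing iterators. A minimal sketch (note that a pixel-wise reconstruction loss such as 'binary_crossentropy' or 'mse' is also needed in place of 'sparse_categorical_crossentropy'):
def reconstruction_generator(directory_iterator):
    # Yield (input, target) pairs where the target is the input image itself,
    # so the target shape matches the model's (batch_size, 512, 512, 1) output
    for batch_x, _ in directory_iterator:
        yield batch_x, batch_x

autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit_generator(
    reconstruction_generator(x_train),
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=reconstruction_generator(x_test),
    validation_steps=nb_validation_samples // batch_size)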
Related
Can anyone please help me convert this model to PyTorch? I already tried converting from Keras to PyTorch following How can I convert this keras cnn model to pytorch version, but the training results were different. Thank you.
input_3d = (1, 64, 96, 96)
pool_3d = (2, 2, 2)
model = Sequential()
model.add(Convolution3D(8, 3, 3, 3, name='conv1', input_shape=input_3d,
data_format='channels_first'))
model.add(MaxPooling3D(pool_size=pool_3d, name='pool1'))
model.add(Convolution3D(8, 3, 3, 3, name='conv2',data_format='channels_first'))
model.add(MaxPooling3D(pool_size=pool_3d, name='pool2'))
model.add(Convolution3D(8, 3, 3, 3, name='conv3',data_format='channels_first'))
model.add(MaxPooling3D(pool_size=pool_3d, name='pool3'))
model.add(Flatten())
model.add(Dense(2000, activation='relu', name='dense1'))
model.add(Dropout(0.5, name='dropout1'))
model.add(Dense(500, activation='relu', name='dense2'))
model.add(Dropout(0.5, name='dropout2'))
model.add(Dense(3, activation='softmax', name='softmax'))
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv1 (Conv3D) (None, 8, 60, 94, 94) 224
_________________________________________________________________
pool1 (MaxPooling3D) (None, 8, 30, 47, 47) 0
_________________________________________________________________
conv2 (Conv3D) (None, 8, 28, 45, 45) 1736
_________________________________________________________________
pool2 (MaxPooling3D) (None, 8, 14, 22, 22) 0
_________________________________________________________________
conv3 (Conv3D) (None, 8, 12, 20, 20) 1736
_________________________________________________________________
pool3 (MaxPooling3D) (None, 8, 6, 10, 10) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 4800) 0
_________________________________________________________________
dense1 (Dense) (None, 2000) 9602000
_________________________________________________________________
dropout1 (Dropout) (None, 2000) 0
_________________________________________________________________
dense2 (Dense) (None, 500) 1000500
_________________________________________________________________
dropout2 (Dropout) (None, 500) 0
_________________________________________________________________
softmax (Dense) (None, 3) 1503
=================================================================
Your PyTorch equivalent of the Keras model would look like this:
import torch
import torch.nn as nn

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.maxpool = nn.MaxPool3d((2, 2, 2))
        self.conv1 = nn.Conv3d(in_channels=1, out_channels=8, kernel_size=3)
        self.conv2 = nn.Conv3d(in_channels=8, out_channels=8, kernel_size=3)
        self.conv3 = nn.Conv3d(in_channels=8, out_channels=8, kernel_size=3)
        self.linear1 = nn.Linear(4800, 2000)
        self.dropout1 = nn.Dropout(0.5)  # plain Dropout: the tensor is already flattened here
        self.linear2 = nn.Linear(2000, 500)
        self.dropout2 = nn.Dropout(0.5)
        self.linear3 = nn.Linear(500, 3)

    def forward(self, x):
        out = self.maxpool(self.conv1(x))
        out = self.maxpool(self.conv2(out))
        out = self.maxpool(self.conv3(out))
        # Flattening process
        b, c, d, h, w = out.size()  # batch_size, channels, depth, height, width
        out = out.view(b, c * d * h * w)
        # The Keras Dense layers use ReLU activations, so apply them here as well
        out = self.dropout1(torch.relu(self.linear1(out)))
        out = self.dropout2(torch.relu(self.linear2(out)))
        out = self.linear3(out)
        out = torch.softmax(out, 1)
        return out
A driver program to test the model:
inputs = torch.randn(8, 1, 64, 96, 96)
model = CNN()
outputs = model(inputs)
print(outputs.shape) # torch.Size([8, 3])
You can save the Keras weights and reload them in PyTorch.
The steps are:
Step 0: Train a Model in Keras. ...
Step 1: Recreate & Initialize Your Model Architecture in PyTorch. ...
Step 2: Import Your Keras Model and Copy the Weights. ...
Step 3: Load Those Weights onto Your PyTorch Model. ...
Step 4: Test and Save Your PyTorch Model.
You can follow the example here: https://gereshes.com/2019/06/24/how-to-transfer-a-simple-keras-model-to-pytorch-the-hard-way/
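For steps 2 and 3, a minimal sketch (assuming the CNN class defined above and a Keras model trained and saved to 'my_3d_cnn.h5', a hypothetical path): Keras stores Conv3D kernels as (kd, kh, kw, in, out) and Dense weights as (in, out), so both need transposing before they can be copied into PyTorch.
import numpy as np
import torch
from keras.models import load_model

keras_model = load_model('my_3d_cnn.h5')  # hypothetical path to the trained Keras model
pt_model = CNN()

def copy_conv(pt_conv, keras_layer):
    w, b = keras_layer.get_weights()
    # Keras Conv3D kernel (kd, kh, kw, in, out) -> PyTorch Conv3d weight (out, in, kd, kh, kw)
    pt_conv.weight.data = torch.from_numpy(np.ascontiguousarray(np.transpose(w, (4, 3, 0, 1, 2))))
    pt_conv.bias.data = torch.from_numpy(b)

def copy_linear(pt_linear, keras_layer):
    w, b = keras_layer.get_weights()
    # Keras Dense weight (in, out) -> PyTorch Linear weight (out, in)
    pt_linear.weight.data = torch.from_numpy(np.ascontiguousarray(w.T))
    pt_linear.bias.data = torch.from_numpy(b)

copy_conv(pt_model.conv1, keras_model.get_layer('conv1'))
copy_conv(pt_model.conv2, keras_model.get_layer('conv2'))
copy_conv(pt_model.conv3, keras_model.get_layer('conv3'))
copy_linear(pt_model.linear1, keras_model.get_layer('dense1'))
copy_linear(pt_model.linear2, keras_model.get_layer('dense2'))
copy_linear(pt_model.linear3, keras_model.get_layer('softmax'))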
I am trying to train a segmentation model, but the loss saturates at 0.3370. I am really not sure what to do. Can someone please help?
This is the model:
def unet(input_shape=(128, 128, 128), optimizer=Adam, initial_learning_rate=5e-4,
loss_function=weighted_dice_coefficient_loss):
inputs = Input(shape=input_shape)
conv1 = UnetConv3D(inputs, 32, is_batchnorm=False, name='conv1')
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    conv2 = UnetConv3D(pool1, 64, is_batchnorm=False, name='conv2')
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    conv3 = UnetConv3D(pool2, 128, is_batchnorm=False, name='conv3')
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)
    conv4 = UnetConv3D(pool3, 256, is_batchnorm=False, name='conv4')
    pool4 = MaxPooling3D(pool_size=(2, 2, 2))(conv4)
conv5 = Conv3D(512, (3, 3, 3), activation='relu', kernel_initializer=kinit, padding='same', data_format = 'channels_first')(pool4)
conv5 = Conv3D(512, (3, 3, 3), activation='relu', kernel_initializer=kinit, padding='same', data_format = 'channels_first')(conv5)
    up6 = concatenate([Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2), kernel_initializer=kinit, padding='same', data_format = 'channels_first')(conv5), conv4], axis=1)
conv6 = Conv3D(256, (3, 3, 3), activation='relu', padding='same', data_format = 'channels_first')(up6)
conv6 = Conv3D(256, (3, 3, 3), activation='relu', padding='same', data_format = 'channels_first')(conv6)
    up7 = concatenate([Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2), padding='same', data_format = 'channels_first')(conv6), conv3], axis=1)
conv7 = Conv3D(128, (3, 3, 3), activation='relu', kernel_initializer=kinit, padding='same', data_format = 'channels_first')(up7)
conv7 = Conv3D(128, (3, 3, 3), activation='relu', kernel_initializer=kinit, padding='same', data_format = 'channels_first')(conv7)
    up8 = concatenate([Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2), kernel_initializer=kinit, padding='same', data_format = 'channels_first')(conv7), conv2], axis=1)
conv8 = Conv3D(64, (3, 3, 3), activation='relu', kernel_initializer=kinit, padding='same', data_format = 'channels_first')(up8)
    up9 = concatenate([Conv3DTranspose(32, (2, 2, 2), strides=(2, 2, 2), kernel_initializer=kinit, padding='same', data_format = 'channels_first')(conv8), conv1], axis=1)
conv9 = Conv3D(32, (3, 3, 3), activation='relu', kernel_initializer=kinit, padding='same', data_format = 'channels_first')(up9)
conv9 = Conv3D(32, (3, 3, 3), activation='relu', kernel_initializer=kinit, padding='same', data_format = 'channels_first')(conv9)
conv10 = Conv3D(3, (1, 1, 1), activation='relu', kernel_initializer=kinit,padding = 'same', name='final', data_format = 'channels_first')(conv9)
activation_name = 'sigmoid'
activation_block = Activation(activation_name)(conv10)
model = Model(inputs=[inputs], outputs=[activation_block])
model.compile(optimizer=optimizer(), loss=loss_function)
return model
This is the helper function:
def UnetConv3D(input, outdim, is_batchnorm, name):
x = Conv3D(outdim, (3, 3, 3), strides=(1, 1, 1), kernel_initializer=kinit, padding="same", name=name+'_1', data_format = 'channels_first')(input)
if is_batchnorm:
        x = BatchNormalization(name=name + '_1_bn')(x)
x = Activation('relu',name=name + '_1_act')(x)
x = Conv3D(outdim, (3, 3, 3), strides=(1, 1, 1), kernel_initializer=kinit, padding="same", name=name+'_2', data_format = 'channels_first')(x)
if is_batchnorm:
x = BatchNormalization(name=name + '_2_bn')(x)
x = Activation('relu', name=name + '_2_act')(x)
return x
And this is the loss function:
def weighted_dice_coefficient(y_true, y_pred, axis=(-3, -2, -1), smooth=0.00001):
"""
Weighted dice coefficient. Default axis assumes a "channels first" data structure
:param smooth:
:param y_true:
:param y_pred:
:param axis:
:return:
"""
    return K.mean(2. * (K.sum(y_true * y_pred, axis=axis) + smooth / 2) /
                  (K.sum(y_true, axis=axis) + K.sum(y_pred, axis=axis) + smooth))
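The weighted_dice_coefficient_loss passed into unet() is not shown above; a plausible definition, assuming the usual convention of negating the coefficient so it can be minimized, would be:
def weighted_dice_coefficient_loss(y_true, y_pred):
    # Dice is maximized, so the loss is its negation (an assumed definition)
    return -weighted_dice_coefficient(y_true, y_pred)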
My input is (128, 128, 128). Am I making an obvious mistake? Please let me know if more info is needed.
Model summary
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 1, 128, 128, 0
__________________________________________________________________________________________________
conv1_1 (Conv3D) (None, 32, 128, 128, 896 input_1[0][0]
__________________________________________________________________________________________________
conv1_1_act (Activation) (None, 32, 128, 128, 0 conv1_1[0][0]
__________________________________________________________________________________________________
conv1_2 (Conv3D) (None, 32, 128, 128, 27680 conv1_1_act[0][0]
__________________________________________________________________________________________________
conv1_2_act (Activation) (None, 32, 128, 128, 0 conv1_2[0][0]
__________________________________________________________________________________________________
max_pooling3d_1 (MaxPooling3D) (None, 32, 64, 64, 6 0 conv1_2_act[0][0]
__________________________________________________________________________________________________
conv2_1 (Conv3D) (None, 64, 64, 64, 6 55360 max_pooling3d_1[0][0]
__________________________________________________________________________________________________
conv2_1_act (Activation) (None, 64, 64, 64, 6 0 conv2_1[0][0]
__________________________________________________________________________________________________
conv2_2 (Conv3D) (None, 64, 64, 64, 6 110656 conv2_1_act[0][0]
__________________________________________________________________________________________________
conv2_2_act (Activation) (None, 64, 64, 64, 6 0 conv2_2[0][0]
__________________________________________________________________________________________________
max_pooling3d_2 (MaxPooling3D) (None, 64, 32, 32, 3 0 conv2_2_act[0][0]
__________________________________________________________________________________________________
conv3_1 (Conv3D) (None, 128, 32, 32, 221312 max_pooling3d_2[0][0]
__________________________________________________________________________________________________
conv3_1_act (Activation) (None, 128, 32, 32, 0 conv3_1[0][0]
__________________________________________________________________________________________________
conv3_2 (Conv3D) (None, 128, 32, 32, 442496 conv3_1_act[0][0]
__________________________________________________________________________________________________
conv3_2_act (Activation) (None, 128, 32, 32, 0 conv3_2[0][0]
__________________________________________________________________________________________________
max_pooling3d_3 (MaxPooling3D) (None, 128, 16, 16, 0 conv3_2_act[0][0]
__________________________________________________________________________________________________
conv4_1 (Conv3D) (None, 256, 16, 16, 884992 max_pooling3d_3[0][0]
__________________________________________________________________________________________________
conv4_1_act (Activation) (None, 256, 16, 16, 0 conv4_1[0][0]
__________________________________________________________________________________________________
conv4_2 (Conv3D) (None, 256, 16, 16, 1769728 conv4_1_act[0][0]
__________________________________________________________________________________________________
conv4_2_act (Activation) (None, 256, 16, 16, 0 conv4_2[0][0]
__________________________________________________________________________________________________
max_pooling3d_4 (MaxPooling3D) (None, 256, 8, 8, 8) 0 conv4_2_act[0][0]
__________________________________________________________________________________________________
conv3d_1 (Conv3D) (None, 512, 8, 8, 8) 3539456 max_pooling3d_4[0][0]
__________________________________________________________________________________________________
conv3d_2 (Conv3D) (None, 512, 8, 8, 8) 7078400 conv3d_1[0][0]
__________________________________________________________________________________________________
conv3d_transpose_1 (Conv3DTrans (None, 256, 16, 16, 1048832 conv3d_2[0][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 512, 16, 16, 0 conv3d_transpose_1[0][0]
conv4_2_act[0][0]
__________________________________________________________________________________________________
conv3d_3 (Conv3D) (None, 256, 16, 16, 3539200 concatenate_1[0][0]
__________________________________________________________________________________________________
conv3d_4 (Conv3D) (None, 256, 16, 16, 1769728 conv3d_3[0][0]
__________________________________________________________________________________________________
conv3d_transpose_2 (Conv3DTrans (None, 128, 32, 32, 262272 conv3d_4[0][0]
__________________________________________________________________________________________________
concatenate_2 (Concatenate) (None, 256, 32, 32, 0 conv3d_transpose_2[0][0]
conv3_2_act[0][0]
__________________________________________________________________________________________________
conv3d_5 (Conv3D) (None, 128, 32, 32, 884864 concatenate_2[0][0]
__________________________________________________________________________________________________
conv3d_6 (Conv3D) (None, 128, 32, 32, 442496 conv3d_5[0][0]
__________________________________________________________________________________________________
conv3d_transpose_3 (Conv3DTrans (None, 64, 64, 64, 6 65600 conv3d_6[0][0]
__________________________________________________________________________________________________
concatenate_3 (Concatenate) (None, 128, 64, 64, 0 conv3d_transpose_3[0][0]
conv2_2_act[0][0]
__________________________________________________________________________________________________
conv3d_7 (Conv3D) (None, 64, 64, 64, 6 221248 concatenate_3[0][0]
__________________________________________________________________________________________________
conv3d_transpose_4 (Conv3DTrans (None, 32, 128, 128, 16416 conv3d_7[0][0]
__________________________________________________________________________________________________
concatenate_4 (Concatenate) (None, 64, 128, 128, 0 conv3d_transpose_4[0][0]
conv1_2_act[0][0]
__________________________________________________________________________________________________
conv3d_8 (Conv3D) (None, 32, 128, 128, 55328 concatenate_4[0][0]
__________________________________________________________________________________________________
conv3d_9 (Conv3D) (None, 32, 128, 128, 27680 conv3d_8[0][0]
__________________________________________________________________________________________________
final (Conv3D) (None, 3, 128, 128, 99 conv3d_9[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 3, 128, 128, 0 final[0][0]
==================================================================================================
Thanks in advance
I have built a Keras ConvLSTM neural network, and I want to predict one frame ahead based on a sequence of 10 time steps:
model = Sequential()
model.add(ConvLSTM2D(filters=128, kernel_size=(3, 3),
input_shape=(None, img_size, img_size, Channels),
padding='same', return_sequences=True))
model.add(BatchNormalization())
model.add(ConvLSTM2D(filters=64, kernel_size=(3, 3),
padding='same', return_sequences=True))
model.add(BatchNormalization())
model.add(ConvLSTM2D(filters=64, kernel_size=(3, 3),
padding='same', return_sequences=False))
model.add(BatchNormalization())
model.add(Conv2D(filters=1, kernel_size=(3, 3),
activation='sigmoid',
padding='same', data_format='channels_last', name='conv2d'))
model.compile(loss='binary_crossentropy', optimizer='adadelta')
Training:
data_train_x: (10, 10, 62, 62, 12)
data_train_y: (10, 1, 62, 62, 1)
model.fit(data_train_x, data_train_y, batch_size=10, epochs=1,
validation_split=0.05)
But I get the following error:
ValueError: Error when checking target: expected conv2d to have 4 dimensions, but got array with shape (10, 1, 62, 62, 1)
And this is the results of 'model.summary()':
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv_lst_m2d_4 (ConvLSTM2D) (None, None, 62, 62, 128) 645632
_________________________________________________________________
batch_normalization_3 (Batch (None, None, 62, 62, 128) 512
_________________________________________________________________
conv_lst_m2d_5 (ConvLSTM2D) (None, None, 62, 62, 64) 442624
_________________________________________________________________
batch_normalization_4 (Batch (None, None, 62, 62, 64) 256
_________________________________________________________________
conv_lst_m2d_6 (ConvLSTM2D) (None, 62, 62, 64) 295168
_________________________________________________________________
batch_normalization_5 (Batch (None, 62, 62, 64) 256
_________________________________________________________________
conv2d (Conv2D) (None, 62, 62, 1) 577
=================================================================
Total params: 1,385,025
Trainable params: 1,384,513
Non-trainable params: 512
_________________________________________________________________
This model is a revised version of another model that compiled without error; the only change from the previous model is the last two layers, which previously were:
model.add(ConvLSTM2D(filters=64, kernel_size=(3, 3),
padding='same', return_sequences=True))
model.add(BatchNormalization())
model.add(Conv3D(filters=1, kernel_size=(3, 3, 3),
activation='sigmoid',
padding='same', data_format='channels_last', name='conv3d'))
I made this change because I want to get a 4-dimensional output of the form (samples, output_row, output_col, filters).
The error message is clear: the model expects the target to have four dimensions, but you are passing a target of rank 5. Squeeze the second dimension of data_train_y before feeding it to the model:
data_train_y = tf.squeeze(data_train_y, axis=1)
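If data_train_y is a NumPy array rather than a TensorFlow tensor, np.squeeze does the same thing before the data enters the graph:
import numpy as np

# (10, 1, 62, 62, 1) -> (10, 62, 62, 1), matching the (samples, rows, cols, filters) output
data_train_y = np.squeeze(data_train_y, axis=1)
model.fit(data_train_x, data_train_y, batch_size=10, epochs=1,
          validation_split=0.05)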
I am following a Keras tutorial and want to shadow it in PyTorch, so I am translating. I'm not strongly familiar with either and am coming unstuck on the input size parameter especially, but also on the final layer: do I need another Linear layer? Can anyone translate the following to a PyTorch Sequential definition?
visible = Input(shape=(64,64,1))
conv1 = Conv2D(32, kernel_size=4, activation='relu')(visible)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(16, kernel_size=4, activation='relu')(pool1)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
hidden1 = Dense(10, activation='relu')(pool2)
output = Dense(1, activation='sigmoid')(hidden1)
model = Model(inputs=visible, outputs=output)
This is the model summary:
Layer (type) Output Shape Param #
_________________________________________________________________
input_1 (InputLayer) (None, 64, 64, 1) 0
conv2d_1 (Conv2D) (None, 61, 61, 32) 544
max_pooling2d_1 (MaxPooling2 (None, 30, 30, 32) 0
conv2d_2 (Conv2D) (None, 27, 27, 16) 8208
max_pooling2d_2 (MaxPooling2 (None, 13, 13, 16) 0
dense_1 (Dense) (None, 13, 13, 10) 170
dense_2 (Dense) (None, 13, 13, 1) 11
Total params: 8,933
Trainable params: 8,933
Non-trainable params: 0
What I have worked out lacks a specification for the shape of the input, and I am also a bit perplexed by the translation of stride in the specified Keras model, as it uses stride 2 in the MaxPooling2D but doesn't specify it elsewhere; it is perhaps a toy example.
model = nn.Sequential(
nn.Conv2d(1, 32, 4),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(1, 16, 4),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Linear(10, 1),
nn.Sigmoid(),
)
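For reference, one translation that reproduces the Keras shapes above (a sketch, not from the original thread): the second conv must take 32 input channels, and the trailing Keras Dense layers act per spatial position on the channel axis, which can be expressed in PyTorch as 1x1 convolutions (matching the 170 and 11 parameter counts in the summary). PyTorch layers take no input-shape argument; the 1x64x64 input is implied by the data you feed in:
import torch
import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(1, 32, 4),   # (1, 64, 64) -> (32, 61, 61)
    nn.ReLU(),
    nn.MaxPool2d(2, 2),    # -> (32, 30, 30)
    nn.Conv2d(32, 16, 4),  # -> (16, 27, 27); in_channels must be 32, not 1
    nn.ReLU(),
    nn.MaxPool2d(2, 2),    # -> (16, 13, 13)
    nn.Conv2d(16, 10, 1),  # per-position Dense(10): 16*10 + 10 = 170 params
    nn.ReLU(),
    nn.Conv2d(10, 1, 1),   # per-position Dense(1): 10*1 + 1 = 11 params
    nn.Sigmoid(),
)

print(model(torch.randn(1, 1, 64, 64)).shape)  # torch.Size([1, 1, 13, 13])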
I'm trying to create a model on some data with 2 classes, but I keep getting an error saying:
ValueError: Error when checking target: expected softmax to have shape (1100,) but got array with shape (2,)
I know it's a fairly common error, but I can't seem to fix mine. I believe the error means that the model's output has shape (1100,) while my targets have shape (2,). Does anyone know how it can be fixed?
Here's my model:
def TestModel(nb_classes=2, inputs=(3, 224, 224)):
input_img = Input(shape=inputs)
conv1 = Convolution2D(
96, 7, 7, activation='relu', init='glorot_uniform',
subsample=(2, 2), border_mode='same', name='conv1')(input_img)
maxpool1 = MaxPooling2D(
pool_size=(3, 3), strides=(2, 2), name='maxpool1', dim_ordering="th")(conv1)
fire2_squeeze = Convolution2D(
16, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire2_squeeze')(maxpool1)
fire2_expand1 = Convolution2D(
64, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire2_expand1')(fire2_squeeze)
fire2_expand2 = Convolution2D(
64, 3, 3, activation='relu', init='glorot_uniform',
border_mode='same', name='fire2_expand2')(fire2_squeeze)
merge2 = merge(
[fire2_expand1, fire2_expand2], mode='concat', concat_axis=1)
fire3_squeeze = Convolution2D(
16, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire3_squeeze')(merge2)
fire3_expand1 = Convolution2D(
64, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire3_expand1')(fire3_squeeze)
fire3_expand2 = Convolution2D(
64, 3, 3, activation='relu', init='glorot_uniform',
border_mode='same', name='fire3_expand2')(fire3_squeeze)
merge3 = merge(
[fire3_expand1, fire3_expand2], mode='concat', concat_axis=1)
fire4_squeeze = Convolution2D(
32, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire4_squeeze')(merge3)
fire4_expand1 = Convolution2D(
128, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire4_expand1')(fire4_squeeze)
fire4_expand2 = Convolution2D(
128, 3, 3, activation='relu', init='glorot_uniform',
border_mode='same', name='fire4_expand2')(fire4_squeeze)
merge4 = merge(
[fire4_expand1, fire4_expand2], mode='concat', concat_axis=1)
maxpool4 = MaxPooling2D(
pool_size=(3, 3), strides=(2, 2), name='maxpool4')(merge4)
fire5_squeeze = Convolution2D(
32, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire5_squeeze')(maxpool4)
fire5_expand1 = Convolution2D(
128, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire5_expand1')(fire5_squeeze)
fire5_expand2 = Convolution2D(
128, 3, 3, activation='relu', init='glorot_uniform',
border_mode='same', name='fire5_expand2')(fire5_squeeze)
merge5 = merge(
[fire5_expand1, fire5_expand2], mode='concat', concat_axis=1)
fire6_squeeze = Convolution2D(
48, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire6_squeeze')(merge5)
fire6_expand1 = Convolution2D(
192, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire6_expand1')(fire6_squeeze)
fire6_expand2 = Convolution2D(
192, 3, 3, activation='relu', init='glorot_uniform',
border_mode='same', name='fire6_expand2')(fire6_squeeze)
merge6 = merge(
[fire6_expand1, fire6_expand2], mode='concat', concat_axis=1)
fire7_squeeze = Convolution2D(
48, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire7_squeeze')(merge6)
fire7_expand1 = Convolution2D(
192, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire7_expand1')(fire7_squeeze)
fire7_expand2 = Convolution2D(
192, 3, 3, activation='relu', init='glorot_uniform',
border_mode='same', name='fire7_expand2')(fire7_squeeze)
merge7 = merge(
[fire7_expand1, fire7_expand2], mode='concat', concat_axis=1)
fire8_squeeze = Convolution2D(
64, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire8_squeeze')(merge7)
fire8_expand1 = Convolution2D(
256, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire8_expand1')(fire8_squeeze)
fire8_expand2 = Convolution2D(
256, 3, 3, activation='relu', init='glorot_uniform',
border_mode='same', name='fire8_expand2')(fire8_squeeze)
merge8 = merge(
[fire8_expand1, fire8_expand2], mode='concat', concat_axis=1)
maxpool8 = MaxPooling2D(
pool_size=(3, 3), strides=(2, 2), name='maxpool8')(merge8)
fire9_squeeze = Convolution2D(
64, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire9_squeeze')(maxpool8)
fire9_expand1 = Convolution2D(
256, 1, 1, activation='relu', init='glorot_uniform',
border_mode='same', name='fire9_expand1')(fire9_squeeze)
fire9_expand2 = Convolution2D(
256, 3, 3, activation='relu', init='glorot_uniform',
border_mode='same', name='fire9_expand2')(fire9_squeeze)
merge9 = merge(
[fire9_expand1, fire9_expand2], mode='concat', concat_axis=1)
fire9_dropout = Dropout(0.5, name='fire9_dropout')(merge9)
conv10 = Convolution2D(
nb_classes, 1, 1, init='glorot_uniform',
border_mode='valid', name='conv10')(fire9_dropout)
# The size should match the output of conv10
avgpool10 = AveragePooling2D((13, 13), name='avgpool10')(conv10)
flatten = Flatten(name='flatten')(avgpool10)
softmax = Activation("softmax", name='softmax')(flatten)
return Model(input=input_img, output=softmax)
Here's the code creating the model:
def main():
np.random.seed(45)
nb_class = 2
width, height = 224, 224
sn = model.TestModel(nb_classes=nb_class, inputs=(height, width, 3))
print('Build model')
sgd = SGD(lr=0.001, decay=0.0002, momentum=0.9, nesterov=True)
sn.compile(
optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
print(sn.summary())
# Training
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
nb_epoch = 500
# Generator
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
#train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(width, height),
batch_size=32,
class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(width, height),
batch_size=32,
class_mode='categorical')
# Instantiate AccLossPlotter to visualise training
plotter = AccLossPlotter(graphs=['acc', 'loss'], save_graph=True)
early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=0)
checkpoint = ModelCheckpoint(
'weights.{epoch:02d}-{val_loss:.2f}.h5',
monitor='val_loss',
verbose=0,
save_best_only=True,
save_weights_only=True,
mode='min',
period=1)
sn.fit_generator(
train_generator,
samples_per_epoch=nb_train_samples,
nb_epoch=nb_epoch,
validation_data=validation_generator,
nb_val_samples=nb_validation_samples,
callbacks=[plotter, checkpoint])
sn.save_weights('weights.h5')
Here's summary():
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_1 (InputLayer) (None, 224, 224, 3) 0
____________________________________________________________________________________________________
conv1 (Convolution2D) (None, 112, 112, 96) 14208 input_1[0][0]
____________________________________________________________________________________________________
maxpool1 (MaxPooling2D) (None, 112, 55, 47) 0 conv1[0][0]
____________________________________________________________________________________________________
fire2_squeeze (Convolution2D) (None, 112, 55, 16) 768 maxpool1[0][0]
____________________________________________________________________________________________________
fire2_expand1 (Convolution2D) (None, 112, 55, 64) 1088 fire2_squeeze[0][0]
____________________________________________________________________________________________________
fire2_expand2 (Convolution2D) (None, 112, 55, 64) 9280 fire2_squeeze[0][0]
____________________________________________________________________________________________________
merge_1 (Merge) (None, 224, 55, 64) 0 fire2_expand1[0][0]
fire2_expand2[0][0]
____________________________________________________________________________________________________
fire3_squeeze (Convolution2D) (None, 224, 55, 16) 1040 merge_1[0][0]
____________________________________________________________________________________________________
fire3_expand1 (Convolution2D) (None, 224, 55, 64) 1088 fire3_squeeze[0][0]
____________________________________________________________________________________________________
fire3_expand2 (Convolution2D) (None, 224, 55, 64) 9280 fire3_squeeze[0][0]
____________________________________________________________________________________________________
merge_2 (Merge) (None, 448, 55, 64) 0 fire3_expand1[0][0]
fire3_expand2[0][0]
____________________________________________________________________________________________________
fire4_squeeze (Convolution2D) (None, 448, 55, 32) 2080 merge_2[0][0]
____________________________________________________________________________________________________
fire4_expand1 (Convolution2D) (None, 448, 55, 128) 4224 fire4_squeeze[0][0]
____________________________________________________________________________________________________
fire4_expand2 (Convolution2D) (None, 448, 55, 128) 36992 fire4_squeeze[0][0]
____________________________________________________________________________________________________
merge_3 (Merge) (None, 896, 55, 128) 0 fire4_expand1[0][0]
fire4_expand2[0][0]
____________________________________________________________________________________________________
maxpool4 (MaxPooling2D) (None, 447, 27, 128) 0 merge_3[0][0]
____________________________________________________________________________________________________
fire5_squeeze (Convolution2D) (None, 447, 27, 32) 4128 maxpool4[0][0]
____________________________________________________________________________________________________
fire5_expand1 (Convolution2D) (None, 447, 27, 128) 4224 fire5_squeeze[0][0]
____________________________________________________________________________________________________
fire5_expand2 (Convolution2D) (None, 447, 27, 128) 36992 fire5_squeeze[0][0]
____________________________________________________________________________________________________
merge_4 (Merge) (None, 894, 27, 128) 0 fire5_expand1[0][0]
fire5_expand2[0][0]
____________________________________________________________________________________________________
fire6_squeeze (Convolution2D) (None, 894, 27, 48) 6192 merge_4[0][0]
____________________________________________________________________________________________________
fire6_expand1 (Convolution2D) (None, 894, 27, 192) 9408 fire6_squeeze[0][0]
____________________________________________________________________________________________________
fire6_expand2 (Convolution2D) (None, 894, 27, 192) 83136 fire6_squeeze[0][0]
____________________________________________________________________________________________________
merge_5 (Merge) (None, 1788, 27, 192) 0 fire6_expand1[0][0]
fire6_expand2[0][0]
____________________________________________________________________________________________________
fire7_squeeze (Convolution2D) (None, 1788, 27, 48) 9264 merge_5[0][0]
____________________________________________________________________________________________________
fire7_expand1 (Convolution2D) (None, 1788, 27, 192) 9408 fire7_squeeze[0][0]
____________________________________________________________________________________________________
fire7_expand2 (Convolution2D) (None, 1788, 27, 192) 83136 fire7_squeeze[0][0]
____________________________________________________________________________________________________
merge_6 (Merge) (None, 3576, 27, 192) 0 fire7_expand1[0][0]
fire7_expand2[0][0]
____________________________________________________________________________________________________
fire8_squeeze (Convolution2D) (None, 3576, 27, 64) 12352 merge_6[0][0]
____________________________________________________________________________________________________
fire8_expand1 (Convolution2D) (None, 3576, 27, 256) 16640 fire8_squeeze[0][0]
____________________________________________________________________________________________________
fire8_expand2 (Convolution2D) (None, 3576, 27, 256) 147712 fire8_squeeze[0][0]
____________________________________________________________________________________________________
merge_7 (Merge) (None, 7152, 27, 256) 0 fire8_expand1[0][0]
fire8_expand2[0][0]
____________________________________________________________________________________________________
maxpool8 (MaxPooling2D) (None, 3575, 13, 256) 0 merge_7[0][0]
____________________________________________________________________________________________________
fire9_squeeze (Convolution2D) (None, 3575, 13, 64) 16448 maxpool8[0][0]
____________________________________________________________________________________________________
fire9_expand1 (Convolution2D) (None, 3575, 13, 256) 16640 fire9_squeeze[0][0]
____________________________________________________________________________________________________
fire9_expand2 (Convolution2D) (None, 3575, 13, 256) 147712 fire9_squeeze[0][0]
____________________________________________________________________________________________________
merge_8 (Merge) (None, 7150, 13, 256) 0 fire9_expand1[0][0]
fire9_expand2[0][0]
____________________________________________________________________________________________________
fire9_dropout (Dropout) (None, 7150, 13, 256) 0 merge_8[0][0]
____________________________________________________________________________________________________
conv10 (Convolution2D) (None, 7150, 13, 2) 514 fire9_dropout[0][0]
____________________________________________________________________________________________________
avgpool10 (AveragePooling2D) (None, 550, 1, 2) 0 conv10[0][0]
____________________________________________________________________________________________________
flatten (Flatten) (None, 1100) 0 avgpool10[0][0]
____________________________________________________________________________________________________
softmax (Activation) (None, 1100) 0 flatten[0][0]
====================================================================================================
Total params: 683,954
Trainable params: 683,954
Non-trainable params: 0
____________________________________________________________________________________________________
None
Found 22778 images belonging to 2 classes.
Found 2222 images belonging to 2 classes.
Epoch 1/500
Any thoughts appreciated.
You shouldn't be using AveragePooling2D, but GlobalAveragePooling2D, which will reduce the spatial dimensions to 1, making the Flatten work and producing an output of shape (None, 2).
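For example, the tail of TestModel could become (a sketch in the question's Keras 1 style; GlobalAveragePooling2D already returns a 2D tensor, so the Flatten layer is no longer needed):
from keras.layers import GlobalAveragePooling2D

# ... unchanged up to fire9_dropout ...
conv10 = Convolution2D(
    nb_classes, 1, 1, init='glorot_uniform',
    border_mode='valid', name='conv10')(fire9_dropout)
avgpool10 = GlobalAveragePooling2D(name='avgpool10')(conv10)  # -> (None, nb_classes)
softmax = Activation('softmax', name='softmax')(avgpool10)
return Model(input=input_img, output=softmax)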