How to fix this MaxPooling error in a CNN? - conv-neural-network

The last two MaxPooling2D layers are not working.
THE CODE:
input_shape = (48,48,1)
output_class = 7
model = Sequential()
model.add(Conv2D(128, kernel_size=(3,3), activation='relu', input_shape=input_shape))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.4))
model.add(Conv2D(256, kernel_size=(3,3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.4))
model.add(Conv2D(512, kernel_size=(3,3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.4))
model.add(Conv2D(512, kernel_size=(3,3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2), data_format='channels_last',
                       input_shape=input_shape, padding='same'))  # ERROR
model.add(Dropout(0.4))
model.add(Dense(256, activation='relu', input_shape=input_shape))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))  # ERROR
model.add(Dropout(0.3))
model.add(Dense(output_class, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics='accuracy')
THE ERROR:
ValueError: Input 0 of layer "max_pooling2d_178" is incompatible with the layer: expected ndim=4, found ndim=2. Full shape received: (None, 512)
I have searched the internet for an answer and other solutions, without success.
Without those MaxPooling layers, the code runs successfully.
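For context, MaxPooling2D only accepts 4D inputs of shape (batch, height, width, channels); after Flatten() and Dense() the tensor is 2D, which is exactly what the ValueError reports. A minimal sketch of the classifier head with those pooling layers removed (assuming the same 7-class softmax output is wanted):

model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Dense(256, activation='relu'))  # no input_shape needed past the first layer
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(output_class, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

The input_shape argument on the later Dense layer is dropped as well, since only the first layer of a Sequential model needs one.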

Related

How to determine the number of layers your CNN has?

How do you determine the number of layers in a CNN? For example, in the code snippet given below, how can you determine the number of layers?
CODE
# Construct model
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=2, input_shape=(num_rows, num_columns, num_channels), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=32, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=64, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=128, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(GlobalAveragePooling2D())
model.add(Dense(num_labels, activation='softmax'))
Do you mean how to count them by hand, or a TensorFlow function that returns the number of layers? If it's the latter, this should be it:
layer_amount = len(model.layers)
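As a quick usage sketch (assuming the model above has already been built), you can also list the layers to see exactly what is being counted:

layer_amount = len(model.layers)
print("Number of layers:", layer_amount)
for layer in model.layers:
    print(layer.name, type(layer).__name__)

model.summary() prints the same information as a table, one row per layer, plus the parameter counts.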

ResNet autoencoder in Keras I/O issues

I am trying to code a deep autoencoder in Keras. My image array has shape (4575, 32, 32, 3) and the targets are (4575, 1).
Here's the function:
def build_deep_autoencoder(img_shape, code_size):
    H, W, C = img_shape
    # encoder
    encoder = Sequential()
    encoder.add(L.InputLayer(img_shape))
    encoder.add(ResNet50(include_top=False, pooling='avg'))
    encoder.add(Flatten())
    encoder.add(Dense(512, activation='relu'))
    encoder.add(Dropout(0.5))
    encoder.add(BatchNormalization())
    encoder.add(Dense(256, activation='relu'))
    encoder.add(Dropout(0.5))
    encoder.add(BatchNormalization())
    encoder.add(Dense(code_size))
    # decoder
    decoder = Sequential()
    decoder.add(L.InputLayer((code_size,)))
    encoder.add(Flatten())
    decoder.add(Dense(2*2*256))
    decoder.add(Reshape((2, 2, 256)))
    decoder.add(Conv2DTranspose(filters=128, kernel_size=(3, 3), strides=2, activation='elu', padding='same'))
    decoder.add(Conv2DTranspose(filters=64, kernel_size=(3, 3), strides=2, activation='elu', padding='same'))
    decoder.add(Conv2DTranspose(filters=32, kernel_size=(3, 3), strides=2, activation='elu', padding='same'))
    decoder.add(Conv2DTranspose(filters=3, kernel_size=(3, 3), strides=2, activation=None, padding='same'))
    return encoder, decoder
encoder,decoder = build_deep_autoencoder(img_shape,code_size=2)
inp = L.Input(img_shape)
code = encoder(inp)
reconstruction = decoder(code)
autoencoder = tensorflow.keras.models.Model(inp,reconstruction)
encoder.summary()
autoencoder.compile('nadam','mse')
autoencoder.fit(x=X,y=y,epochs=10)
I am getting an error:
InvalidArgumentError: Incompatible shapes: [31,32,32,3] vs. [31,1]
[[{{node training_18/Nadam/gradients/loss_12/sequential_28_loss/MeanSquaredError/sub_grad/BroadcastGradientArgs}}]]
I am using tensorflow.python.keras
Any help would be appreciated.
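One hedged reading of the shape mismatch, not stated in the original post: the decoder's output is a reconstructed image of shape (32, 32, 3), while the targets passed to fit are the (4575, 1) labels, so the MSE loss cannot compare the two. An autoencoder is normally trained to reproduce its own input, which would look roughly like this (X is assumed to be the image array from the question):

# Sketch: train the autoencoder to reconstruct its input rather than the labels.
autoencoder.compile(optimizer='nadam', loss='mse')
autoencoder.fit(x=X, y=X, epochs=10)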

Error when checking input: expected conv2d_19_input to have 4 dimensions, but got array with shape (274, 1) deep learning

This is my code, which produces the error:
def createModel():
    model = Sequential()
    # first set of CONV => RELU => MAX POOL layers
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=inputShape))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(output_dim=NUM_CLASSES, activation='softmax'))
    # returns our fully constructed deep learning + Keras image classifier
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    # use binary_crossentropy if there are two classes
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
    return model
ValueError: Error when checking input: expected conv2d_19_input to have 4 dimensions, but got array with shape (274, 1)
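A hedged reading of the traceback: the problem is in the data fed to the model, not in createModel() itself. Conv2D expects batches of images shaped (num_samples, height, width, channels), but the array being passed has shape (274, 1), i.e. 274 items that were never decoded into pixel arrays. A minimal illustration of the shape the first layer expects, with placeholder dimensions that are not taken from the question:

import numpy as np

# Placeholder values; the real height/width/channels must match inputShape.
num_samples, height, width, channels = 274, 64, 64, 3
X_train = np.zeros((num_samples, height, width, channels), dtype='float32')
print(X_train.ndim)  # 4 -- the number of dimensions conv2d_19_input expects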

Keras Error - expected activation_1 to have shape (2622,) but got array with shape (1,)

I am trying to build a pretrained VGG16 in Keras, but it keeps giving me this error:
ValueError: Error when checking target: expected activation_1 to have shape (2622,) but got array with shape (1,)
I was trying to create the model based on this poster: Link
Also, I took the pre-trained weights from here. The weights can be read here.
This is my code:
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, ZeroPadding2D
from keras import backend as K
# dimensions of our images.
img_width, img_height = 224, 224
train_data_dir = 'database/train'
validation_data_dir = 'database/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
# build the VGG16 network
model = applications.VGG16(weights='imagenet', include_top=False)
print('VGG Pretrained Model loaded.')
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Conv2D(4096, (7, 7), activation='relu'))
model.add(Dropout(0.5))
model.add(Conv2D(4096, (1, 1), activation='relu'))
model.add(Dropout(0.5))
model.add(Conv2D(2622, (1, 1)))
model.add(Flatten())
model.add(Activation('softmax'))
# model.load_weights('./vgg16_face_weights.h5')
#
# vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 224,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 224)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
model.save_weights('first_try.h5')
You probably have only one class folder inside 'database/train' and 'database/validation'.
Make sure you have 2622 folders (one per class) in each of those two directories so that Keras can generate the labels correctly.
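For reference, flow_from_directory infers one class per subdirectory, so a quick check of the layout could look like this (paths taken from the question):

import os
# Each class should be its own folder; Keras builds one label per subdirectory.
print(len(os.listdir('database/train')))       # should be 2622
print(len(os.listdir('database/validation')))  # should be 2622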
The following example shows that the labels should have shape (batch_size, 2622).
# the above remains the same
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
import numpy as np
classes = 2622
batch_size = 4
y = np.zeros((batch_size, classes))
for i in range(batch_size):
    y[i, np.random.choice(classes)] = 1
model.fit(x=np.random.random((batch_size,)+input_shape), y=y, batch_size=batch_size)
model.save_weights('first_try.h5')
EDIT:
To change the last Conv2D layer from 2622 filters to 12 filters while maintaining the loaded weights, here is a workaround:
#define model and load_weights
#......
#build a new model based on the last model
conv = Conv2D(12, (1, 1))(model.layers[-4].output)
flatten = Flatten()(conv)
softmax = Activation('softmax')(flatten)
final_model = Model(inputs=model.input, outputs=softmax)
Ref: Cannot add layers to saved Keras Model. 'Model' object has no attribute 'add'
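As a small follow-up sketch (using the names from the workaround above), the rebuilt model can be compiled and its output shape checked before training:

# Confirm the new 12-class head before retraining.
final_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
print(final_model.output_shape)  # last dimension should now be 12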

Keras ValueError: ValueError: Error when checking target: expected dense_4 to have shape (None, 2) but got array with shape (2592, 1) Python3

I am having an issue when trying to train my model with Keras 2.0.8, Python 3.6.1, and a TensorFlow backend.
Error Message:
ValueError: Error when checking target: expected dense_4 to have shape (None, 2) but got array with shape (2592, 1)
X_train = numpy.swapaxes(X_train, 1, 3)
X_test = numpy.swapaxes(X_test, 1, 3)
print("X_train shape:", X_train.shape)  # (2592, 1, 1366, 96)
print("X_test shape:", X_test.shape)    # (648, 1, 1366, 96)
print("Y_train shape:", Y_train.shape)  # (2592,)
print("Y_test shape:", Y_test.shape)    # (648,)
Relevant Code snippets:
K.set_image_dim_ordering('th')
K.set_image_data_format('channels_first')
def create_model(weights_path=None):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', padding="same", input_shape=(1, 1366, 96)))
    model.add(Conv2D(64, (3, 3), activation='relu', dim_ordering="th"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model

model = create_model()
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.SGD(lr=0.01),
              metrics=['accuracy'])
history = model.fit(X_train, Y_train,
                    batch_size=32,
                    epochs=100,
                    verbose=1,
                    validation_data=(X_test, Y_test))
Line 142, where I call model.fit(), is where I get this error.
Things I have tried to fix this error:
I referenced these Stack Overflow posts:
I tried to reshape the Y_test and Y_train numpy arrays using the following code:
Y_train.reshape(2592, 2)
Y_test.reshape(648, 2)
However, I get the following error:
ValueError: cannot reshape array of size 2592 into shape (2592,2)
It seems to me you need to change the last layer of the NN (note that with a single sigmoid output you would also switch the loss to binary_crossentropy and keep the labels with shape (n, 1)):
def create_model(weights_path=None):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', padding="same", input_shape=(1, 1366, 96)))
    model.add(Conv2D(64, (3, 3), activation='relu', dim_ordering="th"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    if weights_path:
        model.load_weights(weights_path)
    return model
Alternatively, if you keep the categorical_crossentropy loss, you have to use one-hot encoded labels. For this you can use the to_categorical function from keras.utils.np_utils:
from keras.utils import np_utils
y_train_onehot = np_utils.to_categorical(y_train)
y_test_onehot = np_utils.to_categorical(y_test)
Then use the one-hot encoded labels to train your model.
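A quick sanity check, using the shapes from the question, confirms that the labels now match the Dense(2, activation='softmax') output layer:

print(y_train_onehot.shape)  # expected: (2592, 2)
print(y_test_onehot.shape)   # expected: (648, 2)
# model.fit(X_train, y_train_onehot, validation_data=(X_test, y_test_onehot), ...)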
