I'm using the Places365-Standard dataset to train my Keras VGG16 CNN model. We changed the output layer from 1,000 categories to 10: 1. botanical_garden, 2. cliff, 3. creek, 4. forest-broadleaf, 5. islet, 6. mountain, 7. ocean, 8. pier, 9. skyscraper, 10. temple-asia.
Each category has 5,000 training images, for a total of 50,000 training images.
The problem is that after 10 epochs of training on the 50,000 images, our accuracy still can't break 20%.
We would really appreciate any advice on why our model's accuracy is so low. Thank you very much.
The model is as follows:
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, Activation, Flatten, Dense, Dropout
from keras.optimizers import SGD

model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(224,224,3)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
sgd = SGD(lr=0.1, decay=1e-6, nesterov=True)
model.summary()
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
for i in range(10):
    for j in range(50):
        X = np.load("C:/Users/firzen41616316/Desktop/numpydataKeras_1000x50/imgonehot_" + str((j+1)*1000) + ".npy")
        Y = np.load("C:/Users/firzen41616316/Desktop/numpydataKeras_1000x50/labelonehot_" + str((j+1)*1000) + ".npy")
        model.fit(x=X, y=Y,
                  validation_split=0.2,
                  epochs=1,
                  verbose=1)
        print('Done training ', (j+1)*1000, ' images')
    print('Done training 50000 images, Epoch ', i, ' -------------')
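Note that validation_split=0.2 is applied to each 1,000-image chunk separately, so every fit call trains on 800 samples and validates on a different 200 (as the logs below show). For comparison, here is a minimal sketch of driving the same training from class folders with ImageDataGenerator instead of pre-chunked .npy files; the directory path and layout are assumptions, not part of our actual setup:

from keras.preprocessing.image import ImageDataGenerator

# Assumed layout: train_dir/<class_name>/*.jpg for the 10 scene classes
datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.2)

train_gen = datagen.flow_from_directory(
    'path/to/train_dir', target_size=(224, 224), batch_size=32,
    class_mode='categorical', subset='training')
val_gen = datagen.flow_from_directory(
    'path/to/train_dir', target_size=(224, 224), batch_size=32,
    class_mode='categorical', subset='validation')

# One fixed train/validation split across all 50,000 images
model.fit_generator(train_gen, epochs=10, validation_data=val_gen)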
Here are some parts of the printout:
Done training 47000 images
Train on 800 samples, validate on 200 samples
Epoch 1/1
800/800 [==============================] - 29s 36ms/step - loss: 2.3021 - acc: 0.1187 - val_loss: 2.3036 - val_acc: 0.1050
Done training 48000 images
Train on 800 samples, validate on 200 samples
Epoch 1/1
800/800 [==============================] - 29s 36ms/step - loss: 2.3036 - acc: 0.1037 - val_loss: 2.3056 - val_acc: 0.1100
Done training 49000 images
Train on 800 samples, validate on 200 samples
Epoch 1/1
800/800 [==============================] - 29s 36ms/step - loss: 2.3028 - acc: 0.1187 - val_loss: 2.3042 - val_acc: 0.1050
Done training 50000 images
Done training 50000 images, Epoch 9 -------------
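One way to read these logs: the loss is pinned at about 2.30 throughout, which is exactly the cross-entropy of a uniform prediction over 10 classes, so the network is effectively guessing (accuracy ~0.10, i.e. chance level). A quick check:

import numpy as np
print(-np.log(1.0 / 10))  # 2.302585..., matching the logged loss of ~2.30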
Related
This is my code, which gives an error:
def createModel():
    model = Sequential()
    # first set of CONV => RELU => MAX POOL layers
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=inputShape))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES, activation='softmax'))  # output_dim is deprecated; pass the unit count positionally
    # returns our fully constructed deep learning + Keras image classifier
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    # use binary_crossentropy if there are two classes
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
    return model
ValueError: Error when checking input: expected conv2d_19_input to have 4 dimensions, but got array with shape (274, 1)
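The error means the input array is 2-D with shape (274, 1), while Conv2D expects a 4-D batch of images, (num_samples, height, width, channels). A minimal sketch of the expected shape; the 32x32x3 image size is just an assumption for illustration:

import numpy as np

# Conv2D needs a 4-D batch: (num_samples, height, width, channels)
X = np.zeros((274, 32, 32, 3), dtype='float32')
print(X.ndim)  # 4 -- what the model expects

# A (274, 1) array (e.g. a column of undecoded images) will not work; each image
# must be loaded, resized to one fixed size, and stacked, for example:
# X = np.stack([img_to_array(load_img(p, target_size=(32, 32))) for p in paths])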
To recognize images, I am using a CNN+SVM hybrid model.
The model:
model.add(Conv2D(20, (5, 5), padding="same",input_shape=inputShape))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(50, (5, 5), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dense((classes), kernel_regularizer=regularizers.l2(0.01)))
model.add(Activation('linear'))
Compiling the model:
opt = Adam(lr=lr, decay=lr / epochs)
model.compile(loss="categorical_hinge", optimizer=opt, metrics=["accuracy"])
Is this correct? During training, the accuracy is not improving. What is wrong with this code?
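One thing worth checking with categorical_hinge is the label format: like categorical_crossentropy, it expects one-hot targets of shape (batch_size, classes), compared against the raw linear scores of the last layer. A small sketch; the integer labels here are made up for illustration:

import numpy as np
from keras.utils import to_categorical

y_int = np.array([0, 2, 1])                             # integer class labels
y_onehot = to_categorical(y_int, num_classes=classes)   # shape (3, classes)
# model.fit(X, y_onehot, ...) -- integer labels would not match the
# (batch_size, classes) shape that categorical_hinge expects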
What happens if I add another convolution layer after a convolution layer with the same filter size? For example, in a network of around 20 layers, I have to choose different filter sizes among the convolution layers, so what will be the impact if I do something like this:
inner = MaxPooling2D(pool_size=(1, 2), strides=2, padding='valid', name='max3')(inner)
inner = Conv2D(64, (3, 3), strides=1, padding='same', name='conv6', kernel_initializer='he_normal')(inner)
inner = Conv2D(64, (3, 3), strides=1, padding='same', name='conv7', kernel_initializer='he_normal')(inner)
inner = Conv2D(64, (3, 3), strides=1, padding='same', name='conv8', kernel_initializer='he_normal')(inner)
inner = BatchNormalization()(inner)
I trained both scenarios, one with the same filter count throughout and one with different (increasing) filter counts:
(1) When trained with more weights (i.e., more filters, increasing in number per layer), the test accuracy is higher.
(2) When trained with the same filter count, the accuracy is not as good as in the first case.
I ran the code below on the CIFAR10 dataset.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
Total params: 56,320
Trainable params: 56,320
Non-trainable params: 0
In this case: loss: 0.9110 - accuracy: 0.6937
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
Total params: 19,392
Trainable params: 19,392
Non-trainable params: 0
In this case: loss: 0.9760 - accuracy: 0.6695
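The capacity gap matches the parameter counts reported by model.summary(): each Conv2D layer contributes (kernel_h * kernel_w * in_channels + 1) * filters parameters. A quick check that reproduces the two totals above:

def conv_params(kh, kw, in_ch, filters):
    # kh*kw*in_ch weights per filter, plus one bias per filter
    return (kh * kw * in_ch + 1) * filters

# 32-64-64 model
print(conv_params(3, 3, 3, 32) + conv_params(3, 3, 32, 64) + conv_params(3, 3, 64, 64))  # 56320
# 32-32-32 model
print(conv_params(3, 3, 3, 32) + conv_params(3, 3, 32, 32) + conv_params(3, 3, 32, 32))  # 19392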
Hello guys, I am trying to build a pretrained VGG16 in Keras, but it keeps giving me this error:
ValueError: Error when checking target: expected activation_1 to have
shape (2622,) but got array with shape (1,)
I was trying to create the model based on this poster: Link.
Also, I took the pre-trained weights from here; the weights can be read here.
This is my code:
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, ZeroPadding2D
from keras import backend as K
# dimensions of our images.
img_width, img_height = 224, 224
train_data_dir = 'database/train'
validation_data_dir = 'database/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
# build the VGG16 network
model = applications.VGG16(weights='imagenet', include_top=False)
print('VGG Pretrained Model loaded.')
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Conv2D(4096, (7, 7), activation='relu'))
model.add(Dropout(0.5))
model.add(Conv2D(4096, (1, 1), activation='relu'))
model.add(Dropout(0.5))
model.add(Conv2D(2622, (1, 1)))
model.add(Flatten())
model.add(Activation('softmax'))
# model.load_weights('./vgg16_face_weights.h5')
#
# vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 224,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 224)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
model.save_weights('first_try.h5')
You probably have only one folder inside 'database/train' and 'database/validation'.
Please make sure you have 2622 folders in those two directories so that Keras can generate the labels correctly.
The following example shows that the labels should have shape (batch_size, 2622).
# the above remains the same
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
import numpy as np
classes = 2622
batch_size = 4
y = np.zeros((batch_size, classes))
for i in range(batch_size):
    y[i, np.random.choice(classes)] = 1
model.fit(x=np.random.random((batch_size,)+input_shape), y=y, batch_size=batch_size)
model.save_weights('first_try.h5')
EDIT:
To change the last Conv2D layer from 2622 filters to 12 filters while maintaining the loaded weights, here is a workaround:
#define model and load_weights
#......
#build a new model based on the last model
conv = Conv2D(12, (1, 1))(model.layers[-4].output)
flatten = Flatten()(conv)
softmax = Activation('softmax')(flatten)
final_model = Model(inputs=model.input, outputs=softmax)
Ref: Cannot add layers to saved Keras Model. 'Model' object has no attribute 'add'
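If only the new 12-class head should be trained at first, the loaded VGG layers can be frozen before compiling; a sketch of that common pattern (the layer slice assumes the head is the last three layers, as built above):

# Freeze everything except the new Conv2D -> Flatten -> Activation head
for layer in final_model.layers[:-3]:
    layer.trainable = False

final_model.compile(loss='categorical_crossentropy',
                    optimizer='rmsprop',
                    metrics=['accuracy'])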
I'm trying to build a binary classification model for color images, which contains two classes: a. intact, b. damaged.
I built the model below:
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.35))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES, name = 'output')) #output
model.add(Activation('softmax'))
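(Side note: as mentioned earlier in this thread, binary_crossentropy can be used when there are two classes. A sketch of that alternative output head, replacing everything from Flatten onward above; the compile settings are an assumption, since the original compile call is not shown:)

model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid', name='output'))  # single sigmoid unit for 2 classes
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])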
Since I didn't get the expected result, I tried a saliency map, and it showed that the model didn't detect the defects at all.
After different image processing (grayscale conversion, histogram equalization) and random augmentation methods, it still doesn't work at all.
Now I want to try PCA/ZCA preprocessing on my data. This is PCA/ZCA code I found on the internet:
from sklearn.decomposition import PCA
pca = PCA(n_components=150, whiten=True, random_state=42)
n_samples = len(X_train)
X_train = X_train.reshape((n_samples, -1))
X_train_pca = pca.fit_transform(X_train)
n_samples = len(X_test)
X_test = X_test.reshape((n_samples, -1))
X_test_pca = pca.transform(X_test)
pca = PCA(whiten=True)
transformed = pca.fit_transform(X_train)
pca.whiten = False
zca = pca.inverse_transform(transformed)
Since it transforms my data into a one-dimensional array, I got this ERROR message:
ValueError: Input 0 is incompatible with layer conv1: expected ndim=4, found ndim=2
Any idea how to build an appropriate model, or any way to modify the original one to fit my data in? Any suggestion will be appreciated.
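Since the Conv2D layers need 4-D input, one option is to reshape the whitened data back to image dimensions before feeding it to the model. A minimal sketch, assuming X_train originally has shape (n_samples, height, width, channels):

from sklearn.decomposition import PCA

orig_shape = X_train.shape            # (n_samples, height, width, channels)
X_flat = X_train.reshape((orig_shape[0], -1))

pca = PCA(whiten=True)
transformed = pca.fit_transform(X_flat)
pca.whiten = False
zca = pca.inverse_transform(transformed)  # ZCA-whitened data, still flat

# Back to 4-D so the Conv2D input layer accepts it
X_train_zca = zca.reshape(orig_shape)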