I'm trying to retrain VGG16 to classify Lego images. However, my model has a low accuracy (around 20%). What am I doing wrong? Maybe the number of FC layers is wrong, or my ImageDataGenerator settings. I have approx. 2k images per class and a total of 6 classes.
How I create the model:
def vgg16Model(self, image_shape, num_classes):
    model_VGG16 = VGG16(include_top=False, weights=None)
    model_input = Input(shape=image_shape, name='input_layer')
    output_VGG16_conv = model_VGG16(model_input)
    # Init of FC layers
    x = Flatten(name='flatten')(output_VGG16_conv)
    x = Dense(256, activation='relu', name='fc1')(x)
    output_layer = Dense(num_classes, activation='softmax', name='output_layer')(x)
    vgg16 = Model(inputs=model_input, outputs=output_layer)
    vgg16.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    vgg16.summary()
    return vgg16
I'm creating ImageDataGenerator and training:
path = "real_Legos_images/trainable_classes"
evaluate_path = "real_Legos_images/evaluation"
NN = NeuralNetwork()
gen = ImageDataGenerator(rotation_range=40, width_shift_range=0.02, shear_range=0.02,height_shift_range=0.02, horizontal_flip=True, fill_mode='nearest')
train_generator = gen.flow_from_directory(os.path.abspath(os.path.join(path)),
target_size = (224,224), color_mode = "rgb", batch_size = 16, class_mode='categorical')
validation_generator = gen.flow_from_directory(os.path.abspath(os.path.join(evaluate_path)),
target_size = (224,224), color_mode = "rgb", batch_size = 16, class_mode='categorical')
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
num_classes = len(os.listdir(os.path.abspath(os.path.join(path))))
VGG16 = NN.vgg16Model((224, 224, 3), num_classes)
VGG16.save_weights('weights.h5')
VGG16.fit_generator(train_generator, validation_data = validation_generator, validation_steps = validation_generator.n//validation_generator.batch_size,
steps_per_epoch = STEP_SIZE_TRAIN, epochs = 50)
The VGG16 model with include_top = False returns feature maps with 512 channels (7 x 7 x 512 for a 224 x 224 input). Usually we should add a GlobalAveragePooling2D or GlobalMaxPooling2D layer after it before the dense layers; flattening the maps directly gives a 25,088-dimensional vector, which makes the first fully connected layer far too large to train well.
You have also set the weights argument to None for VGG, which means your network is initialized with random weights and you are not using the pre-trained weights at all. I would suggest setting weights to 'imagenet' so that you use a VGG network whose weights were pre-trained on the ImageNet dataset:
model_VGG16 = VGG16(include_top=False, weights='imagenet')
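A minimal sketch combining both suggestions (global pooling before the dense head, ImageNet weights). Freezing the convolutional base is my addition, a common first step when the training set is small:

from keras.applications import VGG16
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

def vgg16_transfer_model(image_shape, num_classes):
    # Pre-trained convolutional base; only the new head is trained at first.
    base = VGG16(include_top=False, weights='imagenet', input_shape=image_shape)
    for layer in base.layers:
        layer.trainable = False
    x = GlobalAveragePooling2D()(base.output)  # 512-d vector instead of a 25,088-d flatten
    x = Dense(256, activation='relu', name='fc1')(x)
    output_layer = Dense(num_classes, activation='softmax', name='output_layer')(x)
    model = Model(inputs=base.input, outputs=output_layer)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model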
Related
I am fine-tuning a ResNetV2 model that I previously trained via transfer learning on 320 images from 40 classes. I am adding 1 class containing 8 images and trying to retrain the model, but I am getting a very high loss value during training. I got good accuracy (~91%) when training on the 40 classes. Please help.
base_model = models.load_model('/content/drive/MyDrive/saved_model/resnetv2.h5')
base_model.trainable = True
preds = Dense(41, activation='softmax', name='main_output')(base_model.layers[-2].output)
model = Model(inputs=base_model.input, outputs=preds)

train_datagen = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory(train_path, target_size=(224, 224),
                                                 batch_size=2, class_mode='categorical')
test_set = test_datagen.flow_from_directory(valid_path, target_size=(224, 224),
                                            batch_size=2, class_mode='categorical')

model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
training = model.fit_generator(training_set, validation_data=test_set, epochs=5, steps_per_epoch=1)
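Note that steps_per_epoch=1 with batch_size=2 means each "epoch" trains on a single batch of two images. A sketch that derives the step counts from the generators instead (my suggestion, not part of the original question):

# Cover the whole dataset once per epoch (illustrative).
steps_train = training_set.n // training_set.batch_size
steps_valid = test_set.n // test_set.batch_size
training = model.fit_generator(training_set, validation_data=test_set, epochs=5,
                               steps_per_epoch=steps_train, validation_steps=steps_valid)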
I have created a CNN to do binary classification in Keras with the following code:
def neural_network():
    classifier = Sequential()
    # Adding a first convolutional layer
    classifier.add(Convolution2D(48, 3, input_shape=(320, 320, 3), activation='relu'))
    classifier.add(MaxPooling2D())
    # Adding a second convolutional layer
    classifier.add(Convolution2D(48, 3, activation='relu'))
    classifier.add(MaxPooling2D())
    # Flattening
    classifier.add(Flatten())
    # Fully connected layers
    classifier.add(Dense(256, activation='relu'))
    classifier.add(Dense(256, activation='sigmoid'))
    classifier.add(Dense(1, activation='sigmoid'))
    # Compiling the CNN
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    classifier.summary()
    train_datagen = ImageDataGenerator(rescale=1./255,
                                       shear_range=0.2,
                                       horizontal_flip=True,
                                       vertical_flip=True,
                                       brightness_range=[0.5, 1.5])
    test_datagen = ImageDataGenerator(rescale=1./255)

    training_set = train_datagen.flow_from_directory('/content/drive/My Drive/data_sep/train',
                                                     target_size=(320, 320),
                                                     batch_size=32,
                                                     class_mode='binary')
    test_set = test_datagen.flow_from_directory('/content/drive/My Drive/data_sep/validate',
                                                target_size=(320, 320),
                                                batch_size=32,
                                                class_mode='binary')
    es = EarlyStopping(
        monitor="val_accuracy",
        mode="max",
        patience=5,  # the patience value was missing from the original snippet; 5 is a placeholder
        baseline=None,
        restore_best_weights=True,
    )
    filepath = "/content/drive/My Drive/data_sep/weightsbestval.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]
    history = classifier.fit(training_set,
                             epochs=10,
                             validation_data=test_set,
                             callbacks=[es])
    best_score = max(history.history['val_accuracy'])

    from sklearn.metrics import classification_report
    predictions = (classifier.predict(test_set) > 0.5).astype("int32")
    newlist = predictions.tolist()
    finallist = []
    for number in newlist:
        finallist.append(number[0])
    predicted_classes = np.asarray(finallist)
    true_classes = test_set.classes
    class_labels = list(test_set.class_indices.keys())
    report = classification_report(true_classes, predicted_classes, target_names=class_labels)
    accuracy = metrics.accuracy_score(true_classes, predicted_classes)
    print(true_classes)
    print(predicted_classes)
    print(class_labels)
    correct = 0
    for i in range(len(true_classes)):
        if true_classes[i] == predicted_classes[i]:
            correct = correct + 1
    print(correct)
    print((correct * 1.0) / (len(true_classes) * 1.0))
    print(report)
    return best_score
When I run the model, model.fit() reports a validation accuracy of 81.90%, but the accuracy computed after model.predict() on the validation set is only 40%. I have added a callback where the best weights are restored. So what could be the problem here?
What fixed it for me was creating a second ImageDataGenerator for the prediction step:

test2_datagen = ImageDataGenerator(rescale=1./255)
test2_set = test2_datagen.flow_from_directory('/content/drive/My Drive/data_sep/validate',
                                              target_size=(320, 320),
                                              batch_size=32,
                                              class_mode='binary',
                                              shuffle=False)

As you can see, I set shuffle=False, and I used test2_set for the prediction. I am posting this answer in case anyone has the same problem. Emphasis on the lowercase shuffle parameter; with a capital S the call will fail.
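With shuffle=False, classifier.predict(test2_set) returns predictions in the same order as test2_set.classes, so the report is computed on aligned labels. A short usage sketch reusing the names from the question:

predictions = (classifier.predict(test2_set) > 0.5).astype("int32").ravel()
true_classes = test2_set.classes  # same order as the predictions now
class_labels = list(test2_set.class_indices.keys())
print(classification_report(true_classes, predictions, target_names=class_labels))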
Since you are saving the best model in this line:

checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')

please load this model in your code and then predict:
from keras.models import load_model
loaded_model = load_model('data_sep/weightsbestval.hdf5')
Then:

loaded_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
score = loaded_model.evaluate(X_test, Y_test, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1] * 100))
I'm working on building a regressor. Currently the data appears to pass through the network correctly up until the final dense layer, where it throws an error. I'm new to these types of LSTMs and would appreciate some help troubleshooting. I've looked through a few articles and haven't found anything that seems to work.
def generate_arrays(arrays=None, targets=None):
    from random import shuffle
    while True:
        available_ids = list(range(len(arrays)))
        shuffle(available_ids)
        for i in available_ids:
            frames = []
            for image in arrays[i]:
                # load the image
                img = Image.open(image_path + image)
                # convert image to numpy array
                img = np.asarray(img) / 255
                height = 220
                width = 220
                dim = (width, height)
                img = cv2.resize(img, dim, interpolation=cv2.INTER_LINEAR)
                # add to bigger np array
                frames.append(img)
            frames = np.array(frames)
            frames = frames.reshape(-1, 3, 220, 220, 3)
            target = targets[i]
            yield (frames, target)
def build_model(frames=seq_len, channels=3, pixels_x=220, pixels_y=220):
    model = Sequential()
    model.add(
        ConvLSTM2D(filters=10,
                   kernel_size=(2, 2),
                   data_format='channels_last',
                   return_sequences=False,
                   activation='relu',
                   input_shape=(frames, pixels_x, pixels_y, channels))
    )
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dense(1, activation='linear'))
    optimizer = tf.keras.optimizers.RMSprop(0.001)
    model.compile(
        loss="mse",
        optimizer=optimizer,
        metrics=['mae', 'mse'])
    return model
def evaluate_model(train_x, train_y, test_x, test_y):
    # define model
    model = build_model()
    model.summary()
    # fit network
    model.fit_generator(
        generate_arrays(train_x, train_y),
        steps_per_epoch=len(train_x),
        validation_data=generate_arrays(test_x, test_y),
        validation_steps=len(test_x),
        epochs=100,
        verbose=1,
        shuffle=False,
        initial_epoch=0
    )
The model summary output is omitted here. The error I'm getting appears to be the generic ValueError that means something isn't being passed correctly, but I cannot figure out what it is:
ValueError: Error when checking target: expected dense_47 to have 2 dimensions, but got array with shape ()
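The error indicates that the target yielded by the generator is a scalar (shape ()), while the final Dense(1) layer expects a batch of targets. A minimal sketch of a yield that gives the target a batch dimension to match the frames array (my assumption about the fix, reusing names from the question):

# frames has shape (1, seq_len, 220, 220, 3) after the reshape,
# so the target should have shape (1,) rather than ().
frames = frames.reshape(-1, seq_len, 220, 220, 3)
target = np.array([targets[i]])
yield frames, target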
I recently started taking advantage of Keras's flow_from_dataframe() feature for a project, and decided to test it with the MNIST dataset. I have a directory full of the MNIST samples in png format, and a dataframe with the absolute directory for each in one column and the label in the other.
I'm also using transfer learning, importing VGG16 as a base and adding my own 512-node ReLU dense layer and 0.5 dropout before a 10-way softmax layer (for digits 0-9). I'm using RMSprop (lr=1e-4) as the optimizer.
When I launch my environment, it calls the latest version of keras_preprocessing from Git, which has support for absolute directories and capitalized file extensions.
My problem is that I have a very high training accuracy, and a terribly low validation accuracy. By my final epoch (10), I had a training accuracy of 0.94 and a validation accuracy of 0.01.
I'm wondering if there's something fundamentally wrong with my script? With another dataset, I'm even getting NaNs for both my training and validation loss values after epoch 4. (I checked the relevant columns, there aren't any null values!)
Here's my code. I'd be deeply appreciative if someone could glance through it and see if anything jumps out at them.
import pandas as pd
import numpy as np
import keras
from keras_preprocessing.image import ImageDataGenerator
from keras import applications
from keras import optimizers
from keras.models import Model
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras import backend as k
from keras.callbacks import ModelCheckpoint, CSVLogger
from keras.applications.vgg16 import VGG16, preprocess_input
# INITIALIZE MODEL
img_width, img_height = 32, 32
model = VGG16(weights = 'imagenet', include_top=False, input_shape = (img_width, img_height, 3))
# freeze all layers
for layer in model.layers:
layer.trainable = False
# Adding custom Layers
x = model.output
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(10, activation="softmax")(x)
# creating the final model
model_final = Model(inputs=model.input, outputs=predictions)
# compile the model
rms = optimizers.RMSprop(lr=1e-4)
#adadelta = optimizers.Adadelta(lr=0.001, rho=0.5, epsilon=None, decay=0.0)
model_final.compile(loss = "categorical_crossentropy", optimizer = rms, metrics=["accuracy"])
# LOAD AND DEFINE SOURCE DATA
train = pd.read_csv('MNIST_train.csv', index_col=0)
val = pd.read_csv('MNIST_test.csv', index_col=0)
nb_train_samples = 60000
nb_validation_samples = 10000
batch_size = 60
epochs = 10
# Initiate the train and test generators
train_datagen = ImageDataGenerator()
test_datagen = ImageDataGenerator()
train_generator = train_datagen.flow_from_dataframe(dataframe=train,
                                                    directory=None,
                                                    x_col='train_samples',
                                                    y_col='train_labels',
                                                    has_ext=True,
                                                    target_size=(img_height, img_width),
                                                    batch_size=batch_size,
                                                    class_mode='categorical',
                                                    color_mode='rgb')
validation_generator = test_datagen.flow_from_dataframe(dataframe=val,
                                                        directory=None,
                                                        x_col='test_samples',
                                                        y_col='test_labels',
                                                        has_ext=True,
                                                        target_size=(img_height, img_width),
                                                        batch_size=batch_size,
                                                        class_mode='categorical',
                                                        color_mode='rgb')
# GET CLASS INDICES
print('****************')
for cls, idx in train_generator.class_indices.items():
print('Class #{} = {}'.format(idx, cls))
print('****************')
# DEFINE CALLBACKS
path = './chk/epoch_{epoch:02d}-valLoss_{val_loss:.2f}-valAcc_{val_acc:.2f}.hdf5'
chk = ModelCheckpoint(path, monitor = 'val_acc', verbose = 1, save_best_only = True, mode = 'max')
logger = CSVLogger('./chk/training_log.csv', separator = ',', append=False)
nPlus = 1
samples_per_epoch = nb_train_samples * nPlus
# Train the model
model_final.fit_generator(train_generator,
steps_per_epoch = int(samples_per_epoch/batch_size),
epochs = epochs,
validation_data = validation_generator,
validation_steps = int(nb_validation_samples/batch_size),
callbacks = [chk, logger])
Have you tried explicitly defining the classes of the images? Like so:
train_generator=image.ImageDataGenerator().flow_from_dataframe(classes=[0,1,2,3,4,5,6,7,8,9])
in both the train and validation generators.
I have found that sometimes the train and validation generators create different correspondence dictionaries.
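A sketch of how both generators could be given the same explicit class list so their class_indices mappings match (column names taken from the question; I am assuming the labels are stored as strings):

class_list = [str(d) for d in range(10)]  # one entry per digit, same order for both generators
train_generator = train_datagen.flow_from_dataframe(dataframe=train, x_col='train_samples',
    y_col='train_labels', classes=class_list, target_size=(img_height, img_width),
    batch_size=batch_size, class_mode='categorical', color_mode='rgb')
validation_generator = test_datagen.flow_from_dataframe(dataframe=val, x_col='test_samples',
    y_col='test_labels', classes=class_list, target_size=(img_height, img_width),
    batch_size=batch_size, class_mode='categorical', color_mode='rgb')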
I have built a model to distinguish cats from dogs using Kaggle's cats_vs_dogs dataset. I have tried two approaches. For the first one, I used three existing models (ResNet50, Xception, InceptionV3) to extract features: I put the training data through these models' convolutional bases, predicted and concatenated the results, then used them for a standalone densely connected classifier. The result is pretty good; after five epochs of training, val_acc reached 99.58%. Then I wanted to use data augmentation and fine-tuning, so I extended those three models by adding layers on top and ran the whole thing end-to-end on the input data. The strange thing is that the second approach got good results in training but a lousy one in validation, and the val_acc is always a constant (0.5). I feel very confused: how can these two approaches have such different results?
Here is my code:
from keras.models import *
from keras.layers import *
from keras.applications import *
from keras.preprocessing.image import *
res_net_input = Input((224, 224, 3), name='res_net')
res_net_base_model = ResNet50(input_tensor=res_net_input, weights='imagenet', include_top=False)
for layers in res_net_base_model.layers:
layers.trainable = False
xception_input = Input((299, 299, 3), name='xception')
xception_base_model = Xception(input_tensor=xception_input, weights='imagenet', include_top=False)
for layers in xception_base_model.layers:
layers.trainable = False
inception_input = Input((299, 299, 3), name='inception')
inception_base_model = InceptionV3(input_tensor=inception_input, weights='imagenet', include_top=False)
for layers in inception_base_model.layers:
layers.trainable = False
res_result = GlobalAveragePooling2D()(res_net_base_model.output)
xcp_result = GlobalAveragePooling2D()(xception_base_model.output)
icp_result = GlobalAveragePooling2D()(inception_base_model.output)
concatenated = concatenate([res_result, xcp_result, icp_result], axis=1)
x = Dropout(0.5)(concatenated)
x = Dense(1, activation='sigmoid')(x)
model = Model([res_net_base_model.input, xception_base_model.input, inception_base_model.input], x)
model.compile(optimizer='adadelta',
loss='binary_crossentropy',
metrics=['accuracy'])
train_imgen = ImageDataGenerator(rescale=1./255,
                                 shear_range=0.2,
                                 zoom_range=0.2,
                                 rotation_range=5.,
                                 horizontal_flip=True)
validation_imgen = ImageDataGenerator(rescale=1./255)
def generate_generator_multiple(generator, dir1, batch_size, img_size1, img_size2, img_size3):
    genX1 = generator.flow_from_directory(dir1,
                                          target_size=(img_size1[0], img_size1[1]),
                                          class_mode='binary',
                                          batch_size=batch_size,
                                          shuffle=False)
    genX2 = generator.flow_from_directory(dir1,
                                          target_size=(img_size2[0], img_size2[1]),
                                          class_mode='binary',
                                          batch_size=batch_size,
                                          shuffle=False,
                                          seed=7)
    genX3 = generator.flow_from_directory(dir1,
                                          target_size=(img_size3[0], img_size3[1]),
                                          class_mode='binary',
                                          batch_size=batch_size,
                                          shuffle=False,
                                          seed=7)
    while True:
        X1i = genX1.next()
        X2i = genX2.next()
        X3i = genX3.next()
        # All three generators walk the directory in the same (unshuffled) order,
        # so the labels from the first one apply to all three inputs.
        yield [X1i[0], X2i[0], X3i[0]], X1i[1]
train_generator = generate_generator_multiple(train_imgen, '/output/keras/dog_vs_cat_full/train', 100, (224, 224), (299, 299), (299, 299))
validation_generator = generate_generator_multiple(validation_imgen, '/output/keras/dog_vs_cat_full/validation', 100, (224, 224), (299, 299), (299, 299))

history = model.fit_generator(train_generator,
                              steps_per_epoch=200,
                              epochs=5,
                              validation_data=validation_generator,
                              validation_steps=50,
                              shuffle=False)
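For contrast, a minimal sketch of the first approach described above, a standalone classifier on precomputed bottleneck features. This is my reconstruction, not the original code: res_features, xcp_features, icp_features, and labels are assumed to be arrays precomputed by running the data through the three frozen bases with global average pooling.

import numpy as np
from keras.layers import Input, Dropout, Dense
from keras.models import Model

# Each feature array has shape (num_samples, feature_dim) after global average pooling.
features = np.concatenate([res_features, xcp_features, icp_features], axis=1)

inp = Input(shape=(features.shape[1],))
x = Dropout(0.5)(inp)
out = Dense(1, activation='sigmoid')(x)
clf = Model(inp, out)
clf.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
clf.fit(features, labels, epochs=5, batch_size=100, validation_split=0.1)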