Keras Transfer Learning Issue

I have trained & saved a smaller network on my small dataset, and I want to use transfer learning.
I want to use this saved network on top of the conv part of the pretrained VGG16, specifically I want to freeze some layers of VGG but not all then I want to use the fc that I have already trained on my smaller dataset, and learn a model which is a combination of both with transferred weights.
I am following a mish and mash of tutorials: https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html and https://machinelearningmastery.com/how-to-develop-a-convolutional-neural-network-to-classify-photos-of-dogs-and-cats/ I do not just want to use pretrained features, and I do not just want to add two new layers to the VGG's conv net, as I mentioned, I want to transfer the fc layers of the smaller network and freeze all blocks of conv layers but one of VGGs and train again. Below is my code but I get an error (no matter how I tried to change around the code, I get a similar error)
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense
# path to the model weights files.
weights_path = '/home/d/Desktop/s/vgg16_weights.h5'
top_model_weights_path = '/home/d/Desktop/s/model_weights.h5'
# dimensions of our images.
img_width, img_height = 256, 256
# build the VGG16 network
base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
print('Model loaded.')
# set the first 15 layers (up to the start of the last conv block)
# to non-trainable (weights will not be updated)
for layer in base_model.layers[:15]:
    layer.trainable = False
# build a classifier model to put on top of the convolutional model
top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))
# note that it is necessary to start with a fully-trained
# classifier, including the top classifier,
# in order to successfully do fine-tuning
top_model.load_weights(top_model_weights_path)
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
# add the model on top of the convolutional base
#model.add(top_model)
top_model.summary()
# compile the model with a SGD/momentum optimizer
# and a very slow learning rate.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
train_dir = '/home/d/Desktop/s/data/train'
eval_dir = '/home/d/Desktop/s/data/eval'
test_dir = '/home/d/Desktop/s/data/test'
# create a data generator
train_datagen = ImageDataGenerator(rescale=1./255,  # scale the image between 0 and 1
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
val_datagen = ImageDataGenerator(rescale=1./255)  # we do not augment validation data; we only rescale
test_datagen = ImageDataGenerator(rescale=1./255)  # we do not augment test data; we only rescale
# load and iterate training dataset
train_generator = train_datagen.flow_from_directory(train_dir, class_mode='binary', batch_size=16, shuffle=True, seed=42)
# load and iterate validation dataset
val_generator = val_datagen.flow_from_directory(eval_dir, class_mode='binary', batch_size=16, shuffle=True, seed=42)
# load and iterate test dataset
test_generator = test_datagen.flow_from_directory(test_dir, class_mode=None, batch_size=1, shuffle=False, seed=42)  # shuffle must be a bool; the string 'False' is truthy
#The training part
#We train for 6 epochs with about 100 steps per epoch
history = model.fit_generator(train_generator,
                              steps_per_epoch=train_generator.n // train_generator.batch_size,
                              epochs=6,
                              validation_data=val_generator,
                              validation_steps=val_generator.n // val_generator.batch_size)
The error I am getting is:
Model loaded.
Traceback (most recent call last):
  File "/home/d/Desktop/s/transferLearningS.py", line 33, in <module>
    top_model.load_weights(top_model_weights_path)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/network.py", line 1166, in load_weights
    f, self.layers, reshape=reshape)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/saving.py", line 1030, in load_weights_from_hdf5_group
    str(len(filtered_layers)) + ' layers.')
ValueError: You are trying to load a weight file containing 6 layers into a model with 2 layers.
And my smaller network is built this way:
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(256, 256, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(Dropout(0.2))
model.add(layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(Dropout(0.2))
model.add(layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(Dropout(0.2))
model.add(layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(Dropout(0.2))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5)) #Dropout for regularization
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid')) #Sigmoid function at the end because we have just two classes
Any recommendations on how I can fix this issue?
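One hedged pointer, a sketch rather than a verified fix: the weight file holds the whole smaller network, whose six weighted layers (four Conv2D plus two Dense) cannot be matched index by index against top_model's two weighted layers, which is exactly what the ValueError says. A common workaround in Keras 2.x is to give layers identical names in both models and load with by_name=True; note the names below are illustrative, and a layer is only loadable this way if its weight shapes also agree (a Dense layer trained on the small network's flattened conv output will generally not shape-match one sitting on VGG16's output).

# Continuing from the code above. Hedged sketch: name the layers and load
# only the ones present in the file via by_name=True. The names 'top_dense'
# and 'top_out' are illustrative; they must match the names used when the
# smaller network was saved, and the weight shapes must agree.
top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(256, activation='relu', name='top_dense'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid', name='top_out'))

# by_name=True skips layers whose names are absent from the file, instead of
# requiring a one-to-one match of all layers.
top_model.load_weights(top_model_weights_path, by_name=True)

Alternatively, saving only the fc sub-model's weights (rather than the whole smaller network) would make the plain top_model.load_weights(...) call line up.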

Related

How do I make use of a CSV file to train a neural network in Python using TensorFlow and Keras?

I have used Label Studio to annotate my images and exported the results as a CSV file.
[image: CSV file header]
Currently I have these questions, and I hope that someone can point me in the correct direction.
How do I use the CSV files to train the neural network? (One possible approach is sketched after this list.) Online tutorials state that they train from a CSV file, but the code showing how to achieve that is very vague.
Is there a format I need to follow for the dataset files?
How do I organise the training, validation and test image sets for feeding the convolutional neural network?
Do I need to write code for the neural network to read the annotated values from the CSV file and load the original image files to train the network?
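For the CSV question, here is a hedged sketch under stated assumptions: the column names 'image' and 'choice' are guesses, since Label Studio's CSV export columns vary, so check the actual header of the export.

# Hedged sketch: read the Label Studio CSV export with pandas and pair each
# image with its label. Column names ('image', 'choice') are assumptions.
import pandas as pd
import numpy as np
import cv2

df = pd.read_csv('annotations.csv')  # hypothetical export file name

x, y = [], []
for _, row in df.iterrows():
    img = cv2.imread(row['image'])   # column holding the image path
    img = cv2.resize(img, (224, 224))
    x.append(img)
    y.append(row['choice'])          # column holding the label

x = np.array(x) / 255.0              # same normalisation as the code below
# map string labels to integer ids, as sparse_categorical_crossentropy expects
class_ids = {c: i for i, c in enumerate(sorted(set(y)))}
y = np.array([class_ids[c] for c in y])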
Currently I have tried the following code, based on an online guide with some of my own amendments: https://www.analyticsvidhya.com/blog/2021/07/step-by-step-guide-for-image-classification-on-custom-datasets/
x_test = []
for folder in os.listdir(test_path):
    sub_path = test_path + "/" + folder
    for img in os.listdir(sub_path):
        image_path = sub_path + "/" + img
        img_arr = cv2.imread(image_path)
        img_arr = cv2.resize(img_arr, (224, 224))
        x_test.append(img_arr)

x_val = []
for folder in os.listdir(val_path):
    sub_path = val_path + "/" + folder
    for img in os.listdir(sub_path):
        image_path = sub_path + "/" + img
        img_arr = cv2.imread(image_path)
        img_arr = cv2.resize(img_arr, (224, 224))
        x_val.append(img_arr)

# (x_train is built the same way from train_path; that loop is not shown in the post)
train_x = np.array(x_train)
test_x = np.array(x_test)
val_x = np.array(x_val)
The image files are converted to NumPy arrays. These images are unlabelled. The original images are DICOM files, but I converted them from DICOM to JPG using another Python script.
train_x=train_x/255.0
test_x=test_x/255.0
val_x=val_x/255.0
Then I normalise by dividing by 255.0.
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale = 1./255)
val_datagen = ImageDataGenerator(rescale = 1./255)
Then I use ImageDataGenerator to generate the tensor image data:
training_set = train_datagen.flow_from_directory(train_path,
                                                 target_size=(224, 224),
                                                 batch_size=32,
                                                 class_mode='sparse')
test_set = test_datagen.flow_from_directory(test_path,
                                            target_size=(224, 224),
                                            batch_size=32,
                                            class_mode='sparse')
val_set = val_datagen.flow_from_directory(val_path,
                                          target_size=(224, 224),
                                          batch_size=32,
                                          class_mode='sparse')
train_y=training_set.classes
test_y=test_set.classes
val_y=val_set.classes
training_set.class_indices
train_y.shape,test_y.shape,val_y.shape
After these steps, the online guide feeds the data to a pre-trained CNN; however, my objective is to train a new CNN from scratch.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(224, 224, 3), padding='valid')) #default 64
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.summary()
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='softmax')) #64
model.add(layers.Dense(256)) #10
model.add(layers.Dense(128))
model.add(layers.Dense(64))
model.add(layers.Dense(10))
model.summary()
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit_generator(training_set,
                    steps_per_epoch=len(training_set),
                    epochs=100,
                    validation_data=val_set,
                    validation_steps=len(val_set))
After 100 epochs:
[image: training results from the run above]
Please guide me on how to make this more accurate, and on how to load my annotated CSV and my original dataset to make the training of the model better.
Thank you
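One thing worth flagging in the model above, offered as a suggestion rather than a verified fix: the hidden Dense(512) uses softmax, the Dense layers after it have no activation at all, and only the final layer should be producing logits for SparseCategoricalCrossentropy(from_logits=True). A minimal sketch of a more conventional head:

# Hedged sketch of a more conventional classification head: relu in hidden
# layers, and a final linear 10-way logits layer to match
# SparseCategoricalCrossentropy(from_logits=True).
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(10))  # linear logits; the loss applies softmax internally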

Unexpected outputs from model.predict when using rescaling

First off: I'm aware of this post, but it doesn't provide an answer.
I am building my model like this:
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.preprocessing.image import ImageDataGenerator # for data augmentation
import pandas as pd # to save .csv files
from time import perf_counter # to track runtime
from keras.metrics import TrueNegatives, TruePositives, FalseNegatives, FalsePositives
def build_model(dimension):
    model = Sequential()
    model.add(Conv2D(32, (11, 11), activation='relu',
                     input_shape=(dimension, dimension, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())  # to prepare for dropout
    model.add(Dropout(0.2))  # to prevent overfitting
    model.add(Dense(256, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy',
                           TruePositives(),
                           TrueNegatives(),
                           FalsePositives(),
                           FalseNegatives()
                           ]
                  )
    return model
def train_model(epoch, batch_size, run, subrun):
    dimension = 200
    model = build_model(dimension)
    train_datagen = ImageDataGenerator(validation_split=0.2,
                                       # samplewise_std_normalization=True,
                                       rotation_range=40,
                                       rescale=1./255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       fill_mode='nearest'
                                       )
    training_set = train_datagen.flow_from_directory('train6',
                                                     target_size=(dimension, dimension),
                                                     color_mode='rgb',  # default
                                                     class_mode='binary',
                                                     batch_size=batch_size,
                                                     save_to_dir=None,
                                                     interpolation='nearest',
                                                     subset='training')
    validation_set = train_datagen.flow_from_directory('train6',
                                                       target_size=(dimension, dimension),
                                                       color_mode='rgb',  # default
                                                       class_mode='binary',
                                                       batch_size=batch_size,
                                                       save_to_dir=None,  # if a str, saves augmented images for visualisation
                                                       interpolation='nearest',
                                                       subset='validation')
    start_time = perf_counter()  # start counting
    history = model.fit_generator(training_set,
                                  epochs=epoch,
                                  steps_per_epoch=training_set.samples // batch_size,
                                  validation_data=validation_set,
                                  validation_steps=validation_set.samples // batch_size,
                                  verbose=2)
    stop_time = perf_counter()  # stop counting
    # saving trained model & history file
    model.save_weights('models/cat_dog_classifier_{0}_{1}.h5'.format(run, subrun))  # save model weights
    hist_pd = pd.DataFrame(history.history)  # making a pandas DataFrame of history.history
    hist_csv_file = 'histories/history_{0}_{1}.csv'.format(run, subrun)  # defining name for csv file
    with open(hist_csv_file, mode='w') as f:  # saving the pd file as csv
        hist_pd.to_csv(f)
    return stop_time - start_time
And I use the following code to get the probabilities:
from build_model import build_model
from keras.preprocessing import image
import numpy as np
run = 'A28'
subrun = 1
dimension = 200
# build model
model = build_model(dimension)
model.load_weights('models/cat_dog_classifier_{0}_{1}.h5'.format(run, subrun))
# Get test image ready
amount_of_images = 10
predictions = np.zeros((amount_of_images, 2))
labels = np.zeros(amount_of_images)
for i in range(amount_of_images):
    image_name = 1 + i  # choose what image to start from
    test_image = image.load_img('test1/{}.jpg'.format(image_name), target_size=(dimension, dimension))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    label = model.predict_classes(test_image, batch_size=1)
    labels[i] = label
    prediction = model.predict(test_image, batch_size=1)
    print(prediction)
print(labels)
When I train my model without the rescaling or the normalisation, the predictions are probabilities, as expected. However, when I use either, it returns only 0s and 1s (the same labels as predict_classes). I have tried running the dummy code provided in the link above and it worked as expected, which makes sense, I suppose, as the script also runs fine when I haven't used rescaling. However, I would really like to be able to use rescaling. Does anyone have any idea what is going wrong?
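A hedged hypothesis, not a confirmed diagnosis: the model was trained on inputs rescaled to [0, 1], but the prediction script above feeds raw [0, 255] pixel values, which can saturate the final sigmoid to exactly 0 or 1. A minimal sketch of the matching preprocessing at predict time:

# Continuing inside the loop above. Hedged sketch: apply the same 1/255
# rescaling at predict time that ImageDataGenerator applied during training,
# so the sigmoid sees inputs in the range it was trained on.
test_image = image.load_img('test1/{}.jpg'.format(image_name), target_size=(dimension, dimension))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
test_image = test_image / 255.0  # match the training-time rescale
prediction = model.predict(test_image, batch_size=1)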

Why does my model predict the same label?

I am training a small network and the training seems to go fine: the val loss decreases, I reach a validation accuracy around 80%, and training actually stops once there is no more improvement (patience=10). It trained for 40 epochs. However, it keeps predicting only one class for every test image! I tried to initialize the conv layers randomly, I added regularizers, I switched from Adam to SGD, I added clipvalue, and I added dropouts. I also switched to softmax (I have only two labels, but I saw some recommendations on using softmax and a Dense layer with 2 neurons). Some or one of these helped with the overfitting, but nothing worked for the prediction problem. The data is balanced, though it is a small dataset, so it doesn't make sense that it reaches 80% if it predicts the same label for the evaluation set as well.
What is wrong with my model and how can I fix it? Any comments are welcome.
#Import some packages to use
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
import os
from keras.regularizers import l2
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.initializers import RandomNormal
os.environ["CUDA_VISIBLE_DEVICES"]="0"
epochs = 200
callbacks = []
#schedule = None
decay = 0.0
earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')
mcp_save = ModelCheckpoint('.mdl_wts.hdf5', save_best_only=True, monitor='val_loss', mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1, epsilon=1e-5, mode='min')
train_dir = '/home/d/Desktop/s/data/train'
eval_dir = '/home/d/Desktop/s/data/eval'
test_dir = '/home/d/Desktop/s/data/test'
# create a data generator
train_datagen = ImageDataGenerator(rescale=1./255,  # scale the image between 0 and 1
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
val_datagen = ImageDataGenerator(rescale=1./255)  # we do not augment validation data; we only rescale
test_datagen = ImageDataGenerator(rescale=1./255)  # we do not augment test data; we only rescale
# load and iterate training dataset
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(224,224), class_mode='categorical', batch_size=16, shuffle=True, seed=42)
# load and iterate validation dataset
val_generator = val_datagen.flow_from_directory(eval_dir, target_size=(224,224), class_mode='categorical', batch_size=16, shuffle=True, seed=42)
# load and iterate test dataset
test_generator = test_datagen.flow_from_directory(test_dir, target_size=(224,224), class_mode=None, batch_size=1, shuffle=False, seed=42)  # shuffle must be a bool; the string 'False' is truthy
#We use a batch size of 16. Note: batch size is commonly a power of 2 (4, 8, 16, 32, 64, ...)
#batch_size = 4
#from keras import layers
from keras import models
from keras import optimizers
#from keras.layers import Dropout
#from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array, load_img
model = models.Sequential()
model.add(Conv2D(64, (3, 3), activation='relu', name='block1_conv1',
                 kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                 bias_initializer=RandomNormal(mean=0.0, stddev=0.05),
                 input_shape=(224, 224, 3)))
model.add(Conv2D(64, (3, 3), activation='relu', name='block1_conv2',
                 kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                 bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3), activation='relu', name='block2_conv1',
                 kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                 bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
model.add(Conv2D(128, (3, 3), activation='relu', name='block2_conv2',
                 kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                 bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
model.add(MaxPooling2D((2, 2), name='block2_pool'))
model.add(Dropout(0.2))
model.add(Conv2D(256, (3, 3), activation='relu', name='block3_conv1',
                 kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                 bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
model.add(Conv2D(256, (3, 3), activation='relu', name='block3_conv2',
                 kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                 bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
model.add(Conv2D(256, (3, 3), activation='relu', name='block3_conv3',
                 kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                 bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
model.add(MaxPooling2D((2, 2), name='block3_pool'))
model.add(Dropout(0.2))
#model.add(layers.Conv2D(512, (3, 3), activation='relu', name='block4_conv1'))
#model.add(layers.Conv2D(512, (3, 3), activation='relu', name='block4_conv2'))
#model.add(layers.Conv2D(512, (3, 3), activation='relu', name='block4_conv3'))
#model.add(layers.MaxPooling2D((2, 2), name='block4_pool'))
model.add(Flatten())
model.add(Dense(256, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu', kernel_initializer='he_uniform'))
model.add(Dropout(0.5))
model.add(Dense(2, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='softmax'))
#Lets see our model
model.summary()
#We'll use the SGD optimizer with a learning rate of 0.0001
#We'll use categorical_crossentropy loss because the labels are one-hot encoded
#model.compile(loss='binary_crossentropy', optimizer=optimizers.SGD(lr=1e-5, momentum=0.9), metrics=['acc'])
model.compile(loss='categorical_crossentropy',
              #optimizer=optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=decay),
              optimizer=optimizers.SGD(lr=0.0001, clipvalue=0.5, decay=1e-6, momentum=0.9, nesterov=True),
              metrics=['accuracy'])
#The training part
#We train for up to 200 epochs with early stopping (patience=10), about 100 steps per epoch
history = model.fit_generator(train_generator,
                              steps_per_epoch=train_generator.n // train_generator.batch_size,
                              epochs=epochs,
                              validation_data=val_generator,
                              validation_steps=val_generator.n // val_generator.batch_size,
                              callbacks=[earlyStopping, mcp_save])  # , reduce_lr_loss])
#Save the model
model.save_weights('/home/d/Desktop/s/categorical_weights.h5')
model.save('/home/d/Desktop/s/categorical_model_keras.h5')
#lets plot the train and val curve
#get the details form the history object
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
#Train and validation accuracy
plt.plot(epochs, acc, 'b', label='Training accuracy')
plt.plot(epochs, val_acc, 'r', label='Validation accuracy')
plt.title('Training and Validation accuracy')
plt.legend()
plt.figure()
#Train and validation loss
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and Validation loss')
plt.legend()
plt.show()
model.evaluate_generator(generator=val_generator, steps=val_generator.n // val_generator.batch_size)
STEP_SIZE_TEST=test_generator.n//test_generator.batch_size
test_generator.reset()
pred = model.predict_generator(test_generator,
                               steps=STEP_SIZE_TEST,
                               verbose=1)
predicted_class_indices=np.argmax(pred,axis=1)
labels = (train_generator.class_indices)
np.save('/home/d/Desktop/s/classes', labels)
labels = dict((v,k) for k,v in labels.items())
predictions = [labels[k] for k in predicted_class_indices]
filenames=test_generator.filenames
results = pd.DataFrame({"Filename": filenames,
                        "Predictions": predictions})
results.to_csv("categorical_results.csv", index=False)
One of the problems that could lead to such behavior is an imbalanced dataset. Your model found out that if it predicts the dominant class each time, it gets good results.
There are many ways to tackle an imbalanced dataset. Here is a good tutorial.
One of the easiest yet most powerful solutions is to apply a higher penalty to your loss if the model wrongly predicts the smaller class. This can be implemented in Keras by setting the class_weight parameter in the fit or fit_generator function.
It can be a dictionary, for example:
class_weight = {0: 0.75, 1: 0.25}  # does not necessarily need to add up to 1
history = model.fit_generator(train_generator,
                              steps_per_epoch=train_generator.n // train_generator.batch_size,
                              epochs=epochs,
                              class_weight=class_weight,  # this is the important part
                              validation_data=val_generator,
                              validation_steps=val_generator.n // val_generator.batch_size,
                              callbacks=[earlyStopping, mcp_save])  # , reduce_lr_loss])
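If you would rather derive the weights from the data than hand-pick them, here is a hedged sketch using sklearn (assuming the train_generator defined above):

# Hedged sketch: compute balanced class weights from the training
# generator's labels with sklearn instead of hand-picking them.
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

classes = np.unique(train_generator.classes)
weights = compute_class_weight(class_weight='balanced',
                               classes=classes,
                               y=train_generator.classes)
class_weight = dict(zip(classes, weights))  # e.g. {0: 0.9, 1: 1.1}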
Adding to Coderji's answer, it might also prove advantageous to counter class imbalance using stratified k-fold cross-validation, with k = 5 being common practice. This basically splits your data set up into k splits like regular cross-validation, but also stratifies these splits. In the case of class imbalance, each of these splits contains over-/undersampled classes compensating for their lower/higher occurrence within the data set.
As of yet, Keras does not have its own way to use stratified k-fold cross-validation. Instead it is advised to use sklearn's StratifiedKFold. This article gives a detailed overview of how to achieve this in Keras, with the gist of it being:
from sklearn.model_selection import StratifiedKFold

# Instantiate the cross validator
skf = StratifiedKFold(n_splits=kfold_splits, shuffle=True)

# Loop through the indices the split() method returns
for index, (train_indices, val_indices) in enumerate(skf.split(X, y)):
    print("Training on fold " + str(index+1) + "/10...")

    # Generate batches from indices
    xtrain, xval = X[train_indices], X[val_indices]
    ytrain, yval = y[train_indices], y[val_indices]

    # Clear model, and create it
    model = None
    model = create_model()

    # Debug message I guess
    # print("Training new iteration on " + str(xtrain.shape[0]) + " training samples, " + str(xval.shape[0]) + " validation samples, this may be a while...")

    history = train_model(model, xtrain, ytrain, xval, yval)
    accuracy_history = history.history['acc']
    val_accuracy_history = history.history['val_acc']
    print("Last training accuracy: " + str(accuracy_history[-1]) + ", last validation accuracy: " + str(val_accuracy_history[-1]))
create_model() returns a compiled Keras model.
train_model() returns the history object of its last model.fit() call.

Error when fitting 3D convolutional neural network

I am trying to train a convolutional neural network on Google Colab for a medical classification problem. The data set is 89 256x256x256 images for training and 11 for testing. When I try to train my model, I get the error shown below the code.
import keras
from keras import optimizers
import keras.models
from keras.models import Sequential
import keras.layers
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional import MaxPooling3D
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import Dense
from keras import metrics
model = Sequential()
model.add(Conv3D(64, kernel_size=(3, 3, 3),
                 activation='relu',
                 input_shape=(10, 1, 256, 256, 256)))
model.add(Conv3D(64, (2, 2, 2), activation='relu'))
model.add(MaxPooling3D(pool_size=(2, 2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))
opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(opt, loss='categorical_crossentropy', metrics=['mae', 'acc'])
model.fit(x=train_data, y=train_labels, epochs=100, batch_size=10, verbose=2,
          callbacks=None, validation_split=0.0,
          validation_data=(validation_data, validation_labels), shuffle=True)
This is the error I get:
ValueError: Input 0 is incompatible with layer conv3d_56: expected ndim=5, found ndim=6
Assuming you are using the channels_first data format, your input_shape argument to the first Conv3D layer should be (CHANNELS, HEIGHT, WIDTH, DEPTH). But your input shape tuple has a length of 5, which is not what the Conv3D layer expects. Assuming the batch size (of 10) was included by mistake, making the following change should fix the problem:
model.add(Conv3D(64, kernel_size=(3, 3, 3),
                 activation='relu',
                 input_shape=(1, 256, 256, 256)))
Edit
If you are using the channels_last data format, your input_shape should be (HEIGHT, WIDTH, DEPTH, CHANNELS). Assuming your images have 1 channel, the above line should be:
model.add(Conv3D(64, kernel_size=(3, 3, 3),
                 activation='relu',
                 input_shape=(256, 256, 256, 1)))
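As a hedged side note (this assumes train_data is currently a NumPy array of shape (num_samples, 256, 256, 256)): the data itself also needs an explicit channel axis to match the corrected input_shape, e.g.:

# Hedged sketch: add an explicit trailing channel axis so the array matches
# the channels_last input_shape (256, 256, 256, 1). Assumes train_data is
# currently shaped (num_samples, 256, 256, 256).
import numpy as np

train_data = np.expand_dims(train_data, axis=-1)            # -> (N, 256, 256, 256, 1)
validation_data = np.expand_dims(validation_data, axis=-1)  # same for validation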

How to get predicted classes when using ImageDataGenerator

I am making a CNN model for image classification (I have two classes). I am using ImageDataGenerator for data preparation and model.fit_generator for training. For testing I am using model.evaluate_generator. For the confusion matrix I am using sklearn.metrics.confusion_matrix, which requires actual and predicted classes. I have the actual classes of my test data. For prediction I am using model.predict_generator, but I don't know how to get the predicted classes from it. Generally I use model.predict_classes, but it does not work with validation_generator. My code looks like the following:
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from sklearn.metrics import confusion_matrix
model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=(50,50,1),activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(32, (2, 2),activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, (2, 2),activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(64,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
batch_size = 10
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    'data/train_data',
    target_size=(50, 50),
    batch_size=batch_size,
    class_mode='binary', color_mode='grayscale')
validation_generator = test_datagen.flow_from_directory(
    'data/test_data',
    target_size=(50, 50),
    batch_size=batch_size,
    class_mode='binary', color_mode='grayscale')
model.fit_generator(train_generator, steps_per_epoch=542, epochs=10)
print(model.evaluate_generator(validation_generator))
and I calculate the confusion matrix and other parameters with the following code (continuing from the code above), but I think it is wrong, because the validation accuracy calculated from the TP/TN formula does not match the one from model.evaluate_generator:
predict1_data=model.predict_generator(validation_generator)
predict_data=np.round(predict1_data)
print(train_generator.class_indices)
print(validation_generator.class_indices)
actual1=np.zeros(21)
actual1[13:21]=1
actual=np.float32(actual1)
cm = confusion_matrix(actual,predict_data)
TN=cm[0,0]
FP=cm[0,1]
FN=cm[1,0]
TP=cm[1,1]
SEN=TP/(TP+FN);print('SEN=',SEN)
SPE=TN/(TN+FP);print('SPE=',SPE)
ACC=(TP+TN)/(TP+TN+FP+FN);print('ACC=',ACC)
I'm trying to figure out the same thing. The closest I came is:
test_datagen = ImageDataGenerator(rescale=1. / 255)
# preprocess data for testing (resize) and create batches
validation_generator = test_datagen.flow_from_directory(
    'data/test/',
    target_size=(img_width, img_height),
    batch_size=16,
    class_mode=None,
    shuffle=False,
)
print(validation_generator.class_indices)
print(model.predict_generator(validation_generator))
The probabilities this outputs are for class 1 (not for class 0).
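Building on that, a hedged sketch of getting predicted classes (assuming shuffle=False, so the prediction order matches validation_generator.classes, and a single sigmoid output for the two classes): threshold the probabilities at 0.5, then compare against the generator's own labels:

# Hedged sketch: with shuffle=False, the prediction order matches
# validation_generator.classes, so labels line up sample by sample.
import numpy as np
from sklearn.metrics import confusion_matrix

probs = model.predict_generator(validation_generator,
                                steps=len(validation_generator))
pred_classes = (probs > 0.5).astype(int).ravel()  # sigmoid output -> 0/1 labels

actual = validation_generator.classes  # true labels, in directory order
print(confusion_matrix(actual, pred_classes))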
