KerasClassifier inside new class not functioning correctly - keras

I am using KerasClassifier to create a neural network. Normally, after calling .fit() the object still has the type wrappers.scikit_learn.KerasClassifier, and further functions such as cross_val_score and GridSearchCV then work perfectly. I am now trying to fit my code into a format that I have been given for a project, which has a predefined class to hold the NN. When I assign the KerasClassifier wrapper to this new class (or to a property of it), the object is instead of type engine.sequential.Sequential after calling .fit(), so those further functions no longer work.
I would expect the .fit() function to return an item of type KerasClassifier.
The code below is passed preprocessed training data.
class Module4_Model:
    def __init__(self):
        self.my_model = None

    def init_classifier(self):
        self.my_model = KerasClassifier(build_fn = self.build_classifier,
                                        optimizer = 'adam',
                                        n_units = 7,
                                        batch_size = 32,
                                        epochs = 100)
        return self.my_model

    def build_classifier(self, optimizer, n_units):
        self.my_model = Sequential()
        self.my_model.add(Dense(units = n_units, kernel_initializer = 'uniform', activation = 'relu', input_dim = 15))
        self.my_model.add(Dense(units = n_units, kernel_initializer = 'uniform', activation = 'relu'))
        self.my_model.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
        self.my_model.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics = ['accuracy'])
        return self.my_model

    def train_model(self, X_train, y_train):
        history = self.my_model.fit(X_train, y_train, validation_split = 0.1)
        print(type(self.my_model))
        return history

my_model = Module4_Model()
my_model.init_classifier()
history = my_model.train_model(x_train_processed, y_train_processed)
The same code moved outside of a class works as expected.

The problem is that you are using the same attribute (self.my_model) inside both init_classifier and build_classifier, which is not necessary at all. The KerasClassifier instance receives self.build_classifier as its build_fn, and that function is called every time a new underlying classifier is created inside KerasClassifier, at which point it overwrites self.my_model with a plain Sequential model.
A simple solution is to do this:
def build_classifier(self, optimizer, n_units):
    model = Sequential()
    model.add(Dense(units = n_units, kernel_initializer = 'uniform', activation = 'relu', input_dim = 15))
    model.add(Dense(units = n_units, kernel_initializer = 'uniform', activation = 'relu'))
    model.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
    model.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics = ['accuracy'])
    return model
Just do not use the same variable for two purposes and it should be fine.
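For completeness, here is a minimal, untested sketch of how the corrected class behaves; the data variables (x_train_processed, y_train_processed) come from the question, everything else is only illustrative:
from sklearn.model_selection import cross_val_score

my_model = Module4_Model()
my_model.init_classifier()                    # self.my_model is a KerasClassifier
my_model.train_model(x_train_processed, y_train_processed)
print(type(my_model.my_model))                # still wrappers.scikit_learn.KerasClassifier

# Because the wrapper keeps its type, scikit-learn utilities work as expected:
scores = cross_val_score(estimator = my_model.my_model,
                         X = x_train_processed,
                         y = y_train_processed,
                         cv = 3)
print(scores.mean())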

Related

Model.fit() Validation Accuracy different than Model.predict()

I have created a CNN to do binary classification in keras with the following code:
def neural_network():
    classifier = Sequential()
    # Adding a first convolutional layer
    classifier.add(Convolution2D(48, 3, input_shape = (320, 320, 3), activation = 'relu'))
    classifier.add(MaxPooling2D())
    # Adding a second convolutional layer
    classifier.add(Convolution2D(48, 3, activation = 'relu'))
    classifier.add(MaxPooling2D())
    # Flattening
    classifier.add(Flatten())
    # Fully connected
    classifier.add(Dense(256, activation = 'relu'))
    # Fully connected
    classifier.add(Dense(256, activation = 'sigmoid'))
    # Output layer
    classifier.add(Dense(1, activation = 'sigmoid'))
    # Compiling the CNN
    classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
    classifier.summary()

    train_datagen = ImageDataGenerator(rescale = 1./255,
                                       shear_range = 0.2,
                                       horizontal_flip = True,
                                       vertical_flip = True,
                                       brightness_range = [0.5, 1.5])
    test_datagen = ImageDataGenerator(rescale = 1./255)
    training_set = train_datagen.flow_from_directory('/content/drive/My Drive/data_sep/train',
                                                     target_size = (320, 320),
                                                     batch_size = 32,
                                                     class_mode = 'binary')
    test_set = test_datagen.flow_from_directory('/content/drive/My Drive/data_sep/validate',
                                                target_size = (320, 320),
                                                batch_size = 32,
                                                class_mode = 'binary')
    es = EarlyStopping(
        monitor = "val_accuracy",
        mode = "max",
        patience = 5,          # value was missing in the original snippet
        baseline = None,
        restore_best_weights = True,
    )
    filepath = "/content/drive/My Drive/data_sep/weightsbestval.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor = 'val_accuracy', verbose = 1, save_best_only = True, mode = 'max')
    callbacks_list = [checkpoint]
    history = classifier.fit(training_set,
                             epochs = 10,
                             validation_data = test_set,
                             callbacks = [es])
    best_score = max(history.history['val_accuracy'])

    from sklearn.metrics import classification_report
    from sklearn import metrics          # needed for metrics.accuracy_score below
    predictions = (classifier.predict(test_set) > 0.5).astype("int32")
    newlist = predictions.tolist()
    finallist = []
    for number in newlist:
        finallist.append(number[0])
    predicted_classes = np.asarray(finallist)
    true_classes = test_set.classes
    class_labels = list(test_set.class_indices.keys())
    report = classification_report(true_classes, predicted_classes, target_names = class_labels)
    accuracy = metrics.accuracy_score(true_classes, predicted_classes)
    print(true_classes)
    print(predicted_classes)
    print(class_labels)
    correct = 0
    for i in range(len(true_classes)):
        if (true_classes[i] == predicted_classes[i]):
            correct = correct + 1
    print(correct)
    print((correct * 1.0) / (len(true_classes) * 1.0))
    print(report)
    return best_score
When I run the model, model.fit() reports a validation accuracy of 81.90%, but the validation accuracy computed after model.predict() is only 40%.
I have added a callback that restores the best weights. So what could be the problem here?
What fixed it for me was creating another ImageDataGenerator and a second validation generator with shuffle = False, and using that one (test2_set) for the prediction:
test2_datagen = ImageDataGenerator(rescale = 1./255)
test2_set = test2_datagen.flow_from_directory('/content/drive/My Drive/data_sep/validate',
                                              target_size = (320, 320),
                                              batch_size = 32,
                                              class_mode = 'binary',
                                              shuffle = False)
Note the lowercase shuffle parameter: flow_from_directory does not accept Shuffle with a capital S, and with the default shuffle = True the order of the predictions no longer matches the order of test_set.classes, which is why the accuracy computed from predict() looks so low. I am posting this answer in case anyone has the same problem.
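A minimal, untested sketch (reusing the names from the question and this answer) of how the non-shuffled generator is then used for the report:
predictions = (classifier.predict(test2_set) > 0.5).astype("int32").ravel()
true_classes = test2_set.classes                      # directory order, now matches the predictions
class_labels = list(test2_set.class_indices.keys())
print(classification_report(true_classes, predictions, target_names = class_labels))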
Since you are saving the best model in this line:
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
please load this model in your code, and then predict:
from keras.models import load_model
loaded_model = load_model('data_sep/weightsbestval.hdf5')
Then
loaded_model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
score = loaded_model.evaluate(X_test, Y_test, verbose = 0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1] * 100))

Why does my Gridsearch on Keras NN - Model just loop?

I have a problem with a Keras grid search: it loops every time with the same epochs = 25 and never changes to 35.
def build_classifier(optimizer):
    classifier = Sequential()
    classifier.add(Dense(units = 3000, kernel_initializer = 'uniform', activation = 'relu', input_dim = pca_dimensions))
    classifier.add(Dense(units = 3000, kernel_initializer = 'uniform', activation = 'relu'))
    classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
    classifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics = ['accuracy'])
    return classifier

classifier = KerasClassifier(build_fn = build_classifier)
parameters = {'batch_size': [1000],
              'epochs': [25, 35, 45],
              'optimizer': ['adam']}
grid_search = GridSearchCV(estimator = classifier,
                           param_grid = parameters,
                           scoring = 'accuracy',
                           cv = 10)
grid_results = grid_search.fit(X_train, y_train)
print("Best: %f using %s" % (grid_results.best_score_, grid_results.best_params_))
means = grid_results.cv_results_['mean_test_score']
stds = grid_results.cv_results_['std_test_score']
params = grid_results.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))

Using Tensorboard to monitor training real time and visualize the model architecture

I am learning to use Tensorboard -- Tensorflow 2.0.
In particular, I would like to monitor the learning curves realtime and also to visually inspect and communicate the architecture of my model.
Below I will provide code for a reproducible example.
I have three problems:
Although I get the learning curves once the training is over, I don't know what I should do to monitor them in real time.
The learning curve I get from TensorBoard does not agree with the plot of history.history. In fact it is bizarre, and its reversals are difficult to interpret.
I cannot make sense of the graph. I have trained a sequential model with 5 dense layers and dropout layers in between. What TensorBoard shows me is something with many more elements in it.
My code is the following:
from keras.datasets import boston_housing
from keras.layers import Input, Dense, Dropout
from keras.models import Model
import keras
from datetime import datetime
import matplotlib.pyplot as plt

(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()

inputs = Input(shape = (train_data.shape[1], ))
x1 = Dense(100, kernel_initializer = 'he_normal', activation = 'elu')(inputs)
x1a = Dropout(0.5)(x1)
x2 = Dense(100, kernel_initializer = 'he_normal', activation = 'elu')(x1a)
x2a = Dropout(0.5)(x2)
x3 = Dense(100, kernel_initializer = 'he_normal', activation = 'elu')(x2a)
x3a = Dropout(0.5)(x3)
x4 = Dense(100, kernel_initializer = 'he_normal', activation = 'elu')(x3a)
x4a = Dropout(0.5)(x4)
x5 = Dense(100, kernel_initializer = 'he_normal', activation = 'elu')(x4a)
predictions = Dense(1)(x5)
model = Model(inputs = inputs, outputs = predictions)
model.compile(optimizer = 'Adam', loss = 'mse')

logdir = "logs\\fit\\" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir = logdir)

history = model.fit(train_data, train_targets,
                    batch_size = 32,
                    epochs = 20,
                    validation_data = (test_data, test_targets),
                    shuffle = True,
                    callbacks = [tensorboard_callback])

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
I think what you can do is to launch TensorBoard before calling .fit() on your model. If you are using IPython (Jupyter or Colab) and have already installed TensorBoard, here is how you can modify your code.
from keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
inputs = Input(shape = (train_data.shape[1], ))
x1 = Dense(100, kernel_initializer = 'he_normal', activation = 'relu')(inputs)
x1a = Dropout(0.5)(x1)
x2 = Dense(100, kernel_initializer = 'he_normal', activation = 'relu')(x1a)
x2a = Dropout(0.5)(x2)
x3 = Dense(100, kernel_initializer = 'he_normal', activation = 'relu')(x2a)
x3a = Dropout(0.5)(x3)
x4 = Dense(100, kernel_initializer = 'he_normal', activation = 'relu')(x3a)
x4a = Dropout(0.5)(x4)
x5 = Dense(100, kernel_initializer = 'he_normal', activation = 'relu')(x4a)
predictions = Dense(1)(x5)
model = Model(inputs = inputs, outputs = predictions)
model.compile(optimizer = 'Adam', loss = 'mse')
logdir="logs\\fit\\" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
In another cell, you can run;
# Magic func to use TensorBoard directly in IPython
%load_ext tensorboard
Launch TensorBoard by running this in another cell;
# Launch TensorBoard with objects in the log directory
# This should launch tensorboard in your browser, but you may not see your metadata.
%tensorboard --logdir=logdir
And you can finally call .fit() on your model in another cell;
history = model.fit(train_data, train_targets,
                    batch_size = 32,
                    epochs = 20,
                    validation_data = (test_data, test_targets),
                    shuffle = True,
                    callbacks = [tensorboard_callback])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
If you are not using IPython, you can launch TensorBoard from a terminal during or before training (for example with tensorboard --logdir <your log directory>) to monitor it in real time.
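If you prefer to stay inside the training script, TensorBoard can also be started programmatically. A rough, untested sketch, assuming the tensorboard Python package exposes program.TensorBoard (as in TensorBoard 2.x); adapt logdir to the directory your callback writes to:
from tensorboard import program

logdir = "logs\\fit"                       # assumption: same root directory as the TensorBoard callback
tb = program.TensorBoard()
tb.configure(argv = [None, "--logdir", logdir])
url = tb.launch()                          # starts TensorBoard in a background thread
print("TensorBoard is listening on", url)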

Grid search and KerasClassifier using class weights

I am trying to conduct a grid search using the scikit-learn RandomizedSearchCV function together with the Keras KerasClassifier wrapper for my unbalanced multi-class classification problem. However, when I try to give class_weight as an input, the fit method gives me the following error:
RuntimeError: Cannot clone object <keras.wrappers.scikit_learn.KerasClassifier object at 0x000002AA3C676710>, as the constructor either does not set or modifies parameter class_weight
Below are the functions that I use to build the KerasClassifier and the script for RandomizedSearchCV:
build_fn:
import keras as k

def build_keras_model(loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'], optimiser = 'adam',
                      learning_rate = 0.001, n_neurons = 30, n_layers = 1, n_classes = 3,
                      l1_reg = 0.001, l2_reg = 0.001, batch_norm = False, dropout = None,
                      input_shape = (8,)):
    model = k.models.Sequential()
    model.add(k.layers.Dense(n_neurons,
                             input_shape = input_shape,
                             kernel_regularizer = k.regularizers.l1_l2(l1 = l1_reg, l2 = l2_reg),
                             activation = 'relu'))
    if batch_norm is True:
        model.add(k.layers.BatchNormalization())
    if dropout is not None:
        model.add(k.layers.Dropout(dropout))
    i = 1
    while i < n_layers:
        model.add(k.layers.Dense(n_neurons,
                                 kernel_regularizer = k.regularizers.l1_l2(l1 = l1_reg, l2 = l2_reg),
                                 activation = 'relu'))
        if batch_norm is True:
            model.add(k.layers.BatchNormalization())
        if dropout is not None:
            model.add(k.layers.Dropout(dropout))
        i += 1
    del i
    model.add(k.layers.Dense(n_classes, activation = 'softmax'))
    if optimiser == 'adam':
        koptimiser = k.optimizers.Adam(lr = learning_rate)
    elif optimiser == 'adamax':
        koptimiser = k.optimizers.Adamax(lr = learning_rate)
    elif optimiser == 'nadam':
        koptimiser = k.optimizers.Nadam(lr = learning_rate)
    else:
        print('Unknown optimiser type')
    model.compile(optimizer = koptimiser, loss = loss, metrics = metrics)
    model.summary()
    return model
Script:
import scipy as sp
from sklearn.utils.class_weight import compute_class_weight
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import RandomizedSearchCV

parameters = {
    'optimiser': ['adam', 'adamax', 'nadam'],
    'learning_rate': sp.stats.uniform(0.0005, 0.0015),
    'epochs': sp.stats.randint(500, 1501),
    'n_neurons': sp.stats.randint(20, 61),
    'n_layers': sp.stats.randint(1, 3),
    'n_classes': [3],
    'batch_size': sp.stats.randint(1, 11),
    'l1_reg': sp.stats.reciprocal(1e-3, 1e1),
    'l2_reg': sp.stats.reciprocal(1e-3, 1e1),
    'batch_norm': [False],
    'dropout': [None],
    'metrics': [['accuracy']],
    'loss': ['sparse_categorical_crossentropy'],
    'input_shape': [(training_features.shape[1],)]
}

class_weights = compute_class_weight('balanced', np.unique(training_targets),
                                     training_targets[target_label[0]])
class_weights = dict(enumerate(class_weights))

keras_model = KerasClassifier(build_fn = build_keras_model, verbose = 0, class_weight = class_weights)
clf = RandomizedSearchCV(keras_model, parameters, n_iter = 1, scoring = 'f1_micro',
                         n_jobs = 1, cv = 5, random_state = random_state)
clf.fit(training_features, training_targets.values[:, 0])
model = clf.best_estimator_
To pass class weights in this scenario with KerasClassifier, they should be passed to the fit method, from where they are forwarded to the Keras model:
grid_result = clf.fit(training_features, training_targets.values[:, 0], class_weight=class_weights)
In older versions it was necessary to pass them with the clf__ prefix:
grid_result = clf.fit(training_features, training_targets.values[:, 0], clf__class_weight=class_weights)
When using a KerasClassifier, class weights (even with grid search) have to be supplied through the fit_params functionality, because build_fn only builds the model and does not accept such arguments:
classifier = KerasClassifier(build_fn = build_classifier, epochs = 20, batch_size = 128)
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 3,
                             n_jobs = -1, verbose = 0,
                             fit_params = {'callbacks': [EarlyStopping()],
                                           'class_weight': class_weights})

KerasClassifier for use with build_fn which takes arguments

I am attempting to wrap my keras models in scikit learn GridSearchCV and Pipeline structures for hyperparameter tuning.
It works absolutely fine when the build_fn function takes no arguments for use in KerasClassifier. However, it fails whenever I use a function that takes arguments.
Example code below
def prepare_classifier(x, y):
    shape_of_input = x.shape
    shape_of_target = y.shape
    classifier = Sequential()
    ## number of neurons = 30
    ## kernel_initializer determines how the weights are initialized
    ## activation is the activation function at this particular hidden layer
    ## input_shape is the number of features in a single row.. in this case it is shape_of_input[1]
    ## shape_of_input[0] is the total number of such rows
    classifier.add(Dense(units = 30, activation = 'relu', kernel_initializer = 'uniform', input_dim = shape_of_input[1]))
    classifier.add(Dense(units = 30, activation = 'relu', kernel_initializer = 'uniform'))
    ## we are predicting 10 digits for each row of x.
    ## in total there are shape_of_input[0] rows in total
    classifier.add(Dense(10, activation = 'softmax'))
    ## categorical_crossentropy is the loss function for multi output loss function
    classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
    return classifier

def fit(classifier, x_train, y_train, epoch_size, batch_size = 10):
    pipeline = Pipeline([
        ('keras_classifier', classifier)
    ])
    param_grid = {
        'keras_classifier__batch_size': [10, 20, 30, 50],
        'keras_classifier__epochs': [100, 200, 300],
        'keras_classifier__x': [x_train],
        'keras_classifier__y': [y_train],
    }
    grid = GridSearchCV(estimator = pipeline, param_grid = param_grid, n_jobs = -1)
    grid.fit(x_train, y_train)
    print("Best parameters are : ", grid.best_params_, '\n grid best score :', grid.best_score_)

classifier = KerasClassifier(build_fn = prepare_classifier, x = x_train[0:100], y = y_train)
fit(classifier, x_train[:100], y_train, epoch_size)
This is for some x and y data (p.s. I have used MNIST data).
The error I get is :
RuntimeError: Cannot clone object , as the constructor either does not set or modifies parameter x
However if my prepare_classifier function takes no arguments code works absolutely fine.
What am I doing incorrectly?
Solved it. Essentially the line below was the issue:
classifier = KerasClassifier(build_fn = prepare_classifier, x = x_train[0:100], y = y_train )
needed to be changed to
classifier = KerasClassifier(build_fn = prepare_classifier)
and the arguments for prepare_classifier need to be sent using param_grid, as sketched below.
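A minimal, untested sketch of what that looks like, keeping the names from the question and assuming the Pipeline step is still called keras_classifier:
# The wrapper is created without build_fn arguments, so scikit-learn can clone it.
classifier = KerasClassifier(build_fn = prepare_classifier)
pipeline = Pipeline([('keras_classifier', classifier)])

# Arguments for prepare_classifier travel through param_grid instead of the constructor.
param_grid = {
    'keras_classifier__batch_size': [10, 20, 30, 50],
    'keras_classifier__epochs': [100, 200, 300],
    'keras_classifier__x': [x_train[:100]],
    'keras_classifier__y': [y_train],
}

grid = GridSearchCV(estimator = pipeline, param_grid = param_grid, n_jobs = -1)
grid.fit(x_train[:100], y_train)
print(grid.best_params_, grid.best_score_)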
