Use custom function with custom parameters in keras callback

I am training a model in Keras and I want to plot graphs of results after each epoch. I know that Keras callbacks provide an "on_epoch_end" method that can be overridden if one wants to do some computation after each epoch, but my function takes some additional parameters, and supplying them crashes the code with a metaclass error. The details are given below.
Here is how I am doing it right now, which is working fine:
class NewCallback(Callback):
    def on_epoch_end(self, epoch, logs={}):  # working fine, prints the epoch number after each epoch
        print("EPOCH IS: " + str(epoch))

epochs = 5
batch_size = 16
model_saved = False
if model_saved:
    vae.load_weights(args.weights)
else:
    # train the autoencoder
    vae.fit(x_train,
            epochs=epochs,
            batch_size=batch_size,
            validation_data=(x_test, None),
            callbacks=[NewCallback()])
But I want my callback function like this:

class NewCallback(Callback, models, data, batch_size):
    def on_epoch_end(self, epoch, logs={}):
        print("EPOCH IS: " + str(epoch))
        x = models.predict(data)
        plt.plot(x)
        plt.savefig(epoch + ".png")
If I call it like this in fit:
callbacks=[NewCallback(models, data, batch_size=batch_size)]
I get this error:
TypeError: metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases
I am looking for a simpler way to call my function, or a way to get this metaclass error resolved; any help will be much appreciated!

I think what you would like to do is define a class that descends from Callback and takes models, data, etc. as constructor arguments. So:
class NewCallback(Callback):
    """NewCallback descends from Callback.
    """
    def __init__(self, models, data, batch_size):
        """Save params in constructor.
        """
        self.models = models
        self.data = data
        self.batch_size = batch_size

    def on_epoch_end(self, epoch, logs={}):
        x = self.models.predict(self.data)

In case you want to make predictions on the test data, you can try this:

class CustomCallback(keras.callbacks.Callback):
    def __init__(self, model, x_test, y_test):
        self.model = model
        self.x_test = x_test
        self.y_test = y_test

    def on_epoch_end(self, epoch, logs={}):
        # predict() takes only the inputs; y_test is kept for later comparison
        y_pred = self.model.predict(self.x_test)
        print('y predicted: ', y_pred)
You need to mention the callback during model.fit:

model = Sequential()
# your model architecture
model.fit(x_train, y_train, epochs=10,
          callbacks=[CustomCallback(model, x_test, y_test)])
Similar to on_epoch_end, there are many other methods provided by Keras:

on_train_begin, on_train_end, on_epoch_begin, on_epoch_end, on_test_begin,
on_test_end, on_predict_begin, on_predict_end, on_train_batch_begin, on_train_batch_end,
on_test_batch_begin, on_test_batch_end, on_predict_batch_begin, on_predict_batch_end
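Any subset of these hooks can be overridden on a single Callback subclass. A rough sketch (the print messages are purely illustrative):

class VerboseCallback(keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        print("Starting training")

    def on_epoch_begin(self, epoch, logs=None):
        print("Starting epoch " + str(epoch))

    def on_train_batch_end(self, batch, logs=None):
        print("Finished batch " + str(batch) + ", loss: " + str(logs.get('loss')))

    def on_train_end(self, logs=None):
        print("Training finished")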

Related

Getting the training data in every batch keras

I would like to know if it is possible to get the set of training data used in every batch in Keras.
It's easy to get y_true and y_pred, but I want to know the set of training data used to predict in that batch.
def my_loss(y_true, y_pred):
    loss = K.mean(K.abs(y_true - y_pred))
    return loss

model.compile(loss=my_loss, optimizer='rmsprop', metrics=['mae'])
This is OK, but I want something like this:

def my_loss(y_true, y_pred, x_train):
    ...

which fails with:

my_loss() missing 1 required positional argument: 'x_train'
Thanks for any help
If you want to pass parameters other than y_true and y_pred, you can define your custom loss like this:
def custom_loss(x_train):
    def my_loss(y_true, y_pred):
        loss = K.mean(K.abs(y_true - y_pred))
        # do something with x_train
        return loss
    return my_loss
While compiling, you can pass a tensor with the same shape as your x_train:

input_tensor = Input(shape=input_shape)  # specify your input shape, same as x_train
model.compile(loss=custom_loss(input_tensor), optimizer='rmsprop', metrics=['mae'])

This is how you can define your custom loss. If you further want the current batch of x_train, batching is something you'll have to handle yourself. Finally, while training, you can use model.train_on_batch.
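Putting the answer's pieces together, a minimal sketch (layer sizes and the way x_train enters the loss are illustrative; this closure pattern dates from TF1-era Keras, and on recent TF2 versions model.add_loss is the more robust route for input-dependent losses):

from tensorflow.keras import Input, Model
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense

inputs = Input(shape=(10,))
outputs = Dense(1)(Dense(32, activation='relu')(inputs))
model = Model(inputs, outputs)

def custom_loss(x_train):
    def my_loss(y_true, y_pred):
        loss = K.mean(K.abs(y_true - y_pred))
        # illustrative use of the input batch: add its mean as a penalty term
        return loss + 0.01 * K.mean(x_train)
    return my_loss

model.compile(loss=custom_loss(inputs), optimizer='rmsprop', metrics=['mae'])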

How to visualize a metric callback in TensorBoard?

I have a model in Keras in which I use my custom metric as:

class MyMetrics(keras.callbacks.Callback):
    def __init__(self):
        initial_value = 0

    def on_train_begin(self, logs={}):
        ...

    def on_epoch_end(self, batch, logs={}):
        # here I calculate my important values
        ...
Now, is there a way to visualize them in TensorBoard?
For example, if my metric were something like:

def mymetric(y_true, y_pred):
    return myImportantValues

I could visualize it in TensorBoard through:

mymodel.compile(..., metrics=[mymetric])

Is there something similar with a metric callback?
I tried to create a function inside the class MyMetrics and pass it to mymodel.compile, but it does not update the values.
You can create an event file with the custom metrics and visualize it in TensorBoard directly.
This works for TensorFlow 2.0. In this example, the accuracy/metrics are logged from the training history. In your case, you can do it from the on_epoch_end callback.
import datetime
import tensorflow as tf

current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/train/' + current_time
train_summary_writer = tf.summary.create_file_writer(train_log_dir)

history = model.fit(x=X, y=y, epochs=100, verbose=1)

for epoch in range(len(history.history['accuracy'])):
    with train_summary_writer.as_default():
        tf.summary.scalar('loss', history.history['loss'][epoch], step=epoch)
        tf.summary.scalar('accuracy', history.history['accuracy'][epoch], step=epoch)
After the script executes, run:

tensorboard --logdir logs/train
https://www.tensorflow.org/tensorboard/r2/get_started#using_tensorboard_with_other_methods
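A minimal sketch of the on_epoch_end variant mentioned above; the logged value here just reuses the epoch loss as a stand-in for whatever you compute:

import tensorflow as tf
from tensorflow import keras

class TensorBoardMetric(keras.callbacks.Callback):
    def __init__(self, log_dir):
        super().__init__()
        self.writer = tf.summary.create_file_writer(log_dir)

    def on_epoch_end(self, epoch, logs={}):
        my_important_value = logs.get('loss')  # stand-in for your computation
        with self.writer.as_default():
            tf.summary.scalar('my_metric', my_important_value, step=epoch)
        self.writer.flush()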
You need to create a custom callback first:
import numpy as np
import tensorflow as tf
from sklearn.metrics import f1_score
from tensorflow.keras.callbacks import Callback

class CustomLogs(Callback):
    def __init__(self, validation_data=()):
        super().__init__()
        self.X_val, self.y_val = validation_data

    def on_train_begin(self, logs={}):
        # at the start of training, create an f1_scores list on the model
        self.model.f1_scores = []

    def on_epoch_end(self, epoch, logs={}):
        # calculating micro_avg_f1_score
        val_predict_proba = np.array(self.model.predict(self.X_val))
        val_predict = np.round(val_predict_proba)
        val_targ = self.y_val
        # using scikit-learn f1_score
        f1 = f1_score(val_targ, val_predict, average='micro')
        # appending f1_scores for every epoch
        self.model.f1_scores.append(f1)
        print('micro_f1_score: ', f1)
# initialize your callback with validation data
customLogs = CustomLogs(validation_data=(X_test, Y_test))

# no change in the compile method
model.compile(optimizer='Adam', loss='CategoricalCrossentropy')

# pass customLogs and validation_data in the fit method
model.fit(X_train,
          Y_train,
          batch_size=32,
          validation_data=(X_test, Y_test),
          callbacks=[customLogs],
          epochs=20)

# after fit, access the f1_scores
f1_scores = model.f1_scores
# writing the summary to tensorboard
log_dir = '/log'
writer = tf.summary.create_file_writer(log_dir)
for idx in range(len(f1_scores)):
    with writer.as_default(step=idx + 1):
        tf.summary.scalar('f1_scores', f1_scores[idx])
writer.flush()
Now launch: tensorboard --logdir /log.
You can see the plot of f1_scores in the TensorBoard scalars tab.

EarlyStopping callback not working while using a custom callback

I am working on a project where I am using a custom callback together with the EarlyStopping callback, and my model's training does not stop even though val_loss is not improving much.
Here is my implementation:
class CustomCallback(keras.callbacks.Callback):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def on_epoch_end(self, epoch, logs={}):
        y_pred = self.model.predict(self.x)
        error_rate = np.sum(self.y == y_pred)
        print(f'Error number:: {error_rate}')
        logs['error_rate'] = error_rate

early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=2)
custom_callback = CustomCallback(X_data, y_data)

model.fit(train_data, y_train, epochs=100, batch_size=32,
          validation_data=(cv_data, y_cv), callbacks=[early_stop, custom_callback])
What is wrong in my implementation?
Why not use a custom metric instead of a callback?
def error_rate(y_true, y_pred):
    rate = K.cast(K.equal(y_true, y_pred), K.floatx())
    return K.sum(rate)
Are you passing label numbers or one-hot tensors as y? Usually you should round first (nothing will be equal if you don't):
def error_rate(y_true, y_pred):
    y_pred = K.cast(K.greater(y_pred, 0.5), K.floatx())
    rate = K.cast(K.equal(y_true, y_pred), K.floatx())
    return K.sum(rate)
Use it as a metric:
model.compile(......, metrics=[error_rate, ...])
Try passing the min_delta argument to EarlyStopping with some value, so that an absolute change smaller than min_delta counts as no improvement and training stops.
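For example (the 0.001 threshold is purely illustrative and depends on the scale of your loss):

early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1,
                           patience=2, min_delta=0.001)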

How to add BatchNormalization loss to gradient calculation in tensorflow 2.0 using keras subclass API

Using the Keras subclass API it is easy enough to add a batch normalization layer; however, the layer.losses list always appears empty. What is the correct method of including it in the training loss when doing tape.gradient(loss, lossmodel.trainable_variables), where lossmodel is some separate Keras subclass model defining a more complicated loss function that must include the gradient losses?
For example, this is a minimal model with ONLY the batch norm layer. It has no loss, AFAIK:
class M(tf.keras.Model):
    def __init__(self, axis):
        super().__init__()
        self.layer = tf.keras.layers.BatchNormalization(
            axis=axis, scale=False, center=True,
            virtual_batch_size=1, input_shape=(6,))

    def call(self, x):
        out = self.layer(x)
        return out

m = M(1)

In [77]: m.layer.losses
Out[77]: []
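For comparison, layer.losses is generally populated by regularizers or add_loss calls rather than by BatchNormalization itself (BN contributes updates to its moving statistics, not loss terms). A minimal sketch of a layer that does populate losses:

import tensorflow as tf

layer = tf.keras.layers.Dense(
    4, kernel_regularizer=tf.keras.regularizers.l2(0.01))
_ = layer(tf.zeros((1, 6)))  # calling the layer builds it, creating the loss
print(layer.losses)  # one-element list holding the L2 penalty tensor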

Keras: how to output learning rate onto tensorboard

I added a callback to decay the learning rate:
keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=100,
                                  verbose=0, mode='auto', epsilon=0.00002,
                                  cooldown=20, min_lr=0)
Here is my tensorboard callback:
keras.callbacks.TensorBoard(log_dir='./graph/rank{}'.format(hvd.rank()),
                            histogram_freq=10, batch_size=FLAGS.batch_size,
                            write_graph=True, write_grads=True, write_images=False)
I want to make sure the learning rate scheduler has kicked in during training, so I want to output the learning rate onto tensorboard. But I can not find where I can set it.
I also checked the optimizer api, but no luck.
keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
How can I output the learning rate to TensorBoard?
According to the author of Keras, the proper way is to subclass the TensorBoard callback:
from keras import backend as K
from keras.callbacks import TensorBoard

class LRTensorBoard(TensorBoard):
    # add other arguments to __init__ if you need
    def __init__(self, log_dir, **kwargs):
        super().__init__(log_dir=log_dir, **kwargs)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs.update({'lr': K.eval(self.model.optimizer.lr)})
        super().on_epoch_end(epoch, logs)
Then pass it as part of the callbacks argument to model.fit (credit Finncent Price):
model.fit(x=..., y=..., callbacks=[LRTensorBoard(log_dir="/tmp/tb_log")])
Note that with the current nightly version of tf (2.5, probably earlier) learning rates set via a LearningRateSchedule are automatically added to TensorBoard's logs. The following solution is only necessary if you're adapting the learning rate some other way, e.g. via the ReduceLROnPlateau or LearningRateScheduler (not to be confused with LearningRateSchedule) callbacks.
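For reference, a minimal sketch of that automatically-logged schedule route (ExponentialDecay and its parameters are just one illustrative choice):

import tensorflow as tf

schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=1e-3, decay_steps=1000, decay_rate=0.9)
optimizer = tf.keras.optimizers.Adam(learning_rate=schedule)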
While extending tf.keras.callbacks.TensorBoard is a viable option, I prefer composition over subclassing.
class LearningRateLogger(tf.keras.callbacks.Callback):
    def __init__(self):
        super().__init__()
        self._supports_tf_logs = True

    def on_epoch_end(self, epoch, logs=None):
        if logs is None or "learning_rate" in logs:
            return
        logs["learning_rate"] = self.model.optimizer.lr
This allows us to compose multiple similar callbacks, and use the logged learning rate in multiple other callbacks (e.g. if you add a CSVLogger it should also write the learning rate values to file).
Then in model.fit
model.fit(
    callbacks=[
        LearningRateLogger(),
        # other callbacks that update `logs`
        tf.keras.callbacks.TensorBoard(path),
        # other callbacks that use updated logs, e.g. CSVLogger
    ],
    **kwargs
)
You gave the optimizer's code twice, instead of the TensorBoard callback. Anyway, I didn't find a way to display the learning rate on TensorBoard.
I plot it after training finishes, taking data from the History object:
import matplotlib.pyplot as plt

nb_epoch = len(history1.history['loss'])
learning_rate = history1.history['lr']
xc = range(nb_epoch)

plt.figure(3, figsize=(7, 5))
plt.plot(xc, learning_rate)
plt.xlabel('num of Epochs')
plt.ylabel('learning rate')
plt.title('Learning rate')
plt.grid(True)
plt.style.use(['seaborn-ticks'])
The chart looks like this: [LR plot]
Sorry, that is not exactly what you are asking about, but perhaps could help.
class XTensorBoard(TensorBoard):
    def on_epoch_begin(self, epoch, logs=None):
        # get values
        lr = float(K.get_value(self.model.optimizer.lr))
        decay = float(K.get_value(self.model.optimizer.decay))
        # compute lr
        lr = lr * (1. / (1 + decay * epoch))
        K.set_value(self.model.optimizer.lr, lr)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        super().on_epoch_end(epoch, logs)

callbacks_list = [XTensorBoard('./logs')]
model.fit(X_train, y_train, validation_data=(X_test, y_test),
          epochs=20, batch_size=32, verbose=2, callbacks=callbacks_list)
[lr curve in tensorboard]
For TensorFlow 2.5, if you have some custom learning rate scheduler:
import numpy as np
import tensorflow as tf

class LearningRateLogger(tf.keras.callbacks.Callback):
    def __init__(self, log_dir):
        super().__init__()
        self._supports_tf_logs = True
        self.log_dir = log_dir

    def set_model(self, model):
        self.model = model
        self.writer = tf.summary.create_file_writer(self.log_dir)

    def on_epoch_end(self, epoch, logs=None):
        if logs is None or "learning_rate" in logs:
            return
        logs["learning_rate"] = self.model.optimizer.lr
        self._write_logs(logs, epoch)

    def _write_logs(self, logs, index):
        with self.writer.as_default():
            for name, value in logs.items():
                if name in ['batch', 'size']:
                    continue
                if isinstance(value, np.ndarray):
                    tf.summary.scalar(name, value.item(), step=index)
                else:
                    tf.summary.scalar(name, value, step=index)
            self.writer.flush()
Then pass the callback in your model.fit:
model.fit(x=..., y=..., callbacks=[LearningRateLogger(log_dir="/path/to/folder/where/tensorboard/is/logging")])
