Keras: how to output learning rate onto TensorBoard

I added a callback to decay the learning rate:
keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=100,
                                  verbose=0, mode='auto', epsilon=0.00002,
                                  cooldown=20, min_lr=0)
Here is my TensorBoard callback:
keras.callbacks.TensorBoard(log_dir='./graph/rank{}'.format(hvd.rank()), histogram_freq=10,
                            batch_size=FLAGS.batch_size, write_graph=True,
                            write_grads=True, write_images=False)
I want to make sure the learning rate scheduler has kicked in during training, so I want to output the learning rate to TensorBoard, but I cannot find where to set this up.
I also checked the optimizer API, but had no luck:
keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
How can I output the learning rate to TensorBoard?

According to the author of Keras, the proper way is to subclass the TensorBoard callback:
from keras import backend as K
from keras.callbacks import TensorBoard


class LRTensorBoard(TensorBoard):
    # add other arguments to __init__ if you need them
    def __init__(self, log_dir, **kwargs):
        super().__init__(log_dir=log_dir, **kwargs)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs.update({'lr': K.eval(self.model.optimizer.lr)})
        super().on_epoch_end(epoch, logs)
Then pass it as part of the callbacks argument to model.fit (credit Finncent Price):
model.fit(x=..., y=..., callbacks=[LRTensorBoard(log_dir="/tmp/tb_log")])

Note that with the current nightly version of TF (2.5, and probably earlier) learning rates set via a LearningRateSchedule are automatically added to TensorBoard's logs. The following solution is only necessary if you're adapting the learning rate some other way, e.g. via the ReduceLROnPlateau or LearningRateScheduler (not to be confused with LearningRateSchedule) callbacks.
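For example (a minimal sketch, assuming a recent tf.keras; the schedule parameters and log directory are placeholders of my own), a schedule attached directly to the optimizer needs no extra callback:
import tensorflow as tf

# Assumed setup: with a LearningRateSchedule driving the optimizer, recent TF
# versions report the learning rate to TensorBoard without a custom callback.
schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=1e-3, decay_steps=1000, decay_rate=0.9)
optimizer = tf.keras.optimizers.Adam(learning_rate=schedule)
# model.compile(optimizer=optimizer, loss=...)
# model.fit(..., callbacks=[tf.keras.callbacks.TensorBoard(log_dir="/tmp/tb_log")])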
While extending tf.keras.callbacks.TensorBoard is a viable option, I prefer composition over subclassing.
import tensorflow as tf


class LearningRateLogger(tf.keras.callbacks.Callback):
    def __init__(self):
        super().__init__()
        self._supports_tf_logs = True

    def on_epoch_end(self, epoch, logs=None):
        if logs is None or "learning_rate" in logs:
            return
        logs["learning_rate"] = self.model.optimizer.lr
This allows us to compose multiple similar callbacks, and use the logged learning rate in multiple other callbacks (e.g. if you add a CSVLogger it should also write the learning rate values to file).
Then in model.fit:
model.fit(
    callbacks=[
        LearningRateLogger(),
        # other callbacks that update `logs`
        tf.keras.callbacks.TensorBoard(path),
        # other callbacks that use updated logs, e.g. CSVLogger
    ],
    **kwargs
)

Anyway, I didn't find a way to display the learning rate on TensorBoard directly.
I plot it after training has finished, taking the data from the History object:
import matplotlib.pyplot as plt

nb_epoch = len(history1.history['loss'])
learning_rate = history1.history['lr']
xc = range(nb_epoch)

plt.figure(3, figsize=(7, 5))
plt.plot(xc, learning_rate)
plt.xlabel('num of Epochs')
plt.ylabel('learning rate')
plt.title('Learning rate')
plt.grid(True)
plt.style.use(['seaborn-ticks'])
The chart looks like this: [LR plot]
Sorry, that is not exactly what you are asking about, but perhaps it could help.
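If you do want those History values in TensorBoard as well, a small post-hoc sketch (the log directory is my own placeholder) can write them out as scalars, much like the tf.summary approach used in a later answer. Note that 'lr' only appears in the History when a callback such as ReduceLROnPlateau has logged it.
import tensorflow as tf

# Hypothetical log directory; history1 is the History object from above.
writer = tf.summary.create_file_writer('./graph/lr')
with writer.as_default():
    for epoch, lr in enumerate(history1.history['lr']):
        tf.summary.scalar('learning_rate', lr, step=epoch)
writer.flush()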

from keras import backend as K
from keras.callbacks import TensorBoard


class XTensorBoard(TensorBoard):
    def on_epoch_begin(self, epoch, logs=None):
        # get the current values
        lr = float(K.get_value(self.model.optimizer.lr))
        decay = float(K.get_value(self.model.optimizer.decay))
        # compute the time-decayed lr and write it back to the optimizer
        lr = lr * (1. / (1 + decay * epoch))
        K.set_value(self.model.optimizer.lr, lr)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        super().on_epoch_end(epoch, logs)
callbacks_list = [XTensorBoard('./logs')]
model.fit(X_train, y_train, validation_data=(X_test, y_test),
          epochs=20, batch_size=32, verbose=2, callbacks=callbacks_list)
The lr curve then shows up in TensorBoard: [lr curve in TensorBoard]

For TensorFlow 2.5, if you have a custom learning rate scheduler:
import numpy as np
import tensorflow as tf


class LearningRateLogger(tf.keras.callbacks.Callback):
    def __init__(self, log_dir):
        super().__init__()
        self._supports_tf_logs = True
        self.log_dir = log_dir

    def set_model(self, model):
        self.model = model
        self.sess = tf.compat.v1.keras.backend.get_session()
        self.writer = tf.summary.create_file_writer(self.log_dir)

    def on_epoch_end(self, epoch, logs=None):
        if logs is None or "learning_rate" in logs:
            return
        logs["learning_rate"] = self.model.optimizer.lr
        self._write_logs(logs, epoch)

    def _write_logs(self, logs, index):
        with self.writer.as_default():
            for name, value in logs.items():
                if name in ['batch', 'size']:
                    continue
                if isinstance(value, np.ndarray):
                    tf.summary.scalar(name, value.item(), step=index)
                else:
                    tf.summary.scalar(name, value, step=index)
        self.writer.flush()
Then pass the callback in your model.fit:
model.fit(x=..., y=..., callbacks=[LearningRateLogger(log_dir="/path/to/folder/where/tensorboard/is/logging")])
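To inspect the logged scalar, point TensorBoard at the same directory, for example:
tensorboard --logdir /path/to/folder/where/tensorboard/is/logging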

Related

How to get learning rate of AdamW optimizer (using multioptimizer)

I am using the AdamW optimizer with two different learning rates: one for the pre-trained layers and one for the custom layers.
import tensorflow_addons as tfa

lr = 1e-3
wd = 1e-4 * lr

optimizers = [
    tfa.optimizers.AdamW(learning_rate=pre_trained_layer_lr, weight_decay=wd),
    tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
]
optimizers_and_layers = [(optimizers[0], base_model.layers[0]),
                         (optimizers[1], base_model.layers[1:])]
optimizer = tfa.optimizers.MultiOptimizer(optimizers_and_layers)
Now I want to visualize this learning rate during model training.
Below is the code that I am using:
from keras import backend as K
from keras.callbacks import TensorBoard


class LRTensorBoard(TensorBoard):
    # add other arguments to __init__ if you need them
    def __init__(self, log_dir, **kwargs):
        super().__init__(log_dir=log_dir, **kwargs)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs.update({'lr': K.eval(self.model.optimizer.lr)})
        super().on_epoch_end(epoch, logs)


# Using the class in model.fit
model.fit(...., callbacks=[LRTensorBoard(path)])
But I couldn't use model.optimizer.lr, since this attribute is not present in the optimizer above.
I found some information related to the optimizer using:
model.optimizer.optimizer_specs[0]
But I am not able to find the different learning rates associated with this optimizer.
How do I get the learning rates for the pre-trained layers and the custom layers when using the AdamW optimizer?
model.optimizer.optimizer_specs is a list of dictionaries containing info for each of your optimizers. You can access your first optimizer object via model.optimizer.optimizer_specs[0]['optimizer'], and consequently its learning rate via model.optimizer.optimizer_specs[0]['optimizer'].lr.
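Building on that, a sketch (the class name and log tags are my own) of a TensorBoard callback that logs every wrapped optimizer's learning rate each epoch could look like this:
from keras import backend as K
from keras.callbacks import TensorBoard


class MultiLRTensorBoard(TensorBoard):
    """Hypothetical LRTensorBoard variant for tfa.optimizers.MultiOptimizer."""

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # one scalar per wrapped optimizer, e.g. lr_0 for the pre-trained layers
        for i, spec in enumerate(self.model.optimizer.optimizer_specs):
            logs['lr_{}'.format(i)] = K.eval(spec['optimizer'].lr)
        super().on_epoch_end(epoch, logs)


# model.fit(..., callbacks=[MultiLRTensorBoard(log_dir=path)])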

How to access the number of steps per epoch in a Keras Lambda Callback

I would like to know how to access the number of batches per epoch from inside a Keras lambda callback, that is, the value passed to the steps_per_epoch parameter of the model.fit function.
Below is my custom callback:
(I want to fill the ??????? in batch_per_epoch = ???????)
import json
from datetime import datetime

import keras


class MyBatchLogger(keras.callbacks.Callback):
    def __init__(self):
        super().__init__()
        self._current_epoch = 0

    def on_epoch_begin(self, epoch, logs=None):
        self._current_epoch = epoch

    def on_epoch_end(self, epoch, logs=None):
        print("Epoch end", logs)

    def on_batch_end(self, batch, logs={}):
        batch_per_epoch = ???????
        acc = logs["acc"].item()
        loss = logs["loss"].item()
        mae = logs["mean_absolute_error"].item()
        ca = logs["categorical_accuracy"].item()
        print(json.dumps({
            "timestamp": datetime.now().isoformat(),
            "epoch": self._current_epoch,
            "batch": batch,
            "batchPerEpoch": batch_per_epoch,
            "accuracy": acc,
            "meanAbsoluteError": mae,
            "categoricalAccuracy": ca,
            "loss": loss
        }))
I'm using Keras 2.2.5 with TensorFlow 1.14.1, but I'm OK with updating if necessary.
The answer might come a bit late, but I've spent some time digging for it, so maybe it's helpful anyway.
The information you need is here:
self.params.get('steps')
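A minimal sketch of that (the class name is mine): self.params is filled in by fit(), so the value is available from any callback hook once training has started.
from keras.callbacks import Callback


class StepsLogger(Callback):
    def on_epoch_begin(self, epoch, logs=None):
        # 'steps' holds the number of batches per epoch as seen by fit()
        print('batches per epoch:', self.params.get('steps'))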

How to visualize a metric callback in TensorBoard?

I have a model in Keras in which I use my custom metric as:
class MyMetrics(keras.callbacks.Callback):
    def __init__(self):
        initial_value = 0

    def on_train_begin(self, logs={}):
        ...

    def on_epoch_end(self, batch, logs={}):
        # here I calculate my important values
        ...
Now, is there a way to visualize them in TensorBoard?
For example, if my metric were something like:
def mymetric(y_true, y_pred):
    return myImportantValues
I could visualize them in TensorBoard through:
mymodel.compile(..., metrics=mymetric)
Is there something similar with a metric callback?
I tried to create a function inside the MyMetrics class and pass it to mymodel.compile, but it does not update the values.
You can create an event file with the custom metrics and visualize it in TensorBoard directly.
This works for TensorFlow 2.0. In this example, the accuracy/metrics are logged from the training history. In your case, you can do it from the on_epoch_end callback.
import datetime

import tensorflow as tf

current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/train/' + current_time
train_summary_writer = tf.summary.create_file_writer(train_log_dir)

history = model.fit(x=X, y=y, epochs=100, verbose=1)

for epoch in range(len(history.history['accuracy'])):
    with train_summary_writer.as_default():
        tf.summary.scalar('loss', history.history['loss'][epoch], step=epoch)
        tf.summary.scalar('accuracy', history.history['accuracy'][epoch], step=epoch)
After script execution,
tensorboard --logdir logs/train
https://www.tensorflow.org/tensorboard/r2/get_started#using_tensorboard_with_other_methods
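A rough sketch of the same idea from inside on_epoch_end (the class name and the stand-in metric are assumptions; replace the placeholder with whatever you compute in your callback):
import tensorflow as tf


class MetricToTensorBoard(tf.keras.callbacks.Callback):
    def __init__(self, log_dir):
        super().__init__()
        self.writer = tf.summary.create_file_writer(log_dir)

    def on_epoch_end(self, epoch, logs=None):
        # compute your important value here; val_loss is just a placeholder
        value = (logs or {}).get('val_loss', 0.0)
        with self.writer.as_default():
            tf.summary.scalar('my_custom_metric', value, step=epoch)
        self.writer.flush()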
You need to create a custom callback first:
import numpy as np
import tensorflow as tf
from keras.callbacks import Callback
from sklearn.metrics import f1_score


class CustomLogs(Callback):
    def __init__(self, validation_data=()):
        super().__init__()
        self.X_val, self.y_val = validation_data

    def on_train_begin(self, logs={}):
        # at the start of training, create a list to collect f1 scores
        self.model.f1_scores = []

    def on_epoch_end(self, epoch, logs={}):
        # calculate the micro-averaged f1 score on the validation data
        val_predict_proba = np.array(self.model.predict(self.X_val))
        val_predict = np.round(val_predict_proba)
        val_targ = self.y_val
        # using scikit-learn's f1_score
        f1 = f1_score(val_targ, val_predict, average='micro')
        # append the f1 score for every epoch
        self.model.f1_scores.append(f1)
        print('micro_f1_score: ', f1)


# initialize your callback with validation data
customLogs = CustomLogs(validation_data=(X_test, Y_test))

# no change in the compile method
model.compile(optimizer='Adam', loss='CategoricalCrossentropy')

# pass customLogs and validation_data in the fit method
model.fit(X_train,
          Y_train,
          batch_size=32,
          validation_data=(X_test, Y_test),
          callbacks=[customLogs],
          epochs=20)

# after fit, access the f1_scores
f1_scores = model.f1_scores

# write the summary for TensorBoard
log_dir = '/log'
writer = tf.summary.create_file_writer(log_dir)
for idx in range(len(f1_scores)):
    with writer.as_default(step=idx + 1):
        tf.summary.scalar('f1_scores', f1_scores[idx])
writer.flush()
Now launch: tensorboard --logdir /log
You can see the plot of f1_scores under the TensorBoard Scalars tab.

Use a custom function with custom parameters in a Keras callback

I am training a model in Keras and I want to plot graphs of results after each epoch. I know that Keras callbacks provide an "on_epoch_end" method that can be overridden if one wants to do some computations after each epoch, but my function takes some additional parameters, and passing them crashes the code with a metaclass error. The details are given below:
Here is how I am doing it right now, which works fine:
class NewCallback(Callback):
    def on_epoch_end(self, epoch, logs={}):  # working fine, printing epoch after each epoch
        print("EPOCH IS: " + str(epoch))


epochs = 5
batch_size = 16
model_saved = False
if model_saved:
    vae.load_weights(args.weights)
else:
    # train the autoencoder
    vae.fit(x_train,
            epochs=epochs,
            batch_size=batch_size,
            validation_data=(x_test, None),
            callbacks=[NewCallback()])
But I want my callback class to be like this:
class NewCallback(Callback, models, data, batch_size):
    def on_epoch_end(self, epoch, logs={}):
        print("EPOCH IS: " + str(epoch))
        x = models.predict(data)
        plt.plot(x)
        plt.savefig(epoch + ".png")
If I call it like this in fit:
callbacks=[NewCallback(models, data, batch_size=batch_size)]
I get this error:
TypeError: metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases
I am looking for a simpler way to call my function, or for a way to resolve this metaclass error. Any help will be much appreciated!
I think that what you would like to do is define a class that descends from Callback and takes models, data, etc. as constructor arguments. So:
class NewCallback(Callback):
    """NewCallback descends from Callback."""

    def __init__(self, models, data, batch_size):
        """Save params in the constructor."""
        super().__init__()
        self.models = models
        self.data = data
        self.batch_size = batch_size

    def on_epoch_end(self, epoch, logs={}):
        x = self.models.predict(self.data)
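With the parameters stored on self, the fit call from the question should then work as written:
vae.fit(x_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(x_test, None),
        callbacks=[NewCallback(models, data, batch_size=batch_size)])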
In case you want to make predictions on the test data, you can try this:
class CustomCallback(keras.callbacks.Callback):
    def __init__(self, model, x_test, y_test):
        super().__init__()
        self.model = model
        self.x_test = x_test
        self.y_test = y_test

    def on_epoch_end(self, epoch, logs={}):
        y_pred = self.model.predict(self.x_test)
        print('y predicted: ', y_pred)
You need to mention the callback during model.fit:
# define your model architecture, then:
model.fit(x_train, y_train, epochs=10,
          callbacks=[CustomCallback(model, x_test, y_test)])
Similar to on_epoch_end, there are many other methods provided by Keras:
on_train_begin, on_train_end, on_epoch_begin, on_epoch_end, on_test_begin,
on_test_end, on_predict_begin, on_predict_end, on_train_batch_begin, on_train_batch_end,
on_test_batch_begin, on_test_batch_end, on_predict_batch_begin, on_predict_batch_end
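A purely illustrative sketch overriding a few of these hooks:
import keras


class VerboseCallback(keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        print('training started')

    def on_train_batch_end(self, batch, logs=None):
        print('finished batch', batch, logs)

    def on_train_end(self, logs=None):
        print('training finished')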

Keras record loss and accuracy of train and test for each batch

I am using Keras to train a CNN and I need to record the accuracy and loss for each batch. Is there any way to save these statistics? Below is the code I am using, but the accuracy is None. It also looks like the callback is suppressing the progress bar.
class Histories(keras.callbacks.Callback):
    def __init__(self, test_data):
        self.test_data = test_data

    def on_train_begin(self, logs={}):
        self.train_acc = []
        self.test_acc = []
        self.train_loss = []
        self.test_loss = []

    def on_batch_end(self, batch, logs={}):
        train_loss_batch = logs.get('loss')
        train_acc_batch = logs.get('accuracy')
        self.train_loss.append(train_loss_batch)
        self.train_acc.append(train_acc_batch)
        print('\nTrain loss: {}, acc: {}\n'.format(train_loss_batch, train_acc_batch))

        x, y = self.test_data
        test_loss_batch, test_acc_batch = self.model.evaluate(x, y, verbose=0)
        self.test_loss.append(test_loss_batch)
        self.test_acc.append(test_acc_batch)
        print('\nTesting loss: {}, acc: {}\n'.format(test_loss_batch, test_acc_batch))
To use the callback:
histories = my_callbacks.Histories((x_test, y_test))
model.fit(x_train_reduced, y_train_reduced, batch_size, epochs, verbose=1, callbacks=[histories])
I have the same problem. Each time after computing the gradients on a batch, I need to compute the loss on the validation set and on the test set.
In the Keras API there are two notable parameters:
steps_per_epoch, validation_steps
They set the number of steps for the epoch and for validation, respectively. So I wanted to set the size of the epoch to 20 examples, thereby artificially equating it to the batch_size. After that I created a callback, which runs every time a batch finishes:
class LossHistory(Callback):
    def __init__(self):
        super().__init__()
        self.losses = []
        self.val_losses = []

    def on_train_begin(self, logs=None):
        self.losses = []
        self.val_losses = []

    def on_batch_end(self, batch, logs=None):
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
I wrote about this bug here; so far I'm waiting for an answer. But I still have a problem that requires a solution.
Since there is no simple answer to this question, a workaround is needed. For this, you can use the members of the Callback class: the validation set is available there, and you can evaluate on it. It is done like this:
class LossHistory(Callback):
    def __init__(self):
        super().__init__()
        self.losses = []
        self.val_losses = []

    def on_train_begin(self, logs=None):
        self.losses = []
        self.val_losses = []

    def on_batch_end(self, batch, logs=None):
        self.losses.append(logs.get('loss'))
        self.val_losses.append(self.model.evaluate(self.validation_data[0], self.validation_data[1]))
P.S. logs.get('val_loss') is only computed after each epoch, so for the first batches of the first epoch it will be None.
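Note that self.validation_data is not guaranteed to be populated on callbacks in newer tf.keras versions, so a more portable variant (a sketch, following the Histories callback above) passes the validation data in explicitly:
from keras.callbacks import Callback


class LossHistory(Callback):
    def __init__(self, validation_data):
        super().__init__()
        self.x_val, self.y_val = validation_data
        self.losses = []
        self.val_losses = []

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        self.val_losses.append(
            self.model.evaluate(self.x_val, self.y_val, verbose=0))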
