How to visualize a metric callback in TensorBoard? - keras

I have a model in Keras in which I use my custom metric as a callback:

class MyMetrics(keras.callbacks.Callback):
    def __init__(self):
        initial_value = 0
    def on_train_begin(self, logs={}):
        ...
    def on_epoch_end(self, epoch, logs={}):
        # here I calculate my important values
        ...

Now, is there a way to visualize them in TensorBoard?
For example, if my metric were something like:

def mymetric(y_true, y_pred):
    return myImportantValues

I could visualize it in TensorBoard through

mymodel.compile(..., metrics=[mymetric])

Is there something similar with a metric callback?
I tried to create a function inside the class MyMetrics and pass it to mymodel.compile, but it does not update the values.

You can create an event file with the custom metrics and visualize it in TensorBoard directly.
This works for TensorFlow 2.0. In this example, the accuracy/loss metrics are logged from the training history. In your case, you can do it from the on_epoch_end callback.
import datetime
import tensorflow as tf

current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/train/' + current_time
train_summary_writer = tf.summary.create_file_writer(train_log_dir)

history = model.fit(x=X, y=y, epochs=100, verbose=1)

for epoch in range(len(history.history['accuracy'])):
    with train_summary_writer.as_default():
        tf.summary.scalar('loss', history.history['loss'][epoch], step=epoch)
        tf.summary.scalar('accuracy', history.history['accuracy'][epoch], step=epoch)
After the script has run, launch TensorBoard:
tensorboard --logdir logs/train
https://www.tensorflow.org/tensorboard/r2/get_started#using_tensorboard_with_other_methods
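Alternatively, here is a minimal sketch of logging the value directly from on_epoch_end with a tf.summary file writer, assuming TF 2.x; my_important_value and the 'logs/train/metrics' directory are placeholders for whatever you actually compute and wherever you want to log:

import tensorflow as tf
import keras

class MyMetrics(keras.callbacks.Callback):
    def __init__(self, log_dir):
        super().__init__()
        # one file writer per callback instance
        self.writer = tf.summary.create_file_writer(log_dir)

    def on_epoch_end(self, epoch, logs={}):
        # my_important_value stands in for whatever you compute here
        my_important_value = 0.0
        with self.writer.as_default():
            tf.summary.scalar('my_metric', my_important_value, step=epoch)
        self.writer.flush()

Pass MyMetrics(log_dir='logs/train/metrics') in the callbacks list of model.fit and point tensorboard --logdir at the same directory.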

You need to create a custom callback first:
import numpy as np
from sklearn.metrics import f1_score
from keras.callbacks import Callback

class CustomLogs(Callback):
    def __init__(self, validation_data=()):
        super(CustomLogs, self).__init__()
        self.X_val, self.y_val = validation_data

    def on_train_begin(self, logs={}):
        # at the beginning of training, create a list to hold the f1 scores
        self.model.f1_scores = []

    def on_epoch_end(self, epoch, logs={}):
        # calculate the micro-averaged f1 score
        val_predict_proba = np.array(self.model.predict(self.X_val))
        val_predict = np.round(val_predict_proba)
        val_targ = self.y_val
        # using scikit-learn's f1_score
        f1 = f1_score(val_targ, val_predict, average='micro')
        # append the f1 score for every epoch
        self.model.f1_scores.append(f1)
        print('micro_f1_score: ', f1)
# initialize your callback with validation data
customLogs = CustomLogs(validation_data=(X_test, Y_test))

# no change in the compile method
model.compile(optimizer='Adam', loss='CategoricalCrossentropy')

# pass customLogs and validation_data in the fit method
model.fit(X_train,
          Y_train,
          batch_size=32,
          validation_data=(X_test, Y_test),
          callbacks=[customLogs],
          epochs=20)

# after fit, access the f1_scores
f1_scores = model.f1_scores

# write the summary for TensorBoard
log_dir = '/log'
writer = tf.summary.create_file_writer(log_dir)
for idx in range(len(f1_scores)):
    with writer.as_default(step=idx + 1):
        tf.summary.scalar('f1_scores', f1_scores[idx])
writer.flush()

Now launch: tensorboard --logdir /log
You can see the plot of f1_scores under the TensorBoard Scalars tab.

Related

How to make a custom validation_step in TensorFlow 2 / Keras?

I have a question regarding the validation data.
I have this neural network and I divided my data into train_generator, val_generator, test_generator.
I made a custom model with a custom fit:

class MyModel(tf.keras.Model):
    def __init__(self):
    def __call__(.....)
    def train_step(....)

Then I have:

train_generator = DataGenerator(....)
val_generator = DataGenerator(....)
test_generator = DataGenerator(....)

Then:

model = MyModel()
model.compile(optimizer=keras.optimizers.Adam(clipnorm=5.),
              metrics=["accuracy"])
model.fit(train_generator, validation_data=val_generator, epochs=40)

The program gives me no errors.
But my question is: how can I know what happens with my validation data?
Is it processed the same way as the training data (train_generator) in the train_step function?
Or do I need to specify how to process the validation data?
If it helps, I will also leave the MyModel class here:
class MyModel(tf.keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.dec2 = Decoder2()

    def __call__(self, y_hat, **kwargs):
        print(y_hat.shape)
        z_hat = self.dec2(y_hat)
        return z_hat

    def train_step(self, dataset):
        with tf.GradientTape() as tape:
            y_hat = dataset[0]
            z_true = dataset[1]
            z_pred = self(y_hat, training=True)
            # print("This is z_true : ", z_true.shape)
            # print("This is z_pred : ", z_pred.shape)
            loss = tf.reduce_mean(tf.abs(tf.cast(z_pred, tf.float64) - tf.cast(z_true, tf.float64)))
            print("loss: ", loss)
            global_loss.append(loss)
        # Compute gradients. I still need to add gradient clipping here.
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(z_true, z_pred)
        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}
You have to add a test_step(self, data) method to your MyModel class, as you can see here: Providing your own evaluation step
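A minimal sketch of what such a test_step could look like for this model, reusing the loss from your train_step; the exact metric handling is an assumption based on the code above:

# added inside class MyModel(tf.keras.Model):
def test_step(self, dataset):
    # unpack the validation batch the same way as in train_step
    y_hat = dataset[0]
    z_true = dataset[1]
    # forward pass without gradient tracking
    z_pred = self(y_hat, training=False)
    # same loss as in train_step, only for reporting
    loss = tf.reduce_mean(tf.abs(tf.cast(z_pred, tf.float64) - tf.cast(z_true, tf.float64)))
    # update the compiled metrics so they show up as val_* in the logs
    self.compiled_metrics.update_state(z_true, z_pred)
    results = {m.name: m.result() for m in self.metrics}
    results["loss"] = loss
    return results

With a test_step defined, the batches from val_generator passed via validation_data are run through this method at the end of each epoch; without it, Keras uses its default evaluation logic, not your train_step.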

How can I use the LBFGS optimizer with pytorch ignite?

I started using Ignite recently and I found it very interesting.
I would like to train a model using as an optimizer the LBFGS algorithm from the torch.optim module.
This is my code:
import torch
from ignite.engine import Events, Engine, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import RootMeanSquaredError, Loss
from ignite.handlers import EarlyStopping

D_in, H, D_out = 5, 10, 1
model = simpleNN(D_in, H, D_out)  # a simple MLP with 1 hidden layer
model.double()
train_loader, val_loader = get_data_loaders(i)
optimizer = torch.optim.LBFGS(model.parameters(), lr=1)
loss_func = torch.nn.MSELoss()

# Ignite
trainer = create_supervised_trainer(model, optimizer, loss_func)
evaluator = create_supervised_evaluator(model, metrics={'RMSE': RootMeanSquaredError(), 'LOSS': Loss(loss_func)})

@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
    print("Epoch[{}] Loss: {:.5f}".format(engine.state.epoch, engine.state.output))

def score_function(engine):
    val_loss = engine.state.metrics['RMSE']
    print("VAL_LOSS: {:.5f}".format(val_loss))
    return -val_loss

handler = EarlyStopping(patience=10, score_function=score_function, trainer=trainer)
evaluator.add_event_handler(Events.COMPLETED, handler)

trainer.run(train_loader, max_epochs=100)
And the error that is raised is:
TypeError: step() missing 1 required positional argument: 'closure'
I know that it is required to define a closure for the LBFGS implementation, so my question is: how can I do it using Ignite? Or is there another approach for doing this?
The way to do it is like this:

from ignite.engine import Engine

model = ...
optimizer = torch.optim.LBFGS(model.parameters(), lr=1)
criterion = ...

def update_fn(engine, batch):
    model.train()
    x, y = batch
    # pass to device if needed as here: https://github.com/pytorch/ignite/blob/40d815930d7801b21acfecfa21cd2641a5a50249/ignite/engine/__init__.py#L45

    def closure():
        y_pred = model(x)
        loss = criterion(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        return loss

    optimizer.step(closure)

trainer = Engine(update_fn)
# everything else is the same
Source
You need to encapsulate the whole evaluation step, including zero_grad and returning the loss, in a closure:
for batch in loader():
    def closure():
        ...
        return loss
    optim.step(closure)
PyTorch docs for 'closure'
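For reference, a self-contained sketch (plain PyTorch, outside Ignite, with a made-up toy model and data) of the closure pattern that LBFGS expects; the closure re-evaluates the loss so the optimizer can call it several times per step:

import torch

# toy model and data, purely illustrative
model = torch.nn.Linear(5, 1)
x = torch.randn(32, 5)
y = torch.randn(32, 1)

criterion = torch.nn.MSELoss()
optimizer = torch.optim.LBFGS(model.parameters(), lr=1)

for step in range(10):
    def closure():
        # LBFGS may call this several times per optimizer.step()
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        return loss

    loss = optimizer.step(closure)
    print(step, float(loss))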

EarlyStopping callback does not work while using a custom callback

I am working on a project where I am using a custom callback together with the EarlyStopping callback; with this setup my model training does not stop even though val_loss is not improving much.
Here is my implementation:
class CustomCallback(keras.callbacks.Callback):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def on_epoch_end(self, epoch, logs={}):
        y_pred = self.model.predict(self.x)
        error_rate = np.sum(self.y == y_pred)
        print(f'Error number:: {error_rate}')
        logs['error_rate'] = error_rate

early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=2)
custom_callback = CustomCallback(X_data, y_data)

model.fit(train_data, y_train, epochs=100, batch_size=32, validation_data=(cv_data, y_cv), callbacks=[early_stop, custom_callback])
What is wrong in my implementation?
Why not use a custom metric instead of a callback?
def error_rate(y_true, y_pred):
rate = K.cast(K.equal(y_true, y_pred), K.floatx())
return keras.backend.sum(rate)
Are you passing label numbers or one hot tensors as y?? Usually it should be rounding first (there will be nothing equal if you don't)
def error_rate(y_true, y_pred):
y_pred = K.cast(K.greater(y_pred, 0.5), K.floatx())
ate = K.cast(K.equal(y_true, y_pred), K.floatx())
return keras.backend.sum(rate)
Use it as a metric:
model.compile(......, metrics=[error_rate, ...])
Try passing the min_delta argument to EarlyStopping with some value, so that an absolute change smaller than min_delta counts as no improvement and training stops.
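For example, a small sketch of that configuration (the 0.001 threshold is just an illustrative value):

from keras.callbacks import EarlyStopping

# treat improvements in val_loss smaller than 0.001 as no improvement
early_stop = EarlyStopping(monitor='val_loss', mode='min', min_delta=0.001,
                           patience=2, verbose=1)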

Use custom function with custom parameters in keras callback

I am training a model in Keras and I want to plot graphs of results after each epoch. I know that Keras callbacks provide an "on_epoch_end" method that can be overridden if one wants to do some computation after each epoch, but my function takes some additional parameters which, when given, crash the code with a metaclass error. The details are given below:
Here is how I am doing it right now, which is working fine:
class NewCallback(Callback):
    def on_epoch_end(self, epoch, logs={}):  # working fine, printing the epoch after each epoch
        print("EPOCH IS: " + str(epoch))

epochs = 5
batch_size = 16
model_saved = False
if model_saved:
    vae.load_weights(args.weights)
else:
    # train the autoencoder
    vae.fit(x_train,
            epochs=epochs,
            batch_size=batch_size,
            validation_data=(x_test, None),
            callbacks=[NewCallback()])
But I want my callback class to be like this:

class NewCallback(Callback, models, data, batch_size):
    def on_epoch_end(self, epoch, logs={}):
        print("EPOCH IS: " + str(epoch))
        x = models.predict(data)
        plt.plot(x)
        plt.savefig(str(epoch) + ".png")
If I call it like this in fit:
callbacks=[NewCallback(models, data, batch_size=batch_size)]
I get this error:
TypeError: metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases
I am looking for a simpler way to call my function, or to get this metaclass error resolved; any help will be much appreciated!
I think that what you would like to do is to define a class that descends from Callback and takes models, data, etc. as constructor arguments. So:

class NewCallback(Callback):
    """NewCallback descends from Callback"""
    def __init__(self, models, data, batch_size):
        """Save params in constructor"""
        self.models = models
        self.data = data
        self.batch_size = batch_size

    def on_epoch_end(self, epoch, logs={}):
        x = self.models.predict(self.data)
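Putting it together with the plotting from the question, a sketch of the full callback; the matplotlib calls mirror the original snippet and are only an assumption about what you want to save each epoch:

import matplotlib.pyplot as plt
from keras.callbacks import Callback

class NewCallback(Callback):
    def __init__(self, models, data, batch_size):
        super().__init__()
        self.models = models
        self.data = data
        self.batch_size = batch_size

    def on_epoch_end(self, epoch, logs={}):
        # predict with the stored model/data and save one figure per epoch
        x = self.models.predict(self.data, batch_size=self.batch_size)
        plt.plot(x)
        plt.savefig(str(epoch) + ".png")
        plt.close()

It is then instantiated exactly as in the question: callbacks=[NewCallback(models, data, batch_size=batch_size)].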
In case you want to make predictions on the test data, you can try this:

class CustomCallback(keras.callbacks.Callback):
    def __init__(self, model, x_test, y_test):
        self.model = model
        self.x_test = x_test
        self.y_test = y_test

    def on_epoch_end(self, epoch, logs={}):
        y_pred = self.model.predict(self.x_test)
        print('y predicted: ', y_pred)
You need to mention the callback during model.fit:

model = Sequential()
# ... your model architecture
model.fit(x_train, y_train, epochs=10,
          callbacks=[CustomCallback(model, x_test, y_test)])
Similar to on_epoch_end, there are many other methods provided by Keras callbacks (a short sketch using a few of them follows the list):
on_train_begin, on_train_end, on_epoch_begin, on_epoch_end, on_test_begin,
on_test_end, on_predict_begin, on_predict_end, on_train_batch_begin, on_train_batch_end,
on_test_batch_begin, on_test_batch_end, on_predict_batch_begin,on_predict_batch_end
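As a rough, hypothetical illustration of hooking into a few of these methods (the printed messages are placeholders):

class VerboseCallback(keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        print("training started")

    def on_epoch_begin(self, epoch, logs=None):
        print("starting epoch", epoch)

    def on_test_end(self, logs=None):
        # runs after each evaluation pass (e.g. validation at epoch end)
        print("evaluation finished")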

Keras: how to output learning rate onto tensorboard

I added a callback to decay the learning rate:
keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=100,
                                  verbose=0, mode='auto', epsilon=0.00002, cooldown=20, min_lr=0)

Here is my tensorboard callback:

keras.callbacks.TensorBoard(log_dir='./graph/rank{}'.format(hvd.rank()), histogram_freq=10, batch_size=FLAGS.batch_size,
                            write_graph=True, write_grads=True, write_images=False)
I want to make sure the learning rate scheduler has kicked in during training, so I want to output the learning rate to TensorBoard, but I cannot find where to set it.
I also checked the optimizer API, but no luck:

keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)

How can I output the learning rate to TensorBoard?
According to the author of Keras, the proper way is to subclass the TensorBoard callback:
from keras import backend as K
from keras.callbacks import TensorBoard

class LRTensorBoard(TensorBoard):
    # add other arguments to __init__ if you need
    def __init__(self, log_dir, **kwargs):
        super().__init__(log_dir=log_dir, **kwargs)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs.update({'lr': K.eval(self.model.optimizer.lr)})
        super().on_epoch_end(epoch, logs)
Then pass it as part of the callbacks argument to model.fit (credit Finncent Price):
model.fit(x=..., y=..., callbacks=[LRTensorBoard(log_dir="/tmp/tb_log")])
Note that with the current nightly version of tf (2.5 - probably earlier as well), learning rates set via a LearningRateSchedule are automatically added to TensorBoard's logs. The following solution is only necessary if you're adapting the learning rate some other way - e.g. via the ReduceLROnPlateau or LearningRateScheduler (different from LearningRateSchedule) callbacks.
While extending tf.keras.callbacks.TensorBoard is a viable option, I prefer composition over subclassing.
class LearningRateLogger(tf.keras.callbacks.Callback):
    def __init__(self):
        super().__init__()
        self._supports_tf_logs = True

    def on_epoch_end(self, epoch, logs=None):
        if logs is None or "learning_rate" in logs:
            return
        logs["learning_rate"] = self.model.optimizer.lr
This allows us to compose multiple similar callbacks, and use the logged learning rate in multiple other callbacks (e.g. if you add a CSVLogger it should also write the learning rate values to file).
Then in model.fit
model.fit(
    callbacks=[
        LearningRateLogger(),
        # other callbacks that update `logs`
        tf.keras.callbacks.TensorBoard(path),
        # other callbacks that use updated logs, e.g. CSVLogger
    ],
    **kwargs
)
You gave the optimizer's code twice, instead of the TensorBoard callback. Anyway, I didn't find a way to display the learning rate on TensorBoard.
I am plotting it after the training has finished, taking the data from the History object:
nb_epoch = len(history1.history['loss'])
learning_rate=history1.history['lr']
xc=range(nb_epoch)
plt.figure(3,figsize=(7,5))
plt.plot(xc,learning_rate)
plt.xlabel('num of Epochs')
plt.ylabel('learning rate')
plt.title('Learning rate')
plt.grid(True)
plt.style.use(['seaborn-ticks'])
The chart looks like this:
LR plot
Sorry, that is not exactly what you are asking about, but perhaps could help.
class XTensorBoard(TensorBoard):
    def on_epoch_begin(self, epoch, logs=None):
        # get values
        lr = float(K.get_value(self.model.optimizer.lr))
        decay = float(K.get_value(self.model.optimizer.decay))
        # compute the decayed lr
        lr = lr * (1. / (1 + decay * epoch))
        K.set_value(self.model.optimizer.lr, lr)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        super().on_epoch_end(epoch, logs)

callbacks_list = [XTensorBoard('./logs')]
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=32, verbose=2, callbacks=callbacks_list)
lr curve in tensorboard
For TensorFlow 2.5, if you have some custom learning rate scheduler:
class LearningRateLogger(tf.keras.callbacks.Callback):
    def __init__(self, log_dir):
        super().__init__()
        self._supports_tf_logs = True
        self.log_dir = log_dir

    def set_model(self, model):
        self.model = model
        self.sess = tf.compat.v1.keras.backend.get_session()
        self.writer = tf.summary.create_file_writer(self.log_dir)

    def on_epoch_end(self, epoch, logs=None):
        if logs is None or "learning_rate" in logs:
            return
        logs["learning_rate"] = self.model.optimizer.lr
        self._write_logs(logs, epoch)

    def _write_logs(self, logs, index):
        with self.writer.as_default():
            for name, value in logs.items():
                if name in ['batch', 'size']:
                    continue
                if isinstance(value, np.ndarray):
                    tf.summary.scalar(name, value.item(), step=index)
                else:
                    tf.summary.scalar(name, value, step=index)
            self.writer.flush()
Then for calling the callback in your model.fit:
model.fit(x=..., y=..., callbacks=[LearningRateLogger(log_dir="/path/to/folder/where/tensorboard/is/logging")])
