How to get the logit matrix from a customized CNN?

This is my model:
engine1 = tf.keras.applications.Xception(
    # Exclude the classification top of the Xception pre-trained model
    include_top = False,
    # Use ImageNet weights
    weights = 'imagenet',
    # Define input shape as 256x256x3
    input_shape = (256, 256, 3)
)
x1 = tf.keras.layers.GlobalAveragePooling2D(name = 'avg_pool')(engine1.output)
x1 = tf.keras.layers.Dropout(0.75)(x1)
x1 = tf.keras.layers.BatchNormalization(
    axis=-1,
    momentum=0.99,
    epsilon=0.01,
    center=True,
    scale=True,
    beta_initializer="zeros",
    gamma_initializer="ones",
    moving_mean_initializer="zeros",
    moving_variance_initializer="ones",
)(x1)
out1 = tf.keras.layers.Dense(3, activation = 'softmax', name = 'dense_output')(x1)
# Build the Keras model
model1 = tf.keras.models.Model(inputs = engine1.input, outputs = out1)
# Compile the model
model1.compile(
    # Set optimizer to Adam(3e-4)
    optimizer = tf.keras.optimizers.Adam(learning_rate = 3e-4),
    # optimizer = SGD(lr=0.001, decay=1e-6, momentum=0.99, nesterov=True),
    # Set loss to categorical crossentropy
    # loss = tf.keras.losses.SparseCategoricalCrossentropy(),
    loss = 'categorical_crossentropy',
    # Set metrics to accuracy
    metrics = ['accuracy']
)
I want the logits, so I wrote this:
logits = model1(X_test)
probs = tf.nn.softmax(logits)
I am getting this error:
ResourceExhaustedError: OOM when allocating tensor with shape[1288,64,125,125] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc [Op:Conv2D]
How can I fix this and get the logits? I want to apply the distillation method after getting the logits. My test set consists of 3 classes and 60 samples, so the logit matrix should be a 60 x 3 matrix.
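One possible way around both issues (a sketch, not from the original post): model1(X_test) pushes the whole test set through the GPU in a single batch, which is what causes the OOM, so predict in smaller batches instead; and because dense_output already applies softmax, its output is probabilities rather than logits, but the pre-softmax scores can be recovered from that layer's kernel and bias. The batch size of 16 below is an arbitrary choice.
# Features feeding the final softmax Dense layer
dense = model1.get_layer('dense_output')
feature_model = tf.keras.models.Model(model1.input, dense.input)
# Predict in small batches so the whole test set is never resident on the GPU at once
features = feature_model.predict(X_test, batch_size = 16)
# Pre-softmax logits of the Dense layer: features @ W + b
logits = features @ dense.kernel.numpy() + dense.bias.numpy()
probs = tf.nn.softmax(logits)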
Edit
To get the logits (1288 x 3), I changed the output layer of my model:
out1 = tf.keras.layers.Dense(3, activation = 'linear', name = 'dense_output')(x1)
Now I am getting logits:
y_pred_logits = model1.predict(X_test)
I want to apply softmax to this. My softmax function looks like this:
def softmax(x):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x)
    return e_x / e_x.sum(axis=1)
But when I do this:
y_pred_logits_activated = softmax(y_pred_logits)
I am getting errors.
How can I fix this, and is this method correct? Further, I want to apply this to the logits.
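For reference, here is a numerically stable softmax that broadcasts correctly over an (N, 3) logit matrix; this is a sketch of the usual fix, assuming the error comes from e_x.sum(axis=1) returning a 1-D array that cannot be broadcast against the (N, 3) input.
def softmax(x):
    """Row-wise softmax for a 2-D array of logits."""
    # Subtract the row maximum for numerical stability
    e_x = np.exp(x - x.max(axis=1, keepdims=True))
    # keepdims=True keeps the sums as a column vector so the division broadcasts row-wise
    return e_x / e_x.sum(axis=1, keepdims=True)
y_pred_probs = softmax(y_pred_logits)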

Related

Calculate gradient of validation error w.r.t inputs using Keras/Tensorflow or autograd

I need to calculate the gradient of the validation error w.r.t inputs x. I'm trying to see how much the validation error changes when I perturb one of the training samples.
The validation error (E) explicitly depends on the model weights (W).
The model weights explicitly depend on the inputs (x and y).
Therefore, the validation error implicitly depends on the inputs.
I'm trying to calculate the gradient of E w.r.t x directly.
An alternative approach would be to calculate the gradient of E w.r.t W (can easily be calculated) and the gradient of W w.r.t x (cannot do at the moment), which would allow the gradient of E w.r.t x to be calculated.
I have attached a toy example. Thanks in advance!
import numpy as np
import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
import tensorflow as tf
from autograd import grad
train_images = mnist.train_images()
train_labels = mnist.train_labels()
test_images = mnist.test_images()
test_labels = mnist.test_labels()
# Normalize the images.
train_images = (train_images / 255) - 0.5
test_images = (test_images / 255) - 0.5
# Flatten the images.
train_images = train_images.reshape((-1, 784))
test_images = test_images.reshape((-1, 784))
# Build the model.
model = Sequential([
    Dense(64, activation='relu', input_shape=(784,)),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax'),
])
# Compile the model.
model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
# Train the model.
model.fit(
    train_images,
    to_categorical(train_labels),
    epochs=5,
    batch_size=32,
)
model.save_weights('model.h5')
# Load the model's saved weights.
# model.load_weights('model.h5')
calculate_mse = tf.keras.losses.MeanSquaredError()
test_x = test_images[:5]
test_y = to_categorical(test_labels)[:5]
train_x = train_images[:1]
train_y = to_categorical(train_labels)[:1]
train_y = tf.convert_to_tensor(train_y, np.float32)
train_x = tf.convert_to_tensor(train_x, np.float64)
with tf.GradientTape() as tape:
    tape.watch(train_x)
    model.fit(train_x, train_y, epochs=1, verbose=0)
    valid_y_hat = model(test_x, training=False)
    mse = calculate_mse(test_y, valid_y_hat)
de_dx = tape.gradient(mse, train_x)
print(de_dx)
# approach 2 - does not run
def calculate_validation_mse(x):
    model.fit(x, train_y, epochs=1, verbose=0)
    valid_y_hat = model(test_x, training=False)
    mse = calculate_mse(test_y, valid_y_hat)
    return mse
train_x = train_images[:1]
train_y = to_categorical(train_labels)[:1]
validation_gradient = grad(calculate_validation_mse)
de_dx = validation_gradient(train_x)
print(de_dx)
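A note that is not part of the original question, for context: the first approach will most likely return None, because model.fit runs its own training loop and applies the weight updates as assignments that are not differentiable ops recorded on the outer tape, so mse has no recorded dependency on train_x. A gradient with respect to an input is only defined when the loss is computed from that input inside the tape, for example:
x = tf.convert_to_tensor(test_x, dtype=tf.float32)
with tf.GradientTape() as tape:
    tape.watch(x)
    preds = model(x, training=False)
    loss = calculate_mse(test_y, preds)
# Well defined: loss depends on x through ops recorded on the tape
print(tape.gradient(loss, x))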
Here's how you can do this. The derivation is given below.
A few things to note:
I have reduced the feature size from 784 to 256 because I was running out of memory in Colab (the line is marked in the code). You might have to do some memory profiling to find out why.
I only computed the gradients for the first layer; this is easily extendable to the other layers.
Disclaimer: this derivation is correct to the best of my knowledge. Please do some research and verify that it is the case. You will run into memory issues for larger inputs and layer sizes.
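A sketch of the chain-rule step the code below appears to implement (my reading, assuming a single SGD update with learning rate \eta equal to lr; the original answer gave the derivation as an image):
\theta' = \theta - \eta \frac{\partial E_{tr}}{\partial \theta}
\quad\Rightarrow\quad
\frac{\partial \theta'}{\partial x} = -\eta \frac{\partial^2 E_{tr}}{\partial \theta \, \partial x},
\qquad
\frac{\partial E_{val}}{\partial x} \approx \frac{\partial E_{val}}{\partial \theta'} \cdot \frac{\partial \theta'}{\partial x}
In the code, grads_1 plays the role of -lr * d(dE_tr/dtheta)/dx (the Jacobian of the inner gradient with respect to x), grads_2 plays the role of dE_val/dtheta, and the final matmul contracts the two over the parameter dimension.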
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
import tensorflow as tf
f = 256
model = Sequential([
    Dense(64, activation='relu', input_shape=(f,)),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax'),
])
# Compile the model.
model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
w = model.weights[0]
# Inputs and labels
x_tr = tf.Variable(np.random.normal(size=(1,f)), shape=(1, f), dtype='float32')
y_tr = np.random.choice([0,1,2,3,4,5,6,7,8,9], size=(1,1))
y_tr_onehot = tf.keras.utils.to_categorical(y_tr, num_classes=10).astype('float32')
x_v = tf.Variable(np.random.normal(size=(1,f)), shape=(1, f), dtype='float32')
y_v = np.random.choice([0,1,2,3,4,5,6,7,8,9], size=(1,1))
y_v_onehot = tf.keras.utils.to_categorical(y_v, num_classes=10).astype('float32')
# In the context of GradientTape
with tf.GradientTape() as tape1:
    with tf.GradientTape() as tape2:
        y_tr_pred = model(x_tr)
        tr_loss = tf.keras.losses.MeanSquaredError()(y_tr_onehot, y_tr_pred)
    tmp_g = tape2.gradient(tr_loss, w)
print(tmp_g.shape)
# d(dE_tr/d(theta))/dx
# Warning: this step consumes a lot of memory for large layers
lr = 0.001
grads_1 = -lr * tape1.jacobian(tmp_g, x_tr)
with tf.GradientTape() as tape3:
    y_v_pred = model(x_v)
    v_loss = tf.keras.losses.MeanSquaredError()(y_v_onehot, y_v_pred)
# dE_val/d(theta)
grads_2 = tape3.gradient(v_loss, w)[tf.newaxis, :]
# Just crunching the dimensions to get the final desired shape of (1, 256)
grad = tf.matmul(tf.reshape(grads_2, [1, -1]), tf.reshape(tf.transpose(grads_1, [2, 1, 0, 3]), [1, -1, 256]))

Keras custom lambda layer: how to normalize / scale the output

I am struggling with scaling the output of a Lambda layer. The code is as follows:
My X_train is 100*15*24 and Y_train is 100*1 (the network consists of an LSTM layer + Dense layers).
input_shape=(timesteps, num_feat)
data_input = Input(shape=input_shape, name="input_layer")
lstm1 = LSTM(10, name="lstm_layer")(data_input)
dense1 = Dense(4, activation="relu", name="dense1")(lstm1)
dense2 = Dense(1, activation = "custom_activation_1", name = "dense2")(dense1)
dense3 = Dense(1, activation = "custom_activation_2", name = "dense3")(dense1)
# dense2 and dense3 have custom activation functions whose range is the whole real line (so I need to normalize the output)
## custom lambda layer / loss function ##
def custom_layer(new_input):
    add_input = new_input[0] + new_input[1]
    # below three lines are where the problem occurs that makes the program not work
    ###############################################
    scaler = MinMaxScaler()
    scaler.fit(add_input)
    normalized = scaler.transform(add_input)
    ###############################################
    return normalized
lambda_layer = Lambda(custom_layer, name="lambda_layer")([dense2, dense3])
model = Model(inputs=data_input, outputs=lambda_layer)
model.compile(loss='mse', optimizer='adam',metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=2, batch_size=216)
How can I normalize the output of lambda_layer properly? Any ideas or suggestions are appreciated!
I don't think scikit-learn transformers would work inside Lambda layers. If you're only interested in getting the output normalized w.r.t. the data passed in, you can do the following:
from tensorflow.keras.layers import Input, LSTM, Dense, Lambda
from tensorflow.keras.models import Model
import tensorflow as tf
timesteps = 3
num_feat = 12
input_shape=(timesteps, num_feat)
data_input = Input(shape=input_shape, name="input_layer")
lstm1 = LSTM(10, name="lstm_layer")(data_input)
dense1 = Dense(4, activation="relu", name="dense1")(lstm1)
dense2 = Dense(1, activation = "custom_activation_1", name = "dense2")(dense1)
dense3 = Dense(1, activation = "custom_activation_2", name = "dense3")(dense1)
# dense2 and dense3 have custom activation functions whose range is the whole real line (so I need to normalize the output)
## custom lambda layer / loss function ##
def custom_layer(new_input):
    add_input = new_input[0] + new_input[1]
    # Min-max normalize across the batch dimension; note the denominator must be (max - min)
    normalized = (add_input - tf.reduce_min(add_input, axis=0, keepdims=True)) / (tf.reduce_max(add_input, axis=0, keepdims=True) - tf.reduce_min(add_input, axis=0, keepdims=True))
    return normalized
lambda_layer = Lambda(custom_layer, name="lambda_layer")([dense2, dense3])
model = Model(inputs=data_input, outputs=lambda_layer)
model.compile(loss='mse', optimizer='adam',metrics=['accuracy'])
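A quick way to sanity-check custom_layer on its own (my addition; the dummy tensors mimic the (batch, 1) outputs of dense2 and dense3): the normalized values should span [0, 1] within the batch.
import numpy as np
a = tf.constant(np.random.normal(size=(8, 1)), dtype=tf.float32)
b = tf.constant(np.random.normal(size=(8, 1)), dtype=tf.float32)
out = custom_layer([a, b])
# The batch minimum and maximum map to the endpoints 0.0 and 1.0
print(float(tf.reduce_min(out)), float(tf.reduce_max(out)))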

TensorFlow 2.0 GradientTape with EarlyStopping

I am using Python 3.7.5 and TensorFlow 2.0's GradientTape API for classification of the MNIST dataset using a 300-100 dense, fully connected architecture. I would like to use TensorFlow's EarlyStopping with GradientTape() so that training stops according to the variable being watched or monitored and according to the patience parameter.
The code I have is below:
# Use tf.data to batch and shuffle the dataset
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(100).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(batch_size)
# Choose an optimizer and loss function for training-
loss_fn = tf.keras.losses.BinaryCrossentropy()
optimizer = tf.keras.optimizers.Adam(lr = 0.001)
def create_nn_gradienttape():
    """
    Function to create neural network for use
    with GradientTape API following MNIST
    300 100 architecture
    """
    model = Sequential()
    model.add(
        Dense(
            units = 300, activation = 'relu',
            kernel_initializer = tf.keras.initializers.GlorotNormal,
            input_shape = (784,)
        )
    )
    model.add(
        Dense(
            units = 100, activation = 'relu',
            kernel_initializer = tf.keras.initializers.GlorotNormal
        )
    )
    model.add(
        Dense(
            units = 10, activation = 'softmax'
        )
    )
    return model
# Instantiate the model to be trained using GradientTape-
model = create_nn_gradienttape()
# Select metrics to measure the error & accuracy of model.
# These metrics accumulate the values over epochs and then
# print the overall result-
train_loss = tf.keras.metrics.Mean(name = 'train_loss')
train_accuracy = tf.keras.metrics.BinaryAccuracy(name = 'train_accuracy')
test_loss = tf.keras.metrics.Mean(name = 'test_loss')
test_accuracy = tf.keras.metrics.BinaryAccuracy(name = 'test_accuracy')
# Use tf.GradientTape to train the model-
#tf.function
def train_step(data, labels):
    """
    Function to perform one step of Gradient
    Descent optimization
    """
    with tf.GradientTape() as tape:
        # 'training=True' is only needed if there are layers with different
        # behavior during training versus inference (e.g. Dropout).
        # predictions = model(data, training=True)
        predictions = model(data)
        loss = loss_fn(labels, predictions)
    # 'gradients' is a list variable!
    gradients = tape.gradient(loss, model.trainable_variables)
    # IMPORTANT:
    # Multiply mask with computed gradients-
    # List to hold element-wise multiplication between-
    # computed gradient and masks-
    grad_mask_mul = []
    # Perform element-wise multiplication between computed gradients and masks-
    for grad_layer, mask in zip(gradients, mask_model_stripped.trainable_weights):
        grad_mask_mul.append(tf.math.multiply(grad_layer, mask))
    # optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    optimizer.apply_gradients(zip(grad_mask_mul, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)
#tf.function
def test_step(data, labels):
    """
    Function to test model performance
    on testing dataset
    """
    # training=False is only needed if there are layers with different
    # behavior during training versus inference (e.g. Dropout).
    predictions = model(data)
    t_loss = loss_fn(labels, predictions)
    test_loss(t_loss)
    test_accuracy(labels, predictions)
EPOCHS = 15
for epoch in range(EPOCHS):
    # Reset the metrics at the start of the next epoch
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()
    for x, y in train_ds:
        train_step(x, y)
    for x_t, y_t in test_ds:
        test_step(x_t, y_t)
    template = 'Epoch {0}, Loss: {1:.4f}, Accuracy: {2:.4f}, Test Loss: {3:.4f}, Test Accuracy: {4:4f}'
    print(template.format(epoch + 1,
                          train_loss.result(), train_accuracy.result()*100,
                          test_loss.result(), test_accuracy.result()*100))
# Count number of non-zero parameters in each layer and in total-
# print("layer-wise manner model, number of nonzero parameters in each layer are: \n")
model_sum_params = 0
for layer in model.trainable_weights:
    # print(tf.math.count_nonzero(layer, axis = None).numpy())
    model_sum_params += tf.math.count_nonzero(layer, axis = None).numpy()
print("Total number of trainable parameters = {0}\n".format(model_sum_params))
In the code above, how can I use tf.keras.callbacks.EarlyStopping with the GradientTape() API?
Thanks!
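The EarlyStopping callback is tied to model.fit, so with a hand-written GradientTape loop the usual route is to re-implement its logic in the loop itself. Below is a sketch of that idea (not from the original thread); the patience and min_delta values are arbitrary.
patience, min_delta = 3, 1e-4
best_loss, wait = float('inf'), 0
for epoch in range(EPOCHS):
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()
    for x, y in train_ds:
        train_step(x, y)
    for x_t, y_t in test_ds:
        test_step(x_t, y_t)
    current = test_loss.result().numpy()
    if current < best_loss - min_delta:
        best_loss, wait = current, 0         # improvement: reset the patience counter
        best_weights = model.get_weights()   # optionally remember the best weights
    else:
        wait += 1
        if wait >= patience:                 # no improvement for `patience` epochs
            print('Early stopping at epoch {0}'.format(epoch + 1))
            model.set_weights(best_weights)  # mimic restore_best_weights=True
            break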

Type mismatch in pytorch

I am trying my hands on PyTorch.
I am getting this error:
RuntimeError: Expected object of scalar type Double but got scalar type Float for argument #2 'weight' in call to _thnn_conv2d_forward
This is my code (shamelessly copied from an online tutorial):
class Net(Module):
    def __init__(self):
        super(Net, self).__init__()
        self.cnn_layers = Sequential(
            Conv2d(1, 4, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(4),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
            Conv2d(4, 4, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(4),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2)
        )
        self.linear_layers = Sequential(
            Linear(900, 10)
        )
    def forward(self, x):
        # self.weights = self.weights.double()
        x = self.cnn_layers(x)
        x = x.view(x.size(0), -1)
        x = self.linear_layers(x)
        return x
tdata = dt.Data("train")
train_x = torch.from_numpy(tdata.get_train()[0].reshape(925,1,300,300))
train_y = torch.from_numpy(tdata.get_train()[1].astype(int))
val_x = torch.from_numpy(tdata.get_test()[0].reshape(102,1,300,300))
val_y = torch.from_numpy(tdata.get_test()[1].astype(int))
print(val_y.shape)
plt.imshow(tdata.get_train()[0][100],cmap='gray')
plt.show()
model = Net()
# defining the optimizer
optimizer = Adam(model.parameters(), lr=0.07)
# defining the loss function
criterion = CrossEntropyLoss()
# checking if GPU is available
if torch.cuda.is_available():
    model = model.cuda()
    criterion = criterion.cuda()
print(model)
def train(epoch):
    model.train()
    tr_loss = 0
    # getting the training set
    x_train, y_train = Variable(train_x.double()), Variable(train_y.double())
    # getting the validation set
    x_val, y_val = Variable(val_x), Variable(val_y)
    # converting the data into GPU format
    if torch.cuda.is_available():
        x_train = x_train.cuda()
        y_train = y_train.cuda()
        x_val = x_val.cuda()
        y_val = y_val.cuda()
    # clearing the Gradients of the model parameters
    optimizer.zero_grad()
    # prediction for training and validation set
    output_train = model(x_train.double())
    output_val = model(x_val)
    # computing the training and validation loss
    loss_train = criterion(output_train, y_train)
    loss_val = criterion(output_val, y_val)
    train_losses.append(loss_train)
    val_losses.append(loss_val)
    # computing the updated weights of all the model parameters
    loss_train.backward()
    optimizer.step()
    tr_loss = loss_train.item()
    if epoch % 2 == 0:
        # printing the validation loss
        print('Epoch : ', epoch+1, '\t', 'loss :', loss_val)
n_epochs = 25
# empty list to store training losses
train_losses = []
# empty list to store validation losses
val_losses = []
# training the model
for epoch in range(n_epochs):
    train(epoch)
tdata.get_train() and tdata.get_test() return a tuple (numpy(dtype='double'), numpy(dtype='int')).
I think the weights are an internal data structure, so their type should be adjusted by PyTorch itself. What is the problem here?
You may just add .to(torch.float32) to your train_x and val_x tensors
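Applied to the code above, that suggestion amounts to casting the image tensors to float32 so they match the default dtype of the Conv2d weights (a sketch under that assumption):
train_x = torch.from_numpy(tdata.get_train()[0].reshape(925, 1, 300, 300)).to(torch.float32)
val_x = torch.from_numpy(tdata.get_test()[0].reshape(102, 1, 300, 300)).to(torch.float32)
# and drop the .double() calls inside train(), which would cast the inputs back to float64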
I don't completely agree with the other answer; it only partially solves the problem.
Writing this:
# converting the target into torch format
train_y = train_y.astype(int);
train_y = torch.from_numpy(train_y).to(torch.float32)
# converting validation images into torch format
val_x = val_x.reshape(6000, 1, 28, 28)
val_x = torch.from_numpy(val_x).to(torch.float32)
# converting the target into torch format
val_y = val_y.astype(int);
val_y = torch.from_numpy(val_y).to(torch.float32)
is important, but so is this:
# computing the training and validation loss
y_train = y_train.long() #we convert the results because they aren't in the good format
y_train = y_train.squeeze_()
y_val = y_val.long() #we convert the results because they aren't in the good format
y_val = y_val.squeeze_()
loss_train = criterion(output_train, y_train)
loss_val = criterion(output_val, y_val)
train_losses.append(loss_train)
val_losses.append(loss_val)
In my case, this solved my problem.
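One caveat about this answer (my addition): nn.CrossEntropyLoss expects the targets to be class indices of dtype torch.long with shape (N,), which is exactly why the .long() and .squeeze_() calls are needed; converting the labels to float32 first is unnecessary. A minimal label conversion could therefore be:
train_y = torch.from_numpy(tdata.get_train()[1].astype(int)).long()
val_y = torch.from_numpy(tdata.get_test()[1].astype(int)).long()
# add .squeeze() if the labels come out with shape (N, 1) instead of (N,)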

Custom loss is missing an operation for gradient

I'm not too sure how to deal with this and why I am getting this error.
raise ValueError('An operation has `None` for gradient. '
ValueError: An operation has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.
I am using a custom triplet loss for the loss function from this blog: https://omoindrot.github.io/triplet-loss. I am running it in Keras, which should not be an issue, but I cannot get it to work correctly with my model.
This is their loss function; the other code that it needs is a direct copy:
def batch_hard_triplet_loss(embeddings, labels, margin = 0.3, squared=False):
    # Get the pairwise distance matrix
    pairwise_dist = pairwise_distances(embeddings, squared=squared)
    mask_anchor_positive = _get_anchor_positive_triplet_mask(labels)
    mask_anchor_positive = tf.to_float(mask_anchor_positive)
    anchor_positive_dist = tf.multiply(mask_anchor_positive, pairwise_dist)
    hardest_positive_dist = tf.reduce_max(anchor_positive_dist, axis=1, keepdims=True)
    mask_anchor_negative = _get_anchor_negative_triplet_mask(labels)
    mask_anchor_negative = tf.to_float(mask_anchor_negative)
    max_anchor_negative_dist = tf.reduce_max(pairwise_dist, axis=1, keepdims=True)
    anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
    hardest_negative_dist = tf.reduce_min(anchor_negative_dist, axis=1, keepdims=True)
    # Combine biggest d(a, p) and smallest d(a, n) into final triplet loss
    triplet_loss = tf.maximum(hardest_positive_dist - hardest_negative_dist + margin, 0.0)
    triplet_loss = tf.reduce_mean(triplet_loss)
    #triplet_loss = k.mean(triplet_loss) # use keras mean
    return triplet_loss
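A side note not in the original post: tf.to_float only exists in TF 1.x; if this code is run under TensorFlow 2.x, the equivalent call is tf.cast, for example:
mask_anchor_positive = tf.cast(mask_anchor_positive, tf.float32)
mask_anchor_negative = tf.cast(mask_anchor_negative, tf.float32)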
Now, this is the model I am using:
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    ....
    validation_split=0.2) # set validation split
train_generator = train_datagen.flow_from_directory(
    IMAGE_DIR,
    target_size=(224, 224),
    batch_size=BATCHSIZE,
    class_mode='categorical',
    subset='training') # set as training data
validation_generator = train_datagen.flow_from_directory(
    IMAGE_DIR, # same directory as training data
    target_size=(224, 224),
    batch_size=BATCHSIZE,
    class_mode='categorical',
    subset='validation') # set as validation data
print("Initializing Model...")
# Get base model
input_layer = preloadmodel.get_layer('model_1').get_layer('input_1').input
layer_output = preloadmodel.get_layer('model_1').get_layer('glb_avg_pool').output
# Make extractor
base_network = Model(inputs=input_layer, outputs=layer_output)
# Define new model
input_images = Input(shape=(224, 224, 3), name='input_image') # input layer for images
#input_labels = Input(shape=(num_classes,), name='input_label') # input layer for labels
embeddings = base_network(input_images) # output of network -> embeddings
output = Dense(1, activation='sigmoid')(embeddings)
model = Model(inputs=input_images, outputs=output)
# Compile model
model.compile(loss=batch_hard_triplet_loss, optimizer='adam')
OK, I solved these issues with a lot of research. It didn't fix my problem, as the code still does not work, but the issue with the loss function is fixed. Following this blog https://medium.com/@Bloomore/how-to-write-a-custom-loss-function-with-additional-arguments-in-keras-5f193929f7a0
I changed the loss function to this:
def batch_hard_triplet_loss(embeddings, labels, margin = 0.3, squared=False):
    # Get the pairwise distance matrix
    pairwise_dist = pairwise_distances(embeddings, squared=squared)
    mask_anchor_positive = _get_anchor_positive_triplet_mask(labels)
    mask_anchor_positive = tf.to_float(mask_anchor_positive)
    anchor_positive_dist = tf.multiply(mask_anchor_positive, pairwise_dist)
    hardest_positive_dist = tf.reduce_max(anchor_positive_dist, axis=1, keepdims=True)
    mask_anchor_negative = _get_anchor_negative_triplet_mask(labels)
    mask_anchor_negative = tf.to_float(mask_anchor_negative)
    max_anchor_negative_dist = tf.reduce_max(pairwise_dist, axis=1, keepdims=True)
    anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
    hardest_negative_dist = tf.reduce_min(anchor_negative_dist, axis=1, keepdims=True)
    def loss(y_true, y_pred):
        # Combine biggest d(a, p) and smallest d(a, n) into final triplet loss
        #triplet_loss = tf.maximum(hardest_positive_dist - hardest_negative_dist + margin, 0.0)
        #triplet_loss = tf.reduce_mean(triplet_loss)
        triplet_loss = k.maximum(hardest_positive_dist - hardest_negative_dist + margin, 0.0)
        triplet_loss = k.mean(triplet_loss) # use keras mean
        return triplet_loss
    return loss
And then call it in the model like this:
batch_loss = batch_hard_triplet_loss(embeddings, input_labels, 0.4, False)
model = Model(inputs=input_images, outputs=embeddings)
model.compile(loss=batch_loss, optimizer='adam')
It now gives me this error:
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'input_label' with dtype float and shape [?,99]
[[{{node input_label}}]]
But hey, we're moving on up.
The problem is that Keras only accepts a loss function with two parameters (y_true, y_pred), so you need to return the loss from another function, as I did here.
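As for the remaining placeholder error (my reading, not from the original post): the closed-over loss uses the input_labels tensor, so that tensor has to be declared as a model input and fed with data during training, otherwise the 'input_label' placeholder is never fed. A sketch of what that could look like, assuming the generator is adapted to yield the images together with their one-hot labels:
input_labels = Input(shape=(num_classes,), name='input_label')
batch_loss = batch_hard_triplet_loss(embeddings, input_labels, 0.4, False)
model = Model(inputs=[input_images, input_labels], outputs=embeddings)
model.compile(loss=batch_loss, optimizer='adam')
# Each batch must now provide both inputs; the target passed to fit is ignored by the
# triplet loss, so a dummy array of matching batch size is enough, e.g.
# model.fit([images, labels_onehot], dummy_targets, ...)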
