Create a custom regularizer on weights of 2 layers - keras

Here is a small snippet of my code describing the custom regularizer that I want to implement.
# Code adapted from https://github.com/keras-team/keras/issues/5563
from keras import backend as K
from keras.layers import Layer, Input, Flatten, Dense
from keras.models import Model

class CustomRegularization(Layer):
    def __init__(self, **kwargs):
        super(CustomRegularization, self).__init__(**kwargs)

    def call(self, x, mask=None):
        ld = x[0]
        rd = x[1]
        reg = K.dot(K.transpose(ld), rd)
        reg_norm = K.sqrt(K.sum(K.square(reg)))
        self.add_loss(reg_norm, x)
        return ld

    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], input_shape[0][1])

def model():
    input1 = Input(shape=(224, 224, 3))
    input2 = Input(shape=(224, 224, 3))
    inp1 = Flatten()(input1)
    inp2 = Flatten()(input2)

    layer1 = Dense(1024, activation="sigmoid")
    x1_1 = layer1(inp1)
    x2_1 = layer1(inp2)

    layer2 = Dense(1024, activation="sigmoid")
    x1_2 = layer2(inp1)
    x2_2 = layer2(inp2)

    # get weights of layer1 and layer2
    layer1_wt = layer1.trainable_weights[0]
    layer2_wt = layer2.trainable_weights[0]

    # This is a regularization term on the weights of layer1 and layer2.
    regularization = CustomRegularization()([layer1_wt, layer2_wt])

    model = Model([input1, input2], [x1_2, x2_2, regularization])
    return model

if __name__ == "__main__":
    m = model()
This returns the error AttributeError: 'Variable' object has no attribute '_keras_history', and the model cannot be created.
I know that this error is because of incompatible outputs (since the inputs are Keras Input layers). [For more details refer to fchollet's comment on issue #7362.]
The main problem here is layer1.trainable_weights[0] and layer2.trainable_weights[0]. These are tf.Variable objects (TensorFlow variables), not Keras tensors. I need them converted to Keras tensors. How do I do that?
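A sketch of one possible workaround (an assumption on my part, using tf.keras 2.x, not a confirmed solution): skip the custom layer entirely and attach the cross-weight penalty via model.add_loss, which also accepts a zero-argument callable, so the weights never have to be routed through a Layer as Keras tensors.

# Minimal sketch (tf.keras 2.x assumed): the penalty ||W1^T W2||_F is added as a
# zero-argument callable, so layer1.kernel / layer2.kernel are read directly.
# The 0.01 weighting is purely illustrative.
import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense, Flatten

input1 = Input(shape=(224, 224, 3))
input2 = Input(shape=(224, 224, 3))
inp1, inp2 = Flatten()(input1), Flatten()(input2)

layer1 = Dense(1024, activation="sigmoid")
layer2 = Dense(1024, activation="sigmoid")
x1_2 = layer2(inp1)   # outputs used in the original model
x2_2 = layer2(inp2)
_ = layer1(inp1)      # call layer1 once so its kernel is built

model = Model([input1, input2], [x1_2, x2_2])
# Frobenius norm of W1^T W2, recomputed each step from the current kernels.
model.add_loss(lambda: 0.01 * tf.norm(
    tf.matmul(layer1.kernel, layer2.kernel, transpose_a=True)))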

Related

tf.GradientTape gradient() returns None

I am trying to train my Keras model using TensorFlow. So far I can build the model:
def Model(input_shape, num_of_layers):
    num_of_layers = 5
    mod = keras.models.Sequential()
    mod.add(keras.layers.Dense(1, input_shape=(input_shape,)))
    for i in range(num_of_layers - 1):
        mod.add(keras.layers.Dense(16, activation='tanh'))
    mod.add(keras.layers.Dense(1, activation='tanh'))
    return mod
and the loss function:
def loss(u_pred, u_true):
    return tf.reduce_mean(tf.keras.losses.mean_squared_error(u_pred, u_true))
Then I create a train function to train the model.
def train(model, X, epoch = 500, lr = 1e-3):
    trainable_params = [tf.Variable(model.get_weights()[i]) for i in range(len(model.get_weights()))]
    loss_array = []
    optim = tf.keras.optimizers.Adam(learning_rate = lr)
    for i in range(epoch):
        with tf.GradientTape() as g:
            g.watch(trainable_params)
            loss_val = loss(model(X), tf.zeros_like(X))
        grad = g.gradient(loss_val, trainable_params)
        ...
grad is just a list of None when I print it. What went wrong with my train function? I have converted my model's weights and biases to tensor objects using tf.Variable. Using tf.cast or tf.convert_to_tensor doesn't help either.
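For comparison, a minimal sketch (my assumption, not from the original post) of a tape setup that does produce gradients: the tape differentiates with respect to model.trainable_variables, the actual Variable objects used in the forward pass, rather than fresh copies built from model.get_weights().

# Sketch only: differentiate w.r.t. the model's own variables so the tape
# can connect them to the loss.
def train(model, X, epochs=500, lr=1e-3):
    optim = tf.keras.optimizers.Adam(learning_rate=lr)
    loss_array = []
    for _ in range(epochs):
        with tf.GradientTape() as tape:
            loss_val = loss(model(X), tf.zeros_like(X))
        grads = tape.gradient(loss_val, model.trainable_variables)
        optim.apply_gradients(zip(grads, model.trainable_variables))
        loss_array.append(float(loss_val))
    return loss_array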

TypeError: linear(): argument 'input' (position 1) must be Tensor, not tuple

I am new to transformers, so I tried to implement a BERT classifier class, but I receive this error message:
TypeError: linear(): argument 'input' (position 1) must be Tensor, not tuple
I googled it, and it is said everywhere that the problem can be solved by adding the parameter return_dict=False. I tried it this way:
import torch.nn as nn
from transformers import BertForSequenceClassification

class BertFakesClassifier(nn.Module):
    def __init__(self):
        super(BertFakesClassifier, self).__init__()
        self.bert = BertForSequenceClassification.from_pretrained(
            "bert-base-multilingual-uncased", return_dict=False)
        self.relu = nn.ReLU()                # ReLU activation function
        self.dense1_l = nn.Linear(768, 512)  # dense layer 1
        self.output_l = nn.Linear(512, 2)    # dense layer 2 (output layer)
        self.softmax = nn.LogSoftmax(dim=1)  # softmax activation function

    def forward(self, tokens, attention_mask):
        outputs = self.bert(tokens, attention_mask)
        x = self.dense1_l(outputs)
        x = self.relu(x)
        x = self.output_l(x)                 # output layer
        logits = self.softmax(x)
        return logits
I tried to add it to the forward method:
class BertFakesClassifier(nn.Module):
    def __init__(self):
        super(BertFakesClassifier, self).__init__()
        self.bert = BertForSequenceClassification.from_pretrained("bert-base-multilingual-uncased")
        self.relu = nn.ReLU()                # ReLU activation function
        self.dense1_l = nn.Linear(768, 512)  # dense layer 1
        self.output_l = nn.Linear(512, 2)    # dense layer 2 (output layer)
        self.softmax = nn.LogSoftmax(dim=1)  # softmax activation function

    def forward(self, tokens, attention_mask):
        outputs = self.bert(tokens, attention_mask, return_dict=False)
        x = self.dense1_l(outputs)
        x = self.relu(x)
        x = self.output_l(x)                 # output layer
        logits = self.softmax(x)
        return logits
I tried:
def forward(self, tokens, attention_mask, return_dict):
    outputs = self.bert(tokens, attention_mask, return_dict=False)
I added return_dict=False to self.bert and to the forward method at the same time. I also tried using this instead:
outputs = self.bert(tokens, attention_mask)
x = outputs['last_hidden_state'][:, 0, :]
But nothing is working, and I either get an error message about the input being a tuple, or
TypeError: linear(): argument 'input' (position 1) must be Tensor, not SequenceClassifierOutput
or another one:
TypeError: forward() got an unexpected keyword argument 'return_dict'
I would really appreciate it if anyone could help solve this issue.
Thanks in advance!
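For reference, a hedged sketch of one way the tuple output is typically unpacked (this is an assumption on my part, and it swaps BertForSequenceClassification, which already returns classification logits rather than 768-dimensional hidden states, for BertModel):

import torch.nn as nn
from transformers import BertModel

class BertFakesClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        # BertModel (not BertForSequenceClassification) exposes the 768-dim
        # representations that the 768->512 linear layer expects.
        self.bert = BertModel.from_pretrained(
            "bert-base-multilingual-uncased", return_dict=False)
        self.relu = nn.ReLU()
        self.dense1_l = nn.Linear(768, 512)
        self.output_l = nn.Linear(512, 2)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, tokens, attention_mask):
        # With return_dict=False the model returns a tuple:
        # (last_hidden_state, pooler_output, ...)
        last_hidden_state, pooled_output = self.bert(
            tokens, attention_mask=attention_mask)[:2]
        x = self.relu(self.dense1_l(pooled_output))
        return self.softmax(self.output_l(x))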

How can I apply cuda to custom model in pytorch?

The inputs are a dictionary of tensors, so while training I move them to the cuda device to use the GPU. My custom model is shown below; I also assigned cuda to the model.
class EmbeddingLayer(nn.Module):
    def __init__(self):
        super(EmbeddingLayer, self).__init__()
        # other features
        self.other_features_embedding = []
        for feature_name in OTHER_FEATURES:
            vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
            embedding_dims = int(math.sqrt(len(vocabulary)))
            embedding = nn.Embedding(len(vocabulary)+1, embedding_dims)
            self.other_features_embedding.append(embedding)

        # transformer features
        item_vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY['item']
        self.item_embedding_dims = int(math.sqrt(len(item_vocabulary)))
        self.item_embedding = nn.Embedding(len(item_vocabulary)+1, self.item_embedding_dims)

    def forward(self, inputs):
        # other features
        encoded_other_features = []
        for i, feature_name in enumerate(OTHER_FEATURES):
            embedding = self.other_features_embedding[i](inputs[feature_name])
            encoded_other_features.append(embedding)
        encoded_other_features = torch.cat(encoded_other_features, -1)

        # transformer features
        encoded_sequence_item = self.item_embedding(inputs['sequence_item'])
        encoded_target_item = self.item_embedding(inputs['target_item'])
        positions = inputs['target_timestamp'].repeat(sequence_length-1, 1).transpose(0, 1) - inputs['sequence_timestamp']
        encoded_positions = positions.repeat(1, self.item_embedding_dims).reshape(-1, self.item_embedding_dims, sequence_length-1).transpose(1, 2)
        encoded_sequence_item_with_position = encoded_sequence_item + encoded_positions
        encoded_transformer_features = torch.cat((encoded_sequence_item_with_position,
                                                  encoded_target_item.reshape(-1, 1, self.item_embedding_dims)), 1)
        return encoded_other_features, encoded_transformer_features


class BST(nn.Module):
    def __init__(self, hidden_units, dropout, num_heads):
        super(BST, self).__init__()
        ...
        self.embedding_layer = EmbeddingLayer()
        ...

    def forward(self, inputs):
        other_features, transformer_features = self.embedding_layer(inputs)
        ...
        return self.output(features)


model = BST([256, 128], 0.3, 1)
model.to(device)


def train(model, optimizer, dataloader):
    model.train()
    for inputs in tqdm(dataloader, total=len(dataloader)):
        for k, v in inputs.items():
            inputs[k] = v.to(device)
        model.zero_grad()
        pred = model(inputs)
        ...
But the following error occurs:
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument index in method wrapper__index_select)
I think the error occurs at the embeddings in EmbeddingLayer. How can I fix this error so that I can use the GPU while training?
Your plain Python list of nn.Module objects does not register the embedding layers as submodules of your layer. In order to properly register a list of modules you should use nn.ModuleList. Therefore, you should replace the loop in your __init__ function with the following:
embeddings = []
for feature_name in OTHER_FEATURES:
    vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
    embedding_dims = int(math.sqrt(len(vocabulary)))
    embedding = nn.Embedding(len(vocabulary)+1, embedding_dims)
    embeddings.append(embedding)
self.other_features_embedding = nn.ModuleList(embeddings)
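A quick way to see the difference (an illustrative toy, not part of the original answer): parameters held in a plain Python list are invisible to .parameters() and .to(device), while an nn.ModuleList registers them:

import torch.nn as nn

class Plain(nn.Module):
    def __init__(self):
        super().__init__()
        self.embs = [nn.Embedding(10, 4)]                  # not registered

class Registered(nn.Module):
    def __init__(self):
        super().__init__()
        self.embs = nn.ModuleList([nn.Embedding(10, 4)])   # registered

print(len(list(Plain().parameters())))       # 0 -> stays on CPU after .to(device)
print(len(list(Registered().parameters())))  # 1 -> moved by .to(device)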

How to give multiple arguments in tensorflow Model call function?

I'm trying to build a model in TensorFlow by extending the Model class in tensorflow.keras. I need to pass two arguments to the call function of this class: input images x (224, 224, 3) and output labels y. But I get the following error while building the model:
ValueError: Currently, you cannot build your model if it has positional or keyword arguments that are not input to the model, but are required for its 'call' method.
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras import layers as L

class myCNN(Model):
    def __init__(self):
        super(myCNN, self).__init__()
        base_model = tf.keras.applications.VGG16(input_shape=(224, 224, 3), weights='imagenet')
        layer_name = 'block5_conv3'
        self.conv_1 = Model(inputs=base_model.input, outputs=base_model.get_layer(layer_name).output)
        self.flatten = L.Flatten(name='flatten')
        self.fc1 = L.Dense(1000, activation='relu', name='fc1')
        self.final = L.Activation('softmax')

    # The problem is because I need y
    def call(self, x, y):
        x = self.conv_1(x)
        x = self.flatten(x)
        x = self.fc1(x)
        return self.final(x)

model = myCNN()
model.build((None, 224, 224, 3, 1))
The inputs parameter of the call method can be a single input tensor or a list/tuple of input tensors.
You can pass two arguments like this:
def call(self, inputs):
    x = inputs[0]
    y = inputs[1]
    x = self.conv_1(x)
    x = self.flatten(x)
    x = self.fc1(x)
    return self.final(x)
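With that convention the model is then called with a list of tensors. A small usage sketch (the dummy zero tensors are only illustrative):

# Illustrative only: zero tensors stand in for real images and labels.
model = myCNN()
images = tf.zeros((1, 224, 224, 3))
labels = tf.zeros((1, 1000))
outputs = model([images, labels])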

How to fix 'No gradients provided for any variable' error when using ctc_loss in Tensorflow

I am trying to build Baidu's Deep Speech 2 model in TensorFlow 2.0.0alpha0. I am having trouble optimizing the TensorFlow ctc_loss using a tf.GradientTape() object to calculate the gradients.
I am currently passing a tensor of shape (batch_size, max_step, feats) to my model and then passing the computed logits to the loss function. I have also tried passing a sparse tensor but this also does not work.
Here is the code for creating my model:
import tensorflow as tf

class DeepSpeech2(tf.keras.Model):
    def __init__(self, vocab_size, conv_filters=[11], conv_kernel_sizes=[1280], conv_strides=[2],
                 recur_sizes=[100], rnn_type='gru', bidirect_rnn=False, batch_norm=True,
                 learning_rate=1e-3, name='DeepSpeech2'):
        super(DeepSpeech2, self).__init__()
        self._vocab_size = vocab_size
        self._conv_filters = conv_filters
        self._conv_kernel_sizes = conv_kernel_sizes
        self._conv_strides = conv_strides
        self._recur_sizes = recur_sizes
        self._rnn_type = rnn_type
        self._bidirect_rnn = bidirect_rnn
        self._batch_norm = batch_norm
        self._learning_rate = learning_rate
        self._name = name
        self._conv_batch_norm = None
        with tf.name_scope(self._name):
            self._convolution = [tf.keras.layers.Conv1D(filters=conv_filters[i],
                kernel_size=conv_kernel_sizes[i], strides=conv_strides[i],
                padding='valid', activation='relu',
                name='conv1d_{}'.format(i)) for i in range(len(self._conv_filters))]
            if self._batch_norm:
                self._conv_batch_norm = tf.keras.layers.BatchNormalization(name='bn_conv_1d')
            if self._rnn_type == 'gru':
                rnn_init = tf.keras.layers.GRU
            elif self._rnn_type == 'lstm':
                rnn_init = tf.keras.layers.LSTM
            else:
                raise Exception("Invalid rnn_type: '{}' (must be 'lstm' or 'gru')"
                                .format(self._rnn_type))
            self._rnn = []
            for i, r in enumerate(self._recur_sizes):
                layer = rnn_init(r, activation='relu', return_sequences=True,
                                 name='{}_{}'.format(self._rnn_type, i))
                if self._bidirect_rnn:
                    layer = tf.keras.layers.Bidirectional(layer)
                self._rnn.append(layer)
                if self._batch_norm:
                    self._rnn.append(tf.keras.layers.BatchNormalization())
            self._fc = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(
                self._vocab_size, name='fc', activation='linear'))
            self._optimizer = tf.keras.optimizers.Adam(lr=self._learning_rate)

    def __call__(self, specs):
        with tf.name_scope(self._name):
            feats = specs
            for layer in self._convolution:
                feats = layer(feats)
            if self._conv_batch_norm:
                feats = self._conv_batch_norm(feats)
            rnn_outputs = feats
            for layer in self._rnn:
                rnn_outputs = layer(rnn_outputs)
            outputs = self._fc(rnn_outputs)
            return tf.transpose(outputs, (1, 0, 2))

    @tf.function
    def train_step(self, specs, spec_lengths, labels, label_lengths):
        with tf.GradientTape() as tape:
            logits = self.__call__(specs)
            loss = tf.nn.ctc_loss(labels=labels, logits=logits,
                                  label_length=label_lengths, logit_length=spec_lengths)
            cost = tf.reduce_sum(loss)
            decoded, neg_sum_logits = tf.nn.ctc_greedy_decoder(logits, label_lengths)
        gradients = tape.gradient(cost, self.trainable_variables)
        self._optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        return (decoded[0].indices, decoded[0].values, decoded[0].dense_shape), cost
I am currently getting the following error:
ValueError: No gradients provided for any variable: ['DeepSpeech2/conv1d_0/kernel:0', 'DeepSpeech2/conv1d_0/bias:0', 'DeepSpeech2/bn_conv_1d/gamma:0', 'DeepSpeech2/bn_conv_1d/beta:0', 'DeepSpeech2/gru_0/kernel:0', 'DeepSpeech2/gru_0/recurrent_kernel:0', 'DeepSpeech2/gru_0/bias:0', 'DeepSpeech2/batch_normalization_v2/gamma:0', 'DeepSpeech2/batch_normalization_v2/beta:0', 'DeepSpeech2/time_distributed/kernel:0', 'DeepSpeech2/time_distributed/bias:0'].
The error occurs at the line where the gradients are applied to the optimizer. When I print out my gradients variable, it is just a list of None values.
From what I understand, this error indicates that there is no path from the variables to the loss in the graph, but I'm not sure why I am getting this. Any help would be greatly appreciated!
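For comparison, here is a minimal self-contained sketch (shapes and hyperparameters are invented for illustration, not taken from the question) in which tf.nn.ctc_loss does produce gradients, because the forward pass runs inside the tape and the gradients are taken with respect to trainable_variables:

# Toy example only: a single Dense layer stands in for the full model.
import tensorflow as tf

dense = tf.keras.layers.Dense(5)                        # vocab_size + blank = 5
specs = tf.random.normal((2, 7, 3))                     # (batch, time, feats)
labels = tf.constant([[1, 2, 0], [3, 1, 2]], tf.int32)  # dense labels, padded
label_lengths = tf.constant([2, 3], tf.int32)
logit_lengths = tf.constant([7, 7], tf.int32)

with tf.GradientTape() as tape:
    logits = tf.transpose(dense(specs), (1, 0, 2))      # time-major logits
    loss = tf.reduce_sum(tf.nn.ctc_loss(
        labels=labels, logits=logits,
        label_length=label_lengths, logit_length=logit_lengths,
        logits_time_major=True, blank_index=0))
grads = tape.gradient(loss, dense.trainable_variables)
print([g is not None for g in grads])                   # expect [True, True]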

Resources