TensorFlow 2.0 models can be written in the subclassing style, with layers defined in __init__ and the forward pass in call. For example:
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, Dense, Flatten

class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.conv1 = Conv2D(32, 3, activation='relu')
self.flatten = Flatten()
self.d1 = Dense(128, activation='relu')
self.d2 = Dense(10, activation='softmax')
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)
model = MyModel()
My question: what happens if I change the call method to
> def call(self, x):
> x = self.conv1(x)
> x = self.flatten(x)
> x = self.d1(x)
> return self.d2(x,activation='relu')
this raises an error, because a layer's __call__ does not accept an activation argument.
If I want to change an attribute like this partway through some process, how should I do it?
If you want to change the behavior of the forward pass depending on a condition, you can just add a parameter to the call method.
From your example, it looks like you want to change the activation function of the last layer. Thus, you can just define the last layer with a linear activation function, and apply the desired activation function depending on a condition.
import tensorflow as tf

class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.conv1 = tf.keras.layers.Conv2D(32, 3, activation='relu')
self.flatten = tf.keras.layers.Flatten()
self.d1 = tf.keras.layers.Dense(128, activation='relu')
# note: no activation = linear activation
self.d2 = tf.keras.layers.Dense(10)
# Create two activation layers
self.relu = tf.keras.layers.ReLU()
self.softmax = tf.keras.layers.Softmax()
def call(self, x, condition):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
x = self.d2(x)
# Change the activation depending on the condition
if condition:
tf.print("callign with activation=relu")
x = self.relu(x)
return self.softmax(x)
model = MyModel()
fake_input = tf.zeros((1, 28, 28, 1))
tf.print("condition false")
tf.print(model(fake_input, condition=False))
tf.print("condition true")
tf.print(model(fake_input, condition=True))
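If you would rather select the activation by name instead of by a boolean flag, tf.keras.activations.get resolves a string to the corresponding function. A minimal sketch under that assumption (the activation parameter and the class name here are illustrative, not from the original question):
import tensorflow as tf

class MyFlexibleModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.flatten = tf.keras.layers.Flatten()
        self.d1 = tf.keras.layers.Dense(128, activation='relu')
        self.d2 = tf.keras.layers.Dense(10)  # linear; activation applied in call

    def call(self, x, activation='softmax'):
        x = self.d1(self.flatten(x))
        x = self.d2(x)
        # Look up the activation function by name at call time
        return tf.keras.activations.get(activation)(x)

model = MyFlexibleModel()
out = model(tf.zeros((1, 28, 28, 1)), activation='relu')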
Related
I am trying to get better results by allowing a few final layers of a previously frozen backbone (RegNet-800MF) to be trained. How can I implement this in PyTorch Lightning? I am very new to ML, so please excuse me if I have left any important information out.
My model (MechClassifier) calls another class (ParametersClassifier), which includes the pre-trained RegNet as its frozen backbone. During training, the forward function passes inputs only through the backbone of the ParametersClassifier, not through the classifying layers. I will include the __init__ functions of both below.
My MechClassifier model:
class MechClassifier(pl.LightningModule):
def __init__(
self,
num_classes,
lr=4e-3,
weight_decay=1e-8,
gpus=1,
max_epochs=30,
):
super().__init__()
self.lr = lr
self.weight_decay = weight_decay
self.__dict__.update(locals())
self.backbone = ParametersClassifier.load_from_checkpoint(
checkpoint_path="checkpoints/param_classifier/last.ckpt",
num_classes=3,
gpus=1,
)
self.backbone.freeze()
self.backbone.eval()
self.mf_classifier = nn.Sequential(
nn.Linear(self.backbone.num_ftrs, 8),
nn.ReLU(),
nn.Linear(8, num_classes),
)
self.wd_classifier = nn.Sequential(
nn.Linear(self.backbone.num_ftrs, 8),
nn.ReLU(),
nn.Linear(8, num_classes),
)
def forward(self, x):
self.backbone.eval()
with torch.no_grad():
x = self.backbone.model(x)
# x = self.model(x)
out1 = self.mf_classifier(x)
out2 = self.wd_classifier(x)
# print(out1.size())
return (out1, out2)
ParametersClassifier (loaded from checkpoint):
class ParametersClassifier(pl.LightningModule):
def __init__(
self,
num_classes,
lr=4e-3,
weight_decay=0.05,
gpus=1,
max_epochs=30,
):
super().__init__()
self.lr = lr
self.weight_decay = weight_decay
self.__dict__.update(locals())
self.model = models.regnet_y_800mf(pretrained=True)
self.num_ftrs = self.model.fc.in_features
self.model.fc = nn.Identity()
self.fc1 = nn.Linear(self.num_ftrs, num_classes)
self.fc2 = nn.Linear(self.num_ftrs, num_classes)
self.fc3 = nn.Linear(self.num_ftrs, num_classes)
self.fc4 = nn.Linear(self.num_ftrs, num_classes)
def forward(self, x):
x = self.model(x)
out1 = self.fc1(x)
out2 = self.fc2(x)
out3 = self.fc3(x)
out4 = self.fc4(x)
return (out1, out2, out3, out4)
You can look at the implementation of the RegNet model you are using here. Its forward function:
def forward(self, x: Tensor) -> Tensor:
x = self.stem(x)
x = self.trunk_output(x)
x = self.avgpool(x)
x = x.flatten(start_dim=1)
x = self.fc(x)
return x
Instead of using a torch.no_grad context manager as you did, you should instead toggle requires_grad as needed. By default, module parameters have their requires_grad flag set to True, which means they can take part in gradient computation; if the flag is set to False, you can consider those components frozen.
Depending on which layers you want to freeze and which you want to finetune, you can do this manually. For example, to freeze the backbone and finetune only the last section of the RegNet's fourth block, replace the following in MechClassifier's __init__:
self.backbone.freeze()
self.backbone.eval()
With the following lines:
## freeze all
self.backbone.model.requires_grad_(False)
## unfreeze last section of 4th block of backbone
block4_section1 = getattr(self.backbone.model.trunk_output.block4, 'block4-1')
block4_section1.requires_grad_(True)
And perform inference on MechClassifier with a forward function like so:
def forward(self, x):
self.backbone.eval()
x = self.backbone.model(x)
out1 = self.mf_classifier(x)
out2 = self.wd_classifier(x)
return (out1, out2)
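As a sanity check (not part of the original answer), you can list which parameters will actually receive gradients after the change, assuming model is a MechClassifier instance:
# Count and list the parameters that remain trainable
for name, param in model.named_parameters():
    if param.requires_grad:
        print(name)
print(sum(p.numel() for p in model.parameters() if p.requires_grad), 'trainable parameters')

Only the unfrozen block4-1 parameters and the classifier heads should show up; everything else inside backbone.model stays frozen.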
I am very new to PyTorch and Python in general, and I am now struggling to get the encoded features from my pre-trained LSTM autoencoder, which can be seen below:
import torch
import torch.nn as nn
import numpy as np  # np.mean is used in the training loop below

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Building an LSTM autoencoder
class Encoder(nn.Module):
def __init__(self, seq_len, n_features, embedding_dim=32):
super(Encoder, self).__init__()
self.seq_len, self.n_features = seq_len, n_features
self.embedding_dim, self.hidden_dim1, self.hidden_dim2 = embedding_dim, 4 * embedding_dim, 2 * embedding_dim
self.rnn1 = nn.LSTM(
input_size=n_features,
hidden_size=self.hidden_dim1, #128
num_layers=1,
batch_first=True
)
self.rnn2 = nn.LSTM(
input_size=self.hidden_dim1,
hidden_size=self.hidden_dim2, #64
num_layers=1,
batch_first=True
)
self.rnn3 = nn.LSTM(
input_size=self.hidden_dim2,
hidden_size=embedding_dim, #32
num_layers=1,
batch_first=True
)
def forward(self, x):
x = x.reshape((1, self.seq_len, self.n_features))
x, (_, _) = self.rnn1(x)
x, (_, _) = self.rnn2(x)
x, (hidden_n, _) = self.rnn3(x)
return hidden_n.reshape((self.n_features, self.embedding_dim))
class Decoder(nn.Module):
def __init__(self, seq_len, input_dim=32, n_features=1):
super(Decoder, self).__init__()
self.seq_len, self.input_dim = seq_len, input_dim
self.hidden_dim2, self.hidden_dim1, self.n_features = 4 * input_dim, 2 * input_dim, n_features
self.rnn1 = nn.LSTM(
input_size=input_dim,
hidden_size=input_dim,
num_layers=1,
batch_first=True
)
self.rnn2 = nn.LSTM(
input_size=input_dim,
hidden_size=self.hidden_dim1,
num_layers=1,
batch_first=True
)
self.rnn3 = nn.LSTM(
input_size=self.hidden_dim1,
hidden_size=self.hidden_dim2,
num_layers=1,
batch_first=True
)
self.output_layer = nn.Linear(self.hidden_dim2, n_features)
def forward(self, x):
x = x.repeat(self.seq_len, self.n_features)
x = x.reshape((self.n_features, self.seq_len, self.input_dim))
x, (hidden_n, cell_n) = self.rnn1(x)
x, (hidden_n, cell_n) = self.rnn2(x)
x, (hidden_n, cell_n) = self.rnn3(x)
x = x.reshape((self.seq_len, self.hidden_dim2))
return self.output_layer(x)
class RAE(nn.Module):
def __init__(self,seq_len, n_features, embedding_dim=32):
super(RAE, self).__init__()
self.seq_len, self.n_features = seq_len, n_features
self.embedding_dim = embedding_dim
self.encoder = Encoder(seq_len, n_features, embedding_dim).to(device)
self.decoder = Decoder(seq_len, embedding_dim, n_features).to(device)
def forward(self,x):
x = self.encoder(x)
x = self.decoder(x)
return x
### TRAINING
def train_model(model, train_dataset, val_dataset, n_epochs):
optimizer = torch.optim.Adam(model.parameters(), lr = 1e-3)
criterion = nn.MSELoss(reduction='mean').to(device) # nn.L1Loss sum
history = dict(train = [], val = [])
for epoch in range(1, n_epochs + 1):
model = model.train()
train_losses = []
for seq_true in train_dataset:
optimizer.zero_grad()
seq_true = seq_true.to(device)
seq_pred = model(seq_true)
loss = criterion(seq_pred, seq_true)
loss.backward()
optimizer.step()
train_losses.append(loss.item())
val_losses = []
model = model.eval()
with torch.no_grad():
for seq_true in val_dataset:
seq_true = seq_true.to(device)
seq_pred = model(seq_true)
loss = criterion(seq_pred, seq_true)
val_losses.append(loss.item())
#add accuracy
train_loss = np.mean(train_losses)
val_loss = np.mean(val_losses)
history['train'].append(train_loss)
history['val'].append(val_loss)
print(f'Epoch {epoch}: train loss {train_loss} val loss {val_loss}')
return model.eval(), history
Once I had trained my model, I followed the advice given by ptrblck here and implemented it as follows:
activation = {}
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
model.encoder.register_forward_hook(get_activation('encoder'))
x = test_dataset_SR[1] # instead of using his random example I used one example from my training set
x = x.cuda()
output = model(x)
print(activation['encoder'])
but this gives me this error:
2 def get_activation(name):
3 def hook(model, input, output):
----> 4 activation[name] = output.detach()
5 return hook
AttributeError: 'tuple' object has no attribute 'detach'
Can you please help me solve this issue? I want to take these encoded features, store them, and use them as input to another network. I know I could probably train the encoder separately (not sure), but I will need both the encoder and the decoder, so I thought hooks would be my salvation.
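For reference, the error means the hooked module returned a tuple rather than a single tensor, so there is nothing to .detach() directly. A minimal fix is a hook that unpacks tuples first (a sketch; it assumes you want the first element, which for an nn.LSTM is the output sequence):
def get_activation(name):
    def hook(model, input, output):
        # Modules like nn.LSTM return (output, (h_n, c_n)); grab the tensor part
        if isinstance(output, tuple):
            output = output[0]
        activation[name] = output.detach()
    return hook

Also note that since RAE.forward calls self.encoder(x) and Encoder.forward already returns the embedding, you could skip hooks entirely and just evaluate model.encoder(x) under torch.no_grad() to get the encoded features.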
I want to train my dataset on BERT and modify the following architecture to use an LSTM.
The full code is here: https://www.geeksforgeeks.org/fine-tuning-bert-model-for-sentiment-analysis/
class BERT_architecture(nn.Module):
def __init__(self, bert):
super(BERT_architecture, self).__init__()
self.bert = bert
# dropout layer
self.dropout = nn.Dropout(0.2)
# relu activation function
self.relu = nn.ReLU()
# dense layer 1
self.fc1 = nn.Linear(768,512)
# dense layer 2 (Output layer)
self.fc2 = nn.Linear(512,2)
#softmax activation function
self.softmax = nn.LogSoftmax(dim=1)
#define the forward pass
def forward(self, sent_id, mask):
#pass the inputs to the model
_, cls_hs = self.bert(sent_id, attention_mask=mask, return_dict=False)
x = self.fc1(cls_hs)
x = self.relu(x)
x = self.dropout(x)
# output layer
x = self.fc2(x)
# apply softmax activation
x = self.softmax(x)
return x
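No answer was recorded for this one, but a common direction is to run an nn.LSTM over BERT's token-level hidden states instead of classifying the pooled [CLS] vector alone. A rough sketch under that assumption (the LSTM size 256 and the bidirectional choice are illustrative; the return_dict=False unpacking mirrors the question's code, and the same torch / nn imports from the linked article are assumed):
class BERT_LSTM_architecture(nn.Module):
    def __init__(self, bert):
        super().__init__()
        self.bert = bert
        # LSTM over the sequence of hidden states (768 = BERT hidden size)
        self.lstm = nn.LSTM(768, 256, batch_first=True, bidirectional=True)
        self.dropout = nn.Dropout(0.2)
        self.fc = nn.Linear(2 * 256, 2)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, sent_id, mask):
        # With return_dict=False the model returns (sequence_output, pooled_output)
        seq_out, _ = self.bert(sent_id, attention_mask=mask, return_dict=False)
        _, (h_n, _) = self.lstm(seq_out)
        # Concatenate the last forward and backward hidden states
        x = torch.cat((h_n[-2], h_n[-1]), dim=1)
        x = self.dropout(x)
        return self.softmax(self.fc(x))

Since the forward signature is unchanged, the training loop from the linked article should work as-is.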
I have a model written in PyTorch. Since my dataset is small, I can load all of the data onto the GPU directly. However, I found that the forward pass becomes slow if I do so. The following is a runnable example. Specifically, I have the model:
import numpy as np
from time import time
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
def knn(x, k):
inner = -2*torch.matmul(x.transpose(2, 1), x)
xx = torch.sum(x**2, dim=1, keepdim=True)
pairwise_distance = -xx - inner - xx.transpose(2, 1)
idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)
return idx
def get_graph_feature(x, k=20, idx=None):
batch_size = x.size(0)
num_points = x.size(2)
x = x.view(batch_size, -1, num_points)
if idx is None:
idx = knn(x, k=k) # (batch_size, num_points, k)
idx_base = torch.arange(0, batch_size, device=x.device).view(-1, 1, 1)*num_points
idx = idx + idx_base
idx = idx.view(-1)
_, num_dims, _ = x.size()
x = x.transpose(2, 1).contiguous()  # (batch_size, num_points, num_dims)
feature = x.view(batch_size*num_points, -1)[idx, :]
feature = feature.view(batch_size, num_points, k, num_dims)
x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
feature = torch.cat((feature-x, x), dim=3).permute(0, 3, 1, 2).contiguous()
return feature
class DGCNN(nn.Module):
def __init__(self, k=25, output_channels=10):
super(DGCNN, self).__init__()
self.k = k
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(256)
self.bn5 = nn.BatchNorm1d(1024)
self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False),
self.bn1,
nn.LeakyReLU(negative_slope=0.2))
self.conv2 = nn.Sequential(nn.Conv2d(64*2, 64, kernel_size=1, bias=False),
self.bn2,
nn.LeakyReLU(negative_slope=0.2))
self.conv3 = nn.Sequential(nn.Conv2d(64*2, 128, kernel_size=1, bias=False),
self.bn3,
nn.LeakyReLU(negative_slope=0.2))
self.conv4 = nn.Sequential(nn.Conv2d(128*2, 256, kernel_size=1, bias=False),
self.bn4,
nn.LeakyReLU(negative_slope=0.2))
self.conv5 = nn.Sequential(nn.Conv1d(512, 1024, kernel_size=1, bias=False),
self.bn5,
nn.LeakyReLU(negative_slope=0.2))
self.linear1 = nn.Linear(1024*2, 512, bias=False)
self.bn6 = nn.BatchNorm1d(512)
self.dp1 = nn.Dropout()
self.linear2 = nn.Linear(512, 256)
self.bn7 = nn.BatchNorm1d(256)
self.dp2 = nn.Dropout()
self.linear3 = nn.Linear(256, output_channels)
def forward(self, x):
x = x.transpose(2, 1)
batch_size = x.size(0)
x = get_graph_feature(x, k=self.k)
x = self.conv1(x)
x1 = x.max(dim=-1, keepdim=False)[0]
x = get_graph_feature(x1, k=self.k)
x = self.conv2(x)
x2 = x.max(dim=-1, keepdim=False)[0]
x = get_graph_feature(x2, k=self.k)
x = self.conv3(x)
x3 = x.max(dim=-1, keepdim=False)[0]
x = get_graph_feature(x3, k=self.k)
x = self.conv4(x)
x4 = x.max(dim=-1, keepdim=False)[0]
x = torch.cat((x1, x2, x3, x4), dim=1)
x = self.conv5(x)
x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
x = torch.cat((x1, x2), 1)
x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
x = self.dp1(x)
x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)
x = self.dp2(x)
x = self.linear3(x)
return x
Here is what the dataloader and test function looks like:
class my_loader(Dataset):
def __init__(self, device):
self.data = torch.rand(256, 2048, 3).to(device).float()
self.labels = torch.rand(256).to(device).long()
def __getitem__(self, ind):
return self.data[ind], self.labels[ind]
def __len__(self):
return len(self.data)
def test():
device = torch.device('cuda:2')
test_set = my_loader(device)
test_loader = DataLoader(test_set, batch_size=16, shuffle=True, num_workers=0)
model = DGCNN().to(device)
model.eval()
#---------- this one is 0.12s --------------#
for inputs, labels in test_loader:
tic = time()
pred = model(inputs)
print('time1 {}'.format(time() - tic))
print('------------------')
#---------- this one is 0.004s --------------#
for inputs, labels in test_loader:
inputs = inputs.detach().cpu().to(device)
tic = time()
pred = model(inputs)
print('time2 {}'.format(time() - tic))
print('------------------')
#---------- this one is 0.12s --------------#
for inputs, labels in test_loader:
tic = time()
inputs = inputs.detach().cpu().to(device)
pred = model(inputs)
print('time3 {}'.format(time() - tic))
print('------------------')
Basically, it seems that if there is no explicit GPU-to-CPU transfer either before or after the forward pass, the forward pass costs more time, as if it were implicitly performing a GPU-to-CPU transfer itself.
I played around with the code a little bit, and I think the problem is that you are measuring times for both cases in the same run. Here is my boiled-down version of your code, since your model exhausted my GPU memory:
class DGCNN(nn.Module):
def __init__(self, num_layers=1200):  # default so DGCNN() below works without arguments
super(DGCNN, self).__init__()
self.layers = nn.ModuleList([nn.Linear(256, 256) for _ in range(num_layers)])
def forward(self, x):
x = x.view(-1, 256)
for layer in self.layers:
x = layer(x)
return x
class my_loader(Dataset):
def __init__(self, device):
self.data = torch.rand(256, 2048, 3).to(device).float()
self.labels = torch.rand(256).to(device).long()
def __getitem__(self, ind):
return self.data[ind], self.labels[ind]
def __len__(self):
return len(self.data)
Now, here I demonstrate different versions of test().
Version #1:
def test():
device = torch.device('cuda:0')
test_set = my_loader(device)
test_loader = DataLoader(test_set, batch_size=16, shuffle=True, num_workers=0)
model = DGCNN().to(device)
model.eval()
#---------- this one is 0.12s --------------#
tic = time()
for inputs, labels in test_loader:
pred = model(inputs)
tac = time()
print(f'# First case -> Full forward pass: {tac - tic:.6f}')
#---------- this one is 0.004s --------------#
tic = time()
for inputs, labels in test_loader:
pred = model(inputs.detach().cpu().to(device))
tac = time()
print(f'# Second case -> Full forward pass: {tac - tic:.6f}')
>>> # First case -> Full forward pass: 3.105103, # Second case -> Full forward pass: 2.831652
Now I switched the order of timing calculations for the cases. Version #2:
def test():
device = torch.device('cuda:0')
test_set = my_loader(device)
test_loader = DataLoader(test_set, batch_size=16, shuffle=True, num_workers=0)
model = DGCNN().to(device)
model.eval()
#---------- this one is 0.004s --------------#
tic = time()
for inputs, labels in test_loader:
pred = model(inputs.detach().cpu().to(device))
tac = time()
print(f'# Second case -> Full forward pass: {tac - tic:.6f}')
#---------- this one is 0.12s --------------#
tic = time()
for inputs, labels in test_loader:
pred = model(inputs)
tac = time()
print(f'# First case -> Full forward pass: {tac - tic:.6f}')
>>> # Second case -> Full forward pass: 3.288522, # First case -> Full forward pass: 2.583231
Apparently, whichever case is timed first ends up slower. So, I measured the timings separately, in different runs with fresh kernels. Version #3:
def test():
device = torch.device('cuda:0')
test_set = my_loader(device)
test_loader = DataLoader(test_set, batch_size=16, shuffle=True, num_workers=0)
model = DGCNN().to(device)
model.eval()
#---------- this one is 0.12s --------------#
tic = time()
for inputs, labels in test_loader:
pred = model(inputs)
tac = time()
print(f'# First case -> Full forward pass: {tac - tic:.6f}')
>>> # First case -> Full forward pass: 3.091592
Version #4:
def test():
device = torch.device('cuda:0')
test_set = my_loader(device)
test_loader = DataLoader(test_set, batch_size=16, shuffle=True, num_workers=0)
model = DGCNN().to(device)
model.eval()
#---------- this one is 0.004s --------------#
tic = time()
for inputs, labels in test_loader:
pred = model(inputs.detach().cpu().to(device))
tac = time()
print(f'# Second case -> Full forward pass: {tac - tic:.6f}')
>>> # Second case -> Full forward pass: 3.190248
So, testing one case at a time, it seems that pred = model(inputs) runs slightly faster than pred = model(inputs.detach().cpu().to(device)), which is the expected result.
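One caveat worth adding on top of this: CUDA kernels launch asynchronously, so time() can return before the GPU has actually finished the queued work, and the .cpu() call in the second case forces a synchronization that shifts where the time gets accounted. For per-batch timings like the original ones, synchronize before reading the clock (a sketch, assuming a CUDA device):
for inputs, labels in test_loader:
    torch.cuda.synchronize()  # make sure previously queued work is done
    tic = time()
    pred = model(inputs)
    torch.cuda.synchronize()  # wait for this forward pass to finish
    print('forward time {:.6f}'.format(time() - tic))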
For a PyTorch module, I suppose I could use .named_children, .named_modules, etc. to obtain a list of the submodules. However, the list does not seem to be given in the order the submodules are actually used, right? An example:
In [19]: import transformers
In [20]: model = transformers.DistilBertForSequenceClassification.from_pretrained('distilbert-base-cased')
In [21]: [name for name, _ in model.named_children()]
Out[21]: ['distilbert', 'pre_classifier', 'classifier', 'dropout']
The order of .named_children() in the above model is given as distilbert, pre_classifier, classifier, and dropout. However, if you examine the code, it is evident that dropout is applied before classifier in the forward pass. So how do I get the order in which these submodules are actually used?
In PyTorch, the results of print(model), .named_children(), etc. are listed in the order the layers are declared in the __init__ of the model's class, e.g.
Case 1
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
self.conv2_drop = nn.Dropout2d()
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.6)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
model = Model()
print(model)
[name for name, _ in model.named_children()]
# output
['conv1', 'conv2', 'fc1', 'fc2', 'conv2_drop']
Case 2
The order of the fc1 and fc2 layers in the constructor is swapped.
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.fc2 = nn.Linear(50, 10)
self.fc1 = nn.Linear(320, 50)
self.conv2_drop = nn.Dropout2d()
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.6)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
model = Model()
print(model)
[name for name, _ in model.named_children()]
# output
['conv1', 'conv2', 'fc2', 'fc1', 'conv2_drop']
That's why classifier is printed before dropout: it is declared earlier in the constructor:
class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
...
self.distilbert = DistilBertModel(config)
self.pre_classifier = nn.Linear(config.dim, config.dim)
self.classifier = nn.Linear(config.dim, config.num_labels)
self.dropout = nn.Dropout(config.seq_classif_dropout)
Nevertheless, you can play with model's submodules using .modules(), etc. but they'll be listed only in the order they are declared in __init__. If you only want to print structure based on forward method, you may try using pytorch-summary.