PyTorch network with variable number of hidden layers - pytorch

I want to create a class that creates a simple network with X fully connected layers, where X is an input given by the user. I tried this using setattr/getattr, but for some reason it is not working.
class MLP(nn.Module):
    def __init__(self, in_size, out_size, n_layers, hidden_size):
        super(MLP, self).__init__()
        self.n_layers = n_layers
        for i in range(n_layers):
            if i == 0:
                layer_in_size = in_size
            else:
                layer_in_size = hidden_size
            if i == (n_layers - 1):
                layer_out_size = out_size
            else:
                layer_out_size = hidden_size
            setattr(self, 'dense_{}'.format(i), nn.Linear(layer_in_size, layer_out_size))

    def forward(self, x):
        out = x
        for i in range(self.n_layers):
            if i == (self.n_layers - 1):
                out = getattr(self, 'dense_{}'.format(i), out)
            else:
                out = F.relu(getattr(self, 'dense_{}'.format(i), out))
        return out
This is the error I'm getting when trying a forward pass with the net (posted as a screenshot; the traceback is not reproduced here). Any insight into what the issue is would be helpful.

This seems like a problem with the forward implementation around the mod2 function. Try the PyTorch functions (torch.fmod and torch.remainder), or, if you don't need backprop, call .detach() before the mod2 operation.
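As a side note on the question's code: judging from the code as posted, getattr(self, 'dense_{}'.format(i), out) passes out as getattr's default value and never calls the layer, so out ends up bound to the nn.Linear module itself rather than a tensor; the layer needs to be retrieved first and then called. A minimal sketch of the same variable-depth MLP built on nn.ModuleList, which avoids setattr/getattr altogether:

import torch
import torch.nn as nn
import torch.nn.functional as F

class MLP(nn.Module):
    def __init__(self, in_size, out_size, n_layers, hidden_size):
        super().__init__()
        # in_size -> hidden_size * (n_layers - 1) -> out_size
        sizes = [in_size] + [hidden_size] * (n_layers - 1) + [out_size]
        # nn.ModuleList registers each layer, so .parameters() and .to() see them
        self.layers = nn.ModuleList(
            [nn.Linear(s_in, s_out) for s_in, s_out in zip(sizes, sizes[1:])]
        )

    def forward(self, x):
        out = x
        for layer in self.layers[:-1]:
            out = F.relu(layer(out))
        return self.layers[-1](out)  # no activation on the final layer

net = MLP(in_size=16, out_size=3, n_layers=4, hidden_size=32)
print(net(torch.randn(8, 16)).shape)  # torch.Size([8, 3])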

Related

How to save the register_backward_hook variable?

I am trying to put backward hooks in my code, and getting the gradient of a specific layer is working. However, I can't seem to save the variable to my dictionary. Does anyone know how to do this?
tcav = {}

def backward_hook(module, grad_input, grad_output):
    # print('module:', module)
    # print('grad_input:', grad_input)
    # print('grad_output:', tuple_of_tensors_to_tensor(grad_output)[0].shape)
    return grad_output

for name, layer in model.named_modules():
    if name.startswith(layer_name):
        print("Hook made for...", name)
        layer.register_full_backward_hook(backward_hook)

model.eval()
tcav = {}
for ind, (img, label) in enumerate(loader):
    img.requires_grad = True
    img = img.to(device).float()
    output = model(img)
    output.mean().backward()
    tcav[ind] = output.grad
return tcav
Here, output.grad seems to be None for all images.
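For what it's worth, in the snippet above the hook returns grad_output but never writes it into tcav, and output.grad is None because output is a non-leaf tensor (gradients are only populated on leaf tensors by default). A minimal sketch, assuming model, loader, device, and layer_name exist as above, that captures grad_output into the dictionary from inside the hook:

tcav = {}

def make_hook(name):
    # closure, so each hook knows which layer it belongs to
    def backward_hook(module, grad_input, grad_output):
        tcav[name] = grad_output[0].detach()  # grad_output is a tuple of tensors
    return backward_hook

for name, layer in model.named_modules():
    if name.startswith(layer_name):
        layer.register_full_backward_hook(make_hook(name))

model.eval()
for ind, (img, label) in enumerate(loader):
    img = img.to(device).float()
    img.requires_grad = True  # set after .to(), so this tensor is a leaf
    output = model(img)
    output.mean().backward()  # fires the hooks, filling tcav
    break  # one batch is enough for the sketch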

Reset parameters of a neural network in pytorch

I have a neural network with the following structure:
class myNetwork(nn.Module):
    def __init__(self):
        super(myNetwork, self).__init__()
        self.bigru = nn.GRU(input_size=2, hidden_size=100, batch_first=True, bidirectional=True)
        self.fc1 = nn.Linear(200, 32)
        torch.nn.init.xavier_uniform_(self.fc1.weight)
        self.fc2 = nn.Linear(32, 2)
        torch.nn.init.xavier_uniform_(self.fc2.weight)
I need to restore the model to an untrained state by resetting the parameters of the neural network. I can do so for nn.Linear layers by using the method below:
def reset_weights(self):
    torch.nn.init.xavier_uniform_(self.fc1.weight)
    torch.nn.init.xavier_uniform_(self.fc2.weight)
But I could not find any such snippet for resetting the weights of the nn.GRU layer.
My question is how does one reset the nn.GRU layer? Any other way of resetting the network is also fine. Any help is appreciated.
You can use the reset_parameters method on the layer, as given here:
for layer in model.children():
    if hasattr(layer, 'reset_parameters'):
        layer.reset_parameters()
Another way would be to save the model first and then reload the module state, using torch.save and torch.load; see the docs for more, or the Saving and Loading Models tutorial.
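A minimal sketch of that save/reload approach, assuming the myNetwork class above (the file name is illustrative): snapshot the state right after construction and load it back whenever you want to reset.

import copy
import torch

model = myNetwork()
initial_state = copy.deepcopy(model.state_dict())  # snapshot before training

# ... train ...

model.load_state_dict(initial_state)  # back to the initial (unlearned) weights
# or via disk:
# torch.save(model.state_dict(), 'init.pt')
# model.load_state_dict(torch.load('init.pt'))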
Here is the code with an example that runs:
import torch
from torch import nn, Tensor

def lp_norm(mdl: nn.Module, p: int = 2) -> Tensor:
    lp_norms = [w.norm(p) for name, w in mdl.named_parameters()]
    return sum(lp_norms)

def reset_all_weights(model: nn.Module) -> None:
    """
    refs:
    - https://discuss.pytorch.org/t/how-to-re-set-alll-parameters-in-a-network/20819/6
    - https://stackoverflow.com/questions/63627997/reset-parameters-of-a-neural-network-in-pytorch
    - https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    """

    @torch.no_grad()
    def weight_reset(m: nn.Module):
        # - check if the current module has reset_parameters & if it's callable, call it on m
        reset_parameters = getattr(m, "reset_parameters", None)
        if callable(reset_parameters):
            m.reset_parameters()

    # Applies fn recursively to every submodule, see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    model.apply(fn=weight_reset)

def reset_all_linear_layer_weights(model: nn.Module) -> nn.Module:
    """
    Resets all weights recursively for linear layers.
    ref:
    - https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    """

    @torch.no_grad()
    def init_weights(m):
        if type(m) == nn.Linear:
            m.weight.fill_(1.0)

    # Applies fn recursively to every submodule, see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    model.apply(init_weights)

def reset_all_weights_with_specific_layer_type(model: nn.Module, modules_type2reset) -> nn.Module:
    """
    Resets all weights recursively for layers of the given type.
    ref:
    - https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    """

    @torch.no_grad()
    def init_weights(m):
        if type(m) == modules_type2reset:
            # if type(m) == torch.nn.BatchNorm2d:
            #     m.weight.fill_(1.0)
            m.reset_parameters()

    # Applies fn recursively to every submodule, see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    model.apply(init_weights)
# -- tests
def reset_params_test():
    import torchvision.models as models
    from uutils.torch_uu import lp_norm

    resnet18 = models.resnet18(pretrained=True)
    resnet18_random = models.resnet18(pretrained=False)

    print(f'{lp_norm(resnet18)=}')
    print(f'{lp_norm(resnet18_random)=}')
    print(f'{lp_norm(resnet18)=}')
    reset_all_weights(resnet18)
    print(f'{lp_norm(resnet18)=}')

if __name__ == '__main__':
    reset_params_test()
    print('Done! \a\n')
output:
lp_norm(resnet18)=tensor(517.5472, grad_fn=<AddBackward0>)
lp_norm(resnet18_random)=tensor(668.3687, grad_fn=<AddBackward0>)
lp_norm(resnet18)=tensor(517.5472, grad_fn=<AddBackward0>)
lp_norm(resnet18)=tensor(476.0836, grad_fn=<AddBackward0>)
Done!
I am assuming this works because I calculated the norm twice for the pre-trained net, and it was the same both times before calling reset.
Though I must admit I was unhappy it wasn't closer to the norm of the random net, I think this is good enough.
same: https://discuss.pytorch.org/t/how-to-re-set-alll-parameters-in-a-network/20819/11
New to pytorch, I wonder if this could be a solution :)
Suppose Model inherits from torch.nn.Module.
To reset it to zeros:
dic = Model.state_dict()
for k in dic:
    dic[k] *= 0
Model.load_state_dict(dic)
del(dic)
To reset it randomly:
dic = Model.state_dict()
for k in dic:
    dic[k] = torch.randn(dic[k].size())
Model.load_state_dict(dic)
del(dic)
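One caveat with the random variant: torch.randn(dic[k].size()) creates CPU float tensors regardless of each parameter's device and dtype, and it ignores layer-appropriate initialization schemes (which reset_parameters above respects). A small variant using torch.randn_like keeps device and dtype:

dic = Model.state_dict()
for k in dic:
    dic[k] = torch.randn_like(dic[k])  # same shape, device, and dtype
Model.load_state_dict(dic)
del(dic)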

Can't set the attribute "trainable_weights", likely because it conflicts with an existing read-only @property

My code was running perfectly in Colab, but today it's not running. It says:
Can't set the attribute "trainable_weights", likely because it conflicts with an existing read-only @property of the object. Please choose a different name.
I am using LSTM with the attention layer.
from keras import backend as K, initializers
from keras.layers import Layer

class Attention(Layer):
    def __init__(self, **kwargs):
        self.init = initializers.get('normal')
        # self.input_spec = [InputSpec(ndim=3)]
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 3
        # self.W = self.init((input_shape[-1], 1))
        self.W = self.init((input_shape[-1],))
        # self.input_spec = [InputSpec(shape=input_shape)]
        self.trainable_weights = [self.W]
        super(Attention, self).build(input_shape)  # be sure you call this somewhere!

    def call(self, x, mask=None):
        eij = K.tanh(K.dot(x, self.W))
        ai = K.exp(eij)
        weights = ai / K.sum(ai, axis=1).dimshuffle(0, 'x')
        weighted_input = x * weights.dimshuffle(0, 1, 'x')
        return weighted_input.sum(axis=1)

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], input_shape[-1])
I am not sure what happened suddenly. Has anyone encountered a similar problem?
Change
    self.trainable_weights = [self.W]
to
    self._trainable_weights = [self.W]
In recent TF/Keras versions, trainable_weights is exposed as a read-only @property on Layer, so it can no longer be assigned directly; the underscore-prefixed attribute is the backing field the property reads from.
This is an ongoing issue with TF in Colab; there is a link to it here.
Looks like the issue got closed; maybe it's time to reopen it.
Please remove the build function and use this instead; it worked for me:
def build(self, input_shape):
    self.W = self.add_weight(name="att_weight", shape=(input_shape[-1], 1), initializer="normal", trainable=True)
    self.b = self.add_weight(name="att_bias", shape=(self.attention_dim,), initializer="normal", trainable=True)
    self.u = self.add_weight(name="u_bias", shape=(self.attention_dim, 1), initializer="normal", trainable=True)
    super(Attention, self).build(input_shape)
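Note that this build references self.attention_dim, which the original Attention class above never sets, so it has to be defined in __init__; a minimal, assumed sketch (the default value of 50 is illustrative):

def __init__(self, attention_dim=50, **kwargs):
    self.attention_dim = attention_dim  # dimension assumed by build() above
    super(Attention, self).__init__(**kwargs)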

PyTorch dynamic number of layers?

I am trying to specify a dynamic number of layers, which I seem to be doing wrong.
My issue is that when I define the 100 layers here, I get an error in the forward step, but when I define a layer directly as a regular attribute it works.
Below is a simplified example:
class PredictFromEmbeddParaSmall(LightningModule):
    def __init__(self, hyperparams={'lr': 0.0001}):
        super(PredictFromEmbeddParaSmall, self).__init__()
        # Input is something like tensor.size=[768*100]
        self.TO_ILLUSTRATE = nn.Linear(768, 5)
        self.enc_red = []
        for i in range(100):
            self.enc_red.append(nn.Linear(768, 5))
        # gather the layers output sth
        self.dense_simple1 = nn.Linear(5*100, 2)
        self.output = nn.Sigmoid()

    def forward(self, x):
        # first input to enc_red
        x_vecs = []
        for i in range(self.para_count):
            layer = self.enc_red[i]
            # The first dim is the batch size here, output is correct
            processed_slice = x[:, i * 768:(i + 1) * 768]
            # This works and gives the output of size 5
            rand = self.TO_ILLUSTRATE(processed_slice)
            # This will fail? Error below
            ret = layer(processed_slice)
            # more things happening we can ignore right now since we fail earlier
I get this error when executing "ret = layer.forward(processed_slice)"
RuntimeError: Expected object of device type cuda but got device type
cpu for argument #1 'self' in call to _th_addmm
Is there a smarter way to program this, or a way to solve the error?
You should use a ModuleList from PyTorch instead of a plain list: https://pytorch.org/docs/master/generated/torch.nn.ModuleList.html. That is because PyTorch has to register every module of your model; if you just add them to a plain list they are not registered as submodules, so calls like .to(device) and .parameters() don't see them, which results in the error you faced.
Your code should be something like:
class PredictFromEmbeddParaSmall(LightningModule):
    def __init__(self, hyperparams={'lr': 0.0001}):
        super(PredictFromEmbeddParaSmall, self).__init__()
        # Input is something like tensor.size=[768*100]
        self.TO_ILLUSTRATE = nn.Linear(768, 5)
        self.enc_red = nn.ModuleList()  # << MODIFIED LINE <<
        for i in range(100):
            self.enc_red.append(nn.Linear(768, 5))
        # gather the layers output sth
        self.dense_simple1 = nn.Linear(5*100, 2)
        self.output = nn.Sigmoid()

    def forward(self, x):
        # first input to enc_red
        x_vecs = []
        for i in range(self.para_count):
            layer = self.enc_red[i]
            # The first dim is the batch size here, output is correct
            processed_slice = x[:, i * 768:(i + 1) * 768]
            # This works and gives the output of size 5
            rand = self.TO_ILLUSTRATE(processed_slice)
            # This now works as well
            ret = layer(processed_slice)
            # more things happening we can ignore right now
Then it should work all right!
Edit: an alternative way.
Instead of using ModuleList you can also just use nn.Sequential; this allows you to avoid the for loop in the forward pass. It also means that you will not have access to intermediate activations, so this is not the solution for you if you need them.
class PredictFromEmbeddParaSmall(LightningModule):
    def __init__(self, hyperparams={'lr': 0.0001}):
        super(PredictFromEmbeddParaSmall, self).__init__()
        # Input is something like tensor.size=[768*100]
        self.TO_ILLUSTRATE = nn.Linear(768, 5)
        self.enc_red = []
        for i in range(100):
            self.enc_red.append(nn.Linear(768, 5))
        self.enc_red = nn.Sequential(*self.enc_red)  # << MODIFIED LINE <<
        # gather the layers output sth
        self.dense_simple1 = nn.Linear(5*100, 2)
        self.output = nn.Sigmoid()

    def forward(self, x):
        # first input to enc_red
        x_vecs = []
        out = self.enc_red(x)  # << MODIFIED LINE <<
A slightly more adjustable solution, which comes down to a matter of taste or the complexity of your exact situation, was posted here.
For reference I post an adjusted version of the code here:
import torch
from torch import nn, optim
from torch.nn.modules import Module

class Model(nn.Module):
    def __init__(self, input_size, layers_data: list, learning_rate=0.01, optimizer=optim.Adam):
        super().__init__()
        self.layers = nn.ModuleList()
        self.input_size = input_size  # Can be useful later ...
        for size, activation in layers_data:
            self.layers.append(nn.Linear(input_size, size))
            input_size = size  # For the next layer
            if activation is not None:
                assert isinstance(activation, Module), \
                    "Each tuple should contain a size (int) and a torch.nn.modules.Module."
                self.layers.append(activation)
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.learning_rate = learning_rate
        self.optimizer = optimizer(params=self.parameters(), lr=learning_rate)

    def forward(self, input_data):
        for layer in self.layers:
            input_data = layer(input_data)
        return input_data

# test that the net is working properly
if __name__ == "__main__":
    data_size = 5
    layer1, layer2 = 10, 10
    output_size = 2
    data = torch.randn(data_size)
    mlp = Model(data_size, [(layer1, nn.ReLU()), (layer2, nn.ReLU()), (output_size, nn.Sigmoid())])
    output = mlp(data)
    print("done")

ValueError: optimizer got an empty parameter list

I create the following simple linear class:
class Decoder(nn.Module):
    def __init__(self, K, h=()):
        super().__init__()
        h = (K,) + h + (K,)
        self.layers = [nn.Linear(h1, h2) for h1, h2 in zip(h, h[1:])]

    def forward(self, x):
        for layer in self.layers[:-1]:
            x = F.relu(layer(x))
        return self.layers[-1](x)
However, when I try to put the parameters in a optimizer class I get the error ValueError: optimizer got an empty parameter list.
decoder = Decoder(4)
LR = 1e-3
opt = optim.Adam(decoder.parameters(), lr=LR)
Is there something I'm doing obviously wrong with the class definition?
Since you store your layers in a regular Pythonic list inside your Decoder, PyTorch has no way of telling that the members of this list are actually submodules. Convert this list into PyTorch's nn.ModuleList and your problem will be solved:
class Decoder(nn.Module):
    def __init__(self, K, h=()):
        super().__init__()
        h = (K,) + h + (K,)
        self.layers = nn.ModuleList(nn.Linear(h1, h2) for h1, h2 in zip(h, h[1:]))
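A quick sanity check, assuming the imports from the question (torch, nn, F, optim): after the ModuleList change, parameters() is no longer empty and the optimizer constructs fine.

decoder = Decoder(4)
print(len(list(decoder.parameters())))  # 2 (one weight, one bias), no longer empty
opt = optim.Adam(decoder.parameters(), lr=1e-3)
print(decoder(torch.randn(2, 4)).shape)  # torch.Size([2, 4])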
