Initialize weights in a PyTorch neural net

I have created this neural net:
class _netD(nn.Module):
    def __init__(self, num_classes=1, nc=1, ndf=64):
        super(_netD, self).__init__()
        self.num_classes = num_classes
        # nc is the number of input channels
        # num_classes is the number of classes
        # ndf is the number of output channels of the first layer
        self.main = nn.Sequential(
            # input is (nc) x 28 x 28
            # Conv2d(in_channels, out_channels, kernel_size, stride, padding)
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 14 x 14
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 7 x 7
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 3 x 3
            nn.Conv2d(ndf * 4, ndf * 8, 3, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 2 x 2
            nn.Conv2d(ndf * 8, num_classes, 2, 1, 0, bias=False),
            # output size: batch x num_classes x 1 x 1
        )
        if self.num_classes == 1:
            self.main.add_module('prob', nn.Sigmoid())
            # output = probability
        else:
            pass
            # output = scores

    def forward(self, input):
        output = self.main(input)
        return output.view(input.size(0), self.num_classes).squeeze(1)
I want to loop through the different layers and apply a weight initialization depending on the type of layer. I am trying to do the following:
D = _netD()
for name, param in D.named_parameters():
    if type(param) == nn.Conv2d:
        param.weight.normal_(...)
But that is not working. Can you please help me?
Thanks

type(param) will only ever return torch.nn.Parameter, the datatype used for every weight and bias in the model, regardless of which layer it came from. And since named_parameters() doesn't return anything useful in the name either when used on an nn.Sequential-based model, you need to look at the modules instead and check which layers belong to the nn.Conv2d class using isinstance, as such:
for layer in D.modules():
    if isinstance(layer, nn.Conv2d):
        layer.weight.data.normal_(...)
Or, the way that is recommended by Soumith Chintala himself: just loop through your main module directly (iterating an nn.Sequential yields its layers):
for layer in D.main:
    if isinstance(layer, nn.Conv2d):
        layer.weight.data.normal_(...)
I actually prefer the first approach, because you don't have to name the exact nn.Sequential module and it searches every module in the model, but either one should do the job for you.
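For completeness, here is a minimal self-contained sketch of this pattern using Module.apply, which visits every submodule recursively. The (0.0, 0.02) normal parameters are an assumption (the usual DCGAN-style init); substitute whatever initialization you need:
import torch.nn as nn

def weights_init(m):
    # called once for every submodule by Module.apply
    if isinstance(m, nn.Conv2d):
        nn.init.normal_(m.weight, 0.0, 0.02)  # assumed mean/std, DCGAN-style
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.normal_(m.weight, 1.0, 0.02)
        nn.init.zeros_(m.bias)

D = _netD()
D.apply(weights_init)  # recursively applies weights_init to D and all submodules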

Related

Change the layer format to a different image resolution

There is a class where everything is set up for a 32x32 image format, taken from here:
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)  # here I changed the image channels from 3 to 1
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 64, 5)
        self.fc1 = nn.Linear(64 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 22)  # here I changed the number of output neurons from 10 to 22

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = torch.flatten(x, 1)  # flatten all dimensions except batch
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
How do I change all this for a resolution of 96x96, with 1 channel (grayscale)?
At resolution 32x32, the output of conv2 (after the second pooling) is shaped (1, 64, 5, 5). If the input is at resolution 96x96 instead, it will be (1, 64, 21, 21). This means fc1 needs 64 * 21 * 21 = 28,224 input neurons.
>>> self.fc1 = nn.Linear(64 * 21 * 21, 120)
Alternatively, you can use nn.LazyLinear, which will infer this number for you based on the first inference.
>>> self.fc1 = nn.LazyLinear(120)
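Putting it together, a minimal sketch of the adapted class (assuming a 96x96 grayscale input; the per-layer shape comments follow from kernel size 5 with no padding and 2x2 pooling):
import torch
from torch import nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)   # 96x96 -> 92x92, pooled to 46x46
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 64, 5)  # 46x46 -> 42x42, pooled to 21x21
        self.fc1 = nn.Linear(64 * 21 * 21, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 22)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = torch.flatten(x, 1)  # flatten all dimensions except batch
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

print(Net()(torch.randn(1, 1, 96, 96)).shape)  # torch.Size([1, 22])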

Given groups=1, weight of size [32, 3, 3, 3], expected input[1, 1, 32, 340] to have 3 channels, but got 1 channels instead

This is the question:
Before we define the model, we define the size of our alphabet. Our alphabet consists of lowercase English letters, and additionally a special character used for space between symbols or before and after the word. For the first part of this assignment, we don't need that extra character.
Our end goal is to learn to transcribe words of arbitrary length. However, first, we pre-train our simple convolutional neural net to recognize single characters. In order to be able to use the same model for one character and for entire words, we are going to design the model in a way that makes sure that the output size for one character (or when the input image size is 32x18) is 1x27, and Kx27 whenever the input image is wider. K here will depend on the particular architecture of the network, and is affected by strides, poolings, among other things. A little more formally, our model f_θ, for an input image x, gives output energies l = f_θ(x). If x ∈ ℝ^(32×18), then l ∈ ℝ^(1×27). If x ∈ ℝ^(32×100), for example, our model may output l ∈ ℝ^(10×27), where l_i corresponds to a particular window in x, for example from x_{0,9i} to x_{32,9i+18} (again, this will depend on the particular architecture).
The code:
# constants for number of classes in total, and for the special extra character for empty space
ALPHABET_SIZE = 27  # extra character for space in between
BETWEEN = 26
print(alphabet.shape) # RETURNS: torch.Size([32, 340])
My CNN Block:
import torch
from torch import nn
import torch.nn.functional as F
import matplotlib.pyplot as plt

"""
Remember basics:
1. Bigger strides = less overlap
2. More filters = more features
Image shape = 32, 18
Alphabet shape = 32, 340
"""

class SimpleNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.cnn_block = torch.nn.Sequential(
            nn.Conv2d(3, 32, 3),
            nn.BatchNorm2d(32),
            nn.Conv2d(32, 32, 3),
            nn.BatchNorm2d(32),
            nn.Conv2d(32, 32, 3),
            nn.BatchNorm2d(32),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 3),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 64, 3),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 64, 3),
            nn.BatchNorm2d(64),
            nn.MaxPool2d(2)
        )

    def forward(self, x):
        x = self.cnn_block(x)
        # after applying cnn_block, x.shape should be:
        # batch_size, alphabet_size, 1, width
        return x[:, :, 0, :].permute(0, 2, 1)

model = SimpleNet()
alphabet_energies = model(alphabet.view(1, 1, *alphabet.shape))

def plot_energies(ce):
    fig = plt.figure(dpi=200)
    ax = plt.axes()
    im = ax.imshow(ce.cpu().T)
    ax.set_xlabel('window locations →')
    ax.set_ylabel('← classes')
    ax.xaxis.set_label_position('top')
    ax.set_xticks([])
    ax.set_yticks([])
    cax = fig.add_axes([ax.get_position().x1 + 0.01, ax.get_position().y0, 0.02, ax.get_position().height])
    plt.colorbar(im, cax=cax)

plot_energies(alphabet_energies[0].detach())
I get the error in the title at alphabet_energies = model(alphabet.view(1, 1, *alphabet.shape))
Any help would be appreciated.
You should start by replacing nn.Conv2d(3, 32, 3) with nn.Conv2d(1, 32, 3).
Your model begins with a convolution from 3 channels to 32, but your input image has only 1 channel (a grayscale image).
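A minimal sketch of the fix with a dummy stand-in for the alphabet tensor (only the first conv layer changes; the rest of the block stays as in the question):
import torch
from torch import nn

block = nn.Sequential(
    nn.Conv2d(1, 32, 3),  # was nn.Conv2d(3, 32, 3); in_channels must match the 1-channel input
    nn.BatchNorm2d(32),
    # ... remaining layers unchanged
)

alphabet = torch.randn(32, 340)                    # dummy grayscale image
out = block(alphabet.view(1, 1, *alphabet.shape))  # runs without the channel-mismatch error
print(out.shape)  # torch.Size([1, 32, 30, 338])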

Pytorch Conv2d Autoencoder Output Shape

I built the convolutional autoencoder below and am trying to tune it so that the encoder output (x_encoder) has a total shape [N x H x W] of 1024 elements, without increasing the loss. Currently my output shape is [4, 64, 64]. Any ideas?
# define the NN architecture
class ConvAutoencoder(nn.Module):
    def __init__(self):
        super(ConvAutoencoder, self).__init__()
        ## encoder layers ##
        # conv layer (depth from in --> 16), 3x3 kernels
        self.conv1 = nn.Conv2d(1, 16, 3, padding=1)
        # conv layer (depth from 16 --> 4), 3x3 kernels
        self.conv2 = nn.Conv2d(16, 4, 3, padding=1)
        # pooling layer to reduce x-y dims by two; kernel and stride of 2
        self.pool = nn.MaxPool2d(2, 2)
        ## decoder layers ##
        ## a kernel of 2 and a stride of 2 will increase the spatial dims by 2
        self.t_conv1 = nn.ConvTranspose2d(4, 16, 2, stride=2)
        self.t_conv2 = nn.ConvTranspose2d(16, 1, 2, stride=2)

    def forward(self, x):
        ## encode ##
        # add hidden layers with relu activation function
        # and maxpooling after
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        # add second hidden layer
        x = F.relu(self.conv2(x))
        x = self.pool(x)  # compressed representation
        x_encoder = x
        ## decode ##
        # add transpose conv layers, with relu activation function
        x = F.relu(self.t_conv1(x))
        # output layer (with sigmoid for scaling from 0 to 1)
        x = F.sigmoid(self.t_conv2(x))
        return x, x_encoder
If you want to keep your parameter count unchanged, adding an nn.AdaptiveAvgPool2d((H, W)) or nn.AdaptiveMaxPool2d((H, W)) layer after the pooling layer (self.pool) can force the encoder output to a fixed spatial size. Note that adaptive pooling only controls the spatial dims, so the channel count stays at 4; choosing (H, W) = (16, 16) gives 4 * 16 * 16 = 1024 elements.
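A minimal sketch of that suggestion (the (16, 16) output size is an assumption, chosen so the encoder output totals 1024 elements):
# in __init__:
self.adapt = nn.AdaptiveAvgPool2d((16, 16))  # output spatial size fixed at 16x16

# in forward(), after the second pooling step:
x = self.pool(F.relu(self.conv2(x)))
x_encoder = self.adapt(x)  # [batch, 4, 16, 16] -> 4 * 16 * 16 = 1024 elements per sample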
This should work, assuming the shape of x_encoder is torch.Size([1, 4, 64, 64]). You can add a conv layer with stride set to 2, or a conv layer followed by a pooling layer. Check the code below:
# conv layer (depth from in --> 16), 3x3 kernels
self.conv1 = nn.Conv2d(1, 16, 3, padding=1)
# conv layer (depth from 16 --> 4), 3x3 kernels
self.conv2 = nn.Conv2d(16, 4, 3, padding=1)
# pooling layer to reduce x-y dims by two; kernel and stride of 2
self.pool = nn.MaxPool2d(2, 2)
# The changes
self.conv3 = nn.Conv2d(4, 1, 1, 2)
# or
self.conv3 = nn.Conv2d(4, 1, 1)
self.maxpool2d = nn.MaxPool2d((2, 2))
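As a quick check of the first variant: a 1x1 kernel with stride 2 halves both spatial dims while reducing 4 channels to 1, so [1, 4, 64, 64] becomes exactly 1024 elements (a sketch with a dummy tensor):
import torch
from torch import nn

conv3 = nn.Conv2d(4, 1, 1, 2)  # 1x1 kernel, stride 2
x_encoder = torch.randn(1, 4, 64, 64)
out = conv3(x_encoder)
print(out.shape, out.numel())  # torch.Size([1, 1, 32, 32]) 1024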

Convolution - Deconvolution for even and odd size

I have two tensors of different sizes to put through the network.
C = nn.Conv1d(1, 1, kernel_size=1, stride=2)
TC = nn.ConvTranspose1d(1, 1, kernel_size=1, stride=2)
a = torch.rand(1, 1, 100)
b = torch.rand(1, 1, 101)
a_out, b_out = TC(C(a)), TC(C(b))
The results are:
a_out.shape = torch.Size([1, 1, 99])   # what I want is [1, 1, 100]
b_out.shape = torch.Size([1, 1, 101])
Is there any method to handle this problem?
I need your help.
Thanks
This is expected behaviour per the documentation. Padding can be applied when an even input length is detected, to recover the same length as the input.
Something like this
class PadEven(nn.Module):
    def __init__(self, conv, deconv, pad_value=0, padding=(0, 1)):
        super().__init__()
        self.conv = conv
        self.deconv = deconv
        self.pad = nn.ConstantPad1d(padding=padding, value=pad_value)

    def forward(self, x):
        nd = x.size(-1)
        x = self.deconv(self.conv(x))
        if nd % 2 == 0:
            x = self.pad(x)
        return x
C = nn.Conv1d(1, 1, kernel_size=1, stride=2)
TC = nn.ConvTranspose1d(1, 1, kernel_size=1, stride=2)
P = PadEven(C, TC)
a = torch.rand(1, 1, 100)
b = torch.rand(1, 1, 101)
a_out, b_out = P(a), P(b)
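For reference, the shrinkage follows from the documented length formulas. With kernel_size=1, stride=2, and no padding, Conv1d gives L_out = floor((L_in - 1) / 2) + 1 and ConvTranspose1d gives L_out = (L_in - 1) * 2 + 1. So 100 -> 50 -> 99 (one sample short), while 101 -> 51 -> 101 (round trip intact): only even lengths lose a sample, which is exactly why PadEven pads conditionally. A quick check:
print(a_out.shape, b_out.shape)  # torch.Size([1, 1, 100]) torch.Size([1, 1, 101])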

PyTorch custom DataLoader dimension issues for CNN

I have written a custom Dataset and DataLoader for a PyTorch CNN project. Here is the relevant code for the dataset
class MyDataset(Dataset):
    def __init__(self):
        pass

    def __len__(self):
        return COUNT

    def __getitem__(self, idx):
        x, y = X[idx], Y[idx]
        x = image_augment(x)  # custom func to resize image to 32x32
        return x, y
The shape of each training x is [4, 32, 32, 3].
And here is my Net code, taken directly from this PyTorch example.
class Net(nn.Module):
    def __init__(self, nc):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, nc)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
When I try to train this net on data from my DataLoader, I get the error: Given groups=1, weight of size [6, 3, 5, 5], expected input[4, 32, 32, 3] to have 3 channels, but got 200 channels instead. It seems my issue is the shape of the data coming from my DataLoader, so I tried x.view(4, 3, 32, 32), but then I got an error saying I couldn't use Conv2d on a ByteTensor. I'm a little lost here and would really appreciate any help. Thanks!
I got it eventually. I had to do x = x.view(x.shape[0], 3, self.img_height, self.img_width).type('torch.FloatTensor'), for example. That swaps from [4, 32, 32, 3] to [4, 3, 32, 32].
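One caution worth noting: view only reinterprets the tensor's memory, so while it produces the desired shape, it does not actually transpose channels-last pixel data; the usual conversion is permute. A sketch of the channels-last to channels-first conversion (the tensor values here are dummies):
import torch

x = torch.randint(0, 256, (4, 32, 32, 3), dtype=torch.uint8)  # NHWC byte images
x = x.permute(0, 3, 1, 2).contiguous().float()  # NHWC -> NCHW, and float for Conv2d
print(x.shape)  # torch.Size([4, 3, 32, 32])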
