How to generate an ONNX file with linear layers using PyTorch

I want to create a network based on the vgg16 network, but add linear layers (Gemm) just after the conv2d layers, for normalization purposes.
After that, I want to export the network to an ONNX file.
The first part seems to work: I took the PyTorch code for generating vgg16 and modified it as follows:
import torch.nn as nn

class VGG(nn.Module):
    def __init__(self, features, num_classes=8, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.Linear(4096, 4096),  # New shift layer
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.Linear(4096, 4096),  # New shift layer
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 8),
            nn.Linear(8, 8),  # New shift layer
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    n = 224
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            n = int(n / 2)
        elif v == 'B':
            layers += [nn.AdaptiveAvgPool2d(n)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            linear = nn.Linear(n, n, True)
            if batch_norm:
                layers += [conv2d, linear, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, linear, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)

cfg = {'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M', 'B']}

def vgg16(**kwargs):
    """VGG 16-layer model (configuration "D")"""
    model = VGG(make_layers(cfg['D']), **kwargs)
    return model
But when I load the weights and export to ONNX, I see that my linear layers do not appear as Gemm but as {Transpose + MatMul + Add}.
The Transpose part handles the weight matrix and the Add part handles the biases (which are all 0).
Am I wrong to think this is possible? Is there a way to get a real Gemm layer here, or another way to do this normalization (which simply multiplies all outputs by a single value)?
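For reference, the export step was presumably something like the following minimal sketch (not the asker's exact code; the 1x3x224x224 input and the output file name are assumptions):
import torch

model = vgg16()
dummy = torch.randn(1, 3, 224, 224)   # assumed input size for this VGG configuration
torch.onnx.export(model, dummy, "vgg16_shift.onnx")   # placeholder file name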

Because the input to nn.Linear here is a 4-D tensor, torch exports it as {Transpose, MatMul, Add}. Only when the input is 2-D is a Gemm op exported.
You can look at the PyTorch source code for more information.
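A minimal sketch illustrating that behaviour (the layer size and file names are arbitrary): the same nn.Linear exports as a single Gemm node when its input is 2-D, but as Transpose + MatMul + Add when its input is a 4-D feature map.
import torch
import torch.nn as nn

lin = nn.Linear(224, 224)

# 2-D input -> one Gemm node in the exported graph
torch.onnx.export(lin, torch.randn(1, 224), "linear_2d.onnx")

# 4-D input (like a conv feature map) -> Transpose + MatMul + Add
torch.onnx.export(lin, torch.randn(1, 64, 224, 224), "linear_4d.onnx")
So one option (my suggestion, not part of the answer above): keep the extra Linear layers only where the tensor is already 2-D (the classifier), or express the per-channel scaling inside the feature extractor as a fixed 1x1 convolution, which exports as a Conv node rather than MatMul.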

Related

Convolutional Autoencoder outputs black images

I need to train an autoencoder on the Adaptiope dataset. I am using a ResNet18 backbone for the encoder part.
The issue I encounter is that even after many epochs, the reconstructed image is always completely black.
On the other hand, when I use a simpler autoencoder without the ResNet18 backbone, the reconstructed images turn out close to what I need them to be.
I am trying to understand why this is the case. I am a novice in the field and still cannot grasp the problem. It looks like an architectural issue, but I cannot wrap my head around it.
This is my "vanilla" Encoder, with no resnet18 backbone:
class Encoder(nn.Module):
    def __init__(self,
                 num_input_channels : int,
                 base_channel_size : int,
                 latent_dim : int
                 ):
        """
        Inputs:
            - num_input_channels : Number of input channels of the image. For CIFAR, this parameter is 3
            - base_channel_size : Number of channels we use in the first convolutional layers. Deeper layers might use a duplicate of it.
            - latent_dim : Dimensionality of latent representation z
            - act_fn : Activation function used throughout the encoder network
        """
        super().__init__()
        c_hid = base_channel_size
        self.layer1 = nn.Sequential(
            nn.Conv2d(num_input_channels, c_hid, kernel_size=3, padding=1, stride=2),  # 32x32 => 16x16
            nn.ReLU(),
            nn.Conv2d(c_hid, c_hid, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(c_hid, 2*c_hid, kernel_size=3, padding=1, stride=2),  # 16x16 => 8x8
            nn.ReLU(),
            nn.Conv2d(2*c_hid, 2*c_hid, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(2*c_hid, 2*c_hid, kernel_size=3, padding=1, stride=2),  # 8x8 => 4x4
            nn.ReLU(),
            nn.Flatten(),  # Image grid to single feature vector
            nn.Linear(351232, latent_dim))
        self.linear2 = nn.Linear(latent_dim, 20*8)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        enc = self.layer1(x)
        lin_p = self.linear2(enc)
        p = self.softmax(lin_p)
        return enc, p
This is the Encoder with Resnet18 backbone:
class Encoder(nn.Module):
    def __init__(self,
                 num_input_channels : int,
                 base_channel_size : int,
                 latent_dim : int
                 ):
        """
        Inputs:
            - num_input_channels : Number of input channels of the image. For CIFAR, this parameter is 3
            - base_channel_size : Number of channels we use in the first convolutional layers. Deeper layers might use a duplicate of it.
            - latent_dim : Dimensionality of latent representation z
            - act_fn : Activation function used throughout the encoder network
        """
        super().__init__()
        c_hid = base_channel_size
        self.fc_hidden1, self.fc_hidden2, self.CNN_embed_dim = 224, 768, 224
        # CNN architectures
        self.ch1, self.ch2, self.ch3, self.ch4 = 16, 32, 64, 128
        self.k1, self.k2, self.k3, self.k4 = (5, 5), (3, 3), (3, 3), (3, 3)  # 2d kernel size
        self.s1, self.s2, self.s3, self.s4 = (2, 2), (2, 2), (2, 2), (2, 2)  # 2d strides
        self.pd1, self.pd2, self.pd3, self.pd4 = (0, 0), (0, 0), (0, 0), (0, 0)  # 2d padding
        # encoding components
        model = models.resnet18(pretrained=True)
        for param in model.parameters():
            param.requires_grad = False
        modules = list(model.children())[:-1]  # delete the last fc layer.
        self.resnet_modules = modules
        self.resnet = nn.Sequential(*modules)
        self.fc1 = nn.Linear(model.fc.in_features, self.fc_hidden1)
        self.bn1 = nn.BatchNorm1d(self.fc_hidden1, momentum=0.01)
        self.relu = nn.ReLU(inplace=True)
        self.layer = nn.Sequential(
            nn.Flatten(),  # Image grid to single feature vector
            nn.Linear(224, latent_dim))  # 8x224
        # self.flatten = nn.Flatten()  # Image grid to single feature vector
        # self.linear1 = nn.Linear(351232, latent_dim)
        self.linear2 = nn.Linear(latent_dim, 20*8)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        x = self.resnet(x)
        x = x.reshape(x.shape[0], 512)
        x = self.fc1(x)
        x = self.bn1(x)
        x = self.relu(x)
        enc = self.layer(x)
        # x = self.fc2(x)
        # x = self.bn(x)
        # enc = self.layer1(x)
        lin_p = self.linear2(enc)
        p = self.softmax(lin_p)
        return enc, p
The decoder is the same for both.
class Decoder_N(nn.Module):
    def __init__(self,
                 num_input_channels : int,
                 base_channel_size : int,
                 latent_dim : int,
                 act_fn : object = nn.GELU):
        """
        Inputs:
            - num_input_channels : Number of channels of the image to reconstruct. For CIFAR, this parameter is 3
            - base_channel_size : Number of channels we use in the last convolutional layers. Early layers might use a duplicate of it.
            - latent_dim : Dimensionality of latent representation z
            - act_fn : Activation function used throughout the decoder network
        """
        super().__init__()
        c_hid = 224
        self.linear = nn.Sequential(
            nn.Linear(latent_dim, 351232),
            nn.ReLU()
        )
        self.net = nn.Sequential(
            nn.ConvTranspose2d(2*c_hid, 2*c_hid, kernel_size=3, output_padding=1, padding=1, stride=2),  # 4x4 => 8x8
            nn.ReLU(),
            nn.Conv2d(2*c_hid, 2*c_hid, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(2*c_hid, c_hid, kernel_size=3, output_padding=1, padding=1, stride=2),  # 8x8 => 16x16
            nn.ReLU(),
            nn.Conv2d(c_hid, c_hid, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(c_hid, 3, kernel_size=3, output_padding=1, padding=1, stride=2),  # 16x16 => 32x32
            nn.Tanh()  # The input images are scaled between -1 and 1, hence the output has to be bounded as well
        )

    def forward(self, x):
        x = self.linear(x)
        x = x.reshape(x.shape[0], -1, 28, 28)
        x = self.net(x)
        return x
num_input_channels : 224
base_channel_size : 3
latent_dim : 64
I expected the "advanced" autoencoder to extract my features better, but apparently this is not the case.
I solved the issue: there were problems with the normalization of the images and the BatchNorm layer. I accidentally used the ImageNet mean and std for the dataset instead of the correct ones. Additionally, during training I forgot to add regularizers for the different components of my loss, so my model learned essentially nothing.
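For completeness, a minimal sketch of the normalization part of the fix (the tensor shapes here are assumptions, not the asker's actual pipeline): compute the per-channel mean/std from the training images themselves instead of reusing the ImageNet statistics.
import torch
from torchvision import transforms

train_images = torch.rand(100, 3, 224, 224)   # stand-in for the real training set, scaled to [0, 1]
mean = train_images.mean(dim=(0, 2, 3))       # per-channel mean
std = train_images.std(dim=(0, 2, 3))         # per-channel std

normalize = transforms.Normalize(mean.tolist(), std.tolist())
x = normalize(train_images[0])                # a sample normalized with the dataset's own statistics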

Runtime Error: mat1 and mat2 shapes cannot be multiplied (62x2304 and 1568x3)

I am unable to find the error. The input is 32x32 gray images:
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,    # gray-scale images
                out_channels=16,
                kernel_size=5,    # 5x5 convolutional kernel
                stride=1,         # no. of pixels pass at a time
                padding=2,        # to preserve size of input image
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # fully connected layers
        self.out = nn.Linear(32*7*7, 3)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        # flatten the output of conv2
        x = x.view(x.size(0), -1)
        output = self.out(x)
        return output

cnn = CNN()
cnn
Your linear layer expects input of size 32x7x7. Given that your conv1 and conv2 layers perform max pooling with stride=2, that means your network is configured for an input size of 28x28 (the usual MNIST input size) and not the 32x32 you expect.
Moreover, considering the values in your error message (62x2304), I assume you are working with batch_size=62, and your images are NOT 32x32 but rather 32x(something slightly larger than 32), resulting in a feature map of 32x8x9 after pooling.
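A small sketch of how to avoid hard-coding 32*7*7 (my own suggestion, not part of the original answer): run a dummy tensor of the real input size through the conv stack and read off the flattened size before building the final nn.Linear.
import torch
import torch.nn as nn

features = nn.Sequential(
    nn.Conv2d(1, 16, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2),
    nn.Conv2d(16, 32, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2),
)
with torch.no_grad():
    dummy = torch.zeros(1, 1, 32, 32)          # use your real H x W here
    n_flat = features(dummy).flatten(1).size(1)
print(n_flat)                                  # 2048 for a true 32x32 input (32 * 8 * 8)
out = nn.Linear(n_flat, 3)                     # replaces the hard-coded nn.Linear(32*7*7, 3)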

Pytorch Conv2d Autoencoder Output Shape

I built the convolutional autoencoder below and am trying to tune it so that the encoder output (x_encoder) has [N x H x W] = 1024 elements without increasing the loss. Currently my output shape is [4, 64, 64]. Any ideas?
# define the NN architecture
class ConvAutoencoder(nn.Module):
    def __init__(self):
        super(ConvAutoencoder, self).__init__()
        ## encoder layers ##
        # conv layer (depth from in --> 16), 3x3 kernels
        self.conv1 = nn.Conv2d(1, 16, 3, padding=1)
        # conv layer (depth from 16 --> 4), 3x3 kernels
        self.conv2 = nn.Conv2d(16, 4, 3, padding=1)
        # pooling layer to reduce x-y dims by two; kernel and stride of 2
        self.pool = nn.MaxPool2d(2, 2)
        ## decoder layers ##
        ## a kernel of 2 and a stride of 2 will increase the spatial dims by 2
        self.t_conv1 = nn.ConvTranspose2d(4, 16, 2, stride=2)
        self.t_conv2 = nn.ConvTranspose2d(16, 1, 2, stride=2)

    def forward(self, x):
        ## encode ##
        # add hidden layers with relu activation function
        # and maxpooling after
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        # add second hidden layer
        x = F.relu(self.conv2(x))
        x = self.pool(x)  # compressed representation
        x_encoder = x
        ## decode ##
        # add transpose conv layers, with relu activation function
        x = F.relu(self.t_conv1(x))
        # output layer (with sigmoid for scaling from 0 to 1)
        x = F.sigmoid(self.t_conv2(x))
        return x, x_encoder
If you want to keep the number of parameters unchanged, adding an nn.AdaptiveAvgPool2d((H, W)) or nn.AdaptiveMaxPool2d((H, W)) layer, either in place of or after the pooling layer (self.pool), can force the encoder output to have spatial shape H x W (2-D adaptive pooling fixes only the spatial dimensions; the channel count N stays the same).
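As a concrete instance of this suggestion (my own sketch, with an assumed 16x16 target): adaptive pooling applied to the current [1, 4, 64, 64] encoder map gives 4 * 16 * 16 = 1024 elements without adding parameters.
import torch
import torch.nn as nn

x_encoder = torch.randn(1, 4, 64, 64)
adaptive_pool = nn.AdaptiveAvgPool2d((16, 16))
pooled = adaptive_pool(x_encoder)
print(pooled.shape, pooled.numel())   # torch.Size([1, 4, 16, 16]) 1024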
This should work assuming the shape of x_encoder is torch.Size([1, 4, 64, 64]). You can add a conv layer with stride set to 2, or a conv layer followed by a pooling layer. Check the code below:
# conv layer (depth from in --> 16), 3x3 kernels
self.conv1 = nn.Conv2d(1, 16, 3, padding=1)
# conv layer (depth from 16 --> 4), 3x3 kernels
self.conv2 = nn.Conv2d(16, 4, 3, padding=1)
# pooling layer to reduce x-y dims by two; kernel and stride of 2
self.pool = nn.MaxPool2d(2, 2)
# The changes: either a strided 1x1 conv ...
self.conv3 = nn.Conv2d(4, 1, 1, 2)
# ... or a 1x1 conv followed by an extra pooling layer
self.conv3 = nn.Conv2d(4, 1, 1)
self.maxpool2d = nn.MaxPool2d((2, 2))
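A quick check of the first option (a sketch, assuming x_encoder has shape [1, 4, 64, 64] as above): the strided 1x1 conv brings the encoder output down to exactly 1024 elements.
import torch
import torch.nn as nn

x_encoder = torch.randn(1, 4, 64, 64)
conv3 = nn.Conv2d(4, 1, kernel_size=1, stride=2)
y = conv3(x_encoder)
print(y.shape, y.numel())   # torch.Size([1, 1, 32, 32]) 1024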

Linear layer input size Pytorch [duplicate]

This question already has answers here: Pytorch - Inferring linear layer in_features (2 answers). Closed 1 year ago.
I'm working on an assignment with 1D signals and I have trouble finding the right input size for the linear layer (XXX). My signals have different lengths and are padded within a batch. I read that the linear layer should always have the same input size (XXX), but I'm not sure how to find it when each batch has a different length. Does anybody have any advice?
Thanks
class NeuralNet(nn.Module):
    def __init__(self):
        super(NeuralNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv1d(in_channels=1, out_channels=128, kernel_size=7, stride=3),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.MaxPool1d(2, 3),
            nn.Conv1d(128, 32, 5, 1),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            nn.MaxPool1d(2, 2),
            nn.Conv1d(32, 32, 5, 1),
            nn.ReLU(),
            nn.Conv1d(32, 128, 3, 2),
            nn.ReLU(),
            nn.MaxPool1d(2, 2),
            nn.Conv1d(128, 256, 7, 1),
            nn.ReLU(),
            nn.MaxPool1d(2, 2),
            nn.Conv1d(256, 512, 3, 1),
            nn.ReLU(),
            nn.Conv1d(512, 128, 3, 1),
            nn.ReLU()
        )
        self.classifier = nn.Sequential(
            nn.Linear(XXX, 512),
            nn.ReLU(),
            nn.Dropout(p=0.1),
            nn.Linear(512, 2)
        )

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x)
        x = self.classifier(x)
        return x
First, you need to decide on a fixed-length input. Let's assume each signal has length 2048. It won't work for any length < 1024 because of the preceding convolution layers; if you would like to use signals of length < 1024, you may need to remove a couple of convolutional layers, change the kernel_size, or remove a max-pool operation.
Assuming the fixed length is 2048, your nn.Linear layer will take 768 input neurons. To calculate this, set an arbitrary number of input neurons for your nn.Linear layer (say 1000) and then print the shape of the output from the conv layers. You could do something like this in your forward call:
def forward(self, x):
    x = self.features(x)
    print('Output shape of Conv. layers', x.shape)
    x = x.view(-1, x.size(1) * x.size(2))
    print('Shape required to pass to Linear Layer', x.shape)
    x = self.classifier(x)
    return x
This will obviously throw an error because of the shape mismatch, but it will tell you the number of input neurons required in your first nn.Linear layer. With this approach you can run experiments with a number of different input signal lengths (1536, 2048, 4096, etc.).
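As an alternative to computing the size by hand (my own suggestion, not part of the answer above): nn.LazyLinear infers in_features from the first batch it sees, which works as long as every signal is padded to the same fixed length. The shapes below are assumptions for illustration.
import torch
import torch.nn as nn

classifier = nn.Sequential(
    nn.Flatten(start_dim=1),      # keep the batch dimension
    nn.LazyLinear(512),           # in_features inferred on the first forward pass
    nn.ReLU(),
    nn.Dropout(p=0.1),
    nn.Linear(512, 2),
)
x = torch.randn(8, 128, 6)        # hypothetical conv output: (batch, channels, length)
print(classifier(x).shape)        # torch.Size([8, 2])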

Pytorch tensor, how to switch channel position - Runtime error

I have my training dataset as below, where X_train is 3D with 3 channels
Shape of X_Train: (708, 256, 3)
Shape of Y_Train: (708, 4)
Then I convert them into a tensor and input into the dataloader:
X_train=torch.from_numpy(X_data)
y_train=torch.from_numpy(y_data)
training_dataset = torch.utils.data.TensorDataset(X_train, y_train)
train_loader = torch.utils.data.DataLoader(training_dataset, batch_size=50, shuffle=False)
However when training the model, I get the following error:
RuntimeError: Given groups=1, weight of size 24 3 5, expected input[708, 256, 3] to have 3 channels, but got 256 channels instead
I suppose this is due to the position of the channel dimension? In TensorFlow the channels come last, but in PyTorch the format is "Batch Size x Channels x Height x Width". So how do I swap the dimensions in the X_train tensor to match the format expected by the dataloader?
class TwoLayerNet(torch.nn.Module):
    def __init__(self):
        super(TwoLayerNet, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv1d(3, 3*8, kernel_size=5, stride=1),
            nn.Sigmoid(),
            nn.AvgPool1d(kernel_size=2, stride=0))
        self.conv2 = nn.Sequential(
            nn.Conv1d(3*8, 12, kernel_size=5, stride=1),
            nn.Sigmoid(),
            nn.AvgPool1d(kernel_size=2, stride=0))
        # self.drop_out = nn.Dropout()
        self.fc1 = nn.Linear(708, 732)
        self.fc2 = nn.Linear(732, 4)

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        out = out.reshape(out.size(0), -1)
        out = self.drop_out(out)
        out = self.fc1(out)
        out = self.fc2(out)
        return out
Use permute.
X_train = torch.rand(708, 256, 3)
X_train = X_train.permute(2, 0, 1)
X_train.shape
# => torch.Size([3, 708, 256])
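A follow-up sketch for this particular setup (my assumption: the first dimension of X_train is the sample axis used by TensorDataset), where only the last two dimensions are swapped so each sample becomes (channels, length):
import torch
from torch.utils.data import TensorDataset, DataLoader

X_train = torch.rand(708, 256, 3)         # (samples, length, channels)
X_train = X_train.permute(0, 2, 1)        # (samples, channels, length)
y_train = torch.rand(708, 4)

train_loader = DataLoader(TensorDataset(X_train, y_train), batch_size=50, shuffle=False)
x_batch, y_batch = next(iter(train_loader))
print(x_batch.shape)                      # torch.Size([50, 3, 256]) -> matches nn.Conv1d(3, ...)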
