PyTorch - AttributeError: 'tuple' object has no attribute 'dim'
I am trying to use this architecture:
class Net(BaseFeaturesExtractor):
    def __init__(self, observation_space: gym.spaces.Box, features_dim: int = 512):
        super(Net, self).__init__(observation_space, features_dim)
        self.conv1 = nn.Conv2d(1, 64, 3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(64, 64, 3, stride=1)
        self.conv4 = nn.Conv2d(64, 64, 3, stride=1)
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(64)
        self.bn4 = nn.BatchNorm2d(64)
        self.fc1 = nn.Linear(64 * (7 - 4) * (6 - 4), 128)
        self.fc_bn1 = nn.BatchNorm1d(128)
        self.fc2 = nn.Linear(128, 64)
        self.fc_bn2 = nn.BatchNorm1d(64)
        self.fc3 = nn.Linear(64, 7)
        self.fc4 = nn.Linear(64, 1)

    def forward(self, s):
        # s: batch_size x board_x x board_y
        s = s.view(-1, 1, 7, 6)              # batch_size x 1 x board_x x board_y
        s = F.relu(self.bn1(self.conv1(s)))  # batch_size x num_channels x board_x x board_y
        s = F.relu(self.bn2(self.conv2(s)))  # batch_size x num_channels x board_x x board_y
        s = F.relu(self.bn3(self.conv3(s)))  # batch_size x num_channels x (board_x-2) x (board_y-2)
        s = F.relu(self.bn4(self.conv4(s)))  # batch_size x num_channels x (board_x-4) x (board_y-4)
        s = s.view(-1, 64 * (7 - 4) * (6 - 4))
        s = F.dropout(
            F.relu(self.fc_bn1(self.fc1(s))),
            p=0.3,
            training=self.training)          # batch_size x 128
        s = F.dropout(
            F.relu(self.fc_bn2(self.fc2(s))),
            p=0.3,
            training=self.training)          # batch_size x 64
        pi = self.fc3(s)                     # batch_size x action_size
        v = self.fc4(s)                      # batch_size x 1
        return F.log_softmax(pi, dim=1), th.tanh(v)
When I try to use this architecture, I get the following error:
Traceback (most recent call last):
File "/Users/joe/Documents/JUPYTER/ConnectX/training3.py", line 130, in <module>
learner.learn(total_timesteps=iterations, callback=eval_callback)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/stable_baselines3/ppo/ppo.py", line 264, in learn
reset_num_timesteps=reset_num_timesteps,
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/stable_baselines3/common/on_policy_algorithm.py", line 222, in learn
continue_training = self.collect_rollouts(self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/stable_baselines3/common/on_policy_algorithm.py", line 154, in collect_rollouts
actions, values, log_probs = self.policy.forward(obs_tensor)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/stable_baselines3/common/policies.py", line 545, in forward
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/stable_baselines3/common/policies.py", line 564, in _get_latent
latent_pi, latent_vf = self.mlp_extractor(features)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/stable_baselines3/common/torch_layers.py", line 220, in forward
shared_latent = self.shared_net(features)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/torch/nn/modules/container.py", line 117, in forward
input = module(input)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 93, in forward
return F.linear(input, self.weight, self.bias)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/torch/nn/functional.py", line 1688, in linear
if input.dim() == 2 and bias is not None:
AttributeError: 'tuple' object has no attribute 'dim'
How can this problem be fixed?
I tried to reproduce a small working example based on the class definitions you gave, and I was able to get outputs from the model. Here is the code:
# BaseFeaturesExtractor class
import gym
import torch as th
from torch import nn
class BaseFeaturesExtractor(nn.Module):
    """
    Base class that represents a features extractor.

    :param observation_space:
    :param features_dim: Number of features extracted.
    """
    def __init__(self, observation_space: gym.Space, features_dim: int = 0):
        super(BaseFeaturesExtractor, self).__init__()
        assert features_dim > 0
        self._observation_space = observation_space
        self._features_dim = features_dim

    @property
    def features_dim(self) -> int:
        return self._features_dim

    def forward(self, observations: th.Tensor) -> th.Tensor:
        raise NotImplementedError()
# Net class
class Net(BaseFeaturesExtractor):
    def __init__(self, observation_space: gym.spaces.Box, features_dim: int = 512):
        super(Net, self).__init__(observation_space, features_dim)
        self.conv1 = nn.Conv2d(1, 64, 3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(64, 64, 3, stride=1)
        self.conv4 = nn.Conv2d(64, 64, 3, stride=1)
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(64)
        self.bn4 = nn.BatchNorm2d(64)
        self.fc1 = nn.Linear(64 * (7 - 4) * (6 - 4), 128)
        self.fc_bn1 = nn.BatchNorm1d(128)
        self.fc2 = nn.Linear(128, 64)
        self.fc_bn2 = nn.BatchNorm1d(64)
        self.fc3 = nn.Linear(64, 7)
        self.fc4 = nn.Linear(64, 1)

    def forward(self, s):
        # s: batch_size x board_x x board_y
        s = s.view(-1, 1, 7, 6)              # batch_size x 1 x board_x x board_y
        s = F.relu(self.bn1(self.conv1(s)))  # batch_size x num_channels x board_x x board_y
        s = F.relu(self.bn2(self.conv2(s)))  # batch_size x num_channels x board_x x board_y
        s = F.relu(self.bn3(self.conv3(s)))  # batch_size x num_channels x (board_x-2) x (board_y-2)
        s = F.relu(self.bn4(self.conv4(s)))  # batch_size x num_channels x (board_x-4) x (board_y-4)
        s = s.view(-1, 64 * (7 - 4) * (6 - 4))
        s = F.dropout(
            F.relu(self.fc_bn1(self.fc1(s))),
            p=0.3,
            training=self.training)          # batch_size x 128
        s = F.dropout(
            F.relu(self.fc_bn2(self.fc2(s))),
            p=0.3,
            training=self.training)          # batch_size x 64
        pi = self.fc3(s)                     # batch_size x action_size
        v = self.fc4(s)                      # batch_size x 1
        return F.log_softmax(pi, dim=1), th.tanh(v)
# Minimal code to reproduce a forward pass
import numpy as np
import torch
import torch.nn.functional as F

params = gym.spaces.Box(np.array([-1, 0, 0]), np.array([+1, +1, +1]))
model = Net(params)
inputs = torch.randn(2, 1, 7, 6)
outputs = model(inputs)
print(outputs[0].shape, outputs[1].shape)  # prints torch.Size([2, 7]) torch.Size([2, 1])
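As a side note for anyone hitting the same AttributeError with stable-baselines3: the traceback in the question shows the extractor's output being passed into the policy's mlp_extractor, whose Linear layers call input.dim(), so the features extractor is expected to hand back a single tensor of shape (batch_size, features_dim) rather than the (log-probabilities, value) tuple returned above. Below is a minimal sketch of how such an extractor is typically plugged into PPO; it assumes an already-constructed env and a forward that returns one tensor, and the names are illustrative, not taken from the question.

from stable_baselines3 import PPO

policy_kwargs = dict(
    features_extractor_class=Net,                      # the custom extractor defined above
    features_extractor_kwargs=dict(features_dim=512),
)
# env is assumed to already exist (e.g. the ConnectX environment from the question)
learner = PPO("MlpPolicy", env, policy_kwargs=policy_kwargs, verbose=1)
learner.learn(total_timesteps=10_000)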
Related
Expected input batch_size (56180) to match target batch_size (100)
I am getting the following error: ValueError: Expected input batch_size (56180) to match target batch_size (100). My model's input is 3-channel (RGB) 227x227 images and the batch size is 100. The following is the summary (the shapes printed in forward):
torch.Size([100, 3, 227, 227])
torch.Size([100, 10, 111, 111])
torch.Size([100, 20, 53, 53])
torch.Size([56180, 100])
torch.Size([56180, 64])
torch.Size([56180, 64])
torch.Size([56180, 32])
torch.Size([56180, 32])
torch.Size([56180, 1])
This is binary classification (True/False), so I make the final output 1 unit.
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # input image 227x227x3
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(100, 64)
        self.fc3 = nn.Linear(64, 32)
        self.fc6 = nn.Linear(32, 1)

    def forward(self, x):
        print(x.shape)
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        print(x.shape)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        print(x.shape)
        x = x.view(-1, x.size(0))
        print(x.shape)
        x = F.relu(self.fc1(x))
        print(x.shape)
        x = F.dropout(x, training=self.training)
        print(x.shape)
        x = self.fc3(x)
        print(x.shape)
        x = F.dropout(x, training=self.training)
        print(x.shape)
        x = self.fc6(x)
        print(x.shape)
        return x

def train(model, train_loader, optimizer):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(DEVICE), target.to(DEVICE)
        optimizer.zero_grad()
        output = model(data)
        target = target.unsqueeze(-1)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
My question is: I have 100 images per batch, so the target (Y) has 100 units. Why am I getting a 56180-unit result?
Change the view function (in the forward method):
x = x.view(x.size(0), -1)
The batch size must be in the 0 dimension. Your forward method should be defined like this:
def forward(self, x):
    x = F.relu(F.max_pool2d(self.conv1(x), 2))
    x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
    x = x.view(x.size(0), -1)
    x = F.relu(self.fc1(x))
    x = F.dropout(x, training=self.training)
    x = self.fc3(x)
    x = F.dropout(x, training=self.training)
    x = self.fc6(x)
    return x
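To see where 56180 comes from, here is a quick sketch (the shapes are taken from the printout in the question, not new data) comparing the two view calls:

import torch

x = torch.randn(100, 20, 53, 53)    # conv2 output shape from the printout above
print(x.view(-1, x.size(0)).shape)  # torch.Size([56180, 100]) - batch ends up mixed into the rows
print(x.view(x.size(0), -1).shape)  # torch.Size([100, 56180]) - batch size kept in dimension 0

Note that with the corrected view, fc1 would then receive 56180 flattened features per sample, so its input size has to match that rather than 100.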
Change the layer format to a different image resolution
There is a class where everything is set up for the 32x32 image format, taken from here:
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)  # here I changed the image channels from 3 to 1
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 64, 5)
        self.fc1 = nn.Linear(64 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 22)  # here I changed the number of output neurons from 10 to 22

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = torch.flatten(x, 1)  # flatten all dimensions except batch
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
How do I change all this for a resolution of 96x96 with 1 channel (grayscale)?
At resolution 32x32, the output of conv2 is shaped (1, 64, 5, 5). If the input is at resolution 96x96 instead, it will be (1, 64, 21, 21). This means fc1 needs 28224 input neurons:
>>> self.fc1 = nn.Linear(64 * 21 * 21, 120)
Alternatively, you can use nn.LazyLinear, which will infer this number for you based on the first inference:
>>> self.fc1 = nn.LazyLinear(120)
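As a quick sanity check of the 21x21 figure (a small sketch, not part of the original answer), you can push a dummy grayscale 96x96 input through the two conv/pool stages:

import torch
import torch.nn as nn
import torch.nn.functional as F

conv1 = nn.Conv2d(1, 6, 5)
conv2 = nn.Conv2d(6, 64, 5)
pool = nn.MaxPool2d(2, 2)

x = torch.randn(1, 1, 96, 96)
x = pool(F.relu(conv1(x)))  # (1, 6, 46, 46)
x = pool(F.relu(conv2(x)))  # (1, 64, 21, 21)
print(x.shape)              # 64 * 21 * 21 = 28224 input features for fc1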
1 output channel for image classification
I have 22 classes and, correspondingly, 22 channels on the output layer. How can I change all this so that the output is 1 channel, where the number 1 corresponds to class 1, the number 2 to the second class, and so on?
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 64, 5)
        self.fc1 = nn.Linear(64 * 21 * 21, 120)
        self.fc2 = nn.Linear(120, 256)
        self.fc3 = nn.Linear(256, 22)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

net = Net()
RuntimeError: Given groups=1, weight of size [16, 1, 3, 3], expected input[16, 3, 1, 28] to have 1 channels, but got 3 channels instead
I know my images have only 1 channel, so the first conv layer is (1, 16, 3, 1), but I have no idea why I get such an error. Here is my code (I post only the related part).
org_x = train_csv.drop(['id', 'digit', 'letter'], axis=1).values
org_x = org_x.reshape(-1, 28, 28, 1)
org_x = org_x/255
org_x = np.array(org_x)
org_x = org_x.reshape(-1, 1, 28, 28)
org_x = torch.Tensor(org_x).float()

x_test = test_csv.drop(['id','letter'], axis=1).values
x_test = x_test.reshape(-1, 28, 28, 1)
x_test = x_test/255
x_test = np.array(x_test)
x_test = x_test.reshape(-1, 1, 28, 28)
x_test = torch.Tensor(x_test).float()

y = train_csv['digit']
y = list(y)
print(len(y))
org_y = np.zeros([len(y), 1])
for i in range(len(y)):
    org_y[i] = y[i]
org_y = np.array(org_y)
org_y = torch.Tensor(org_y).float()

from sklearn.model_selection import train_test_split
x_train, x_valid, y_train, y_valid = train_test_split(
    org_x, org_y, test_size=0.2, random_state=42)
I checked that the x_train shape is [1638, 1, 28, 28] and the x_valid shape is [410, 1, 28, 28].
transform = transforms.Compose([transforms.ToPILImage(),
                                transforms.ToTensor(),
                                transforms.Normalize((0.5, ), (0.5, ))
                                ])

class kmnistDataset(data.Dataset):
    def __init__(self, images, labels, transforms=None):
        self.x = images
        self.y = labels
        self.transforms = transforms

    def __len__(self):
        return (len(self.x))

    def __getitem__(self, idx):
        data = np.asarray(self.x[idx][0:]).astype(np.uint8)
        if self.transforms:
            data = self.transforms(data)
        if self.y is not None:
            return (data, self.y[idx])
        else:
            return data

train_data = kmnistDataset(x_train, y_train, transforms=transform)
valid_data = kmnistDataset(x_valid, y_valid, transforms=transform)

# dataloaders
train_loader = DataLoader(train_data, batch_size=16, shuffle=True)
valid_loader = DataLoader(valid_data, batch_size=16, shuffle=False)
And here is my model:
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(16)
        self.pool = nn.MaxPool2d(2, 2)
        unit = 64 * 14 * 14
        self.fc1 = nn.Linear(unit, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.bn1(self.conv1(x))))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = x.view(-1, 128 * 28 * 28)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

model = Net()
print(model)
Lastly,
n_epochs = 30
valid_loss_min = np.Inf

for epoch in range(1, n_epochs+1):
    train_loss = 0
    valid_loss = 0

    ###################
    # train the model #
    ###################
    model.train()
    for data in train_loader:
        inputs, labels = data[0], data[1]
        optimizer.zero_grad()
        output = model(inputs)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()*data.size(0)

    #####################
    # validate the model#
    #####################
    model.eval()
    for data in valid_loader:
        inputs, labels = data[0], data[1]
        output = model(inputs)
        loss = criterion(output, labels)
        valid_loss += loss.item()*data.size(0)

    train_loss = train_loss / len(train_loader.dataset)
    valid_loss = valid_loss / len(valid_loader.dataset)

    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch, train_loss, valid_loss))
When I run it, I get this error message:
RuntimeError: Given groups=1, weight of size [16, 1, 3, 3], expected input[16, 3, 1, 28] to have 1 channels, but got 3 channels instead
To be specific:
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-14-b8783819421f> in <module>
     14         inputs, labels = data[0], data[1]
     15         optimizer.zero_grad()
---> 16         output = model(inputs)
     17         loss = criterion(output, labels)
     18         loss.backward()

/opt/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

<ipython-input-12-500e34c49306> in forward(self, x)
     26
     27     def forward(self, x):
---> 28         x = self.pool(F.relu(self.bn1(self.conv1(x))))
     29         x = F.relu(self.conv2(x))
     30         x = F.relu(self.conv3(x))

/opt/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

/opt/anaconda3/lib/python3.7/site-packages/torch/nn/modules/conv.py in forward(self, input)
    421
    422     def forward(self, input: Tensor) -> Tensor:
--> 423         return self._conv_forward(input, self.weight)
    424
    425 class Conv3d(_ConvNd):

/opt/anaconda3/lib/python3.7/site-packages/torch/nn/modules/conv.py in _conv_forward(self, input, weight)
    418                             _pair(0), self.dilation, self.groups)
    419         return F.conv2d(input, weight, self.bias, self.stride,
--> 420                         self.padding, self.dilation, self.groups)
    421
    422     def forward(self, input: Tensor) -> Tensor:

RuntimeError: Given groups=1, weight of size [16, 1, 3, 3], expected input[16, 3, 1, 28] to have 1 channels, but got 3 channels instead
I tried a small demo with your code, and it works fine once the view is x = x.view(-1, 64*14*14) and the input shape is torch.Size([1, 1, 28, 28]):
import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(16)
        self.pool = nn.MaxPool2d(2, 2)
        unit = 64 * 14 * 14
        self.fc1 = nn.Linear(unit, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.bn1(self.conv1(x))))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        # print(x.shape)
        x = x.view(-1, 64*14*14)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

model = Net()
print(model)
data = torch.rand((1, 1, 28, 28))
pred = model(data)
If I instead give my data tensor as data = torch.rand((1, 3, 28, 28)), I get your error, i.e.
RuntimeError: Given groups=1, weight of size [16, 1, 3, 3], expected input[16, 3, 1, 28] to have 1 channels, but got 3 channels instead
So please check the channel dimension of your data just before passing it to your model, i.e. here (highlighted by ** **):
for data in train_loader:
    inputs, labels = data[0], data[1]
    optimizer.zero_grad()
    **print(inputs.shape)**
    output = model(inputs)
    loss = criterion(output, labels)
    loss.backward()
    optimizer.step()
    train_loss += loss.item()*data.size(0)
I think the problem is with the BatchNorm() layer, i.e. self.bn1 = nn.BatchNorm2d(16). The parameter of this layer should be the number of channels of its input. If you look at your last conv layer, conv3, it produces a feature map of 64 channels, so when you feed that feature map to your BatchNorm(), it should be 64 as well. You can simply do the following:
self.bn1 = nn.BatchNorm2d(64)
Convolution Neural Network for regression using pytorch
I am trying to create a CNN for regression. The input is image data. For learning purposes, I have 10 images of shape (10, 3, 448, 448), where 10 is the number of images, 3 is the number of channels, and 448 is the height and width. The output labels are (10, 245). Here is my architecture:
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=5)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=5)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=5)
        self.fc1 = nn.Linear(3*3*64, 256)
        self.fc2 = nn.Linear(256, 245)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        #x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(F.max_pool2d(self.conv3(x), 2))
        x = F.dropout(x, p=0.5, training=self.training)
        x = x.view(-1, 3*3*64)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return x

cnn = CNN()
print(cnn)

it = iter(train_loader)
X_batch, y_batch = next(it)
print(cnn.forward(X_batch).shape)
With a batch size of 2, I expect the model to produce data of shape (2, 245), but it is producing data of shape (2592, 245).
After self.conv3 you have tensors of shape [2, 64, 108, 108], which produce [2592, 576] after the reshape. This is where 2592 comes from. Change the lines self.fc1 = nn.Linear(3*3*64, 256) and x = x.view(-1, 3*3*64) so that they use the proper image size after the layers. Below is the fixed code:
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=5)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=5)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=5)
        self.fc1 = nn.Linear(108*108*64, 256)
        self.fc2 = nn.Linear(256, 245)

    def forward(self, x):
        print(x.shape)
        x = F.relu(self.conv1(x))
        print(x.shape)
        #x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        print(x.shape)
        x = F.dropout(x, p=0.5, training=self.training)
        print(x.shape)
        x = F.relu(F.max_pool2d(self.conv3(x), 2))
        print(x.shape)
        x = F.dropout(x, p=0.5, training=self.training)
        print(x.shape)
        x = x.view(-1, 108*108*64)
        print(x.shape)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return x

cnn = CNN()
print(cnn)
# X_batch, y_batch = next(it)
print(cnn.forward(X_batch).shape)
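For anyone double-checking where [2, 64, 108, 108] and the 2592 rows come from, here is a quick sketch with a dummy batch (assuming 448x448 RGB inputs, as in the question):

import torch
import torch.nn as nn
import torch.nn.functional as F

conv1 = nn.Conv2d(3, 32, kernel_size=5)
conv2 = nn.Conv2d(32, 32, kernel_size=5)
conv3 = nn.Conv2d(32, 64, kernel_size=5)

x = torch.randn(2, 3, 448, 448)
x = F.relu(conv1(x))                    # (2, 32, 444, 444)
x = F.relu(F.max_pool2d(conv2(x), 2))   # (2, 32, 220, 220)
x = F.relu(F.max_pool2d(conv3(x), 2))   # (2, 64, 108, 108)
print(x.shape)
print(x.reshape(-1, 3 * 3 * 64).shape)  # (2592, 576): 2 * 64 * 108 * 108 elements split into rows of 576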