AttributeError: dataset object has no attribute 'c' FastAI

I am trying to train a ResNet-based UNet for image segmentation. The locations of the images and mask images are stored in a CSV file, which is why I created my own data loader, as follows:
X = list(df['input_img'])
y = list(df['mask_img'])
X_train, X_valid, y_train, y_valid = train_test_split(
    X, y, test_size=0.33, random_state=42)

class NumbersDataset():
    def __init__(self, inputs, labels):
        self.X = inputs
        self.y = labels

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        img_train = cv2.imread(self.X[idx])
        img_mask = cv2.imread(self.y[idx])
        img_train = cv2.resize(img_train, (427, 240), interpolation=cv2.INTER_LANCZOS4)
        img_mask = cv2.resize(img_mask, (427, 240), interpolation=cv2.INTER_LANCZOS4)
        return img_train, img_mask
I then use this dataset in the __main__ block:
if __name__ == '__main__':
    dataset_train = NumbersDataset(X_train, y_train)
    dataloader_train = DataLoader(dataset_train, batch_size=4, shuffle=True, num_workers=2)
    dataset_valid = NumbersDataset(X_valid, y_valid)
    dataloader_valid = DataLoader(dataset_valid, batch_size=4, shuffle=True, num_workers=2)
    datas = DataBunch(train_dl=dataloader_train, valid_dl=dataloader_valid)
    leaner = unet_learner(data=datas, arch=models.resnet34)
But I end up getting the following error:
Traceback (most recent call last):
  File "dataset_test.py", line 70, in <module>
    leaner = unet_learner(data = datas, arch = models.resnet34)
  File "/home/sarvagya/miniconda3/envs/gr/lib/python3.6/site-packages/fastai/vision/learner.py", line 118, in unet_learner
    model = to_device(models.unet.DynamicUnet(body, n_classes=data.c, img_size=size, blur=blur, blur_final=blur_final,
  File "/home/sarvagya/miniconda3/envs/gr/lib/python3.6/site-packages/fastai/basic_data.py", line 122, in __getattr__
    def __getattr__(self,k:int)->Any: return getattr(self.train_dl, k)
  File "/home/sarvagya/miniconda3/envs/gr/lib/python3.6/site-packages/fastai/basic_data.py", line 38, in __getattr__
    def __getattr__(self,k:str)->Any: return getattr(self.dl, k)
  File "/home/sarvagya/miniconda3/envs/gr/lib/python3.6/site-packages/fastai/basic_data.py", line 20, in DataLoader___getattr__
    def DataLoader___getattr__(dl, k:str)->Any: return getattr(dl.dataset, k)
AttributeError: 'NumbersDataset' object has no attribute 'c'
I tried searching and even tried using SegmentationItemList.from_df but nothing helped. What am I getting wrong here?

You should add the attribute c to your NumbersDataset; as the traceback shows, unet_learner passes data.c to DynamicUnet as n_classes, the number of output classes. For example:

class NumbersDataset():
    def __init__(self, inputs, labels, c):
        self.X = inputs
        self.y = labels
        self.c = c
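With that change, the pipeline from the question can pass c through (a sketch based on the question's code; c=2 is an assumption for a binary background/foreground mask, and fastai may still expect other dataset attributes beyond c):

if __name__ == '__main__':
    dataset_train = NumbersDataset(X_train, y_train, c=2)  # c=2 assumed: background + foreground
    dataset_valid = NumbersDataset(X_valid, y_valid, c=2)
    dataloader_train = DataLoader(dataset_train, batch_size=4, shuffle=True, num_workers=2)
    dataloader_valid = DataLoader(dataset_valid, batch_size=4, shuffle=True, num_workers=2)
    datas = DataBunch(train_dl=dataloader_train, valid_dl=dataloader_valid)
    leaner = unet_learner(data=datas, arch=models.resnet34)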

Related

Getting ValueError: as_list() is not defined on an unknown TensorShape. but the as_list() works fine on (next(iter(dataset)))

I'm trying to use tf.data.Dataset.list_files to load .tiff images and infer their labels from their names.
I use the following code but stumbled upon a strange issue, as described below:
import os
import datetime as dt
import numpy as np
import pathlib
from pathlib import Path
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import cv2
DATA_PATH = Path('PATH TO DATA')
BATCH_SIZE = 32
INPUT_IMAGE_SHAPE = (128, 128, 1)
CROP_SHAPE = INPUT_IMAGE_SHAPE
CENTRAL_CROP_PROP = .7
BRIGHTNESS_DELTA = 0.1
CONTRAST = (0.4, 0.6)
class ConvModel(keras.Model):
    def __init__(self, input_shape):
        super().__init__()
        self.input_image_shape = input_shape
        self.model = keras.Sequential([
            layers.Input(shape=input_shape),
            layers.Conv2D(32, 3),
            layers.BatchNormalization(),
            layers.ReLU(),
            layers.MaxPool2D(),
            layers.Conv2D(64, 5),
            layers.BatchNormalization(),
            layers.ReLU(),
            layers.MaxPool2D(),
            layers.Conv2D(128, 3, kernel_regularizer=keras.regularizers.l2(0.01)),
            layers.BatchNormalization(),
            layers.ReLU(),
            layers.Flatten(),
            layers.Dense(64, activation='relu', kernel_regularizer=keras.regularizers.l2(0.01)),
            layers.Dropout(0.5),
            layers.Dense(10)
        ])

    def call(self, inputs):
        return self.model(inputs)
def preprocessing_func(image):
    img = tf.image.central_crop(image, CENTRAL_CROP_PROP)
    if img.shape[2] == 3:
        img = tf.image.rgb_to_grayscale(img)
    return img
def augment(image):
    img = tf.image.random_crop(image, size=CROP_SHAPE)  # Slices a portion of shape `size` out of `value` at a uniformly chosen offset. Requires value.shape >= size.
    img = tf.image.random_brightness(img, max_delta=BRIGHTNESS_DELTA)  # Equivalent to adjust_brightness() using a delta randomly picked in the interval [-max_delta, max_delta)
    img = tf.image.random_contrast(img, lower=CONTRAST[0], upper=CONTRAST[1])  # Equivalent to adjust_contrast() but uses a contrast_factor randomly picked in the interval [lower, upper)
    img = tf.image.random_flip_left_right(img)
    img = tf.image.random_flip_up_down(img)
    return img
def load_image(image_file):
    # 1) Decode the path
    image_file = image_file.decode('utf-8')

    # 2) Read the image
    img = cv2.imread(image_file)
    if len(img.shape) < 3:
        img = np.expand_dims(img, axis=-1)
    img = preprocessing_func(image=img)
    img = augment(img)
    img = tf.cast(img, tf.float32)
    img.set_shape(INPUT_IMAGE_SHAPE)

    # 3) Get the label
    label = tf.strings.split(image_file, "\\")[-1]
    label = tf.strings.substr(label, pos=0, len=1)
    label = tf.strings.to_number(label, out_type=tf.float32)
    label = tf.cast(label, tf.float32)
    label.set_shape([])

    return img, label
def _fixup_shape(images, labels):
    images.set_shape(INPUT_IMAGE_SHAPE)
    labels.set_shape([])
    return images, labels
if __name__ == '__main__':
    train_ds = tf.data.Dataset.list_files(str(DATA_PATH / '*.tiff'))
    train_ds = train_ds.map(lambda x: tf.numpy_function(load_image, [x], (tf.float32, tf.float32)))
    # train_ds = train_ds.map(_fixup_shape)
    train_ds = train_ds.batch(BATCH_SIZE)
    train_ds = train_ds.shuffle(buffer_size=1000)
    train_ds = train_ds.prefetch(tf.data.AUTOTUNE)
    train_ds = train_ds.repeat()

    model = ConvModel(input_shape=INPUT_IMAGE_SHAPE)
    model.compile(
        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        optimizer=keras.optimizers.Adam(learning_rate=3e-4),
        metrics=['accuracy']
    )

    train_log_dir = f'./logs/{dt.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}/train_data'
    callbacks = [
        keras.callbacks.TensorBoard(
            log_dir=train_log_dir,
            write_images=True
        )
    ]

    model.fit(
        train_ds,
        batch_size=32,
        steps_per_epoch=10,
        epochs=10,
        callbacks=callbacks
    )
When I try to run it, it throws the following error:
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-213-b1f3d317135b> in <module>
----> 1 model.fit(
      2     train_ds,
      3     batch_size=32,
      4     steps_per_epoch=10,
      5     epochs=10,

~\anaconda3\lib\site-packages\keras\utils\traceback_utils.py in error_handler(*args, **kwargs)
     65     except Exception as e:  # pylint: disable=broad-except
     66       filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67       raise e.with_traceback(filtered_tb) from None
     68     finally:
     69       del filtered_tb

~\anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py in autograph_handler(*args, **kwargs)
   1127       except Exception as e:  # pylint:disable=broad-except
   1128         if hasattr(e, "ag_error_metadata"):
-> 1129           raise e.ag_error_metadata.to_exception(e)
   1130         else:
   1131           raise

ValueError: in user code:

    File "C:\Users\mchls\anaconda3\lib\site-packages\keras\engine\training.py", line 878, in train_function *
        return step_function(self, iterator)
    File "C:\Users\mchls\anaconda3\lib\site-packages\keras\engine\training.py", line 867, in step_function **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    File "C:\Users\mchls\anaconda3\lib\site-packages\keras\engine\training.py", line 860, in run_step **
        outputs = model.train_step(data)
    File "C:\Users\mchls\anaconda3\lib\site-packages\keras\engine\training.py", line 817, in train_step
        self.compiled_metrics.update_state(y, y_pred, sample_weight)
    File "C:\Users\mchls\anaconda3\lib\site-packages\keras\engine\compile_utils.py", line 439, in update_state
        self.build(y_pred, y_true)
    File "C:\Users\mchls\anaconda3\lib\site-packages\keras\engine\compile_utils.py", line 359, in build
        self._metrics = tf.__internal__.nest.map_structure_up_to(y_pred, self._get_metric_objects,
    File "C:\Users\mchls\anaconda3\lib\site-packages\keras\engine\compile_utils.py", line 485, in _get_metric_objects
        return [self._get_metric_object(m, y_t, y_p) for m in metrics]
    File "C:\Users\mchls\anaconda3\lib\site-packages\keras\engine\compile_utils.py", line 485, in <listcomp>
        return [self._get_metric_object(m, y_t, y_p) for m in metrics]
    File "C:\Users\mchls\anaconda3\lib\site-packages\keras\engine\compile_utils.py", line 506, in _get_metric_object
        y_t_rank = len(y_t.shape.as_list())

    ValueError: as_list() is not defined on an unknown TensorShape.
though manually running X.shape.as_list() and y.shape.as_list() works, as shown below:
X, y = next(iter(train_ds))
X.shape.as_list(), y.shape.as_list()
[OUT] ([16, 128, 128, 1], [16])
This issue is fixed, as described in this GitHub thread and in this answer, by manually mapping the following function over the dataset via train_ds = train_ds.map(_fixup_shape).batch(BATCH_SIZE):
def _fixup_shape(images, labels):
    images.set_shape([128, 128, 1])
    labels.set_shape([])  # I have 19 classes
    # weights.set_shape([None])
    return images, labels

if __name__ == '__main__':
    train_ds = tf.data.Dataset.list_files(str(DATA_PATH / '*.tiff'))
    train_ds = train_ds.map(lambda x: tf.numpy_function(load_image, [x], (tf.float32, tf.float32)))
    train_ds = train_ds.map(_fixup_shape)
    train_ds = train_ds.batch(BATCH_SIZE)
    train_ds = train_ds.shuffle(buffer_size=1000)
    train_ds = train_ds.prefetch(tf.data.AUTOTUNE)
    train_ds = train_ds.repeat()
but I can't figure out why _fixup_shape is needed in the first place, as I already set the shape inside the load_image function.
Is this a bug in TF 2.6.1 or is it expected behavior?
Thanks
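This is expected behavior rather than a bug: tf.numpy_function wraps arbitrary Python code that TensorFlow cannot trace, so its outputs carry an unknown static shape, and the set_shape calls inside load_image only touch the concrete values inside the wrapped function; they never reach the graph-level signature. That is why Keras fails at y_t.shape.as_list() while next(iter(train_ds)) works: the latter inspects a concrete, already-computed batch. A quick way to see the difference (a sketch reusing train_ds from the question, before batching):

# Before mapping _fixup_shape: static shapes are unknown at the graph level.
print(train_ds.element_spec)
# e.g. (TensorSpec(shape=<unknown>, dtype=tf.float32, ...), TensorSpec(shape=<unknown>, ...))

# After mapping _fixup_shape: static shapes are declared, so Keras can build its metrics.
train_ds = train_ds.map(_fixup_shape)
print(train_ds.element_spec)
# e.g. (TensorSpec(shape=(128, 128, 1), dtype=tf.float32, ...), TensorSpec(shape=(), ...))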

AttributeError: 'tuple' object has no attribute 'train_dataloader'

I have three files. In the datamodule file, I have created the data and used the basic format of PyTorch Lightning. In the linear_model file I made a linear regression model based on this page. Finally, I have a train file in which I am calling the model and trying to fit the data. But I am getting this error:
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
Traceback (most recent call last):
  File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/home/mostafiz/Dropbox/MSc/Thesis/regreesion_EC/src/test_train.py", line 10, in <module>
    train_dataloader=datamodule.DataModuleClass().setup().train_dataloader(),
AttributeError: 'tuple' object has no attribute 'train_dataloader'
Sample datamodule file
class DataModuleClass(pl.LightningDataModule):
    def __init__(self):
        super().__init__()
        self.sigma = 5
        self.batch_size = 10
        self.prepare_data()

    def prepare_data(self):
        x = np.random.uniform(0, 10, 10)
        e = np.random.normal(0, self.sigma, len(x))
        y = x + e
        X = np.transpose(np.array([x, e]))
        self.x_train_tensor = torch.from_numpy(X).float().to(device)
        self.y_train_tensor = torch.from_numpy(y).float().to(device)
        training_dataset = TensorDataset(self.x_train_tensor, self.y_train_tensor)
        self.training_dataset = training_dataset

    def setup(self):
        data = self.training_dataset
        self.train_data, self.val_data = random_split(data, [8, 2])
        return self.train_data, self.val_data

    def train_dataloader(self):
        return DataLoader(self.train_data)

    def val_dataloader(self):
        return DataLoader(self.val_data)
Sample training file
from . import datamodule, linear_model

model = linear_model.LinearRegression(input_dim=2, l1_strength=1, l2_strength=1)
trainer = pl.Trainer()
trainer.fit(model,
            train_dataloader=datamodule.DataModuleClass().setup().train_dataloader(),
            val_dataloaders=datamodule.DataModuleClass().setup().val_dataloaders())
Let me know if you need more code or explanation.
Update (Based on the comment)
Now I am getting the following error after removing self.prepare_data() from the __init__() of DataModuleClass, removing return self.train_data, self.val_data from setup(), and changing the test file to:
data_module = datamodule.DataModuleClass()
trainer = pl.Trainer()
trainer.fit(model, data_module)
Error:
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
Traceback (most recent call last):
  File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/home/mostafiz/Dropbox/MSc/Thesis/regreesion_EC/src/test_train.py", line 10, in <module>
    train_dataloader=datamodule.DataModuleClass().train_dataloader(),
  File "/home/mostafiz/Dropbox/MSc/Thesis/regreesion_EC/src/datamodule.py", line 54, in train_dataloader
    return DataLoader(self.train_data)
AttributeError: 'DataModuleClass' object has no attribute 'train_data'
Most of the things were correct, except for a few details:

def prepare_data(self):

This function was right, except that it should not return anything.

Another thing was

def setup(self, stage=None):

setup() requires a stage argument, which can be given a default value of None when we don't need to switch between separate train and test stages.
Putting everything together, here is the code:
from argparse import ArgumentParser

import numpy as np
import pytorch_lightning as pl
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.optim import Adam
from torch.optim.optimizer import Optimizer
from torch.utils.data import random_split, DataLoader, TensorDataset
from torchvision import transforms


class LinearRegression(pl.LightningModule):
    def __init__(
        self,
        input_dim: int = 2,
        output_dim: int = 1,
        bias: bool = True,
        learning_rate: float = 1e-4,
        optimizer: Optimizer = Adam,
        l1_strength: float = 0.0,
        l2_strength: float = 0.0
    ):
        super().__init__()
        self.save_hyperparameters()
        self.optimizer = optimizer
        self.linear = nn.Linear(in_features=self.hparams.input_dim, out_features=self.hparams.output_dim, bias=bias)

    def forward(self, x):
        y_hat = self.linear(x)
        return y_hat

    def training_step(self, batch, batch_idx):
        x, y = batch

        # flatten any input
        x = x.view(x.size(0), -1)

        y_hat = self(x)

        loss = F.mse_loss(y_hat, y, reduction='sum')

        # L1 regularizer
        if self.hparams.l1_strength > 0:
            l1_reg = sum(param.abs().sum() for param in self.parameters())
            loss += self.hparams.l1_strength * l1_reg

        # L2 regularizer
        if self.hparams.l2_strength > 0:
            l2_reg = sum(param.pow(2).sum() for param in self.parameters())
            loss += self.hparams.l2_strength * l2_reg

        loss /= x.size(0)

        tensorboard_logs = {'train_mse_loss': loss}
        progress_bar_metrics = tensorboard_logs
        return {'loss': loss, 'log': tensorboard_logs, 'progress_bar': progress_bar_metrics}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        x = x.view(x.size(0), -1)
        y_hat = self(x)
        return {'val_loss': F.mse_loss(y_hat, y)}

    def validation_epoch_end(self, outputs):
        val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        tensorboard_logs = {'val_mse_loss': val_loss}
        progress_bar_metrics = tensorboard_logs
        return {'val_loss': val_loss, 'log': tensorboard_logs, 'progress_bar': progress_bar_metrics}

    def configure_optimizers(self):
        return self.optimizer(self.parameters(), lr=self.hparams.learning_rate)


np.random.seed(42)
device = 'cuda' if torch.cuda.is_available() else 'cpu'


class DataModuleClass(pl.LightningDataModule):
    def __init__(self):
        super().__init__()
        self.sigma = 5
        self.batch_size = 10

    def prepare_data(self):
        x = np.random.uniform(0, 10, 10)
        e = np.random.normal(0, self.sigma, len(x))
        y = x + e
        X = np.transpose(np.array([x, e]))
        self.x_train_tensor = torch.from_numpy(X).float().to(device)
        self.y_train_tensor = torch.from_numpy(y).float().to(device)
        training_dataset = TensorDataset(self.x_train_tensor, self.y_train_tensor)
        self.training_dataset = training_dataset

    def setup(self, stage=None):
        data = self.training_dataset
        self.train_data, self.val_data = random_split(data, [8, 2])

    def train_dataloader(self):
        return DataLoader(self.train_data)

    def val_dataloader(self):
        return DataLoader(self.val_data)


model = LinearRegression(input_dim=2, l1_strength=1, l2_strength=1)
trainer = pl.Trainer()

dummy = DataModuleClass()
trainer.fit(model, dummy)
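One design note on the fixed version: because the DataModuleClass instance is passed directly to trainer.fit, Lightning itself calls prepare_data() and setup(stage) at the right points in the training lifecycle, which is why neither needs to be invoked manually from __init__() or from the training script.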

Problem running GRU model; missing argument for forward()

I am working on a GRU, and when I try to make predictions I get an error indicating that I need to define h for forward(). I have tried several things and ran out of patience after googling and scouring Stack Overflow for hours.
This is the class:
class GRUNet(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, n_layers, drop_prob=0.2):
        super(GRUNet, self).__init__()
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.gru = nn.GRU(input_dim, hidden_dim, n_layers, batch_first=True, dropout=drop_prob)
        self.fc = nn.Linear(hidden_dim, output_dim)
        self.relu = nn.ReLU()

    def forward(self, x, h):
        out, h = self.gru(x, h)
        out = self.fc(self.relu(out[:, -1]))
        return out, h

    def init_hidden(self, batch_size):
        weight = next(self.parameters()).data
        hidden = weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().to(device)
        return hidden
and then this is where I load the model and try to make a prediction. Both of these are in the same script.
inputs = np.load('.//Pred//input_list.npy')
print(inputs.ndim, inputs.shape)
Gmodel = GRUNet(24,256,1,2)
Gmodel = torch.load('.//GRU//GRU_1028_48.pkl')
Gmodel.eval()
pred = Gmodel(inputs)
Without any other arguments to Gmodel I get the following:
Traceback (most recent call last):
  File ".\grunet.py", line 136, in <module>
    pred = Gmodel(inputs)
  File "C:\Users\ryang\Anaconda-3\envs\tf-gpu\lib\site-packages\torch\nn\modules\module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
TypeError: forward() missing 1 required positional argument: 'h'
You need to provide the hidden state as well, which is usually all zeros initially, or simply None! That is, you either need to explicitly provide one like this:
hidden_state = torch.zeros(size=(num_layers*direction, batch_size, hidden_dim)).to(device)
pred = Gmodel(inputs, hidden_state)
or simply do:
hidden_state = None
pred = Gmodel(inputs, hidden_state)
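Since the question's GRUNet already defines an init_hidden helper that builds exactly this zero tensor, a third option is to reuse it. A sketch (assuming inputs is first converted from the loaded NumPy array to a 3-D float tensor of shape (batch, seq_len, input_dim), as batch_first=True requires, and that device is defined as in the training script):

inputs = torch.from_numpy(inputs).float()          # forward() expects a tensor, not a NumPy array
h = Gmodel.init_hidden(batch_size=inputs.size(0))  # the class's own zero-initialised hidden state
pred, h = Gmodel(inputs, h)                        # forward() returns (out, h), so unpack the tuple

Note the unpacking on the last line: because forward() returns both the output and the new hidden state, pred = Gmodel(inputs, hidden_state) on its own would bind pred to a tuple.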

Can't pass the check_estimator for scikit-learn

I can't understand why I keep getting this error. Does anybody know?
class AdaBoost(BaseEstimator, ClassifierMixin):
    def __init__(self, M=1, tree_depth=1, random_state=None):
        self.M = M
        self.tree_depth = tree_depth
        self.random_state = random_state

    def get_params(self, deep=True):
        return {"tree_depth": self.tree_depth, "M": self.M, "random_state": self.random_state}

    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self

    def fit(self, X, y):
        self.classes_, y = np.unique(y, return_inverse=True)
        self.X_ = X
        self.y_ = y
        X, y = check_X_y(X, y)
        self.models = []
        self.alphas = []
        n_samples, _ = X.shape
        w = np.ones(n_samples) / n_samples

        for m in range(self.M):
            clf = DecisionTreeClassifier(max_depth=self.tree_depth)
            clf.fit(X, y, sample_weight=w)
            pred = clf.predict(X)

            error = w.dot(pred != y)
            alpha = 0.5 * (np.log(1 - error) - np.log(error))

            w = w * np.exp(-alpha * y * pred)
            w = w / w.sum()  # normalise to sum to 1

            self.models.append(clf)
            self.alphas.append(alpha)

    def predict(self, X):
        check_is_fitted(self, ['X_', 'y_', 'classes_'])
        n_samples, _ = X.shape
        ada = np.zeros(n_samples)
        for alpha, clf in zip(self.alphas, self.models):
            ada += alpha * clf.predict(X)
        return np.sign(ada)

    def score(self, X, y):
        pred = self.predict(X)
        accuracy = 100 * sum(pred == y) / len(y)
        return accuracy
Error:
Traceback (most recent call last):
  File "C:\Users\usethis.py", line 81, in <module>
    check_estimator(AdaBoost)
  File "C:\Users\AppData\Local\Programs\Python\Python37-32\lib\site-packages\sklearn\utils\estimator_checks.py", line 302, in check_estimator
    check(name, estimator)
  File "C:\AppData\Local\Programs\Python\Python37-32\lib\site-packages\sklearn\utils\testing.py", line 355, in wrapper
    return fn(*args, **kwargs)
  File "C:\Users\AppData\Local\Programs\Python\Python37-32\lib\site-packages\sklearn\utils\estimator_checks.py", line 1646, in check_estimators_fit_returns_self
    assert estimator.fit(X, y) is estimator
AssertionError
[Finished in 1.7s with exit code 1]
The way scikit-learn is developed requires that fit functions return the object itself after fitting. You can do this by adding return self as the last line in the fit function.
class AdaBoost(BaseEstimator, ClassifierMixin):
    ...
    def fit(self, X, y):
        ...
        return self
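With that in place, the failing check_estimators_fit_returns_self test from the traceback, which asserts estimator.fit(X, y) is estimator, should pass. To re-run the conformance suite (a sketch; note that newer scikit-learn versions expect an estimator instance rather than the class, whereas the question's check_estimator(AdaBoost) call passes the class, which older versions accepted):

from sklearn.utils.estimator_checks import check_estimator

check_estimator(AdaBoost())  # runs the full suite of scikit-learn API-conformance checks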

AttributeError: 'list' object has no attribute 'dim' when predicting in pytorch

I'm currently loading in a model and 11 input values. Then I'm sending those 11 values into a tensor and attempting to predict outputs.
Here is my code:
# coding: utf-8

import torch
import torchvision
from torchvision import transforms, datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as utils
import numpy as np

data_np = np.loadtxt('input_preds.csv', delimiter=',')
train_ds = utils.TensorDataset(torch.tensor(data_np, dtype=torch.float32).view(-1, 11))
trainset = torch.utils.data.DataLoader(train_ds, batch_size=1, shuffle=True)

# setting device on GPU if available, else CPU; replace .cuda() with .to(device)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        #self.bn = nn.BatchNorm2d(11)
        self.fc1 = nn.Linear(11, 22)
        self.fc2 = nn.Linear(22, 44)
        self.fc3 = nn.Linear(44, 22)
        self.fc4 = nn.Linear(22, 11)

    def forward(self, x):
        #x = x.view(-1, 11)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        #return F.log_softmax(x, dim=1)
        return x

model1 = torch.load('./1e-2')
model2 = torch.load('./1e-3')

for data in trainset:
    X = data
    output = model1(X).to(device)
    print(output)
However, I get this error:
Traceback (most recent call last):
  File "inference.py", line 53, in <module>
    output = model1(X).to(device)
  File "C:\Users\Happy\Miniconda3\envs\torch\lib\site-packages\torch\nn\modules\module.py", line 477, in __call__
    result = self.forward(*input, **kwargs)
  File "inference.py", line 40, in forward
    x = F.relu(self.fc1(x))
  File "C:\Users\Happy\Miniconda3\envs\torch\lib\site-packages\torch\nn\modules\module.py", line 477, in __call__
    result = self.forward(*input, **kwargs)
  File "C:\Users\Happy\Miniconda3\envs\torch\lib\site-packages\torch\nn\modules\linear.py", line 55, in forward
    return F.linear(input, self.weight, self.bias)
  File "C:\Users\Happy\Miniconda3\envs\torch\lib\site-packages\torch\nn\functional.py", line 1022, in linear
    if input.dim() == 2 and bias is not None:
AttributeError: 'list' object has no attribute 'dim'
I've tried to convert the batch to a numpy array but that didn't help. How do I resolve this error? Thank you for your help.
It looks like your X (data) is a list of tensors, while a PyTorch tensor is expected.
Try X = torch.stack(X).to(device) before sending it to the model.
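For context on why X is a list here: a DataLoader over a TensorDataset yields each batch as a list of tensors, one per tensor in the dataset, so with a single-tensor dataset another option is to unpack the batch directly (a sketch based on the question's loop, assuming model1 has also been moved to the same device with model1.to(device)):

for data in trainset:
    X = data[0].to(device)  # unpack the single tensor from the batch list
    output = model1(X)      # move the input to the device, not just the output
    print(output)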
