Batch size in DataLoader - PyTorch

I have two tensors:
x[train], y[train]
And their shapes are
(311, 3, 224, 224) and (311,)  # 311 is the number of samples
I want to use DataLoader to load them batch by batch; the code I wrote is:
import torch.utils.data as Data
from torch.utils.data import Dataset

class KD_Train(Dataset):
    def __init__(self, a, b):
        self.imgs = a
        self.index = b

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, index):
        return self.imgs, self.index

kdt = KD_Train(x[train], y[train])

train_data_loader = Data.DataLoader(
    kdt,
    batch_size=64,
    shuffle=True,
    num_workers=0)

for step, (a, b) in enumerate(train_data_loader):
    print(a.shape)
    break
But it shows:
(64, 311, 3, 224, 224)
The DataLoader just adds a dimension instead of selecting a batch of samples. Does anyone know what I should do?

Your dataset's __getitem__ method should return a single element:
def __getitem__(self, index):
    return self.imgs[index], self.index[index]
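
For completeness, here is a minimal sketch of the corrected dataset (reusing x[train] and y[train] from the question); with it, the DataLoader stacks 64 individual samples per batch instead of returning the whole tensors:

from torch.utils.data import Dataset, DataLoader

class KD_Train(Dataset):
    def __init__(self, a, b):
        self.imgs = a     # (311, 3, 224, 224)
        self.index = b    # (311,)

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, index):
        # return one (image, label) sample, not the full tensors
        return self.imgs[index], self.index[index]

kdt = KD_Train(x[train], y[train])
train_data_loader = DataLoader(kdt, batch_size=64, shuffle=True, num_workers=0)

a, b = next(iter(train_data_loader))
print(a.shape)  # torch.Size([64, 3, 224, 224])
print(b.shape)  # torch.Size([64])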

Related

How do I set the dimensions of Conv1d correctly?

This is a toy example as I'm learning PyTorch and using it on one-dimensional time series, in this case a sine wave.
I'm trying to use Conv1d, but I get the following error:
RuntimeError: Given groups=1, weight of size [5, 1, 2], expected input[1, 994, 5] to have 1 channels, but got 994 channels instead
My 'lookback' is 5 time steps, and the shape of my data batch is [994, 5].
What am I doing wrong?
import torch;from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F;import pytorch_lightning as pl
from torch import nn, tensor
import matplotlib.pyplot as plt

class TsDs(torch.utils.data.Dataset):
    def __init__(self, s, l=5): super().__init__();self.l,self.s=l,s
    def __len__(self): return self.s.shape[0] - 1 - self.l
    def __getitem__(self, i): return self.s[i:i+self.l], torch.log(self.s[i+self.l+1]/self.s[i+self.l])
    def plt(self): plt.plot(self.s)

class TsDm(pl.LightningDataModule):
    def __init__(self, length=5000, batch_size=1000): super().__init__();self.batch_size=batch_size;self.s = torch.sin(torch.arange(length)*0.2) + 5
    def train_dataloader(self): return DataLoader(TsDs(self.s[:3999]), batch_size=self.batch_size, shuffle=False)
    def val_dataloader(self): return DataLoader(TsDs(self.s[4000:]), batch_size=self.batch_size)

dm = TsDm()

class MyModel(pl.LightningModule):
    def __init__(self, learning_rate=0.01):
        super().__init__();self.learning_rate = learning_rate
        self.network = nn.Sequential(nn.Conv1d(1,5,2),nn.ReLU(),nn.Linear(5,3),nn.ReLU(),nn.Linear(3,1), nn.Tanh())
        # self.network = nn.Sequential(nn.Linear(5,5),nn.ReLU(),nn.Linear(5,3),nn.ReLU(),nn.Linear(3,1), nn.Tanh())
    def forward(self, x): return self.network(x)
    def step(self, batch, batch_idx, stage):
        x, y = batch
        loss = -torch.mean(self(x)*y)
        print(loss)
        return loss
    def training_step(self, batch, batch_idx): return self.step(batch, batch_idx, "train")
    def validation_step(self, batch, batch_idx): return self.step(batch, batch_idx, "val")
    def configure_optimizers(self): return torch.optim.SGD(self.parameters(), lr=self.learning_rate)

mm = MyModel(0.01);trainer = pl.Trainer(max_epochs=10)
trainer.fit(mm, datamodule=dm)
There are two issues in your code:
Looking at the documentation of nn.Conv1d, your input shape should be (B, C, L). In your default case, you have L=5, the sequence length, but you need to create that extra dimension representing the feature size of a sequence element, here C=1. You can do so by changing TsDs's __getitem__ function to:
def __getitem__(self, i):
    x = self.s[i:i+self.l].unsqueeze(0)  # x shaped (1, self.l), i.e. C=1 channel
    y = torch.log(self.s[i+self.l+1]/self.s[i+self.l])  # scalar target
    return x, y
Your convolutional layer has a stride of 1 and a kernel size of 2, which means its output will be shaped (B, 5, L-1=4). The following layer is a fully connected layer instantiated as nn.Linear(5, 3), which means it expects (*, H_in=5) and will output (*, H_out=3). You can either:

- flatten the Conv1d output with nn.Flatten and feed it to a bigger fully connected layer, for instance nn.Linear(20, 3) (see the sketch below);
- use a convolutional layer with a wider kernel; with a kernel of 5 (your sequence length) you end up with a tensor shaped (B, 5, 1), which you can feed to nn.Linear(5, 3), although this approach doesn't really scale when L changes;
- apply nn.AvgPool1d to get an average representation of the sequence after the convolutional layers have been applied.

Those are just a few directions...
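
For illustration, a minimal sketch of the first option (flatten, then a bigger fully connected layer), assuming the same (B, C=1, L=5) input discussed above:

import torch
from torch import nn

# Conv1d(1, 5, 2) turns (B, 1, 5) into (B, 5, 4); Flatten turns that into (B, 20).
network = nn.Sequential(
    nn.Conv1d(1, 5, 2), nn.ReLU(),
    nn.Flatten(),
    nn.Linear(20, 3), nn.ReLU(),
    nn.Linear(3, 1), nn.Tanh(),
)

x = torch.randn(994, 1, 5)   # a batch shaped (B, C=1, L=5)
print(network(x).shape)      # torch.Size([994, 1])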

How to apply image transform to a list of images and maintain the right dimensions?

I'm using the Omniglot dataset, which is a set of 19,280 images, each which is 105 x 105 (grayscale).
I defined a custom Dataset class with the following transform:
import torch
from torch.utils.data import Dataset
from torchvision import transforms

class OmniglotDataset(Dataset):
    def __init__(self, X, transform=None):
        self.X = X
        self.transform = transform

    def __len__(self):
        return self.X.shape[0]

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img = self.X[idx]
        if self.transform:
            img = self.transform(img)
        return img

img_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
X_train.shape
(19280, 105, 105)
train_dataset = OmniglotDataset(X_train, transform=img_transform)
When I index a single image, it returns the right dimensions:
train_dataset[0].shape
torch.Size([1, 105, 105])
But when I index several images, it returns the dimensions in the wrong order (I expect 3 x 105 x 105):
train_dataset[[1,2,3]].shape
torch.Size([105, 3, 105])
You got this result because you applied a transform meant for a single image to a list of indices: train_dataset[[1,2,3]] passes an array of shape (3, 105, 105) to ToTensor, which treats it as a single H x W x C image and permutes it to (105, 3, 105).
A more convenient way to get a batch of any size is to use a DataLoader:
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

img_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])

omniglot = datasets.Omniglot(root='./data', background=True, download=True, transform=img_transform)
data_loader = DataLoader(omniglot, shuffle=False, batch_size=8)

for image_batch, labels in data_loader:
    # image_batch now contains the first eight samples
    print(image_batch.shape)  # torch.Size([8, 1, 105, 105])
    break
If you really need to get images in arbitrary order:
from operator import itemgetter
indexes = [1,3,5]
selected_samples = itemgetter(*indexes)(omniglot)
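If you go that route, note that each element of the torchvision dataset is an (image, label) pair, so you would stack the selected images yourself; a sketch, assuming the omniglot dataset and img_transform defined above:

import torch
from operator import itemgetter

indexes = [1, 3, 5]
samples = itemgetter(*indexes)(omniglot)            # tuple of (image, label) pairs
images = torch.stack([img for img, _ in samples])   # torch.Size([3, 1, 105, 105])
labels = torch.tensor([lbl for _, lbl in samples])  # torch.Size([3])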

Tensor size mismatch autoencoder pytorch

I'm using a stacked autoencoder, which is a bunch of Conv layers.
However, I'm getting a tensor size mismatch error, and I'm not sure about the reason. Everything done in the Encoder is reversed in the Decoder!
This is for time-series data. The input shape is (batch_size, 1, 3000).
Here's the code
import torch.nn as nn

class CDAutoEncoder(nn.Module):
    def __init__(self, input_size, output_size, kernel, stride):
        super(CDAutoEncoder, self).__init__()
        self.forward_pass = nn.Sequential(
            nn.Conv1d(input_size, output_size, kernel_size=kernel, stride=stride, padding=0),
            nn.PReLU(),
        )
        self.backward_pass = nn.Sequential(
            nn.ConvTranspose1d(output_size, input_size, kernel_size=kernel, stride=stride, padding=0),
            nn.PReLU(),
        )

    def forward(self, x):
        y = self.forward_pass(x)
        return y

    def reconstruct(self, x):
        return self.backward_pass(x)


class StackedAutoEncoder(nn.Module):
    def __init__(self):
        super(StackedAutoEncoder, self).__init__()
        self.ae1 = CDAutoEncoder(1, 32, 50, 10)
        self.ae2 = CDAutoEncoder(32, 64, 10, 3)
        self.ae3 = CDAutoEncoder(64, 64, 5, 1)

    def forward(self, x):
        a1 = self.ae1(x)
        a2 = self.ae2(a1)
        a3 = self.ae3(a2)
        return self.reconstruct(a3)

    def reconstruct(self, x):
        a2_reconstruct = self.ae3.reconstruct(x)
        a1_reconstruct = self.ae2.reconstruct(a2_reconstruct)
        x_reconstruct = self.ae1.reconstruct(a1_reconstruct)
        return x_reconstruct
The error:
RuntimeError: The size of tensor a (2990) must match the size of tensor b (3000) at non-singleton dimension 2
I've tried adding padding and it worked, but when I changed the kernel size I got a different tensor-size-mismatch error.
Apparently there's nothing like 'same' padding, so is there an automated solution for this?
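
As a side note, the 2990 vs. 3000 mismatch follows directly from the Conv1d/ConvTranspose1d length formulas (a quick check, assuming the (batch_size, 1, 3000) input above): the flooring in the stride-3 encoder layer discards a length that the decoder cannot recover.

# Conv1d:          L_out = (L_in - kernel) // stride + 1      (padding=0)
# ConvTranspose1d: L_out = (L_in - 1) * stride + kernel       (padding=0)
def conv(l, k, s):
    return (l - k) // s + 1

def deconv(l, k, s):
    return (l - 1) * s + k

l = 3000
for k, s in [(50, 10), (10, 3), (5, 1)]:    # ae1, ae2, ae3 forward passes
    l = conv(l, k, s)                       # 296 -> 96 -> 92
for k, s in [(5, 1), (10, 3), (50, 10)]:    # ae3, ae2, ae1 backward passes
    l = deconv(l, k, s)                     # 96 -> 295 -> 2990
print(l)  # 2990, not 3000 -- the size-mismatch error above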

How to convert multiclass data into binary class in python for image classification?

I am trying to do binary-class image classification using PyTorch. I have a CSV file where my dataset classes (V00XRKL) are stored. The dataset has five classes, but I would like to convert it into binary classes using a condition like: if V00XRKL < 2 then Class = 1, otherwise Class = 2.
Here is an example of my dataset (screenshot not shown).
For this purpose I wrote my code this way:
import pandas as pd

# read the csv file
classfile = pd.read_csv(csvroot)
print('df length orig:', len(classfile))

# remove rows with NA values
df_NA = classfile.dropna(how='any', axis=0)
print('df length no NA:', len(df_NA))

# drop duplicates based on ID and SIDE
df_drop = df_NA.drop_duplicates(subset=['ID', 'SIDE'])
print('df length no dup:', len(df_drop))

# reset the index
df_in = df_drop.reset_index(drop=True)

# convert to binary classes
df_in.loc[df_in['V00XRKL'] < 2, 'V00XRKL'] = 1
df_in.loc[df_in['V00XRKL'] >= 2, 'V00XRKL'] = 2
#df_in.to_clipboard(sep=',')
After running this code I can see that my data has been converted into binary classes (screenshot of the converted data not shown).
But the problem is that whenever I try to train my CNN model I get this error message:
Traceback (most recent call last):
File "/home/Downloads/test to con.py", line 379, in <module>
model, train_loss, valid_loss,val_acc = train_model(net, patience, n_epochs,costFunction)
File "/home/Downloads/test to con.py", line 165, in train_model
loss = criterion(output, labels)
File "/home/miniconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "/home/miniconda3/lib/python3.6/site-packages/torch/nn/modules/loss.py", line 942, in forward
ignore_index=self.ignore_index, reduction=self.reduction)
File "/home/miniconda3/lib/python3.6/site-packages/torch/nn/functional.py", line 2056, in cross_entropy
return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
File "/home/miniconda3/lib/python3.6/site-packages/torch/nn/functional.py", line 1871, in nll_loss
ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
RuntimeError: Assertion `cur_target >= 0 && cur_target < n_classes' failed. at /opt/conda/conda-bld/pytorch_1556653099582/work/aten/src/THNN/generic/ClassNLLCriterion.c:92
Here is the code I used to build the dataset class:
import os
import numpy as np
from PIL import Image
from torch.utils.data import Dataset

# make a class for the data
class MyDataset(Dataset):
    def __init__(self, csv_file, root_dir, transform=None):
        # self.data = pd.read_csv(csv_file, header=None)
        self.df_data = csv_file
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.df_data)

    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir, str(self.df_data['ID'].iloc[idx]) + '.npy')
        patches, p_id = np.load(img_name, allow_pickle=True)
        img_class = int(self.df_data.iloc[idx, 2])
        side = self.df_data.iloc[idx, 1]
        if side == 1:
            image = Image.fromarray(patches['R'].astype('uint8'), 'L')
        else:
            image = Image.fromarray(patches['L'].astype('uint8'), 'L')
        if self.transform is not None:
            image = self.transform(image)
        sample = {'image': image, 'grade': img_class}
        return sample
And this is the CNN model
import torch.nn as nn

class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        # Convolution block 1
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))  # max-pooling layer 1
        # Convolution block 2
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.drop_out = nn.Dropout(0.2)
        # Fully connected layers
        self.fc1 = nn.Linear(16 * 16 * 64, 1000)
        self.fc2 = nn.Linear(1000, 2)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)
        # print(out.shape)
        out = self.drop_out(out)
        out = self.fc1(out)
        out = self.fc2(out)
        return out
I would appreciate any kind of help in this regard.
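
One thing worth checking, given the assertion cur_target >= 0 && cur_target < n_classes and the final nn.Linear(1000, 2): nn.CrossEntropyLoss expects class indices in the range [0, n_classes - 1], i.e. 0 and 1 rather than 1 and 2. One possible remapping (a sketch, using the column name from the question):

# map V00XRKL < 2 -> class 0 and V00XRKL >= 2 -> class 1,
# so the targets fall inside [0, n_classes - 1] as nn.CrossEntropyLoss expects
df_in.loc[df_in['V00XRKL'] < 2, 'V00XRKL'] = 0
df_in.loc[df_in['V00XRKL'] >= 2, 'V00XRKL'] = 1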

Batch size in input shape of Chainer CNN

I have a training set of 9957 images. The training set has shape (9957, 3, 60, 80).
Is the batch size required when feeding the training set to the model?
If it is required, can the original shape be considered correct for fitting to a Conv2D layer, or do I need to add the batch size to the input shape?
X_train.shape
(9957, 60,80,3)
from chainer.datasets import split_dataset_random
from chainer.dataset import DatasetMixin
from chainer import iterators
import numpy as np

class MyDataset(DatasetMixin):
    def __init__(self, X, labels):
        super(MyDataset, self).__init__()
        self.X_ = X
        self.labels_ = labels
        self.size_ = X.shape[0]

    def __len__(self):
        return self.size_

    def get_example(self, i):
        return np.transpose(self.X_[i, ...], (2, 0, 1)), self.labels_[i]

batch_size = 3
label_train = y_trainHot1
dataset = MyDataset(X_train1, label_train)
dataset_train, valid = split_dataset_random(dataset, 8000, seed=0)
train_iter = iterators.SerialIterator(dataset_train, batch_size)
valid_iter = iterators.SerialIterator(valid, batch_size, repeat=False, shuffle=False)
The code below shows that you do not have to take care of the batch size yourself. You just use DatasetMixin and SerialIterator as instructed in the Chainer tutorial.
from chainer.dataset import DatasetMixin
from chainer.iterators import SerialIterator
import numpy as np

NUM_IMAGES = 9957
NUM_CHANNELS = 3  # RGB
IMAGE_WIDTH = 60
IMAGE_HEIGHT = 80
NUM_CLASSES = 10
BATCH_SIZE = 32
TRAIN_SIZE = min(8000, int(NUM_IMAGES * 0.9))

images = np.random.rand(NUM_IMAGES, NUM_CHANNELS, IMAGE_WIDTH, IMAGE_HEIGHT)
labels = np.random.randint(0, NUM_CLASSES, (NUM_IMAGES,))

class MyDataset(DatasetMixin):
    def __init__(self, images_, labels_):
        # note: input arg.'s trailing underscore is just to avoid shadowing
        super(MyDataset, self).__init__()
        self.images_ = images_
        self.labels_ = labels_
        self.size_ = len(labels_)

    def __len__(self):
        return self.size_

    def get_example(self, i):
        return self.images_[i, ...], self.labels_[i]

dataset_train = MyDataset(images[:TRAIN_SIZE, ...], labels[:TRAIN_SIZE])
dataset_valid = MyDataset(images[TRAIN_SIZE:, ...], labels[TRAIN_SIZE:])
train_iter = SerialIterator(dataset_train, BATCH_SIZE)
valid_iter = SerialIterator(dataset_valid, BATCH_SIZE, repeat=False, shuffle=False)

###############################################################################
"""This block is just for confirmation.

.. note:: NOT recommended to call :func:`concat_examples` in your code.
   Use :class:`chainer.updaters.StandardUpdater` instead.
"""
from chainer.dataset import concat_examples

batch_image, batch_label = concat_examples(next(train_iter))
print("batch_image.shape\n{}".format(batch_image.shape))
print("batch_label.shape\n{}".format(batch_label.shape))
Output
batch_image.shape
(32, 3, 60, 80)
batch_label.shape
(32,)
It should be noted that chainer.dataset.concat_examples is a slightly tricky part. Usually users do not need to pay attention to this function: if you use StandardUpdater, it conceals the call to chainer.dataset.concat_examples.
Since Chainer is designed around the scheme of Trainer, (Standard)Updater, some Optimizer, (Serial)Iterator and Dataset(Mixin), if you do not follow this scheme you will have to dive into the sea of Chainer source code.
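
For reference, a minimal sketch of that scheme, reusing the train_iter and valid_iter defined above (the small MyNet chain here is purely illustrative, not from the question; also note that Chainer expects float32 images and int32 labels, so the random images/labels above would need an astype cast first):

import chainer
import chainer.functions as F
import chainer.links as L
from chainer import optimizers, training
from chainer.training import extensions

class MyNet(chainer.Chain):
    # a tiny illustrative predictor for (3, 60, 80) images and NUM_CLASSES classes
    def __init__(self, n_classes=NUM_CLASSES):
        super(MyNet, self).__init__()
        with self.init_scope():
            self.conv = L.Convolution2D(NUM_CHANNELS, 16, ksize=3)
            self.fc = L.Linear(None, n_classes)  # input size inferred on first call

    def __call__(self, x):
        h = F.relu(self.conv(x))
        return self.fc(h)

model = L.Classifier(MyNet())   # adds softmax cross-entropy loss and accuracy
optimizer = optimizers.Adam()
optimizer.setup(model)

# StandardUpdater calls concat_examples internally, so batching is handled for you.
updater = training.StandardUpdater(train_iter, optimizer, device=-1)
trainer = training.Trainer(updater, (10, 'epoch'), out='result')
trainer.extend(extensions.Evaluator(valid_iter, model, device=-1))
trainer.run()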
