How to convert a generator to a PyTorch DataLoader?

I have a generator that creates synthetic data. How can I convert this into a PyTorch dataloader?

You can wrap your generator with a data.IterableDataset:

from torch.utils import data

class IterDataset(data.IterableDataset):
    def __init__(self, generator):
        self.generator = generator

    def __iter__(self):
        return self.generator()
Naturally, you can then wrap this dataset with a data.DataLoader.
Here is a minimal example showing its use:
>>> gen = lambda: (x for x in range(10))
>>> dataset = IterDataset(gen)
>>> for i in data.DataLoader(dataset, batch_size=2):
...     print(i)
tensor([0, 1])
tensor([2, 3])
tensor([4, 5])
tensor([6, 7])
tensor([8, 9])
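One caveat if you later use num_workers > 0: each worker process gets its own copy of an IterableDataset, so a naive generator would be replayed in every worker and yield duplicated samples. A minimal sketch of one way to shard the stream across workers with get_worker_info() (same IterDataset as above, just with sharding added):

import itertools
from torch.utils import data

class IterDataset(data.IterableDataset):
    def __init__(self, generator):
        self.generator = generator

    def __iter__(self):
        it = self.generator()
        info = data.get_worker_info()
        if info is None:
            # single-process loading: consume the whole stream
            return it
        # each worker takes every num_workers-th item, offset by its worker id
        return itertools.islice(it, info.id, None, info.num_workers)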

With the limited information you provide, this is the simplest solution (I assume your generator creates images from noise, as in the original GANs):
import torch

def get_data(batch_size, generator, latent_dim=512):
    z = torch.randn(batch_size, latent_dim)
    return generator(z)

def dataloader(batch_size, generator, iteration, latent_dim=512):
    for i in range(iteration):
        yield get_data(batch_size, generator, latent_dim)

batch_size = 64
generator = GANs(...)
iteration = 100
latent_dim = 512

loader = dataloader(batch_size, generator, iteration, latent_dim)
for images in loader:
    # do something
    ...
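If you would rather let DataLoader drive the iteration (workers, pinning, and so on), you can plug the same idea into the IterDataset from the first answer. A rough sketch, where net stands for your (hypothetical) GAN generator network that maps a (latent_dim,) noise vector to an image:

import torch
from torch.utils import data

def sample_latents(n=100, latent_dim=512):
    # yields single latent vectors; DataLoader stacks them into batches
    for _ in range(n):
        yield torch.randn(latent_dim)

dataset = IterDataset(lambda: sample_latents(100))
loader = data.DataLoader(dataset, batch_size=64)
for z in loader:          # z has shape (64, 512)
    images = net(z)       # net is the assumed generator network
    # do something with images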

Related

How do I set the dimensions of Conv1d correctly?

This is a toy example as I'm learning PyTorch and using it on one-dimensional time series, in this case a sine wave.
I'm trying to use Conv1d, but I get the following error:
RuntimeError: Given groups=1, weight of size [5, 1, 2], expected input[1, 994, 5] to have 1 channels, but got 994 channels instead
My 'lookback' is 5 time steps, and the shape of my data batch is [994, 5].
What am I doing wrong?
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import pytorch_lightning as pl
from torch import nn, tensor

class TsDs(torch.utils.data.Dataset):
    def __init__(self, s, l=5): super().__init__(); self.l, self.s = l, s
    def __len__(self): return self.s.shape[0] - 1 - self.l
    def __getitem__(self, i): return self.s[i:i+self.l], torch.log(self.s[i+self.l+1]/self.s[i+self.l])
    def plt(self): plt.plot(self.s)

class TsDm(pl.LightningDataModule):
    def __init__(self, length=5000, batch_size=1000): super().__init__(); self.batch_size = batch_size; self.s = torch.sin(torch.arange(length)*0.2) + 5
    def train_dataloader(self): return DataLoader(TsDs(self.s[:3999]), batch_size=self.batch_size, shuffle=False)
    def val_dataloader(self): return DataLoader(TsDs(self.s[4000:]), batch_size=self.batch_size)

dm = TsDm()

class MyModel(pl.LightningModule):
    def __init__(self, learning_rate=0.01):
        super().__init__(); self.learning_rate = learning_rate
        self.network = nn.Sequential(nn.Conv1d(1,5,2), nn.ReLU(), nn.Linear(5,3), nn.ReLU(), nn.Linear(3,1), nn.Tanh())
        # self.network = nn.Sequential(nn.Linear(5,5), nn.ReLU(), nn.Linear(5,3), nn.ReLU(), nn.Linear(3,1), nn.Tanh())
    def forward(self, x): return self.network(x)
    def step(self, batch, batch_idx, stage):
        x, y = batch
        loss = -torch.mean(self(x)*y)
        print(loss)
        return loss
    def training_step(self, batch, batch_idx): return self.step(batch, batch_idx, "train")
    def validation_step(self, batch, batch_idx): return self.step(batch, batch_idx, "val")
    def configure_optimizers(self): return torch.optim.SGD(self.parameters(), lr=self.learning_rate)

mm = MyModel(0.01); trainer = pl.Trainer(max_epochs=10)
trainer.fit(mm, datamodule=dm)
There are two issues in your code:
Looking at the documentation of nn.Conv1d, your input shape should be (B, C, L). In your default case, you have L=5, the sequence length, but you need to create that extra dimension representing the feature size of a sequence element, here C=1. You can do so by changing TsDs's __getitem__ function to:
def __getitem__(self, i):
    x = self.s[i:i+self.l].unsqueeze(0)                 # x shaped (1, self.l): C=1 channel, L time steps
    y = torch.log(self.s[i+self.l+1]/self.s[i+self.l])  # y is a scalar target
    return x, y
Your convolutional layer has a stride of 1 and a kernel size of 2, which means its output will be shaped (B, 5, L-1=4). The following layer is a fully connected layer instantiated as nn.Linear(5, 3), which means it expects (*, H_in=5) and will output (*, H_out=3). You have a few options:
Flatten the Conv1d output with nn.Flatten and feed it to a bigger fully connected layer (for instance nn.Linear(20, 3)).
Use a convolutional layer with a wider kernel: with a kernel of 5 (your sequence length), you end up with a tensor shaped (B, 5, 1), which you can feed to nn.Linear(5, 3). However, this approach doesn't scale well when L changes.
Apply nn.AvgPool1d to get an average representation of the sequence after the convolutional layers have been applied.
Those are just a few directions...
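For instance, a minimal sketch of the first option (flatten + bigger linear layer), assuming the default lookback l=5, so the Conv1d output is (B, 5, 4), i.e. 20 features once flattened:

import torch
import torch.nn as nn

# With input shaped (B, 1, 5), Conv1d(1, 5, 2) gives (B, 5, 4) -> Flatten -> (B, 20)
network = nn.Sequential(
    nn.Conv1d(1, 5, 2),
    nn.ReLU(),
    nn.Flatten(),       # (B, 5, 4) -> (B, 20)
    nn.Linear(20, 3),
    nn.ReLU(),
    nn.Linear(3, 1),
    nn.Tanh(),
)

x = torch.randn(994, 1, 5)   # batch of 994 windows, C=1 channel, L=5 time steps
print(network(x).shape)      # torch.Size([994, 1])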

Pytorch customized dataloader

I am trying to train a classifier on the MNIST dataset using pytorch-lightning.
import pytorch_lightning as pl
from torchvision import transforms
from torchvision.datasets import MNIST, SVHN
from torch.utils.data import DataLoader, random_split

class MNISTData(pl.LightningDataModule):
    def __init__(self, data_dir='./', batch_size=256):
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.transform = transforms.ToTensor()

    def download(self):
        MNIST(self.data_dir, train=True, download=True)
        MNIST(self.data_dir, train=False, download=True)

    def setup(self, stage=None):
        if stage == 'fit' or stage is None:
            mnist_train = MNIST(self.data_dir, train=True, transform=self.transform)
            self.mnist_train, self.mnist_val = random_split(mnist_train, [55000, 5000])
        if stage == 'test' or stage is None:
            self.mnist_test = MNIST(self.data_dir, train=False, transform=self.transform)

    def train_dataloader(self):
        mnist_train = DataLoader(self.mnist_train, batch_size=self.batch_size)
        return mnist_train

    def val_dataloader(self):
        mnist_val = DataLoader(self.mnist_val, batch_size=self.batch_size)
        return mnist_val

    def test_dataloader(self):
        mnist_test = DataLoader(self.mnist_test, batch_size=self.batch_size)
After calling MNISTData().setup(), I obtain MNISTData().mnist_train, MNISTData().mnist_val, and MNISTData().mnist_test, whose lengths are 55000, 5000, and 10000, each of type torch.utils.data.dataset.Subset.
But when I call MNISTData().train_dataloader(), MNISTData().val_dataloader(), and MNISTData().test_dataloader(), I only get DataLoaders of length 215, 20, and None respectively.
Does anyone know the reason, or how to fix the problem?
As I said in the comments, and as Ivan posted in his answer, there was a missing return statement:
def test_dataloader(self):
    mnist_test = DataLoader(self.mnist_test, batch_size=self.batch_size)
    return mnist_test  # <<< missing return
As per your comment, if we try:
a = MNISTData()
# skip download, assuming you already have it
a.setup()
b, c, d = a.train_dataloader(), a.val_dataloader(), a.test_dataloader()
# len(b)=215, len(c)=20, len(d)=40
I think your question is why the lengths of b, c, and d differ from the lengths of the datasets. The answer is that the len() of a DataLoader equals the number of batches, not the number of samples, therefore:
import math
batch_size = 256
len(b) = math.ceil(55000 / batch_size) = 215
len(c) = math.ceil(5000 / batch_size) = 20
len(d) = math.ceil(10000 / batch_size) = 40
BTW, we're using math.ceil because DataLoader has drop_last=False by default, otherwise it would be math.floor.
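A quick illustrative check on a toy dataset (not the MNIST module above), showing that len(DataLoader) counts batches and that drop_last changes the count:

import torch
from torch.utils.data import TensorDataset, DataLoader

ds = TensorDataset(torch.arange(10))  # 10 samples
print(len(DataLoader(ds, batch_size=3)))                  # ceil(10/3) = 4
print(len(DataLoader(ds, batch_size=3, drop_last=True)))  # floor(10/3) = 3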
Your test_dataloader function is missing a return statement!
def test_dataloader(self):
    mnist_test = DataLoader(self.mnist_test, batch_size=self.batch_size)
    return mnist_test
>>> ds = MNISTData()
>>> ds.download()
>>> ds.setup()
Then:
>>> [len(subset) for subset in \
...     (ds.mnist_train, ds.mnist_val, ds.mnist_test)]
[55000, 5000, 10000]
>>> [len(loader) for loader in \
...     (ds.train_dataloader(), ds.val_dataloader(), ds.test_dataloader())]
[215, 20, 40]
Others pointing out that you are missing a return in test_dataloader() are certainly correct.
Judging by how the question is framed, it seems you are confused about the length of a Dataset and a DataLoader.
len(Dataset(..)) returns the number of data samples in your dataset.
whereas len(DataLoader(ds, ...)) returns the number of batches, which depends on the batch_size=... you requested, whether you drop_last the final incomplete batch, etc. The exact calculations are provided correctly by @Berriel.

How to drop running stats to default value for Norm layer in pyTorch?

I trained a model on some images. Now, to fit a similar dataset but with different colors, I want to load this model but also drop all running stats from the BatchNorm layers (set them to their default values, as if totally untrained). Which parameters should I reset? A simple model looks like this:
import torch
import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv0 = nn.Conv2d(3, 3, 3, padding=1)
        self.norm = nn.BatchNorm2d(3)
        self.conv = nn.Conv2d(3, 3, 3, padding=1)

    def forward(self, x):
        x = self.conv0(x)
        x = self.norm(x)
        return self.conv(x)

net = Net()
## or for a pretrained model it would be
## net = torch.load('net.pth')

def drop_to_default():
    for m in net.modules():
        if type(m) == nn.BatchNorm2d:
            ####???####

drop_to_default()
The simplest way to do that is to call the reset_running_stats() method on the BatchNorm objects:
def drop_to_default():
    for m in net.modules():
        if type(m) == nn.BatchNorm2d:
            m.reset_running_stats()
Below is this method's source code:
def reset_running_stats(self) -> None:
    if self.track_running_stats:
        # running_mean/running_var/num_batches... are registered at runtime depending
        # on whether self.track_running_stats is on
        self.running_mean.zero_()         # Zero (neutral) mean
        self.running_var.fill_(1)         # One (neutral) variance
        self.num_batches_tracked.zero_()  # Number of batches tracked
You can see the source code in the _NormBase class.
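If you also want to reset the learnable affine parameters (weight and bias), and not only the running statistics, the BatchNorm layers also expose reset_parameters(), which calls reset_running_stats() and then reinitializes the affine parameters. A short sketch of that variant:

def drop_to_default():
    for m in net.modules():
        if isinstance(m, nn.BatchNorm2d):
            # resets running_mean/running_var/num_batches_tracked *and*
            # reinitializes the affine weight (to 1) and bias (to 0)
            m.reset_parameters()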

Pytorch Linear Layer now automatically reshapes the input?

I remember in the past, nn.Linear only accepts 2D tensors.
But today, I discovered that nn.Linear now accepts 3D tensors, or even tensors with arbitrary dimensions.
X = torch.randn((20,20,20,20,10))
linear_layer = nn.Linear(10,5)
output = linear_layer(X)
print(output.shape)
>>> torch.Size([20, 20, 20, 20, 5])
When I check the PyTorch documentation, it does say that it now takes
Input: (N, *, H_in), where * means any number of additional dimensions and H_in = in_features.
So it seems to me that PyTorch's nn.Linear now automatically reshapes the input with x.view(-1, input_dim).
But I cannot find any x.shape or x.view in the source code:
class Linear(Module):
    __constants__ = ['bias']

    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)

    @weak_script_method
    def forward(self, input):
        return F.linear(input, self.weight, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )
Can anyone confirm this?
torch.nn.Linear uses the torch.nn.functional.linear function under the hood; that's where the operations take place (see documentation).
It looks like this (removed docstrings and decorators for brevity):
def linear(input, weight, bias=None):
    if input.dim() == 2 and bias is not None:
        # fused op is marginally faster
        ret = torch.addmm(bias, input, weight.t())
    else:
        output = input.matmul(weight.t())
        if bias is not None:
            output += bias
        ret = output
    return ret
The first case is addmm, which implements beta*mat + alpha*(mat1 @ mat2) and is supposedly faster (see here for example).
The second operation is matmul, and as one can read in its docs, it performs various operations based on the shapes of the tensors provided (five cases; I'm not going to copy them verbatim here).
In summary, it preserves the dimensions between the first (batch) dimension and the last (features) dimension. No view() is used whatsoever, and especially not x.view(-1, input_dim); check the code below:
import torch
tensor1 = torch.randn(10, 3, 4)
tensor2 = torch.randn(10, 4, 5)
print(torch.matmul(tensor1, tensor2).shape)
print(torch.matmul(tensor1, tensor2).view(-1, tensor1.shape[1]).shape)
which gives:
torch.Size([10, 3, 5])  # preserves the input's middle dimension of 3
torch.Size([50, 3])     # an explicit view would even destroy the batch dimension
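To double-check that no hidden reshape is happening, here is a small sketch comparing nn.Linear on an N-D input against explicitly flattening the leading dimensions and restoring them afterwards; the two should match:

import torch
import torch.nn as nn

x = torch.randn(20, 20, 10)
fc = nn.Linear(10, 5)

out = fc(x)                                     # applied over the last dimension only
ref = fc(x.reshape(-1, 10)).reshape(20, 20, 5)  # manual flatten + restore
print(out.shape)                 # torch.Size([20, 20, 5])
print(torch.allclose(out, ref))  # True (up to floating point tolerance)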

Making custom multihot embedding layer in keras/tf

I'd like to make a custom embedding layer in Keras, but I'm not sure how to go about it.
As input, I would pass a variable number of integers for each example (indices from which I would like to generate a fixed-size vector). A NumPy version of this embedding (with batch_size = 1) would be:
import numpy as np

class numpyEmbedding():
    def __init__(self, vocab_size):
        self.vocab_size = vocab_size
        self.build()

    def build(self):
        self.W = np.eye(self.vocab_size, dtype=np.int8)

    def __call__(self, x):
        return np.sum(self.W[:, x], axis=-1)
I imagine a Keras version of this layer should be possible, but I am not sure how to get it working and what considerations I need to keep in mind, since it would have to be applied to mini-batches of arrays rather than single arrays.
Thanks!
Ilya
Edit:
Example input:
vec = np.random.choice(np.arange(10),100).astype(int)
emb = numpyEmbedding(int(10))(vec)
Output:
array([11, 10, 11, 9, 8, 9, 13, 12, 6, 11])
I was able to figure out the answer:
class MultihotEmbedding(layers.Layer):
    def __init__(self, vocab_size, **kwargs):
        self.vocab_size = vocab_size
        super(MultihotEmbedding, self).__init__(**kwargs)

    def call(self, x):
        self.get_embeddings = K.one_hot(x, num_classes=self.vocab_size)
        self.reduce_embeddings = K.sum(self.get_embeddings, axis=-2)
        return self.reduce_embeddings

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.vocab_size)
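A quick usage sketch for the layer above, assuming the usual tf.keras imports (from tensorflow.keras import layers and from tensorflow.keras import backend as K); shapes are illustrative:

import numpy as np
import tensorflow as tf

# A batch of 2 examples, each with 100 integer indices into a vocabulary of size 10
x = tf.constant(np.random.choice(10, size=(2, 100)), dtype=tf.int32)

layer = MultihotEmbedding(vocab_size=10)
emb = layer(x)
print(emb.shape)  # (2, 10): per-example counts of each vocabulary index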
