LSTM Autoencoder set-up for multiple features using PyTorch

I am building an LSTM autoencoder to denoise signals; it will take more than one feature as its input.
I have set up the Encoder part of the model as follows, and it works for single-feature inputs (i.e. sequences with just one feature):
class Encoder(nn.Module):
    def __init__(self, seq_len, n_features, num_layers=1, embedding_dim=64):
        super(Encoder, self).__init__()
        self.seq_len = seq_len
        self.n_features = n_features
        self.num_layers = num_layers
        self.embedding_dim = embedding_dim
        self.hidden_dim = 2 * embedding_dim
        # input: batch_size, seq_len, features
        self.lstm1 = nn.LSTM(
            input_size=self.n_features,
            hidden_size=self.hidden_dim,
            num_layers=self.num_layers,
            batch_first=True
        )  # output: batch_size, seq_len, hidden_dim
        # input: batch_size, seq_len, hidden_dim
        self.lstm2 = nn.LSTM(
            input_size=self.hidden_dim,
            hidden_size=self.embedding_dim,
            num_layers=self.num_layers,
            batch_first=True
        )  # output: batch_size, seq_len, embedding_dim

    def forward(self, x):
        print(x)
        x = x.reshape((1, self.seq_len, self.n_features))
        print(x.shape)
        x, (_, _) = self.lstm1(x)
        print(x.shape)
        x, (hidden_n, _) = self.lstm2(x)
        print(x.shape, hidden_n.shape)
        print(hidden_n)
        return hidden_n.reshape((self.n_features, self.embedding_dim))
When I test this setup as follows:
model = Encoder(1024, 1)
model.forward(torch.randn(1024, 1))
with the 1 representing a single feature, all is well. However, when I do the following (where 2 represents sequences with 2 features):
model = Encoder(1024, 2)
model.forward(torch.randn(1024, 2))
I get the following error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Input In [296], in <cell line: 1>()
----> 1 model.forward(torch.randn(1024, 2))
Input In [294], in Encoder.forward(self, x)
36 print(hidden_n)
37 # print(hidden_n.reshape((self.n_features, self.embedding_dim)).shape)
---> 39 return hidden_n.reshape((self.n_features, self.embedding_dim))
RuntimeError: shape '[2, 64]' is invalid for input of size 64
The hidden_n shape comes out as torch.Size([1, 1, 64]). I would like to understand: if we have more than one feature, e.g. 2, should that shape come out as (1, 2, 64), so that the hidden state has weights for both features?
Can someone please explain why reshape does not like the way I'm trying to restructure the output of the Encoder, and how I should do it so that the model can handle any number of features?
What am I missing or perhaps misunderstanding here?
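For reference, here is a minimal, standalone shape check (separate from the autoencoder above, shown only to illustrate the question): the final hidden state of an LSTM has shape (num_layers * num_directions, batch, hidden_size), so it does not grow with the number of input features.
import torch
import torch.nn as nn

# standalone check: hidden_n is sized by num_layers, batch and hidden_size,
# independent of input_size
lstm = nn.LSTM(input_size=2, hidden_size=64, num_layers=1, batch_first=True)
x = torch.randn(1, 1024, 2)    # (batch, seq_len, n_features)
out, (hidden_n, _) = lstm(x)
print(out.shape)               # torch.Size([1, 1024, 64])
print(hidden_n.shape)          # torch.Size([1, 1, 64]): only 64 values, so reshape((2, 64)) cannot work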

Related

PyTorch multi-class: ValueError: Expected input batch_size (416) to match target batch_size (32)

I have created a multi-class classification neural network. Training and validation iterators were created with the BigBucketIterator method with fields {'text_normalized_tweet': TEXT, 'label': LABEL}, where
TEXT = a tweet
LABEL = a float number (with 3 values: 0, 1, 2)
Below is a dummy example of my neural network:
import torch.nn as nn

class MultiClassClassifer(nn.Module):
    # define all the layers used in the model
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim):
        # constructor
        super(MultiClassClassifer, self).__init__()
        # embedding layer
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        # dense layer
        self.hiddenLayer = nn.Linear(embedding_dim, hidden_dim)
        # batch normalization layer
        self.batchnorm = nn.BatchNorm1d(hidden_dim)
        # output layer
        self.output = nn.Linear(hidden_dim, output_dim)
        # activation layer
        self.act = nn.Softmax(dim=1)  # 2d-tensor
        # initialize weights of embedding layer
        self.init_weights()

    def init_weights(self):
        initrange = 1.0
        self.embedding.weight.data.uniform_(-initrange, initrange)

    def forward(self, text, text_lengths):
        embedded = self.embedding(text)
        # packed sequence
        packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths, batch_first=True)
        tensor, batch_size = packed_embedded[0], packed_embedded[1]
        hidden_1 = self.batchnorm(self.hiddenLayer(tensor))
        return self.act(self.output(hidden_1))
Instantiate the model
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 64
OUTPUT_DIM = 3
model = MultiClassClassifer(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM)
When I call
text, text_lengths = batch.text_normalized_tweet
predictions = model(text, text_lengths).squeeze()
loss = criterion(predictions, batch.label)
it returns,
ValueError: Expected input batch_size (416) to match target batch_size (32).
model(text, text_lengths).squeeze() = torch.Size([416, 3])
batch.label = torch.Size([32])
I can see that the two objects have different sizes, but I have no clue how to fix this.
You may find the Google Colab notebook here
Shapes of each in, out tensor of my forward() method:
torch.Size([32, 10, 100]) #self.embedding(text)
torch.Size([320, 100]) #nn.utils.rnn.pack_padded_sequence(embedded, text_lengths, batch_first=True)
torch.Size([320, 64]) #self.batchnorm(self.hiddenLayer(tensor))
torch.Size([320, 3]) #self.act(self.output(hidden_1))
You shouldn't be using the squeeze function after the forward pass; that doesn't make sense.
After removing the squeeze function, as you can see, the shape of your final output is [320, 3] whereas it is expecting [32, 3]. One way to fix this is to average out the embeddings you obtain for each word after the self.embedding function, as shown below:
def forward(self, text, text_lengths):
    embedded = self.embedding(text)
    embedded = torch.mean(embedded, dim=1, keepdim=True)
    packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths, batch_first=True)
    tensor, batch_size = packed_embedded[0], packed_embedded[1]
    hidden_1 = self.batchnorm(self.hiddenLayer(tensor))
    return self.act(self.output(hidden_1))
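As a quick sanity check of the averaging step alone (a standalone sketch; the sizes are taken from the shape printout above, i.e. a batch of 32 tweets, 10 tokens each, 100-dimensional embeddings):
import torch

embedded = torch.randn(32, 10, 100)                   # (batch, tokens, embedding_dim)
averaged = torch.mean(embedded, dim=1, keepdim=True)  # one summary vector per tweet
print(averaged.shape)                                 # torch.Size([32, 1, 100])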

Increasing num of layers in LSTM increases the input dimensions in Pytorch?

This is my LSTM model, and I am running into a peculiar problem while training it.
class LSTM1(nn.Module):
    def __init__(self, num_classes, input_size, hidden_size, num_layers, seq_length, drop_prob=0.0):
        super(LSTM1, self).__init__()
        self.num_classes = num_classes  # number of classes
        self.num_layers = num_layers    # number of layers
        self.input_size = input_size    # input size
        self.hidden_size = hidden_size  # hidden state
        self.seq_length = seq_length    # sequence length
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, dropout=drop_prob, batch_first=True)  # lstm
        # self.dropout = nn.Dropout(drop_prob)
        # self.fc_1 = nn.Linear(hidden_size, num_classes)
        self.fc_1 = nn.Linear(hidden_size, 64)  # fully connected 1
        self.fc = nn.Linear(64, num_classes)    # fully connected last layer
        self.relu = nn.ReLU()

    def forward(self, x):
        # h_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(device)  # hidden state
        # c_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(device)  # internal state
        # Propagate input through LSTM
        output, (hn, cn) = self.lstm(x)  # lstm with input, hidden, and internal state
        hn = hn.view(-1, self.hidden_size)  # reshaping the data for the Dense layer next
        # out = self.dropout(hn)
        out = self.relu(hn)
        out = self.fc_1(out)  # first Dense
        out = self.relu(out)  # relu
        out = self.fc(out)    # final output
        out = self.relu(out)  # relu
        return out
My training data has these dimensions, where the first is the input and the second the labels:
Training Shape torch.Size([8051, 1, 201]) torch.Size([8051, 1])
When I train with num_layers = 1 for the LSTM layer, it works fine.
However, when I increase num_layers to 2, I get the following error:
Training Shape torch.Size([8051, 1, 201]) torch.Size([8051, 1])
Testing Shape torch.Size([4930, 1, 201]) torch.Size([4930, 1])
C:\Users\adity\miniconda3\envs\pytorch3\lib\site-packages\torch\nn\modules\loss.py:528: UserWarning: Using a target size (torch.Size([8051, 1])) that is different to the input size (torch.Size([16102, 1])). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size.
return F.mse_loss(input, target, reduction=self.reduction)
Traceback (most recent call last):
File "C:\Users\adity\OneDrive - Louisiana State University\Documents\CSC 7343 HW\hw1.py", line 135, in <module>
loss = criterion(outputs.to(device), y_train_tensors.to(device))
File "C:\Users\adity\miniconda3\envs\pytorch3\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\adity\miniconda3\envs\pytorch3\lib\site-packages\torch\nn\modules\loss.py", line 528, in forward
return F.mse_loss(input, target, reduction=self.reduction)
File "C:\Users\adity\miniconda3\envs\pytorch3\lib\site-packages\torch\nn\functional.py", line 3089, in mse_loss
expanded_input, expanded_target = torch.broadcast_tensors(input, target)
File "C:\Users\adity\miniconda3\envs\pytorch3\lib\site-packages\torch\functional.py", line 73, in broadcast_tensors
return _VF.broadcast_tensors(tensors) # type: ignore[attr-defined]
RuntimeError: The size of tensor a (16102) must match the size of tensor b (8051) at non-singleton dimension 0
When I change num_layers to 3, the error says the size of the tensor should be 24153.
Why are the input dimensions changing when I increase num_layers?
The issue is that you are flattening hn. According to the documentation page, its shape is (D * num_layers, N, H_out), i.e. it depends on the number of stacked layers. So you will either have to change the fully connected layer that follows, or only take the last hidden state of your LSTM, as sketched below.
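A minimal sketch of the second option (standalone, with a hypothetical hidden_size of 128): hn[-1] is the hidden state of the top layer only, so its leading dimension stays at the batch size no matter what num_layers is.
import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=201, hidden_size=128, num_layers=2, batch_first=True)
x = torch.randn(8051, 1, 201)   # (batch, seq_len, input_size), as in the training data above
output, (hn, cn) = lstm(x)
print(hn.shape)                 # torch.Size([2, 8051, 128]): one hidden state per layer
print(hn[-1].shape)             # torch.Size([8051, 128]): top layer only, batch size preserved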

Expected input batch_size (18) to match target batch_size (6)

Is RNN for image classification available only for grayscale images?
The following program works for grayscale image classification.
If RGB images are used, I get this error:
Expected input batch_size (18) to match target batch_size (6)
at the line loss = criterion(outputs, labels).
My data loading for the train, valid and test sets is as follows.
input_size = 300
inputH = 300
inputW = 300
# Data transform (normalization & data augmentation)
stats = ((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_resize_tfms = tt.Compose([tt.Resize((inputH, inputW), interpolation=2),
                                tt.ToTensor(),
                                tt.Normalize(*stats)])
train_tfms = tt.Compose([tt.Resize((inputH, inputW), interpolation=2),
                         tt.RandomHorizontalFlip(),
                         tt.ToTensor(),
                         tt.Normalize(*stats)])
valid_tfms = tt.Compose([tt.Resize((inputH, inputW), interpolation=2),
                         tt.ToTensor(),
                         tt.Normalize(*stats)])
test_tfms = tt.Compose([tt.Resize((inputH, inputW), interpolation=2),
                        tt.ToTensor(),
                        tt.Normalize(*stats)])
# Create datasets
train_ds = ImageFolder('./data/train', train_tfms)
valid_ds = ImageFolder('./data/valid', valid_tfms)
test_ds = ImageFolder('./data/test', test_tfms)
from torch.utils.data.dataloader import DataLoader
batch_size = 6
# Training data loader
train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=8, pin_memory=True)
# Validation data loader
valid_dl = DataLoader(valid_ds, batch_size, shuffle=True, num_workers=8, pin_memory=True)
# Test data loader
test_dl = DataLoader(test_ds, 1, shuffle=False, num_workers=1, pin_memory=True)
My model is as follows.
num_steps = 300
hidden_size = 256  # size of hidden layers
num_classes = 5
num_epochs = 20
learning_rate = 0.001
# Fully connected neural network with one hidden layer
num_layers = 2  # 2 RNN layers are stacked

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNN, self).__init__()
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True, dropout=0.2)  # batch must be the first dimension
        # our input needs to have shape
        # x -> (batch_size, seq, input_size)
        self.fc = nn.Linear(hidden_size, num_classes)  # this fc comes after the RNN, so it needs the RNN's last hidden size

    def forward(self, x):
        # according to the documentation of RNN in pytorch,
        # rnn needs input and h_0 (h_0 is the initial hidden state)
        # the following is the initial hidden state
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)  # first dim is number of layers, second is batch size
        # rnn returns two outputs: the first tensor contains the output features of the last hidden layer for all time steps,
        # the second one is the hidden state
        out, _ = self.rnn(x, h0)
        # out has shape (batch_size, num_steps, hidden_size)
        # we only need to decode the hidden state of the last time step:
        # out (N, 300, 256) -> out (N, 256)
        out = out[:, -1, :]  # -1 for the last time step; take all of N and the hidden size
        out = self.fc(out)
        return out
stacked_rnn_model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()  # cross entropy has softmax at the output
# optimizer = torch.optim.Adam(stacked_rnn_model.parameters(), lr=learning_rate)  # gradient optimization using Adam
optimizer = torch.optim.SGD(stacked_rnn_model.parameters(), lr=learning_rate)
# Train the model
n_total_steps = len(train_dl)
for epoch in range(num_epochs):
    t_losses = []
    for i, (images, labels) in enumerate(train_dl):
        # origin shape: [6, 3, 300, 300]
        # resized: [6, 300, 300]
        images = images.reshape(-1, num_steps, input_size).to(device)
        print('images shape')
        print(images.shape)
        labels = labels.to(device)
        # Forward pass
        outputs = stacked_rnn_model(images)
        print('outputs shape')
        print(outputs.shape)
        loss = criterion(outputs, labels)
        t_losses.append(loss)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
The printed images and outputs shapes are:
images shape
torch.Size([18, 300, 300])
outputs shape
torch.Size([18, 5])
Where is the mistake?
Tl;dr: You are flattening the first two axes, namely batch and channels.
I am not sure you are taking the right approach, but I will write about that later.
In any case, let's look at the issue you are facing. You have a data loader that produces (6, 3, 300, 300), i.e. batches of 6 three-channel 300x300 images. By the look of it, you are trying to reshape each batch element (3, 300, 300) into (step_size=300, -1).
However, instead of that you are affecting the first axis, which you shouldn't, with images.reshape(-1, num_steps, input_size). This would have the desired effect when working with single-channel images, since dim=1 wouldn't be the "channel" axis. In your case you have 3 channels; therefore, the resulting shape is (6*3*300*300//300//300, 300, 300), which is (18, 300, 300) since num_steps=300 and input_size=300. As a result, you are left with 18 batch elements instead of 6.
Instead, what you want is to reshape with (batch_size, num_steps, -1), leaving the last axis (the per-step input size) of variable size. This will result in a shape of (6, 300, 900).
Here is a corrected and reduced snippet:
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

batch_size = 6
channels = 3
inputH, inputW = 300, 300
train_ds = TensorDataset(torch.rand(100, 3, inputH, inputW), torch.rand(100, 5))
train_dl = DataLoader(train_ds, batch_size)

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNN, self).__init__()
        # (batch_size, seq, input_size)
        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
        # (batch_size, hidden_size)
        self.fc = nn.Linear(hidden_size, num_classes)
        # (batch_size, num_classes)

    def forward(self, x):
        out, _ = self.rnn(x)
        out = out[:, -1, :]
        out = self.fc(out)
        return out

num_steps = 300
input_size = inputH*inputW*channels//num_steps
hidden_size = 256
num_classes = 5
num_layers = 2
rnn = RNN(input_size, hidden_size, num_layers, num_classes)

for x, y in train_dl:
    print(x.shape, y.shape)
    images = x.reshape(batch_size, num_steps, -1)
    print(images.shape)
    outputs = rnn(images)
    print(outputs.shape)
    break
As I said in the beginning, I am a bit wary about this approach because you are essentially feeding your RNN an RGB 300x300 image in the form of a sequence of 300 flattened vectors... I can't say whether that makes sense in terms of training and whether the model will be able to learn from that. I could be wrong!

RuntimeError: Expected hidden[0] size (1, 1, 512), got (1, 128, 512) for LSTM pytorch

I trained the LSTM with a batch size of 128 and during testing my batch size is 1. Why do I get this error? Am I supposed to initialize the hidden state when testing?
Here is the code that I'm using. I initialize the hidden state in the init_hidden function as (number_of_layers, batch_size, hidden_size), since batch_first=True.
class ImageLSTM(nn.Module):
    def __init__(self, n_inputs: int = 49,
                 n_outputs: int = 4096,
                 n_hidden: int = 256,
                 n_layers: int = 1,
                 bidirectional: bool = False):
        """
        Takes 1D flattened images.
        """
        super(ImageLSTM, self).__init__()
        self.n_inputs = n_inputs
        self.n_hidden = n_hidden
        self.n_outputs = n_outputs
        self.n_layers = n_layers
        self.bidirectional = bidirectional
        self.lstm = nn.LSTM(input_size=self.n_inputs,
                            hidden_size=self.n_hidden,
                            num_layers=self.n_layers,
                            dropout=0.5 if self.n_layers > 1 else 0,
                            bidirectional=self.bidirectional,
                            batch_first=True)
        if (self.bidirectional):
            self.FC = nn.Sequential(
                nn.Linear(self.n_hidden*2, self.n_outputs),
                nn.Dropout(p=0.5),
                nn.Sigmoid()
            )
        else:
            self.FC = nn.Sequential(
                nn.Linear(self.n_hidden, self.n_outputs),
                # nn.Dropout(p=0.5),
                nn.Sigmoid()
            )

    def init_hidden(self, batch_size, device=None):  # input 4D tensor: (batch size, channels, width, height)
        # initialize the hidden and cell state to zero
        # vectors: (number of layers, batch size, number of hidden nodes)
        if (self.bidirectional):
            h0 = torch.zeros(2*self.n_layers, batch_size, self.n_hidden)
            c0 = torch.zeros(2*self.n_layers, batch_size, self.n_hidden)
        else:
            h0 = torch.zeros(self.n_layers, batch_size, self.n_hidden)
            c0 = torch.zeros(self.n_layers, batch_size, self.n_hidden)
        if device is not None:
            h0 = h0.to(device)
            c0 = c0.to(device)
        self.hidden = (h0, c0)

    def forward(self, X):  # X: tensor of shape (batch_size, channels, width, height)
        # forward propagate LSTM
        lstm_out, self.hidden = self.lstm(X, self.hidden)  # lstm_out: tensor of shape (batch_size, seq_length, hidden_size)
        # decode the hidden state of the last time step
        out = self.FC(lstm_out[:, -1, :])
        return out
Please edit your post and add code. How did you initialize the hidden state? What does your model look like?
hidden[0] is not your hidden size, it's the hidden state of the LSTM. The shape of the hidden state has to be initialized like this:
hidden = (torch.zeros((layers, batch_size, hidden_size)), torch.zeros((layers, batch_size, hidden_size)))
You seem to have done this correctly. But the error tells you that you gave a batch of size 1 (because, as you said, you want to test with only one sample) while the hidden state is initialized with batch_size=128.
So I guess (please add code) that you hard-coded batch_size = 128. Don't do that. Since you have to reinitialize the hidden state every forward pass, you can do this:
...
def forward(self, x):
    batch_size = x.shape[0]
    hidden = (torch.zeros(self.layers, batch_size, self.hidden_size).to(device=device),
              torch.zeros(self.layers, batch_size, self.hidden_size).to(device=device))
    output, hidden = lstm(x, hidden)
    # then do whatever you want with the output
I guess that this is what causes this error but please post your code, too!
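Alternatively, keeping the ImageLSTM class from the question unchanged, the existing init_hidden method can simply be called with the current batch size before each forward pass (a sketch; model, X_test and device are assumed to already exist):
model.init_hidden(batch_size=X_test.shape[0], device=device)  # batch size is 1 at test time
out = model(X_test)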

Maxpool2d error is showing while there is no Maxpool2d

I run the following code to train a neural network that contains a CNN with max pooling and two fully-connected layers:
class Net(nn.Module):
    def __init__(self, vocab_size, embedding_size):
        torch.manual_seed(0)
        super(Net, self).__init__()
        self.word_embeddings = nn.Embedding(vocab_size, embedding_size)
        self.conv1 = nn.Conv1d(embedding_size, 64, 3)
        self.drop1 = nn.Dropout(0.5)
        self.max_pool1 = nn.MaxPool1d(2)
        self.flat1 = nn.Flatten()
        self.fc1 = nn.Linear(64*99, 100)
        self.fc2 = nn.Linear(100, 1)

    def forward(self, sentence):
        embedding = self.word_embeddings(sentence).permute(0, 2, 1)
        conv1 = F.relu(self.conv1(embedding))
        drop1 = self.drop1(conv1)
        max_pool1 = self.max_pool1(drop1)
        flat1 = self.flat1(max_pool1)
        fc1 = F.relu(self.fc1(flat1))
        fc2 = torch.sigmoid(self.fc2(fc1))
        return fc2
net = Net(vocab_size, EMBEDDING_SIZE)
EPOCHS = 10
net.cuda()
criterion = nn.BCELoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
loader = DataLoader(train, batch_size=32)
net.train()
for epoch in range(EPOCHS):
    progress = tqdm_notebook(loader, leave=False)
    for inputs, target in progress:
        net.zero_grad()
        output = net(inputs.to(device))
        loss = criterion(output, target.to(device))
        loss.backward()
        optimizer.step()
    print(loss)
and I get the following error (the error trace has been updated and it includes the complete trace):
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/loss.py:498: UserWarning: Using a target size (torch.Size([32])) that is different to the input size (torch.Size([32, 1])) is deprecated. Please ensure they have the same size.
return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-2-3c8a885417ba> in <module>()
33 for inputs, target in progress:
34 net.zero_grad()
---> 35 output = net(inputs.to(device))
36 loss = criterion(output, target.to(device))
37 loss.backward()
5 frames
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in _max_pool1d(input, kernel_size, stride, padding, dilation, ceil_mode, return_indices)
455 stride = torch.jit.annotate(List[int], [])
456 return torch.max_pool1d(
--> 457 input, kernel_size, stride, padding, dilation, ceil_mode)
458
459 max_pool1d = boolean_dispatch(
RuntimeError: max_pool2d_with_indices_out_cuda_frame failed with error code 0
I do not have any MaxPool2d layers in my code! Could anybody help me with this problem?
