ValueError: Target size (torch.Size([8])) must be the same as input size (torch.Size([8, 2])) - python-3.x

I'm trying to implement code for sentiment analysis (positive or negative labels) using BERT, and I want to add a BiLSTM layer to see if I can increase the accuracy of the pretrained model from HuggingFace. I have the code below and a few questions:
import numpy as np
import pandas as pd
from sklearn import metrics
import transformers
import torch
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertTokenizer, BertModel, BertConfig
from torch import cuda
import re
import torch.nn as nn
device = 'cuda' if cuda.is_available() else 'cpu'
MAX_LEN = 200
TRAIN_BATCH_SIZE = 8
VALID_BATCH_SIZE = 4
EPOCHS = 1
LEARNING_RATE = 1e-05 #5e-5, 3e-5 or 2e-5
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
class CustomDataset(Dataset):
    def __init__(self, dataframe, tokenizer, max_len):
        self.tokenizer = tokenizer
        self.data = dataframe
        self.comment_text = dataframe.review
        self.targets = self.data.sentiment
        self.max_len = max_len

    def __len__(self):
        return len(self.comment_text)

    def __getitem__(self, index):
        comment_text = str(self.comment_text[index])
        comment_text = " ".join(comment_text.split())
        inputs = self.tokenizer.encode_plus(comment_text, None, add_special_tokens=True, max_length=self.max_len,
                                            pad_to_max_length=True, return_token_type_ids=True)
        ids = inputs['input_ids']
        mask = inputs['attention_mask']
        token_type_ids = inputs["token_type_ids"]
        return {
            'ids': torch.tensor(ids, dtype=torch.long),
            'mask': torch.tensor(mask, dtype=torch.long),
            'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),
            'targets': torch.tensor(self.targets[index], dtype=torch.float)
        }
train_size = 0.8
train_dataset=df.sample(frac=train_size,random_state=200)
test_dataset=df.drop(train_dataset.index).reset_index(drop=True)
train_dataset = train_dataset.reset_index(drop=True)
print("FULL Dataset: {}".format(df.shape))
print("TRAIN Dataset: {}".format(train_dataset.shape))
print("TEST Dataset: {}".format(test_dataset.shape))
training_set = CustomDataset(train_dataset, tokenizer, MAX_LEN)
testing_set = CustomDataset(test_dataset, tokenizer, MAX_LEN)
train_params = {'batch_size': TRAIN_BATCH_SIZE,'shuffle': True,'num_workers': 0}
test_params = {'batch_size': VALID_BATCH_SIZE,'shuffle': True,'num_workers': 0}
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
class BERTClass(torch.nn.Module):
    def __init__(self):
        super(BERTClass, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased', return_dict=False, num_labels=2)
        self.lstm = nn.LSTM(768, 256, batch_first=True, bidirectional=True)
        self.linear = nn.Linear(256 * 2, 2)

    def forward(self, ids, mask, token_type_ids):
        sequence_output, pooled_output = self.bert(ids, attention_mask=mask, token_type_ids=token_type_ids)
        lstm_output, (h, c) = self.lstm(sequence_output)  ## extract the 1st token's embeddings
        hidden = torch.cat((lstm_output[:, -1, :256], lstm_output[:, 0, 256:]), dim=-1)
        linear_output = self.linear(lstm_output[:, -1].view(-1, 256 * 2))
        return linear_output
model = BERTClass()
model.to(device)
print(model)
def loss_fn(outputs, targets):
    return torch.nn.BCEWithLogitsLoss()(outputs, targets)
optimizer = torch.optim.Adam(params = model.parameters(), lr=LEARNING_RATE)
def train(epoch):
    model.train()
    for _, data in enumerate(training_loader, 0):
        ids = data['ids'].to(device, dtype=torch.long)
        mask = data['mask'].to(device, dtype=torch.long)
        token_type_ids = data['token_type_ids'].to(device, dtype=torch.long)
        targets = data['targets'].to(device, dtype=torch.float)
        outputs = model(ids, mask, token_type_ids)
        optimizer.zero_grad()
        loss = loss_fn(outputs, targets)
        if _ % 5000 == 0:
            print(f'Epoch: {epoch}, Loss: {loss.item()}')
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

for epoch in range(EPOCHS):
    train(epoch)
With the above code I ran into the error: Target size (torch.Size([8])) must be the same as input size (torch.Size([8, 2])). I checked online and tried targets = targets.unsqueeze(2), but then I got another error saying the unsqueeze dimension must be in the range [-2, 1]. I also tried to modify the loss function to
def loss_fn(outputs, targets):
    return torch.nn.BCELoss()(outputs, targets)
but I still receive the same error. Can someone advise if there is a solution to this problem, or what I can do to make this work? Many thanks in advance.
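For context, the shape mismatch comes from the model emitting two logits per example ([8, 2]) while the targets are a single 0/1 label per example ([8]). One common way to reconcile the two, shown here only as a hedged sketch (not from the original post), is to switch to CrossEntropyLoss with integer class targets, which expects exactly that combination of shapes:

# Sketch: match [batch, 2] logits with [batch] class labels.
# Assumes `outputs` are raw logits of shape [batch, 2] and `targets` hold 0/1 labels.
import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()

def loss_fn(outputs, targets):
    # CrossEntropyLoss wants float logits [batch, num_classes] and long targets [batch]
    return criterion(outputs, targets.long())

# Alternatively, to keep BCEWithLogitsLoss, the targets could be one-hot encoded first:
# targets_onehot = torch.nn.functional.one_hot(targets.long(), num_classes=2).float()
# loss = nn.BCEWithLogitsLoss()(outputs, targets_onehot)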

Related

How do I use a pt file in Pytorch to predict the label of a new data?

This is my training script, run.py. My data is a one-dimensional matrix: one row of values with one category label.
import numpy as np # linear algebra
import pandas as pd
import os
for dirname, _, filenames in os.walk('./kaggle'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
import torch
from torch.utils.data import DataLoader
from torch import nn,optim
import sys
from tqdm import tqdm
import io
import torch.utils.model_zoo as model_zoo
import torch.onnx
def my_DataLoader(train_root, test_root, batch_size=100, val_split_factor=0.2):
    train_df = pd.read_csv(train_root, header=None)
    test_df = pd.read_csv(test_root, header=None)
    train_data = train_df.to_numpy()
    test_data = test_df.to_numpy()
    train_dataset = torch.utils.data.TensorDataset(torch.from_numpy(train_data[:, :-1]).float(),
                                                   torch.from_numpy(train_data[:, -1]).long(),)
    test_dataset = torch.utils.data.TensorDataset(torch.from_numpy(test_data[:, :-1]).float(),
                                                  torch.from_numpy(test_data[:, -1]).long())
    train_len = train_data.shape[0]
    val_len = int(train_len * val_split_factor)
    train_len -= val_len
    train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [train_len, val_len])
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
    return train_loader, val_loader, test_loader
class conv_net(nn.Module):
    def __init__(self, num_of_class):
        super(conv_net, self).__init__()
        self.model = nn.Sequential(
            #nn.Conv1d(1, 16, kernel_size=5, stride=1, padding=2),
            #nn.Conv1d(1, 16, kernel_size=1, stride=1),
            nn.Conv1d(1, 16, kernel_size=1, stride=1),
            nn.BatchNorm1d(16),
            nn.ReLU(),
            nn.MaxPool1d(2),
            nn.Conv1d(16, 64, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.MaxPool1d(2),
        )
        #self.relu = nn.ReLU()
        self.linear = nn.Sequential(
            #nn.Linear(5120,32),
            nn.Linear(5120, 32),
            nn.LeakyReLU(inplace=True),
            nn.Linear(32, num_of_class),
        )

    def forward(self, x):
        #org = x
        x = x.unsqueeze(1)
        x = self.model(x)
        #x = self.relu(x)
        # print(x.shape)
        x = x.view(x.size(0), -1)
        #x [b, 2944]
        # print(x.shape)
        x = self.linear(x)
        return x
batch_size=32
lr = 3e-3
epochs = 150
torch.manual_seed(1234)
#device = torch.device("cpu:0 cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("using {} device.".format(device))
def evalute(model, loader):
    model.eval()
    correct = 0
    total = len(loader.dataset)
    val_bar = tqdm(loader, file=sys.stdout)
    for x, y in val_bar:
        x, y = x.to(device), y.to(device)
        with torch.no_grad():
            logits = model(x)
            pred = logits.argmax(dim=1)
        correct += torch.eq(pred, y).sum().float().item()
    return correct / total
def main():
    train_loader, val_loader, test_loader = my_DataLoader('./kaggle/train.csv',
                                                          './kaggle/test.csv',
                                                          batch_size=batch_size,
                                                          val_split_factor=0.2)
    model = conv_net(8).to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss()
    # Print model's state_dict
    print(model)
    best_acc, best_epoch = 0, 0
    global_step = 0
    for epoch in range(epochs):
        train_bar = tqdm(train_loader, file=sys.stdout)
        for step, (x, y) in enumerate(train_bar):
            # x: [b, 187], y: [b]
            x, y = x.to(device), y.to(device)
            model.train()
            logits = model(x)
            loss = criteon(logits, y)
            optimizer.zero_grad()
            loss.backward()
            # for param in model.parameters():
            #     print(param.grad)
            optimizer.step()
            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                     epochs,
                                                                     loss)
            global_step += 1
        if epoch % 1 == 0:  # You can change the validation frequency as you wish
            val_acc = evalute(model, val_loader)
            print('val_acc = ', val_acc)
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc
                # Export the model
                name_pt = 'best3.pt'
                torch.save(model.state_dict(), name_pt)
    print('best acc:', best_acc, 'best epoch:', best_epoch)
    model.load_state_dict(torch.load(name_pt))
    print('loaded from ckpt!')
    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)

if __name__ == '__main__':
    main()
Then I try to make predictions, modifying the code with reference to other people's examples:
import torch
from torchvision.transforms import transforms
import pandas as pd
from PIL import Image
from run import conv_net
from pathlib import Path
name_pt = 'best3.pt'
model = conv_net(8)
checkpoint = torch.load(name_pt)
model.load_state_dict(checkpoint)
testdata = './kaggle/onedata.csv'
test_df = pd.read_csv(testdata, header=None)
test_data = test_df.to_numpy()
csv = torch.utils.data.TensorDataset(torch.from_numpy(test_data[:, :]).float())
output = model(csv)
prediction = int(torch.max(output.data, 1)[1].numpy())
print(prediction)
if (prediction == 0):
    print ('other')
if (prediction == 1):
    print ('100%PET')
if (prediction == 2):
    print ('100% Cotton')
if (prediction == 3):
    print ('100% Nylon')
if (prediction == 4):
    print ('>70% PET')
if (prediction == 5):
    print ('<70% PET')
if (prediction == 6):
    print ('Spandex/PET Spandex<5%')
if (prediction == 7):
    print ('Spandex/PET Spandex>5%')
Then something went wrong:
File "C:\Users\54-0461100-01\Desktop\for_spec_train\run.py", line 70, in forward
x = x.unsqueeze(1)
AttributeError: 'TensorDataset' object has no attribute 'unsqueeze'
Most of the questions I found are about images, not CSV files. Any help or suggestions would be appreciated.
By the way, this is my data format: the LJ column holds the labels, and the train and test sets use the same format (screenshot omitted). The onedata format is shown in a second screenshot (also omitted).
When calling output = model(csv) you are passing the model a TensorDataset object as the input instead of a tensor. You can access the tensors in this object by indexing it: https://pytorch.org/docs/stable/_modules/torch/utils/data/dataset.html#TensorDataset
Additionally, you can avoid the TensorDataset object altogether by replacing
csv = torch.utils.data.TensorDataset(torch.from_numpy(test_data[:, :]).float())
with
csv = torch.from_numpy(test_data[:, :]).float()
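Putting that together, the prediction script could look roughly like the sketch below. It assumes onedata.csv holds only the feature columns the model was trained on, and that model is the conv_net loaded from best3.pt as in the question:

# Sketch: feed the model a plain float tensor instead of a TensorDataset.
import pandas as pd
import torch

test_df = pd.read_csv('./kaggle/onedata.csv', header=None)
features = torch.from_numpy(test_df.to_numpy()).float()  # shape [num_rows, num_features]

model.eval()                               # conv_net loaded earlier via load_state_dict
with torch.no_grad():
    output = model(features)               # conv_net.forward adds the channel dim itself
    prediction = output.argmax(dim=1)      # predicted class index per row
print(prediction)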

Issues calculating accuracy for custom BERT model

I'm having some issues trying to calculate the accuracy of a custom BERT model which also uses the pretrained model from HuggingFace. This is the code that I have:
import numpy as np
import pandas as pd
from sklearn import metrics, linear_model
import torch
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertTokenizer, BertModel
from torch import cuda
import re
import torch.nn as nn
device = 'cuda' if cuda.is_available() else 'cpu'
MAX_LEN = 200
TRAIN_BATCH_SIZE = 8 # 12, 64
VALID_BATCH_SIZE = 4
EPOCHS = 1
LEARNING_RATE = 1e-4 #3e-4, 1e-4, 5e-5, 3e-5
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased')
file1 = open('test.txt', 'r')
list_com = []
list_label = []
for line in file1:
    possible_labels = 'positive|negative'
    label = re.findall(possible_labels, line)
    line = re.sub(possible_labels, ' ', line)
    line = re.sub('\n', ' ', line)
    list_com.append(line)
    list_label.append(label[0])
list_tuples = list(zip(list_com, list_label))
file1.close()
labels = ['positive', 'negative']
df = pd.DataFrame(list_tuples, columns=['review', 'sentiment'])
df['sentiment'] = df['sentiment'].map({'positive': 1, 'negative': 0})
for i in range(0, len(df['sentiment'])):
    list_label[i] = df['sentiment'][i]
#print(df)
class CustomDataset(Dataset):
    def __init__(self, dataframe, tokenizer, max_len):
        self.tokenizer = tokenizer
        self.data = dataframe
        self.comment_text = dataframe.review
        self.targets = self.data.sentiment
        self.max_len = max_len

    def __len__(self):
        return len(self.comment_text)

    def __getitem__(self, index):
        comment_text = str(self.comment_text[index])
        comment_text = " ".join(comment_text.split())
        inputs = self.tokenizer.encode_plus(comment_text, None, add_special_tokens=True, max_length=self.max_len,
                                            pad_to_max_length=True, return_token_type_ids=False, truncation=True)
        ids = inputs['input_ids']
        mask = inputs['attention_mask']
        return {
            'ids': torch.tensor(ids, dtype=torch.long),
            'mask': torch.tensor(mask, dtype=torch.long),
            'targets': torch.tensor(self.targets[index], dtype=torch.float)
        }
train_size = 0.8
train_dataset=df.sample(frac=train_size,random_state=200)
test_dataset=df.drop(train_dataset.index).reset_index(drop=True)
train_dataset = train_dataset.reset_index(drop=True)
print("FULL Dataset: {}".format(df.shape))
print("TRAIN Dataset: {}".format(train_dataset.shape))
print("TEST Dataset: {}".format(test_dataset.shape))
training_set = CustomDataset(train_dataset, tokenizer, MAX_LEN)
testing_set = CustomDataset(test_dataset, tokenizer, MAX_LEN)
train_params = {'batch_size': TRAIN_BATCH_SIZE,'shuffle': True,'num_workers': 0}
test_params = {'batch_size': VALID_BATCH_SIZE,'shuffle': True,'num_workers': 0}
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
class BERTClass(torch.nn.Module):
    def __init__(self):
        super(BERTClass, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-multilingual-uncased', return_dict=False, num_labels=2)
        self.lstm = nn.LSTM(768, 256, batch_first=True, bidirectional=True)
        self.linear = nn.Linear(256 * 2, 2)

    def forward(self, ids, mask):
        sequence_output, pooled_output = self.bert(ids, attention_mask=mask)
        lstm_output, (h, c) = self.lstm(sequence_output)  ## extract the 1st token's embeddings
        hidden = torch.cat((lstm_output[:, -1, :256], lstm_output[:, 0, 256:]), dim=-1)
        linear_output = self.linear(lstm_output[:, -1].view(-1, 256 * 2))
        return linear_output
model = BERTClass()
model.to(device)
#print(model)
def loss_fn(outputs, targets):
    return torch.nn.CrossEntropyLoss()(outputs, targets)
optimizer = torch.optim.Adam(params = model.parameters(), lr=LEARNING_RATE)
def train(epoch):
    model.train()
    for _, data in enumerate(training_loader, 0):
        ids = data['ids'].to(device, dtype=torch.long)
        mask = data['mask'].to(device, dtype=torch.long)
        targets = data['targets'].to(device, dtype=torch.long)
        outputs = model(ids, mask)
        optimizer.zero_grad()
        loss = loss_fn(outputs, targets)
        if _ % 1000 == 0:
            print(f'Epoch: {epoch}, Loss: {loss.item()}')
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

for epoch in range(EPOCHS):
    train(epoch)
def validation(epoch):
    model.eval()
    fin_targets = []
    fin_outputs = []
    with torch.no_grad():
        for _, data in enumerate(testing_loader, 0):
            ids = data['ids'].to(device, dtype=torch.long)
            mask = data['mask'].to(device, dtype=torch.long)
            targets = data['targets'].to(device, dtype=torch.float)
            outputs = model(ids, mask)
            fin_targets.extend(targets.cpu().detach().numpy().tolist())
            fin_outputs.extend(torch.sigmoid(outputs).cpu().detach().numpy().tolist())
    return fin_outputs, fin_targets

for epoch in range(EPOCHS):
    outputs, targets = validation(epoch)
    accuracy = metrics.accuracy_score(targets, np.array(outputs) >= 0.5)
    print(f"Accuracy Score = {accuracy}")

torch.save(model.state_dict, 'model.pt')
print(f'Model saved!')
It should be a binary classification, positive (1) or negative (0), but when I try to compute the accuracy I get the error ValueError: Classification metrics can't handle a mix of binary and multilabel-indicator targets on the line accuracy = metrics.accuracy_score(targets, outputs). The outputs look like this:
[[ True False]
[False False]
[ True False]
[ True False]
[ True False]
[ True False]
[ True False]
[False True]
[ True False]
[ True False]
[False True]]
Can someone advise what the fix for this would be, or whether there is something else that could improve this? Also, I saved the model and I want to know how I can use the saved model to classify user input in another .py file (assuming we type a sentence at the keyboard and want the model to classify it).
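For reference, the error happens because the model returns two logits per example while the targets are single 0/1 labels. A hedged sketch (not part of the original post) of one common fix is to reduce the outputs to a class index with argmax instead of thresholding each column:

# Sketch: turn [N, 2] logits into one predicted class per example before scoring.
# Assumes `outputs` and `targets` are the lists returned by validation().
import numpy as np
from sklearn import metrics

preds = np.argmax(np.array(outputs), axis=1)        # 0 or 1 per example
accuracy = metrics.accuracy_score(targets, preds)
print(f"Accuracy Score = {accuracy}")

For classifying a typed sentence in another .py file, a sketch could look like the following, assuming BERTClass, tokenizer, MAX_LEN and device are imported or re-created there. Note that torch.save(model.state_dict, 'model.pt') in the question saves the bound method itself; saving model.state_dict() (with parentheses) is what load_state_dict expects:

# Sketch: load the saved weights and classify keyboard input.
model = BERTClass()
model.load_state_dict(torch.load('model.pt', map_location=device))
model.to(device)
model.eval()

sentence = input("Enter a sentence: ")
enc = tokenizer.encode_plus(sentence, add_special_tokens=True, max_length=MAX_LEN,
                            pad_to_max_length=True, return_token_type_ids=False, truncation=True)
ids = torch.tensor([enc['input_ids']], dtype=torch.long).to(device)
mask = torch.tensor([enc['attention_mask']], dtype=torch.long).to(device)
with torch.no_grad():
    logits = model(ids, mask)
# positive was mapped to 1 and negative to 0 when building the dataframe
print('positive' if logits.argmax(dim=1).item() == 1 else 'negative')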

While running pytorch i got an error 'TypeError: object of type'CatsAndDogsDataset' has no len()' and I want to know how to fix it

I got an error while running PyTorch. I am training a ResNet model and wrote my own custom dataset class for it. After loading the dataset, the training and test data are split and used for training, but when I run it an error occurs and I don't know what is causing it.
Below I attach my ResNet.py code and my own dataset code, customDataset.py.
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import transforms, datasets, models
from customDataset import CatsAndDogsDataset
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device("cuda" if USE_CUDA else "cpu")
EPOCHS = 3
BATCH_SIZE = 10
dataset = CatsAndDogsDataset(csv_file = 'cats_dogs.csv', root_dir = 'cats_dogs_resized',transform = transforms.ToTensor())
train_set, test_set = torch.utils.data.random_split(dataset, [28,4])
train_loader = DataLoader(dataset=train_set, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(dataset=test_set, batch_size=BATCH_SIZE, shuffle=True)
class BasicBlock(nn.Module):
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                                          nn.BatchNorm2d(planes))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    def __init__(self, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(16, 2, stride=1)
        self.layer2 = self._make_layer(32, 2, stride=2)
        self.layer3 = self._make_layer(64, 2, stride=2)
        self.linear = nn.Linear(64, num_classes)

    def _make_layer(self, planes, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(BasicBlock(self.in_planes, planes, stride))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
model = ResNet().to(DEVICE)
optimizer = optim.SGD(model.parameters(), lr=0.1,momentum=0.9, weight_decay=0.0005)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)
print(model)
def train(model, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(DEVICE), target.to(DEVICE)
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()

def evaluate(model, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(DEVICE), target.to(DEVICE)
            output = model(data)
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= length(test_loader.dataset)
    test_accuracy = 100. * correct / length(test_loader.dataset)
    return test_loss, test_accuracy

for epoch in range(1, EPOCHS + 1):
    scheduler.step()
    train(model, train_loader, optimizer, epoch)
    test_loss, test_accuracy = evaluate(model, test_loader)
    print('[{}] Test Loss: {:.4f}, Accuracy: {:.2f}%'.format(
        epoch, test_loss, test_accuracy))
import os
import pandas as pd
import torch
from torch.utils.data import Dataset
from skimage import io
class CatsAndDogsDataset(Dataset):
    def __init__(self, csv_file, root_dir, transform=None):
        self.annotations = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __length__(self):
        return length(self.annotations)

    def __getitem__(self, index):
        img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0])
        image = io.imread(img_path)
        y_label = torch.tensor(int(self.annotations.iloc[index, 1]))
        if self.transform:
            image = self.transform(image)
        return (image, ylabel)
When I write the code this way and run it, I get
TypeError: object of type 'CatsAndDogsDataset' has no len()
and I don't understand why it can't have len(). In addition, when I run it on Backend.ai instead of PyCharm, the error content is
Cannot verify that dataset is Sized
and at the line if sum(lengths) != len(dataset): it raises
ValueError("Sum of input lengths does not equal the length of the input dataset!")
Is there a workaround? Please help.
You need to define the function __len__ for your custom dataset (which you seem to have currently incorrectly defined as __length__).
This documentation provides details. Relevant excerpt:
torch.utils.data.Dataset is an abstract class representing a dataset.
Your custom dataset should inherit Dataset and override the following
methods:
__len__ so that len(dataset) returns the size of the dataset.
__getitem__ to support the indexing such that dataset[i] can be used to get i th sample.
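As a minimal sketch, the corrected dataset methods could look like this (besides the __length__ rename, note that the original also calls a non-existent length() instead of the built-in len(), and returns ylabel where the variable is named y_label):

class CatsAndDogsDataset(Dataset):
    def __init__(self, csv_file, root_dir, transform=None):
        self.annotations = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):                        # was __length__; must be __len__
        return len(self.annotations)          # built-in len(), not length()

    def __getitem__(self, index):
        img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0])
        image = io.imread(img_path)
        y_label = torch.tensor(int(self.annotations.iloc[index, 1]))
        if self.transform:
            image = self.transform(image)
        return (image, y_label)               # was ylabel (undefined name)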

TF 2.0 Error: Gradients does not exist for variables during training using gradienttape

I tried to make a class using the BatchNormalization layer from TF 2.0; however, it gave me an error that gradients do not exist for variables. I tried to use BatchNormalization directly, but it gave me the same error as well. It seems like it is not training the variables related to the batch normalization step.
I tried to use model.trainable_variables instead of model.variables, but it didn't work either.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy import ndimage
learning_rate = 0.001
training_epochs = 15
batch_size = 100
tf.random.set_seed(777)
cur_dir = os.getcwd()
ckpt_dir_name = 'checkpoints'
model_dir_name = 'minst_cnn_best'
checkpoint_dir = os.path.join(cur_dir, ckpt_dir_name, model_dir_name)
os.makedirs(checkpoint_dir, exist_ok=True)
checkpoint_prefix = os.path.join(checkpoint_dir, model_dir_name)
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.astype(np.float32) /255.
test_images = test_images.astype(np.float32) /255.
print(train_images.shape, test_images.shape)
train_images = np.expand_dims(train_images, axis = -1)
test_images = np.expand_dims(test_images, axis = -1)
print(train_images.shape, test_images.shape)
train_labels = to_categorical(train_labels, 10)
test_labels = to_categorical(test_labels, 10)
train_dataset = tf.data.Dataset.from_tensor_slices((train_images,
train_labels)).shuffle(buffer_size = 100000).batch(batch_size)
test_dataset = tf.data.Dataset.from_tensor_slices((test_images,
test_labels)).batch(batch_size)
class ConvBNRelu(tf.keras.Model):
    def __init__(self, filters, kernel_size=3, strides=1, padding='SAME'):
        super(ConvBNRelu, self).__init__()
        self.conv = keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                                        padding=padding, kernel_initializer='glorot_normal')
        self.batchnorm = tf.keras.layers.BatchNormalization()

    def call(self, inputs, training=False):
        layer = self.conv(inputs)
        layer = self.batchnorm(layer)
        layer = tf.nn.relu(layer)
        return layer
class DenseBNRelu(tf.keras.Model):
    def __init__(self, units):
        super(DenseBNRelu, self).__init__()
        self.dense = keras.layers.Dense(units=units, kernel_initializer='glorot_normal')
        self.batchnorm = tf.keras.layers.BatchNormalization()

    def call(self, inputs, training=False):
        layer = self.dense(inputs)
        layer = self.batchnorm(layer)
        layer = tf.nn.relu(layer)
        return layer
class MNISTModel(tf.keras.Model):
    def __init__(self):
        super(MNISTModel, self).__init__()
        self.conv1 = ConvBNRelu(filters=32, kernel_size=[3, 3], padding='SAME')
        self.pool1 = keras.layers.MaxPool2D(padding='SAME')
        self.conv2 = ConvBNRelu(filters=64, kernel_size=[3, 3], padding='SAME')
        self.pool2 = keras.layers.MaxPool2D(padding='SAME')
        self.conv3 = ConvBNRelu(filters=128, kernel_size=[3, 3], padding='SAME')
        self.pool3 = keras.layers.MaxPool2D(padding='SAME')
        self.pool3_flat = keras.layers.Flatten()
        self.dense4 = DenseBNRelu(units=256)
        self.drop4 = keras.layers.Dropout(rate=0.4)
        self.dense5 = keras.layers.Dense(units=10, kernel_initializer='glorot_normal')

    def call(self, inputs, training=False):
        net = self.conv1(inputs)
        net = self.pool1(net)
        net = self.conv2(net)
        net = self.pool2(net)
        net = self.conv3(net)
        net = self.pool3(net)
        net = self.pool3_flat(net)
        net = self.dense4(net)
        net = self.drop4(net)
        net = self.dense5(net)
        return net
models = []
num_models = 5
for m in range(num_models):
    models.append(MNISTModel())
def loss_fn(model, images, labels):
    logits = model(images, training=True)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                                  labels=labels))
    return loss

def grad(model, images, labels):
    with tf.GradientTape() as tape:
        loss = loss_fn(model, images, labels)
    return tape.gradient(loss, model.variables)
def evaluate(models, images, labels):
    predictions = np.zeros_like(labels)
    for model in models:
        logits = model(images, training=False)
        predictions += logits
    correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return accuracy
optimizer = keras.optimizers.Adam(learning_rate = learning_rate)
checkpoints = []
for m in range(num_models):
    checkpoints.append(tf.train.Checkpoint(cnn=models[m]))
for epoch in range(training_epochs):
    avg_loss = 0.
    avg_train_acc = 0.
    avg_test_acc = 0.
    train_step = 0
    test_step = 0
    for images, labels in train_dataset:
        for model in models:
            grads = grad(model, images, labels)
            optimizer.apply_gradients(zip(grads, model.variables))
            loss = loss_fn(model, images, labels)
            avg_loss += loss / num_models
        acc = evaluate(models, images, labels)
        avg_train_acc += acc
        train_step += 1
    avg_loss = avg_loss / train_step
    avg_train_acc = avg_train_acc / train_step
    for images, labels in test_dataset:
        acc = evaluate(models, images, labels)
        avg_test_acc += acc
        test_step += 1
    avg_test_acc = avg_test_acc / test_step
    print('Epoch:', '{}'.format(epoch + 1), 'loss =', '{:.8f}'.format(avg_loss),
          'train accuracy = ', '{:.4f}'.format(avg_train_acc),
          'test accuracy = ', '{:.4f}'.format(avg_test_acc))
    for idx, checkpoint in enumerate(checkpoints):
        checkpoint.save(file_prefix=checkpoint_prefix + '-{}'.format(idx))
print('Learning Finished!')
W0727 20:27:05.344142 140332288718656 optimizer_v2.py:982] Gradients does not exist for variables ['mnist_model/conv_bn_relu/batch_normalization/moving_mean:0', 'mnist_model/conv_bn_relu/batch_normalization/moving_variance:0', 'mnist_model/conv_bn_relu_1/batch_normalization_1/moving_mean:0', 'mnist_model/conv_bn_relu_1/batch_normalization_1/moving_variance:0', 'mnist_model/conv_bn_relu_2/batch_normalization_2/moving_mean:0', 'mnist_model/conv_bn_relu_2/batch_normalization_2/moving_variance:0', 'mnist_model/dense_bn_relu/batch_normalization_3/moving_mean:0', 'mnist_model/dense_bn_relu/batch_normalization_3/moving_variance:0'] when minimizing the loss.
W0727 20:27:05.407717 140332288718656 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py:460: BaseResourceVariable.constraint (from tensorflow.python.ops.resource_variable_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Apply a constraint manually following the optimizer update step.
W0727 20:27:05.499249 140332288718656 optimizer_v2.py:982] Gradients does not exist for variables ['mnist_model_1/conv_bn_relu_3/batch_normalization_4/moving_mean:0', 'mnist_model_1/conv_bn_relu_3/batch_normalization_4/moving_variance:0', 'mnist_model_1/conv_bn_relu_4/batch_normalization_5/moving_mean:0', 'mnist_model_1/conv_bn_relu_4/batch_normalization_5/moving_variance:0', 'mnist_model_1/conv_bn_relu_5/batch_normalization_6/moving_mean:0', 'mnist_model_1/conv_bn_relu_5/batch_normalization_6/moving_variance:0', 'mnist_model_1/dense_bn_relu_1/batch_normalization_7/moving_mean:0', 'mnist_model_1/dense_bn_relu_1/batch_normalization_7/moving_variance:0'] when minimizing the loss.
...
You're computing the gradient of the loss with respect to the model.variables: this collection contains not only the trainable variables (the model weights) but also the non-trainable variables like the moving mean and variance computed by the batch normalization layer.
You have to compute the gradient with respect to the trainable_variables. In short, change the lines
return tape.gradient(loss, model.variables)
and
optimizer.apply_gradients(zip(grads, model.variables))
to
return tape.gradient(loss, model.trainable_variables)
and
optimizer.apply_gradients(zip(grads, model.trainable_variables))
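Assembled, the corrected pieces could look like this (a short sketch combining the two changes above):

# Sketch: differentiate and update only the trainable variables.
def grad(model, images, labels):
    with tf.GradientTape() as tape:
        loss = loss_fn(model, images, labels)
    # moving_mean / moving_variance are non-trainable, so they are excluded here
    return tape.gradient(loss, model.trainable_variables)

# ...and inside the training loop:
grads = grad(model, images, labels)
optimizer.apply_gradients(zip(grads, model.trainable_variables))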

Simple Data recall RNN in Pytorch

I am learning Pytorch and am trying to make a network that can remember previous inputs.
I have tried 2 different input/output structures (see below) but haven't gotten anything to work the way I would like.
input 1:
in:[4,2,7,8]
output [[0,0,4],[0,4,2],[4,2,7],[2,7,8]]
code:
def histroy(num_samples=4, look_back=3):
    data = np.random.randint(10, size=(num_samples)).tolist()
    lab = [[0] * look_back]
    for i in data:
        lab.append(lab[-1][1:] + [i])
    return data, lab[1:]
input2:
in:[4,2,7,8]
out:[0,4,2,7]
def histroy(num_samples=4):
    data = np.random.randint(10, size=(num_samples)).tolist()
    lab = [0]
    for i in data:
        lab.append(i)
    return data, lab
I have tried a number of different network structures and training methods, but nothing seems to stick.
The only things I think I have right are that net.hidden = net.init_hidden() should go outside of each epoch, and loss.backward(retain_graph=True), but that doesn't seem to do anything.
Currently, it can learn the last number in the sequence but never seems to learn any of the others.
My last attempt:
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
def histroy(num_samples=4, look_back=3):
    data = np.random.randint(10, size=(num_samples)).tolist()
    lab = [[0] * look_back]
    for i in data:
        lab.append(lab[-1][1:] + [i])
    return data, lab[1:]
class Net(nn.Module):
    def __init__(self, input_dim, hidden_dim, batch_size, output_dim=10, num_layers=1):
        super(Net, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.num_layers = num_layers
        self.memory = nn.RNN(self.input_dim, self.hidden_dim, self.num_layers)
        self.linear = nn.Linear(self.hidden_dim, output_dim)
        self.first = True

    def init_hidden(self):
        # This is what we'll initialise our hidden state as
        return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim),
                torch.zeros(self.num_layers, self.batch_size, self.hidden_dim))

    def forward(self, input):
        self.memory_out, self.hidden = self.memory(input.view(len(input), self.batch_size, -1))
        y_pred = self.linear(self.memory_out[-1].view(self.batch_size, -1))
        return y_pred.view(-1)
if __name__ == '__main__':
    data_amount = 10000
    batch_size = 1  # default is 32
    data_amount -= data_amount % batch_size
    number_of_times_on_the_same_data = 250
    look_back = 5
    net = Net(input_dim=1, hidden_dim=25, batch_size=batch_size, output_dim=look_back)
    data, labs = histroy(data_amount, look_back)
    data = torch.Tensor(data).float()
    labs = torch.Tensor(labs).float()
    optimizer = optim.Adam(net.parameters())
    criterion = torch.nn.MSELoss(size_average=False)
    for epoch in range(number_of_times_on_the_same_data):  # loop over the dataset multiple times
        running_loss = 0.0
        data, labs = histroy(data_amount, look_back)
        data = torch.Tensor(data).float()
        labs = torch.Tensor(labs).float()
        net.hidden = net.init_hidden()
        print("epoch", epoch)
        for i in range(0, data_amount, batch_size):
            inputs = data[i:i + batch_size]
            labels = labs[i:i + batch_size]
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward(retain_graph=True)
            optimizer.step()
            running_loss += loss.item()
            if i >= data_amount - batch_size:
                print("loss", loss)
                net.hidden = net.init_hidden()
                print("Outputs", outputs)
                print("Input", data[-1 * look_back:])
                print("labels", labels)
The problem with your network is the fact that your input is of shape 1:
for i in range(0, data_amount, batch_size):
    inputs = data[i:i + batch_size]
    labels = labs[i:i + batch_size]
    print(inputs.shape, labels.shape)
>>>torch.Size([1]) torch.Size([1, 5])
>>>torch.Size([1]) torch.Size([1, 5])...
That's the reason your RNN is predicting only your last number, because in this case you're not using your look_back attribute. You have to fix your code in order to have inputs of size [1,5]. Your code should look something like this:
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
def histroy(num_samples=4, look_back=3):
    data = np.random.randint(10, size=(num_samples)).tolist()
    lab = [[0] * look_back]
    for i in data:
        lab.append(lab[-1][1:] + [i])
    return lab[:-1], lab[1:]
class Net(nn.Module):
    def __init__(self, input_dim, hidden_dim, batch_size, output_dim=10, num_layers=1):
        super(Net, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.num_layers = num_layers
        self.memory = nn.RNN(self.input_dim, self.hidden_dim, self.num_layers)
        self.linear = nn.Linear(self.hidden_dim, output_dim)
        self.first = True

    def init_hidden(self):
        # This is what we'll initialise our hidden state as
        return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim),
                torch.zeros(self.num_layers, self.batch_size, self.hidden_dim))

    def forward(self, input):
        self.memory_out, self.hidden = self.memory(input.view(len(input), self.batch_size, -1))
        y_pred = self.linear(self.memory_out[-1].view(self.batch_size, -1))
        return y_pred.view(-1)
if __name__ == '__main__':
    data_amount = 10000
    batch_size = 1  # default is 32
    data_amount -= data_amount % batch_size
    number_of_times_on_the_same_data = 250
    look_back = 5
    net = Net(input_dim=1, hidden_dim=25, batch_size=batch_size, output_dim=look_back)
    data, labs = histroy(data_amount, look_back)
    data = torch.Tensor(data).float()
    labs = torch.Tensor(labs).float()
    optimizer = optim.Adam(net.parameters())
    criterion = torch.nn.MSELoss(size_average=False)
    for epoch in range(number_of_times_on_the_same_data):  # loop over the dataset multiple times
        running_loss = 0.0
        data, labs = histroy(data_amount, look_back)
        data = torch.Tensor(data).float()
        labs = torch.Tensor(labs).float()
        net.hidden = net.init_hidden()
        print("epoch", epoch)
        for i in range(0, data_amount, batch_size):
            inputs = data[i:i + batch_size].view(-1)
            labels = labs[i:i + batch_size]
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward(retain_graph=True)
            optimizer.step()
            running_loss += loss.item()
            if i >= data_amount - batch_size:
                print("loss", loss)
                net.hidden = net.init_hidden()
                print("Outputs", outputs)
                print("Input", data[i:i + batch_size][-1])
                print("labels", labels)
Output:
>>>epoch 0
>>>loss tensor(17.7415, grad_fn=<MseLossBackward>)
>>>Outputs tensor([2.0897, 3.1410, 4.7382, 1.0532, 4.2003], grad_fn=<ViewBackward>)
>>>Input tensor([8., 2., 3., 5., 1.])
>>>labels tensor([[2., 3., 5., 1., 0.]])...
