How do you test a custom dataset in PyTorch?

I've been following PyTorch tutorials that use built-in datasets which let you choose whether you want the training split or the test split... But now I'm using a .csv file and a custom dataset.
class MyDataset(Dataset):
    def __init__(self, root, n_inp):
        self.df = pd.read_csv(root)
        self.data = self.df.to_numpy()
        self.x, self.y = (torch.from_numpy(self.data[:, :n_inp]),
                          torch.from_numpy(self.data[:, n_inp:]))

    def __getitem__(self, idx):
        return self.x[idx, :], self.y[idx, :]

    def __len__(self):
        return len(self.data)
How can I tell PyTorch not to train on my test_dataset so I can use it as a reference for how accurate my model is?
train_dataset = MyDataset("heart.csv", input_size)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = MyDataset("heart.csv", input_size)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

In PyTorch, a custom dataset inherits from the Dataset class. Mainly it implements two methods: __len__(), which specifies the length of the dataset object to iterate over, and __getitem__(), which returns one sample at a time (the DataLoader takes care of batching).
Once the DataLoader objects are initialized (train_loader and test_loader as specified in your code), you need to write a train loop and a test loop.
def train(model, optimizer, loss_fn, dataloader):
    model.train()
    for i, (input, gt) in enumerate(dataloader):
        if params.use_gpu:  # (if training using GPU)
            input, gt = input.cuda(non_blocking=True), gt.cuda(non_blocking=True)
        predicted = model(input)
        loss = loss_fn(predicted, gt)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
and your test loop should be:
def test(model, loss_fn, dataloader):
    model.eval()
    for i, (input, gt) in enumerate(dataloader):
        if params.use_gpu:  # (if running on GPU)
            input, gt = input.cuda(non_blocking=True), gt.cuda(non_blocking=True)
        predicted = model(input)
        loss = loss_fn(predicted, gt)
In addition, you can use a metrics dictionary to log your predictions, loss, epochs, etc. The main difference between the training and the test loop is that we exclude back propagation (zero_grad(), backward(), step()) at inference time.
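For instance, a minimal sketch of such a metrics dictionary inside the test loop (the dictionary keys here are arbitrary, just for illustration):

def test(model, loss_fn, dataloader):
    model.eval()
    metrics = {"loss": 0.0, "num_batches": 0}  # illustrative keys, not a fixed API
    with torch.no_grad():  # no gradients needed at inference time
        for input, gt in dataloader:
            predicted = model(input)
            metrics["loss"] += loss_fn(predicted, gt).item()
            metrics["num_batches"] += 1
    metrics["avg_loss"] = metrics["loss"] / max(metrics["num_batches"], 1)
    return metrics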
Finally,
for epoch in range(1, epochs + 1):
    train(model, optimizer, loss_fn, train_loader)
    test(model, loss_fn, test_loader)
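As a side note, in the question both loaders are built from the same heart.csv, so the test set is identical to the training set. A minimal sketch of carving one dataset into disjoint train/test subsets with torch.utils.data.random_split (the 80/20 ratio is just an illustrative choice):

from torch.utils.data import DataLoader, random_split

full_dataset = MyDataset("heart.csv", input_size)
n_train = int(0.8 * len(full_dataset))   # 80/20 split, picked arbitrarily
n_test = len(full_dataset) - n_train
train_dataset, test_dataset = random_split(full_dataset, [n_train, n_test])

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)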

There are a few things to note when you're testing in PyTorch:
Put your model into evaluation mode so that things like dropout and batch normalization aren't in training mode: model.eval()
Put a wrapper around your testing code to avoid the computation of gradients (saving memory and time): with torch.no_grad():
Normalise or standardise your data according to your training set only. This is important for min/max normalisation or z-score standardisation so that the model accurately reflects test performance (see the short sketch after the code below).
Other than that, what you've written looks pretty fine to me, as you're not applying any transforms to your data (for example, image flipping or Gaussian noise injection). To show what code should look like in test mode, see below:
for e in range(num_epochs):
    model.train()  # switch back to training mode after the previous epoch's eval
    for B, (dat, label) in enumerate(train_loader):
        # transforms here
        opt.zero_grad()
        out = model(dat.to(device))
        loss = criterion(out, label.to(device))  # the loss needs the targets as well
        loss.backward()
        opt.step()

    with torch.no_grad():
        model.eval()
        global_corr = 0
        for B, (dat, label) in enumerate(test_loader):
            out = model(dat.to(device))
            # get batch eval metrics here!
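As a rough sketch of the normalisation point above (this assumes train_dataset and test_dataset expose the feature tensor as .x, like the MyDataset class in the question; a float conversion may be needed first):

# Compute statistics on the training split only, then reuse them for the test split.
mean = train_dataset.x.float().mean(dim=0)
std = train_dataset.x.float().std(dim=0)

train_dataset.x = (train_dataset.x.float() - mean) / (std + 1e-8)  # z-score with train stats
test_dataset.x = (test_dataset.x.float() - mean) / (std + 1e-8)    # test statistics are never used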

Related

Loss does not decrease

I'm a beginner and just trying to get into PyTorch and neural networks, so I created a dataset. The dataset consists of two input variables and one output variable (basically the output is a linear function with some noise). Now I want to set up a neural network and train it on the dataset. I followed a tutorial and wrote this code:
df = pd.read_csv(r" ... .csv")
X = df[["x", "y"]]
y = df[["goal"]]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=42)
X_train, y_train = np.array(X_train), np.array(y_train)
X_test, y_test = np.array(X_test), np.array(y_test)

# Convert data to torch tensors
class Data(Dataset):
    def __init__(self, X, y):
        self.X = torch.from_numpy(X.astype(np.float32))
        self.y = torch.from_numpy(y.astype(np.float32))
        self.len = self.X.shape[0]

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return self.len

batch_size = 32

# Instantiate training and test data
train_data = Data(X_train, y_train)
train_dataloader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_data = Data(X_test, y_test)
test_dataloader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True)

input_dim = 2
hidden_dim_1 = 2
output_dim = 1

class NeuralNetwork(nn.Module):
    def __init__(self, input_dim, hidden_dim_1, output_dim):
        super(NeuralNetwork, self).__init__()
        self.layer_1 = nn.Linear(input_dim, hidden_dim_1)
        self.layer_out = nn.Linear(hidden_dim_1, output_dim)

    def forward(self, x):
        x = F.relu(self.layer_1(x))
        x = self.layer_out(x)
        return x

model = NeuralNetwork(input_dim, hidden_dim_1, output_dim)
optimizer = optim.SGD(model.parameters(), lr=0.01)

def train(epoch):
    model.train()
    for batch_id, (data, target) in enumerate(train_data):
        data = Variable(data)
        target = Variable(target)
        target = target.to(dtype=torch.float32)
        optimizer.zero_grad()
        out = model(data)
        criterion = F.mse_loss
        loss = criterion(out, target)
        print(loss.detach().numpy())
        loss.backward()
        optimizer.step()

for epoch in range(1, 30):
    train(epoch)
My problem is that the printed loss is extremely high (around 1e8) and does not decrease.
I tried changing some settings of the neural network, the batch size, the learning rate, and other optimizers and loss functions, but none of the changes really helped. My research also didn't bring any success. It seems to me that there is a more basic mistake in my code. What did I do wrong?
Thanks in advance!
Your code seems fine to me (although I might have missed a bug). It is in general never safe to say which networks will succeed and which won't, but here are some suggestions if you can't see any progress:
Check the input data. Maybe try plotting it to make sure that it actually contains what you think it does. You can also print out the inputs, predicted and expected values (or better, view them in a debugger) to see what's wrong; a short sketch follows this list.
Normalize the input data. If there are high values in the input / output data, losses may explode. Ensure that most of the values are roughly between -1 and 1.
Lower the learning rate. 0.01 is generally a good starting point, but who knows.
Try training for more epochs. Depending on the noise in your data, this could be necessary.
Try adding more neurons. A linear function should in theory be fine with not that many, but maybe the noise is too 'complex'.
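As a quick sketch of the first suggestion (variable names follow the code in the question; this is just a sanity check, not a fix):

# Print a handful of predictions next to their targets to spot scale problems.
model.eval()
with torch.no_grad():
    sample_X = torch.from_numpy(X_test[:5].astype(np.float32))
    sample_pred = model(sample_X)
for pred, target in zip(sample_pred.numpy(), y_test[:5]):
    print("predicted: {}, expected: {}".format(pred, target))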

I am kinda new to PyTorch, now struggling with a classification problem

I built a very simple structure
class classifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.classify = nn.Sequential(
            nn.Linear(166, 80),
            nn.Tanh(),
            nn.Linear(80, 40),
            nn.Tanh(),
            nn.Linear(40, 1),
            nn.Softmax()
        )

    def forward(self, x):
        pred = self.classify(x)
        return pred

model = classifier()
The loss function and optimizer are defined as
criteria = nn.BCEWithLogitsLoss()
iteration = 1000
learning_rate = 0.1
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
and here is the training and evaluation section
for epoch in range(iteration):
    model.train()
    y_pred = model(x_train)
    loss = criteria(y_pred, y_train)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    model.eval()
    with torch.inference_mode():
        test_pred = model(x_test)
        test_loss = criteria(test_pred, y_test)
        if epoch % 100 == 0:
            print(loss)
            print(test_loss)
I received the same loss values, and by debugging, I found that the weights were not being updated.
The problem is in the network architecture: you are using a Softmax layer on a single-valued output at the end. As per the definition of the softmax function, for an output vector x we have, for index i:
softmax(x_i) = e^{x_i} / sum_j (e^{x_j})
Here, you only have a single-valued output. Because of this, the output of your neural network is always 1, irrespective of the inputs or the weights. To fix it, remove the Softmax layer at the end. A sigmoid would be the appropriate activation here, and BCEWithLogitsLoss already applies it internally, so the model should simply output raw logits.
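A quick way to convince yourself of this (a throwaway check, not part of the fix): softmax over a dimension of size one is always exactly 1, whatever the logit:

import torch

logits = torch.tensor([[-5.0], [0.0], [42.0]])  # three arbitrary single-valued outputs
print(torch.softmax(logits, dim=1))             # tensor([[1.], [1.], [1.]])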
A second thing worth looking at is the ordering here:
y_pred = model(x_train)
loss = criteria(y_pred,y_train)
optimizer.zero_grad()
loss.backward()
optimizer.step()
After the loss is calculated, you clear the gradients with optimizer.zero_grad(). On its own this is harmless, since gradients are only populated when loss.backward() is called, but the conventional ordering is:
optimizer.zero_grad()
y_pred = model(x_train)
loss = criteria(y_pred,y_train)
loss.backward()
optimizer.step()

How to compute the parameter importance in pytorch?

I want to develop a lifelong learning system, so I need to prevent important parameters from changing. I read the related paper 'Memory Aware Synapses: Learning what (not) to forget', which mentions a method where I need to calculate the gradient of each parameter corresponding to each input image. How should I write my code in PyTorch?
You can do it using the standard optimization procedure and the .backward() method on your loss function.
First, scaling as defined in your link:
class Scaler:
    def __init__(self, parameters, delta):
        self.parameters = list(parameters)  # materialise the generator so step() can be called repeatedly
        self.delta = delta

    def step(self):
        """Multiplies gradients in place."""
        for param in self.parameters:
            if param.grad is None:
                raise ValueError("backward() has to be called before running scaler")
            param.grad *= self.delta
One can use it just like optimizer.step(); see the comments in the loop below:
model = torch.nn.Sequential(
    torch.nn.Linear(10, 100), torch.nn.ReLU(), torch.nn.Linear(100, 1)
)

scaler = Scaler(model.parameters(), delta=0.001)
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.MSELoss()

X, y = torch.randn(64, 10), torch.randn(64, 1)  # target shaped (64, 1) to match the model output

# Optimization loop
EPOCHS = 10
for _ in range(EPOCHS):
    output = model(X)
    loss = criterion(output, y)
    loss.backward()        # Now model has the gradients
    optimizer.step()       # Optimize model's parameters
    print(next(model.parameters()).grad)
    scaler.step()          # Scale gradients
    optimizer.zero_grad()  # Zero gradients before next step
After scaler.step() you will have the scaled gradients available in param.grad for each parameter (just as they are accessed within Scaler's step method), so you can do whatever you want with them.
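If the end goal is the per-parameter importance from the MAS paper, a simplified sketch building on those gradients could accumulate their absolute values; the importance dictionary and the use of the squared L2 norm of the output as the objective are assumptions taken from the paper, not something prescribed by the code above:

# Simplified MAS-style importance: average absolute gradient of ||f(x)||^2
# with respect to each parameter, accumulated over individual inputs.
importance = {name: torch.zeros_like(p) for name, p in model.named_parameters()}
n_seen = 0

for x in X:                          # one sample at a time, as in the paper
    model.zero_grad()
    out = model(x.unsqueeze(0))
    out.pow(2).sum().backward()      # gradient of the squared L2 norm of the output
    for name, p in model.named_parameters():
        if p.grad is not None:
            importance[name] += p.grad.abs()
    n_seen += 1

for name in importance:
    importance[name] /= max(n_seen, 1)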

How to check accuracy on BCELoss Pytorch?

I'm trying to use PyTorch to take a HeartDisease.csv and predict whether the patient has heart disease or not... the .csv provides 13 inputs and 1 target.
I'm using BCELoss and I'm having trouble understanding how to write an accuracy check function.
My num_samples is correct but not my num_correct. I think this is a result of not understanding the predictions tensor. Right now my num_correct is usually over 8000 while my num_samples is 303...
Any insight on how to write this check accuracy function is much appreciated
I wrote this in a Google Colab notebook.
# imports
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import pandas as pd

# create fully connected network
class NN(nn.Module):
    def __init__(self, input_size, num_classes):
        super(NN, self).__init__()
        self.outputs = nn.Linear(input_size, 1)

    def forward(self, x):
        x = self.outputs(x)
        return torch.sigmoid(x)

# set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# hyperparameters
input_size = 13   # 13 inputs
num_classes = 1   # heart disease or not
learning_rate = 0.001
batch_size = 64
num_epochs = 1

# load data
class MyDataset(Dataset):
    def __init__(self, root, n_inp):
        self.df = pd.read_csv(root)
        self.data = self.df.to_numpy()
        self.x, self.y = (torch.from_numpy(self.data[:, :n_inp]),
                          torch.from_numpy(self.data[:, n_inp:]))

    def __getitem__(self, idx):
        return self.x[idx, :], self.y[idx, :]

    def __len__(self):
        return len(self.data)

train_dataset = MyDataset("heart.csv", input_size)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = MyDataset("heart.csv", input_size)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

# initialize network
model = NN(input_size=input_size, num_classes=num_classes).to(device)

# loss and optimizer
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# train network
for epoch in range(num_epochs):
    for batch_idx, (data, targets) in enumerate(train_loader):
        # get data to cuda if possible
        data = data.to(device=device)
        targets = targets.to(device=device)

        # forward
        scores = model(data.float())
        targets = targets.float()
        loss = criterion(scores, targets)

        # backward
        optimizer.zero_grad()
        loss.backward()

        # grad descent or adam step
        optimizer.step()

# check accuracy of model
def check_accuracy(loader, model):
    num_correct = 0
    num_samples = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device)
            y = y.to(device=device)
            scores = model(x.float())
            _, predictions = scores.max(1)
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)
        print("Got {} / {} with accuracy {}".format(num_correct, num_samples, float(num_correct) / float(num_samples) * 100))
    model.train()

print("checking accuracy on training data")
check_accuracy(train_loader, model)
print("checking accuracy on test data")
check_accuracy(test_loader, model)
Note: Don't fool yourself. A single linear layer + a sigmoid + BCE loss = logistic regression. This is a linear model, so just take note of that when referring to it as a "neural network", which is a term usually reserved for similar networks but with at least one hidden layer and nonlinear activations.
The sigmoid layer at the end of your model's forward() function returns an (N,1)-sized tensor, where N is the batch size. In other words, it returns a scalar for every data point. Each scalar is a value between 0 and 1 (this is the range of the sigmoid function).
The idea is to interpret those scalars as probabilities corresponding to the positive class. Suppose 1 corresponds to heart disease, and 0 corresponds to no heart disease; heart disease is the positive class, and no heart disease is the negative class. Now suppose a score is 0.6. This might be interpreted as a 60% chance that the associated label is heart disease, and a 40% chance that the associated label is no heart disease. This interpretation of the sigmoid output is what motivates the BCE loss to begin with (it's ultimately just a negative log likelihood).
So what you might do is check if your scores are greater than 0.5. If so, predict heart disease. If not, predict no heart disease.
Right now, you're computing maximums from the scores across dimension 1, which does nothing because dimension 1 is already of size 1; taking the maximum of a single value simply gives you that value.
Try something like this:
def check_accuracy(loader, model):
    num_correct = 0
    num_samples = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device)
            y = y.to(device=device)
            scores = model(x.float())

            # Create a Boolean tensor (True for scores > 0.5, False otherwise)
            # and then cast it to a long tensor (True -> 1, False -> 0)
            predictions = (scores > 0.5).long()

            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)
    print("Got {} / {} with accuracy {}".format(num_correct, num_samples, float(num_correct) / float(num_samples) * 100))
    model.train()
You may also want to squeeze your prediction and target tensors to size (N) instead of (N,1), though I'm not sure it's necessary in your case.
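For completeness, a one-line version of that squeeze (assuming both scores and y are (N, 1) tensors, as they are with the dataset class in the question):

# Flatten both sides to shape (N,) before comparing, to rule out accidental
# (N, 1) vs (N,) broadcasting.
predictions = (scores > 0.5).long().view(-1)
num_correct += (predictions == y.view(-1)).sum()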

Val loss behaves strangely when using a custom training loop in TensorFlow 2.0

I'm using a VGG16 model written in TF 2.0 to train on my own datasets. Some BatchNormalization layers are included in the model, and the training argument is set to True during training time and False during validation time, as described in many tutorials.
The train_loss decreased to a certain level during training as expected. However, the val_loss behaves really strangely. I checked the output of the model after training and found that if I set the training argument to True, the output is quite correct, but if I set it to False, the result is completely wrong.
According to the tutorials on the TensorFlow website, when training is set to False, the model should normalize its inputs using the mean and variance of the moving statistics learned during training, but it doesn't seem to do so. Am I missing something?
I've provided the training and validation code below.
def train():
    logging.basicConfig(level=logging.INFO)
    tdataset = tf.data.Dataset.from_tensor_slices((train_img_list[:200], train_label_list[:200]))
    tdataset = tdataset.map(parse_function, 3).shuffle(buffer_size=200).batch(batch_size).repeat(repeat_times)
    vdataset = tf.data.Dataset.from_tensor_slices((val_img_list[:100], val_label_list[:100]))
    vdataset = vdataset.map(parse_function, 3).batch(batch_size)

    ### Vgg model
    model = VGG_PR(num_classes=num_label)
    logging.info('Model loaded')

    start_epoch = 0
    latest_ckpt = tf.train.latest_checkpoint(os.path.dirname(ckpt_path))
    if latest_ckpt:
        start_epoch = int(latest_ckpt.split('-')[1].split('.')[0])
        model.load_weights(latest_ckpt)
        logging.info('model resumed from: {}, start at epoch: {}'.format(latest_ckpt, start_epoch))
    else:
        logging.info('training from scratch since weights no there')

    ######## training loop ########
    loss_object = tf.keras.losses.MeanSquaredError()
    val_loss_object = tf.keras.losses.MeanSquaredError()
    optimizer = tf.keras.optimizers.Adam(learning_rate=initial_lr)
    train_loss = tf.metrics.Mean(name='train_loss')
    val_loss = tf.metrics.Mean(name='val_loss')
    writer = tf.summary.create_file_writer(log_path.format(case_num))
    with writer.as_default():
        for epoch in range(start_epoch, total_epoch):
            print('start training')
            try:
                for batch, data in enumerate(tdataset):
                    images, labels = data
                    with tf.GradientTape() as tape:
                        pred = model(images, training=True)
                        if len(pred.shape) == 2:
                            pred = tf.reshape(pred, [-1, 1, 1, num_label])
                        loss = loss_object(pred, labels)
                    gradients = tape.gradient(loss, model.trainable_variables)
                    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
                    if batch % 20 == 0:
                        logging.info('Epoch: {}, iter: {}, loss:{}'.format(epoch, batch, loss.numpy()))
                        tf.summary.scalar('train_loss', loss.numpy(), step=epoch*1250*repeat_times+batch)  # the tdataset has been repeated 5 times..
                        tf.summary.text('Zernike_coe_pred', tf.as_string(tf.squeeze(pred)), step=epoch*1250*repeat_times+batch)
                        tf.summary.text('Zernike_coe_gt', tf.as_string(tf.squeeze(labels)), step=epoch*1250*repeat_times+batch)
                        writer.flush()
                    train_loss(loss)
                model.save_weights(ckpt_path.format(epoch=epoch))
            except KeyboardInterrupt:
                logging.info('interrupted.')
                model.save_weights(ckpt_path.format(epoch=epoch))
                logging.info('model saved into {}'.format(ckpt_path.format(epoch=epoch)))
                exit(0)

            # validation step
            for batch, data in enumerate(vdataset):
                images, labels = data
                val_pred = model(images, training=False)
                if len(val_pred.shape) == 2:
                    val_pred = tf.reshape(val_pred, [-1, 1, 1, num_label])
                v_loss = val_loss_object(val_pred, labels)
                val_loss(v_loss)

            logging.info('Epoch: {}, average train_loss:{}, val_loss: {}'.format(epoch, train_loss.result(), val_loss.result()))
            tf.summary.scalar('val_loss', val_loss.result(), step=epoch)
            writer.flush()
            train_loss.reset_states()
            val_loss.reset_states()
            model.save_weights(ckpt_path.format(epoch=epoch))
The train loss is reduced to a very small value: the ground-truth labels are in the range [0, 1], and the average train loss can reach 0.007, but the val loss is much higher than this. The output of the model tends to be close to 0 if I set training to False.
Update on Nov. 6th:
I have found an interesting thing: if I use tf.function to decorate my model in its call method, the val loss becomes correct, but I'm not sure what has happened.
Mentioning the answer here for the benefit of the community: the issue is resolved, i.e., the val loss becomes correct, if tf.function is used to decorate the model in its call method.
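For reference, a minimal sketch of what that decoration looks like (the layer stack inside this VGG_PR stand-in is invented for illustration; only the @tf.function on call() reflects the reported fix):

import tensorflow as tf

class VGG_PR(tf.keras.Model):
    def __init__(self, num_classes):
        super().__init__()
        # Placeholder layers; the real model is a VGG16 variant with BatchNormalization.
        self.backbone = tf.keras.Sequential([
            tf.keras.layers.Conv2D(64, 3, padding="same", activation="relu"),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.GlobalAveragePooling2D(),
            tf.keras.layers.Dense(num_classes),
        ])

    @tf.function  # decorating call() is what made the validation loss behave correctly
    def call(self, inputs, training=False):
        return self.backbone(inputs, training=training)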
