Get the validation/train loss from a Skorch fit

Is there a way to get the train/validation loss from a Skorch fit into, e.g., a list (if you want to do some plotting or statistics)?

You can use the history (which allows slicing over time) to get this information. For example:
train_loss = net.history[:, 'train_loss']
which returns the train_loss for each recorded epoch.
Here is a complete example:
import numpy as np
import torch
import torch.nn.functional as F
from matplotlib import pyplot as plt
from sklearn.datasets import make_classification
from skorch import NeuralNetClassifier
from torch import nn

torch.manual_seed(0)

class ClassifierModule(nn.Module):
    def __init__(
        self,
        num_units=10,
        nonlin=F.relu,
        dropout=0.5,
    ):
        super(ClassifierModule, self).__init__()
        self.num_units = num_units
        self.nonlin = nonlin
        self.dropout = dropout

        self.dense0 = nn.Linear(20, num_units)
        self.nonlin = nonlin
        self.dropout = nn.Dropout(dropout)
        self.dense1 = nn.Linear(num_units, 10)
        self.output = nn.Linear(10, 2)

    def forward(self, X, **kwargs):
        X = self.nonlin(self.dense0(X))
        X = self.dropout(X)
        X = F.relu(self.dense1(X))
        X = F.softmax(self.output(X), dim=-1)
        return X

net = NeuralNetClassifier(
    ClassifierModule,
    max_epochs=20,
    lr=0.1,
    # device='cuda',  # uncomment this to train with CUDA
)

X, y = make_classification(1000, 20, n_informative=10, random_state=0)
X, y = X.astype(np.float32), y.astype(np.int64)

net.fit(X, y)

train_loss = net.history[:, 'train_loss']
valid_loss = net.history[:, 'valid_loss']

plt.plot(train_loss, 'o-', label='training')
plt.plot(valid_loss, 'o-', label='validation')
plt.legend()
plt.show()
The resulting plot shows the training and validation loss for each of the 20 recorded epochs.
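If you also want the recorded values in tabular form for further statistics, here is a small sketch (my addition, not part of the original answer; it assumes pandas is installed):

import pandas as pd

# Collect the per-epoch history entries into a DataFrame.
history_df = pd.DataFrame({
    'epoch': net.history[:, 'epoch'],
    'train_loss': net.history[:, 'train_loss'],
    'valid_loss': net.history[:, 'valid_loss'],
})
print(history_df.describe())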

Related

parameters() missing 1 required positional argument: 'self' even after instantiating the model

I'm trying to run my model once to see if it's working. I've searched for the error, and the suggested answers were to instantiate the model first. I already did that. However, even after instantiating the model, it gives an error when I request access to the model parameters. What's the problem?
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from IPython import display
display.set_matplotlib_formats('svg')

data = np.array([[1, 1]])
label = np.array([2])
for i in range(-50, 51, 1):
    data = np.append(data, [[i, i]], axis=0)
    label = np.append(label, [i + i])

# convert to tensor
T_data = torch.tensor(data).float()
T_label = torch.tensor(label).long()

# split data
train_data, test_data, train_label, test_label = train_test_split(T_data, T_label, test_size=.2)

# convert into PyTorch datasets
train_data = TensorDataset(train_data, train_label)
test_data = TensorDataset(test_data, test_label)

# translate into dataloaders
batchsize = 32
train_loader = DataLoader(train_data, batch_size=batchsize, shuffle=True, drop_last=True)
test_loader = DataLoader(test_data, batch_size=test_data.tensors[0].shape[0])

class AddNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.input = nn.Linear(2, 16)
        ## hidden layers
        self.fc1 = nn.Linear(16, 32)
        self.fc2 = nn.Linear(32, 1)
        # output layer
        self.output = nn.Linear(1, 1)

    # forward pass
    def forward(self, x):
        x = F.relu(self.input(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.output(x)

net = AddNN()
lossfun = nn.MSELoss()
optimizer = torch.optim.Adam(AddNN.parameters(), lr=.05)
It returns TypeError: parameters() missing 1 required positional argument: 'self'.
What does it mean to have a missing required positional argument here?
After instantiating the model, you should pass the model object (not the model class) to Adam, i.e. call .parameters() on the instance:
net = AddNN()
lossfun = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr = .05)
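As a quick sanity check (my addition, assuming the train_loader defined in the question), you can run a single batch through the fixed setup to confirm that the forward and backward passes work:

# Run one training step on a single batch.
X_batch, y_batch = next(iter(train_loader))
y_hat = net(X_batch)                                  # forward pass -> shape (batch, 1)
loss = lossfun(y_hat, y_batch.float().unsqueeze(1))   # MSELoss needs float targets of matching shape
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('one-step loss:', loss.item())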

Custom layer causes a batch dimension mismatch in PyTorch. How do I fix this?

I have tried to train a GCN model. I defined the custom layer I needed. However, it causes a dimension mismatch when I do batch training.
The code is as follows:
import math
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from tqdm import tqdm

# =============================================================================
# model define
# =============================================================================
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'

class GCN(nn.Module):
    def __init__(self, nfeat, nhid, nclass):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        self.linear = nn.Linear(nclass, 1)
        # self.dropout = dropout

    def forward(self, x, adj):
        x = F.relu(self.gc1(x, adj))
        # x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc2(x, adj))
        x = self.linear(x)
        return x

def train(dataloader, model, loss_fn, optimizer, adj):
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)
        # Compute prediction error
        pred = model(X, adj)
        loss = loss_fn(pred, y)
        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")

def test(dataloader, model, loss_fn, adj):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X, adj)
            test_loss += loss_fn(pred, y).item()
            # correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    # correct /= size
    # Accuracy: {(100*correct):>0.1f}%,
    print(f"Test Error: \n Avg loss: {test_loss:>8f} \n")
When I run the code:
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")
model = GCN(1, 1, 1).to(device)
print(model)
# model(X).shape

loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

epochs = 10
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(train_dataloader, model, loss_fn, optimizer, Adjacency_matrix)
    test(test_dataloader, model, loss_fn, Adjacency_matrix)
print("Done!")
I got an error. When I look into it, I find the model works fine when I drop the batch-size dimension. What do I need to do to tell the model that this dimension is the batch size, which should not take part in the computation?
The error you're seeing is due to trying to matrix-multiply a 3D tensor (your input) by your 2D weights.
To get around this you can simply reshape your data, since we only really care about the last dimension when doing matmuls:
def forward(self, input, adj):
    b_size = input.size(0)
    input = input.view(-1, input.shape[-1])
    support = torch.mm(input, self.weight)
    output = torch.spmm(adj, support)
    output = output.view(b_size, -1, output.shape[-1])
    if self.bias is not None:
        return output + self.bias
    else:
        return output
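An alternative sketch (my addition, not from the original answer) relies on torch.matmul broadcasting over the leading batch dimension. It assumes adj is a dense (N, N) tensor, since torch.spmm only handles 2D sparse inputs:

def forward(self, input, adj):
    # input: (batch, N, in_features); self.weight: (in_features, out_features)
    support = torch.matmul(input, self.weight)   # -> (batch, N, out_features)
    # adj: dense (N, N); matmul broadcasts it across the batch dimension
    output = torch.matmul(adj, support)          # -> (batch, N, out_features)
    if self.bias is not None:
        return output + self.bias
    return output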

Can't make regression work with PyTorch

I'm trying to do a nonlinear regression with PyTorch using this code:
import os; os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(1, 4)
        self.fc2 = nn.Linear(4, 4)
        self.fc3 = nn.Linear(4, 1)

    def forward(self, x):
        x = torch.sigmoid(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))
        x = self.fc3(x)
        return x

if __name__ == '__main__':
    net = Net()
    X = torch.tensor([[x] for x in np.arange(0, 10, 0.1)], dtype=torch.float32)
    Y = 10 * X**2

    learning_rate = 0.01
    EPOCHS = 100

    loss = nn.MSELoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)

    for epoch in range(EPOCHS):
        y_predicted = net(X)
        l = loss(Y, y_predicted)
        l.backward()
        optimizer.step()
        optimizer.zero_grad()

    with torch.no_grad():
        plt.plot(X.numpy(), Y.numpy())
        plt.plot(X.numpy(), net(X).numpy())
        plt.legend(['sample', 'prediction'])
        plt.show()
When I plot the prediction I get a straight line rather than a curve.
Can you please help me with the regression?
I realized that Y = 10 * X^2 is too difficult to regress since the target values are too big.
Conversely, Y = 0.01 * X^2 can be regressed pretty well. I still don't understand why the regression does not work well when the Y values are large.
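One common workaround (my addition, not part of the original answer) is to standardize the targets so the network only has to predict values of roughly unit scale, and then invert the scaling when plotting. A minimal sketch based on the code above:

# Standardize the targets before training, then undo the scaling for plotting.
Y_mean, Y_std = Y.mean(), Y.std()
Y_scaled = (Y - Y_mean) / Y_std

for epoch in range(EPOCHS):
    y_predicted = net(X)
    l = loss(Y_scaled, y_predicted)
    l.backward()
    optimizer.step()
    optimizer.zero_grad()

with torch.no_grad():
    plt.plot(X.numpy(), Y.numpy())
    plt.plot(X.numpy(), (net(X) * Y_std + Y_mean).numpy())  # map predictions back to the original scale
    plt.legend(['sample', 'prediction'])
    plt.show()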

TensorFlow 2.0: identical model structure and hyperparameters result in different performance with different calling approaches

Hi there. I am a beginner learning TensorFlow 2.0. I have one model that I call in three different ways, and the performance differs between them. Could anyone tell me why this is the case?
The first model construction and calling approach:
import os, sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, models, layers, regularizers, optimizers

def prepare_mnist_features_and_labels(x, y):
    x = tf.cast(x, tf.float32)/255.0
    y = tf.cast(y, tf.int64)
    return x, y

def mninst_dataset():
    (x_train, y_train), (x_eval, y_eval) = datasets.mnist.load_data()
    print('x_train/y_train shape:', x_train.shape, y_train.shape)
    y_train = tf.one_hot(y_train, depth=10)
    y_eval = tf.one_hot(y_eval, depth=10)
    ds_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    ds_train = ds_train.map(prepare_mnist_features_and_labels)
    ds_train = ds_train.shuffle(x_train.shape[0]).batch(128)
    ds_eval = tf.data.Dataset.from_tensor_slices((x_eval, y_eval))
    ds_eval = ds_eval.map(prepare_mnist_features_and_labels)
    ds_eval = ds_eval.shuffle(x_eval.shape[0]).batch(128)
    sample = next(iter(ds_train))
    print('sample: ', sample[0].shape, sample[1].shape)
    return ds_train, ds_eval

def main():
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    trainset, evalset = mninst_dataset()

    model = keras.Sequential()
    model.add(layers.Reshape(target_shape=[28, 28, 1], input_shape=[28, 28]))
    model.add(layers.Conv2D(filters=32, kernel_size=(5, 5), activation=tf.nn.relu, strides=[1,1], padding="SAME"))
    model.add(layers.MaxPool2D(pool_size=(2,2), strides=[1,1], padding="SAME"))
    model.add(layers.Conv2D(filters=64, kernel_size=(5, 5), activation=tf.nn.relu, strides=[1,1], padding="SAME"))
    model.add(layers.MaxPool2D(pool_size=(2,2), strides=[2,2], padding="SAME"))
    model.add(layers.Flatten())
    model.add(layers.Dense(units=512, activation=tf.nn.relu, kernel_regularizer=regularizers.l2(0.01)))
    model.add(layers.Dense(units=10, activation=tf.nn.relu, kernel_regularizer=regularizers.l2(0.01)))

    model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.losses.CategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
    model.fit(trainset.repeat(), epochs=30, steps_per_epoch=500,
              validation_data=evalset.repeat(), validation_steps=10)

if __name__ == '__main__':
    main()
The second approach to construct the model and run it is the following:
import os, sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, models, layers, regularizers, optimizers
from tqdm import tqdm

def prepare_mnist_features_and_labels(x, y):
    x = tf.cast(x, tf.float32)/255.0
    y = tf.cast(y, tf.int64)
    return x, y

def mnist_dataset():
    (x_train, y_train), (x_eval, y_eval) = datasets.mnist.load_data()
    # y_train = tf.one_hot(y_train, depth=10)
    # y_eval = tf.one_hot(y_eval, depth=10)
    ds_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    ds_train = ds_train.map(prepare_mnist_features_and_labels)
    # Test: replace x_train.shape[0] by the number of the training samples, which is 60000
    ds_train = ds_train.shuffle(x_train.shape[0]).batch(128)
    ds_eval = tf.data.Dataset.from_tensor_slices((x_eval, y_eval))
    ds_eval = ds_eval.map(prepare_mnist_features_and_labels)
    ds_eval = ds_eval.shuffle(x_eval.shape[0]).batch(128)
    # sample = next(iter(ds_train))
    # print('sample: ', sample[0].shape, sample[1].shape)
    return ds_train, ds_eval

# tf.nn.sparse_softmax_cross_entropy_with_logits(labels, logits, name=None):
#   labels: Tensor of shape [d_0, d_1, ..., d_{r-1}]. Each label must be an index in [0, num_classes)
#   logits: Unscaled log probabilities of shape [d_0, d_1, ..., d_{r-1}, num_classes]
#   A common use is to have logits of shape [batch_size, num_classes] and labels of shape [batch_size]
def compute_loss(logits, labels):
    # print(logits.numpy())
    # print(labels.numpy())
    return tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits
        )
    )

def compute_accuracy(logits, labels):
    predictions = tf.argmax(logits, axis=1)
    # print(predictions)
    # print(labels)
    # print(list(zip(predictions.numpy(), labels.numpy())))
    return tf.reduce_mean(tf.cast(tf.equal(predictions, labels), tf.float32))

def train_one_step(model, optimizer, x, y):
    # At each train step, first calculate the forward loss
    with tf.GradientTape() as tape:
        logits = model(x)
        loss = compute_loss(logits, y)
    # Then calculate the backward gradients over each trainable variable
    grads = tape.gradient(loss, model.trainable_variables)
    # Optimize and update variables through backpropagation
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    # Compute current model accuracy
    accuracy = compute_accuracy(logits, y)
    return loss, accuracy

def train(epoch, model, optimizer, trainset):
    # def train(epoch, model, optimizer):
    #     trainset = mnist_dataset()[0]
    loss = 0.0
    accuracy = 0.0
    # for step, (x, y) in enumerate(tqdm(trainset)):
    for step, (x, y) in enumerate(tqdm(trainset)):
        loss, accuracy = train_one_step(model, optimizer, x, y)
        if step % 110 == 0:
            print('epoch', epoch, ': loss', loss.numpy(), '; accuracy', accuracy.numpy())
    return loss, accuracy

class MyModel(keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.layer1 = layers.Conv2D(filters=32, kernel_size=(5, 5), activation=tf.nn.relu, strides=[1,1], padding="SAME", input_shape=(-1, 28, 28, 1))
        self.layer2 = layers.MaxPool2D(pool_size=(2,2), strides=[1,1], padding="SAME")
        self.layer3 = layers.Conv2D(filters=64, kernel_size=(5, 5), activation=tf.nn.relu, strides=[1,1], padding="SAME")
        self.layer4 = layers.MaxPool2D(pool_size=(2,2), strides=[2,2], padding="SAME")
        self.layer5 = layers.Flatten()
        self.layer6 = layers.Dense(units=512, activation=tf.nn.relu, kernel_regularizer=regularizers.l2(0.01))
        self.layer7 = layers.Dense(units=10, activation=tf.nn.relu, kernel_regularizer=regularizers.l2(0.01))

    def call(self, x, training=False):
        x = tf.reshape(x, (-1, 28, 28, 1))
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        x = self.layer7(x)
        return x

def main():
    # set random seed
    tf.random.set_seed(22)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    trainset, evalset = mnist_dataset()

    model = MyModel()
    optimizer = optimizers.Adam(lr=0.001)

    # Save checkpoints with keras api as the first approach
    # Save checkpoints manually as a second approach.
    # find a way to implement early-stopping strategy in the programming style
    # for epoch in tqdm(range(30)):
    for epoch in range(50):
        loss, accuracy = train(epoch, model, optimizer, trainset)
    print('Final epoch', epoch, ': loss', loss.numpy(), '; accuracy', accuracy.numpy())

if __name__ == '__main__':
    main()
And the last approach is below:
import os, sys
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, models, layers, regularizers, optimizers
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

def prepare_mnist_features_and_labels(x, y):
    x = tf.cast(x, tf.float32)/255.0
    y = tf.cast(y, tf.int64)
    return x, y

def mnist_dataset():
    (x_train, y_train), (x_eval, y_eval) = datasets.mnist.load_data()
    print('x_train/y_train shape:', x_train.shape, y_train.shape)
    y_train = tf.one_hot(y_train, depth=10)
    y_eval = tf.one_hot(y_eval, depth=10)
    ds_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    ds_train = ds_train.map(prepare_mnist_features_and_labels)
    ds_train = ds_train.shuffle(x_train.shape[0]).batch(128)
    ds_eval = tf.data.Dataset.from_tensor_slices((x_eval, y_eval))
    ds_eval = ds_eval.map(prepare_mnist_features_and_labels)
    ds_eval = ds_eval.shuffle(x_eval.shape[0]).batch(128)
    sample = next(iter(ds_train))
    print('sample: ', sample[0].shape, sample[1].shape)
    return ds_train, ds_eval

class MyModel(keras.Model):
    # self.model = keras.Sequential([
    #     layers.Reshape(target_shape=(28*28, ), input_shape=(28, 28)),
    #     layers.Dense(100, activation=tf.nn.relu),
    #     layers.Dense(100, activation=tf.nn.relu),
    #     layers.Desnse(10)
    # ])
    def __init__(self):
        super(MyModel, self).__init__()
        self.layer1 = layers.Conv2D(filters=32, kernel_size=(5, 5), activation=tf.nn.relu, strides=[1,1], padding="SAME", input_shape=(-1, 28, 28, 1))
        self.layer2 = layers.MaxPool2D(pool_size=(2,2), strides=[1,1], padding="SAME")
        self.layer3 = layers.Conv2D(filters=64, kernel_size=(5, 5), activation=tf.nn.relu, strides=[1,1], padding="SAME")
        self.layer4 = layers.MaxPool2D(pool_size=(2,2), strides=[2,2], padding="SAME")
        self.layer5 = layers.Flatten()
        self.layer6 = layers.Dense(units=512, activation=tf.nn.relu, kernel_regularizer=regularizers.l2(0.01))
        self.layer7 = layers.Dense(units=10, activation=tf.nn.relu, kernel_regularizer=regularizers.l2(0.01))

    def call(self, x, training=False):
        x = tf.reshape(x, (-1, 28, 28, 1))
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        x = self.layer7(x)
        return x

def main():
    tf.random.set_seed(22)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    trainset, evalset = mnist_dataset()

    model = MyModel()
    model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.losses.CategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
    model.fit(trainset.repeat(), epochs=30, steps_per_epoch=500, verbose=1,
              validation_data=evalset.repeat(), validation_steps=10)

if __name__ == '__main__':
    main()
Each of them takes a while to train. Can anyone tell me why the performance differs? And how should I debug this by myself in the future?
Thank you so much for any help.
The problem was solved after carefully examining the network. It turns out that the last fully connected layer in the model was activated with a ReLU function, which is not appropriate. The choice of loss function, tf.losses.CategoricalCrossentropy versus tf.nn.sparse_softmax_cross_entropy_with_logits, also makes a big difference. Whichever is chosen, make sure the loss function and the final output of the network match.
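For illustration, here is a minimal sketch (my addition, not code from the original answer) of how the last layer and the loss can be made consistent in the first (Sequential) approach: the final Dense layer outputs raw logits, and the loss applies the softmax internally via from_logits=True.

# Final layer with no activation: the model outputs raw logits.
model.add(layers.Dense(units=10, kernel_regularizer=regularizers.l2(0.01)))

# Loss that expects logits (and one-hot labels), so it applies softmax internally.
model.compile(
    optimizer=optimizers.Adam(lr=0.01),
    loss=tf.losses.CategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'],
)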

How to integrate LIME with PyTorch?

Using this MNIST image classification model:
%reset -f
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as data_utils
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from matplotlib import pyplot
from pandas import DataFrame
import torchvision.datasets as dset
import os
import torch.nn.functional as F
import time
import random
import pickle
from sklearn.metrics import confusion_matrix
import pandas as pd
import sklearn

trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
root = './data'
if not os.path.exists(root):
    os.mkdir(root)
train_set = dset.MNIST(root=root, train=True, transform=trans, download=True)
test_set = dset.MNIST(root=root, train=False, transform=trans, download=True)

batch_size = 64
train_loader = torch.utils.data.DataLoader(
    dataset=train_set,
    batch_size=batch_size,
    shuffle=True)
test_loader = torch.utils.data.DataLoader(
    dataset=test_set,
    batch_size=batch_size,
    shuffle=True)

class NeuralNet(nn.Module):
    def __init__(self):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(28*28, 500)
        self.fc2 = nn.Linear(500, 256)
        self.fc3 = nn.Linear(256, 2)

    def forward(self, x):
        x = x.view(-1, 28*28)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

num_epochs = 2
random_sample_size = 200

values_0_or_1 = [t for t in train_set if (int(t[1]) == 0 or int(t[1]) == 1)]
values_0_or_1_testset = [t for t in test_set if (int(t[1]) == 0 or int(t[1]) == 1)]

print(len(values_0_or_1))
print(len(values_0_or_1_testset))

train_loader_subset = torch.utils.data.DataLoader(
    dataset=values_0_or_1,
    batch_size=batch_size,
    shuffle=True)
test_loader_subset = torch.utils.data.DataLoader(
    dataset=values_0_or_1_testset,
    batch_size=batch_size,
    shuffle=False)

train_loader = train_loader_subset

# Hyper-parameters
input_size = 100
hidden_size = 100
num_classes = 2
# learning_rate = 0.00001
learning_rate = .0001

# Device configuration
device = 'cpu'
print_progress_every_n_epochs = 1

model = NeuralNet().to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

N = len(train_loader)
# Train the model
total_step = len(train_loader)

most_recent_prediction = []
test_actual_predicted_dict = {}

rm = random.sample(list(values_0_or_1), random_sample_size)
train_loader_subset = data_utils.DataLoader(rm, batch_size=4)

for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader_subset):
        # Move tensors to the configured device
        images = images.reshape(-1, 2).to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if (epoch) % print_progress_every_n_epochs == 0:
        print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, i+1, total_step, loss.item()))

    predicted_test = []
    model.eval()  # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
    probs_l = []

    predicted_values = []
    actual_values = []
    labels_l = []

    with torch.no_grad():
        for images, labels in test_loader_subset:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            predicted_test.append(predicted.cpu().numpy())
            sm = torch.nn.Softmax()
            probabilities = sm(outputs)
            probs_l.append(probabilities)
            labels_l.append(labels.cpu().numpy())

    predicted_values.append(np.concatenate(predicted_test).ravel())
    actual_values.append(np.concatenate(labels_l).ravel())

    if (epoch) % 1 == 0:
        print('test accuracy : ', 100 * len((np.where(np.array(predicted_values[0]) == (np.array(actual_values[0])))[0])) / len(actual_values[0]))
I'm attempting to integrate 'Local Interpretable Model-Agnostic Explanations for machine learning classifiers' (LIME): https://marcotcr.github.io/lime/
It appears PyTorch support is not provided, as it is not mentioned in the docs or in the following tutorial:
https://marcotcr.github.io/lime/tutorials/Tutorial%20-%20images.html
With my updated code for PyTorch:
from lime import lime_image
import time
explainer = lime_image.LimeImageExplainer()
explanation = explainer.explain_instance(images[0].reshape(28,28), model(images[0]), top_labels=5, hide_color=0, num_samples=1000)
This causes the error:
/opt/conda/lib/python3.6/site-packages/skimage/color/colorconv.py in gray2rgb(image, alpha)
830 is_rgb = False
831 is_alpha = False
--> 832 dims = np.squeeze(image).ndim
833
834 if dims == 3:
AttributeError: 'Tensor' object has no attribute 'ndim'
So it appears a TensorFlow object is expected here?
How can LIME be integrated with PyTorch image classification?
Here's my solution:
LIME expects an image input of type numpy array. This is why you get the attribute error, and a solution would be to convert the image (from a Tensor) to numpy before passing it to the explainer object. Another solution would be to select a specific image with the test_loader_subset and convert it with img = img.numpy().
Secondly, in order to make LIME work with PyTorch (or any other framework), you'll need to specify a batch prediction function which outputs the prediction scores of each class for each image. The name of this function (here I've called it batch_predict) is then passed to explainer.explain_instance(img, batch_predict, ...). batch_predict needs to loop through all images passed to it, convert them to Tensors, make a prediction, and finally return the list of prediction scores (as numpy values). This is how I got it working; a sketch is shown below.
Note also that the images need to have shape (... ,... ,3) or (... ,... ,1) in order to be properly segmented by the default segmentation algorithm. This means that you might have to use np.transpose(img, (...)). You may specify the segmentation algorithm as well if the results are poor.
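Here is a minimal sketch of such a batch_predict for the model above (my reconstruction, with shapes assumed from the MNIST setup in the question; model, device, explainer, and images come from the code already shown):

import numpy as np
import torch
import torch.nn.functional as F

def batch_predict(images):
    # images: a list/array of HxWxC numpy images generated by LIME's perturbations
    model.eval()
    batch = torch.stack(
        [torch.tensor(img, dtype=torch.float32)[:, :, 0] for img in images]
    )  # keep a single channel -> (n_samples, 28, 28)
    with torch.no_grad():
        logits = model(batch.to(device))   # NeuralNet.forward flattens to 28*28
        probs = F.softmax(logits, dim=1)   # class probabilities, shape (n_samples, 2)
    return probs.cpu().numpy()

# Usage: pass a 2D numpy image (LIME converts grayscale to RGB internally)
# together with the prediction function.
img = images[0].cpu().numpy().reshape(28, 28)
explanation = explainer.explain_instance(
    img, batch_predict, top_labels=2, hide_color=0, num_samples=1000
)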
Finally you'll need to display the LIME image mask on top of the original image. This snippet shows how this may be done:
from skimage.segmentation import mark_boundaries
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=False, num_features=5, hide_rest=False)
img_boundry = mark_boundaries(temp, mask)
plt.imshow(img_boundry)
plt.show()
This notebook is a good reference:
https://github.com/marcotcr/lime/blob/master/doc/notebooks/Tutorial%20-%20images%20-%20Pytorch.ipynb
