wandb pytorch: top1 accuracy per class - pytorch

I have 5 classes in the validation set and I want to draw a graph based on top-1 results per class in the validation loop using wandb. I have tried a single accuracy graph based on the average of the 5 classes and it works fine, but I want to do it separately, i.e. top-1 accuracy for each class. I have been unable to achieve this; is there any way to do it?
Validation Loader
val_nuisances = ['shape', 'pose', 'texture', 'context', 'weather']

val_loaders = []
for nuisance in val_nuisances:
    val_loaders.append((nuisance, torch.utils.data.DataLoader(
        datasets.ImageFolder(os.path.join(valdir, nuisance), transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True,
    )))
Validation Loop
def validate(val_loaders, model, criterion, args):
    overall_top1 = 0
    for nuisance, val_loader in val_loaders:
        batch_time = AverageMeter('Time', ':6.3f', Summary.NONE)
        losses = AverageMeter('Loss', ':.4e', Summary.NONE)
        top1 = AverageMeter('Acc#1', ':6.2f', Summary.AVERAGE)
        top5 = AverageMeter('Acc#5', ':6.2f', Summary.AVERAGE)
        progress = ProgressMeter(
            len(val_loader),
            [batch_time, losses, top1, top5],
            prefix=f'Test {nuisance}: ')

        # switch to evaluate mode
        model.eval()

        with torch.no_grad():
            end = time.time()
            for i, (images, target) in enumerate(val_loader):
                if args.gpu is not None:
                    images = images.cuda(args.gpu, non_blocking=True)
                if torch.cuda.is_available():
                    target = target.cuda(args.gpu, non_blocking=True)

                # compute output
                output = model(images)
                loss = criterion(output, target)

                # measure accuracy and record loss
                acc1, acc5 = accuracy(output, target, topk=(1, 5))
                losses.update(loss.item(), images.size(0))
                top1.update(acc1[0], images.size(0))
                top5.update(acc5[0], images.size(0))

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if i % args.print_freq == 0:
                    progress.display(i)

            progress.display_summary()

        overall_top1 += top1.avg

    overall_top1 /= len(val_loaders)
    return overall_top1

I don't see any logging to W&B in your code, but logging the top-1 accuracy per class would just be:
class_names = ['shape', 'pose', 'texture', 'context', 'weather']
top1_accuracies = [0.9, 0.8, 0.9, 0.9, 0.8]
wandb.log({class_names[0]: top1_accuracies[0], class_names[1]: top1_accuracies[1], ...})
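A more compact but equivalent way to write that (my own variant, assuming the two lists above have the same length) is to zip them into a dict:
wandb.log(dict(zip(class_names, top1_accuracies)))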
In the above example, it looks like you're not actually creating a variable for the top-1 accuracy of each class. You'll want to do that first. The following is taken from https://stackoverflow.com/a/50977153/3959708.
You can use sklearn's confusion matrix to get the per-class accuracy:
from sklearn.metrics import confusion_matrix
import numpy as np
y_true = [0, 1, 2, 2, 2]
y_pred = [0, 0, 2, 2, 1]
target_names = ['class 0', 'class 1', 'class 2']
#Get the confusion matrix
cm = confusion_matrix(y_true, y_pred)
#array([[1, 0, 0],
# [1, 0, 0],
# [0, 1, 2]])
# Now normalize the diagonal entries
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
#array([[1. , 0. , 0. ],
# [1. , 0. , 0. ],
# [0. , 0.33333333, 0.66666667]])
#The diagonal entries are the accuracies of each class
cm.diagonal()
#array([1. , 0. , 0.66666667])
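Putting the two pieces together, here is a minimal, self-contained sketch (the project name, targets, and predictions are placeholders, and it assumes you are already logged in to W&B) that computes per-class accuracy from a confusion matrix and logs one top-1 metric per class:
from sklearn.metrics import confusion_matrix
import numpy as np
import wandb

class_names = ['shape', 'pose', 'texture', 'context', 'weather']
y_true = [0, 1, 2, 3, 4, 0, 1]   # placeholder targets collected during validation
y_pred = [0, 1, 2, 3, 3, 0, 0]   # placeholder predictions (argmax of the model output)

wandb.init(project='per-class-top1')   # hypothetical project name

# rows = true class, columns = predicted class; the diagonal holds the hits
cm = confusion_matrix(y_true, y_pred, labels=range(len(class_names)))
per_class_acc = cm.diagonal() / cm.sum(axis=1)

# one scalar per class, so each class gets its own curve in the W&B dashboard
wandb.log({f'top1/{name}': acc for name, acc in zip(class_names, per_class_acc)})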

Related

Translating LSTM model from Keras to Pytorch

I am having a hard time translating a quite simple LSTM model from Keras to Pytorch. X (get it here) corresponds to 1152 samples of 90 timesteps; each timestep has only 1 dimension. y (here) is a single prediction at t = 91 for all 1152 samples.
In Keras:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
import numpy as np
import pandas as pd
X = pd.read_csv('X.csv', header = None).values
X.shape
y = pd.read_csv('y.csv', header = None).values
y.shape
# From Keras documentation [https://keras.io/layers/recurrent/]:
# Input shape 3D tensor with shape (batch_size, timesteps, input_dim).
X = np.reshape(X, (1152, 90, 1))
regressor = Sequential()
regressor.add(LSTM(units = 100, return_sequences = True, input_shape = (90, 1)))
regressor.add(Dropout(0.3))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.3))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.3))
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.3))
regressor.add(Dense(units = 1, activation = 'linear'))
regressor.compile(optimizer = 'rmsprop', loss = 'mean_squared_error', metrics = ['mean_absolute_error'])
regressor.fit(X, y, epochs = 10, batch_size = 32)
... leads me to:
# Epoch 10/10
# 1152/1152 [==============================] - 33s 29ms/sample - loss: 0.0068 - mean_absolute_error: 0.0628
Then in Pytorch:
import torch
from torch import nn, optim
import pandas as pd
from sklearn.metrics import mean_absolute_error

X = pd.read_csv('X.csv', header = None).values
y = pd.read_csv('y.csv', header = None).values
X = torch.tensor(X, dtype = torch.float32)
y = torch.tensor(y, dtype = torch.float32)
dataset = torch.utils.data.TensorDataset(X, y)
loader = torch.utils.data.DataLoader(dataset, batch_size = 32, shuffle = True)

class regressor_LSTM(nn.Module):
    def __init__(self):
        super().__init__()
        self.lstm1 = nn.LSTM(input_size = 1, hidden_size = 100)
        self.lstm2 = nn.LSTM(100, 50)
        self.lstm3 = nn.LSTM(50, 50, dropout = 0.3, num_layers = 2)
        self.dropout = nn.Dropout(p = 0.3)
        self.linear = nn.Linear(in_features = 50, out_features = 1)

    def forward(self, X):
        # From the Pytorch documentation [https://pytorch.org/docs/stable/_modules/torch/nn/modules/rnn.html]:
        # **input** of shape `(seq_len, batch, input_size)`
        X = X.view(90, 32, 1)
        # I am discarding hidden/cell states since in Keras I am using a stateless approach
        # [https://keras.io/examples/lstm_stateful/]
        X, _ = self.lstm1(X)
        X = self.dropout(X)
        X, _ = self.lstm2(X)
        X = self.dropout(X)
        X, _ = self.lstm3(X)
        X = self.dropout(X)
        X = self.linear(X)
        return X

regressor = regressor_LSTM()
criterion = nn.MSELoss()
optimizer = optim.RMSprop(regressor.parameters())

for epoch in range(10):
    running_loss = 0.
    running_mae = 0.
    for i, data in enumerate(loader):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = regressor(inputs)
        outputs = outputs[-1].view(*labels.shape)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        mae = mean_absolute_error(labels.detach().cpu().numpy().flatten(), outputs.detach().cpu().numpy().flatten())
        running_mae += mae
    print('EPOCH %3d: loss %.5f - MAE %.5f' % (epoch+1, running_loss/len(loader), running_mae/len(loader)))
... leads me to:
# EPOCH 10: loss 0.04220 - MAE 0.16762
You can see that both the loss and the MAE are quite different (Pytorch's are much higher). If I use the Pytorch model to predict the values, they all come back as a constant.
What am I doing wrong?
Oh, I believe I made considerable progress. It seems that the way to represent y is different between Keras and Pytorch. In Keras, we should pass it as a single value representing one timestep in the future (at least for the problem I am trying to solve). But in Pytorch, y must be X shifted one timestep into the future. It is like this:
time_series = [0, 1, 2, 3, 4, 5]
X = [0, 1, 2, 3, 4]
# Keras:
y = [5]
# Pytorch:
y = [1, 2, 3, 4, 5]
This way, Pytorch compares all values in the time slice when calculating the loss. I believe Keras rearranges the data under the hood to conform to this approach, as the code works when fed the variables just like that. But in Pytorch, I was estimating the loss based only on one value (the one I was trying to predict), not the whole series, so I believe it could not correctly capture the time dependency.
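A minimal sketch of that target layout (with a toy series, and shapes assumed to match the 90-step, 1-feature setup above):
import torch

series = torch.arange(91, dtype=torch.float32)   # toy time series with 91 points
X = series[:-1].view(1, 90, 1)                   # timesteps 0..89 as one sample
y = series[1:].view(1, 90, 1)                    # the same series shifted one step ahead

# the loss is then computed over the whole 90-step slice, not a single value
dataset = torch.utils.data.TensorDataset(X, y)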
When taking this into consideration, I got to:
EPOCH 100: loss 0.00551 - MAE 0.058435
And, most importantly, comparing true and predicted values on a separate dataset showed that the patterns were clearly captured by the model.
Hooray!

3-layer feedforward neural network not predicting regression values accurately

I'm pretty new to Tensorflow. Currently, I'm building a 3-layer network with 10 ReLU neurons in the hidden layer, mini-batch gradient descent with a batch size of 8, and an L2 regularisation weight decay parameter (beta) of 0.001. The Tensorflow version I'm using is 1.14 and I'm on Python 3.6.
The issue that boggles my mind is that my predicted values and testing errors are absolutely off the charts.
For example, I plotted out the test errors and the predicted vs target values for a sample size of 50, and this is what came out.
As you can see, both plots are way off, and I haven't had the slightest clue as to why.
Here's roughly what the dataset looks like. The first column is discarded as it is just a counter value, and the last column is the target.
My code:
NUM_FEATURES = 7
num_neuron = 10
batch_size = 8
beta = 0.001
learning_rate = 0.001
epochs = 4000
seed = 10
np.random.seed(seed)

# read and divide data into test and train sets
total_dataset = np.genfromtxt('dataset_excel.csv', delimiter=',')
X_data, Y_data = total_dataset[1:, 1:8], total_dataset[1:, -1]
Y_data = Y_data.reshape(Y_data.shape[0], 1)

# shuffle input, ensure both are shuffled with the same order
shufflestate = np.random.get_state()
np.random.shuffle(X_data)
np.random.set_state(shufflestate)
np.random.shuffle(Y_data)

# 70% used for training, 30% used for testing
trainX = X_data[:280]
trainY = Y_data[:280]
testX = X_data[280:]
testY = Y_data[280:]
trainX = (trainX - np.mean(trainX, axis=0)) / np.std(trainX, axis=0)

# Create the model
x = tf.placeholder(tf.float32, [None, NUM_FEATURES])
y_ = tf.placeholder(tf.float32, [None, 1])

# get 50 samples for plotting of predicted vs target values
limited50testX = testX[:50]
limited50testY = testY[:50]

# Hidden
with tf.name_scope('hidden'):
    weight1 = tf.Variable(tf.truncated_normal([NUM_FEATURES, num_neuron], stddev=1.0, name='weight1'))
    bias1 = tf.Variable(tf.zeros([num_neuron]), name='bias1')
    hidden = tf.nn.relu(tf.matmul(x, weight1) + bias1)

# output
with tf.name_scope('linear'):
    weight2 = tf.Variable(tf.truncated_normal([num_neuron, 1], stddev=1.0 / np.sqrt(float(num_neuron))), name='weight2')
    bias2 = tf.Variable(tf.zeros([1]), name='bias2')
    logits = tf.matmul(hidden, weight2) + bias2

ridgeLoss = tf.square(y_ - logits)
regularisation = tf.nn.l2_loss(weight1) + tf.nn.l2_loss(weight2)
loss = tf.reduce_mean(ridgeLoss + beta * regularisation)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(loss)
error = tf.reduce_mean(tf.square(y_ - logits))

N = len(trainX)
idx = np.arange(N)
predicted = []

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_err = []
    test_err_ = []
    for i in range(epochs):
        for batchStart, batchEnd in zip(range(0, trainX.shape[0], batch_size), range(batch_size, trainX.shape[0], batch_size)):
            train_op.run(feed_dict={x: trainX[batchStart:batchEnd], y_: trainY[batchStart:batchEnd]})
        err = error.eval(feed_dict={x: trainX, y_: trainY})
        train_err.append(err)
        if i % 100 == 0:
            print('iter %d: train error %g' % (i, train_err[i]))
        test_err = error.eval(feed_dict={x: testX, y_: testY})
        test_err_.append(test_err)
    predicted = sess.run(logits, feed_dict={x: limited50testX})
    print("predicted values: ", predicted)
    print("size of predicted values is", len(predicted))
    print("targets: ", limited50testY)
    print("size of target values is", len(limited50testY))

# plot predictions vs targets
numberList = np.arange(0, 50, 1).tolist()
predplot = plt.figure(1)
plt.plot(numberList, predicted, label='Predictions')
plt.plot(numberList, limited50testY, label='Targets')
plt.xlabel('50 samples')
plt.ylabel('Value')
plt.legend(loc='lower right')
predplot.show()

# plot training error
trainplot = plt.figure(2)
plt.plot(range(epochs), train_err)
plt.xlabel(str(epochs) + ' iterations')
plt.ylabel('Train Error')
trainplot.show()

# plot testing error
testplot = plt.figure(3)
plt.plot(range(epochs), test_err_)
plt.xlabel(str(epochs) + ' iterations')
plt.ylabel('Test Error')
testplot.show()
Not sure if that's it, but trainX is normalized whereas testX is not. You might want to apply the same normalization (using the statistics of the training set) to testX before predicting.
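For instance, a sketch of that change, keeping the variable names from the question (the statistics are computed on the training split only):
# compute the normalisation statistics on the training data only
train_mean = np.mean(X_data[:280], axis=0)
train_std = np.std(X_data[:280], axis=0)

trainX = (X_data[:280] - train_mean) / train_std
testX = (X_data[280:] - train_mean) / train_std   # same scaling applied to the test set
limited50testX = testX[:50]                        # re-slice the plotting sample after scaling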

InvalidArgumentError logits and labels must be same size: logits_size=[3215,25] labels_size=[10,25]

I was having quite a few errors (OOM, shape problems, etc.) which I had managed to fix somehow.
But I can't get my head around this error. I have searched quite a bit, and I have also tried the sparse cross-entropy with logits method in TensorFlow and the tf.squeeze function, but neither helped me resolve this error. Here is the link to the code (it's a GitHub gist with the entire stack trace and errors):
Code Link
Here is the link to the dataset (it's around 500 MB):
Dataset Link
Here is the Code (just in Case):
from PIL import Image
import numpy as np
import glob
from numpy import array
import pandas as pd
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
import h5py
import tensorflow as tf

def loading_saving_image_as_grayscale_train(img):
    ##combined_path='M:/PycharmProjects/AI+DL+CP/test_img'+img
    loading=Image.open(img)
    loading=loading.resize((28,28),Image.ANTIALIAS)
    loading=loading.convert('L')
    #loading.show()
    conversion_to_array=np.asarray(loading,dtype=float)
    train_data.append(conversion_to_array)

def loading_saving_image_as_grayscale_test(img):
    #combined_path = 'M:/PycharmProjects/AI+DL+CP/train_img/' + img
    #print(combined_path)
    loading=Image.open(img,'r')
    loading=loading.resize((28,28),Image.ANTIALIAS)
    loading=loading.convert('L')
    conversion_to_array=np.asarray(loading,dtype=float)
    test_data.append(conversion_to_array)

import os
import requests, zipfile, io
import pandas as pd

#url = requests.get('https://he-s3.s3.amazonaws.com/media/hackathon/deep-learning-challenge-1/identify-the-objects/a0409a00-8-dataset_dp.zip')
#data = zipfile.ZipFile(io.BytesIO(url.content))
#data.extractall()
#os.listdir()

dataframe1=pd.read_csv('test.csv')
dataframe1.index=dataframe1.index+1
only_index=dataframe['image_id']
test_data=[]
train_data=[]
train=glob.glob('train_img/*.png')
test=glob.glob('test_img/*.png')
#other=loading_saving_image_as_grayscale('M:/PycharmProjects/AI+DL+CP/test_img/test_1000b.png')
#print(Image.open('M:/PycharmProjects/AI+DL+CP/test_img/test_1000b.png'))
#print(test)
#loading_sample=Image.open('M:/PycharmProjects/AI+DL+CP/test_img/test_1000b.png')
#loading_sample.show()
#print(train)
#print(test)

for data in train:
    #print(data)
    loading_saving_image_as_grayscale_train(data)
for item in test:
    #print(item)
    loading_saving_image_as_grayscale_test(item)

#print(train_data)
#print(test_data)
'''with Image.fromarray(train_data[1]) as img:
    width,height=img.size
    print(width,height)
'''

def OneHot(label,n_classes):
    label=np.array(label).reshape(-1)
    label=np.eye(n_classes)[label]
    return label

dataframe=pd.read_csv('train.csv')
train_data=np.asarray(train_data)
test_data=np.asarray(test_data)
uni=dataframe['label']
dataframe1=pd.read_csv('test.csv')
dataframe1.index=dataframe1.index+1
only_index=dataframe['image_id']
label=LabelEncoder()
integer_encoding=label.fit_transform(uni)
#del uni
#del dataframe
#print(integer_encoding)
binary=OneHotEncoder(sparse=False)
integer_encoding=integer_encoding.reshape(len(integer_encoding),1)
onehot=binary.fit_transform(integer_encoding)
train_data=np.reshape(train_data,[-1,28,28,1])
test_data=np.reshape(test_data,[-1,28,28,1])
#onehot=np.reshape(onehot,[-1,10])
train_data=np.transpose(train_data,(0,2,1,3))
test_data=np.transpose(test_data,(0,2,1,3))
train_data=train_data.astype(np.float32)
test_data=test_data.astype(np.float32)
print(train_data.shape,test_data.shape,onehot.shape)

graph = tf.Graph()
with graph.as_default():
    # placeholders for input data batch_size x 28 x 28 x 1 and labels batch_size x 25
    data_placeholder = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    label_placeholder = tf.placeholder(tf.int32, shape=[None, 25])

    # defining decaying learning rate
    global_step = tf.Variable(0)
    decay_rate = tf.train.exponential_decay(1e-4, global_step=global_step, decay_steps=10000, decay_rate=0.97)

    layer1_weights = tf.Variable(tf.truncated_normal([3, 3, 1, 64],stddev=0.1))
    layer1_biases = tf.Variable(tf.constant(0.1, shape=[64]))
    layer2_weights = tf.Variable(tf.truncated_normal([3, 3, 64,32],stddev=0.1))
    layer2_biases = tf.Variable(tf.constant(0.1,shape=[32]))
    layer3_weights = tf.Variable(tf.truncated_normal([2, 2, 32, 20],stddev=0.1))
    layer3_biases = tf.Variable(tf.constant(0.1,shape=[20]))
    layer4_weights = tf.Variable(tf.truncated_normal([20,25],stddev=0.1))
    layer4_biases = tf.Variable(tf.constant(0.1,shape=[25]))
    layer5_weights = tf.Variable(tf.truncated_normal([25, 25], stddev=0.1))
    layer5_biases = tf.Variable(tf.constant(0.1, shape=[25]))

    def layer_multiplication(data_input_given):
        #Convolutional Layer 1
        #data_input_given=np.reshape(data_input_given,[-1,64,64,1])
        CNN1=tf.nn.relu(tf.nn.conv2d(data_input_given,layer1_weights,strides=[1,1,1,1],padding='SAME')+layer1_biases)
        print('CNN1 Done!!')

        #Pooling Layer
        Pool1=tf.nn.max_pool(CNN1,ksize=[1,4,4,1],strides=[1,4,4,1],padding='SAME')
        print('Pool1 DOne')

        #second Convolution layer
        CNN2=tf.nn.relu(tf.nn.conv2d(Pool1,layer2_weights,strides=[1,1,1,1],padding='SAME'))+layer2_biases
        print('CNN2 Done')

        #Second Pooling
        Pool2 = tf.nn.max_pool(CNN2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
        print('pool2 Done')

        #Third Convolutional Layer
        CNN3 = tf.nn.relu(tf.nn.conv2d(Pool2, layer3_weights, strides=[1, 1, 1, 1], padding='SAME')) + layer3_biases
        print('CNN3 Done')

        #Third Pooling Layer
        Pool3 = tf.nn.max_pool(CNN3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        print('Pool3 DOne')

        #Fully Connected Layer
        Pool4=tf.nn.max_pool(Pool3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
        FullyCon=tf.reshape(Pool4,[-1,20])
        FullyCon=tf.nn.relu(tf.matmul(FullyCon,layer4_weights)+layer4_biases)
        print('Fullyconnected Done')

        dropout = tf.nn.dropout(FullyCon, 0.4)
        dropout=tf.reshape(dropout,[-1,25])
        dropout=tf.matmul(dropout,layer5_weights)+layer5_biases
        #print(dropout.shape)
        return dropout

    train_input = layer_multiplication(train_data)
    print(train_input.shape)

    loss = (tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label_placeholder,logits=train_input))
            + 0.01 * tf.nn.l2_loss(layer1_weights)
            + 0.01 * tf.nn.l2_loss(layer2_weights)
            + 0.01 * tf.nn.l2_loss(layer3_weights)
            + 0.01 * tf.nn.l2_loss(layer4_weights)
            )
    #other=(tf.squeeze(label_placeholder))
    #print(tf.shape())

    optimizer = tf.train.GradientDescentOptimizer(name='Stochastic', learning_rate=decay_rate).minimize(loss,global_step=global_step)
    #print(train_input.shape)

batch_size = 10
num_steps=10000
prediction=[]

with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    print('Initialized')
    for i in range(num_steps):
        print("in loop")
        offset = (i * batch_size) % (onehot.shape[0] - batch_size)
        batch_data = train_data[offset:(offset + batch_size), :, :]
        batch_labels = onehot[offset:(offset + batch_size), :]
        print("training")
        feed_dict = {data_placeholder: batch_data, label_placeholder: batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_input], feed_dict=feed_dict)
        print(sess.run(tf.argmax(label_placeholder, 1), feed_dict={x:test_data}))
        prediction.append(sess.run(tf.argmax(label_placeholder,1),feed_dict={x:test_data}))
    print('Finished')

submit=pd.Dataframe({'image_id':only_index, 'label':prediction})
submit.to_csv('submit.csv',index=False)
I also had a doubt regarding predicting class labels. Can someone tell me whether the method I'm using for storing the predicted class labels will work or not?
The reshape operations do not make sense:
FullyCon=tf.reshape(Pool4,[-1,20])
This will collapse the batch dimension together with the feature dimensions.
Why would the output of Pool4 have 20 dimensions? The fact that it has 20 kernels does not mean it has 20 dimensions. The dimensionality is 20 * the size of the feature map at this level of the convolutions, which will be much bigger (my guess is it will be 6430).
It should be something along the lines of
output_shape = Pool4.shape[1] * Pool4.shape[2] * Pool4.shape[3]
FullyCon=tf.reshape(Pool4, [-1, output_shape])
and then you will have to change the final layer accordingly (to match the shapes).
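A sketch of that change, keeping the variable names from the question (the first fully connected weight matrix then has to be created with output_shape rows instead of 20):
# flatten Pool4 using its actual static shape, not the number of kernels
pool4_shape = Pool4.get_shape().as_list()          # [None, height, width, channels]
output_shape = pool4_shape[1] * pool4_shape[2] * pool4_shape[3]

FullyCon = tf.reshape(Pool4, [-1, output_shape])
layer4_weights = tf.Variable(tf.truncated_normal([output_shape, 25], stddev=0.1))
FullyCon = tf.nn.relu(tf.matmul(FullyCon, layer4_weights) + layer4_biases)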
The error was fixed after reshaping everything properly. Also, in the softmax-with-logits part, I had to send the data_placeholder through for the logits. After doing this, the issue was resolved.
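In code terms, that fix presumably amounts to building the logits from the placeholder instead of from the constant training array, so the feed_dict controls which batch flows through the network:
# hypothetical sketch of the fix described above, using the names from the question
train_input = layer_multiplication(data_placeholder)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    labels=label_placeholder, logits=train_input))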

TensorFlow: InvalidArgumentError: In[0] is not a matrix

I am new to TensorFlow and need to implement a deep neural network for a regression task. I assume there is no sample code on the internet where regression is performed using a deep neural network (at least I could not find any; please post any helpful link, if available). So I have tried to merge the tutorials on deep neural networks for classification and regression for my purpose. As expected, I am bombarded with errors. The error message reads:
InvalidArgumentError: In[0] is not a matrix
[[Node: MatMul_35 = MatMul[T=DT_FLOAT, transpose_a=false, transpose_b=false, _device="/job:localhost/replica:0/task:0/cpu:0"](_recv_Placeholder_36_0, Variable_72/read)]]
The code:
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt

n_nodes_hl1 = 100
n_nodes_hl2 = 100
batch_size = 100
n_input = 1
n_output = 1
learning_rate = 0.01

train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
                         7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
                         2.827,3.465,1.65,2.904,2.42,2.94,1.3])

x = tf.placeholder('float')
y = tf.placeholder('float')

def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([n_input, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    output = tf.reduce_sum(l2)
    return output

def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.square(y - prediction)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    hm_epochs = 5
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for (X, Y) in zip(train_X, train_Y):
                _, c = sess.run([optimizer, cost], feed_dict={x: X, y: Y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        plt.plot(train_X, train_Y, 'ro', label='Original data')
        plt.plot(train_X, prediction, label='Fitted line')
        plt.legend()
        plt.show()

        test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
        test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])
        print("Testing Data")
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: test_X, y: test_Y}))

train_neural_network(x)
As far as I can guess, there is an issue with the dimensions of the hidden layer weights and/or biases (I may be wrong).
Side note: Here I have just tried to make a simple model of my project where the training and testing data points have been taken from the internet examples. My actual data would be pixel values of several images.
The inputs to matmul() should be matrices, but you are feeding in single values. Change this line (works for me):
_, c = sess.run([optimizer, cost], feed_dict={x: [[X]], y: [[Y]]})
Output:
('Epoch', 0, 'completed out of', 5, 'loss:', array([[ 1.20472407e+14]], dtype=float32))
('Epoch', 1, 'completed out of', 5, 'loss:', array([[ 6.82631159]], dtype=float32))
('Epoch', 2, 'completed out of', 5, 'loss:', array([[ 8.83840561]], dtype=float32))
('Epoch', 3, 'completed out of', 5, 'loss:', array([[ 8.00222397]], dtype=float32))
('Epoch', 4, 'completed out of', 5, 'loss:', array([[ 7.6564579]], dtype=float32))
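An equivalent, slightly more explicit variant (my own phrasing of the same fix) reshapes each scalar into a 1x1 matrix before feeding it, inside the same training loop:
for (X, Y) in zip(train_X, train_Y):
    # each sample becomes a (1, 1) matrix, so matmul() gets a 2-D input
    _, c = sess.run([optimizer, cost],
                    feed_dict={x: numpy.reshape(X, (1, 1)),
                               y: numpy.reshape(Y, (1, 1))})
    epoch_loss += c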
Hope this helps!
Comment: This is not a good example to explore if you're going to work with images.

TensorFlow, making predictions using a trained network

So I am training a network to classify images in TensorFlow. After I trained the network, I began working on using it to classify other images. The goal is to import an image, feed it to the classifier, and have it print the result. I am having some trouble getting that part off the ground, though. Here is what I have so far. I found that having tf.argmax(y,1) gave an error, and changing it to 0 fixed that error. However, I am not convinced that it is actually working. I tossed 2 images through the classifier and they both got the same class even though they are vastly different. Just need some perspective here. Is this valid? Or is there something wrong here that will always feed me the same class (in this case I got class 0 for both of the images I tried)?
Is this even the right way to approach making predictions in TensorFlow? This is just the culmination of my debugging; I am not sure if it is what should be done or not.
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle

X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=20, random_state=0)
X_train, y_train = shuffle(X_train, y_train)

def LeNet(x):
    # Arguments used for tf.truncated_normal, randomly defines variables
    # for the weights and biases for each layer
    mu = 0
    sigma = 0.1

    # SOLUTION: Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b

    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)

    # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b

    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)

    # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # SOLUTION: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)

    # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b

    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)

    # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b

    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)

    # SOLUTION: Layer 5: Fully Connected. Input = 84. Output = 43.
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b

    return logits

import tensorflow as tf

x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)

EPOCHS = 10
BATCH_SIZE = 128
rate = 0.001

logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)

correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()

def evaluate(X_data, y_data):
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(EPOCHS):
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
        validation_accuracy = evaluate(X_validation, y_validation)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    saver.save(sess, './lenet')
    print("Model saved")

import cv2

image = cv2.imread('File path')
image = cv2.resize(image, (32, 32))  # classifier takes 32x32 images
image = np.array(image)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver3 = tf.train.import_meta_graph('./lenet.meta')
    saver3.restore(sess, "./lenet")
    pred = tf.nn.softmax(logits)
    predictions = sess.run(tf.argmax(y, 0), feed_dict={x: image})
    print(predictions)
So what had to happen here was to first clear the kernel and outputs. Somewhere along the way my placeholders got muddled up, and clearing the kernel fixed that right up. Then I had to realize what really needed to be done here: I had to call the softmax function on my new data.
Like this:
pred = tf.nn.softmax(logits)
classification = sess.run(pred, feed_dict={x: image_array})
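A slightly fuller sketch of that prediction step (assuming the logits, x placeholder, and saver from the code above; the image path is kept as a placeholder) adds the missing batch dimension and takes the argmax of the softmax output:
import cv2
import numpy as np

image = cv2.imread('File path')                        # placeholder path, as in the question
image = cv2.resize(image, (32, 32)).astype(np.float32)
image_array = image[np.newaxis, ...]                   # shape (1, 32, 32, 3): a batch of one

with tf.Session() as sess:
    saver.restore(sess, './lenet')                     # restore the trained weights, no re-init
    pred = tf.nn.softmax(logits)
    probabilities = sess.run(pred, feed_dict={x: image_array})
    print('predicted class:', int(np.argmax(probabilities, axis=1)[0]))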
