Tensorflow RNN stuck at 20% error - python-3.x

I created my first TensorFlow neural network, initially for generating sequences. It produced weird outputs, so I simplified it a lot to see whether it can reach an error rate of 0% with just 5 inputs and 5 output classes. Somehow it does not seem to backpropagate at all, because it is stuck at a 20% error rate without moving. If anyone can point me to my mistake, thank you in advance :)
Cheers
import numpy as np
import tensorflow as tf
import sys
trainingInputs = [
    [[0],[0],[0],[0]],
    [[1],[0],[0],[0]],
    [[0],[1],[0],[0]],
    [[0],[0],[1],[0]],
    [[0],[0],[0],[1]]]
trainingOutputs = [
    [1,0,0,0],
    [0,1,0,0],
    [0,0,1,0],
    [0,0,0,1],
    [0,0,0,0]]
data = tf.placeholder(tf.float32, [None, len(trainingInputs[0]),1])
target = tf.placeholder(tf.float32, [None, len(trainingOutputs[0])])
num_hidden = 24
cell = tf.contrib.rnn.LSTMCell(num_hidden,state_is_tuple=True)
val, _ = tf.nn.dynamic_rnn(cell, data, dtype=tf.float32)
val = tf.transpose(val, [1, 0, 2])
last = tf.gather(val, int(val.get_shape()[0]) - 1)
weight = tf.Variable(tf.truncated_normal([num_hidden, int(target.get_shape()[1])]))
bias = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))
prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
cross_entropy = -tf.reduce_sum(target * tf.log(tf.clip_by_value(prediction,1e-10,1.0)))
optimizer = tf.train.GradientDescentOptimizer(0.01)
minimize = optimizer.minimize(cross_entropy)
mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
init_op = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init_op)
batch_size = 1
no_of_batches = int((len(trainingInputs)) / batch_size)
def trainNetwork():
    epoch = 1000
    for i in range(epoch):
        ptr = 0
        for j in range(no_of_batches):
            inp, out = trainingInputs[ptr:ptr+batch_size], trainingOutputs[ptr:ptr+batch_size]
            ptr += batch_size
            sess.run(minimize, feed_dict={data: inp, target: out})

def generateOutput():
    incorrect = sess.run(error, {data: trainingInputs, target: trainingOutputs})
    sys.stdout.write('error {:3.1f}%'.format(100 * incorrect) + "\n")
    sys.stdout.flush()

for i in range(200):
    trainNetwork()
    generateOutput()
sess.close()

Related

PyTorch NN training issue: Loss of NN does not decrease

I want to classify random Instagram images as "image has a dog" or "image does not have a dog".
To train my NN to classify dogs I want to use the Stanford Dogs Dataset, so I have about 20,000 training images of dogs of different breeds.
But while training my NN the loss does not decrease; I checked that with different learning rates and with or without dropout layers.
Can anyone give tips or spot bugs in the following code?
import torch
import torchvision
from torchvision import transforms
from PIL import Image
from os import listdir
import os
import random
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
TRAINDATAPATH = 'C:/Users/.../Desktop/train/'
TESTDATAPATH = 'C:/Users/.../Desktop/#apfel/'
"""normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)"""
normalize = transforms.Normalize(
    mean=[0.5, 0.5, 0.5],
    std=[0.5, 0.5, 0.5]
)
transforms = transforms.Compose([transforms.Resize(256),
                                 transforms.CenterCrop(256),
                                 transforms.ToTensor(),
                                 normalize])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
train_data_list = []
target_list = []
train_data = []
batch_size = 1
files = listdir(TRAINDATAPATH)
for i in range(len(listdir(TRAINDATAPATH))):
    try:
        f = random.choice(files)
        files.remove(f)
        img = Image.open(TRAINDATAPATH + f)
        img_tensor = transforms(img)  # (3,256,256)
        train_data_list.append(img_tensor)
        isObj = 1 if 'obj' in f else 0
        isNotObj = 0 if 'obj' in f else 1
        target = [isObj, isNotObj]
        target_list.append(target)
        if len(train_data_list) >= 1:
            train_data.append((torch.stack(train_data_list), target_list))
            train_data_list = []
            target_list = []
            print('Loaded batch ', int(len(train_data)/batch_size), 'of ', int(len(listdir(TRAINDATAPATH))/batch_size))
            print('Percentage Done: ', 100*int(len(train_data)/batch_size)/int(len(listdir(TRAINDATAPATH))/batch_size), '%')
    except Exception:
        print("Error occurred but ignored")
        print(str(Exception))
        continue
class Netz(nn.Module):
    def __init__(self):
        super(Netz, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
        self.conv2 = nn.Conv2d(6, 12, kernel_size=5)
        self.conv3 = nn.Conv2d(12, 18, kernel_size=5)
        self.conv4 = nn.Conv2d(18, 24, kernel_size=5)
        self.fc1 = nn.Linear(3456, 1000)
        self.fc2 = nn.Linear(1000, 2)

    def forward(self, x):
        x = self.conv1(x)
        x = F.max_pool2d(x, 2)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.max_pool2d(x, 2)
        x = F.relu(x)
        x = self.conv3(x)
        x = F.max_pool2d(x, 2)
        x = F.relu(x)
        x = self.conv4(x)
        x = F.max_pool2d(x, 2)
        x = F.relu(x)
        x = x.view(-1, 3456)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return torch.sigmoid(x)
model = Netz()
model.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
optimizer = optim.Adadelta(model.parameters(), lr=10)
def train(epoch):
    global model
    model.train()
    batch_idx = 0
    for data, target in train_data:
        batch_idx += 1
        data = data.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
        target = torch.Tensor(target).to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
        data = Variable(data)
        target = Variable(target)
        optimizer.zero_grad()
        output = model(data)
        criterion = F.binary_cross_entropy
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        print('Train Epoch: ' + str(epoch) + '\tLoss: ' + str(loss.data.item()))

def test():
    global model
    model.eval()
    files = listdir(TESTDATAPATH)
    f = random.choice(files)
    img = Image.open(TESTDATAPATH + f)
    img_eval_tensor = transforms(img)
    img_eval_tensor.unsqueeze_(0)
    data = Variable(img_eval_tensor.to(torch.device("cuda" if torch.cuda.is_available() else "cpu")))
    out = model(data)
    string_prediction = str(out.data.max(0, keepdim=True)[1])
    print(string_prediction[9:10])

for epoch in range(1, 4):
    train(epoch)

i = 100
while i > 0:
    test()
    i -= 1
In TRAINDATAPATH there are thousands of dog images with filenames like "obj_XXX.jpg" and some other images WITHOUT dogs whose filenames don't include "obj".
In TESTDATAPATH there are just random images, some with dogs, some without.
The NN classifies them all as "not including dogs" or "0", which is incorrect.
Thanks for any help!
You are doing binary classification, but you are using two classes:
isObj = 1 if 'obj' in f else 0
isNotObj = 0 if 'obj' in f else 1
target = [isObj, isNotObj]
In the binary case, it should be a single class, where 1 means it is a dog and 0 means it is not. You are already doing that, but twice. You can remove isNotObj entirely and keep only isObj.
You need to adapt the model accordingly, such that it only predicts isObj; therefore fc2 should have only 1 output:
self.fc2 = nn.Linear(1000, 1)
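For instance, here is a minimal, self-contained sketch of how the single-class targets and the binary cross-entropy loss fit together (the filenames and the random stand-in for the model output are purely illustrative):
import torch
import torch.nn.functional as F
# Illustrative filenames; 'obj' in the name means the image contains a dog.
filenames = ['obj_001.jpg', 'cat_042.jpg']
# Single-class targets: 1.0 = dog, 0.0 = no dog (no separate isNotObj needed).
targets = torch.tensor([[1.0 if 'obj' in f else 0.0] for f in filenames])  # shape [2, 1]
# With fc2 = nn.Linear(1000, 1) and a final sigmoid, model(data) has shape [batch, 1];
# a random tensor is used here instead of a real forward pass.
outputs = torch.sigmoid(torch.randn(2, 1))
loss = F.binary_cross_entropy(outputs, targets)
print(loss.item())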
In the testing phase you need to make a prediction based on the single class, which can be seen as the probability of the image being a dog. Then you set a threshold above which you consider the model confident enough that it's actually a dog. To keep it balanced, the threshold is 0.5, so everything above that is a dog and everything below it is not. This can easily be achieved with torch.round:
# Size: [batch_size, 1]
out = model(data)
predictions = torch.round(out)
# Get rid of the singular dimension
# To get size: [batch_size]
predictions = predictions.squeeze(1)
Besides that, a learning rate of 10 is astronomically high; with a learning rate that far above 1, training has essentially no chance to converge. A more appropriate learning rate is around 0.01 or 0.001.
And on a side note, since you are new to PyTorch: please don't use Variable. It was deprecated with PyTorch 0.4.0, which was released over two years ago, and all of its functionality has been merged into tensors.
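Putting those two points together, a minimal sketch of an adjusted training step might look like this (the train_step helper and the lr value are illustrative choices, not part of the original code):
import torch
import torch.nn.functional as F
import torch.optim as optim
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def train_step(model, optimizer, data, target):
    # No Variable wrapper needed on PyTorch >= 0.4: plain tensors track gradients.
    data, target = data.to(device), target.to(device)
    optimizer.zero_grad()
    output = model(data)  # shape [batch, 1], values in (0, 1)
    loss = F.binary_cross_entropy(output, target)
    loss.backward()
    optimizer.step()
    return loss.item()
# Example wiring, assuming Netz has fc2 = nn.Linear(1000, 1):
# model = Netz().to(device)
# optimizer = optim.Adadelta(model.parameters(), lr=0.01)  # instead of lr=10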

I run this code and I get the following error. How do I fix this?

This is code to predict stock price movements using TensorFlow and the ReLU activation function. I run the following code:
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas_datareader as web
dataset = web.DataReader('AAPL', data_source = 'yahoo', start = '1989-01-01', end = '2019-12-25')
import math
close_price = dataset.filter(['Close']).values
data_train_len = math.ceil(len(close_price) * .8)
sc = MinMaxScaler(feature_range = (0, 1))
sc_data = sc.fit_transform(close_price)
data_train = sc_data[0 : data_train_len, : ]
xtrain = []
ytrain = []
for i in range(60, len(data_train)):
    xtrain.append(data_train[i - 60 : i, 0])
    ytrain.append(data_train[i, 0])
xtrain, ytrain = np.array(xtrain), np.array(ytrain)
xtrain = np.reshape(xtrain, (xtrain.shape[0], xtrain.shape[1], 1))
print(xtrain.shape, ytrain.shape)
data_test = sc_data[data_train_len - 60 : , :]
xtest = []
ytest = close_price[data_train_len :, :]
for i in range(60, len(data_test)):
    xtest.append(data_test[i - 60 : i, 0])
xtest = np.array(xtest)
xtest = np.reshape(xtest, (xtest.shape[0], xtest.shape[1], 1))
print(xtest.shape, ytest.shape)
# Number of stock in training data
n_stocks = xtrain.shape[1]
#Model architecture parameters
n_neurons_1 = 1024
n_neurons_2 = 512
n_neurons_3 = 256
n_neurons_4 = 128
# Session
sesh = tf.InteractiveSession()
# Define two variables as placeholders
a = tf.placeholder(dtype = tf.float32, shape = [None, n_stocks])
b = tf.placeholder(dtype = tf.float32, shape = [1, None])
# Initializers
sig = 1
weight_init = tf.variance_scaling_initializer(mode = "fan_avg", distribution = "uniform", scale = sig)
bias_init = tf.zeros_initializer()
# Hidden weights
w_hid_1 = tf.Variable(weight_init([n_stocks, n_neurons_1]))
bias_hid_1 = tf.Variable(bias_init([n_neurons_1]))
w_hid_2 = tf.Variable(weight_init([n_neurons_1, n_neurons_2]))
bias_hid_2 = tf.Variable(bias_init([n_neurons_2]))
w_hid_3 = tf.Variable(weight_init([n_neurons_2, n_neurons_3]))
bias_hid_3 = tf.Variable(bias_init([n_neurons_3]))
w_hid_4 = tf.Variable(weight_init([n_neurons_3, n_neurons_4]))
bias_hid_4 = tf.Variable(bias_init([n_neurons_4]))
# Output weights
w_out = tf.Variable(weight_init([n_neurons_4, 1]))
bias_out = tf.Variable(bias_init([1]))
# Hidden layers
hid_1 = tf.nn.relu(tf.add(tf.matmul(a, w_hid_1), bias_hid_1))
hid_2 = tf.nn.relu(tf.add(tf.matmul(hid_1, w_hid_2), bias_hid_2))
hid_3 = tf.nn.relu(tf.add(tf.matmul(hid_2, w_hid_3), bias_hid_3))
hid_4 = tf.nn.relu(tf.add(tf.matmul(hid_3, w_hid_4), bias_hid_4))
# Transposed Output layer
out = tf.transpose(tf.add(tf.matmul(hid_4, w_out), bias_out))
# Cost function
mse = tf.reduce_mean(tf.squared_difference(out, b))
rmse = tf.sqrt(tf.reduce_mean(tf.squared_difference(out, b)))
opt1 = tf.train.AdamOptimizer().minimize(mse)
opt2 = tf.train.AdamOptimizer().minimize(rmse)
sesh.run(tf.global_variables_initializer())
# Setup plot
plt.ion()
fig = plt.figure()
ax1 = fig.add_subplot(111)
line1, = ax1.plot(ytest)
line2, = ax1.plot(ytest * 0.5)
plt.show()
# Fitting neural network
batch_size = 256
mse_train = []
rmse_train = []
mse_test = []
rmse_test = []
# Run tensorflow
epochs = 10
for epoch in range(epochs):
    # Training data is shuffled
    shuffle_ind = np.random.permutation(np.arange(len(ytrain)))
    xtrain = xtrain[shuffle_ind]
    ytrain = ytrain[shuffle_ind]
    # Minibatch training
    for i in range(0, len(ytrain) // batch_size):
        start = i * batch_size
        batch_x = xtrain[start : start + batch_size]
        batch_y = ytrain[start : start + batch_size]
        # Run optimizer with batch
        sesh.run(opt1, feed_dict = {a : batch_x, b : batch_y})
        sesh.run(opt2, feed_dict = {a : batch_x, b : batch_y})
I get the following error:
ValueError: Cannot feed value of shape (256, 60, 1) for Tensor 'Placeholder_30:0', which has shape '(?, 60)'
This error appears for both of the last two lines under 'Run Optimizer with Batch'. How do I fix this?
It seems like you are trying to feed data whose shape doesn't match the placeholder (I think placeholder a). A simple fix is to change the placeholder to a = tf.placeholder(dtype = tf.float32, shape = [None, n_stocks, 1]), or to change the dimensions of xtrain and xtest (the lines that use reshape) by removing the last dimension with np.squeeze().
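A minimal sketch of the two options, using the variable names from the question (apply only one of them; the second one keeps the later tf.matmul calls, which expect 2-D input, working unchanged):
import numpy as np
import tensorflow as tf
n_stocks = 60  # sequence length used in the question
# Option 1: widen the placeholder so it accepts 3-D batches of shape (batch, 60, 1).
a = tf.placeholder(dtype=tf.float32, shape=[None, n_stocks, 1])
# Option 2: keep the 2-D placeholder and drop the trailing singleton axis from the data.
# a = tf.placeholder(dtype=tf.float32, shape=[None, n_stocks])
# xtrain = np.squeeze(xtrain, axis=2)  # (num_samples, 60, 1) -> (num_samples, 60)
# xtest = np.squeeze(xtest, axis=2)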

Tensorflow Logits and Labels must be broadcastable

I am very green working with TensorFlow and cannot seem to get past this error. I have been troubleshooting it for two days now and I can't get it to work. Can anyone see an issue with the code? I am using Python 3 via Jupyter Notebook. Thanks for the assistance.
Here is my code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("official/MNIST_data/", one_hot=True)
Extracting official/MNIST_data/train-images-idx3-ubyte.gz
Extracting official/MNIST_data/train-labels-idx1-ubyte.gz
Extracting official/MNIST_data/t10k-images-idx3-ubyte.gz
Extracting official/MNIST_data/t10k-labels-idx1-ubyte.gz
type(mnist)
tensorflow.contrib.learn.python.learn.datasets.base.Datasets
mnist.train.num_examples
55000
mnist.test.num_examples
10000
Preparation for building CNN model: define supporting Functions
Initialize weights in Filter
def initialize_weights(filter_shape):
    init_random_dist = tf.truncated_normal(filter_shape, stddev=.1)
    return tf.Variable(init_random_dist)

def initialize_bias(bias_shape):
    initial_bias_vals = tf.constant(.1, shape=bias_shape)
    return tf.Variable(initial_bias_vals)

def create_convolution_layer_and_compute_dot_product(inputs, filter_shape):
    filter_initialized_with_weights = initialize_weights(filter_shape)
    conv_layer_outputs = tf.nn.conv2d(inputs, filter_initialized_with_weights, strides=[1,1,1,1], padding='SAME')
    return conv_layer_outputs

def create_relu_layer_and_compute_dotproduct_plus_b(inputs, filter_shape):
    b = initialize_bias([filter_shape[3]])
    relu_layer_outputs = tf.nn.relu(inputs + b)
    return relu_layer_outputs

def create_maxpool2by2_and_reduce_spatial_size(inputs):
    pooling_layer_outputs = tf.nn.max_pool(inputs, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    return pooling_layer_outputs

def create_fully_conected_layer_and_compute_dotproduct_plus_bias(inputs, output_size):
    input_size = int(inputs.get_shape()[1])
    W = initialize_weights([input_size, output_size])
    b = initialize_bias([output_size])
    fc_xW_plus_bias_outputs = tf.matmul(inputs, W) + b
    return fc_xW_plus_bias_outputs
Build the Convolutional Neural Network
x = tf.placeholder(tf.float32, shape = [None, 784])
y_true = tf.placeholder(tf.float32, [None, 10])
x_image = tf.reshape(x, [-1,28,28,1])
conv_layer_1_outputs \
= create_convolution_layer_and_compute_dot_product(x_image, filter_shape=[5,5,1,32])
conv_relu_layer_1_outputs \
= create_relu_layer_and_compute_dotproduct_plus_b(conv_layer_1_outputs, filter_shape=[5,5,1,32])
pooling_layer_1_ouptuts = create_maxpool2by2_and_reduce_spatial_size(conv_relu_layer_1_outputs)
conv_layer_2_outputs \
= create_convolution_layer_and_compute_dot_product(conv_layer_1_outputs, filter_shape=[5,5,32,64])
conv_relu_layer_2_outputs \
= create_relu_layer_and_compute_dotproduct_plus_b(conv_layer_2_outputs, filter_shape=[5,5,32,64])
pooling_layer_2_outputs = create_maxpool2by2_and_reduce_spatial_size(conv_relu_layer_2_outputs)
pooling_layer_2_outputs_flat=tf.reshape(pooling_layer_2_outputs, [-1,7*7*64])
fc_layer_1_outputs \
= create_fully_conected_layer_and_compute_dotproduct_plus_bias(pooling_layer_2_outputs_flat, output_size=1024)
fc_relu_layer_1_outputs = tf.nn.relu(fc_layer_1_outputs)
hold_prob = tf.placeholder(tf.float32)
fc_dropout_outputs = tf.nn.dropout(fc_layer_1_outputs, keep_prob=hold_prob)
y_pred = create_fully_conected_layer_and_compute_dotproduct_plus_bias(fc_dropout_outputs, output_size=10)
softmax_cross_entropy_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=y_pred)
cross_entropy_mean = tf.reduce_mean(softmax_cross_entropy_loss)
optimizer = tf.train.AdamOptimizer(learning_rate=.001)
cnn_trainer = optimizer.minimize(cross_entropy_mean)
vars_initializer = tf.global_variables_initializer()
steps = 5000
Run tf.session to train and test deep learning CNN model
with tf.Session() as sess:
    sess.run(vars_initializer)
    for i in range(steps):
        batch_x, batch_y = mnist.train.next_batch(50)
        sess.run(cnn_trainer, feed_dict={x: batch_x, y_true: batch_y, hold_prob: .5})
        if i % 100 == 0:
            print('ON STEP: {}'.format(i))
            print('ACCURACY: ')
            matches = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
            acc = tf.reduce_mean(tf.cast(matches, tf.float32))
            test_accuracy = sess.run(acc, feed_dict = {x: mnist.test.images, y_true: mnist.test.labels, hold_prob: 1.0})
            print(test_accuracy)
            print('\n')
Here is the exact error message:
InvalidArgumentError: logits and labels must be broadcastable: logits_size=[200,10] labels_size=[50,10]
[[node softmax_cross_entropy_with_logits_7 (defined at <ipython-input-162-3d06fe78186c>:1) = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](add_31, softmax_cross_entropy_with_logits_7/Reshape_1)]]
Posting this in case someone else is having similar issues.
The error should read "Dumb User" lol. I passed the wrong variable into the second layer. Because the un-pooled 28x28 output of the first convolution was fed into the second one, the tensor reaching the [-1, 7*7*64] reshape was 14x14x64 instead of 7x7x64, so the flatten produced four times as many rows as there were images: 200 logits for a batch of 50 labels, which is exactly the mismatch in the error.
pooling_layer_1_ouptuts = create_maxpool2by2_and_reduce_spatial_size(conv_relu_layer_1_outputs)
conv_layer_2_outputs \
= create_convolution_layer_and_compute_dot_product(conv_layer_1_outputs, filter_shape=[5,5,32,64])
should be:
pooling_layer_1_ouptuts = create_maxpool2by2_and_reduce_spatial_size(conv_relu_layer_1_outputs)
conv_layer_2_outputs \
= create_convolution_layer_and_compute_dot_product(pooling_layer_1_ouptuts , filter_shape=[5,5,32,64])

Not found: Key Variable_<x> not found in checkpoint

I am trying to save a trained model and use it later in another instance (function). But somehow this throws me a "variable not found" error. After reading through SO and other forums, I understand the problem is the way I store it.
dictionary, reverse_dictionary = build_dataset(training_data)
vocab_size = len(dictionary)
n_input = 3
n_hidden = 512
# RNN output node weights and biases
weights = {'out': tf.Variable(tf.random_normal([n_hidden, vocab_size]))}
biases = {'out': tf.Variable(tf.random_normal([vocab_size]))}
# tf Graph input
x = tf.placeholder("float", [None, n_input, 1])
y = tf.placeholder("float", [None, vocab_size])
# RNN implementation in Tensorflow
def RNN(x, weights, biases):
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(x, n_input, 1)
    rnn_cell = rnn.BasicLSTMCell(n_hidden)
    outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
pred = RNN(x, weights, biases)
learning_rate = 0.001
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost)
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
training_iters = 1000
display_step = 500
saver = tf.train.Saver()
# Launch the graph
with tf.Session() as session:
    session.run(init)
    step = 0
    offset = random.randint(0, n_input+1)
    end_offset = n_input + 1
    acc_total = 0
    loss_total = 0
    while step < training_iters:
        if offset > (len(training_data)-end_offset):
            offset = random.randint(0, n_input+1)
        symbols_in_keys = [ [dictionary[str(training_data[i])]] for i in range(offset, offset+n_input) ]
        symbols_in_keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, 1])
        symbols_out_onehot = np.zeros([vocab_size], dtype=float)
        symbols_out_onehot[dictionary[str(training_data[offset+n_input])]] = 1.0
        symbols_out_onehot = np.reshape(symbols_out_onehot, [1, -1])
        _, acc, loss, onehot_pred = session.run([optimizer, accuracy, cost, pred], \
                                                feed_dict={x: symbols_in_keys, y: symbols_out_onehot})
        loss_total += loss
        acc_total += acc
        if (step+1) % display_step == 0:
            print("Iter= " + str(step+1) + ", Average Loss= " + \
                  "{:.6f}".format(loss_total/display_step) + ", Average Accuracy= " + \
                  "{:.2f}%".format(100*acc_total/display_step))
            acc_total = 0
            loss_total = 0
            symbols_in = [training_data[i] for i in range(offset, offset + n_input)]
            symbols_out = training_data[offset + n_input]
            symbols_out_pred = reverse_dictionary[int(tf.argmax(onehot_pred, 1).eval())]
            print("%s - [%s] vs [%s]" % (symbols_in, symbols_out, symbols_out_pred))
        step += 1
        offset += (n_input+1)
    saver.save(session, 'userLocation/Model')
The model files are generated, but when I try to restore the model using
saver = tf.train.Saver()
with tf.Session() as restored_session:
    saver.restore(restored_session, 'userLocation/Model')
Error
tensorflow.python.framework.errors_impl.NotFoundError: Key Variable_3 not found in checkpoint
[[Node: save_1/RestoreV2_7 = RestoreV2[dtypes=[DT_FLOAT], _device="/job:localhost/replica:0/task:0/cpu:0"](_recv_save_1/Const_0, save_1/RestoreV2_7/tensor_names, save_1/RestoreV2_7/shape_and_slices)]]
Any pointers as to what I am missing while saving?
I will explain this in 2 different parts -
When you save a model in TensorFlow, it saves the graph in one file (usually with the extension .meta) and the variable tensors in other files (usually the index/data files).
Now, while importing you have to do the same 2-step process - a) import the graph first, b) then create a session and restore the variables.
Here is some sample code -
import tensorflow as tf
import numpy as np
tf.set_random_seed(10)

# define graph location in variable
meta_file = 'userLocation/Model.meta'

# importing the graph
ns = tf.train.import_meta_graph(meta_file, clear_devices=True)

# create a session
with tf.Session().as_default() as sess:
    # import variables
    ns.restore(sess, meta_file[0:len(meta_file)-5])
    # for example, if you have an 'x' tensor in the graph
    x = tf.get_default_graph().get_tensor_by_name("x:0")
    .
    .
    .
    # Further processing/prediction etc
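One caveat: get_tensor_by_name("x:0") only works if the placeholder was given that name when the graph was built. A small sketch of naming the tensors at save time (w and pred here are stand-ins, not the original RNN):
import tensorflow as tf
# Give tensors explicit names when building the graph so they can be
# looked up by name after import_meta_graph.
x = tf.placeholder("float", [None, 3, 1], name="x")
w = tf.Variable(tf.random_normal([1]), name="w")
pred = tf.identity(tf.reduce_sum(x * w, axis=[1, 2]), name="pred")
saver = tf.train.Saver()
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    saver.save(session, 'userLocation/Model')
    # After restoring elsewhere: tf.get_default_graph().get_tensor_by_name("pred:0")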

TensorFlow, making predictions using a trained network

So I am training a network to classify images in TensorFlow. After I trained the network I began working on using it to classify other images. The goal is to import an image, feed it to the classifier, and have it print the result. I am having some trouble getting that part off the ground, though. Here is what I have so far. I found that having tf.argmax(y,1) gave an error; changing it to 0 fixed that error. However, I am not convinced that it is actually working. I tossed 2 images through the classifier and they both got the same class even though they are vastly different. Just need some perspective here. Is this valid? Or is there something wrong here that will always feed me the same class (in this case I got class 0 for both of the images I tried)?
Is this even the right way to approach making predictions in TensorFlow? This is just the culmination of my debugging, not sure if it is what should be done or not.
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
X_train,X_validation,y_train,y_validation=train_test_split(X_train,y_train, test_size=20,random_state=0)
X_train, y_train = shuffle(X_train, y_train)
from tensorflow.contrib.layers import flatten  # provides the flatten() used below (TF 1.x)

def LeNet(x):
    # Arguments used for tf.truncated_normal, randomly defines variables
    # for the weights and biases for each layer
    mu = 0
    sigma = 0.1
    # SOLUTION: Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)
    # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)
    # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # SOLUTION: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)
    # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b
    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)
    # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b
    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)
    # SOLUTION: Layer 5: Fully Connected. Input = 84. Output = 43.
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    return logits
import tensorflow as tf
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
EPOCHS=10
BATCH_SIZE=128
rate = 0.001
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(EPOCHS):
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
        validation_accuracy = evaluate(X_validation, y_validation)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    saver.save(sess, './lenet')
    print("Model saved")
import cv2
image = cv2.imread('File path')
image = cv2.resize(image, (32, 32))  # classifier takes 32x32 images
image = np.array(image)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver3 = tf.train.import_meta_graph('./lenet.meta')
    saver3.restore(sess, "./lenet")
    pred = tf.nn.softmax(logits)
    predictions = sess.run(tf.argmax(y, 0), feed_dict={x: image})
    print(predictions)
So what had to happen here was first to clear the kernel and outputs. Somewhere along the way my placeholders got muddled up, and clearing the kernel fixed that right up. Then I had to realize what really had to be done here: I had to call the softmax function on my new data.
Like this:
pred = tf.nn.softmax(logits)
classification = sess.run(pred, feed_dict={x: image_array})
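For completeness, a small sketch of turning that softmax output into a class index, assuming the restored session and the x/logits tensors from the code above (image_array and its preprocessing are illustrative):
import numpy as np
# The placeholder x expects a batch of shape (None, 32, 32, 3), so add a batch
# dimension and match the float32 dtype before feeding the image.
image_array = np.expand_dims(image.astype(np.float32), axis=0)  # (1, 32, 32, 3)
pred = tf.nn.softmax(logits)
probabilities = sess.run(pred, feed_dict={x: image_array})  # shape (1, 43)
predicted_class = int(np.argmax(probabilities, axis=1)[0])
print(predicted_class)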
