AdamOptimizer returns invalid datatype error - python-3.x

I have been struggling to fix this for the past couple of days; any help would be appreciated.
I have a network that outputs locations, i.e. x,y coordinates in an image, hence the int32 datatype. The output is of size (2, 2). The ground truth also holds the locations as int32.
I then run the following two lines of code:
cost = tf.reduce_mean(tf.square(y_true - y_pred_cls))
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
and receive the following error:
ValueError: Invalid type tf.int32 for Mean_7:0, expected: [tf.float32, tf.float64, tf.float16, tf.bfloat16].
I understand that the mean should be a float value, but if I convert the predicted values to float, I start getting a 'No gradients provided' error.
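For reference, the conversion I tried looks like this; it removes the dtype error, but presumably because y_pred_cls is built from top_k indices there is no gradient path back to the weights, hence the second error:
y_true_f = tf.cast(y_true, tf.float32)
y_pred_f = tf.cast(y_pred_cls, tf.float32)
cost = tf.reduce_mean(tf.square(y_true_f - y_pred_f))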
I have also tried TensorFlow's softmax_cross_entropy_with_logits_v2 to turn my problem into a classification problem, but got the same error. I'd really appreciate any help with the solution.
Code
Code for Input and Labels
x_p = tf.placeholder(tf.float32, shape=[None, img_size_flat*num_channels], name='x_pred')
x_pred_image = tf.reshape(x_p, [-1, 10, 10, num_channels])
k = tf.constant(npatches)
y_true = tf.placeholder(tf.int32, shape=[None, 2, 2], name='y_true')
y_true_cls = tf.placeholder(tf.int32, shape=[None, 2, 2], name='y_true_cls')
Convolution Layer
def new_weights(shape, layer_name):
    initializer = tf.contrib.layers.xavier_initializer()
    return tf.Variable(initializer(shape), name=layer_name+'_W')

def new_bias(length, layer_name):
    return tf.Variable(tf.constant(0.05, shape=[length]), name=layer_name+'_b')

def new_conv_layer(input,
                   num_input_channels,
                   filter_size,
                   num_filters,
                   name_scope,
                   layer_name='',
                   use_pooling=True):
    with tf.name_scope(name_scope):
        shape = [filter_size, filter_size, num_input_channels, num_filters]
        weights = new_weights(shape, layer_name)
        biases = new_bias(num_filters, layer_name)
        layer = tf.add(tf.nn.conv2d(input=input, filter=weights, strides=[1,1,1,1], padding='SAME'), biases, name=layer_name)
        if use_pooling:
            layer = tf.nn.max_pool(value=layer,
                                   ksize=[1, 3, 3, 1],
                                   strides=[1, 2, 2, 1],
                                   padding='SAME', name=layer_name+'_max')
        layer = tf.nn.relu(layer, name=layer_name+'_activation')
        return layer, weights
Fully Connected Layer
def new_fc_layer(input,
                 num_inputs,
                 num_outputs,
                 name_scope,
                 layer_name='',
                 use_relu=True):
    with tf.name_scope(name_scope):
        weights = new_weights([num_inputs, num_outputs], layer_name)
        biases = new_bias(num_outputs, layer_name)
        layer = tf.add(tf.matmul(input, weights), biases, name=layer_name)
        # layer = tf.matmul(input, weights) + biases
        if use_relu:
            layer = tf.nn.relu(layer, layer_name+'_activation')
        return layer
Custom Layer Code
with tf.name_scope(name_scope):
    resh_inp = tf.reshape(input, [-1, 10, 10])
    input_shape = tf.shape(resh_inp)
    rows, cols = input_shape[1], input_shape[2]
    d_rows, d_cols = 2, 2
    subm_rows, subm_cols = rows - d_rows + 1, cols - d_cols + 1
    ii, jj = tf.meshgrid(tf.range(subm_rows), tf.range(subm_cols), indexing='ij')
    d_ii, d_jj = tf.meshgrid(tf.range(d_rows), tf.range(d_cols), indexing='ij')
    subm_ii = ii[:, :, tf.newaxis, tf.newaxis] + d_ii
    subm_jj = jj[:, :, tf.newaxis, tf.newaxis] + d_jj
    subm = tf.gather_nd(resh_inp[0, :, :], tf.stack([subm_ii, subm_jj], axis=-1), name=layer_name + "_gather")
    subm_sum = tf.reduce_sum(subm, axis=(2, 3), name=layer_name + "_subm_sum")
    _, top_idx = tf.nn.top_k(tf.reshape(subm_sum, [-1]), tf.minimum(k, tf.size(subm_sum)), name=layer_name + "_top_idx")
    top_row = top_idx // subm_cols
    top_col = top_idx % subm_cols
    result = tf.stack([top_row, top_col], axis=-1, name=layer_name + "_result")
    result_shape = tf.shape(result)
    result = tf.reshape(result, [-1, result_shape[0], result_shape[1]], name=layer_name + "_shaped_result")
The result of the code above is passed as y_pred_cls in the code below:
cost = tf.reduce_mean(tf.square(y_true - y_pred_cls))
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
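One direction I am considering, in case it helps frame the question: replacing the hard top_k lookup with a soft-argmax over the window scores, so the predicted location stays float-valued and differentiable. This is only a sketch for a single location, not working code from my model:
# hypothetical soft-argmax over the flattened window scores `subm_sum`;
# `subm_cols` is the window-column count from the custom layer above
scores = tf.reshape(subm_sum, [-1])
probs = tf.nn.softmax(scores)                             # differentiable weights
idx = tf.cast(tf.range(tf.size(scores)), tf.float32)
cols_f = tf.cast(subm_cols, tf.float32)
soft_row = tf.reduce_sum(probs * tf.floor(idx / cols_f))  # expected row index
soft_col = tf.reduce_sum(probs * tf.mod(idx, cols_f))     # expected column index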

Related

Type error while calculating VGG loss for SRGAN

I am trying to train an SRGAN model and am getting the following error while trying to implement the VGG loss on the generated output:
TypeError: Inputs to a layer should be tensors. Got: <keras.engine.functional.Functional object at 0x000001F98244C3D0>
I tried converting the output of the generator to tensors but it did not work.
Here is the code for generator and vgg loss:
#generator
num_res_block = 16
def generator(gen_ip, num_res_block):
    model = Convolution2D(filters = 64, kernel_size = 9, strides = 1, padding = "same")(gen_ip)
    model = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(model)
    gen_model = model
    # Using 16 Residual Blocks
    for index in range(num_res_block):
        model = res_block(model)
    model = Convolution2D(filters = 64, kernel_size = 3, strides = 1, padding = "same")(model)
    model = BatchNormalization(momentum = 0.5)(model)
    model = add([gen_model, model])
    # Using 2 UpSampling Blocks
    for index in range(2):
        model = up_sampling_block(model, 3, 256, 1)
    model = Convolution2D(filters = 3, kernel_size = 9, strides = 1, padding = "same")(model)
    #model = Activation('tanh')(model)
    generator_model = Model(inputs = gen_ip, outputs = model)
    return generator_model
#vgg
def build_vgg(hr_shape):
    vgg = VGG19(weights='imagenet', include_top=False, input_shape=hr_shape)
    return Model(inputs=vgg.inputs, outputs=vgg.layers[10].output)
vgg = build_vgg((128,128,3))
print(vgg.summary())
vgg.trainable = False
# this line is where I'm getting the error
vgg(generator(lr_ip,16))
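One plausible fix (an assumption, since the full script is not shown): generator(lr_ip, 16) returns a Keras Model object, while vgg expects a tensor, so pass the generator's output tensor instead:
gen = generator(lr_ip, 16)
# feed the generator's symbolic output tensor, not the Model object itself
vgg_features = vgg(gen.output)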

Tensorflow: Restored variables have unexpected shape

When I saved my TensorFlow model and restored it, it had wrong-shaped variables and I can't figure out why.
Here is my code:
import os
import tensorflow as tf
X = tf.placeholder(tf.float32, shape=[None, 2], name="X")
Y = tf.placeholder(tf.float32, shape=[None, 1], name="Y")
W = tf.Variable(tf.random_normal([2, 1]), name="weight")
b = tf.Variable(tf.random_normal([1]), name="bias")
hypo = tf.sigmoid(tf.matmul(X, W) + b)
cost = -tf.reduce_mean(Y*tf.log(hypo) + (1-Y)*tf.log(1-hypo))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-3)
train = optimizer.minimize(cost)
#### Saving model
SAVER_DIR = "model"
saver = tf.train.Saver()
checkpoint_path = os.path.join(SAVER_DIR, "model")
ckpt = tf.train.get_checkpoint_state(SAVER_DIR)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(2001):
    cost_val, hy_val, _ = sess.run([cost, hypo, train], feed_dict={X: x_dat, Y: y_dat})
    saver.save(sess, checkpoint_path, global_step=step)
The input data "x_dat" consists of two columns, and "y_dat" is a single column.
I made a [?, 2]-shaped placeholder "X" and a [2, 1] Variable "W".
After that, I restored the model and checked the shapes with this code:
#### Restoring model
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.import_meta_graph('./model/model-2000.meta')
saver.restore(sess, './model/model-2000')
op = sess.graph.get_operations()
for m in op:
    print(m.values())
Result:
(<tf.Tensor 'X:0' shape=(?, 1) dtype=float32>,)
(<tf.Tensor 'Y:0' shape=(?, 1) dtype=float32>,)
...
(<tf.Tensor 'weight:0' shape=(1, 1) dtype=float32_ref>,)
...
(<tf.Tensor 'bias:0' shape=(1,) dtype=float32_ref>,)
Why are the X and weight tensors shaped differently from the model I saved?
And how can I feed two-column input data to this restored model?
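One way to check what the checkpoint actually stores (a debugging sketch, not from the original post) is to list its variables; if the shapes printed here are already (?, 1) and (1, 1), the checkpoint was written by a different graph than the one shown above:
import tensorflow as tf
# prints (name, shape) for every variable stored in the checkpoint
for name, shape in tf.train.list_variables('./model/model-2000'):
    print(name, shape)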

Is it possible to build a custom loss function of chamfer distance using Keras

In my assignment about point clouds, I need a Keras custom loss function for the chamfer distance and to apply it to an autoencoder. But I find it hard to implement the function.
I have tried to write it this way. I hope someone can help me.
def chamfer_loss_func(y_true, y_pred):
    '''
    Calculate the chamfer distance, use euclidean metric
    :param y_true:
    :param y_pred:
    :return:
    '''
    batch_size = 32
    y_true = K.reshape(y_true, [batch_size, 2048, 3])
    y_pred = K.reshape(y_pred, [batch_size, 2048, 3])
    num_t = K.int_shape(y_true)[1]
    num_p = K.int_shape(y_pred)[1]
    dists_mat = K.zeros(shape=[num_t, num_p])
    sum = 0.0
    for bi in range(batch_size):
        for i in range(num_t):
            pnt_t = y_true[bi][i]
            for j in range(num_p):
                if (i <= j):
                    pnt_p = y_pred[bi][j]
                    dists_mat[i][j] = K.eval(K.sum(K.sqrt(tf.subtract(pnt_t, pnt_p))))
                else:
                    dists_mat[i][j] = dists_mat[j][i]
        dist_t_to_p = K.mean(K.min(dists_mat, axis=0))
        dist_p_to_t = K.mean(K.min(dists_mat, axis=1))
        sum += K.max([dist_p_to_t, dist_t_to_p])
    return sum / batch_size
And the network structure is listed here:
def get_model(num_pnts):
    input_tensor = Input(shape=[num_pnts, 3])
    input_cov_tensor = Input(shape=[num_pnts, 9])  # add the local covariance matrix
    # the encoder network
    concat_1 = concatenate([input_tensor, input_cov_tensor])  # concatenate the two input tensors
    dense_1 = Dense(32, activation='relu')(concat_1)
    dense_2 = Dense(64, activation='relu')(dense_1)
    dense_3 = Dense(128, activation='relu')(dense_2)
    encoded = MaxPooling1D(pool_size=num_pnts)(dense_3)
    # the decoder network
    # use 3 fully connected layers
    dense_3 = Dense(128, activation='relu')(encoded)
    dense_4 = Dense(128, activation='relu')(dense_3)
    dense_5 = Dense(num_pnts*3, activation='linear')(dense_4)
    decoded = Reshape(target_shape=[num_pnts, 3])(dense_5)
    # the autoencoder
    autoencoder = Model(inputs=[input_tensor, input_cov_tensor], outputs=decoded)
    return autoencoder
And here is where the loss function is used:
model = get_model(2048)
model.compile(optimizer='adam', loss=chamfer_loss_func)
Can you give me a link to the chamfer distance definition you are referencing?
K.sum(K.sqrt(tf.subtract(pnt_t, pnt_p))) looks strange to me. To calculate a Euclidean distance, the sqrt should be replaced with square.
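A corrected Euclidean distance term would be something like:
# squared differences summed, then the square root of the sum
dist = K.sqrt(K.sum(K.square(tf.subtract(pnt_t, pnt_p))))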
It is also not recommended to use Python for loops in TensorFlow, so I have reimplemented it:
import numpy as np
import keras.backend as K
import tensorflow as tf
from keras.layers import Input, Dense, MaxPooling1D, Reshape, concatenate
from keras.models import Model
def dists_mat_calculater(pnts_t, pnts_p):
    """
    arguments:
        pnts_t : from y_true[bi], shape: (num_t, 3)
        pnts_p : from y_pred[bi], shape: (num_p, 3)
    return:
        dists_mat: shape: (num_t, num_p)
    """
    num_t = K.int_shape(pnts_t)[0]
    num_p = K.int_shape(pnts_p)[0]
    pnts_t = tf.reshape(tf.tile(tf.expand_dims(pnts_t, 1), [1, 1, num_p]), [-1, 3])
    pnts_p = tf.tile(pnts_p, [num_t, 1])
    dists_mat = K.sum(K.square(tf.subtract(pnts_t, pnts_p)), axis=1)
    dists_mat = tf.reshape(dists_mat, [num_t, num_p])
    dists_mat_upper = tf.matrix_band_part(dists_mat, 0, -1)
    dists_mat_symm = dists_mat_upper + tf.transpose(dists_mat_upper)
    dists_mat_symm = tf.matrix_set_diag(dists_mat_symm, tf.diag_part(dists_mat))
    return dists_mat_symm
def dist_calculator(pnts):
    """
    arguments:
        pnts_t : from y_true[bi], shape: (num_t, 3)
        pnts_p : from y_pred[bi], shape: (num_p, 3)
    return:
        dist: shape: (1, )
    """
    pnts_t, pnts_p = pnts
    dists_mat = dists_mat_calculater(pnts_t, pnts_p)
    dist_t_to_p = K.mean(K.min(dists_mat, axis=0))  # shape: (1,)
    dist_p_to_t = K.mean(K.min(dists_mat, axis=1))  # shape: (1,)
    dist = K.max([dist_p_to_t, dist_t_to_p])  # shape: (1,)
    return dist
def chamfer_loss_func_tf(y_true, y_pred):
    '''
    Calculate the chamfer distance, use euclidean metric
    :param y_true:
    :param y_pred:
    :return:
    '''
    y_true = K.reshape(y_true, [-1, num_pnts, 3])
    y_pred = K.reshape(y_pred, [-1, num_pnts, 3])
    return K.mean(tf.map_fn(dist_calculator, elems=(y_true, y_pred), dtype=tf.float32))
dists_mat_calculater computes the distance matrix, and the pairwise part of the calculation is inspired by Mask R-CNN's overlaps_graph.
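To make the tiling trick concrete, here is a small NumPy illustration (my own toy example, not from the answer) of how the two tile operations line up every (t, p) pair:
import numpy as np
a = np.array([[0., 0., 0.],
              [1., 1., 1.]])            # num_t = 2 points
b = np.array([[0., 0., 0.],
              [2., 2., 2.],
              [3., 3., 3.]])            # num_p = 3 points
aa = np.reshape(np.tile(a[:, None, :], (1, 1, 3)), (-1, 3))  # each a-row repeated 3x
bb = np.tile(b, (2, 1))                                      # b rows cycled twice
d = np.sum((aa - bb) ** 2, axis=1).reshape(2, 3)             # squared pairwise distances
print(d)  # [[ 0. 12. 27.] [ 3.  3. 12.]]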
I also implemented a pure Python version for validation purposes:
def chamfer_loss_python(y_true, y_pred):
    '''
    Calculate the chamfer distance, use euclidean metric
    :param y_true:
    :param y_pred:
    :return:
    '''
    y_true = np.reshape(y_true, [-1, num_pnts, 3])
    y_pred = np.reshape(y_pred, [-1, num_pnts, 3])
    batch_size = y_true.shape[0]
    num_t = y_true.shape[1]
    num_p = y_pred.shape[1]
    dists_mat = np.zeros((num_t, num_p))
    _sum = 0.0
    loss_before_mean_py = []
    for bi in range(batch_size):
        for i in range(num_t):
            pnt_t = y_true[bi][i]
            for j in range(num_p):
                if (i <= j):
                    pnt_p = y_pred[bi][j]
                    dists_mat[i][j] = np.sum((pnt_t - pnt_p)**2)
                else:
                    dists_mat[i][j] = dists_mat[j][i]
        dist_t_to_p = np.mean(np.min(dists_mat, axis=0))
        dist_p_to_t = np.mean(np.min(dists_mat, axis=1))
        _sum += np.max([dist_p_to_t, dist_t_to_p])
        loss_before_mean_py.append(np.max([dist_p_to_t, dist_t_to_p]))
    return _sum / batch_size
Following is the testing script:
num_pnts = 8
np.random.seed(1)
Y_true = np.random.randn(32, num_pnts, 3).astype(np.float32)
Y_pred = np.random.randn(32, num_pnts, 3).astype(np.float32)
Y_true_ph = tf.placeholder(tf.float32, shape=(None, num_pnts, 3), name="Y_true_ph")
Y_pred_ph = tf.placeholder(tf.float32, shape=(None, num_pnts, 3), name="Y_pred_ph")
loss = chamfer_loss_func_tf(Y_true_ph, Y_pred_ph)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    loss = sess.run(loss, feed_dict={
        Y_true_ph: Y_true,
        Y_pred_ph: Y_pred})
loss_py = chamfer_loss_python(Y_true, Y_pred)
print(loss)
print(loss_py)

Rank mismatch error in Tensorflow

I'm working on creating an image classifier that can differentiate between cats and dogs. I have the following code:
import cv2
import os
from tqdm import tqdm
import numpy as np
import tensorflow as tf
img_height = 128
img_width = 128
path = "./train"
# class info
file = os.listdir(path)
index = []
images = []
# image size and channels
channels = 3
n_inputs = img_width * img_height * channels
# First convolutional layer
conv1_fmaps = 96 # Number of feature maps created by this layer
conv1_ksize = 4 # kernel size 4x4
conv1_stride = 2
conv1_pad = "SAME"
# Second convolutional layer
conv2_fmaps = 192
conv2_ksize = 4
conv2_stride = 4
conv2_pad = "SAME"
# Third layer is a pooling layer
pool3_fmaps = conv2_fmaps # Isn't it obvious?
n_fc1 = 192 # Total number of output features
n_outputs = 2
with tf.name_scope("inputs"):
X = tf.placeholder(tf.float32, shape=[None, img_width, img_height, channels], name="X")
X_reshaped = tf.reshape(X, shape=[-1, img_height, img_width, channels])
y = tf.placeholder(tf.int32, shape=[None, 2], name="y")
conv1 = tf.layers.conv2d(X_reshaped, filters=conv1_fmaps, kernel_size=conv1_ksize, strides=conv1_stride, padding=conv1_pad, activation=tf.nn.relu, name="conv1")
conv2 = tf.layers.conv2d(conv1, filters=conv2_fmaps, kernel_size=conv2_ksize, strides=conv2_stride, padding=conv2_pad, activation=tf.nn.relu, name="conv2")
n_epochs = 10
batch_size = 250
with tf.name_scope("pool3"):
pool3 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
pool3_flat = tf.reshape(pool3, shape=[-1, pool3_fmaps * 8 * 8])
with tf.name_scope("fc1"):
fc1 = tf.layers.dense(pool3_flat, n_fc1, activation=tf.nn.relu name="fc1")
with tf.name_scope("output"):
logits = tf.layers.dense(fc1, n_outputs, name="output")
Y_proba = tf.nn.softmax(logits, name="Y_proba")
with tf.name_scope("train"):
xentropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer()
training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
with tf.name_scope("init_and_save"):
saver = tf.train.Saver()
def next_batch(num):
    index = []
    images = []
    # Data set Creation
    print("Creating batch dataset "+str(num+1)+"...")
    for f in tqdm(range(num * batch_size, (num+1)*batch_size)):
        if file[f].find("dog"):
            index.append(np.array([0, 1]))
        else:
            index.append(np.array([1, 0]))
        image = cv2.imread(path + "/" + file[f])
        image = cv2.resize(image, (img_width, img_height), 0, 0, cv2.INTER_LINEAR)
        # image = image.astype(np.float32)
        images.append(image)
    images = np.array(images, dtype=np.uint8)
    images = images.astype('float32')
    images = images / 255
    print("\nBatch "+str(num+1)+" creation finished.")
    # print([images, index])
    return [images, index]
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(25000 // batch_size):
            X_batch, y_batch = next_batch(iteration)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        print(epoch, "Train accuracy:", acc_train)
        save_path = saver.save(sess, "./dogvscat_mnist_model.ckpt")
But I'm getting this error:
ValueError: Rank mismatch: Rank of labels (received 2) should equal rank of logits minus 1 (received 2).
Can anyone point out the problem and help me solve it? I'm totally new to this.
For tf.nn.sparse_softmax_cross_entropy_with_logits, rank(labels) = rank(logits) - 1, so you need to redefine the labels placeholder as follows:
...
y = tf.placeholder(tf.int32, shape=[None], name="y")
...
xentropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=y)
...
X_batch, y_batch = next_batch(iteration)
y_batch = np.argmax(y_batch, axis=1)
Or you can just use tf.nn.softmax_cross_entropy_with_logits without changing the labels placeholder:
xentropy=tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=y)
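Note that since y is declared as tf.int32 while softmax_cross_entropy_with_logits expects float labels, a cast is likely needed as well:
xentropy = tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=tf.cast(y, tf.float32))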

TensorFlow, making predictions using a trained network

So I am training a network to classify images in TensorFlow. After I trained the network, I began work on trying to use it to classify other images. The goal is to import an image, feed it to the classifier, and have it print the result. I am having some trouble getting that part off the ground, though. Here is what I have so far. I found that having tf.argmax(y,1) gave an error, and changing it to 0 fixed that error. However, I am not convinced it is actually working: I tossed 2 images through the classifier and they both got the same class, even though they are vastly different. Just need some perspective here. Is this valid? Or is there something wrong here that will always feed me the same class (in this case I got class 0 for both of the images I tried)?
Is this even the right way to approach making predictions in TensorFlow? This is just the culmination of my debugging, and I am not sure whether it is what should be done or not.
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
X_train,X_validation,y_train,y_validation=train_test_split(X_train,y_train, test_size=20,random_state=0)
X_train, y_train = shuffle(X_train, y_train)
def LeNet(x):
    # Arguments used for tf.truncated_normal, randomly defines variables
    # for the weights and biases for each layer
    mu = 0
    sigma = 0.1
    # SOLUTION: Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)
    # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)
    # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # SOLUTION: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)
    # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b
    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)
    # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b
    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)
    # SOLUTION: Layer 5: Fully Connected. Input = 84. Output = 43.
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    return logits
import tensorflow as tf
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
EPOCHS=10
BATCH_SIZE=128
rate = 0.001
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(EPOCHS):
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
        validation_accuracy = evaluate(X_validation, y_validation)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    saver.save(sess, './lenet')
    print("Model saved")
import cv2
image = cv2.imread('File path')
image = cv2.resize(image, (32, 32))  # classifier takes 32x32 images
image = np.array(image)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver3 = tf.train.import_meta_graph('./lenet.meta')
    saver3.restore(sess, "./lenet")
    pred = tf.nn.softmax(logits)
    predictions = sess.run(tf.argmax(y, 0), feed_dict={x: image})
    print(predictions)
So what had to happen here was first to clear the kernel and outputs. Somewhere along the way my placeholders got muddled up, and clearing the kernel fixed that right up. Then I had to realize what really needed to be done: I had to call the softmax function on my new data.
Like this:
pred = tf.nn.softmax(logits)
classification = sess.run(pred, feed_dict={x: image_array})
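Note that the x placeholder has shape (None, 32, 32, 3), so image_array here likely needs a batch dimension before being fed (a sketch, assuming image is the 32x32x3 array loaded above):
image_array = image[np.newaxis, ...].astype(np.float32)   # shape (1, 32, 32, 3)
classification = sess.run(pred, feed_dict={x: image_array})
print(np.argmax(classification, axis=1))                  # predicted class index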
