Tensorflow CNN with custom images with RGB values - python-3.x

I have followed tutorials on creating a CNN with the MNIST dataset and understand most of it. I then tried to adapt it to my own custom images with RGB values, but I am stuck on certain parts of the code because I do not fully understand what happens there or how to proceed. I know I have to change the number of channels to 3, but I do not know whether the rest of the helper functions are correct. I also do not understand how to train the network once everything is initialized: because of batch_x, batch_y = iterator.get_next()
I am not able to use feed_dict, so how do I run the training step? With the MNIST data it was possible to set the dropout, but how can I specify it now? And as far as I understand, I am not actually training on the real data yet.
How can I also compute the accuracy the same way as with the MNIST data when I create and test the validation data?
The code looks like this:
import tensorflow as tf
import process_images as image_util
from tensorflow.contrib.data import Dataset, Iterator
# With MNIST
#from tensorflow.examples.tutorials.mnist import input_data
#mnist = input_data.read_data_sets("MNISt_data/", one_hot=True)
filenames_dummy, labels_dummy = image_util.run_it()
#The filenames_dummy and labels_dummy are two lists looking like this, respectively:
#["data/image_1.png", "data/image_2.png", ..., "data/image_n.png"]
# The values of the labels are 0-3, since I have 4 classes.
#[0, 1, ..., 3]
filenames = tf.constant(filenames_dummy)
labels = tf.constant(labels_dummy)
def _parse_function(filename, label):
    image_string = tf.read_file(filename)
    image_decoded = tf.image.decode_png(image_string, channels=3)
    # The image size is 425x425.
    image_resized = tf.image.resize_images(image_decoded, [425, 425])
    return image_resized, label
dataset = tf.contrib.data.Dataset.from_tensor_slices((filenames, labels))
dataset = dataset.map(_parse_function)
dataset = dataset.batch(30)
dataset = dataset.repeat()
iterator = dataset.make_one_shot_iterator()
# Helper functions
# INIT weights
def init_weights(shape):
    init_random_dist = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_random_dist)
# INIT Bias
def init_bias(shape):
    init_bias_vals = tf.constant(0.1, shape=shape)
    return tf.Variable(init_bias_vals)
# CONV2D
def conv2d(x, W):
    # x --> input tensor [batch, H, W, Channels]
    # W --> [filter H, filter W, Channels IN, Channels OUT]
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# Pooling
def max_pooling_2by2(x):
    # x --> [batch, h, w, c]
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#Convolutional layer
def convolutional_layer(input_x, shape):
    W = init_weights(shape)
    b = init_bias([shape[3]])
    return tf.nn.relu(conv2d(input_x, W) + b)
# Normal (FULLY CONNECTED)
def normal_full_layer(input_layer, size):
    input_size = int(input_layer.get_shape()[1])
    W = init_weights([input_size, size])
    b = init_bias([size])
    return tf.matmul(input_layer, W) + b
# PLACEHOLDERS
x = tf.placeholder(tf.float32, shape=[None, 180625])
y_true = tf.placeholder(tf.float32, shape=[None, 4])
# With MNIST
#x = tf.placeholder(tf.float32, shape=[None, 784])
#y_true = tf.placeholder(tf.float32, shape=[None, 10])
# Layers
x_image = tf.reshape(x, [-1, 425,425, 1])
# With MNIST
#x_image = tf.reshape(x, [-1, 28,28, 1])
convo_1 = convolutional_layer(x_image, shape=[5,5,1,32])
convo_1_pooling = max_pooling_2by2(convo_1)
convo_2 = convolutional_layer(convo_1_pooling, shape=[5,5,32, 64])
convo_2_pooling = max_pooling_2by2(convo_2)
convo_2_flat = tf.reshape(convo_2_pooling, [-1, 7*7*64])
full_layer_one = tf.nn.relu(normal_full_layer(convo_2_flat, 1024))
# Dropout
hold_prob = tf.placeholder(tf.float32)
full_one_dropout = tf.nn.dropout(full_layer_one, keep_prob=hold_prob)
y_pred = normal_full_layer(full_one_dropout, 4)
# With MNIST
#y_pred = normal_full_layer(full_one_dropout, 10)
# LOSS function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred))
# Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
steps = 5000
with tf.Session() as sess:
    sess.run(init)
    for i in range(steps):
        batch_x, batch_y = iterator.get_next()
        test1, test2 = sess.run([batch_x, batch_y])
        # With MNIST
        #sess.run(train, feed_dict={x:batch_x, y_true:batch_y, hold_prob:0.5})
        if i % 100 == 0:
            print("ON STEP {}".format(i))
            print("Accuracy: ")
            matches = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
            accuracy = tf.reduce_mean(tf.cast(matches, tf.float32))
            # With MNIST
            #print(sess.run(accuracy, feed_dict={x:mnist.test.images, y_true:mnist.test.labels, hold_prob:1.0}))
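One way to wire this up (a minimal sketch, assuming the helper functions and dataset/iterator defined above): build the graph directly on the tensors returned by iterator.get_next() instead of on placeholders, so no feed_dict is needed for the image data, and keep hold_prob as the only placeholder so dropout can still be switched off for evaluation. With a 425x425 input and two 2x2 'SAME' poolings the spatial size goes 425 -> 213 -> 107, so the flattened size is 107*107*64 rather than the MNIST value 7*7*64 (that is a very large dense layer, so in practice you would probably pool more or resize the images smaller).

batch_x, batch_y = iterator.get_next()              # fetch the tensors once, outside the loop
x_image = tf.reshape(batch_x, [-1, 425, 425, 3])    # 3 channels for RGB
y_true = tf.one_hot(batch_y, depth=4)               # labels 0-3 -> one-hot

convo_1 = convolutional_layer(x_image, shape=[5, 5, 3, 32])
convo_1_pooling = max_pooling_2by2(convo_1)
convo_2 = convolutional_layer(convo_1_pooling, shape=[5, 5, 32, 64])
convo_2_pooling = max_pooling_2by2(convo_2)
convo_2_flat = tf.reshape(convo_2_pooling, [-1, 107 * 107 * 64])
full_layer_one = tf.nn.relu(normal_full_layer(convo_2_flat, 1024))

hold_prob = tf.placeholder(tf.float32)              # dropout stays a placeholder
full_one_dropout = tf.nn.dropout(full_layer_one, keep_prob=hold_prob)
y_pred = normal_full_layer(full_one_dropout, 4)

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred))
train = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)
matches = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
accuracy = tf.reduce_mean(tf.cast(matches, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(5000):
        # running `train` pulls the next batch through the iterator automatically
        _, acc = sess.run([train, accuracy], feed_dict={hold_prob: 0.5})
        if i % 100 == 0:
            print("ON STEP {}, batch accuracy {:.3f}".format(i, acc))

For the validation data you could build a second Dataset and iterator over the validation files in the same way and evaluate the accuracy node on it with hold_prob: 1.0.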

Related

TF 2.0 Error: Gradients does not exist for variables during training using gradienttape

I tried to make a class using the batch normalization layer from TF 2.0, but it gives me a "Gradients does not exist for variables" message. I tried to use batch normalization directly and got the same message; it seems the variables related to the batch normalization step are not being trained.
I tried to use model.trainable_variables instead of model.variables, but it didn't work either.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy import ndimage
learning_rate = 0.001
training_epochs = 15
batch_size = 100
tf.random.set_seed(777)
cur_dir = os.getcwd()
ckpt_dir_name = 'checkpoints'
model_dir_name = 'minst_cnn_best'
checkpoint_dir = os.path.join(cur_dir, ckpt_dir_name, model_dir_name)
os.makedirs(checkpoint_dir, exist_ok=True)
checkpoint_prefix = os.path.join(checkpoint_dir, model_dir_name)
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.astype(np.float32) /255.
test_images = test_images.astype(np.float32) /255.
print(train_images.shape, test_images.shape)
train_images = np.expand_dims(train_images, axis = -1)
test_images = np.expand_dims(test_images, axis = -1)
print(train_images.shape, test_images.shape)
train_labels = to_categorical(train_labels, 10)
test_labels = to_categorical(test_labels, 10)
train_dataset = tf.data.Dataset.from_tensor_slices((train_images,
train_labels)).shuffle(buffer_size = 100000).batch(batch_size)
test_dataset = tf.data.Dataset.from_tensor_slices((test_images,
test_labels)).batch(batch_size)
class ConvBNRelu(tf.keras.Model):
    def __init__(self, filters, kernel_size=3, strides=1, padding='SAME'):
        super(ConvBNRelu, self).__init__()
        self.conv = keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides,
                                        padding=padding, kernel_initializer='glorot_normal')
        self.batchnorm = tf.keras.layers.BatchNormalization()

    def call(self, inputs, training=False):
        layer = self.conv(inputs)
        layer = self.batchnorm(layer)
        layer = tf.nn.relu(layer)
        return layer
class DenseBNRelu(tf.keras.Model):
    def __init__(self, units):
        super(DenseBNRelu, self).__init__()
        self.dense = keras.layers.Dense(units=units, kernel_initializer='glorot_normal')
        self.batchnorm = tf.keras.layers.BatchNormalization()

    def call(self, inputs, training=False):
        layer = self.dense(inputs)
        layer = self.batchnorm(layer)
        layer = tf.nn.relu(layer)
        return layer
class MNISTModel(tf.keras.Model):
    def __init__(self):
        super(MNISTModel, self).__init__()
        self.conv1 = ConvBNRelu(filters=32, kernel_size=[3, 3], padding='SAME')
        self.pool1 = keras.layers.MaxPool2D(padding='SAME')
        self.conv2 = ConvBNRelu(filters=64, kernel_size=[3, 3], padding='SAME')
        self.pool2 = keras.layers.MaxPool2D(padding='SAME')
        self.conv3 = ConvBNRelu(filters=128, kernel_size=[3, 3], padding='SAME')
        self.pool3 = keras.layers.MaxPool2D(padding='SAME')
        self.pool3_flat = keras.layers.Flatten()
        self.dense4 = DenseBNRelu(units=256)
        self.drop4 = keras.layers.Dropout(rate=0.4)
        self.dense5 = keras.layers.Dense(units=10, kernel_initializer='glorot_normal')

    def call(self, inputs, training=False):
        net = self.conv1(inputs)
        net = self.pool1(net)
        net = self.conv2(net)
        net = self.pool2(net)
        net = self.conv3(net)
        net = self.pool3(net)
        net = self.pool3_flat(net)
        net = self.dense4(net)
        net = self.drop4(net)
        net = self.dense5(net)
        return net
models = []
num_models = 5
for m in range(num_models):
    models.append(MNISTModel())
def loss_fn(model, images, labels):
    logits = model(images, training=True)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                                   labels=labels))
    return loss
def grad(model, images, labels):
    with tf.GradientTape() as tape:
        loss = loss_fn(model, images, labels)
    return tape.gradient(loss, model.variables)
def evaluate(models, images, labels):
    predictions = np.zeros_like(labels)
    for model in models:
        logits = model(images, training=False)
        predictions += logits
    correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return accuracy
optimizer = keras.optimizers.Adam(learning_rate = learning_rate)
checkpoints = []
for m in range(num_models):
    checkpoints.append(tf.train.Checkpoint(cnn=models[m]))
for epoch in range(training_epochs):
    avg_loss = 0.
    avg_train_acc = 0.
    avg_test_acc = 0.
    train_step = 0
    test_step = 0
    for images, labels in train_dataset:
        for model in models:
            grads = grad(model, images, labels)
            optimizer.apply_gradients(zip(grads, model.variables))
            loss = loss_fn(model, images, labels)
            avg_loss += loss / num_models
        acc = evaluate(models, images, labels)
        avg_train_acc += acc
        train_step += 1
    avg_loss = avg_loss / train_step
    avg_train_acc = avg_train_acc / train_step
    for images, labels in test_dataset:
        acc = evaluate(models, images, labels)
        avg_test_acc += acc
        test_step += 1
    avg_test_acc = avg_test_acc / test_step
    print('Epoch:', '{}'.format(epoch + 1), 'loss =', '{:.8f}'.format(avg_loss),
          'train accuracy = ', '{:.4f}'.format(avg_train_acc),
          'test accuracy = ', '{:.4f}'.format(avg_test_acc))
    for idx, checkpoint in enumerate(checkpoints):
        checkpoint.save(file_prefix=checkpoint_prefix+'-{}'.format(idx))
print('Learning Finished!')
W0727 20:27:05.344142 140332288718656 optimizer_v2.py:982] Gradients does not exist for variables ['mnist_model/conv_bn_relu/batch_normalization/moving_mean:0', 'mnist_model/conv_bn_relu/batch_normalization/moving_variance:0', 'mnist_model/conv_bn_relu_1/batch_normalization_1/moving_mean:0', 'mnist_model/conv_bn_relu_1/batch_normalization_1/moving_variance:0', 'mnist_model/conv_bn_relu_2/batch_normalization_2/moving_mean:0', 'mnist_model/conv_bn_relu_2/batch_normalization_2/moving_variance:0', 'mnist_model/dense_bn_relu/batch_normalization_3/moving_mean:0', 'mnist_model/dense_bn_relu/batch_normalization_3/moving_variance:0'] when minimizing the loss.
W0727 20:27:05.407717 140332288718656 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py:460: BaseResourceVariable.constraint (from tensorflow.python.ops.resource_variable_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Apply a constraint manually following the optimizer update step.
W0727 20:27:05.499249 140332288718656 optimizer_v2.py:982] Gradients does not exist for variables ['mnist_model_1/conv_bn_relu_3/batch_normalization_4/moving_mean:0', 'mnist_model_1/conv_bn_relu_3/batch_normalization_4/moving_variance:0', 'mnist_model_1/conv_bn_relu_4/batch_normalization_5/moving_mean:0', 'mnist_model_1/conv_bn_relu_4/batch_normalization_5/moving_variance:0', 'mnist_model_1/conv_bn_relu_5/batch_normalization_6/moving_mean:0', 'mnist_model_1/conv_bn_relu_5/batch_normalization_6/moving_variance:0', 'mnist_model_1/dense_bn_relu_1/batch_normalization_7/moving_mean:0', 'mnist_model_1/dense_bn_relu_1/batch_normalization_7/moving_variance:0'] when minimizing the loss.
...
You're computing the gradient of the loss with respect to the model.variables: this collection contains not only the trainable variables (the model weights) but also the non-trainable variables like the moving mean and variance computed by the batch normalization layer.
You have to compute the gradient with respect to the trainable_variables. In short, change the lines
return tape.gradient(loss, model.variables)
and
optimizer.apply_gradients(zip(grads, model.variables))
to
return tape.gradient(loss, model.trainable_variables)
and
optimizer.apply_gradients(zip(grads, model.trainable_variables))
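Applied to the code above, the two places change roughly like this (a sketch; the rest of the training loop stays as it is):
def grad(model, images, labels):
    with tf.GradientTape() as tape:
        loss = loss_fn(model, images, labels)
    # differentiate only w.r.t. the trainable weights, not the BN moving mean/variance
    return tape.gradient(loss, model.trainable_variables)

# inside the training loop:
grads = grad(model, images, labels)
optimizer.apply_gradients(zip(grads, model.trainable_variables))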

Is it possible to build a custom loss function of chamfer distance using Keras

In my assignment about point clouds, I need to use a Keras custom loss function for the chamfer distance and apply it to an autoencoder, but I find the function hard to implement.
I have tried to write it in the following way; I hope someone could help me.
def chamfer_loss_func(y_true, y_pred):
    '''
    Calculate the chamfer distance, use euclidean metric
    :param y_true:
    :param y_pred:
    :return:
    '''
    batch_size = 32
    y_true = K.reshape(y_true, [batch_size, 2048, 3])
    y_pred = K.reshape(y_pred, [batch_size, 2048, 3])
    num_t = K.int_shape(y_true)[1]
    num_p = K.int_shape(y_pred)[1]
    dists_mat = K.zeros(shape=[num_t, num_p])
    sum = 0.0
    for bi in range(batch_size):
        for i in range(num_t):
            pnt_t = y_true[bi][i]
            for j in range(num_p):
                if (i <= j):
                    pnt_p = y_pred[bi][j]
                    dists_mat[i][j] = K.eval(K.sum(K.sqrt(tf.subtract(pnt_t, pnt_p))))
                else:
                    dists_mat[i][j] = dists_mat[j][i]
        dist_t_to_p = K.mean(K.min(dists_mat, axis=0))
        dist_p_to_t = K.mean(K.min(dists_mat, axis=1))
        sum += K.max([dist_p_to_t, dist_t_to_p])
    return sum / batch_size
And the network structure is listed here:
def get_model(num_pnts):
    input_tensor = Input(shape=[num_pnts, 3])
    input_cov_tensor = Input(shape=[num_pnts, 9])  # add the local covariance matrix
    # the encoder network
    concat_1 = concatenate([input_tensor, input_cov_tensor])  # concatenate the two input tensors
    dense_1 = Dense(32, activation='relu')(concat_1)
    dense_2 = Dense(64, activation='relu')(dense_1)
    dense_3 = Dense(128, activation='relu')(dense_2)
    encoded = MaxPooling1D(pool_size=num_pnts)(dense_3)
    # the decoder network
    # use 3 fully connected layers
    dense_3 = Dense(128, activation='relu')(encoded)
    dense_4 = Dense(128, activation='relu')(dense_3)
    dense_5 = Dense(num_pnts*3, activation='linear')(dense_4)
    decoded = Reshape(target_shape=[num_pnts, 3])(dense_5)
    # the autoencoder
    autoencoder = Model(inputs=[input_tensor, input_cov_tensor], outputs=decoded)
    return autoencoder
And here is where the loss function is used:
model = get_model(2048)
model.compile(optimizer='adam',loss=chamfer_loss_func,)
Could you give a link to the chamfer distance definition you are referring to?
K.sum(K.sqrt(tf.subtract(pnt_t, pnt_p))) looks strange to me. To calculate a (squared) Euclidean distance, the sqrt should be replaced with a square of the element-wise differences.
It is also not recommended to use Python for loops in TensorFlow, so I have reimplemented it:
import numpy as np
import keras.backend as K
import tensorflow as tf
from keras.layers import Input, Dense, MaxPooling1D, Reshape, concatenate
from keras.models import Model
def dists_mat_calculater(pnts_t, pnts_p):
    """
    arguments:
        pnts_t : from y_true[bi], shape: (num_t, 3)
        pnts_p : from y_pred[bi], shape: (num_p, 3)
    return:
        dists_mat: shape: (num_t, num_p)
    """
    num_t = K.int_shape(pnts_t)[0]
    num_p = K.int_shape(pnts_p)[0]

    pnts_t = tf.reshape(tf.tile(tf.expand_dims(pnts_t, 1), [1, 1, num_p]), [-1, 3])
    pnts_p = tf.tile(pnts_p, [num_t, 1])

    dists_mat = K.sum(K.square(tf.subtract(pnts_t, pnts_p)), axis=1)
    dists_mat = tf.reshape(dists_mat, [num_t, num_p])
    dists_mat_upper = tf.matrix_band_part(dists_mat, 0, -1)
    dists_mat_symm = dists_mat_upper + tf.transpose(dists_mat_upper)
    dists_mat_symm = tf.matrix_set_diag(dists_mat_symm, tf.diag_part(dists_mat))
    return dists_mat_symm

def dist_calculator(pnts):
    """
    arguments:
        pnts_t : from y_true[bi], shape: (num_t, 3)
        pnts_p : from y_pred[bi], shape: (num_p, 3)
    return:
        dist: shape: (1, )
    """
    pnts_t, pnts_p = pnts
    dists_mat = dists_mat_calculater(pnts_t, pnts_p)
    dist_t_to_p = K.mean(K.min(dists_mat, axis=0))  # shape: (1,)
    dist_p_to_t = K.mean(K.min(dists_mat, axis=1))  # shape: (1,)
    dist = K.max([dist_p_to_t, dist_t_to_p])  # shape: (1,)
    return dist

def chamfer_loss_func_tf(y_true, y_pred):
    '''
    Calculate the chamfer distance, use euclidean metric
    :param y_true:
    :param y_pred:
    :return:
    '''
    y_true = K.reshape(y_true, [-1, num_pnts, 3])
    y_pred = K.reshape(y_pred, [-1, num_pnts, 3])
    return K.mean(tf.map_fn(dist_calculator, elems=(y_true, y_pred), dtype=tf.float32))
dists_mat_calculater calculates the distance matrix; the pairwise-distance part is inspired by the overlaps_graph function in Mask R-CNN.
I also implemented a pure Python version for validation purposes:
def chamfer_loss_python(y_true, y_pred):
    '''
    Calculate the chamfer distance, use euclidean metric
    :param y_true:
    :param y_pred:
    :return:
    '''
    y_true = np.reshape(y_true, [-1, num_pnts, 3])
    y_pred = np.reshape(y_pred, [-1, num_pnts, 3])
    batch_size = y_true.shape[0]
    num_t = y_true.shape[1]
    num_p = y_pred.shape[1]
    dists_mat = np.zeros((num_t, num_p))
    _sum = 0.0
    loss_before_mean_py = []
    for bi in range(batch_size):
        for i in range(num_t):
            pnt_t = y_true[bi][i]
            for j in range(num_p):
                pnt_p = y_pred[bi][j]
                if (i <= j):
                    pnt_p = y_pred[bi][j]
                    dists_mat[i][j] = np.sum((pnt_t - pnt_p)**2)
                else:
                    dists_mat[i][j] = dists_mat[j][i]
        dist_t_to_p = np.mean(np.min(dists_mat, axis=0))
        dist_p_to_t = np.mean(np.min(dists_mat, axis=1))
        _sum += np.max([dist_p_to_t, dist_t_to_p])
        loss_before_mean_py.append(np.max([dist_p_to_t, dist_t_to_p]))
    return _sum / batch_size
Following is the testing script:
num_pnts = 8
np.random.seed(1)
Y_true = np.random.randn(32, num_pnts, 3).astype(np.float32)
Y_pred = np.random.randn(32, num_pnts, 3).astype(np.float32)
Y_true_ph = tf.placeholder(tf.float32, shape=(None, num_pnts, 3), name="Y_true_ph")
Y_pred_ph = tf.placeholder(tf.float32, shape=(None, num_pnts, 3), name="Y_pred_ph")
loss = chamfer_loss_func_tf(Y_true_ph, Y_pred_ph)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    loss = sess.run(loss, feed_dict={
        Y_true_ph: Y_true,
        Y_pred_ph: Y_pred})
loss_py = chamfer_loss_python(Y_true,Y_pred)
print(loss)
print(loss_py)
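To plug the TF version into the model from the question, compiling would look roughly like this (a sketch; it assumes num_pnts is set as a module-level variable to match the model, 2048 as in the question):
num_pnts = 2048  # chamfer_loss_func_tf reads this as a global
model = get_model(num_pnts)
model.compile(optimizer='adam', loss=chamfer_loss_func_tf)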

Rank mismatch error in Tensorflow

I'm working on creating an image classifier that can differentiate between cats and dogs. I have the following code:
import cv2
import os
from tqdm import tqdm
import numpy as np
import tensorflow as tf
img_height = 128
img_width = 128
path = "./train"
# class info
file = os.listdir(path)
index = []
images = []
# image size and channels
channels = 3
n_inputs = img_width * img_height * channels
# First convolutional layer
conv1_fmaps = 96 # Number of feature maps created by this layer
conv1_ksize = 4 # kernel size 3x3
conv1_stride = 2
conv1_pad = "SAME"
# Second convolutional layer
conv2_fmaps = 192
conv2_ksize = 4
conv2_stride = 4
conv2_pad = "SAME"
# Third layer is a pooling layer
pool3_fmaps = conv2_fmaps # Isn't it obvious?
n_fc1 = 192 # Total number of output features
n_outputs = 2
with tf.name_scope("inputs"):
X = tf.placeholder(tf.float32, shape=[None, img_width, img_height, channels], name="X")
X_reshaped = tf.reshape(X, shape=[-1, img_height, img_width, channels])
y = tf.placeholder(tf.int32, shape=[None, 2], name="y")
conv1 = tf.layers.conv2d(X_reshaped, filters=conv1_fmaps, kernel_size=conv1_ksize, strides=conv1_stride, padding=conv1_pad, activation=tf.nn.relu, name="conv1")
conv2 = tf.layers.conv2d(conv1, filters=conv2_fmaps, kernel_size=conv2_ksize, strides=conv2_stride, padding=conv2_pad, activation=tf.nn.relu, name="conv2")
n_epochs = 10
batch_size = 250
with tf.name_scope("pool3"):
pool3 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
pool3_flat = tf.reshape(pool3, shape=[-1, pool3_fmaps * 8 * 8])
with tf.name_scope("fc1"):
fc1 = tf.layers.dense(pool3_flat, n_fc1, activation=tf.nn.relu name="fc1")
with tf.name_scope("output"):
logits = tf.layers.dense(fc1, n_outputs, name="output")
Y_proba = tf.nn.softmax(logits, name="Y_proba")
with tf.name_scope("train"):
xentropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer()
training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
with tf.name_scope("init_and_save"):
saver = tf.train.Saver()
def next_batch(num):
    index = []
    images = []
    # Data set creation
    print("Creating batch dataset "+str(num+1)+"...")
    for f in tqdm(range(num * batch_size, (num+1)*batch_size)):
        if file[f].find("dog"):
            index.append(np.array([0, 1]))
        else:
            index.append(np.array([1, 0]))
        image = cv2.imread(path + "/" + file[f])
        image = cv2.resize(image, (img_width, img_height), 0, 0, cv2.INTER_LINEAR)
        # image = image.astype(np.float32)
        images.append(image)
    images = np.array(images, dtype=np.uint8)
    images = images.astype('float32')
    images = images / 255
    print("\nBatch "+str(num+1)+" creation finished.")
    # print([images, index])
    return [images, index]
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(25000 // batch_size):
            X_batch, y_batch = next_batch(iteration)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        print(epoch, "Train accuracy:", acc_train)
        save_path = saver.save(sess, "./dogvscat_mnist_model.ckpt")
But I'm getting this error:
ValueError: Rank mismatch: Rank of labels (received 2) should equal rank of logits minus 1 (received 2).
Can anyone point out the problem and help me solve it? I'm totally new to this.
For tf.nn.sparse_softmax_cross_entropy_with_logits, rank(labels) = rank(logits) - 1, so you need to redefine the labels placeholder as follows:
...
y = tf.placeholder(tf.int32, shape=[None], name="y")
...
xentropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=y)
...
X_batch, y_batch = next_batch(iteration)
y_batch = np.argmax(y_batch, axis=1)
Or you can just use tf.nn.softmax_cross_entropy_with_logits without changing the labels placeholder:
xentropy=tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=y)
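Putting the first option together, the relevant pieces would look roughly like this (a sketch; the rest of the question's code stays unchanged):
# labels are plain class indices of shape [None], not one-hot vectors
y = tf.placeholder(tf.int32, shape=[None], name="y")

with tf.name_scope("train"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
    loss = tf.reduce_mean(xentropy)
    training_op = tf.train.AdamOptimizer().minimize(loss)

# when feeding, convert the one-hot labels from next_batch back to indices
X_batch, y_batch = next_batch(iteration)
sess.run(training_op, feed_dict={X: X_batch, y: np.argmax(y_batch, axis=1)})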

Error while running a convolutional network using my own data in Tensorflow

I'm a complete beginner with TensorFlow and machine learning in general, so there are many concepts I still don't understand well; sorry if my error is obvious. I'm trying to train my own convolutional network using my own images (optical microscopy photos) resized to 60x60, and I have only 2 labels to classify them (whether the sample is positive or not). Here is my code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
sess = tf.InteractiveSession()
# Load dataset in two lists (images and labels).
def load_data(data_dir):
    directories = [d for d in os.listdir(data_dir)
                   if os.path.isdir(os.path.join(data_dir, d))]
    labels = []
    images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f)
                      for f in os.listdir(label_dir) if f.endswith(".JPG")]
        for f in file_names:
            images.append(f)
            labels.append(int(d))
    return images, labels
# Load training and testing datasets.
ROOT_PATH = "Proyecto"
train_data_dir = os.path.join(ROOT_PATH, "Imagenes_entrenamiento")
test_data_dir = os.path.join(ROOT_PATH, "Imagenes_test")
images_train, labels_train = load_data(train_data_dir)
images_test, labels_test = load_data(test_data_dir)
# Converting training data to tensors.
timages_train = ops.convert_to_tensor(images_train, dtype=dtypes.string)
tlabels_train = ops.convert_to_tensor(labels_train, dtype=dtypes.int32)
# Converting testing data to tensors.
timages_test = ops.convert_to_tensor(images_test, dtype=dtypes.string)
tlabels_test = ops.convert_to_tensor(labels_test, dtype=dtypes.int32)
# Creation of a training queue.
num_files_train = len(images_train)
filename_train_queue = tf.train.slice_input_producer([timages_train,
tlabels_train], num_epochs=None, shuffle=True, capacity=num_files_train)
# Creation of a testing queue.
num_files_test = len(images_test)
filename_test_queue = tf.train.slice_input_producer([timages_test,
tlabels_test], num_epochs=None, shuffle=True, capacity=num_files_test)
# Decoding and resizing train images
raw_image_train= tf.read_file(filename_train_queue[0])
decoded_image_train = tf.image.decode_jpeg(raw_image_train, channels=3)
decoded_image_train = tf.cast(decoded_image_train, tf.float32)
resized_train_image = tf.image.resize_images(decoded_image_train, [60, 60])
# Decoding and resizing test images
raw_image_test= tf.read_file(filename_test_queue[0])
decoded_image_test = tf.image.decode_jpeg(raw_image_test, channels=3)
decoded_image_test = tf.cast(decoded_image_test, tf.float32)
resized_test_image = tf.image.resize_images(decoded_image_test, [60, 60])
# Extracting training and testing labels.
label_train_queue = filename_train_queue[1]
label_test_queue = filename_test_queue[1]
# Training batch.
batch_size_train = 5
image_train_batch, label_train_batch = tf.train.batch([resized_train_image,
label_train_queue], batch_size_train)
# Testing batch.
batch_size_test = 2
image_test_batch, label_test_batch = tf.train.batch([resized_test_image,
label_test_queue], batch_size_test)
# General model
x = tf.placeholder(tf.float32, shape=[None, 60, 60, 3])
y_ = tf.placeholder(tf.int32, shape=[None])
keep_prob = tf.placeholder(tf.float32)
# Weights and biases
dense_w = {
    "w_conv1": tf.Variable(tf.truncated_normal([5, 5, 3, 32], stddev=0.1), name="w_conv1"),
    "b_conv1": tf.Variable(tf.constant(0.1, shape=[32]), name="b_conv1"),
    "w_conv2": tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1), name="w_conv2"),
    "b_conv2": tf.Variable(tf.constant(0.1, shape=[64]), name="b_conv2"),
    "w_fc1": tf.Variable(tf.truncated_normal([15*15*64, 1024], stddev=0.1), name="w_fc1"),
    "b_fc1": tf.Variable(tf.constant(0.1, shape=[1024]), name="b_fc1"),
    "w_fc2": tf.Variable(tf.truncated_normal([1024, 2], stddev=0.1), name="w_fc2"),
    "b_fc2": tf.Variable(tf.constant(0.1, shape=[2]), name="b_fc2")
}
# CNN model
def dense_cnn_model(weights):
    def conv2d(x, W):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    def max_pool_2x2(x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding='SAME')
    x_image = tf.reshape(x, [-1, 60, 60, 3])
    h_conv1 = tf.nn.relu(conv2d(x_image, weights["w_conv1"]) + weights["b_conv1"])
    h_pool1 = max_pool_2x2(h_conv1)
    h_conv2 = tf.nn.relu(conv2d(h_pool1, weights["w_conv2"]) + weights["b_conv2"])
    h_pool2 = max_pool_2x2(h_conv2)
    h_pool2_flat = tf.reshape(h_pool2, [-1, 15*15*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, weights["w_fc1"]) + weights["b_fc1"])
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, weights["w_fc2"]) + weights["b_fc2"])
    return y_conv
y_conv = dense_cnn_model(dense_w)
cross_entropy=tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=y_conv, labels=tf.squeeze(y_)))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_,))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init_op = tf.group(tf.local_variables_initializer(),
tf.global_variables_initializer())
with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    ## Training:
    for i in range(50):
        image_train_batch_eval, label_train_batch_eval = image_train_batch.eval(), label_train_batch.eval()
        if i % 2 == 0:
            train_accuracy = accuracy.eval(feed_dict={x: image_train_batch_eval,
                                                      y_: label_train_batch_eval,
                                                      keep_prob: 0.5})
            print('Step %d, training accuracy: %g' % (i, train_accuracy))
        train_step.run(feed_dict={x: image_train_batch_eval,
                                  y_: label_train_batch_eval,
                                  keep_prob: 0.5})
    ## Testing
    image_test_batch_eval, label_test_batch_eval = image_test_batch.eval(), label_test_batch.eval()
    print('Evaluation accuracy: %g' % accuracy.eval(feed_dict={x: image_test_batch_eval,
                                                               y_: label_test_batch_eval,
                                                               keep_prob: 1.0}))
    coord.request_stop()
    coord.join(threads)
EDIT:
The code is corrected.
You need to pass enqueue_many=True to tf.train.batch to indicate that you are enqueuing multiple examples at once, otherwise it will treat it as a single example with many features.
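For example, the training batch from the question would then be created like this (a sketch based on that suggestion; the test batch changes the same way):
image_train_batch, label_train_batch = tf.train.batch(
    [resized_train_image, label_train_queue],
    batch_size=batch_size_train,
    enqueue_many=True)  # the enqueued tensors are treated as containing multiple examples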

Tensor flow, making predictions using a trained network

So I am training a network to classify images in TensorFlow. After training the network I began working on using it to classify other images. The goal is to import an image, feed it to the classifier, and have it print the result, but I am having trouble getting that part off the ground. Here is what I have so far. I found that tf.argmax(y, 1) gave an error, and changing it to 0 fixed that error, but I am not convinced it is actually working: I ran 2 images through the classifier and both got the same class even though they are vastly different. I just need some perspective here. Is this valid, or is there something wrong that will always give me the same class (in this case I got class 0 for both images I tried)?
Is this even the right way to approach making predictions in TensorFlow? This is just the culmination of my debugging; I'm not sure whether it is what should be done.
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
X_train,X_validation,y_train,y_validation=train_test_split(X_train,y_train, test_size=20,random_state=0)
X_train, y_train = shuffle(X_train, y_train)
def LeNet(x):
    # Arguments used for tf.truncated_normal, which randomly defines the variables
    # for the weights and biases of each layer
    mu = 0
    sigma = 0.1
    # SOLUTION: Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean=mu, stddev=sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)
    # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)
    # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # SOLUTION: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)
    # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b
    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)
    # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b
    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)
    # SOLUTION: Layer 5: Fully Connected. Input = 84. Output = 43.
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean=mu, stddev=sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    return logits
import tensorflow as tf
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
EPOCHS=10
BATCH_SIZE=128
rate = 0.001
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(EPOCHS):
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
        validation_accuracy = evaluate(X_validation, y_validation)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    saver.save(sess, './lenet')
    print("Model saved")
import cv2
image=cv2.imread('File path')
image=cv2.resize(image,(32,32)) #classifier takes 32X32 images
image=np.array(image)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver3 = tf.train.import_meta_graph('./lenet.meta')
    saver3.restore(sess, "./lenet")
    pred = tf.nn.softmax(logits)
    predictions = sess.run(tf.argmax(y, 0), feed_dict={x: image})
    print(predictions)
So what had to happen here was first to clear the kernel and outputs. Somewhere along the way my placeholders got muddled up, and clearing the kernel fixed that right up. Then I had to realize what really needed to be done: I had to run the softmax on my new data,
like this:
pred = tf.nn.softmax(logits)
classification = sess.run(pred, feed_dict={x: image_array})
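A fuller version of that prediction step might look like this (a sketch; it assumes it runs inside the same session where the checkpoint was restored, that x and logits are the placeholder/op built above, and that numpy is imported as np as elsewhere in the question). The key details are that the image needs a leading batch dimension and that the argmax is taken over the class axis of the prediction, not over the placeholder y:
image = cv2.imread('File path')
image = cv2.resize(image, (32, 32))                              # classifier takes 32x32 images
image_array = np.expand_dims(image.astype(np.float32), axis=0)   # shape (1, 32, 32, 3)

pred = tf.nn.softmax(logits)
probabilities = sess.run(pred, feed_dict={x: image_array})
print(np.argmax(probabilities, axis=1))                          # predicted class index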
