TensorFlow: making predictions using a trained network - python-3.x

So I am training a network to classify images in TensorFlow. After training the network, I began working on using it to classify other images. The goal is to import an image, feed it to the classifier, and have it print the result. I am having some trouble getting that part off the ground, though. Here is what I have so far. I found that tf.argmax(y, 1) gave an error, and changing the 1 to 0 fixed that error, but I am not convinced it is actually working: I tossed 2 images through the classifier and they both got the same class, even though they are vastly different. I just need some perspective here. Is this valid, or is there something wrong here that will always feed me the same class (in this case I got class 0 for both of the images I tried)?
Is this even the right way to approach making predictions in TensorFlow? This is just the culmination of my debugging; I am not sure if it is what should be done or not.
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle

X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=20, random_state=0)
X_train, y_train = shuffle(X_train, y_train)

def LeNet(x):
    # Arguments used for tf.truncated_normal, randomly defines variables
    # for the weights and biases for each layer
    mu = 0
    sigma = 0.1

    # SOLUTION: Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean=mu, stddev=sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b

    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)

    # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b

    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)

    # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # SOLUTION: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)

    # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b

    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)

    # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b

    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)

    # SOLUTION: Layer 5: Fully Connected. Input = 84. Output = 43.
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean=mu, stddev=sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b

    return logits

import tensorflow as tf

x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)

EPOCHS = 10
BATCH_SIZE = 128
rate = 0.001

logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=rate)
training_operation = optimizer.minimize(loss_operation)

correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()

def evaluate(X_data, y_data):
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)

    print("Training...")
    print()
    for i in range(EPOCHS):
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})

        validation_accuracy = evaluate(X_validation, y_validation)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()

    saver.save(sess, './lenet')
    print("Model saved")

import cv2

image = cv2.imread('File path')
image = cv2.resize(image, (32, 32))  # classifier takes 32x32 images
image = np.array(image)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver3 = tf.train.import_meta_graph('./lenet.meta')
    saver3.restore(sess, "./lenet")
    pred = tf.nn.softmax(logits)
    predictions = sess.run(tf.argmax(y, 0), feed_dict={x: image})
    print(predictions)

So what had to happen here was to first clear the kernel and outputs; somewhere along the way my placeholders had gotten muddled up, and clearing the kernel fixed that right up. Then I had to realize what really needed to be done: call the softmax function on my new data.
Like this:
pred = tf.nn.softmax(logits)
classification = sess.run(pred, feed_dict={x: image_array})
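For completeness, here is a minimal end-to-end prediction sketch, assuming the x placeholder, logits, and saver defined in the code above are still in the default graph (the variable names here are just illustrative). The classifier expects a batch, so a single 32x32x3 image needs an extra batch dimension, and it should get the same preprocessing as the training images:
import numpy as np

# Illustrative single-image prediction (assumes x, logits, saver from above).
img = cv2.imread('File path')                        # path left as in the question
img = cv2.resize(img, (32, 32)).astype(np.float32)   # match the training preprocessing
img_batch = np.expand_dims(img, axis=0)              # shape (1, 32, 32, 3)

pred = tf.nn.softmax(logits)
with tf.Session() as sess:
    saver.restore(sess, './lenet')                   # load the trained weights; no re-initialization
    probs = sess.run(pred, feed_dict={x: img_batch})
    print(np.argmax(probs, axis=1))                  # predicted class per image in the batch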

Related

Translating LSTM model from Keras to Pytorch

I am having a hard time translating a quite simple LSTM model from Keras to Pytorch. X (get it here) corresponds to 1152 samples of 90 timesteps, each timestep having only 1 dimension. y (here) is a single prediction at t = 91 for all 1152 samples.
In Keras:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
import numpy as np
import pandas as pd
X = pd.read_csv('X.csv', header = None).values
X.shape
y = pd.read_csv('y.csv', header = None).values
y.shape
# From Keras documentation [https://keras.io/layers/recurrent/]:
# Input shape 3D tensor with shape (batch_size, timesteps, input_dim).
X = np.reshape(X, (1152, 90, 1))
regressor = Sequential()
regressor.add(LSTM(units = 100, return_sequences = True, input_shape = (90, 1)))
regressor.add(Dropout(0.3))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.3))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.3))
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.3))
regressor.add(Dense(units = 1, activation = 'linear'))
regressor.compile(optimizer = 'rmsprop', loss = 'mean_squared_error', metrics = ['mean_absolute_error'])
regressor.fit(X, y, epochs = 10, batch_size = 32)
... leads me to:
# Epoch 10/10
# 1152/1152 [==============================] - 33s 29ms/sample - loss: 0.0068 - mean_absolute_error: 0.0628
Then in Pytorch:
import torch
from torch import nn, optim
from sklearn.metrics import mean_absolute_error
X = pd.read_csv('X.csv', header = None).values
y = pd.read_csv('y.csv', header = None).values
X = torch.tensor(X, dtype = torch.float32)
y = torch.tensor(y, dtype = torch.float32)
dataset = torch.utils.data.TensorDataset(X, y)
loader = torch.utils.data.DataLoader(dataset, batch_size = 32, shuffle = True)
class regressor_LSTM(nn.Module):
    def __init__(self):
        super().__init__()
        self.lstm1 = nn.LSTM(input_size = 1, hidden_size = 100)
        self.lstm2 = nn.LSTM(100, 50)
        self.lstm3 = nn.LSTM(50, 50, dropout = 0.3, num_layers = 2)
        self.dropout = nn.Dropout(p = 0.3)
        self.linear = nn.Linear(in_features = 50, out_features = 1)

    def forward(self, X):
        # From the Pytorch documentation [https://pytorch.org/docs/stable/_modules/torch/nn/modules/rnn.html]:
        # **input** of shape `(seq_len, batch, input_size)`
        X = X.view(90, 32, 1)
        # I am discarding hidden/cell states since in Keras I am using a stateless approach
        # [https://keras.io/examples/lstm_stateful/]
        X, _ = self.lstm1(X)
        X = self.dropout(X)
        X, _ = self.lstm2(X)
        X = self.dropout(X)
        X, _ = self.lstm3(X)
        X = self.dropout(X)
        X = self.linear(X)
        return X

regressor = regressor_LSTM()
criterion = nn.MSELoss()
optimizer = optim.RMSprop(regressor.parameters())

for epoch in range(10):
    running_loss = 0.
    running_mae = 0.
    for i, data in enumerate(loader):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = regressor(inputs)
        outputs = outputs[-1].view(*labels.shape)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        mae = mean_absolute_error(labels.detach().cpu().numpy().flatten(), outputs.detach().cpu().numpy().flatten())
        running_mae += mae
    print('EPOCH %3d: loss %.5f - MAE %.5f' % (epoch+1, running_loss/len(loader), running_mae/len(loader)))
... leads me to:
# EPOCH 10: loss 0.04220 - MAE 0.16762
Notice that both the loss and the MAE are quite different (Pytorch's are much higher). And if I use the Pytorch model to predict values, they all come back as the same constant.
What am I doing wrong?
Oh I believe I made considerable progress. It seems that the way to represent y is different between Keras and Pytorch. In Keras, we should pass it as a single value representing one timestep in the future (or, at least, for the problem I am trying to solve). But in Pytorch, y must be X shifted one timestep to the future. It is like this:
time_series = [0, 1, 2, 3, 4, 5]
X = [0, 1, 2, 3, 4]
# Keras:
y = [5]
# Pytorch:
y = [1, 2, 3, 4, 5]
This way, Pytorch compares all values in the time slice when calculating loss. I believe Keras rearranges the data under the hood to conform to this approach, as the code works when fed the variables just like that. But in Pytorch, I was estimating loss based on only one value (the one I was trying to predict), not the whole series, so I believe it could not correctly capture the time dependency.
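For the record, here is a tiny NumPy sketch of the two target layouts (just the toy series from above; illustrative only):
import numpy as np

series = np.arange(6)      # [0, 1, 2, 3, 4, 5]
X = series[:-1]            # [0, 1, 2, 3, 4]
y_keras = series[-1:]      # [5]             -> one value, one step ahead
y_pytorch = series[1:]     # [1, 2, 3, 4, 5] -> X shifted one timestep ahead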
Taking this into consideration, I got to:
EPOCH 100: loss 0.00551 - MAE 0.058435
And, most importantly, comparing true and predicted values on a separate dataset showed that the patterns were clearly captured by the model.
Hooray!

AdamOptimizer returns invalid datatype error

I have been struggling to fix this for the past couple of days; any help would be appreciated.
I have a network that outputs numeric locations (x, y coordinates in an image), hence the int32 datatype. The output is of size (2, 2). The ground truth also has the locations as int32.
Then I run the following two lines of code:
cost = tf.reduce_mean(tf.square(y_true - y_pred_cls))
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
and receive the following error:
ValueError: Invalid type tf.int32 for Mean_7:0, expected: [tf.float32, tf.float64, tf.float16, tf.bfloat16].
I understand that the average should be a float value, but if I convert the predicted value to float, I start getting a 'No gradients provided' error.
I have also tried to use TensorFlow's softmax_cross_entropy_with_logits_v2 to turn my problem into a classification problem, but I get the same error. I'd really appreciate any help with the solution.
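(For reference, the cast attempt described above would look roughly like this, using the names from the snippets below; it only reproduces what was tried, it is not a fix:)
# Cast the integer difference to float before squaring/averaging
diff = tf.cast(y_true - y_pred_cls, tf.float32)
cost = tf.reduce_mean(tf.square(diff))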
Code
Code for Input and Labels
x_p = tf.placeholder(tf.float32, shape=[None, img_size_flat*num_channels], name='x_pred')
x_pred_image = tf.reshape(x_p, [-1, 10, 10, num_channels])
k = tf.constant(npatches)
y_true = tf.placeholder(tf.int32, shape=[None, 2, 2], name='y_true')
y_true_cls = tf.placeholder(tf.int32, shape=[None, 2, 2], name='y_true_cls')
Convolution Layer
def new_weights(shape, layer_name):
    initializer = tf.contrib.layers.xavier_initializer()
    return tf.Variable(initializer(shape), name=layer_name+'_W')

def new_bias(length, layer_name):
    return tf.Variable(tf.constant(0.05, shape=[length]), name=layer_name+'_b')

def new_conv_layer(input,
                   num_input_channels,
                   filter_size,
                   num_filters,
                   name_scope,
                   layer_name='',
                   use_pooling=True):
    with tf.name_scope(name_scope):
        shape = [filter_size, filter_size, num_input_channels, num_filters]
        weights = new_weights(shape, layer_name)
        biases = new_bias(num_filters, layer_name)
        layer = tf.add(tf.nn.conv2d(input=input, filter=weights, strides=[1,1,1,1], padding='SAME'), biases, name=layer_name)
        if use_pooling:
            layer = tf.nn.max_pool(value=layer,
                                   ksize=[1, 3, 3, 1],
                                   strides=[1, 2, 2, 1],
                                   padding='SAME', name=layer_name+'_max')
        layer = tf.nn.relu(layer, name=layer_name+'_activation')
        return layer, weights
Fully Connected Layer
def new_fc_layer(input,
                 num_inputs,
                 num_outputs,
                 name_scope,
                 layer_name='',
                 use_relu=True):
    with tf.name_scope(name_scope):
        weights = new_weights([num_inputs, num_outputs], layer_name)
        biases = new_bias(num_outputs, layer_name)
        layer = tf.add(tf.matmul(input, weights), biases, name=layer_name)
        # layer = tf.matmul(input, weights) + biases
        if use_relu:
            layer = tf.nn.relu(layer, layer_name+'_activation')
        return layer
Custom Layer Code
with tf.name_scope(name_scope):
    resh_inp = tf.reshape(input, [-1, 10, 10])
    input_shape = tf.shape(resh_inp)
    rows, cols = input_shape[1], input_shape[2]
    d_rows, d_cols = 2, 2
    subm_rows, subm_cols = rows - d_rows + 1, cols - d_cols + 1
    ii, jj = tf.meshgrid(tf.range(subm_rows), tf.range(subm_cols), indexing='ij')
    d_ii, d_jj = tf.meshgrid(tf.range(d_rows), tf.range(d_cols), indexing='ij')
    subm_ii = ii[:, :, tf.newaxis, tf.newaxis] + d_ii
    subm_jj = jj[:, :, tf.newaxis, tf.newaxis] + d_jj
    subm = tf.gather_nd(resh_inp[0, :, :], tf.stack([subm_ii, subm_jj], axis=-1), name=layer_name + "_gather")
    subm_sum = tf.reduce_sum(subm, axis=(2, 3), name=layer_name + "_subm_sum")
    _, top_idx = tf.nn.top_k(tf.reshape(subm_sum, [-1]), tf.minimum(k, tf.size(subm_sum)), name=layer_name + "_top_idx")
    top_row = top_idx // subm_cols
    top_col = top_idx % subm_cols
    result = tf.stack([top_row, top_col], axis=-1, name=layer_name + "_result")
    result_shape = tf.shape(result)
    result = tf.reshape(result, [-1, result_shape[0], result_shape[1]], name=layer_name + "_shaped_result")
The result from the code above is passed as y_pred_cls in the code below:
cost = tf.reduce_mean(tf.square(y_true - y_pred_cls))
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)

Tensorflow CNN with custom images with RGB values

I have followed tutorials on creating a CNN with the MNIST dataset and understand most of it. Then I tried to convert it to my own custom images with RGB values, but I am having trouble with certain parts of the code, as I do not fully understand what happens and how to proceed. I know I have to change the channels to 3, but I do not know whether the rest of the helper functions are correct. I also do not understand, once I have initialized everything, how to train it: because of the batch_x, batch_y = iterator.get_next()
I am not able to use the feed_dict, so how do I train this? On the MNIST data it was possible to set the dropout, but how can I specify this now? And as far as I understand, I am not actually training on the real data right now?
How can I also calculate the result in the same way as with the MNIST data when I create and test the validation data?
The code looks like this:
import tensorflow as tf
import process_images as image_util
from tensorflow.contrib.data import Dataset, Iterator

# With MNIST
#from tensorflow.examples.tutorials.mnist import input_data
#mnist = input_data.read_data_sets("MNISt_data/", one_hot=True)

filenames_dummy, labels_dummy = image_util.run_it()
# The filenames_dummy and labels_dummy are two lists looking like this, respectively:
# ["data/image_1.png", "data/image_2.png", ..., "data/image_n.png"]
# The values of the labels are 0-3, since I have 4 classes.
# [0, 1, ..., 3]
filenames = tf.constant(filenames_dummy)
labels = tf.constant(labels_dummy)

def _parse_function(filename, label):
    image_string = tf.read_file(filename)
    image_decoded = tf.image.decode_png(image_string, channels=3)
    # The image size is 425x425.
    image_resized = tf.image.resize_images(image_decoded, [425, 425])
    return image_resized, label

dataset = tf.contrib.data.Dataset.from_tensor_slices((filenames, labels))
dataset = dataset.map(_parse_function)
dataset = dataset.batch(30)
dataset = dataset.repeat()
iterator = dataset.make_one_shot_iterator()

# Helper functions
# INIT weights
def init_weights(shape):
    init_random_dist = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_random_dist)

# INIT Bias
def init_bias(shape):
    init_bias_vals = tf.constant(0.1, shape=shape)
    return tf.Variable(init_bias_vals)

# CONV2D
def conv2d(x, W):
    # x --> input tensor [batch, H, W, Channels]
    # W --> [filter H, filter W, Channels IN, Channels OUT]
    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')

# Pooling
def max_pooling_2by2(x):
    # x --> [batch, h, w, c]
    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')

# Convolutional layer
def convolutional_layer(input_x, shape):
    W = init_weights(shape)
    b = init_bias([shape[3]])
    return tf.nn.relu(conv2d(input_x, W) + b)

# Normal (FULLY CONNECTED)
def normal_full_layer(input_layer, size):
    input_size = int(input_layer.get_shape()[1])
    W = init_weights([input_size, size])
    b = init_bias([size])
    return tf.matmul(input_layer, W) + b

# PLACEHOLDERS
x = tf.placeholder(tf.float32, shape=[None, 180625])
y_true = tf.placeholder(tf.float32, shape=[None, 4])
# With MNIST
#x = tf.placeholder(tf.float32, shape=[None, 784])
#y_true = tf.placeholder(tf.float32, shape=[None, 10])

# Layers
x_image = tf.reshape(x, [-1, 425, 425, 1])
# With MNIST
#x_image = tf.reshape(x, [-1, 28, 28, 1])

convo_1 = convolutional_layer(x_image, shape=[5,5,1,32])
convo_1_pooling = max_pooling_2by2(convo_1)

convo_2 = convolutional_layer(convo_1_pooling, shape=[5,5,32,64])
convo_2_pooling = max_pooling_2by2(convo_2)

convo_2_flat = tf.reshape(convo_2_pooling, [-1, 7*7*64])
full_layer_one = tf.nn.relu(normal_full_layer(convo_2_flat, 1024))

# Dropout
hold_prob = tf.placeholder(tf.float32)
full_one_dropout = tf.nn.dropout(full_layer_one, keep_prob=hold_prob)

y_pred = normal_full_layer(full_one_dropout, 4)
# With MNIST
#y_pred = normal_full_layer(full_one_dropout, 10)

# LOSS function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred))

# Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train = optimizer.minimize(cross_entropy)

init = tf.global_variables_initializer()
steps = 5000

with tf.Session() as sess:
    sess.run(init)
    for i in range(steps):
        batch_x, batch_y = iterator.get_next()
        test1, test2 = sess.run([batch_x, batch_y])
        # With MNIST
        #sess.run(train, feed_dict={x:batch_x, y_true:batch_y, hold_prob:0.5})
        if i%100 == 0:
            print("ON STEP {}".format(i))
            print("Accuracy: ")
            matches = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
            accuracy = tf.reduce_mean(tf.cast(matches, tf.float32))
            # With MNIST
            #print(sess.run(accuracy, feed_dict={x:mnist.test.images, y_true:mnist.test.labels, hold_prob:1.0}))

Rank mismatch error in Tensorflow

I'm working on creating an image classifier that can differentiate between cats and dogs. I have the following code:
import cv2
import os
from tqdm import tqdm
import numpy as np
import tensorflow as tf

img_height = 128
img_width = 128
path = "./train"

# class info
file = os.listdir(path)
index = []
images = []

# image size and channels
channels = 3
n_inputs = img_width * img_height * channels

# First convolutional layer
conv1_fmaps = 96 # Number of feature maps created by this layer
conv1_ksize = 4 # kernel size 3x3
conv1_stride = 2
conv1_pad = "SAME"

# Second convolutional layer
conv2_fmaps = 192
conv2_ksize = 4
conv2_stride = 4
conv2_pad = "SAME"

# Third layer is a pooling layer
pool3_fmaps = conv2_fmaps # Isn't it obvious?

n_fc1 = 192 # Total number of output features
n_outputs = 2

with tf.name_scope("inputs"):
    X = tf.placeholder(tf.float32, shape=[None, img_width, img_height, channels], name="X")
    X_reshaped = tf.reshape(X, shape=[-1, img_height, img_width, channels])
    y = tf.placeholder(tf.int32, shape=[None, 2], name="y")

conv1 = tf.layers.conv2d(X_reshaped, filters=conv1_fmaps, kernel_size=conv1_ksize, strides=conv1_stride, padding=conv1_pad, activation=tf.nn.relu, name="conv1")
conv2 = tf.layers.conv2d(conv1, filters=conv2_fmaps, kernel_size=conv2_ksize, strides=conv2_stride, padding=conv2_pad, activation=tf.nn.relu, name="conv2")

n_epochs = 10
batch_size = 250

with tf.name_scope("pool3"):
    pool3 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
    pool3_flat = tf.reshape(pool3, shape=[-1, pool3_fmaps * 8 * 8])

with tf.name_scope("fc1"):
    fc1 = tf.layers.dense(pool3_flat, n_fc1, activation=tf.nn.relu, name="fc1")
with tf.name_scope("output"):
logits = tf.layers.dense(fc1, n_outputs, name="output")
Y_proba = tf.nn.softmax(logits, name="Y_proba")
with tf.name_scope("train"):
xentropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer()
training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
with tf.name_scope("init_and_save"):
saver = tf.train.Saver()
def next_batch(num):
index = []
images = []
# Data set Creation
print("Creating batch dataset "+str(num+1)+"...")
for f in tqdm(range(num * batch_size, (num+1)*batch_size)):
if file[f].find("dog"):
index.append(np.array([0, 1]))
else:
index.append(np.array([1, 0]))
image = cv2.imread(path + "/" + file[f])
image = cv2.resize(image, (img_width, img_height), 0, 0, cv2.INTER_LINEAR)
# image = image.astype(np.float32)
images.append(image)
images = np.array(images, dtype=np.uint8)
images = images.astype('float32')
images = images / 255
print("\nBatch "+str(num+1)+" creation finished.")
# print([images, index])
return [images, index]
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for iteration in range(25000 // batch_size):
X_batch, y_batch = next_batch(iteration)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
print(epoch, "Train accuracy:", acc_train)
save_path = saver.save(sess, "./dogvscat_mnist_model.ckpt")
But I'm getting this error:
ValueError: Rank mismatch: Rank of labels (received 2) should equal rank of logits minus 1 (received 2).
Can anyone point out the problem and help me solve it? I'm totally new to this.
For tf.nn.sparse_softmax_cross_entropy_with_logits, rank(labels) = rank(logits) - 1, so you need to redefine the labels placeholder as follows:
...
y = tf.placeholder(tf.int32, shape=[None], name="y")
...
xentropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=y)
...
X_batch, y_batch = next_batch(iteration)
y_batch = np.argmax(y_batch, axis=1)
Or you can just use tf.nn.softmax_cross_entropy_with_logits without changing the labels placeholder:
xentropy=tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=y)

Error while running a convolutional network using my own data in Tensorflow

I'm a complete beginner in using Tensorflow and machine learning in general, so there are many concepts that I still don't understand quite well, so sorry if my error is obvious. I'm trying to train my own convolutional network using my own images (optical microscopy photos) resized to 60x60, and I have only 2 labels to classify them (if the sample is positive or not). Here is my code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes

sess = tf.InteractiveSession()

# Load dataset in two lists (images and labels).
def load_data(data_dir):
    directories = [d for d in os.listdir(data_dir)
                   if os.path.isdir(os.path.join(data_dir, d))]
    labels = []
    images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f)
                      for f in os.listdir(label_dir) if f.endswith(".JPG")]
        for f in file_names:
            images.append(f)
            labels.append(int(d))
    return images, labels

# Load training and testing datasets.
ROOT_PATH = "Proyecto"
train_data_dir = os.path.join(ROOT_PATH, "Imagenes_entrenamiento")
test_data_dir = os.path.join(ROOT_PATH, "Imagenes_test")
images_train, labels_train = load_data(train_data_dir)
images_test, labels_test = load_data(test_data_dir)

# Converting training data to tensors.
timages_train = ops.convert_to_tensor(images_train, dtype=dtypes.string)
tlabels_train = ops.convert_to_tensor(labels_train, dtype=dtypes.int32)

# Converting testing data to tensors.
timages_test = ops.convert_to_tensor(images_test, dtype=dtypes.string)
tlabels_test = ops.convert_to_tensor(labels_test, dtype=dtypes.int32)

# Creation of a training queue.
num_files_train = len(images_train)
filename_train_queue = tf.train.slice_input_producer([timages_train, tlabels_train], num_epochs=None, shuffle=True, capacity=num_files_train)

# Creation of a testing queue.
num_files_test = len(images_test)
filename_test_queue = tf.train.slice_input_producer([timages_test, tlabels_test], num_epochs=None, shuffle=True, capacity=num_files_test)

# Decoding and resizing train images
raw_image_train = tf.read_file(filename_train_queue[0])
decoded_image_train = tf.image.decode_jpeg(raw_image_train, channels=3)
decoded_image_train = tf.cast(decoded_image_train, tf.float32)
resized_train_image = tf.image.resize_images(decoded_image_train, [60, 60])

# Decoding and resizing test images
raw_image_test = tf.read_file(filename_test_queue[0])
decoded_image_test = tf.image.decode_jpeg(raw_image_test, channels=3)
decoded_image_test = tf.cast(decoded_image_test, tf.float32)
resized_test_image = tf.image.resize_images(decoded_image_test, [60, 60])

# Extracting training and testing labels.
label_train_queue = filename_train_queue[1]
label_test_queue = filename_test_queue[1]

# Training batch.
batch_size_train = 5
image_train_batch, label_train_batch = tf.train.batch([resized_train_image, label_train_queue], batch_size_train)

# Testing batch.
batch_size_test = 2
image_test_batch, label_test_batch = tf.train.batch([resized_test_image, label_test_queue], batch_size_test)

# General model
x = tf.placeholder(tf.float32, shape=[None, 60, 60, 3])
y_ = tf.placeholder(tf.int32, shape=[None])
keep_prob = tf.placeholder(tf.float32)

# Weights and biases
dense_w = {
    "w_conv1": tf.Variable(tf.truncated_normal([5,5,3,32], stddev=0.1), name="w_conv1"),
    "b_conv1": tf.Variable(tf.constant(0.1, shape=[32]), name="b_conv1"),
    "w_conv2": tf.Variable(tf.truncated_normal([5,5,32,64], stddev=0.1), name="w_conv2"),
    "b_conv2": tf.Variable(tf.constant(0.1, shape=[64]), name="b_conv2"),
    "w_fc1": tf.Variable(tf.truncated_normal([15*15*64,1024], stddev=0.1), name="w_fc1"),
    "b_fc1": tf.Variable(tf.constant(0.1, shape=[1024]), name="b_fc1"),
    "w_fc2": tf.Variable(tf.truncated_normal([1024,2], stddev=0.1), name="w_fc2"),
    "b_fc2": tf.Variable(tf.constant(0.1, shape=[2]), name="b_fc2")
}

# CNN model
def dense_cnn_model(weights):
    def conv2d(x, W):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    def max_pool_2x2(x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    x_image = tf.reshape(x, [-1, 60, 60, 3])
    h_conv1 = tf.nn.relu(conv2d(x_image, weights["w_conv1"]) + weights["b_conv1"])
    h_pool1 = max_pool_2x2(h_conv1)
    h_conv2 = tf.nn.relu(conv2d(h_pool1, weights["w_conv2"]) + weights["b_conv2"])
    h_pool2 = max_pool_2x2(h_conv2)
    h_pool2_flat = tf.reshape(h_pool2, [-1, 15*15*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, weights["w_fc1"]) + weights["b_fc1"])
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, weights["w_fc2"]) + weights["b_fc2"])
    return y_conv

y_conv = dense_cnn_model(dense_w)

cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y_conv, labels=tf.squeeze(y_)))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_,))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init_op = tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())

with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    ## Training:
    for i in range(50):
        image_train_batch_eval, label_train_batch_eval = image_train_batch.eval(), label_train_batch.eval()
        if i % 2 == 0:
            train_accuracy = accuracy.eval(feed_dict={x: image_train_batch_eval, y_: label_train_batch_eval, keep_prob: 0.5})
            print('Paso %d, Precisión de entrenamiento: %g' % (i, train_accuracy))
        train_step.run(feed_dict={x: image_train_batch_eval, y_: label_train_batch_eval, keep_prob: 0.5})

    ## Testing
    image_test_batch_eval, label_test_batch_eval = image_test_batch.eval(), label_test_batch.eval()
    print('Precisión de evaluación: %g' % accuracy.eval(feed_dict={x: image_test_batch_eval, y_: label_test_batch_eval, keep_prob: 1.0}))

    coord.request_stop()
    coord.join(threads)
EDIT:
The code is corrected.
You need to pass enqueue_many=True to tf.train.batch to indicate that you are enqueuing multiple examples at once, otherwise it will treat it as a single example with many features.
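In code, the change described would just be the extra keyword argument, e.g. for the training batch (a sketch; whether it is needed depends on whether the enqueued tensors already carry a batch dimension):
# Tell tf.train.batch that the input tensors already represent a batch of
# examples rather than a single example.
image_train_batch, label_train_batch = tf.train.batch(
    [resized_train_image, label_train_queue],
    batch_size_train,
    enqueue_many=True)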
