Multi-variable linear regression using Tensorflow - python-3.x

I am trying to implement multi-variable linear regression using TensorFlow. I have a CSV file with 200 rows and three feature columns, with the last column as the output.
I have written the following code:
from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import csv
import pandas
rng = np.random
# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
I get the data from the file using pandas and store it:
# Training Data
dataframe = pandas.read_csv("Advertising.csv", delim_whitespace=True, header=None)
dataset = dataframe.values
X1,X2,X3,y1 = [],[],[],[]
for i in range(1,len(dataset)):
X = dataset[i][0]
X1.append(np.float32(X.split(",")[1]))
X2.append(np.float32(X.split(",")[2]))
X3.append(np.float32(X.split(",")[3]))
y1.append(np.float32(X.split(",")[4]))
X = np.column_stack((X1,X2))
X = np.column_stack((X,X3))
I assign the placeholders and variables and the linear regression model:
n_samples = len(X1)
#print(n_samples) = 17
# tf Graph Input
X_1 = tf.placeholder(tf.float32, [3, None])
Y = tf.placeholder(tf.float32, [None])
# Set model weights
W1 = tf.Variable(rng.randn(), [n_samples,3])
b = tf.Variable(rng.randn(), [n_samples])
# Construct a linear model
pred = tf.add(tf.matmul(W1, X_1), b)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x1, y) in zip(X, y1):
sess.run(optimizer, feed_dict={X_1: x1, Y: y})
# Display logs per epoch step
if (epoch+1) % display_step == 0:
c = sess.run(cost, feed_dict={X_1: x1, Y: y})
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
"Weights=", sess.run(W1),"b=", sess.run(b))
I get the following error which I am not able to debug:
ValueError: Shape must be rank 2 but is rank 0 for 'MatMul' (op:
'MatMul') with input shapes: [], [3,?].
Can you help me with how to solve this?
Thanks in advance.

tf.Variable doesn't take arguments the way you are thinking; the second parameter is not the shape. To set the shape of the variable, you do this with the initializer (the first parameter). See https://www.tensorflow.org/api_docs/python/tf/Variable
Your code
# Set model weights
W1 = tf.Variable(rng.randn(), [n_samples,3])
b = tf.Variable(rng.randn(), [n_samples])
My suggested change
initial1 = tf.constant(rng.randn(), dtype=tf.float32, shape=[n_samples,3])
initial2 = tf.constant(rng.randn(), dtype=tf.float32, shape=[n_samples])
W1 = tf.Variable(initial_value=initial1)
b = tf.Variable(initial_value=initial2)
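Note that rng.randn() returns a single scalar, so tf.constant above broadcasts that one value to every element. If you want a different random value per element, a minimal alternative sketch (assuming TF 1.x) lets the initializer carry both the shape and the randomness:
initial1 = tf.random_normal([n_samples, 3], dtype=tf.float32)  # one random value per element
initial2 = tf.random_normal([n_samples], dtype=tf.float32)
W1 = tf.Variable(initial_value=initial1)
b = tf.Variable(initial_value=initial2)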
In answer to the additional issues which arise after fixing the initial question, the following code runs - but there may still be some logical errors you need to think about, like your # Display logs per epoch step.
from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import csv
import pandas
rng = np.random
# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
# Training Data
#Created some fake data
dataframe = [[230.1,37.8,69.2,22.1],[2230.1,32.8,61.2,21.1]] #pandas.read_csv("Advertising.csv", delim_whitespace=True, header=None)
dataset = dataframe
X1,X2,X3,y1 = [],[],[],[]
for i in range(0,len(dataset)):
X = dataset[i][0]
X1.append(np.float32(dataset[i][0]))
X2.append(np.float32(dataset[i][1]))
X3.append(np.float32(dataset[i][2]))
y1.append(np.float32(dataset[i][3]))
#X=np.array([X1,X2,X3])
X = np.column_stack((X1,X2,X3)) ##MYEDIT: This combines all three values. If you find you need to stack in a different way then you will need to ensure the shapes below match this shape.
#X = np.column_stack((X,X3))
n_samples = len(X1)
#print(n_samples) = 17
# tf Graph Input
X_1 = tf.placeholder(tf.float32, [ None,3])##MYEDIT: Changed order
Y = tf.placeholder(tf.float32, [None])
# Set model weights
initial1 = tf.constant(rng.randn(), dtype=tf.float32, shape=[3,1]) ###MYEDIT: change order and you are only giving 1 sample at a time with your method of calling
initial2 = tf.constant(rng.randn(), dtype=tf.float32, shape=[3,1])
W1 = tf.Variable(initial_value=initial1)
b = tf.Variable(initial_value=initial2)
mul=tf.matmul(W1, X_1) ##MYEDIT: remove matmul from pred for clarity and shape checking
# Construct a linear model
pred = tf.add(mul, b)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x1, y) in zip(X, y1):
Xformatted=np.array([x1]) #has shape (1,3) #MYEDIT: separated this to demonstrate shapes
yformatted=np.array([y]) #shape (1,) #MYEDIT: separated this to demonstrate shapes
#NB. X_1 shape is (?,3) and Y shape is (?,)
sess.run(optimizer, feed_dict={X_1: Xformatted, Y: yformatted})
# Display logs per epoch step
if (epoch+1) % display_step == 0:
c = sess.run(cost, feed_dict={X_1: Xformatted, Y: yformatted}) #NB. x1 and y still hold the values from the last inner-loop iteration here - you will only get the last values. Double check if this is what you meant.
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
"Weights=", sess.run(W1),"b=", sess.run(b))

You need to feed a matrix into tf.matmul(W1, X_1). Check the shapes (ranks) of your W1 and X_1 in your code.
See the question here for more details
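To see the rank requirement concretely, a tiny sketch (assuming TF 1.x):
a = tf.constant(2.0)                        # rank 0 (a scalar) - tf.matmul(a, m) raises the rank error above
m = tf.placeholder(tf.float32, [3, None])   # rank 2
a2 = tf.fill([1, 3], 2.0)                   # rank 2 - tf.matmul(a2, m) is valid and has shape (1, ?)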

Related

A Traceback while drawing the trajectory of gradient descent in 2D using TensorFlow

I want to draw the trajectory of gradient descent in 2D. Unfortunately, I had the following traceback:
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_1' with dtype float and shape [?,1]
I am using TensorFlow v1.13.1 with Python v3.6.7 on Google Colab.
From the below code, I found that the variable target is of class <tf.Tensor 'Placeholder_1:0' shape=(?, 1) dtype=float32>.
I tried to feed it as mentioned, feed_dict={features: x, target: y}, but I still get the same traceback.
Here is the code I used for this task:
## BLOCK 1
import tensorflow as tf
import numpy as np
from matplotlib import animation, rc
import matplotlib_utils
from IPython.display import HTML, display_html
import matplotlib.pyplot as plt
%matplotlib inline
## BLOCK 2
tf.reset_default_graph()
# generate model data
N = 1000
D = 3
x = np.random.random((N, D))
w = np.random.random((D, 1))
y = x @ w + np.random.randn(N, 1) * 0.20
## Deep Learning steps:
# 1. Get input (features) and true output (target)
features = tf.placeholder(tf.float32, shape=(None, D))
target = tf.placeholder(tf.float32, shape=(None, 1))
weights = tf.get_variable("weights", shape=(D, 1), dtype=tf.float32)
# 2. Compute the "guess" (predictions) based on the features and weights
predictions = features @ weights
# 3. Compute the loss based on the difference between the predictions and the target
loss = tf.reduce_mean((target - predictions) ** 2)
# 4. Update the weights (parameters) based on the gradient descent of the loss
optimizer = tf.train.GradientDescentOptimizer(0.1)
step = optimizer.minimize(loss)
s = tf.Session()
s.run(tf.global_variables_initializer())
_, curr_loss, curr_weights = s.run([step, loss, weights],
feed_dict={features: x, target: y})
I am expecting that the following code will run properly (traceback raised while running this code):
## BLOCK 3
# nice figure settings
fig, ax = plt.subplots()
y_true_value = s.run(target)
level_x = np.arange(0, 2, 0.02)
level_y = np.arange(0, 3, 0.02)
X, Y = np.meshgrid(level_x, level_y)
Z = (X - y_true_value[0])**2 + (Y - y_true_value[1])**2
ax.set_xlim(-0.02, 2)
ax.set_ylim(-0.02, 3)
s.run(tf.global_variables_initializer())
ax.scatter(*s.run(target), c='red')
contour = ax.contour(X, Y, Z, 10)
ax.clabel(contour, inline=1, fontsize=10)
line, = ax.plot([], [], lw=2)
# start animation with empty trajectory
def init():
line.set_data([], [])
return (line,)
trajectory = [s.run(predictions)]
# one animation step (make one GD step)
def animate(i):
s.run(step)
trajectory.append(s.run(predictions))
line.set_data(*zip(*trajectory))
return (line,)
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=100, interval=20, blit=True)
Note: the library matplotlib_utils can be found here!
Example
This is an example in which the code runs perfectly.
If I run the following code instead of the second block, it displays a beautiful gradient descent in 2D.
y_guess = tf.Variable(np.zeros(2, dtype='float32'))
y_true = tf.range(1, 3, dtype='float32')
loss = tf.reduce_mean((y_guess - y_true + 0.5*tf.random_normal([2]))**2)
optimizer = tf.train.RMSPropOptimizer(0.03, 0.5)
step = optimizer.minimize(loss, var_list=y_guess)
This trajectory is something like this:
Adding this block of code will display an auto-generated animation of the trajectory:
## BLOCK 4
try:
display_html(HTML(anim.to_html5_video()))
except (RuntimeError, KeyError):
# In case the built-in renderers are unavailable, fall back to
# a custom one that doesn't require external libraries
anim.save(None, writer=matplotlib_utils.SimpleMovieWriter(0.001))
Now I want to use my own code (second block of code) to draw such trajectory of gradient descent in 2D.
Pass the feed_dict argument to tf.Session.run. Example:
s.run([step, loss, weights], feed_dict={features: x, target: y})
Explanation:
When an operation in the computation graph depends on a placeholder, a value for that placeholder must be provided at run time. Operations like s.run(tf.global_variables_initializer()) do not depend on the placeholders, so not feeding them does not raise an error.
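A minimal sketch of the rule (assuming TF 1.x and an open session s):
p = tf.placeholder(tf.float32, shape=(None, 1))
doubled = 2.0 * p
s.run(doubled)                                    # InvalidArgumentError: placeholder was not fed
s.run(doubled, feed_dict={p: [[1.0], [2.0]]})     # works, returns [[2.], [4.]]
s.run(tf.global_variables_initializer())          # works without a feed: it does not depend on p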

How to use Tensorflow to compute per-sample gradient and reduce them with an arbitrary function?

Given a (mini-)batch of size M, I want to acquire M gradients (one for each sample) and reduce them to a single gradient with an arbitrary function (instead of the typical mean). Then, I want to use this last gradient to train the network with.
I have the example below which somewhat works. The problem is that not only does it get slower and slower as iterations go by, but the program also keeps using more and more memory. As far as I can tell, this happens because I'm adding new operations to TensorFlow's computational graph on each iteration, but I lack the TensorFlow knowledge to avoid doing this and still achieve my objective.
import sys
import numpy as np
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.0001
n_iterations = 1000000
batch_size = 2
display_step = 1000
# Network Parameters
n_hidden_1 = 256
n_hidden_2 = 256
n_input = 784
n_output = 10
# Create model
def build_network(x, weights, biases):
assert sys.version_info[:2] >= (3,6) # otherwise iteration on dict is ill-defined
layer = x
for (w_str,w),(b_str,b) in zip(weights.items(), biases.items()):
print(w_str,b_str)
layer = tf.add(tf.matmul(layer, w), b)
return layer
def max_gradient(gradients):
assert len(gradients) >= 1
# compute the resulting gradient, by choosing the (abs) max component wise
tgv = gradients[0]
for gv in gradients[1:]:
for (tg,tv),(g,v) in zip(tgv,gv):
assert (tv == v).all()
np.copyto(tg,g,where=abs(g) > abs(tg))
return tgv
def main(sess):
# tf Graph input/output
X = tf.placeholder('float', [None, n_input])
Y = tf.placeholder('float', [None, n_output])
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_output]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_output]))
}
# Construct model
model = build_network(X, weights, biases)
# Loss
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))
# Optimizer
opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# Gradient
gv_op = opt.compute_gradients(loss_op)
# Trainable variables
t_vars = tf.trainable_variables()
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for i in range(n_iterations):
# Get new training data
X_train, Y_train = mnist.train.next_batch(batch_size)
# Run the cost operation (to get the loss value)
c = sess.run(loss_op, feed_dict={X: X_train, Y: Y_train})
print(i, c)
# get the gradients for each batch sample
gradients = [sess.run(gv_op,
feed_dict={X: X_train[[j]], Y: Y_train[[j]]})
for j in range(batch_size)]
# compute the resulting gradient
tgv = max_gradient(gradients)
# assert that all the variables match
for (_,v1),v2 in zip(tgv,t_vars):
assert (v1 == sess.run(v2)).all()
# place the actual variables for the variable slots
tgv = [(g,v) for (g,_),v in zip(tgv, t_vars)]
# apply the transformation
sess.run(opt.apply_gradients(tgv))
if __name__ == '__main__':
with tf.Session() as sess:
main(sess)
How can I compute per-sample gradient and reduce them with an arbitrary function without constantly adding new operations to Tensorflow's graph?
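A sketch of one common TF 1.x pattern for this (not from this thread): the graph grows because opt.apply_gradients(tgv) creates new ops on every iteration. Building the apply op once, with placeholders for the reduced gradients, keeps the graph fixed:
grad_phs = [(tf.placeholder(tf.float32, shape=v.get_shape()), v)
            for v in tf.trainable_variables()]
apply_op = opt.apply_gradients(grad_phs)          # built once, before the training loop
# inside the loop: feed the reduced per-sample gradients through the placeholders
feed = {ph: g for (ph, _), (g, _) in zip(grad_phs, tgv)}
sess.run(apply_op, feed_dict=feed)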

Input size (depth of inputs) must be accessible via shape inference, but saw value None error when trying to set tf.expand_dims axis to 0

I am trying to use the 20 Newsgroups dataset available in sklearn to train an LSTM to do incremental learning (classification). I used sklearn's TfidfVectorizer to pre-process the data, then turned the resulting sparse matrix into a numpy array before feeding it. After that, when coding the line below:
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs_, initial_state=initial_state)
It gave an error saying that 'inputs_' should have 3 dimensions, so I used:
inputs_ = tf.expand_dims(inputs_, 0)
to expand the dimensions. But when I do that, I get the error:
ValueError: Input size (depth of inputs) must be accessible via shape
inference, but saw value None.
The shape of 'inputs_' is:
(1, 134410)
I already went through this post, but it did not help.
I cannot seem to understand how to solve this issue. Any help is much appreciated. Thank you in advance!
Shown below is my complete code:
import os
from collections import Counter
import tensorflow as tf
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.datasets import fetch_20newsgroups
import matplotlib as mplt
from matplotlib import cm
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.metrics import f1_score, recall_score, precision_score
from string import punctuation
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
def pre_process():
newsgroups_data = fetch_20newsgroups(subset='all', remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer()
features = vectorizer.fit_transform(newsgroups_data.data)
lb = LabelBinarizer()
labels = np.reshape(newsgroups_data.target, [-1])
labels = lb.fit_transform(labels)
return features, labels
def get_batches(x, y, batch_size=1):
for ii in range(0, len(y), batch_size):
yield x[ii:ii + batch_size], y[ii:ii + batch_size]
def plot_error(errorplot, datapoint, numberOfWrongPreds):
errorplot.set_xdata(np.append(errorplot.get_xdata(), datapoint))
errorplot.set_ydata(np.append(errorplot.get_ydata(), numberOfWrongPreds))
errorplot.autoscale(enable=True, axis='both', tight=None)
plt.draw()
def train_test():
features, labels = pre_process()
#Defining Hyperparameters
epochs = 1
lstm_layers = 1
batch_size = 1
lstm_size = 30
learning_rate = 0.003
print(lstm_size)
print(batch_size)
print(epochs)
#--------------placeholders-------------------------------------
# Create the graph object
graph = tf.Graph()
# Add nodes to the graph
with graph.as_default():
tf.set_random_seed(1)
inputs_ = tf.placeholder(tf.float32, [None,None], name = "inputs")
# labels_ = tf.placeholder(dtype= tf.int32)
labels_ = tf.placeholder(tf.int32, [None,None], name = "labels")
#getting dynamic batch size according to the input tensor size
# dynamic_batch_size = tf.shape(inputs_)[0]
#output_keep_prob is the dropout added to the RNN's outputs, the dropout will have no effect on the calculation of the subsequent states.
keep_prob = tf.placeholder(tf.float32, name = "keep_prob")
# Your basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
#Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
# Getting an initial state of all zeros
initial_state = cell.zero_state(batch_size, tf.float32)
inputs_ = tf.expand_dims(inputs_, 0)
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs_, initial_state=initial_state)
#hidden layer
hidden = tf.layers.dense(outputs[:, -1], units=25, activation=tf.nn.relu)
logit = tf.contrib.layers.fully_connected(hidden, 1, activation_fn=None)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=labels_))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
saver = tf.train.Saver()
# ----------------------------online training-----------------------------------------
with tf.Session(graph=graph) as sess:
tf.set_random_seed(1)
sess.run(tf.global_variables_initializer())
iteration = 1
state = sess.run(initial_state)
wrongPred = 0
errorplot, = plt.plot([], [])
for ii, (x, y) in enumerate(get_batches(features, labels, batch_size), 1):
feed = {inputs_: x.toarray(),
labels_: y,
keep_prob: 0.5,
initial_state: state}
predictions = tf.round(tf.nn.softmax(logit)).eval(feed_dict=feed)
print("----------------------------------------------------------")
print("Iteration: {}".format(iteration))
print("Prediction: ", predictions)
print("Actual: ",y)
pred = np.array(predictions)
print(pred)
print(y)
if not ((pred==y).all()):
wrongPred += 1
if ii % 27 == 0:
plot_error(errorplot,ii,wrongPred)
loss, states, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
print("Train loss: {:.3f}".format(loss))
iteration += 1
saver.save(sess, "checkpoints/sentiment.ckpt")
errorRate = wrongPred/len(labels)
print("ERROR RATE: ", errorRate )
if __name__ == '__main__':
train_test()
ValueError: Input size (depth of inputs) must be accessible via shape inference, but saw value None.
This error is raised because you don't specify the size or the number of inputs.
I got the script working like this:
inputs_ = tf.placeholder(tf.float32, [1,None], name = "inputs")
inputs_withextradim = tf.expand_dims(inputs_, 2)
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs_withextradim, initial_state=initial_state)
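A quick way to see the resulting shapes (a sketch, assuming batch_size = 1 as in the question):
inputs_ = tf.placeholder(tf.float32, [1, None], name="inputs")
inputs_withextradim = tf.expand_dims(inputs_, 2)
print(inputs_withextradim.get_shape())   # (1, ?, 1) - [batch, time steps, input depth], and the depth is now known
With the depth dimension fixed at 1, tf.nn.dynamic_rnn can infer the input size it needs.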

TensorFlow error: Shape must be rank 2 but is rank 1 for 'MatMul' (op: 'MatMul') with input shapes: [?], [1024]

The problem is that I have a single-feature input and its shape is one-dimensional. I get the data from a stored file which has a date and a "Delhi" feature. These features are meant to be run through the system. Here is the program:
import matplotlib.pyplot as plt
import numpy as np
import json,pickle
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
sc = MinMaxScaler()
def initialiseDeep(X_train, X_test, y_train, y_test):
# Model architecture parameters
n_stocks = 1
n_neurons_1 = 1024
n_neurons_2 = 512
n_neurons_3 = 256
n_neurons_4 = 128
n_target = 1
# Placeholder
X = tf.placeholder(dtype=tf.float32,shape=[None])
Y = tf.placeholder(dtype=tf.float32,shape=[None])
# Initializers
sigma = 1
weight_initializer = tf.variance_scaling_initializer(mode="fan_avg", distribution="uniform", scale=sigma)
bias_initializer = tf.zeros_initializer()
# Layer 1: Variables for hidden weights and biases
W_hidden_1 = tf.Variable(weight_initializer([n_stocks, n_neurons_1]))
bias_hidden_1 = tf.Variable(bias_initializer([n_neurons_1]))
# Layer 2: Variables for hidden weights and biases
W_hidden_2 = tf.Variable(weight_initializer([n_neurons_1, n_neurons_2]))
bias_hidden_2 = tf.Variable(bias_initializer([n_neurons_2]))
# Layer 3: Variables for hidden weights and biases
W_hidden_3 = tf.Variable(weight_initializer([n_neurons_2, n_neurons_3]))
bias_hidden_3 = tf.Variable(bias_initializer([n_neurons_3]))
# Layer 4: Variables for hidden weights and biases
W_hidden_4 = tf.Variable(weight_initializer([n_neurons_3, n_neurons_4]))
bias_hidden_4 = tf.Variable(bias_initializer([n_neurons_4]))
# Output layer: Variables for output weights and biases
W_out = tf.Variable(weight_initializer([n_neurons_4, n_target]))
bias_out = tf.Variable(bias_initializer([n_target]))
# Hidden layer
hidden_1 = tf.nn.relu(tf.add(tf.matmul(X, W_hidden_1), bias_hidden_1))
hidden_2 = tf.nn.relu(tf.add(tf.matmul(hidden_1, W_hidden_2), bias_hidden_2))
hidden_3 = tf.nn.relu(tf.add(tf.matmul(hidden_2, W_hidden_3), bias_hidden_3))
hidden_4 = tf.nn.relu(tf.add(tf.matmul(hidden_3, W_hidden_4), bias_hidden_4))
# Output layer (must be transposed)
out = tf.transpose(tf.add(tf.matmul(hidden_4, W_out), bias_out))
# Cost function
mse = tf.reduce_mean(tf.squared_difference(out, Y))
# Optimizer
opt = tf.train.AdamOptimizer().minimize(mse)
# Make Session
net = tf.Session()
# Run initializer
net.run(tf.global_variables_initializer())
# Setup interactive plot
plt.ion()
fig = plt.figure()
ax1 = fig.add_subplot(111)
line1, = ax1.plot(y_test)
line2, = ax1.plot(y_test*0.5)
plt.show()
# Number of epochs and batch size
epochs = 10
batch_size = 15
for e in range(epochs):
# Shuffle training data
shuffle_indices = np.random.permutation(np.arange(len(y_train)))
X_train = X_train[shuffle_indices]
y_train = y_train[shuffle_indices]
# Minibatch training
for i in range(0, len(y_train) // batch_size):
start = i * batch_size
batch_x = X_train[start:start + batch_size]
batch_y = y_train[start:start + batch_size]
# Run optimizer with batch
net.run(opt, feed_dict={X: batch_x, Y: batch_y})
# Show progress
if np.mod(i, 5) == 0:
# Prediction
pred = net.run(out, feed_dict={X: X_test})
line2.set_ydata(pred)
plt.title('Epoch ' + str(e) + ', Batch ' + str(i))
file_name = 'img/epoch_' + str(e) + '_batch_' + str(i) + '.jpg'
plt.savefig(file_name)
plt.pause(0.01)
# Print final MSE after Training
mse_final = net.run(mse, feed_dict={X: X_test, Y: y_test})
print(mse_final)
file_Name = "DataSet"
fileObject = open(file_Name,'rb')
Data=pickle.load(fileObject)
JSON=json.loads(Data)
X=[]
y=[]
DataSet=dict(JSON)
for i in range(1,DataSet['Totaldata']+1):
X.append(int(DataSet[str(i)]['dateFloat']))
y.append(float(DataSet[str(i)]['Delhi']))
test_len=20
X=np.asarray(X)
y=np.asarray(y)
X_train, X_test, y_train, y_test=np.asarray(X[:len(X)-test_len]),np.asarray(X[len(X)-test_len:]),np.asarray(y[:len(X)-test_len]),np.asarray(y[len(X)-test_len:])
initialiseDeep(X_train, X_test, y_train, y_test)
But when I run this I get the following error:
raise ValueError(err.message)
ValueError: Shape must be rank 2 but is rank 1 for 'MatMul' (op: 'MatMul')
with input shapes: [?], [1024].
The cause of the error is this line:
hidden_1 = tf.nn.relu(tf.add(tf.matmul(X, W_hidden_1), bias_hidden_1))
Can someone help me fix this? I am a newbie at deep learning.
tf.matmul takes inputs of rank >= 2.
So, you can expand the inputs to two dimensions using tf.expand_dims().
Some examples from the documentation:
If 't' is a tensor of shape [2]:
1. tf.shape(tf.expand_dims(t, 0)) ## Now the shape is [1, 2]
2. tf.shape(tf.expand_dims(t, 1)) ## Now the shape is [2, 1]
reference: https://www.tensorflow.org/api_docs/python/tf/expand_dims
As stated in the TensorFlow documentation (https://www.tensorflow.org/api_docs/python/tf/matmul):
tf.matmul(matrix1, matrix2) is a matrix multiplication; it only works on inputs of rank at least 2, and the number of columns of matrix1 must equal the number of rows of matrix2.
Check the shapes of the two matrices using X.shape and W_hidden_1.shape. As I understand the error, they do not follow the rule above. You can also use tf.transpose to obtain the requisite dimensionality.
Cheers
Check your tensors. Find the tensor with shape (1222,) and perform the following operation:
X = tf.expand_dims(X, 1)
It will convert it to shape (1222, 1), and you are good to go.
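Applied to this question, a sketch (assuming the single "Delhi" feature, i.e. n_stocks = 1) of expanding the rank-1 input before the first matmul:
X = tf.placeholder(dtype=tf.float32, shape=[None])
X2 = tf.expand_dims(X, 1)   # shape (?, 1)
hidden_1 = tf.nn.relu(tf.add(tf.matmul(X2, W_hidden_1), bias_hidden_1))   # (?, 1) x (1, 1024) -> (?, 1024)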

How do I obtain predictions and probabilities from new data input to a CNN in Tensorflow

I'll preface this by saying this is my first posted question on SO. I've just recently started working with Tensorflow, and have been attempting to apply a convolutional-neural network model approach for classification of .csv records in a file representing images from scans of microarray data. (FYI: Microarrays are a grid of spotted DNA on a glass slide, representing specific DNA target sequences for determining the presence of those DNA targets in a sample. The individual pixels represent fluorescence intensity value from 0-1). The file has ~200,000 records in total. Each record (image) has 10816 pixels that represent DNA sequences from known viruses, and one index label which identifies the virus species. The pixels create a pattern which is unique to each of the different viruses. There are 2165 different viruses in total represented within the 200,000 records. I have trained the network on images of labeled microarray datasets, but when I try to pass a new dataset through to classify it/them as one of the 2165 different viruses and determine predicted values and probabilities, I don't seem to be having much luck. This is the code that I am currently using for this:
import tensorflow as tf
import numpy as np
import csv
def extract_data(filename):
print("extracting data...")
NUM_LABELS = 2165
NUM_FEATURES = 10816
labels = []
fvecs = []
rowCount = 0
#iterate over the rows, split the label from the features
#convert the labels to integers and features to floats
for line in open(filename):
rowCount = rowCount + 1
row = line.split(',')
labels.append(row[3])#(int(row[7])) #<<<IT ALWAYS PREDICTS THIS VALUE!
for x in row [4:10820]:
fvecs.append(float(x))
#convert the array of float arrasy into a numpy float matrix
fvecs_np = np.matrix(fvecs).astype(np.float32)
#convert the array of int lables inta a numpy array
labels_np = np.array(labels).astype(dtype=np.uint8)
#convert the int numpy array into a one-hot matrix
labels_onehot = (np.arange(NUM_LABELS) == labels_np[:, None]).astype(np.float32)
print("arrays converted")
return fvecs_np, labels_onehot
def TestModels():
fvecs_np, labels_onehot = extract_data("MicroarrayTestData.csv")
print('RESTORING NN MODEL')
weights = {}
biases = {}
sess=tf.Session()
init = tf.global_variables_initializer()
#Load meta graph and restore weights
ModelID = "MicroarrayCNN_Data-1000.meta"
print("RESTORING:::", ModelID)
saver = tf.train.import_meta_graph(ModelID)
saver.restore(sess,tf.train.latest_checkpoint('./'))
graph = tf.get_default_graph()
x = graph.get_tensor_by_name("x:0")
y = graph.get_tensor_by_name("y:0")
keep_prob = tf.placeholder(tf.float32)
y_ = tf.placeholder("float", shape=[None, 2165])
wc1 = graph.get_tensor_by_name("wc1:0")
wc2 = graph.get_tensor_by_name("wc2:0")
wd1 = graph.get_tensor_by_name("wd1:0")
Wout = graph.get_tensor_by_name("Wout:0")
bc1 = graph.get_tensor_by_name("bc1:0")
bc2 = graph.get_tensor_by_name("bc2:0")
bd1 = graph.get_tensor_by_name("bd1:0")
Bout = graph.get_tensor_by_name("Bout:0")
weights = {wc1, wc2, wd1, Wout}
biases = {bc1, bc2, bd1, Bout}
print("NEXTArgmax")
prediction=tf.argmax(y,1)
probabilities = y
predY = prediction.eval(feed_dict={x: fvecs_np, y: labels_onehot}, session=sess)
probY = probabilities.eval(feed_dict={x: fvecs_np, y: labels_onehot}, session=sess)
accuracy = tf.reduce_mean(tf.cast(prediction, "float"))
print(sess.run(accuracy, feed_dict={x: fvecs_np, y: labels_onehot}))
print("%%%%%%%%%%%%%%%%%%%%%%%%%%")
print("Predicted::: ", predY, accuracy)
print("%%%%%%%%%%%%%%%%%%%%%%%%%%")
feed_dictTEST = {y: labels_onehot}
probabilities=probY
print("probabilities", probabilities.eval(feed_dict={x: fvecs_np}, session=sess))
########## Run Analysis ###########
TestModels()
So, when I run this code I get the correct prediction for the test set, although I am not sure I believe it, because it appears that whatever value I append in line 14 (see below) is the output it predicts:
labels.append(row[3])#<<<IT ALWAYS PREDICTS THIS VALUE!
I don't understand this, and it makes me suspicious that I've set up the CNN incorrectly, as I would have expected it to ignore my input label and determine a best match from the trained network based on the trained patterns. The only thing I can figure is that when I pass the value through for the prediction, it is instead training the model on this data as well, and then predicting itself. Is this a correct assumption, or am I misinterpreting how TensorFlow works?
The other issue is that when I try to use code (based on other tutorials) which is supposed to output the probabilities of all of the 2165 possible outputs, I get the error:
InvalidArgumentError (see above for traceback): Shape [-1,2165] has negative dimensions
[[Node: y = Placeholder[dtype=DT_FLOAT, shape=[?,2165], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
To me, it looks like it is the correct layer based on the 2165 value in the Tensor shape, but I don't understand the -1 value. So, to wrap up the summary, my questions are:
Based on the fact that I get the value that I have in the label of the input data, is this the correct method to make a classification using this model?
Am I missing a layer or have I configured the model incorrectly in order to extract the probabilities of all of the possible output classes, or am I using the wrong code to extract the information? I try to print out the accuracy to see if that would work, but instead it outputs the description of a tensor, so clearly that is incorrect as well.
(ADDITIONAL INFORMATION)
As requested, I'm also including the original code that was used to train the model, which is now below. You can see I do a sort of piecemeal training on a limited number of related records at a time, grouped by their taxonomic relationships, as I iterate through the file. This is mostly because the Mac that I'm training on (Mac Pro w/ 64GB RAM) tends to give me the "Killed -9" error due to overuse of resources if I don't do it this way. There may be a better way to do it, but this seems to work.
Original Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
from __future__ import print_function
import tensorflow as tf
import numpy as np
import csv
import random
# Parameters
num_epochs = 2
train_size = 1609
learning_rate = 0.001 #(larger >speed, lower >accuracy)
training_iters = 5000 # How much do you want to train (more = better trained)
batch_size = 32 #How many samples to train on, size of the training batch
display_step = 10 # How often to diplay what is going on during training
# Network Parameters
n_input = 10816 # MNIST data input (img shape: 28*28)...in my case 104x104 = 10816(rough array size)
n_classes = 2165 #3280 #2307 #787# Switched to 100 taxa/training set, dynamic was too wonky.
dropout = 0.75 # Dropout, probability to keep units. Jeffery Hinton's group developed it, that prevents overfitting to find new paths. More generalized model.
# Functions
def extract_data(filename):
print("extracting data...")
# arrays to hold the labels and feature vectors.
NUM_LABELS = 2165
NUM_FEATURES = 10826
taxCount = 0
taxCurrent = 0
labels = []
fvecs = []
rowCount = 0
#iterate over the rows, split the label from the features
#convert the labels to integers and features to floats
print("entering CNN loop")
for line in open(filename):
rowCount = rowCount + 1
row = line.split(',')
taxCurrent = row[3]
print("profile:", row[0:12])
labels.append(int(row[3]))
fvecs.append([float(x) for x in row [4:10820]])
#convert the array of float arrasy into a numpy float matrix
fvecs_np = np.matrix(fvecs).astype(np.float32)
#convert the array of int lables inta a numpy array
labels_np = np.array(labels).astype(dtype=np.uint8)
#convert the int numpy array into a one-hot matrix
labels_onehot = (np.arange(NUM_LABELS) == labels_np[:, None]).astype(np.float32)
print("arrays converted")
return fvecs_np, labels_onehot
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1): #Layer 1 : Convolutional layer
# Conv2D wrapper, with bias and relu activation
print("conv2d")
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME') # Strides are the tensors...list of integers. Tensors=data
x = tf.nn.bias_add(x, b) #bias is the tuning knob
return tf.nn.relu(x) #rectified linear unit (activation function)
def maxpool2d(x, k=2): #Layer 2 : Takes samples from the image. (This is a 4D tensor)
print("maxpool2d")
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
def conv_net(x, weights, biases, dropout):
print("conv_net setup")
# Reshape input picture
x = tf.reshape(x, shape=[-1, 104, 104, 1]) #-->52x52 , -->26x26x64
# Convolution Layer
conv1 = conv2d(x, weights['wc1'], biases['bc1']) #defined above already
# Max Pooling (down-sampling)
conv1 = maxpool2d(conv1, k=2)
print(conv1.get_shape)
# Convolution Layer
conv2 = conv2d(conv1, weights['wc2'], biases['bc2']) #wc2 and bc2 are just placeholders...could actually skip this layer...maybe
# Max Pooling (down-sampling)
conv2 = maxpool2d(conv2, k=2)
print(conv2.get_shape)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
fc1 = tf.nn.relu(fc1) #activation function for the NN
# Apply Dropout
fc1 = tf.nn.dropout(fc1, dropout)
# Output, class prediction
out = tf.add(tf.matmul(fc1, weights['Wout']), biases['Bout'])
return out
def Train_Network(Txid_IN, Sess_File_Name):
import tensorflow as tf
tf.reset_default_graph()
x,y = 0,0
weights = {}
biases = {}
# tf Graph input
print("setting placeholders")
x = tf.placeholder(tf.float32, [None, n_input], name="x") #Gateway for data (images)
y = tf.placeholder(tf.float32, [None, n_classes], name="y") # Gateway for data (labels)
keep_prob = tf.placeholder(tf.float32) #dropout # Gateway for dropout(keep probability)
# Store layers weight & bias
#CREATE weights
weights = {
# 5x5 conv, 1 input, 32 outputs
'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32]),name="wc1"), #
# 5x5 conv, 32 inputs, 64 outputs
'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64]),name="wc2"),
# fully connected, 7*7*64 inputs, 1024 outputs
'wd1': tf.Variable(tf.random_normal([26*26*64, 1024]),name="wd1"),
# 1024 inputs, 10 outputs (class prediction)
'Wout': tf.Variable(tf.random_normal([1024, n_classes]),name="Wout")
}
biases = {
'bc1': tf.Variable(tf.random_normal([32]), name="bc1"),
'bc2': tf.Variable(tf.random_normal([64]), name="bc2"),
'bd1': tf.Variable(tf.random_normal([1024]), name="bd1"),
'Bout': tf.Variable(tf.random_normal([n_classes]), name="Bout")
}
# Construct model
print("constructing model")
pred = conv_net(x, weights, biases, keep_prob)
print(pred)
# Define loss(cost) and optimizer
#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) Deprecated version of the statement
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = pred, labels=y)) #added reduce_mean 6/27
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
print("%%%%%%%%%%%%%%%%%%%%")
print ("%% ", correct_pred)
print ("%% ", accuracy)
print("%%%%%%%%%%%%%%%%%%%%")
# Initializing the variables
#init = tf.initialize_all_variables()
init = tf.global_variables_initializer()
saver = tf.train.Saver()
fvecs_np, labels_onehot = extract_data("MicroarrayDataOUT.csv") #CHAGE TO PICORNAVIRUS!!!!!AHHHHHH!!!
print("starting session")
# Launch the graph
FitStep = 0
with tf.Session() as sess: #graph is encapsulated by its session
sess.run(init)
step = 1
# Keep training until reach max iterations (training_iters)
while step * batch_size < training_iters:
if FitStep >= 5:
break
else:
#iterate and train
print(step)
print(fvecs_np, labels_onehot)
for step in range(num_epochs * train_size // batch_size):
sess.run(optimizer, feed_dict={x: fvecs_np, y: labels_onehot, keep_prob:dropout}) #no dropout???...added Keep_prob:dropout
if FitStep >= 5:
break
#else:
###batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop)
###sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
### keep_prob: dropout}) <<<<SOMETHING IS WRONG IN HERE?!!!
if step % display_step == 0:
# Calculate batch loss and accuracy
loss, acc = sess.run([cost, accuracy], feed_dict={x: fvecs_np,
y: labels_onehot,
keep_prob: 1.})
print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
"{:.6f}".format(np.mean(loss)) + ", Training Accuracy= " + \
"{:.5f}".format(acc))
TrainAcc = float("{:.5f}".format(acc))
#print("******", TrainAcc)
if TrainAcc >= .99: #Changed from .95 temporarily
print(FitStep)
FitStep = FitStep+1
saver.save(sess, Sess_File_Name, global_step=1000) #
print("Saved Session:", Sess_File_Name)
step += 1
print("Optimization Finished!")
print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: fvecs_np[:256],
y: labels_onehot[:256],
keep_prob: 1.}))
#feed_dictTEST = {x: fvecs_np[50]}
#prediction=tf.argmax(y,1)
#print(prediction)
#best = sess.run([prediction],feed_dictTEST)
#print(best)
print("DONE")
sess.close()
def Tax_Iterator(CSV_inFile, CSV_outFile): #Deprecate
#Need to copy *.csv file to MySQL for sorting
resultFileINIT = open(CSV_outFile,'w')
resultFileINIT.close()
TaxCount = 0
TaxThreshold = 2165
ThresholdStep = 2165
PrevTax = 0
linecounter = 0
#Open all GenBank profile list
for line in open(CSV_inFile):
linecounter = linecounter+1
print(linecounter)
resultFile = open(CSV_outFile,'a')
wr = csv.writer(resultFile, dialect='excel')
# Check for new TXID
row = line.split(',')
print(row[7], "===", PrevTax)
if row[7] != PrevTax:
print("X1")
TaxCount = TaxCount+1
PrevTax = row[7]
#Check it current Tax count is < or > threshold
# < threshold
print(TaxCount,"=+=", TaxThreshold)
if TaxCount<=3300:
print("X2")
CurrentTax= row[7]
CurrTxCount = CurrentTax
print("TaxCount=", TaxCount)
print( "Add to CSV")
print("row:", CurrentTax, "***", row[0:15])
wr.writerow(row[0:-1])
# is > threshold
else:
print("X3")
# but same TXID....
print(row[7], "=-=", CurrentTax)
if row[7]==CurrentTax:
print("X4")
CurrentTax= row[7]
print("TaxCount=", TaxCount)
print( "Add to CSV")
print("row:", CurrentTax, "***", row[0:15])
wr.writerow(row[0:-1])
# but different TXID...
else:
print(row[7], "=*=", CurrentTax)
if row[7]>CurrentTax:
print("X5")
TaxThreshold=TaxThreshold+ThresholdStep
resultFile.close()
Sess_File_Name = "CNN_VirusIDvSPECIES_XXALL"+ str(TaxThreshold-ThresholdStep)
print("<<<< Start Training >>>>"
print("Training on :: ", CurrTxCount, "Taxa", TaxCount, "data points.")
Train_Network(CurrTxCount, Sess_File_Name)
print("Training complete")
resultFileINIT = open(CSV_outFile,'w')
resultFileINIT.close()
CurrentTax= row[7]
#reset tax count
CurrTxCount = 0
TaxCount = 0
resultFile.close()
Sess_File_Name = "MicroarrayCNN_Data"+ str(TaxThreshold+ThresholdStep)
print("<<<< Start Training >>>>")
print("Training on :: ", CurrTxCount, "Taxa", TaxCount, "data points.")
Train_Network(CurrTxCount, Sess_File_Name)
resultFileINIT = open(CSV_outFile,'w')
resultFileINIT.close()
CurrentTax= row[7]
Tax_Iterator("MicroarrayInput.csv", "MicroarrayOutput.csv")
You defined prediction as prediction=tf.argmax(y,1). And in both feed_dict, you feed labels_onehot for y. Consequently, your "prediction" is always equal to the labels.
As you didn't post the code you used to train your network, I can't tell you what exactly you need to change.
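As a sketch of what is usually intended (the tensor name "logits_name:0" below is hypothetical - use whatever name your training graph gave the final layer's output; it is not taken from your code), the argmax should run over the network's output, not over the label placeholder y:
logits = graph.get_tensor_by_name("logits_name:0")      # hypothetical name for the restored output op
probabilities = tf.nn.softmax(logits)
prediction = tf.argmax(logits, 1)
predY = sess.run(prediction, feed_dict={x: fvecs_np})   # labels are not needed to make predictions
You may also need to feed the dropout keep_prob placeholder from the restored graph with 1.0 when predicting.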
Edit: I have issues understanding the underlying problem you're trying to solve - based on your code, you're trying to train a neural network with 2165 different classes using 1609 training examples. How is this even possible? If each example had a different class, there would still be some classes without any training example. Or does one image belong to many classes? From your statement at the beginning of your question, I had assumed you're trying to output a real-valued number between 0 and 1.
I'm actually surprised that the code worked at all, as it looks like you're adding only a single number to your labels list, but your model expects a list of length 2165 for each training example.
