Hope you are in good health. I am facing a problem in my self-built code. I am a beginner in machine learning and am working on neural networks. I built my own neural network and am trying to train it on a dataset with TensorFlow, but I am getting a very large loss and having some problems printing my accuracy. Please have a look at the code. Thank you!
Here is my code; I implemented it on Kaggle:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
print(check_output(["ls", "../input"]).decode("utf8"))
mnist = input_data.read_data_sets('../input',one_hot=True)
x_train=mnist.train.images
y_train=mnist.train.labels
x_test=mnist.test.images
y_test=mnist.test.labels
# Any results you write to the current directory are saved as output.
layer1_neuron=500
layer2_neuron=500
layer3_neuron=500
number_of_class=10
batch_size=200
x=tf.placeholder('float',[None,784]) #28 * 28 is 784 (shape of the data)
y=tf.placeholder('float')
#my neural network
def neural_network(x_train):
    hidden_layer_1 = {
        'weights': tf.Variable(tf.random_normal([784, layer1_neuron])),
        'biases': tf.Variable(tf.random_normal([layer1_neuron]))
    }
    hidden_layer_2 = {
        'weights': tf.Variable(tf.random_normal([layer1_neuron, layer2_neuron])),
        'biases': tf.Variable(tf.random_normal([layer2_neuron]))
    }
    hidden_layer_3 = {
        'weights': tf.Variable(tf.random_normal([layer2_neuron, layer3_neuron])),
        'biases': tf.Variable(tf.random_normal([layer3_neuron]))
    }
    output = {
        'weights': tf.Variable(tf.random_normal([layer3_neuron, number_of_class])),
        'biases': tf.Variable(tf.random_normal([number_of_class]))
    }
    l1 = tf.add(tf.matmul(x_train, hidden_layer_1['weights']), hidden_layer_1['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_layer_2['weights']), hidden_layer_2['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_layer_3['weights']), hidden_layer_3['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.add(tf.matmul(l3, output['weights']), output['biases'])
    return output
# for splitting out batches of data
def next_batch(num, data, labels):
    idx = np.arange(0, len(data))
    np.random.shuffle(idx)
    idx = idx[:num]
    data_shuffle = [data[i] for i in idx]
    labels_shuffle = [labels[i] for i in idx]
    return np.asarray(data_shuffle), np.asarray(labels_shuffle)
def traning_neuralNetwork(x_test, x_train, y_test, y_train):
    total_epochs = 5
    total_loss = 0
    epoch_loss = 0
    batch_size = 200
    num_batch = int(np.ceil(42000/batch_size))
    prediction = []
    prediction = neural_network(x_train)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y_train))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(total_epochs):
            epoch_loss = 0
            for _ in range(num_batch):
                x_train, y_train = next_batch(batch_size, x_train, y_train)
                _, epoch_loss = sess.run([optimizer, cost], feed_dict={x: x_train, y: y_train})
                total_loss += epoch_loss
            print('Epoch ', epoch, " loss = ", total_loss)
        print("Traning Complete!")
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('accuracy', accuracy.eval({x: x_test, y: y_test}))
traning_neuralNetwork(x_test,x_train,y_test,y_train)
Output Error
WARNING:tensorflow:From <ipython-input-3-92b45e11aa74>:61: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See tf.nn.softmax_cross_entropy_with_logits_v2.
Epoch 0 loss = 968685.6919555664
Epoch 1 loss = 1076421.9005126953
Epoch 2 loss = 1108946.4575500488
Epoch 3 loss = 1117600.8527259827
Epoch 4 loss = 1119452.7342455387
Traning Complete!
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1329 try:
-> 1330 return fn(*args)
1331 except errors.OpError as e:
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
1314 return self._call_tf_sessionrun(
-> 1315 options, feed_dict, fetch_list, target_list, run_metadata)
1316
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1422 self._session, options, feed_dict, fetch_list, target_list,
-> 1423 status, run_metadata)
1424
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
515 compat.as_text(c_api.TF_Message(self.status.status)),
--> 516 c_api.TF_GetCode(self.status.status))
517 # Delete the underlying status object from memory otherwise it stays alive
InvalidArgumentError: Incompatible shapes: [55000] vs. [10000]
[[Node: Equal = Equal[T=DT_INT64, _device="/job:localhost/replica:0/task:0/device:GPU:0"](ArgMax, ArgMax_1)]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-4-6ce6d479d3df> in <module>()
1
2
----> 3 traning_neuralNetwork(x_test,x_train,y_test,y_train)
<ipython-input-3-92b45e11aa74> in traning_neuralNetwork(x_test, x_train, y_test, y_train)
75 correct=tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
76 accuracy=tf.reduce_mean(tf.cast(correct,'float'))
---> 77 print('accuracy',accuracy.eval({x : x_test,y : y_test}))
78
79
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in eval(self, feed_dict, session)
659
660 """
--> 661 return _eval_using_default_session(self, feed_dict, self.graph, session)
662
663
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in _eval_using_default_session(tensors, feed_dict, graph, session)
5061 "the tensor's graph is different from the session's "
5062 "graph.")
-> 5063 return session.run(tensors, feed_dict)
5064
5065
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
906 try:
907 result = self._run(None, fetches, feed_dict, options_ptr,
--> 908 run_metadata_ptr)
909 if run_metadata:
910 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1141 if final_fetches or final_targets or (handle and feed_dict_tensor):
1142 results = self._do_run(handle, final_targets, final_fetches,
-> 1143 feed_dict_tensor, options, run_metadata)
1144 else:
1145 results = []
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1322 if handle is None:
1323 return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1324 run_metadata)
1325 else:
1326 return self._do_call(_prun_fn, handle, feeds, fetches)
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1341 except KeyError:
1342 pass
-> 1343 raise type(e)(node_def, op, message)
1344
1345 def _extend_graph(self):
InvalidArgumentError: Incompatible shapes: [55000] vs. [10000]
[[Node: Equal = Equal[T=DT_INT64, _device="/job:localhost/replica:0/task:0/device:GPU:0"](ArgMax, ArgMax_1)]]
Caused by op 'Equal', defined at:
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/opt/conda/lib/python3.6/site-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/opt/conda/lib/python3.6/site-packages/ipykernel/kernelapp.py", line 477, in start
ioloop.IOLoop.instance().start()
File "/opt/conda/lib/python3.6/site-packages/zmq/eventloop/ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "/opt/conda/lib/python3.6/site-packages/tornado/ioloop.py", line 888, in start
handler_func(fd_obj, events)
File "/opt/conda/lib/python3.6/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 440, in _handle_events
self._handle_recv()
File "/opt/conda/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 472, in _handle_recv
self._run_callback(callback, msg)
File "/opt/conda/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 414, in _run_callback
callback(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/opt/conda/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 235, in dispatch_shell
handler(stream, idents, msg)
File "/opt/conda/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/opt/conda/lib/python3.6/site-packages/ipykernel/ipkernel.py", line 196, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/opt/conda/lib/python3.6/site-packages/ipykernel/zmqshell.py", line 533, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2698, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/opt/conda/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2808, in run_ast_nodes
if self.run_code(code, result):
File "/opt/conda/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2862, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-4-6ce6d479d3df>", line 3, in <module>
traning_neuralNetwork(x_test,x_train,y_test,y_train)
File "<ipython-input-3-92b45e11aa74>", line 75, in traning_neuralNetwork
correct=tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/ops/gen_math_ops.py", line 2455, in equal
"Equal", x=x, y=y, name=name)
File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3306, in create_op
op_def=op_def)
File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1669, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): Incompatible shapes: [55000] vs. [10000]
[[Node: Equal = Equal[T=DT_INT64, _device="/job:localhost/replica:0/task:0/device:GPU:0"](ArgMax, ArgMax_1)]]
Your data matrix is a 4D array of size (48000 * 28 * 28 * 1), which is > 784,500.
As a side note, 32,928,000 is actually 28 * 28 * 42000; the X_train matrix probably has a different shape.
Could you add print(X_train.shape[0]) and double-check your matrix?
To solve your problem with GraphDef, have a look at this response:
https://stackoverflow.com/a/36358913/4601719
tf.initialize_all_variables is deprecated; please use tf.global_variables_initializer as recommended here:
https://www.tensorflow.org/api_docs/python/tf/initialize_all_variables
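(For reference, a minimal TF 1.x sketch of the recommended initializer; the variable here is just an illustrative stand-in:)
import tensorflow as tf

w = tf.Variable(tf.random_normal([784, 500]))  # any variable in the graph

with tf.Session() as sess:
    # tf.global_variables_initializer() replaces the deprecated tf.initialize_all_variables()
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.reduce_mean(w)))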
Related
I am trying to make a simple image classification neural network with Keras but am unable to figure out why the error occurs in model.fit!
My image shape is (300, 300, 3) and I am flattening it to (270000,) for the sake of simplicity (I am a beginner in machine learning, so correct me if I am wrong somewhere).
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
import numpy as np
import matplotlib.pyplot as plt
import random
model = Sequential()
model.add(Dense(512, input_shape=(270000,), activation='relu')) #For image with shape (300,300,3)
model.add(Dense(256, activation='relu'))
model.add(Dense(1, activation='relu'))
dataset = []
i = 1
while i <= 3:
    n = 1000
    while n:
        temp_img = plt.imread('finger_dataset/' + str(i) + '/' + str(n) + '.png')
        dataset.append([temp_img, i])
        n -= 1
    i += 1
random.shuffle(dataset)
i = 0
X_train = []
y_train = []
while i < len(dataset):
    X_train.append(dataset[i][0])
    y_train.append(dataset[i][1])
    i += 1
X_train = np.array(X_train)
y_train = np.array(y_train)
X_train = X_train.reshape((len(X_train), -1))
y_train = y_train.reshape((-1,1))
model.compile(optimizer='sgd', loss='mse', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=100, batch_size=512, shuffle=True)
The error I am getting after running model.fit(...) is:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in _do_call(self, fn, *args)
1364 try:
-> 1365 return fn(*args)
1366 except errors.OpError as e:
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
1349 return self._call_tf_sessionrun(options, feed_dict, fetch_list,
-> 1350 target_list, run_metadata)
1351
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1442 fetch_list, target_list,
-> 1443 run_metadata)
1444
InvalidArgumentError: Cannot parse tensor from proto: dtype: DT_FLOAT
tensor_shape {
dim {
size: 270000
}
dim {
size: 512
}
}
float_val: 0
[[{{node training/SGD/moment_0}}]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-10-fef3f263c0f5> in <module>
----> 1 model.fit(X_train, y_train, epochs=100, batch_size=512, shuffle=True)
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
1211 else:
1212 fit_inputs = x + y + sample_weights
-> 1213 self._make_train_function()
1214 fit_function = self.train_function
1215
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\engine\training.py in _make_train_function(self)
331 updates=updates + metrics_updates,
332 name='train_function',
--> 333 **self._function_kwargs)
334
335 def _make_test_function(self):
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\backend\tensorflow_backend.py in function(inputs, outputs, updates, **kwargs)
3004 def function(inputs, outputs, updates=None, **kwargs):
3005 if _is_tf_1():
-> 3006 v1_variable_initialization()
3007 return tf_keras_backend.function(inputs, outputs,
3008 updates=updates,
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\backend\tensorflow_backend.py in v1_variable_initialization()
418
419 def v1_variable_initialization():
--> 420 session = get_session()
421 with session.graph.as_default():
422 variables = tf.global_variables()
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\backend\tensorflow_backend.py in get_session()
383 '`get_session` is not available when '
384 'TensorFlow is executing eagerly.')
--> 385 return tf_keras_backend.get_session()
386
387
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\keras\backend.py in get_session(op_input_list)
484 if not _MANUAL_VAR_INIT:
485 with session.graph.as_default():
--> 486 _initialize_variables(session)
487 return session
488
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\keras\backend.py in _initialize_variables(session)
908 v._keras_initialized = True
909 if uninitialized_vars:
--> 910 session.run(variables_module.variables_initializer(uninitialized_vars))
911
912
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
954 try:
955 result = self._run(None, fetches, feed_dict, options_ptr,
--> 956 run_metadata_ptr)
957 if run_metadata:
958 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1178 if final_fetches or final_targets or (handle and feed_dict_tensor):
1179 results = self._do_run(handle, final_targets, final_fetches,
-> 1180 feed_dict_tensor, options, run_metadata)
1181 else:
1182 results = []
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1357 if handle is None:
1358 return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1359 run_metadata)
1360 else:
1361 return self._do_call(_prun_fn, handle, feeds, fetches)
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in _do_call(self, fn, *args)
1382 '\nsession_config.graph_options.rewrite_options.'
1383 'disable_meta_optimizer = True')
-> 1384 raise type(e)(node_def, op, message)
1385
1386 def _extend_graph(self):
InvalidArgumentError: Cannot parse tensor from proto: dtype: DT_FLOAT
tensor_shape {
dim {
size: 270000
}
dim {
size: 512
}
}
float_val: 0
[[node training/SGD/moment_0 (defined at c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\framework\ops.py:1748) ]]
Original stack trace for 'training/SGD/moment_0':
File "c:\users\aakash\appdata\local\programs\python\python36\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\traitlets\config\application.py", line 658, in launch_instance
app.start()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel\kernelapp.py", line 505, in start
self.io_loop.start()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\platform\asyncio.py", line 132, in start
self.asyncio_loop.run_forever()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\asyncio\base_events.py", line 422, in run_forever
self._run_once()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\asyncio\base_events.py", line 1434, in _run_once
handle._run()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\asyncio\events.py", line 145, in _run
self._callback(*self._args)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\ioloop.py", line 758, in _run_callback
ret = callback()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\gen.py", line 1233, in inner
self.run()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\gen.py", line 1147, in run
yielded = self.gen.send(value)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel\kernelbase.py", line 357, in process_one
yield gen.maybe_future(dispatch(*args))
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\gen.py", line 326, in wrapper
yielded = next(result)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel\kernelbase.py", line 267, in dispatch_shell
yield gen.maybe_future(handler(stream, idents, msg))
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\gen.py", line 326, in wrapper
yielded = next(result)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel\kernelbase.py", line 534, in execute_request
user_expressions, allow_stdin,
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\gen.py", line 326, in wrapper
yielded = next(result)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel\ipkernel.py", line 294, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel\zmqshell.py", line 536, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 2819, in run_cell
raw_cell, store_history, silent, shell_futures)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 2845, in _run_cell
return runner(coro)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\IPython\core\async_helpers.py", line 67, in _pseudo_sync_runner
coro.send(None)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 3020, in run_cell_async
interactivity=interactivity, compiler=compiler, result=result)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 3191, in run_ast_nodes
if (yield from self.run_code(code, result)):
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 3267, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-10-fef3f263c0f5>", line 1, in <module>
model.fit(X_train, y_train, epochs=100, batch_size=512, shuffle=True)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\engine\training.py", line 1213, in fit
self._make_train_function()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\engine\training.py", line 316, in _make_train_function
loss=self.total_loss)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\legacy\interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\optimizers.py", line 202, in get_updates
for (i, shape) in enumerate(shapes)]
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\optimizers.py", line 202, in <listcomp>
for (i, shape) in enumerate(shapes)]
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\backend\tensorflow_backend.py", line 963, in zeros
v = tf.zeros(shape=shape, dtype=dtype, name=name)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\ops\array_ops.py", line 2350, in zeros
output = fill(shape, constant(zero, dtype=dtype), name=name)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\ops\array_ops.py", line 171, in fill
result = gen_array_ops.fill(dims, value, name=name)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\ops\gen_array_ops.py", line 3602, in fill
"Fill", dims=dims, value=value, name=name)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\framework\op_def_library.py", line 794, in _apply_op_helper
op_def=op_def)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\util\deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\framework\ops.py", line 3357, in create_op
attrs, op_def, compute_device)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\framework\ops.py", line 3426, in _create_op_internal
op_def=op_def)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\framework\ops.py", line 1748, in __init__
self._traceback = tf_stack.extract_stack()
If you are using tensorflow==2.0 or 2.2, try:
from tensorflow.python.keras.utils import losses_utils  # losses_utils is not imported by default; in TF 2.x it lives here
loss_fn = tf.keras.losses.MeanSquaredError(reduction=losses_utils.ReductionV2.AUTO, name='mean_squared_error')
model.compile(optimizer='sgd', loss=loss_fn, metrics=['accuracy'])
Well, it worked for me; try it.
And I am new too, so peace.
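A self-contained variant using only the public TF 2.x alias tf.keras.losses.Reduction (a minimal sketch with a stand-in model and random data; the reduction argument defaults to AUTO, so it can also be omitted):
import numpy as np
import tensorflow as tf

# stand-in model, just to show the compile call
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
loss_fn = tf.keras.losses.MeanSquaredError(
    reduction=tf.keras.losses.Reduction.AUTO, name='mean_squared_error')
model.compile(optimizer='sgd', loss=loss_fn, metrics=['accuracy'])
model.fit(np.random.rand(8, 4), np.random.rand(8, 1), epochs=1, verbose=0)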
After training a model successfully, I am unable to restore it. This method used to work earlier, but somehow it is now giving a FailedPreconditionError. TensorFlow should have stored all the variables while saving, so I am not sure about the reason for the error.
I saved the model during training and tried to restore it later using the save and restore options.
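(For reference, the generic tf.train.Saver save/restore pattern looks roughly like this; a minimal sketch with made-up variable names, not the exact graph used below:)
import tensorflow as tf

v = tf.Variable(1.0, name='v')   # some variable in the graph
saver = tf.train.Saver()         # create after all variables have been defined

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, './my_model')      # writes checkpoint files

# later, with the same graph still present (or rebuilt identically):
with tf.Session() as sess:
    saver.restore(sess, './my_model')   # restored variables need no initializer call
    print(sess.run(v))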
validation_accuracy_track = []
train_accuracy_track = []
connection_probability_track = []
number_of_ex = X_train.shape[0]
total_steps_for_one_pass = number_of_ex//BATCH_SIZE + 1
print_every = 10
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    best_accuracy_valid = 0
    print("Training...")
    print()
    for i in range(EPOCHS):
        X_train, y_train = shuffle(X_train, y_train)
        for step in range(0, total_steps_for_one_pass):
            if step >= number_of_ex//BATCH_SIZE:
                batch_x, batch_y = X_train[step*BATCH_SIZE:, :], y_train[step*BATCH_SIZE:]
                step = 0
            else:
                start = step*BATCH_SIZE
                finish = (step+1)*BATCH_SIZE
                batch_x, batch_y = X_train[step:finish, :], y_train[step:finish]
            tr_op = sess.run([training_operation], feed_dict={x: batch_x, y: batch_y, is_testing: False})
            prob = sess.run(new_prob)
        if i % print_every == 0:
            tr_accuracy = sess.run(accuracy*100, feed_dict={x: X_train, y: y_train, is_testing: True})  # evaluate(X_train, y_train)
            print("Train Accuracy = {:.5f}".format(tr_accuracy))
            validation_accuracy = sess.run(accuracy*100, feed_dict={x: validation_data, y: validation_label_one_hot, is_testing: True})  # evaluate(X_validation, y_validation)
            validation_accuracy_track.append(validation_accuracy)
            train_accuracy_track.append(tr_accuracy)
            connection_probability_track.append(prob)
            print("EPOCH {} ...".format(i+1))
            print("Validation Accuracy = {:.5f}".format(validation_accuracy))
            print(prob)
            print()
            if (validation_accuracy >= best_accuracy_valid):
                best_accuracy_valid = validation_accuracy
                saver.save(sess, './PendigitSGDBased')
                saver.save(sess, './lenet')
                print("Model saved")
This is the output while training
Training...
Train Accuracy = 99.94996
EPOCH 1 ...
Validation Accuracy = 99.26617
0.83325
Train Accuracy = 99.93328
EPOCH 11 ...
Validation Accuracy = 99.13275
0.1345745
Train Accuracy = 99.94996
EPOCH 21 ...
Validation Accuracy = 99.53302
0.021734525
Train Accuracy = 99.96664
EPOCH 31 ...
Validation Accuracy = 99.59973
0.0035102463
Train Accuracy = 99.96664
EPOCH 41 ...
Validation Accuracy = 99.59973
0.0005669242
Model saved
When restoring, I get this:
with tf.Session() as sess:
    saver.restore(sess, './lenet')
    sess.run(accuracy*100, feed_dict={x: validation_data, y: validation_label_one_hot, is_testing: True})
This is the error
INFO:tensorflow:Restoring parameters from ./lenet
---------------------------------------------------------------------------
FailedPreconditionError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1333 try:
-> 1334 return fn(*args)
1335 except errors.OpError as e:
6 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
1318 return self._call_tf_sessionrun(
-> 1319 options, feed_dict, fetch_list, target_list, run_metadata)
1320
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1406 self._session, options, feed_dict, fetch_list, target_list,
-> 1407 run_metadata)
1408
FailedPreconditionError: Attempting to use uninitialized value Variable_14
[[{{node Variable_14/read}}]]
[[{{node mul_26}}]]
During handling of the above exception, another exception occurred:
FailedPreconditionError Traceback (most recent call last)
<ipython-input-34-3e0c62c51f1c> in <module>()
1 with tf.Session() as sess:
2 saver.restore(sess, './lenet')
----> 3 sess.run(accuracy*100, feed_dict={x: validation_data,y:validation_label_one_hot, is_testing: True})
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
927 try:
928 result = self._run(None, fetches, feed_dict, options_ptr,
--> 929 run_metadata_ptr)
930 if run_metadata:
931 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1150 if final_fetches or final_targets or (handle and feed_dict_tensor):
1151 results = self._do_run(handle, final_targets, final_fetches,
-> 1152 feed_dict_tensor, options, run_metadata)
1153 else:
1154 results = []
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1326 if handle is None:
1327 return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1328 run_metadata)
1329 else:
1330 return self._do_call(_prun_fn, handle, feeds, fetches)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1346 pass
1347 message = error_interpolation.interpolate(message, self._graph)
-> 1348 raise type(e)(node_def, op, message)
1349
1350 def _extend_graph(self):
FailedPreconditionError: Attempting to use uninitialized value Variable_14
[[node Variable_14/read (defined at <ipython-input-12-95a60851d74f>:1) ]]
[[node mul_26 (defined at <ipython-input-34-3e0c62c51f1c>:3) ]]
Caused by op 'Variable_14/read', defined at:
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.6/dist-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelapp.py", line 477, in start
ioloop.IOLoop.instance().start()
File "/usr/local/lib/python3.6/dist-packages/tornado/ioloop.py", line 888, in start
handler_func(fd_obj, events)
File "/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 450, in _handle_events
self._handle_recv()
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 480, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 432, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 235, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/ipkernel.py", line 196, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/zmqshell.py", line 533, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-12-95a60851d74f>", line 1, in <module>
connection_probability = tf.Variable(.9999)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 213, in __call__
return cls._variable_v1_call(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 176, in _variable_v1_call
aggregation=aggregation)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 155, in <lambda>
previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variable_scope.py", line 2495, in default_variable_creator
expected_shape=expected_shape, import_scope=import_scope)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 217, in __call__
return super(VariableMetaclass, cls).__call__(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 1395, in __init__
constraint=constraint)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 1557, in _init_from_args
self._snapshot = array_ops.identity(self._variable, name="read")
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py", line 180, in wrapper
return target(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/array_ops.py", line 81, in identity
ret = gen_array_ops.identity(input, name=name)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 3890, in identity
"Identity", input=input, name=name)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 3300, in create_op
op_def=op_def)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 1801, in __init__
self._traceback = tf_stack.extract_stack()
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value Variable_14
[[node Variable_14/read (defined at <ipython-input-12-95a60851d74f>:1) ]]
[[node mul_26 (defined at <ipython-input-34-3e0c62c51f1c>:3) ]]
What is the possible reason for this - why is there a change in behavior for train and test? Any help would be appreciated.
I have written the following code in my Jupyter Notebook, which does linear regression in TensorFlow:
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
boston=load_boston()
type(boston)
boston.feature_names
bd=pd.DataFrame(data=boston.data,columns=boston.feature_names)
bd['Price']=pd.DataFrame(data=boston.target)
np.random.shuffle(bd.values)
W0=tf.Variable(0.0000000000003)
W1=tf.Variable(0.000000000002)
b=tf.Variable(0.0000000000001)
#print(bd.shape[1])
tf.summary.histogram('Weights', W0)
tf.summary.histogram('Weights', W1)
tf.summary.histogram('Biases', b)
dataset_input=bd.iloc[:, 0 : bd.shape[1]-1];
#dataset_input.head(2)
dataset_output=bd.iloc[:, bd.shape[1]-1]
dataset_output=dataset_output.values
dataset_output=dataset_output.reshape((bd.shape[0],1)) #converted (506,) to (506,1) because in pandas
#the shape was not changing and it was needed later in feed_dict
dataset_input=dataset_input.values #only dataset_input is in DataFrame form and converting it into np.ndarray
# ADDED
dataset_input = np.array(dataset_input, dtype=np.float32)
# ADDED
dataset_output = np.array(dataset_output, dtype=np.float32)
X=tf.placeholder(tf.float32, shape=(None,bd.shape[1]-1))
Y=tf.placeholder(tf.float32, shape=(None,1))
Y_=W0*X*X + W1*X + b
#Y_pred = tf.add(tf.multiply(tf.pow(X, pow_i), W), Y_pred)
print(X.shape)
print(Y.shape)
loss=tf.reduce_mean(tf.square(Y_-Y))
tf.summary.scalar('loss',loss)
optimizer=tf.train.GradientDescentOptimizer(0.0000000000001)
train=optimizer.minimize(loss)
init=tf.global_variables_initializer()#tf.global_variables_initializer()#tf.initialize_all_variables()
sess=tf.Session()
sess.run(init)
wb_=[]
with tf.Session() as sess:
    summary_merge = tf.summary.merge_all()
    writer = tf.summary.FileWriter("Users/ajay/Desktop", sess.graph)
    epochs = 6
    sess.run(init)
    for i in range(epochs):
        s_mer = sess.run(summary_merge, feed_dict={X: dataset_input, Y: dataset_output})  #ERROR________ERROR
        sess.run(train, feed_dict={X: dataset_input, Y: dataset_output})
        #CHANGED
        print("loss", sess.run(loss, feed_dict={X: dataset_input, Y: dataset_output}))
        #sess.run(loss, feed_dict={X:dataset_input,Y:dataset_output})
        writer.add_summary(s_mer, i)
        tf.summary.histogram(name="loss", values=loss)
        if (i % 5 == 0):
            print(i, sess.run([W0, W1, b]))
            wb_.append(sess.run([W0, W1, b]))
        print(writer.get_logdir())
        print(writer.close())
It gives the following error:
(?, 13)
(?, 1)
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1038 try:
-> 1039 return fn(*args)
1040 except errors.OpError as e:
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
1020 feed_dict, fetch_list, target_list,
-> 1021 status, run_metadata)
1022
~/anaconda3/envs/Tensorflow/lib/python3.6/contextlib.py in __exit__(self, type, value, traceback)
87 try:
---> 88 next(self.gen)
89 except StopIteration:
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in raise_exception_on_not_ok_status()
465 compat.as_text(pywrap_tensorflow.TF_Message(status)),
--> 466 pywrap_tensorflow.TF_GetCode(status))
467 finally:
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float
[[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-8-fdf4bc1529e6> in <module>()
73
74 for i in range(epochs):
---> 75 s_mer=sess.run(summary_merge,feed_dict={X: dataset_input, Y: dataset_output}) #ERROR________ERROR
76 sess.run(train,feed_dict={X:dataset_input,Y:dataset_output})
77
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
776 try:
777 result = self._run(None, fetches, feed_dict, options_ptr,
--> 778 run_metadata_ptr)
779 if run_metadata:
780 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
980 if final_fetches or final_targets:
981 results = self._do_run(handle, final_targets, final_fetches,
--> 982 feed_dict_string, options, run_metadata)
983 else:
984 results = []
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1030 if handle is None:
1031 return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1032 target_list, options, run_metadata)
1033 else:
1034 return self._do_call(_prun_fn, self._session, handle, feed_dict,
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1050 except KeyError:
1051 pass
-> 1052 raise type(e)(node_def, op, message)
1053
1054 def _extend_graph(self):
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float
[[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op 'Placeholder', defined at:
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelapp.py", line 486, in start
self.io_loop.start()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/platform/asyncio.py", line 127, in start
self.asyncio_loop.run_forever()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/asyncio/base_events.py", line 421, in run_forever
self._run_once()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/asyncio/base_events.py", line 1431, in _run_once
handle._run()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/platform/asyncio.py", line 117, in _handle_events
handler_func(fileobj, events)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/stack_context.py", line 276, in null_wrapper
return fn(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 450, in _handle_events
self._handle_recv()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 480, in _handle_recv
self._run_callback(callback, msg)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 432, in _run_callback
callback(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/stack_context.py", line 276, in null_wrapper
return fn(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2662, in run_cell
raw_cell, store_history, silent, shell_futures)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2785, in _run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2903, in run_ast_nodes
if self.run_code(code, result):
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2963, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-1-67ab0a0aaf1a>", line 44, in <module>
X=tf.placeholder(tf.float32, shape=(None,bd.shape[1]-1))
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py", line 1507, in placeholder
name=name)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 1997, in _placeholder
name=name)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 768, in apply_op
op_def=op_def)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 2336, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1228, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder' with dtype float
[[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
But when I run this in the Terminal, it gives the following output:
MacBook-Air:Desktop ajay$ python3 LR.py
/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
(?, 13)
(?, 1)
2018-08-16 13:40:30.387637: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
loss 592.02905
0 [1.08720556e-07, 3.084597e-10, 4.6065617e-12]
Users/ajay/Desktop
None
loss 591.9113
Users/ajay/Desktop
None
loss 591.7939
Users/ajay/Desktop
None
loss 591.67664
Users/ajay/Desktop
None
loss 591.5597
Users/ajay/Desktop
None
loss 591.44293
5 [6.502434e-07, 1.8367864e-09, 2.7130916e-11]
Users/ajay/Desktop
None
Is my Terminal ignoring something with respect to the placeholder, or is it something else?
I'm not able to identify the errors related to placeholders.
Can anyone explain how to identify them and when they most probably occur?
Also, is there anything related to the datatype in this warning:
/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
I think you can add tf.reset_default_graph() as the first line in your code in Jupyter Notebook and check.
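This helps because, in a notebook, re-running cells keeps adding new placeholders and summary ops to the same default graph, so tf.summary.merge_all() can pick up summaries tied to an old, unfed placeholder (hence the auto-generated name 'Placeholder_56'). A minimal sketch of where the reset would go (shapes shown are illustrative):
import tensorflow as tf

tf.reset_default_graph()   # clear placeholders/summaries left over from earlier cell runs

X = tf.placeholder(tf.float32, shape=(None, 13))
Y = tf.placeholder(tf.float32, shape=(None, 1))
# ... build the rest of the graph and the training loop as in the question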
I am a newbie in TensorFlow and am trying out the linear regression code given below:
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
boston=load_boston()
type(boston)
boston.feature_names
bd=pd.DataFrame(data=boston.data,columns=boston.feature_names)
bd['Price']=pd.DataFrame(data=boston.target)
np.random.shuffle(bd.values)
W=tf.Variable(0.0)
b=tf.Variable(0.0)
#print(bd.shape[1])
tf.summary.histogram('Weights', W)
tf.summary.histogram('Biases', b)
dataset_input=bd.iloc[:, 0 : bd.shape[1]-1];
#dataset_input.head(2)
dataset_output=bd.iloc[:, bd.shape[1]-1]
dataset_output=dataset_output.values
dataset_output=dataset_output.reshape((bd.shape[0],1)) #converted (506,) to (506,1) because in pandas
#the shape was not changing and it was needed later in feed_dict
dataset_input=dataset_input.values #only dataset_input is in DataFrame form and converting it into np.ndarray
X=tf.placeholder(tf.float32, shape=(None,bd.shape[1]-1))
Y=tf.placeholder(tf.float32, shape=(None,1))
Y_=W*X+b
print(X.shape)
print(Y.shape)
loss=tf.reduce_mean(tf.square(Y_-Y))
tf.summary.scalar('loss',loss)
optimizer=tf.train.GradientDescentOptimizer(0.5)
train=optimizer.minimize(loss)
init=tf.global_variables_initializer()#tf.global_variables_initializer()#tf.initialize_all_variables()
sess=tf.Session()
sess.run(init)
wb_=[]
with tf.Session() as sess:
    summary_merge = tf.summary.merge_all()
    writer = tf.summary.FileWriter("Users/ajay/Documents", sess.graph)
    epochs = 10
    sess.run(init)
    for i in range(epochs):
        s_mer = sess.run(summary_merge, feed_dict={X: dataset_input, Y: dataset_output})  #ERROR________ERROR
        sess.run(train, feed_dict={X: dataset_input, Y: dataset_output})
        sess.run(loss)
        writer.add_summary(s_mer, i)
        #tf.summary.histogram(name="loss",values=loss)
        if (i % 5 == 0):
            print(i, sess.run([W, b]))
            wb_.append(sess.run([W, b]))
        print(writer.get_logdir())
        print(writer.close())
As the loop runs, the line:
s_mer=sess.run(summary_merge,feed_dict={X:dataset_input,Y:dataset_output})
is giving me an error.
I really don't understand why feed_dict is present in the above line.
What would happen if it were not there?
Also, why are we passing s_mer into add_summary()? I didn't understand its role. Can't we just pass summary_merge into add_summary()?
Please explain the role of add_summary().
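(For context, the usual summary workflow is: tf.summary.merge_all() builds an op; sess.run() evaluates it, using feed_dict for any placeholders it depends on, into a serialized Summary string; and writer.add_summary() writes that evaluated string to the event file. A minimal sketch with illustrative names:)
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(None, 1))
loss = tf.reduce_mean(tf.square(x))
tf.summary.scalar('loss', loss)

merged = tf.summary.merge_all()          # an op, not a value
writer = tf.summary.FileWriter('/tmp/logs')

with tf.Session() as sess:
    for step in range(3):
        # merged depends on loss, which depends on x, so x must be fed here
        summary_str = sess.run(merged, feed_dict={x: [[1.0], [2.0]]})
        writer.add_summary(summary_str, step)   # takes the evaluated result, not the op
    writer.close()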
The error I'm getting in my IPython notebook is:
InvalidArgumentError Traceback (most recent call last)
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1038 try:
-> 1039 return fn(*args)
1040 except errors.OpError as e:
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
1020 feed_dict, fetch_list, target_list,
-> 1021 status, run_metadata)
1022
~/anaconda3/envs/Tensorflow/lib/python3.6/contextlib.py in __exit__(self, type, value, traceback)
87 try:
---> 88 next(self.gen)
89 except StopIteration:
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in raise_exception_on_not_ok_status()
465 compat.as_text(pywrap_tensorflow.TF_Message(status)),
--> 466 pywrap_tensorflow.TF_GetCode(status))
467 finally:
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_56' with dtype float
[[Node: Placeholder_56 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-48-ca71ed6cdb0f> in <module>()
68
69 for i in range(epochs):
---> 70 s_mer=sess.run(summary_merge,feed_dict={X:dataset_input,Y:dataset_output}) #ERROR________ERROR
71 sess.run(train,feed_dict={X:dataset_input,Y:dataset_output})
72
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
776 try:
777 result = self._run(None, fetches, feed_dict, options_ptr,
--> 778 run_metadata_ptr)
779 if run_metadata:
780 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
980 if final_fetches or final_targets:
981 results = self._do_run(handle, final_targets, final_fetches,
--> 982 feed_dict_string, options, run_metadata)
983 else:
984 results = []
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1030 if handle is None:
1031 return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1032 target_list, options, run_metadata)
1033 else:
1034 return self._do_call(_prun_fn, self._session, handle, feed_dict,
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1050 except KeyError:
1051 pass
-> 1052 raise type(e)(node_def, op, message)
1053
1054 def _extend_graph(self):
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_56' with dtype float
[[Node: Placeholder_56 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op 'Placeholder_56', defined at:
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelapp.py", line 486, in start
self.io_loop.start()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/platform/asyncio.py", line 127, in start
self.asyncio_loop.run_forever()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/asyncio/base_events.py", line 421, in run_forever
self._run_once()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/asyncio/base_events.py", line 1431, in _run_once
handle._run()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/platform/asyncio.py", line 117, in _handle_events
handler_func(fileobj, events)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/stack_context.py", line 276, in null_wrapper
return fn(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 450, in _handle_events
self._handle_recv()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 480, in _handle_recv
self._run_callback(callback, msg)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 432, in _run_callback
callback(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/stack_context.py", line 276, in null_wrapper
return fn(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2662, in run_cell
raw_cell, store_history, silent, shell_futures)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2785, in _run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2903, in run_ast_nodes
if self.run_code(code, result):
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2963, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-35-a13d7623267d>", line 47, in <module>
X=tf.placeholder(tf.float32, shape=(None,bd.shape[1]-1))
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py", line 1507, in placeholder
name=name)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 1997, in _placeholder
name=name)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 768, in apply_op
op_def=op_def)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 2336, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1228, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder_56' with dtype float
[[Node: Placeholder_56 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
The problem is that the variables dataset_input and dataset_output have dtype float64, while your placeholders are declared as float32; the two need to match. Converting them with dataset_input = np.array(dataset_input, dtype=np.float32) and dataset_output = np.array(dataset_output, dtype=np.float32) fixes it on my machine.
Also, when you call sess.run(loss) you need to pass feed_dict={X:dataset_input,Y:dataset_output} as well, because loss depends on the X and Y placeholders.
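For illustration, here is a minimal, self-contained sketch of both points (assuming TensorFlow 1.x; the random arrays are hypothetical stand-ins for dataset_input and dataset_output, and the graph mirrors the question's W, b, X, Y, loss and train):
import numpy as np
import tensorflow as tf
# stand-ins for the real data, cast to float32 up front so they match the placeholders
dataset_input = np.random.rand(506, 13).astype(np.float32)
dataset_output = np.random.rand(506, 1).astype(np.float32)
X = tf.placeholder(tf.float32, shape=(None, 13))
Y = tf.placeholder(tf.float32, shape=(None, 1))
W = tf.Variable(0.0)
b = tf.Variable(0.0)
loss = tf.reduce_mean(tf.square(W * X + b - Y))
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        sess.run(train, feed_dict={X: dataset_input, Y: dataset_output})
        if i % 10 == 0:
            # loss depends on X and Y too, so evaluating it needs the same feed_dict
            print(i, sess.run(loss, feed_dict={X: dataset_input, Y: dataset_output}))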
I really don't understand why feed_dict has to be present in the sess.run(loss) call above. What would happen if it were not there?
You need feed_dict to feed data into your placeholders. If you don't provide any data for them, you get errors like this one, because TensorFlow has nothing to run your computation on.
Hope it helps.
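To make that concrete, a tiny sketch (again assuming TensorFlow 1.x, with a hypothetical placeholder p): any op that depends on a placeholder can only be evaluated if that placeholder is fed.
import numpy as np
import tensorflow as tf
p = tf.placeholder(tf.float32, shape=(None, 1))
doubled = p * 2.0
with tf.Session() as sess:
    # sess.run(doubled)  # would raise InvalidArgumentError: no value was fed for p
    print(sess.run(doubled, feed_dict={p: np.array([[1.0], [2.0]], dtype=np.float32)}))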
I am a newbie in TensorFlow and am trying out linear regression on the "Boston Housing Prices" dataset.
I am using the entire dataset as the training set, just for the sake of practice.
dataset_input contains the input training examples.
dataset_output contains the output training examples (the targets).
X and Y are the placeholders that receive those two variables; they are fed later through feed_dict in sess.run(train,feed_dict={X:dataset_input,Y:dataset_output}).
But somewhere in the code I am getting this error:
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_2' with dtype float
[[Node: Placeholder_2 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0
Here is the code:
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt

boston=load_boston()
type(boston)
boston.feature_names
bd=pd.DataFrame(data=boston.data,columns=boston.feature_names)
bd['Price']=pd.DataFrame(data=boston.target)
np.random.shuffle(bd.values)
#bd.describe()
#bd.dtypes.index

W=tf.Variable(0.0)
b=tf.Variable(0.0)

print(bd.shape[1])
dataset_input=bd.iloc[:, 0 : bd.shape[1]-1]
dataset_input.head(2)
dataset_output=bd.iloc[:, bd.shape[1]-1]
dataset_output=dataset_output.values
dataset_output=dataset_output.reshape((bd.shape[0],1)) #converted (506,) to (506,1) because in pandas
#the shape was not changing and it was needed later in feed_dict
#dataset_output.head(2)
print("Output.shape=",dataset_output.shape)
print("Input.shape=",dataset_input.shape)

X=tf.placeholder(tf.float32, shape=(None,bd.shape[1]-1))
Y=tf.placeholder(tf.float32, shape=(None,1))
Y_=W*X+b
print(X.shape)
print(Y.shape)
#Y_=tf.convert_to_tensor(dataset_output)

loss=tf.reduce_mean(tf.square(Y_-Y))
optimizer=tf.train.GradientDescentOptimizer(0.5)
train=optimizer.minimize(loss)
init=tf.global_variables_initializer()#tf.global_variables_initializer()#tf.initialize_all_variables()

sess=tf.Session()
sess.run(init)

with tf.Session() as sess:
    epochs=1000
    sess.run(init)
    points=[ [],[] ]
    for i in range(epochs):
        sess.run(train,feed_dict={X:dataset_input,Y:dataset_output}) # cannot understand whether the error is here?
        if(i%10==0):
            points[0].append(1+i)
            points[1].append(sess.run(loss))
        if(i%100==0):
            print(i+1,sess.run(cost))
    plt.plot(points[0],points[1],'r--')
    plt.axis([0,epochs,50,600])
    plt.show()
and the traceback is:
14
Output.shape= (506, 1)
Input.shape= (506, 13)
(?, 13)
(?, 1)
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1038 try:
-> 1039 return fn(*args)
1040 except errors.OpError as e:
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
1020 feed_dict, fetch_list, target_list,
-> 1021 status, run_metadata)
1022
~/anaconda3/envs/Tensorflow/lib/python3.6/contextlib.py in __exit__(self, type, value, traceback)
87 try:
---> 88 next(self.gen)
89 except StopIteration:
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in raise_exception_on_not_ok_status()
465 compat.as_text(pywrap_tensorflow.TF_Message(status)),
--> 466 pywrap_tensorflow.TF_GetCode(status))
467 finally:
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_2' with dtype float
[[Node: Placeholder_2 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-35-520f8b9256aa> in <module>()
60 if(i%10==0):
61 points[0].append(1+i)
---> 62 points[1].append(sess.run(loss))
63 if(i%100==0):
64 print(i+1,sess.run(cost))
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
776 try:
777 result = self._run(None, fetches, feed_dict, options_ptr,
--> 778 run_metadata_ptr)
779 if run_metadata:
780 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
980 if final_fetches or final_targets:
981 results = self._do_run(handle, final_targets, final_fetches,
--> 982 feed_dict_string, options, run_metadata)
983 else:
984 results = []
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1030 if handle is None:
1031 return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1032 target_list, options, run_metadata)
1033 else:
1034 return self._do_call(_prun_fn, self._session, handle, feed_dict,
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1050 except KeyError:
1051 pass
-> 1052 raise type(e)(node_def, op, message)
1053
1054 def _extend_graph(self):
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_2' with dtype float
[[Node: Placeholder_2 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op 'Placeholder_2', defined at:
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelapp.py", line 486, in start
self.io_loop.start()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/platform/asyncio.py", line 127, in start
self.asyncio_loop.run_forever()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/asyncio/base_events.py", line 421, in run_forever
self._run_once()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/asyncio/base_events.py", line 1431, in _run_once
handle._run()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/platform/asyncio.py", line 117, in _handle_events
handler_func(fileobj, events)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/stack_context.py", line 276, in null_wrapper
return fn(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 450, in _handle_events
self._handle_recv()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 480, in _handle_recv
self._run_callback(callback, msg)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 432, in _run_callback
callback(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/stack_context.py", line 276, in null_wrapper
return fn(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2662, in run_cell
raw_cell, store_history, silent, shell_futures)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2785, in _run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2903, in run_ast_nodes
if self.run_code(code, result):
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2963, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-35-520f8b9256aa>", line 37, in <module>
X=tf.placeholder(tf.float32, shape=(None,bd.shape[1]-1))
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py", line 1507, in placeholder
name=name)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 1997, in _placeholder
name=name)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 768, in apply_op
op_def=op_def)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 2336, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1228, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder_2' with dtype float
[[Node: Placeholder_2 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
The value you are trying to feed into X, your dataset_input, is a pandas DataFrame, not the NumPy ndarray that TensorFlow expects.
Simply defining e.g. dataset_input = boston.data (which is already an ndarray) should work here.
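A minimal sketch of that suggestion (assuming TensorFlow 1.x and the names from the question; the float32 cast is an extra precaution so the dtype matches the placeholders, not part of the answer above):
import numpy as np
from sklearn.datasets import load_boston
boston = load_boston()
dataset_input = boston.data.astype(np.float32)                     # NumPy ndarray, shape (506, 13)
dataset_output = boston.target.reshape(-1, 1).astype(np.float32)   # shape (506, 1), matches placeholder Y
# the graph and training loop stay as in the question, e.g.:
# sess.run(train, feed_dict={X: dataset_input, Y: dataset_output})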