FailedPreconditionError while restoring model in tensorflow - python-3.x

After training a model successfully, I am unable to restore it. This method used to work earlier, but now it gives a 'FailedPreconditionError'. TensorFlow should have stored all the variables while saving, so I am not sure about the reason for the error.
I saved the model during training and tried to restore it later using the save and restore options.
validation_accuracy_track = []
train_accuracy_track = []
connection_probability_track = []
number_of_ex = X_train.shape[0]
total_steps_for_one_pass = number_of_ex//BATCH_SIZE + 1
print_every = 10

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    best_accuracy_valid = 0
    print("Training...")
    print()
    for i in range(EPOCHS):
        X_train, y_train = shuffle(X_train, y_train)
        for step in range(0, total_steps_for_one_pass):
            if step >= number_of_ex//BATCH_SIZE:
                batch_x, batch_y = X_train[step*BATCH_SIZE:, :], y_train[step*BATCH_SIZE:]
                step = 0
            else:
                start = step*BATCH_SIZE
                finish = (step+1)*BATCH_SIZE
                batch_x, batch_y = X_train[step:finish, :], y_train[step:finish]
            tr_op = sess.run([training_operation], feed_dict={x: batch_x, y: batch_y, is_testing: False})
            prob = sess.run(new_prob)
        if i % print_every == 0:
            tr_accuracy = sess.run(accuracy*100, feed_dict={x: X_train, y: y_train, is_testing: True})  # evaluate(X_train, y_train)
            print("Train Accuracy = {:.5f}".format(tr_accuracy))
            validation_accuracy = sess.run(accuracy*100, feed_dict={x: validation_data, y: validation_label_one_hot, is_testing: True})  # evaluate(X_validation, y_validation)
            validation_accuracy_track.append(validation_accuracy)
            train_accuracy_track.append(tr_accuracy)
            connection_probability_track.append(prob)
            print("EPOCH {} ...".format(i+1))
            print("Validation Accuracy = {:.5f}".format(validation_accuracy))
            print(prob)
            print()
            if validation_accuracy >= best_accuracy_valid:
                best_accuracy_valid = validation_accuracy
                saver.save(sess, './PendigitSGDBased')
    saver.save(sess, './lenet')
    print("Model saved")
This is the output during training:
Training...
Train Accuracy = 99.94996
EPOCH 1 ...
Validation Accuracy = 99.26617
0.83325
Train Accuracy = 99.93328
EPOCH 11 ...
Validation Accuracy = 99.13275
0.1345745
Train Accuracy = 99.94996
EPOCH 21 ...
Validation Accuracy = 99.53302
0.021734525
Train Accuracy = 99.96664
EPOCH 31 ...
Validation Accuracy = 99.59973
0.0035102463
Train Accuracy = 99.96664
EPOCH 41 ...
Validation Accuracy = 99.59973
0.0005669242
Model saved
When restoring, I get this:
with tf.Session() as sess:
    saver.restore(sess, './lenet')
    sess.run(accuracy*100, feed_dict={x: validation_data, y: validation_label_one_hot, is_testing: True})
This is the error
INFO:tensorflow:Restoring parameters from ./lenet
---------------------------------------------------------------------------
FailedPreconditionError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1333 try:
-> 1334 return fn(*args)
1335 except errors.OpError as e:
6 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
1318 return self._call_tf_sessionrun(
-> 1319 options, feed_dict, fetch_list, target_list, run_metadata)
1320
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1406 self._session, options, feed_dict, fetch_list, target_list,
-> 1407 run_metadata)
1408
FailedPreconditionError: Attempting to use uninitialized value Variable_14
[[{{node Variable_14/read}}]]
[[{{node mul_26}}]]
During handling of the above exception, another exception occurred:
FailedPreconditionError Traceback (most recent call last)
<ipython-input-34-3e0c62c51f1c> in <module>()
1 with tf.Session() as sess:
2 saver.restore(sess, './lenet')
----> 3 sess.run(accuracy*100, feed_dict={x: validation_data,y:validation_label_one_hot, is_testing: True})
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
927 try:
928 result = self._run(None, fetches, feed_dict, options_ptr,
--> 929 run_metadata_ptr)
930 if run_metadata:
931 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1150 if final_fetches or final_targets or (handle and feed_dict_tensor):
1151 results = self._do_run(handle, final_targets, final_fetches,
-> 1152 feed_dict_tensor, options, run_metadata)
1153 else:
1154 results = []
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1326 if handle is None:
1327 return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1328 run_metadata)
1329 else:
1330 return self._do_call(_prun_fn, handle, feeds, fetches)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1346 pass
1347 message = error_interpolation.interpolate(message, self._graph)
-> 1348 raise type(e)(node_def, op, message)
1349
1350 def _extend_graph(self):
FailedPreconditionError: Attempting to use uninitialized value Variable_14
[[node Variable_14/read (defined at <ipython-input-12-95a60851d74f>:1) ]]
[[node mul_26 (defined at <ipython-input-34-3e0c62c51f1c>:3) ]]
Caused by op 'Variable_14/read', defined at:
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.6/dist-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelapp.py", line 477, in start
ioloop.IOLoop.instance().start()
File "/usr/local/lib/python3.6/dist-packages/tornado/ioloop.py", line 888, in start
handler_func(fd_obj, events)
File "/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 450, in _handle_events
self._handle_recv()
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 480, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 432, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 235, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/ipkernel.py", line 196, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/zmqshell.py", line 533, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-12-95a60851d74f>", line 1, in <module>
connection_probability = tf.Variable(.9999)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 213, in __call__
return cls._variable_v1_call(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 176, in _variable_v1_call
aggregation=aggregation)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 155, in <lambda>
previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variable_scope.py", line 2495, in default_variable_creator
expected_shape=expected_shape, import_scope=import_scope)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 217, in __call__
return super(VariableMetaclass, cls).__call__(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 1395, in __init__
constraint=constraint)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 1557, in _init_from_args
self._snapshot = array_ops.identity(self._variable, name="read")
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py", line 180, in wrapper
return target(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/array_ops.py", line 81, in identity
ret = gen_array_ops.identity(input, name=name)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 3890, in identity
"Identity", input=input, name=name)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 3300, in create_op
op_def=op_def)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 1801, in __init__
self._traceback = tf_stack.extract_stack()
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value Variable_14
[[node Variable_14/read (defined at <ipython-input-12-95a60851d74f>:1) ]]
[[node mul_26 (defined at <ipython-input-34-3e0c62c51f1c>:3) ]]
What is the possible reason for this, and why is there a change in behavior between training and testing? Any help would be appreciated.
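For reference, this is the usual tf.train.Saver save/restore pattern in TF 1.x, shown as a minimal sketch with a placeholder variable rather than the question's actual graph: the saver must be created after every variable it should checkpoint exists, the same graph must be (re)built before saver.restore is called, and restored variables are not re-initialized.
import tensorflow as tf

# Build the graph; the Saver only tracks variables that exist when it is created.
v = tf.Variable(0.9999, name="connection_probability")
saver = tf.train.Saver()

# Save after initializing/training.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, './lenet')

# Restore in a fresh session over the same graph; variables that come out of
# the checkpoint do not need an initializer run.
with tf.Session() as sess:
    saver.restore(sess, './lenet')
    print(sess.run(v))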

Related

Model in keras giving error when doing fit

I am trying to make a simple image classification neural network with Keras but am unable to figure out why the error occurs in model.fit.
My image shape is (300, 300, 3) and I am flattening it to (270000,) for the sake of simplicity (I am a beginner in machine learning, so correct me if I am wrong somewhere).
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
import numpy as np
import matplotlib.pyplot as plt
import random

model = Sequential()
model.add(Dense(512, input_shape=(270000,), activation='relu'))  # For image with shape (300,300,3)
model.add(Dense(256, activation='relu'))
model.add(Dense(1, activation='relu'))

dataset = []
i = 1
while i <= 3:
    n = 1000
    while n:
        temp_img = plt.imread('finger_dataset/' + str(i) + '/' + str(n) + '.png')
        dataset.append([temp_img, i])
        n -= 1
    i += 1

random.shuffle(dataset)

i = 0
X_train = []
y_train = []
while i < len(dataset):
    X_train.append(dataset[i][0])
    y_train.append(dataset[i][1])
    i += 1

X_train = np.array(X_train)
y_train = np.array(y_train)
X_train = X_train.reshape((len(X_train), -1))
y_train = y_train.reshape((-1, 1))

model.compile(optimizer='sgd', loss='mse', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=100, batch_size=512, shuffle=True)
The error I am getting after running model.fit(...) is:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in _do_call(self, fn, *args)
1364 try:
-> 1365 return fn(*args)
1366 except errors.OpError as e:
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
1349 return self._call_tf_sessionrun(options, feed_dict, fetch_list,
-> 1350 target_list, run_metadata)
1351
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1442 fetch_list, target_list,
-> 1443 run_metadata)
1444
InvalidArgumentError: Cannot parse tensor from proto: dtype: DT_FLOAT
tensor_shape {
dim {
size: 270000
}
dim {
size: 512
}
}
float_val: 0
[[{{node training/SGD/moment_0}}]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-10-fef3f263c0f5> in <module>
----> 1 model.fit(X_train, y_train, epochs=100, batch_size=512, shuffle=True)
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
1211 else:
1212 fit_inputs = x + y + sample_weights
-> 1213 self._make_train_function()
1214 fit_function = self.train_function
1215
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\engine\training.py in _make_train_function(self)
331 updates=updates + metrics_updates,
332 name='train_function',
--> 333 **self._function_kwargs)
334
335 def _make_test_function(self):
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\backend\tensorflow_backend.py in function(inputs, outputs, updates, **kwargs)
3004 def function(inputs, outputs, updates=None, **kwargs):
3005 if _is_tf_1():
-> 3006 v1_variable_initialization()
3007 return tf_keras_backend.function(inputs, outputs,
3008 updates=updates,
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\backend\tensorflow_backend.py in v1_variable_initialization()
418
419 def v1_variable_initialization():
--> 420 session = get_session()
421 with session.graph.as_default():
422 variables = tf.global_variables()
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\backend\tensorflow_backend.py in get_session()
383 '`get_session` is not available when '
384 'TensorFlow is executing eagerly.')
--> 385 return tf_keras_backend.get_session()
386
387
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\keras\backend.py in get_session(op_input_list)
484 if not _MANUAL_VAR_INIT:
485 with session.graph.as_default():
--> 486 _initialize_variables(session)
487 return session
488
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\keras\backend.py in _initialize_variables(session)
908 v._keras_initialized = True
909 if uninitialized_vars:
--> 910 session.run(variables_module.variables_initializer(uninitialized_vars))
911
912
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
954 try:
955 result = self._run(None, fetches, feed_dict, options_ptr,
--> 956 run_metadata_ptr)
957 if run_metadata:
958 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1178 if final_fetches or final_targets or (handle and feed_dict_tensor):
1179 results = self._do_run(handle, final_targets, final_fetches,
-> 1180 feed_dict_tensor, options, run_metadata)
1181 else:
1182 results = []
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1357 if handle is None:
1358 return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1359 run_metadata)
1360 else:
1361 return self._do_call(_prun_fn, handle, feeds, fetches)
c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\client\session.py in _do_call(self, fn, *args)
1382 '\nsession_config.graph_options.rewrite_options.'
1383 'disable_meta_optimizer = True')
-> 1384 raise type(e)(node_def, op, message)
1385
1386 def _extend_graph(self):
InvalidArgumentError: Cannot parse tensor from proto: dtype: DT_FLOAT
tensor_shape {
dim {
size: 270000
}
dim {
size: 512
}
}
float_val: 0
[[node training/SGD/moment_0 (defined at c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\framework\ops.py:1748) ]]
Original stack trace for 'training/SGD/moment_0':
File "c:\users\aakash\appdata\local\programs\python\python36\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\traitlets\config\application.py", line 658, in launch_instance
app.start()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel\kernelapp.py", line 505, in start
self.io_loop.start()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\platform\asyncio.py", line 132, in start
self.asyncio_loop.run_forever()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\asyncio\base_events.py", line 422, in run_forever
self._run_once()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\asyncio\base_events.py", line 1434, in _run_once
handle._run()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\asyncio\events.py", line 145, in _run
self._callback(*self._args)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\ioloop.py", line 758, in _run_callback
ret = callback()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\gen.py", line 1233, in inner
self.run()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\gen.py", line 1147, in run
yielded = self.gen.send(value)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel\kernelbase.py", line 357, in process_one
yield gen.maybe_future(dispatch(*args))
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\gen.py", line 326, in wrapper
yielded = next(result)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel\kernelbase.py", line 267, in dispatch_shell
yield gen.maybe_future(handler(stream, idents, msg))
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\gen.py", line 326, in wrapper
yielded = next(result)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel\kernelbase.py", line 534, in execute_request
user_expressions, allow_stdin,
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tornado\gen.py", line 326, in wrapper
yielded = next(result)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel\ipkernel.py", line 294, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\ipykernel\zmqshell.py", line 536, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 2819, in run_cell
raw_cell, store_history, silent, shell_futures)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 2845, in _run_cell
return runner(coro)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\IPython\core\async_helpers.py", line 67, in _pseudo_sync_runner
coro.send(None)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 3020, in run_cell_async
interactivity=interactivity, compiler=compiler, result=result)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 3191, in run_ast_nodes
if (yield from self.run_code(code, result)):
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\IPython\core\interactiveshell.py", line 3267, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-10-fef3f263c0f5>", line 1, in <module>
model.fit(X_train, y_train, epochs=100, batch_size=512, shuffle=True)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\engine\training.py", line 1213, in fit
self._make_train_function()
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\engine\training.py", line 316, in _make_train_function
loss=self.total_loss)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\legacy\interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\optimizers.py", line 202, in get_updates
for (i, shape) in enumerate(shapes)]
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\optimizers.py", line 202, in <listcomp>
for (i, shape) in enumerate(shapes)]
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\keras\backend\tensorflow_backend.py", line 963, in zeros
v = tf.zeros(shape=shape, dtype=dtype, name=name)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\ops\array_ops.py", line 2350, in zeros
output = fill(shape, constant(zero, dtype=dtype), name=name)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\ops\array_ops.py", line 171, in fill
result = gen_array_ops.fill(dims, value, name=name)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\ops\gen_array_ops.py", line 3602, in fill
"Fill", dims=dims, value=value, name=name)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\framework\op_def_library.py", line 794, in _apply_op_helper
op_def=op_def)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\util\deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\framework\ops.py", line 3357, in create_op
attrs, op_def, compute_device)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\framework\ops.py", line 3426, in _create_op_internal
op_def=op_def)
File "c:\users\aakash\appdata\local\programs\python\python36\lib\site-packages\tensorflow_core\python\framework\ops.py", line 1748, in __init__
self._traceback = tf_stack.extract_stack()
If you are using tensorflow==2.0 or 2.2, try:
loss_fn = tf.keras.losses.MeanSquaredError(reduction=losses_utils.ReductionV2.AUTO, name='mean_squared_error')
model.compile(optimizer='sgd', loss=loss_fn, metrics=['accuracy'])
Well, it worked for me, so give it a try. I am new too, so peace.
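For reference, a minimal sketch of the same idea that stays on the public Keras API, so no import of the private losses_utils module is needed; model is assumed to be the Sequential model built in the question:
import tensorflow as tf

# AUTO reduction is the default for Keras losses; it is spelled out here only
# to mirror the answer above, using the public tf.keras.losses.Reduction enum.
loss_fn = tf.keras.losses.MeanSquaredError(
    reduction=tf.keras.losses.Reduction.AUTO,
    name='mean_squared_error')

model.compile(optimizer='sgd', loss=loss_fn, metrics=['accuracy'])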

TensorFlow can not add training data through placeholder

Good day to all. I am studying TF. I made an example that feeds data through a placeholder, and it does not work. As I understand it, fetch_batch creates the batches in a form that cannot be fed through feed_dict, but I do not understand why, or how to fix it. I would be very grateful for help.
import tensorflow as tf
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler

housing = fetch_california_housing()
m, n = housing.data.shape
learning_rate = 0.1
n_epochs = 1000

scaler = StandardScaler()
scaled_housing_data = scaler.fit_transform(housing.data)
scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]

#X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
#y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
X = tf.placeholder(tf.float32, shape=(None, n+1), name="X")
y = tf.placeholder(tf.float32, shape=(None, 1), name="y")

def fetch_batch(batch_index, batch_size, epoch):
    np.random.seed(epoch * batch_size + batch_size)
    indices = np.random.randint(m, size=batch_size)
    X_batch = scaled_housing_data_plus_bias[indices]
    y_batch = housing.target.reshape(-1, 1)[indices]
    return X_batch, y_batch

batch_size = 100

theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
y_pred = tf.matmul(X, theta, name="prediction")
erorr = y_pred - y
mse = tf.reduce_mean(tf.square(erorr), name="mse")
#gradients = 2/m * tf.matmul(tf.transpose(X), erorr, name="gradients")
#training_op = tf.assign(theta, theta - learning_rate * gradients)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)

n_batches = int(np.ceil(m / batch_size))
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(batch_index, batch_size, epoch)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        if epoch % 100 == 0:
            print("Epoch:", epoch, "MSE:", mse.eval())
But in the end I get this error:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1333 try:
-> 1334 return fn(*args)
1335 except errors.OpError as e:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
1318 return self._call_tf_sessionrun(
-> 1319 options, feed_dict, fetch_list, target_list, run_metadata)
1320
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1406 self._session, options, feed_dict, fetch_list, target_list,
-> 1407 run_metadata)
1408
InvalidArgumentError: You must feed a value for placeholder tensor 'X_11' with dtype float and shape [?,9]
[[{{node X_11}} = Placeholder[dtype=DT_FLOAT, shape=[?,9], _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]
[[{{node mse_12/_7}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_15_mse_12", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-19-46a547b04ceb> in <module>()
56 sess.run(training_op, feed_dict={X:X_batch, y:y_batch})
57 if epoch % 100 == 0 :
---> 58 print("Epoch:", epoch, "MSE:", mse.eval())
59
60
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in eval(self, feed_dict, session)
711
712 """
--> 713 return _eval_using_default_session(self, feed_dict, self.graph, session)
714
715
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in _eval_using_default_session(tensors, feed_dict, graph, session)
5155 "the tensor's graph is different from the session's "
5156 "graph.")
-> 5157 return session.run(tensors, feed_dict)
5158
5159
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
927 try:
928 result = self._run(None, fetches, feed_dict, options_ptr,
--> 929 run_metadata_ptr)
930 if run_metadata:
931 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1150 if final_fetches or final_targets or (handle and feed_dict_tensor):
1151 results = self._do_run(handle, final_targets, final_fetches,
-> 1152 feed_dict_tensor, options, run_metadata)
1153 else:
1154 results = []
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1326 if handle is None:
1327 return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1328 run_metadata)
1329 else:
1330 return self._do_call(_prun_fn, handle, feeds, fetches)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1346 pass
1347 message = error_interpolation.interpolate(message, self._graph)
-> 1348 raise type(e)(node_def, op, message)
1349
1350 def _extend_graph(self):
InvalidArgumentError: You must feed a value for placeholder tensor 'X_11' with dtype float and shape [?,9]
[[node X_11 (defined at <ipython-input-19-46a547b04ceb>:23) = Placeholder[dtype=DT_FLOAT, shape=[?,9], _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]
[[{{node mse_12/_7}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_15_mse_12", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Caused by op 'X_11', defined at:
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.6/dist-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelapp.py", line 477, in start
ioloop.IOLoop.instance().start()
File "/usr/local/lib/python3.6/dist-packages/tornado/ioloop.py", line 888, in start
handler_func(fd_obj, events)
File "/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 450, in _handle_events
self._handle_recv()
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 480, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python3.6/dist-packages/zmq/eventloop/zmqstream.py", line 432, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 235, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/ipkernel.py", line 196, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.6/dist-packages/ipykernel/zmqshell.py", line 533, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-19-46a547b04ceb>", line 23, in <module>
X = tf.placeholder(tf.float32, shape=(None, n+1), name="X")
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/array_ops.py", line 1747, in placeholder
return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 5206, in placeholder
"Placeholder", dtype=dtype, shape=shape, name=name)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/deprecation.py", line 488, in new_func
return func(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 3274, in create_op
op_def=op_def)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py", line 1770, in __init__
self._traceback = tf_stack.extract_stack()
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'X_11' with dtype float and shape [?,9]
[[node X_11 (defined at <ipython-input-19-46a547b04ceb>:23) = Placeholder[dtype=DT_FLOAT, shape=[?,9], _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]
[[{{node mse_12/_7}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_15_mse_12", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
print("MSE=%s" % sess.run(mse, feed_dict={X:X_train, y:y_train}))
or
_, mse_value = sess.run([training_op, mse], feed_dict={X:X_train, y:y_train})
print("MSE=%s" % mse_value)

InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_56' with dtype float

I am a newbie in TensorFlow and am trying out the linear regression code given below:
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt

boston = load_boston()
type(boston)
boston.feature_names

bd = pd.DataFrame(data=boston.data, columns=boston.feature_names)
bd['Price'] = pd.DataFrame(data=boston.target)
np.random.shuffle(bd.values)

W = tf.Variable(0.0)
b = tf.Variable(0.0)
#print(bd.shape[1])

tf.summary.histogram('Weights', W)
tf.summary.histogram('Biases', b)

dataset_input = bd.iloc[:, 0:bd.shape[1]-1]
#dataset_input.head(2)
dataset_output = bd.iloc[:, bd.shape[1]-1]
dataset_output = dataset_output.values
dataset_output = dataset_output.reshape((bd.shape[0], 1))  # converted (506,) to (506,1) because in pandas
                                                           # the shape was not changing and it was needed later in feed_dict
dataset_input = dataset_input.values  # only dataset_input is in DataFrame form; converting it into np.ndarray

X = tf.placeholder(tf.float32, shape=(None, bd.shape[1]-1))
Y = tf.placeholder(tf.float32, shape=(None, 1))
Y_ = W*X + b
print(X.shape)
print(Y.shape)

loss = tf.reduce_mean(tf.square(Y_-Y))
tf.summary.scalar('loss', loss)
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()  #tf.global_variables_initializer()#tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

wb_ = []
with tf.Session() as sess:
    summary_merge = tf.summary.merge_all()
    writer = tf.summary.FileWriter("Users/ajay/Documents", sess.graph)
    epochs = 10
    sess.run(init)
    for i in range(epochs):
        s_mer = sess.run(summary_merge, feed_dict={X: dataset_input, Y: dataset_output})  #ERROR________ERROR
        sess.run(train, feed_dict={X: dataset_input, Y: dataset_output})
        sess.run(loss)
        writer.add_summary(s_mer, i)
        #tf.summary.histogram(name="loss",values=loss)
        if i % 5 == 0:
            print(i, sess.run([W, b]))
            wb_.append(sess.run([W, b]))
    print(writer.get_logdir())
    print(writer.close())
As the loop runs, the line
s_mer = sess.run(summary_merge, feed_dict={X: dataset_input, Y: dataset_output})
is giving me an error.
I really don't understand why feed_dict is present in this line.
What would happen if it were not there?
Also, why are we passing s_mer into add_summary()? I don't understand its role. Can't we just pass summary_merge into add_summary()?
Please explain the role of add_summary().
The error I'm getting in my IPython notebook is:
InvalidArgumentError Traceback (most recent call last)
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1038 try:
-> 1039 return fn(*args)
1040 except errors.OpError as e:
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
1020 feed_dict, fetch_list, target_list,
-> 1021 status, run_metadata)
1022
~/anaconda3/envs/Tensorflow/lib/python3.6/contextlib.py in __exit__(self, type, value, traceback)
87 try:
---> 88 next(self.gen)
89 except StopIteration:
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in raise_exception_on_not_ok_status()
465 compat.as_text(pywrap_tensorflow.TF_Message(status)),
--> 466 pywrap_tensorflow.TF_GetCode(status))
467 finally:
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_56' with dtype float
[[Node: Placeholder_56 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-48-ca71ed6cdb0f> in <module>()
68
69 for i in range(epochs):
---> 70 s_mer=sess.run(summary_merge,feed_dict={X:dataset_input,Y:dataset_output}) #ERROR________ERROR
71 sess.run(train,feed_dict={X:dataset_input,Y:dataset_output})
72
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
776 try:
777 result = self._run(None, fetches, feed_dict, options_ptr,
--> 778 run_metadata_ptr)
779 if run_metadata:
780 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
980 if final_fetches or final_targets:
981 results = self._do_run(handle, final_targets, final_fetches,
--> 982 feed_dict_string, options, run_metadata)
983 else:
984 results = []
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1030 if handle is None:
1031 return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1032 target_list, options, run_metadata)
1033 else:
1034 return self._do_call(_prun_fn, self._session, handle, feed_dict,
~/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1050 except KeyError:
1051 pass
-> 1052 raise type(e)(node_def, op, message)
1053
1054 def _extend_graph(self):
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_56' with dtype float
[[Node: Placeholder_56 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op 'Placeholder_56', defined at:
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelapp.py", line 486, in start
self.io_loop.start()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/platform/asyncio.py", line 127, in start
self.asyncio_loop.run_forever()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/asyncio/base_events.py", line 421, in run_forever
self._run_once()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/asyncio/base_events.py", line 1431, in _run_once
handle._run()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/platform/asyncio.py", line 117, in _handle_events
handler_func(fileobj, events)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/stack_context.py", line 276, in null_wrapper
return fn(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 450, in _handle_events
self._handle_recv()
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 480, in _handle_recv
self._run_callback(callback, msg)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 432, in _run_callback
callback(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tornado/stack_context.py", line 276, in null_wrapper
return fn(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/ipykernel/zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2662, in run_cell
raw_cell, store_history, silent, shell_futures)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2785, in _run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2903, in run_ast_nodes
if self.run_code(code, result):
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2963, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-35-a13d7623267d>", line 47, in <module>
X=tf.placeholder(tf.float32, shape=(None,bd.shape[1]-1))
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py", line 1507, in placeholder
name=name)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 1997, in _placeholder
name=name)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 768, in apply_op
op_def=op_def)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 2336, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/Users/ajay/anaconda3/envs/Tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1228, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder_56' with dtype float
[[Node: Placeholder_56 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
The problem is that the variables dataset_input and dataset_output are of dtype float64 while the placeholders use float32, and they need to be the same. dataset_input = np.array(dataset_input, dtype=np.float32) and dataset_output = np.array(dataset_output, dtype=np.float32) works on my computer.
Then, when you call sess.run(loss), you need to add feed_dict={X: dataset_input, Y: dataset_output} as well, because it uses the X and Y placeholders.
I really don't understand why feed_dict is present inside the above line. What would happen if it is not there?
You need feed_dict to feed data to your placeholders. If you don't provide them with any data, similar errors occur, because there is no data for your computations.
Hope it helps.
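Putting both suggestions together, a minimal sketch that keeps the question's names (dataset_input, dataset_output, X, Y, loss, and sess are assumed to be defined as in the question):
import numpy as np

# Cast the data to float32 so its dtype matches the placeholders.
dataset_input = np.array(dataset_input, dtype=np.float32)
dataset_output = np.array(dataset_output, dtype=np.float32)

# Any run that depends on the placeholders needs a feed_dict,
# including the standalone loss evaluation.
loss_value = sess.run(loss, feed_dict={X: dataset_input, Y: dataset_output})
print("loss:", loss_value)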

Tensorflow Loss and accuracy error Neural Network

Hope you are in good health. I am facing a problem in my self-built code. I am a beginner in machine learning and am working on a neural network. I built my own neural network and am trying to train a dataset with the help of TensorFlow, but I am getting a lot of loss and having some problems printing my accuracy. Please view the code. Thank you!
Here is my code; I run it on Kaggle:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
print(check_output(["ls", "../input"]).decode("utf8"))

mnist = input_data.read_data_sets('../input', one_hot=True)
x_train = mnist.train.images
y_train = mnist.train.labels
x_test = mnist.test.images
y_test = mnist.test.labels

# Any results you write to the current directory are saved as output.

layer1_neuron = 500
layer2_neuron = 500
layer3_neuron = 500
number_of_class = 10
batch_size = 200

x = tf.placeholder('float', [None, 784])  # 28 * 28 is 784 (shape of the data)
y = tf.placeholder('float')

# my neural network
def neural_network(x_train):
    hidden_layer_1 = {
        'weights': tf.Variable(tf.random_normal([784, layer1_neuron])),
        'biases': tf.Variable(tf.random_normal([layer1_neuron]))
    }
    hidden_layer_2 = {
        'weights': tf.Variable(tf.random_normal([layer1_neuron, layer2_neuron])),
        'biases': tf.Variable(tf.random_normal([layer2_neuron]))
    }
    hidden_layer_3 = {
        'weights': tf.Variable(tf.random_normal([layer2_neuron, layer3_neuron])),
        'biases': tf.Variable(tf.random_normal([layer3_neuron]))
    }
    output = {
        'weights': tf.Variable(tf.random_normal([layer3_neuron, number_of_class])),
        'biases': tf.Variable(tf.random_normal([number_of_class]))
    }
    l1 = tf.add(tf.matmul(x_train, hidden_layer_1['weights']), hidden_layer_1['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_layer_2['weights']), hidden_layer_2['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_layer_3['weights']), hidden_layer_3['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.add(tf.matmul(l3, output['weights']), output['biases'])
    return output

# for splitting out batches of data
def next_batch(num, data, labels):
    idx = np.arange(0, len(data))
    np.random.shuffle(idx)
    idx = idx[:num]
    data_shuffle = [data[i] for i in idx]
    labels_shuffle = [labels[i] for i in idx]
    return np.asarray(data_shuffle), np.asarray(labels_shuffle)

def traning_neuralNetwork(x_test, x_train, y_test, y_train):
    total_epochs = 5
    total_loss = 0
    epoch_loss = 0
    batch_size = 200
    num_batch = int(np.ceil(42000/batch_size))
    prediction = []
    prediction = neural_network(x_train)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y_train))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(total_epochs):
            epoch_loss = 0
            for _ in range(num_batch):
                x_train, y_train = next_batch(batch_size, x_train, y_train)
                _, epoch_loss = sess.run([optimizer, cost], feed_dict={x: x_train, y: y_train})
                total_loss += epoch_loss
            print('Epoch ', epoch, " loss = ", total_loss)
        print("Traning Complete!")
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('accuracy', accuracy.eval({x: x_test, y: y_test}))

traning_neuralNetwork(x_test, x_train, y_test, y_train)
Output Error
WARNING:tensorflow:From <ipython-input-3-92b45e11aa74>:61: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See tf.nn.softmax_cross_entropy_with_logits_v2.
Epoch 0 loss = 968685.6919555664
Epoch 1 loss = 1076421.9005126953
Epoch 2 loss = 1108946.4575500488
Epoch 3 loss = 1117600.8527259827
Epoch 4 loss = 1119452.7342455387
Traning Complete!
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1329 try:
-> 1330 return fn(*args)
1331 except errors.OpError as e:
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
1314 return self._call_tf_sessionrun(
-> 1315 options, feed_dict, fetch_list, target_list, run_metadata)
1316
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1422 self._session, options, feed_dict, fetch_list, target_list,
-> 1423 status, run_metadata)
1424
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
515 compat.as_text(c_api.TF_Message(self.status.status)),
--> 516 c_api.TF_GetCode(self.status.status))
517 # Delete the underlying status object from memory otherwise it stays alive
InvalidArgumentError: Incompatible shapes: [55000] vs. [10000]
[[Node: Equal = Equal[T=DT_INT64, _device="/job:localhost/replica:0/task:0/device:GPU:0"](ArgMax, ArgMax_1)]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-4-6ce6d479d3df> in <module>()
1
2
----> 3 traning_neuralNetwork(x_test,x_train,y_test,y_train)
<ipython-input-3-92b45e11aa74> in traning_neuralNetwork(x_test, x_train, y_test, y_train)
75 correct=tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
76 accuracy=tf.reduce_mean(tf.cast(correct,'float'))
---> 77 print('accuracy',accuracy.eval({x : x_test,y : y_test}))
78
79
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in eval(self, feed_dict, session)
659
660 """
--> 661 return _eval_using_default_session(self, feed_dict, self.graph, session)
662
663
/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in _eval_using_default_session(tensors, feed_dict, graph, session)
5061 "the tensor's graph is different from the session's "
5062 "graph.")
-> 5063 return session.run(tensors, feed_dict)
5064
5065
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
906 try:
907 result = self._run(None, fetches, feed_dict, options_ptr,
--> 908 run_metadata_ptr)
909 if run_metadata:
910 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1141 if final_fetches or final_targets or (handle and feed_dict_tensor):
1142 results = self._do_run(handle, final_targets, final_fetches,
-> 1143 feed_dict_tensor, options, run_metadata)
1144 else:
1145 results = []
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1322 if handle is None:
1323 return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1324 run_metadata)
1325 else:
1326 return self._do_call(_prun_fn, handle, feeds, fetches)
/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1341 except KeyError:
1342 pass
-> 1343 raise type(e)(node_def, op, message)
1344
1345 def _extend_graph(self):
InvalidArgumentError: Incompatible shapes: [55000] vs. [10000]
[[Node: Equal = Equal[T=DT_INT64, _device="/job:localhost/replica:0/task:0/device:GPU:0"](ArgMax, ArgMax_1)]]
Caused by op 'Equal', defined at:
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/opt/conda/lib/python3.6/site-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/opt/conda/lib/python3.6/site-packages/ipykernel/kernelapp.py", line 477, in start
ioloop.IOLoop.instance().start()
File "/opt/conda/lib/python3.6/site-packages/zmq/eventloop/ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "/opt/conda/lib/python3.6/site-packages/tornado/ioloop.py", line 888, in start
handler_func(fd_obj, events)
File "/opt/conda/lib/python3.6/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 440, in _handle_events
self._handle_recv()
File "/opt/conda/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 472, in _handle_recv
self._run_callback(callback, msg)
File "/opt/conda/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 414, in _run_callback
callback(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/opt/conda/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 235, in dispatch_shell
handler(stream, idents, msg)
File "/opt/conda/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/opt/conda/lib/python3.6/site-packages/ipykernel/ipkernel.py", line 196, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/opt/conda/lib/python3.6/site-packages/ipykernel/zmqshell.py", line 533, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2698, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/opt/conda/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2808, in run_ast_nodes
if self.run_code(code, result):
File "/opt/conda/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2862, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-4-6ce6d479d3df>", line 3, in <module>
traning_neuralNetwork(x_test,x_train,y_test,y_train)
File "<ipython-input-3-92b45e11aa74>", line 75, in traning_neuralNetwork
correct=tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/ops/gen_math_ops.py", line 2455, in equal
"Equal", x=x, y=y, name=name)
File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3306, in create_op
op_def=op_def)
File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1669, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): Incompatible shapes: [55000] vs. [10000]
[[Node: Equal = Equal[T=DT_INT64, _device="/job:localhost/replica:0/task:0/device:GPU:0"](ArgMax, ArgMax_1)]]
Your data matrix is a 4D array of size (48000 * 28 * 28 * 1), which contains far more than 784,500 elements.
As a side note, 32,928,000 is exactly 28 * 28 * 42000, so your X_train matrix probably has a different shape than you expect.
Could you add print(X_train.shape[0]) and double-check your matrix?
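A quick way to verify this (a minimal sketch; X_train stands for whatever array you feed the graph, and the flat (N, 784) target shape is only illustrative):

import numpy as np

# Print the full shape, not just the first dimension, so a (N, 28, 28, 1)
# image stack is not mistaken for a flat (N, 784) matrix.
print(X_train.shape)   # e.g. (48000, 28, 28, 1)
print(X_train.size)    # total number of elements

# If the placeholder expects flat vectors, flatten each example explicitly.
X_train_flat = np.reshape(X_train, (X_train.shape[0], -1))
print(X_train_flat.shape)   # e.g. (48000, 784)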
To solve your problem with GraphDef, have a look at this answer:
https://stackoverflow.com/a/36358913/4601719
tf.initialize_all_variables is deprecated; please use tf.global_variables_initializer instead, as recommended here:
https://www.tensorflow.org/api_docs/python/tf/initialize_all_variables
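For example (a minimal sketch of the replacement call, assuming a TF 1.x graph-mode session and a hypothetical variable w):

import tensorflow as tf

w = tf.Variable(tf.zeros([784, 10]), name="w")  # hypothetical variable

with tf.Session() as sess:
    # tf.initialize_all_variables() is deprecated; this is the drop-in replacement.
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.shape(w)))   # [784  10]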

Keras/Tensorflow InvalidArgumentError: Inputs to operation loss_8/classification_loss/Select_1 of type Select must have the same size and shape

I am getting a fairly indecipherable error from this image-recognition code. I am building a Faster R-CNN-style model using Keras-Retinanet with a TensorFlow backend, and the image annotations are in PASCAL VOC format. My best guess is that the data is somehow formatted improperly, but the error message gives me little clarity about what is breaking. Specifically, what is "InvalidArgumentError: Inputs to operation loss_8/classification_loss/Select_1 of type Select must have the same size and shape. Input 0: [67503,10] != input 1: [67503,5]" referring to?
batch_size = 1  # batch_size > 1 is not yet supported

model.fit_generator(
    generator=train_generator,
    steps_per_epoch=len(train_generator.image_names) // batch_size,
    epochs=50,
    verbose=1,
    validation_data=test_generator,
    validation_steps=3000,  # len(test_generator.image_names) // batch_size,
    callbacks=[
        keras.callbacks.ModelCheckpoint('snapshots/resnet50_model_1.h5', monitor='val_loss', verbose=1, save_best_only=True),
        keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0),
    ],
)
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
~/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1322 try:
-> 1323 return fn(*args)
1324 except errors.OpError as e:
~/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
1301 feed_dict, fetch_list, target_list,
-> 1302 status, run_metadata)
1303
~/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
472 compat.as_text(c_api.TF_Message(self.status.status)),
--> 473 c_api.TF_GetCode(self.status.status))
474 # Delete the underlying status object from memory otherwise it stays alive
InvalidArgumentError: Inputs to operation loss_8/classification_loss/Select_1 of type Select must have the same size and shape. Input 0: [67503,10] != input 1: [67503,5]
[[Node: loss_8/classification_loss/Select_1 = Select[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:GPU:0"](loss_8/classification_loss/Equal, loss_8/classification_loss/sub_1, loss_8/classification_loss/GatherNd)]]
[[Node: loss_8/classification_loss/Mean_2/_7785 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_12605_loss_8/classification_loss/Mean_2", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-55-006fdf1f857f> in <module>()
9 callbacks=[
10 keras.callbacks.ModelCheckpoint('snapshots/resnet50_model_1.h5', monitor='val_loss', verbose=1, save_best_only=True),
---> 11 keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0),
12 ],
13 )
~/anaconda3/lib/python3.5/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
85 warnings.warn('Update your `' + object_name +
86 '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 87 return func(*args, **kwargs)
88 wrapper._original_function = func
89 return wrapper
~/anaconda3/lib/python3.5/site-packages/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
2075 outs = self.train_on_batch(x, y,
2076 sample_weight=sample_weight,
-> 2077 class_weight=class_weight)
2078
2079 if not isinstance(outs, list):
~/anaconda3/lib/python3.5/site-packages/keras/engine/training.py in train_on_batch(self, x, y, sample_weight, class_weight)
1795 ins = x + y + sample_weights
1796 self._make_train_function()
-> 1797 outputs = self.train_function(ins)
1798 if len(outputs) == 1:
1799 return outputs[0]
~/anaconda3/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
2330 updated = session.run(self.outputs + [self.updates_op],
2331 feed_dict=feed_dict,
-> 2332 **self.session_kwargs)
2333 return updated[:len(self.outputs)]
2334
~/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
887 try:
888 result = self._run(None, fetches, feed_dict, options_ptr,
--> 889 run_metadata_ptr)
890 if run_metadata:
891 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1118 if final_fetches or final_targets or (handle and feed_dict_tensor):
1119 results = self._do_run(handle, final_targets, final_fetches,
-> 1120 feed_dict_tensor, options, run_metadata)
1121 else:
1122 results = []
~/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1315 if handle is None:
1316 return self._do_call(_run_fn, self._session, feeds, fetches, targets,
-> 1317 options, run_metadata)
1318 else:
1319 return self._do_call(_prun_fn, self._session, handle, feeds, fetches)
~/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
1334 except KeyError:
1335 pass
-> 1336 raise type(e)(node_def, op, message)
1337
1338 def _extend_graph(self):
InvalidArgumentError: Inputs to operation loss_8/classification_loss/Select_1 of type Select must have the same size and shape. Input 0: [67503,10] != input 1: [67503,5]
[[Node: loss_8/classification_loss/Select_1 = Select[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:GPU:0"](loss_8/classification_loss/Equal, loss_8/classification_loss/sub_1, loss_8/classification_loss/GatherNd)]]
[[Node: loss_8/classification_loss/Mean_2/_7785 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_12605_loss_8/classification_loss/Mean_2", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Caused by op 'loss_8/classification_loss/Select_1', defined at:
File "/home/james/anaconda3/lib/python3.5/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/home/james/anaconda3/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/james/anaconda3/lib/python3.5/site-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/home/james/anaconda3/lib/python3.5/site-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/home/james/anaconda3/lib/python3.5/site-packages/ipykernel/kernelapp.py", line 477, in start
ioloop.IOLoop.instance().start()
File "/home/james/anaconda3/lib/python3.5/site-packages/zmq/eventloop/ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "/home/james/anaconda3/lib/python3.5/site-packages/tornado/ioloop.py", line 888, in start
handler_func(fd_obj, events)
File "/home/james/anaconda3/lib/python3.5/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/home/james/anaconda3/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py", line 440, in _handle_events
self._handle_recv()
File "/home/james/anaconda3/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py", line 472, in _handle_recv
self._run_callback(callback, msg)
File "/home/james/anaconda3/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py", line 414, in _run_callback
callback(*args, **kwargs)
File "/home/james/anaconda3/lib/python3.5/site-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/home/james/anaconda3/lib/python3.5/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/home/james/anaconda3/lib/python3.5/site-packages/ipykernel/kernelbase.py", line 235, in dispatch_shell
handler(stream, idents, msg)
File "/home/james/anaconda3/lib/python3.5/site-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/home/james/anaconda3/lib/python3.5/site-packages/ipykernel/ipkernel.py", line 196, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/home/james/anaconda3/lib/python3.5/site-packages/ipykernel/zmqshell.py", line 533, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/home/james/anaconda3/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2698, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/home/james/anaconda3/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2802, in run_ast_nodes
if self.run_code(code, result):
File "/home/james/anaconda3/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2862, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-50-1a9dc197138b>", line 11, in <module>
optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001)
File "/home/james/anaconda3/lib/python3.5/site-packages/keras/engine/training.py", line 860, in compile
sample_weight, mask)
File "/home/james/anaconda3/lib/python3.5/site-packages/keras/engine/training.py", line 460, in weighted
score_array = fn(y_true, y_pred)
File "/home/james/anaconda3/lib/python3.5/site-packages/keras_retinanet/losses.py", line 37, in _focal
focal_weight = keras_retinanet.backend.where(keras.backend.equal(labels, 1), 1 - classification, classification)
File "/home/james/anaconda3/lib/python3.5/site-packages/keras_retinanet/backend/tensorflow_backend.py", line 47, in where
return tensorflow.where(*args, **kwargs)
File "/home/james/anaconda3/lib/python3.5/site-packages/tensorflow/python/ops/array_ops.py", line 2441, in where
return gen_math_ops._select(condition=condition, t=x, e=y, name=name)
File "/home/james/anaconda3/lib/python3.5/site-packages/tensorflow/python/ops/gen_math_ops.py", line 3988, in _select
"Select", condition=condition, t=t, e=e, name=name)
File "/home/james/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/home/james/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 2956, in create_op
op_def=op_def)
File "/home/james/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 1470, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): Inputs to operation loss_8/classification_loss/Select_1 of type Select must have the same size and shape. Input 0: [67503,10] != input 1: [67503,5]
[[Node: loss_8/classification_loss/Select_1 = Select[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:GPU:0"](loss_8/classification_loss/Equal, loss_8/classification_loss/sub_1, loss_8/classification_loss/GatherNd)]]
[[Node: loss_8/classification_loss/Mean_2/_7785 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_12605_loss_8/classification_loss/Mean_2", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
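For context on what the message refers to: the Select_1 node comes from keras_retinanet's focal loss, which calls tensorflow.where(condition, x, y) (see the keras_retinanet/losses.py frame in the traceback). A Select op requires all of its inputs to share one shape, so [67503,10] vs [67503,5] suggests that the classification output and the label targets disagree on the number of classes. A minimal sketch that reproduces the same class of runtime error, assuming TF 1.x and purely illustrative shapes (this is not the keras-retinanet code itself):

import numpy as np
import tensorflow as tf

# Placeholders with unknown static shapes, so the mismatch is only caught at run time,
# just as in the traceback above.
labels = tf.placeholder(tf.float32)          # fed as [N, 5]  (hypothetical label width)
classification = tf.placeholder(tf.float32)  # fed as [N, 10] (hypothetical class count)

# Mirrors the focal-loss line from the traceback: tf.where lowers to a Select op,
# whose condition, x and y inputs must all have the same shape.
focal_weight = tf.where(tf.equal(labels, 1), 1 - classification, classification)

with tf.Session() as sess:
    sess.run(focal_weight, feed_dict={
        labels: np.zeros((3, 5), dtype=np.float32),
        classification: np.zeros((3, 10), dtype=np.float32),
    })
    # Raises an InvalidArgumentError from the Select op, because the condition
    # ([3, 5]) and the selected tensors ([3, 10]) do not share one shape.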

Resources