InvalidArgumentError when I use model.fit() - python-3.x

I am trying to build a custom layer in Keras/TensorFlow, with the goal of eventually extending it into an RNN. The problem appears during training, where there is apparently an incompatibility between the output shape and the training dataset.
X_train.shape
(100, 5)
X_target.shape
(100, 5)
This is the custom layer:
class MyLayer(Layer):

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        print(self.output_dim)
        print(input_shape)
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(self.output_dim, input_shape[1]),
                                      initializer='uniform',
                                      trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        print('shape x: ')
        print(x.shape)
        print('shape kernel: ')
        print(self.kernel.shape)
        matrix = tf.transpose(self.kernel)
        print('matrix')
        print(matrix.shape)
        prod = K.dot(x, matrix)
        print('after product')
        print(prod.shape)
        return prod

    def compute_output_shape(self, input_shape):
        print('Compute output shape')
        print(input_shape)
        print(self.output_dim)
        return (input_shape[0], self.output_dim)
model = Sequential()
model.add(MyLayer(5, batch_input_shape=(100, 5)))
model.compile(optimizer='adam', loss='mse')
# fit model
model.fit(X_train, X_target)
tf.keras.__version__ = '2.1.6-tf'
Since I am passing an X_train with shape (100, 5), I would expect that multiplying it by a (5, 5) matrix would give a (100, 5) matrix, the same shape as the target, and thus that I would be able to train, in this case, a 5x5 weight matrix. Instead, I get this:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-77-4dee23ead957> in <module>()
6 model.compile(optimizer='adam', loss='mse')
7 # fit model
----> 8 model.fit(X_train, X_target)#, epochs=300, verbose=0)
~/anaconda3/envs/ldsa/lib/python3.5/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
1037 initial_epoch=initial_epoch,
1038 steps_per_epoch=steps_per_epoch,
-> 1039 validation_steps=validation_steps)
1040
1041 def evaluate(self, x=None, y=None,
~/anaconda3/envs/ldsa/lib/python3.5/site-packages/keras/engine/training_arrays.py in fit_loop(model, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
197 ins_batch[i] = ins_batch[i].toarray()
198
--> 199 outs = f(ins_batch)
200 outs = to_list(outs)
201 for l, o in zip(out_labels, outs):
~/anaconda3/envs/ldsa/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
2713 return self._legacy_call(inputs)
2714
-> 2715 return self._call(inputs)
2716 else:
2717 if py_any(is_tensor(x) for x in inputs):
~/anaconda3/envs/ldsa/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in _call(self, inputs)
2673 fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
2674 else:
-> 2675 fetched = self._callable_fn(*array_vals)
2676 return fetched[:len(self.outputs)]
2677
~/anaconda3/envs/ldsa/lib/python3.5/site-packages/tensorflow/python/client/session.py in __call__(self, *args, **kwargs)
1380 ret = tf_session.TF_SessionRunCallable(
1381 self._session._session, self._handle, args, status,
-> 1382 run_metadata_ptr)
1383 if run_metadata:
1384 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~/anaconda3/envs/ldsa/lib/python3.5/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
517 None, None,
518 compat.as_text(c_api.TF_Message(self.status.status)),
--> 519 c_api.TF_GetCode(self.status.status))
520 # Delete the underlying status object from memory otherwise it stays alive
521 # as there is a reference to status from this from the traceback due to
InvalidArgumentError: Incompatible shapes: [100,5] vs. [32,5]
[[Node: training_12/Adam/gradients/loss_17/my_layer_19_loss/sub_grad/BroadcastGradientArgs = BroadcastGradientArgs[T=DT_INT32, _class=["loc:@training_12/Adam/gradients/loss_17/my_layer_19_loss/sub_grad/Reshape"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](training_12/Adam/gradients/loss_17/my_layer_19_loss/sub_grad/Shape, training_12/Adam/gradients/loss_17/my_layer_19_loss/sub_grad/Shape_1)]]
My surprise comes from this: Incompatible shapes: [100,5] vs. [32,5], where does this 32 come from?
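For what it's worth, the 32 is almost certainly Keras's default batch size: model.fit() uses batch_size=32 when none is passed, so the targets are sliced into batches of 32 while the graph was built for a fixed batch of 100. A minimal sketch of two possible fixes, assuming the MyLayer class above:
# Option 1: make fit() use the same fixed batch size the layer was built with
model.fit(X_train, X_target, batch_size=100)

# Option 2: leave the batch dimension flexible; the kernel shape only
# depends on the feature axis, so any batch size then works
model = Sequential()
model.add(MyLayer(5, input_shape=(5,)))
model.compile(optimizer='adam', loss='mse')
model.fit(X_train, X_target)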

Related

MLP with TensorFlow Input Shape not Matching Output Shape

I'm trying to train a multilayer perceptron based on the Iris dataset using TensorFlow in Pycharm with Jupyter Notebook. Every time I run my code it fails on the model.fit() function and gives the following error.
ValueError: Shapes (None, 1) and (None, 3) are incompatible.
I've tried playing around with different values for the hyperparameters but there's obviously something I'm not getting. Any help/pointers that anyone could provide would be much appreciated.
Here's my data setup and preprocessing:
import pandas as pd
import numpy as np
# read iris data into pandas dataframe
iris = pd.read_csv("data/IRIS.csv", header=0)
# apply label to index column
iris.index.name = "id"
# create copy of iris dataframe in which to store normalised values and keep original dataframe for comparison later on
iris_unnormalized = iris
iris_normalized = iris.copy()
# isolate columns with numerical values
iris_num = iris.select_dtypes(include=[np.number])
# find max value in each column
col_maxes = iris_num.max()
# find overall max value among all columns
iris_num_max = col_maxes.max()
# divide all numerical values by overall max value in order to normalize data to a value between 0 and 1
iris_num_norm = iris_num / iris_num_max
# reassign normalised values back to their corresponding columns
iris_normalized[iris_num_norm.columns] = iris_num_norm
# specify seed for reproducibility
np.random.seed(1671)
training = iris_normalized.sample(frac = 0.8)
test = iris_normalized.drop(training.index)
# initialize the training input and output list
# same for testing set
X_train = []
Y_train = []
X_test = []
Y_test = []
# loop through the dataframe and separate inputs and outputs for training and testing
for index, row in training.iterrows():
    X_train.append([row['sepal length cm'], row['sepal width cm'], row['petal length cm'], row['petal width cm']])
    Y_train.append([row['species']])
for index, row in test.iterrows():
    X_test.append([row['sepal length cm'], row['sepal width cm'], row['petal length cm'], row['petal width cm']])
    Y_test.append([row['species']])
X_train = np.array(X_train).astype('float32')
Y_train = np.array(Y_train)
X_test = np.array(X_test).astype('float32')
Y_test = np.array(Y_test)
print(X_train.shape, "training samples") # Output: (120, 4) training samples
print(X_test.shape, "test samples") # Output: (30, 4) test samples
Here's where I try to create the neural network:
import tensorflow as tf
from tensorflow import keras
NB_CLASSES = 3 # number of iris varieties
N_HIDDEN = 128
BATCH_SIZE = 10
VERBOSE = 1
VALIDATION_SPLIT = 0.2 # how much of training set to hold for validation
EPOCHS = 200
model = tf.keras.models.Sequential(
    [
        keras.layers.Dense(N_HIDDEN, input_shape=(10,4,), batch_size=BATCH_SIZE, name="dense_layer1", activation="relu"),
        keras.layers.Dense(N_HIDDEN, input_shape=(4,), batch_size=BATCH_SIZE, name="dense_layer2", activation="relu"),
        keras.layers.Dense(NB_CLASSES, input_shape=(4,), batch_size=BATCH_SIZE, name="dense_layer3", activation="softmax"),
    ]
)
model.summary()
################### model summary output: #####################
Layer (type) Output Shape Param #
=================================================================
dense_layer1 (Dense) (10, 10, 128) 640
_________________________________________________________________
dense_layer2 (Dense) (10, 10, 128) 16512
_________________________________________________________________
dense_layer3 (Dense) (10, 10, 3) 387
=================================================================
Total params: 17,539
Trainable params: 17,539
Non-trainable params: 0
# compiling the model
model.compile(optimizer='SGD', loss='categorical_crossentropy', metrics=['accuracy'])
#train the model
model.fit(X_train, Y_train,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS,
          verbose=VERBOSE,
          validation_split=VALIDATION_SPLIT)
The data setup and normalization work fine but when I run the code to create the neural network I get the below error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_8888/2571387568.py in <module>
38
39 #train the model
---> 40 model.fit(X_train, Y_train,
41 batch_size=BATCH_SIZE,
42 epochs = EPOCHS,
\venv\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1182 _r=1):
1183 callbacks.on_train_batch_begin(step)
-> 1184 tmp_logs = self.train_function(iterator)
1185 if data_handler.should_sync:
1186 context.async_wait()
\venv\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
883
884 with OptionalXlaContext(self._jit_compile):
--> 885 result = self._call(*args, **kwds)
886
887 new_tracing_count = self.experimental_get_tracing_count()
\venv\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
931 # This is the first call of __call__, so we have to initialize.
932 initializers = []
--> 933 self._initialize(args, kwds, add_initializers_to=initializers)
934 finally:
935 # At this point we know that the initialization is complete (or less
\venv\lib\site-packages\tensorflow\python\eager\def_function.py in _initialize(self, args, kwds, add_initializers_to)
757 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
758 self._concrete_stateful_fn = (
--> 759 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
760 *args, **kwds))
761
\venv\lib\site-packages\tensorflow\python\eager\function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
3064 args, kwargs = None, None
3065 with self._lock:
-> 3066 graph_function, _ = self._maybe_define_function(args, kwargs)
3067 return graph_function
3068
\venv\lib\site-packages\tensorflow\python\eager\function.py in _maybe_define_function(self, args, kwargs)
3461
3462 self._function_cache.missed.add(call_context_key)
-> 3463 graph_function = self._create_graph_function(args, kwargs)
3464 self._function_cache.primary[cache_key] = graph_function
3465
\venv\lib\site-packages\tensorflow\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3296 arg_names = base_arg_names + missing_arg_names
3297 graph_function = ConcreteFunction(
-> 3298 func_graph_module.func_graph_from_py_func(
3299 self._name,
3300 self._python_function,
\venv\lib\site-packages\tensorflow\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes, acd_record_initial_resource_uses)
1005 _, original_func = tf_decorator.unwrap(python_func)
1006
-> 1007 func_outputs = python_func(*func_args, **func_kwargs)
1008
1009 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
\venv\lib\site-packages\tensorflow\python\eager\def_function.py in wrapped_fn(*args, **kwds)
666 # the function a weak reference to itself to avoid a reference cycle.
667 with OptionalXlaContext(compile_with_xla):
--> 668 out = weak_wrapped_fn().__wrapped__(*args, **kwds)
669 return out
670
\venv\lib\site-packages\tensorflow\python\framework\func_graph.py in wrapper(*args, **kwargs)
992 except Exception as e: # pylint:disable=broad-except
993 if hasattr(e, "ag_error_metadata"):
--> 994 raise e.ag_error_metadata.to_exception(e)
995 else:
996 raise
ValueError: in user code:
\venv\lib\site-packages\keras\engine\training.py:853 train_function *
return step_function(self, iterator)
\venv\lib\site-packages\keras\engine\training.py:842 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
\venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1286 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
\venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2849 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
\venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3632 _call_for_each_replica
return fn(*args, **kwargs)
\venv\lib\site-packages\keras\engine\training.py:835 run_step **
outputs = model.train_step(data)
\venv\lib\site-packages\keras\engine\training.py:788 train_step
loss = self.compiled_loss(
\venv\lib\site-packages\keras\engine\compile_utils.py:201 __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
\venv\lib\site-packages\keras\losses.py:141 __call__
losses = call_fn(y_true, y_pred)
\venv\lib\site-packages\keras\losses.py:245 call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
\venv\lib\site-packages\tensorflow\python\util\dispatch.py:206 wrapper
return target(*args, **kwargs)
\venv\lib\site-packages\keras\losses.py:1665 categorical_crossentropy
return backend.categorical_crossentropy(
\venv\lib\site-packages\tensorflow\python\util\dispatch.py:206 wrapper
return target(*args, **kwargs)
\venv\lib\site-packages\keras\backend.py:4839 categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
\venv\lib\site-packages\tensorflow\python\framework\tensor_shape.py:1161 assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, 1) and (None, 3) are incompatible
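For reference, two details in the code above would each produce a mismatch like this: the species labels are left as strings of shape (None, 1) while categorical_crossentropy expects one-hot targets of shape (None, 3), and input_shape=(10,4,) adds a spurious extra dimension. A minimal sketch of a fix, assuming the variable names from the question:
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical

# encode the string species labels as integers 0..2, then one-hot to shape (n, 3)
encoder = LabelEncoder()
Y_train_onehot = to_categorical(encoder.fit_transform(Y_train.ravel()), NB_CLASSES)

model = tf.keras.models.Sequential([
    # input_shape=(4,): one feature vector per sample, no extra (10, 4) dimension
    keras.layers.Dense(N_HIDDEN, input_shape=(4,), name="dense_layer1", activation="relu"),
    keras.layers.Dense(N_HIDDEN, name="dense_layer2", activation="relu"),
    keras.layers.Dense(NB_CLASSES, name="dense_layer3", activation="softmax"),
])
model.compile(optimizer='SGD', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, Y_train_onehot, batch_size=BATCH_SIZE, epochs=EPOCHS,
          verbose=VERBOSE, validation_split=VALIDATION_SPLIT)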

ValueError: Layer sequential_2 expects 1 inputs, but it received 2 input tensors. Inputs received

I am trying to build a simple model using the IAM Handwritten dataset from Kaggle and some sample code from a textbook I'm using, but I keep getting an error when I try to fit the model.
The error says ValueError: Layer sequential_2 expects 1 inputs, but it received 2 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, None, None, None) dtype=float32>, <tf.Tensor 'IteratorGetNext:1' shape=(None, None) dtype=float32>]
Full source code:
from __future__ import division
import numpy as np
import os
import glob
import tensorflow as tf
from random import *
from PIL import Image
from keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.image as mpimg
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Lambda, ELU, Activation, BatchNormalization
from keras.layers.convolutional import Convolution2D, Cropping2D, ZeroPadding2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD, Adam, RMSprop
d = {}
from subprocess import check_output
with open('./forms_for_parsing.txt') as f:
    for line in f:
        key = line.split(' ')[0]
        writer = line.split(' ')[1]
        d[key] = writer
print(len(d.keys()))
tmp = []
target_list = []
path_to_files = os.path.join('./input/data_subset/data_subset', '*')
for filename in sorted(glob.glob(path_to_files)):
    # print(filename)
    tmp.append(filename)
    image_name = filename.split('/')[-1]
    file, ext = os.path.splitext(image_name)
    parts = file.split('-')
    p = parts[0].split('\\')
    form = p[1] + '-' + parts[1]
    for key in d:
        if key == form:
            target_list.append(str(d[form]))
    # print(d)
    # print(parts[0])
    # p = parts[0].split('\\')
    # print(p[1])
    # print(form)
img_files = np.asarray(tmp)
img_targets = np.asarray(target_list)
print(img_files.shape)
print(img_targets.shape)
for filename in img_files[:20]:
    img = mpimg.imread(filename)
    plt.figure(figsize=(10,10))
    plt.imshow(img, cmap='gray')
encoder = LabelEncoder()
encoder.fit(img_targets)
encoded_Y = encoder.transform(img_targets)
print(img_files[:5], img_targets[:5], encoded_Y[:5])
train_files, rem_files, train_targets, rem_targets = train_test_split(
    img_files, encoded_Y, train_size=0.66, random_state=52, shuffle=True)
validation_files, test_files, validation_targets, test_targets = train_test_split(
    rem_files, rem_targets, train_size=0.5, random_state=22, shuffle=True)
print(train_files.shape, validation_files.shape, test_files.shape)
print(train_targets.shape, validation_targets.shape, test_targets.shape)
batch_size = 16 # 8
num_classes = 50
# Start with train generator shared in the class and add image augmentations
def generate_data(samples, target_files, batch_size=batch_size, factor=0.1):
    num_samples = len(samples)
    from sklearn.utils import shuffle
    while 1:  # Loop forever so the generator never terminates
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            batch_targets = target_files[offset:offset+batch_size]
            images = []
            targets = []
            for i in range(len(batch_samples)):
                batch_sample = batch_samples[i]
                batch_target = batch_targets[i]
                im = Image.open(batch_sample)
                cur_width = im.size[0]
                cur_height = im.size[1]
                # print(cur_width, cur_height)
                height_fac = 113 / cur_height
                new_width = int(cur_width * height_fac)
                size = new_width, 113
                imresize = im.resize((size), Image.ANTIALIAS)  # Resize so height = 113 while keeping aspect ratio
                now_width = imresize.size[0]
                now_height = imresize.size[1]
                # Generate crops of size 113x113 from this resized image and keep random 10% of crops
                avail_x_points = list(range(0, now_width - 113))  # total x start points are from 0 to width - 113
                # Pick random x%
                pick_num = int(len(avail_x_points) * factor)
                # Now pick
                random_startx = sample(avail_x_points, pick_num)
                for start in random_startx:
                    imcrop = imresize.crop((start, 0, start+113, 113))
                    images.append(np.asarray(imcrop))
                    targets.append(batch_target)
            # trim image to only see section with road
            X_train = np.array(images)
            y_train = np.array(targets)
            # reshape X_train for feeding in later
            X_train = X_train.reshape(X_train.shape[0], 113, 113, 1)
            # convert to float and normalize
            X_train = X_train.astype('float32')
            X_train /= 255
            # One hot encode y
            y_train = to_categorical(y_train, num_classes)
            yield shuffle(X_train, y_train)
train_generator = generate_data(train_files, train_targets, batch_size=batch_size, factor = 0.3)
validation_generator = generate_data(validation_files, validation_targets, batch_size=batch_size, factor = 0.3)
test_generator = generate_data(test_files, test_targets, batch_size=batch_size, factor = 0.1)
def resize_image(image):
    # Function to resize image to 56x56
    return tf.image.resize(image, [56,56])
row, col, ch = 113, 113, 1
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(row, col, ch)))
# Resise data within the neural network
model.add(Lambda(resize_image)) #resize images to allow for easy computation
# CNN model - Building the model suggested in paper
model.add(Convolution2D(filters= 32, kernel_size =(5,5), strides= (2,2), padding='same', name='conv1')) #96
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2), name='pool1'))
model.add(Convolution2D(filters= 64, kernel_size =(3,3), strides= (1,1), padding='same', name='conv2')) #256
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2), name='pool2'))
model.add(Convolution2D(filters= 128, kernel_size =(3,3), strides= (1,1), padding='same', name='conv3')) #256
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2), name='pool3'))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(512, name='dense1')) #1024
# model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, name='dense2')) #1024
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes,name='output'))
model.add(Activation('softmax')) #softmax since output is within 50 classes
model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.summary()
nb_epoch = 8
samples_per_epoch = 3268
nb_val_samples = 842
# #save every model using Keras checkpoint
from keras.callbacks import ModelCheckpoint
#filepath="check-{epoch:02d}-{val_loss:.4f}.hdf5"
filepath="low_loss.hdf5"
checkpoint = ModelCheckpoint(filepath= filepath, verbose=1, save_best_only=False)
callbacks_list = [checkpoint]
# #Model fit generator
history_object = model.fit_generator(train_generator, steps_per_epoch=(samples_per_epoch/batch_size),
                                     validation_data=validation_generator,
                                     validation_steps=nb_val_samples, epochs=nb_epoch, verbose=1, callbacks=callbacks_list)
And this is the error I got:
ValueError Traceback (most recent call last)
<ipython-input-79-99c01bc062d8> in <module>
12
13 # #Model fit generator
---> 14 history_object = model.fit_generator(train_generator, steps_per_epoch = (samples_per_epoch/batch_size),
15 validation_data=validation_generator,
16 validation_steps=nb_val_samples, epochs=nb_epoch, verbose=1, callbacks=callbacks_list)
~\anaconda3\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
322 'in a future version' if date is None else ('after %s' % date),
323 instructions)
--> 324 return func(*args, **kwargs)
325 return tf_decorator.make_decorator(
326 func, new_func, 'deprecated',
~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1813 """
1814 _keras_api_gauge.get_cell('fit_generator').set(True)
-> 1815 return self.fit(
1816 generator,
1817 steps_per_epoch=steps_per_epoch,
~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1096 batch_size=batch_size):
1097 callbacks.on_train_batch_begin(step)
-> 1098 tmp_logs = train_function(iterator)
1099 if data_handler.should_sync:
1100 context.async_wait()
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
812 # In this case we have not created variables on the first call. So we can
813 # run the first trace but we should fail if variables are created.
--> 814 results = self._stateful_fn(*args, **kwds)
815 if self._created_variables:
816 raise ValueError("Creating variables on a non-first call to a function"
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
2826 """Calls a graph function specialized to the inputs."""
2827 with self._lock:
-> 2828 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
2829 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
2830
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _maybe_define_function(self, args, kwargs)
3208 and self.input_signature is None
3209 and call_context_key in self._function_cache.missed):
-> 3210 return self._define_function_with_shape_relaxation(args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _define_function_with_shape_relaxation(self, args, kwargs)
3139 expand_composites=True)
3140
-> 3141 graph_function = self._create_graph_function(
3142 args, kwargs, override_flat_arg_shapes=relaxed_arg_shapes)
3143 self._function_cache.arg_relaxed[rank_only_cache_key] = graph_function
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~\anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~\anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
ValueError: in user code:
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:806 train_function *
return step_function(self, iterator)
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:789 run_step **
outputs = model.train_step(data)
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:747 train_step
y_pred = self(x, training=True)
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:975 __call__
input_spec.assert_input_compatibility(self.input_spec, inputs,
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\keras\engine\input_spec.py:155 assert_input_compatibility
raise ValueError('Layer ' + layer_name + ' expects ' +
ValueError: Layer sequential_2 expects 1 inputs, but it received 2 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, None, None, None) dtype=float32>, <tf.Tensor 'IteratorGetNext:1' shape=(None, None) dtype=float32>]
I couldn't understand the error message, so could somebody please help me out?
Thank you.
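For what it's worth, the likely culprit is the generator's last line: sklearn.utils.shuffle(X_train, y_train) returns a list of two arrays, and yielding a list makes Keras interpret it as two separate model inputs instead of an (inputs, targets) pair. A minimal sketch of the usual fix inside generate_data(), assuming the code above:
# yield an explicit (inputs, targets) tuple instead of the list
# returned by sklearn's shuffle()
X_shuffled, y_shuffled = shuffle(X_train, y_train)
yield X_shuffled, y_shuffled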

Tensorflow.keras: AlreadyExistsError

I am building a model using TensorFlow. I trained my model and it worked normally. Then I modified my code, and now when I try to train the model I get an AlreadyExistsError. I restarted my Jupyter Notebook but I'm still getting the same error. I need some help please.
Here is the piece of code where I build the network and train it. The problem occurs in the last line.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, Dropout, Dense, Flatten, LSTM, MaxPooling1D, Bidirectional
from tensorflow.keras.optimizers import Adam
from keras.callbacks import EarlyStopping, TensorBoard
model = Sequential()
model.add(Conv1D(32, kernel_size=3, activation='elu', padding='same',
                 input_shape=(vector_size, 1)))
model.add(Conv1D(32, kernel_size=3, activation='elu', padding='same'))
model.add(Conv1D(32, kernel_size=3, activation='relu', padding='same'))
model.add(MaxPooling1D(pool_size=3))
model.add(Bidirectional(LSTM(512, dropout=0.2, recurrent_dropout=0.3)))
model.add(Dense(512, activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(512, activation='sigmoid'))
model.add(Dropout(0.25))
model.add(Dense(512, activation='sigmoid'))
model.add(Dropout(0.25))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001, decay=1e-6), metrics=['accuracy'])
tensorboard = TensorBoard(log_dir='logs/', histogram_freq=0, write_graph=True, write_images=True)
model.summary()
model.fit(np.array(x_train), np.array(y_train), batch_size=batch_size, epochs=no_epochs,
          validation_data=(np.array(x_test), np.array(y_test)),
          callbacks=[tensorboard, EarlyStopping(min_delta=0.0001, patience=3)])
Train on 90000 samples, validate on 10000 samples
Epoch 1/10
  500/90000 [..............................] - ETA: 2:00:49
/anaconda3/lib/python3.7/site-packages/keras/callbacks/callbacks.py:846: RuntimeWarning: Early stopping conditioned on metric `val_loss` which is not available. Available metrics are: (self.monitor, ','.join(list(logs.keys()))), RuntimeWarning
---------------------------------------------------------------------------
AlreadyExistsError                        Traceback (most recent call last)
in <module>
1 model.fit(np.array(x_train), np.array(y_train), batch_size=batch_size, epochs=no_epochs,
----> 2 validation_data=(np.array(x_test), np.array(y_test)), callbacks=[tensorboard, EarlyStopping(min_delta=0.0001, patience=3)])
3 print('You can continue')
/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py
in fit(self, x, y, batch_size, epochs, verbose, callbacks,
validation_split, validation_data, shuffle, class_weight,
sample_weight, initial_epoch, steps_per_epoch, validation_steps,
validation_freq, max_queue_size, workers, use_multiprocessing,
**kwargs)
817 max_queue_size=max_queue_size,
818 workers=workers,
--> 819 use_multiprocessing=use_multiprocessing)
820
821 def evaluate(self,
/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2.py
in fit(self, model, x, y, batch_size, epochs, verbose, callbacks,
validation_split, validation_data, shuffle, class_weight,
sample_weight, initial_epoch, steps_per_epoch, validation_steps,
validation_freq, max_queue_size, workers, use_multiprocessing,
**kwargs)
340 mode=ModeKeys.TRAIN,
341 training_context=training_context,
--> 342 total_epochs=epochs)
343 cbks.make_logs(model, epoch_logs, training_result, ModeKeys.TRAIN)
344
/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2.py
in run_one_epoch(model, iterator, execution_function, dataset_size,
batch_size, strategy, steps_per_epoch, num_samples, mode,
training_context, total_epochs)
126 step=step, mode=mode, size=current_batch_size) as batch_logs:
127 try:
--> 128 batch_outs = execution_function(iterator)
129 except (StopIteration, errors.OutOfRangeError):
130 # TODO(kaftan): File bug about tf function and errors.OutOfRangeError?
/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py
in execution_function(input_fn)
96 # numpy translates Tensors to values in Eager mode.
97 return nest.map_structure(_non_none_constant_value,
---> 98 distributed_function(input_fn))
99
100 return execution_function
/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py
in __call__(self, *args, **kwds)
566 xla_context.Exit()
567 else:
--> 568 result = self._call(*args, **kwds)
569
570 if tracing_count == self._get_tracing_count():
/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py
in _call(self, *args, **kwds)
630 # Lifting succeeded, so variables are initialized and we can run the
631 # stateless function.
--> 632 return self._stateless_fn(*args, **kwds)
633 else:
634 canon_args, canon_kwds = \
/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py
in __call__(self, *args, **kwargs)
   2361     with self._lock:
   2362       graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
-> 2363     return graph_function._filtered_call(args, kwargs)  # pylint: disable=protected-access
   2364
   2365   @property
/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py
in _filtered_call(self, args, kwargs)
   1609         if isinstance(t, (ops.Tensor,
   1610                           resource_variable_ops.BaseResourceVariable))),
-> 1611         self.captured_inputs)
   1612
   1613   def _call_flat(self, args, captured_inputs, cancellation_manager=None):
/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py
in _call_flat(self, args, captured_inputs, cancellation_manager)
   1690       # No tape is watching; skip to running the function.
   1691       return self._build_call_outputs(self._inference_function.call(
-> 1692           ctx, args, cancellation_manager=cancellation_manager))
   1693     forward_backward = self._select_forward_and_backward_functions(
   1694         args,
/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py
in call(self, ctx, args, cancellation_manager)
543 inputs=args,
544 attrs=("executor_type", executor_type, "config_proto", config),
--> 545 ctx=ctx)
546 else:
547 outputs = execute.execute_with_cancellation(
/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/execute.py
in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
65 else:
66 message = e.message
---> 67 six.raise_from(core._status_to_exception(e.code, message), None)
68 except TypeError as e:
69 keras_symbolic_tensors = [
/anaconda3/lib/python3.7/site-packages/six.py in raise_from(value, from_value)
AlreadyExistsError: Resource
__per_step_0/sequential/bidirectional/forward_lstm/while_grad/body/_429/gradients/AddN_13/tmp_var/N10tensorflow19TemporaryVariableOp6TmpVarE
[[{{node
sequential/bidirectional/forward_lstm/while_grad/body/_429/gradients/AddN_13/tmp_var}}]]
[Op:__inference_distributed_function_12060]
Function call stack: distributed_function
You most likely have leftover state from a previous run of the same architecture interfering with training.
This should reset the Keras session:
from tensorflow.keras import backend
backend.clear_session()
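As a usage note, clear_session() has to run before the model is rebuilt; a minimal sketch:
from tensorflow.keras import backend

backend.clear_session()  # drop graph state left over from the previous run
model = Sequential()     # ...then rebuild and recompile the model as above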
In my case I got the problem when using EMR on AWS, and I solved it by simply uninstalling Keras. If you have TensorFlow you already have Keras built in, but other packages depend on the old standalone Keras library, so you need to uninstall the old Keras to avoid this problem.
!pip uninstall keras

Input to reshape is a tensor with 'batch_size' values, but the requested shape requires a multiple of 'n_features'

I'm trying to make my own attention model, and I found example code here:
https://www.kaggle.com/takuok/bidirectional-lstm-and-attention-lb-0-043
It works just fine when I run it without modification.
But my own data contains only numeric values, so I had to change the example code. I removed the embedding part, and this is what I changed:
xtr = np.reshape(xtr, (xtr.shape[0], 1, xtr.shape[1]))
# xtr.shape = (n_sample_train, 1, 150), y.shape = (n_sample_train, 6)
xte = np.reshape(xte, (xte.shape[0], 1, xte.shape[1]))
# xte.shape = (n_sample_test, 1, 150)
model = BidLstm(maxlen, max_features)
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
And my BidLstm function looks like:
def BidLstm(maxlen, max_features):
    inp = Input(shape=(1, 150))
    # x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    # -> I don't need the embedding since my own data is numeric.
    x = Bidirectional(LSTM(300, return_sequences=True, dropout=0.25,
                           recurrent_dropout=0.25))(inp)
    x = Attention(maxlen)(x)
    x = Dense(256, activation="relu")(x)
    x = Dropout(0.25)(x)
    x = Dense(6, activation="sigmoid")(x)
    model = Model(inputs=inp, outputs=x)
    return model
and it said,
InvalidArgumentErrorTraceback (most recent call last)
<ipython-input-62-929955370368> in <module>
29
30 early = EarlyStopping(monitor="val_loss", mode="min", patience=1)
---> 31 model.fit(xtr, y, batch_size=128, epochs=15, validation_split=0.1, callbacks=[early])
32 #model.fit(xtr, y, batch_size=256, epochs=1, validation_split=0.1)
33
/usr/local/lib/python3.5/dist-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
1037 initial_epoch=initial_epoch,
1038 steps_per_epoch=steps_per_epoch,
-> 1039 validation_steps=validation_steps)
1040
1041 def evaluate(self, x=None, y=None,
/usr/local/lib/python3.5/dist-packages/keras/engine/training_arrays.py in fit_loop(model, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
197 ins_batch[i] = ins_batch[i].toarray()
198
--> 199 outs = f(ins_batch)
200 outs = to_list(outs)
201 for l, o in zip(out_labels, outs):
/usr/local/lib/python3.5/dist-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
2713 return self._legacy_call(inputs)
2714
-> 2715 return self._call(inputs)
2716 else:
2717 if py_any(is_tensor(x) for x in inputs):
/usr/local/lib/python3.5/dist-packages/keras/backend/tensorflow_backend.py in _call(self, inputs)
2673 fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
2674 else:
-> 2675 fetched = self._callable_fn(*array_vals)
2676 return fetched[:len(self.outputs)]
2677
/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py in __call__(self, *args, **kwargs)
1437 ret = tf_session.TF_SessionRunCallable(
1438 self._session._session, self._handle, args, status,
-> 1439 run_metadata_ptr)
1440 if run_metadata:
1441 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
526 None, None,
527 compat.as_text(c_api.TF_Message(self.status.status)),
--> 528 c_api.TF_GetCode(self.status.status))
529 # Delete the underlying status object from memory otherwise it stays alive
530 # as there is a reference to status from this from the traceback due to
InvalidArgumentError: Input to reshape is a tensor with 128 values, but the requested shape requires a multiple of 150
[[{{node attention_16/Reshape_2}}]]
[[{{node loss_5/mul}}]]
I think something is wrong in the loss function, as described here:
Input to reshape is a tensor with 2 * "batch_size" values, but the requested shape has "batch_size"
but I don't know which part to fix.
My Keras and TensorFlow versions are 2.2.4 and 1.13.0-rc0.
Please help. Thanks.
Edit 1
I changed my batch size to a multiple of 150 (batch_size = 150), as the error message suggests. Then it reports:
Train on 143613 samples, validate on 15958 samples
Epoch 1/15
143400/143613 [============================>.] - ETA: 0s - loss: 0.1505 - acc: 0.9619
InvalidArgumentError: Input to reshape is a tensor with 63 values, but the requested shape requires a multiple of 150
[[{{node attention_18/Reshape_2}}]]
[[{{node metrics_6/acc/Mean_1}}]]
and the details are the same as before. What should I do?
Your input shape must be (150, 1).
LSTM shapes are (batch, steps, features). It's pointless to use an LSTM with only 1 step. (Unless you are using custom training loops with stateful=True, which is not your case.)
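A minimal sketch of that fix, assuming xtr and xte start as 2-D arrays of shape (n_samples, 150):
# treat the 150 numeric values as 150 timesteps of 1 feature each
xtr = np.reshape(xtr, (xtr.shape[0], xtr.shape[1], 1))  # (n_sample_train, 150, 1)
xte = np.reshape(xte, (xte.shape[0], xte.shape[1], 1))  # (n_sample_test, 150, 1)

# ...and inside BidLstm, declare the matching input shape:
inp = Input(shape=(150, 1))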

Keras: TypeError: run() got an unexpected keyword argument 'kernel_regularizer'

I am using tensorflow==1.2.1 and Keras==2.0.6 to build a model:
input_num = X_norm_keras[:,2:].shape[1]
model_keras = Sequential()
model_keras.add(Dense(10, input_dim=input_num, activation='relu'))
model_keras.add(Dense(1, activation='linear'))
model_keras.compile(loss='mean_squared_error', kernel_regularizer=regularizers.l2(0.2), optimizer='adam')
model_keras.fit(X_norm_train[:,2:], y_norm_train, batch_size=25, epochs=250)
But got the following errors:
Using TensorFlow backend.
total data points = (25, 106)
Epoch 1/250
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-4-4cbd897903e7> in <module>()
102 model_keras.compile(loss='mean_squared_error', kernel_regularizer=regularizers.l2(0.2), optimizer='adam')
--> 103 model_keras.fit(X_norm_train[:,2:], y_norm_train, batch_size=25, epochs=250)
/usr/local/lib/python3.4/dist-packages/keras/models.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
861 class_weight=class_weight,
862 sample_weight=sample_weight,
--> 863 initial_epoch=initial_epoch)
864
865 def evaluate(self, x, y, batch_size=32, verbose=1,
/usr/local/lib/python3.4/dist-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
1428 val_f=val_f, val_ins=val_ins, shuffle=shuffle,
1429 callback_metrics=callback_metrics,
-> 1430 initial_epoch=initial_epoch)
1431
1432 def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
/usr/local/lib/python3.4/dist-packages/keras/engine/training.py in _fit_loop(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch)
1077 batch_logs['size'] = len(batch_ids)
1078 callbacks.on_batch_begin(batch_index, batch_logs)
-> 1079 outs = f(ins_batch)
1080 if not isinstance(outs, list):
1081 outs = [outs]
/usr/local/lib/python3.4/dist-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
2266 updated = session.run(self.outputs + [self.updates_op],
2267 feed_dict=feed_dict,
-> 2268 **self.session_kwargs)
2269 return updated[:len(self.outputs)]
2270
TypeError: run() got an unexpected keyword argument 'kernel_regularizer'
Am I missing anything here? Thanks!
The regularizer kernel_regularizer=regularizers.l2(0.2) should be an argument of Dense(), not model.compile().
From the documentation of model.compile():
**kwargs: When using the Theano/CNTK backends, these arguments are passed into K.function. When using the TensorFlow backend, these arguments are passed into tf.Session.run.
That's why you are seeing an error coming from run().
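A minimal sketch of the corrected model, moving the regularizer onto the layer (same variables as in the question):
from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers

model_keras = Sequential()
# the L2 penalty belongs on the layer's kernel, not on compile()
model_keras.add(Dense(10, input_dim=input_num, activation='relu',
                      kernel_regularizer=regularizers.l2(0.2)))
model_keras.add(Dense(1, activation='linear'))
model_keras.compile(loss='mean_squared_error', optimizer='adam')
model_keras.fit(X_norm_train[:,2:], y_norm_train, batch_size=25, epochs=250)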
