tensorflow error: ValueError: Input 0 of layer sequential is incompatible with the layer expected axis -1 - python-3.x

I am trying to modify code provided to me to import an image file and build a training and test set using Keras.
I am receiving the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-154-b4983c6bd066> in <module>()
1 # Fit the model
----> 2 history = model.fit(X_train, y_train, batch_size = 256, epochs = 15, verbose=2, validation_data=(X_test,y_test))
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1096 batch_size=batch_size):
1097 callbacks.on_train_batch_begin(step)
-> 1098 tmp_logs = train_function(iterator)
1099 if data_handler.should_sync:
1100 context.async_wait()
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\def_function.py in _initialize(self, args, kwds, add_initializers_to)
695 self._concrete_stateful_fn = (
696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 697 *args, **kwds))
698
699 def invalid_creator_scope(*unused_args, **unused_kwds):
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3073 arg_names=arg_names,
3074 override_flat_arg_shapes=override_flat_arg_shapes,
-> 3075 capture_by_value=self._capture_by_value),
3076 self._function_attributes,
3077 function_spec=self.function_spec,
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\eager\def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\framework\func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
ValueError: in user code:
C:\Users\synar\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\keras\engine\training.py:806 train_function *
return step_function(self, iterator)
C:\Users\synar\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\keras\engine\training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Users\synar\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\distribute\distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\synar\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\distribute\distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\synar\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\distribute\distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
C:\Users\synar\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\keras\engine\training.py:789 run_step **
outputs = model.train_step(data)
C:\Users\synar\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\keras\engine\training.py:747 train_step
y_pred = self(x, training=True)
C:\Users\synar\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\keras\engine\base_layer.py:976 __call__
self.name)
C:\Users\synar\AppData\Roaming\Python\Python37\site-packages\tensorflow\python\keras\engine\input_spec.py:216 assert_input_compatibility
' but received input with shape ' + str(shape))
ValueError: Input 0 of layer sequential_41 is incompatible with the layer: expected axis -1 of input shape to have value 784 but received input with shape [None, 1]
The code I have implemented so far is:
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn import model_selection
from scipy.io import loadmat
data = loadmat('notMNIST_small.mat')
X_temp = data['images']/255
#for i in range(X_temp.shape[2]):
X = np.empty(shape=[X_temp.shape[2]] + [784], dtype='float32')
for i in range(X_temp.shape[2]):
    X[i,:] = X_temp[:,:,i].flatten()
y = pd.get_dummies(data['labels']).to_numpy()
print(X_temp.shape)
print(X.shape)
print(y.shape)
X[1,:]
X = np.array(data['labels']).reshape(-1, 1)
y = np.array(data['labels'])
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=9)
stdscaler = preprocessing.StandardScaler().fit(X_train)
X_train_scaled = stdscaler.transform(X_train)
X_test_scaled = stdscaler.transform(X_test)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.regularizers import l2, l1
from tensorflow.keras.optimizers import SGD
# Stochastic Logistic Regression
model = Sequential()
# Model
model.add(Dense(units=10, input_shape = [784,], activation = 'relu', kernel_regularizer=l2(0)))
model.add(Dense(units = 40, activation = 'relu'))
model.add(Dense(units = 10, activation = 'sigmoid'))
# Compile model
sgd = SGD(lr=0.1)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
I get the error when running the following cell:
# Fit the model
history = model.fit(X_train, y_train, batch_size = 256, epochs = 15, verbose=2, validation_data=(X_test,y_test))
Any help in resolving this would be great. I am new to machine learning, so excuse my ignorance.

Reshape your X_train so that each sample is a flat 784-dimensional vector:
X_train = X_train.reshape(-1, 28*28)
model = Sequential()
# Model
model.add(Dense(units=10, input_dim = 784, activation = 'relu', kernel_regularizer=l2(0)))
model.add(Dense(units = 40, activation = 'relu'))
model.add(Dense(units = 10, activation = 'sigmoid'))
# Compile model
sgd = SGD(lr=0.1)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
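Note also that in the question's preprocessing, the correctly flattened X is later overwritten with X = np.array(data['labels']).reshape(-1, 1), which is exactly what produces the [None, 1] input the error complains about. A minimal sketch (not part of the original answer, and assuming data['images'] has shape (28, 28, N) and data['labels'] has length N, as in the question) that keeps the flattened images as features and the labels as one-hot targets:
# Minimal sketch, assuming the notMNIST .mat layout shown in the question.
X = (data['images'] / 255).astype('float32')        # (28, 28, N)
X = X.transpose(2, 0, 1).reshape(-1, 28 * 28)       # (N, 784): one flat row per image
y = pd.get_dummies(data['labels']).to_numpy()       # (N, 10) one-hot targets
# Do NOT overwrite X with the labels afterwards.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=9)
With X shaped (N, 784) and y shaped (N, 10), the Dense model with input_dim=784 above accepts the data.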

Related

MLP with TensorFlow Input Shape not Matching Output Shape

I'm trying to train a multilayer perceptron on the Iris dataset using TensorFlow in PyCharm with a Jupyter Notebook. Every time I run my code it fails on the model.fit() function and gives the following error:
ValueError: Shapes (None, 1) and (None, 3) are incompatible.
I've tried playing around with different values for the hyperparameters, but there's obviously something I'm not getting. Any help/pointers anyone could provide would be much appreciated.
Here's my data setup and preprocessing:
import pandas as pd
import numpy as np
# read iris data into pandas dataframe
iris = pd.read_csv("data/IRIS.csv", header=0)
# apply label to index column
iris.index.name = "id"
# create copy of iris dataframe in which to store normalised values and keep original dataframe for comparison later on
iris_unnormalized = iris
iris_normalized = iris.copy()
# isolate columns with numerical values
iris_num = iris.select_dtypes(include=[np.number])
# find max value in each column
col_maxes = iris_num.max()
# find overall max value among all columns
iris_num_max = col_maxes.max()
# divide all numerical values by overall max value in order to normalize data to a value between 0 and 1
iris_num_norm = iris_num / iris_num_max
# reassign normalised values back to their corresponding columns
iris_normalized[iris_num_norm.columns] = iris_num_norm
# specify seed for reproducibility
np.random.seed(1671)
training = iris_normalized.sample(frac = 0.8)
test = iris_normalized.drop(training.index)
# initialize the training input and output list
# same for testing set
X_train = []
Y_train = []
X_test = []
Y_test = []
# loop through the dataframe and separate inputs and outputs for training and testing
for index, row in training.iterrows():
    X_train.append([row['sepal length cm'], row['sepal width cm'], row['petal length cm'], row['petal width cm']])
    Y_train.append([row['species']])
for index, row in test.iterrows():
    X_test.append([row['sepal length cm'], row['sepal width cm'], row['petal length cm'], row['petal width cm']])
    Y_test.append([row['species']])
X_train = np.array(X_train).astype('float32')
Y_train = np.array(Y_train)
X_test = np.array(X_test).astype('float32')
Y_test = np.array(Y_test)
print(X_train.shape, "training samples") # Output: (120, 4) training samples
print(X_test.shape, "test samples") # Output: (30, 4) test samples
Here's where I try to create the neural network:
import tensorflow as tf
from tensorflow import keras
NB_CLASSES = 3 # number of iris varieties
N_HIDDEN = 128
BATCH_SIZE = 10
VERBOSE = 1
VALIDATION_SPLIT = 0.2 # how much of training set to hold for validation
EPOCHS = 200
model = tf.keras.models.Sequential(
    [
        keras.layers.Dense(N_HIDDEN, input_shape=(10,4,), batch_size=BATCH_SIZE, name="dense_layer1", activation="relu"),
        keras.layers.Dense(N_HIDDEN, input_shape=(4,), batch_size=BATCH_SIZE, name="dense_layer2", activation="relu"),
        keras.layers.Dense(NB_CLASSES, input_shape=(4,), batch_size=BATCH_SIZE, name="dense_layer3", activation="softmax"),
    ]
)
model.summary()
################### model summary output: #####################
Layer (type) Output Shape Param #
=================================================================
dense_layer1 (Dense) (10, 10, 128) 640
_________________________________________________________________
dense_layer2 (Dense) (10, 10, 128) 16512
_________________________________________________________________
dense_layer3 (Dense) (10, 10, 3) 387
=================================================================
Total params: 17,539
Trainable params: 17,539
Non-trainable params: 0
# compiling the model
model.compile(optimizer='SGD', loss='categorical_crossentropy', metrics=['accuracy'])
#train the model
model.fit(X_train, Y_train,
          batch_size=BATCH_SIZE,
          epochs = EPOCHS,
          verbose = VERBOSE,
          validation_split = VALIDATION_SPLIT)
The data setup and normalization work fine but when I run the code to create the neural network I get the below error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_8888/2571387568.py in <module>
38
39 #train the model
---> 40 model.fit(X_train, Y_train,
41 batch_size=BATCH_SIZE,
42 epochs = EPOCHS,
\venv\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1182 _r=1):
1183 callbacks.on_train_batch_begin(step)
-> 1184 tmp_logs = self.train_function(iterator)
1185 if data_handler.should_sync:
1186 context.async_wait()
\venv\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
883
884 with OptionalXlaContext(self._jit_compile):
--> 885 result = self._call(*args, **kwds)
886
887 new_tracing_count = self.experimental_get_tracing_count()
\venv\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
931 # This is the first call of __call__, so we have to initialize.
932 initializers = []
--> 933 self._initialize(args, kwds, add_initializers_to=initializers)
934 finally:
935 # At this point we know that the initialization is complete (or less
\venv\lib\site-packages\tensorflow\python\eager\def_function.py in _initialize(self, args, kwds, add_initializers_to)
757 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
758 self._concrete_stateful_fn = (
--> 759 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
760 *args, **kwds))
761
\venv\lib\site-packages\tensorflow\python\eager\function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
3064 args, kwargs = None, None
3065 with self._lock:
-> 3066 graph_function, _ = self._maybe_define_function(args, kwargs)
3067 return graph_function
3068
\venv\lib\site-packages\tensorflow\python\eager\function.py in _maybe_define_function(self, args, kwargs)
3461
3462 self._function_cache.missed.add(call_context_key)
-> 3463 graph_function = self._create_graph_function(args, kwargs)
3464 self._function_cache.primary[cache_key] = graph_function
3465
\venv\lib\site-packages\tensorflow\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3296 arg_names = base_arg_names + missing_arg_names
3297 graph_function = ConcreteFunction(
-> 3298 func_graph_module.func_graph_from_py_func(
3299 self._name,
3300 self._python_function,
\venv\lib\site-packages\tensorflow\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes, acd_record_initial_resource_uses)
1005 _, original_func = tf_decorator.unwrap(python_func)
1006
-> 1007 func_outputs = python_func(*func_args, **func_kwargs)
1008
1009 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
\venv\lib\site-packages\tensorflow\python\eager\def_function.py in wrapped_fn(*args, **kwds)
666 # the function a weak reference to itself to avoid a reference cycle.
667 with OptionalXlaContext(compile_with_xla):
--> 668 out = weak_wrapped_fn().__wrapped__(*args, **kwds)
669 return out
670
\venv\lib\site-packages\tensorflow\python\framework\func_graph.py in wrapper(*args, **kwargs)
992 except Exception as e: # pylint:disable=broad-except
993 if hasattr(e, "ag_error_metadata"):
--> 994 raise e.ag_error_metadata.to_exception(e)
995 else:
996 raise
ValueError: in user code:
\venv\lib\site-packages\keras\engine\training.py:853 train_function *
return step_function(self, iterator)
\venv\lib\site-packages\keras\engine\training.py:842 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
\venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1286 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
\venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2849 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
\venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3632 _call_for_each_replica
return fn(*args, **kwargs)
\venv\lib\site-packages\keras\engine\training.py:835 run_step **
outputs = model.train_step(data)
\venv\lib\site-packages\keras\engine\training.py:788 train_step
loss = self.compiled_loss(
\venv\lib\site-packages\keras\engine\compile_utils.py:201 __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
\venv\lib\site-packages\keras\losses.py:141 __call__
losses = call_fn(y_true, y_pred)
\venv\lib\site-packages\keras\losses.py:245 call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
\venv\lib\site-packages\tensorflow\python\util\dispatch.py:206 wrapper
return target(*args, **kwargs)
\venv\lib\site-packages\keras\losses.py:1665 categorical_crossentropy
return backend.categorical_crossentropy(
\venv\lib\site-packages\tensorflow\python\util\dispatch.py:206 wrapper
return target(*args, **kwargs)
\venv\lib\site-packages\keras\backend.py:4839 categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
\venv\lib\site-packages\tensorflow\python\framework\tensor_shape.py:1161 assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, 1) and (None, 3) are incompatible
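The incompatible shapes in the message line up with the posted setup: Y_train holds each string species wrapped in a one-element list, so its shape is (None, 1), while the 3-unit softmax output is (None, 3) and categorical_crossentropy compares the two directly. A hedged sketch (not from the original post) of one common way to reconcile them is to one-hot encode the labels:
# Hedged sketch: one-hot encode the string species so the target shape (None, 3)
# matches the 3-unit softmax output. Names Y_train/Y_test come from the question.
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical

encoder = LabelEncoder()
Y_train_oh = to_categorical(encoder.fit_transform(np.ravel(Y_train)), num_classes=3)  # (120, 3)
Y_test_oh = to_categorical(encoder.transform(np.ravel(Y_test)), num_classes=3)        # (30, 3)
Separately, the summary output above shows an extra dimension coming from input_shape=(10,4,) on the first Dense layer; with four features per sample, input_shape=(4,) would be the usual choice.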

layers compatibility between attention layer and CONV1D in keras

I am building a model in a BiLSTM-attention-Conv1D fashion (I want to use multiple Conv1D layers with different kernel sizes). I am facing a layer-incompatibility issue between the attention layer and the Conv1D layer. I have tried the Reshape function, but it's not working. Following is my code.
My model is as follows:
sequence_input = Input(shape=(maxlen,), dtype="int32")
embedded_sequences = Embedding(50000, output_dim=output_dim)(sequence_input)
lstm = Bidirectional(LSTM(RNN_CELL_SIZE, return_sequences = True), name="bi_lstm_0")(embedded_sequences)
# Getting our LSTM outputs
(lstm, forward_h, forward_c, backward_h, backward_c) = Bidirectional(LSTM(RNN_CELL_SIZE, return_sequences=True, return_state=True),
                                                                     name="bi_lstm_1")(lstm)
state_h = Concatenate()([forward_h, backward_h])
state_c = Concatenate()([forward_c, backward_c])
context_vector, attention_weights = Attention(10)(lstm, state_h)
x = Reshape((maxlen, output_dim, 1))(context_vector)
kernel_sizes = [1,2,3,4,5]
convs = []
for kernel_size in range(len(kernel_sizes)):
    conv = Conv1D(128, kernel_size, activation='relu')(x)
    convs.append(conv)
avg_pool = GlobalAveragePooling1D()(convs)
max_pool = GlobalMaxPooling1D()(convs)
conc = concatenate([avg_pool, max_pool])
output = Dense(50, activation="sigmoid")(conc)
model = keras.Model(inputs=sequence_input, outputs=output)
print(model.summary())
My code gives me the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-114-8e5c0c75e84a> in <module>()
13 context_vector, attention_weights = Attention(10)(lstm, state_h)
14
---> 15 x = Reshape((maxlen, output_dim, 1))(context_vector)
16
17
6 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
950 if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
951 return self._functional_construction_call(inputs, args, kwargs,
--> 952 input_list)
953
954 # Maintains info about the `Layer.call` stack.
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/base_layer.py in _functional_construction_call(self, inputs, args, kwargs, input_list)
1089 # Check input assumptions set after layer building, e.g. input shape.
1090 outputs = self._keras_tensor_symbolic_call(
-> 1091 inputs, input_masks, args, kwargs)
1092
1093 if outputs is None:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/base_layer.py in _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs)
820 return nest.map_structure(keras_tensor.KerasTensor, output_signature)
821 else:
--> 822 return self._infer_output_signature(inputs, args, kwargs, input_masks)
823
824 def _infer_output_signature(self, inputs, args, kwargs, input_masks):
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/base_layer.py in _infer_output_signature(self, inputs, args, kwargs, input_masks)
861 # TODO(kaftan): do we maybe_build here, or have we already done it?
862 self._maybe_build(inputs)
--> 863 outputs = call_fn(inputs, *args, **kwargs)
864
865 self._handle_activity_regularization(inputs, outputs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/layers/core.py in call(self, inputs)
555 # Set the static shape for the result since it might lost during array_ops
556 # reshape, eg, some `None` dim in the result could be inferred.
--> 557 result.set_shape(self.compute_output_shape(inputs.shape))
558 return result
559
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/layers/core.py in compute_output_shape(self, input_shape)
546 output_shape = [input_shape[0]]
547 output_shape += self._fix_unknown_dimension(input_shape[1:],
--> 548 self.target_shape)
549 return tensor_shape.TensorShape(output_shape)
550
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/layers/core.py in _fix_unknown_dimension(self, input_shape, output_shape)
534 output_shape[unknown] = original // known
535 elif original != known:
--> 536 raise ValueError(msg)
537 return output_shape
538
ValueError: total size of new array must be unchanged, input_shape = [256], output_shape = [2500, 100, 1]
Kindly help me.
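The traceback shows the Reshape target (2500 * 100 * 1 values per sample) does not match the 256 values per sample coming out of the attention layer (input_shape = [256]). Conv1D expects a 3-D (batch, steps, channels) input, which the bi-LSTM's sequence output already has, so one possible arrangement (a hedged sketch, not necessarily the poster's intended design; names lstm, context_vector, kernel_sizes come from the question) is to run the Conv1D branches on lstm and keep the attention context vector as a separate feature:
# Hedged sketch: give Conv1D the 3-D (batch, steps, channels) input it expects
# by convolving over the bi-LSTM sequence output instead of reshaping the
# pooled context vector.
convs = []
for kernel_size in [1, 2, 3, 4, 5]:
    conv = Conv1D(128, kernel_size, activation='relu', padding='same')(lstm)
    convs.append(GlobalMaxPooling1D()(conv))
conc = concatenate(convs + [context_vector])
output = Dense(50, activation="sigmoid")(conc)
Note also that the original loop iterates over range(len(kernel_sizes)), so the first Conv1D is built with kernel_size=0; iterating over kernel_sizes directly avoids that.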

ValueError: Input 0 of layer dense_24 is incompatible with the layer

Code to build the model is below. The issue I am having is that when I attempt to load the model and apply it to the test dataset, I get the error shown further down:
learning_rate=0.001
epochs = 10
decay_rate = learning_rate / epochs
def scheduler(epochs, lr):
    if epochs == 15:
        lr = 0.001
        return lr
    else:
        lr = lr * tensorflow.math.exp(-0.1)
        return lr
callback = keras.callbacks.LearningRateScheduler(scheduler)
wv_model = Sequential()
# Add embedding layer
# No. of output dimensions is 100 as we embedded with Word2Vec 100d
Embed_Layer = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=(MAX_SEQUENCE_LENGTH,), trainable=True)
# define Inputs
review_input = Input(shape=(MAX_SEQUENCE_LENGTH,),dtype= 'int32',name = 'review_input')
review_embedding = Embed_Layer(review_input)
Flatten_Layer = Flatten()
review_flatten = Flatten_Layer(review_embedding)
output_size = 2
dense1 = Dense(100,activation='relu')(review_flatten)
dense2 = Dense(32,activation='relu')(dense1)
predict = Dense(5, activation='softmax')(dense2)
wv_model = Model(inputs=[review_input],outputs=[predict])
# wv_model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['acc'])
opt = keras.optimizers.SGD(lr = 0.01, momentum=0.8, decay=0.0)
wv_model.compile(loss='mean_squared_error', optimizer=opt, metrics=['mean_squared_error'])
tensorboard = TensorBoard(
    log_dir="logs",
    histogram_freq=1,
    write_graph=True,
    write_images=False,
    update_freq="epoch",
    profile_batch=2,
    embeddings_freq=0,
    embeddings_metadata=None)
keras_callbacks = [tensorboard]
checkpoint = ModelCheckpoint('best_model.h5', monitor='val_loss', mode='min', verbose=1, save_best_only=True)
stp = keras.callbacks.EarlyStopping(patience=4)
callbacks_list = [checkpoint,stp, tensorboard,callback]
wv_model.fit(X_train, y_train, validation_data=(X_test, y_test),
             epochs=epochs, batch_size=256,
             verbose=1, callbacks=callbacks_list)
eval = wv_model.evaluate(X_test, y_test)[1]
print(eval)
wv_model.load_weights('./models/best_model.h5')
print(wv_model.summary())
Out:
Layer (type) Output Shape Param #
=================================================================
review_input (InputLayer) [(None, 100)] 0
_________________________________________________________________
embedding_8 (Embedding) (None, 100, 100) 22228800
_________________________________________________________________
flatten_8 (Flatten) (None, 10000) 0
_________________________________________________________________
dense_24 (Dense) (None, 100) 1000100
_________________________________________________________________
dense_25 (Dense) (None, 32) 3232
_________________________________________________________________
dense_26 (Dense) (None, 5) 165
=================================================================
Total params: 23,232,297
Trainable params: 23,232,297
Non-trainable params: 0
_________________________________________________________________
None
To validate the dataset:
predictions = load_model('./models/best_model.h5').predict(X12_test)
print("y_test", y_test)
print("predictions", predictions)
print("validation set RMSE ", rmse2(predictions, y_test))
y_test = y_test.overall.values
Out:
WARNING:tensorflow:Model was constructed with shape (None, 100) for input Tensor("review_input_13:0", shape=(None, 100), dtype=int32), but it was called on an input with incompatible shape (None, 6000).
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-80-82850281ff1c> in <module>
----> 1 predictions_o = load_model('./models/best_model.h5').predict(X12_test)
2
3 print("y1_test_truth", y1_test)
4 print("predictions_o", predictions_o)
5 print("validation set RMSE ", rmse2(predictions_o, y1_test))
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
128 raise ValueError('{} is not supported in multi-worker mode.'.format(
129 method.__name__))
--> 130 return method(self, *args, **kwargs)
131
132 return tf_decorator.make_decorator(
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
1597 for step in data_handler.steps():
1598 callbacks.on_predict_batch_begin(step)
-> 1599 tmp_batch_outputs = predict_function(iterator)
1600 if data_handler.should_sync:
1601 context.async_wait()
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
694 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
695 self._concrete_stateful_fn = (
--> 696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
697 *args, **kwds))
698
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~/.local/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~/.local/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~/.local/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
ValueError: in user code:
/home/x/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:1462 predict_function *
return step_function(self, iterator)
/home/x/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:1452 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/home/x/.local/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/home/x/.local/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/home/x/.local/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
/home/x/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:1445 run_step **
outputs = model.predict_step(data)
/home/x/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:1418 predict_step
return self(x, training=False)
/home/x/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:985 __call__
outputs = call_fn(inputs, *args, **kwargs)
/home/x/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/functional.py:385 call
return self._run_internal_graph(
/home/x/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/functional.py:508 _run_internal_graph
outputs = node.layer(*args, **kwargs)
/home/x/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:975 __call__
input_spec.assert_input_compatibility(self.input_spec, inputs,
/home/x/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/input_spec.py:212 assert_input_compatibility
raise ValueError(
ValueError: Input 0 of layer dense_24 is incompatible with the layer: expected axis -1 of input shape to have value 10000 but received input with shape [None, 600000]
I'm trying to work out where and what I need to change to ensure the dimensions are correct; however, I haven't managed to work out what exactly that is. Any help would be greatly appreciated.
Update:
Shape of the data:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state = 40)
[nSamp, inpShape] = X_train.shape
print("X train shape ", X_train.shape)
print("X test shape ", X_test.shape)
print("y train shape ",y_train.shape)
print("y test shape ",y_test.shape)
print(nSamp, inpShape)
Out:
X train shape (160000, 100)
X test shape (40000, 100)
y train shape (160000, 5)
y test shape (40000, 5)
160000 100
From the warning in the first line it seems that X12_test is not of the correct shape: according to the warning, your model is built to take an input of shape (None, 100), while you are calling it with an input of shape (None, 6000).
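A hedged sketch of what that means in practice (the names tokenizer and X12_test_texts below are hypothetical; the point is only that the prediction input must go through the same preprocessing, tokenize and pad to MAX_SEQUENCE_LENGTH = 100, as X_train did):
# Hedged sketch: bring the prediction input to the (None, 100) shape the model was built for.
from tensorflow.keras.preprocessing.sequence import pad_sequences

print(X12_test.shape)  # reportedly (None, 6000), but the model expects (None, 100)
X12_test_seq = tokenizer.texts_to_sequences(X12_test_texts)
X12_test_pad = pad_sequences(X12_test_seq, maxlen=MAX_SEQUENCE_LENGTH)  # -> (n_samples, 100)
predictions = load_model('./models/best_model.h5').predict(X12_test_pad)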

ValueError: Layer sequential_2 expects 1 inputs, but it received 2 input tensors. Inputs received

I am trying to build a simple model using the IAM Handwritten dataset from Kaggle and some sample code from a textbook I'm using, but I keep getting an error when I try to fit the model.
The error says ValueError: Layer sequential_2 expects 1 inputs, but it received 2 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, None, None, None) dtype=float32>, <tf.Tensor 'IteratorGetNext:1' shape=(None, None) dtype=float32>]
Full source code:
from __future__ import division
import numpy as np
import os
import glob
import tensorflow as tf
from random import *
from PIL import Image
from keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.image as mpimg
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Lambda, ELU, Activation, BatchNormalization
from keras.layers.convolutional import Convolution2D, Cropping2D, ZeroPadding2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD, Adam, RMSprop
d = {}
from subprocess import check_output
with open('./forms_for_parsing.txt') as f:
    for line in f:
        key = line.split(' ')[0]
        writer = line.split(' ')[1]
        d[key] = writer
print(len(d.keys()))
tmp = []
target_list = []
path_to_files = os.path.join('./input/data_subset/data_subset', '*')
for filename in sorted(glob.glob(path_to_files)):
    # print(filename)
    tmp.append(filename)
    image_name = filename.split('/')[-1]
    file, ext = os.path.splitext(image_name)
    parts = file.split('-')
    p = parts[0].split('\\')
    form = p[1] + '-' + parts[1]
    for key in d:
        if key == form:
            target_list.append(str(d[form]))
    # print(d)
    # print(parts[0])
    # p = parts[0].split('\\')
    # print(p[1])
    # print(form)
img_files = np.asarray(tmp)
img_targets = np.asarray(target_list)
print(img_files.shape)
print(img_targets.shape)
for filename in img_files[:20]:
    img = mpimg.imread(filename)
    plt.figure(figsize=(10,10))
    plt.imshow(img, cmap='gray')
encoder = LabelEncoder()
encoder.fit(img_targets)
encoded_Y = encoder.transform(img_targets)
print(img_files[:5], img_targets[:5], encoded_Y[:5])
train_files, rem_files, train_targets, rem_targets = train_test_split(
    img_files, encoded_Y, train_size=0.66, random_state=52, shuffle=True)
validation_files, test_files, validation_targets, test_targets = train_test_split(
    rem_files, rem_targets, train_size=0.5, random_state=22, shuffle=True)
print(train_files.shape, validation_files.shape, test_files.shape)
print(train_targets.shape, validation_targets.shape, test_targets.shape)
batch_size = 16 # 8
num_classes = 50
# Start with train generator shared in the class and add image augmentations
def generate_data(samples, target_files, batch_size=batch_size, factor=0.1):
    num_samples = len(samples)
    from sklearn.utils import shuffle
    while 1:  # Loop forever so the generator never terminates
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            batch_targets = target_files[offset:offset+batch_size]
            images = []
            targets = []
            for i in range(len(batch_samples)):
                batch_sample = batch_samples[i]
                batch_target = batch_targets[i]
                im = Image.open(batch_sample)
                cur_width = im.size[0]
                cur_height = im.size[1]
                # print(cur_width, cur_height)
                height_fac = 113 / cur_height
                new_width = int(cur_width * height_fac)
                size = new_width, 113
                imresize = im.resize((size), Image.ANTIALIAS)  # Resize so height = 113 while keeping aspect ratio
                now_width = imresize.size[0]
                now_height = imresize.size[1]
                # Generate crops of size 113x113 from this resized image and keep random 10% of crops
                avail_x_points = list(range(0, now_width - 113))  # total x start points are from 0 to width - 113
                # Pick random x%
                pick_num = int(len(avail_x_points)*factor)
                # Now pick
                random_startx = sample(avail_x_points, pick_num)
                for start in random_startx:
                    imcrop = imresize.crop((start, 0, start+113, 113))
                    images.append(np.asarray(imcrop))
                    targets.append(batch_target)
            # trim image to only see section with road
            X_train = np.array(images)
            y_train = np.array(targets)
            # reshape X_train for feeding in later
            X_train = X_train.reshape(X_train.shape[0], 113, 113, 1)
            # convert to float and normalize
            X_train = X_train.astype('float32')
            X_train /= 255
            # One hot encode y
            y_train = to_categorical(y_train, num_classes)
            yield shuffle(X_train, y_train)
train_generator = generate_data(train_files, train_targets, batch_size=batch_size, factor = 0.3)
validation_generator = generate_data(validation_files, validation_targets, batch_size=batch_size, factor = 0.3)
test_generator = generate_data(test_files, test_targets, batch_size=batch_size, factor = 0.1)
def resize_image(image):
    return tf.image.resize(image, [56,56])
# Function to resize image to 64x64
row, col, ch = 113, 113, 1
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(row, col, ch)))
# Resise data within the neural network
model.add(Lambda(resize_image)) #resize images to allow for easy computation
# CNN model - Building the model suggested in paper
model.add(Convolution2D(filters= 32, kernel_size =(5,5), strides= (2,2), padding='same', name='conv1')) #96
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2), name='pool1'))
model.add(Convolution2D(filters= 64, kernel_size =(3,3), strides= (1,1), padding='same', name='conv2')) #256
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2), name='pool2'))
model.add(Convolution2D(filters= 128, kernel_size =(3,3), strides= (1,1), padding='same', name='conv3')) #256
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2), name='pool3'))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(512, name='dense1')) #1024
# model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, name='dense2')) #1024
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes,name='output'))
model.add(Activation('softmax')) #softmax since output is within 50 classes
model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.summary()
nb_epoch = 8
samples_per_epoch = 3268
nb_val_samples = 842
# #save every model using Keras checkpoint
from keras.callbacks import ModelCheckpoint
#filepath="check-{epoch:02d}-{val_loss:.4f}.hdf5"
filepath="low_loss.hdf5"
checkpoint = ModelCheckpoint(filepath= filepath, verbose=1, save_best_only=False)
callbacks_list = [checkpoint]
# #Model fit generator
history_object = model.fit_generator(train_generator, steps_per_epoch=(samples_per_epoch/batch_size),
                                     validation_data=validation_generator,
                                     validation_steps=nb_val_samples, epochs=nb_epoch, verbose=1, callbacks=callbacks_list)
And this is the error I got:
ValueError Traceback (most recent call last)
<ipython-input-79-99c01bc062d8> in <module>
12
13 # #Model fit generator
---> 14 history_object = model.fit_generator(train_generator, steps_per_epoch = (samples_per_epoch/batch_size),
15 validation_data=validation_generator,
16 validation_steps=nb_val_samples, epochs=nb_epoch, verbose=1, callbacks=callbacks_list)
~\anaconda3\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
322 'in a future version' if date is None else ('after %s' % date),
323 instructions)
--> 324 return func(*args, **kwargs)
325 return tf_decorator.make_decorator(
326 func, new_func, 'deprecated',
~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1813 """
1814 _keras_api_gauge.get_cell('fit_generator').set(True)
-> 1815 return self.fit(
1816 generator,
1817 steps_per_epoch=steps_per_epoch,
~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1096 batch_size=batch_size):
1097 callbacks.on_train_batch_begin(step)
-> 1098 tmp_logs = train_function(iterator)
1099 if data_handler.should_sync:
1100 context.async_wait()
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
812 # In this case we have not created variables on the first call. So we can
813 # run the first trace but we should fail if variables are created.
--> 814 results = self._stateful_fn(*args, **kwds)
815 if self._created_variables:
816 raise ValueError("Creating variables on a non-first call to a function"
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
2826 """Calls a graph function specialized to the inputs."""
2827 with self._lock:
-> 2828 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
2829 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
2830
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _maybe_define_function(self, args, kwargs)
3208 and self.input_signature is None
3209 and call_context_key in self._function_cache.missed):
-> 3210 return self._define_function_with_shape_relaxation(args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _define_function_with_shape_relaxation(self, args, kwargs)
3139 expand_composites=True)
3140
-> 3141 graph_function = self._create_graph_function(
3142 args, kwargs, override_flat_arg_shapes=relaxed_arg_shapes)
3143 self._function_cache.arg_relaxed[rank_only_cache_key] = graph_function
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3063 arg_names = base_arg_names + missing_arg_names
3064 graph_function = ConcreteFunction(
-> 3065 func_graph_module.func_graph_from_py_func(
3066 self._name,
3067 self._python_function,
~\anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
~\anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
ValueError: in user code:
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:806 train_function *
return step_function(self, iterator)
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:789 run_step **
outputs = model.train_step(data)
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:747 train_step
y_pred = self(x, training=True)
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:975 __call__
input_spec.assert_input_compatibility(self.input_spec, inputs,
C:\Users\subha\anaconda3\lib\site-packages\tensorflow\python\keras\engine\input_spec.py:155 assert_input_compatibility
raise ValueError('Layer ' + layer_name + ' expects ' +
ValueError: Layer sequential_2 expects 1 inputs, but it received 2 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, None, None, None) dtype=float32>, <tf.Tensor 'IteratorGetNext:1' shape=(None, None) dtype=float32>]
I couldn't understand the error message, so could somebody kindly help me out?
Thank you.
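One likely cause, offered as a hedged note rather than a confirmed fix: the generator ends with yield shuffle(X_train, y_train), and sklearn's shuffle returns a list of the shuffled arrays. A yielded list is treated by Keras as multiple model inputs rather than an (x, y) pair, which matches the "expects 1 inputs, but it received 2 input tensors" message. A minimal sketch of yielding an explicit tuple instead:
# Hedged sketch: unpack sklearn's shuffle result and yield an explicit (x, y) tuple
# so Keras does not interpret the two arrays as two separate model inputs.
X_batch, y_batch = shuffle(X_train, y_train)
yield X_batch, y_batch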

InvalidArgumentError Function call stack: train_function

Hello, I got this error which I could not solve; any thoughts? I am trying to build a model using my own dataset, so I chose transfer learning (VGG16), but it still isn't working. Thank you in advance.
I'm using Python 3.8.x and the latest version of TensorFlow, 2.2.x.
I'm trying to build a classifier.
import tensorflow as tf
from tensorflow.keras.layers import Input, Lambda, Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
IMAGE_SIZE = [224, 224]
train_path = 'dataset/Train'
val_path = 'dataset/validation'
vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
for layer in vgg.layers:
    layer.trainable = False
folders = glob('datasets/Train/*')
x = Flatten()(vgg.output)
x = Dense(1000, activation='relu')(x)
prediction = Dense(len(folders), activation='softmax')(x)
# create a model object
model = Model(inputs=vgg.input, outputs=prediction)
model.summary()
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('dataset/train',
                                                 target_size = (224, 224),
                                                 batch_size = 32,
                                                 class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('dataset/validation',
                                            target_size = (224, 224),
                                            batch_size = 32,
                                            class_mode = 'categorical')
r = model.fit(
    training_set,
    validation_data=test_set,
    epochs=5,
    steps_per_epoch=len(training_set),
    validation_steps=len(test_set)
)
Below is the error
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-46-a479a62b157d> in <module>
2 steps_per_epoch = 1,
3 epochs = 10,
----> 4 validation_data = test_set
5 )
~/opt/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
~/opt/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1100 _r=1):
1101 callbacks.on_train_batch_begin(step)
-> 1102 tmp_logs = self.train_function(iterator)
1103 if data_handler.should_sync:
1104 context.async_wait()
~/opt/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
794 else:
795 compiler = "nonXla"
--> 796 result = self._call(*args, **kwds)
797
798 new_tracing_count = self._get_tracing_count()
~/opt/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # In this case we have created variables on the first call, so we run the
822 # defunned version which is guaranteed to never create variables.
--> 823 return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
824 elif self._stateful_fn is not None:
825 # Release the lock early so that multiple threads can perform the call
~/opt/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/function.py in __call__(self, *args, **kwargs)
2920 with self._lock:
2921 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
-> 2922 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
2923
2924 #property
~/opt/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _filtered_call(self, args, kwargs, cancellation_manager)
1856 resource_variable_ops.BaseResourceVariable))],
1857 captured_inputs=self.captured_inputs,
-> 1858 cancellation_manager=cancellation_manager)
1859
1860 def _call_flat(self, args, captured_inputs, cancellation_manager=None):
~/opt/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1932 # No tape is watching; skip to running the function.
1933 return self._build_call_outputs(self._inference_function.call(
-> 1934 ctx, args, cancellation_manager=cancellation_manager))
1935 forward_backward = self._select_forward_and_backward_functions(
1936 args,
~/opt/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/function.py in call(self, ctx, args, cancellation_manager)
555 inputs=args,
556 attrs=attrs,
--> 557 ctx=ctx)
558 else:
559 outputs = execute.execute_with_cancellation(
~/opt/anaconda3/lib/python3.7/site-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
58 ctx.ensure_initialized()
59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
62 if name is not None:
InvalidArgumentError: Reduction axis -1 is empty in shape [32,0]
[[node ArgMax_1 (defined at <ipython-input-45-71c422cbdbf7>:4) ]] [Op:__inference_train_function_3410]
Function call stack:
train_function
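The "Reduction axis -1 is empty in shape [32,0]" message suggests the final Dense layer has zero units. In the posted code the output size is len(folders) with folders = glob('datasets/Train/*'), while the data paths elsewhere use 'dataset/Train' and 'dataset/train' (no trailing 's'), so the glob may well return an empty list. A hedged sketch of a quick check (the corrected path here assumes the class folders live under dataset/Train, matching train_path above):
# Hedged sketch: verify the glob actually finds the class folders before sizing
# the output layer; an empty list would make Dense(0) and an empty ArgMax axis.
folders = glob('dataset/Train/*')
print(len(folders), folders[:3])  # should equal the number of classes, not 0
prediction = Dense(len(folders), activation='softmax')(x)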
