I have built a BiLSTM model with an attention layer for a sentence classification task, but I am getting an error that an assertion failed because of a shape mismatch. The attention layer code is below, and the error follows the code.
class attention(Layer):
    def __init__(self, return_sequences=True):
        self.return_sequences = return_sequences
        super(attention, self).__init__()

    def build(self, input_shape):
        self.W = self.add_weight(name="att_weight", shape=(input_shape[-1], 1),
                                 initializer="normal")
        self.b = self.add_weight(name="att_bias", shape=(input_shape[1], 1),
                                 initializer="zeros")
        super(attention, self).build(input_shape)

    def call(self, x):
        e = K.tanh(K.dot(x, self.W) + self.b)
        a = K.softmax(e, axis=1)
        output = x * a
        if self.return_sequences:
            return output
        return K.sum(output, axis=1)
When I train the model with the attention layer included, it fails with the following assertion error.
Epoch 1/10
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-45-ac310033130c> in <module>()
1 #Early stopping, Adam, dropout = 0.3, 0.5, 0.5
2 #history = model.fit(sequences_matrix, Y_train, batch_size=256, epochs=5, validation_split=0.1, callbacks=[EarlyStopping(monitor='val_loss', min_delta=0.0001)])
----> 3 history = model.fit(sequences_matrix, Y_train, batch_size=32, epochs=10, validation_split=0.1)
8 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
58 ctx.ensure_initialized()
59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
62 if name is not None:
InvalidArgumentError: assertion failed: [Condition x == y did not hold element-wise:] [x (sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/Shape_1:0) = ] [32 1] [y (sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/strided_slice:0) = ] [32 758]
[[node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/assert_equal_1/Assert/Assert (defined at <ipython-input-45-ac310033130c>:3) ]] [Op:__inference_train_function_19854]
Function call stack:
train_function
My model is
model = Sequential()
model.add(Embedding(max_words, 768, input_length=max_len, weights=[embedding]))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(SpatialDropout1D(0.1))
model.add(Conv1D(16, kernel_size=11, activation='relu'))
model.add(Bidirectional(LSTM(16, return_sequences=True)))
model.add(attention(return_sequences=True))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(Dropout(0.2))
model.add(Dense(2, activation='softmax', use_bias=True, kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4), bias_regularizer=regularizers.l2(1e-4),
                activity_regularizer=regularizers.l2(1e-5)))
model.summary()
My preprocessing code and the shape of Y_train:
max_words = 48369
max_len = 768
tok = Tokenizer(num_words = max_words)
tok.fit_on_texts(X_train)
sequences = tok.texts_to_sequences(X_train)
sequences_matrix = sequence.pad_sequences(sequences, maxlen = max_len)
Y_train = np.array(Y_train)
Y_test = np.array(Y_test)
print(Y_train.shape)
(43532, 1)
Your target is 2D, so you need to set return_sequences=False in the last attention layer in order to return the output in 2D format.
You can also add a Flatten layer before the Dropout layer and then run the model:
model.add(Flatten())
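For reference, here is a minimal sketch of how the tail end of the model from the question could look with either fix applied (regularizers omitted for brevity; everything up to and including the Conv1D layer stays exactly as posted):

# Fix 1: let the attention layer collapse the time dimension itself
model.add(Bidirectional(LSTM(16, return_sequences=True)))
model.add(attention(return_sequences=False))   # output shape: (batch, 32)
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(Dropout(0.2))
model.add(Dense(2, activation='softmax'))

# Fix 2: keep return_sequences=True and flatten before the classifier
# model.add(attention(return_sequences=True))  # output shape: (batch, timesteps, 32)
# model.add(Flatten())
# model.add(Dense(2, activation='softmax'))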
I have this neural network, based on this:
words_input = Input(shape=(500,),dtype='int32',name='words_input')
words = Embedding(input_dim=wordEmbeddings.shape[0], output_dim=wordEmbeddings.shape[1], weights=[wordEmbeddings], trainable=False)(words_input)
conv_1 = Conv1D(filters=100, kernel_size=10, strides=2, activation='relu')(words)
avgpool_1 = AveragePooling1D(pool_size=10, strides=10)(conv_1)
b_lstm = Bidirectional(LSTM(200, activation='tanh', return_sequences=False))(avgpool_1)
dense_1 = Dense(128, activation='relu')(b_lstm)
dropout = Dropout(0.1)(dense_1)
dense_2 = Dense(5, activation='softmax')(dropout)
sgd = keras.optimizers.Adam(lr=0.0001)
model = Model(inputs=words_input, outputs=dense_2)
extractor = Model(inputs=model.inputs, outputs=model.get_layer(words).output)
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['acc'])
model.summary()
I added the line
extractor = Model(inputs=model.inputs, outputs=model.get_layer(words).output)
because I want to extract the word2vec embeddings of the words from the inputs, like they show here.
But I'm getting this error:
TypeError Traceback (most recent call last)
/tmp/ipykernel_6732/2108362002.py in <module>
11 sgd = keras.optimizers.Adam(lr=0.0001)
12 model = Model(inputs=words_input, outputs=dense_2)
---> 13 extractor = Model(inputs=model.inputs, outputs=model.get_layer(words).output)
14 model.compile(loss='mean_squared_error', optimizer='adam', metrics=['acc'])
15 model.summary()
~/.local/lib/python3.8/site-packages/keras/engine/training.py in get_layer(self, name, index)
3271 if name is not None:
3272 for layer in self.layers:
-> 3273 if layer.name == name:
3274 return layer
3275 raise ValueError(
~/.local/lib/python3.8/site-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)
151 except Exception as e:
152 filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153 raise e.with_traceback(filtered_tb) from None
154 finally:
155 del filtered_tb
~/.local/lib/python3.8/site-packages/keras/layers/core/tf_op_layer.py in handle(self, op, args, kwargs)
117 for x in tf.nest.flatten([args, kwargs])
118 ):
--> 119 return TFOpLambda(op)(*args, **kwargs)
120 else:
121 return self.NOT_SUPPORTED
~/.local/lib/python3.8/site-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
TypeError: Exception encountered when calling layer "tf.__operators__.eq" (type TFOpLambda).
Expected float32 passed to parameter 'y' of op 'Equal', got 'words_input' of type 'str' instead. Error: Expected float32, but got words_input of type 'str'.
Call arguments received by layer "tf.__operators__.eq" (type TFOpLambda):
• self=tf.Tensor(shape=(None, 500, 300), dtype=float32)
• other='words_input'
Any idea what I am doing wrong? Why is it passing the name of the first layer, "words_input", to the parameter y (which is what I assume it is doing)?
You are not passing the correct name to the model's get_layer method: get_layer expects the layer's name (a string) or an index, not the layer's output tensor. Give the Embedding layer a name and look it up by that name. Try this code:
tf.keras.backend.clear_session()
words_input = keras.Input(shape=(500,),dtype='int32',name='words_input')
words = keras.layers.Embedding(input_dim=wordEmbeddings.shape[0], output_dim=wordEmbeddings.shape[1], weights=[wordEmbeddings], trainable=False, name='words')(words_input)
conv_1 = keras.layers.Conv1D(filters=100, kernel_size=10, strides=2, activation='relu')(words)
avgpool_1 = keras.layers.AveragePooling1D(pool_size=10, strides=10)(conv_1)
b_lstm = keras.layers.Bidirectional(keras.layers.LSTM(200, activation='tanh', return_sequences=False))(avgpool_1)
dense_1 = keras.layers.Dense(128, activation='relu')(b_lstm)
dropout = keras.layers.Dropout(0.1)(dense_1)
dense_2 = keras.layers.Dense(5, activation='softmax')(dropout)
sgd = keras.optimizers.Adam(learning_rate=0.0001)
model = keras.Model(inputs=words_input, outputs=dense_2)
extractor = keras.Model(inputs=model.inputs, outputs=model.get_layer('words').output)
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['acc'])
model.summary()
extractor.summary()
Output: model.summary() and extractor.summary() print the architectures of the full model and of the extractor.
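Once the extractor is built this way, you can pull the (frozen) embedding vectors for any batch of tokenized, padded inputs. A small usage sketch, where padded_sequences is a hypothetical int array of shape (n, 500) and the last dimension comes from wordEmbeddings.shape[1], which is 300 according to your traceback:

# hypothetical batch of already tokenized and padded inputs, shape (n, 500)
embeddings = extractor.predict(padded_sequences)
print(embeddings.shape)  # (n, 500, 300): one embedding vector per token position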
I am implementing artificial bee colony optimization for an ANN using [this][1] API, but I am getting this error. This is my code:
def ANN(optimizer="adam", neurons=32, batch_size=32, epochs=50, activation="relu", patience=5, loss='mse'):
    model = Sequential()
    model.add(Dense(neurons, input_dim=look_back, activation=activation))
    model.add(Dense(neurons, activation=activation))
    model.add(Dense(1))
    model.compile(optimizer=optimizer, loss=loss)
    early_stopping = EarlyStopping(monitor="loss", patience=patience)
    history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, callbacks=[early_stopping], verbose=0)
    return model
boundaries = [(0,2), (0,2), (0,2), (0,2), (10,100), (20,50), (3,20)]
def performance(x_train, y_train, x_test, y_test, optimizer=None, activation=None, loss=None, batch_size=None, neurons=None, epochs=None, patience=None):
    model = ANN(optimizer=optimizer, activation=activation, loss=loss, batch_size=batch_size, neurons=neurons, epochs=epochs, patience=patience)
    trainScore = model.evaluate(x_train, y_train, verbose=0)
    print('Train Score: %.2f MSE (%.2f RMSE)' % (trainScore, math.sqrt(trainScore)))
    testScore = model.evaluate(x_test, y_test, verbose=0)
    print('Test Score: %.2f MSE (%.2f RMSE)' % (testScore, math.sqrt(testScore)))
    trainPredict = model.predict(x_train)
    testPredict = model.predict(x_test)
    # calculate mean absolute percent error
    trainMAPE = mean_absolute_error(y_train, trainPredict)
    testMAPE = mean_absolute_error(y_test, testPredict)
    return print('testMAPE: %.2f MAPE' % trainMAPE), print('testMAPE: %.2f MAPE' % testMAPE)
writer = pd.ExcelWriter('/content/Scores.xlsx')
for sheetNum in range(1, 5):
    dataframe = pd.read_excel('Fri.xlsx', sheet_name='Sheet'+str(sheetNum))
    # load the dataset
    dataset = dataframe.values
    dataset = dataset.astype('float32')
    train_size = int(len(dataset) * 0.48)
    test_size = len(dataset) - train_size
    train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
    # reshape into X=t and Y=t+1
    look_back = 10
    x_train, y_train = create_dataset(train, look_back)
    x_test, y_test = create_dataset(test, look_back)
    # normalize the dataset
    scaler = MinMaxScaler(feature_range=(0, 1))
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.fit_transform(x_test)
    abc_obj = abc(performance(x_train, y_train, x_test, y_test), boundaries)
    abc_obj.fit()
    # Get solution obtained after fit() execution:
    solution = abc_obj.get_solution()
This is my error:
TypeError Traceback (most recent call last)
<ipython-input-38-f9098d8d18fc> in <module>()
23 x_train = scaler.fit_transform(x_train)
24 x_test = scaler.fit_transform(x_test)
---> 25 abc_obj = abc(performance(x_train, y_train, x_test, y_test), boundaries)
26 abc_obj.fit()
27
2 frames
/usr/local/lib/python3.7/dist-packages/keras/layers/core.py in __init__(self, units, activation, use_bias, kernel_initializer, bias_initializer, kernel_regularizer, bias_regularizer, activity_regularizer, kernel_constraint, bias_constraint, **kwargs)
1144 activity_regularizer=activity_regularizer, **kwargs)
1145
-> 1146 self.units = int(units) if not isinstance(units, int) else units
1147 self.activation = activations.get(activation)
1148 self.use_bias = use_bias
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
Can you help me with this, please? I think I am not defining the function "performance" correctly, but I don't understand how to make it better.
[1]: https://pypi.org/project/beecolpy/
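For what it's worth, the traceback shows that abc(performance(x_train, y_train, x_test, y_test), boundaries) evaluates performance immediately, with every hyperparameter left at its default of None (which is why Dense ends up with units=None), rather than giving the optimizer a function it can call. Below is a rough sketch of a callable objective; it assumes beecolpy's abc passes a list of candidate values (one per boundary pair) to the function it is given, and the index-to-hyperparameter mapping and lookup lists are illustrative assumptions only:

# Illustrative only: how a callable objective might look.
# The mapping from candidate values to hyperparameters is an assumption.
optimizers  = ['adam', 'rmsprop', 'sgd']
activations = ['relu', 'tanh', 'sigmoid']
losses      = ['mse', 'mae', 'msle']

def objective(candidate):
    # candidate: list of floats, one per (min, max) pair in `boundaries`
    return performance(x_train, y_train, x_test, y_test,
                       optimizer=optimizers[int(candidate[0]) % len(optimizers)],
                       activation=activations[int(candidate[1]) % len(activations)],
                       loss=losses[int(candidate[2]) % len(losses)],
                       patience=int(candidate[3]),
                       batch_size=int(candidate[4]),
                       neurons=int(candidate[5]),
                       epochs=int(candidate[6]))

# note: for the search to be meaningful, performance would also have to
# return a numeric score rather than the result of print()
abc_obj = abc(objective, boundaries)
abc_obj.fit()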
I'm trying to run a simple autoencoder model. I'm reading training data from a CSV that consists of word embeddings. The error in the title is raised in the model.fit() call and is connected with my validation data. I have tried many things, but the error remains. I'm new to NLP and maybe my logic is totally wrong, so I'd appreciate it if anybody could help. Here is my code:
def train_predict(df):
    X_train, X_validation = train_test_split(df, test_size=0.3, random_state=42, shuffle=True)
    X = X_train.iloc[:, :-1].to_numpy()           # shape is (1880, 220) here
    X = tf.expand_dims(X, axis=-1)                # shape is (1880, 220, 1)
    X_val = X_validation.iloc[:, :-1].to_numpy()  # shape is (300, 220)
    X_val = tf.expand_dims(X_val, axis=-1)        # shape is (300, 220, 1)
    inputs, decoder_output, visualization = autoEncoder(X)
    model = Model(inputs=inputs, outputs=decoder_output)
    encoder_model = Model(inputs=inputs, outputs=visualization)
    batch_size = 128
    train_steps = len(X) // batch_size
    val_steps = len(X_val) // batch_size
    model.summary()
    model.compile(optimizer='adam', metrics=['accuracy'], loss='mean_squared_error')
    model.fit(X, steps_per_epoch=train_steps, validation_data=X_val, validation_steps=val_steps, epochs=100)
    result = model.evaluate(X_val, steps=10)
The autoEncoder function is as follows:
def autoEncoder(X_train):
    inputs = tf.keras.layers.Input(shape=(X_train.shape[1], 1))
    # parameters
    conv_1 = Conv1D(filters=64, kernel_size=3, activation='relu', padding='same')(inputs)
    max_pool_1 = MaxPool1D(pool_size=2)(conv_1)
    conv_2 = Conv1D(filters=128, kernel_size=3, activation='relu', padding='same')(max_pool_1)
    max_pool_2 = MaxPool1D(pool_size=2)(conv_2)
    # BOTTLE NECK
    bottle_neck = Conv1D(filters=256, kernel_size=3, activation='relu', padding='same')(max_pool_2)
    visualization = Conv1D(filters=1, kernel_size=3, activation='sigmoid', padding='same')(bottle_neck)
    # DECODER
    conv_3 = Conv1D(filters=128, kernel_size=3, activation='relu', padding='same')(bottle_neck)
    upsample_1 = UpSampling1D(size=2)(conv_3)
    conv_4 = Conv1D(filters=64, kernel_size=3, activation='relu', padding='same')(upsample_1)
    upsample_2 = UpSampling1D(size=2)(conv_4)
    decoder_output = Conv1D(filters=1, kernel_size=3, activation='sigmoid', padding='same')(upsample_2)
    return inputs, decoder_output, visualization
It would be excellent if you could copy-paste the entire error stack that your code produces; that is something everyone should do for error-related questions, because it makes debugging much easier.
Here's an attempt to reproduce the same error using a dummy dataset:
import numpy as np
import tensorflow as tf
np.random.seed(11)
np.set_printoptions(precision=2)
def autoEncoder(X_train):
    inputs = tf.keras.layers.Input(shape=(X_train.shape[1], 1))
    conv_1 = tf.keras.layers.Conv1D(filters=64, kernel_size=3, activation='relu', padding='same')(inputs)
    max_pool_1 = tf.keras.layers.MaxPool1D(pool_size=2)(conv_1)
    conv_2 = tf.keras.layers.Conv1D(filters=128, kernel_size=3, activation='relu', padding='same')(max_pool_1)
    max_pool_2 = tf.keras.layers.MaxPool1D(pool_size=2)(conv_2)
    bottle_neck = tf.keras.layers.Conv1D(filters=256, kernel_size=3, activation='relu', padding='same')(max_pool_2)
    visualization = tf.keras.layers.Conv1D(filters=1, kernel_size=3, activation='sigmoid', padding='same')(bottle_neck)
    conv_3 = tf.keras.layers.Conv1D(filters=128, kernel_size=3, activation='relu', padding='same')(bottle_neck)
    upsample_1 = tf.keras.layers.UpSampling1D(size=2)(conv_3)
    conv_4 = tf.keras.layers.Conv1D(filters=64, kernel_size=3, activation='relu', padding='same')(upsample_1)
    upsample_2 = tf.keras.layers.UpSampling1D(size=2)(conv_4)
    decoder_output = tf.keras.layers.Conv1D(filters=1, kernel_size=3, activation='sigmoid', padding='same')(upsample_2)
    return inputs, decoder_output, visualization
X = np.random.randn(1880, 220)
X_val = np.random.randn(300, 220)
X = np.expand_dims(X, axis=-1)
X = tf.convert_to_tensor(X) # (1880, 220, 1)
X_val = np.expand_dims(X_val, axis=-1)
X_val = tf.convert_to_tensor(X_val) # (300, 220, 1)
inputs, decoder_output, visualization = autoEncoder(X)
model = tf.keras.Model(inputs=inputs, outputs=decoder_output)
encoder_model = tf.keras.Model(inputs=inputs, outputs=visualization)
batch_size = 128
train_steps = len(X) // batch_size
val_steps = len(X_val) // batch_size
model.compile(optimizer='adam', metrics=['accuracy'], loss='mean_squared_error')
model.fit(X, steps_per_epoch=train_steps, validation_data = X_val, validation_steps=val_steps, epochs=100)
On google-colab this gives the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-29-a889c5a46f35> in <module>()
3 val_steps = len(X_val) // batch_size
4 model.compile(optimizer='adam', metrics=['accuracy'], loss='mean_squared_error')
----> 5 model.fit(X, steps_per_epoch=train_steps, validation_data = X_val, validation_steps=val_steps, epochs=100)
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1041 (x, y, sample_weight), validation_split=validation_split))
1042
-> 1043 if validation_data:
1044 val_x, val_y, val_sample_weight = (
1045 data_adapter.unpack_x_y_sample_weight(validation_data))
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in __bool__(self)
990
991 def __bool__(self):
--> 992 return bool(self._numpy())
993
994 __nonzero__ = __bool__
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
which is identical to the error in your OP. The reason it would have been better to post the full error stack is that the answer is hidden in these lines, specifically:
1043 if validation_data:
1044 val_x, val_y, val_sample_weight = (
1045 data_adapter.unpack_x_y_sample_weight(validation_data))
The format of validation_data is identical to (x, y, sample_weight). Here's what fit method documentation has to say:
validation_data will override validation_split. validation_data could be:
- tuple (x_val, y_val) of Numpy arrays or tensors
- tuple (x_val, y_val, val_sample_weights) of Numpy arrays
- dataset
For the first two cases, batch_size must be provided. For the last case, validation_steps could be provided.
I think you now understand why you're getting an error: there is no y for your autoencoder. That shouldn't be a concern, since your X itself is your y. Here's a line from an autoencoder tutorial that helps in this situation:
Train the model using x_train as both the input and the target. The encoder will learn to compress the dataset from 784 dimensions to the latent space, and the decoder will learn to reconstruct the original images.
So, what you were expected to do is to write the following:
model.fit(X, X, steps_per_epoch=train_steps, validation_data=(X_val, X_val), validation_steps=val_steps, epochs=100)
which indeed starts the training!
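Note that the same applies to the evaluate call at the end of your train_predict function; following the same "input is also the target" logic, it would become, for example, result = model.evaluate(X_val, X_val, steps=10).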
My model has two input branches:
The first branch is composed of an embedding followed by a simple Multi-Layer Perceptron (MLP) designed to handle the product description input.
The second branch is a CNN that operates over the product image data.
These branches are then concatenated together to form the final model.
The problem is that when we try to split the data with train_test_split for cross-validation, it gives this error:
ValueError: Found input variables with inconsistent numbers of samples: [2, 8382]
MLP and CNN
def create_mlp(dim, regress=False):
    # define our MLP network
    model = Sequential()
    model.add(Dense(8, input_dim=dim, activation="relu"))
    model.add(Dense(4, activation="relu"))
    # check to see if the regression node should be added
    if regress:
        model.add(Dense(1, activation="linear"))
    # return our model
    return model

def create_cnn(width, height, depth, filters=(64, 32, 16), regress=False):
    # initialize the input shape and channel dimension, assuming
    # TensorFlow/channels-last ordering
    inputShape = (height, width, depth)
    chanDim = -1
    # define the model input
    inputs = Input(shape=inputShape)
    # loop over the number of filters
    for (i, f) in enumerate(filters):
        # if this is the first CONV layer then set the input
        # appropriately
        if i == 0:
            x = inputs
        # CONV => RELU => BN => POOL
        x = Conv2D(f, (3, 3), padding="same")(x)
        x = Activation("relu")(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
    # flatten the volume, then FC => RELU => BN => DROPOUT
    x = Flatten()(x)
    x = Dense(16)(x)
    x = Activation("relu")(x)
    x = BatchNormalization(axis=chanDim)(x)
    x = Dropout(0.5)(x)
    # apply another FC layer, this one to match the number of nodes
    # coming out of the MLP
    x = Dense(4)(x)
    x = Activation("relu")(x)
    # check to see if the regression node should be added
    if regress:
        x = Dense(1, activation="linear")(x)
    # construct the CNN
    model = Model(inputs, x)
    # return the CNN
    return model
mlp = create_mlp(trainEmbedX.shape[1], regress=False)
cnn = create_cnn(64, 64, 3, regress=False)
combinedInput = concatenate([mlp.output, cnn.output])
x = Dense(4, activation="relu")(combinedInput)
x = Dense(1, activation="sigmoid")(x)
model = Model(inputs=[mlp.input, cnn.input], outputs=x)
model.compile(loss="binary_crossentropy", metrics=['accuracy'], optimizer="adam") # binary_crossentropy
The error occurs here
n_folds=3
epochs=3
batch_size=128
#save the model history in a list after fitting so that we can plot later
model_history = []
for i in range(n_folds):
    print("Training on Fold: ", i+1)
    t_x, val_x, t_y, val_y = train_test_split([trainEmbedX, trainImagesX], trainY, test_size=0.2, random_state=np.random.randint(1, 1000, 1)[0])
    model_history.append(fit_and_evaluate(t_x, val_x, t_y, val_y, epochs, batch_size))
    print("======="*12, end="\n\n\n")
Training on Fold: 1
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-84-651638774259> in <module>
7 for i in range(n_folds):
8 print("Training on Fold: ",i+1)
----> 9 t_x, val_x, t_y, val_y = train_test_split([trainEmbedX,trainImagesX], trainY, test_size = 0.2, random_state = np.random.randint(1,1000, 1)[0])
10 model_history.append(fit_and_evaluate(t_x, val_x, t_y, val_y, epochs, batch_size))
11 print("======="*12, end="\n\n\n")
~/anaconda3/envs/baron/lib/python3.6/site-packages/sklearn/model_selection/_split.py in train_test_split(*arrays, **options)
2182 test_size = 0.25
2183
-> 2184 arrays = indexable(*arrays)
2185
2186 if shuffle is False:
~/anaconda3/envs/baron/lib/python3.6/site-packages/sklearn/utils/validation.py in indexable(*iterables)
258 else:
259 result.append(np.array(X))
--> 260 check_consistent_length(*result)
261 return result
262
~/anaconda3/envs/baron/lib/python3.6/site-packages/sklearn/utils/validation.py in check_consistent_length(*arrays)
233 if len(uniques) > 1:
234 raise ValueError("Found input variables with inconsistent numbers of"
--> 235 " samples: %r" % [int(l) for l in lengths])
236
237
ValueError: Found input variables with inconsistent numbers of samples: [2, 8382]
This error happens when the dimensions of X and Y passed to train_test_split do not match.
Looking at your snippet, you try to combine two arrays as [trainEmbedX, trainImagesX], which wraps them in an extra outer dimension of length 2 when the original arrays trainEmbedX and trainImagesX are not 1D; hence the shape [2, 8382] in the error.
So instead of [trainEmbedX, trainImagesX], I suggest using np.concatenate to merge these two arrays: np.concatenate((trainEmbedX, trainImagesX), axis=1).
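As an aside, a common alternative for multi-input models like this one is to keep the two inputs separate: split all arrays in a single train_test_split call and pass the inputs to fit as a list. A sketch, assuming trainEmbedX, trainImagesX, and trainY all have 8382 samples along axis 0:

from sklearn.model_selection import train_test_split

t_embed, val_embed, t_img, val_img, t_y, val_y = train_test_split(
    trainEmbedX, trainImagesX, trainY, test_size=0.2, random_state=42)

# the two-branch model defined above takes a list of inputs
model.fit([t_embed, t_img], t_y,
          validation_data=([val_embed, val_img], val_y),
          epochs=epochs, batch_size=batch_size)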
I am trying to train a 2D neural network using Keras. I get a weird error message, "ValueError: setting an array element with a sequence.", when I call the model.fit function. Specifically, the error says that my "tensor_train_labels" is a sequence instead of an array. But my labels are indeed numpy arrays (not a sequence). I am not sure why Keras complains about it.
I am following this tutorial for building my network
tensor_train_data.shape
#TensorShape([Dimension(209), Dimension(64), Dimension(64), Dimension(3)])
tensor_test_data.shape
#TensorShape([Dimension(50), Dimension(64), Dimension(64), Dimension(3)])
tensor_train_labels = tf.reshape(tensor_train_labels, [209,1])
tensor_test_labels = tf.reshape(tensor_test_labels, [50,1])
batch_size = 10
epochs = 8
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3,3), activation='relu',
                                 input_shape=(64, 64, 3)))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation = 'relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(2, activation = 'softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(lr=0.0001, decay=1e-6),
              metrics=['accuracy'])
model.fit(tensor_train_data/255.0,
          tf.keras.utils.to_categorical(tensor_train_labels),
          batch_size=batch_size,
          shuffle=True,
          epochs=epochs,
          validation_data=(tensor_test_data/255.0,
                           tf.keras.utils.to_categorical(tensor_test_labels)))
scores = model.evaluate(tensor_test_labels/255.0,
                        tf.keras.utils.to_categorical(tensor_test_labels))
print('Loss: %.3f' % scores[0])
print('Accuracy: %.3f' % scores[1])
The error:
ValueError Traceback (most recent call last)
<ipython-input-224-80431a1b3e79> in <module>
1 model.compile(loss='categorical_crossentropy', optimizer = tf.keras.optimizers.Adam(lr=0.0001, decay=1e-6), metrics=['accuracy'])
----> 2 model.fit(tensor_train_data/255.0, tf.keras.utils.to_categorical(tensor_train_labels),
3 batch_size = batch_size,
4 shuffle = True,
5 epochs = epochs,
~\AppData\Local\conda\conda\envs\deeplearning\lib\site-packages\tensorflow\python\keras\utils\np_utils.py in to_categorical(y,
num_classes)
37 last.
38 """
---> 39 y = np.array(y, dtype='int')
40 input_shape = y.shape
41 if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
ValueError: setting an array element with a sequence.
The likely cause is that you have arrays of different sizes when you are trying to convert them into a numpy array. A possible solution: https://stackoverflow.com/a/49617425/8185479
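For illustration, here is a minimal sketch of how a ragged (inconsistently sized) label structure reproduces this error inside to_categorical's np.array(y, dtype='int') call. The toy data is made up, and the exact error wording can differ between NumPy versions:

import numpy as np

labels_ok = [[0], [1], [1]]          # consistent shapes -> fine, becomes shape (3, 1)
labels_ragged = [[0], [1, 0], [1]]   # inconsistent lengths

np.array(labels_ok, dtype='int')     # works
np.array(labels_ragged, dtype='int') # ValueError: setting an array element with a sequence.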