UnimplementedError: Graph execution error in CNN model

What's the problem with this?
I'm confused right now. I need to train my model, and I've read the documentation, but I keep getting this error. At first the model trained successfully and I saved it, but the accuracy was too low, so I reconstructed the model and now I get this error. The only thing I changed was img_width and img_height:
classification_no = 6
# Train and test dataset paths
train_dataset_path = "Datasets/train/"
test_dataset_path = "Datasets/test/"
img_width, img_height = 100, 100
size = 20

def detection_model():
    detection_model = Sequential(
        [
            # module 1
            tf.keras.layers.Conv2D(32, kernel_size=(3, 3), input_shape=(img_width, img_height, 1)),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Activation('relu'),
            tf.keras.layers.Conv2D(64, kernel_size=(3, 3), padding='same'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Activation('relu'),
            tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
            tf.keras.layers.Dropout(0.25),
            tf.keras.layers.Flatten(),
            # Dense
            tf.keras.layers.Dense(64),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Activation('relu'),
            # Dropout
            tf.keras.layers.Dropout(0.5),
            # Dense
            tf.keras.layers.Dense(64),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Activation('relu'),
            # Output
            tf.keras.layers.Dense(6, activation='softmax'),
        ]
    )
    detection_model.compile(loss='categorical_crossentropy',
                            optimizer=Adam(
                                learning_rate=0.0001,
                                beta_1=0.9,
                                beta_2=0.999,
                                decay=1e-6),
                            metrics=['accuracy'])
    detection_model.summary()
    return detection_model

train = ImageDataGenerator(rescale=1./255)
validation = ImageDataGenerator(rescale=1./255)

def get_model_train():
    data_train = train.flow_from_directory(
        train_dataset_path,
        target_size=(img_width, img_height),
        batch_size=16,
        color_mode='rgba',
        class_mode='categorical')
    return data_train

def get_model_test():
    data_test = validation.flow_from_directory(
        test_dataset_path,
        target_size=(img_width, img_height),
        batch_size=16,
        color_mode='rgba',
        class_mode='categorical')
    return data_test

model = detection_model()
fit_model = model.fit(
    get_model_train(),
    steps_per_epoch=2006 // 16,
    epochs=size,
    validation_data=get_model_test(),
    validation_steps=2006 // 16,
)
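No accepted answer is shown here, but a likely culprit from reading the code (an assumption, since the full traceback isn't included): the first Conv2D layer declares input_shape=(img_width, img_height, 1), i.e. single-channel images, while both generators request color_mode='rgba', i.e. four channels. That channel mismatch can surface as a Graph execution error when fit() runs. A minimal sketch of one fix, assuming grayscale input is acceptable for these images, is to make the generators match the model:
def get_model_train():
    return train.flow_from_directory(
        train_dataset_path,
        target_size=(img_width, img_height),
        batch_size=16,
        color_mode='grayscale',  # 1 channel, matching input_shape=(..., 1)
        class_mode='categorical')

def get_model_test():
    return validation.flow_from_directory(
        test_dataset_path,
        target_size=(img_width, img_height),
        batch_size=16,
        color_mode='grayscale',  # 1 channel, matching input_shape=(..., 1)
        class_mode='categorical')
Alternatively, keep color_mode='rgba' and change the model's input_shape to (img_width, img_height, 4); either way, the generator's channel count and the model's declared input shape must agree.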

Related

How to fix validation_split error in MLP using Keras?

I'm a newbie with neural networks. I'm doing text classification research using an MLP model with Keras: an input layer of 900 nodes, 2 hidden layers, and 2 outputs.
The code I use is as follows:
# Split data into training & testing sets (80:20)
Train_X, Test_X, Train_Y, Test_Y = model_selection.train_test_split(
    dataset['review'], dataset['sentimen'], test_size=0.2, random_state=8)

Encoder = LabelEncoder()
Train_Y = Encoder.fit_transform(Train_Y)
Test_Y = Encoder.fit_transform(Test_Y)

Tfidf_vect = TfidfVectorizer(max_features=None)
Tfidf_vect.fit(dataset['review'])
Train_X_Tfidf = Tfidf_vect.transform(Train_X)
Test_X_Tfidf = Tfidf_vect.transform(Test_X)

# ANN architecture
model = Sequential()
model.add(Dense(units=100, activation='sigmoid', input_shape=(32, 900)))
model.add(Dense(units=100, activation='sigmoid'))
model.add(Dense(units=2, activation='sigmoid'))
opt = Adam(learning_rate=0.001)
model.compile(loss='binary_crossentropy', optimizer=opt,
              metrics=['accuracy'])
print(model.summary())

# Hyperparameters
epochs = 100
batch_size = 32
es = EarlyStopping(monitor="val_loss", mode='min', patience=10)
model_prediction = model.fit(Train_X_Tfidf, Train_Y, epochs=epochs,
                             batch_size=batch_size, verbose=1,
                             validation_split=0.1, callbacks=[es])
But I get this error:
/usr/local/lib/python3.8/dist-packages/keras/engine/data_adapter.py in train_validation_split(arrays, validation_split)
1478 unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]
1479 if unsplitable:
-> 1480 raise ValueError(
1481 "`validation_split` is only supported for Tensors or NumPy "
1482 "arrays, found following types in the input: {}".format(unsplitable))
ValueError: `validation_split` is only supported for Tensors or NumPy arrays, found following types in the input: [<class 'scipy.sparse.csr.csr_matrix'>]
How do I fix it? Thank you so much.
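TfidfVectorizer.transform returns a SciPy sparse matrix, and as the error says, validation_split can only slice Tensors or NumPy arrays. A minimal sketch of one fix (fine for modest vocabulary sizes; for very large ones, split the sparse matrix yourself with train_test_split instead) is to densify the matrices before calling fit:
# Convert the sparse TF-IDF matrices to dense NumPy arrays
# so validation_split can slice them.
Train_X_dense = Train_X_Tfidf.toarray()
Test_X_dense = Test_X_Tfidf.toarray()

model_prediction = model.fit(Train_X_dense, Train_Y, epochs=epochs,
                             batch_size=batch_size, verbose=1,
                             validation_split=0.1, callbacks=[es])
Separately, note that input_shape=(32, 900) bakes the batch size into the input shape; for a (num_samples, 900) matrix the first Dense layer should declare input_shape=(900,).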

ValueError: Unknown initializer: my_filter

I build my CNN using the following code:
def arbitrary_functionality(tensor):
    return tf.abs(tensor)

def my_filter(shape, dtype=None):
    f = np.array([
        [[[-1]], [[2]], [[-2]], [[2]], [[-1]]],
        [[[2]], [[-6]], [[8]], [[-6]], [[2]]],
        [[[-2]], [[8]], [[-12]], [[8]], [[-2]]],
        [[[2]], [[-6]], [[8]], [[-6]], [[2]]],
        [[[-1]], [[2]], [[-2]], [[2]], [[-1]]]])
    assert f.shape == shape
    return K.variable(f, dtype='float32')

input_layer = Input(shape=(256, 256, 1))
conv = Conv2D(1, [5, 5], kernel_initializer=my_filter, input_shape=(256, 256, 1), trainable=True, padding='same')(input_layer)
conv = Conv2D(8, (5, 5), padding='same', strides=1, use_bias=False)(conv)
lambda_layer = Lambda(arbitrary_functionality)(conv)
output_layer = Activation(activation='tanh')(lambda_layer)
output_layer = AveragePooling2D(pool_size=(5, 5), strides=2)(output_layer)
hidden = Dense(256)(output_layer)
hidden = LeakyReLU(alpha=0.2)(hidden)
output = Dense(2, activation='softmax')(hidden)
model = Model(inputs=input_layer, outputs=output)

# Callback for loss logging per epoch
class LossHistory(Callback):
    def on_train_begin(self, logs={}):
        self.losses = []
        self.val_losses = []

    def on_epoch_end(self, epoch, logs={}):
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))

history = LossHistory()
tensorboard = TensorBoard(log_dir='E:/logs/trail', histogram_freq=0, write_graph=True, write_images=False)

adam = keras.optimizers.Adam(lr=lrate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay)
model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy', 'mse'])

batch_si = 64
fitted_model = model.fit(X_train, y_train, batch_size=batch_si, callbacks=[tensorboard], epochs=epochs, verbose=1, validation_split=0.2, shuffle=True)

# Save model
model.save('E:/models/trail.h5', overwrite=True)
model.save_weights('E:/models/weights_trail.hdf5', overwrite=True)

# Evaluate the model
scores = model.evaluate(X_test, y_test, batch_size=batch_si, verbose=1)
print("Model Accuracy: {:5.2f}%".format(100 * scores[1]))

# Load and evaluate the model
new_model = tf.keras.models.load_model('E:/models/trail.h5', custom_objects={'tf': tf})
new_model.load_weights('E:/models/weights_trail.hdf5')
new_model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy', 'mse'])
scores = new_model.evaluate(X_test, y_test, verbose=1)
print("Accuracy After Model Reloaded: {:5.2f}%".format(100 * scores[1]))
Now the problem is: I can evaluate the output successfully before saving and reloading the model, but when I reload the trained model file and try to evaluate the output, I get the following error:
ValueError: Unknown initializer: my_filter
You have to register the custom function name when loading (see here: https://www.tensorflow.org/guide/keras/save_and_serialize#custom_objects):
new_model = tf.keras.models.load_model('E:/models/trail.h5', custom_objects={'my_filter': my_filter, 'tf': tf})
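An alternative sketch (assuming TF 2.x): register the initializer with tf.keras.utils.register_keras_serializable so load_model can resolve it by name; the Lambda layer's tf reference still goes through custom_objects:
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

# Registered names are looked up automatically during deserialization.
@tf.keras.utils.register_keras_serializable(package='custom')
def my_filter(shape, dtype=None):
    # Same 5x5 fixed kernel as defined in the question.
    f = np.array([
        [[[-1]], [[2]], [[-2]], [[2]], [[-1]]],
        [[[2]], [[-6]], [[8]], [[-6]], [[2]]],
        [[[-2]], [[8]], [[-12]], [[8]], [[-2]]],
        [[[2]], [[-6]], [[8]], [[-6]], [[2]]],
        [[[-1]], [[2]], [[-2]], [[2]], [[-1]]]])
    assert f.shape == shape
    return K.variable(f, dtype='float32')

new_model = tf.keras.models.load_model('E:/models/trail.h5', custom_objects={'tf': tf})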

Getting Precision, Recall, Sensitivity and Specificity in Keras CNN

I have created a CNN that does binary classification on images. The CNN is shown below:
def neural_network():
    classifier = Sequential()
    # Adding a first convolutional layer
    classifier.add(Convolution2D(48, 3, input_shape=(320, 320, 3), activation='relu'))
    classifier.add(MaxPooling2D())
    # Adding a second convolutional layer
    classifier.add(Convolution2D(48, 3, activation='relu'))
    classifier.add(MaxPooling2D())
    # Flattening
    classifier.add(Flatten())
    # Fully connected
    classifier.add(Dense(256, activation='relu'))
    # Fully connected
    classifier.add(Dense(1, activation='sigmoid'))
    # Compiling the CNN
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    classifier.summary()

    train_datagen = ImageDataGenerator(rescale=1./255,
                                       horizontal_flip=True,
                                       vertical_flip=True,
                                       brightness_range=[0.5, 1.5])
    test_datagen = ImageDataGenerator(rescale=1./255)
    training_set = train_datagen.flow_from_directory('/content/drive/My Drive/data_sep/train',
                                                     target_size=(320, 320),
                                                     batch_size=32,
                                                     class_mode='binary')
    test_set = test_datagen.flow_from_directory('/content/drive/My Drive/data_sep/validate',
                                                target_size=(320, 320),
                                                batch_size=32,
                                                class_mode='binary')
    es = EarlyStopping(
        monitor="val_accuracy",
        patience=15,
        mode="max",
        baseline=None,
        restore_best_weights=True,
    )
    filepath = "/content/drive/My Drive/data_sep/weightsbestval.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]
    history = classifier.fit(training_set,
                             epochs=50,
                             validation_data=test_set,
                             callbacks=callbacks_list
                             )
    best_score = max(history.history['val_accuracy'])
    return best_score
The images in the folders are organized in the following way:
-train
    -healthy
    -patient
-validation
    -healthy
    -patient
Is there a way to calculate the metrics Precision, Recall, Sensitivity and Specificity, or at least the true positives, true negatives, false positives and false negatives, from this code?
from sklearn.metrics import classification_report

# shuffle=False keeps the prediction order aligned with test_set.classes
test_set = test_datagen.flow_from_directory('/content/drive/My Drive/data_sep/validate',
                                            target_size=(320, 320),
                                            batch_size=32,
                                            class_mode='binary',
                                            shuffle=False)
predictions = classifier.predict(
    test_set,
    steps=np.math.ceil(test_set.samples / test_set.batch_size),
)
# The model ends in a single sigmoid unit, so threshold at 0.5;
# np.argmax over a (N, 1) array would always return 0.
predicted_classes = (predictions > 0.5).astype(int).ravel()
true_classes = test_set.classes
class_labels = list(test_set.class_indices.keys())
report = classification_report(true_classes, predicted_classes, target_names=class_labels)
accuracy = metrics.accuracy_score(true_classes, predicted_classes)
If you do print(report), it will print all of these metrics per class. And if your total number of files is not divisible by your batch size, use batch_size=1 so no samples are skipped:
test_set = test_datagen.flow_from_directory('/content/drive/My Drive/data_sep/validate',
                                            target_size=(320, 320),
                                            batch_size=1,
                                            class_mode='binary',
                                            shuffle=False)
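If you also want the raw counts (and from them sensitivity and specificity), here is a short sketch using sklearn's confusion_matrix on the same true_classes / predicted_classes arrays built above:
from sklearn.metrics import confusion_matrix

# For binary labels, ravel() unpacks the 2x2 matrix as tn, fp, fn, tp.
tn, fp, fn, tp = confusion_matrix(true_classes, predicted_classes).ravel()
sensitivity = tp / (tp + fn)  # recall of the positive class
specificity = tn / (tn + fp)  # recall of the negative class
print("TP=%d TN=%d FP=%d FN=%d" % (tp, tn, fp, fn))
print("Sensitivity: %.3f  Specificity: %.3f" % (sensitivity, specificity))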

Model.fit() Validation Accuracy different than Model.predict()

I have created a CNN to do binary classification in Keras with the following code:
def neural_network():
    classifier = Sequential()
    # Adding a first convolutional layer
    classifier.add(Convolution2D(48, 3, input_shape=(320, 320, 3), activation='relu'))
    classifier.add(MaxPooling2D())
    # Adding a second convolutional layer
    classifier.add(Convolution2D(48, 3, activation='relu'))
    classifier.add(MaxPooling2D())
    # Flattening
    classifier.add(Flatten())
    # Fully connected
    classifier.add(Dense(256, activation='relu'))
    # Fully connected
    classifier.add(Dense(256, activation='sigmoid'))
    # Fully connected
    classifier.add(Dense(1, activation='sigmoid'))
    # Compiling the CNN
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    classifier.summary()

    train_datagen = ImageDataGenerator(rescale=1./255,
                                       shear_range=0.2,
                                       horizontal_flip=True,
                                       vertical_flip=True,
                                       brightness_range=[0.5, 1.5])
    test_datagen = ImageDataGenerator(rescale=1./255)
    training_set = train_datagen.flow_from_directory('/content/drive/My Drive/data_sep/train',
                                                     target_size=(320, 320),
                                                     batch_size=32,
                                                     class_mode='binary')
    test_set = test_datagen.flow_from_directory('/content/drive/My Drive/data_sep/validate',
                                                target_size=(320, 320),
                                                batch_size=32,
                                                class_mode='binary')
    es = EarlyStopping(
        monitor="val_accuracy",
        mode="max",
        patience=15,
        baseline=None,
        restore_best_weights=True,
    )
    filepath = "/content/drive/My Drive/data_sep/weightsbestval.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]
    history = classifier.fit(training_set,
                             epochs=10,
                             validation_data=test_set,
                             callbacks=[es]
                             )
    best_score = max(history.history['val_accuracy'])

    from sklearn.metrics import classification_report
    predictions = (classifier.predict(test_set) > 0.5).astype("int32")
    newlist = predictions.tolist()
    finallist = []
    for number in newlist:
        finallist.append(number[0])
    predicted_classes = np.asarray(finallist)
    true_classes = test_set.classes
    class_labels = list(test_set.class_indices.keys())
    report = classification_report(true_classes, predicted_classes, target_names=class_labels)
    accuracy = metrics.accuracy_score(true_classes, predicted_classes)
    print(true_classes)
    print(predicted_classes)
    print(class_labels)
    correct = 0
    for i in range(len(true_classes)):
        if true_classes[i] == predicted_classes[i]:
            correct = correct + 1
    print(correct)
    print((correct * 1.0) / (len(true_classes) * 1.0))
    print(report)
    return best_score
When I run the model, model.fit() reports a validation accuracy of 81.90%.
But computing accuracy from model.predict() on the same validation set gives 40%.
I have added a callback that restores the best weights. So what could be the problem here?
What fixed it for me was creating another ImageDataGenerator for the prediction step, with shuffling disabled. I am posting this answer in case anyone has the same problem; I used test2_set for the prediction:
test2_datagen = ImageDataGenerator(rescale=1./255)
test2_set = test2_datagen.flow_from_directory('/content/drive/My Drive/data_sep/validate',
                                              target_size=(320, 320),
                                              batch_size=32,
                                              class_mode='binary',
                                              shuffle=False)
Emphasis on the lowercase shuffle parameter: a capitalized Shuffle=False is not a valid keyword and the call will fail, while the default shuffle=True leaves the predictions out of order relative to test2_set.classes.
Since you are saving the best model in this line
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
please load this model in your code, and then predict with it:
from keras.models import load_model
loaded_model = load_model('data_sep/weightsbestval.hdf5')
Then:
loaded_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
score = loaded_model.evaluate(X_test, Y_test, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1] * 100))
Please vote / mark correct if you find this useful.
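A caveat on that second answer (my note, not the answerer's): in this question the validation data comes from a directory generator, so there are no X_test / Y_test arrays. With the shuffle=False generator from the first answer, the evaluation would instead look like:
score = loaded_model.evaluate(test2_set, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1] * 100))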

Error when checking target: expected dense_1 to have 4 dimensions, but got array with shape (20, 1)

I'm trying to implement a model to do binary classification of my pictures, but I ran into this problem. Could anyone help me solve it? Thank you!
Here is the model and the training process:
def googLeNet(input=Input(shape=(224, 224, 3))):
    .................
    .................
    ...............
    averagepool1_7x7_s1 = AveragePooling2D(pool_size=(7, 7), padding='same')(inception_5b)
    drop1 = Dropout(rate=0.4)(averagepool1_7x7_s1)
    linear = Dense(units=1, activation='linear')(drop1)
    last = Dense(units=1, activation='softmax')(linear)
    model = Model(input=input, outputs=last)
    return model
-----------------------------------------------------------------------
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='binary'
)
validation_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='binary'
)
model = googLeNet()
model.summary()
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc']
)
history = model.fit_generator(train_generator,
                              steps_per_epoch=100,
                              epochs=30,
                              validation_data=validation_generator,
                              validation_steps=50
)
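No answer is shown here, but the error message itself points at the cause: with class_mode='binary' the generator yields labels of shape (20,), while the final Dense layers are applied directly to the 4-D output of AveragePooling2D, so Keras expects a 4-D target; hence "expected dense_1 to have 4 dimensions, but got array with shape (20, 1)". A sketch of the usual fix (hedged, since most of the model body is elided) is to flatten before the classifier head and use a single sigmoid unit for binary labels:
averagepool1_7x7_s1 = AveragePooling2D(pool_size=(7, 7), padding='same')(inception_5b)
drop1 = Dropout(rate=0.4)(averagepool1_7x7_s1)
flat = Flatten()(drop1)                             # collapse (h, w, c) into a vector
last = Dense(units=1, activation='sigmoid')(flat)   # softmax over a single unit is always 1.0
model = Model(inputs=input, outputs=last)           # 'inputs', not 'input', in current Keras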
