Hello everyone, I started training a network and it got stuck; it did not finish the first epoch.
Here is the code I used:
from keras import applications, optimizers
from keras.models import Sequential, Model
from keras.layers import Flatten, Dense, Dropout
from keras.preprocessing.image import ImageDataGenerator

# path to the fully connected classifier weights trained beforehand
top_model_weights_path = '/data/fc_model.h5'
img_width, img_height = 150, 150
train_data_dir = '/data/train'
validation_data_dir = '/data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16

# load the VGG16 convolutional base without its fully connected head
model = applications.VGG16(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
print('Model loaded.')

# build the small classifier that sits on top of the convolutional base
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))
top_model.load_weights(top_model_weights_path)

# join the base and the head into one model
model = Model(inputs=model.input, outputs=top_model(model.output))

# freeze the first 25 layers of the combined model
for layer in model.layers[:25]:
    layer.trainable = False

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])

train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')

model.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples,
    epochs=epochs,
    validation_data=validation_generator,
    nb_val_samples=nb_validation_samples)
I am using transfer learning and followed a tutorial online. Please help, thank you.
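For reference, one likely cause (an assumption on my part, since no error output is shown): in Keras 2, fit_generator expects steps_per_epoch and validation_steps, which count batches, not samples. If samples_per_epoch=2000 ends up being treated as 2000 steps, each epoch processes 2000 × 16 = 32,000 images, which can look like training is stuck. A minimal sketch of the call with batch counts, assuming Keras 2.x:

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)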
What's the problem with this?
I'm confused right now. I need to train my model, and I have read the documentation, but I'm getting this kind of error. At first the model trained successfully and I saved it, but the accuracy was too low, so I reconstructed my model and got this error. I only changed img_width and img_height:
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator

classification_no = 6

# Train and test dataset paths
train_dataset_path = "Datasets/train/"
test_dataset_path = "Datasets/test/"
img_width, img_height = 100, 100
size = 20  # number of epochs

def detection_model():
    detection_model = Sequential(
        [
            # module 1
            tf.keras.layers.Conv2D(32, kernel_size=(3, 3), input_shape=(img_width, img_height, 1)),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Activation('relu'),
            tf.keras.layers.Conv2D(64, kernel_size=(3, 3), padding='same'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Activation('relu'),
            tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
            tf.keras.layers.Dropout(0.25),
            tf.keras.layers.Flatten(),
            # dense block
            tf.keras.layers.Dense(64),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Activation('relu'),
            tf.keras.layers.Dropout(0.5),
            # dense block
            tf.keras.layers.Dense(64),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Activation('relu'),
            # output
            tf.keras.layers.Dense(classification_no, activation='softmax'),
        ]
    )
    detection_model.compile(loss='categorical_crossentropy',
                            optimizer=Adam(
                                learning_rate=0.0001,
                                beta_1=0.9,
                                beta_2=0.999,
                                decay=1e-6),
                            metrics=['accuracy'])
    detection_model.summary()
    return detection_model

train = ImageDataGenerator(rescale=1. / 255)
validation = ImageDataGenerator(rescale=1. / 255)

def get_model_train():
    data_train = train.flow_from_directory(
        train_dataset_path,
        target_size=(img_width, img_height),
        batch_size=16,
        color_mode='rgba',
        class_mode='categorical')
    return data_train

def get_model_test():
    data_test = validation.flow_from_directory(
        test_dataset_path,
        target_size=(img_width, img_height),
        batch_size=16,
        color_mode='rgba',
        class_mode='categorical')
    return data_test

model = detection_model()
fit_model = model.fit(
    get_model_train(),
    steps_per_epoch=2006 // 16,
    epochs=size,
    validation_data=get_model_test(),
    validation_steps=2006 // 16,
)
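One likely culprit, noted as an assumption since the traceback is not included: the first Conv2D declares a one-channel input, input_shape=(img_width, img_height, 1), while both generators use color_mode='rgba', which yields four-channel arrays. A minimal sketch of one consistent choice (grayscale throughout):

data_train = train.flow_from_directory(
    train_dataset_path,
    target_size=(img_width, img_height),
    batch_size=16,
    color_mode='grayscale',  # one channel, matching input_shape
    class_mode='categorical')

Alternatively, keep color_mode='rgba' and change input_shape to (img_width, img_height, 4).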
I have created a CNN that does binary classification on images. The CNN is shown below:
def neural_network():
    classifier = Sequential()
    # first convolutional layer
    classifier.add(Convolution2D(48, 3, input_shape=(320, 320, 3), activation='relu'))
    classifier.add(MaxPooling2D())
    # second convolutional layer
    classifier.add(Convolution2D(48, 3, activation='relu'))
    classifier.add(MaxPooling2D())
    # flattening
    classifier.add(Flatten())
    # fully connected layers
    classifier.add(Dense(256, activation='relu'))
    classifier.add(Dense(1, activation='sigmoid'))
    # compiling the CNN
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    classifier.summary()

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       horizontal_flip=True,
                                       vertical_flip=True,
                                       brightness_range=[0.5, 1.5])
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    training_set = train_datagen.flow_from_directory('/content/drive/My Drive/data_sep/train',
                                                     target_size=(320, 320),
                                                     batch_size=32,
                                                     class_mode='binary')
    test_set = test_datagen.flow_from_directory('/content/drive/My Drive/data_sep/validate',
                                                target_size=(320, 320),
                                                batch_size=32,
                                                class_mode='binary')

    es = EarlyStopping(
        monitor="val_accuracy",
        patience=15,
        mode="max",
        baseline=None,
        restore_best_weights=True,
    )
    filepath = "/content/drive/My Drive/data_sep/weightsbestval.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]

    history = classifier.fit(training_set,
                             epochs=50,
                             validation_data=test_set,
                             callbacks=callbacks_list)
    best_score = max(history.history['val_accuracy'])
    return best_score
The images in the folders are organized in the following way:

train/
    healthy/
    patient/
validation/
    healthy/
    patient/
Is there a way to calculate the metrics precision, recall, sensitivity, and specificity, or at least the true positives, true negatives, false positives, and false negatives, from this code?
import numpy as np
from sklearn import metrics
from sklearn.metrics import classification_report

# shuffle must be off so the prediction order matches test_set.classes
test_set = test_datagen.flow_from_directory('/content/drive/My Drive/data_sep/validate',
                                            target_size=(320, 320),
                                            batch_size=32,
                                            class_mode='binary',
                                            shuffle=False)
predictions = model.predict_generator(
    test_set,
    steps=np.math.ceil(test_set.samples / test_set.batch_size),
)
# the model has a single sigmoid output, so threshold at 0.5
# (np.argmax over axis 1 would always return 0 here)
predicted_classes = (predictions > 0.5).astype(int).ravel()
true_classes = test_set.classes
class_labels = list(test_set.class_indices.keys())
report = classification_report(true_classes, predicted_classes, target_names=class_labels)
accuracy = metrics.accuracy_score(true_classes, predicted_classes)
And if you do print(report), it will print everything.

And if your total number of files is not divisible by your batch size, then use:

test_set = test_datagen.flow_from_directory('/content/drive/My Drive/data_sep/validate',
                                            target_size=(320, 320),
                                            batch_size=1,
                                            class_mode='binary')
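To get the raw counts the question asks about, a short sketch building on the same true_classes and predicted_classes arrays (assuming label 1 is the positive 'patient' class, per flow_from_directory's alphabetical ordering):

from sklearn.metrics import confusion_matrix

# ravel() unpacks the 2x2 matrix as tn, fp, fn, tp
tn, fp, fn, tp = confusion_matrix(true_classes, predicted_classes).ravel()
sensitivity = tp / (tp + fn)  # recall of the positive class
specificity = tn / (tn + fp)
precision = tp / (tp + fp)
print(tn, fp, fn, tp, sensitivity, specificity, precision)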
I am trying to apply k-fold cross-validation to a CNN classification problem. Say I have two classes, carA and carB, so I made these subfolders:

car/trainCross/fold0  car/trainCross/fold1
car/validCross/fold0  car/validCross/fold1

and the following code:
from keras.models import load_model
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint

model_path = '../carPrediction/model/' + 'saved.hdf5'

for i in range(2):
    print('training->', i, ' split')

    train_generator = train_datagen.flow_from_directory(
        TRAIN_CROPPED_PATH + 'fold' + str(i),
        target_size=(image_size, image_size),
        batch_size=batch_size,
        class_mode='categorical',
        seed=2019,
        color_mode='rgb')

    print(VALID_CROPPED_PATH + 'fold' + str(i))
    validation_generator = valid_datagen.flow_from_directory(
        VALID_CROPPED_PATH + 'fold' + str(i),
        target_size=(image_size, image_size),
        batch_size=batch_size,
        class_mode='categorical',
        seed=2019,
        color_mode='rgb')

    test_generator = test_datagen.flow_from_dataframe(
        dataframe=df_test,
        directory=TEST_CROPPED_PATH,
        x_col='img_file',
        y_col=None,
        target_size=(image_size, image_size),
        color_mode='rgb',
        class_mode=None,
        batch_size=batch_size,
        shuffle=False)

    # reload the best weights from the previous fold if they exist
    try:
        model = load_model(model_path, compile=True)
    except OSError:
        pass

    patient = 2
    callbacks1 = [
        EarlyStopping(monitor='val_loss', patience=patient, mode='min', verbose=1),
        ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=patient / 2, min_lr=0.00001, verbose=1, mode='min'),
        ModelCheckpoint(filepath=model_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min'),
    ]

    history = model.fit_generator(
        train_generator,
        steps_per_epoch=get_steps(nb_train_sample, batch_size),
        epochs=2,
        validation_data=validation_generator,
        validation_steps=get_steps(nb_validation_sample, batch_size),
        verbose=1,
        callbacks=callbacks1)
But I'm not sure whether this approach is correct. Any thoughts?
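For completeness, get_steps is referenced above but not defined in the snippet; a minimal sketch of what such a helper presumably computes (this definition is my assumption, not the original):

import math

def get_steps(num_samples, batch_size):
    # batches per epoch, counting the final partial batch
    return int(math.ceil(num_samples / batch_size))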
I'm trying to implement a model to classify my pictures (binary), but I got this problem. Could anyone help me solve it? Thank you!
Here is the model and the training process:
from keras.layers import Input, AveragePooling2D, Dropout, Dense
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers

def googLeNet(input=Input(shape=(224, 224, 3))):
    .................
    .................
    ...............
    averagepool1_7x7_s1 = AveragePooling2D(pool_size=(7, 7), padding='same')(inception_5b)
    drop1 = Dropout(rate=0.4)(averagepool1_7x7_s1)
    linear = Dense(units=1, activation='linear')(drop1)
    last = Dense(units=1, activation='softmax')(linear)
    model = Model(input=input, outputs=last)
    return model
-----------------------------------------------------------------------
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='binary'
)
validation_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='binary'
)

model = googLeNet()
model.summary()
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])

history = model.fit_generator(train_generator,
                              steps_per_epoch=100,
                              epochs=30,
                              validation_data=validation_generator,
                              validation_steps=50)
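One likely problem, offered as an assumption since the inception blocks are elided: a softmax over a single unit always outputs 1.0, so the network cannot learn, and the pooled features are never flattened before the Dense layers. A minimal sketch of a binary-classification head instead:

from keras.layers import Flatten

x = Flatten()(drop1)                             # collapse the pooled feature map
last = Dense(units=1, activation='sigmoid')(x)   # one sigmoid unit for binary output
model = Model(inputs=input, outputs=last)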
I have used the data augmentation below to save memory.

Total number of test images = 400; batch size = 128.

When I check the accuracy of the test set using model.evaluate_generator, it is different from the final validation accuracy of the last epoch of my model. Furthermore, the output of model.evaluate_generator changes when I repeat it. Below is my code. Please help!
train_datagen = ImageDataGenerator(
    rescale=1./255,)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    color_mode="grayscale",
    target_size=(img_width, img_height),
    batch_size=128,
    class_mode='categorical',)
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    color_mode="grayscale",
    target_size=(img_width, img_height),
    batch_size=128,
    class_mode='categorical')

#%%
hist = model.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples,
    nb_epoch=nb_epoch,
    validation_data=validation_generator,
    nb_val_samples=nb_validation_samples)

scoreSeg = model.evaluate_generator(validation_generator, 400)
print("Accuracy = ", scoreSeg[1])