Concatenate four CNN models in Keras

I want to combine four CNNs (pop1, pop2, pop3, and pop4) into one in Keras. My goal is to build a classifier that can assign an image to one of three possible classes. Here is how I concatenate the CNNs:
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Concatenate
from keras.models import Model
# Start With Pop1 Branch ############################################################
classifier_pop1 = Sequential()
classifier_pop1.add(Conv2D(1, (3,3), input_shape = (4009, 36, 1), activation = 'relu'))
classifier_pop1.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop1.add(Conv2D(1, (3,3), activation = 'relu'))
classifier_pop1.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop1.add(Flatten())
classifier_pop1.add(Dense(units = 300, activation = 'relu'))
classifier_pop1.add(Dense(units = 24, activation = 'relu'))
# Start With Pop2 Branch ############################################################
classifier_pop2 = Sequential()
classifier_pop2.add(Conv2D(1, (3,3), input_shape = (4009, 36, 1), activation = 'relu'))
classifier_pop2.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop2.add(Conv2D(1, (3,3), activation = 'relu'))
classifier_pop2.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop2.add(Flatten())
classifier_pop2.add(Dense(units = 300, activation = 'relu'))
classifier_pop2.add(Dense(units = 24, activation = 'relu'))
# Start With Pop3 Branch ############################################################
classifier_pop3 = Sequential()
classifier_pop3.add(Conv2D(1, (3,3), input_shape = (4009, 32, 1), activation = 'relu'))
classifier_pop3.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop3.add(Conv2D(1, (3,3), activation = 'relu'))
classifier_pop3.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop3.add(Flatten())
classifier_pop3.add(Dense(units = 300, activation = 'relu'))
classifier_pop3.add(Dense(units = 24, activation = 'relu'))
# Start With Pop4 Branch ############################################################
classifier_pop4 = Sequential()
classifier_pop4.add(Conv2D(1, (3,3), input_shape = (4009, 18, 1), activation = 'relu'))
classifier_pop4.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop4.add(Conv2D(1, (3,3), activation = 'relu'))
classifier_pop4.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop4.add(Flatten())
classifier_pop4.add(Dense(units = 300, activation = 'relu'))
classifier_pop4.add(Dense(units = 24, activation = 'relu'))
# Making The Combinition ##########################################################
model_concat = Concatenate()([classifier_pop1.output,classifier_pop2.output,classifier_pop3.output,classifier_pop4.output])
model_concat = Dense(3, activation='softmax')(model_concat)
model = Model(inputs=[classifier_pop1.input,classifier_pop2.input,classifier_pop3.input,classifier_pop4.input], outputs=model_concat)
## Compiling the model
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
I suspect that the error in my code has to do with the code below. I have four folders (pop1, pop2, pop3, and pop4), each containing three subfolders (Model1, Model2, and Model3). I am not sure how to use ImageDataGenerator in this part; I just followed this other post: Combining Two CNN's
##Image preprocessing
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set_pop1 = train_datagen.flow_from_directory('./Datasets/training_set/Pop1',
target_size = (4009, 36),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=True)
test_set_pop1 = test_datagen.flow_from_directory('./Datasets/test_set/Pop1',
target_size = (4009, 36),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=False)
training_set_pop2 = train_datagen.flow_from_directory('./Datasets/training_set/Pop2',
target_size = (4009, 36),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=True)
test_set_pop2 = test_datagen.flow_from_directory('./Datasets/test_set/Pop2',
target_size = (4009, 36),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=False)
training_set_pop3 = train_datagen.flow_from_directory('./Datasets/training_set/Pop3',
target_size = (4009, 32),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=True)
test_set_pop3 = test_datagen.flow_from_directory('./Datasets/test_set/Pop3',
target_size = (4009, 32),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=False)
training_set_pop4 = train_datagen.flow_from_directory('./Datasets/training_set/Planiceps',
target_size = (4009, 18),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=True)
test_set_pop4 = test_datagen.flow_from_directory('./Datasets/test_set/Planiceps',
target_size = (4009, 18),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=False)
model.fit([training_set_pop1,training_set_pop2,training_set_pop3,training_set_pop4],
steps_per_epoch = int(2400/100),
epochs = 4,
validation_data = [test_set_pop1,test_set_pop2,test_set_pop3,test_set_pop4],
validation_steps = int(600/100))
Then, I got this error:
ValueError: Failed to find data adapter that can handle input: (<class 'list'> containing values of types {"<class 'keras_preprocessing.image.directory_iterator.DirectoryIterator'>"}), <class 'NoneType'>

Maybe you could try the following, since you can't pass the generators as a list:
model.fit(zip(training_set_pop1,training_set_pop2,training_set_pop3,training_set_pop4),
steps_per_epoch = int(2400/100),
epochs = 4,
validation_data = zip(test_set_pop1,test_set_pop2,test_set_pop3,test_set_pop4),
validation_steps = int(600/100))
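If zip alone does not give batches in the structure the model expects (a list of four input arrays plus a single label array), a small wrapper generator is another option. This is only a sketch, assuming all four generators use the same batch size and that their ordering keeps corresponding samples and labels aligned across branches:
def combined_generator(gen1, gen2, gen3, gen4):
    # Yields ([x1, x2, x3, x4], y) batches for the four-input model.
    # Assumes the four generators stay aligned sample-for-sample,
    # so the labels from the first branch can stand in for all of them.
    while True:
        x1, y1 = next(gen1)
        x2, _ = next(gen2)
        x3, _ = next(gen3)
        x4, _ = next(gen4)
        yield [x1, x2, x3, x4], y1

model.fit(combined_generator(training_set_pop1, training_set_pop2,
                             training_set_pop3, training_set_pop4),
          steps_per_epoch = int(2400/100),
          epochs = 4,
          validation_data = combined_generator(test_set_pop1, test_set_pop2,
                                               test_set_pop3, test_set_pop4),
          validation_steps = int(600/100))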

Related

Unable to find Accuracy and the ROC curve of my CNN model

My sample CNN code is shown below:
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense, Dropout
classifier = Sequential()
#1st Conv layer
classifier.add(Convolution2D(64, (9, 9), input_shape=(64, 64, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(4,4)))
#2nd Conv layer
classifier.add(Convolution2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2,2)))
classifier.add(Flatten())
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dropout(0.2))
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 2, activation = 'softmax'))
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
training_set = train_datagen.flow_from_directory('D:/regionGrowing_MLT/png_orig_imgs/Training',
target_size = (64, 64),
batch_size = 32,
class_mode = 'categorical')
test_datagen = ImageDataGenerator(rescale = 1./255)
test_set = test_datagen.flow_from_directory('D:/regionGrowing_MLT/png_orig_imgs/Test',
target_size = (64, 64),
batch_size = 32,
class_mode = 'categorical'
)
probs=classifier.fit(x = training_set, validation_data = test_set, epochs = 50)
I tried the following lines to find the ROC curve, but I get an error message:
predictions = classifier.predict(test_set)
fpr, tpr,threshold = roc_curve(test_set,predictions)
The following error message is displayed:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-52-2ea53b1ba7f1> in <module>
----> 1 fpr, tpr,threshold = roc_curve(test_set,predictions)
ValueError: Expected array-like (array or non-string sequence), got <keras.preprocessing.image.DirectoryIterator object at 0x000002D21D1B61C0>
Any suggestions would be appreciated.
From the error, it looks like you have to convert the Keras image iterator object into plain arrays before passing it to the scikit-learn functions: roc_curve expects array-like labels and scores, not a DirectoryIterator. The ground-truth labels are available as test_set.classes (make sure the test generator is created with shuffle=False so they stay aligned with the prediction order), and model.predict already returns a NumPy array. Try something like this:
Accuracy
fil_acc_orig = accuracy_score(test_set.classes, predictions.argmax(axis=1))
ROC Curve
fpr, tpr, thresholds = roc_curve(test_set.classes, predictions[:, 1])
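Putting it together, a minimal end-to-end sketch could look like the following; it assumes the test generator is rebuilt with shuffle=False so that test_set.classes lines up with the prediction order:
from sklearn.metrics import accuracy_score, roc_curve, auc

# Class probabilities for every test image, in directory order
predictions = classifier.predict(test_set)    # shape (n_samples, 2)
y_true = test_set.classes                     # integer labels from the generator
y_score = predictions[:, 1]                   # probability of class 1

print("Accuracy:", accuracy_score(y_true, predictions.argmax(axis=1)))
fpr, tpr, thresholds = roc_curve(y_true, y_score)
print("AUC:", auc(fpr, tpr))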

Keras parallel layers merging

I read the Keras documentation but have not found any explanation for the following error.
Code:
import numpy as np
import pandas as pd
from tensorflow.keras import layers
from keras.optimizers import SGD
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.core import Dense, Activation, Dropout, Reshape, Flatten
from keras.utils.np_utils import to_categorical
from keras.layers import LSTM, ConvLSTM2D, BatchNormalization, RepeatVector, TimeDistributed, Concatenate
data = pd.read_excel('oildata.xlsx')
firstBranch = Sequential()
#firstBranch.add(Reshape((1,28,28), input_shape=(,)))
firstBranch.add(LSTM(64, input_shape=(10, 1100)))
#firstBranch.add(MaxPooling2D((2, 2), strides=(2, 2)))
firstBranch.add(Flatten())
secondBranch = Sequential()
secondBranch.add(BatchNormalization(name = 'batch_norm_0', input_shape = (1000, 10, 1, 1)))
secondBranch.add(ConvLSTM2D(name ='conv_lstm_1',
filters = 64, kernel_size = (10, 1),
padding = 'same',
return_sequences = False))
secondBranch.add(Dropout(0.10, name = 'dropout_1'))
secondBranch.add(BatchNormalization(name = 'batch_norm_1'))
# model.add(ConvLSTM2D(name ='conv_lstm_2',
# filters = 64, kernel_size = (5, 1),
# padding='same',
# return_sequences = False))
# model.add(Dropout(0.20, name = 'dropout_2'))
# model.add(BatchNormalization(name = 'batch_norm_2'))
secondBranch.add(Flatten())
secondBranch.add(RepeatVector(1000))
secondBranch.add(Reshape((1000, 10, 1, 64)))
# model.add(ConvLSTM2D(name ='conv_lstm_3',
# filters = 64, kernel_size = (10, 1),
# padding='same',
# return_sequences = True))
# model.add(Dropout(0.20, name = 'dropout_3'))
# model.add(BatchNormalization(name = 'batch_norm_3'))
secondBranch.add(ConvLSTM2D(name ='conv_lstm_4',
filters = 64, kernel_size = (5, 1),
padding='same',
return_sequences = True))
secondBranch.add(TimeDistributed(Dense(units=1, name = 'dense_1', activation = 'relu')))
secondBranch.add(Dense(units=1, name = 'dense_2'))
secondBranch.add(Flatten())
thirdBranch = Sequential()
thirdBranch.add(Reshape((1,28,28), input_shape=(784,)))
thirdBranch.add(Dense(10, activation='relu'))
thirdBranch.add(Flatten())
fourthBranch = Sequential()
#fourthBranch.add(Reshape((1,28,28), input_shape=(784,)))
fourthBranch.add(Dense(10, activation='relu'))
fourthBranch.add(Flatten())
#merged = Concatenate([firstBranch, secondBranch, thirdBranch,fourthBranch], mode='concat')
merged = Concatenate([firstBranch,secondBranch,thirdBranch,fourthBranch])
model = Sequential()
model.add(merged)
model.add(Dense(28*3, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(28, activation='relu'))
model.add(Dense(19))
model.add(Activation("softmax"))
sgd = SGD(lr=0.5, momentum=0.8, decay=0.1, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit([X,X,X,X],X, batch_size=100, verbose=2)
yPred = model.predict([X,X,X,X],X)
Error:
TypeError Traceback (most recent call last)
<ipython-input-385-11a86cc54884> in <module>
88 sgd = SGD(lr=0.5, momentum=0.0, decay=0.0, nesterov=False)
89 model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
---> 90 model.fit([X,X,X,X],X, batch_size=100, verbose=2)
91
92 yPred = model.predict([X,X,X,X],X)
...........................................
TypeError: list indices must be integers or slices, not ListWrapper
What does ListWrapper mean? The data has been turned into frames and has to fit the model.
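For what it's worth, this TypeError typically appears when a list of Sequential model objects is passed to Concatenate (or added to a Sequential) instead of a list of tensors; Keras wraps that list in a ListWrapper it cannot index. A hedged sketch of the usual merging pattern, assuming every branch declares an input_shape on its first layer so that .input and .output exist, looks like this (the same idea as the four-branch example in the first question):
from keras.models import Model
from keras.layers import Concatenate, Dense, Dropout

# Merge the branch output tensors, not the Sequential objects themselves
merged = Concatenate()([firstBranch.output, secondBranch.output,
                        thirdBranch.output, fourthBranch.output])
x = Dense(28 * 3, activation='relu')(merged)
x = Dropout(0.5)(x)
x = Dense(28, activation='relu')(x)
out = Dense(19, activation='softmax')(x)

model = Model(inputs=[firstBranch.input, secondBranch.input,
                      thirdBranch.input, fourthBranch.input],
              outputs=out)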

Yolov3 model over fitting with Cifar10 Dataset

I am trying to implement Yolov3 using the CIFAR-10 dataset for testing. When I run it, the accuracy converges to 0.1 (it always selects the same class). The dataset size should not be an issue, and I have confirmed that the rest of the code works fine on a smaller model, so I am not sure why the model is doing this. Is there any way to get Yolov3 to work?
import cv2
import random
import struct
import mlxtend
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
from keras.datasets import mnist, cifar10
from keras.models import Sequential
from tensorflow.keras import layers
from mlxtend.data import loadlocal_mnist
from matplotlib.patches import Rectangle
from keras.layers.merge import add, concatenate
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers.experimental import preprocessing
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
data = cifar10
(train_images, train_labels), (test_images, test_labels) = data.load_data()
train_images, test_images = train_images/255.0, test_images/255.0
def create_model():
model = Sequential()
model.add(Conv2D(filters = 64, activation = 'relu', padding='same', kernel_size = 7, strides = 2, input_shape=(32, 32, 3)))
model.add(MaxPooling2D(pool_size = (2, 2), strides = 2))
model.add(Conv2D(filters = 192, activation = 'relu', padding='same', kernel_size = 3))
model.add(MaxPooling2D(pool_size = (2, 2), strides = 2))
model.add(Conv2D(filters = 128, activation = 'relu', padding='same', kernel_size = 1))
model.add(Conv2D(filters = 256, activation = 'relu', padding='same', kernel_size = 3))
model.add(Conv2D(filters = 256, activation = 'relu', padding='same', kernel_size = 1))
model.add(Conv2D(filters = 512, activation = 'relu', padding='same', kernel_size = 3))
model.add(MaxPooling2D(pool_size = (2, 2), strides = 2))
model.add(Conv2D(filters = 256, activation = 'relu', padding='same', kernel_size = 1))
model.add(Conv2D(filters = 512, activation = 'relu', padding='same', kernel_size = 3))
model.add(Conv2D(filters = 256, activation = 'relu', padding='same', kernel_size = 1))
model.add(Conv2D(filters = 512, activation = 'relu', padding='same', kernel_size = 3))
model.add(Conv2D(filters = 256, activation = 'relu', padding='same', kernel_size = 1))
model.add(Conv2D(filters = 512, activation = 'relu', padding='same', kernel_size = 3))
model.add(Conv2D(filters = 256, activation = 'relu', padding='same', kernel_size = 1))
model.add(Conv2D(filters = 512, activation = 'relu', padding='same', kernel_size = 3))
model.add(Conv2D(filters = 512, activation = 'relu', padding='same', kernel_size = 1))
model.add(Conv2D(filters = 1024, activation = 'relu', padding='same', kernel_size = 3))
model.add(MaxPooling2D(pool_size = (2, 2), strides = 2))
model.add(Conv2D(filters = 512, activation = 'relu', padding='same', kernel_size = 1))
model.add(Conv2D(filters = 1024, activation = 'relu', padding='same', kernel_size = 3))
model.add(Conv2D(filters = 512, activation = 'relu', padding='same', kernel_size = 1))
model.add(Conv2D(filters = 1024, activation = 'relu', padding='same', kernel_size = 3))
model.add(Conv2D(filters = 1024, activation = 'relu', padding='same', kernel_size = 3))
model.add(Conv2D(filters = 1024, activation = 'relu', padding='same', kernel_size = 3, strides = 2))
model.add(Conv2D(filters = 1024, activation = 'relu', padding='same', kernel_size = 3))
model.add(Conv2D(filters = 1024, activation = 'relu', padding='same', kernel_size = 3))
model.add(Flatten())
model.add(Dense(len(train_labels)))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
model = create_model()
num_epochs = 5
batch_size = 32
model.fit(train_images, train_labels, epochs = num_epochs, validation_split = 0.25, batch_size = batch_size)
results = model.evaluate(test_images, test_labels)
print("Test Accuracy:", results[1])

Computing the False Positive Rate (FPR) and True Positive Rate (TPR) in a CNN

I am designing a CNN for classifying two types of images, and I need to compute the FPR and TPR.
Below you can see my code, but I don't know how to compute the FPR and TPR based on this code.
Could you please let me know how I can do that?
I know that for computing the FPR and TPR I can use the following code
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_predic)
where y_predic can be computed by y_predic = model.predict(x_test),
but in my code I don't know how to do that.
I would be so thankful if you could help me.
num_classes = 1
batch_size = 512
epoch = 100
model = Sequential()
model.add(Conv2D(filters=8, kernel_size=(3, 3), padding='Valid', activation='relu', input_shape=(64, 64, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.3))
model.add(Conv2D(filters=8, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(Dropout(rate=0.3))
model.add(Conv2D(filters=16, kernel_size=(3, 3), padding='Same',activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.3))
model.add(Flatten())
model.add(Dense(units=128, activation='relu'))
model.add(Dense(units=num_classes, activation='sigmoid'))
model.compile(optimizer='adam', loss=keras.losses.binary_crossentropy, metrics=['accuracy'])
print(model.summary())
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('D:/Data/CNN/train',
target_size = (64, 64),
batch_size = batch_size,
class_mode = 'binary')
test_set = test_datagen.flow_from_directory('D:/Data/CNN/test',
target_size = (64, 64),
batch_size = batch_size,
class_mode = 'binary', shuffle= True)
history = model.fit_generator(training_set,
steps_per_epoch = 4000//batch_size,
epochs = epoch,
verbose= 2,
validation_data = test_set,
validation_steps = 1000//batch_size)
You can use keras.metrics almost as is. It has TruePositives, FalsePositives (and the other confusion-matrix counts) as well as SensitivityAtSpecificity:
model.compile(optimizer = 'adam', loss = keras.losses.binary_crossentropy, metrics = ['accuracy', tf.keras.metrics.SensitivityAtSpecificity(0.5)])
But a Keras model supports only keras.metrics, so the old tf.metrics module cannot be used without writing a custom metric.
https://www.tensorflow.org/api_docs/python/tf/keras/metrics
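As a hedged sketch of both routes (the metric and function names below are standard tf.keras/scikit-learn ones, but the generator details depend on your data; in particular, the test generator should be created with shuffle=False for the post-training route):
import tensorflow as tf
from sklearn.metrics import roc_curve

# Option 1: track confusion-matrix counts while training
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy',
                       tf.keras.metrics.TruePositives(name='tp'),
                       tf.keras.metrics.FalsePositives(name='fp'),
                       tf.keras.metrics.TrueNegatives(name='tn'),
                       tf.keras.metrics.FalseNegatives(name='fn')])

# Option 2: compute FPR/TPR after training with scikit-learn
# (requires a test generator built with shuffle=False so that
#  test_set.classes matches the prediction order)
y_score = model.predict(test_set).ravel()   # sigmoid probabilities
y_true = test_set.classes                   # integer ground-truth labels
fpr, tpr, thresholds = roc_curve(y_true, y_score)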

Increase the number of classes in image classifier

I have written a program for image classification of two objects, namely dogs and cats, using a CNN in Keras. How can I increase the number of classes, i.e., dogs, cats, and frogs?
Here's the code:
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
filepath="weights-improvment-{epoch:02d}-{val_acc:.2f}.hdf5"
checpoint=ModelCheckpoint(filepath,monitor='val_acc',verbose=1,save_best_only=True,mode='max')
callback_list=[checpoint]
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('training_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
test_set = test_datagen.flow_from_directory('test_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
classifier.fit_generator(training_set,
steps_per_epoch = 8000,
epochs = 10,
validation_data = test_set,
validation_steps = 2000)
classifier.save('model_after_trained.h5')
In order to classify more than two classes, the number of neurons (units) in the last layer must be changed to the number of classes to be predicted; for mutually exclusive classes the usual choice is a softmax activation with a categorical_crossentropy loss and class_mode = 'categorical' in the generators.
For example, if you want to predict 3 classes, the last layer becomes:
classifier.add(Dense(units = 3, activation = 'softmax'))
Please find the below link which will help you to do multi-class classification using CNN: https://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html
Hope this helps!!!
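For completeness, a minimal sketch of the full set of changes for three classes (output layer, loss, and generator class mode), assuming the training and test directories contain one subfolder per class:
classifier.add(Dense(units = 3, activation = 'softmax'))
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])

training_set = train_datagen.flow_from_directory('training_set',
                                                 target_size = (64, 64),
                                                 batch_size = 32,
                                                 class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('test_set',
                                            target_size = (64, 64),
                                            batch_size = 32,
                                            class_mode = 'categorical')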
