I have written a program for image classification of two classes, dogs and cats, using a CNN in Keras. How can I increase the number of classes, i.e., dogs, cats, and frogs?
Here's the code:
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
filepath="weights-improvment-{epoch:02d}-{val_acc:.2f}.hdf5"
checpoint=ModelCheckpoint(filepath,monitor='val_acc',verbose=1,save_best_only=True,mode='max')
callback_list=[checpoint]
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('training_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
test_set = test_datagen.flow_from_directory('test_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
classifier.fit_generator(training_set,
steps_per_epoch = 8000,
epochs = 10,
validation_data = test_set,
validation_steps = 2000,
callbacks = callback_list)
classifier.save('model_after_trained.h5')
In order to classify more than two classes, the number of neurons (units) in the last layer must equal the number of classes to be predicted, and the output activation, loss function, and generator class_mode must be changed along with it.
For example, to predict 3 classes the last layer becomes:
classifier.add(Dense(units = 3, activation = 'softmax'))
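A minimal sketch of the remaining changes, assuming you add a third class subfolder (e.g. frogs, the folder name is up to you) next to the cat and dog folders under both training_set and test_set:
# use categorical cross-entropy for a single-label, multi-class output
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
# the generators must now yield one-hot labels, so switch class_mode to 'categorical'
training_set = train_datagen.flow_from_directory('training_set',
                                                 target_size = (64, 64),
                                                 batch_size = 32,
                                                 class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('test_set',
                                            target_size = (64, 64),
                                            batch_size = 32,
                                            class_mode = 'categorical')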
The link below may also help with multi-class classification using a CNN: https://www.codesofinterest.com/2017/08/bottleneck-features-multi-class-classification-keras.html
Hope this helps!!!
My sample CNN code is below:
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense, Dropout
classifier = Sequential()
#1st Conv layer
classifier.add(Convolution2D(64, (9, 9), input_shape=(64, 64, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(4,4)))
#2nd Conv layer
classifier.add(Convolution2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2,2)))
classifier.add(Flatten())
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dropout(0.2))
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 2, activation = 'softmax'))
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
training_set = train_datagen.flow_from_directory('D:/regionGrowing_MLT/png_orig_imgs/Training',
target_size = (64, 64),
batch_size = 32,
class_mode = 'categorical')
test_datagen = ImageDataGenerator(rescale = 1./255)
test_set = test_datagen.flow_from_directory('D:/regionGrowing_MLT/png_orig_imgs/Test',
target_size = (64, 64),
batch_size = 32,
class_mode = 'categorical'
)
probs=classifier.fit(x = training_set, validation_data = test_set, epochs = 50)
I tried the following lines to find the ROC curve, but I get an error message:
predictions = classifier.predict(test_set)
fpr, tpr,threshold = roc_curve(test_set,predictions)
The following error message is displayed:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-52-2ea53b1ba7f1> in <module>
----> 1 fpr, tpr,threshold = roc_curve(test_set,predictions)
ValueError: Expected array-like (array or non-string sequence), got <keras.preprocessing.image.DirectoryIterator object at 0x000002D21D1B61C0>
Any suggestions would be appreciated.
Emm! From the error, I think you have to convert the Keras image iterator into arrays: roc_curve and accuracy_score expect array-like labels and scores, but test_set is a DirectoryIterator. Take the true labels from the generator (e.g. test_set.classes) and use the prediction array directly; classifier.predict already returns a NumPy array, so there is no .to_array() method. I think this will help you out.
Accuracy
fil_acc_orig = accuracy_score(test_set.classes, predictions.argmax(axis=1))
ROC Curve
fpr, tpr, threshold = roc_curve(test_set.classes, predictions[:, 1])
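For completeness, a minimal end-to-end sketch of the evaluation step, assuming the test generator was built with shuffle=False so that test_set.classes lines up with the prediction order, and that scikit-learn is installed:
from sklearn.metrics import accuracy_score, roc_curve
import numpy as np

# class probabilities, shape (n_samples, 2) because the last layer has 2 softmax units
predictions = classifier.predict(test_set)
# true integer labels in the order the generator reads the files (requires shuffle=False)
y_true = test_set.classes

acc = accuracy_score(y_true, np.argmax(predictions, axis=1))
fpr, tpr, thresholds = roc_curve(y_true, predictions[:, 1])  # scores for the positive class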
I have read the Keras documentation, but could not find any explanation for the following error.
Code:
import numpy as np
import pandas as pd
from tensorflow.keras import layers
from keras.optimizers import SGD
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.core import Dense, Activation, Dropout, Reshape, Flatten
from keras.layers import LSTM, ConvLSTM2D, BatchNormalization, RepeatVector, TimeDistributed, Concatenate
from keras.utils.np_utils import to_categorical
data = pd.read_excel('oildata.xlsx')
firstBranch = Sequential()
#firstBranch.add(Reshape((1,28,28), input_shape=(,)))
firstBranch.add(LSTM(64, input_shape=(10, 1100)))
#firstBranch.add(MaxPooling2D((2, 2), strides=(2, 2)))
firstBranch.add(Flatten())
secondBranch = Sequential()
secondBranch.add(BatchNormalization(name = 'batch_norm_0', input_shape = (1000, 10, 1, 1)))
secondBranch.add(ConvLSTM2D(name ='conv_lstm_1',
filters = 64, kernel_size = (10, 1),
padding = 'same',
return_sequences = False))
secondBranch.add(Dropout(0.10, name = 'dropout_1'))
secondBranch.add(BatchNormalization(name = 'batch_norm_1'))
# model.add(ConvLSTM2D(name ='conv_lstm_2',
# filters = 64, kernel_size = (5, 1),
# padding='same',
# return_sequences = False))
# model.add(Dropout(0.20, name = 'dropout_2'))
# model.add(BatchNormalization(name = 'batch_norm_2'))
secondBranch.add(Flatten())
secondBranch.add(RepeatVector(1000))
secondBranch.add(Reshape((1000, 10, 1, 64)))
# model.add(ConvLSTM2D(name ='conv_lstm_3',
# filters = 64, kernel_size = (10, 1),
# padding='same',
# return_sequences = True))
# model.add(Dropout(0.20, name = 'dropout_3'))
# model.add(BatchNormalization(name = 'batch_norm_3'))
secondBranch.add(ConvLSTM2D(name ='conv_lstm_4',
filters = 64, kernel_size = (5, 1),
padding='same',
return_sequences = True))
secondBranch.add(TimeDistributed(Dense(units=1, name = 'dense_1', activation = 'relu')))
secondBranch.add(Dense(units=1, name = 'dense_2'))
secondBranch.add(Flatten())
thirdBranch = Sequential()
thirdBranch.add(Reshape((1,28,28), input_shape=(784,)))
thirdBranch.add(Dense(10, activation='relu'))
thirdBranch.add(Flatten())
fourthBranch = Sequential()
#fourthBranch.add(Reshape((1,28,28), input_shape=(784,)))
fourthBranch.add(Dense(10, activation='relu'))
fourthBranch.add(Flatten())
#merged = Concatenate([firstBranch, secondBranch, thirdBranch,fourthBranch], mode='concat')
merged = Concatenate([firstBranch,secondBranch,thirdBranch,fourthBranch])
model = Sequential()
model.add(merged)
model.add(Dense(28*3, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(28, activation='relu'))
model.add(Dense(19))
model.add(Activation("softmax"))
sgd = SGD(lr=0.5, momentum=0.8, decay=0.1, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit([X,X,X,X],X, batch_size=100, verbose=2)
yPred = model.predict([X,X,X,X],X)
Error:
TypeError Traceback (most recent call last)
<ipython-input-385-11a86cc54884> in <module>
88 sgd = SGD(lr=0.5, momentum=0.0, decay=0.0, nesterov=False)
89 model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
---> 90 model.fit([X,X,X,X],X, batch_size=100, verbose=2)
91
92 yPred = model.predict([X,X,X,X],X)
...........................................
TypeError: list indices must be integers or slices, not ListWrapper
What does ListWrapper mean here? The data has been turned into frames and has to be fitted to the model.
I am building a CNN for non-image data in Keras 2.1.0 on Windows 10.
My input features are 3x12 matrices of non-negative numbers, and my output is a binary multi-label vector of length 6.
I was running into this error: expected conv2d_14_input to have shape (3, 12, 1) but got array with shape (3, 12, 6500)
Here is my code:
import tensorflow as tf
from scipy.io import loadmat
import numpy as np
from tensorflow.keras.layers import BatchNormalization
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten
reshape_channel_train = loadmat('reshape_channel_train')
reshape_channel_test = loadmat('reshape_channel_test.mat')
reshape_label_train = loadmat('reshape_label_train')
reshape_label_test = loadmat('reshape_label_test')
X_train = reshape_channel_train['store_train']
X_test = reshape_channel_test['store_test']
X_train = np.expand_dims(X_train,axis = 0)
X_test = np.expand_dims(X_test, axis = 0)
Y_train = reshape_label_train['label_train']
Y_test = reshape_label_test['label_test']
classifier = Sequential()
classifier.add(Conv2D(8, kernel_size=(3,3) , input_shape=(3, 12, 1), padding="same"))
classifier.add(BatchNormalization())
classifier.add(Activation('relu'))
classifier.add(Conv2D(8, kernel_size=(3,3), input_shape=(3, 12, 1), padding="same"))
classifier.add(BatchNormalization())
classifier.add(Activation('relu'))
classifier.add(Flatten())
classifier.add(Dense(8, activation='relu'))
classifier.add(Dense(6, activation='sigmoid'))
classifier.compile(optimizer='nadam', loss='binary_crossentropy', metrics=['accuracy'])
history = classifier.fit(X_train, Y_train, batch_size = 32, epochs=100,
validation_data=(X_test, Y_test), verbose=2)
After some searching, I used the dimension-expanding trick, but it does not seem to work:
X_train = np.expand_dims(X_train,axis = 0)
X_test = np.expand_dims(X_test, axis = 0)
The X_train variable, containing 6500 training instances, is loaded from a MATLAB .mat file with dimensions 3x12x6500, where each training instance is a 3x12 matrix.
Before using the expand_dims trick, the k-th training sample could be accessed as X_train[:,:,k], X_train[:,:,k].shape returned (3, 12), and X_train.shape returned (3, 12, 6500).
After using the expand_dims trick, X_train[:,:,k].shape returns (1, 3, 6500).
Please help me with this!
Thank you.
You are handling your data incorrectly. A Conv2D layer expects data in the format (n_samples, height, width, channels), which in your case (for X_train) is (6500, 3, 12, 1). You simply need to bring your data into that shape:
# create data as in your matlab data
n_class = 6
n_sample = 6500
X_train = np.random.uniform(0,1, (3,12,n_sample)) # (3,12,n_sample)
Y_train = tf.keras.utils.to_categorical(np.random.randint(0,n_class, n_sample)) # (n_sample, n_classes)
# reshape your data for conv2d
X_train = X_train.transpose(2,0,1) # (n_sample,3,12)
X_train = np.expand_dims(X_train, -1) # (n_sample,3,12,1)
classifier = Sequential()
classifier.add(Conv2D(8, kernel_size=(3,3) , input_shape=(3, 12, 1), padding="same"))
classifier.add(BatchNormalization())
classifier.add(Activation('relu'))
classifier.add(Conv2D(8, kernel_size=(3,3), padding="same"))
classifier.add(BatchNormalization())
classifier.add(Activation('relu'))
classifier.add(Flatten())
classifier.add(Dense(8, activation='relu'))
classifier.add(Dense(n_class, activation='softmax'))
classifier.compile(optimizer='nadam', loss='categorical_crossentropy', metrics=['accuracy'])
history = classifier.fit(X_train, Y_train, batch_size = 32, epochs=2, verbose=2)
# get predictions
pred = np.argmax(classifier.predict(X_train), 1)
I also use a softmax activation with categorical_crossentropy, which is better suited to a multi-class problem, but you can modify this. Remember to apply the same data manipulation to your test data, as sketched below.
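As a short sketch of that last point, assuming X_test is loaded from the .mat file with the same (3, 12, n_test) layout as X_train:
# apply the same reordering to the test data before predicting/evaluating
X_test = X_test.transpose(2, 0, 1)   # (n_test, 3, 12)
X_test = np.expand_dims(X_test, -1)  # (n_test, 3, 12, 1)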
You need the channel axis to come last (data_format="channels_last", which is also the Keras default). Try this:
x_train = x_train.transpose(2, 0, 1).reshape((-1, 3, 12, 1))
x_test = x_test.transpose(2, 0, 1).reshape((-1, 3, 12, 1))
and in each Conv2D layer pass Conv2D(<other args>, data_format="channels_last").
I want to combine four CNNs (pop1, pop2, pop3, and pop4) into one in Keras. My goal is to build a classifier able to assign an image to three possible outcomes. Here is how I concatenate the CNNs:
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Concatenate
from keras.models import Model
# Start With Pop1 Branch ############################################################
classifier_pop1 = Sequential()
classifier_pop1.add(Conv2D(1, (3,3), input_shape = (4009, 36, 1), activation = 'relu'))
classifier_pop1.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop1.add(Conv2D(1, (3,3), activation = 'relu'))
classifier_pop1.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop1.add(Flatten())
classifier_pop1.add(Dense(units = 300, activation = 'relu'))
classifier_pop1.add(Dense(units = 24, activation = 'relu'))
# Start With Pop2 Branch ############################################################
classifier_pop2 = Sequential()
classifier_pop2.add(Conv2D(1, (3,3), input_shape = (4009, 36, 1), activation = 'relu'))
classifier_pop2.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop2.add(Conv2D(1, (3,3), activation = 'relu'))
classifier_pop2.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop2.add(Flatten())
classifier_pop2.add(Dense(units = 300, activation = 'relu'))
classifier_pop2.add(Dense(units = 24, activation = 'relu'))
# Start With Pop3 Branch ############################################################
classifier_pop3 = Sequential()
classifier_pop3.add(Conv2D(1, (3,3), input_shape = (4009, 32, 1), activation = 'relu'))
classifier_pop3.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop3.add(Conv2D(1, (3,3), activation = 'relu'))
classifier_pop3.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop3.add(Flatten())
classifier_pop3.add(Dense(units = 300, activation = 'relu'))
classifier_pop3.add(Dense(units = 24, activation = 'relu'))
# Start With Pop4 Branch ############################################################
classifier_pop4 = Sequential()
classifier_pop4.add(Conv2D(1, (3,3), input_shape = (4009, 18, 1), activation = 'relu'))
classifier_pop4.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop4.add(Conv2D(1, (3,3), activation = 'relu'))
classifier_pop4.add(MaxPooling2D(pool_size = (3,3)))
classifier_pop4.add(Flatten())
classifier_pop4.add(Dense(units = 300, activation = 'relu'))
classifier_pop4.add(Dense(units = 24, activation = 'relu'))
# Making The Combinition ##########################################################
model_concat = Concatenate()([classifier_pop1.output,classifier_pop2.output,classifier_pop3.output,classifier_pop4.output])
model_concat = Dense(3, activation='softmax')(model_concat)
model = Model(inputs=[classifier_pop1.input,classifier_pop2.input,classifier_pop3.input,classifier_pop4.input], outputs=model_concat)
## Compiling the model
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
I suspect that the error in my code has to do with the code below. I have four folders (pop1, pop2, pop3, and pop4), each containing three subfolders (Model1, Model2, and Model3). I am not sure how to use ImageDataGenerator in this part; I just followed this other post: Combining Two CNN's
##Image preprocessing
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set_pop1 = train_datagen.flow_from_directory('./Datasets/training_set/Pop1',
target_size = (4009, 36),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=True)
test_set_pop1 = test_datagen.flow_from_directory('./Datasets/test_set/Pop1',
target_size = (4009, 36),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=False)
training_set_pop2 = train_datagen.flow_from_directory('./Datasets/training_set/Pop2',
target_size = (4009, 36),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=True)
test_set_pop2 = test_datagen.flow_from_directory('./Datasets/test_set/Pop2',
target_size = (4009, 36),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=False)
training_set_pop3 = train_datagen.flow_from_directory('./Datasets/training_set/Pop3',
target_size = (4009, 32),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=True)
test_set_pop3 = test_datagen.flow_from_directory('./Datasets/test_set/Pop3',
target_size = (4009, 32),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=False)
training_set_pop4 = train_datagen.flow_from_directory('./Datasets/training_set/Planiceps',
target_size = (4009, 18),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=True)
test_set_pop4 = test_datagen.flow_from_directory('./Datasets/test_set/Planiceps',
target_size = (4009, 18),
batch_size = 100,
class_mode = 'categorical',
color_mode='grayscale',
shuffle=False)
model.fit([training_set_pop1,training_set_pop2,training_set_pop3,training_set_pop4],
steps_per_epoch = int(2400/100),
epochs = 4,
validation_data = [test_set_pop1,test_set_pop2,test_set_pop3,test_set_pop4],
validation_steps = int(600/100))
Then, I got this error:
ValueError: Failed to find data adapter that can handle input: (<class 'list'> containing values of types {"<class 'keras_preprocessing.image.directory_iterator.DirectoryIterator'>"}), <class 'NoneType'>
Maybe you could try the following, since you can't pass the generators as a list:
model.fit(zip(training_set_pop1,training_set_pop2,training_set_pop3,training_set_pop4),
steps_per_epoch = int(2400/100),
epochs = 4,
validation_data = zip(test_set_pop1,test_set_pop2,test_set_pop3,test_set_pop4),
validation_steps = int(600/100))
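If the zipped generators do not arrive in the ([x1, x2, x3, x4], y) form that a four-input model expects, another option is a small wrapper generator. This is only a sketch, assuming all four generators use the same batch size and that their class subfolders line up so the labels agree across branches:
def multi_input_generator(g1, g2, g3, g4):
    # combine one batch from each branch; labels are taken from the first generator
    while True:
        x1, y = next(g1)
        x2, _ = next(g2)
        x3, _ = next(g3)
        x4, _ = next(g4)
        yield [x1, x2, x3, x4], y

model.fit(multi_input_generator(training_set_pop1, training_set_pop2,
                                training_set_pop3, training_set_pop4),
          steps_per_epoch = int(2400/100),
          epochs = 4,
          validation_data = multi_input_generator(test_set_pop1, test_set_pop2,
                                                  test_set_pop3, test_set_pop4),
          validation_steps = int(600/100))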
I have 123 MRI images in .nii format for training.
I also have 30 MRI images in .nii format for testing.
The images are 3D.
They are stored as shown below:
train/A 73 nii files inside A
train/B 50 nii files inside B
test/A 18 nii files inside A
test/B 12 nii files inside B
I want to classify the images into A and B.
I use Python and keras.
The code is:
from google.colab import drive
from keras.models import Sequential
from keras.layers import Conv3D
from keras.layers import MaxPooling3D
from keras.layers import Flatten
from keras.layers import Dense
drive.mount('/content/drive')
classifier = Sequential()
classifier.add(Conv3D(32, (3, 3, 3), input_shape = (110, 110, 110, 1), activation = 'relu'))
classifier.add(MaxPooling3D(pool_size = (2, 2, 2)))
classifier.add(Conv3D(32, (3, 3, 3), activation = 'relu'))
classifier.add(MaxPooling3D(pool_size = (2, 2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
train_data_dir = '/content/drive/My Drive/DTI_test/DATA/AD/train/'
test_data_dir = '/content/drive/My Drive/DTI_test/DATA/AD/test/'
nb_train_samples =123
nb_test_samples = 30
epochs = 10
batch_size = 5
classifier.fit_generator(train_data_dir,
steps_per_epoch = nb_train_samples,
epochs = epochs,
validation_data = test_data_dir,
validation_steps = nb_test_samples)
When I run the model, I get the error below.
ValueError: `validation_data` should be a tuple `(val_x, val_y, val_sample_weight)` or `(val_x, val_y)`. Found: /content/drive/My Drive/DTI_test/DATA/AD/test/
Any idea what the error is about?
Thanks in advance