activation is not a legal parameter - keras

I'm trying to optimize the hyperparameters of my LSTM network, using BayesSearchCV to tune them, and I get the error 'activation is not a legal parameter'. This is part of the error:
ValueError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_5664/3189938758.py in
Here is my code:
def create_model1(neurons=10, activation='relu', recurrent_activation='relu',
                  kernel_initializer='uniform',
                  recurrent_initializer='Orthogonal', weight_constraint=0,
                  dropout_rate=0.0, recurrent_dropout=0.0,
                  learning_rate=0.001, rho=0.9, momentum=0.0):
    model = Sequential()
    model.add(LSTM(neurons, input_shape=(train_X.shape[1], train_X.shape[2]),
                   activation=activation,
                   recurrent_activation=recurrent_activation,
                   kernel_initializer=kernel_initializer,
                   recurrent_initializer=recurrent_initializer,
                   kernel_constraint=max_norm(weight_constraint),
                   recurrent_dropout=recurrent_dropout))
    model.add(Dropout(dropout_rate))
    model.add(Dense(1, activation='sigmoid'))
    optimizer = RMSprop(learning_rate=learning_rate, rho=rho, momentum=momentum,
                        epsilon=1e-07, centered=False)
    model.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
    return model
seed = 7
np.random.seed(seed)
regressor1 = KerasRegressor(build_fn=create_model1, epochs=5, batch_size=400, verbose=0)
neurons = [5, 10]
batch_size = [ 400, 800]
epochs = [5, 10]
learning_rate = [0.001, 0.01]
rho = [0.01, 0.1]
momentum = [0.01, 0.1]
kernel_initializer = ['Orthogonal', 'uniform', 'lecun_uniform', 'normal', 'zero',
                      'glorot_normal', 'glorot_uniform',
                      'he_normal', 'he_uniform']
recurrent_initializer = ['Orthogonal', 'uniform', 'lecun_uniform', 'normal', 'zero',
                         'glorot_normal', 'glorot_uniform',
                         'he_normal', 'he_uniform']
activation = ['softmax', 'softplus', 'softsign', 'relu', 'tanh', 'sigmoid',
              'hard_sigmoid', 'linear']
recurrent_activation = ['softmax', 'softplus', 'softsign', 'relu', 'tanh', 'sigmoid',
                        'hard_sigmoid', 'linear']
weight_constraint = [1, 2]
dropout_rate = [0.0, 0.1]
recurrent_dropout = [0.0, 0.1]
params = dict(neurons=neurons, batch_size=batch_size,
              epochs=epochs, learning_rate=learning_rate, rho=rho, momentum=momentum,
              kernel_initializer=kernel_initializer,
              recurrent_initializer=recurrent_initializer, activation=activation,
              recurrent_activation=recurrent_activation, dropout_rate=dropout_rate,
              recurrent_dropout=recurrent_dropout,
              weight_constraint=weight_constraint)
Bayes = BayesSearchCV(estimator=regressor1, search_spaces=params, scoring='r2',
                      n_jobs=-1, cv=5)
Bayes_result = Bayes.fit(train_X, train_y.ravel())
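The message "... is not a legal parameter" appears to be what the legacy keras.wrappers.scikit_learn wrapper raises when a tuned parameter name is not found among the build function's arguments (plus the fit arguments such as epochs and batch_size). A hedged pre-flight check along those lines is sketched below; it also assumes the estimator handed to BayesSearchCV is the one built from create_model1 (regressor1 above):

import inspect

# Sketch of a sanity check (assumption: the legacy KerasRegressor validates
# tuned parameter names against the build function's signature plus fit()
# arguments such as epochs and batch_size).
legal = set(inspect.signature(create_model1).parameters) | {'epochs', 'batch_size'}
illegal = [key for key in params if key not in legal]
print(illegal)  # any name printed here would trigger "<name> is not a legal parameter"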

Related

Unable to find Accuracy and the ROC curve of my CNN model

My sample CNN code is below:
classifier = Sequential()
# 1st Conv layer
classifier.add(Convolution2D(64, (9, 9), input_shape=(64, 64, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(4, 4)))
# 2nd Conv layer
classifier.add(Convolution2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dropout(0.2))
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=2, activation='softmax'))
classifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
training_set = train_datagen.flow_from_directory('D:/regionGrowing_MLT/png_orig_imgs/Training',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='categorical')
test_datagen = ImageDataGenerator(rescale=1./255)
test_set = test_datagen.flow_from_directory('D:/regionGrowing_MLT/png_orig_imgs/Test',
                                            target_size=(64, 64),
                                            batch_size=32,
                                            class_mode='categorical')
probs = classifier.fit(x=training_set, validation_data=test_set, epochs=50)
I tried the following lines to find the ROC curve, but I get an error message:
predictions = classifier.predict(test_set)
fpr, tpr,threshold = roc_curve(test_set,predictions)
The following error message is displayed:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-52-2ea53b1ba7f1> in <module>
----> 1 fpr, tpr,threshold = roc_curve(test_set,predictions)
ValueError: Expected array-like (array or non-string sequence), got <keras.preprocessing.image.DirectoryIterator object at 0x000002D21D1B61C0>
Any suggestions would be appreciated.
From the error, roc_curve is being handed the keras.preprocessing image iterator itself rather than an array. You have to pull the true labels out of the iterator as an array (test_set.classes) and pass the predicted scores as an array as well. Try this; I think it will help you out.
Accuracy
fil_acc_orig = accuracy_score(test_set.classes, predictions.argmax(axis=1))
ROC Curve
fpr, tpr, threshold = roc_curve(test_set.classes, predictions[:, 1])
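A more complete sketch, under two assumptions: the test generator is created with shuffle=False so that test_set.classes lines up with the prediction order, and column 1 of the softmax output corresponds to the positive class:

import numpy as np
from sklearn.metrics import accuracy_score, roc_curve

# Assumes test_set was built with shuffle=False so labels and predictions align.
predictions = classifier.predict(test_set)
y_true = test_set.classes                    # integer labels from the DirectoryIterator
y_pred = np.argmax(predictions, axis=1)      # predicted class per image

fil_acc_orig = accuracy_score(y_true, y_pred)
fpr, tpr, threshold = roc_curve(y_true, predictions[:, 1])   # score = P(class 1)
print(fil_acc_orig)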

InvalidArgumentError: indices[13,9] = 197491 is not in [0, 189722) [[node sequential_7

MODEL CNN
# create a list of the target columns
target_cols = [y_toxic, y_severe_toxic, y_obscene, y_threat, y_insult, y_identity_hate]
preds = []
for col in target_cols:
    print('\n')
    # set the value of y
    y = col
    # create a stratified split
    X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=0.25, shuffle=True,
                                                        random_state=5, stratify=y)
    # cnn model
    model = Sequential()
    e = Embedding(189722, 100, weights=[embedding_matrix],
                  input_length=500, trainable=False)
    model.add(e)
    model.add(Conv1D(128, 3, activation='relu'))
    model.add(MaxPooling1D(3))
    model.add(Dropout(0.2))
    model.add(Conv1D(64, 3, activation='relu'))
    model.add(MaxPooling1D(3))
    model.add(Dropout(0.2))
    model.add(Conv1D(64, 3, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    # compile the model
    Adam_opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=Adam_opt, loss='binary_crossentropy', metrics=['acc'])
    early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='min')
    save_best = ModelCheckpoint('toxic.hdf', save_best_only=True,
                                monitor='val_loss', mode='min')
    history = model.fit(X_train, y_train, validation_data=(X_eval, y_eval),
                        epochs=100, verbose=1, callbacks=[early_stopping, save_best])
    # make a prediction on y (target column)
    model.load_weights(filepath='toxic.hdf')
    predictions = model.predict(X_test)
    y_preds = predictions[:, 0]
    # append the prediction to a python list
    preds.append(y_preds)
Can anyone tell me why I am getting this error? I tried reshaping the input, but I still get the same error.
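For what it's worth, the error says that a token index (197491) falls outside the Embedding layer's vocabulary range [0, 189722), i.e. input_dim is smaller than the largest index produced by the tokenizer. A minimal check along these lines (a sketch; it assumes X holds the integer-encoded, padded sequences and that embedding_matrix has one row per token index) would confirm it:

import numpy as np
from keras.layers import Embedding

# The Embedding layer can only look up indices in [0, input_dim); any index
# >= input_dim raises exactly this InvalidArgumentError.
max_index = int(np.max(X))
print(max_index)                 # if this is >= 189722, input_dim is too small

vocab_size = max_index + 1       # cover every index that appears in the data
# Note: embedding_matrix must also have vocab_size rows for weights=[...] to match.
e = Embedding(vocab_size, 100, weights=[embedding_matrix],
              input_length=500, trainable=False)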

Keras CNN model always return [0.5 0.5]

Can anyone help me with this issue? My model always returns one class. The source code is below.
I want to classify images (binary). The model trained with good accuracy. Now I need to test the model with new images; I loaded the model and tried to predict the class, but it always returns 0.
batch_size = 30
epochs = 50
IMG_HEIGHT = 224
IMG_WIDTH = 224
image_gen_train = ImageDataGenerator(
    rotation_range=15,
    width_shift_range=0.01,
    height_shift_range=0.01,
    rescale=1./255,
    shear_range=0.1,
    fill_mode='nearest',
    validation_split=0.2)
train_data_gen = image_gen_train.flow_from_directory(batch_size=batch_size,
                                                     directory=dataset_dir,
                                                     shuffle=True,
                                                     target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                     subset='training',
                                                     class_mode='binary')  # set as training data
val_data_gen = image_gen_train.flow_from_directory(batch_size=batch_size,
                                                   directory=dataset_dir,
                                                   shuffle=False,
                                                   target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                   subset='validation',
                                                   class_mode='binary')  # set as validation data
sample_training_images, _ = next(train_data_gen)
# This function will plot images in the form of a grid with 1 row and 4 columns.
def plotImages(images_arr):
    fig, axes = plt.subplots(1, 4, figsize=(20, 20))
    axes = axes.flatten()
    for img, ax in zip(images_arr, axes):
        ax.imshow(img)
        ax.axis('off')
    plt.tight_layout()
    plt.savefig('xray_new.png')
    plt.clf()

plotImages(sample_training_images[:4])
# the model
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), input_shape=(IMG_HEIGHT, IMG_WIDTH, 3), padding='same'))
model.add(BatchNormalization(momentum=0.5, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(64, kernel_size=(3, 3), padding='same'))
model.add(BatchNormalization(momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.35))
model.add(Conv2D(128, kernel_size=(3, 3), padding='same'))
model.add(BatchNormalization(momentum=0.2, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization(momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(BatchNormalization(momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.35))
model.add(Conv2D(256, kernel_size=(3, 3), padding='same'))
model.add(BatchNormalization(momentum=0.2, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(256, kernel_size=(3, 3), padding='same'))
model.add(BatchNormalization(momentum=0.1, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.35))
model.add(Flatten())
model.add(Dense(256))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# model.summary()
model.save("model.h5")
history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=train_data_gen.samples // batch_size,
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=val_data_gen.samples // batch_size, verbose=1)
But when I test the model, it always outputs one class:
filepath = 'model.h5'
model = load_model(filepath, compile=True)

def test(model, image_path):
    test_image = image.load_img(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    # predict the result
    prediction = model.predict(test_image)
    print(prediction)
    if prediction[0][0] == 1:
        my = 'Normal'
    else:
        my = 'Asthma'
    print(my)
    prediction = np.argmax(prediction)
    labels = train_data_gen.class_indices
    labels = dict((v, k) for k, v in labels.items())
    return labels[prediction]
I really appreciate your help!
I think you forgot to divide your input image by 255 in the test part.
Also, after getting the prediction value, don't check it against 1: with a sigmoid output, model.predict() returns a value between 0 and 1.
So you can change your if condition to the following:
if prediction[0][0] > 0.5:
    my = 'Normal'
else:
    my = 'Asthma'
I think this should solve your problem.
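Putting both points together, here is a hedged sketch of a corrected test helper. It assumes the same IMG_HEIGHT/IMG_WIDTH and label mapping as in the question and only adds the 1/255 rescaling and the 0.5 threshold:

import numpy as np
from keras.models import load_model
from keras.preprocessing import image

# Sketch of a corrected test helper: rescale exactly like the training
# generator (1/255) and threshold the sigmoid output at 0.5.
model = load_model('model.h5', compile=True)

def test(model, image_path, img_height=224, img_width=224):
    test_image = image.load_img(image_path, target_size=(img_height, img_width))
    test_image = image.img_to_array(test_image) / 255.0   # match the training rescale
    test_image = np.expand_dims(test_image, axis=0)
    prediction = model.predict(test_image)
    return 'Normal' if prediction[0][0] > 0.5 else 'Asthma'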

Concatenate four CNN models in keras

I want to combine four CNNs (pop1, pop2, pop3, and pop4) into one in Keras. My goal is to build a classifier able to assign an image to one of three possible outcomes. Here is how I concatenate the CNNs:
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Concatenate
from keras.models import Model

# Start With Pop1 Branch ############################################################
classifier_pop1 = Sequential()
classifier_pop1.add(Conv2D(1, (3, 3), input_shape=(4009, 36, 1), activation='relu'))
classifier_pop1.add(MaxPooling2D(pool_size=(3, 3)))
classifier_pop1.add(Conv2D(1, (3, 3), activation='relu'))
classifier_pop1.add(MaxPooling2D(pool_size=(3, 3)))
classifier_pop1.add(Flatten())
classifier_pop1.add(Dense(units=300, activation='relu'))
classifier_pop1.add(Dense(units=24, activation='relu'))
# Start With Pop2 Branch ############################################################
classifier_pop2 = Sequential()
classifier_pop2.add(Conv2D(1, (3, 3), input_shape=(4009, 36, 1), activation='relu'))
classifier_pop2.add(MaxPooling2D(pool_size=(3, 3)))
classifier_pop2.add(Conv2D(1, (3, 3), activation='relu'))
classifier_pop2.add(MaxPooling2D(pool_size=(3, 3)))
classifier_pop2.add(Flatten())
classifier_pop2.add(Dense(units=300, activation='relu'))
classifier_pop2.add(Dense(units=24, activation='relu'))
# Start With Pop3 Branch ############################################################
classifier_pop3 = Sequential()
classifier_pop3.add(Conv2D(1, (3, 3), input_shape=(4009, 32, 1), activation='relu'))
classifier_pop3.add(MaxPooling2D(pool_size=(3, 3)))
classifier_pop3.add(Conv2D(1, (3, 3), activation='relu'))
classifier_pop3.add(MaxPooling2D(pool_size=(3, 3)))
classifier_pop3.add(Flatten())
classifier_pop3.add(Dense(units=300, activation='relu'))
classifier_pop3.add(Dense(units=24, activation='relu'))
# Start With Pop4 Branch ############################################################
classifier_pop4 = Sequential()
classifier_pop4.add(Conv2D(1, (3, 3), input_shape=(4009, 18, 1), activation='relu'))
classifier_pop4.add(MaxPooling2D(pool_size=(3, 3)))
classifier_pop4.add(Conv2D(1, (3, 3), activation='relu'))
classifier_pop4.add(MaxPooling2D(pool_size=(3, 3)))
classifier_pop4.add(Flatten())
classifier_pop4.add(Dense(units=300, activation='relu'))
classifier_pop4.add(Dense(units=24, activation='relu'))
# Making The Combination ##########################################################
model_concat = Concatenate()([classifier_pop1.output, classifier_pop2.output,
                              classifier_pop3.output, classifier_pop4.output])
model_concat = Dense(3, activation='softmax')(model_concat)
model = Model(inputs=[classifier_pop1.input, classifier_pop2.input,
                      classifier_pop3.input, classifier_pop4.input],
              outputs=model_concat)
(model plot image omitted)
## Compiling the model
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
I suspect that the error in my code has to do with the code below. I have four folders (pop1, pop2, pop3, and pop4), each containing three subfolders (Model1, Model2, and Model3). I am not sure how to use ImageDataGenerator in this part. I just followed this other post: Combining Two CNN's
## Image preprocessing
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set_pop1 = train_datagen.flow_from_directory('./Datasets/training_set/Pop1',
                                                      target_size=(4009, 36),
                                                      batch_size=100,
                                                      class_mode='categorical',
                                                      color_mode='grayscale',
                                                      shuffle=True)
test_set_pop1 = test_datagen.flow_from_directory('./Datasets/test_set/Pop1',
                                                 target_size=(4009, 36),
                                                 batch_size=100,
                                                 class_mode='categorical',
                                                 color_mode='grayscale',
                                                 shuffle=False)
training_set_pop2 = train_datagen.flow_from_directory('./Datasets/training_set/Pop2',
                                                      target_size=(4009, 36),
                                                      batch_size=100,
                                                      class_mode='categorical',
                                                      color_mode='grayscale',
                                                      shuffle=True)
test_set_pop2 = test_datagen.flow_from_directory('./Datasets/test_set/Pop2',
                                                 target_size=(4009, 36),
                                                 batch_size=100,
                                                 class_mode='categorical',
                                                 color_mode='grayscale',
                                                 shuffle=False)
training_set_pop3 = train_datagen.flow_from_directory('./Datasets/training_set/Pop3',
                                                      target_size=(4009, 32),
                                                      batch_size=100,
                                                      class_mode='categorical',
                                                      color_mode='grayscale',
                                                      shuffle=True)
test_set_pop3 = test_datagen.flow_from_directory('./Datasets/test_set/Pop3',
                                                 target_size=(4009, 32),
                                                 batch_size=100,
                                                 class_mode='categorical',
                                                 color_mode='grayscale',
                                                 shuffle=False)
training_set_pop4 = train_datagen.flow_from_directory('./Datasets/training_set/Planiceps',
                                                      target_size=(4009, 18),
                                                      batch_size=100,
                                                      class_mode='categorical',
                                                      color_mode='grayscale',
                                                      shuffle=True)
test_set_pop4 = test_datagen.flow_from_directory('./Datasets/test_set/Planiceps',
                                                 target_size=(4009, 18),
                                                 batch_size=100,
                                                 class_mode='categorical',
                                                 color_mode='grayscale',
                                                 shuffle=False)
model.fit([training_set_pop1, training_set_pop2, training_set_pop3, training_set_pop4],
          steps_per_epoch=int(2400/100),
          epochs=4,
          validation_data=[test_set_pop1, test_set_pop2, test_set_pop3, test_set_pop4],
          validation_steps=int(600/100))
Then, I got this error:
ValueError: Failed to find data adapter that can handle input: (<class 'list'> containing values of types {"<class 'keras_preprocessing.image.directory_iterator.DirectoryIterator'>"}), <class 'NoneType'>
Maybe you could try the following, since you can't pass the generators as a list:
model.fit(zip(training_set_pop1, training_set_pop2, training_set_pop3, training_set_pop4),
          steps_per_epoch=int(2400/100),
          epochs=4,
          validation_data=zip(test_set_pop1, test_set_pop2, test_set_pop3, test_set_pop4),
          validation_steps=int(600/100))
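If zip on its own doesn't give Keras the ([x1, x2, x3, x4], y) batches a four-input model expects, a small wrapper generator is another option. This is only a sketch (combine_generators is a hypothetical helper; it takes the labels from the first generator, which assumes all four directories are ordered and labelled identically):

def combine_generators(gen1, gen2, gen3, gen4):
    # Yield ([x1, x2, x3, x4], y) batches; labels come from the first
    # generator, assuming the four directories share the same ordering/classes.
    while True:
        x1, y1 = next(gen1)
        x2, _ = next(gen2)
        x3, _ = next(gen3)
        x4, _ = next(gen4)
        yield [x1, x2, x3, x4], y1

train_gen = combine_generators(training_set_pop1, training_set_pop2,
                               training_set_pop3, training_set_pop4)
val_gen = combine_generators(test_set_pop1, test_set_pop2,
                             test_set_pop3, test_set_pop4)

model.fit(train_gen,
          steps_per_epoch=int(2400/100),
          epochs=4,
          validation_data=val_gen,
          validation_steps=int(600/100))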

Error when checking input: expected conv1d_11_input to have shape (6700, 1) but got array with shape (1, 1)

I am using a 1D CNN for time series data, but the following error occurs at the model.fit line.
The error is as follows:
Error when checking input: expected conv1d_11_input to have shape (6700, 1)
but got array with shape (1, 1).
Can anyone please help? The code portion is below:
dataframe = pd.read_excel("file path", header=None, delim_whitespace=True)
dataset = dataframe.values
X = dataframe.values[:, 0]
Y = dataframe.values[:, 2]
X = np.expand_dims(X, axis=1)
Y = np.expand_dims(Y, axis=1)
(X_train, X_test, Y_train, Y_test) = train_test_split(X, Y, test_size=0.33, random_state=seed)
X_train = np.reshape(X_train, (-1, X_train.shape[1], 1))
Y_train = np.reshape(Y_train, (Y_train.shape[0], 1, Y_train.shape[1]))
X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
print(X_train.shape)
print(Y_train.shape)
n_timesteps, n_features, n_outputs = X_train.shape[0], X_train.shape[1], Y_train.shape[1]
verbose, epochs, batch_size = 0, 100, 32
model = Sequential()
model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps, 1)))
model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(n_outputs, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, verbose=verbose)
Use:
n_timesteps, n_outputs = X_train.shape[1], Y_train.shape[1]
The input_shape of the first Conv1D layer should describe a single sample as (timesteps, features); the batch dimension X_train.shape[0] must not be part of it.
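As a general illustration (a sketch with hypothetical dummy data, not the asker's file): once the data is shaped as (samples, timesteps, features), the last two axes are what go into input_shape:

import numpy as np
from keras.models import Sequential
from keras.layers import Conv1D, Flatten, Dense

# Hypothetical dummy data: 16 samples, 6700 timesteps, 1 feature.
X_train = np.random.rand(16, 6700, 1)
Y_train = np.random.randint(0, 2, size=(16, 1))

n_timesteps, n_features = X_train.shape[1], X_train.shape[2]   # exclude the batch axis
model = Sequential()
model.add(Conv1D(filters=64, kernel_size=3, activation='relu',
                 input_shape=(n_timesteps, n_features)))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')
model.fit(X_train, Y_train, epochs=1, batch_size=8, verbose=0)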
