Replicate scikit-learn's MLPClassifier() in Keras

I am new to Keras. I was attempting an ML problem.
About the data:
It has 5 input features, 4 output classes, and about 26,000 records.
I first attempted it using MLPClassifier() as follows:
clf = MLPClassifier(verbose=True, tol=1e-6, batch_size=300, hidden_layer_sizes=(200,100,100,100), max_iter=500, learning_rate_init=0.095, solver='sgd', learning_rate='adaptive', alpha=0.002)
clf.fit(train, y_train)
After testing, I usually got a leaderboard (LB) score of around 99.90. To gain more flexibility over the model, I decided to implement the same model in Keras to start with, and then to modify it in an attempt to increase the LB score. I came up with the following:
model = Sequential()
model.add(Dense(200, input_dim=5, kernel_initializer='uniform', activation='relu'))
model.add(Dense(100, kernel_initializer='uniform', activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(100, kernel_initializer='uniform', activation='relu'))
model.add(Dense(100, kernel_initializer='uniform', activation='relu'))
model.add(Dense(4, kernel_initializer='uniform', activation='softmax'))
lrate = 0.095
decay = lrate/125
sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# categorical_labels: one-hot targets, e.g. keras.utils.to_categorical(y_train)
hist = model.fit(train, categorical_labels, epochs=125, batch_size=256, shuffle=True, verbose=2)
The model seems pretty similar to the MLPClassifier() model, but the LB scores were quite disappointing at around 97.
Can somebody please tell me what exactly is wrong with this model, or how we can replicate the MLPClassifier model in Keras? I think regularisation might be one of the factors that went wrong here.
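For context, MLPClassifier's alpha is an L2 penalty on the weights, so a rough Keras counterpart of alpha=0.002 would be an L2 kernel_regularizer on the hidden layers; the exact scaling between the two libraries is an assumption here. A minimal sketch:
from keras.layers import Dense
from keras import regularizers

# hypothetical hidden layer mirroring MLPClassifier's alpha=0.002 as an L2 penalty
model.add(Dense(100, kernel_initializer='uniform', activation='relu',
                kernel_regularizer=regularizers.l2(0.002)))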
Edit 1: Loss curve (plot not reproduced here).
Edit 2:
Here is the code:
#import libraries
import pandas as pd
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from sklearn.preprocessing import MinMaxScaler, scale, StandardScaler, Normalizer
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import regularizers
from keras.optimizers import SGD
#load data
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
#generic preprocessing
#encode as integer
mapping = {'Front':0, 'Right':1, 'Left':2, 'Rear':3}
train = train.replace({'DetectedCamera':mapping})
test = test.replace({'DetectedCamera':mapping})
#renaming column
train.rename(columns = {'SignFacing (Target)': 'Target'}, inplace=True)
mapping = {'Front':0, 'Left':1, 'Rear':2, 'Right':3}
train = train.replace({'Target':mapping})
#split data
y_train = train['Target']
test_id = test['Id']
train.drop(['Target','Id'], inplace=True, axis=1)
test.drop('Id',inplace=True,axis=1)
train_train, train_test, y_train_train, y_train_test = train_test_split(train, y_train)
scaler = StandardScaler()
scaler.fit(train_train)
train_train = scaler.transform(train_train)
train_test = scaler.transform(train_test)
test = scaler.transform(test)
#training and modelling
model = Sequential()
model.add(Dense(200, input_dim=5, kernel_initializer='uniform', activation = 'relu'))
model.add(Dense(100, kernel_initializer='uniform', activation='relu'))
# model.add(Dropout(0.2))
# model.add(Dense(100, init='uniform', activation='relu'))
# model.add(Dense(100, init='uniform', activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(100, kernel_initializer='uniform', activation='relu'))
model.add(Dense(100, kernel_initializer='uniform', activation='relu'))
model.add(Dense(4, kernel_initializer='uniform', activation='softmax'))
lrate = 0.095
decay = lrate/250
sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# categorical_labels / categorical_labels_test: one-hot targets,
# e.g. keras.utils.to_categorical(y_train_train) and to_categorical(y_train_test)
hist = model.fit(train_train, categorical_labels, validation_data=(train_test, categorical_labels_test), epochs=100, batch_size=256, shuffle=True, verbose=2)
Edit 3: These are the files:
train.csv
test.csv

To get a bona fide scikit-learn estimator, you can use KerasClassifier from tensorflow.keras.wrappers.scikit_learn. For example:
from sklearn.datasets import make_classification
from tensorflow import keras
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
X, y = make_classification(
    n_samples=26000, n_features=5, n_classes=4, n_informative=3, random_state=0
)

def build_fn(optimizer):
    model = Sequential()
    model.add(
        Dense(200, input_dim=5, kernel_initializer="he_normal", activation="relu")
    )
    model.add(Dense(100, kernel_initializer="he_normal", activation="relu"))
    model.add(Dense(100, kernel_initializer="he_normal", activation="relu"))
    model.add(Dense(100, kernel_initializer="he_normal", activation="relu"))
    model.add(Dense(4, kernel_initializer="he_normal", activation="softmax"))
    model.compile(
        loss="categorical_crossentropy",
        optimizer=optimizer,
        metrics=[
            keras.metrics.Precision(name="precision"),
            keras.metrics.Recall(name="recall"),
            keras.metrics.AUC(name="auc"),
        ],
    )
    return model
clf = KerasClassifier(build_fn, optimizer="rmsprop", epochs=500, batch_size=300)
clf.fit(X, y)
clf.predict(X)
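Because the wrapper behaves like any scikit-learn estimator, it also composes with the usual model-selection tools. A small sketch with an illustrative parameter grid (the grid values are assumptions, not tuned choices):
from sklearn.model_selection import GridSearchCV

# build_fn's arguments (here, optimizer) are exposed as tunable hyperparameters
grid = GridSearchCV(
    KerasClassifier(build_fn, epochs=50, batch_size=300),
    param_grid={"optimizer": ["rmsprop", "adam", "sgd"]},
    cv=3,
)
grid.fit(X, y)
print(grid.best_params_)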

Related

Why is there no improvement in a categorical data time series model?

I built a simple categorical time-series model to predict the next number of a random sequence, but the accuracy hardly moved even though I trained it for 10,000 epochs, and the validation loss started to take off after a few hundred epochs. Could anyone make suggestions for improvement? Here's the model:
import os
import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
DEVICE = 'CPU'
if DEVICE == 'CPU':
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
else:
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
print(tf.test.gpu_device_name())
TOTAL_CATALOG=4
POSSIBLE_OUTCOME_COL=4
LOOK_BACK_WINDOW=1
TRAINING_DATA_RATIO=0.8
TRAINING_EPOCHS=10000
sys.path.insert(0, '/DataScience/MyModules')
from m6data import getDrawData, series_to_supervised, Split_data, get_all_categories
def get_all_categories_local(last_combination):
    all_category = np.arange(1, last_combination+1)
    return all_category.reshape(1, all_category.shape[0])
All_categories=get_all_categories_local(TOTAL_CATALOG)
data_sequence = [1,1,2,4,2,3,1,2,3,3,4,1,2,3,4,2,2,3,1,3]
raw_df = pd.DataFrame(data_sequence, columns=['NE'])
values = raw_df.values
# 05-Apr-2022: One-Hot Encoding
oh_encoder = OneHotEncoder(categories=All_categories, sparse=False)
encoded_input = oh_encoder.fit_transform(values)
FEATURES = encoded_input.shape[1]
POSSIBLE_OUTCOME_COL = FEATURES
draw_reframe = series_to_supervised(encoded_input, LOOK_BACK_WINDOW,1)
train, test = Split_data(draw_reframe, TRAINING_DATA_RATIO)
# Total input = all possible One-Hot Encoding outcome * number of look-back samples.
ALL_INPUT = POSSIBLE_OUTCOME_COL * LOOK_BACK_WINDOW
# split into input and outputs
train_X, train_y = train.iloc[:,:ALL_INPUT], train.iloc[:,ALL_INPUT:]
test_X, test_y = test.iloc[:,:ALL_INPUT], test.iloc[:,ALL_INPUT:]
train_X = train_X.values.reshape((train_X.shape[0], LOOK_BACK_WINDOW , FEATURES))
test_X = test_X.values.reshape((test_X.shape[0], LOOK_BACK_WINDOW, FEATURES))
print(train_X.shape, train_y.shape)
print(test_X.shape, test_y.shape)
def create_model():
    model = Sequential()
    model.add(LSTM(10,
                   return_sequences=False,
                   input_shape=(train_X.shape[1], train_X.shape[2]),
                   activation='relu'))
    #model.add(LSTM(20))
    model.add(Dense(units=train_y.shape[1], activation='softmax'))
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.00005),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
model=create_model()
history = model.fit(
    train_X, train_y,
    epochs=TRAINING_EPOCHS,
    batch_size=8,
    validation_data=(test_X, test_y),
    verbose=1,
)
Here are the plots of accuracies and losses (red = training, blue = validation); the accuracy and loss plots are not reproduced here.
Thank you in advance for any suggestions.
Update (13-Jun-2022)
I changed my model to the following:
# additional imports needed for this version:
# from tensorflow.keras import regularizers
# from tensorflow.keras.layers import Dropout, BatchNormalization
# from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
def create_model():
    model = Sequential()
    model.add(LSTM(50,
                   return_sequences=True,
                   input_shape=(train_X.shape[1], train_X.shape[2]),
                   activation='relu'))
    model.add(LSTM(units=1000, kernel_regularizer=regularizers.l1(0.05), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(units=1000, kernel_regularizer=regularizers.l1(0.05), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(units=1000, kernel_regularizer=regularizers.l1(0.05), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(units=1000, kernel_regularizer=regularizers.l1(0.05), activation='relu'))
    model.add(Dropout(0.3))
    model.add(BatchNormalization())
    model.add(Dense(1000))
    model.add(Dense(units=train_y.shape[1], activation='softmax'))
    model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=1e-2, nesterov=True),
                  # tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                              patience=20, min_lr=1e-10)
early_stop = EarlyStopping(monitor='loss', patience=100)
history = model.fit(
    train_X, train_y,
    epochs=TRAINING_EPOCHS,
    batch_size=16,
    validation_split=0.1,
    validation_data=(test_X, test_y),
    verbose=1,
    shuffle=False,
    callbacks=[reduce_lr, early_stop],
)
Accuracy bounced around and val_accuracy stayed at zero the whole time. The loss and val_loss were almost identical and dropped together.
Can anyone advise what I can do in this scenario?

Get confusion matrix from a Keras model

I have the following NN model using Keras:
import numpy as np
from keras import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
path = 'pima-indians-diabetes.data.csv'
dataset = np.loadtxt(path, delimiter=",")
X = dataset[:,0:8]
Y = dataset[:,8]
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
model = Sequential()
model.add(Dense(16, input_dim=8, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=100, batch_size=16, validation_data=(X_test, y_test))
Kindly, is it possible to extract the confusion matrix? How?
You can use scikit-learn:
from sklearn.metrics import confusion_matrix

y_pred = model.predict(X_test)
cm = confusion_matrix(y_test, np.rint(y_pred))
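For a multi-class model with a softmax output, np.argmax(y_pred, axis=1) would take the place of np.rint(y_pred) here.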
It can be done using TensorFlow (which is almost Keras =)).
You start by making predictions on your test set with your trained model:
predictions = model.predict(x_test)
Then you can import TensorFlow and use its confusion_matrix method as follows.
import tensorflow as tf

conf_matrix = tf.math.confusion_matrix(labels=y_test,
                                       predictions=predictions)
More information in the TensorFlow documentation.
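One caveat: tf.math.confusion_matrix expects integer class labels, and this model's output layer is a sigmoid, so the raw probabilities should be converted to hard predictions first. A minimal sketch, assuming a 0.5 cutoff:
# threshold the sigmoid probabilities into hard 0/1 class predictions
predicted_classes = tf.cast(predictions > 0.5, tf.int32)
conf_matrix = tf.math.confusion_matrix(labels=y_test,
                                       predictions=predicted_classes)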

cross_val_score's n_jobs = -1 argument not working in python 3.6

I was trying to improve the accuracy of, and evaluate, my artificial neural network, but I encountered an issue: n_jobs=-1 of cross_val_score was not working. I am using TensorFlow on my CPU, and the error was:
BrokenProcessPool: A task has failed to un-serialize. Please ensure
that the arguments of the function are all picklable.
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
def build_classifier():
    classifier = Sequential()
    classifier.add(Dense(6, kernel_initializer='uniform', activation='relu', input_dim=11))
    classifier.add(Dense(6, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return classifier

classifier = KerasClassifier(build_fn=build_classifier, batch_size=10, nb_epoch=100)
accuracies = cross_val_score(estimator=classifier, X=x_train, y=y_train, cv=10, n_jobs=-1)

ValueError: ('Some keys in session_kwargs are not supported at this time: %s', dict_keys(['class_mode']))

from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.datasets import mnist
import numpy
model = Sequential()
model.add(Dense(500,input_shape=(784,))) # 28*28=784
model.add(Activation('tanh')) # tanh
model.add(Dropout(0.5)) # 50% dropout
model.add(Dense(500)) # 500 units
model.add(Activation('tanh'))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, class_mode='categorical')
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1] * X_train.shape[2])
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1] * X_test.shape[2])
Y_train = (numpy.arange(10) == y_train[:, None]).astype(int)
Y_test = (numpy.arange(10) == y_test[:, None]).astype(int)
model.fit(X_train,Y_train,batch_size=200,epochs=50,shuffle=True,verbose=0,validation_split=0.3)
model.evaluate(X_test, Y_test, batch_size=200, verbose=0)
print("test set")
scores = model.evaluate(X_test,Y_test,batch_size=200,verbose=0)
print("")
print("The test loss is %f" % scores)
result = model.predict(X_test,batch_size=200,verbose=0)
I found this post, Error when profiling keras models, which modifies the TensorFlow library.
So I checked the Keras library code from the link, but I could not find anything like ['class_mode'] to modify in the Keras library. Next, I tried running the code after re-installing Keras, but even that didn't work.
I used Anaconda to install Keras; maybe I installed it wrong?
Can anyone suggest a solution for this?
Remove class_mode='categorical' and it runs; class_mode is a Keras 1 argument that model.compile() no longer accepts in Keras 2.
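A minimal sketch of the fixed call, leaving the rest of the script unchanged:
model.compile(loss='categorical_crossentropy', optimizer=sgd)
Since no metrics are specified, model.evaluate() still returns just the scalar loss, so the later print of scores continues to work.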

how to get predicted classes when using ImageDataGenerator

I am making a CNN model for image classification (I have two classes). I am using ImageDataGenerator for data preparation and model.fit_generator for training; for testing I am using model.evaluate_generator. For the confusion matrix I am using sklearn.metrics.confusion_matrix, which requires actual and predicted classes, and I have the actual classes of my test data. For prediction I am using model.predict_generator, but I don't know how to get the predicted classes. Generally I use model.predict_classes, but it does not work with validation_generator. My code looks like the following:
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from sklearn.metrics import confusion_matrix
model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=(50,50,1),activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(32, (2, 2),activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, (2, 2),activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(64,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
batch_size = 10
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    'data/train_data',
    target_size=(50, 50),
    batch_size=batch_size,
    class_mode='binary', color_mode='grayscale')
validation_generator = test_datagen.flow_from_directory(
    'data/test_data',
    target_size=(50, 50),
    batch_size=batch_size, class_mode='binary', color_mode='grayscale')
model.fit_generator(train_generator, steps_per_epoch=542, epochs=10)
print(model.evaluate_generator(validation_generator))
I calculate the confusion matrix and other parameters with the following code (continuing from the code above), but I think it is wrong, because the validation accuracy computed from the TP/TN formula does not match the one from model.evaluate_generator:
predict1_data=model.predict_generator(validation_generator)
predict_data=np.round(predict1_data)
print(train_generator.class_indices)
print(validation_generator.class_indices)
actual1=np.zeros(21)
actual1[13:21]=1
actual=np.float32(actual1)
cm = confusion_matrix(actual,predict_data)
TN=cm[0,0]
FP=cm[0,1]
FN=cm[1,0]
TP=cm[1,1]
SEN=TP/(TP+FN);print('SEN=',SEN)
SPE=TN/(TN+FP);print('SPE=',SPE)
ACC=(TP+TN)/(TP+TN+FP+FN);print('ACC=',ACC)
I'm trying to figure out the same thing. The closest I've come is:
test_datagen = ImageDataGenerator(rescale=1. / 255)

# preprocess data for testing (resize) and create batches
validation_generator = test_datagen.flow_from_directory(
    'data/test/',
    target_size=(img_width, img_height),
    batch_size=16,
    class_mode=None,
    shuffle=False,
)
print(validation_generator.class_indices)
print(model.predict_generator(validation_generator))
The probabilities this outputs are for class 1 (not for class 0).
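Building on that, a minimal sketch of turning those probabilities into predicted classes for sklearn's confusion_matrix; it assumes shuffle=False (so that validation_generator.classes lines up with the prediction order) and a 0.5 cutoff:
import numpy as np
from sklearn.metrics import confusion_matrix

probs = model.predict_generator(validation_generator)  # shape (n_samples, 1)
predicted_classes = (probs > 0.5).astype(int).ravel()  # threshold the sigmoid outputs
true_classes = validation_generator.classes            # labels inferred from the directory layout
print(confusion_matrix(true_classes, predicted_classes))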
