How to define an MLPRegressor with two hidden layers for RandomizedSearchCV - scikit-learn

I am trying to figure out how to define the parameter grid for an MLPRegressor with two hidden layers for input into RandomizedSearchCV in scikit-learn.
Below is what I have been trialling. So, how can I randomise the hidden_layer_sizes for RandomizedSearchCV?
import numpy as np
import pandas as pd
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import load_boston
from sklearn.model_selection import RandomizedSearchCV
boston = load_boston()
X = boston.data
y = boston.target
params = {'activation': ['logistic', 'relu'],
          'learning_rate': ['adaptive'],
          'alpha': np.logspace(0.0001, 100, 10),
          'max_iter': [1000],
          'hidden_layer_sizes': [(10, 10), (30, 10), (50, 20), (60, 30)]}
reg = MLPRegressor()
random_search = RandomizedSearchCV(estimator=reg,
                                   param_distributions=params,
                                   n_iter=10,
                                   scoring='neg_mean_squared_error',
                                   cv=3,
                                   n_jobs=-3,
                                   pre_dispatch='2*n_jobs',
                                   return_train_score=True)
random_search.fit(X,y)
df = pd.DataFrame(random_search.cv_results_)
df['train_RMSE'] = np.sqrt(-df['mean_train_score'])
df['test_RMSE'] = np.sqrt(-df['mean_test_score'])
print(random_search.best_params_)
PS: If anyone has any comments on my selection of parameters, please feel free to share them. The parameters are to be used for a regression problem with up to 7 inputs.
Any ideas?

Yes, you did it right. In addition, you can set the verbose level to see the hyperparameters used in each cross-validation fit, e.g. [CV] activation=tanh, alpha=1e+100, hidden_layer_sizes=(30, 10), score=-4.180054117738231, total= 2.7s.
I chose GridSearchCV instead of RandomizedSearchCV to find the best parameter set, and on my machine it took about five minutes.
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import load_boston
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import explained_variance_score
X, y = load_boston(return_X_y=True)
# Split data for final evaluation:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, random_state=42)
# Define base regressor:
base_reg = MLPRegressor(learning_rate='adaptive', max_iter=5000, random_state=42)
# Define search space:
params = {
    'activation': ['logistic', 'relu', 'tanh'],  # <-- added 'tanh' as third non-linear activation function
    'alpha': np.logspace(0.0001, 100, 10),
    'hidden_layer_sizes': [
        (10, 10), (20, 10), (30, 10),
        (40, 10), (90, 10), (90, 30, 10)  # <-- added more neurons or layers
    ]
}
# Find best hyper params and then refit on all training data:
reg = GridSearchCV(estimator=base_reg, param_grid=params,
                   n_jobs=8, cv=3, refit=True, verbose=5)  # <-- verbose=5
reg.fit(X_train, y_train)
print(reg.best_estimator_)
# MLPRegressor(activation='logistic', alpha=1.0002302850208247,
# batch_size='auto', beta_1=0.9, beta_2=0.999, early_stopping=False,
# epsilon=1e-08, hidden_layer_sizes=(30, 10),
# learning_rate='adaptive', learning_rate_init=0.001, max_iter=5000,
# momentum=0.9, n_iter_no_change=10, nesterovs_momentum=True,
# power_t=0.5, random_state=42, shuffle=True, solver='adam',
# tol=0.0001, validation_fraction=0.1, verbose=False,
# warm_start=False)
print(reg.best_params_)
# {'activation': 'logistic', 'alpha': 1.0002302850208247, 'hidden_layer_sizes': (30, 10)}
# Evaluate on training data:
err = explained_variance_score(y_train, reg.predict(X_train))
print(err)  # 0.8936815412058757
# Evaluate on unseen test data:
err = explained_variance_score(y_test, reg.predict(X_test))
print(err)  # 0.801353064635174

Related

SVR hyperparameter selection and visualisation

I am just a beginner in data analysis. I want to use the cross-validation grid search method to determine the parameters gamma and C of the Radial Basis Function (RBF) kernel SVM. I don't know where I should put my data in this code, and what data I should use (the training or the target data).
For SVR:
import numpy as np
import pandas as pd
from math import sqrt
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error,explained_variance_score
from TwoStageTrAdaBoostR2 import TwoStageTrAdaBoostR2 # import the two-stage algorithm
from sklearn import preprocessing
from sklearn import svm
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from matplotlib.colors import Normalize
from sklearn.svm import SVC
# Data import (source)
source= pd.read_csv(sourcedata)
# Data import (target)
data= pd.read_csv(targetdata)
# Sample Size
datatrain = data.sample(n=60, random_state=1)
datatest = data[~data.index.isin(datatrain.index)]
# Merge training set data (source and target)
train = pd.concat([source, datatrain], sort=False)
train.reset_index(inplace=True, drop=True)
datatest.reset_index(inplace=True, drop=True)
# Variable input
X_train, y_train = train[['x1', 'x2']].values, train['y'].values
X_test, y_test = datatest[['x1', 'x2']].values, datatest['y'].values
# Parameter setting
#sample_size = [n_source1+n_source2+n_source3+n_source4+n_source5, n_target_train]
n_estimators = 100
steps = 8
fold = 5
random_state = np.random.RandomState(1)
sample_size = [350, 60]
#1 twostage tradaboost.r2
regr_1 = TwoStageTrAdaBoostR2(SVR(C=50, gamma='auto'),
                              n_estimators=n_estimators, sample_size=sample_size,
                              steps=steps, fold=fold,
                              random_state=random_state)
regr_1.fit(X_train, y_train)
y_pred1 = regr_1.predict(X_test)
print("MSE of regular two stage trAdaboostR2--model1:",sqrt(mean_squared_error(y_test, y_pred1)))
#Plot the results
plt.figure()
plt.scatter(y_test, y_test-y_pred1, c="black", label="TwoStageTrAdaBoostR2_model1", s=10)
plt.xlabel("CAR")
plt.ylabel("Err")
plt.title("Two-stage Transfer Learning Boosted Decision Tree Regression", loc='left', fontsize=12, fontweight=0, color="orange")
plt.legend()
plt.show()
For the cross-validation grid search method (best parameters):
# Cross validation grid search (best parameters)
parameter_candidates = [
    {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['linear']},
    {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
]
svr = svm.SVR()
clf = GridSearchCV(svr, parameter_candidates, cv=5, n_jobs=-1)
clf.fit(X_train, y_train)
print('Best score for data:', clf.best_score_)
print('Best C:',clf.best_estimator_.C)
print('Best Kernel:',clf.best_estimator_.kernel)
print('Best Gamma:',clf.best_estimator_.gamma)
For visualization of parameter effects:
c_range = np.logspace(-2, 2, 4)
gamma_range = np.logspace(-2, 2, 5)
tuned_parameters = [{'kernel': ['rbf'], 'C': c_range, 'gamma': gamma_range},
                    {'kernel': ['linear'], 'C': c_range, 'gamma': gamma_range}]
svr = svm.SVR()
clf = GridSearchCV(svr, param_grid=tuned_parameters, verbose=2, n_jobs=-1,
                   scoring='explained_variance')
clf.fit(X_train, y_train)
print('Best score for data:', clf.best_score_)
print('Best C:',clf.best_estimator_.C)
print('Best Kernel:',clf.best_estimator_.kernel)
print('Best Gamma:',clf.best_estimator_.gamma)
# scores for rbf kernel
n = len(gamma_range)*len(c_range)
scores_rbf = clf.cv_results_['mean_test_score'][:n].reshape(len(gamma_range),
                                                            len(c_range))
# scores for linear kernel
scores_linear = clf.cv_results_['mean_test_score'][n:].reshape(len(gamma_range),
                                                               len(c_range))
class MidpointNormalize(Normalize):
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores_rbf, interpolation='nearest', cmap=plt.cm.hot,
           norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(c_range)), c_range)
plt.title('Validation accuracy')
plt.show()
When I used this code, I got the following output heatmap plot.
But I am trying to get a heatmap like this one:
The following code with some typical regression data should work all the way through:
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV,train_test_split
from matplotlib.colors import Normalize
class MidpointNormalize(Normalize):
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
X, y = datasets.load_boston(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X,y)
# Cross validation grid search (best parameters)
c_range = np.logspace(-0, 4, 8)
gamma_range = np.logspace(-4, 0, 8)
tuned_parameters = [{'kernel': ['rbf'], 'C': c_range, 'gamma': gamma_range},
                    {'kernel': ['linear'], 'C': c_range, 'gamma': gamma_range}]
svr = svm.SVR()
clf = GridSearchCV(svr, param_grid=tuned_parameters, verbose=20, n_jobs=-4, cv=4,
                   scoring='explained_variance')
clf.fit(X_train, y_train)
print('Best score for data:', clf.best_score_)
print('Best C:',clf.best_estimator_.C)
print('Best Kernel:',clf.best_estimator_.kernel)
print('Best Gamma:',clf.best_estimator_.gamma)
# scores for rbf kernel
n = len(gamma_range)*len(c_range)
scores_rbf = clf.cv_results_['mean_test_score'][:n].reshape(len(gamma_range),
                                                            len(c_range))
# scores for linear kernel
scores_linear = clf.cv_results_['mean_test_score'][n:].reshape(len(gamma_range),
                                                               len(c_range))
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores_rbf, interpolation='nearest', cmap=plt.cm.hot,
           norm=MidpointNormalize(vmin=-.2, midpoint=0.5))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)),
           [np.format_float_scientific(i, 1) for i in gamma_range], rotation=45)
plt.yticks(np.arange(len(c_range)),
           [np.format_float_scientific(i) for i in c_range])
plt.title('Validation accuracy')
plt.show()
The granularity of the grid is very low, but otherwise it takes some time to run. Also, the limits of the grid will need to be more educated than the ones I chose.
I'm not sure why you get the error you get, but I kept things simple and instantiated the SVR once in my snippet so you can see how it works. I've also used different lengths for the C and gamma arrays; that's just to show how these parameters are carried through. Sometimes I find that if everything has the same length it is difficult to see which parameter is responsible for what.
The final plot looks like that, but this depends heavily on the range of the grid, its granularity and the dataset that you are working with. Also note that I changed the parameters of the MidpointNormalize class you provided.
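If the slicing of cv_results_ feels brittle (it relies on all the rbf candidates coming before the linear ones), the same grids can be pulled out by filtering on the kernel column instead. A minimal sketch of that idea, assuming clf has been fitted as above:
import pandas as pd

results = pd.DataFrame(clf.cv_results_)
rbf = results[results['param_kernel'] == 'rbf']
# rows: gamma, columns: C -- the same matrix the heatmap visualises
scores_rbf = rbf.pivot_table(index='param_gamma', columns='param_C',
                             values='mean_test_score')
print(scores_rbf)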

Make new predictions with a trained ANN - Keras

I'm new to the world of ANNs and I was wondering how to pass a new dataset, possibly a new CSV, into a model that I have already trained. I understand, for instance, that:
model.predict()
only accepts arrays, and those arrays have to be of the same shape in order to work. So, how do I pass a completely new CSV to generate predictions?
Here is my code. I know it will probably be a mess, but I'm working on it.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Prova_1.csv')
dataset = dataset[np.isfinite(dataset['ID'])]
X = dataset.iloc[:, 3:6].values
y = dataset.iloc[:, 6].values
# Encoding
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 0] = labelencoder_X_1.fit_transform(X[:, 0])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features = [2])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
# Splitting
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#ANN
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(units = 27, kernel_initializer = 'uniform', activation = 'relu', input_dim = 55))
# Adding the second hidden layer
classifier.add(Dense(units = 27, kernel_initializer = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 1, epochs = 100)
# Prediction
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
You need to apply all the preprocessing operations you used on the training data to the new data, because the model was trained on that input shape and those input features. Make sure the shape of your new input is the same as before, otherwise it will not work.
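For instance, a minimal sketch of what that looks like, reusing the fitted objects from the code above (the file name new_data.csv is a placeholder; the new CSV is assumed to have the same columns as Prova_1.csv):
# Load the new data the same way as the training data
new_dataset = pd.read_csv('new_data.csv')
new_dataset = new_dataset[np.isfinite(new_dataset['ID'])]
X_new = new_dataset.iloc[:, 3:6].values

# transform (NOT fit_transform) with the encoders fitted on the training data;
# categories unseen during training would raise an error here
X_new[:, 0] = labelencoder_X_1.transform(X_new[:, 0])
X_new[:, 2] = labelencoder_X_2.transform(X_new[:, 2])
X_new = onehotencoder.transform(X_new).toarray()
X_new = X_new[:, 1:]

# scale with the scaler fitted on the training data
X_new = sc.transform(X_new)

# predict with the trained model
new_pred = classifier.predict(X_new)
new_pred = (new_pred > 0.5)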

keras gridSearchCV on sklearn One hot Encoded Data

The problem with this code is that I am giving the classifier one-hot encoded data. That is, X_train, X_test, y_train and y_test are one-hot encoded, but the classifier is predicting the outputs y_pred_test and y_pred_train in numerical form (which I think is incorrect as well). Can anyone help with this?
This is a dummy example, so there is no concern over the low accuracy; I just want to know why it's predicting the output in a form that is not one-hot encoded.
Thanks!
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
x=pd.DataFrame()
x['names']= np.arange(1,10)
x['Age'] = np.arange(1,10)
y=pd.DataFrame()
y['target'] = np.arange(1,10)
from sklearn.preprocessing import OneHotEncoder, Normalizer
ohX= OneHotEncoder()
x_enc = ohX.fit_transform(x).toarray()
ohY = OneHotEncoder()
y_enc = ohY.fit_transform(y).toarray()
print (x_enc)
print("____")
print (y_enc)
import keras
from keras import regularizers
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.models import load_model
from keras.layers.advanced_activations import LeakyReLU
marker="-------"
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
def create_model(learn_rate=0.001):
    model = Sequential()
    model.add(Dense(units=15, input_dim=18, kernel_initializer='normal', activation="tanh"))
    model.add(Dense(units=9, activation="softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy'])
    return model

if __name__ == "__main__":
    X_train, X_test, y_train, y_test = train_test_split(x_enc, y_enc, test_size=0.33, random_state=42)
    print("\n\n", marker*5, " Classification\nX_train shape is: ", X_train.shape, "\tX_test shape is:", X_test.shape)
    print("\ny_train shape is: ", y_train.shape, "\t y_test shape is:", y_test.shape, "\n\n")
    norm = Normalizer()
    # model
    X_train = norm.fit_transform(X_train)
    X_test = norm.transform(X_test)
    earlyStopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')
    model = KerasClassifier(build_fn=create_model, verbose=0)
    fit_params = {'callbacks': [earlyStopping]}
    # grid
    # batch_size = [50, 100, 200, 300, 400]
    epochs = [2, 5]
    learn_rate = [0.1, 0.001]
    param_grid = dict(epochs=epochs, learn_rate=learn_rate)
    grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
    # Predicting
    print(np.shape(X_train), np.shape(y_train))
    y_train = np.reshape(y_train, (-1, np.shape(y_train)[1]))
    print("y_train shape after reshaping", np.shape(y_train))
    grid_result = grid.fit(X_train, y_train, callbacks=[earlyStopping])
    print("grid score using params: ", grid_result.best_score_, " ", grid_result.best_params_)
    # scores
    print("SCORES")
    print(grid_result.score(X_test, y_test))
    # summarize results
    # print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
    # means = grid_result.cv_results_['mean_test_score']
    # stds = grid_result.cv_results_['std_test_score']
    # params = grid_result.cv_results_['params']
    # for mean, stdev, param in zip(means, stds, params):
    #     print("%f (%f) with: %r" % (mean, stdev, param))
    print("\n\n")
    print("y_test is", y_test)
    y_hat_test = grid.predict(X_test)
    y_hat_train = grid.predict(X_train)
    print("y_hat_test is ", y_hat_test)

Keras Conv1D for Time Series

I am just a novice in the area of deep learning.
I made my first basic attempt with Keras Conv1D. I'm not sure what I did and whether I did it right. My input data is simply total sales per week (313 weeks in total), for stores across the US, with a time step of 1.
Here is my code:
from pandas import read_csv
import matplotlib.pyplot as plt
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back):
        a = dataset[i:(i+look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
seed = 7
numpy.random.seed(seed)
dataframe = read_csv('D:/MIS793/Dataset/Academic Dataset External 2/Python scripts/totalsale _byweek.csv', usecols=[1], engine='python')
plt.plot(dataframe)
plt.show()
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = trainX.reshape(trainX.shape[0], trainX.shape[1], 1).astype('float32')
testX = testX.reshape(testX.shape[0], testX.shape[1], 1).astype('float32')
model = Sequential()
model.add(Conv1D(filters=10, kernel_size=1, padding='same', strides=1, activation='relu',input_shape=(1,1)))
model.add(MaxPooling1D(pool_size=1))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
print(model.summary())
model.fit(trainX, trainY, validation_data=(testX, testY), epochs=10, batch_size=100)
scores = model.evaluate(testX, testY, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
I'm not sure about a few things here:
the reshaping of trainX and testX;
the value of kernel_size and input_shape.
My idea here is that it's just one vector of sales values. 10 filters, each of size 1, move from one value to another. The input shape is of the format (time steps, dimensions).
I only got an accuracy of 10.91%! So my first question is whether I am feeding in the right parameters.
Thanks
ASC
With model.metrics_names you can get the labels of your scores variable.
In your case it will be ['loss', 'mean_absolute_error'].
So what you are printing is not the accuracy, but the mae, multiplied by 100.
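For example, a quick way to print each score next to its label, using the scores variable from the code above:
for name, value in zip(model.metrics_names, scores):
    print("%s: %.4f" % (name, value))   # prints the loss and the MAE with their names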
I tried using accuracy instead of MAE. However, I got an accuracy of 0%. Just wondering, since this is about predicting numerical values, should I really use accuracy? Here is my latest code.
from pandas import read_csv
import matplotlib.pyplot as plt
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back):
        a = dataset[i:(i+look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
dataframe = read_csv('D:/MIS793/Dataset/Academic Dataset External 2/Python scripts/totalsale _byweek.csv', usecols=[1], engine='python')
plt.plot(dataframe)
plt.show()
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = trainX.reshape(trainX.shape[0], trainX.shape[1],1).astype('float32')
testX = testX.reshape(testX.shape[0], testX.shape[1],1).astype('float32')
model = Sequential()
model.add(Conv1D(filters=20, kernel_size=1, padding='same', strides=1, activation='relu',input_shape=(1,1)))
model.add(MaxPooling1D(pool_size=1))
model.add(Conv1D(filters=10, kernel_size=1, padding='same', strides=1, activation='relu'))
model.add(MaxPooling1D(pool_size=1))
model.add(Flatten())
model.add(Dense(4, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(trainX, trainY, validation_data=(testX, testY), epochs=10, batch_size=100)
scores = model.evaluate(testX, testY, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
Or should I go with MAE?
If I go with MAE, my scores will look like this:
[0.12740663779013364, 0.31208728355111426]
The first one is the loss and the second one is the MAE. Isn't that a better metric in this case?
The final line would then be:
print("MAE: %.2f" % (scores[1]))
Thanks
Anindya
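A hedged side note (not from the thread): for a numerical target, accuracy is not a meaningful metric, so MAE (or MSE/RMSE) is the right choice here. Because the series was scaled to [0, 1] with MinMaxScaler, the reported MAE is in scaled units; to report the error in the original sales units you can invert the scaling first, roughly like this:
pred = model.predict(testX)
pred_orig = scaler.inverse_transform(pred)                     # back to sales units
testY_orig = scaler.inverse_transform(testY.reshape(-1, 1))
mae_orig = numpy.mean(numpy.abs(testY_orig - pred_orig))
print("MAE in original units: %.2f" % mae_orig)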

Try multiple estimators in one grid search

Is there a way we can grid-search multiple estimators at a time in scikit-learn or any other library? For example, can we pass an SVM and a Random Forest in one grid search?
Yes. Example:
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import GridSearchCV

pipeline = Pipeline([
    ('vect', CountVectorizer()),
    ('clf', SGDClassifier()),
])
parameters = [
    {
        'vect__max_df': (0.5, 0.75, 1.0),
        'clf': (SGDClassifier(),),
        'clf__alpha': (0.00001, 0.000001),
        'clf__penalty': ('l2', 'elasticnet'),
        'clf__n_iter': (10, 50, 80),
    }, {
        'vect__max_df': (0.5, 0.75, 1.0),
        'clf': (LinearSVC(),),
        'clf__C': (0.01, 0.5, 1.0)
    }
]
grid_search = GridSearchCV(pipeline, parameters)
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV

class DummyEstimator(BaseEstimator):
    def fit(self): pass
    def score(self): pass

# Create a pipeline
pipe = Pipeline([('clf', DummyEstimator())])  # Placeholder Estimator

# Candidate learning algorithms and their hyperparameters
search_space = [{'clf': [LogisticRegression()],        # Actual Estimator
                 'clf__penalty': ['l1', 'l2'],
                 'clf__C': np.logspace(0, 4, 10)},
                {'clf': [DecisionTreeClassifier()],    # Actual Estimator
                 'clf__criterion': ['gini', 'entropy']}]

# Create grid search
gs = GridSearchCV(pipe, search_space)
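A minimal usage sketch for the placeholder approach (my own addition; X and y stand for any existing classification dataset):
gs.fit(X, y)
# The winning entry of best_params_ contains the chosen estimator object itself,
# e.g. {'clf': LogisticRegression(...), 'clf__C': ..., 'clf__penalty': ...}
print(gs.best_params_)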
I think what you were looking for is this:
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split

# X, y: your documents and labels (not defined in this snippet)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

names = [
    "Naive Bayes",
    "Linear SVM",
    "Logistic Regression",
    "Random Forest",
    "Multilayer Perceptron"
]

classifiers = [
    MultinomialNB(),
    LinearSVC(),
    LogisticRegression(),
    RandomForestClassifier(),
    MLPClassifier()
]

parameters = [
    {'vect__ngram_range': [(1, 1), (1, 2)],
     'clf__alpha': (1e-2, 1e-3)},
    {'vect__ngram_range': [(1, 1), (1, 2)],
     'clf__C': (np.logspace(-5, 1, 5))},
    {'vect__ngram_range': [(1, 1), (1, 2)],
     'clf__C': (np.logspace(-5, 1, 5))},
    {'vect__ngram_range': [(1, 1), (1, 2)],
     'clf__max_depth': (1, 2)},
    {'vect__ngram_range': [(1, 1), (1, 2)],
     'clf__alpha': (1e-2, 1e-3)}
]

for name, classifier, params in zip(names, classifiers, parameters):
    clf_pipe = Pipeline([
        ('vect', TfidfVectorizer(stop_words='english')),
        ('clf', classifier),
    ])
    gs_clf = GridSearchCV(clf_pipe, param_grid=params, n_jobs=-1)
    clf = gs_clf.fit(X_train, y_train)
    score = clf.score(X_test, y_test)
    print("{} score: {}".format(name, score))
You can use TransformedTargetRegressor.
This class is designed for transforming the target variable before fitting, taking a regressor and a set of transformers as parameters. But you may give no transformer, in which case the identity transformer (i.e. no transformation) is applied. Since the regressor is a class parameter, we can change it with grid search objects.
import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import GridSearchCV
Y = np.array([1,2,3,4,5,6,7,8,9,10])
X = np.array([0,1,3,5,3,5,7,9,8,9]).reshape((-1, 1))
For doing a grid search, we should specify param_grid as a list of dicts, each for a different estimator. This is because different estimators use different sets of parameters (e.g. setting fit_intercept with MLPRegressor causes an error).
Note that the name "regressor" is automatically given to the regressor.
model = TransformedTargetRegressor()
params = [
    {
        "regressor": [LinearRegression()],
        "regressor__fit_intercept": [True, False]
    },
    {
        "regressor": [MLPRegressor()],
        "regressor__hidden_layer_sizes": [1, 5, 10]
    }
]
We can fit as usual.
g = GridSearchCV(model, params)
g.fit(X, Y)
g.best_estimator_, g.best_score_, g.best_params_
# results in something like:
(TransformedTargetRegressor(check_inverse=True, func=None, inverse_func=None,
regressor=LinearRegression(copy_X=True, fit_intercept=False, n_jobs=None,
normalize=False),
transformer=None),
-0.419213380219391,
{'regressor': LinearRegression(copy_X=True, fit_intercept=False, n_jobs=None,
normalize=False), 'regressor__fit_intercept': False})
What you can do is create a class that takes in any classifier and, for each classifier, any setting of parameters.
Create a switcher class that works for any estimator:
from sklearn.base import BaseEstimator
from sklearn.linear_model import SGDClassifier

class ClfSwitcher(BaseEstimator):

    def __init__(self, estimator=SGDClassifier()):
        """
        A Custom BaseEstimator that can switch between classifiers.
        :param estimator: sklearn object - The classifier
        """
        self.estimator = estimator

    def fit(self, X, y=None, **kwargs):
        self.estimator.fit(X, y)
        return self

    def predict(self, X, y=None):
        return self.estimator.predict(X)

    def predict_proba(self, X):
        return self.estimator.predict_proba(X)

    def score(self, X, y):
        return self.estimator.score(X, y)
Now you can pre-train your tfidf however you like.
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer()
tfidf.fit(data, labels)
Now create a pipeline with this pre-trained tfidf
from sklearn.pipeline import Pipeline

pipeline = Pipeline([
    ('tfidf', tfidf),  # Already pretrained/fit
    ('clf', ClfSwitcher()),
])
Perform hyper-parameter optimization
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV

parameters = [
    {
        'clf__estimator': [SGDClassifier()],  # SVM if hinge loss / logreg if log loss
        'clf__estimator__penalty': ('l2', 'elasticnet', 'l1'),
        'clf__estimator__max_iter': [50, 80],
        'clf__estimator__tol': [1e-4],
        'clf__estimator__loss': ['hinge', 'log', 'modified_huber'],
    },
    {
        'clf__estimator': [MultinomialNB()],
        'clf__estimator__alpha': (1e-2, 1e-3, 1e-1),
    },
]

gscv = GridSearchCV(pipeline, parameters, cv=5, n_jobs=12, verbose=3)
# param optimization
gscv.fit(train_data, train_labels)
How to interpret clf__estimator__loss
clf__estimator__loss is interpreted as the loss parameter of whatever estimator is set, where estimator = SGDClassifier() in the topmost example; estimator is itself a parameter of clf, which is a ClfSwitcher object.
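A small usage sketch (my own addition): after fitting, the winning classifier can be pulled back out of the refit pipeline through the ClfSwitcher wrapper.
print(gscv.best_params_)
best_clf = gscv.best_estimator_.named_steps['clf'].estimator
print(best_clf)   # e.g. an SGDClassifier or MultinomialNB with the chosen settings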
