I am trying to optimize my Keras NN using KerasTuner, but for some reason it's giving me a "'module' object is not callable" error on the `tuner = randomsearch(...)` line.
import time

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import scale
from keras.models import Sequential
from keras.layers import Dense
from keras_tuner.tuners import randomsearch
from keras_tuner.tuners import RandomSearch
from keras_tuner.engine import hyperparameters
# Timestamped directory name (seconds since the epoch) for tuner logs.
LOG_DIR = str(int(time.time()))
def build_model(hp):
    """Build a tunable MLP for KerasTuner.

    Parameters
    ----------
    hp : HyperParameters object supplied by the tuner at each trial.

    Returns
    -------
    A compiled Keras ``Sequential`` model.
    """
    model = Sequential()
    # First hidden layer; its width is a tuned hyperparameter.
    # NOTE(review): relies on a global X_train for input_dim — confirm it is
    # defined before the tuner runs.
    model.add(Dense(hp.Int("input_units", min_value=1, max_value=105, step=1),
                    input_dim=X_train.shape[1], activation='relu'))
    # A tuned number (1-5) of additional hidden layers, each with tuned width.
    for i in range(hp.Int("n_layers", 1, 5)):
        model.add(Dense(hp.Int(f"conv_{i}_units", min_value=1, max_value=105, step=1),
                        activation='relu'))
    # NOTE(review): no dedicated output layer is added, and 'accuracy' is not
    # a meaningful metric under an MSE regression loss — verify intent.
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    return model
# Fix for "'module' object is not callable": `randomsearch` is a module;
# the tuner class is `RandomSearch`.
tuner = RandomSearch(build_model,
                     objective="val_accuracy",
                     max_trials=1,
                     executions_per_trial=1,
                     directory=LOG_DIR)
tuner.search(x=X_train, y=y_train, epochs=1, batch_size=146,
             validation_data=(X_test, y_test))
I figured it out: I had imported the wrong `randomsearch` and `hyperparameters` — the modules, instead of the `RandomSearch` and `HyperParameters` classes.
Related
This question already has answers here:
What function defines accuracy in Keras when the loss is mean squared error (MSE)?
(3 answers)
Closed 9 months ago.
I used an LSTM model for this prediction. But the accuracy is very low. How could I fix this issue?
from keras.layers import Dropout
from keras.layers import Bidirectional
# Stacked LSTM regressor over (look_back, 1)-shaped windows.
model=Sequential()
model.add(LSTM(50,activation='relu',return_sequences=True,input_shape=(look_back,1)))
model.add(LSTM(50, activation='relu', return_sequences=True))
model.add(LSTM(50, activation='relu', return_sequences=True))
# Final recurrent layer returns only the last timestep's output vector.
model.add(LSTM(50, activation='sigmoid', return_sequences=False))
model.add(Dense(50))
model.add(Dense(50))
model.add(Dropout(0.2))
# Single-value regression output.
model.add(Dense(1))
# NOTE(review): 'accuracy' is not meaningful with a mean-squared-error
# regression loss — it checks continuous predictions for exact equality,
# which is why the reported accuracy is near zero.
model.compile(optimizer='adam',loss='mean_squared_error',metrics=['accuracy'])
# Lower the learning rate after compiling.
model.optimizer.learning_rate = 0.0001
Test and Train Prediction Plot
Epochs
Your structure seems correct. Try my code.
from keras.models import Sequential
from keras.layers import LSTM, Dense,Dropout, Bidirectional
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
#from keras.utils import plot_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from keras.layers.merge import Concatenate
import matplotlib.gridspec as gridspec
import random
import scikitplot as skplot
import datetime
from datetime import date
from pandas_datareader import data as pdr
def create_dataset(dataset, look_back=3):
    """Build supervised-learning windows from a 1-D series.

    Parameters
    ----------
    dataset : sequence or ndarray
        Source series; consecutive slices of length ``look_back`` become
        the model inputs.
    look_back : int
        Window length (number of past steps used to predict the next one).

    Returns
    -------
    tuple of np.ndarray
        ``(X, y)`` where ``X[i]`` is ``dataset[i:i+look_back]`` and
        ``y[i]`` is ``dataset[i+look_back]``.
    """
    dataX, dataY = [], []
    # NOTE(review): the extra "- 1" skips the final usable window; kept as-is
    # to preserve the original behavior (common in LSTM tutorials).
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)
# Select the target column and scale it to [0, 1] for the LSTM.
COLUMNS=['your_data_column']
# NOTE(review): assumes a DataFrame `df` is defined earlier — confirm.
dataset=df[COLUMNS]
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(np.array(dataset).reshape(-1,1))
# 60/40 chronological train/test split (no shuffling for a time series).
train_size = int(len(dataset) * 0.60)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size], dataset[train_size:len(dataset)]
# Window length fed to create_dataset / the LSTM input shape.
look_back=3
trainX=[]
testX=[]
y_train=[]
# Forecast horizon: number of future steps predicted per sample.
n_future = 1
features=2
timeSteps=4
# Bidirectional + stacked LSTM model with dropout between recurrent layers.
model = Sequential()
model.add(Bidirectional(LSTM(units=50, return_sequences=True,
input_shape=(X_train.shape[1], 1))))
model.add(LSTM(units= 50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units= 50, return_sequences=True))
model.add(Dropout(0.2))
# Last LSTM collapses the sequence to a single vector.
model.add(LSTM(units= 50))
model.add(Dropout(0.2))
# One output unit per future step being forecast.
model.add(Dense(units = n_future))
# NOTE(review): "acc" is not meaningful for a mean_squared_error regression
# objective — consider a regression metric such as "mae" instead.
model.compile(optimizer="adam", loss="mean_squared_error", metrics=["acc"])
I want to get the KerasRegressor training history, but every time I get "(...) object has no attribute 'History'".
'''
# Regression Example With Boston Dataset: Standardized and Wider
import numpy as np
from pandas import read_csv
from keras.models import Sequential
from keras.layers import Dense
#from keras.wrappers.scikit_learn import KerasRegressor
from scikeras.wrappers import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import keras.backend as K
# load dataset
dataframe = read_csv("Data 1398-2.csv")
dataset = dataframe.values
# split into input (X) and output (Y) variables
X = dataset[:,0:10]
Y = dataset[:,10]
############
from sklearn import preprocessing
from sklearn.metrics import r2_score
# Scale features to [0, 1] before splitting.
min_max_scaler = preprocessing.MinMaxScaler()
X_scale = min_max_scaler.fit_transform(X)
from sklearn.model_selection import train_test_split
# 75% train; the remaining 25% is split ~45/55 into validation and test.
X_train, X_val_and_test, Y_train, Y_val_and_test = train_test_split(X_scale, Y, test_size=0.25)
X_val, X_test, Y_val, Y_test = train_test_split(X_val_and_test, Y_val_and_test, test_size=0.55)
##################
# define wider model
def wider_model():
    """Define the "wider" regression MLP (10 inputs -> 1 output).

    Returns
    -------
    A compiled Keras ``Sequential`` model; fitting is delegated to the
    scikeras ``KerasRegressor`` wrapper in the pipeline.
    """
    # create model
    model = Sequential()
    model.add(Dense(40, input_dim=10, kernel_initializer='normal', activation='relu'))
    model.add(Dense(20, kernel_initializer='normal', activation='relu'))
    # Linear single-unit output for regression.
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model
    model.compile(loss='mean_squared_error', metrics=['mae'], optimizer='adam')
    # Fitting is intentionally left to the wrapper (epochs/batch_size are
    # passed to KerasRegressor), so no model.fit here.
    return model
# evaluate model with standardized dataset
from keras.callbacks import History
# Pipeline: standardize features, then the scikeras-wrapped Keras model.
estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('mlp',KerasRegressor(model=wider_model, epochs=100, batch_size=2, verbose=0) ))
pipeline = Pipeline(estimators)
# 5-fold cross-validation on the training split only.
kfold = KFold(n_splits=5)
results = cross_val_score(pipeline, X_train, Y_train, cv=kfold)
print("Wider: %.2f (%.2f) MSE" % (results.mean(), results.std()))
import matplotlib.pyplot as plt
# NOTE(review): `history` is never assigned in this script, which is why the
# plotting below is commented out; after fitting, the history lives on the
# wrapped model (e.g. pipeline.steps[1][1].model.history.history).
#plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
#plt.title('Model loss')
#plt.ylabel('Loss')
#plt.xlabel('Epoch')
#plt.legend(['Train', 'Val'], loc='upper right')
#plt.show()
'''
The Keras model is at index 1 of the pipeline's steps in your case, but you can also locate it yourself. Then, to get the history object:
pipeline.steps[1][1].model.history.history
If you are sure that Keras Model is always the last estimator, you can also use:
pipeline._final_estimator.model.history.history
I am trying to train a VotingClassifier containing a Keras model using GridSearchCV.
Here is the code:
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import adam
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.ensemble import VotingClassifier
from sklearn import datasets
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
# pretend data
# Synthetic binary-classification set: 100 samples x 20 features.
X, y = datasets.make_classification(n_samples=100, n_features=20)
scaler = StandardScaler()
# create model
def create_model():
    """Build a binary-classifier MLP (20 inputs -> sigmoid output).

    Returns
    -------
    A compiled Keras ``Sequential`` model for the KerasClassifier wrapper.
    """
    model = Sequential()
    model.add(Dense(20, kernel_initializer="uniform", activation='relu', input_shape=(20,)))
    model.add(Dense(30, kernel_initializer="uniform", activation='relu'))
    model.add(Dense(10, kernel_initializer="uniform", activation='relu'))
    # Single sigmoid unit matches the binary_crossentropy loss.
    model.add(Dense(1, activation='sigmoid'))
    # Compile model
    # NOTE(review): modern Keras exposes the optimizer class as `Adam`;
    # lowercase `adam` only resolves on some older versions — confirm the
    # import at the top of the script matches the installed Keras.
    optimizer = adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
# Wrap the Keras builder so sklearn treats it as an estimator.
keras_model = KerasClassifier(build_fn=create_model)
# Force the sklearn estimator-type tag so VotingClassifier accepts it.
keras_model._estimator_type = "classifier"
# Soft-voting ensemble of an SVC and the Keras net.
eclf = VotingClassifier(
estimators=[('svc',SVC(probability=True)), ('keras_model', keras_model)]
, voting='soft')
# Test - fit the voting classifier without grid search
eclf.fit(X, y)
print('The VotingClassifier can be fit outside of gridsearch\n')
# parameters to grid search
params = [{'svc__C':[0.01,0.1]}, ]
# NOTE(review): GridSearchCV clones the ensemble before fitting; cloning
# presumably drops the manually-set _estimator_type, which would explain why
# the error appears only inside the grid search — verify against sklearn.clone.
grid = GridSearchCV(eclf,params,cv=2,scoring='accuracy', verbose=1)
grid.fit(X,y)
I get the following error:
ValueError: The estimator KerasClassifier should be a classifier.
When I train the VotingClassifier outside of GridSearchCV, no error occurs; however, when I train it within GridSearchCV, I get the error message. This other question, VotingClassifier with pipelines as estimators, has the same error (without using GridSearch) and was fixed by a line asserting that the Keras model is a classifier, which I have also included:
keras_model._estimator_type = "classifier"
This did not fix the problem here.
Any suggestions?
I am loading my pre-trained Keras model and then trying to parallelize predictions over a large number of input data points using Dask. Unfortunately, I'm running into some issues relating to how I'm creating my Dask array. Any guidance would be greatly appreciated!
Setup:
First I cloned from this repo https://github.com/sanchit2843/dlworkshop.git
Reproducible Code Example:
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from keras.models import load_model
import keras
from keras.models import Sequential
from keras.layers import Dense
from dask.distributed import Client
import warnings
import dask.array as DaskArray
warnings.filterwarnings('ignore')
dataset = pd.read_csv('data/train.csv')
# Features: everything except the target column.
X = dataset.drop(['price_range'], axis=1).values
y = dataset[['price_range']].values
# scale data
sc = StandardScaler()
X = sc.fit_transform(X)
# One-hot encode the 4-class target for the softmax output.
ohe = OneHotEncoder()
y = ohe.fit_transform(y).toarray()
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2)
# Neural network
model = Sequential()
model.add(Dense(16, input_dim=20, activation="relu"))
model.add(Dense(12, activation="relu"))
# 4-way softmax matches the one-hot-encoded targets.
model.add(Dense(4, activation="softmax"))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=100, batch_size=64)
# Use dask
# Start a local distributed scheduler/workers for map_blocks execution.
client = Client()
def load_and_predict(input_data_chunk):
    """Load the latest MLflow-logged Keras model and predict on one chunk.

    Intended as a dask ``map_blocks`` callback, so the model is (re)loaded
    inside the worker rather than captured from the driver.
    """
    def contrastive_loss(y_true, y_pred):
        # Custom loss object required to deserialize the logged model.
        margin = 1
        square_pred = K.square(y_pred)
        margin_square = K.square(K.maximum(margin - y_pred, 0))
        return K.mean(y_true * square_pred + (1 - y_true) * margin_square)

    # NOTE(review): `mlflow` and `K` (keras.backend) are not imported in this
    # snippet — confirm they are in scope where this runs.
    mlflow.set_tracking_uri('<uri>')
    mlflow.set_experiment('clean_parties_ml')
    # Pick the most recently started run's artifacts as "latest model".
    runs = mlflow.search_runs()
    artifact_uri = runs.loc[runs['start_time'].idxmax()]['artifact_uri']
    model = mlflow.keras.load_model(artifact_uri + '/model',
                                    custom_objects={'contrastive_loss': contrastive_loss})
    y_pred = model.predict(input_data_chunk)
    return y_pred
# Bug fix: dask.array was imported as `DaskArray`, but this code used the
# undefined alias `da`, which raises NameError before the dask call runs.
da_input_data = DaskArray.from_array(X_test, chunks=(100, None))
# Apply the model per 100-row chunk; dtype mirrors the input array's dtype.
prediction_results = da_input_data.map_blocks(load_and_predict, dtype=X_test.dtype).compute()
The Error I'm receiving:
AttributeError: '_thread._local' object has no attribute 'value'
Keras/Tensorflow don't play nicely with other threaded systems. There is an ongoing issue on this topic here: https://github.com/dask/dask-examples/issues/35
I am new to CNNs, Keras, and TensorFlow. I am trying to build SqueezeNet for Keras 2.0 as shown here: https://github.com/DT42/squeezenet_demo. It didn't work for me, so I tried to make a one-layer network instead, and that failed too. Below is the code for the one-layer version.
As input I use black-and-white 90×90 images with 2 classes.
I am a bit lost cause I have tried and read many different things and not sure how to fix this.
import h5py
from keras.models import Model
from keras.layers import Input, Activation, Concatenate
from keras.layers import Flatten, Dropout, Dense
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import AveragePooling2D
from keras import backend as K
import numpy as np
import cv2
import glob
from sklearn.model_selection import train_test_split
# Map folder names to integer class labels.
class_names = {
    "class_A": 0,
    "class_B": 1
}
#all imgs in one file
X = list()
y = list()
for img_folder in ["class_A", "class_B"]:
    # NOTE(review): "path" + img_folder has no separator, so the pattern is
    # e.g. "pathclass_A*.jpg" — confirm this matches the real directory layout.
    for img in glob.glob("path" + img_folder + "*.jpg"):
        input_img = cv2.imread(img)
        X.append(input_img)
        y.append(class_names[img_folder])
# NOTE(review): X and y are plain Python lists of cv2 images here; Keras
# expects numpy arrays — confirm conversion happens before fit.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
# Single-channel 90x90 input tensor.
input_img = Input(shape=(90,90,1))
# NOTE(review): Dense applied directly to a 4-D image tensor acts per-pixel;
# there is no Flatten and no sigmoid output unit, yet the loss is
# binary_crossentropy — verify the intended architecture.
b = Dense(32)(input_img)
model = Model(inputs=input_img, outputs=b)
print(model.summary())
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=["accuracy"])
model.fit(X_train, y_train,
epochs=20, batch_size=32)