Keras TimeseriesGenerator: error when checking input - python-3.x

When I try to use the TimeseriesGenerator class, my Keras LSTM starts training for a few moments but then raises a ValueError. What's wrong? I also wonder how it can start training and only then hit an error.
A similar implementation without this generator runs smoothly, but the quality of the predictions is awful (and I'm not sure that this generator, once working, would make a difference).
See the code below:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Nadam
from tensorflow.keras.layers import Input, LSTM, Dense
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, TerminateOnNaN
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
data = pd.read_excel('example.xlsx',usecols=['wave','wind','current','X','Y','RZ'])
data = data.apply(lambda x: (x - np.mean(x)) / np.std(x))
n_cutoff = 200
X = np.array(data.loc[n_cutoff:,['wave','wind']])
Y = np.array(data.loc[n_cutoff:,['RZ']])
X = X.reshape(len(X),2)
X = np.append(X, [[0]*np.size(X, axis=1)], axis=0)
Y = Y.reshape(len(Y),1)
Y = np.insert(Y, 0, 0)
n_lag = 3
n_batch = 15
n = int(0.75*len(X))
generator = TimeseriesGenerator(X, Y, length=n_lag, batch_size=n_batch)
inputs = Input(shape=(n_lag,2))
hidden1 = LSTM(units=100,
               activation='softmax',
               recurrent_activation='linear',
               dropout=0.5,
               recurrent_dropout=0.5,
               return_sequences=True)(inputs)
hidden2 = LSTM(units=30,
               activation='softmax',
               recurrent_activation='linear',
               dropout=0.5,
               recurrent_dropout=0.5,
               return_sequences=False)(hidden1)
outputs = Dense(units=1,
                activation='linear')(hidden2)
model = Model(inputs=inputs, outputs=outputs)
optimizer = Nadam(learning_rate=1e-2, beta_1=0.95, beta_2=0.9, epsilon=1e-7)
model.compile(loss='mean_squared_error', optimizer=optimizer)
history = model.fit(generator,
                    verbose=1,
                    steps_per_epoch=int(n/n_batch),
                    epochs=1,
                    shuffle=False,
                    callbacks=[EarlyStopping(monitor='loss', min_delta=0, patience=20, verbose=1, mode='auto'),
                               ReduceLROnPlateau(monitor='loss', factor=0.5, patience=10, verbose=1, mode='auto', cooldown=1),
                               TerminateOnNaN()])
Y_hat = model.predict(X[n:])
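Since the failure shows up only partway through training, it can help to inspect the generator itself before calling fit. A minimal diagnostic sketch (using the names from the code above; it checks the setup rather than diagnosing the exact ValueError): if steps_per_epoch asks for more batches than len(generator) can provide, the epoch will run out of data midway.
# Sketch: inspect what TimeseriesGenerator actually yields (names from the question).
print(len(generator))                 # number of batches the generator can produce
x_batch, y_batch = generator[0]       # first batch
print(x_batch.shape, y_batch.shape)   # expected roughly (n_batch, n_lag, 2) and (n_batch,)
print(int(n / n_batch))               # the steps_per_epoch requested in model.fit above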

Related

Keras-Tuner RuntimeError

I'm getting the following error and I'm not able to figure out why:
RuntimeError: Model-building function did not return a valid Keras Model instance, found (<tensorflow.python.keras.engine.functional.Functional object at 0x7f74d8b849d0>, <tensorflow.python.keras.engine.functional.Functional object at 0x7f74d8b80810>)
I have read the answers here and here, which seem to say to import keras from tensorflow instead of standalone keras, which is what I'm already doing, but I still get the error. I would very much appreciate your help in figuring this out. Below is my entire code:
from tensorflow.keras.layers import Input, Dense, BatchNormalization, Dropout, Concatenate, Lambda, GaussianNoise, Activation
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from numba import njit
import tensorflow as tf
import numpy as np
from sklearn.model_selection import KFold
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from sklearn.utils.validation import _deprecate_positional_args
import pandas as pd
import kerastuner as kt
import gc
from tqdm import tqdm
from random import choices
import warnings
warnings.filterwarnings('ignore')
class MyTuner(kt.Tuner):
    def run_trial(self, trial, x, y):
        cv = PurgedGroupTimeSeriesSplit(n_splits=5, group_gap=20)
        val_losses = []
        for train_indices, test_indices in cv.split(x, groups=x[0]):
            x_train, y_train = x[train_indices, 1:], y[train_indices]
            x_test, y_test = x[test_indices, 1:], y[test_indices]
            x_train = apply_transformation(x_train)
            x_test = apply_transformation(x_test)
            model = self.hypermodel.build(trial.hyperparameters)
            model.fit(x_train, y_train,
                      batch_size=hp.Int('batch_size', 500, 5000, step=500, default=4000),
                      epochs=hp.Int('epochs', 100, 1000, step=200, default=500))
            val_losses.append(model.evaluate(x_test, y_test))
        self.oracle.update_trial(trial.trial_id, {'val_loss': np.mean(val_losses)})
        self.save_model(trial.trial_id, model)

def create_autoencoder(hp, input_dim, output_dim):
    i = Input(input_dim)
    encoded = BatchNormalization()(i)
    encoded = GaussianNoise(hp.Float('gaussian_noise', 1e-2, 1, sampling='log', default=5e-2))(encoded)
    encoded = Dense(hp.Int('encoder_dense', 100, 300, step=50, default=64), activation='relu')(encoded)
    decoded = Dropout(hp.Float('decoder_dropout_1', 1e-1, 1, sampling='log', default=0.2))(encoded)
    decoded = Dense(input_dim, name='decoded')(decoded)
    x = Dense(hp.Int('output_x', 32, 100, step=10, default=32), activation='relu')(decoded)
    x = BatchNormalization()(x)
    x = Dropout(hp.Float('x_dropout_1', 1e-1, 1, sampling='log', default=0.2))(x)
    x = Dense(hp.Int('output_x', 32, 100, step=10, default=32), activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(hp.Float('x_dropout_2', 1e-1, 1, sampling='log', default=0.2))(x)
    x = Dense(output_dim, activation='sigmoid', name='label_output')(x)
    encoder = Model(inputs=i, outputs=encoded)
    autoencoder = Model(inputs=i, outputs=[decoded, x])
    # optimizer = hp.Choice('optimizer', ['adam', 'sgd'])
    autoencoder.compile(optimizer=Adam(hp.Float('lr', 0.00001, 0.1, default=0.001)),
                        loss='sparse_binary_crossentropy',
                        metrics=['accuracy'])
    return autoencoder, encoder

build_model = lambda hp: create_autoencoder(hp, X[:, 1:].shape[1], y.shape[1])
tuner = MyTuner(
    oracle=kt.oracles.BayesianOptimization(
        objective=kt.Objective('val_loss', 'min'),
        max_trials=20),
    hypermodel=build_model,
    directory='./',
    project_name='autoencoders')
tuner.search(X, (X, y), callbacks=[EarlyStopping('val_loss', patience=5),
                                   ReduceLROnPlateau('val_loss', patience=3)])
encoder_hp = tuner.get_best_hyperparameters(1)[0]
print("Best Encoder Hyper-parameter:", encoder_hp)
best_autoencoder = tuner.get_best_models(1)[0]
RuntimeError: Model-building function did not return a valid Keras Model instance, found (<tensorflow.python.keras.engine.functional.Functional object at 0x7f74d8b849d0>, <tensorflow.python.keras.engine.functional.Functional object at 0x7f74d8b80810>)
(<tensorflow.python.keras.engine.functional.Functional object at 0x7f74d8b849d0>, <tensorflow.python.keras.engine.functional.Functional object at 0x7f74d8b80810>)
As you can see, this is a tuple of two Keras Model instances. It is the output of create_autoencoder(hp, input_dim, output_dim).
def create_autoencoder(hp, input_dim, output_dim):
    # some lines of code
    return autoencoder, encoder
From my understanding, you are not using encoder, so you can drop it from the function's return value.
The function would then look like this:
def create_autoencoder(hp, input_dim, output_dim):
    # some lines of code
    return autoencoder
It will then return only a single Keras Model instance.
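If you still want the encoder afterwards, an alternative (just a sketch; it leaves create_autoencoder untouched and assumes the tuner only ever needs the autoencoder) is to unpack the tuple in the build function you hand to the tuner:
# Hypothetical alternative: give the tuner only the first element of the tuple.
build_model = lambda hp: create_autoencoder(hp, X[:, 1:].shape[1], y.shape[1])[0]
The encoder can then be rebuilt later from the best autoencoder's layers, for example by naming the last encoder Dense layer and wrapping its output in a new Model.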

How To Do Model Predict Using Distributed Dask With a Pre-Trained Keras Model?

I am loading my pre-trained Keras model and then trying to parallelize predictions over a large amount of input data using Dask. Unfortunately, I'm running into some issues relating to how I'm creating my Dask array. Any guidance would be greatly appreciated!
Setup:
First I cloned from this repo https://github.com/sanchit2843/dlworkshop.git
Reproducible Code Example:
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from keras.models import load_model
import keras
from keras.models import Sequential
from keras.layers import Dense
from dask.distributed import Client
from keras import backend as K   # used inside contrastive_loss below
import mlflow                    # used to load the logged model below
import warnings
import dask.array as da          # imported as `da` to match its use below
warnings.filterwarnings('ignore')
dataset = pd.read_csv('data/train.csv')
X = dataset.drop(['price_range'], axis=1).values
y = dataset[['price_range']].values
# scale data
sc = StandardScaler()
X = sc.fit_transform(X)
ohe = OneHotEncoder()
y = ohe.fit_transform(y).toarray()
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2)
# Neural network
model = Sequential()
model.add(Dense(16, input_dim=20, activation="relu"))
model.add(Dense(12, activation="relu"))
model.add(Dense(4, activation="softmax"))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=100, batch_size=64)
# Use dask
client = Client()
def load_and_predict(input_data_chunk):
    def contrastive_loss(y_true, y_pred):
        margin = 1
        square_pred = K.square(y_pred)
        margin_square = K.square(K.maximum(margin - y_pred, 0))
        return K.mean(y_true * square_pred + (1 - y_true) * margin_square)
    mlflow.set_tracking_uri('<uri>')
    mlflow.set_experiment('clean_parties_ml')
    runs = mlflow.search_runs()
    artifact_uri = runs.loc[runs['start_time'].idxmax()]['artifact_uri']
    model = mlflow.keras.load_model(artifact_uri + '/model',
                                    custom_objects={'contrastive_loss': contrastive_loss})
    y_pred = model.predict(input_data_chunk)
    return y_pred
da_input_data = da.from_array(X_test, chunks=(100, None))
prediction_results = da_input_data.map_blocks(load_and_predict, dtype=X_test.dtype).compute()
The Error I'm receiving:
AttributeError: '_thread._local' object has no attribute 'value'
Keras/TensorFlow don't play nicely with other threaded systems. There is an ongoing issue on this topic here: https://github.com/dask/dask-examples/issues/35
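Until that is resolved upstream, one workaround that is often suggested (a sketch, assuming a single machine and that avoiding the threaded scheduler is acceptable) is to force a non-threaded compute:
# Sketch: run the same map_blocks graph with the synchronous scheduler,
# which sidesteps the thread-local session state that Keras/TensorFlow keep.
prediction_results = (
    da_input_data
    .map_blocks(load_and_predict, dtype=X_test.dtype)
    .compute(scheduler='synchronous')
)
This gives up parallelism, so it is mainly a way to confirm that threading is the problem rather than a long-term fix.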

Transfer-learning ResNet model does not learn

I trained a ResNet-50 model to classify images from 6 classes (my own dataset) and saved it. But the model did not learn properly and the predictions are incorrect. What could be the reason for this poor learning, and how can I solve it?
Here are my code and the output plots, using Keras with the TensorFlow backend.
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.layers import Dense, Dropout
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.preprocessing.image import ImageDataGenerator, image
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True  # PIL's flag is LOAD_TRUNCATED_IMAGES (plural)
# Define some constant needed throughout the script
N_CLASSES = 6
EPOCHS = 20
PATIENCE = 5
TRAIN_PATH= '/Train/'
VALID_PATH = '/Test/'
MODEL_CHECK_WEIGHT_NAME = 'resnet_monki_v1_chk.h5'
# Define the model: we freeze the pre-trained ResNet weights and add a few layers on top for our custom dataset
K.set_learning_phase(0)
model = ResNet50(input_shape=(224,224,3),include_top=False, weights='imagenet', pooling='avg')
K.set_learning_phase(1)
x = model.output
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
output = Dense(N_CLASSES, activation='softmax', name='custom_output')(x)
custom_resnet = Model(inputs=model.input, outputs = output)
for layer in model.layers:
    layer.trainable = False
custom_resnet.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
custom_resnet.summary()
# 4. Load dataset to be used
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
traingen = datagen.flow_from_directory(TRAIN_PATH, target_size=(224,224), batch_size=32, class_mode='categorical')
validgen = datagen.flow_from_directory(VALID_PATH, target_size=(224,224), batch_size=32, class_mode='categorical', shuffle=False)
# 5. Train the model; we use ModelCheckpoint to save the best model based on validation accuracy
es_callback = EarlyStopping(monitor='val_acc', patience=PATIENCE, mode='max')
mc_callback = ModelCheckpoint(filepath=MODEL_CHECK_WEIGHT_NAME, monitor='val_acc', save_best_only=True, mode='max')
train_history = custom_resnet.fit_generator(traingen, steps_per_epoch=len(traingen), epochs= EPOCHS, validation_data=traingen, validation_steps=len(validgen), verbose=2, callbacks=[es_callback, mc_callback])
custom_resnet.save('custom_resnet.h5')
The output plots were attached as image links (the site did not let me embed pictures directly).
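When a frozen-backbone model refuses to learn, one generic sanity check (a sketch using the names from the code above, not a diagnosis of this specific run) is to confirm that the training and validation generators use the same class mapping and that a batch looks sensible:
# Quick sanity check on the data pipeline (names taken from the question).
print(traingen.class_indices)          # should match validgen.class_indices
print(validgen.class_indices)
x_batch, y_batch = next(traingen)      # one preprocessed batch
print(x_batch.shape, y_batch.shape)    # expected (32, 224, 224, 3) and (32, 6)
print(x_batch.min(), x_batch.max())    # values after preprocess_input, not raw 0-255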

Why is model.evaluate() different from the loss I compute myself from model.predict()?

I am running a neural network with Keras. Here is my code:
import numpy as np
from keras import Model
from keras.models import Sequential
from keras.layers import Dense
from keras import backend as K
def mean_squared_error(y_true, y_pred):
    return K.mean(K.square(y_pred - y_true), axis=-1)
np.random.seed(1)
Train_X = np.random.randint(low=0,high=100,size = (50,5))
Train_Y = np.matmul(Train_X,np.arange(10).reshape(5,2))+np.random.randint(low=0,high=10,size=(50,2))
Test_X = np.random.randint(low=0,high=100,size = (10,5))
Test_Y = np.matmul(Test_X,np.arange(10).reshape(5,2))+np.random.randint(low=0,high=10,size=(10,2))
model = Sequential()
model.add(Dense(4,activation = 'relu'))
model.add(Dense(2,activation='relu'))
model.add(Dense(2,activation='relu'))
model.add(Dense(2))
model.compile(loss=mean_squared_error, optimizer='adam', metrics=['mae'])
history = model.fit(Train_X, Train_Y, epochs=100, batch_size=5,validation_data = (Test_X, Test_Y))
loss1 = model.evaluate(Test_X,Test_Y)
loss2 = history.history['val_loss'][99]
y_pred = model.predict(Test_X)
y_true = Test_Y
loss3 = np.mean(np.square(y_pred-y_true))
I find that loss1 is the same as loss2 but differs from loss3, which confuses me. Could someone tell me why?
This is possibly due to different dtypes for Test_Y and y_pred. Keras tries to automatically take care of dtype mismatches for you, so it is possible that Test_Y is a float64 and y_pred is a float32. If that is indeed the case, try converting one of their dtypes for the loss3 calculation and see if the values match.
y_pred = model.predict(Test_X)
y_true = Test_Y.astype(np.float32)
loss3 = np.mean(np.square(y_pred-y_true))
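A quick way to confirm that dtype really is the culprit (a minimal check using the names from the question) is to print both dtypes before recomputing the loss:
# If these differ (e.g. float32 vs float64), the manual calculation can drift
# slightly from what model.evaluate reports.
print(y_pred.dtype, Test_Y.dtype)
print(loss1, loss3)   # after the cast above, these should agree closely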

Keras GridSearchCV on sklearn one-hot encoded data

The problem with this code is that I am giving the classifier one-hot encoded data: X_train, X_test, y_train and y_test are all one-hot encoded. But the classifier predicts the outputs y_hat_test and y_hat_train in numerical form (which I think is incorrect as well). Can anyone help with this?
This is a dummy example, so low accuracy is not a concern; I just want to know why the output is not predicted in one-hot encoded form.
Thanks!
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
x=pd.DataFrame()
x['names']= np.arange(1,10)
x['Age'] = np.arange(1,10)
y=pd.DataFrame()
y['target'] = np.arange(1,10)
from sklearn.preprocessing import OneHotEncoder, Normalizer
ohX= OneHotEncoder()
x_enc = ohX.fit_transform(x).toarray()
ohY = OneHotEncoder()
y_enc = ohY.fit_transform(y).toarray()
print (x_enc)
print("____")
print (y_enc)
import keras
from keras import regularizers
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.models import load_model
from keras.layers.advanced_activations import LeakyReLU
marker="-------"
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
def create_model(learn_rate=0.001):
    model = Sequential()
    model.add(Dense(units=15, input_dim=18, kernel_initializer='normal', activation="tanh"))
    model.add(Dense(units=9, activation="softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy'])
    return model
if __name__ == "__main__":
    X_train, X_test, y_train, y_test = train_test_split(x_enc, y_enc, test_size=0.33, random_state=42)
    print("\n\n", marker*5, " Classification\nX_train shape is: ", X_train.shape, "\tX_test shape is:", X_test.shape)
    print("\ny_train shape is: ", y_train.shape, "\t y_test shape is:", y_test.shape, "\n\n")
    norm = Normalizer()
    #model
    X_train = norm.fit_transform(X_train)
    X_test = norm.transform(X_test)
    earlyStopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')
    model = KerasClassifier(build_fn=create_model, verbose=0)
    fit_params = {'callbacks': [earlyStopping]}
    #grid
    # batch_size = [50, 100, 200, 300, 400]
    epochs = [2, 5]
    learn_rate = [0.1, 0.001]
    param_grid = dict(epochs=epochs, learn_rate=learn_rate)
    grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
    #Predicting
    print(np.shape(X_train), np.shape(y_train))
    y_train = np.reshape(y_train, (-1, np.shape(y_train)[1]))
    print("y_train shape after reshaping", np.shape(y_train))
    grid_result = grid.fit(X_train, y_train, callbacks=[earlyStopping])
    print("grid score using params: ", grid_result.best_score_, " ", grid_result.best_params_)
    #scores
    print("SCORES")
    print(grid_result.score(X_test, y_test))
    # summarize results
    #print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
    #means = grid_result.cv_results_['mean_test_score']
    #stds = grid_result.cv_results_['std_test_score']
    #params = grid_result.cv_results_['params']
    #for mean, stdev, param in zip(means, stds, params):
    #    print("%f (%f) with: %r" % (mean, stdev, param))
    print("\n\n")
    print("y_test is", y_test)
    y_hat_test = grid.predict(X_test)
    y_hat_train = grid.predict(X_train)
    print("y_hat_test is ", y_hat_test)
