How to make my curves smoother in a Keras text classification model?

My model is defined like this for a sentiment analysis problem:
import tensorflow as tf

def create_model(vocab_size, embedding_dim, maxlen, embeddings_matrix):
    model = tf.keras.Sequential([
        # This is how you need to set the Embedding layer when using pre-trained embeddings
        tf.keras.layers.Embedding(vocab_size + 1, embedding_dim, input_length=maxlen,
                                  weights=[embeddings_matrix], trainable=False),
        tf.keras.layers.Conv1D(128, 6, activation='relu'),
        tf.keras.layers.GlobalAveragePooling1D(),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(6, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
The validation accuracy and loss curves (in blue) come out very jagged.
How can I smooth them? Which parameters or layers in my model should I change?
My curves should be close to these.
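For what it's worth, part of the jaggedness is just how the raw metrics are plotted: you can smooth the displayed curves the way TensorBoard does, with an exponential moving average, and separately reduce the underlying noise by lowering the learning rate or increasing the batch size. A minimal plotting sketch, assuming history is the object returned by model.fit and that 'val_accuracy' is among the recorded metrics:

import matplotlib.pyplot as plt

def smooth(values, weight=0.8):
    # TensorBoard-style exponential moving average; higher weight = smoother curve
    smoothed, last = [], values[0]
    for v in values:
        last = weight * last + (1 - weight) * v
        smoothed.append(last)
    return smoothed

val_acc = history.history['val_accuracy']  # history = model.fit(...) is assumed
plt.plot(val_acc, alpha=0.3, label='raw')
plt.plot(smooth(val_acc), label='smoothed')
plt.xlabel('epoch')
plt.ylabel('validation accuracy')
plt.legend()
plt.show()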

Related

How to properly stack LSTM and sklearn ML models

I am trying to stack Keras models with ML models from the sklearn library for a regression problem. I am able to stack simple dense Keras layers with any sklearn ML model.
My input is similar to the Boston house price data (multivariate). This code snippet shows how I tackle the problem:
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from sklearn.ensemble import RandomForestRegressor, StackingRegressor
from xgboost import XGBRegressor

# Scikit-learn models
rnd_reg = RandomForestRegressor(n_estimators=100, random_state=42)

# Keras model
def build_nn():
    model = Sequential([
        Dense(512, activation='relu', input_shape=[X.shape[1]]),
        Dense(256, activation='relu'),
        Dropout(0.4),
        Dense(128, activation='relu'),
        Dense(64, activation='relu'),
        Dropout(0.2),
        Dense(1, activation='linear')
    ])
    model.compile(optimizer='adam',
                  loss='mean_squared_error',
                  metrics=['MeanSquaredError', 'MeanAbsolutePercentageError'])
    return model

keras_reg = tf.keras.wrappers.scikit_learn.KerasRegressor(build_nn,
                                                          epochs=20,
                                                          batch_size=1,
                                                          verbose=True)
keras_reg._estimator_type = "regressor"

st_reg = StackingRegressor(
    estimators=[('rf', rnd_reg),
                ('Dense', keras_reg)],
    final_estimator=XGBRegressor(random_state=42)
)
Everything works just fine with this part. The issue arises when I want to use an RNN (an LSTM, for example) and stack it with the sklearn models. I am not sure whether this is due to a wrong input shape on my side, or whether an LSTM simply cannot be stacked with sklearn ML models this way.
Below is my LSTM structure:
def build_nn1():
    model = Sequential([
        # LSTM(50, activation='relu', batch_input_shape=(None, X.shape[1], 1)),
        LSTM(50, activation='relu', input_shape=[X.shape[1], 1]),
        BatchNormalization(),
        # Dropout(0.2),
        Dense(20, activation='relu'),
        Dense(1, activation='relu')
    ])
    model.compile(optimizer=Adam(lr=1e-5),
                  loss='mean_squared_error',
                  metrics=['MeanSquaredError', 'MeanAbsolutePercentageError'])
    return model
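One possible fix, sketched below on the assumption that the input shape is indeed the problem: scikit-learn always passes X as a 2-D array, while an LSTM expects 3-D (samples, timesteps, features) input, so the wrapped model can do the reshaping itself with a Reshape layer. The linear output activation is also an assumption here, since a relu on the last layer clips negative predictions to zero:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Reshape, BatchNormalization
from tensorflow.keras.optimizers import Adam

def build_nn1():
    model = Sequential([
        # turn the flat (n_features,) rows sklearn passes in into (n_features, 1) sequences
        Reshape((X.shape[1], 1), input_shape=(X.shape[1],)),
        LSTM(50, activation='relu'),
        BatchNormalization(),
        Dense(20, activation='relu'),
        Dense(1, activation='linear')
    ])
    model.compile(optimizer=Adam(learning_rate=1e-5),
                  loss='mean_squared_error',
                  metrics=['MeanSquaredError', 'MeanAbsolutePercentageError'])
    return model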

Keras Tuner best model does not work better than a manually configured model, and MSE is very high on the train set with this best model

I am working on time series data and I used Keras Tuner to find the best model. Keras Tuner returns a very good MSE for the best model. But when I use this best model to predict the train and test sets, it returns a high MSE for the training set and a lower MSE for the test set, while the RMSE looks normal for both. Also, when I use the model that I configured manually, the results are better than the best model from Keras Tuner! I cannot understand why the results do not make sense; am I doing something wrong? Here is the code.
import os
from tensorflow import keras
from kerastuner.tuners import BayesianOptimization  # keras_tuner in newer versions

def build_model(hp):
    model = keras.Sequential()
    model.add(keras.layers.ConvLSTM2D(filters=hp.Int('units1',
                                                     min_value=25, max_value=512, step=32, default=128),
                                      kernel_size=(1, 1),
                                      activation=hp.Choice('activation1',
                                                           values=['relu', 'tanh', 'sigmoid'], default='relu'),
                                      input_shape=(n_past, 1, 1, 1)))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(units=hp.Int('units3',
                                              min_value=10, max_value=128, step=8, default=128),
                                 activation=hp.Choice('activation_2',
                                                      values=['relu', 'tanh', 'sigmoid'], default='relu')))
    model.add(keras.layers.Dense(1, activation=hp.Choice('activation_2',
                                                         values=['relu', 'tanh', 'sigmoid'], default='relu')))
    model.compile(loss='mae',
                  optimizer=keras.optimizers.Adam(hp.Float('learning_rate',
                                                           min_value=1e-4, max_value=1e-2,
                                                           sampling='LOG', default=1e-3)),
                  metrics=['mae'])
    return model

bayesian_opt_tuner = BayesianOptimization(build_model, objective='mae', max_trials=20, executions_per_trial=1,
                                          directory=os.path.normpath('C:/keras_tuning'),
                                          project_name='timeseries_temp_ts_test_from_TF_ex',
                                          overwrite=True)

EVALUATION_INTERVAL = 200
EPOCHS = 2

bayesian_opt_tuner.search(trainX, trainy,
                          epochs=EPOCHS,
                          validation_data=(testX, testy),
                          validation_steps=50,
                          steps_per_epoch=EVALUATION_INTERVAL)

model = bayesian_opt_tuner.get_best_models(1)[0]
model.summary()
`
The best MSE score is 0.365387, but when I predict the train and test set the MSE is 28.58 for train set and 6.36 for test set and RMSE is 5.35 and 2.52. While with my own model which is below the MSE of train and test set is 5.95 and 2.39 and RMSE is 2.44 and 1.55.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import ConvLSTM2D, Flatten, Dense

model = Sequential()
model.add(ConvLSTM2D(filters=64, kernel_size=(1, 1), activation='relu', input_shape=(n_past, 1, 1, 1)))
model.add(Flatten())
model.add(Dense(32))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
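Two things stand out that could explain the gap, sketched below with the variable names from the question: the tuner trained each candidate for only EPOCHS = 2 while optimizing MAE, but the comparison above is in MSE on fully trained models. Rebuilding the best hyperparameters, training them as long as the manual model (50 epochs here is an assumed value), and scoring both models with the same function puts them on the same footing:

import numpy as np
from sklearn.metrics import mean_squared_error

# rebuild the winning configuration and train it properly instead of reusing
# the 2-epoch checkpoint returned by get_best_models
best_hps = bayesian_opt_tuner.get_best_hyperparameters(1)[0]
best_model = build_model(best_hps)
best_model.fit(trainX, trainy, epochs=50, validation_data=(testX, testy), verbose=0)

for name, X_, y_ in [('train', trainX, trainy), ('test', testX, testy)]:
    mse = mean_squared_error(y_, best_model.predict(X_).ravel())
    print(f'{name}: MSE={mse:.3f}  RMSE={np.sqrt(mse):.3f}')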

How to detect an object and draw a bounding box based on a CNN

Here is my CNN code in Python:
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical

x_train, x_valid, y_train, y_valid = train_test_split(X, Y, test_size=0.3, shuffle=True)
x_train = x_train / 255.  # normalize the dataset
x_valid = x_valid / 255.

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=[width, height, 3]),
    tf.keras.layers.Dense(30, activation='relu'),
    tf.keras.layers.Dense(10, activation='relu'),
    tf.keras.layers.Dense(10, activation='relu'),
    tf.keras.layers.Dense(10, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])

y_train = to_categorical(y_train, 10)
y_valid = to_categorical(y_valid, 10)

print(model.summary())
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=["accuracy"])
model.fit(x_train, y_train, epochs=5, verbose=1, validation_data=(x_valid, y_valid))
This is the code to load the video:
import cv2

cap = cv2.VideoCapture('cars.mp4')
How do I draw a bounding box? I have trained a neural network on some images. Now, using a video, I want to detect objects of a trained class: how do I detect them and find the bounding box coordinates?
Why not use a pretrained model like YOLO or the TensorFlow Object Detection API?
This blog post will help you, try it:
YOLO object detection with OpenCV
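If you take the pretrained route, a rough sketch of the YOLO-with-OpenCV approach from that post is below; yolov3.cfg, yolov3.weights and coco.names are placeholder paths for files you would download separately:

import cv2
import numpy as np

net = cv2.dnn.readNetFromDarknet('yolov3.cfg', 'yolov3.weights')
out_layers = net.getUnconnectedOutLayersNames()
classes = open('coco.names').read().strip().split('\n')

cap = cv2.VideoCapture('cars.mp4')
while True:
    ok, frame = cap.read()
    if not ok:
        break
    h, w = frame.shape[:2]
    # YOLO expects a square blob scaled to [0, 1] with channels in RGB order
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)

    boxes, confidences, class_ids = [], [], []
    for output in net.forward(out_layers):
        for detection in output:
            scores = detection[5:]
            class_id = int(np.argmax(scores))
            confidence = float(scores[class_id])
            if confidence > 0.5:
                # detections are (center-x, center-y, width, height), relative to frame size
                cx, cy, bw, bh = detection[:4] * np.array([w, h, w, h])
                boxes.append([int(cx - bw / 2), int(cy - bh / 2), int(bw), int(bh)])
                confidences.append(confidence)
                class_ids.append(class_id)

    # non-maximum suppression drops overlapping boxes for the same object
    for i in cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4):
        i = int(np.asarray(i).flatten()[0])  # OpenCV versions differ in the returned shape
        x, y, bw, bh = boxes[i]
        cv2.rectangle(frame, (x, y), (x + bw, y + bh), (0, 255, 0), 2)
        cv2.putText(frame, classes[class_ids[i]], (x, y - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

    cv2.imshow('detections', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()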

Find Most Important Input from a Neural Network

I trained a neural network with 37 inputs. It has around 85% accuracy. Is it possible to find out which input has the most effect? I tried this code, but I cannot figure out how to get the most important input from it:
weights = model.layers[0].get_weights()[0]
biases = model.layers[0].get_weights()[1]
One possible solution is to wrap your model with keras.wrappers.scikit_learn and then use recursive feature elimination (RFE) in scikit-learn:
from sklearn.datasets import load_digits
from sklearn.feature_selection import RFE
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
import matplotlib.pyplot as plt

def create_model():
    # create model
    model = Sequential()
    model.add(Dense(512, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    # compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

digits = load_digits()  # this snippet ranks the pixels of the digits dataset
model = KerasClassifier(build_fn=create_model, epochs=100, batch_size=128, verbose=0)
rfe = RFE(estimator=model, n_features_to_select=1, step=1)
rfe.fit(X, y)
ranking = rfe.ranking_.reshape(digits.images[0].shape)

# plot pixel ranking
plt.matshow(ranking, cmap=plt.cm.Blues)
plt.colorbar()
plt.title("Ranking of pixels with RFE")
plt.show()
If you need to visualize the weights, see here.
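If RFE complains that the wrapped estimator exposes neither coef_ nor feature_importances_, a more model-agnostic alternative is permutation importance: shuffle one input column at a time and measure how far the validation accuracy drops. A small self-contained sketch, where X_val, y_val and the predict function are assumed names:

import numpy as np
from sklearn.metrics import accuracy_score

def permutation_importance(predict_fn, X_val, y_val, n_repeats=5, seed=0):
    # importance of feature j = average drop in accuracy when column j is shuffled
    rng = np.random.default_rng(seed)
    baseline = accuracy_score(y_val, predict_fn(X_val))
    importances = np.zeros(X_val.shape[1])
    for j in range(X_val.shape[1]):
        drops = []
        for _ in range(n_repeats):
            X_perm = X_val.copy()
            X_perm[:, j] = rng.permutation(X_perm[:, j])  # destroy only feature j
            drops.append(baseline - accuracy_score(y_val, predict_fn(X_perm)))
        importances[j] = np.mean(drops)
    return importances

# Usage with the assumed names: the most important of the 37 inputs drops the score most.
# importances = permutation_importance(lambda X: model.predict(X).argmax(axis=1), X_val, y_val)
# print(np.argsort(importances)[::-1])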

Getting different results for identical FFNN models in Keras

I am building a model based on an FFNN (feed-forward neural network) using Keras.
I built a first version:
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import Adagrad

def mlp0(input_dim, loss):
    model = Sequential()
    model.add(Dropout(0.5, input_shape=(input_dim,)))
    model.add(Dense(512, activation='sigmoid'))
    model.add(Dense(1, activation='relu'))
    model.compile(loss=loss, optimizer=Adagrad())
    return model
This gives me very good results in k-fold cross-validation, but when I predict on the validation set, the performance is bad.
So I tried another version.
def mlp1(input_dim, loss):
    inputs = keras.Input(shape=(input_dim,))
    x = keras.layers.Dropout(0.5)(inputs)
    x = keras.layers.Dense(512, activation='sigmoid')(x)
    outputs = keras.layers.Dense(1, activation='relu')(x)
    model = keras.Model(inputs, outputs)
    model.compile(loss=loss, optimizer=Adagrad())
    return model
This second model gives worse results in cross-validation, but those results are consistent with the results on the validation set.
To my eyes they are identical models built in different ways, yet for some reason they give me different answers. What am I doing wrong?
Edit:
These models behave the same way:
import keras
from keras import regularizers
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam

def mlp0(input_dim, loss):
    model = Sequential()
    model.add(Dense(512, activation='sigmoid', input_shape=(input_dim,), kernel_regularizer=regularizers.l2(0.01)))
    model.add(Dense(1, activation='relu', kernel_regularizer=regularizers.l2(0.01)))
    model.compile(loss=loss, optimizer=Adam())
    return model

def mlp1(input_dim, loss):
    inputs = keras.Input(shape=(input_dim,))
    x = keras.layers.Dense(512, activation='sigmoid', kernel_regularizer=regularizers.l2(0.01))(inputs)
    outputs = keras.layers.Dense(1, activation='relu', kernel_regularizer=regularizers.l2(0.01))(x)
    model = keras.Model(inputs, outputs)
    model.compile(loss=loss, optimizer=Adam())
    return model
This makes me think there is a catch in the prediction phase related to the dropout.
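For what it's worth, dropout should be inactive at prediction time in both APIs. A quick self-contained check (assuming TF 2.x) is to call the model directly with the training flag and confirm that only training=True resamples the dropout mask:

import numpy as np
from tensorflow import keras

inputs = keras.Input(shape=(8,))
x = keras.layers.Dropout(0.5)(inputs)
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)

x_sample = np.ones((1, 8), dtype='float32')
print(model(x_sample, training=False).numpy())  # deterministic; this is what predict() uses
print(model(x_sample, training=False).numpy())  # same value again
print(model(x_sample, training=True).numpy())   # varies from call to call: dropout active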
