Evaluate and Predict functions in Keras are not giving the same statistics - keras

I have a very simple DNN on a given data set. However, the standard deviations of the error I get from "evaluate" and from "predict" are different. The mean errors roughly agree, but the stdev from predict is always larger than the stdev from evaluate. Why does this difference happen, and how can I fix it?
Raw data is here for download
from keras.models import Sequential
from keras.layers import Dense, Activation
import keras.backend as K
from keras import optimizers
import pickle
import numpy as np
with open('.\\dump', 'rb') as f:
    xTr = pickle.load(f)
    yTr = pickle.load(f)
    muX = pickle.load(f)
    stdX = pickle.load(f)
    muY = pickle.load(f)
    stdY = pickle.load(f)
def mean_pred(y_true, y_pred):
    y_true = y_true*stdY + muY
    y_pred = y_pred*stdY + muY
    return K.mean(y_pred - y_true)
def std_pred(y_true, y_pred):
    y_true = y_true*stdY + muY
    y_pred = y_pred*stdY + muY
    return K.std(y_pred - y_true)
model = Sequential()
model.add(Dense(256, input_shape=(100,)))
model.add(Activation('tanh'))
model.add(Dense(1))
adam = optimizers.adam(lr=0.0001)
model.compile(optimizer=adam,loss='mse', metrics=[mean_pred, std_pred])
model.fit(xTr, yTr.reshape(-1,1), epochs = 5, batch_size = 128, verbose=0, shuffle=True)
score = model.evaluate(xTr, yTr.reshape(-1,1), verbose=0)
pred = model.predict(xTr, verbose=0)
print(score) #mse, mean, stdev of error
errArr = []
for i, y in enumerate(yTr):
    errArr.append((pred[i][0] - y)*stdY)
e = np.asarray(errArr)
print(e.mean(), e.std()) #mean, stdev of error

Finally found the reason. By default, evaluate computes the metrics batch by batch (batch_size defaults to 32, even when it is passed as None) and then averages the per-batch values, so the standard deviation is never taken over all samples at once. After setting batch_size = 1000 (the number of samples in my data set), I get the same mean and standard deviation of the error from evaluate and from predict.
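A minimal sketch of that check, assuming the model, xTr, yTr, stdY and muY defined above: evaluating with a single batch that covers the whole data set should reproduce the statistics computed from predict.
# Evaluate with one batch covering every sample, so std_pred sees the full error distribution.
n_samples = xTr.shape[0]
score_full = model.evaluate(xTr, yTr.reshape(-1, 1), batch_size=n_samples, verbose=0)
print(score_full)  # mse, mean, stdev of error over all samples at once
# The same statistics computed directly from predict.
err = (model.predict(xTr, verbose=0).ravel() - yTr.ravel()) * stdY
print(err.mean(), err.std())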

Related

Calculate gradient of validation error w.r.t inputs using Keras/Tensorflow or autograd

I need to calculate the gradient of the validation error w.r.t inputs x. I'm trying to see how much the validation error changes when I perturb one of the training samples.
The validation error (E) explicitly depends on the model weights (W).
The model weights explicitly depend on the inputs (x and y).
Therefore, the validation error implicitly depends on the inputs.
I'm trying to calculate the gradient of E w.r.t x directly.
An alternative approach would be to calculate the gradient of E w.r.t W (can easily be calculated) and the gradient of W w.r.t x (cannot do at the moment), which would allow the gradient of E w.r.t x to be calculated.
I have attached a toy example. Thanks in advance!
import numpy as np
import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
import tensorflow as tf
from autograd import grad
train_images = mnist.train_images()
train_labels = mnist.train_labels()
test_images = mnist.test_images()
test_labels = mnist.test_labels()
# Normalize the images.
train_images = (train_images / 255) - 0.5
test_images = (test_images / 255) - 0.5
# Flatten the images.
train_images = train_images.reshape((-1, 784))
test_images = test_images.reshape((-1, 784))
# Build the model.
model = Sequential([
    Dense(64, activation='relu', input_shape=(784,)),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax'),
])
# Compile the model.
model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
# Train the model.
model.fit(
    train_images,
    to_categorical(train_labels),
    epochs=5,
    batch_size=32,
)
model.save_weights('model.h5')
# Load the model's saved weights.
# model.load_weights('model.h5')
calculate_mse = tf.keras.losses.MeanSquaredError()
test_x = test_images[:5]
test_y = to_categorical(test_labels)[:5]
train_x = train_images[:1]
train_y = to_categorical(train_labels)[:1]
train_y = tf.convert_to_tensor(train_y, np.float32)
train_x = tf.convert_to_tensor(train_x, np.float64)
with tf.GradientTape() as tape:
    tape.watch(train_x)
    model.fit(train_x, train_y, epochs=1, verbose=0)
    valid_y_hat = model(test_x, training=False)
    mse = calculate_mse(test_y, valid_y_hat)
de_dx = tape.gradient(mse, train_x)
print(de_dx)
# approach 2 - does not run
def calculate_validation_mse(x):
    model.fit(x, train_y, epochs=1, verbose=0)
    valid_y_hat = model(test_x, training=False)
    mse = calculate_mse(test_y, valid_y_hat)
    return mse
train_x = train_images[:1]
train_y = to_categorical(train_labels)[:1]
validation_gradient = grad(calculate_validation_mse)
de_dx = validation_gradient(train_x)
print(de_dx)
Here's how you can do this. The derivation is outlined below.
A few things to note:
I have reduced the feature size from 784 to 256 because I was running out of memory in Colab (line marked in the code). You might have to do some memory profiling to find out why.
I only computed the gradients for the first layer's weights. This is easily extendable to the other layers.
Disclaimer: this derivation is correct to the best of my knowledge. Please do some research and verify that this is the case. You will run into memory issues for larger inputs and layer sizes.
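In outline (my reconstruction from the code below, using the notation of its comments, so treat it as a sketch rather than the original derivation): the training input x only affects the validation loss through the weight update it causes. For a single gradient step with learning rate lr,
theta_new = theta - lr * dE_tr/d(theta)
so
d(theta_new)/dx = -lr * d(dE_tr/d(theta))/dx      (this is grads_1, obtained with tape1.jacobian)
and by the chain rule
dE_val/dx ~= dE_val/d(theta) * d(theta_new)/dx    (dE_val/d(theta) is grads_2, and the product is the final matmul, with dE_val/d(theta) evaluated at the current weights as an approximation to the updated ones).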
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
import tensorflow as tf
f = 256
model = Sequential([
    Dense(64, activation='relu', input_shape=(f,)),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax'),
])
# Compile the model.
model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
w = model.weights[0]
# Inputs and labels
x_tr = tf.Variable(np.random.normal(size=(1,f)), shape=(1, f), dtype='float32')
y_tr = np.random.choice([0,1,2,3,4,5,6,7,8,9], size=(1,1))
y_tr_onehot = tf.keras.utils.to_categorical(y_tr, num_classes=10).astype('float32')
x_v = tf.Variable(np.random.normal(size=(1,f)), shape=(1, f), dtype='float32')
y_v = np.random.choice([0,1,2,3,4,5,6,7,8,9], size=(1,1))
y_v_onehot = tf.keras.utils.to_categorical(y_v, num_classes=10).astype('float32')
# In the context of GradientTape
with tf.GradientTape() as tape1:
    with tf.GradientTape() as tape2:
        y_tr_pred = model(x_tr)
        tr_loss = tf.keras.losses.MeanSquaredError()(y_tr_onehot, y_tr_pred)
    tmp_g = tape2.gradient(tr_loss, w)
print(tmp_g.shape)
# d(dE_tr/d(theta))/dx
# Warning this step consumes lot of memory for large layers
lr = 0.001
grads_1 = -lr * tape1.jacobian(tmp_g, x_tr)
with tf.GradientTape() as tape3:
    y_v_pred = model(x_v)
    v_loss = tf.keras.losses.MeanSquaredError()(y_v_onehot, y_v_pred)
# dE_val/d(theta)
grads_2 = tape3.gradient(v_loss, w)[tf.newaxis, :]
# Just crunching the dimension to get the final desired shape of (1,256)
grad = tf.matmul(tf.reshape(grads_2,[1, -1]), tf.reshape(tf.transpose(grads_1,[2,1,0,3]),[1, -1, 256]))

Keras TimeseriesGenerator: error when checking input

When I try to use the TimeseriesGenerator function, my Keras LSTM starts training for a few moments but then raises a ValueError. What's wrong? I also wonder how it can start training and then run into an error.
My similar implementation without this function runs smoothly, but the quality of the predictions is awful (and I'm not sure that this function, once successfully implemented, would make a difference).
See the code below:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Nadam
from tensorflow.keras.layers import Input, LSTM, Dense
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, TerminateOnNaN
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
data = pd.read_excel('example.xlsx',usecols=['wave','wind','current','X','Y','RZ'])
data = data.apply(lambda x: (x - np.mean(x)) / np.std(x))
n_cutoff = 200
X = np.array(data.loc[n_cutoff:,['wave','wind']])
Y = np.array(data.loc[n_cutoff:,['RZ']])
X = X.reshape(len(X),2)
X = np.append(X, [[0]*np.size(X, axis=1)], axis=0)
Y = Y.reshape(len(Y),1)
Y = np.insert(Y, 0, 0)
n_lag = 3
n_batch = 15
n = int(0.75*len(X))
generator = TimeseriesGenerator(X, Y, length=n_lag, batch_size=n_batch)
inputs = Input(shape=(n_lag,2))
hidden1 = LSTM(units=100,
               activation='softmax',
               recurrent_activation='linear',
               dropout=0.5,
               recurrent_dropout=0.5,
               return_sequences=True)(inputs)
hidden2 = LSTM(units=30,
               activation='softmax',
               recurrent_activation='linear',
               dropout=0.5,
               recurrent_dropout=0.5,
               return_sequences=False)(hidden1)
outputs = Dense(units=1,
                activation='linear')(hidden2)
model = Model(inputs=inputs, outputs=outputs)
optimizer = Nadam(learning_rate=1e-2, beta_1=0.95, beta_2=0.9, epsilon=1e-7)
model.compile(loss='mean_squared_error', optimizer=optimizer)
history = model.fit(generator,
                    verbose=1,
                    steps_per_epoch=int(n/n_batch),
                    epochs=1,
                    shuffle=False,
                    callbacks=[EarlyStopping(monitor='loss', min_delta=0, patience=20, verbose=1, mode='auto'),
                               ReduceLROnPlateau(monitor='loss', factor=0.5, patience=10, verbose=1, mode='auto', cooldown=1),
                               TerminateOnNaN()])
Y_hat = model.predict(X[n:])
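For reference, a small sketch (using the generator and arrays defined above) of what TimeseriesGenerator actually yields; the model's Input(shape=(n_lag, 2)) has to match this window shape, and anything passed to the model later (for example at predict time) has to be windowed into the same shape:
xb, yb = generator[0]  # first batch produced by the generator
print(xb.shape)        # (n_batch, n_lag, 2): each sample is a window of n_lag time steps
print(yb.shape)        # (n_batch,): one target per window, since Y is one-dimensional here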

Implementing and tuning a simple CNN for 3D data using Keras Conv3D

I'm trying to implement a 3D CNN using Keras. However, I am having some difficulty understanding some details of the results I obtain and further improving the accuracy.
The data I am trying to analyze have the shape 64(d1) x 64(d2) x 38(d3), where d1 and d2 are the length and width of the image (64x64 pixels) and d3 is the time dimension. In other words, each sample is a stack of 38 images. The channel parameter is set to 1 because my data are raw values rather than color images.
My data consist of 219 samples, hence 219x64x64x38. They are divided into training and validation sets, with 20% used for validation. In addition, I have a fixed set of 147 samples for testing.
Below is my code, which works fine. It creates a txt file that saves the results for the different combinations of parameters in my network (grid search). In this code I only consider tuning 2 parameters: the number of filters and the lambda of the L2 regularizer. I fixed the dropout and the kernel size of the filters, although later I also considered varying them.
I also tried to set the seed value so that I have some sort of reproducibility (I don't think I have achieved this).
My questions are:
Given the architecture and code below, for all the tested combinations of parameters the training accuracy always converges towards 1 (which is good). However, the validation accuracy mostly stays around 80% +/- 4% (rarely below 70%) regardless of the hyper-parameter combination, and the test accuracy behaves similarly. How can I raise this accuracy above 90%?
As far as I know, a gap between the training and validation/test accuracy is a sign of overfitting. However, in my model I am adding dropout and L2 regularizers and also changing the size of my network, which should reduce this gap (but it does not).
Is there anything else I can do besides modifying my input data? Does adding more layers help? Or is there a pre-trained 3D CNN, as in the 2D case (e.g., AlexNet)? Should I try ConvLSTM? Is this the limit of this architecture?
Thank you :)
import numpy as np
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Conv3D, MaxPooling3D, Dense, Flatten, Activation
from keras.utils import to_categorical
from keras.regularizers import l2
from keras.layers import Dropout
from keras.utils import multi_gpu_model
import scipy.io as sio
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from keras.callbacks import ReduceLROnPlateau
tf.set_random_seed(1234)
def normalize_minmax(X_train):
    """
    Normalize to [0,1]
    """
    from sklearn import preprocessing
    min_max_scaler = preprocessing.MinMaxScaler()
    X_minmax_train = min_max_scaler.fit_transform(X_train)
    return X_minmax_train
# generate and prepare the dataset
def get_data():
    # Load and prepare the data
    X_data = sio.loadmat('./X_train')['X_train']
    Y_data = sio.loadmat('./Y_train')['targets_train']
    X_test = sio.loadmat('./X_test')['X_test']
    Y_test = sio.loadmat('./Y_test')['targets_test']
    return X_data, Y_data, X_test, Y_test
def get_model(X_train, Y_train, X_validation, Y_validation, F1_nb, F2_nb, F3_nb, kernel_size_1, kernel_size_2, kernel_size_3, l2_lambda, learning_rate, reduce_lr, dropout_conv1, dropout_conv2, dropout_conv3, dropout_dense, no_epochs):
    no_classes = 5
    sample_shape = (64, 64, 38, 1)
    batch_size = 32
    dropout_seed = 30
    conv_seed = 20
    # Create the model
    model = Sequential()
    model.add(Conv3D(F1_nb, kernel_size=kernel_size_1, kernel_regularizer=l2(l2_lambda), padding='same', kernel_initializer='glorot_uniform', input_shape=sample_shape))
    model.add(Activation('selu'))
    model.add(MaxPooling3D(pool_size=(2,2,2)))
    model.add(Dropout(dropout_conv1, seed=conv_seed))
    model.add(Conv3D(F2_nb, kernel_size=kernel_size_2, kernel_regularizer=l2(l2_lambda), padding='same', kernel_initializer='glorot_uniform'))
    model.add(Activation('selu'))
    model.add(MaxPooling3D(pool_size=(2,2,2)))
    model.add(Dropout(dropout_conv2, seed=conv_seed))
    model.add(Conv3D(F3_nb, kernel_size=kernel_size_3, kernel_regularizer=l2(l2_lambda), padding='same', kernel_initializer='glorot_uniform'))
    model.add(Activation('selu'))
    model.add(MaxPooling3D(pool_size=(2,2,2)))
    model.add(Dropout(dropout_conv3, seed=conv_seed))
    model.add(Flatten())
    model.add(Dense(512, kernel_regularizer=l2(l2_lambda), kernel_initializer='glorot_uniform'))
    model.add(Activation('selu'))
    model.add(Dropout(dropout_dense, seed=dropout_seed))
    model.add(Dense(no_classes, activation='softmax'))
    model = multi_gpu_model(model, gpus=2)
    # Compile the model
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(lr=learning_rate),
                  metrics=['accuracy'])
    # Train the model.
    history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=no_epochs, validation_data=(X_validation, Y_validation), callbacks=[reduce_lr])
    return model, history
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.0001)
learning_rate = 0.001
no_epochs = 100
X_data, Y_data, X_test, Y_test = get_data()
# Normalize the train/val data
for i in range(X_data.shape[0]):
    for j in range(X_data.shape[3]):
        X_data[i,:,:,j] = normalize_minmax(X_data[i,:,:,j])
X_data = np.expand_dims(X_data, axis=4)
# Normalize the test data
for i in range(X_test.shape[0]):
    for j in range(X_test.shape[3]):
        X_test[i,:,:,j] = normalize_minmax(X_test[i,:,:,j])
X_test = np.expand_dims(X_test, axis=4)
# Shuffle the training data
# fix random seed for reproducibility
seedValue = 40
permutation = np.random.RandomState(seed=seedValue).permutation(len(X_data))
X_data = X_data[permutation]
Y_data = Y_data[permutation]
Y_data = np.squeeze(Y_data)
Y_test = np.squeeze(Y_test)
#Split between train and validation (20%). Here I did not use the classical validation_split=0.2 just to make sure that the data is the same for the different architectures I am using.
X_train = X_data[0:175,:,:,:,:]
Y_train = Y_data[0:175]
X_validation = X_data[176:,:,:,:]
Y_validation = Y_data[176:]
Y_train = to_categorical(Y_train,num_classes=5).astype(np.integer)
Y_validation = to_categorical(Y_validation,num_classes=5).astype(np.integer)
Y_test = to_categorical(Y_test,num_classes=5).astype(np.integer)
l2_lambda_list = [(1*pow(10,-4)),(2*pow(10,-4)),
(3*pow(10,-4)),
(4*pow(10,-4)),
(5*pow(10,-4)),(6*pow(10,-4)),
(7*pow(10,-4)),
(8*pow(10,-4)),(9*pow(10,-4)),(10*pow(10,-4))
]
filters_nb = [(128,64,64),(128,64,32),(128,64,16),(128,64,8),(128,32,32),(128,32,16),(128,32,8),(128,16,8),(128,8,8),
(64,64,32),(64,64,16),(64,64,8),(64,32,32),(64,32,16),(64,32,8),(64,16,16),(64,16,8),(64,8,8),
(32,32,16),(32,32,8),(32,16,16),(32,16,8),(32,8,8),
(16,16,16),(16,16,8),(16,8,8)
]
DropOut = [(0.25,0.25,0.25,0.5),
(0,0,0,0.1),(0,0,0,0.2),(0,0,0,0.3),(0,0,0,0.4),(0,0,0,0.5),
(0.1,0.1,0.1,0),(0.2,0.2,0.2,0),(0.3,0.3,0.3,0),(0.4,0.4,0.4,0),(0.5,0.5,0.5,0),
(0.1,0.1,0.1,0.1),(0.1,0.1,0.1,0.2),(0.1,0.1,0.1,0.3),(0.1,0.1,0.1,0.4),(0.1,0.1,0.1,0.5),
(0.15,0.15,0.15,0.1),(0.15,0.15,0.15,0.2),(0.15,0.15,0.15,0.3),(0.15,0.15,0.15,0.4),(0.15,0.15,0.15,0.5),
(0.2,0.2,0.2,0.1),(0.2,0.2,0.2,0.2),(0.2,0.2,0.2,0.3),(0.2,0.2,0.2,0.4),(0.2,0.2,0.2,0.5),
(0.25,0.25,0.25,0.1),(0.25,0.25,0.25,0.2),(0.25,0.25,0.25,0.3),(0.25,0.25,0.25,0.4),(0.25,0.25,0.25,0.5),
(0.3,0.3,0.3,0.1),(0.3,0.3,0.3,0.2),(0.3,0.3,0.3,0.3),(0.3,0.3,0.3,0.4),(0.3,0.3,0.3,0.5),
(0.35,0.35,0.35,0.1),(0.35,0.35,0.35,0.2),(0.35,0.35,0.35,0.3),(0.35,0.35,0.35,0.4),(0.35,0.35,0.35,0.5)
]
kernel_size = [(3,3,3),
(2,3,3),(2,3,4),(2,3,5),(2,3,6),(2,3,7),(2,3,8),(2,3,9),(2,3,10),(2,3,11),(2,3,12),(2,3,13),(2,3,14),(2,3,15),
(3,3,3),(3,3,4),(3,3,5),(3,3,6),(3,3,7),(3,3,8),(3,3,9),(3,3,10),(3,3,11),(3,3,12),(3,3,13),(3,3,14),(3,3,15),
(3,4,3),(3,4,4),(3,4,5),(3,4,6),(3,4,7),(3,4,8),(3,4,9),(3,4,10),(3,4,11),(3,4,12),(3,4,13),(3,4,14),(3,4,15),
]
for l in range(len(l2_lambda_list)):
    l2_lambda = l2_lambda_list[l]
    f = open("My Results.txt", "a")
    lambda_Str = str(l2_lambda)
    f.write("---------------------------------------\n")
    f.write("lambda = "+f"{lambda_Str}\n")
    f.write("---------------------------------------\n")
    for i in range(len(filters_nb)):
        F1_nb = filters_nb[i][0]
        F2_nb = filters_nb[i][1]
        F3_nb = filters_nb[i][2]
        kernel_size_1 = kernel_size[0]
        kernel_size_2 = kernel_size_1
        kernel_size_3 = kernel_size_1
        dropout_conv1 = DropOut[0][0]
        dropout_conv2 = DropOut[0][1]
        dropout_conv3 = DropOut[0][2]
        dropout_dense = DropOut[0][3]
        # fit model
        model, history = get_model(X_train, Y_train, X_validation, Y_validation, F1_nb, F2_nb, F3_nb, kernel_size_1, kernel_size_2, kernel_size_3, l2_lambda, learning_rate, reduce_lr, dropout_conv1, dropout_conv2, dropout_conv3, dropout_dense, no_epochs)
        # Evaluate metrics
        predictions = model.predict(X_test)
        out = np.argmax(predictions, axis=1)
        Y_test = sio.loadmat('./Y_test')['targets_test']
        Y_test = np.squeeze(Y_test)
        loss = history.history['loss'][no_epochs-1]
        acc = history.history['acc'][no_epochs-1]
        val_loss = history.history['val_loss'][no_epochs-1]
        val_acc = history.history['val_acc'][no_epochs-1]
        # accuracy: (tp + tn) / (p + n)
        accuracy = accuracy_score(Y_test, out)
        # f1: 2 tp / (2 tp + fp + fn)
        f1 = f1_score(Y_test, out, average='macro')
        a = str(filters_nb[i][0]) + ',' + str(filters_nb[i][1]) + ',' + str(filters_nb[i][2]) + ': ' + str('f1-metric: ') + str('%f' % f1) + str(' | loss: ') + str('%f' % loss) + str(' | acc: ') + str('%f' % acc) + str(' | val_loss: ') + str('%f' % val_loss) + str(' | val_acc: ') + str('%f' % val_acc) + str(' | test_acc: ') + str('%f' % accuracy)
        f.write(f"{a}\n")
    f.close()

Load model from Keras and investigate the details

I'm using Keras to fit a function, and I'm new to Keras.
With a very simple network, Keras can fit my function very well. I just want to know what that function is and try to understand why it works so well, but the "predict" function hides the details.
Here is the code I use to create the network:
import numpy as np
import tensorflow as tf
from tensorflow import keras
LABEL_COLUMN = "shat"
BATCH_SIZE = 16
EPOCHS = 20
trainfilePath = "F:\\PyworkingFolder\\WWSHat\\_Data\\alpha0train.csv"
testfilePath = "F:\\PyworkingFolder\\WWSHat\\_Data\\alpha0test.csv"
with open(trainfilePath, encoding='utf-8') as txtContent:
    trainArray = np.loadtxt(txtContent, delimiter=",")
with open(testfilePath, encoding='utf-8') as txtContent:
    testArray = np.loadtxt(txtContent, delimiter=",")
trainSample = trainArray[:, 0:14]
trainLable = trainArray[:, 14]
testSample = testArray[:, 0:14]
testLable = testArray[:, 14]
model = keras.Sequential([
    keras.layers.Dense(14, activation='relu', input_shape=[14]),
    keras.layers.Dense(15, activation='relu'),
    keras.layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
# optimizer = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
model.compile(loss='mse',
              optimizer=optimizer,
              metrics=['mae', 'mse'])
model.summary()
history = model.fit(trainSample, trainLable, epochs=EPOCHS, batch_size=BATCH_SIZE)
model.evaluate(testSample, testLable, verbose=1)
model.save("F:\\PyworkingFolder\\WWSHat\\_Data\\alpha0.h5")
What I understand is that the layers are weight matrices and bias vectors, and each hidden layer works as
out = max(0, input * weight + bias)
After some searching, I found that I can read the .h5 file using
import h5py
import numpy as np
FILENAME = "F:\\PyworkingFolder\\WWSHat\\_Data\\alpha0.h5"
with h5py.File(FILENAME, 'r') as f:
    dense_1 = f['/model_weights/dense_1/dense_1']
    dense_1_bias = dense_1['bias:0'][:]
    dense_1_kernel = dense_1['kernel:0'][:]
    dense_2 = f['/model_weights/dense_2/dense_2']
    dense_2_bias = dense_2['bias:0'][:]
    dense_2_kernel = dense_2['kernel:0'][:]
# print("Weight matrix 1:\n")
# print(dense_1_kernel)
# print("Basis matrix 1:\n")
# print(dense_1_bias)
# print("Weight matrix 2:\n")
# print(dense_2_kernel)
# print("Basis matrix 2:\n")
# print(dense_2_bias)
def layer_output(v, kernel, bias):
    return np.dot(v, kernel) + bias
reluFunction = np.vectorize(lambda x: x if x >= 0.0 else 0.0)
testV = np.array([[-0.004090321213057993,
0.009615388501909157,
-0.24223693596921558,
0.015504079563927319,
-0.02659541428995062,
0.018512968977547152,
0.00836788544720289,
-0.10874776132746002,
-0.045863474556415526,
-0.010195799916571194,
0.09474219315939948,
0.03606698737846194,
-0.004560110004741025,
0.028042417959738858]])
output_1 = layer_output(testV, dense_1_kernel, dense_1_bias)
output_2 = reluFunction(output_1)
output_3 = layer_output(output_2, dense_2_kernel, dense_2_bias)
output_4 = reluFunction(output_3)
However, the result of output_4 is very different from what I get using
loaded_model = keras.models.load_model("F:\\PyworkingFolder\\WWSHat\\_Data\\alpha0.h5")
predicted = loaded_model(testV)
The "predicted" is very close to the ground truth while "output_4" is far away from the ground truth.
I get stuck here and don't know why and failed to find information about how to extract the function I want from the Keras model, I need your help!
Thanks!
model = keras.Sequential([
    keras.layers.Dense(14, activation='relu', input_shape=[14]),
    keras.layers.Dense(15, activation='relu'),
    keras.layers.Dense(1)
])
In your model there are 3 layers, and the last Dense layer has weights and biases too; you didn't take them into account in your calculation.
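A minimal sketch of the complete manual forward pass, reusing layer_output, reluFunction and the already-loaded kernels/biases from the question. The third layer's parameters are read the same way (the group name 'dense_3' is an assumption about how the layers were named when the model was saved), and no ReLU is applied after the last layer because Dense(1) has a linear activation:
with h5py.File(FILENAME, 'r') as f:
    dense_3 = f['/model_weights/dense_3/dense_3']  # assumed name; inspect f['model_weights'].keys() if it differs
    dense_3_bias = dense_3['bias:0'][:]
    dense_3_kernel = dense_3['kernel:0'][:]
hidden_1 = reluFunction(layer_output(testV, dense_1_kernel, dense_1_bias))     # Dense(14) + relu
hidden_2 = reluFunction(layer_output(hidden_1, dense_2_kernel, dense_2_bias))  # Dense(15) + relu
output = layer_output(hidden_2, dense_3_kernel, dense_3_bias)                  # Dense(1) is linear: no relu here
print(output)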

Why is model.evaluate() different from the loss I compute myself based on model.predict()?

I am running a neural network with Keras. Here is my code:
import numpy as np
from keras import Model
from keras.models import Sequential
from keras.layers import Dense
from keras import backend as K
def mean_squared_error(y_true, y_pred):
    return K.mean(K.square(y_pred - y_true), axis=-1)
np.random.seed(1)
Train_X = np.random.randint(low=0,high=100,size = (50,5))
Train_Y = np.matmul(Train_X,np.arange(10).reshape(5,2))+np.random.randint(low=0,high=10,size=(50,2))
Test_X = np.random.randint(low=0,high=100,size = (10,5))
Test_Y = np.matmul(Test_X,np.arange(10).reshape(5,2))+np.random.randint(low=0,high=10,size=(10,2))
model = Sequential()
model.add(Dense(4,activation = 'relu'))
model.add(Dense(2,activation='relu'))
model.add(Dense(2,activation='relu'))
model.add(Dense(2))
model.compile(loss=mean_squared_error, optimizer='adam', metrics=['mae'])
history = model.fit(Train_X, Train_Y, epochs=100, batch_size=5,validation_data = (Test_X, Test_Y))
loss1 = model.evaluate(Test_X,Test_Y)
loss2 = history.history['val_loss'][99]
y_pred = model.predict(Test_X)
y_true = Test_Y
loss3 = np.mean(np.square(y_pred-y_true))
I find that loss1 is the same as loss2 but different from loss3, which confuses me. Could someone tell me why?
This is possibly due to different dtypes for Test_Y and y_pred. Keras tries to automatically take care of dtype mismatches for you, so it is possible that Test_Y is a float64 and y_pred is a float32. If that is indeed the case, try converting one of their dtypes for the loss3 calculation and see if the values match.
y_pred = model.predict(Test_X)
y_true = Test_Y.astype(np.float32)
loss3 = np.mean(np.square(y_pred-y_true))
