AttributeError: 'Sequential' object has no attribute 'eval' - keras

I am using SHAP for my model analysis, and while calling DeepExplainer I am getting "AttributeError: 'Sequential' object has no attribute 'eval'".
I am using Theano with Keras instead of TensorFlow because of a version-mismatch issue with SHAP, which I have posted about in another question. Now I am trying the same thing again, but this time with PyTorch as the backend. Building the model works fine, but SHAP's DeepExplainer throws an AttributeError. I am new to this kind of error in the model-explainer domain.
Input:
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
print('Train...')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=1,
          validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test,
                            batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
Output: no error.
After that:
Input:
import shap
# we use the first 100 training examples as our background dataset to integrate over
explainer = shap.DeepExplainer(model, x_train[:100])
Error:
AttributeError Traceback (most recent call last)
<ipython-input-12-9cca779d01d2> in <module>
1 # we use the first 100 training examples as our background dataset to integrate over
----> 2 explainer = shap.DeepExplainer(model,1)
c:\users\shubh\.conda\envs\pytorch_cpu\lib\site-packages\shap\explainers\deep\__init__.py in __init__(self, model, data, session, learning_phase_flags)
79 self.explainer = TFDeepExplainer(model, data, session, learning_phase_flags)
80 elif framework == 'pytorch':
---> 81 self.explainer = PyTorchDeepExplainer(model, data)
82
83 self.expected_value = self.explainer.expected_value
c:\users\shubh\.conda\envs\pytorch_cpu\lib\site-packages\shap\explainers\deep\deep_pytorch.py in __init__(self, model, data)
47 self.target_handle.remove()
48 del self.layer.target_input
---> 49 self.model = model.eval()
50
51 self.multi_output = False
AttributeError: 'Sequential' object has no attribute 'eval'
Any help or direction for resolving this error?
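Note: from the traceback, SHAP has dispatched to its PyTorch explainer (deep_pytorch.py), whose setup calls model.eval(). That method exists on every torch.nn.Module but not on a Keras Sequential model, which is exactly what raises the AttributeError. A minimal sketch of the difference, assuming PyTorch is installed:
import torch.nn as nn

# torch.nn.Module defines eval(), so SHAP's PyTorch code path succeeds here
torch_model = nn.Sequential(nn.Linear(4, 2), nn.Sigmoid())
torch_model.eval()  # returns the module itself, switched to inference mode

# A Keras Sequential model has no eval() method, so the same call inside
# PyTorchDeepExplainer fails: SHAP needs an actual torch.nn.Module on this
# code path.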

Related

Unexpected ValueError after training Keras NN a few times

I am working on time series prediction using RNNs implemented in Keras on Google Colaboratory. I implemented the RNN as follows:
import tensorflow as tf
from tensorflow import keras
mae = keras.losses.MeanAbsoluteError()
hidden_neurons = 50
output_neurons = 1
epoch_size = 50
batch_size = 72
# x_train has shape (500, 1, 23)
LSTM_layer = keras.layers.LSTM(hidden_neurons, input_shape = (x_train.shape[1], x_train.shape[2]), dropout = 0.05)
output_layer = keras.layers.Dense(1)
test_model = keras.Sequential(layers = (LSTM_layer, output_layer))
test_model.reset_states()
test_model.compile(optimizer = 'adam', loss = mae)
test_model.summary()
history = test_model.fit(tf.expand_dims(x_train, axis=-1), y_train, epochs = epoch_size, batch_size = batch_size, validation_data=(x_test, y_test), shuffle = False)
# y_train has shape (500, 1)
# x_test has shape (500, 1, 23)
# y_test has shape (500, 1)
I have the above code (except the imports) in a single code cell. When I start a fresh runtime, the network trains fine, as expected. But after executing the code cell around 3-4 times, Colab throws the following error:
ValueError Traceback (most recent call last)
<ipython-input-23-3ac5cc808611> in <module>
12 test_model.compile(optimizer = 'adam', loss = mae)
13 test_model.summary()
---> 14 history = test_model.fit(tf.expand_dims(x_train, axis=-1), y_train, epochs = epoch_size, batch_size = batch_size, validation_data=(x_test, y_test), shuffle = False)
...
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1145 except Exception as e: # pylint:disable=broad-except
1146 if hasattr(e, "ag_error_metadata"):
-> 1147 raise e.ag_error_metadata.to_exception(e)
1148 else:
1149 raise
ValueError: Input 0 of layer "sequential_2" is incompatible with the layer: expected shape=(None, 1, 23), found shape=(None, 23)
The error persists if tf.expand_dims(x_train, axis=-1) is omitted in test_model.fit() while fitting the Sequential model.
I guess this has something to do with the layer inputs somehow being changed during execution. I have tried using test_model.reset_states() and running
keras.backend.clear_session()
del test_model
in a separate code cell, but only forcibly killing the Colab runtime seems to work:
import os
os.kill(os.getpid(), 9)
What could cause the layer inputs to change midway through the program run?
EDIT: I got the same error when I tried running the cells on Jupyter Notebook on my PC rather than on Colab.
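Note: one hygiene pattern worth trying here, offered as an assumption rather than a confirmed fix, is to clear the Keras session and rebuild the layers inside a function, so every run of the cell starts from fresh layer objects rather than from state left over by the previous run:
import tensorflow as tf
from tensorflow import keras

def build_model(input_shape, hidden_neurons=50):
    keras.backend.clear_session()  # drop graph state from earlier cell runs
    model = keras.Sequential([
        keras.layers.LSTM(hidden_neurons, input_shape=input_shape, dropout=0.05),
        keras.layers.Dense(1),
    ])
    model.compile(optimizer='adam', loss=keras.losses.MeanAbsoluteError())
    return model

# x_train has shape (500, 1, 23), as in the question
test_model = build_model((x_train.shape[1], x_train.shape[2]))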

ValueError: Supported target types are: ('binary', 'multiclass'). Got 'continuous' instead.

I run the following code:
seed = 7
np.random.seed(seed)
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
for train, test in kfold.split(X, Y):
    model = Sequential()
    model.add(Dense(64, input_dim=12, activation='relu'))
    model.add(Dense(1))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    model.fit(X[train], Y[train], epochs=10, verbose=1)
And got the following error:
ValueError Traceback (most recent call last)
in ()
6 lst_accu_stratified = []
7
----> 8 for train_index, test_index in skf.split(X, Y):
9 x_train_fold, x_test_fold = X_scaled[train_index], X_scaled[test_index]
10 y_train_fold, y_test_fold = Y[train_index], Y[test_index]
3 frames
/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_split.py
in _make_test_folds(self, X, y)
644 raise ValueError(
645 'Supported target types are: {}. Got {!r} instead.'.format(
--> 646 allowed_target_types, type_of_target_y))
647
648 y = column_or_1d(y)
ValueError: Supported target types are: ('binary', 'multiclass'). Got
'continuous' instead.
It doesn't make sense that this model won't accept continuous data. What's wrong?
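Note: StratifiedKFold stratifies folds by class label, so scikit-learn only accepts binary or multiclass targets for it; a continuous regression target like this one needs plain KFold instead. A minimal sketch of the swap:
from sklearn.model_selection import KFold

# KFold does not stratify, so continuous targets are fine
kfold = KFold(n_splits=5, shuffle=True, random_state=seed)
for train, test in kfold.split(X, Y):
    ...  # build and fit the model as before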

'KerasClassifier' object has no attribute 'loss'

I am doing churn prediction using Keras. I have used ColumnTransformer from scikit-learn. My code is:
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
def keras_classifier_wrapper():
    classifier = Sequential()
    classifier.add(Dense(9, input_dim=13, activation='relu'))
    classifier.add(Dense(8, activation='relu'))
    classifier.add(Dense(1, activation='sigmoid'))
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return clf

clf = KerasClassifier(keras_classifier_wrapper, epochs=20, batch_size=50, verbose=0)

categorical_pipe = Pipeline([
    ('onehot', OneHotEncoder(handle_unknown='ignore'))
])
numerical_pipe = Pipeline([
    ('imputer', SimpleImputer(strategy='median'))
])
preprocessing = ColumnTransformer(
    [('cat', categorical_pipe, cat_var1),
     ('num', numerical_pipe, num_var1)])

model3 = Pipeline([
    ('preprocess', preprocessing),
    ('keras_clf', clf)
])
model3.fit(X_train, y_train)
But it shows an error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-162-1f0472b386ae> in <module>()
----> 1 model3.fit(X_train, y_train)
2 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/wrappers/scikit_learn.py in fit(self, x, y, **kwargs)
157 self.model = self.build_fn(**self.filter_sk_params(self.build_fn))
158
--> 159 if (losses.is_categorical_crossentropy(self.model.loss) and
160 len(y.shape) != 2):
161 y = to_categorical(y)
AttributeError: 'KerasClassifier' object has no attribute 'loss'
Can you please tell me why this error occurs and how to solve it?
Thanks in advance.
The problem is in your keras_classifier_wrapper function:
def keras_classifier_wrapper():
    classifier = Sequential()
    classifier.add(Dense(9, input_dim=13, activation='relu'))
    classifier.add(Dense(8, activation='relu'))
    classifier.add(Dense(1, activation='sigmoid'))
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return clf  # should be: return classifier
You are trying to return clf, but no clf exists inside the function; it is only defined afterwards. Return classifier instead and it will work.

Dealing with sparse data on TensorFlow 2.2.0

I'm trying to make a face recognition app using insightface. I wrote this code on TensorFlow 2.1.0 and Keras 2.3.1 and it worked well, but due to some issues I had to migrate to TensorFlow 2.2.0 and Keras 2.4.3. I understand that the problem is my embeddings: they are sparse, but in a meaningful way. How can I avoid the sparse-data issue without changing the meaningfulness of my embeddings? From the error message ("Consider casting elements to a supported type."), I understand that TensorFlow can't convert my np.array to a tensor because it is sparse.
What I tried:
These aren't the exact commands, but they should give you a notion of what I tried: np.array(data["embeddings"]).todense(), csr_matrix(data["embeddings"]), and tf.convert_to_tensor(data["embeddings"]). I also tried to follow along with this, but couldn't get model.fit_generator to work.
>>> print(type(embeddings))
<class 'numpy.ndarray'>
>>> print(embeddings.shape)
(49, 512)
>>> print(embeddings)
[[ 0.02751185 0.0143353 0.0324492 ... -0.00347222 0.0154978
-0.01304669]
[ 0.09154768 -0.04196533 0.01197386 ... -0.08363352 0.03335601
0.01748604]
[ 0.00182035 -0.00307933 0.00386595 ... -0.04442558 0.04434329
0.06080627]
...
[-0.01564891 -0.01510727 0.0345119 ... -0.01690779 -0.00816008
0.08056415]
[-0.00543963 -0.03811216 -0.01148985 ... -0.05366111 0.07108331
-0.00186215]
[ 0.00627459 -0.04221528 0.00426272 ... 0.02838095 0.02116473
0.00491964]]
This is my code:
class SoftMax():
    def __init__(self, input_shape, num_classes):
        self.input_shape = input_shape
        self.num_classes = num_classes

    def build(self):
        # create model
        model = Sequential()
        # add model layers
        model.add(Dense(1024, activation='relu', input_shape=self.input_shape))
        model.add(Dropout(0.5))
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.num_classes, activation='softmax'))
        # loss and optimizer
        optimizer = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
        model.compile(loss=categorical_crossentropy,
                      optimizer=optimizer,
                      metrics=['accuracy'])
        return model

def make_model(args, classifier=SoftMax):
    # Load the face embeddings
    data = pickle.loads(open(args.embeddings, "rb").read())
    num_classes = len(np.unique(data["names"]))
    ct = ColumnTransformer([('myName', OneHotEncoder(), [0])])
    labels = np.array(data["names"]).reshape(-1, 1)
    labels = ct.fit_transform(labels)
    embeddings = np.array(data["embeddings"])
    # Initialize Softmax training model arguments
    BATCH_SIZE = 32
    EPOCHS = 32
    input_shape = embeddings.shape[1]
    # Build classifier
    init_classifier = classifier(input_shape=(input_shape,), num_classes=num_classes)
    model = init_classifier.build()
    # Create KFold
    cv = KFold(n_splits=5, random_state=None, shuffle=True)
    history = {'acc': [], 'val_acc': [], 'loss': [], 'val_loss': []}
    # Train
    for train_idx, valid_idx in cv.split(embeddings):
        X_train, X_val, y_train, y_val = embeddings[train_idx], embeddings[valid_idx], labels[train_idx], labels[valid_idx]
        his = model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=1, validation_data=(X_val, y_val))
    # write the face recognition model to output
    model.save(args.mymodel)
    f = open(args.le, "wb")
    f.write(pickle.dumps(LabelEncoder()))
    f.close()
Error:
TypeError: Failed to convert object of type <class 'tensorflow.python.framework.sparse_tensor.SparseTensor'> to Tensor. Contents: SparseTensor(indices=Tensor("DeserializeSparse:0", shape=(None, 2), dtype=int64), values=Tensor("DeserializeSparse:1", shape=(None,), dtype=float32), dense_shape=Tensor("stack:0", shape=(2,), dtype=int64)). Consider casting elements to a supported type.
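Note: a plausible cause, offered as an assumption, is that OneHotEncoder returns a scipy sparse matrix by default, and feeding slices of it to model.fit is what produces the SparseTensor conversion error. Two ways to densify the labels (the 'myName' transformer label is a placeholder, as above):
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

# Option 1: ask the encoder for dense output up front
ct = ColumnTransformer([('myName', OneHotEncoder(sparse=False), [0])])

# Option 2: densify after the fact, keeping the existing transformer
labels = ct.fit_transform(labels)
if hasattr(labels, "toarray"):
    labels = labels.toarray()  # scipy sparse matrix -> dense ndarray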

Why does Keras tell me "ValueError: setting an array element with a sequence." despite having all arrays as numpy arrays?

I am trying to train a 2D neural network using Keras. I get a weird error message, "ValueError: setting an array element with a sequence.", when I call model.fit in Keras. Specifically, the error implies that my tensor_train_labels is a sequence instead of an array. But my labels are indeed NumPy arrays (not a sequence), so I am not sure why Keras complains about this.
I am following this tutorial for building my network.
tensor_train_data.shape
# TensorShape([Dimension(209), Dimension(64), Dimension(64), Dimension(3)])
tensor_test_data.shape
# TensorShape([Dimension(50), Dimension(64), Dimension(64), Dimension(3)])
tensor_train_labels = tf.reshape(tensor_train_labels, [209, 1])
tensor_test_labels = tf.reshape(tensor_test_labels, [50, 1])
batch_size = 10
epochs = 8
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu',
                                 input_shape=(64, 64, 3)))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(lr=0.0001, decay=1e-6),
              metrics=['accuracy'])
model.fit(tensor_train_data / 255.0,
          tf.keras.utils.to_categorical(tensor_train_labels),
          batch_size=batch_size,
          shuffle=True,
          epochs=epochs,
          validation_data=(tensor_test_data / 255.0,
                           tf.keras.utils.to_categorical(tensor_test_labels)))
# note: evaluate on the test data, not the labels
scores = model.evaluate(tensor_test_data / 255.0,
                        tf.keras.utils.to_categorical(tensor_test_labels))
print('Loss: %.3f' % scores[0])
print('Accuracy: %.3f' % scores[1])
The error:
ValueError Traceback (most recent call last)
<ipython-input-224-80431a1b3e79> in <module>
1 model.compile(loss='categorical_crossentropy', optimizer = tf.keras.optimizers.Adam(lr=0.0001, decay=1e-6), metrics=['accuracy'])
----> 2 model.fit(tensor_train_data/255.0, tf.keras.utils.to_categorical(tensor_train_labels),
3 batch_size = batch_size,
4 shuffle = True,
5 epochs = epochs,
~\AppData\Local\conda\conda\envs\deeplearning\lib\site-packages\tensorflow\python\keras\utils\np_utils.py in to_categorical(y, num_classes)
37 last.
38 """
---> 39 y = np.array(y, dtype='int')
40 input_shape = y.shape
41 if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
ValueError: setting an array element with a sequence.
The likely cause is that you have arrays of different sizes when you try to convert them into a NumPy array. Possible solution: https://stackoverflow.com/a/49617425/8185479
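A minimal reproduction of that failure mode, assuming rows of unequal length are the cause: to_categorical calls np.array(y, dtype='int') internally (see the traceback above), and that call raises exactly this error on ragged input:
import numpy as np

ragged = [[1, 2, 3], [4, 5]]   # rows of unequal length
np.array(ragged, dtype='int')  # ValueError: setting an array element with a sequence.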
