UnboundLocalError: local variable 'class_name' referenced before assignment - theano

I am new to Keras and Theano, and I am now trying to implement my own loss function in Keras, but this error showed up. I thought the problem lies in my loss function, but I have no idea how to fix it. Could someone help me figure this out?
import theano
import theano.tensor as T
def cost_estimation(y_true, y_pred):
    for k in range(10):
        d=T.log(1+T.exp((int(bool(y_true[k]==min(y_true)))*2-1)*(y_pred[k]-y_true[k])))
        cost=cost+d
    return d
The keras layers:
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='relu'))
#loss=keras.losses.categorical_crossentropy,
model.compile(loss='cost_estimation',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
          verbose=1, validation_data=(x_test, y_test))
Here is the error:
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-6-d63101c47c94> in <module>()
130 model.compile(loss='cost_estimation',
131 optimizer='adam',
--> 132 metrics=['accuracy'])
133
134 model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
/usr/local/lib/python2.7/dist-packages/keras/models.pyc in compile(self, optimizer, loss, metrics, sample_weight_mode, **kwargs)
764 metrics=metrics,
765 sample_weight_mode=sample_weight_mode,
--> 766 **kwargs)
767 self.optimizer = self.model.optimizer
768 self.loss = self.model.loss
/usr/local/lib/python2.7/dist-packages/keras/engine/training.pyc in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, **kwargs)
738 loss_functions = [losses.get(l) for l in loss]
739 else:
--> 740 loss_function = losses.get(loss)
741 loss_functions = [loss_function for _ in range(len(self.outputs))]
742 self.loss_functions = loss_functions
/usr/local/lib/python2.7/dist-packages/keras/losses.pyc in get(identifier)
88 if isinstance(identifier, six.string_types):
89 identifier = str(identifier)
---> 90 return deserialize(identifier)
91 elif callable(identifier):
92 return identifier
/usr/local/lib/python2.7/dist-packages/keras/losses.pyc in deserialize(name, custom_objects)
80 module_objects=globals(),
81 custom_objects=custom_objects,
---> 82 printable_module_name='loss function')
83
84
/usr/local/lib/python2.7/dist-packages/keras/utils/generic_utils.pyc in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
155 if fn is None:
156 raise ValueError('Unknown ' + printable_module_name,
--> 157 ':' + class_name)
158 return fn
159 else:
UnboundLocalError: local variable 'class_name' referenced before assignment

This is a shortcoming in the Keras codebase: when a string that does not name a built-in loss is passed to the loss parameter, deserialize_keras_object takes the string branch, never assigns class_name, and then references it while building the "Unknown loss function" message, so you get this UnboundLocalError instead of a readable ValueError. To fix this, pass the function cost_estimation itself to loss; that way you avoid that branch of code entirely.
model.compile(optimizer='rmsprop',
              loss=cost_estimation,  # the function itself, not the string 'cost_estimation'
              metrics=['accuracy'])
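Note that once the compile error is out of the way, the loss function itself still needs work: cost is read before it is ever assigned, the function returns the last term d instead of the accumulated cost, and Python's bool() and min() cannot be applied to symbolic Theano tensors. A minimal corrected sketch, assuming the intended logic is a sign of +1 when y_true[k] is the minimum and -1 otherwise:
import theano.tensor as T

def cost_estimation(y_true, y_pred):
    cost = 0.  # initialize the accumulator before the loop
    for k in range(10):
        # T.eq/T.switch/T.min are the symbolic counterparts of ==, bool(), and min()
        sign = T.switch(T.eq(y_true[k], T.min(y_true)), 1., -1.)
        cost = cost + T.log(1. + T.exp(sign * (y_pred[k] - y_true[k])))
    return cost  # return the accumulated cost, not the last term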

Related

Extracting embeddings from a keras neural network's intermediate layer

I have this neural network based on this
words_input = Input(shape=(500,),dtype='int32',name='words_input')
words = Embedding(input_dim=wordEmbeddings.shape[0], output_dim=wordEmbeddings.shape[1], weights=[wordEmbeddings], trainable=False)(words_input)
conv_1 = Conv1D(filters=100, kernel_size=10, strides=2, activation='relu')(words)
avgpool_1 = AveragePooling1D(pool_size=10, strides=10)(conv_1)
b_lstm = Bidirectional(LSTM(200, activation='tanh', return_sequences=False))(avgpool_1)
dense_1 = Dense(128, activation='relu')(b_lstm)
dropout = Dropout(0.1)(dense_1)
dense_2 = Dense(5, activation='softmax')(dropout)
sgd = keras.optimizers.Adam(lr=0.0001)
model = Model(inputs=words_input, outputs=dense_2)
extractor = Model(inputs=model.inputs, outputs=model.get_layer(words).output)
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['acc'])
model.summary()
I added the line
extractor = Model(inputs=model.inputs, outputs=model.get_layer(words).output)
because I want to extract the word2vec embeddings of the words from the inputs, like they show here, but I'm getting this error:
TypeError Traceback (most recent call last)
/tmp/ipykernel_6732/2108362002.py in <module>
11 sgd = keras.optimizers.Adam(lr=0.0001)
12 model = Model(inputs=words_input, outputs=dense_2)
---> 13 extractor = Model(inputs=model.inputs, outputs=model.get_layer(words).output)
14 model.compile(loss='mean_squared_error', optimizer='adam', metrics=['acc'])
15 model.summary()
~/.local/lib/python3.8/site-packages/keras/engine/training.py in get_layer(self, name, index)
3271 if name is not None:
3272 for layer in self.layers:
-> 3273 if layer.name == name:
3274 return layer
3275 raise ValueError(
~/.local/lib/python3.8/site-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)
151 except Exception as e:
152 filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153 raise e.with_traceback(filtered_tb) from None
154 finally:
155 del filtered_tb
~/.local/lib/python3.8/site-packages/keras/layers/core/tf_op_layer.py in handle(self, op, args, kwargs)
117 for x in tf.nest.flatten([args, kwargs])
118 ):
--> 119 return TFOpLambda(op)(*args, **kwargs)
120 else:
121 return self.NOT_SUPPORTED
~/.local/lib/python3.8/site-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
TypeError: Exception encountered when calling layer "tf.__operators__.eq" (type TFOpLambda).
Expected float32 passed to parameter 'y' of op 'Equal', got 'words_input' of type 'str' instead. Error: Expected float32, but got words_input of type 'str'.
Call arguments received by layer "tf.__operators__.eq" (type TFOpLambda):
• self=tf.Tensor(shape=(None, 500, 300), dtype=float32)
• other='words_input'
Any idea what I am doing wrong? Why is it passing the name of the first layer, "words_input", to the parameter y? That is what I assume it is doing.
You are not passing a layer name to the model's get_layer: words is the output tensor of the Embedding layer, not its name, and get_layer expects a name string (or an index). Give the Embedding layer a name and look it up by that string, as in this code:
tf.keras.backend.clear_session()
words_input = keras.Input(shape=(500,),dtype='int32',name='words_input')
words = keras.layers.Embedding(input_dim=wordEmbeddings.shape[0], output_dim=wordEmbeddings.shape[1], weights=[wordEmbeddings], trainable=False, name='words')(words_input)
conv_1 = keras.layers.Conv1D(filters=100, kernel_size=10, strides=2, activation='relu')(words)
avgpool_1 = keras.layers.AveragePooling1D(pool_size=10, strides=10)(conv_1)
b_lstm = keras.layers.Bidirectional(keras.layers.LSTM(200, activation='tanh', return_sequences=False))(avgpool_1)
dense_1 = keras.layers.Dense(128, activation='relu')(b_lstm)
dropout = keras.layers.Dropout(0.1)(dense_1)
dense_2 = keras.layers.Dense(5, activation='softmax')(dropout)
sgd = keras.optimizers.Adam(learning_rate=0.0001)
model = keras.Model(inputs=words_input, outputs=dense_2)
extractor = keras.Model(inputs=model.inputs, outputs=model.get_layer('words').output)
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['acc'])
model.summary()
extractor.summary()
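With the extractor built, the embeddings for a batch of inputs can be read out with predict; sample_batch below is an assumed placeholder for an int32 array of shape (n, 500):
embeddings = extractor.predict(sample_batch)
print(embeddings.shape)  # (n, 500, 300): one 300-dimensional vector per token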

assertion failed: [Condition x == y did not hold element-wise:]

I have built a BiLSTM model with an attention layer for a sentence classification task, but training fails with an assertion error caused by a shape mismatch between my targets and the model's output. The attention layer code is here and the error is below the code.
class attention(Layer):
    def __init__(self, return_sequences=True):
        self.return_sequences = return_sequences
        super(attention, self).__init__()

    def build(self, input_shape):
        self.W = self.add_weight(name="att_weight", shape=(input_shape[-1], 1),
                                 initializer="normal")
        self.b = self.add_weight(name="att_bias", shape=(input_shape[1], 1),
                                 initializer="zeros")
        super(attention, self).build(input_shape)

    def call(self, x):
        e = K.tanh(K.dot(x, self.W) + self.b)
        a = K.softmax(e, axis=1)
        output = x * a
        if self.return_sequences:
            return output
        return K.sum(output, axis=1)
When I train the model with the attention layer included, it gives an error that an assertion failed.
Epoch 1/10
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-45-ac310033130c> in <module>()
1 #Early stopping, Adam, dropout = 0.3, 0.5, 0.5
2 #history = model.fit(sequences_matrix, Y_train, batch_size=256, epochs=5, validation_split=0.1, callbacks=[EarlyStopping(monitor='val_loss', min_delta=0.0001)])
----> 3 history = model.fit(sequences_matrix, Y_train, batch_size=32, epochs=10, validation_split=0.1)
8 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
58 ctx.ensure_initialized()
59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
62 if name is not None:
InvalidArgumentError: assertion failed: [Condition x == y did not hold element-wise:] [x (sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/Shape_1:0) = ] [32 1] [y (sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/strided_slice:0) = ] [32 758]
[[node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/assert_equal_1/Assert/Assert (defined at <ipython-input-45-ac310033130c>:3) ]] [Op:__inference_train_function_19854]
Function call stack:
train_function
My model is
model = Sequential()
model.add(Embedding(max_words, 768, input_length=max_len, weights=[embedding]))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(SpatialDropout1D(0.1))
model.add(Conv1D(16, kernel_size=11, activation='relu'))
model.add(Bidirectional(LSTM(16, return_sequences=True)))
model.add(attention(return_sequences=True))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(Dropout(0.2))
model.add(Dense(2, activation='softmax', use_bias=True,
                kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4),
                bias_regularizer=regularizers.l2(1e-4),
                activity_regularizer=regularizers.l2(1e-5)))
model.summary()
Shape of Y_train is
max_words = 48369
max_len = 768
tok = Tokenizer(num_words = max_words)
tok.fit_on_texts(X_train)
sequences = tok.texts_to_sequences(X_train)
sequences_matrix = sequence.pad_sequences(sequences, maxlen = max_len)
Y_train = np.array(Y_train)
Y_test = np.array(Y_test)
print(Y_train.shape)
(43532, 1)
Your target is 2D, with shape (43532, 1), but with return_sequences=True the attention layer keeps the time axis, so the model ends in a 3D output; that is exactly the mismatch the assertion reports ([32 1] vs [32 758]). Set return_sequences=False in the last attention layer so that it returns output in 2D format.
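For example, replacing the attention line in the model above:
model.add(attention(return_sequences=False))  # K.sum over the time axis, output shape (batch, features)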
Alternatively, add a Flatten layer before the Dropout to collapse the 3D output to 2D, and then execute:
model.add(Flatten())

Explaining LSTM keras with Eli5 library

I'm trying to use Eli5 for explaining an LSTM keras model for time series prediction. The keras model receives as input an array with shape (nsamples, timesteps, nfeatures).
This is my code:
def baseline_model():
    model = Sequential()
    model.add(LSTM(32, input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(Dropout(0.2))
    model.add(Dense(1))
    model.compile(loss='logcosh', optimizer='adam')
    return model
from keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
import eli5
from eli5.sklearn import PermutationImportance
my_model = KerasRegressor(build_fn= baseline_model, nb_epoch= 30, batch_size= 32, verbose= False)
history = my_model.fit(X_train, y_train)
So far, everything is OK. The problem is that executing the following line launches an error:
# X_train has a shape equal to (nsamples, timesteps, nfeatures) and y_train has a shape (nsamples)
perm = PermutationImportance(my_model, random_state=1).fit(X_train, y_train)
Error:
ValueError Traceback (most recent call last)
in ()
2 d2_train_dataset = X_train.reshape((nsamples, timesteps * features))
3
----> 4 perm = PermutationImportance(my_model, random_state=1).fit(X_train, y_train)
5 #eli5.show_weights(perm, feature_names = X.columns.tolist())
~/anaconda3/lib/python3.6/site-packages/eli5/sklearn/permutation_importance.py in fit(self, X, y, groups, **fit_params)
183 self.estimator_.fit(X, y, **fit_params)
184
--> 185 X = check_array(X)
186
187 if self.cv not in (None, "prefit"):
~/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py in check_array(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, warn_on_dtype, estimator)
568 if not allow_nd and array.ndim >= 3:
569 raise ValueError("Found array with dim %d. %s expected <= 2."
--> 570 % (array.ndim, estimator_name))
571 if force_all_finite:
572 _assert_all_finite(array,
ValueError: Found array with dim 3. Estimator expected <= 2.
What can I do to fix this error? How can I use eli5 with my LSTM Keras Model?
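There is no answer in the thread here, but a common workaround (a sketch under assumptions, not a fix confirmed by the thread) is to hand eli5 a 2D view of the data, since PermutationImportance validates its input with sklearn's check_array, which rejects arrays with more than two dimensions, and to restore the 3D shape inside the model with a Reshape layer:
# Sketch: give eli5 2D data and reshape back to 3D inside the model.
# timesteps and nfeatures are assumed to match X_train's original shape.
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout, Reshape
from keras.wrappers.scikit_learn import KerasRegressor
from eli5.sklearn import PermutationImportance

def baseline_model_2d():
    model = Sequential()
    model.add(Reshape((timesteps, nfeatures), input_shape=(timesteps * nfeatures,)))
    model.add(LSTM(32))
    model.add(Dropout(0.2))
    model.add(Dense(1))
    model.compile(loss='logcosh', optimizer='adam')
    return model

X_train_2d = X_train.reshape((X_train.shape[0], -1))  # (nsamples, timesteps * nfeatures)
my_model = KerasRegressor(build_fn=baseline_model_2d, epochs=30, batch_size=32, verbose=False)
my_model.fit(X_train_2d, y_train)
perm = PermutationImportance(my_model, random_state=1).fit(X_train_2d, y_train)
Note that permuting one flattened column shuffles a single (timestep, feature) cell, so the resulting importances are per cell rather than per feature.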

Input 0 is incompatible with layer flatten_5: expected min_ndim=3, found ndim=2

I am trying to fine-tune VGG16 neural network, here is the code:
vgg16_model = VGG16(weights="imagenet", include_top="false", input_shape=(224,224,3))
model = Sequential()
model.add(vgg16_model)
#add fully connected layer:
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
I am getting this error:
ValueError Traceback (most recent call last)
in
2 model.add(vgg16_model)
3 #add fully connected layer:
----> 4 model.add(Flatten())
5 model.add(Dense(256, activation='relu'))
6 model.add(Dropout(0.5))
/usr/local/anaconda/lib/python3.6/site-packages/keras/engine/sequential.py in add(self, layer)
179 self.inputs = network.get_source_inputs(self.outputs[0])
180 elif self.outputs:
--> 181 output_tensor = layer(self.outputs[0])
182 if isinstance(output_tensor, list):
183 raise TypeError('All layers in a Sequential model '
/usr/local/anaconda/lib/python3.6/site-packages/keras/engine/base_layer.py in call(self, inputs, **kwargs)
412 # Raise exceptions in case the input is not compatible
413 # with the input_spec specified in the layer constructor.
--> 414 self.assert_input_compatibility(inputs)
415
416 # Collect input shapes to build layer.
/usr/local/anaconda/lib/python3.6/site-packages/keras/engine/base_layer.py in assert_input_compatibility(self, inputs)
325 self.name + ': expected min_ndim=' +
326 str(spec.min_ndim) + ', found ndim=' +
--> 327 str(K.ndim(x)))
328 # Check dtype.
329 if spec.dtype is not None:
ValueError: Input 0 is incompatible with layer flatten_5: expected min_ndim=3, found ndim=2
I tried many suggested solutions but none of them could solve my problem. How can I solve this?
The official Keras documentation shows this pattern under "Fine-tune InceptionV3 on a new set of classes":
from keras.models import Model
vgg16_model = VGG16(weights="imagenet", include_top=False, input_shape=(224,224,3))  # boolean False, not "false"
x = vgg16_model.output
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(3, activation='softmax')(x)
model = Model(inputs=vgg16_model.input, outputs=predictions)  # vgg16_model.input, not base_model.input
You have an error in include_top="false": a non-empty string is truthy in Python, so the classifier top is kept and the base model outputs a 2D prediction tensor, which is why Flatten complains about ndim=2. Pass the boolean False instead:
vgg16_model = VGG16(weights="imagenet", include_top=False, input_shape=(224,224,3))
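With that single change, the Sequential version from the question works as intended; a sketch for completeness:
# With include_top=False the base outputs a 4D feature map, so Flatten is valid
vgg16_model = VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
model = Sequential()
model.add(vgg16_model)
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))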

sentiment classification using keras

I am very new to deep learning classification. I have review data with labels (pos, neg) and I'm trying to classify the data using Keras. Here is my code:
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
sentences_train, sentences_test, y_train, y_test = train_test_split(
    review_list2, label_list, test_size=0.25, random_state=1000)
vectorizer = CountVectorizer()
vectorizer.fit(sentences_train)
X_train = vectorizer.transform(sentences_train)
X_test = vectorizer.transform(sentences_test)
#build the model
from keras.models import Sequential
from keras import layers
input_dim = X_train.shape[1]
model = Sequential()
model.add(layers.Dense(8, input_dim=input_dim, activation='relu'))
model.add(layers.Dense(2, activation='softmax'))
model.compile(loss='binary_crossentropy',
              optimizer='adam', metrics=['accuracy'])
model.summary()
history = model.fit(X_train, y_train, epochs=100, verbose=False, validation_data=(X_test, y_test), batch_size=10)
I got an error:
AttributeError Traceback (most recent call last)
<ipython-input-52-34c39f53e335> in <module>
----> 1 history = model.fit(X_train, y_train, epochs=100, verbose=False, validation_data=(X_test, y_test), batch_size=10)
d:\py-ver35\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
950 sample_weight=sample_weight,
951 class_weight=class_weight,
--> 952 batch_size=batch_size)
953 # Prepare validation data.
954 do_validation = False
d:\py-ver35\lib\site-packages\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)
787 feed_output_shapes,
788 check_batch_axis=False, # Don't enforce the batch size.
--> 789 exception_prefix='target')
790
791 # Generate sample-wise weight values given the `sample_weight` and
d:\py-ver35\lib\site-packages\keras\engine\training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
90 data = data.values if data.__class__.__name__ == 'DataFrame' else data
91 data = [data]
---> 92 data = [standardize_single_array(x) for x in data]
93
94 if len(data) != len(names):
d:\py-ver35\lib\site-packages\keras\engine\training_utils.py in <listcomp>(.0)
90 data = data.values if data.__class__.__name__ == 'DataFrame' else data
91 data = [data]
---> 92 data = [standardize_single_array(x) for x in data]
93
94 if len(data) != len(names):
d:\py-ver35\lib\site-packages\keras\engine\training_utils.py in standardize_single_array(x)
25 'Got tensor with shape: %s' % str(shape))
26 return x
---> 27 elif x.ndim == 1:
28 x = np.expand_dims(x, 1)
29 return x
AttributeError: 'str' object has no attribute 'ndim'
I have tried every solution mentioned in relation to this error but still cannot fix it. Any help? Thanks in advance.
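The thread carries no answer here, but the traceback points at the labels: y_train is a sequence of 'pos'/'neg' strings, and Keras expects numeric arrays, hence 'str' object has no attribute 'ndim'. A minimal sketch of a fix (assuming label_list holds the raw string labels) is to encode the labels before fitting:
from sklearn.preprocessing import LabelEncoder
from keras.utils import to_categorical

# Map 'pos'/'neg' to integers, then one-hot encode to match Dense(2, softmax)
encoder = LabelEncoder()
y_train_enc = to_categorical(encoder.fit_transform(y_train))  # shape (n, 2)
y_test_enc = to_categorical(encoder.transform(y_test))

history = model.fit(X_train, y_train_enc, epochs=100, verbose=False,
                    validation_data=(X_test, y_test_enc), batch_size=10)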
