Keras TimeDistributed Conv1D Error

This is my code:
cnn_input = Input(shape=(cnn_max_length,))
emb_output = Embedding(num_chars + 1, output_dim=32, input_length=cnn_max_length, trainable=True)(cnn_input)
output = TimeDistributed(Convolution1D(filters=128, kernel_size=4, activation='relu'))(emb_output)
I want to train a character-level CNN sequence labeler and I keep receiving this error:
Traceback (most recent call last):
File "word_lstm_char_cnn.py", line 24, in <module>
output = kl.TimeDistributed(kl.Convolution1D(filters=128, kernel_size=4, activation='relu'))(emb_output)
File "/home/user/anaconda3/envs/thesisenv/lib/python3.6/site-packages/keras/engine/base_layer.py", line 457, in __call__
output = self.call(inputs, **kwargs)
es/keras/layers/wrappers.py", line 248, in call
y = self.layer.call(inputs, **kwargs)
File "/home/user/anaconda3/envs/thesisenv/lib/python3.6/site-packages/keras/layers/convolutional.py", line 160, in call
dilation_rate=self.dilation_rate[0])
File "/home/user/anaconda3/envs/thesisenv/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 3526, in conv1d
data_format=tf_data_format)
File "/home/user/anaconda3/envs/thesisenv/lib/python3.6/site-packages/tensorflow/python/ops/nn_ops.py", line 779, in convolution
data_format=data_format)
File "/home/user/anaconda3/envs/thesisenv/lib/python3.6/site-packages/tensorflow/python/ops/nn_ops.py", line 828, in __init__
input_channels_dim = input_shape[num_spatial_dims + 1]
File "/home/user/anaconda3/envs/thesisenv/lib/python3.6/site-packages/tensorflow/python/framework/tensor_shape.py", line 615, in __getitem__
return self._dims[key]
IndexError: list index out of range
The input is 3D, as it should be. If I change the input shape I receive this error:
ValueError: Input 0 is incompatible with layer time_distributed_1: expected ndim=3, found ndim=4

Recommended solution:
There is no need to use TimeDistributed in this case. You can fix the issue with the following piece of code:
output = Convolution1D(filters=128, kernel_size=4, activation='relu')(emb_output)
If you still want to use TimeDistributed, you can do something like:
output = TimeDistributed(Dense(100,activation='relu'))(emb_output)
Not recommended: if you still want to wrap Conv1D in TimeDistributed, note what the docs say:
This wrapper applies a layer to every temporal slice of an input.
The input to TimeDistributed is something like batch_size * seq_len * emb_size, so the wrapper hands Conv1D one temporal slice at a time; Conv1D needs two dimensions (steps and channels) per slice but finds only one.
You can fix the problem by adding one dimension to your sequences:
TimeDistributed(Conv1D(100, 1))(keras.backend.reshape(emb, [-1, sequence_len, embedding_dim, 1]))
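To make the shapes concrete, here is a minimal runnable sketch of the recommended fix (the sizes are placeholder values chosen for illustration, not taken from the question):
from keras.layers import Input, Embedding, Convolution1D
from keras.models import Model

cnn_max_length, num_chars = 50, 100                  # placeholder sizes
cnn_input = Input(shape=(cnn_max_length,))           # (batch, 50)
emb_output = Embedding(num_chars + 1, output_dim=32,
                       input_length=cnn_max_length)(cnn_input)   # (batch, 50, 32)
output = Convolution1D(filters=128, kernel_size=4,
                       activation='relu')(emb_output)            # (batch, 47, 128)
Model(cnn_input, output).summary()
Conv1D already slides along the time axis of a 3D tensor, which is why no TimeDistributed wrapper is needed here.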

Related

Invalid placeholder in tensorflow

I am trying to write a custom loss function as follows.
def vgg16_feature_model(flayers, weights='imagenet'):
    """
    Feature extraction VGG16 model.

    # Arguments
        flayers: list of strings with names of layers to get the features for.
            The length of `flayers` should be > 1, otherwise the output shape
            is one axis less.
        weights: either "imagenet" or path to the file with weights.
    # Returns
        features_model: keras.models.Model instance to extract the features.
    # Raises
        AssertionError: in case `flayers` is not a list.
        AssertionError: in case the length of `flayers` is < 2.
    """
    assert isinstance(flayers, list), "First argument 'flayers' must be a list"
    assert len(flayers) > 1, "Length of 'flayers' must be > 1."
    base_model = VGG16(include_top=False, weights=weights)
    vgg16_outputs = [base_model.get_layer(flayers[i]).output for i in range(len(flayers))]
    features_model = Model(inputs=[base_model.input], outputs=vgg16_outputs, name='vgg16_features')
    features_model.trainable = False
    features_model.compile(loss='mse', optimizer='adam')
    return features_model
# Losses:
# -------
def total_loss(mask, vgg16_weights='imagenet'):
    """
    Total loss defined in Eq 7 of Liu et al 2018 with:
        y_true = I_gt,
        y_pred = I_out,
        y_comp = I_comp.
    """
    vgg16_lnames = ['block1_pool', 'block2_pool', 'block3_pool']
    vgg_model = vgg16_feature_model(vgg16_lnames, weights=vgg16_weights)
    def loss(y_true, y_pred):
        mask_inv = 1 - mask
        y_comp = mask * y_true + mask_inv * y_pred
        print("y_pred", y_pred)
        print(y_comp)
        input()
        vgg_out = vgg_model(y_pred)
        vgg_gt = vgg_model(y_true)
        print("abc-----------------------------------")
        vgg_comp = vgg_model(y_comp)
        print("abc")
        l_valid = loss_per_pixel(y_true, y_pred, mask)
        l_hole = loss_per_pixel(y_true, y_pred, mask_inv)
        l_perc = loss_perc(vgg_out, vgg_gt, vgg_comp)
        l_style = loss_style(vgg_out, vgg_gt, vgg_comp)
        l_tv = loss_tv(y_comp, mask_inv)
        return l_valid + 6.*l_hole + 0.05*l_perc + 120.*l_style + 0.1*l_tv
    return loss
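The closure is then handed to compile; this is the call (from pconv2d_model.py, as shown in the traceback below) that triggers the error:
model.compile(Adam(lr=lr), loss=total_loss(mask_input, vgg16_weights=vgg16_weights))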
I am getting the following error:
Traceback (most recent call last):
File "inpainter_main.py", line 46, in <module>
model = pconv_model(lr=LR_STAGE1, image_size=IMAGE_SIZE, vgg16_weights=VGG16_WEIGHTS)
File "/home/bitsy-chuck/Downloads/PConv2D-2ndimp/inpainter_utils/pconv2d_model.py", line 118, in pconv_model
model.compile(Adam(lr=lr), loss=total_loss(mask_input, vgg16_weights=vgg16_weights))
File "/home/bitsy-chuck/anaconda3/lib/python3.7/site-packages/tensorflow/python/training/tracking/base.py", line 456, in _method_wrapper
result = method(self, *args, **kwargs)
File "/home/bitsy-chuck/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/training_v1.py", line 446, in compile
self._compile_weights_loss_and_weighted_metrics()
File "/home/bitsy-chuck/anaconda3/lib/python3.7/site-packages/tensorflow/python/training/tracking/base.py", line 456, in _method_wrapper
result = method(self, *args, **kwargs)
File "/home/bitsy-chuck/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/training_v1.py", line 1515, in _compile_weights_loss_and_weighted_metrics
self.total_loss = self._prepare_total_loss(masks)
File "/home/bitsy-chuck/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/training_v1.py", line 1575, in _prepare_total_loss
per_sample_losses = loss_fn.call(y_true, y_pred)
File "/home/bitsy-chuck/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/losses.py", line 246, in call
return self.fn(y_true, y_pred, **self._fn_kwargs)
File "/home/bitsy-chuck/Downloads/PConv2D-2ndimp/inpainter_utils/pconv2d_loss.py", line 58, in loss
vgg_comp = vgg_model(y_comp)
File "/home/bitsy-chuck/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer_v1.py", line 737, in __call__
base_layer_utils.create_keras_history(inputs)
File "/home/bitsy-chuck/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer_utils.py", line 186, in create_keras_history
_, created_layers = _create_keras_history_helper(tensors, set(), [])
File "/home/bitsy-chuck/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer_utils.py", line 249, in _create_keras_history_helper
layer_inputs, processed_ops, created_layers)
File "/home/bitsy-chuck/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer_utils.py", line 246, in _create_keras_history_helper
constants[i] = backend.function([], op_input)([])
File "/home/bitsy-chuck/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/backend.py", line 3632, in __call__
run_metadata=self.run_metadata)
File "/home/bitsy-chuck/anaconda3/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1472, in __call__
run_metadata_ptr)
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'pconv2d_dec_16_target' with dtype float and shape [?,?,?,?]
[[{{node pconv2d_dec_16_target}}]]
At first I thought that y_comp was not correct, but
y_pred ---> Tensor("pconv2d_dec_16/BiasAdd:0", shape=(None, 512, 512, 3), dtype=float32)
y_comp ---> Tensor("loss_1/pconv2d_dec_16_loss/add:0", shape=(None, 512, 512, 3), dtype=float32)
They both look the same to me, so as far as I can tell this should work.
The error occurs at the line vgg_comp = vgg_model(y_comp).
Can anyone also explain why I am getting a placeholder error?
TF version: 1.3
Keras version: 2.2.4
Placeholder errors are usually due to mismatched TensorFlow versions. I had the exact same error, and it was fixed when I installed Keras first and then TensorFlow. Using Anaconda might help, since conda caches package files when you uninstall, so reinstalling is easy and does not require downloading everything again.
There might be some other fix, I believe, but this fixed mine.
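As a quick sanity check before reinstalling, it can help to print the versions that are actually being imported (a debugging aid, not a fix):
import tensorflow as tf
import keras
print("TF:", tf.__version__, "Keras:", keras.__version__)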

ValueError: Layer conv1d was called with an input that isn't a symbolic tensor. All inputs to the layer should be tensors

I built this model and it was working fine.
### Building the Model
input_layer = Embedding(num_words, 300, input_length=35, weights=[embedding_matrix], trainable=True)
conv_blocks = []
filter_sizes = (2, 3, 4)
for fx in filter_sizes:
    conv_layer = Conv1D(100, kernel_size=fx, activation='relu', data_format='channels_first')(input_layer)
    maxpool_layer = MaxPooling1D(pool_size=4)(conv_layer)
    flat_layer = Flatten()(maxpool_layer)
    conv_blocks.append(flat_layer)
# conc_layer = concatenate(conv_blocks, axis=1)
conc_layer = Concatenate(axis=-1)([conv_blocks])
graph = Model(inputs=input_layer, outputs=conc_layer)
model = Sequential()
model.add(graph)
model.add(Dropout(0.2))
model.add(Dense(3, activation='sigmoid'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
I recently reran it and now I'm getting this error:
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/am/embassy/vol/x6/jetbrains/apps/PyCharm-P/ch-0/191.6183.50/helpers/pydev/_pydev_bundle/pydev_umd.py", line 197, in runfile
pydev_imports.execfile(filename, global_vars, local_vars) # execute the script
File "/am/embassy/vol/x6/jetbrains/apps/PyCharm-P/ch-0/191.6183.50/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "/home/kosimadukwe/PycharmProjects/untitled/WordEmb.py", line 128, in <module>
conv_layer= Conv1D(100, kernel_size=fx, activation='relu', data_format='channels_first')(input_layer) #filters=100, kernel_size=3
File "/home/kosimadukwe/PycharmProjects/untitled/venv/lib/python3.7/site-packages/keras/engine/base_layer.py", line 414, in __call__
self.assert_input_compatibility(inputs)
File "/home/kosimadukwe/PycharmProjects/untitled/venv/lib/python3.7/site-packages/keras/engine/base_layer.py", line 285, in assert_input_compatibility
str(inputs) + '. All inputs to the layer '
ValueError: Layer conv1d_1 was called with an input that isn't a symbolic tensor. Received type: <class 'keras.layers.embeddings.Embedding'>. Full input: [<keras.layers.embeddings.Embedding object at 0x7fae61513c18>]. All inputs to the layer should be tensors.
I have checked similar posts here, but none is very similar to mine. I have tried their suggestions, like adding the axis to Concatenate() or using concatenate instead, but nothing changed.
embedding_matrix is a 2D array.
The error is thrown because input_layer is a Layer, not a Tensor.
You are passing the Embedding layer itself as input to Conv1D; you have not provided any input to the Embedding layer.
Change this:
input_layer = Embedding(num_words, 300, input_length=35, weights=[embedding_matrix], trainable=True)
so that the layer is called on an input tensor:
input_layer = Embedding(num_words, 300, input_length=35, weights=[embedding_matrix], trainable=True)(input_tensor)
Also, I think you are trying to concatenate the outputs of the three separate filters. If that is the case, then this part:
conc_layer = Concatenate(axis=-1)([conv_blocks])
graph = Model(inputs=input_layer, outputs=conc_layer)
should come outside the loop.
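Putting both fixes together, here is a sketch of the corrected graph (input_tensor is the new input; num_words and embedding_matrix are from the question; the default channels_last ordering is assumed). Note that Concatenate should receive the list itself, conv_blocks, not a list wrapped in another list:
from keras.layers import Input, Embedding, Conv1D, MaxPooling1D, Flatten, Concatenate
from keras.models import Model

input_tensor = Input(shape=(35,))
emb = Embedding(num_words, 300, input_length=35,
                weights=[embedding_matrix], trainable=True)(input_tensor)
conv_blocks = []
for fx in (2, 3, 4):
    conv = Conv1D(100, kernel_size=fx, activation='relu')(emb)
    pool = MaxPooling1D(pool_size=4)(conv)
    conv_blocks.append(Flatten()(pool))
conc_layer = Concatenate(axis=-1)(conv_blocks)   # the list itself, not [conv_blocks]
graph = Model(inputs=input_tensor, outputs=conc_layer)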

Keras 2.2.4 ERROR: AttributeError: 'NoneType' object has no attribute 'inbound_nodes'

I'm building a new channel-wise operation for my network.
The global average pooling result is multiplied (element-wise) with the original input x.
But when I run the train.py file, it raises errors that I can't understand. Please help!
The error message:
Traceback (most recent call last):
File "E:/githubRemote/train.py", line 49, in <module>
model = init_model()
File "E:/githubRemote/train.py", line 37, in init_model
model = Model(inputs=im_n, outputs=resd)
File "C:\Users\Anaconda3\lib\site-packages\keras\legacy\interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "C:\Users\Anaconda3\lib\site-packages\keras\engine\network.py", line 93, in __init__
self._init_graph_network(*args, **kwargs)
File "C:\Users\Anaconda3\lib\site-packages\keras\engine\network.py", line 231, in _init_graph_network
self.inputs, self.outputs)
File "C:\Users\Anaconda3\lib\site-packages\keras\engine\network.py", line 1366, in _map_graph_network
tensor_index=tensor_index)
File "C:\Users\Anaconda3\lib\site-packages\keras\engine\network.py", line 1353, in build_map
node_index, tensor_index)
File "C:\Users\Anaconda3\lib\site-packages\keras\engine\network.py", line 1353, in build_map
node_index, tensor_index)
File "C:\Users\Anaconda3\lib\site-packages\keras\engine\network.py", line 1353, in build_map
node_index, tensor_index)
[Previous line repeated 3 more times]
File "C:\Users\Anaconda3\lib\site-packages\keras\engine\network.py", line 1325, in build_map
node = layer._inbound_nodes[node_index]
AttributeError: 'NoneType' object has no attribute '_inbound_nodes'
The error comes from the Multiply layer operation.
When I comment out the line net = Multiply()([x, excitation]), it works!
I think Keras considers that this line does not produce a Keras layer, so it ends up as a NoneType.
My code:
def CAlayer(x, channel, reduction=16):
# tensorflow implement
# avg_pool = tflearn.global_avg_pool(inputx)
# conv_1 = slim.conv2d(avg_pool, channel // reduction, 1)
# conv_2 = slim.conv2d(conv_1, channel, 1, activation_fn=None)
# excitation = tf.nn.sigmoid(conv_2)
# keras implementation
avg_pool = GlobalAveragePooling2D()(x)
avg_pool = expand_dims(avg_pool, axis=1)
avg_pool = expand_dims(avg_pool, axis=1)
conv_1 = Conv2D(channel//reduction, 1, activation=None, padding='same')(avg_pool)
conv_1_ac = Activation('relu')(conv_1)
conv_2 = Conv2D(channel, 1, activation=None, padding='same')(conv_1_ac)
excitation = Activation('sigmoid')(conv_2)
--> net = Multiply()([excitation, x])
# print (net.shape)
return net
In your code, where you have used
avg_pool = expand_dims(avg_pool, axis=1)
this is what causes the problem: expand_dims is a function defined under keras.backend, which returns a raw TensorFlow tensor, but in a Keras model all operations should be encapsulated in Keras layers.
You must use its equivalent Keras layer function.
A rule of thumb: all Keras layer functions start with a capital letter.
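For example, the two expand_dims calls can be wrapped in Lambda layers, or replaced by a single Reshape; a sketch of just the offending lines (channel is the argument already passed to CAlayer):
from keras.layers import Lambda, Reshape
import keras.backend as K

# Wrap the backend op in a layer so Keras can track it in the graph:
avg_pool = Lambda(lambda t: K.expand_dims(t, axis=1))(avg_pool)
avg_pool = Lambda(lambda t: K.expand_dims(t, axis=1))(avg_pool)
# ...or equivalently, go from (batch, channel) to (batch, 1, 1, channel) in one step:
# avg_pool = Reshape((1, 1, channel))(avg_pool)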

Keras: Custom Layer with Matrix Multiplication Gives Dimension Error When it Should be Correct

I have a custom Keras layer that takes in multiple vectors of the same size (e.g. a list of 3 input vectors, each of length 10; in Keras, the shape of each input vector is (?, 10)).
In the call section of the custom layer, I first stack the 3 vectors to form a tensor of shape (?, 3, 10): each vector becomes a row vector, and the 3 rows combine to form a matrix x (excluding the batch dimension).
Then x is multiplied by a weight matrix w of size (3, 3) with no batch dimension. The weight matrix is defined in the build part of the custom layer.
The result y is permuted to make the batch dimension the first dimension again.
Lastly, the layer must output 3 vectors of the same length as the original inputs, so I slice along axis=1 to get 3 tensors, each of size (?, 10).
I tried a test case and it seems to work. But when I build the model and call model.summary(), it gives the following error:
ValueError: Dimensions must be equal, but are 3 and 0 for 'add' (op: 'Add') with input shapes: [3,3], [0].
I have tried various solutions, including K.batch_dot(), but I could not get batch_dot() to work due to dimension errors again.
Thank you for your help!
Solved
Replace
self.trainable_weights = self._w
with
self.trainable_weights.append(self._w)
Phew
# Test Case
import numpy as np
import keras.backend as K

a = K.variable(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
b = K.variable(np.repeat(np.array([[1, 1, 10, 1, 1],
                                   [2, 2, 20, 2, 2],
                                   [3, 3, 30, 3, 3]])[np.newaxis, :], repeats=10, axis=0))
c = K.dot(a, b)                                  # (3, 3) . (10, 3, 5) -> (3, 10, 5)
c = K.permute_dimensions(c, pattern=(1, 0, 2))   # -> (10, 3, 5)
y = K.eval(c)
print(y)
print(c.shape)  # (10, 3, 5)
# Custom layer build part
def build(self, input_shape):
    # input_shape should be a list, since cross-stitch must take in inputs from all the individual tasks
    self._input_count = len(input_shape)
    w = np.identity(self._input_count) * 0.9
    inverse_diag_mask = np.invert(np.identity(self._input_count, dtype=np.bool))
    off_value = 0.1 / (self._input_count - 1)
    w[inverse_diag_mask] = off_value
    self._w = K.variable(np.array(w))
    self.trainable_weights = self._w
    super(CrossStitchLayer, self).build(input_shape)
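For reference, a sketch of the same build using add_weight, which registers the variable with the layer automatically (np and K as imported in the snippets above; the initializer reproduces the 0.9 diagonal / 0.1/(n-1) off-diagonal scheme; this rewrite is an assumption, not the original code):
def build(self, input_shape):
    self._input_count = len(input_shape)
    w = np.identity(self._input_count) * 0.9
    inverse_diag_mask = np.invert(np.identity(self._input_count, dtype=np.bool))
    w[inverse_diag_mask] = 0.1 / (self._input_count - 1)
    # add_weight appends the variable to trainable_weights for us:
    self._w = self.add_weight(name='cross_stitch',
                              shape=w.shape,
                              initializer=lambda shape, dtype=None: w,
                              trainable=True)
    super(CrossStitchLayer, self).build(input_shape)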
# Custom layer call part
def call(self, x, **kwargs):
    temp = x  # to show shape
    x = K.stack(x, axis=1)
    y1 = K.dot(self._w, x)
    y = K.permute_dimensions(y1, pattern=(1, 0, 2))
    results = []
    for idx in range(self._input_count):
        results.append(y[:, idx, :])
    return results
Full Error Message:
Traceback (most recent call last):
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\tensorflow\python\framework\ops.py", line 1628, in _create_c_op
c_op = c_api.TF_FinishOperation(op_desc)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Dimensions must be equal, but are 3 and 0 for 'add' (op: 'Add') with input shapes: [3,3], [0].
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/limka/Desktop/Python/strain_sensor/run_cross_validation.py", line 10, in <module>
k_folds=10, k_shuffle=True, save_model=False, save_model_name=None, save_model_dir='./save/models/')
File "C:\Users\limka\Desktop\Python\strain_sensor\own_package\cross_validation.py", line 57, in run_skf
model = MTmodel(fl=ss_fl, mode=model_mode, hparams=hparams, labels_norm=True)
File "C:\Users\limka\Desktop\Python\strain_sensor\own_package\models.py", line 217, in __init__
cs_model = cross_stitch(self.features_dim, self.labels_dim, self.hparams)
File "C:\Users\limka\Desktop\Python\strain_sensor\own_package\models.py", line 193, in cross_stitch
model.summary()
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\keras\engine\network.py", line 1260, in summary
print_fn=print_fn)
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\keras\utils\layer_utils.py", line 166, in print_summary
print_layer_summary_with_connections(layers[i])
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\keras\utils\layer_utils.py", line 153, in print_layer_summary_with_connections
layer.count_params(),
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\keras\engine\base_layer.py", line 1129, in count_params
return count_params(self.weights)
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\keras\engine\base_layer.py", line 1022, in weights
return self.trainable_weights + self.non_trainable_weights
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\tensorflow\python\ops\variables.py", line 856, in _run_op
return getattr(ops.Tensor, operator)(a._AsTensor(), *args)
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\tensorflow\python\ops\math_ops.py", line 878, in binary_op_wrapper
return func(x, y, name=name)
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\tensorflow\python\ops\gen_math_ops.py", line 300, in add
"Add", x=x, y=y, name=name)
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\tensorflow\python\util\deprecation.py", line 488, in new_func
return func(*args, **kwargs)
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\tensorflow\python\framework\ops.py", line 3274, in create_op
op_def=op_def)
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\tensorflow\python\framework\ops.py", line 1792, in __init__
control_input_ops)
File "C:\Users\limka\Anaconda3\envs\my-rdkit-env\lib\site-packages\tensorflow\python\framework\ops.py", line 1631, in _create_c_op
raise ValueError(str(e))
ValueError: Dimensions must be equal, but are 3 and 0 for 'add' (op: 'Add') with input shapes: [3,3], [0].

Keras - inverse of K.eval()

I am trying to write a lambda layer which converts an input tensor into a numpy array and performs a set of affine transforms on slices of said array. To get the underlying numpy array of the tensor I am calling K.eval(). Once I have done all of the processing on the numpy array, I need to convert it back into a keras tensor so it can be returned. Is there an operation in the keras backend which I can use to do this? Or should I be updating the original input tensor using a different backend function?
def apply_affine(x, y):
    # Get dimensions of main tensor
    dimens = K.int_shape(x)
    # Get numpy array behind main tensor
    filter_arr = K.eval(x)
    if dimens[0] is not None:
        # Go through batch...
        for i in range(0, dimens[0]):
            # Get the corresponding affine transformation in the form of a numpy array
            affine = K.eval(y)[i, :, :]
            # Create a scikit-image affine transform from the numpy array
            transform = AffineTransform(matrix=affine)
            # Loop through each filter output from the previous layer of the CNN
            for j in range(0, dimens[1]):
                # Warp each filter output according to the corresponding affine transform
                warp(filter_arr[i, j, :, :], transform)
    # Need to convert the filter array back to a Keras tensor HERE before returning
    return None

transformed_twin = Lambda(function=lambda x: apply_affine(x[0], x[1]))([twin1, transformInput])
EDIT: Added some context...
AffineTransform: https://github.com/scikit-image/scikit-image/blob/master/skimage/transform/_geometric.py#L715
warp: https://github.com/scikit-image/scikit-image/blob/master/skimage/transform/_warps.py#L601
I am trying to re-implement the CNN in "Unsupervised learning of object landmarks by factorized spatial embeddings". filter_arr is the output from a convolutional layer containing 10 filters. I want to apply the same affine transform to all of the filter outputs. There is an affine transform associated with each data input. The affine transforms for each data input are passed to the neural net as a tensor and are passed to the lambda layer as the second input transformInput. I have left the structure of my current network below.
twin = Sequential()
twin.add(Conv2D(20, (3, 3), activation=None, input_shape=(28, 28, 1)))
# print(twin.output_shape)
# twin.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True))
twin.add(Activation('relu'))
twin.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
# print(twin.output_shape)
twin.add(Conv2D(48, (3, 3), activation=None))
# print(twin.output_shape)
twin.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True))
twin.add(Activation('relu'))
twin.add(Conv2D(64, (3, 3), activation=None))
twin.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True))
twin.add(Activation('relu'))
# print(twin.output_shape)
twin.add(Conv2D(80, (3, 3), activation=None))
twin.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True))
twin.add(Activation('relu'))
# print(twin.output_shape)
twin.add(Conv2D(256, (3, 3), activation=None))
twin.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True))
twin.add(Activation('relu'))
# print(twin.output_shape)
twin.add(Conv2D(no_filters, (3, 3), activation=None))
twin.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True))
twin.add(Activation('relu'))
# print(twin.output_shape)
# Reshape the image outputs to a 1D list so softmax can be used on them
finalDims = twin.layers[-1].output_shape
twin.add(Reshape((finalDims[1], finalDims[2]*finalDims[3])))
twin.add(Activation('softmax'))
twin.add(Reshape(finalDims[1:]))
originalInput = Input(shape=(28, 28, 1))
warpedInput = Input(shape=(28, 28, 1))
transformInput = Input(shape=(3, 3))
twin1 = twin(originalInput)
# apply_affine(x, y) as defined above
transformed_twin = Lambda(function=lambda x: apply_affine(x[0], x[1]))([twin1, transformInput])
twin2 = twin(warpedInput)
siamese = Model([originalInput, warpedInput, transformInput], [transformed_twin, twin2])
EDIT: Traceback when using K.variable()
Traceback (most recent call last):
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\client\session.py", line 1039, in _do_call
return fn(*args)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\client\session.py", line 1021, in _run_fn
status, run_metadata)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\contextlib.py", line 66, in __exit__
next(self.gen)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\framework\errors_impl.py", line 466, in raise_exception_on_not_ok_status
pywrap_tensorflow.TF_GetCode(status))
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'batch_normalization_1/keras_learning_phase' with dtype bool
[[Node: batch_normalization_1/keras_learning_phase = Placeholder[dtype=DT_BOOL, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/nickb/PycharmProjects/testing/MNIST_implementation.py", line 96, in <module>
transformed_twin = Lambda(function=lambda x: apply_affine(x[0], x[1]))([twin1, transformInput])
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\keras\engine\topology.py", line 585, in __call__
output = self.call(inputs, **kwargs)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\keras\layers\core.py", line 659, in call
return self.function(inputs, **arguments)
File "C:/Users/nickb/PycharmProjects/testing/MNIST_implementation.py", line 96, in <lambda>
transformed_twin = Lambda(function=lambda x: apply_affine(x[0], x[1]))([twin1, transformInput])
File "C:/Users/nickb/PycharmProjects/testing/MNIST_implementation.py", line 81, in apply_affine
filter_arr = K.eval(x)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\keras\backend\tensorflow_backend.py", line 533, in eval
return to_dense(x).eval(session=get_session())
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\framework\ops.py", line 569, in eval
return _eval_using_default_session(self, feed_dict, self.graph, session)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\framework\ops.py", line 3741, in _eval_using_default_session
return session.run(tensors, feed_dict)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\client\session.py", line 778, in run
run_metadata_ptr)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\client\session.py", line 982, in _run
feed_dict_string, options, run_metadata)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\client\session.py", line 1032, in _do_run
target_list, options, run_metadata)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\client\session.py", line 1052, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'batch_normalization_1/keras_learning_phase' with dtype bool
[[Node: batch_normalization_1/keras_learning_phase = Placeholder[dtype=DT_BOOL, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op 'batch_normalization_1/keras_learning_phase', defined at:
File "C:/Users/nickb/PycharmProjects/testing/MNIST_implementation.py", line 36, in <module>
twin.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True))
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\keras\models.py", line 466, in add
output_tensor = layer(self.outputs[0])
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\keras\engine\topology.py", line 585, in __call__
output = self.call(inputs, **kwargs)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\keras\layers\normalization.py", line 190, in call
training=training)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\keras\backend\tensorflow_backend.py", line 2559, in in_train_phase
training = learning_phase()
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\keras\backend\tensorflow_backend.py", line 112, in learning_phase
name='keras_learning_phase')
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\ops\array_ops.py", line 1507, in placeholder
name=name)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 1997, in _placeholder
name=name)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 768, in apply_op
op_def=op_def)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\framework\ops.py", line 2336, in create_op
original_op=self._default_original_op, op_def=op_def)
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\framework\ops.py", line 1228, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'batch_normalization_1/keras_learning_phase' with dtype bool
[[Node: batch_normalization_1/keras_learning_phase = Placeholder[dtype=DT_BOOL, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Exception ignored in: <bound method BaseSession.__del__ of <tensorflow.python.client.session.Session object at 0x0000023AB66D9C88>>
Traceback (most recent call last):
File "C:\Users\nickb\Anaconda3\envs\py35\lib\site-packages\tensorflow\python\client\session.py", line 587, in __del__
AttributeError: 'NoneType' object has no attribute 'TF_NewStatus'
Process finished with exit code 1
As stated in the comments above, it is best to implement Lambda layer functions using the Keras backend. Since there are currently no functions in the Keras backend that perform affine transformations, I decided to use a TensorFlow function in my Lambda layer instead of implementing an affine transform from scratch using existing Keras backend functions:
def apply_affine(x):
    import tensorflow as tf
    return tf.contrib.image.transform(x[0], x[1])

def apply_affine_output_shape(input_shapes):
    return input_shapes[0]
The downside of this approach is that my Lambda layer will only work with TensorFlow as the backend (as opposed to Theano or CNTK). If you wanted an implementation compatible with any backend, you could check which backend Keras is currently using and then dispatch to a transformation function appropriate for that backend.
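For completeness, a sketch of how these two functions hook into the Lambda layer from the question (output_shape tells Keras that the result keeps the shape of the first input):
transformed_twin = Lambda(apply_affine,
                          output_shape=apply_affine_output_shape)([twin1, transformInput])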
