model = Sequential()
model.add(Flatten(input_shape=(1,) + (52,)))
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dense(2))
model.add(Activation('linear'))
print(model.summary())
I want to convert this Keras code from the Sequential version to the equivalent functional API version, like the following:
input = Input(shape=(1,) + (52,))
i = Flatten()(input)
h = Dense(100, activation='relu')(i)
o = Dense(2, activation='linear')(h)
model = Model(inputs=i, outputs=o)
model.summary()
But it raises the following error:
File "C:\Users\SDS\Anaconda3\lib\site-packages\keras\legacy\interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "C:\Users\SDS\Anaconda3\lib\site-packages\keras\engine\network.py", line 93, in __init__
self._init_graph_network(*args, **kwargs)
File "C:\Users\SDS\Anaconda3\lib\site-packages\keras\engine\network.py", line 237, in _init_graph_network
self.inputs, self.outputs)
File "C:\Users\SDS\Anaconda3\lib\site-packages\keras\engine\network.py", line 1430, in _map_graph_network
str(layers_with_complete_input))
ValueError: Graph disconnected: cannot obtain value for tensor Tensor("input_1:0", shape=(?, 1, 52), dtype=float32) at layer "input_1". The following previous layers were accessed without issue: []
Your model definition is incorrect; the inputs argument of Model should be your Input layer, like this:
input = Input(shape=(1,) + (52,))
i = Flatten()(input)
h = Dense(100, activation='relu')(i)
o = Dense(2, activation='linear')(h)
model = Model(inputs=input, outputs=o)
You cannot pass any tensor other than an Input layer as the input to a Model.
The model's input must be the Input layer itself (the first layer, before any Dense layers).
So it should be:
model = Model(inputs=input, outputs=o)
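For reference, here is a minimal sketch of the corrected functional-API version end to end (same layers as the Sequential model above; variable names chosen to avoid shadowing the Python builtin input):

from keras.layers import Input, Flatten, Dense
from keras.models import Model

# The Input layer is the entry point of the graph and is what Model(inputs=...) expects.
inputs = Input(shape=(1, 52))
x = Flatten()(inputs)
x = Dense(100, activation='relu')(x)
outputs = Dense(2, activation='linear')(x)

model = Model(inputs=inputs, outputs=outputs)
model.summary()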
I am fitting a Siamese CNN with the following structure:
def get_siamese_model(input_shape):
    """
    Model architecture
    """
    # Define the tensors for the three input images
    A_input = Input(input_shape)
    B_input = Input(input_shape)
    C_input = Input(input_shape)

    # Convolutional Neural Network
    # Initializers
    initializer = 'random_uniform'
    initializer0 = 'zeros'

    model = Sequential()
    model.add(Conv2D(64, (10, 10), activation='relu', input_shape=input_shape,
                     kernel_initializer=initializer, kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(Conv2D(128, (7, 7), activation='relu', kernel_initializer=initializer,
                     bias_initializer=initializer0, kernel_regularizer=l2(2e-4)))
    model.add(MaxPooling2D())
    model.add(Conv2D(128, (4, 4), activation='relu', kernel_initializer=initializer,
                     bias_initializer=initializer0, kernel_regularizer=l2(2e-4)))
    print("C3 shape: ", model.output_shape)
    model.add(MaxPooling2D())
    print("P3 shape: ", model.output_shape)
    model.add(Conv2D(256, (4, 4), activation='relu', kernel_initializer=initializer,
                     bias_initializer=initializer0, kernel_regularizer=l2(2e-4)))
    model.add(Flatten())
    model.add(Dense(4096, activation='sigmoid',
                    kernel_regularizer=l2(1e-3),
                    kernel_initializer=initializer,
                    bias_initializer=initializer0))

    # Generate the encodings (feature vectors) for the three images
    encoded_A = model(A_input)
    encoded_B = model(B_input)
    encoded_C = model(C_input)

    # Custom layers for the L1 distances
    L1_layer = Lambda(lambda tensors: K.sum(K.abs(tensors[0] - tensors[1]), axis=1, keepdims=True))
    L_layerAB = L1_layer([encoded_A, encoded_B])
    L2_layer = Lambda(lambda tensors: K.sum(K.abs(tensors[0] - tensors[1]), axis=1, keepdims=True))
    L_layerAC = L2_layer([encoded_A, encoded_C])

    merge6 = concatenate([L_layerAB, L_layerAC], axis=0)
    prediction = Dense(1, activation='sigmoid')(merge6)
    siamese_net = Model(inputs=[A_input, B_input, C_input], outputs=prediction)

    # return the model
    return siamese_net
The training data consists of triplets of pictures in array form, each with dimensions (128, 128, 3), and the target data is a label (0 or 1).
Then we fit the model:
model = siam.get_siamese_model((128,128,3))
model.fit([tripletA,tripletB, tripletC], targets , epochs=2, verbose=1,
batch_size = 1)
This works for batch_size = 1, but any batch_size > 1 produces the following error:
Epoch 1/5
Traceback (most recent call last):
File "<ipython-input-147-8959bad9406a>", line 2, in <module>
batch_size = 2)
File "C:\Users\valan\Anaconda3\lib\site-packages\keras\engine\training.py", line 1239, in fit
validation_freq=validation_freq)
File "C:\Users\valan\Anaconda3\lib\site-packages\keras\engine\training_arrays.py", line 196, in fit_loop
outs = fit_function(ins_batch)
File "C:\Users\valan\Anaconda3\lib\site-packages\tensorflow_core\python\keras\backend.py", line 3727, in _call_
outputs = self._graph_fn(*converted_inputs)
File "C:\Users\valan\Anaconda3\lib\site-packages\tensorflow_core\python\eager\function.py", line 1551, in _call_
return self._call_impl(args, kwargs)
File "C:\Users\valan\Anaconda3\lib\site-packages\tensorflow_core\python\eager\function.py", line 1591, in _call_impl
return self._call_flat(args, self.captured_inputs, cancellation_manager)
File "C:\Users\valan\Anaconda3\lib\site-packages\tensorflow_core\python\eager\function.py", line 1692, in _call_flat
ctx, args, cancellation_manager=cancellation_manager))
File "C:\Users\valan\Anaconda3\lib\site-packages\tensorflow_core\python\eager\function.py", line 545, in call
ctx=ctx)
File "C:\Users\valan\Anaconda3\lib\site-packages\tensorflow_core\python\eager\execute.py", line 67, in quick_execute
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
InvalidArgumentError: Incompatible shapes: [4,1] vs. [2,1]
[[node loss_16/dense_47_loss/binary_crossentropy/logistic_loss/mul (defined at C:\Users\valan\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:3009) ]] [Op:__inference_keras_scratch_graph_33258]
Does anybody know where the problem is with batch_size > 1?
EDIT1:
We found out that the following lines caused the error:
L1_layer = Lambda(lambda tensors: K.sum(K.abs(tensors[0] - tensors[1]), axis=1,keepdims=True))
L_layerAB = L1_layer([encoded_A, encoded_B])
L2_layer = Lambda(lambda tensors: K.sum(K.abs(tensors[0] - tensors[1]), axis=1,keepdims=True))
L_layerAC = L2_layer([encoded_A, encoded_C])
Removing these lines and just applying the sigmoid to encoded_A (i.e., making the model simpler) makes it work for batch sizes > 1.
But does anybody know how to re-add those custom layers properly?
Mentioning the solution in this (Answer) section, even though it is present in the Comments section, for the benefit of the community.
With batch_size > 1, the above code results in the error:
InvalidArgumentError: Incompatible shapes: [4,1] vs. [2,1]
[[node loss_16/dense_47_loss/binary_crossentropy/logistic_loss/mul (defined at C:\Users\valan\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:3009) ]] [Op:__inference_keras_scratch_graph_33258]
Changing the code from
merge6 = concatenate([L_layerAB, L_layerAC], axis = 0)
to
merge6 = Concatenate()([L_layerAB, L_layerAC])
has resolved the error.
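A note on why this helps (my reading of the shapes, not stated in the original answer): concatenate([...], axis=0) stacks the two distance tensors along the batch dimension, so with batch_size = 2 the prediction has shape (4, 1) while the targets have shape (2, 1), which is exactly the [4,1] vs. [2,1] mismatch reported above. Concatenate() defaults to the last axis, so the batch dimension is preserved and the two distances become two features per sample. The shape behavior, illustrated with NumPy:

import numpy as np

# Two per-pair L1-distance outputs for a batch of 2 samples, each of shape (2, 1)
d_ab = np.zeros((2, 1))
d_ac = np.zeros((2, 1))

print(np.concatenate([d_ab, d_ac], axis=0).shape)   # (4, 1): batch dimension doubled, mismatches the targets
print(np.concatenate([d_ab, d_ac], axis=-1).shape)  # (2, 2): batch dimension preserved, one row per sample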
I am building a multi-input model in the following way (simplified model):
model_heads = []
input1 = Input(shape=(batch_input_shape[1], batch_input_shape[2]))
input2 = Input(shape=(batch_input_shape[1], batch_input_shape[2]))
inputs = [input1, input2]

model_layer = Conv1D(filters=8, kernel_size=2, padding='causal', dilation_rate=1,
                     activation='relu', input_shape=(batch_input_shape[1], batch_input_shape[2]),
                     kernel_initializer='he_normal')(inputs[0])
model_layer = MaxPooling1D(2)(model_layer)
model_heads.append(model_layer)

model_layer = Conv1D(filters=16, kernel_size=4, padding='causal', dilation_rate=1,
                     activation='relu', input_shape=(batch_input_shape[1], batch_input_shape[2]),
                     kernel_initializer='he_normal')(inputs[1])
model_layer = MaxPooling1D(4)(model_layer)
model_heads.append(model_layer)

flat_out = []
for j in model_heads:
    flat_out.append(Flatten()(j))

merged = concatenate(flat_out)
merged = Dense(100, activation=self.activation)(merged)
self.model = Model(inputs=inputs, outputs=output)
self.model.compile(loss='binary_crossentropy', optimizer=self.opt, metrics=['accuracy'])
I feed the input and run training, which runs fine, but then I save the model like so:
builder = tf.saved_model.builder.SavedModelBuilder(persistencydir + '/saved.' + str(start_prediction) + "." + str(end_prediction))
signature = tf.saved_model.signature_def_utils.predict_signature_def(inputs={'the_input': self.model.input}, outputs={'the_output': self.model.output})
builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature})
builder.save()
This of course complains, because predict_signature_def expects a tensor for inputs but receives a Python list instead:
File "/home/myname/myutils/anaconda3/envs/tmp_merge/lib/python3.6/site-packages/tensorflow/python/saved_model/signature_def_utils_impl.py", line 205, in predict_signature_def
for key, tensor in inputs.items()}
File "/home/myname/myutils/anaconda3/envs/tmp_merge/lib/python3.6/site-packages/tensorflow/python/saved_model/signature_def_utils_impl.py", line 205, in <dictcomp>
for key, tensor in inputs.items()}
File "/home/myname/myutils/anaconda3/envs/tmp_merge/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py", line 324, in new_func
return func(*args, **kwargs)
File "/home/myname/myutils/anaconda3/envs/tmp_merge/lib/python3.6/site-packages/tensorflow/python/saved_model/utils_impl.py", line 58, in build_tensor_info
dtype=dtypes.as_dtype(tensor.dtype).as_datatype_enum,
AttributeError: 'list' object has no attribute 'dtype'
Is there another way I can save the signature?
Alternatively, is it possible to build a Model with multiple inputs using a single tensor composed of the original inputs (input1 and input2 in my example)? All the examples I've seen are similar to what I have here, but I haven't found one that also saves the signature...
Based on this comment: https://github.com/tensorflow/tensorflow/issues/39568#issuecomment-631120656, you can do:
signature = tf.saved_model.predict_signature_def(
    inputs={'input1': x1, 'input2': x2},
    outputs={'output1': y1, 'output2': y2})
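Applied to the model in the question, that means passing each input tensor under its own key instead of passing the whole list. A sketch, assuming the same self.model, sess, and builder objects as above (the key names are arbitrary):

signature = tf.saved_model.signature_def_utils.predict_signature_def(
    inputs={'input1': self.model.inputs[0],   # tensor of the first Input layer
            'input2': self.model.inputs[1]},  # tensor of the second Input layer
    outputs={'the_output': self.model.output})

builder.add_meta_graph_and_variables(
    sess, [tf.saved_model.tag_constants.SERVING],
    signature_def_map={tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature})
builder.save()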
I'm trying to set up an LSTM model with Keras. The training data has dimensions [7165, 27], and with my current setup it throws the following error:
File "C:\Users\Eier\Anaconda3\lib\site-packages\keras\models.py", line 441, in __init__
self.add(layer)
File "C:\Users\Eier\Anaconda3\lib\site-packages\keras\models.py", line 497, in add
layer(x)
File "C:\Users\Eier\Anaconda3\lib\site-packages\keras\layers\recurrent.py", line 500, in __call__
return super(RNN, self).__call__(inputs, **kwargs)
File "C:\Users\Eier\Anaconda3\lib\site-packages\keras\engine\topology.py", line 575, in __call__
self.assert_input_compatibility(inputs)
File "C:\Users\Eier\Anaconda3\lib\site-packages\keras\engine\topology.py", line 474, in assert_input_compatibility
str(K.ndim(x)))
ValueError: Input 0 is incompatible with layer lstm_64: expected ndim=3, found ndim=4
I know this error is fairly common, but none of the many different solutions found online have worked for me yet. I have already tried reshaping the training data to a 3D matrix, fooling around with different layer combinations, explicitly stating the batch size, using Flatten() and more, to no avail. I would be very grateful if someone could push me in the right direction for fixing this.
Code snippet:
input_dim = 27
units = 5
timesteps = 1
samples = X_train.shape[0]
X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
model = Sequential([
    LSTM(units, return_sequences=True, stateful=True, input_shape=(samples, timesteps, input_dim)),
    Dropout(0.2),
    LSTM(units, return_sequences=False),
    Dropout(0.2),
    Dense(1),
    Activation('softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.fit(X_train, y_train, batch_size = 32, epochs = 60)
As pointed out by @ShubhamPanchal in the comments, you don't need to specify the samples dimension. The LSTM layer expects input of shape (batch_size, time_steps, channels), so when you pass the input_shape argument you pass a tuple specifying only the time_steps and channels dimensions.
LSTM(32, return_sequences=True, stateful = True, input_shape=(time_steps, input_dim))
Since you are using a stateful LSTM, you also need to specify the batch_size argument. So the full code for the model would be:
model = Sequential([
    LSTM(units, return_sequences=True, stateful=True, input_shape=(timesteps, input_dim), batch_size=batch_size),
    Dropout(0.2),
    LSTM(units, return_sequences=False),
    Dropout(0.2),
    Dense(1),
    Activation('softmax'),
])
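For completeness, a minimal runnable sketch of the corrected setup with dummy data (shapes chosen for illustration; note that with stateful=True the number of samples must be divisible by batch_size when fitting):

import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense, Activation

input_dim = 27
units = 5
timesteps = 1
batch_size = 32

# Dummy 2D data (samples, features), reshaped to 3D (samples, timesteps, features) as in the question
X_train = np.random.rand(64, input_dim)
X_train = np.reshape(X_train, (X_train.shape[0], timesteps, input_dim))

model = Sequential([
    LSTM(units, return_sequences=True, stateful=True,
         input_shape=(timesteps, input_dim), batch_size=batch_size),
    Dropout(0.2),
    LSTM(units, return_sequences=False),
    Dropout(0.2),
    Dense(1),
    Activation('softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.summary()  # input is now (batch, timesteps, features), matching the expected ndim=3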
I want to define LSTM layers using TensorFlow within Keras. The code is as follows:
model = Sequential()
inputs = Input(shape=(time_steps, 1))
cell = tf.nn.rnn_cell.LSTMCell(n_neurons)
multi_cell = tf.nn.rnn_cell.MultiRNNCell([cell] * n_layers)
lstm_outputs, states = tf.nn.dynamic_rnn(multi_cell, inputs, dtype=tf.float32)
outputs = TimeDistributed(Dense(1))(lstm_outputs)
model = Model(inputs=inputs, outputs=outputs)
adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss='mean_squared_error', optimizer=adam)
print(model.summary())
When running, an error occurred:
Using TensorFlow backend.
Traceback (most recent call last):
File "/Users/zhjmdcjk/Desktop/Untitled.py", line 81, in <module>
model = Model(inputs=inputs, outputs=outputs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/engine/topology.py", line 1734, in __init__
build_map_of_graph(x, finished_nodes, nodes_in_progress)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/engine/topology.py", line 1724, in build_map_of_graph
layer, node_index, tensor_index)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/keras/engine/topology.py", line 1695, in build_map_of_graph
layer, node_index, tensor_index = tensor._keras_history
AttributeError: 'Tensor' object has no attribute '_keras_history'
I am not clear about this; can anyone give me some advice? Thanks a lot!
Is there any particular reason you're using TensorFlow's LSTM cells in Keras? You can use Keras LSTM layers directly:
inputs = Input(shape=(time_steps, 1))
lstm1 = LSTM(n_neurons, return_sequences=True)(inputs)
lstm_outputs = LSTM(n_neurons, return_sequences=True)(lstm1)
outputs = TimeDistributed(Dense(1))(lstm_outputs)
model = Model(inputs=inputs, outputs=outputs)
Also, you don't need model = Sequential() when using Keras' functional API.
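Putting it together with the optimizer and loss from the question, a complete sketch (assuming standalone Keras; time_steps and n_neurons are placeholder values):

from keras.layers import Input, LSTM, Dense, TimeDistributed
from keras.models import Model
from keras import optimizers

time_steps = 10  # placeholder value
n_neurons = 32   # placeholder value

inputs = Input(shape=(time_steps, 1))
lstm1 = LSTM(n_neurons, return_sequences=True)(inputs)       # stacked Keras LSTMs replace MultiRNNCell
lstm_outputs = LSTM(n_neurons, return_sequences=True)(lstm1)
outputs = TimeDistributed(Dense(1))(lstm_outputs)

model = Model(inputs=inputs, outputs=outputs)
adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss='mean_squared_error', optimizer=adam)
print(model.summary())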
I'm using Keras (2.0.8) with Python 3.5 on a 64-bit Windows PC.
I'm trying to use VGG16 with my own fully connected layers.
But my model.fit_generator throws a TypeError with a strange keyword:
TypeError: run() got an unexpected keyword argument 'metirics'
Of course, I never use 'metirics' in my code...
Here is my code.
def go_vgg():
    train_datagen = ImageDataGenerator(rescale=1.0 / 255)
    validation_datagen = ImageDataGenerator()
    train_generator = train_datagen.flow_from_directory(train_data_dir,
                                                        target_size=(image_size, image_size),
                                                        batch_size=batch_size,
                                                        save_to_dir=check_dir,
                                                        save_format='png',
                                                        shuffle=False,
                                                        class_mode='binary')
    validation_generator = validation_datagen.flow_from_directory(validation_data_dir,
                                                                  target_size=(image_size, image_size),
                                                                  batch_size=batch_size,
                                                                  save_to_dir=check_dir,
                                                                  save_format='png',
                                                                  shuffle=False,
                                                                  class_mode='binary')

    bmodel = VGG16(include_top=False, weights='imagenet', input_tensor=None,
                   input_shape=(image_size, image_size, 3))
    bmodel.summary()

    smodel = Sequential()
    smodel.add(Flatten(input_shape=(7, 7, 512)))
    smodel.summary()
    smodel.add(Dense(256, activation='relu'))
    smodel.add(Dropout(0.5))
    smodel.add(Dense(1, activation='sigmoid'))

    model = Model(input=bmodel.input, outputs=smodel(bmodel.output))
    for layer in model.layers[:15]:
        layer.trainable = False

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                  metirics=['accuracy'])

    history = model.fit_generator(train_generator, steps_per_epoch=2000,
                                  epochs=np_epoch)
The summary result is as follows (I mostly omitted bmodel.summary()):
____________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 224, 224, 3) 0
.
.
.
Epoch 1/2
Traceback (most recent call last):
File "test4.py", line 94, in <module>
main()
File "test4.py", line 86, in main
go_vgg()
File "test4.py", line 80, in go_vgg
epochs = np_epoch)
File "D:\python\lib\site-packages\keras\legacy\interfaces.py", line 87, in wra
pper
return func(*args, **kwargs)
File "D:\python\lib\site-packages\keras\engine\training.py", line 2042, in fit
_generator
class_weight=class_weight)
File "D:\python\lib\site-packages\keras\engine\training.py", line 1762, in tra
in_on_batch
outputs = self.train_function(ins)
File "D:\python\lib\site-packages\keras\backend\tensorflow_backend.py", line 2
273, in __call__
**self.session_kwargs)
TypeError: run() got an unexpected keyword argument 'metirics'
I have no idea where this 'metirics' comes from.
Any help to solve this error would be highly appreciated!
Of course you are using such a keyword, right here:
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metirics=['accuracy'])
The problem is that you misspelled metrics as metirics. Keras doesn't recognize the unknown keyword, passes it through to the backend's session call, which is why the TypeError about run() only surfaces during training. Just correct it to metrics:
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])