The following code is giving me a graph disconnected error, but I can't work out where it is coming from and am not sure how to go about debugging it. The error is thrown on the last line, decoder = Model(latentInputs, outputs, name="decoder"). I have compared it to the working code I modified, but to no avail.
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import ReLU
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import UpSampling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import GaussianNoise
from tensorflow.keras.layers import Reshape
from tensorflow.keras.layers import Add
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
import tensorflow as tf
import numpy as np
width=256
height=256
depth=3
filter_size = 3
latentDim = 512
# initialize the input shape to be "channels last" along with
# the channels dimension itself
inputShape = (height, width, depth)
chanDim = -1
# define the input to the encoder
inputs = Input(shape=inputShape)
x = GaussianNoise(0.2)(inputs)
x = Conv2D(128, filter_size, strides=1, padding="same")(x)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
layer_1 = Conv2D(128, filter_size, strides=2, padding="same")(x)
x = BatchNormalization(axis=chanDim)(layer_1)
x = LeakyReLU(alpha=0.2)(x)
layer_2 = Conv2D(128, filter_size, strides=2, padding="same")(x)
x = BatchNormalization(axis=chanDim)(layer_2)
x = LeakyReLU(alpha=0.2)(x)
layer_3 = Conv2D(128, filter_size, strides=2, padding="same")(x)
x = BatchNormalization(axis=chanDim)(layer_3)
x = LeakyReLU(alpha=0.2)(x)
layer_4 = Conv2D(128, filter_size, strides=2, padding="same")(x)
x = BatchNormalization(axis=chanDim)(layer_4)
x = LeakyReLU(alpha=0.2)(x)
layer_5 = Conv2D(128, filter_size, strides=2, padding="same")(x)
x = BatchNormalization(axis=chanDim)(layer_5)
x = LeakyReLU(alpha=0.2)(x)
layer_6 = Conv2D(128, filter_size, strides=2, padding="same")(x)
x = BatchNormalization(axis=chanDim)(layer_6)
x = LeakyReLU(alpha=0.2)(x)
layer_7 = Conv2D(128, filter_size, strides=2, padding="same")(x)
# flatten the network and then construct our latent vector
latent = Flatten()(layer_7)
volumeSize = K.int_shape(layer_7)
# build the encoder model
encoder = Model(inputs, latent, name="encoder")
encoder.summary()
# start building the decoder model which will accept the
# output of the encoder as its inputs
#%%
latentInputs = Input(shape=(np.prod(volumeSize[1:]),))
x = Reshape((volumeSize[1], volumeSize[2], volumeSize[3]))(latentInputs)
dec_layer_7 = Add()([x, layer_7])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_7)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
dec_layer_6 = Add()([x, layer_6])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_6)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
dec_layer_5 = Add()([x, layer_5])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_5)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
dec_layer_4 = Add()([x, layer_4])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_4)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
dec_layer_3 = Add()([x, layer_3])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_3)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
dec_layer_2 = Add()([x, layer_2])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_2)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
dec_layer_1 = Add()([x, layer_1])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_1)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
# apply a single CONV_TRANSPOSE layer used to recover the
# original depth of the image
outputs = Conv2DTranspose(depth, filter_size, padding="same")(x)
# =============================================================================
# outputs = ReLU(max_value=1.0)(x)
# =============================================================================
# build the decoder model
decoder = Model(latentInputs, outputs, name="decoder")
The error is:
ValueError: Graph disconnected: cannot obtain value for tensor Tensor("input_37:0", shape=(None, 256, 256, 3), dtype=float32) at layer "input_37". The following previous layers were accessed without issue: []
layer_7 refers to another model's graph: you have to provide an input for layer_7 in your decoder. One solution is to define your decoder in this way:
decoder = Model([latentInputs, encoder.input], outputs, name="decoder")
Here is the full example: https://colab.research.google.com/drive/1W8uLy49H_8UuD9DGZvtP7Md1f4ap3u6A?usp=sharing
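For completeness, a minimal sketch of how the full autoencoder might then be assembled from the tensors already defined above (hypothetical wiring, not taken from the linked notebook):
# The decoder takes the image as a second input to feed its Add() skip
# connections, so it is called with both the latent code and the image.
autoencoder = Model(inputs, decoder([latent, inputs]), name="autoencoder")
autoencoder.summary()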
Related
I'm trying to run some tests with GridSearchCV, but I have this error; could you tell me why?
in clone raise TypeError("Cannot clone object '%s' (type %s): "
TypeError: Cannot clone object '<keras.engine.functional.Functional object at 0x7f330fe610a0>' (type <class 'keras.engine.functional.Functional'>): it does not seem to be a scikit-learn estimator as it does not implement a 'get_params' method.
My code:
Model:
from keras import backend as K, regularizers
from keras.engine.training import Model
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, \
    BatchNormalization, Activation, Input
import ModelLib

class Cifar100_Model(ModelLib.ModelLib):
    def build_classifier_model(self, dataset, n_classes=5,
                               activation='elu', dropout_1_rate=0.25,
                               dropout_2_rate=0.5,
                               reg_factor=50e-4, bias_reg_factor=None, batch_norm=False):
        n_classes = dataset.n_classes
        print(n_classes)
        print("----------------------------------------------------------------------------")
        l2_reg = regularizers.l2(reg_factor) #K.variable(K.cast_to_floatx(reg_factor))
        l2_bias_reg = None
        if bias_reg_factor:
            l2_bias_reg = regularizers.l2(bias_reg_factor) #K.variable(K.cast_to_floatx(bias_reg_factor))

        # input image dimensions
        h, w, d = 32, 32, 3
        if K.image_data_format() == 'channels_first':
            input_shape = (d, h, w)
        else:
            input_shape = (h, w, d)

        x = input_1 = Input(shape=input_shape)

        x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Conv2D(filters=64, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Conv2D(filters=64, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Conv2D(filters=128, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Conv2D(filters=128, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Conv2D(filters=256, kernel_size=(2, 2), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Conv2D(filters=256, kernel_size=(2, 2), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Flatten()(x)
        x = Dense(units=512, kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Dropout(rate=dropout_2_rate)(x)
        x = Dense(units=n_classes, kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation='softmax')(x)

        model = Model(inputs=[input_1], outputs=[x])
        return model
Code test:
from sklearn.model_selection import GridSearchCV
import models.cifar100_model
def load_model():
    return models.cifar100_model.Cifar100_Model()

def get_params(self, deep=True):
    return {"learning_rate": self.learning_rate}
model_lib = load_model()
model = model_lib.build_classifier_model(dataset)
x_train = dataset.x_train
y_train = dataset.y_train_labels
learning_rate = [0.01, 0.1]
param_grid = dict(learning_rate = learning_rate)
grid = GridSearchCV(estimator = model, param_grid=param_grid, n_jobs=-1, cv=3, scoring='accuracy')
gridResult = grid.fit(x_train,y_train)
print(gridResult.best_params_)
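A bare Keras Model is not a scikit-learn estimator, which is why clone() fails. The usual workaround is to wrap the model; here is a minimal sketch, assuming the old keras.wrappers.scikit_learn wrapper (scikeras.wrappers.KerasClassifier plays the same role in newer setups) and a hypothetical build_model function that compiles the network with the learning rate under search:
from keras.optimizers import Adam
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

def build_model(learning_rate=0.01):
    # build_classifier_model returns an uncompiled Model, so compile it here
    model = model_lib.build_classifier_model(dataset)
    model.compile(optimizer=Adam(learning_rate=learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

# The wrapper implements get_params/set_params, so GridSearchCV can clone it;
# grid keys that match build_model's arguments are forwarded automatically.
estimator = KerasClassifier(build_fn=build_model, epochs=5, batch_size=128, verbose=0)
grid = GridSearchCV(estimator=estimator, param_grid=dict(learning_rate=[0.01, 0.1]),
                    cv=3, scoring='accuracy')
gridResult = grid.fit(x_train, y_train)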
I am getting an error trying to feed the following data into my network. I have an issue with reshaping the training data and the input to the network. The error I am getting is:
Error when checking target: expected conv1d_92 to have shape (4, 1) but got array with shape (1, 784)
The code is as follows:
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 17 20:57:51 2021
@author: morte
"""
import keras
from keras import layers
from keras.datasets import mnist
import numpy as np
#(x_train, _), (x_test, _) = mnist.load_data()
#x_train = x_train.astype('float32') / 255.
#x_test = x_test.astype('float32') / 255.
#x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
#x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train),1,28*28)) #
x_test = np.reshape(x_test, (len(x_test),1,28*28)) #
input_img = keras.Input(shape=(x_train.shape[1:]))
x = layers.Conv1D(16,(3), activation='relu', padding='same')(input_img)
x = layers.MaxPooling1D(2, padding='same')(x)
x = layers.Conv1D(8,(3), activation='relu', padding='same')(x)
x = layers.MaxPooling1D(2, padding='same')(x)
x = layers.Conv1D(8,(3), activation='relu', padding='same')(x)
encoded = layers.MaxPooling1D(2, padding='same')(x)
# at this point the representation is (4, 4, 8) i.e. 128-dimensional
x = layers.Conv1D(8,(3), activation='relu', padding='same')(encoded)
x = layers.UpSampling1D(2)(x)
x = layers.Conv1D(8,(3), activation='relu', padding='same')(x)
x = layers.UpSampling1D(2)(x)
x = layers.Conv1D(16,(3), activation='relu')(x)
x = layers.UpSampling1D(2)(x)
decoded = layers.Conv1D(1, (3), activation='sigmoid', padding='same')(x)
autoencoder = keras.Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
from keras.datasets import mnist
import numpy as np
from keras.callbacks import TensorBoard
autoencoder.fit(x_train, x_train,
                epochs=2,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test, x_test),
                )
decoded_imgs = autoencoder.predict(x_test)
import matplotlib.pyplot as plt
n = 10
plt.figure(figsize=(20, 4))
for i in range(1, n + 1):
    # Display original
    ax = plt.subplot(2, n, i)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # Display reconstruction
    ax = plt.subplot(2, n, i + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
Since it is an autoencoder network, the encoder input has to match the decoder output.
You are feeding a (1, 784)-shaped input array and outputting a (4, 1) array.
For more detail you can add the line autoencoder.summary() (for example after the line autoencoder = keras.Model(input_img, decoded)).
This will give you information about the shape of every layer.
One approach would be, for example, to add a Dense layer at the end of your decoder.
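Another option, sketched below as an assumption rather than the only fix, is to give the convolution a real time axis: reshape each image to (784, 1) so Conv1D slides along the 784 pixels, and use padding='same' in every decoder layer so the output length comes back to 784.
import numpy as np
import keras
from keras import layers
from keras.datasets import mnist

(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
# (samples, steps, channels): convolve along the 784 pixels, one channel each
x_train = x_train.reshape((len(x_train), 28 * 28, 1))
x_test = x_test.reshape((len(x_test), 28 * 28, 1))

input_img = keras.Input(shape=(28 * 28, 1))
x = layers.Conv1D(16, 3, activation='relu', padding='same')(input_img)  # (784, 16)
x = layers.MaxPooling1D(2, padding='same')(x)                           # (392, 16)
x = layers.Conv1D(8, 3, activation='relu', padding='same')(x)           # (392, 8)
encoded = layers.MaxPooling1D(2, padding='same')(x)                     # (196, 8)

x = layers.Conv1D(8, 3, activation='relu', padding='same')(encoded)
x = layers.UpSampling1D(2)(x)                                           # (392, 8)
x = layers.Conv1D(16, 3, activation='relu', padding='same')(x)          # 'same' keeps the length
x = layers.UpSampling1D(2)(x)                                           # (784, 16)
decoded = layers.Conv1D(1, 3, activation='sigmoid', padding='same')(x)  # (784, 1) matches the input

autoencoder = keras.Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.summary()  # check every shape before calling fit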
I am a newbie in CNNs and I am trying to code deconvolution (to generate feature maps) on the MNIST database (because it is the simplest one for a beginner to learn). I want my model to generate feature maps at the end. The idea is to implement the paper Saliency Detection Via Dense Convolution Network to some extent.
Here is the complete code that I am trying to run:
import keras
from keras.datasets import mnist
import keras.backend as K
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Flatten, Dropout, Activation, Reshape
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
from keras.initializers import RandomNormal
init = RandomNormal(mean = 0., stddev = 0.02)
def GeneratorDeconv(image_size = 28):
    L = int(image_size)
    inputs = Input(shape = (100, ))
    x = Dense(512*int(L/16)**2)(inputs) # shape (512*(L/16)**2,)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Reshape((int(L/16), int(L/16), 512))(x) # shape (L/16, L/16, 512)
    x = Conv2DTranspose(256, (4, 4), strides = (2, 2),
                        kernel_initializer = init,
                        padding = 'same')(x) # shape (L/8, L/8, 256)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(128, (4, 4), strides = (2, 2),
                        kernel_initializer = init,
                        padding = 'same')(x) # shape (L/4, L/4, 128)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(64, (4, 4), strides = (2, 2),
                        kernel_initializer = init,
                        padding = 'same')(x) # shape (L/2, L/2, 64)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(3, (4, 4), strides = (2, 2),
                        kernel_initializer = init,
                        padding = 'same')(x) # shape (L, L, 3)
    images = Activation('tanh')(x)
    model = Model(inputs = inputs, outputs = images)
    model.summary()
    return model
batch_size = 128
num_classes = 10
epochs = 1
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = GeneratorDeconv()
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
I picked the function GeneratorDeconv(image_size = 28) from ProgramCreek Python.
Now I am confused about how to embed it into my own custom model. Up to model.compile(...) the program runs okay, but at model.fit(...) it gives the error:
ValueError: Error when checking input: expected input_2 to have 2
dimensions, but got array with shape (60000, 28, 28, 1)
I don't know how to resolve the issues. Please help.
The input to your model is:
inputs = Input(shape = (100, ))
This will take a vector in the shape of (samples, 100), so it expects a 2D input.
However:
print('x_train shape:', x_train.shape)
>>>x_train shape: (60000, 28, 28, 1)
You are inputting a 4D array, when you specified that your input took a 2D one. That is what is causing the error.
I made some edits to your architecture so the shapes match up and it actually trains:
def GeneratorDeconv(image_size = 28):
    L = int(image_size)
    inputs = Input(shape = (28, 28, 1))
    x = Dense(512*int(L/16)**2)(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(256, (4, 4), strides = (2, 2),
                        kernel_initializer = init,
                        padding = 'same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(128, (4, 4), strides = (2, 2),
                        kernel_initializer = init,
                        padding = 'same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(64, (4, 4), strides = (2, 2),
                        kernel_initializer = init,
                        padding = 'same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(3, (4, 4), strides = (2, 2),
                        kernel_initializer = init,
                        padding = 'same')(x)
    x = Flatten()(x)
    x = Dense(10)(x)
    images = Activation('tanh')(x)
    model = Model(inputs = inputs, outputs = images)
    model.summary()
    return model
I have implemented a convolutional auto-encoder with Keras, using the Theano backend. I am changing my approach to try to deal with images of different sizes. As long as I use numpy's stack function to build the dataset (equal size images) I am golden. However, for different size images we cannot use stack, and fit expects a numpy array. So I changed to fit_generator to avoid the size checks. The problem is that the last layer is expecting 16 as the last dimension in the input, and I cannot understand why it is getting the dimensions of the original image.
Have a look at the code below and the error output.
import numpy as np
import keras
from keras.models import Sequential, Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
AE_EPOCHS = 10
VERB = 1
batchsz = 16
outfun = 'sigmoid'
data = []
dimensions = [(10, 15), (12, 15), (7,15), (20,15), (25,15)]
for d in dimensions:
    dd = np.random.rand(*d)
    dd = dd.reshape((1,)+dd.shape)
    data.append(dd)
input_img = Input(shape=(1, None, 15))
filtersz = 3
pad_it = 'same'
size1 = 16
size2 = 8
x = Conv2D(size1, (filtersz, filtersz), activation='relu', padding=pad_it)(input_img)
x = MaxPooling2D((2, 2), padding=pad_it)(x)
x = Conv2D(size2, (filtersz, filtersz), activation='relu', padding=pad_it)(x)
x = MaxPooling2D((2, 2), padding=pad_it)(x)
x = Conv2D(size2, (filtersz, filtersz), activation='relu', padding=pad_it)(x)
encoded = MaxPooling2D((2, 2), padding=pad_it)(x)
x = Conv2D(size2, (filtersz, filtersz), activation='relu', padding=pad_it)(encoded)
x = UpSampling2D((2, 2), data_format="channels_first")(x)
x = Conv2D(size2, (filtersz, filtersz), activation='relu', padding=pad_it)(x)
x = UpSampling2D((2, 2), data_format="channels_first")(x)
x = Conv2D(size1, (filtersz, filtersz), activation='relu', padding=pad_it)(x)
x = UpSampling2D((2, 2), data_format="channels_first")(x)
decoded = Conv2D(1, (filtersz, filtersz), activation=outfun, padding=pad_it)(x)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss= 'binary_crossentropy')
x_train = data[1:]
x_test= data[0].reshape((1,)+ data[0].shape)
def mygen(xx, *args, **kwargs):
    for i in xx:
        yield (i, i)
thegen = mygen(x_train)
# If I use this generator somehow None is returned, so it is not used
thegenval = mygen(np.array([x_test]))
hist = autoencoder.fit_generator(thegen,
                                 epochs=AE_EPOCHS,
                                 steps_per_epoch=4,
                                 verbose=VERB,
                                 validation_data=(x_test, x_test),
                                 validation_steps=1
                                 )
Traceback (most recent call last):
  File "stacko.py", line 107, in <module>
    validation_steps=1
  File "/usr/local/lib/python3.5/dist-packages/keras/legacy/interfaces.py", line 88, in wrapper
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 1847, in fit_generator
    val_x, val_y, val_sample_weight)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 1315, in _standardize_user_data
    exception_prefix='target')
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/training.py", line 139, in _standardize_input_data
    str(array.shape))
ValueError: Error when checking target: expected conv2d_7 to have shape (None, 1, None, 16) but got array with shape (1, 1, 10, 15)
There are two problems with the code above: first, each image axis must be a multiple of 2 raised to the number of pooling layers (2^3 = 8 in this case), so that three rounds of pooling and upsampling restore the original size; second, the generators passed to fit_generator must return batches (4D numpy arrays).
The generator is implemented with itertools.cycle and reshapes each figure as a one-sample batch (if working with multiple images of common sizes, one could build variable-size batches for each group of dimensions). The working example is below.
import numpy as np
from itertools import cycle
import keras
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
AE_EPOCHS = 10
VERB = 1
outfun = 'sigmoid'
data = []
dimensions = [(16, 32), (24, 32), (8,32), (32,32)]
for d in dimensions:
    dd = np.random.rand(*d)
    dd = dd.reshape((1,)+dd.shape)
    data.append(dd)
input_img = Input(shape=(1, None, 32))
filtersz = 3
pad_it = 'same'
size1 = 16
size2 = 8
x = Conv2D(size1, (filtersz, filtersz), activation='relu', padding=pad_it)(input_img)
x = MaxPooling2D((2, 2), padding=pad_it)(x)
x = Conv2D(size2, (filtersz, filtersz), activation='relu', padding=pad_it)(x)
x = MaxPooling2D((2, 2), padding=pad_it)(x)
x = Conv2D(size2, (filtersz, filtersz), activation='relu', padding=pad_it)(x)
encoded = MaxPooling2D((2, 2), padding=pad_it)(x)
x = Conv2D(size2, (filtersz, filtersz), activation='relu', padding=pad_it)(encoded)
x = UpSampling2D((2, 2), data_format="channels_first")(x)
x = Conv2D(size2, (filtersz, filtersz), activation='relu', padding=pad_it)(x)
x = UpSampling2D((2, 2), data_format="channels_first")(x)
x = Conv2D(size1, (filtersz, filtersz), activation='relu', padding=pad_it)(x)
x = UpSampling2D((2, 2), data_format="channels_first")(x)
decoded = Conv2D(1, (filtersz, filtersz), activation=outfun, padding=pad_it)(x)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss= 'binary_crossentropy')
x_train = data[1:]
x_test= [data[0]]
def mygen(xx, *args, **kwargs):
    for i in cycle(xx):
        ii = i.reshape((1,)+i.shape)
        yield ii, ii
thegen = mygen(x_train)
thegenval = mygen(x_test)
hist = autoencoder.fit_generator(
    thegen,
    epochs=AE_EPOCHS,
    steps_per_epoch=3,
    verbose=VERB,
    validation_data=thegenval,
    validation_steps=1
)
I'm getting an error in Keras where the dimension of the output is different than the dimension of the input. I don't understand where the 20 comes from. All my dimensions seem to be showing 18. Also, the Output Shape for convolution2d_70 just says 'multiple', so I'm not sure what that means. Any ideas?
Exception: Error when checking model target: expected convolution2d_70 to have shape (None, 1, 36L, 20L) but got array with shape (49L, 1L, 36L, 18L)
from keras.layers import Dense, Input, Convolution2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from os import listdir
import os.path
import numpy as np
import re
versions = !pip freeze
for line in versions:
    if re.search('Keras', line):
        print line
samples = 100
x = np.ndarray([samples,36,18])
i=0
for i in range(samples):
    x[i] = np.random.randint(15, size=(36, 18))
    i+=1
#TODO: CREATE A REAL TEST SET
x_train = x[:49]
x_test = x[50:]
print x_train.shape
print x_test.shape
#INPUT LAYER
input_img = Input(shape=(1,x_train.shape[1],x_train.shape[2]))
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(input_img)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
encoded = MaxPooling2D((2, 2), border_mode='same')(x)
# at this point the representation is (8, 4, 4) i.e. 128-dimensional
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = UpSampling2D((2, 2))(x)
x = Convolution2D(16, 3, 3, activation='relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Convolution2D(1, 3, 3, activation='sigmoid', border_mode='same')(x)
#MODEL
autoencoder = Model(input=input_img, output=decoded)
#SEPARATE ENCODER MODEL
encoder = Model(input=input_img, output=encoded)
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(8, 4, 4))
# retrieve the last layer of the autoencoder model
decoder_layer1 = autoencoder.layers[-3]
decoder_layer2 = autoencoder.layers[-2]
decoder_layer3 = autoencoder.layers[-1]
print decoder_layer3.get_config()
# create the decoder model
decoder = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(decoder_layer1(encoded_input))))
#COMPILER
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.summary()
x_train = x_train.astype('float32') / 15.
x_test = x_test.astype('float32') / 15.
x_test = np.reshape(x_test, (len(x_test), 1, x_test.shape[1], x_test.shape[2]))
x_train = np.reshape(x_train, (len(x_train), 1, x_train.shape[1], x_train.shape[2]))
autoencoder.fit(x_train,
                x_train,
                nb_epoch=5,
                batch_size=1,
                verbose=True,
                shuffle=True,
                validation_data=(x_test, x_test))
Notice that in the third convolutional layer of the decoder you are missing border_mode='same'. This is what makes your dimensions mismatch.
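As a debugging aid, printing each layer's output shape shows where the 20 comes from; summary() reports 'multiple' for the decoder layers because they are reused by the separate decoder model and so have several inbound nodes, which is why node 0 is indexed explicitly. A minimal sketch, in the Python 2 style of the snippet above:
# A 'valid' 3x3 convolution trims 2 from each spatial axis, while every
# UpSampling2D doubles them; the trace below locates the mismatch.
for layer in autoencoder.layers:
    print layer.name, layer.get_output_shape_at(0)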