GridSearchCV fails to estimate parameters - python-3.x

I'm trying to run some tests with GridSearchCV, but I get this error. Could you tell me why?

in clone
    raise TypeError("Cannot clone object '%s' (type %s): "
TypeError: Cannot clone object '<keras.engine.functional.Functional object at 0x7f330fe610a0>' (type <class 'keras.engine.functional.Functional'>): it does not seem to be a scikit-learn estimator as it does not implement a 'get_params' method.
My code:
Model:
from keras import backend as K, regularizers
from keras.engine.training import Model
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, \
    BatchNormalization, Activation, Input

import ModelLib


class Cifar100_Model(ModelLib.ModelLib):
    def build_classifier_model(self, dataset, n_classes=5,
                               activation='elu', dropout_1_rate=0.25,
                               dropout_2_rate=0.5,
                               reg_factor=50e-4, bias_reg_factor=None,
                               batch_norm=False):
        n_classes = dataset.n_classes
        print(n_classes)
        print("----------------------------------------------------------------------------")

        l2_reg = regularizers.l2(reg_factor)  # K.variable(K.cast_to_floatx(reg_factor))
        l2_bias_reg = None
        if bias_reg_factor:
            l2_bias_reg = regularizers.l2(bias_reg_factor)  # K.variable(K.cast_to_floatx(bias_reg_factor))

        # input image dimensions
        h, w, d = 32, 32, 3
        if K.image_data_format() == 'channels_first':
            input_shape = (d, h, w)
        else:
            input_shape = (h, w, d)

        x = input_1 = Input(shape=input_shape)

        x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)

        x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Conv2D(filters=64, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)

        x = Conv2D(filters=64, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Conv2D(filters=128, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)

        x = Conv2D(filters=128, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Conv2D(filters=256, kernel_size=(2, 2), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)

        x = Conv2D(filters=256, kernel_size=(2, 2), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Flatten()(x)
        x = Dense(units=512, kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Dropout(rate=dropout_2_rate)(x)
        x = Dense(units=n_classes, kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation='softmax')(x)

        model = Model(inputs=[input_1], outputs=[x])
        return model
Test code:
from sklearn.model_selection import GridSearchCV
import models.cifar100_model


def load_model():
    return models.cifar100_model.Cifar100_Model()


def get_params(self, deep=True):
    return {"learning_rate": self.learning_rate}


model_lib = load_model()
model = model_lib.build_classifier_model(dataset)
x_train = dataset.x_train
y_train = dataset.y_train_labels

learning_rate = [0.01, 0.1]
param_grid = dict(learning_rate=learning_rate)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3, scoring='accuracy')
grid_result = grid.fit(x_train, y_train)
print(grid_result.best_params_)
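For context on the error: scikit-learn can only grid-search objects that implement get_params/set_params, and a raw Keras Model does not (hence the clone failure); a free-standing get_params function like the one above does not attach itself to the model either. The usual approach is to wrap a model-building function in a scikit-learn-compatible wrapper. Below is a minimal sketch, assuming the legacy keras.wrappers.scikit_learn.KerasClassifier is available (newer setups use scikeras.wrappers.KerasClassifier instead) and reusing dataset, model_lib, x_train, and y_train from the code above:

# Sketch only: wrap a build function so GridSearchCV can clone the estimator.
# The build function must return a *compiled* model and accept the searched
# hyperparameters (here learning_rate) as keyword arguments.
from keras.optimizers import Adam
from keras.wrappers.scikit_learn import KerasClassifier  # or: from scikeras.wrappers import KerasClassifier
from sklearn.model_selection import GridSearchCV

def create_model(learning_rate=0.01):
    model = model_lib.build_classifier_model(dataset)
    model.compile(optimizer=Adam(learning_rate=learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

wrapped = KerasClassifier(build_fn=create_model, epochs=10, batch_size=32, verbose=0)
grid = GridSearchCV(estimator=wrapped,
                    param_grid={'learning_rate': [0.01, 0.1]},
                    cv=3)
grid_result = grid.fit(x_train, y_train)
print(grid_result.best_params_)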

Related

Decreased accuracy when converting keras code to pytorch

I am a beginner in PyTorch and I am trying to convert Keras code to PyTorch for a classification task.
The results differ between the two versions with the same settings: accuracy drops by 5% and the loss increases from 0.2 to 4.5. Can anyone tell me whether I converted the code to PyTorch correctly? Thank you so much.
Parameters: IMG_HEIGHT=48, IMG_WIDTH=48, channels=3, num_class=164
Keras:
model = keras.models.Sequential([
    keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, channels)),
    keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu'),
    keras.layers.MaxPool2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(axis=-1),
    keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
    keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu'),
    keras.layers.MaxPool2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(axis=-1),
    keras.layers.Flatten(),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.Dropout(rate=0.5),
    keras.layers.Dense(164, activation='softmax')])
PyTorch:
class convnet(nn.Module):
    def __init__(self, num_class):
        super(convnet, self).__init__()
        self.Conv2d_1 = nn.Conv2d(3, 16, (3, 3))
        self.relu_1 = nn.ReLU()
        self.Conv2d_2 = nn.Conv2d(16, 32, (3, 3))
        self.relu_2 = nn.ReLU()
        self.maxpool_1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.batch_1 = nn.BatchNorm2d(32)
        self.Conv2d_3 = nn.Conv2d(32, 64, (3, 3))
        self.relu_3 = nn.ReLU()
        self.Conv2d_4 = nn.Conv2d(64, 128, (3, 3))
        self.relu_4 = nn.ReLU()
        self.maxpool_2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.batch_2 = nn.BatchNorm2d(128)
        self.fc1 = nn.Linear(10368, 1024)
        self.relu_5 = nn.ReLU()
        self.batch_3 = nn.BatchNorm1d(1024)
        self.dropout_1 = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(1024, num_class)
        self.Softmax_1 = nn.Softmax()

    def forward(self, x):
        y = self.Conv2d_1(x)
        y = self.relu_1(y)
        y = self.Conv2d_2(y)
        y = self.relu_2(y)
        y = self.maxpool_1(y)
        y = self.batch_1(y)
        y = self.Conv2d_3(y)
        y = self.relu_3(y)
        y = self.Conv2d_4(y)
        y = self.relu_4(y)
        y = self.maxpool_2(y)
        y = self.batch_2(y)
        y = y.view(y.size(0), -1)
        y = self.fc1(y)
        y = self.relu_5(y)
        y = self.batch_3(y)
        y = self.dropout_1(y)
        y = self.fc2(y)
        y = self.Softmax_1(y)
        return y
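The training loops are not shown, so this is a hedged observation rather than a confirmed diagnosis: in Keras the final softmax is paired with categorical_crossentropy, but PyTorch's nn.CrossEntropyLoss applies log-softmax internally and expects raw logits, so keeping self.Softmax_1 in forward effectively applies softmax twice, which typically inflates the loss and hurts accuracy. (The hidden layer widths also differ: Dense(512) in Keras versus nn.Linear(10368, 1024) in PyTorch.) A small self-contained sketch of the loss difference:

import torch
import torch.nn as nn

# Hedged sketch: nn.CrossEntropyLoss = LogSoftmax + NLLLoss, so it should
# receive raw logits. Feeding already-softmaxed outputs compresses them into
# [0, 1] and distorts the loss value.
torch.manual_seed(0)
logits = torch.randn(4, 164)            # raw outputs for a batch of 4
target = torch.randint(0, 164, (4,))    # integer class labels
criterion = nn.CrossEntropyLoss()

loss_from_logits = criterion(logits, target)                        # intended usage
loss_double_softmax = criterion(nn.Softmax(dim=1)(logits), target)  # softmax applied twice
print(loss_from_logits.item(), loss_double_softmax.item())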

expected string or bytes-like object when calling model subclass

I have attempted to write a generative adversarial network. Below is the code for one of the discriminators.
class D1(Layer):
    def __init__(self, input_shape=(256, 256, 3), name='d1', **kwargs):
        super(D1, self).__init__(name=name, **kwargs)
        self.h1 = Conv2D(64, (3, 3), strides=(1, 1), padding='same')
        self.h2 = MaxPooling2D(pool_size=(2, 2), strides=None, padding='same')
        self.h3 = LeakyReLU(alpha=0.2)
        self.h4 = Conv2D(128, (3, 3), strides=(1, 1), padding='same')
        self.h5 = Conv2D(128, (3, 3), strides=(1, 1), padding='same')
        self.h6 = MaxPooling2D(pool_size=(2, 2), strides=None, padding='same')
        self.h7 = LeakyReLU(alpha=0.2)
        self.h8 = Conv2D(256, (3, 3), strides=(1, 1), padding='same')
        self.h9 = Conv2D(256, (3, 3), strides=(1, 1), padding='same')
        self.h10 = MaxPooling2D(pool_size=(2, 2), strides=None, padding='same')
        self.h11 = LeakyReLU(alpha=0.2)
        self.h12 = Conv2D(512, (3, 3), strides=(1, 1), padding='same')
        self.h13 = Conv2D(512, (3, 3), strides=(1, 1), padding='same')
        self.h14 = MaxPooling2D(pool_size=(2, 2), strides=None, padding='same')
        self.h15 = Flatten()
        self.h16 = Dropout(0.4)
        self.D1R = Dense(1, activation='sigmoid')
        self.h17 = Dense(4096, activation='relu')
        self.h18 = Dense(4096, activation='relu')
        self.D1C = Dense(16, activation='sigmoid')

    def call(self, inputs):
        x = self.h1(inputs)
        x = self.h2(x)
        x = self.h3(x)
        x = self.h4(x)
        x = self.h5(x)
        x = self.h6(x)
        x = self.h7(x)
        x = self.h8(x)
        x = self.h9(x)
        x = self.h10(x)
        x = self.h11(x)
        x = self.h12(x)
        x = self.h13(x)
        x = self.h14(x)
        x = self.h15(x)
        x = self.h16(x)
        d1r = self.D1R(x)
        x = self.h17(x)
        x = self.h18(x)
        d1c = self.D1C(x)
        return d1r, d1c
class Discriminator1(Model):
    def __init__(self, input_shape=(None, 256, 256, 3), name='disc1', **kwargs):
        super(Discriminator1, self).__init__(name=name, **kwargs)
        self.d1 = D1(input_shape=input_shape)

    def call(self, inputs):
        image = inputs
        d1r, d1c = self.d1(image)
        d1_loss = d1_loss(d1r, d1c)
        self.add_loss(d1_loss)
        return out
When I call it in training, it throws TypeError: expected string or bytes-like object. I cannot figure out what it is.
Any help? None of my functions are supposed to use strings.
def generate_latent_noise(latent_dim, n_samples):
    x_input = randn(latent_dim * n_samples)
    x_input = x_input.reshape(n_samples, latent_dim)
    return x_input

def generate_fake_samples(g, latent_dim, n_samples, y_i, y_l):
    x_input = generate_latent_noise(latent_dim, n_samples)
    X = g.predict(x_input)
    y = zeros((n_samples, 1))
    for i in range(n_samples - 1):
        intent = y_i[i]
        bio = y_l[i]
    return X, y, intent, bio
epochs = 200
opt = SGD(learning_rate=1e-3, momentum=0.99)
metric = Accuracy()
yi, yl = retrieve_target_labels('/content/drive/My Drive/Project/input.xlsx')
g = Generator(100)
d1 = D1((256, 256, 3))
d2 = D2((256, 256, 3))
gen = G_Model((256, 256, 3), 100, yi, yl)
disc1 = Discriminator1((256, 256, 3), 100)
disc2 = Discriminator2((256, 256, 3), 100)
art, yc_real, yi_real, yl_real = load_real_samples('/content/drive/MyDrive/Project/TrainSA.xlsx')
half_batch = yi.shape[0]
n_batch = half_batch * 2
batch_per_epoch = int(art.shape[0] / n_batch)

for epoch in range(epochs):
    for batch in range(batch_per_epoch):
        fake, y, yi, yl = generate_fake_samples(g, 100, half_batch, yi, yl)
        real, y_real, c_real, i_real, l_real = generate_real_samples(art, half_batch, yc_real, yi_real, yl_real)
        fake_image = tf.convert_to_tensor(fake)
        d1r, d1c = d1(fake_image)  # error!
        d1_loss_fake = d1.losses
        d1r, d1c = d1(real)
        d1_loss = d1.losses
        d2i_fake, d2l_fake = d2(fake_image)
        d2_loss_fake = d2.losses
        d2i, d2l = d2(real)
        d2_loss = d2.losses
        g_loss = gen.losses
It is a bit difficult to provide a minimal working example, as the error occurs at the end of my code after many functions have been called, but I have tried to include the ones that might be involved.
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-95-d4bb0da7c68f> in <module>()
23 #d1_loss_fake = d1.losses
24 real_image = tf.convert_to_tensor(real, dtype = tf.float32)
---> 25 d1r, d1c = disc1(real_image)
26 d1_loss = d1.losses
27 d2i_fake, d2l_fake = d2(fake_image)
3 frames
/tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/ops.py in name_scope(self, name)
   4126     # Scopes created in the root must match the more restrictive
   4127     # op name regex, which constrains the initial character.
-> 4128     if not _VALID_OP_NAME_REGEX.match(name):
   4129         raise ValueError("'%s' is not a valid scope name" % name)
   4130     old_stack = self._name_stack
TypeError: expected string or bytes-like object
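A hedged reading of the traceback, based only on the code shown: Discriminator1.__init__ takes input_shape followed by name, so the call disc1 = Discriminator1((256, 256, 3), 100) binds 100 to name. TensorFlow then runs _VALID_OP_NAME_REGEX.match(name) on an integer, which is exactly what raises TypeError: expected string or bytes-like object. A sketch of the fix, assuming Discriminator2 has the same signature:

# Sketch: pass only the input shape so `name` keeps its string default
# ('disc1'); an integer name cannot be matched by TensorFlow's op-name regex.
disc1 = Discriminator1(input_shape=(256, 256, 3))
disc2 = Discriminator2(input_shape=(256, 256, 3))  # assumed to mirror Discriminator1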

Keras functional API graph disconnected error

The following code gives me a graph disconnected error, but I can't work out where it is coming from and am not sure how to go about debugging it. The error is thrown on the last line, decoder = Model(latentInputs, outputs, name="decoder"). I have compared it to working code that I modified, but to no avail.
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import ReLU
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import UpSampling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import GaussianNoise
from tensorflow.keras.layers import Reshape
from tensorflow.keras.layers import Add
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
import tensorflow as tf
import numpy as np
width=256
height=256
depth=3
inputShape = (height, width, depth)
chanDim = -1
filter_size = 3
latentDim = 512
# initialize the input shape to be "channels last" along with
# the channels dimension itself
inputShape = (height, width, depth)
chanDim = -1
# define the input to the encoder
inputs = Input(shape=inputShape)
x = GaussianNoise(0.2)(inputs)
x = Conv2D(128, filter_size, strides=1, padding="same")(x)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
layer_1 = Conv2D(128, filter_size, strides=2, padding="same")(x)
x = BatchNormalization(axis=chanDim)(layer_1)
x = LeakyReLU(alpha=0.2)(x)
layer_2 = Conv2D(128, filter_size, strides=2, padding="same")(x)
x = BatchNormalization(axis=chanDim)(layer_2)
x = LeakyReLU(alpha=0.2)(x)
layer_3 = Conv2D(128, filter_size, strides=2, padding="same")(x)
x = BatchNormalization(axis=chanDim)(layer_3)
x = LeakyReLU(alpha=0.2)(x)
layer_4 = Conv2D(128, filter_size, strides=2, padding="same")(x)
x = BatchNormalization(axis=chanDim)(layer_4)
x = LeakyReLU(alpha=0.2)(x)
layer_5 = Conv2D(128, filter_size, strides=2, padding="same")(x)
x = BatchNormalization(axis=chanDim)(layer_5)
x = LeakyReLU(alpha=0.2)(x)
layer_6 = Conv2D(128, filter_size, strides=2, padding="same")(x)
x = BatchNormalization(axis=chanDim)(layer_6)
x = LeakyReLU(alpha=0.2)(x)
layer_7 = Conv2D(128, filter_size, strides=2, padding="same")(x)
latent = Flatten()(layer_7)
# flatten the network and then construct our latent vector
volumeSize = K.int_shape(layer_7)
# build the encoder model
encoder = Model(inputs, latent, name="encoder")
encoder.summary()
# start building the decoder model which will accept the
# output of the encoder as its inputs
#%%
latentInputs = Input(shape=(np.prod(volumeSize[1:]),))
x = Reshape((volumeSize[1], volumeSize[2], volumeSize[3]))(latentInputs)
dec_layer_7 = Add()([x, layer_7])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_7)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
dec_layer_6 = Add()([x, layer_6])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_6)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
dec_layer_5 = Add()([x, layer_5])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_5)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
dec_layer_4 = Add()([x, layer_4])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_4)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
dec_layer_3 = Add()([x, layer_3])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_3)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
dec_layer_2 = Add()([x, layer_2])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_2)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
dec_layer_1 = Add()([x, layer_1])
x = Conv2DTranspose(128, filter_size, strides=2, padding="same")(dec_layer_1)
x = BatchNormalization(axis=chanDim)(x)
x = LeakyReLU(alpha=0.2)(x)
outputs = Conv2DTranspose(depth, filter_size, padding="same")(x)
# apply a single CONV_TRANSPOSE layer used to recover the
# original depth of the image
# =============================================================================
# outputs = ReLU(max_value=1.0)(x)
# =============================================================================
# build the decoder model
decoder = Model(latentInputs, outputs, name="decoder")
Error is:
ValueError: Graph disconnected: cannot obtain value for tensor Tensor("input_37:0", shape=(None, 256, 256, 3), dtype=float32) at layer "input_37". The following previous layers were accessed without issue: []
layer_7 belongs to the encoder model, so you have to provide an input for layer_7 in your decoder as well. One solution is to define your decoder this way:
decoder = Model([latentInputs, encoder.input], outputs, name="decoder")
Here is the full example: https://colab.research.google.com/drive/1W8uLy49H_8UuD9DGZvtP7Md1f4ap3u6A?usp=sharing

How to use deconvolution with MNIST database

I am a newbie with CNNs and I am trying to code a deconvolution (to generate feature maps) on the MNIST database (because it is the simplest one for a beginner to learn). I want my model to generate feature maps at the end. The idea is to implement the paper Saliency Detection Via Dense Convolution Network to some extent.
Here is the complete code that I am trying to run:
import keras
from keras.datasets import mnist
import keras.backend as K
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Flatten, Dropout, Activation, Reshape
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
from keras.initializers import RandomNormal

init = RandomNormal(mean=0., stddev=0.02)

def GeneratorDeconv(image_size=28):
    L = int(image_size)
    inputs = Input(shape=(100,))
    x = Dense(512 * int(L / 16) ** 2)(inputs)        # shape (512*(L/16)**2,)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Reshape((int(L / 16), int(L / 16), 512))(x)  # shape (L/16, L/16, 512)
    x = Conv2DTranspose(256, (4, 4), strides=(2, 2),
                        kernel_initializer=init,
                        padding='same')(x)           # shape (L/8, L/8, 256)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(128, (4, 4), strides=(2, 2),
                        kernel_initializer=init,
                        padding='same')(x)           # shape (L/4, L/4, 128)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(64, (4, 4), strides=(2, 2),
                        kernel_initializer=init,
                        padding='same')(x)           # shape (L/2, L/2, 64)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(3, (4, 4), strides=(2, 2),
                        kernel_initializer=init,
                        padding='same')(x)           # shape (L, L, 3)
    images = Activation('tanh')(x)
    model = Model(inputs=inputs, outputs=images)
    model.summary()
    return model
batch_size = 128
num_classes = 10
epochs = 1

# input image dimensions
img_rows, img_cols = 28, 28

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = GeneratorDeconv()
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
I picked the function GeneratorDeconv(image_size=28) from ProgramCreek Python.
Now I am confused about how to embed it into my own custom model. Up to model.compile(...) the program runs fine, but at model.fit(...) it gives this error:
ValueError: Error when checking input: expected input_2 to have 2 dimensions, but got array with shape (60000, 28, 28, 1)
I don't know how to resolve the issue. Please help.
The input to your model is:
inputs = Input(shape = (100, ))
This takes a vector of shape (samples, 100), so the model expects a 2D input.
However:
print('x_train shape:', x_train.shape)
>>> x_train shape: (60000, 28, 28, 1)
You are feeding in a 4D array when you specified that your input takes a 2D one. That is what causes the error.
I made some edits to your architecture so the shapes match up and it actually trains:
def GeneratorDeconv(image_size=28):
    L = int(image_size)
    inputs = Input(shape=(28, 28, 1))
    x = Dense(512 * int(L / 16) ** 2)(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(256, (4, 4), strides=(2, 2),
                        kernel_initializer=init,
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(128, (4, 4), strides=(2, 2),
                        kernel_initializer=init,
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(64, (4, 4), strides=(2, 2),
                        kernel_initializer=init,
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(3, (4, 4), strides=(2, 2),
                        kernel_initializer=init,
                        padding='same')(x)
    x = Flatten()(x)
    x = Dense(10)(x)
    images = Activation('tanh')(x)
    model = Model(inputs=inputs, outputs=images)
    model.summary()
    return model
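One hedged caveat on the edited architecture: the final tanh can output negative values, which the log-based categorical_crossentropy loss does not handle well; for the 10-class one-hot labels a softmax output would be the more conventional choice, e.g.:

images = Activation('softmax')(x)  # hedged suggestion, not part of the original answer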

Keras encoder-decoder for MNIST -> new images

I am trying to fit an encoder-decoder network in Keras which predicts images from the MNIST data set very well. Now I have changed the image input so that apples are shown. As a reference, here is a link:
http://farm3.static.flickr.com/2133/2203251029_008e350792.jpg
I converted the images to grayscale and resized them to 28x28. However, when I now run the same network as for the MNIST data set, I get only a black image as a prediction. Do you have any ideas how I could improve my network?
Here is the code of my network:
input_img = Input(shape=(28, 28, 1))  # adapt this if using `channels_first` image data format
x = Conv2D(32, (3, 3), strides=(2, 2), padding='valid')(input_img)
x = BatchNormalization()(x)
x = Activation('relu')(x)
#x = BatchNormalization()(x)
x = Conv2D(16, (3, 3), strides=(2, 2), padding='valid')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(8, (2, 2), strides=(2, 2), padding='valid')(x)
x = BatchNormalization()(x)
encoded = Activation('relu')(x)
x = Conv2D(8, (1, 1), padding='valid')(encoded)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = UpSampling2D((3, 3))(x)
x = Conv2D(16, (2, 2), padding='valid')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(32, (3, 3))(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (2, 2), activation='sigmoid', padding='same')(x)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
How can I improve my network? Any hints are much appreciated!
Cheers,
Andi
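Since the training code is not shown, here is one hedged guess at the all-black predictions: with a sigmoid output and binary_crossentropy loss, the inputs must be scaled to [0, 1], and the network needs to be (re)trained on the apple images rather than only on MNIST digits. A minimal sketch, where apples is an assumed (n, 28, 28) uint8 array of the converted grayscale images:

# Hypothetical preprocessing/retraining sketch; `apples` is an assumed
# array of the converted 28x28 grayscale images with raw 0-255 values.
apples = apples.astype('float32') / 255.0   # scale to [0, 1] for the sigmoid output
apples = apples.reshape(-1, 28, 28, 1)      # add the channel dimension
autoencoder.fit(apples, apples,             # retrain on the new image domain
                epochs=50, batch_size=128, shuffle=True)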
