I have attempted to write a generative adversarial network. Below is the code of one of the discriminators.
class D1(Layer):
    def __init__(self, input_shape=(256, 256, 3), name='d1', **kwargs):
        super(D1, self).__init__(name=name, **kwargs)
        self.h1 = Conv2D(64, (3, 3), strides=(1, 1), padding='same')
        self.h2 = MaxPooling2D(pool_size=(2, 2), strides=None, padding='same')
        self.h3 = LeakyReLU(alpha=0.2)
        self.h4 = Conv2D(128, (3, 3), strides=(1, 1), padding='same')
        self.h5 = Conv2D(128, (3, 3), strides=(1, 1), padding='same')
        self.h6 = MaxPooling2D(pool_size=(2, 2), strides=None, padding='same')
        self.h7 = LeakyReLU(alpha=0.2)
        self.h8 = Conv2D(256, (3, 3), strides=(1, 1), padding='same')
        self.h9 = Conv2D(256, (3, 3), strides=(1, 1), padding='same')
        self.h10 = MaxPooling2D(pool_size=(2, 2), strides=None, padding='same')
        self.h11 = LeakyReLU(alpha=0.2)
        self.h12 = Conv2D(512, (3, 3), strides=(1, 1), padding='same')
        self.h13 = Conv2D(512, (3, 3), strides=(1, 1), padding='same')
        self.h14 = MaxPooling2D(pool_size=(2, 2), strides=None, padding='same')
        self.h15 = Flatten()
        self.h16 = Dropout(0.4)
        self.D1R = Dense(1, activation='sigmoid')
        self.h17 = Dense(4096, activation='relu')
        self.h18 = Dense(4096, activation='relu')
        self.D1C = Dense(16, activation='sigmoid')

    def call(self, inputs):
        x = self.h1(inputs)
        x = self.h2(x)
        x = self.h3(x)
        x = self.h4(x)
        x = self.h5(x)
        x = self.h6(x)
        x = self.h7(x)
        x = self.h8(x)
        x = self.h9(x)
        x = self.h10(x)
        x = self.h11(x)
        x = self.h12(x)
        x = self.h13(x)
        x = self.h14(x)
        x = self.h15(x)
        x = self.h16(x)
        d1r = self.D1R(x)
        x = self.h17(x)
        x = self.h18(x)
        d1c = self.D1C(x)
        return d1r, d1c
class Discriminator1(Model):
    def __init__(self, input_shape=(None, 256, 256, 3), name='disc1', **kwargs):
        super(Discriminator1, self).__init__(name=name, **kwargs)
        self.d1 = D1(input_shape=input_shape)

    def call(self, inputs):
        image = inputs
        d1r, d1c = self.d1(image)
        d1_loss = d1_loss(d1r, d1c)
        self.add_loss(d1_loss)
        return out
When I call it in training, it throws a TypeError: expected string or bytes-like object. I cannot figure out what causes it.
Any help? None of my functions is supposed to use strings.
def generate_latent_noise(latent_dim, n_samples):
    x_input = randn(latent_dim * n_samples)
    x_input = x_input.reshape(n_samples, latent_dim)
    return x_input
def generate_fake_samples(g, latent_dim, n_samples, y_i, y_l):
    x_input = generate_latent_noise(latent_dim, n_samples)
    X = g.predict(x_input)
    y = zeros((n_samples, 1))
    for i in range(n_samples-1):
        intent = y_i[i]
        bio = y_l[i]
    return X, y, intent, bio
epochs = 200
opt = SGD(learning_rate=1e-3, momentum=0.99)
metric = Accuracy()
yi, yl = retrieve_target_labels('/content/drive/My Drive/Project/input.xlsx')
g = Generator(100)
d1 = D1((256, 256, 3))
d2 = D2((256, 256, 3))
gen = G_Model((256, 256, 3), 100, yi, yl)
disc1 = Discriminator1((256, 256, 3), 100)
disc2 = Discriminator2((256, 256, 3), 100)
art, yc_real, yi_real, yl_real = load_real_samples('/content/drive/MyDrive/Project/TrainSA.xlsx')
half_batch = yi.shape[0]
n_batch = half_batch * 2
batch_per_epoch = int(art.shape[0] / n_batch)

for epoch in range(epochs):
    for batch in range(batch_per_epoch):
        fake, y, yi, yl = generate_fake_samples(g, 100, half_batch, yi, yl)
        real, y_real, c_real, i_real, l_real = generate_real_samples(art, half_batch, yc_real, yi_real, yl_real)
        fake_image = tf.convert_to_tensor(fake)
        d1r, d1c = d1(fake_image)  # error!
        d1_loss_fake = d1.losses
        d1r, d1c = d1(real)
        d1_loss = d1.losses
        d2i_fake, d2l_fake = d2(fake_image)
        d2_loss_fake = d2.losses
        d2i, d2l = d2(real)
        d2_loss = d2.losses
        g_loss = gen.losses
It is a bit difficult to provide a minimal working example, as the error occurs late in my code after many functions have been called, but I have tried to include the ones that might be involved.
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-95-d4bb0da7c68f> in <module>()
23 #d1_loss_fake = d1.losses
24 real_image = tf.convert_to_tensor(real, dtype = tf.float32)
---> 25 d1r, d1c = disc1(real_image)
26 d1_loss = d1.losses
27 d2i_fake, d2l_fake = d2(fake_image)
3 frames
/tensorflow-1.15.2/python3.6/tensorflow_core/python/framework/ops.py in name_scope(self, name)
4126 # Scopes created in the root must match the more restrictive
4127 # op name regex, which constrains the initial character.
-> 4128 if not _VALID_OP_NAME_REGEX.match(name):
4129 raise ValueError("'%s' is not a valid scope name" % name)
4130 old_stack = self._name_stack
TypeError: expected string or bytes-like object
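Judging from the constructor signatures above, a likely culprit is the call disc1 = Discriminator1((256, 256, 3), 100): the second positional argument 100 binds to the name parameter, so TensorFlow's name_scope ends up regex-matching an integer rather than a string, which is exactly the TypeError in the traceback. A minimal sketch of the instantiation with keyword arguments (assuming Discriminator2 has the same signature; neither constructor accepts a latent dimension):

# Sketch: pass keywords explicitly so 100 is never bound to `name`.
disc1 = Discriminator1(input_shape=(256, 256, 3), name='disc1')
disc2 = Discriminator2(input_shape=(256, 256, 3), name='disc2')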
For a toy LeNet-5 CNN architecture on MNIST implemented in TensorFlow 2.10 + Python 3.10, with a batch size of 256:
class LeNet5(Model):
    def __init__(self):
        super(LeNet5, self).__init__()
        self.conv1 = Conv2D(
            filters=6, kernel_size=(5, 5),
            strides=(1, 1), activation=None,
            input_shape=(28, 28, 1)
        )
        self.pool1 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))
        self.conv2 = Conv2D(
            filters=16, kernel_size=(5, 5),
            strides=(1, 1), activation=None
        )
        self.pool2 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))
        self.flatten = Flatten()
        self.dense1 = Dense(units=120, activation=None)
        self.dense2 = Dense(units=84, activation=None)
        self.output_layer = Dense(units=10, activation=None)

    def call(self, x):
        x = tf.nn.relu(self.conv1(x))
        x = self.pool1(x)
        x = tf.nn.relu(self.conv2(x))
        x = self.pool2(x)
        x = self.flatten(x)
        x = tf.nn.relu(self.dense1(x))
        x = tf.nn.relu(self.dense2(x))
        x = tf.nn.softmax(self.output_layer(x))
        return x

    def shape_computation(self, x):
        print(f"Input shape: {x.shape}")
        x = self.conv1(x)
        print(f"conv1 output shape: {x.shape}")
        x = self.pool1(x)
        print(f"pool1 output shape: {x.shape}")
        x = self.conv2(x)
        print(f"conv2 output shape: {x.shape}")
        x = self.pool2(x)
        print(f"pool2 output shape: {x.shape}")
        x = self.flatten(x)
        print(f"flattened shape: {x.shape}")
        x = self.dense1(x)
        print(f"dense1 output shape: {x.shape}")
        x = self.dense2(x)
        print(f"dense2 output shape: {x.shape}")
        x = self.output_layer(x)
        print(f"output shape: {x.shape}")
        del x
        return None
# Initialize an instance of the LeNet-5 CNN
model = LeNet5()
model.build(input_shape=(None, 28, 28, 1))

# Define loss and optimizer
loss_fn = tf.keras.losses.CategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)
# optimizer = tf.keras.optimizers.Adam(learning_rate = 0.0003)
optimizer = tf.keras.optimizers.SGD(
    learning_rate=10e-3, momentum=0.0,
    nesterov=False
)

with tf.GradientTape() as grad_tape:
    pred = model(x)
    loss = loss_fn(y, pred)

loss.shape
# TensorShape([256])
This computes an individual loss for each of the 256 training images in a given batch.
# Compute gradients of the loss wrt the parameters
grads = grad_tape.gradient(loss, model.trainable_variables)

type(grads), len(grads)
# (list, 10)

for i in range(len(grads)):
    print(f"i: {i}, grads.shape: {grads[i].shape}")
"""
i: 0, grads.shape: (5, 5, 1, 6)
i: 1, grads.shape: (6,)
i: 2, grads.shape: (5, 5, 6, 16)
i: 3, grads.shape: (16,)
i: 4, grads.shape: (256, 120)
i: 5, grads.shape: (120,)
i: 6, grads.shape: (120, 84)
i: 7, grads.shape: (84,)
i: 8, grads.shape: (84, 10)
i: 9, grads.shape: (10,)
"""
Corresponding to the loss for each training example, how can I compute the gradient for each training example?
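One approach, sketched under the assumption that the full Jacobian fits in memory for a model of this size, is tf.GradientTape.jacobian, which preserves the batch axis of the per-example loss vector:

with tf.GradientTape() as tape:
    pred = model(x)
    loss = loss_fn(y, pred)      # shape: (256,), one loss per example

# Each entry has shape (batch_size, *variable.shape); e.g. the first
# conv kernel's per-example gradients have shape (256, 5, 5, 1, 6),
# matching the per-variable shapes printed above.
per_example_grads = tape.jacobian(loss, model.trainable_variables)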
I'm trying to run some tests with GridSearchCV, but I get this error; could you tell me why?
in clone raise TypeError("Cannot clone object '%s' (type %s): "
TypeError: Cannot clone object '<keras.engine.functional.Functional object at 0x7f330fe610a0>' (type <class 'keras.engine.functional.Functional'>): it does not seem to be a scikit-learn estimator as it does not implement a 'get_params' method.
My code:
Model:
from keras import backend as K, regularizers
from keras.engine.training import Model
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, \
    BatchNormalization, Activation, Input
import ModelLib

class Cifar100_Model(ModelLib.ModelLib):
    def build_classifier_model(self, dataset, n_classes=5,
                               activation='elu', dropout_1_rate=0.25,
                               dropout_2_rate=0.5,
                               reg_factor=50e-4, bias_reg_factor=None, batch_norm=False):
        n_classes = dataset.n_classes
        print(n_classes)
        print("----------------------------------------------------------------------------")
        l2_reg = regularizers.l2(reg_factor)  # K.variable(K.cast_to_floatx(reg_factor))
        l2_bias_reg = None
        if bias_reg_factor:
            l2_bias_reg = regularizers.l2(bias_reg_factor)  # K.variable(K.cast_to_floatx(bias_reg_factor))

        # input image dimensions
        h, w, d = 32, 32, 3
        if K.image_data_format() == 'channels_first':
            input_shape = (d, h, w)
        else:
            input_shape = (h, w, d)

        x = input_1 = Input(shape=input_shape)

        x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Conv2D(filters=64, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Conv2D(filters=64, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Conv2D(filters=128, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Conv2D(filters=128, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Conv2D(filters=256, kernel_size=(2, 2), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Conv2D(filters=256, kernel_size=(2, 2), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Flatten()(x)
        x = Dense(units=512, kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Dropout(rate=dropout_2_rate)(x)
        x = Dense(units=n_classes, kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation='softmax')(x)

        model = Model(inputs=[input_1], outputs=[x])
        return model
Code test:
from sklearn.model_selection import GridSearchCV
import models.cifar100_model
def load_model():
return models.cifar100_model.Cifar100_Model()
def get_params(self, deep = True):
return {"learning rate" self.learning_rate}
model_lib = load_model()
model = model_lib.build_classifier_model(dataset)
x_train = dataset.x_train
y_train = dataset.y_train_labels
learning_rate = [0.01, 0.1]
param_grid = dict(learning_rate = learning_rate)
grid = GridSearchCV(estimator = model, param_grid=param_grid, n_jobs=-1, cv=3, scoring='accuracy')
gridResult = grid.fit(x_train,y_train)
print(gridResult.best_params_)
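The error itself says why: GridSearchCV can only clone scikit-learn-style estimators, and a built keras Functional model implements no get_params. A sketch of one common fix, assuming the classic keras scikit-learn wrapper is available (newer code would use scikeras.wrappers.KerasClassifier in the same role): wrap a model-building function, so each grid point gets a freshly compiled model, and let the wrapper route learning_rate to it.

# Sketch only: build_model and its compile settings are illustrative
# assumptions, not taken from the code above.
from keras.wrappers.scikit_learn import KerasClassifier
from keras.optimizers import SGD

def build_model(learning_rate=0.01):
    m = model_lib.build_classifier_model(dataset)
    m.compile(optimizer=SGD(learning_rate=learning_rate),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
    return m

estimator = KerasClassifier(build_fn=build_model, epochs=10, batch_size=32, verbose=0)
grid = GridSearchCV(estimator=estimator, param_grid=dict(learning_rate=[0.01, 0.1]),
                    n_jobs=-1, cv=3, scoring='accuracy')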
I am a beginner in PyTorch and I am trying to convert Keras code to PyTorch for a classification task.
The results differ between the two with the same settings: accuracy is reduced by 5% and the loss increases from 0.2 to 4.5. Can anyone tell me whether I converted the code to PyTorch correctly? Thank you so much.
IMG_HEIGHT = 48, IMG_WIDTH = 48, channels = 3, num_class = 164
model = keras.models.Sequential([
    keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, channels)),
    keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu'),
    keras.layers.MaxPool2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(axis=-1),
    keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
    keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu'),
    keras.layers.MaxPool2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(axis=-1),
    keras.layers.Flatten(),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.BatchNormalization(),
    keras.layers.Dropout(rate=0.5),
    keras.layers.Dense(164, activation='softmax')
])
PyTorch:
class convnet(nn.Module):
    def __init__(self, num_class):
        super(convnet, self).__init__()
        self.Conv2d_1 = nn.Conv2d(3, 16, (3, 3))
        self.relu_1 = nn.ReLU()
        self.Conv2d_2 = nn.Conv2d(16, 32, (3, 3))
        self.relu_2 = nn.ReLU()
        self.maxpool_1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.batch_1 = nn.BatchNorm2d(32)
        self.Conv2d_3 = nn.Conv2d(32, 64, (3, 3))
        self.relu_3 = nn.ReLU()
        self.Conv2d_4 = nn.Conv2d(64, 128, (3, 3))
        self.relu_4 = nn.ReLU()
        self.maxpool_2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.batch_2 = nn.BatchNorm2d(128)
        self.fc1 = nn.Linear(10368, 1024)
        self.relu_5 = nn.ReLU()
        self.batch_3 = nn.BatchNorm1d(1024)
        self.dropout_1 = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(1024, num_class)
        self.Softmax_1 = nn.Softmax()

    def forward(self, x):
        y = self.Conv2d_1(x)
        y = self.relu_1(y)
        y = self.Conv2d_2(y)
        y = self.relu_2(y)
        y = self.maxpool_1(y)
        y = self.batch_1(y)
        y = self.Conv2d_3(y)
        y = self.relu_3(y)
        y = self.Conv2d_4(y)
        y = self.relu_4(y)
        y = self.maxpool_2(y)
        y = self.batch_2(y)
        y = y.view(y.size(0), -1)
        y = self.fc1(y)
        y = self.relu_5(y)
        y = self.batch_3(y)
        y = self.dropout_1(y)
        y = self.fc2(y)
        y = self.Softmax_1(y)
        return y
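One thing worth checking, offered as a guess since the training loop is not shown: if the PyTorch model is trained with nn.CrossEntropyLoss, that loss applies log-softmax internally, so the explicit nn.Softmax layer should be dropped and the network should return raw logits. Note also that the Keras head uses Dense(512) while the PyTorch fc1 is 1024 units wide. A minimal sketch of the classifier head under those assumptions:

import torch.nn as nn

# Sketch, assuming training uses nn.CrossEntropyLoss (which applies
# log-softmax internally), so the head returns raw logits:
head = nn.Sequential(
    nn.Flatten(),
    nn.Linear(10368, 512),   # 10368 = 9 * 9 * 128; 512 matches Keras Dense(512)
    nn.ReLU(),
    nn.BatchNorm1d(512),
    nn.Dropout(p=0.5),
    nn.Linear(512, 164),     # logits; apply softmax only for inference probabilities
)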
I want to design a model with TensorFlow 2.0. When I compile the model, it reports an error:
'my_layer' object has no attribute '_dynamic'
The code is:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

class my_layer(layers.Layer):
    def __init__(self, classes):
        self.conv1 = layers.Conv2D(32, (3, 3), strides=1, padding='same')
        self.conv2 = layers.Conv2D(64, (3, 3), strides=1, padding='same')
        self.conv3 = layers.Conv2D(32, (3, 3), strides=1, padding='same')
        self.conv4 = layers.Conv2D(classes, (3, 3), strides=1, padding='same')
        self.bn = layers.BatchNormalization()
        self.glbavgpool = layers.GlobalMaxPooling2D()
        self.fc = layers.Dense(classes)

    def call(self, inputs):
        x = self.conv1(inputs)
        x = self.bn(x)
        x = keras.activations.relu(x)
        x = self.conv2(x)
        x = keras.activations.relu(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.bn(x)
        x = self.glbavgpool(x)
        out = self.fc(x)
        return out

class mymodel(keras.Model):
    def __init__(self, classes):
        super(mymodel, self).__init__()
        self.ml = my_layer(classes=classes)

    def call(self, inputs):
        return self.ml(inputs)
When I moved all the custom layers into mymodel instead, it worked, so I think I am probably using my_layer the wrong way.
You're seeing this error because you forgot to call the superclass constructor in your my_layer class. Add the following line:
class my_layer(layers.Layer):
    def __init__(self, classes):
        super(my_layer, self).__init__()  # <-- Remember to call the superclass constructor!
        self.conv1 = layers.Conv2D(32, (3, 3), strides=1, padding='same')
        self.conv2 = layers.Conv2D(64, (3, 3), strides=1, padding='same')
        self.conv3 = layers.Conv2D(32, (3, 3), strides=1, padding='same')
        self.conv4 = layers.Conv2D(classes, (3, 3), strides=1, padding='same')
        self.bn = layers.BatchNormalization()
        self.glbavgpool = layers.GlobalMaxPooling2D()
        self.fc = layers.Dense(classes)
I have a situation where the input is an image plus a group of (3) numeric fields, and the output is an image mask. I am not sure how to do that in Keras.
My architecture is somewhat like the attachment. I am aware of the CNN and Dense architectures; I am just not sure how to pass the inputs into the corresponding networks and do the concat operation. Also, a suggestion of a better architecture for this would be great!
Please advise, preferably with example code.
Thanks in advance, Utpal.
I would advise trying a U-Net model for this problem. A usual U-Net consists of several conv and max-pooling layers, followed by several conv and upsampling layers.
In the current problem you can mix in the non-spatial data (the image annotation) at the middle.
It may also be a good idea to start with a pre-trained VGG-16 (see vgg.load_weights(VGG_Weights_path) below).
See the code below (based on Divam Gupta's repo):
from keras.models import *
from keras.layers import *

# Implied by the Theano-ordering VGG weights and Input(shape=(3, h, w)) below:
IMAGE_ORDERING = 'channels_first'

def VGGUnet(n_classes, input_height=416, input_width=608, data_length=128, vgg_level=3):
    assert input_height % 32 == 0
    assert input_width % 32 == 0

    # https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels.h5
    img_input = Input(shape=(3, input_height, input_width))
    data_input = Input(shape=(data_length,))

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1', data_format=IMAGE_ORDERING)(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool', data_format=IMAGE_ORDERING)(x)
    f1 = x

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool', data_format=IMAGE_ORDERING)(x)
    f2 = x

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool', data_format=IMAGE_ORDERING)(x)
    f3 = x

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool', data_format=IMAGE_ORDERING)(x)
    f4 = x

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2', data_format=IMAGE_ORDERING)(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3', data_format=IMAGE_ORDERING)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool', data_format=IMAGE_ORDERING)(x)
    f5 = x

    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(1000, activation='softmax', name='predictions')(x)

    vgg = Model(img_input, x)
    vgg.load_weights(VGG_Weights_path)

    levels = [f1, f2, f3, f4, f5]

    # Several dense layers for image annotation processing
    data_layer = Dense(1024, activation='relu', name='data1')(data_input)
    # Integer division (//) so the layer size and target shape are ints
    data_layer = Dense(input_height * input_width // 256, activation='relu', name='data2')(data_layer)
    data_layer = Reshape((1, input_height // 16, input_width // 16))(data_layer)

    # Mix image annotations here
    o = concatenate([f4, data_layer], axis=1)
    o = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(o)
    o = Conv2D(512, (3, 3), padding='valid', data_format=IMAGE_ORDERING)(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2), data_format=IMAGE_ORDERING)(o)
    o = concatenate([o, f3], axis=1)
    o = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(o)
    o = Conv2D(256, (3, 3), padding='valid', data_format=IMAGE_ORDERING)(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2), data_format=IMAGE_ORDERING)(o)
    o = concatenate([o, f2], axis=1)
    o = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(o)
    o = Conv2D(128, (3, 3), padding='valid', data_format=IMAGE_ORDERING)(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2), data_format=IMAGE_ORDERING)(o)
    o = concatenate([o, f1], axis=1)
    o = ZeroPadding2D((1, 1), data_format=IMAGE_ORDERING)(o)
    o = Conv2D(64, (3, 3), padding='valid', data_format=IMAGE_ORDERING)(o)
    o = BatchNormalization()(o)

    o = Conv2D(n_classes, (3, 3), padding='same', data_format=IMAGE_ORDERING)(o)

    o_shape = Model(img_input, o).output_shape
    output_height = o_shape[2]
    output_width = o_shape[3]

    o = Reshape((n_classes, output_height * output_width))(o)
    o = Permute((2, 1))(o)
    o = Activation('softmax')(o)

    model = Model([img_input, data_input], o)
    model.outputWidth = output_width
    model.outputHeight = output_height
    return model
To train and evaluate a Keras model with several inputs, prepare a separate array for each input layer: image_train and annotation_train (preserving the order along the first axis, i.e. the sample index), and call:

model.fit([image_train, annotation_train], result_segmentation_train, batch_size=..., epochs=...)
test_loss, test_acc = model.evaluate([image_test, annotation_test], result_segmentation_test)

Good luck!