Concatenate in Sequential KERAS U-NET - keras

I want to set up a deep U-Net with the Keras Sequential API, but I don't know how to concatenate
specific layers.
from keras import models
from keras import layers
from keras.layers.convolutional import Conv2D, Conv2DTranspose
model = models.Sequential()
model.add(layers.Conv2D(8,(3,3), activation="relu", padding='same', input_shape=(512, 512, 4)))
model.add(layers.Conv2D(8,(3,3), activation="relu", padding='same'))
model.add(layers.MaxPooling2D(2,2))
model.add(layers.Dropout(0.2))
model.add(layers.Conv2D(16,(3,3), activation="relu", padding='same'))
model.add(layers.Conv2D(16,(3,3), activation="relu", padding='same'))
model.add(layers.MaxPooling2D(2,2))
model.add(layers.Dropout(0.2))
model.add(layers.Conv2D(32,(3,3), activation="relu", padding='same'))
model.add(layers.Conv2D(32,(3,3), activation="relu", padding='same'))
model.add(layers.MaxPooling2D(2,2))
model.add(layers.Dropout(0.2))
model.add(Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same'))
model.add(layers.Conv2D(16,(3,3), activation="relu", padding='same'))
model.add(layers.Conv2D(16,(3,3), activation="relu", padding='same'))
model.add(Conv2DTranspose(8, (2, 2), strides=(2, 2), padding='same'))
model.add(layers.Conv2D(8,(3,3), activation="relu", padding='same'))
model.add(layers.Conv2D(classes,(3,3), activation="relu", padding='same'))  # `classes` = number of output channels, defined elsewhere
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])  # compile() returns None, so don't reassign `model`
model.summary()
In a non-Sequential (functional API) model it would be something like this:
u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(c5)
u6 = concatenate([u6, c4])
u6 = Dropout(dropout)(u6)
c6 = conv2d_block(u6, n_filters * 8, kernel_size = 3, batchnorm = batchnorm)
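The Sequential API only supports a single linear stack of layers, so skip connections like the one above cannot be expressed with model.add(); the usual route is the functional API. Below is a minimal sketch of the same encoder/decoder rebuilt that way (an assumption-laden example: it keeps two pooling levels so each upsampling step has a matching encoder block to concatenate with, and `classes`, `optimizer`, and `loss` are defined elsewhere, as in the original snippet).
from keras import layers, models

def build_unet(input_shape=(512, 512, 4), classes=2):
    inputs = layers.Input(shape=input_shape)

    # Encoder
    c1 = layers.Conv2D(8, (3, 3), activation="relu", padding="same")(inputs)
    c1 = layers.Conv2D(8, (3, 3), activation="relu", padding="same")(c1)
    p1 = layers.Dropout(0.2)(layers.MaxPooling2D((2, 2))(c1))

    c2 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(p1)
    c2 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(c2)
    p2 = layers.Dropout(0.2)(layers.MaxPooling2D((2, 2))(c2))

    # Bottleneck
    c3 = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(p2)
    c3 = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(c3)

    # Decoder: upsample, then concatenate with the matching encoder output
    u4 = layers.Conv2DTranspose(16, (2, 2), strides=(2, 2), padding="same")(c3)
    u4 = layers.concatenate([u4, c2])   # skip connection
    c4 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(u4)
    c4 = layers.Conv2D(16, (3, 3), activation="relu", padding="same")(c4)

    u5 = layers.Conv2DTranspose(8, (2, 2), strides=(2, 2), padding="same")(c4)
    u5 = layers.concatenate([u5, c1])   # skip connection
    c5 = layers.Conv2D(8, (3, 3), activation="relu", padding="same")(u5)

    # Same final layer as in the original Sequential version
    outputs = layers.Conv2D(classes, (3, 3), activation="relu", padding="same")(c5)
    return models.Model(inputs=inputs, outputs=outputs)

model = build_unet(input_shape=(512, 512, 4), classes=classes)  # classes/optimizer/loss as in the original code
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
model.summary()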

Related

How to pass two images directly into siamese neural network?

Basically, I take two images, preprocess them, and pass them as inputs to the Siamese CNN model.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, BatchNormalization, Dropout, Flatten, Dense
from keras.regularizers import l2

def create_base_network_signet(input_shape):
    seq = Sequential()
    seq.add(Conv2D(96, kernel_size=(11, 11), activation='relu', name='conv1_1', strides=4,
                   input_shape=input_shape, kernel_initializer='glorot_uniform'))
    seq.add(BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9))
    seq.add(MaxPooling2D((3, 3), strides=(2, 2)))
    seq.add(ZeroPadding2D((2, 2)))
    seq.add(Conv2D(256, kernel_size=(5, 5), activation='relu', name='conv2_1', strides=1, kernel_initializer='glorot_uniform'))
    seq.add(BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9))
    seq.add(MaxPooling2D((3, 3), strides=(2, 2)))
    seq.add(Dropout(0.3))
    seq.add(ZeroPadding2D((1, 1)))
    seq.add(Conv2D(384, kernel_size=(3, 3), activation='relu', name='conv3_1', strides=1, kernel_initializer='glorot_uniform'))
    seq.add(ZeroPadding2D((1, 1)))
    seq.add(Conv2D(256, kernel_size=(3, 3), activation='relu', name='conv3_2', strides=1, kernel_initializer='glorot_uniform'))
    seq.add(MaxPooling2D((3, 3), strides=(2, 2)))
    seq.add(Dropout(0.3))  # added extra
    seq.add(Flatten(name='flatten'))
    seq.add(Dense(1024, kernel_regularizer=l2(0.0005), activation='relu', kernel_initializer='glorot_uniform'))
    seq.add(Dropout(0.5))
    seq.add(Dense(128, kernel_regularizer=l2(0.0005), activation='relu', kernel_initializer='glorot_uniform'))
    seq.add(Dense(1, activation='sigmoid'))
    return seq
My aim in passing the images is something like this:
result = model.fit([image1, image2], y = 1, epochs=10)
However, I am getting an error
Failed to find data adapter that can handle input: (<class 'list'> containing values of types {"<class 'numpy.ndarray'>"}), <class 'int'>
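One common way to wire this up is sketched below, under a few assumptions: `input_shape`, `image1`, and `image2` are defined as above (each with a leading batch dimension), the base network's final `Dense(1, 'sigmoid')` layer is dropped so it returns the 128-d embedding, and the absolute-difference head is a hypothetical choice for illustration. Note that `y` must be array-like, not a plain int, which is what the data-adapter error is complaining about.
import numpy as np
from keras.models import Model
from keras.layers import Input, Dense, Lambda
from keras import backend as K

base_network = create_base_network_signet(input_shape)
base_network.pop()   # drop the final Dense(1, 'sigmoid') so the base returns the 128-d embedding

input_a = Input(shape=input_shape)
input_b = Input(shape=input_shape)

# The same base network (shared weights) embeds both images
embedded_a = base_network(input_a)
embedded_b = base_network(input_b)

# Hypothetical head: absolute difference of the embeddings, then a sigmoid similarity score
distance = Lambda(lambda t: K.abs(t[0] - t[1]))([embedded_a, embedded_b])
output = Dense(1, activation='sigmoid')(distance)

model = Model(inputs=[input_a, input_b], outputs=output)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Labels must be an array with one entry per image pair, not a bare int,
# and image1/image2 each need a leading batch dimension, e.g. shape (1, h, w, c)
y = np.array([1])
result = model.fit([image1, image2], y, epochs=10)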

How do I apply GridSearchCV on an autoencoder model?

I want to apply GridSearchCV to the autoencoder model. The code of the autoencoder and GridSearchCV is added below; please tell me how to change this code to run GridSearchCV successfully.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Flatten, Reshape

autoencoder = Sequential()
# Encoder Layers
autoencoder.add(Conv2D(16, (3, 3), activation='relu', padding='same', input_shape=x_train.shape[1:]))
autoencoder.add(MaxPooling2D((2, 2), padding='same'))
autoencoder.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
autoencoder.add(MaxPooling2D((2, 2), padding='same'))
autoencoder.add(Conv2D(8, (3, 3), strides=(2,2), activation='relu', padding='same'))
# Flatten encoding for visualization
autoencoder.add(Flatten())
autoencoder.add(Reshape((4, 4, 8)))
# Decoder Layers
autoencoder.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
autoencoder.add(UpSampling2D((2, 2)))
autoencoder.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
autoencoder.add(UpSampling2D((2, 2)))
autoencoder.add(Conv2D(16, (3, 3), activation='relu'))
autoencoder.add(UpSampling2D((2, 2)))
autoencoder.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same'))
autoencoder.summary()
I want to apply GridSearch on the above autoencoder code
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
model_classifier = KerasClassifier(autoencoder, verbose=1, batch_size=10, epochs=10)
# define the grid search parameters
batch_size = [10]
loss = ['mean_squared_error', 'binary_crossentropy']
optimizer = [Adam, SGD, RMSprop]
learning_rate = [0.001]
epochs = [3, 5]
param_grid = dict(optimizer=optimizer, learning_rate=learning_rate)
grid = GridSearchCV(cv=[(slice(None), slice(None))], estimator=model_classifier, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(x_train, x_train)
print("training Successfully completed")
I solved this by hard-coding it: I applied a for loop over every parameter and collected the results.
For best-parameter selection I picked the parameters that gave the highest results, as sketched below.
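A minimal sketch of that manual loop, assuming the parameter lists above and a hypothetical build_autoencoder() helper that returns a fresh, uncompiled copy of the autoencoder for each configuration (older Keras optimizers take lr rather than learning_rate):
from keras.optimizers import Adam, SGD, RMSprop

best_loss = float('inf')
best_config = None

# Hypothetical manual grid search over optimizer, loss, and learning rate
for optimizer_cls in [Adam, SGD, RMSprop]:
    for loss in ['mean_squared_error', 'binary_crossentropy']:
        for lr in [0.001]:
            model = build_autoencoder()          # assumed helper returning a fresh, uncompiled model
            model.compile(optimizer=optimizer_cls(lr=lr), loss=loss)
            history = model.fit(x_train, x_train, batch_size=10, epochs=3,
                                validation_split=0.2, verbose=0)
            val_loss = history.history['val_loss'][-1]
            if val_loss < best_loss:
                best_loss = val_loss
                best_config = (optimizer_cls.__name__, loss, lr)

print("Best configuration:", best_config, "val_loss:", best_loss)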

Keras Error - expected activation_1 to have shape (2622,) but got array with shape (1,)

Hello guys, I am trying to build a pretrained VGG16 in Keras, but it keeps giving me this error:
ValueError: Error when checking target: expected activation_1 to have
shape (2622,) but got array with shape (1,)
I was trying to create the model based on this poster: Link
Also, I took the pre-trained weights from here; the weights can be read here.
This is my code:
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense, ZeroPadding2D
from keras import backend as K
# dimensions of our images.
img_width, img_height = 224, 224
train_data_dir = 'database/train'
validation_data_dir = 'database/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
# build the VGG16 network
model = applications.VGG16(weights='imagenet', include_top=False)
print('VGG Pretrained Model loaded.')
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Conv2D(4096, (7, 7), activation='relu'))
model.add(Dropout(0.5))
model.add(Conv2D(4096, (1, 1), activation='relu'))
model.add(Dropout(0.5))
model.add(Conv2D(2622, (1, 1)))
model.add(Flatten())
model.add(Activation('softmax'))
# model.load_weights('./vgg16_face_weights.h5')
#
# vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 224,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 224)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
model.save_weights('first_try.h5')
You probably have only one folder inside 'database/train' and 'database/validation'.
Please make sure you have 2622 folders in each of those two directories so that Keras can generate the labels correctly.
The following is an example showing that the labels should have shape (batch_size, 2622).
# the above remains the same
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
import numpy as np
classes = 2622
batch_size = 4
y = np.zeros((batch_size, classes))
for i in range(batch_size):
    y[i, np.random.choice(classes)] = 1
model.fit(x=np.random.random((batch_size,) + input_shape), y=y, batch_size=batch_size)
model.save_weights('first_try.h5')
EDIT:
To change the last Conv2D layer from 2622 filters to 12 filters while maintaining the loaded weights, here is a workaround:
#define model and load_weights
#......
#build a new model based on the last model
conv = Conv2D(12, (1, 1))(model.layers[-4].output)
flatten = Flatten()(conv)
softmax = Activation('softmax')(flatten)
final_model = Model(inputs=model.input, outputs=softmax)
Ref: Cannot add layers to saved Keras Model. 'Model' object has no attribute 'add'
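After rebuilding, final_model is a regular Keras Model and still needs its own compile call before it can be trained; a minimal sketch, assuming the same compile settings already used above:
# The copied layers keep the loaded weights; only the new 12-filter head is randomly initialized.
final_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
final_model.summary()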

How can I use dropout in keras

An error occurred while building my CNN model. When using dropout, the following error message occurs.
This is the error message:
UnboundLocalError: local variable 'a' referenced before assignment
Model:
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Activation, Flatten, Dense
from keras import regularizers

def getModel(input_shape, filter_size=32, pool_size=(2, 2), dropout=0.2):
    model = Sequential()
    model.add(Conv2D(16, (3, 3), input_shape=input_shape, activation='elu', kernel_initializer="he_normal", padding='same', kernel_regularizer=regularizers.l2(0.01)))
    # I want to use dropout after max pooling
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(dropout))
    model.add(Conv2D(32, (2, 2), kernel_initializer="he_normal", padding='same', strides=1, kernel_regularizer=regularizers.l2(0.02)))
    model.add(Activation('elu'))
    model.add(Conv2D(32, (2, 3), kernel_initializer="he_normal", padding='same', strides=1, kernel_regularizer=regularizers.l2(0.02)))
    model.add(Activation('elu'))
    model.add(Conv2D(16, (2, 2), kernel_initializer="he_normal", padding='same', strides=1, kernel_regularizer=regularizers.l2(0.02)))
    model.add(Activation('elu'))
    model.add(Conv2D(32, (2, 3), kernel_initializer="he_normal", padding='same', strides=1, kernel_regularizer=regularizers.l2(0.02)))
    model.add(Activation('elu'))
    model.add(Conv2D(32, (2, 2), kernel_initializer="he_normal", padding='same', strides=1, kernel_regularizer=regularizers.l2(0.02)))
    model.add(Activation('elu'))
    # this is the flatten area
    model.add(Flatten())
    model.add(Dense(126, kernel_initializer="glorot_normal", kernel_regularizer=regularizers.l2(0.01)))
    model.add(Activation('tanh'))
    model.add(Dense(classes))  # `classes` must be defined in the calling scope
    model.add(Activation('sigmoid'))
    # compile
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',  # SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
                  metrics=['accuracy'])
    return model
Model fit:
np.random.seed(42)
hist = model.fit(X_train, Y_train, batch_size = batch_size, epochs = epochs, verbose = 1, validation_split = .2)
I couldn't figure out what 'a' is here and hence the cause of the error, but I think the following code should help:
model.add(Conv2D(32, (2, 2), kernel_initializer="he_normal",padding='same',strides=1, kernel_regularizer=regularizers.l2(0.02)))
model.add(Activation('elu'))
model.add(Conv2D(32, (2, 3), kernel_initializer="he_normal", padding='same', strides=1, kernel_regularizer=regularizers.l2(0.02)))
model.add(Activation('elu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(32, (2, 3), kernel_initializer="he_normal", padding='same', strides=1, kernel_regularizer=regularizers.l2(0.02)))
model.add(Activation('elu'))
model.add(Conv2D(32, (2, 2), kernel_initializer="he_normal", padding='same', strides=1, kernel_regularizer=regularizers.l2(0.02)))
model.add(Activation('elu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

Exception: Error when checking model target: expected dense_3 to have shape (None, 1000) but got array with shape (32, 2)

How do I create a VGG-16 Sequential model for my data?
The code is the following:
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1000, activation='softmax'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=32)
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=32)
model.fit_generator(
    train_generator,
    samples_per_epoch=2000,
    nb_epoch=1,
    verbose=1,
    validation_data=validation_generator,
    nb_val_samples=800)
json_string = model.to_json()
open('my_model_architecture.json','w').write(json_string)
model.save_weights('Second_try.h5')
I got an error:
Exception: Error when checking model target: expected dense_3 to have
shape (None, 1000) but got array with shape (32, 2)
How do I change Dense to make it work? I have 10 species (classes).
I solved the problem by changing:
model.add(Dense(1000, activation='softmax'))
to:
model.add(Dense(10, activation='softmax'))
Then it works.
Here, instead of 1000 you should have the total number of classes, because it's the output layer:
model.add(Dense(1000, activation='softmax'))
Also, the shape of the labels (Y_train/Y_test) should be (total number of records, total number of classes), as in the sketch below.
This helped me resolve a similar kind of error.
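A minimal sketch of preparing labels in that shape with one-hot encoding, assuming integer class indices and the 10 classes mentioned above:
import numpy as np
from keras.utils import to_categorical

num_classes = 10                      # total number of species/classes
y_train = np.array([0, 3, 9, 1])      # hypothetical integer labels, one per sample
Y_train = to_categorical(y_train, num_classes=num_classes)
print(Y_train.shape)                  # (4, 10) -> (number of records, number of classes)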
