Imbalanced data for semantic segmentation in Keras?

I am new to Keras and have been learning it for about three weeks now. I apologize if my question sounds a bit stupid.
I am currently doing semantic segmentation of 512x512 medical images, using the U-Net from https://github.com/zhixuhao/unet . Basically, I want to segment a brain from an image (two-class segmentation, background vs. foreground).
I have made a few modifications to the network and I'm getting results I am happy with. But I think I can improve the segmentation by putting more weight on the foreground, because the number of brain pixels is much smaller than the number of background pixels. In some cases the brain does not appear in the image at all, especially in the bottom slices.
I don't know which part of the code in https://github.com/zhixuhao/unet I need to modify.
I would really appreciate it if anyone could help me with this. Thanks a lot in advance!
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
def unet(pretrained_weights=None, input_size=(256, 256, 1)):
    inputs = Input(input_size)
    # Contracting path: two 3x3 convolutions + batch normalization per level, then 2x2 max pooling
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = BatchNormalization()(conv1)
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    conv1 = BatchNormalization()(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = BatchNormalization()(conv2)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    conv2 = BatchNormalization()(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = BatchNormalization()(conv3)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    conv3 = BatchNormalization()(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = BatchNormalization()(conv4)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    conv4 = BatchNormalization()(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    conv5 = BatchNormalization()(conv5)
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    conv5 = BatchNormalization()(conv5)
    drop5 = Dropout(0.5)(conv5)
    # Expanding path: upsample, concatenate with the matching encoder level, then convolve
    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = BatchNormalization()(conv6)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    conv6 = BatchNormalization()(conv6)
    up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = BatchNormalization()(conv7)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    conv7 = BatchNormalization()(conv7)
    up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = BatchNormalization()(conv8)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    conv8 = BatchNormalization()(conv8)
    up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    conv9 = BatchNormalization()(conv9)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    conv9 = BatchNormalization()(conv9)
    conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    conv9 = BatchNormalization()(conv9)
    # 1x1 convolution with sigmoid gives a per-pixel foreground probability
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
    # model.summary()
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    return model
Here's main.py:
from model2 import *
from data2 import *
from keras.models import load_model

class_weight = {0: 0.10, 1: 0.90}
myGene = trainGenerator(2, 'data/brainTIF/trainNew', 'image', 'label', save_to_dir=None)
model = unet()
model_checkpoint = ModelCheckpoint('unet_brainTest_e10_s5.hdf5', monitor='loss')
model.fit_generator(myGene, steps_per_epoch=5, epochs=10, callbacks=[model_checkpoint])
testGene = testGenerator("data/brainTIF/test3")
results = model.predict_generator(testGene, 18, verbose=1)
saveResult("data/brainTIF/test_results3", results)

As an alternative to class_weight for binary classes, you can also handle imbalanced classes with the Synthetic Minority Over-sampling Technique (SMOTE), which increases the size of the minority group:
from imblearn.over_sampling import SMOTE

# SMOTE expects 2-D feature arrays, so flatten the images before resampling.
sm = SMOTE()
x, y = sm.fit_resample(X_train, Y_train)  # named fit_sample() in older imbalanced-learn releases
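For the segmentation case specifically, note that the class_weight dict in main.py above is never actually passed to fit_generator, and class_weight is awkward to apply to per-pixel targets anyway. A common alternative is a pixel-weighted loss passed to model.compile. This is only a minimal sketch, not part of the original answer; the 0.10/0.90 weights are taken from the class_weight dict above, and K is the standard Keras backend alias:
from keras import backend as K

def weighted_binary_crossentropy(w_background=0.10, w_foreground=0.90):
    # Each pixel's crossentropy is scaled by the weight of its true class,
    # so the rare foreground contributes more to the gradient.
    def loss(y_true, y_pred):
        bce = K.binary_crossentropy(y_true, y_pred)
        weights = y_true * w_foreground + (1.0 - y_true) * w_background
        return K.mean(weights * bce)
    return loss

# Hypothetical usage: swap the loss inside unet()'s compile call
# model.compile(optimizer=Adam(lr=1e-4),
#               loss=weighted_binary_crossentropy(0.10, 0.90),
#               metrics=['accuracy'])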

Related

VGG19 neural network for single-channel images for use in SRGAN

I am trying to write a VGG19 neural network for single-channel images, where everything is essentially the same as in the three-channel network except for the input layer:
def model(self, inputShape=(64, 64, 1)):
    inputLayer = Input(shape=inputShape)
After applying the Flatten layer to the convolution tensor I use the same dense-layer parameters as in the classic VGG19, but I get an error when compiling the model:
ValueError: Shapes (None, 64, 64, 1) and (None, 1000) are incompatible
As far as I understand, the number of neurons in the dense layer should correspond to the dimensionality of the input data: for a 64x64 image, after applying the Flatten layer, the dense layer should receive a vector of 4096 values, as described in the classical model:
layerSet = Flatten()(layerSet)
layerSet = Dense(4096, activation='relu')(layerSet)
layerSet = Dropout(0.5)(layerSet)
layerSet = Dense(4096, activation='relu')(layerSet)
layerSet = Dropout(0.5)(layerSet)
outputLayer = Dense(1000, activation='relu')(layerSet)
The last dense layer gets 1000 neurons, each corresponding to some recognizable class.
In my case I need a set of features for SRGAN, so I doubt my problem needs the classification vector. Features derived from the VGG19 model, together with features derived from the discriminative model, should be passed to the output layer of the generative adversarial model.
Below is the full code example with the model itself and the training method. I expect to eventually get the required features from the model:
class VGG19DeepConvolutionNetwork:
    __model = None

    def __init__(self):
        self.model()

    def model(self, inputShape=(64, 64, 1)):
        inputLayer = Input(shape=inputShape)
        layerSet = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputLayer)
        layerSet = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(layerSet)
        layerSet = MaxPooling2D(strides=(2, 2), padding='same')(layerSet)
        layerSet = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(layerSet)
        layerSet = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(layerSet)
        layerSet = MaxPooling2D(strides=(2, 2), padding='same')(layerSet)
        layerSet = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(layerSet)
        layerSet = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(layerSet)
        layerSet = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(layerSet)
        layerSet = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(layerSet)
        layerSet = MaxPooling2D(strides=(2, 2), padding='same')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(layerSet)
        layerSet = MaxPooling2D(strides=(2, 2), padding='same')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(layerSet)
        layerSet = MaxPooling2D(strides=(2, 2), padding='same')(layerSet)
        layerSet = Flatten()(layerSet)
        layerSet = Dense(4096, activation='relu')(layerSet)
        layerSet = Dropout(0.5)(layerSet)
        layerSet = Dense(4096, activation='relu')(layerSet)
        layerSet = Dropout(0.5)(layerSet)
        outputLayer = Dense(1000, activation='relu')(layerSet)
        self.__model = Model(inputs=[inputLayer], outputs=[outputLayer])
        self.__model.compile(optimizer='adam', loss='categorical_crossentropy')
        print(self.__model.summary())

    def train(self, imageDataPath: str = 'srgangImageData.h5', weightsPath: str = 'vgg19Weights.h5', sliceSize=32, epochsNumber=100):
        if self.__model is None:
            self.model((sliceSize, sliceSize, 1))
        imageData = ImageDataProcessing()
        sourceTrain, targetTrain, sourceTest, targetTest = imageData.readImageData(imageDataPath)
        del imageData
        print('train source', sourceTrain.shape)
        print('train target', targetTrain.shape)
        print('test source', sourceTest.shape)
        print('test target', targetTest.shape)
        checkpoint = ModelCheckpoint(weightsPath, verbose=1, save_best_only=True, save_weights_only=False, mode='min')
        callbacks_list = [checkpoint]
        history = self.__model.fit(sourceTrain, targetTrain, batch_size=128, steps_per_epoch=len(sourceTrain) // 128,
                                   validation_data=(sourceTest, targetTest),
                                   callbacks=callbacks_list, shuffle=True, epochs=epochsNumber, verbose=1)
Some corrections:
The Flatten layer should output 2 x 2 x 512 = 2048 values, since that is the output shape of the last convolutional layer for a 64x64 input (five poolings halve 64 down to 2). TensorFlow/Keras infers that for you.
The reason the last layer has 1000 neurons is that the model was originally trained on a dataset with 1000 classes (one neuron per class).
What version of TensorFlow are you using? Are you sure it is failing at the compile step? I compiled your model with TensorFlow 2.10.0 (Python 3.10.4) and everything worked fine; a forward pass with an input of shape (10, 64, 64, 1) also worked.
Here is the code I tried, both locally and in Google Colab:
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras import Model
import tensorflow as tf

class VGG19DeepConvolutionNetwork:
    __model = None

    def __init__(self):
        self.model()

    def model(self, inputShape=(64, 64, 1)):
        inputLayer = Input(shape=inputShape)
        layerSet = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputLayer)
        layerSet = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(layerSet)
        layerSet = MaxPooling2D(strides=(2, 2), padding='same')(layerSet)
        layerSet = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(layerSet)
        layerSet = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(layerSet)
        layerSet = MaxPooling2D(strides=(2, 2), padding='same')(layerSet)
        layerSet = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(layerSet)
        layerSet = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(layerSet)
        layerSet = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(layerSet)
        layerSet = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(layerSet)
        layerSet = MaxPooling2D(strides=(2, 2), padding='same')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(layerSet)
        layerSet = MaxPooling2D(strides=(2, 2), padding='same')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(layerSet)
        layerSet = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(layerSet)
        layerSet = MaxPooling2D(strides=(2, 2), padding='same')(layerSet)
        layerSet = Flatten()(layerSet)
        layerSet = Dense(4096, activation='relu')(layerSet)
        layerSet = Dropout(0.5)(layerSet)
        layerSet = Dense(4096, activation='relu')(layerSet)
        layerSet = Dropout(0.5)(layerSet)
        outputLayer = Dense(1000, activation='relu')(layerSet)
        self.__model = Model(inputs=[inputLayer], outputs=[outputLayer])
        self.__model.compile(optimizer='adam', loss='categorical_crossentropy')
        print(self.__model.summary())

    def getModel(self):
        return self.__model

    def train(self, imageDataPath: str = 'srgangImageData.h5', weightsPath: str = 'vgg19Weights.h5', sliceSize=32, epochsNumber=100):
        if self.__model is None:
            self.model((sliceSize, sliceSize, 1))
        imageData = ImageDataProcessing()
        sourceTrain, targetTrain, sourceTest, targetTest = imageData.readImageData(imageDataPath)
        del imageData
        print('train source', sourceTrain.shape)
        print('train target', targetTrain.shape)
        print('test source', sourceTest.shape)
        print('test target', targetTest.shape)
        checkpoint = ModelCheckpoint(weightsPath, verbose=1, save_best_only=True, save_weights_only=False, mode='min')
        callbacks_list = [checkpoint]
        history = self.__model.fit(sourceTrain, targetTrain, batch_size=128, steps_per_epoch=len(sourceTrain) // 128,
                                   validation_data=(sourceTest, targetTest),
                                   callbacks=callbacks_list, shuffle=True, epochs=epochsNumber, verbose=1)

modelWrapper = VGG19DeepConvolutionNetwork()
model = modelWrapper.getModel()
X = tf.random.uniform((10, 64, 64, 1))
output = model(X)
print(output)
# modelWrapper.train()
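If what you ultimately need is perceptual features for SRGAN rather than class scores, one option (a sketch built on top of the answer's code, not part of the original answer) is to build a second Model that exposes an intermediate block; the layer name 'block5_conv4' is taken from the naming in the class above:
# Hypothetical feature extractor for a perceptual loss, reusing the model built above.
feature_extractor = Model(inputs=model.input,
                          outputs=model.get_layer('block5_conv4').output)
features = feature_extractor(tf.random.uniform((10, 64, 64, 1)))
print(features.shape)  # (10, 4, 4, 512) for 64x64 inputs: four poolings reduce 64 to 4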

YOLOv3 model overfitting with the CIFAR-10 dataset

I am trying to implement YOLOv3, using the CIFAR-10 dataset for testing. When I run it, the accuracy converges to 0.1 (it always selects the same class). The dataset size should not be an issue, and I have confirmed that the rest of the code works fine with a smaller model, so I am not sure why the model does this. Is there any way to get YOLOv3 to work?
import cv2
import random
import struct
import mlxtend
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
from keras.datasets import cifar10
from keras.models import Sequential
from tensorflow.keras import layers
from mlxtend.data import loadlocal_mnist
from matplotlib.patches import Rectangle
from keras.layers.merge import add, concatenate
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers.experimental import preprocessing
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D

data = cifar10
(train_images, train_labels), (test_images, test_labels) = data.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
def create_model():
    model = Sequential()
    model.add(Conv2D(filters=64, activation='relu', padding='same', kernel_size=7, strides=2, input_shape=(32, 32, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Conv2D(filters=192, activation='relu', padding='same', kernel_size=3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Conv2D(filters=128, activation='relu', padding='same', kernel_size=1))
    model.add(Conv2D(filters=256, activation='relu', padding='same', kernel_size=3))
    model.add(Conv2D(filters=256, activation='relu', padding='same', kernel_size=1))
    model.add(Conv2D(filters=512, activation='relu', padding='same', kernel_size=3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Conv2D(filters=256, activation='relu', padding='same', kernel_size=1))
    model.add(Conv2D(filters=512, activation='relu', padding='same', kernel_size=3))
    model.add(Conv2D(filters=256, activation='relu', padding='same', kernel_size=1))
    model.add(Conv2D(filters=512, activation='relu', padding='same', kernel_size=3))
    model.add(Conv2D(filters=256, activation='relu', padding='same', kernel_size=1))
    model.add(Conv2D(filters=512, activation='relu', padding='same', kernel_size=3))
    model.add(Conv2D(filters=256, activation='relu', padding='same', kernel_size=1))
    model.add(Conv2D(filters=512, activation='relu', padding='same', kernel_size=3))
    model.add(Conv2D(filters=512, activation='relu', padding='same', kernel_size=1))
    model.add(Conv2D(filters=1024, activation='relu', padding='same', kernel_size=3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Conv2D(filters=512, activation='relu', padding='same', kernel_size=1))
    model.add(Conv2D(filters=1024, activation='relu', padding='same', kernel_size=3))
    model.add(Conv2D(filters=512, activation='relu', padding='same', kernel_size=1))
    model.add(Conv2D(filters=1024, activation='relu', padding='same', kernel_size=3))
    model.add(Conv2D(filters=1024, activation='relu', padding='same', kernel_size=3))
    model.add(Conv2D(filters=1024, activation='relu', padding='same', kernel_size=3, strides=2))
    model.add(Conv2D(filters=1024, activation='relu', padding='same', kernel_size=3))
    model.add(Conv2D(filters=1024, activation='relu', padding='same', kernel_size=3))
    model.add(Flatten())
    model.add(Dense(len(train_labels)))
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
model = create_model()
num_epochs = 5
batch_size = 32
model.fit(train_images, train_labels, epochs=num_epochs, validation_split=0.25, batch_size=batch_size)
results = model.evaluate(test_images, test_labels)
print("Test Accuracy:", results[1])

A `Concatenate` layer requires inputs with matching shapes except for the concat axis

I am trying to train the U-Net neural network from zhixuhao. I feed 512x512 images to the input, but I get this error:
Traceback (most recent call last):
File "unet.py", line 168, in <module>
myunet.train()
File "unet.py", line 159, in train
model.fit(imgs_train, imgs_mask_train, batch_size=1, nb_epoch=10, verbose=1, shuffle=True, callbacks=[model_checkpoint])
File "C:\Users\mimozzza\AppData\Local\Programs\Python\Python37\lib\site-packages\keras\engine\training.py", line 952, in fit
batch_size=batch_size)
File "C:\Users\mimozzza\AppData\Local\Programs\Python\Python37\lib\site-packages\keras\engine\training.py", line 751, in _standardize_user_data
exception_prefix='input')
File "C:\Users\mimozzza\AppData\Local\Programs\Python\Python37\lib\site-packages\keras\engine\training_utils.py", line 138, in standardize_input_data
str(data_shape))
ValueError: Error when checking input: expected input_1 to have shape (256, 256, 3) but got array with shape (512, 512, 3)
Here is the code:
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
print("conv1 shape:", conv1.shape)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
print("conv1 shape:", conv1.shape)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
print("pool1 shape:", pool1.shape)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
print("conv2 shape:", conv2.shape)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
print("conv2 shape:", conv2.shape)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
print("pool2 shape:", pool2.shape)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
print("conv3 shape:", conv3.shape)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
print("conv3 shape:", conv3.shape)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
print("pool3 shape:", pool3.shape)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)
model = Model(input = inputs, output = conv10)
model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])
return model
I looked at other discussions on this topic and realized the error might be related to the image resolution, but my files have the same resolution that zhixuhao's code originally expected.
Tell me, what is the problem, and how can I fix it?
PS:
I use Keras version 1.0.0 and the latest version of the code from GitHub (the version from October 24, 2017).
The problem is solved: in the code, images were specified with a resolution of 256x256, while I was trying to train on 512x512 images.
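In other words, the Input shape hard-coded in the model has to match the training images. A minimal sketch of the fix, assuming the unet() factory shown in the first question above:
# The Input shape must match the images you actually train on.
model = unet(input_size=(512, 512, 3))
# or, if the shape is hard-coded in the script, change the Input layer itself:
# inputs = Input((512, 512, 3))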

Error when checking target: expected conv2d_29 to have 4 dimensions, but got array with shape (1255, 12)

I would like to train a deep learning model where the input image shape is (224, 224, 3), and I would like to feed the images into a U-Net model.
When training, I get the error: Error when checking target: expected conv2d_29 to have 4 dimensions, but got array with shape (1255, 12)
I'm confused, since I'm sure the image array and labels have no issue. Is the issue within the model? How should I resolve this?
The model is as below:
#def unet(pretrained_weights = None, input_size = (224,224,3)):
concat_axis = 3
input_size= Input((224,224,3))
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(input_size)
conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
#flat1 = Flatten()(pool1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up_conv5 = UpSampling2D(size=(2, 2), data_format="channels_last")(conv5)
ch, cw = get_crop_shape(conv4, up_conv5)
crop_conv4 = Cropping2D(cropping=(ch,cw), data_format="channels_last")(conv4)
up6 = concatenate([up_conv5, crop_conv4], axis=concat_axis)
conv6 = Conv2D(256, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(up6)
conv6 = Conv2D(256, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(conv6)
up_conv6 = UpSampling2D(size=(2, 2), data_format="channels_last")(conv6)
ch, cw = get_crop_shape(conv3, up_conv6)
crop_conv3 = Cropping2D(cropping=(ch,cw), data_format="channels_last")(conv3)
up7 = concatenate([up_conv6, crop_conv3], axis=concat_axis)
conv7 = Conv2D(128, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(up7)
conv7 = Conv2D(128, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(conv7)
up_conv7 = UpSampling2D(size=(2, 2), data_format="channels_last")(conv7)
ch, cw = get_crop_shape(conv2, up_conv7)
crop_conv2 = Cropping2D(cropping=(ch,cw), data_format="channels_last")(conv2)
up8 = concatenate([up_conv7, crop_conv2], axis=concat_axis)
conv8 = Conv2D(64, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(up8)
conv8 = Conv2D(64, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(conv8)
up_conv8 = UpSampling2D(size=(2, 2), data_format="channels_last")(conv8)
ch, cw = get_crop_shape(conv1, up_conv8)
crop_conv1 = Cropping2D(cropping=(ch,cw), data_format="channels_last")(conv1)
up9 = concatenate([up_conv8, crop_conv1], axis=concat_axis)
conv9 = Conv2D(32, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(up9)
conv9 = Conv2D(32, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(conv9)
model = Model(inputs = input_size, outputs = conv9)
Since the model's output layer is a conv layer, the output shape has 4 dimensions: (batch_size, height, width, channels). But you are feeding a target array of shape (1255, 12). If the target labels have a shape of (batch_size, num_features), then the last layer's output should have a shape of (None, 12), i.e. (batch_size, 12).
You have two options to deal with this situation:
Use a dense layer after flattening the output of the conv layer.
Reshape the output of the conv layer to the desired shape.
The choice depends on the problem you're dealing with. If the problem is classification, option 1 lets you add a softmax activation. With option 1 the modification to the code would be:
conv9 = Conv2D(32, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(conv9)
flatten1 = Flatten()(conv9)
dense1 = Dense(12, activation="softmax")(flatten1) # The choice of the activation depends on the problem you are dealing with.
model = Model(inputs = input_size, outputs = dense1)
With option 2, the modification would be:
conv9 = Conv2D(32, (3, 3), padding="same", activation="relu", kernel_initializer = 'he_normal')(conv9)
reshape1 = Reshape((12,))(conv9)  # requires exactly 12 values per sample in conv9's output
model = Model(inputs = input_size, outputs = reshape1)
N.B.: when the Reshape layer is used to reshape the tensor to a (None, 12) shape, make sure the total number of elements per sample in the previous layer's output is exactly 12.
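As a quick check of that condition with the model above: for a (224, 224, 3) input, conv9's output is 224 x 224 x 32 values per sample (the four poolings are undone by the four upsamplings), which is nowhere near 12, so option 1 (Flatten followed by Dense(12)) is the appropriate fix for this particular network.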

'NoneType' object has no attribute '_inbound_nodes'

Hi, I have been struggling to address this problem and cannot really figure it out. I would appreciate any suggestions for my peculiar situation. Thank you very much! My network structure is as follows:
def get_unet(self):
    inputs = Input((self.img_rows, self.img_cols, 1))
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)
    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(drop5))
    print("drop4 shape", type(drop4), drop4.shape)
    print("up6 shape", type(up6), up6.shape)
    merge6 = tf.concat([drop4, up6], axis=3)
    print(merge6.shape)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv6))
    print("conv3,up7", conv3.shape, up7.shape)
    merge7 = tf.concat([conv3, up7], axis=3)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv7))
    print("conv2,up8", conv2.shape, up8.shape)
    merge8 = tf.concat([conv2, up8], axis=3)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv8))
    print("conv1,up9", conv1.shape, up9.shape)
    merge9 = tf.concat([conv1, up9], axis=3)
    print("merge9 shape", merge9.shape)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
    print("conv10 shape", conv10.shape)
    print("inputs shape, outputs conv10 shape", inputs.shape, conv10.shape)
    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
    print('model compile')
    return model
This is the error:
model = Model(inputs=inputs, outputs=conv10)
File "/usr/local/lib/python3.5/dist-packages/keras/legacy/interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/network.py", line 91, in __init__
self._init_graph_network(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/network.py", line 235, in _init_graph_network
self.inputs, self.outputs)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/network.py", line 1412, in _map_graph_network
tensor_index=tensor_index)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/network.py", line 1399, in build_map
node_index, tensor_index)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/network.py", line 1399, in build_map
node_index, tensor_index)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/network.py", line 1399, in build_map
node_index, tensor_index)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/network.py", line 1399, in build_map
node_index, tensor_index)
File "/usr/local/lib/python3.5/dist-packages/keras/engine/network.py", line 1371, in build_map
node = layer._inbound_nodes[node_index]
AttributeError: 'NoneType' object has no attribute '_inbound_nodes'
Replace all your tf.concat() calls with keras.layers.concatenate(); that is what is causing the problem. Also, update your Keras in case you haven't done so already.
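A minimal sketch of the change, applied to the merges in the code above (Keras needs its own layer objects to trace the model graph; a raw tf.concat tensor has no _inbound_nodes attribute, which is exactly what the traceback complains about):
from keras.layers import concatenate

merge6 = concatenate([drop4, up6], axis=3)  # instead of: tf.concat([drop4, up6], axis=3)
merge7 = concatenate([conv3, up7], axis=3)
merge8 = concatenate([conv2, up8], axis=3)
merge9 = concatenate([conv1, up9], axis=3)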
