Related
The proposed method should automatically detect the features of medical images under the conditions determined by the algorithms and achieve correct, fast recognition results.
I was trying to run an image classification with a CNN, but I got the error message below:
File "<ipython-input-2-4e7ea6cc5087>", line 1, in <module>
runfile('C:/Users/MDIC/Desktop/VGG for 10 Images.py', wdir='C:/Users/MDIC/Desktop')
File "C:\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 786, in runfile
execfile(filename, namespace)
File "C:\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/MDIC/Desktop/VGG for 10 Images.py", line 224, in <module>
sp = plt.subplot(nrows, ncols, i + 1)
File "C:\Anaconda3\lib\site-packages\matplotlib\pyplot.py", line 1084, in subplot
a = fig.add_subplot(*args, **kwargs)
File "C:\Anaconda3\lib\site-packages\matplotlib\figure.py", line 1367, in add_subplot
a = subplot_class_factory(projection_class)(self, *args, **kwargs)
File "C:\Anaconda3\lib\site-packages\matplotlib\axes\_subplots.py", line 60, in __init__
).format(maxn=rows*cols, num=num))
ValueError: num must be 1 <= num <= 25, not 26
This is my Python code
# Importing libraries
from matplotlib import pyplot as plt
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array, load_img
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.image as mpimg
import numpy as np
import os
# Preparing dataset
# Setting names of the directories for the ten sets
base_dir = 'data'
seta ='Man1'
setb ='Man2'
setc ='Man3'
setd ='Man4'
sete ='Man5'
setf ='Man6'
setg ='Man7'
seth ='Man8'
seti ='Man9'
setj ='Man10'
# Each of the sets has three subdirectories: train, validation and test
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
def prepare_data(base_dir, seta, setb, setc, setd, sete, setf, setg, seth, seti, setj):
    # Takes the base directory and the ten set names
    # Returns the train and validation paths and the training filenames for each set
    seta_train_dir = os.path.join(train_dir, seta)
    setb_train_dir = os.path.join(train_dir, setb)
    setc_train_dir = os.path.join(train_dir, setc)
    setd_train_dir = os.path.join(train_dir, setd)
    sete_train_dir = os.path.join(train_dir, sete)
    setf_train_dir = os.path.join(train_dir, setf)
    setg_train_dir = os.path.join(train_dir, setg)
    seth_train_dir = os.path.join(train_dir, seth)
    seti_train_dir = os.path.join(train_dir, seti)
    setj_train_dir = os.path.join(train_dir, setj)
    seta_valid_dir = os.path.join(validation_dir, seta)
    setb_valid_dir = os.path.join(validation_dir, setb)
    setc_valid_dir = os.path.join(validation_dir, setc)
    setd_valid_dir = os.path.join(validation_dir, setd)
    sete_valid_dir = os.path.join(validation_dir, sete)
    setf_valid_dir = os.path.join(validation_dir, setf)
    setg_valid_dir = os.path.join(validation_dir, setg)
    seth_valid_dir = os.path.join(validation_dir, seth)
    seti_valid_dir = os.path.join(validation_dir, seti)
    setj_valid_dir = os.path.join(validation_dir, setj)
    seta_train_fnames = os.listdir(seta_train_dir)
    setb_train_fnames = os.listdir(setb_train_dir)
    setc_train_fnames = os.listdir(setc_train_dir)
    setd_train_fnames = os.listdir(setd_train_dir)
    sete_train_fnames = os.listdir(sete_train_dir)
    setf_train_fnames = os.listdir(setf_train_dir)
    setg_train_fnames = os.listdir(setg_train_dir)
    seth_train_fnames = os.listdir(seth_train_dir)
    seti_train_fnames = os.listdir(seti_train_dir)
    setj_train_fnames = os.listdir(setj_train_dir)
    return (seta_train_dir, setb_train_dir, setc_train_dir, setd_train_dir, sete_train_dir,
            setf_train_dir, setg_train_dir, seth_train_dir, seti_train_dir, setj_train_dir,
            seta_valid_dir, setb_valid_dir, setc_valid_dir, setd_valid_dir, sete_valid_dir,
            setf_valid_dir, setg_valid_dir, seth_valid_dir, seti_valid_dir, setj_valid_dir,
            seta_train_fnames, setb_train_fnames, setc_train_fnames, setd_train_fnames, sete_train_fnames,
            setf_train_fnames, setg_train_fnames, seth_train_fnames, seti_train_fnames, setj_train_fnames)
(seta_train_dir, setb_train_dir, setc_train_dir, setd_train_dir, sete_train_dir,
 setf_train_dir, setg_train_dir, seth_train_dir, seti_train_dir, setj_train_dir,
 seta_valid_dir, setb_valid_dir, setc_valid_dir, setd_valid_dir, sete_valid_dir,
 setf_valid_dir, setg_valid_dir, seth_valid_dir, seti_valid_dir, setj_valid_dir,
 seta_train_fnames, setb_train_fnames, setc_train_fnames, setd_train_fnames, sete_train_fnames,
 setf_train_fnames, setg_train_fnames, seth_train_fnames, seti_train_fnames, setj_train_fnames) = prepare_data(base_dir, seta, setb, setc, setd, sete, setf, setg, seth, seti, setj)
seta_test_dir = os.path.join(test_dir, seta)
setb_test_dir = os.path.join(test_dir, setb)
setc_test_dir = os.path.join(test_dir, setc)
setd_test_dir = os.path.join(test_dir, setd)
sete_test_dir = os.path.join(test_dir, sete)
setf_test_dir = os.path.join(test_dir, setf)
setg_test_dir = os.path.join(test_dir, setg)
seth_test_dir = os.path.join(test_dir, seth)
seti_test_dir = os.path.join(test_dir, seti)
setj_test_dir = os.path.join(test_dir, setj)
test_fnames_seta = os.listdir(seta_test_dir)
test_fnames_setb = os.listdir(setb_test_dir)
test_fnames_setc = os.listdir(setc_test_dir)
test_fnames_setd = os.listdir(setd_test_dir)
test_fnames_sete = os.listdir(sete_test_dir)
test_fnames_setf = os.listdir(setf_test_dir)
test_fnames_setg = os.listdir(setg_test_dir)
test_fnames_seth = os.listdir(seth_test_dir)
test_fnames_seti = os.listdir(seti_test_dir)
test_fnames_setj = os.listdir(setj_test_dir)
datagen = ImageDataGenerator(
height_shift_range = 0.2,
width_shift_range = 0.2,
rotation_range = 40,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
fill_mode = 'nearest')
img_path = os.path.join(seta_train_dir, seta_train_fnames[3])
img = load_img(img_path, target_size = (150, 150))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
i = 0
for batch in datagen.flow(x, batch_size = 1):
    plt.figure(i)
    imgplot = plt.imshow(array_to_img(batch[0]))
    i += 1
    if i % 10 == 0:
        break
# Convolutional Neural Network model
# Import TensorFlow libraries
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
img_input = layers.Input(shape = (150, 150, 3))
# 2D Convolution layer with 64 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(64, 3, activation = 'relu')(img_input)
# 2D max pooling layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution layer with 128 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(128, 3, activation = 'relu')(x)
# 2D Max pooling layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution layer with 256 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(256, 3, activation = 'relu')(x)
# 2D Max pooling layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution layer with 512 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(512, 3, activation = 'relu')(x)
# 2D Max pooling layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution layer with 512 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(512, 3, activation = 'relu')(x)
# Flatten layer
x = layers.Flatten()(x)
# Fully connected layers and ReLU activation algorithm
x = layers.Dense(4096, activation = 'relu')(x)
x = layers.Dense(4096, activation = 'relu')(x)
x = layers.Dense(1000, activation = 'relu')(x)
# Dropout layers for optimisation
x = layers.Dropout(0.5)(x)
# Fully connected layers and sigmoid activation algorithm
model = Sequential()
model.add(Dense(10))
output = layers.Dense(10, activation = 'sigmoid')(x)
model = Model(img_input, output)
model.summary()
import tensorflow as tf
# Using sparse_categorical_crossentropy as the loss function and
# the Adam optimizer as the optimizing function when training
model.compile(loss = 'sparse_categorical_crossentropy',
optimizer = tf.optimizers.Adam(learning_rate = 0.0005),
metrics = ['acc'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale = 1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size = (150, 150),
batch_size = 20,
class_mode = 'binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size = (150, 150),
batch_size = 20,
class_mode = 'binary')
# 5x5 grid
nrows = 5
ncols = 5
pic_index = 0
# Set up matplotlib fig and size it to fit 5x5 pics
fig = plt.gcf()
fig.set_size_inches(nrows * 5, ncols * 5)
pic_index += 10
next_seta_pix = [os.path.join(seta_train_dir, fname)
                 for fname in seta_train_fnames[pic_index-10:pic_index]]
next_setb_pix = [os.path.join(setb_train_dir, fname)
                 for fname in setb_train_fnames[pic_index-10:pic_index]]
next_setc_pix = [os.path.join(setc_train_dir, fname)
                 for fname in setc_train_fnames[pic_index-10:pic_index]]
next_setd_pix = [os.path.join(setd_train_dir, fname)
                 for fname in setd_train_fnames[pic_index-10:pic_index]]
next_sete_pix = [os.path.join(sete_train_dir, fname)
                 for fname in sete_train_fnames[pic_index-10:pic_index]]
next_setf_pix = [os.path.join(setf_train_dir, fname)
                 for fname in setf_train_fnames[pic_index-10:pic_index]]
next_setg_pix = [os.path.join(setg_train_dir, fname)
                 for fname in setg_train_fnames[pic_index-10:pic_index]]
next_seth_pix = [os.path.join(seth_train_dir, fname)
                 for fname in seth_train_fnames[pic_index-10:pic_index]]
next_seti_pix = [os.path.join(seti_train_dir, fname)
                 for fname in seti_train_fnames[pic_index-10:pic_index]]
next_setj_pix = [os.path.join(setj_train_dir, fname)
                 for fname in setj_train_fnames[pic_index-10:pic_index]]
for i, img_path in enumerate(next_seta_pix + next_setb_pix + next_setc_pix + next_setd_pix + next_sete_pix + next_setf_pix + next_setg_pix + next_seth_pix + next_seti_pix + next_setj_pix):
    # Set up subplot; subplot indices start at 1
    sp = plt.subplot(nrows, ncols, i + 1)
    # Don't show axes
    sp.axis('Off')
    img = mpimg.imread(img_path)
    plt.imshow(img)
plt.show()
# Train the model
mymodel = model.fit_generator(
train_generator,
steps_per_epoch = 10,
epochs = 80,
validation_data = validation_generator,
validation_steps = 7,
verbose = 2)
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img
successive_outputs = [layer.output for layer in model.layers[1:]]
visualization_model = Model(img_input, successive_outputs)
a_img_files = [os.path.join(seta_train_dir, f) for f in seta_train_fnames]
b_img_files = [os.path.join(setb_train_dir, f) for f in setb_train_fnames]
c_img_files = [os.path.join(setc_train_dir, f) for f in setc_train_fnames]
d_img_files = [os.path.join(setd_train_dir, f) for f in setd_train_fnames]
e_img_files = [os.path.join(sete_train_dir, f) for f in sete_train_fnames]
f_img_files = [os.path.join(setf_train_dir, f) for f in setf_train_fnames]
g_img_files = [os.path.join(setg_train_dir, f) for f in setg_train_fnames]
h_img_files = [os.path.join(seth_train_dir, f) for f in seth_train_fnames]
i_img_files = [os.path.join(seti_train_dir, f) for f in seti_train_fnames]
j_img_files = [os.path.join(setj_train_dir, f) for f in setj_train_fnames]
img_path = random.choice(a_img_files + b_img_files + c_img_files + d_img_files + e_img_files + f_img_files + g_img_files + h_img_files + i_img_files + j_img_files)
img = load_img(img_path, target_size = (150, 150))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
x /= 255
successive_feature_maps = visualization_model.predict(x)
layer_names = [layer.name for layer in model.layers]
for layer_name, feature_map in zip(layer_names, successive_feature_maps):
    if len(feature_map.shape) == 4:
        # Just do this for the conv/maxpool layers
        n_features = feature_map.shape[-1]
        # The feature map has shape (1, size, size, n_features)
        size = feature_map.shape[1]
        # Will tile images in this matrix
        display_grid = np.zeros((size, size * n_features))
        for i in range(n_features):
            # Postprocess the feature
            x = feature_map[0, :, :, i]
            x -= x.mean()
            x *= 64
            x += 128
            x = np.clip(x, 0, 255).astype('float32')
            # Will tile each filter into this big horizontal grid
            display_grid[:, i * size : (i + 1) * size] = x
# Accuracy results for each training and validation epoch
acc = mymodel.history['acc']
val_acc = mymodel.history['val_acc']
# Loss results for each training and validation epoch
loss = mymodel.history['loss']
val_loss = mymodel.history['val_loss']
From what I understood of your code, you are doing multi-class classification, because you used Dense(10) at the last layer. So you need to change class_mode = 'binary' to class_mode = 'categorical', and also change the sigmoid activation to softmax:
output = layers.Dense(10, activation = 'softmax')(x)
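A minimal sketch of those changes, reusing the variable names from the question's code. The grid resize at the end is my own assumption about intent, added because the traceback itself comes from enumerating 100 image paths into a 5x5 grid that holds at most 25 subplots:

output = layers.Dense(10, activation = 'softmax')(x)
model = Model(img_input, output)
model.compile(loss = 'categorical_crossentropy',  # or keep sparse_categorical_crossentropy with class_mode = 'sparse'
              optimizer = tf.optimizers.Adam(learning_rate = 0.0005),
              metrics = ['acc'])
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size = (150, 150),
    batch_size = 20,
    class_mode = 'categorical')
# Size the plotting grid to the number of images (10 sets x 10 filenames = 100)
nrows = 10
ncols = 10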
The proposed method should automatically detect the features of hyperspectral images under the conditions determined by the algorithms and achieve correct, fast recognition results.
Here I was trying to run face recognition with a CNN, but then I got the error message below:
File "<ipython-input-6-fdb29ac830ce>", line 1, in <module>
runfile('C:/Users/MDIC/Desktop/Face Recognition With CNN.py', wdir='C:/Users/MDIC/Desktop')
File "C:\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 786, in runfile
execfile(filename, namespace)
File "C:\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/MDIC/Desktop/Face Recognition With CNN.py", line 221, in <module>
plt.plot(epochs, val_acc)
File "C:\Anaconda3\lib\site-packages\matplotlib\pyplot.py", line 2811, in plot
is not None else {}), **kwargs)
File "C:\Anaconda3\lib\site-packages\matplotlib\__init__.py", line 1810, in inner
return func(ax, *args, **kwargs)
File "C:\Anaconda3\lib\site-packages\matplotlib\axes\_axes.py", line 1611, in plot
for line in self._get_lines(*args, **kwargs):
File "C:\Anaconda3\lib\site-packages\matplotlib\axes\_base.py", line 393, in _grab_next_args
yield from self._plot_args(this, kwargs)
File "C:\Anaconda3\lib\site-packages\matplotlib\axes\_base.py", line 370, in _plot_args
x, y = self._xy_from_xy(x, y)
File "C:\Anaconda3\lib\site-packages\matplotlib\axes\_base.py", line 231, in _xy_from_xy
"have shapes {} and {}".format(x.shape, y.shape))
ValueError: x and y must have same first dimension, but have shapes (2,) and (1,)
This is my code:
# Importing libraries
from matplotlib import pyplot as plt
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array, load_img
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.image as mpimg
import numpy as np
import os
# Preparing dataset
# Setting names of the directories for both sets
base_dir = 'data'
seta ='Man_One'
setb ='Man_Two'
# Each of the sets has three subdirectories: train, validation and test
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
def prepare_data(base_dir, seta, setb):
    # Takes the base directory and both set names
    # Returns the train and validation paths and the training filenames for each set
    seta_train_dir = os.path.join(train_dir, seta)
    setb_train_dir = os.path.join(train_dir, setb)
    seta_valid_dir = os.path.join(validation_dir, seta)
    setb_valid_dir = os.path.join(validation_dir, setb)
    seta_train_fnames = os.listdir(seta_train_dir)
    setb_train_fnames = os.listdir(setb_train_dir)
    return seta_train_dir, setb_train_dir, seta_valid_dir, setb_valid_dir, seta_train_fnames, setb_train_fnames
seta_train_dir, setb_train_dir, seta_valid_dir, setb_valid_dir, seta_train_fnames, setb_train_fnames = prepare_data(base_dir, seta, setb)
seta_test_dir = os.path.join(test_dir, seta)
setb_test_dir = os.path.join(test_dir, setb)
test_fnames_seta = os.listdir(seta_test_dir)
test_fnames_setb = os.listdir(setb_test_dir)
datagen = ImageDataGenerator(
height_shift_range = 0.2,
width_shift_range = 0.2,
rotation_range = 40,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
fill_mode = 'nearest')
img_path = os.path.join(seta_train_dir, seta_train_fnames[3])
img = load_img(img_path, target_size = (150, 150))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
i = 0
for batch in datagen.flow(x, batch_size = 1):
    plt.figure(i)
    imgplot = plt.imshow(array_to_img(batch[0]))
    i += 1
    if i % 5 == 0:
        break
# Convolutional Neural Network model
# Import TensorFlow libraries
from tensorflow.keras import layers
from tensorflow.keras import Model
img_input = layers.Input(shape = (150, 150, 3))
# 2D Convolution layer with 64 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(64, 3, activation = 'relu')(img_input)
# 2D max pooling layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution layer with 128 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(128, 3, activation = 'relu')(x)
# 2D Max pooling layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution layer with 256 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(256, 3, activation = 'relu')(x)
# 2D Max pooling layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution layer with 512 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(512, 3, activation = 'relu')(x)
# 2D Max pooling layer
x = layers.MaxPooling2D(2)(x)
# 2D Convolution layer with 512 filters of dimension 3x3 and ReLU activation algorithm
x = layers.Conv2D(512, 3, activation = 'relu')(x)
# Flatten layer
x = layers.Flatten()(x)
# Fully connected layers and ReLU activation algorithm
x = layers.Dense(1024, activation = 'relu')(x)
x = layers.Dense(1024, activation = 'relu')(x)
x = layers.Dense(1000, activation = 'relu')(x)
# Dropout layers for optimisation
x = layers.Dropout(0.5)(x)
# Fully connected layers and sigmoid activation algorithm
output = layers.Dense(1, activation = 'sigmoid')(x)
model = Model(img_input, output)
model.summary()
import tensorflow as tf
# Using binary_crossentropy as the loss function and
# Adam optimizer as the optimizing function when training
model.compile(loss = 'binary_crossentropy',
optimizer = tf.optimizers.Adam(learning_rate = 0.0005),
metrics = ['acc'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale = 1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size = (150, 150),
batch_size = 20,
class_mode = 'binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size = (150, 150),
batch_size = 20,
class_mode = 'binary')
# 5x5 grid
ncols = 5
nrows = 5
pic_index = 0
# Set up matplotlib fig and size it to fit 5x5 pics
fig = plt.gcf()
fig.set_size_inches(ncols * 5, nrows * 5)
pic_index += 10
next_seta_pix = [os.path.join(seta_train_dir, fname)
                 for fname in seta_train_fnames[pic_index - 10:pic_index]]
next_setb_pix = [os.path.join(setb_train_dir, fname)
                 for fname in setb_train_fnames[pic_index - 10:pic_index]]
for i, img_path in enumerate(next_seta_pix + next_setb_pix):
    # Set up subplot; subplot indices start at 1
    sp = plt.subplot(nrows, ncols, i + 1)
    sp.axis('Off')
    img = mpimg.imread(img_path)
    plt.imshow(img)
plt.show()
# Train the model
mymodel = model.fit_generator(
train_generator,
steps_per_epoch = 10,
epochs = 80,
validation_data = validation_generator,
validation_steps = 7,
verbose = 2)
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img
successive_outputs = [layer.output for layer in model.layers[1:]]
visualization_model = Model(img_input, successive_outputs)
a_img_files = [os.path.join(seta_train_dir, f) for f in seta_train_fnames]
b_img_files = [os.path.join(setb_train_dir, f) for f in setb_train_fnames]
img_path = random.choice(a_img_files + b_img_files)
img = load_img(img_path, target_size = (150, 150))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
x /= 255
successive_feature_maps = visualization_model.predict(x)
layer_names = [layer.name for layer in model.layers]
for layer_name, feature_map in zip(layer_names, successive_feature_maps):
    if len(feature_map.shape) == 4:
        # Just do this for the conv/maxpool layers
        n_features = feature_map.shape[-1]
        # The feature map has shape (1, size, size, n_features)
        size = feature_map.shape[1]
        # Will tile images in this matrix
        display_grid = np.zeros((size, size * n_features))
        for i in range(n_features):
            # Postprocess the feature
            x = feature_map[0, :, :, i]
            x -= x.mean()
            x *= 64
            x += 128
            x = np.clip(x, 0, 255).astype('float32')
            # Will tile each filter into this big horizontal grid
            display_grid[:, i * size : (i + 1) * size] = x
# Accuracy results for each training and validation epoch
acc = mymodel.history['acc']
val_acc = mymodel.history['val_acc']
# Loss results for each training and validation epoch
loss = mymodel.history['loss']
val_loss = mymodel.history['val_loss']
epochs = range(len(acc))
# Plot accuracy for each training and validation epoch
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')
plt.legend(['train', 'val'], loc='center')
plt.figure()
# Plot loss for each training and validation epoch
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')
plt.legend(['train', 'val'], loc='center')
plt.figure()
# Testing model on a random train image from set a
train_img = random.choice(seta_train_fnames)
train_image_path = os.path.join(seta_train_dir, train_img)
train_img = load_img(train_image_path, target_size = (150, 150))
plt.figure()
plt.imshow(train_img)
train_img = (np.expand_dims(train_img, 0))
train_img = tf.cast(train_img, tf.float32)
print(train_img.shape)
model.predict(train_img)
# Testing model on a random train image from set b
train_img = random.choice(setb_train_fnames)
train_image_path = os.path.join(setb_train_dir, train_img)
train_img = load_img(train_image_path, target_size = (150, 150))
plt.figure()
plt.imshow(train_img)
train_img = (np.expand_dims(train_img, 0))
train_img = tf.cast(train_img, tf.float32)
print(train_img.shape)
model.predict(train_img)
# Testing the model on every image in test set a
cal_mo = 0
cal_mt = 0
cal_unconclusive = 0
alist = []
for fname in test_fnames_seta:
    if fname.startswith('.'):
        continue
    file_path = os.path.join(seta_test_dir, fname)
    load_file = load_img(file_path, target_size = (150, 150))
    load_file = (np.expand_dims(load_file, 0))
    load_file = tf.cast(load_file, tf.float32)
    pred_img = model.predict(load_file)
    if(pred_img[0] < 0.5):
        cal_mo += 1
    elif(pred_img[0] > 0.5):
        cal_mt += 1
    else:
        print(pred_img[0], "\n")
        cal_unconclusive += 1
        alist.append(file_path)
print(alist)
print("Identified as: \n")
print("Man_One:", cal_mo)
print("Man_Two:", cal_mt)
print( "Inconclusive:", cal_unconclusive)
print( "Percentage:", (cal_mo/(cal_mo + cal_mt + cal_unconclusive)) * 100)
a = (cal_mo/(cal_mo + cal_mt + cal_unconclusive)) * 100
# Testing the model on every image in test set b
cal_mo = 0
cal_mt = 0
cal_unconclusive = 0
alist = []
for fname in test_fnames_setb:
    if fname.startswith('.'):
        continue
    file_path = os.path.join(setb_test_dir, fname)
    load_file = load_img(file_path, target_size = (150, 150))
    load_file = (np.expand_dims(load_file, 0))
    load_file = tf.cast(load_file, tf.float32)
    pred_img = model.predict(load_file)
    if(pred_img[0] < 0.5):
        cal_mo += 1
    elif(pred_img[0] > 0.5):
        cal_mt += 1
    else:
        print(pred_img[0], "\n")
        cal_unconclusive += 1
        alist.append(file_path)
print(alist)
print("Identified as: \n")
print("Man_One:", cal_mo)
print("Man_Two:", cal_mt)
print( "Inconclusive:", cal_unconclusive)
print( "Percentage:", (cal_mt/(cal_mo + cal_mt + cal_unconclusive)) * 100)
b = (cal_mt/(cal_mo + cal_mt + cal_unconclusive)) * 100
avg = (a+b)/2
print("Average Percentage:", avg)
Kindly look carefully at the above program, since it is a little bit long.
Please help me as soon as possible.
Thank you very much.
It could be that your validation data generator terminates before reaching the 80 training epochs. Check that you have at least 7 * 80 validation images.
Then check the number of elements in mymodel.history['val_acc']. It must be the same for training and validation if you use epochs = range(len(acc)) as the x values for both graphs. The problem is that your acc and val_acc have different numbers of elements.
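A small sketch of plotting code that cannot hit that mismatch, plotting each series against its own length (variable names taken from the question's code):

acc = mymodel.history['acc']
val_acc = mymodel.history['val_acc']
# Plot each curve against its own epoch count, so unequal history
# lengths cannot raise the shape-mismatch ValueError
plt.plot(range(len(acc)), acc)
plt.plot(range(len(val_acc)), val_acc)
plt.title('Training and validation accuracy')
plt.legend(['train', 'val'], loc = 'center')
plt.show()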
I am learning CNNs, and I found a script online that classifies building rooftops from satellite images. The script works just fine, but I have not been able to figure out a way to test it on a new single image. I show the code briefly below and then what I have tried:
seq = iaa.Sequential([
    iaa.imgcorruptlike.Fog(severity=1),
    iaa.imgcorruptlike.Spatter(severity=1),
])
batch_size = 16
size = 512
epochs =50
version = 1 # version 2 for MobilV2unet
data_augmentation = True
model_type = 'UNet%d' % (version)
translearn = True
from tensorflow.keras.applications import MobileNetV2
def m_u_net(input_shape):
    inputs = Input(shape=input_shape, name="input_image")
    encoder = MobileNetV2(input_tensor=inputs, weights="imagenet", include_top=False, alpha=1.3)
    #encoder.trainable=False
    skip_connection_names = ["input_image", "block_1_expand_relu", "block_3_expand_relu", "block_6_expand_relu"]
    encoder_output = encoder.get_layer("block_13_expand_relu").output
    f = [16, 32, 48, 64]
    x = encoder_output
    for i in range(1, len(skip_connection_names)+1, 1):
        x_skip = encoder.get_layer(skip_connection_names[-i]).output
        x = UpSampling2D((2, 2))(x)
        x = Concatenate()([x, x_skip])
        x = Conv2D(f[-i], (3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = Conv2D(f[-i], (3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
    x = Conv2D(1, (1, 1), padding="same")(x)
    x = Activation("sigmoid")(x)
    model = Model(inputs, x)
    return model
def load_rasters_simple(path, pathX, pathY):  # Subset from original raster with extent and upper-left coord
    """Load training data pairs (two high resolution images and two low resolution images)"""
    pathXabs = os.path.join(path, pathX)
    pathYabs = os.path.join(path, pathY)
    le = len(os.listdir(pathXabs))
    stackX = []
    stackY = []
    for i in range(0, le):
        fileX = os.path.join(pathXabs, os.listdir(pathXabs)[i])
        fileY = os.path.join(pathYabs, os.listdir(pathXabs)[i])
        dataX = gdal_array.LoadFile(fileX)  # .astype(np.int), ysize=extent[1], xsize=extent[0]
        stackX.append(dataX)
        dataY = gdal_array.LoadFile(fileY)  # .astype(np.int), ysize=extent[1], xsize=extent[0]
        stackY.append(dataY)
    stackX = np.array(stackX)
    stackY = np.array(stackY)
    return stackX, stackY
X, Y= load_rasters_simple('/Users/vaibhavsaxena/Desktop/segmentation/Classification/Satellite dataset ó± (global cities)','image','label')
def slice(arr, size, inputsize, stride):
    result = []
    if stride is None:
        stride = size
    for i in range(0, (inputsize-size)+1, stride):
        for j in range(0, (inputsize-size)+1, stride):
            s = arr[i:(i+size), j:(j+size), ]
            result.append(s)
    result = np.array(result)
    return result
def batchslice(arr, size, inputsize, stride, num_img):
    result = []
    for i in range(0, num_img):
        s = slice(arr[i,], size, inputsize, stride)
        result.append(s)
    result = np.array(result)
    result = result.reshape(result.shape[0]*result.shape[1], result.shape[2], result.shape[3], -1)
    return result
Y=batchslice(Y, size, Y.shape[1], size, Y.shape[0]).squeeze()
X_cl =batchslice(X_cl, size, X_cl.shape[1], size, X_cl.shape[0])
X_train = X_cl[:int(X_cl.shape[0]*0.8),]
Y_train = Y[:int(Y.shape[0]*0.8),]
X_test = X_cl[int(X_cl.shape[0]*0.8)+1:,]
Y_test = Y[int(Y.shape[0]*0.8)+1:,]
Then comes the big U-Net model architecture; the whole script can be found here.
The model works just fine with the dataset. I am trying to test it with my own out-of-dataset image, and this is what I have tried:
model = load_model('no_aug_unet_model.h5', custom_objects=dependencies)
model.compile(loss='binary_crossentropy', metrics=[iou],
optimizer=Adam(learning_rate=lr_schedule(0)))
from keras.preprocessing import image
test_image= image.load_img('bangkok_noi_2.jpg', target_size = (2000, 2000))
test_image = image.img_to_array(test_image)
test_image1 = test_image.reshape((1,2000,2000,3))
testpre = model.predict(test_image1)
img = Image.fromarray(test_image, 'RGB')
img.show()
The original shape of my test image is (1852, 3312, 3).
I am getting a weirdly predicted image that makes no sense, unlike what I expected. I believe I am doing the wrong preprocessing on my test image. Any help would be extremely appreciated.
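One possible direction, not a definitive fix: the training pipeline above slices the rasters into size = 512 tiles, so a preprocessing sketch that matches training would resize (or tile) the test image to 512x512 and scale it the same way the training data was scaled. Whether the /255 step is needed depends on the original script; it is an assumption here:

import numpy as np
from keras.preprocessing import image

# Sketch: match the 512x512 training tile size instead of (2000, 2000)
test_image = image.load_img('bangkok_noi_2.jpg', target_size = (512, 512))
test_image = image.img_to_array(test_image)
test_image = test_image / 255.0             # assumption: training inputs were scaled to [0, 1]
test_image = np.expand_dims(test_image, 0)  # shape (1, 512, 512, 3)
testpre = model.predict(test_image)         # sigmoid mask, shape (1, 512, 512, 1)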
I'm writing a U-Net CNN in Keras and I am trying to use fit_generator for training. To make this work I use a generator script that feeds the images and labels to my network (the simple fit function works, but I want to train on a big dataset that cannot fit into memory).
My problem is that the model summary correctly says the output layer has shape (None, 288, 512, 4):
https://i.imgur.com/69xG8pO.jpg
but when I try actual training I get this error:
https://i.imgur.com/j7H6sHX.jpg
I don't get why Keras wants (288, 512, 1) when the summary expects (288, 512, 4).
I tried it with my own U-Net code and also copied a working implementation from GitHub, but both have the exact same problem, which leads me to believe that my generator script is the weak link. Below is the code I used (the image and label array functions used here already worked when I used them with fit in a previous CNN):
def generator(img_path, label_path, batch_size, height, width, num_classes):
    input_pairs = get_pairs(img_path, label_path)  # rewrite if param name changes
    random.shuffle(input_pairs)
    iterate_pairs = itertools.cycle(input_pairs)
    while True:
        X = []
        Y = []
        for _ in range(batch_size):
            im, lab = next(iterate_pairs)
            appended_im = next(iter(im))
            appended_lab = next(iter(lab))
            X.append(input_image_array(appended_im, width, height))
            Y.append(input_label_array(appended_lab, width, height, num_classes, palette))
        yield (np.array(X), np.array(Y))
I tried the generator out, and the provided batches have the following shapes (for a batch size of 15):
(15, 288, 512, 3)
(15, 288, 512, 4)
So I really do not know what could be the problem here.
EDIT: Here is the model code I used:
def conv_block(input_tensor, n_filter, kernel=(3, 3), padding='same', initializer="he_normal"):
    x = Conv2D(n_filter, kernel, padding=padding, kernel_initializer=initializer)(input_tensor)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(n_filter, kernel, padding=padding, kernel_initializer=initializer)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x

def deconv_block(input_tensor, residual, n_filter, kernel=(3, 3), strides=(2, 2), padding='same'):
    y = Conv2DTranspose(n_filter, kernel, strides, padding)(input_tensor)
    y = concatenate([y, residual], axis=3)
    y = conv_block(y, n_filter)
    return y
# NETWORK - n_classes is the desired number of classes, filters are fixed
def Unet(input_height, input_width, n_classes=4, filters=64):
    # Downsampling
    input_layer = Input(shape=(input_height, input_width, 3), name='input')
    conv_1 = conv_block(input_layer, filters)
    conv_1_out = MaxPooling2D(pool_size=(2, 2))(conv_1)
    conv_2 = conv_block(conv_1_out, filters*2)
    conv_2_out = MaxPooling2D(pool_size=(2, 2))(conv_2)
    conv_3 = conv_block(conv_2_out, filters*4)
    conv_3_out = MaxPooling2D(pool_size=(2, 2))(conv_3)
    conv_4 = conv_block(conv_3_out, filters*8)
    conv_4_out = MaxPooling2D(pool_size=(2, 2))(conv_4)
    conv_4_drop = Dropout(0.5)(conv_4_out)
    conv_5 = conv_block(conv_4_drop, filters*16)
    conv_5_drop = Dropout(0.5)(conv_5)
    # Upsampling
    deconv_1 = deconv_block(conv_5_drop, conv_4, filters*8)
    deconv_1_drop = Dropout(0.5)(deconv_1)
    deconv_2 = deconv_block(deconv_1_drop, conv_3, filters*4)
    deconv_2_drop = Dropout(0.5)(deconv_2)
    deconv_3 = deconv_block(deconv_2_drop, conv_2, filters*2)
    deconv_3 = deconv_block(deconv_3, conv_1, filters)
    # Output - mapping each 64-component feature vector to the number of classes
    output = Conv2D(n_classes, (1, 1))(deconv_3)
    output = BatchNormalization()(output)
    output = Activation("softmax")(output)
    # Embed into the functional API
    model = Model(inputs=input_layer, outputs=output, name="Unet")
    return model
Change your loss to categorical_crossentropy.
When using the sparse_categorical_crossentropy loss, your targets
should be integer targets.
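To illustrate the two consistent pairings (a sketch, with target shapes matching the generator output described in the question):

# One-hot masks of shape (batch, 288, 512, 4) pair with categorical_crossentropy
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])

# sparse_categorical_crossentropy instead expects integer class indices,
# i.e. masks of shape (batch, 288, 512) or (batch, 288, 512, 1) - which is
# why Keras asked for (288, 512, 1) while the model outputs (288, 512, 4).
# Convert one-hot masks if you want to keep the sparse loss:
# y_sparse = np.argmax(y_onehot, axis = -1)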
I have built a model to distinguish cats from dogs using Kaggle's cats_vs_dogs dataset. I have tried two ways to do it. For the first one, I used three existing models (ResNet50, Xception, InceptionV3) to extract features: I put the training data through each model's convolutional base, predicted, and concatenated the results, then used them for a standalone densely connected classifier. The result is pretty good; after five epochs of training, val_acc reached 99.58%. Then I wanted to use data augmentation and fine-tuning, so I extended those three models by adding layers on top and ran the whole thing end-to-end on the input data. The strange thing is that the second way gets good results in training but lousy ones in validation, and the val_acc is always a constant (0.5). I feel very confused: how can these two ways give such different results?
Here is my code:
from keras.models import *
from keras.layers import *
from keras.applications import *
from keras.preprocessing.image import *
res_net_input = Input((224, 224, 3), name='res_net')
res_net_base_model = ResNet50(input_tensor=res_net_input, weights='imagenet', include_top=False)
for layers in res_net_base_model.layers:
    layers.trainable = False
xception_input = Input((299, 299, 3), name='xception')
xception_base_model = Xception(input_tensor=xception_input, weights='imagenet', include_top=False)
for layers in xception_base_model.layers:
    layers.trainable = False
inception_input = Input((299, 299, 3), name='inception')
inception_base_model = InceptionV3(input_tensor=inception_input, weights='imagenet', include_top=False)
for layers in inception_base_model.layers:
    layers.trainable = False
res_result = GlobalAveragePooling2D()(res_net_base_model.output)
xcp_result = GlobalAveragePooling2D()(xception_base_model.output)
icp_result = GlobalAveragePooling2D()(inception_base_model.output)
concatenated = concatenate([res_result, xcp_result, icp_result], axis=1)
x = Dropout(0.5)(concatenated)
x = Dense(1, activation='sigmoid')(x)
model = Model([res_net_base_model.input, xception_base_model.input, inception_base_model.input], x)
model.compile(optimizer='adadelta',
loss='binary_crossentropy',
metrics=['accuracy'])
train_imgen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
rotation_range=5.,
horizontal_flip = True)
validation_imgen = ImageDataGenerator(rescale = 1./255)
def generate_generator_multiple(generator, dir1, batch_size, img_size1, img_size2, img_size3):
    genX1 = generator.flow_from_directory(dir1,
                                          target_size = (img_size1[0], img_size1[1]),
                                          class_mode = 'binary',
                                          batch_size = batch_size,
                                          shuffle = False)
    genX2 = generator.flow_from_directory(dir1,
                                          target_size = (img_size2[0], img_size2[1]),
                                          class_mode = 'binary',
                                          batch_size = batch_size,
                                          shuffle = False,
                                          seed = 7)
    genX3 = generator.flow_from_directory(dir1,
                                          target_size = (img_size3[0], img_size3[1]),
                                          class_mode = 'binary',
                                          batch_size = batch_size,
                                          shuffle = False,
                                          seed = 7)
    while True:
        X1i = genX1.next()
        X2i = genX2.next()
        X3i = genX3.next()
        yield [X1i[0], X2i[0], X3i[0]], X1i[1]
train_generator = generate_generator_multiple(train_imgen, '/output/keras/dog_vs_cat_full/train', 100, (224,224), (299, 299), (299, 299))
validation_generator = generate_generator_multiple(validation_imgen,'/output/keras/dog_vs_cat_full/validation', 100, (224,224), (299, 299), (299, 299))
history = model.fit_generator(train_generator,
                              steps_per_epoch = 200,
                              epochs = 5,
                              validation_data = validation_generator,
                              validation_steps = 50,
                              shuffle = False)
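For contrast with the code above (which is the end-to-end variant), here is a hedged sketch of the first approach described in the question: run each frozen base once, cache the pooled features, and train a small classifier on the concatenated features. The helper names are my own, not from the original script, and it assumes each base was built with pooling='avg' so predict returns 2048-wide vectors:

import numpy as np
from keras.models import Model
from keras.layers import Input, Dense, Dropout

def extract_features(base_model, generator, steps):
    # Run a frozen convolutional base once over the data and cache its
    # pooled output (assumes the base was built with pooling='avg')
    feats, labels = [], []
    for _ in range(steps):
        x_batch, y_batch = next(generator)
        feats.append(base_model.predict(x_batch))
        labels.append(y_batch)
    return np.concatenate(feats), np.concatenate(labels)

# Suppose res_feats, xcp_feats and icp_feats were extracted this way, then
# concatenated along the feature axis:
# train_feats = np.concatenate([res_feats, xcp_feats, icp_feats], axis = 1)
feat_input = Input(shape = (6144,))  # 2048 each from ResNet50, Xception, InceptionV3
x = Dropout(0.5)(feat_input)
x = Dense(1, activation = 'sigmoid')(x)
classifier = Model(feat_input, x)
classifier.compile(optimizer = 'adadelta', loss = 'binary_crossentropy', metrics = ['accuracy'])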