NameError: name 'classifier' is not defined

I am new to machine learning. I was trying to predict on a dataset, but when I run the program it gives me the following error:
NameError: name 'classifier' is not defined
Here is my code:
import numpy as np
from keras.preprocessing import image
test_image = image.load_img('dataset/single_prediction/1.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
training_set.class_indices
if result[0][0] == 1:
    prediction = 'nsfw'
else:
    prediction = 'sfw'

You are using classifier to make predictions, but classifier is not defined anywhere. That is what the error is telling you.
To solve this, you must have a saved Keras model that was trained for your specific problem. If you have that, you can load it and make predictions.
The code below shows how you can load the model.
from keras.models import load_model
classifier = load_model('path_to_your_model')
After the model is loaded, you can use it to make predictions just as you do above.
import numpy as np
from keras.preprocessing import image
test_image = image.load_img('dataset/single_prediction/1.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
training_set.class_indices
if result[0][0] == 1:
    prediction = 'nsfw'
else:
    prediction = 'sfw'
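As a side note, the training_set.class_indices line on its own does nothing with its return value. If you want to map the predicted index back to a label instead of hard-coding the strings, a minimal sketch (assuming training_set is the generator the model was trained on):
# class_indices is a {label: index} dict built from the training folder names
index_to_label = {v: k for k, v in training_set.class_indices.items()}
predicted_index = int(round(float(result[0][0])))  # round the single sigmoid output to 0 or 1
prediction = index_to_label[predicted_index]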

You have to create an 'empty' model before you start adding layers to it. You can fix this error by adding these lines above your code:
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.models import load_model
#empty model
classifier = Sequential()
Then continue by specifying the layers, for example:
#add layers, starting with the first hidden layer
classifier.add(Dense(units=15, kernel_initializer='uniform', activation='relu', input_dim=15))
classifier.add(Dropout(rate=0.1))
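Keep in mind that creating an empty model only removes the NameError; predictions will be meaningless until the model is compiled and trained (or trained weights are loaded, as in the first answer). A minimal sketch of what has to happen before classifier.predict, where x_train and y_train are placeholders for your own training data:
# hypothetical training setup; adjust optimizer, loss and data to your problem
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
classifier.fit(x_train, y_train, batch_size=32, epochs=10)
classifier.save('classifier.h5')  # so load_model('classifier.h5') works later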


ValueError: Could not interpret optimizer identifier: []

Hi, I'm working on image classification using XGBoost with VGG16 (ImageNet weights) as the feature extractor; here is my code.
I tried to implement the feature extractor on the CK+48 image dataset from Kaggle with tensorflow 2.11.0. I'm facing this error:
ValueError: Could not interpret optimizer identifier: []
I have tried a lot to solve it; any help will be appreciated.
import numpy as np
import matplotlib.pyplot as plt
import glob
import cv2
import keras
from tensorflow.keras import Model
#from tensorflow.python.keras.layers import Dense, Flatten, GlobalAveragePooling2D, Activatation
from keras.models import Model, Sequential
from keras.models import load_model
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.layers import BatchNormalization
import os
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM, BatchNormalization
from keras.callbacks import TensorBoard
from keras.callbacks import ModelCheckpoint
#from keras.optimizers import adam
import seaborn as sns
from keras.applications.vgg16 import VGG16
# Read input images and assign labels based on folder names
print(os.listdir("C:/Users/Tanzeel ur Rehman/Desktop/CK+48"))
SIZE = 256 #Resize
#Capture training data and labels into respective lists
train_images = []
train_labels = []
for directory_path in glob.glob("C:/Users/Tanzeel ur Rehman/Desktop/CK+48/train"):
    label = directory_path.split("\\")[-1]
    print(label)
    for img_path in glob.glob(os.path.join(directory_path, "*.jpg")):
        print(img_path)
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        img = cv2.resize(img, (SIZE, SIZE))
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        train_images.append(img)
        train_labels.append(label)
#Convert lists to arrays
train_images = np.array(train_images)
train_labels = np.array(train_labels)
# Capture test/validation data and labels into respective lists
test_images = []
test_labels = []
for directory_path in glob.glob("C:/Users/Tanzeel ur Rehman/Desktop/CK+48/test"):
    fruit_label = directory_path.split("\\")[-1]
    for img_path in glob.glob(os.path.join(directory_path, "*.jpg")):
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        img = cv2.resize(img, (SIZE, SIZE))
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        test_images.append(img)
        test_labels.append(fruit_label)
#Convert lists to arrays
test_images = np.array(test_images)
test_labels = np.array(test_labels)
#Encode labels from text to integers.
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(test_labels)
test_labels_encoded = le.transform(test_labels)
le.fit(train_labels)
train_labels_encoded = le.transform(train_labels)
#Split data into test and train datasets (already split but assigning to meaningful convention)
x_train, y_train, x_test, y_test = train_images, train_labels_encoded, test_images, test_labels_encoded
###################################################################
# Normalize pixel values to between 0 and 1
x_train, x_test = x_train / 255.0, x_test / 255.0
#One hot encode y values for neural network.
#from keras.utils import to_categorical
#y_train_one_hot = to_categorical(y_train)
#y_test_one_hot = to_categorical(y_test)
#############################
#Load model without classifier/fully connected layers
VGG_model = VGG16(weights='imagenet', include_top=False, input_shape=(SIZE, SIZE, 3))
#Make the loaded layers non-trainable. This is important since we want to work with the pre-trained weights
for layer in VGG_model.layers:
    layer.trainable = False
VGG_model.summary()  #Trainable parameters will be 0
#Now, let us use features from convolutional network for RF
feature_extractor=VGG_model.predict(x_train)
The error occurs at the last line of the code, when I try to use the features from the convolutional network for random forests on the training dataset of images.
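A common cause of this particular error in TF 2.x is mixing imports from the standalone keras package with tensorflow.keras in the same script, as the import block above does. A minimal sketch of the feature-extraction step using tensorflow.keras consistently (same logic as above, only the imports change):
import numpy as np
from tensorflow.keras.applications.vgg16 import VGG16

SIZE = 256
VGG_model = VGG16(weights='imagenet', include_top=False, input_shape=(SIZE, SIZE, 3))
for layer in VGG_model.layers:
    layer.trainable = False
# x_train prepared as above: shape (n_samples, SIZE, SIZE, 3), scaled to [0, 1]
features = VGG_model.predict(x_train)
features = features.reshape(features.shape[0], -1)  # flatten the feature maps for XGBoost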

Tuple index out of range when trying to fit the CNN model

The dataset that I am using is the standard chest X-ray dataset https://www.kaggle.com/datasets/paultimothymooney/chest-xray-pneumonia. I have been getting this error (tuple index out of range) while fitting the CNN model. Is there a way to circumvent this issue? I suppose the "validation_data" argument needs to be adjusted in some way.
import os
import glob
import cv2
import numpy as np
import pandas as pd
from PIL import Image
import tensorflow as tf
import random
#from pathlib import path
import pathlib2 as pathlib
from pathlib2 import Path
#from keras.models import sequential, Model, load_model
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, Activation
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras.callbacks import Callback, EarlyStopping
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import confusion_matrix
from tensorflow.keras import backend as K
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing import image
%matplotlib inline
import shutup; shutup.please()
# DATA PATH #
print (os.listdir("C:/Users/Syd_R/OneDrive/Desktop/Peeumonia_data/archive/chest_xray/chest_xray/"))
data_dir = Path("C:/Users/Syd_R/OneDrive/Desktop/Peeumonia_data/archive/chest_xray/chest_xray/")
train_dir = data_dir/'train'
val_dir = data_dir/'val'
test_dir = data_dir/'test'
# LOAD TRAINING DATA TO DATAFRAME #
def load_train():
    normal_cases_dir = train_dir/'NORMAL'
    pneumonia_cases_dir = train_dir/'PNEUMONIA'
    # list of all images
    normal_cases = normal_cases_dir.glob('*.jpeg')
    pneumonia_cases = pneumonia_cases_dir.glob('*.jpeg')
    train_data = []
    train_label = []
    for img in normal_cases:
        train_data.append(img)
        train_label.append('NORMAL')
    for img in pneumonia_cases:
        train_data.append(img)
        train_label.append('PNEUMONIA')
    df = pd.DataFrame(train_data)
    df.columns = ['images']
    df['labels'] = train_label
    df = df.sample(frac=1).reset_index(drop=True)
    return df
train_data = load_train()
train_data.shape
# VISUALIZE THE AMOUNT OF TRAINING DATA WITH LABELS #
plt.bar(train_data['labels'].value_counts().index,train_data['labels'].value_counts().values)
plt.show()
# VISUALIZE THE TRAINING IMAGE DATA BY RANDOM SAMPLING #
plt.figure(figsize=(10,5))
for i in range(10):
    ax = plt.subplot(2,5,i+1)
    num = random.randint(0, 5000+i)
    im = train_data.loc[num].at['images']
    im1 = train_data.loc[num].at['labels']
    img = cv2.imread(str(im))
    img = cv2.resize(img, (224,224))
    plt.imshow(img)
    plt.title(im1)
    plt.axis("off")
    print(num)
# DATA PRE-PROCESSING #
def prepare_and_load(isval=True):
    if isval==True:
        normal_dir = val_dir/'NORMAL'
        pneumonia_dir = val_dir/'PNEUMONIA'
    else:
        normal_dir = test_dir/'NORMAL'
        pneumonia_dir = test_dir/'PNEUMONIA'
    normal_cases = normal_dir.glob('*.jpeg')
    pneumonia_cases = pneumonia_dir.glob('*.jpeg')
    data, labels = ([] for x in range(2))
    def prepare(case):
        for img in case:
            img = cv2.imread(str(img))
            img = cv2.resize(img, (224,224))
            if img.shape[2] == 1:
                img = np.dstack([img, img, img])
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = img.astype(np.float32)/255
            if case==normal_cases:
                label = to_categorical(0, num_classes=2)
            else:
                label = to_categorical(1, num_classes=2)
            data.append(img)
            labels.append(label)
        return data, labels
    prepare(normal_cases)
    d,l = prepare(pneumonia_cases)
    d = np.array(d)
    l = np.array(1)
    return d,l
val_data,val_labels = prepare_and_load(isval=True)
test_data,test_labels = prepare_and_load(isval=False)
print('Number of test images -->', len(test_data))
print('Number of validation images -->', len(val_data))
# DEFINE A FUNCTION TO GENERATE BATCHES FROM TRAINING IMAGES #
def data_gen(data, batch_size):
    # Get total number of samples in the data
    n = len(data)
    steps = n//batch_size
    # Define two numpy arrays for containing batch data and labels
    batch_data = np.zeros((batch_size, 224, 224, 3), dtype=np.float32)
    batch_labels = np.zeros((batch_size,2), dtype=np.float32)
    # Get a numpy array of all the indices of the input data
    indices = np.arange(n)
    # Initialize a counter
    i = 0
    while True:
        np.random.shuffle(indices)
        # Get the next batch
        count = 0
        next_batch = indices[(i*batch_size):(i+1)*batch_size]
        for j,idx in enumerate(next_batch):
            img_name = data.iloc[idx]['images']
            label = data.iloc[idx]['images']
            if label=='NORMAL':
                label = 0
            else:
                label = 1
            # one hot encoding
            encoded_label = to_categorical(label, num_classes=2)
            # read the image and resize
            img = cv2.imread(str(img_name))
            img = cv2.resize(img,(224,224))
            # check if it's grayscale
            if img.shape[2]==1:
                img = np.dstack([img, img, img])
            # cv2 reads in BGR mode by default
            orig_imag = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            # normalize the image pixels
            orig_img = img.astype(np.float32)/255
            batch_data[count] = orig_img
            batch_labels[count] = encoded_label
            count += 1
            if count==batch_size-1:
                break
        i += 1
        yield batch_data, batch_labels
        if i>=steps:
            i = 0
# DEFINE THE CNN MODEL #
model = Sequential()
model.add(Conv2D(32, (3,3), input_shape=(224, 224, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(32, (3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64, (3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(2))
model.add(Activation('softmax'))
# DEFINE PARAMETERS FOR THE CNN MODEL #
batch_size = 64
nb_epochs = 3
# Get a train data generator
train_data_gen = data_gen(data= train_data, batch_size=batch_size)
# DEFINE THE NUMBER OF TRAINING STEPS #
nb_train_steps = train_data.shape[0]//batch_size
print("Number of training and validation steps: {} and {}".format(nb_train_steps, len(val_data)))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# FIT THE MODEL #
history = model.fit_generator(train_data_gen,
                              epochs=nb_epochs,
                              steps_per_epoch=nb_train_steps,
                              validation_data=(val_data, val_labels))
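A likely culprit, reading the code above: prepare_and_load returns l = np.array(1), a zero-dimensional array, instead of np.array(l), so val_labels has an empty shape tuple and indexing it inside fit raises the tuple-index error. A second suspect is the generator line label = data.iloc[idx]['images'], which reads the image path instead of ['labels']. A hedged sketch of the first fix (only the return values change):
    # at the end of prepare_and_load: convert the collected label list, not the literal 1
    d = np.array(d)
    l = np.array(l)  # was np.array(1), whose shape is the empty tuple ()
    return d, l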

Transfer-learning ResNet model does not learn

I trained a ResNet-50 model to classify images from 6 classes (my own dataset) and saved it. But the model did not learn properly and its predictions are incorrect. What could be the reason for this poor learning?
Here is my code and the output plots, using Keras with the TensorFlow backend. How can I solve this?
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.layers import Dense, Dropout
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.preprocessing.image import ImageDataGenerator, image
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Define some constant needed throughout the script
N_CLASSES = 6
EPOCHS = 20
PATIENCE = 5
TRAIN_PATH= '/Train/'
VALID_PATH = '/Test/'
MODEL_CHECK_WEIGHT_NAME = 'resnet_monki_v1_chk.h5'
# Define the model to be used: we freeze the pre-trained ResNet weights and add a few layers on top to utilize our custom dataset
K.set_learning_phase(0)
model = ResNet50(input_shape=(224,224,3),include_top=False, weights='imagenet', pooling='avg')
K.set_learning_phase(1)
x = model.output
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
output = Dense(N_CLASSES, activation='softmax', name='custom_output')(x)
custom_resnet = Model(inputs=model.input, outputs = output)
for layer in model.layers:
    layer.trainable = False
custom_resnet.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
custom_resnet.summary()
# 4. Load dataset to be used
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
traingen = datagen.flow_from_directory(TRAIN_PATH, target_size=(224,224), batch_size=32, class_mode='categorical')
validgen = datagen.flow_from_directory(VALID_PATH, target_size=(224,224), batch_size=32, class_mode='categorical', shuffle=False)
# 5. Train Model we use ModelCheckpoint to save the best model based on validation accuracy
es_callback = EarlyStopping(monitor='val_acc', patience=PATIENCE, mode='max')
mc_callback = ModelCheckpoint(filepath=MODEL_CHECK_WEIGHT_NAME, monitor='val_acc', save_best_only=True, mode='max')
train_history = custom_resnet.fit_generator(traingen, steps_per_epoch=len(traingen), epochs= EPOCHS, validation_data=traingen, validation_steps=len(validgen), verbose=2, callbacks=[es_callback, mc_callback])
custom_resnet.save('custom_resnet.h5')
Here are the plots; I had to post them as links since the site does not let me embed pictures.
[plot images not included]
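One thing that stands out in the code above, offered as an observation rather than a confirmed cause of the poor learning: fit_generator is passed validation_data=traingen while validation_steps=len(validgen), so the model is validated on the training generator and validgen is never used. The intended call was presumably:
train_history = custom_resnet.fit_generator(traingen, steps_per_epoch=len(traingen),
                                            epochs=EPOCHS, validation_data=validgen,
                                            validation_steps=len(validgen), verbose=2,
                                            callbacks=[es_callback, mc_callback])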

How to export the Keras model with ResNet-101 to TensorFlow Serving with the exporter.export_saved_model method

I'm using Keras and ResNet-101 for my training and want to export my model to TensorFlow Serving with the method exporter.export_inference_graph, but it gives me this error:
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value conv2_block2_1_bn/moving_variance/local_step_1
Is there any specific reason why you want to save the model using export_saved_model?
If your goal is to save the pretrained ResNet model and perform inference using TensorFlow Serving, you can do it using the code below:
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras import Model
my_resnet = ResNet50(weights='imagenet', include_top=False, input_shape=(224,224,3))
# Add Global Average Pooling Layer
x = my_resnet.output
x = GlobalAveragePooling2D()(x)
# Add an output layer
my_resnet_output = Dense(5, activation='softmax')(x)
# Combine whole Neural Network
my_resnet_model = Model(inputs=my_resnet.input, outputs=my_resnet_output)
my_resnet_model.save('my_flowers')
The last line of code saves the model in the SavedModel format (a directory containing a .pb file).
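Before wiring up Serving, it can be worth sanity-checking the export from Python; a quick check of the saved directory and its serving signature (assuming the my_flowers directory produced above):
import tensorflow as tf
loaded = tf.saved_model.load('my_flowers')
print(list(loaded.signatures.keys()))  # should include 'serving_default'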
Now we need to write the code for the client file, and then we can perform inference using TensorFlow Serving.
import grpc
import requests
import tensorflow as tf
import cv2
import os
import numpy as np
def main():
    IMG_SIZE = 224  # the ResNet50 model above expects 224x224 inputs
    img_array = cv2.imread('daisy.jpg')
    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    new_array = new_array / 255
    import json
    data = json.dumps(
        {"signature_name": "serving_default", "instances": new_array.reshape(-1, 224, 224, 3).tolist()})
    print('Data: {} ... {}'.format(data[:50], data[len(data) - 52:]))
    headers = {"content-type": "application/json"}
    json_response = requests.post('http://35.226.32.128/v1/models/test0221/versions/1:predict', data=data, headers=headers)
    predictions = json.loads(json_response.text)['predictions']
    # dicti maps flower names to label indices; this mapping is an example, define your own
    dicti = {'daisy': 0, 'dandelion': 1, 'roses': 2, 'sunflowers': 3, 'tulips': 4}
    for flower, label in dicti.items():
        if label == np.argmax(predictions[0]):
            print(flower)

if __name__ == '__main__':
    main()

Conv2d input parameter mismatch

I am giving variable-size images (278 images in total, 139 of each of the two categories) as input to my CNN model. Since a CNN requires fixed-size images, the solution I found for this was to set input_shape=(None, None, 1) (for the TensorFlow backend and grayscale). But that solution does not work with a Flatten layer, so the suggestion I found for that was to use GlobalMaxPooling2D or GlobalAveragePooling2D instead. Using these facts, I built the following CNN model in Keras to train my network:
import os,cv2
import numpy as np
from sklearn.utils import shuffle
from keras import backend as K
from keras.utils import np_utils
from keras.models import Sequential
from keras.optimizers import SGD,RMSprop,adam
from keras.layers import Conv2D, MaxPooling2D,BatchNormalization,GlobalAveragePooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import regularizers
from keras import initializers
from skimage.io import imread_collection
from keras.preprocessing import image
from keras import Input
import keras
from keras import backend as K
#%%
PATH = os.getcwd()
# Define data path
data_path = PATH+'/current_exp'
data_dir_list = os.listdir(data_path)
img_rows=None
img_cols=None
num_channel=1
# Define the number of classes
num_classes = 2
img_data_list=[]
for dataset in data_dir_list:
    img_list = os.listdir(data_path+'/'+ dataset)
    print ('Loaded the images of dataset-'+'{}\n'.format(dataset))
    for img in img_list:
        input_img = cv2.imread(data_path + '/'+ dataset + '/'+ img, 0)
        img_data_list.append(input_img)
img_data = np.array(img_data_list)
if num_channel==1:
    if K.image_dim_ordering()=='th':
        img_data = np.expand_dims(img_data, axis=1)
        print (img_data.shape)
    else:
        img_data = np.expand_dims(img_data, axis=4)
        print (img_data.shape)
else:
    if K.image_dim_ordering()=='th':
        img_data = np.rollaxis(img_data, 3, 1)
        print (img_data.shape)
#%%
num_classes = 2
#Total 278 sample, 139 for 0 category and 139 for category 1
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,),dtype='int64')
labels[0:138]=0
labels[138:]=1
x,y = shuffle(img_data,labels, random_state=2)
y = keras.utils.to_categorical(y, 2)
model = Sequential()
model.add(Conv2D(32,(2,2),input_shape=(None,None,1),activation='tanh',kernel_initializer=initializers.glorot_uniform(seed=100)))
model.add(Conv2D(32, (2,2),activation='tanh'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (2,2),activation='tanh'))
model.add(Conv2D(64, (2,2),activation='tanh'))
model.add(MaxPooling2D())
model.add(Dropout(0.25))
#model.add(Flatten())
model.add(GlobalAveragePooling2D())
model.add(Dense(256,activation='tanh'))
model.add(Dropout(0.25))
model.add(Dense(2,activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='rmsprop',metrics=['accuracy'])
model.fit(x, y,batch_size=1,epochs=5,verbose=1)
But I am getting the following error:
ValueError: Error when checking input: expected conv2d_1_input to have 4 dimensions, but got array with shape (278, 1)
How can I solve it?
In the docs for Conv2D it says that the input tensor has to be in this format:
(samples, channels, rows, cols)
I believe you can't have a variable input size unless your network is fully convolutional.
Maybe what you want to do is to keep it to a fixed input size, and just resize the image to that size before feeding it into your network?
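As a hedged sketch of that suggestion (the 128x128 target size here is an arbitrary choice, and the loading loop mirrors the one in the question): resize each image while loading, and the array then stacks cleanly into four dimensions:
FIXED_SIZE = (128, 128)  # arbitrary fixed size; pick whatever suits your data
img_data_list = []
for dataset in data_dir_list:
    for img in os.listdir(data_path + '/' + dataset):
        input_img = cv2.imread(data_path + '/' + dataset + '/' + img, 0)  # grayscale
        img_data_list.append(cv2.resize(input_img, FIXED_SIZE))
img_data = np.expand_dims(np.array(img_data_list), axis=-1)  # shape (278, 128, 128, 1)
With this, input_shape=(128, 128, 1) can replace (None, None, 1) in the first Conv2D layer, and Flatten works again.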
Your array with input data cannot have variable dimensions (this is a numpy limitation). So instead of being a regular array of numbers with 4 dimensions, the array is created as an array of arrays, which is where the (278, 1) shape in the error comes from.
Because of this limitation, you should fit each image individually:
for epoch in range(epochs):
    for img, label in zip(x, y):
        # 'class' is a reserved word in Python, so the variable is named label here
        # expand the first dimension of the image to get a batch size of 1
        img = img.reshape((1,) + img.shape)  # print and check there are 4 dimensions, like (1, width, height, 1)
        label = label.reshape((1,) + label.shape)  # print and check there are two dimensions, like (1, classes)
        model.train_on_batch(img, label)
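Note that train_on_batch returns the loss (and any metrics you compiled with) for that single batch, so you can accumulate those values inside the loops to monitor progress per epoch, since the usual fit progress output is not available with this manual loop.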
