How to map classes from predict_classes() to their respective jpeg files? - python-3.x

I am using flow_from_directory to fetch the images and create a generator, which I then pass to predict_generator to predict probabilities and classes. The problem is that when I predict both, the labels get shuffled, although I am not using the shuffle argument anywhere. How can I assign the correct class to the correct label? Below is my complete code:
from __future__ import division
import numpy as np
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten

top_model_weights_path = '/home/rehan/ethnicity.071217.23-0.28.hdf5'
path = "/home/rehan/countries/pakistan/guys/test/"
img_width, img_height = 139, 139
confidence = 0.8

# Pretrained base model used as a feature extractor
model = applications.InceptionResNetV2(include_top=False, weights='imagenet',
                                       input_shape=(img_width, img_height, 3))
print("base pretrained model loaded")

validation_generator = ImageDataGenerator(rescale=1./255).flow_from_directory(
    path, target_size=(img_width, img_height), batch_size=6)
print("generator built")
print(validation_generator.filenames)

features = model.predict_generator(validation_generator)
print("features found")

# Small top model trained on the extracted bottleneck features
model = Sequential()
model.add(Flatten(input_shape=(3, 3, 1536)))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(6, activation='softmax'))
model.load_weights(top_model_weights_path)
print("top model loaded")

prediction_proba = model.predict_proba(features)
prediction_classes = model.predict_classes(features)
print(prediction_proba)
print(prediction_classes)
Output of classes:
[4 4 4 4 0 4 1 0 4 1 3 4]
Output of file names:
['test1/pakistan_guys_19_0327850289.jpg', 'test1/pakistan_guys_19_0328320258.jpg', 'test1/pakistan_guys_19_0328792595.jpg', 'test1/pakistan_guys_19_0329098521.jpg', 'test1/pakistan_guys_19_0330327554.jpg', 'test1/pakistan_guys_19_0331605496.jpg', 'test1/pakistan_guys_19_0340513245.jpg', 'test1/pakistan_guys_19_0340525097.jpg', 'test1/pakistan_guys_19_0340536960.jpg', 'test1/pakistan_guys_19_0340551769.jpg', 'test1/pakistan_guys_19_0341250408.jpg', 'test1/pakistan_guys_19_0341327910.jpg']

By default, shuffle is set to True in flow_from_directory; set it to False in your generator, like this:
validation_generator = ImageDataGenerator(rescale=1./255).flow_from_directory(path, target_size=(img_width, img_height), batch_size=6, shuffle=False)
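With shuffle=False the generator yields images in the order of validation_generator.filenames, so predictions and files line up index by index. A minimal sketch of the mapping, reusing the generator and predictions from the question (class_indices is the generator's folder-name-to-index dict):

# Invert class_indices (folder name -> index) to decode predicted indices
index_to_class = {v: k for k, v in validation_generator.class_indices.items()}

for filename, cls, proba in zip(validation_generator.filenames,
                                prediction_classes,
                                prediction_proba):
    print(filename, index_to_class[cls], proba.max())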

Related

Transfer-learning ResNet model does not learn

I trained a ResNet-50 model to classify images from 6 classes (my own dataset) and saved it. But the model did not learn properly and the predictions are incorrect. What could be the reason for this poor learning?
Here is my code, and the output plots, using Keras with the TensorFlow backend. How can I solve this?
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.layers import Dense, Dropout
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.preprocessing.image import ImageDataGenerator, image
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Define some constants needed throughout the script
N_CLASSES = 6
EPOCHS = 20
PATIENCE = 5
TRAIN_PATH = '/Train/'
VALID_PATH = '/Test/'
MODEL_CHECK_WEIGHT_NAME = 'resnet_monki_v1_chk.h5'

# Define the model: freeze the pretrained ResNet weights and add a few
# layers on top of it for the custom dataset
K.set_learning_phase(0)
model = ResNet50(input_shape=(224, 224, 3), include_top=False, weights='imagenet', pooling='avg')
K.set_learning_phase(1)
x = model.output
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
output = Dense(N_CLASSES, activation='softmax', name='custom_output')(x)
custom_resnet = Model(inputs=model.input, outputs=output)
for layer in model.layers:
    layer.trainable = False
custom_resnet.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
custom_resnet.summary()

# 4. Load the dataset
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
traingen = datagen.flow_from_directory(TRAIN_PATH, target_size=(224, 224), batch_size=32, class_mode='categorical')
validgen = datagen.flow_from_directory(VALID_PATH, target_size=(224, 224), batch_size=32, class_mode='categorical', shuffle=False)

# 5. Train the model; ModelCheckpoint saves the best model by validation accuracy
es_callback = EarlyStopping(monitor='val_acc', patience=PATIENCE, mode='max')
mc_callback = ModelCheckpoint(filepath=MODEL_CHECK_WEIGHT_NAME, monitor='val_acc', save_best_only=True, mode='max')
train_history = custom_resnet.fit_generator(traingen, steps_per_epoch=len(traingen), epochs=EPOCHS,
                                            validation_data=validgen, validation_steps=len(validgen),
                                            verbose=2, callbacks=[es_callback, mc_callback])
custom_resnet.save('custom_resnet.h5')
Here are the plots; I had to put links because the site does not let me embed a picture:
[links to the output plots]

Multi-layer autoencoder using keras, specifying different optimizers

Currently I'm trying to implement a multi-layer autoencoder using Keras, working on the MNIST dataset (handwritten digits). My code looks like this:
from keras.layers import Input, Dense, initializers
from keras.models import Model
import numpy as np
from Dataset import Dataset
import matplotlib.pyplot as plt
from keras import optimizers, losses
from keras import backend as K
import tensorflow as tf
from keras.callbacks import TensorBoard
from keras.layers import Dropout
from keras.models import Sequential
from keras import models
from keras import layers
import keras
from keras.optimizers import Adam

# global variables
d = Dataset()
num_features = d.X_train.shape[1]
low_dim = 32

def autoencoder(epochs):
    w = initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
    model = Sequential()
    # First autoencoder
    model.add(Dense(400, activation='relu', kernel_initializer=w, input_dim=num_features, name='hidden'))
    model.add(Dropout(0.2))
    model.add(Dense(num_features, activation='sigmoid', input_dim=400, name='output'))
    # Second autoencoder
    model.add(Dense(100, activation='relu', kernel_initializer=w, input_dim=num_features, name='hidden2'))
    model.add(Dropout(0.2))
    model.add(Dense(num_features, activation='sigmoid', input_dim=100, name='output2'))
    # Third autoencoder
    model.add(Dense(50, activation='relu', kernel_initializer=w, input_dim=num_features, name='hidden3'))
    model.add(Dropout(0.2))
    model.add(Dense(num_features, activation='sigmoid', input_dim=10, name='output3'))
    model.compile(optimizer=Adam(lr=0.01), loss='binary_crossentropy', metrics=['accuracy'])
    history = model.fit(d.X_train, d.X_train,
                        epochs=epochs,
                        batch_size=64,
                        shuffle=True,
                        validation_data=(d.X_test, d.X_test))
    model.test_on_batch(d.X_test, d.X_test)
    print(history.history.keys())
    plt.plot(history.history['acc'])
    print(history.history['acc'])
    plt.show()
    return model

def finding_index():
    elements, index = np.unique(d.Y_test, return_index=True)
    return elements, index

def plotting():
    ae = autoencoder(2)
    elements, index = finding_index()
    y_proba = ae.predict(d.X_test)
    plt.figure(figsize=(20, 4))
    # size = 20
    for i in range(len(index)):
        # Original digit on the top row
        ax = plt.subplot(2, len(index), i + 1)
        plt.imshow(d.X_test[index[i]].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Reconstruction on the bottom row
        ax = plt.subplot(2, len(index), i + 1 + len(index))
        plt.imshow(y_proba[index[i]].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()

plotting()
I have two questions. First, is it supposed to be like this when you stack autoencoders, or should I let one layer reduce the dimensions to, say, 400, then the next to 100, and so on? Second, can you use different optimizers (in my case Adam) for different layers? I would like to use SGD (stochastic gradient descent) for the last layer. Thanks in advance!
You should not do it the way you've done it, but the way you described it in the question. You should also go down first and then up again (e.g. 400, 100, 50, 25, 10, 25, 50, 100, 400) in granular steps.
For the second question, the answer is: it depends. You could train the model with Adam first and then freeze all but the last layer, to train that layer further with SGD. But you can't tell Keras to use different optimizers for different layers within a single compile.
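A minimal sketch of that two-phase idea, on a toy two-layer model (the layer sizes and learning rates below are placeholders, not values from the question):

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam, SGD

model = Sequential([
    Dense(100, activation='relu', input_dim=400),
    Dense(400, activation='sigmoid'),
])

# Phase 1: train everything with Adam
model.compile(optimizer=Adam(lr=0.01), loss='binary_crossentropy')
# model.fit(x_train, x_train, epochs=10)

# Phase 2: freeze all but the last layer, recompile, continue with SGD
for layer in model.layers[:-1]:
    layer.trainable = False
model.compile(optimizer=SGD(lr=0.001), loss='binary_crossentropy')
# model.fit(x_train, x_train, epochs=10)

Recompiling is what makes the optimizer switch take effect; the weights learned in phase 1 are kept.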

Adding a layer stops learning (Keras)

Code
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import LeakyReLU, Dropout, Flatten, Dense, Input
from keras import applications
from keras.preprocessing import image
from keras import backend as K
from keras import regularizers
from keras.optimizers import adam

K.set_image_dim_ordering('tf')
input_tensor = Input(shape=(150, 150, 3))
img_width, img_height = 150, 150
top_model_weights_path = 'bottleneck_fc_model.h5'
train_data_dir = 'Cats and Dogs Dataset/train'
validation_data_dir = 'Cats and Dogs Dataset/validation'
nb_train_samples = 20000
nb_validation_samples = 5000
epochs = 50
batch_size = 128

base_model = applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_tensor=input_tensor, pooling=None)
i = 0
for layer in base_model.layers:
    layer.trainable = False
    i += 1
base_model.output

top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(1024, activation="relu"))
top_model.add(Dropout(0.5))
top_model.add(Dense(10, activation="relu"))  # layer with the issue
top_model.add(Dropout(0.8))                  #
top_model.add(Dense(2, activation='softmax'))
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
model.summary()

datagen = ImageDataGenerator(rescale=1. / 255)
train_data = datagen.flow_from_directory(train_data_dir, target_size=(img_width, img_height), batch_size=batch_size, classes=['cats', 'dogs'])  # , class_mode="binary", shuffle=True)
validation_data = datagen.flow_from_directory(validation_data_dir, target_size=(img_width, img_height), batch_size=batch_size, classes=['cats', 'dogs'])  # , class_mode="binary", shuffle=True)
adm = adam(lr=0.02)
model.compile(optimizer=adm, loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(train_data, steps_per_epoch=nb_train_samples // batch_size, epochs=epochs, validation_data=validation_data, shuffle=True, verbose=1)
I have implemented an image classifier on the cats-and-dogs dataset (https://www.kaggle.com/c/dogs-vs-cats/data) using Keras (transfer learning with the Inception network). The code runs without errors, but the accuracy is stuck at 50% for both the validation set and the training set from the first epoch, and the loss isn't decreasing. I am using Atom with Hydrogen.
The issue goes away when I remove the marked layer; I can't seem to understand why this is happening.
What I have tried to fix this:
- different batch sizes: 4, 16, 64, 256
- changing the optimizer: tried adam, rmsprop, and sgd with modified learning rates
- different activations for the layer: relu, sigmoid, and leakyrelu
- changing the dropout: the issue vanishes when the dropout is 0.9 (i.e. the layer is made useless; this works for the obvious reason, but it also points out that there is something I am missing)
- changing the final activation to sigmoid
Can someone please tell me what I am missing, because I can't think of any reason why adding a layer stops learning.
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import LeakyReLU, Dropout, Flatten, Dense, Input
from keras import applications
from keras.preprocessing import image
from keras import backend as K
from keras import regularizers
from keras.optimizers import adam

K.set_image_dim_ordering('tf')
input_tensor = Input(shape=(150, 150, 3))
img_width, img_height = 150, 150
top_model_weights_path = 'bottleneck_fc_model.h5'
train_data_dir = 'Cats and Dogs Dataset/train'
validation_data_dir = 'Cats and Dogs Dataset/validation'
nb_train_samples = 20000
nb_validation_samples = 5000
epochs = 50
batch_size = 64

base_model = applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_tensor=input_tensor, pooling=None)
i = 0
for layer in base_model.layers:
    layer.trainable = False
    i += 1
base_model.output

top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(512, activation="relu"))  # decreased number of units
top_model.add(Dropout(0.4))                   # changed dropout
top_model.add(Dense(128, activation="relu"))  # increased number of units
top_model.add(Dropout(0.2))                   # decreased dropout
top_model.add(Dense(2, activation='softmax'))
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
model.summary()

datagen = ImageDataGenerator(rescale=1. / 255)
train_data = datagen.flow_from_directory(train_data_dir, target_size=(img_width, img_height), batch_size=batch_size, classes=['cats', 'dogs'])
validation_data = datagen.flow_from_directory(validation_data_dir, target_size=(img_width, img_height), batch_size=batch_size, classes=['cats', 'dogs'])
adm = adam(lr=0.02)
model.compile(optimizer=adm, loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(train_data, steps_per_epoch=nb_train_samples // batch_size, epochs=epochs, validation_data=validation_data, shuffle=True, verbose=1)
I have reduced the number of units in the first dense layer and increased the number of units in the second dense layer, and also decreased the dropout rates. Run this code and let me know. One more thing: the more complex the network is, the higher the chance of over-fitting, and too high a dropout value can result in no learning for that layer. Try to keep your network simple.

Multi-label text classification: I have a text/label CSV. Text is pure text, labels are alphanumeric

import pandas as pd
import numpy as np
import keras
import keras.backend as K
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers.core import Activation
from keras.preprocessing.text import Tokenizer          # for tokenizing text
from keras.preprocessing.sequence import pad_sequences  # for padding sentences with zeros, to make sentence lengths equal
from keras.utils import to_categorical                  # for one-hot encoding of the labels
from keras.layers import Dense, Input, Flatten, Dropout, BatchNormalization
from keras.layers import Conv1D, MaxPooling1D, Embedding
from sklearn.model_selection import train_test_split

MAX_SEQUENCE_LENGTH = 300
MAX_NB_WORDS = 20000

# Reading the data
raw_data = pd.read_csv("/home/riaz.k/Desktop/TRAIN.csv")
raw_data.head()

# create training and testing vars
train, test = train_test_split(raw_data, test_size=0.3)
train.head()
test.head()

tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(train.Procedure)
train_sequences = tokenizer.texts_to_sequences(train.Procedure)
test_sequences = tokenizer.texts_to_sequences(test.Procedure)
word_index = tokenizer.word_index  # dictionary containing words and their index
# print(tokenizer.word_index)
print('Found %s unique tokens.' % len(word_index))

train_data = pad_sequences(train_sequences, maxlen=MAX_SEQUENCE_LENGTH)
test_data = pad_sequences(test_sequences, maxlen=MAX_SEQUENCE_LENGTH)
print(train_data.shape)
print(test_data.shape)
print(word_index)

train_labels = train['dxcode']
test_labels = test['dxcode']

from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()  # converts the character array to a numeric array; assigns levels to unique labels
le.fit(train_labels)
le.fit(test_labels)
train_labels = le.transform(train_labels)
test_labels = le.transform(test_labels)
print(le.classes_)
print(np.unique(train_labels, return_counts=True))
print(np.unique(test_labels, return_counts=True))
le.inverse_transform(1)

labels_train = to_categorical(np.asarray(train_labels))
labels_test = to_categorical(np.asarray(test_labels))
print('Shape of data tensor:', train_data.shape)
print('Shape of label tensor:', labels_train.shape)
print('Shape of label tensor:', labels_test.shape)

EMBEDDING_DIM = 100
print(MAX_SEQUENCE_LENGTH)
print('Training model.')

model = Sequential()
model.add(Embedding(MAX_NB_WORDS,
                    EMBEDDING_DIM,
                    input_length=MAX_SEQUENCE_LENGTH))
model.add(Dropout(0.2))
model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(23, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])
model.fit(train_data, labels_train,
          batch_size=32,
          epochs=10,
          validation_data=(test_data, labels_test))
model.evaluate(test_data, labels_test)

pred = model.predict(test_data)
pred
# print(model.layers)
for layer in model.layers:
    print(layer)

emd = K.function(inputs=[model.layers[0].input],
                 outputs=[model.layers[0].output])
rbind = np.concatenate((train_data, test_data), axis=0)
print(rbind.shape)

### Submissions file
test_results = model.predict_classes(rbind)
# print(test_results)
test_labels = le.inverse_transform(test_results)
# test_labels = [le.inverse_transform(i) for i in test_results]
submissions_CNN = pd.DataFrame({'id': raw_data['Claimno'], "label": test_labels})
submissions_CNN.to_csv("/home/riaz.k/Desktop/submissions.csv", index=False)
The text document can be labelled with more than one label, so how can I do a multi-label classification on this dataset? I've read a lot of documentation from sklearn, but I can't seem to find the right way to do multi-label classification. Thanks in advance for any help.
Are you getting the error on this line:
train_labels = le.transform(train_labels)
If yes, then it's because in the line just above it, you are doing this:
le.fit(test_labels)
What this does is forget the previous data (the previous call to fit() on the line above) and only remember the data in test_labels. So when a label that is present in train but not in test comes along, it throws this error.
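A tiny demonstration of that behaviour, with made-up labels:

from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
le.fit(['A', 'B', 'C'])  # first fit
le.fit(['B', 'C'])       # second fit discards 'A'
le.transform(['A'])      # raises ValueError: y contains previously unseen labels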
You need to replace the lines:
le.fit(train_labels)
le.fit(test_labels)
with this:
# I am using .tolist() because I observe that your
# train_labels, test_labels are pandas Series objects
le.fit(train_labels.tolist() + test_labels.tolist())
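On the multi-label part of the question itself: LabelEncoder plus a softmax output assumes exactly one class per sample. If a document can carry several labels at once, a common alternative is sklearn's MultiLabelBinarizer with a sigmoid output layer and binary_crossentropy. A minimal sketch (the label lists below are hypothetical; your dxcode column would need to be parsed into a list of labels per row):

from sklearn.preprocessing import MultiLabelBinarizer

# Hypothetical: one list of labels per document
train_label_lists = [['A12', 'B07'], ['C33'], ['A12']]

mlb = MultiLabelBinarizer()
labels_train = mlb.fit_transform(train_label_lists)  # multi-hot matrix, one column per label

# The last two model lines would then change accordingly:
# model.add(Dense(len(mlb.classes_), activation='sigmoid'))
# model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])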

Keras Conv1D for Time Series

I am just a novice in the area of deep learning.
I made my first basic attempt with Keras Conv1D. I'm not sure what I did and whether I did it right. My input data is simply total sales by week (313 weeks in total) for stores across the US, with a time step of 1.
Here is my code:
from pandas import read_csv
import matplotlib.pyplot as plt
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back):
        a = dataset[i:(i+look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
seed = 7
numpy.random.seed(seed)
dataframe = read_csv('D:/MIS793/Dataset/Academic Dataset External 2/Python scripts/totalsale _byweek.csv', usecols=[1], engine='python')
plt.plot(dataframe)
plt.show()
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = trainX.reshape(trainX.shape[0], trainX.shape[1], 1).astype('float32')
testX = testX.reshape(testX.shape[0], testX.shape[1], 1).astype('float32')
model = Sequential()
model.add(Conv1D(filters=10, kernel_size=1, padding='same', strides=1, activation='relu',input_shape=(1,1)))
model.add(MaxPooling1D(pool_size=1))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
print(model.summary())
model.fit(trainX, trainY, validation_data=(testX, testY), epochs=10, batch_size=100)
scores = model.evaluate(testX, testY, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
I'm not sure about a few things here:
- the reshaping of trainX and testX
- the values of kernel_size and input_shape
My idea here is that it's just one vector of sales values: 10 filters, each of size 1, move from one value to the next. The input shape has the format (time steps, dimensions).
I only got an accuracy of 10.91%! So my first question is whether I am feeding in the right parameters.
Thanks
ASC
With model.metrics_names you can get the labels of your scores variable.
In your case it will be ['loss', 'mean_absolute_error'].
So what you are printing is not the accuracy but the MAE, multiplied by 100.
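A quick way to print each score next to its label, using the model and test data from the question:

scores = model.evaluate(testX, testY, verbose=0)
for name, value in zip(model.metrics_names, scores):
    print("%s: %.4f" % (name, value))
# prints e.g.:
# loss: 0.1274
# mean_absolute_error: 0.3121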
I tried using accuracy instead of MAE. However, I got an accuracy of 0%. Just wondering: since this is about predicting numerical values, should I really use accuracy? Here is my latest code.
from pandas import read_csv
import matplotlib.pyplot as plt
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back):
        a = dataset[i:(i+look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
dataframe = read_csv('D:/MIS793/Dataset/Academic Dataset External 2/Python scripts/totalsale _byweek.csv', usecols=[1], engine='python')
plt.plot(dataframe)
plt.show()
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = trainX.reshape(trainX.shape[0], trainX.shape[1],1).astype('float32')
testX = testX.reshape(testX.shape[0], testX.shape[1],1).astype('float32')
model = Sequential()
model.add(Conv1D(filters=20, kernel_size=1, padding='same', strides=1, activation='relu',input_shape=(1,1)))
model.add(MaxPooling1D(pool_size=1))
model.add(Conv1D(filters=10, kernel_size=1, padding='same', strides=1, activation='relu'))
model.add(MaxPooling1D(pool_size=1))
model.add(Flatten())
model.add(Dense(4, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(trainX, trainY, validation_data=(testX, testY), epochs=10, batch_size=100)
scores = model.evaluate(testX, testY, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
Or should I go with MAE? If I go with MAE, my scores will look like this:
[0.12740663779013364, 0.31208728355111426]
The first one is the loss and the second one is the MAE. Isn't that a better metric in this case?
The final line would then be:
print("MAE: %.2f" % scores[1])
Thanks
Anindya
