How to save a multilayer perceptron model in TensorFlow - python-3.x

I'm trying to save the following model but am not sure how. I tried placing with tf.Session() as sess: just prior to training my model (history = model.fit_generator...) but received ValueError: No variables to save. Then I tried placing it above my model initialisation at model = Sequential(). I'm new to TensorFlow, so I'm just trying to learn the ropes.
Any guidance would be great, thanks!
import numpy as np
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Activation, Dropout, Input
from keras.layers.core import Dense, Flatten
from keras.optimizers import Adam, Adadelta, SGD
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import *
from sklearn.metrics import confusion_matrix
from keras.models import Model
from keras.utils import np_utils
import itertools
import matplotlib.pyplot as plt
import livelossplot
#%matplotlib inline
#plot_losses = livelossplot.PlotLossesKeras()
PATH = './Food-5K/'
train_path = '%straining/' %PATH
valid_path = '%svalidation/' %PATH
test_path = '%sevaluation/' %PATH
classes = ('food', 'non-food')
print (train_path)
batch_size = 16
epochs = 20
nb_train_samples = 3001
nb_validation_samples = 1000
train_batches = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2,
                                   horizontal_flip=True).flow_from_directory(
    train_path, target_size=(224,224), batch_size=32, class_mode='binary')
valid_batches = ImageDataGenerator(rescale=1./255).flow_from_directory(
    valid_path, target_size=(224,224), batch_size=batch_size, class_mode='binary')
test_batches = ImageDataGenerator(rescale=1./255).flow_from_directory(
    test_path, target_size=(224,224), batch_size=batch_size, class_mode='binary')
print(type(train_batches[0]))
x_train, y_train = train_batches[0]
x_test, y_test = valid_batches[0]
print('x_train.shape: ' + str(x_train.shape))
print('y_train.shape: ' + str(y_train.shape))
print('y_train.shape: ' + str(y_train.reshape(y_train.shape + (1,)).shape))
print('x_test.shape: ' + str(x_test.shape))
print('y_test.shape: ' + str(y_test.shape))
print('y_test.shape: ' + str(y_test.reshape(y_test.shape + (1,)).shape))
X_train_flatten = x_train.reshape(x_train.shape[0], -1).T
X_test_flatten = x_test.reshape(x_test.shape[0], -1).T
y_train_flatten = y_train.T
y_test_flatten = y_test.T
print('X_train_flatten.shape: ' + str(X_train_flatten.T.shape))
print('y_train_flatten.shape: ' + str(y_train_flatten.shape))
#print('y_train_flatten.shape: ' + str(np.squeeze(y_train_flatten, axis=(2,)).shape))
print('X_test_flatten.shape: ' + str(X_test_flatten.T.shape))
print('y_test_flatten.shape: ' + str(y_test_flatten.shape))
#print('y_test_flatten.shape: ' + str(np.squeeze(y_test_flatten, axis=(2,)).shape))
train_set_x = X_train_flatten/255.
test_set_x = X_test_flatten/255.
print('len(train_set_x): ' + str(train_set_x.shape))
print('len(test_set_x): ' + str(test_set_x.shape))
print(y_train.shape)
# plots images with labels within jupyter notebook
def plots(ims, figsize=(80,60), rows=1, interp=False, titles=None):
    if type(ims[0]) is np.ndarray:
        #print(ims[0])
        #ims = np.array(ims).astype(np.uint8)
        #print(ims)
        if ims.shape[-1] != 3:
            ims = ims.transpose((0,2,3,1))  # was (1,2,3,1): move channels last
    f = plt.figure(figsize=figsize)
    cols = len(ims)//rows if len(ims) % 2 == 0 else len(ims)//rows + 1
    for i in range(len(ims)):
        sp = f.add_subplot(rows, cols, i+1)
        sp.axis('Off')
        if titles is not None:
            sp.set_title(titles[i], fontsize=15)
        plt.imshow(ims[i], interpolation=None if interp else 'none')
imgs, labels = next(train_batches)
plots(imgs, titles=labels)
# Deep Multilayer Perceptron model
model = Sequential()
# Set the initial random weights > kernel_initializer
model.add(Flatten(input_shape=(224, 224, 3)))
model.add(Dense(200, input_dim=150528, kernel_initializer='normal'))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(100, input_dim=200, kernel_initializer='normal'))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10, input_dim=100, kernel_initializer='normal'))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1, input_dim=10, kernel_initializer='normal'))
model.add(Activation('sigmoid'))
# rho is an Adadelta hyper-parameter that attenuates the influence of past gradients
# (the model is compiled with Adam here)
model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
# Train
history = model.fit_generator(train_batches,
                              steps_per_epoch=32,  # steps_per_epoch=nb_train_samples,
                              #callbacks=[plot_losses],
                              validation_data=valid_batches,  # was train_batches
                              validation_steps=16,
                              epochs=epochs, verbose=1)
# Evaluate
x_test, y_test = valid_batches[0]
evaluation = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)
print('Summary: Loss over the test dataset: %.2f, Accuracy: %.2f' % (evaluation[0], evaluation[1]))
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

From the Keras documentation:
from keras.models import load_model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model
# identical to the previous one
model = load_model('my_model.h5')
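If you want the model written to disk while fit_generator is still running, a ModelCheckpoint callback is another option. A minimal sketch, assuming the generators defined above (the filename is just an example):
from keras.callbacks import ModelCheckpoint
# Write the best model so far (by validation loss) to disk after each epoch.
checkpoint = ModelCheckpoint('my_model.h5', monitor='val_loss',
                             save_best_only=True, verbose=1)
history = model.fit_generator(train_batches, steps_per_epoch=32,
                              validation_data=valid_batches, validation_steps=16,
                              epochs=epochs, verbose=1,
                              callbacks=[checkpoint])
Note that Keras manages its own TensorFlow session, so no explicit tf.Session wrapper is needed to save a Keras model.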

Related

Why is there no improvement in a categorical data time series model?

I built a simple categorical time series model to predict the next number of a random sequence, but the accuracy hardly moved even though I trained it for 10,000 epochs. The validation loss started to take off after a few hundred epochs. Could anyone make suggestions for improvement? Here's the model:
import os
import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
DEVICE = 'CPU'
if DEVICE == 'CPU':
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
else:
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
print(tf.test.gpu_device_name())
TOTAL_CATALOG=4
POSSIBLE_OUTCOME_COL=4
LOOK_BACK_WINDOW=1
TRAINING_DATA_RATIO=0.8
TRAINING_EPOCHS=10000
sys.path.insert(0, '/DataScience/MyModules')
from m6data import getDrawData, series_to_supervised, Split_data, get_all_categories
def get_all_categories_local(last_combination):
    all_category = np.arange(1, last_combination+1)
    return all_category.reshape(1, all_category.shape[0])
All_categories=get_all_categories_local(TOTAL_CATALOG)
data_sequence = [1,1,2,4,2,3,1,2,3,3,4,1,2,3,4,2,2,3,1,3]
raw_df = pd.DataFrame(data_sequence, columns=['NE'])
values = raw_df.values
# 05-Apr-2022: One-Hot Encoding
oh_encoder = OneHotEncoder(categories=All_categories, sparse=False)
encoded_input = oh_encoder.fit_transform(values)
FEATURES = encoded_input.shape[1]
POSSIBLE_OUTCOME_COL = FEATURES
draw_reframe = series_to_supervised(encoded_input, LOOK_BACK_WINDOW,1)
train, test = Split_data(draw_reframe, TRAINING_DATA_RATIO)
# Total input = all possible One-Hot Encoding outcome * number of look-back samples.
ALL_INPUT = POSSIBLE_OUTCOME_COL * LOOK_BACK_WINDOW
# split into input and outputs
train_X, train_y = train.iloc[:,:ALL_INPUT], train.iloc[:,ALL_INPUT:]
test_X, test_y = test.iloc[:,:ALL_INPUT], test.iloc[:,ALL_INPUT:]
train_X = train_X.values.reshape((train_X.shape[0], LOOK_BACK_WINDOW , FEATURES))
test_X = test_X.values.reshape((test_X.shape[0], LOOK_BACK_WINDOW, FEATURES))
print(train_X.shape, train_y.shape)
print(test_X.shape, test_y.shape)
def create_model():
    model = Sequential()
    model.add(LSTM(10,
                   return_sequences=False,
                   input_shape=(train_X.shape[1], train_X.shape[2]),
                   activation='relu'))
    #model.add(LSTM(20))
    model.add(Dense(units=train_y.shape[1], activation='softmax'))
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.00005),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
model = create_model()
history = model.fit(train_X, train_y,
                    epochs=TRAINING_EPOCHS,
                    batch_size=8,
                    validation_data=(test_X, test_y),
                    verbose=1)
Here are the plots of accuracy and loss (red = training, blue = validation); the images are not reproduced here.
Thank you in advance for any suggestions.
Update (13-Jun-2022)
I changed my model to the following:
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Dropout, BatchNormalization
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
def create_model():
    model = Sequential()
    model.add(LSTM(50,
                   return_sequences=True,
                   input_shape=(train_X.shape[1], train_X.shape[2]),
                   activation='relu'))
    model.add(LSTM(units=1000, kernel_regularizer=regularizers.l1(0.05), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(units=1000, kernel_regularizer=regularizers.l1(0.05), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(units=1000, kernel_regularizer=regularizers.l1(0.05), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(units=1000, kernel_regularizer=regularizers.l1(0.05), activation='relu'))
    model.add(Dropout(0.3))
    model.add(BatchNormalization())
    model.add(Dense(1000))
    model.add(Dense(units=train_y.shape[1], activation='softmax'))
    model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=1e-2, nesterov=True),
                  #tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                              patience=20, min_lr=1e-10)
early_stop = EarlyStopping(monitor='loss', patience=100)
history = model.fit(train_X, train_y,
                    epochs=TRAINING_EPOCHS,
                    batch_size=16,
                    validation_data=(test_X, test_y),  # overrides validation_split=0.1
                    verbose=1,
                    shuffle=False,
                    callbacks=[reduce_lr, early_stop])  # was ([reduce_lr], [early_stop]) and missing the closing parenthesis
Accuracy bounced around and val_accuracy stayed at zero the whole time. The loss and val_loss were almost identical and dropped together.
Can anyone advise what I can do in this scenario?

Using SVM classifier as the last layer for a Pre-trained Model(VGG16)

I have trained my CNN model (binary classification) using Keras, and now I want to use an SVM classifier instead of a fully connected layer for classification.
I used a VGG16 pre-trained network for feature extraction, and I also used data augmentation.
What is the possible way to add an SVM as the last layer for classification?
#Parametres
import keras
from keras.applications import VGG16
import sys
from PIL import Image
#Using VGG16 Pre-trained Model
conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(224, 224, 3))
conv_base.summary()
import numpy as np
import os
from keras.preprocessing.image import ImageDataGenerator
base_dir = 'C:/Covid Detection/Code/Dataset-created'  # stray ')' removed
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
from keras import models
from keras import layers
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
conv_base.trainable = False
from keras import optimizers
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='binary')
# Compile the model
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])
# Train the model
history = model.fit_generator(
    train_generator,
    steps_per_epoch=50,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=50)
# Save the model
model.save('vgg16_aug.h5')
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
base_model = VGG16(weights='imagenet')
model = Model(inputs=base_model.input,
              outputs=base_model.get_layer('flatten').output)
model.summary()
Create a function to extract features using VGG16:
def get_features(img_path):
    img = load_img(img_path, target_size=(224, 224))
    x = img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    flatten = model.predict(x)
    return list(flatten[0])
features, labels = [], []
# Loop over the directory of images and extract features and labels
# (folder is assumed to be an iterable of image paths)
for image_path in folder:
    features.append(get_features(image_path))  # was extract_features, which is undefined
    labels.append("#some label")
Up to here we have extracted the features and labels. Now we train an SVM on these features as follows:
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
X_train, X_test, y_train, y_test = train_test_split(features,
                                                    labels,
                                                    test_size=0.30)
clf = LinearSVC(random_state=0, tol=1e-5)
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)
# get the accuracy
print(accuracy_score(y_test, predicted))

ValueError: Dimensions must be equal (keras)

I'm trying to train an autoencoder but have problems reshaping my X_train to fit my model model().
from tensorflow import keras
from keras.layers import *
from keras.models import Model
from keras.models import Sequential
from keras.optimizers import Adam
from keras.optimizers import RMSprop
from keras.utils import plot_model
X_train = np.array(X_train, dtype=np.float)
X_test =np.array(X_train, dtype=np.float)
X_train = X_train.reshape(len(X_train), 100,1)
X_test = X_test.reshape(len(X_test), 100,1)
#inputs = Input(shape=(230, 1,100))
epoch = 100
batch = 128
def model():
    m = Sequential()
    # ##m.add(Reshape((,)))
    m.add(Flatten())
    m.add(Dense(512, activation='relu'))
    m.add(Dense(128, activation='relu'))
    m.add(Dense(2, activation='linear'))
    m.add(Dense(128, activation='relu'))
    m.add(Dense(512, activation='relu'))
    m.add(Dense(784, activation='sigmoid'))
    m.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['accuracy'])
    # Fit data to model m
    m.fit(X_train, X_train, batch_size=batch, epochs=epoch)
    m.summary()
    #score = m.evaluate(X_test, Y_test, verbose=0)
    #print('Test loss:', score[0])
    #print('Test accuracy:', score[1])
    #m.summary()
mod = model()
The dimensions of my data are as follows:
X_train: (523, 100, 1)
X_test: (523, 100, 1)
To fix your issue, change the following:
X_train = X_train.reshape((-1, 100))
X_test = X_test.reshape((-1, 100))
Delete the Flatten layer and use 100 neurons for the last layer, as stated in the comments.
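Putting these changes together, here is a minimal sketch of the corrected autoencoder, assuming X_train has shape (523, 100, 1) as above (an illustration, not the original poster's final code):
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
X_train = X_train.reshape((-1, 100))  # (523, 100): one 100-feature vector per sample
m = Sequential()
m.add(Dense(512, activation='relu', input_shape=(100,)))
m.add(Dense(128, activation='relu'))
m.add(Dense(2, activation='linear'))     # bottleneck
m.add(Dense(128, activation='relu'))
m.add(Dense(512, activation='relu'))
m.add(Dense(100, activation='sigmoid'))  # output width matches the 100-feature input
m.compile(loss='mean_squared_error', optimizer='rmsprop')
m.fit(X_train, X_train, batch_size=128, epochs=100)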

Memory used up for loading data alone in Keras program

My code is for training VGG16 on custom data with two classes, diseased and not diseased.
I have around 3400 images, and the problem occurs while loading the dataset into memory: the process utilizes 99% of the RAM and gets stuck, eventually leading to the blue screen of death. I am using Spyder; when I followed another example with a smaller data size, it worked fine. Can anyone suggest an efficient method to run this without loading all the images into memory (a generator-based sketch follows the code below)?
PS: my system is capable of running deep-learning code.
import numpy as np
import os
import time
from vgg16 import VGG16
from keras.preprocessing import image
from imagenet_utils import preprocess_input, decode_predictions
from keras.layers import Dense, Activation, Flatten
from keras.layers import merge, Input
from keras.models import Model
from keras.utils import np_utils
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split  # sklearn.cross_validation in older scikit-learn releases
# Loading the training data
PATH = os.getcwd()
# Define data path
data_path = PATH + '/data'
data_dir_list = os.listdir(data_path)
img_data_list=[]
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    print('Loaded the images of dataset-' + '{}\n'.format(dataset))
    for img in img_list:
        img_path = data_path + '/' + dataset + '/' + img
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        # x = x/255
        print('Input image shape:', x.shape)
        img_data_list.append(x)
img_data = np.array(img_data_list)
#img_data = img_data.astype('float32')
print (img_data.shape)
img_data=np.rollaxis(img_data,1,0)
print (img_data.shape)
img_data=img_data[0]
print (img_data.shape)
# Define the number of classes
num_classes = 2
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,),dtype='int64')
labels[0:2345]=0
labels[2245:3567]=1
names = ['YES','NO']
# convert class labels to on-hot encoding
Y = np_utils.to_categorical(labels, num_classes)
#Shuffle the dataset
x,y = shuffle(img_data,Y, random_state=2)
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)
#########################################################################################
# Custom_vgg_model_1
#Training the classifier alone
image_input = Input(shape=(224, 224, 3))
model = VGG16(input_tensor=image_input, include_top=True,weights='imagenet')
model.summary()
last_layer = model.get_layer('fc2').output
#x= Flatten(name='flatten')(last_layer)
out = Dense(num_classes, activation='softmax', name='output')(last_layer)
custom_vgg_model = Model(image_input, out)
custom_vgg_model.summary()
for layer in custom_vgg_model.layers[:-1]:
    layer.trainable = False
custom_vgg_model.layers[3].trainable
custom_vgg_model.compile(loss='categorical_crossentropy',optimizer='rmsprop',metrics=['accuracy'])
t=time.time()
# t = now()
hist = custom_vgg_model.fit(X_train, y_train, batch_size=32, epochs=12, verbose=1, validation_data=(X_test, y_test))
print('Training time: %s' % (t - time.time()))
(loss, accuracy) = custom_vgg_model.evaluate(X_test, y_test, batch_size=10, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
####################################################################################################################
#Training the feature extraction also
image_input = Input(shape=(224, 224, 3))
model = VGG16(input_tensor=image_input, include_top=True,weights='imagenet')
model.summary()
last_layer = model.get_layer('block5_pool').output
x= Flatten(name='flatten')(last_layer)
x = Dense(128, activation='relu', name='fc1')(x)
x = Dense(128, activation='relu', name='fc2')(x)
out = Dense(num_classes, activation='softmax', name='output')(x)
custom_vgg_model2 = Model(image_input, out)
custom_vgg_model2.summary()
# freeze all the layers except the dense layers
for layer in custom_vgg_model2.layers[:-3]:
    layer.trainable = False
custom_vgg_model2.summary()
custom_vgg_model2.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
t=time.time()
# t = now()
hist = custom_vgg_model2.fit(X_train, y_train, batch_size=32, epochs=12, verbose=1, validation_data=(X_test, y_test))
print('Training time: %s' % (t - time.time()))
(loss, accuracy) = custom_vgg_model2.evaluate(X_test, y_test, batch_size=10, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
#%%
import matplotlib.pyplot as plt
# visualizing losses and accuracy
train_loss=hist.history['loss']
val_loss=hist.history['val_loss']
train_acc=hist.history['acc']
val_acc=hist.history['val_acc']
xc=range(12)
plt.figure(1,figsize=(7,5))
plt.plot(xc,train_loss)
plt.plot(xc,val_loss)
plt.xlabel('num of Epochs')
plt.ylabel('loss')
plt.title('train_loss vs val_loss')
plt.grid(True)
plt.legend(['train','val'])
#print plt.style.available # use bmh, classic,ggplot for big pictures
plt.style.use(['classic'])
plt.figure(2,figsize=(7,5))
plt.plot(xc,train_acc)
plt.plot(xc,val_acc)
plt.xlabel('num of Epochs')
plt.ylabel('accuracy')
plt.title('train_acc vs val_acc')
plt.grid(True)
plt.legend(['train','val'],loc=4)
#print plt.style.available # use bmh, classic,ggplot for big pictures
plt.style.use(['classic'])
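A hedged sketch of the generator-based approach asked about above (not part of the original post; it assumes data/ holds one sub-folder per class, as in the loading loop, and a Keras version recent enough to support validation_split in ImageDataGenerator):
from keras.preprocessing.image import ImageDataGenerator
# Stream images from disk in batches instead of building one giant array in RAM.
datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                             validation_split=0.2)
train_gen = datagen.flow_from_directory(data_path, target_size=(224, 224),
                                        batch_size=32, class_mode='categorical',
                                        subset='training')
val_gen = datagen.flow_from_directory(data_path, target_size=(224, 224),
                                      batch_size=32, class_mode='categorical',
                                      subset='validation')
hist = custom_vgg_model.fit_generator(train_gen,
                                      steps_per_epoch=len(train_gen),
                                      epochs=12,
                                      validation_data=val_gen,
                                      validation_steps=len(val_gen))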

Keras Conv1D for Time Series

I am just a novice in the area of deep learning.
I made my first basic attempt with Keras Conv1D. I'm not sure what I did or whether I did it right. My input data is simply total sales per week (a total of 313 weeks) for stores across the US, with a time step of 1.
Here is my code:
from pandas import read_csv
import matplotlib.pyplot as plt
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back):
        a = dataset[i:(i+look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
seed = 7
numpy.random.seed(seed)
dataframe = read_csv('D:/MIS793/Dataset/Academic Dataset External 2/Python scripts/totalsale _byweek.csv', usecols=[1], engine='python')
plt.plot(dataframe)
plt.show()
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = trainX.reshape(trainX.shape[0], trainX.shape[1], 1).astype('float32')
testX = testX.reshape(testX.shape[0], testX.shape[1], 1).astype('float32')
model = Sequential()
model.add(Conv1D(filters=10, kernel_size=1, padding='same', strides=1, activation='relu',input_shape=(1,1)))
model.add(MaxPooling1D(pool_size=1))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
print(model.summary())
model.fit(trainX, trainY, validation_data=(testX, testY), epochs=10, batch_size=100)
scores = model.evaluate(testX, testY, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
Not sure about a few things here:
Reshaping of trainX and testX.
Value of kernel_size and input_shape.
My idea here is that it's just one vector of sales values; 10 filters, each of size 1, move from one value to the next. The input shape is of the format (time steps, dimensions).
I only got an accuracy of 10.91%! So my first question is whether I am feeding in the right parameters.
Thanks
ASC
With model.metrics_names you can get the labels of your scores variable.
In your case it will be ['loss', 'mean_absolute_error'].
So what you are printing is not the accuracy but the MAE, multiplied by 100.
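For example, the names can be paired with the values returned by evaluate (a short illustration, not from the original answer):
scores = model.evaluate(testX, testY, verbose=0)
for name, value in zip(model.metrics_names, scores):
    print('%s: %.4f' % (name, value))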
I tried using accuracy instead of MAE, but got an accuracy of 0%. Just wondering: since this is about predicting numerical values, should I really use accuracy? Here is my latest code.
from pandas import read_csv
import matplotlib.pyplot as plt
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back):
        a = dataset[i:(i+look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
dataframe = read_csv('D:/MIS793/Dataset/Academic Dataset External 2/Python scripts/totalsale _byweek.csv', usecols=[1], engine='python')
plt.plot(dataframe)
plt.show()
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = trainX.reshape(trainX.shape[0], trainX.shape[1],1).astype('float32')
testX = testX.reshape(testX.shape[0], testX.shape[1],1).astype('float32')
model = Sequential()
model.add(Conv1D(filters=20, kernel_size=1, padding='same', strides=1, activation='relu',input_shape=(1,1)))
model.add(MaxPooling1D(pool_size=1))
model.add(Conv1D(filters=10, kernel_size=1, padding='same', strides=1, activation='relu'))
model.add(MaxPooling1D(pool_size=1))
model.add(Flatten())
model.add(Dense(4, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(trainX, trainY, validation_data=(testX, testY), epochs=10, batch_size=100)
scores = model.evaluate(testX, testY, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
Or should I go with MAE? If I go with MAE, my scores look like this:
[0.12740663779013364, 0.31208728355111426]
The first one is the loss and the second one is the MAE. Isn't that a better metric in this case?
The final line would then be:
print("MAE: %.2f" % scores[1])  # no %% and no *100: MAE is not a percentage
Thanks
Anindya
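One further hedged note: since the data were scaled into [0, 1] with MinMaxScaler, the MAE can be mapped back into original sales units. A sketch, assuming scaler is the fitted MinMaxScaler from the code above:
mae_scaled = scores[1]
data_range = scaler.data_max_[0] - scaler.data_min_[0]  # MinMaxScaler is a linear rescaling
print('MAE in original units: %.2f' % (mae_scaled * data_range))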
