I have built a CNN classifier using Keras, with:
Training dataset: 3194 images
Validation dataset: 1020 images
Test dataset: 1023 images
I used the code below to build the classifier and evaluate it on the training and validation datasets.
from keras.models import Sequential
from keras.layers import Conv2D,Activation,MaxPooling2D,Dense,Flatten,Dropout
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from IPython.display import display
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.metrics import classification_report, confusion_matrix
import keras
from keras.layers import BatchNormalization
from keras.optimizers import Adam
classifier = Sequential()
classifier.add(Conv2D(16,(3,3),input_shape=(200,200,3)))
classifier.add(Activation('relu'))
classifier.add(Flatten())
classifier.add(Dense(128))
classifier.add(Activation('relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(2))
classifier.add(Activation('softmax'))
classifier.summary()
classifier.compile(optimizer=keras.optimizers.Adam(lr=0.1),
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale = 1./255)
batchsize=10
training_set = train_datagen.flow_from_directory('/home/osboxes/Downloads/Downloads/Journal_Paper/Benign_Malicious/Spectrogram/Train/',
                                                 target_size=(200, 200),
                                                 batch_size=batchsize,
                                                 class_mode='categorical')
test_set = test_datagen.flow_from_directory('/home/osboxes/Downloads/Downloads/Journal_Paper/Benign_Malicious/Spectrogram/Validate/',
                                            target_size=(200, 200),
                                            batch_size=batchsize,
                                            shuffle=False,
                                            class_mode='categorical')
history = classifier.fit_generator(training_set,
                                   steps_per_epoch=3194 // batchsize,
                                   epochs=100,
                                   validation_data=test_set,
                                   validation_steps=1020 // batchsize)
Y_pred = classifier.predict_generator(test_set, steps= 1020 // batchsize)
y_pred = np.argmax(Y_pred, axis=1)
print('Confusion Matrix')
print(confusion_matrix(test_set.classes, y_pred))
print('Classification Report')
class_labels = list(test_set.class_indices.keys())
target_names = ['Benign', 'Malicious']
report = classification_report(test_set.classes, y_pred, target_names=class_labels)
print(report)
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy (16 filters, dropout, lr 0.1)')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss (16 filters, dropout, lr 0.1)')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
classifier.save('16_With_Dropout_rl_001.h5')
After saving the model, I loaded it using the code below and tried to evaluate it on the test dataset (which the classifier never saw during building and evaluation). However, the code gives me this error: ValueError: Found input variables with inconsistent numbers of samples: [1023, 1020]. Any ideas why?
from keras.models import Sequential
from keras.layers import Conv2D,Activation,MaxPooling2D,Dense,Flatten,Dropout
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from IPython.display import display
import matplotlib.pyplot as plt
from PIL import Image
from keras.models import load_model
from sklearn.metrics import classification_report, confusion_matrix
import pickle
classifier = load_model('16_With_Dropout_rl_001.h5')
batchsize=10
test_datagen = ImageDataGenerator(rescale = 1./255)
test_set = test_datagen.flow_from_directory('/home/osboxes/Downloads/Downloads/Journal_Paper/Benign_Malicious/Spectrogram/Test/',
                                            target_size=(200, 200),
                                            batch_size=batchsize,
                                            shuffle=False,
                                            class_mode='categorical')
Y_pred = classifier.predict_generator(test_set, steps= 1023 // batchsize)
y_pred = np.argmax(Y_pred, axis=1)
print('Confusion Matrix')
print(confusion_matrix(test_set.classes, y_pred))
print('Classification Report')
class_labels = list(test_set.class_indices.keys())
target_names = ['Benign', 'Malicious']
report = classification_report(test_set.classes, y_pred, target_names=class_labels)
print(report)
This is because of the number of steps in predict_generator(): 1023 // 10 yields 102 batches, and 102 batches of size 10 yield only 1020 predictions, while test_set.classes contains 1023 labels, hence the [1023, 1020] mismatch. Change the line to:
Y_pred = classifier.predict_generator(test_set, steps= (1023 // batchsize)+1)
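More generally, a minimal sketch (assuming the test_set generator from the question, with shuffle=False so predictions stay aligned with test_set.classes) is to derive the step count from the generator itself instead of hard-coding the sample count:
import math

# One pass over every sample: round the batch count up, not down
steps = math.ceil(test_set.samples / test_set.batch_size)   # 103 batches for 1023 samples

Y_pred = classifier.predict_generator(test_set, steps=steps)
y_pred = np.argmax(Y_pred, axis=1)
assert len(y_pred) == len(test_set.classes)   # both 1023 now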
Related
I have trained a binary-classification CNN using Keras, and now I want to use an SVM classifier instead of a fully connected layer for classification.
I used a pre-trained VGG16 network for feature extraction, and I also used data augmentation.
What is the possible way to add an SVM as the last layer for classification?
# Parameters
import keras
from keras.applications import VGG16
import sys
from PIL import Image
# Using the VGG16 pre-trained model
conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(224, 224, 3))
conv_base.summary()
import numpy as np
import os
from keras.preprocessing.image import ImageDataGenerator
base_dir = 'C:/Covid Detection/Code/Dataset-created'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
from keras import models
from keras import layers
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
conv_base.trainable = False
from keras import optimizers
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='binary')
# Compile the model
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])
# Train the model
history = model.fit_generator(
    train_generator,
    steps_per_epoch=50,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=50)
# Save the model
model.save('vgg16_aug.h5')
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
base_model = VGG16(weights='imagenet')
model = Model(inputs=base_model.input,
              outputs=base_model.get_layer('flatten').output)
model.summary()
Create a function to extract features using VGG16:
def get_features(img_path):
    img = load_img(img_path, target_size=(224, 224))
    x = img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    flatten = model.predict(x)
    return list(flatten[0])
features, labels = [], []
# Loop over the directory of images and extract features and labels
# (`folder` stands in for your list of image paths)
for image_path in folder:
    features.append(get_features(image_path))
    labels.append("#some label")
Up to here we have extracted the features and labels. Now we train an SVM on these features as follows:
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
X_train, X_test, y_train, y_test = train_test_split(features,
                                                    labels,
                                                    test_size=0.30)
clf = LinearSVC(random_state=0, tol=1e-5)
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)
# get the accuracy
print(accuracy_score(y_test, predicted))
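As a follow-up usage note: if you already have the flow_from_directory generators from the training script above, you can also collect features in bulk by running the frozen conv_base over each generator once and fitting the SVM on the pooled activations. A sketch under those assumptions (extract_from_generator is my own helper, and the step counts mirror the ones used in training):
import numpy as np
from sklearn.svm import LinearSVC

def extract_from_generator(generator, steps):
    # Run the frozen VGG16 base over `steps` batches, collecting features and labels
    feats, labs = [], []
    for _ in range(steps):
        x_batch, y_batch = next(generator)
        f = conv_base.predict(x_batch)        # shape (batch, 7, 7, 512)
        feats.append(f.reshape(len(f), -1))   # flatten to (batch, 25088)
        labs.append(y_batch)
    return np.vstack(feats), np.concatenate(labs)

X_train, y_train = extract_from_generator(train_generator, steps=50)
X_val, y_val = extract_from_generator(validation_generator, steps=50)

clf = LinearSVC(random_state=0, tol=1e-5)
clf.fit(X_train, y_train)
print(clf.score(X_val, y_val))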
I trained a ResNet-50 model to classify images from 6 classes (my own dataset) and saved it. But the model did not learn properly and its predictions are incorrect. What could be the reason for this poor learning?
Here is my code and the output plots, using Keras with the TensorFlow backend. How can I solve this?
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.layers import Dense, Dropout
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.preprocessing.image import ImageDataGenerator, image
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Define some constant needed throughout the script
N_CLASSES = 6
EPOCHS = 20
PATIENCE = 5
TRAIN_PATH= '/Train/'
VALID_PATH = '/Test/'
MODEL_CHECK_WEIGHT_NAME = 'resnet_monki_v1_chk.h5'
# Define the model: freeze the pre-trained ResNet weights and add a few layers on top of it for our custom dataset
K.set_learning_phase(0)
model = ResNet50(input_shape=(224,224,3),include_top=False, weights='imagenet', pooling='avg')
K.set_learning_phase(1)
x = model.output
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
output = Dense(N_CLASSES, activation='softmax', name='custom_output')(x)
custom_resnet = Model(inputs=model.input, outputs = output)
for layer in model.layers:
    layer.trainable = False
custom_resnet.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
custom_resnet.summary()
# 4. Load dataset to be used
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
traingen = datagen.flow_from_directory(TRAIN_PATH, target_size=(224,224), batch_size=32, class_mode='categorical')
validgen = datagen.flow_from_directory(VALID_PATH, target_size=(224,224), batch_size=32, class_mode='categorical', shuffle=False)
# 5. Train the model; ModelCheckpoint saves the best model based on validation accuracy
es_callback = EarlyStopping(monitor='val_acc', patience=PATIENCE, mode='max')
mc_callback = ModelCheckpoint(filepath=MODEL_CHECK_WEIGHT_NAME, monitor='val_acc', save_best_only=True, mode='max')
train_history = custom_resnet.fit_generator(traingen,
                                            steps_per_epoch=len(traingen),
                                            epochs=EPOCHS,
                                            validation_data=traingen,
                                            validation_steps=len(validgen),
                                            verbose=2,
                                            callbacks=[es_callback, mc_callback])
custom_resnet.save('custom_resnet.h5')
Here are the plots; I had to post them as links, since the site does not let me embed a picture. [plot links omitted]
I loaded the Fashion-MNIST dataset through fashion_mnist.load_data() and tried to train a ResNet50 neural network on it. But I don't know how to reshape the dataset images from (28, 28, 1) to (224, 224, 3), as required as input by ResNet.
I am using Python 3 and Keras 2.2.4.
This is my code:
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import time
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Flatten, Dense, Dropout
from tensorflow.python.keras.applications.resnet50 import ResNet50, preprocess_input
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.preprocessing import image
from PIL import Image
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat','Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
IMAGE_SIZE = (224,224)
NUM_CLASSES = 10
BATCH_SIZE = 8 # try reducing batch size or freeze more layers if your GPU runs out of memory
FREEZE_LAYERS = 2 # freeze the first this many layers for training
NUM_EPOCHS = 20
WEIGHTS_FINAL = 'model_fashion_resnet.h5'
train_images = preprocess_input(train_images)
train_images = np.expand_dims(train_images, axis=0)
train_labels = preprocess_input(train_labels)
train_labels = np.expand_dims(train_labels, axis=0)
test_images = preprocess_input(test_images)
test_images = np.expand_dims(test_images, axis=0)
net = ResNet50(include_top=False, weights='imagenet', input_tensor=None,
               input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
x = net.output
x = Flatten()(x)
x = Dropout(0.5)(x)
output_layer = Dense(NUM_CLASSES, activation='softmax', name='softmax')(x)
model = Model(inputs=net.input, outputs=output_layer)
for layer in model.layers[:FREEZE_LAYERS]:
    layer.trainable = False
for layer in model.layers[FREEZE_LAYERS:]:
    layer.trainable = True
model.compile(optimizer=Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])
print(model.summary())
inizio=time.time()
datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)
model.fit_generator(datagen.flow(train_images, train_labels, batch_size=BATCH_SIZE),
                    steps_per_epoch=len(train_images) / BATCH_SIZE, epochs=NUM_EPOCHS)
And this is what I receive after running it:
ValueError: Error when checking input: expected input_1 to have shape (224, 224, 3) but got array with shape (60000, 28, 28)
How can I change the Fashion-MNIST images so that they can be fed into the ResNet50 network?
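One common approach (a sketch of my own, not from the original post; the helper name and batch size are hypothetical) is to expand each grayscale image to three channels and resize it with PIL, converting a small batch at a time, because materialising all 60,000 images as 224x224x3 float32 would need roughly 36 GB of memory:
import numpy as np
from PIL import Image
from tensorflow.python.keras.applications.resnet50 import preprocess_input
from tensorflow.python.keras.utils import to_categorical

def resnet_batch(gray_images, labels, num_classes=10):
    # Convert (N, 28, 28) uint8 grayscale images to (N, 224, 224, 3) ResNet input
    out = np.zeros((len(gray_images), 224, 224, 3), dtype='float32')
    for i, img in enumerate(gray_images):
        rgb = Image.fromarray(img).convert('RGB').resize((224, 224))
        out[i] = np.asarray(rgb, dtype='float32')
    return preprocess_input(out), to_categorical(labels, num_classes)

# Convert one small batch at a time (e.g. inside a generator fed to fit_generator)
x_batch, y_batch = resnet_batch(train_images[:8], train_labels[:8])
print(x_batch.shape, y_batch.shape)   # (8, 224, 224, 3) (8, 10)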
I'm trying to save the following model but am not sure how. I've tried placing with tf.Session() as sess: just prior to training my model (history = model.fit_generator...) but received ValueError: No variables to save. Then I tried placing it above my model initialisation at model = Sequential(). I'm new to TensorFlow, so I'm just trying to learn the ropes.
Any guidance would be great, thanks!
import numpy as np
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Activation, Dropout, Input
from keras.layers.core import Dense, Flatten
from keras.optimizers import Adam, Adadelta, SGD
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import *
from sklearn.metrics import confusion_matrix
from keras.models import Model
from keras.utils import np_utils
import itertools
import matplotlib.pyplot as plt
import livelossplot
#%matplotlib inline
#plot_losses = livelossplot.PlotLossesKeras()
PATH = './Food-5K/'
train_path = '%straining/' %PATH
valid_path = '%svalidation/' %PATH
test_path = '%sevaluation/' %PATH
classes = ('food', 'non-food')
print (train_path)
batch_size = 16
epochs = 20
nb_train_samples = 3001
nb_validation_samples = 1000
train_batches = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2,
                                   horizontal_flip=True).flow_from_directory(
    train_path, target_size=(224, 224), batch_size=32, class_mode='binary')
valid_batches = ImageDataGenerator(rescale=1./255).flow_from_directory(
    valid_path, target_size=(224, 224), batch_size=batch_size, class_mode='binary')
test_batches = ImageDataGenerator(rescale=1./255).flow_from_directory(
    test_path, target_size=(224, 224), batch_size=batch_size, class_mode='binary')
print(type(train_batches[0]))
x_train, y_train = train_batches[0]
x_test, y_test = valid_batches[0]
print('x_train.shape: ' + str(x_train.shape))
print('y_train.shape: ' + str(y_train.shape))
print('y_train.shape: ' + str(y_train.reshape(y_train.shape + (1,)).shape))
print('x_test.shape: ' + str(x_test.shape))
print('y_test.shape: ' + str(y_test.shape))
print('y_test.shape: ' + str(y_test.reshape(y_test.shape + (1,)).shape))
X_train_flatten = x_train.reshape(x_train.shape[0], -1).T
X_test_flatten = x_test.reshape(x_test.shape[0], -1).T
y_train_flatten = y_train.T
y_test_flatten = y_test.T
print('X_train_flatten.shape: ' + str(X_train_flatten.T.shape))
print('y_train_flatten.shape: ' + str(y_train_flatten.shape))
#print('y_train_flatten.shape: ' + str(np.squeeze(y_train_flatten, axis=(2,)).shape))
print('X_test_flatten.shape: ' + str(X_test_flatten.T.shape))
print('y_test_flatten.shape: ' + str(y_test_flatten.shape))
#print('y_test_flatten.shape: ' + str(np.squeeze(y_test_flatten, axis=(2,)).shape))
train_set_x = X_train_flatten/255.
test_set_x = X_test_flatten/255.
print('len(train_set_x): ' + str(train_set_x.shape))
print('len(test_set_x): ' + str(test_set_x.shape))
print(y_train.shape)
# plots images with labels within jupyter notebook
def plots(ims, figsize=(80, 60), rows=1, interp=False, titles=None):
    if type(ims[0]) is np.ndarray:
        # ims = np.array(ims).astype(np.uint8)
        if ims.shape[-1] != 3:
            ims = ims.transpose((0, 2, 3, 1))  # channels-first -> channels-last
    f = plt.figure(figsize=figsize)
    cols = len(ims) // rows if len(ims) % 2 == 0 else len(ims) // rows + 1
    for i in range(len(ims)):
        sp = f.add_subplot(rows, cols, i + 1)
        sp.axis('Off')
        if titles is not None:
            sp.set_title(titles[i], fontsize=15)
        plt.imshow(ims[i], interpolation=None if interp else 'none')
imgs, labels = next(train_batches)
plots(imgs, titles=labels)
# Deep Multilayer Perceptron model
model = Sequential()
# Set the initial random weights > kernel_initializer
model.add(Flatten(input_shape=(224, 224, 3)))
model.add(Dense(200, input_dim=150528, kernel_initializer='normal'))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(100, input_dim=200, kernel_initializer='normal'))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10, input_dim=100, kernel_initializer='normal'))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1, input_dim=10, kernel_initializer='normal'))
model.add(Activation('sigmoid'))
model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
# Train
history = model.fit_generator(train_batches, steps_per_epoch=32,  # steps_per_epoch=nb_train_samples
                              # callbacks=[plot_losses],
                              validation_steps=16,
                              validation_data=train_batches, epochs=epochs, verbose=1)
# Evaluate
x_test, y_test = valid_batches[0]
evaluation = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)
print('Summary: Loss over the test dataset: %.2f, Accuracy: %.2f' % (evaluation[0], evaluation[1]))
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
From the Keras documentation:
from keras.models import load_model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model
# identical to the previous one
model = load_model('my_model.h5')
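In other words, a Keras model needs no tf.Session or tf.train.Saver at all; model.save() serialises the architecture, weights, and optimizer state in one call (the ValueError: No variables to save typically comes from mixing raw-TensorFlow saving into a Keras workflow). Applied to the script above, a minimal sketch (the filename is my own):
from keras.models import load_model

model.save('food5k_mlp.h5')             # save after fit_generator finishes

restored = load_model('food5k_mlp.h5')  # later: reload and evaluate as before
print(restored.evaluate(x_test, y_test, batch_size=batch_size))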
I am just a novice in the area of deep learning.
I made my first basic attempt with Keras Conv1D, and I am not sure what I did or whether I did it right. My input data is simply total sales by week (313 weeks in total) for stores across the US, with a time step of 1.
Here is my code:
from pandas import read_csv
import matplotlib.pyplot as plt
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
seed = 7
numpy.random.seed(seed)
dataframe = read_csv('D:/MIS793/Dataset/Academic Dataset External 2/Python scripts/totalsale _byweek.csv', usecols=[1], engine='python')
plt.plot(dataframe)
plt.show()
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = trainX.reshape(trainX.shape[0], trainX.shape[1], 1).astype('float32')
testX = testX.reshape(testX.shape[0], testX.shape[1], 1).astype('float32')
model = Sequential()
model.add(Conv1D(filters=10, kernel_size=1, padding='same', strides=1, activation='relu',input_shape=(1,1)))
model.add(MaxPooling1D(pool_size=1))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
print(model.summary())
model.fit(trainX, trainY, validation_data=(testX, testY), epochs=10, batch_size=100)
scores = model.evaluate(testX, testY, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
I am not sure about a few things here:
Reshaping of trainX and testX.
The values of kernel_size and input_shape.
My idea here is that it's just one vector of sales values: 10 filters, each of size 1, move from one value to the next. The input shape is in the format (time steps, dimensions).
I only got an accuracy of 10.91%! So my first question is whether I am feeding in the right parameters.
Thanks
ASC
With model.metrics_names you can get the labels of your scores variable.
In your case it will be ['loss', 'mean_absolute_error'].
So what you are printing is not the accuracy but the MAE, multiplied by 100.
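A quick way to see exactly what model.evaluate() returned is to pair each value with its name (the example output uses the scores reported further below):
scores = model.evaluate(testX, testY, verbose=0)
for name, value in zip(model.metrics_names, scores):
    print('%s: %.4f' % (name, value))
# loss: 0.1274
# mean_absolute_error: 0.3121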
I tried using accuracy instead of MAE. However, I got an accuracy of 0%. Just wondering: since this is about predicting numerical values, should I really use accuracy? Here is my latest code.
from pandas import read_csv
import matplotlib.pyplot as plt
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
dataframe = read_csv('D:/MIS793/Dataset/Academic Dataset External 2/Python scripts/totalsale _byweek.csv', usecols=[1], engine='python')
plt.plot(dataframe)
plt.show()
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = trainX.reshape(trainX.shape[0], trainX.shape[1],1).astype('float32')
testX = testX.reshape(testX.shape[0], testX.shape[1],1).astype('float32')
model = Sequential()
model.add(Conv1D(filters=20, kernel_size=1, padding='same', strides=1, activation='relu',input_shape=(1,1)))
model.add(MaxPooling1D(pool_size=1))
model.add(Conv1D(filters=10, kernel_size=1, padding='same', strides=1, activation='relu'))
model.add(MaxPooling1D(pool_size=1))
model.add(Flatten())
model.add(Dense(4, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(trainX, trainY, validation_data=(testX, testY), epochs=10, batch_size=100)
scores = model.evaluate(testX, testY, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
Or should I go with MAE?
If I go with MAE, my scores look like this:
[0.12740663779013364, 0.31208728355111426]
The first value is the loss and the second is the MAE. Isn't that a better metric in this case?
The final line will then be:
print("MAE: %.2f" % scores[1])
Thanks
Anindya
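Accuracy is indeed the wrong metric for a regression target: a continuous prediction almost never equals the true value exactly, which is why it reads 0%. MAE (or RMSE) is the appropriate choice. One caveat, sketched below under the assumption that scaler is the MinMaxScaler fitted earlier: the 0.312 is in scaled units, so inverse-transform before reporting the error in real sales units.
from sklearn.metrics import mean_absolute_error

preds = model.predict(testX)                          # scaled predictions, shape (n, 1)
preds_real = scaler.inverse_transform(preds)          # back to original sales units
testY_real = scaler.inverse_transform(testY.reshape(-1, 1))
print('MAE in sales units: %.2f' % mean_absolute_error(testY_real, preds_real))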