I am building a CNN for non-image data in Keras 2.1.0 on Windows 10.
My input feature is a 3x12 matrix of non-negative numbers, and my output is a 6x1 binary multi-label vector.
I am running into this error: expected conv2d_14_input to have shape (3, 12, 1) but got array with shape (3, 12, 6500)
Here is my code:
import tensorflow as tf
from scipy.io import loadmat
import numpy as np
from tensorflow.keras.layers import BatchNormalization
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten
reshape_channel_train = loadmat('reshape_channel_train')
reshape_channel_test = loadmat('reshape_channel_test.mat')
reshape_label_train = loadmat('reshape_label_train')
reshape_label_test = loadmat('reshape_label_test')
X_train = reshape_channel_train['store_train']
X_test = reshape_channel_test['store_test']
X_train = np.expand_dims(X_train,axis = 0)
X_test = np.expand_dims(X_test, axis = 0)
Y_train = reshape_label_train['label_train']
Y_test = reshape_label_test['label_test']
classifier = Sequential()
classifier.add(Conv2D(8, kernel_size=(3,3) , input_shape=(3, 12, 1), padding="same"))
classifier.add(BatchNormalization())
classifier.add(Activation('relu'))
classifier.add(Conv2D(8, kernel_size=(3,3), input_shape=(3, 12, 1), padding="same"))
classifier.add(BatchNormalization())
classifier.add(Activation('relu'))
classifier.add(Flatten())
classifier.add(Dense(8, activation='relu'))
classifier.add(Dense(6, activation='sigmoid'))
classifier.compile(optimizer='nadam', loss='binary_crossentropy', metrics=['accuracy'])
history = classifier.fit(X_train, Y_train, batch_size = 32, epochs=100,
validation_data=(X_test, Y_test), verbose=2)
After some searching, I have used the dimension-expanding trick, but it does not seem to work:
X_train = np.expand_dims(X_train,axis = 0)
X_test = np.expand_dims(X_test, axis = 0)
The X_train variable, containing 6500 training instances, is loaded from a MATLAB .mat file with dimensions 3x12x6500, where each training instance is a 3x12 matrix.
Before applying expand_dims, the k-th training sample could be accessed as X_train[:,:,k], X_train[:,:,k].shape returned (3, 12), and X_train.shape returned (3, 12, 6500).
After applying expand_dims, X_train[:,:,k].shape returns (1, 3, 6500).
Please help me with this!
Thank you
You are managing your data incorrectly. A Conv2D layer expects data in the format (n_sample, height, width, channels), which in your case (for your X_train) is (6500, 3, 12, 1). You simply need to rearrange your data into that format:
# imports assumed from the question
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Flatten, Dense

# create data as in your matlab data
n_class = 6
n_sample = 6500
X_train = np.random.uniform(0, 1, (3, 12, n_sample))  # (3, 12, n_sample)
Y_train = tf.keras.utils.to_categorical(np.random.randint(0, n_class, n_sample))  # (n_sample, n_classes)

# reshape your data for conv2d
X_train = X_train.transpose(2, 0, 1)   # (n_sample, 3, 12)
X_train = np.expand_dims(X_train, -1)  # (n_sample, 3, 12, 1)
classifier = Sequential()
classifier.add(Conv2D(8, kernel_size=(3,3) , input_shape=(3, 12, 1), padding="same"))
classifier.add(BatchNormalization())
classifier.add(Activation('relu'))
classifier.add(Conv2D(8, kernel_size=(3,3), padding="same"))
classifier.add(BatchNormalization())
classifier.add(Activation('relu'))
classifier.add(Flatten())
classifier.add(Dense(8, activation='relu'))
classifier.add(Dense(n_class, activation='softmax'))
classifier.compile(optimizer='nadam', loss='categorical_crossentropy', metrics=['accuracy'])
history = classifier.fit(X_train, Y_train, batch_size = 32, epochs=2, verbose=2)
# get predictions
pred = np.argmax(classifier.predict(X_train), 1)
I also used a softmax activation with categorical_crossentropy, which is better suited to a multiclass problem, but you can change this. Remember to apply the same data manipulation to your test data.
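For completeness, here is a minimal sketch of the same rearrangement applied to your test split (assuming X_test is also loaded from the .mat file with shape (3, 12, n_test)):
# same rearrangement for the test split (assumed shape (3, 12, n_test))
X_test = X_test.transpose(2, 0, 1)   # (n_test, 3, 12)
X_test = np.expand_dims(X_test, -1)  # (n_test, 3, 12, 1)

# predictions on the test split with the trained classifier
pred_test = np.argmax(classifier.predict(X_test), 1)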
You need to pass the data_format="channels_last" argument, because your channels come last.
Try this:
x_train = x_train.transpose(2, 0, 1).reshape((-1, 3, 12, 1))  # move samples first, then add the channel axis
x_test = x_test.transpose(2, 0, 1).reshape((-1, 3, 12, 1))
and in each Conv2D layer pass Conv2D(<other args>, data_format="channels_last").
I'm training a model to produce image masks. This error keeps popping up, and I cannot determine the cause. Help would be appreciated.
Error statement:
File "--\Users\-----\Anaconda3\lib\site-packages\keras\initializers.py", line 209, in __call__
scale /= max(1., float(fan_in + fan_out) / 2)
TypeError: only size-1 arrays can be converted to Python scalars
Researching online, this error usually occurs when plain Python lists are used with NumPy functions, but in my case the arrays are already NumPy arrays. Below, I've attached the code.
import cv2
import glob
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
from keras import backend as K
K.set_image_dim_ordering('tf')
np.random.seed(123) # for reproducibility
image_list = []
test_list = []
for filename in glob.glob("image/*.jpg*"):
im = cv2.imread(filename)
im_r = cv2.resize(im,(200, 200), interpolation = cv2.INTER_AREA)
image_list.append(im_r)
for filename in glob.glob("test/*.png*"):
im = cv2.imread(filename)
im_r = cv2.resize(im,(200, 200), interpolation = cv2.INTER_AREA)
im_r = np.ravel(im_r)
test_list.append(im_r)
x_data = np.array(image_list)
y_data = np.array(test_list)
x_data = x_data.astype("float32")
y_data = y_data.astype("float32")
x_data /= 255
y_data /= 255
X_train = x_data
Y_train = y_data
model = Sequential()
model.add(Convolution2D(32, 5, 5, activation='relu', input_shape=(200, 200, 3)))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Convolution2D(32, 5, 5, activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Convolution2D(32, 3, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(Y_train[0], activation='sigmoid'))
print('hello')
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
loss = acc = 0
while acc < 0.9999:
    model.fit(X_train, Y_train, batch_size=32, nb_epoch=10, verbose=1)
    loss, acc = model.evaluate(X_train, Y_train, verbose=1)
model.save("model_state_no_mapping")
The problem is in the last layer of your model: Dense expects an integer number of units, but Y_train[0] is a whole array.
Change the last layer from
model.add(Dense(Y_train[0], activation='sigmoid'))
to
model.add(Dense(Y_train.shape[1], activation='sigmoid'))
so that there is one output unit per value in each (flattened) target mask, rather than passing an array as the unit count.
Also, in newer versions of Keras it is recommended to use the Conv2D layer instead of the old Convolution2D.
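For reference, here is a minimal sketch of the model rewritten with Conv2D (same filter counts and kernel sizes as in your question; Y_train.shape[1] is assumed to be the length of each flattened mask):
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense

model = Sequential()
model.add(Conv2D(32, (5, 5), activation='relu', input_shape=(200, 200, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(Y_train.shape[1], activation='sigmoid'))  # one unit per flattened mask value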
I am not able to print the confusion matrix correctly, and in the heatmap the values in some cells are printed in scientific notation (e.g. 2e+02, 1e+04). Please help me with this.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from sklearn.metrics import classification_report, confusion_matrix
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Convolution2D(64, 3, 3, input_shape = (64, 64, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Convolution2D(64, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Convolution2D(64, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(output_dim = 128, activation = 'relu'))
classifier.add(Dense(output_dim = 10, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'Adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.4,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
#importing training data
training_set = train_datagen.flow_from_directory('Dataset/train',
target_size = (64,64),
batch_size = 64,
class_mode = 'categorical')
#importing test data
test_set = test_datagen.flow_from_directory('Dataset/test',target_size = (64,64),
batch_size = 64,
class_mode = 'categorical',shuffle=False)
#storing all the history
history = classifier.fit_generator(
training_set,
steps_per_epoch=20,
epochs=5,
validation_data=test_set,
validation_steps=2000)
print(history.history.keys())
#summarize accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#Confution Matrix
Y_pred = classifier.predict_generator(test_set, steps=len(test_set), max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0)
y_pred = np.argmax(Y_pred, axis=1)
#assigning values
confusion=(confusion_matrix(test_set.classes, y_pred))
confusion_df = pd.DataFrame(confusion,
index = ['Airplan','Car','Birds','Cats','Deer', 'Dogs','Frog', 'Horse','Ship','Truck'],
columns = ['Airplan','Car','Birds','Cats','Deer', 'Dogs','Frog', 'Horse','Ship','Truck'])
#heatmap
sns.heatmap(confusion_df, annot=True)
print(confusion_df)
#classification report
print('Classification Report')
target_names = ['Airplan','Car','Birds','Cats','Deer', 'Dogs','Frog', 'Horse','Ship','Truck']
print(classification_report(test_set.classes, y_pred, target_names=target_names))
Can you please try this:
sns.heatmap(confusion_df, annot=True, fmt='.2f')
Let's assume that your confusion matrix is cm
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
cm = np.array([[345,12],[0,104763]])
plt.figure(figsize=(10,7))
sns.heatmap(cm,annot=True,linewidths=1, fmt = 'd')
plt.xlabel('predicted')
plt.ylabel('Truth')
plt.show()
You'll get the matrix below:
I'm trying to implement a CNN+RNN+LSTM structure (1) with Keras, and I found a related Keras sample code.
How can I convert the model.fit call to model.fit_generator correctly?
Original code:
from keras.models import Sequential
from keras.layers import Activation, MaxPooling2D, Dropout, LSTM, Flatten, Merge, TimeDistributed
import numpy as np
from keras.layers import Concatenate
from keras.layers.convolutional import Conv2D
# Generate fake data
# Assumed to be 1730 grayscale video frames
x_data = np.random.random((1730, 1, 8, 10))
sequence_lengths = None
Izda=Sequential()
Izda.add(TimeDistributed(Conv2D(40,(3,3),padding='same'), input_shape=(sequence_lengths, 1,8,10)))
Izda.add(Activation('relu'))
Izda.add(TimeDistributed(MaxPooling2D(data_format="channels_first", pool_size=(2, 2))))
Izda.add(Dropout(0.2))
Dcha=Sequential()
Dcha.add(TimeDistributed(Conv2D(40,(3,3),padding='same'), input_shape=(sequence_lengths, 1,8,10)))
Dcha.add(Activation('relu'))
Dcha.add(TimeDistributed(MaxPooling2D(data_format="channels_first", pool_size=(2, 2))))
Dcha.add(Dropout(0.2))
Frt=Sequential()
Frt.add(TimeDistributed(Conv2D(40,(3,3),padding='same'), input_shape=(sequence_lengths, 1,8,10)))
Frt.add(Activation('relu'))
Frt.add(TimeDistributed(MaxPooling2D(data_format="channels_first", pool_size=(2, 2))))
Frt.add(Dropout(0.2))
merged=Merge([Izda, Dcha,Frt], mode='concat', concat_axis=2)
#merged=Concatenate()([Izda, Dcha, Frt], axis=2)
# Output from merge is (batch_size, sequence_length, 120, 4, 5)
# We want to get this down to (batch_size, sequence_length, 120*4*5)
model=Sequential()
model.add(merged)
model.add(TimeDistributed(Flatten()))
model.add(LSTM(240, return_sequences=True))
model.compile(loss='mse', optimizer='adam')
model.summary()
After my modification:
from keras.models import Sequential
from keras.layers import Activation, MaxPooling2D, Dropout, LSTM, Flatten, Merge, TimeDistributed
import numpy as np
from keras.layers import Concatenate
from keras.layers.convolutional import Conv2D
# Generate fake data
# Assumed to be 1730 grayscale video frames
x_data = np.random.random((1730, 1, 8, 10))
sequence_lengths = None
def defModel():
    model = Sequential()
    model.add(TimeDistributed(Conv2D(40, (3, 3), padding='same'), input_shape=(sequence_lengths, 1, 8, 10)))
    model.add(Activation('relu'))
    model.add(TimeDistributed(MaxPooling2D(data_format="channels_first", pool_size=(2, 2))))
    model.add(Dropout(0.2))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(240, return_sequences=True))
    model.compile(loss='mse', optimizer='adam')
    model.summary()
    return model

def gen():
    for i in range(1730):
        x_train = np.random.random((1, 8, 10))
        y_train = np.ones((15, 240))
        yield (x_train, y_train)

def main():
    model = defModel()
    # Slice our long, single sequence up into shorter sequences of images
    # Let's make 50 examples of 15 frame videos
    x_train = []
    seq_len = 15
    for i in range(50):
        x_train.append(x_data[i*5:i*5+seq_len, :, :, :])
    x_train = np.asarray(x_train, dtype='float32')
    print(x_train.shape)
    # >> (50, 15, 1, 8, 10)
    model.fit_generator(
        generator=gen(),
        steps_per_epoch=1,
        epochs=2)

if __name__ == "__main__":
    main()
How can I resolve this error produced by my modification?
ValueError: Error when checking input: expected time_distributed_1_input to have 5 dimensions, but got array with shape (1, 8, 10)
(1) Wang, S., Clark, R., Wen, H., & Trigoni, N. (2017). DeepVO: Towards end-to-end visual odometry with deep Recurrent Convolutional Neural Networks. Proceedings - IEEE International Conference on Robotics and Automation, 2043–2050.
Update: concatenating the CNN and LSTM as in the sample code
model.add(TimeDistributed(Conv2D(16, (7, 7),padding='same'),input_shape=(None, 540, 960, 1)))
model.add(Activation('relu'))
model.add(TimeDistributed(Conv2D(32, (5, 5), padding='same')))
model.add(Activation('relu'))
model.add(TimeDistributed(Flatten()))
model.add(LSTM(num_classes, return_sequences=True))
I got this error:
ValueError: Error when checking target: expected lstm_1 to have 3 dimensions, but got array with shape (4, 3)
Update2
The goal is to extract image feature by CNN, then combine 3 feature from 3 images and feed into LSTM.
Goal
#Input image
(540, 960, 1) ==> (x,y,ch) ==> CNN ==> (m,n,k)┐
(540, 960, 1) ==> (x,y,ch) ==> CNN ==> (m,n,k)---> (3, m,n,k) --flatten--> (3, mnk)
(540, 960, 1) ==> (x,y,ch) ==> CNN ==> (m,n,k)」
(3, mnk) => LSTM => predict three regression value
Model
model = Sequential()
model.add(TimeDistributed(Conv2D(16, (7, 7), padding='same'),input_shape=(None, 540, 960, 1)))
model.add(Activation('relu'))
model.add(TimeDistributed(Conv2D(32, (5, 5), padding='same')))
model.add(Activation('relu'))
model.add(TimeDistributed(Flatten()))
model.add(LSTM(num_classes, return_sequences=True))
model.compile(loss='mean_squared_error', optimizer='adam')
The generator
a = readIMG(filenames[start]) # (540, 960, 1)
b = readIMG(filenames[start + 1]) # (540, 960, 1)
c = readIMG(filenames[start + 2]) # (540, 960, 1)
x_train = np.array([[a, b, c]]) # (1, 3, 540, 960, 1)
Then I still got the error:
ValueError: Error when checking target: expected lstm_1 to have 3 dimensions, but got array with shape (1, 3)
The problem is a plain shape mismatch.
You defined input_shape=(sequence_lengths, 1, 8, 10), so your model expects five dimensions as input: (batch_size, sequence_lengths, 1, 8, 10).
All you need is to make your generator output the correct shapes, with 5 dimensions:
def gen():
    x_data = np.random.random((numberOfVideos, videoLength, 1, 8, 10))
    y_data = np.ones((numberOfVideos, videoLength, 240))
    for video in range(numberOfVideos):
        x_train = x_data[video:video+1]
        y_train = y_data[video:video+1]
        yield (x_train, y_train)
Here is a working example of a CNN+LSTM using a generator: https://gist.github.com/HTLife/25c0cd362faa91477b8f28f6033adb45
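As a minimal usage sketch (the numberOfVideos and videoLength values below are assumptions, chosen to match the 50 clips of 15 frames in the question, and defModel() is the model definition from the question):
# assumed example values for the placeholders used in gen()
numberOfVideos = 50
videoLength = 15

model = defModel()  # the model defined in the question
model.fit_generator(generator=gen(),
                    steps_per_epoch=numberOfVideos,  # one video per step
                    epochs=2)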
I am trying to fit an LSTM network to a sin function. Currently, as far as I understand Keras, my code only predicts the next value. According to this link: Many to one and many to many LSTM examples in Keras, it is a many-to-one model. However, my goal is to implement a many-to-many model; basically, I want to be able to predict, say, 10 values at a given time. When I try to use
return_sequences=True (see the model.add(..) line), which is supposed to be the solution, the following error occurs:
ValueError: Error when checking target: expected lstm_8 to have 3 dimensions, but got array with shape (689, 1)
Unfortunately, I have absolutely no clue why this happens. Is there a general rule for how the input shape needs to look when using return_sequences=True? Furthermore, what exactly would I need to change? Thanks for any help.
import pandas
import numpy as np
import matplotlib.pylab as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import sklearn
from keras.models import Sequential
from keras.layers import Activation, LSTM
from keras import optimizers
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
#generate sin function with noise
x = np.arange(0, 100, 0.1)
noise = np.random.uniform(-0.1, 0.1, size=(1000,))
Y = np.sin(x) + noise
# Perform feature scaling
scaler = MinMaxScaler()
Y = scaler.fit_transform(Y.reshape(-1, 1))
# split in train and test
train_size = int(len(Y) * 0.7)
test_size = len(Y) - train_size
train, test = Y[0:train_size,:], Y[train_size:len(Y),:]
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back-1):
        a = dataset[i:(i+look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)
# reshape into X=t and Y=t+1
look_back = 10
X_train, y_train = create_dataset(train, look_back)
X_test, y_test = create_dataset(test, look_back)
# LSTM network expects the input data in form of [samples, time steps, features]
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
np.set_printoptions(threshold=np.inf)
# compile model
model = Sequential()
model.add(LSTM(1, input_shape=(look_back, 1)))#, return_sequences=True)) <== uncomment this
model.compile(loss='mean_squared_error', optimizer='adam')
SVG(model_to_dot(model).create(prog='dot', format='svg'))
model.fit(X_train, y_train, validation_data=(X_test, y_test),
batch_size=10, epochs=10, verbose=2)
prediction = model.predict(X_test, batch_size=1, verbose=0)
prediction.reshape(-1)
#Transform back to original representation
Y = scaler.inverse_transform(Y)
prediction = scaler.inverse_transform(prediction)
plt.plot(np.arange(0,Y.shape[0]), Y)
plt.plot(np.arange(Y.shape[0] - X_test.shape[0] , Y.shape[0]), prediction, 'red')
plt.show()
error = mean_squared_error(y_test, prediction)
print(error)
The problem is not the input, but the output.
The error says "Error when checking target"; the target is y_train and y_test.
Because your LSTM returns a sequence (return_sequences=True), the output dimension will be (n_batch, lookback, 1).
You can verify this with model.summary():
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm_1 (LSTM) (None, 10, 1) 12
=================================================================
Total params: 12
Trainable params: 12
Non-trainable params: 0
_________________________________________________________________
You will need to change your create_dataset function so that each ground truth is shaped (lookback, 1).
Something you might want to do: for each sequence x in the training set, its y will be the next sequence, shifted forward by one step.
For example, let's say we want to learn something easier; the sequence is the previous number plus 1 --> 1, 2, 3, 4, 5, 6, 7, 8, 9, 10.
For lookback=4:
X_train[0] = 1,2,3,4
y_train[0] will be: 2,3,4,5
X_train[1] = 2,3,4,5
y_train[1] will be: 3,4,5,6
and so on... (a sketch of such a rewrite follows below)
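As an illustration, here is one possible sketch of such a rewrite (the name create_dataset_seq is mine; it shifts the target window forward by one step so each y has shape (look_back, 1)):
def create_dataset_seq(dataset, look_back=1):
    # each X sample is look_back consecutive values;
    # each y target is the same window shifted forward by one step
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        dataX.append(dataset[i:(i + look_back), 0])
        dataY.append(dataset[(i + 1):(i + 1 + look_back), 0])
    X = np.array(dataX).reshape(-1, look_back, 1)  # (n_samples, look_back, 1)
    y = np.array(dataY).reshape(-1, look_back, 1)  # (n_samples, look_back, 1)
    return X, y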
I have simulated the data as @DvirSamuel suggested, and provided code for an LSTM and an FNN. Note that for the LSTM, network_lstm.add(layers.Dense(1, activation = None)) is required if return_sequences = True is included in the previous layer.
## Simulate data.
import numpy as np
import matplotlib.pyplot as plt
from keras import models, layers
from keras.layers import Dense

np.random.seed(20180826)

Z = np.random.randint(0, 10, size = (11000, 1))
for i in range(10):
    Z = np.concatenate((Z, (Z[:, -1].reshape(Z.shape[0], 1) + 1)), axis = 1)

X = Z[:, :-1]
Y = Z[:, 1:]
print(X.shape)
print(Y.shape)
## Training and validation data.
split = 10000
X_train = X[:split, :]
X_valid = X[split:, :]
Y_train = Y[:split, :]
Y_valid = Y[split:, :]
print(X_train.shape)
print(Y_train.shape)
print(X_valid.shape)
print(Y_valid.shape)
Code for a LSTM model:
## LSTM model.
X_lstm_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
X_lstm_valid = X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1)
Y_lstm_train = Y_train.reshape(Y_train.shape[0], Y_train.shape[1], 1)
Y_lstm_valid = Y_valid.reshape(Y_valid.shape[0], Y_valid.shape[1], 1)
# Define model.
network_lstm = models.Sequential()
network_lstm.add(layers.LSTM(64, activation = 'relu', input_shape = (X_lstm_train.shape[1], 1),
return_sequences = True))
network_lstm.add(layers.Dense(1, activation = None))
network_lstm.summary()
# Compile model.
network_lstm.compile(optimizer = 'rmsprop', loss = 'mean_squared_error')
# Fit model.
history_lstm = network_lstm.fit(X_lstm_train, Y_lstm_train, epochs = 5, batch_size = 32, verbose = True,
validation_data = (X_lstm_valid, Y_lstm_valid))
## Extract loss over epochs and predict.
# Extract loss.
loss_lstm = history_lstm.history['loss']
val_loss_lstm = history_lstm.history['val_loss']
epochs_lstm = range(1, len(loss_lstm) + 1)
plt.plot(epochs_lstm, loss_lstm, 'black', label = 'Training Loss')
plt.plot(epochs_lstm, val_loss_lstm, 'red', label = 'Validation Loss')
plt.title('LSTM: Training and Validation Loss')
plt.legend()
plt.title('First in Sequence')
plt.scatter(Y_train[:, 0], network_lstm.predict(X_lstm_train)[:, 0], alpha = 0.1)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.show()
plt.scatter(Y_valid[:, 0], network_lstm.predict(X_lstm_valid)[:, 0], alpha = 0.1)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.show()
plt.title('Last in Sequence')
plt.scatter(Y_train[:, -1], network_lstm.predict(X_lstm_train)[:, -1], alpha = 0.1)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.show()
plt.scatter(Y_valid[:, -1], network_lstm.predict(X_lstm_valid)[:, -1], alpha = 0.1)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.show()
Code for a FNN model:
## FNN model.
# Define model.
network_fnn = models.Sequential()
network_fnn.add(layers.Dense(64, activation = 'relu', input_shape = (X_train.shape[1],)))
network_fnn.add(Dense(10, activation = None))
network_fnn.summary()
# Compile model.
network_fnn.compile(optimizer = 'rmsprop', loss = 'mean_squared_error')
# Fit model.
history_fnn = network_fnn.fit(X_train, Y_train, epochs = 5, batch_size = 32, verbose = True,
validation_data = (X_valid, Y_valid))
## Extract loss over epochs.
# Extract loss.
loss_fnn = history_fnn.history['loss']
val_loss_fnn = history_fnn.history['val_loss']
epochs_fnn = range(1, len(loss_fnn) + 1)
plt.plot(epochs_fnn, loss_fnn, 'black', label = 'Training Loss')
plt.plot(epochs_fnn, val_loss_fnn, 'red', label = 'Validation Loss')
plt.title('FNN: Training and Validation Loss')
plt.legend()
plt.title('First in Sequence')
plt.scatter(Y_train[:, 1], network_fnn.predict(X_train)[:, 1], alpha = 0.1)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.show()
plt.scatter(Y_valid[:, 1], network_fnn.predict(X_valid)[:, 1], alpha = 0.1)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.show()
plt.title('Last in Sequence')
plt.scatter(Y_train[:, -1], network_fnn.predict(X_train)[:, -1], alpha = 0.1)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.show()
plt.scatter(Y_valid[:, -1], network_fnn.predict(X_valid)[:, -1], alpha = 0.1)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.show()
Shouldn't this:
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
be like this:
X_train = np.reshape((X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape((X_test.shape[0], X_test.shape[1], 1))
Can this be your problem? (1 year later xD)