Keras LSTM to PyTorch

I am using the following code to apply a sequential LSTM to time-series data with a single value per step. It works fine in Keras; how can I do the same in PyTorch?
import tensorflow
from tensorflow.keras import optimizers
from tensorflow.keras import losses
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input, Dropout, Embedding, LSTM
from tensorflow.keras.optimizers import RMSprop, Adam, Nadam
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.callbacks import TensorBoard
# training_dataset.shape = (303, 24, 1)
time_steps = 24
metric = 'mean_absolute_error'
model = Sequential()
model.add(LSTM(units=32, activation='tanh', input_shape=(time_steps, 1), return_sequences=True))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='mean_absolute_error', metrics=[metric])
print(model.summary())
batch_size=32
epochs=20
model.fit(x=training_dataset, y=training_dataset,
          batch_size=batch_size, epochs=epochs,
          verbose=1, validation_data=(training_dataset, training_dataset),
          callbacks=[TensorBoard(log_dir='../logs/{0}'.format(tensorlog))])
testing_pred = model.predict(x=testing_dataset)

You can check the PyTorch documentation for that: https://pytorch.org/docs/master/generated/torch.nn.LSTM.html
The closest simple equivalent needs a small custom module: nn.LSTM has no output_size argument, and it cannot sit inside nn.Sequential directly because it returns a tuple of (output, hidden state). A minimal sketch:
import torch, torch.nn as nn
from torch.optim import Adam

class LSTMModel(nn.Module):
    def __init__(self):
        super().__init__()
        # batch_first=True expects input shaped (batch, time_steps, features)
        self.lstm = nn.LSTM(input_size=1, hidden_size=32, batch_first=True)
        self.linear = nn.Linear(32, 1)
    def forward(self, x):
        out, _ = self.lstm(x)  # nn.LSTM returns (output, (h_n, c_n))
        return torch.sigmoid(self.linear(out))  # Dense(1, sigmoid) per time step

model = LSTMModel()
opt = Adam(model.parameters())
loss_func = nn.L1Loss()  # matches the Keras 'mean_absolute_error' loss
for x, y in dataloader:
    opt.zero_grad()
    loss = loss_func(model(x), y)
    loss.backward()
    opt.step()
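The dataloader above is left undefined; a minimal sketch of one that mirrors the Keras call (input and target are both training_dataset, assumed to be a NumPy array of shape (303, 24, 1)):
from torch.utils.data import TensorDataset, DataLoader

data = torch.tensor(training_dataset, dtype=torch.float32)  # (303, 24, 1)
dataloader = DataLoader(TensorDataset(data, data), batch_size=32, shuffle=True)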

Related

Grid-Search Voting Classifier containing a Keras model

I am trying to train a VotingClassifier containing a Keras model using GridSearchCV.
Here is the code:
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import adam
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.ensemble import VotingClassifier
from sklearn import datasets
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
# pretend data
X, y = datasets.make_classification(n_samples=100, n_features=20)
scaler = StandardScaler()
# create model
def create_model():
    model = Sequential()
    model.add(Dense(20, kernel_initializer="uniform", activation='relu', input_shape=(20,)))
    model.add(Dense(30, kernel_initializer="uniform", activation='relu'))
    model.add(Dense(10, kernel_initializer="uniform", activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Compile model
    optimizer = adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
keras_model = KerasClassifier(build_fn=create_model)
keras_model._estimator_type = "classifier"
eclf = VotingClassifier(
    estimators=[('svc', SVC(probability=True)), ('keras_model', keras_model)],
    voting='soft')
# Test - fit the voting classifier without grid search
eclf.fit(X, y)
print('The VotingClassifier can be fit outside of gridsearch\n')
# parameters to grid search
params = [{'svc__C': [0.01, 0.1]}]
grid = GridSearchCV(eclf, params, cv=2, scoring='accuracy', verbose=1)
grid.fit(X, y)
I get the following error:
ValueError: The estimator KerasClassifier should be a classifier.
When I train the VotingClassifier outside of GridSearchCV no error occurs, but when I train it within GridSearchCV I get the error message. This other question, VotingClassifier with pipelines as estimators, has the same error (without using GridSearch) and was fixed by a line asserting that the Keras model is a classifier, which I have also included:
keras_model._estimator_type = "classifier"
This did not fix the problem here.
Any suggestions?
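One plausible cause, offered as an assumption rather than a verified diagnosis: GridSearchCV clones its estimators with sklearn.base.clone, which rebuilds KerasClassifier from its constructor parameters, so an _estimator_type set on the instance is lost in the clones. A minimal sketch of a workaround is to make it a class attribute, which survives cloning:
class MyKerasClassifier(KerasClassifier):
    _estimator_type = "classifier"  # class attribute survives sklearn.base.clone

keras_model = MyKerasClassifier(build_fn=create_model)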

Transfer-learning ResNet model does not learn

I trained a ResNet-50 model to classify images from 6 classes (my own dataset) and saved it. But the model did not learn properly, and its predictions are incorrect. What could be the reason for this poor learning?
Here are my code and the output plots, using Keras with the TensorFlow backend. How can I solve this?
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.layers import Dense, Dropout
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.preprocessing.image import ImageDataGenerator, image
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Define some constant needed throughout the script
N_CLASSES = 6
EPOCHS = 20
PATIENCE = 5
TRAIN_PATH= '/Train/'
VALID_PATH = '/Test/'
MODEL_CHECK_WEIGHT_NAME = 'resnet_monki_v1_chk.h5'
# Define the model: freeze the pre-trained ResNet weights and add a few layers on top for our custom dataset
K.set_learning_phase(0)
model = ResNet50(input_shape=(224,224,3),include_top=False, weights='imagenet', pooling='avg')
K.set_learning_phase(1)
x = model.output
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
output = Dense(N_CLASSES, activation='softmax', name='custom_output')(x)
custom_resnet = Model(inputs=model.input, outputs = output)
for layer in model.layers:
    layer.trainable = False
custom_resnet.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
custom_resnet.summary()
# 4. Load dataset to be used
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
traingen = datagen.flow_from_directory(TRAIN_PATH, target_size=(224,224), batch_size=32, class_mode='categorical')
validgen = datagen.flow_from_directory(VALID_PATH, target_size=(224,224), batch_size=32, class_mode='categorical', shuffle=False)
# 5. Train the model; ModelCheckpoint saves the best model based on validation accuracy
es_callback = EarlyStopping(monitor='val_acc', patience=PATIENCE, mode='max')
mc_callback = ModelCheckpoint(filepath=MODEL_CHECK_WEIGHT_NAME, monitor='val_acc', save_best_only=True, mode='max')
train_history = custom_resnet.fit_generator(traingen, steps_per_epoch=len(traingen), epochs= EPOCHS, validation_data=traingen, validation_steps=len(validgen), verbose=2, callbacks=[es_callback, mc_callback])
custom_resnet.save('custom_resnet.h5')
Here are the plots; I had to post them as links, since the site does not let me embed images yet. [plots: training and validation accuracy/loss curves]
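One thing stands out in the code above, offered as an observation rather than a verified fix: fit_generator validates on traingen while taking validation_steps from len(validgen), so the reported val_acc is measured on training images. The corrected call would presumably be:
train_history = custom_resnet.fit_generator(
    traingen,
    steps_per_epoch=len(traingen),
    epochs=EPOCHS,
    validation_data=validgen,        # was traingen
    validation_steps=len(validgen),
    verbose=2,
    callbacks=[es_callback, mc_callback])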

cross_val_score's n_jobs = -1 argument not working in Python 3.6

I was trying to improve the accuracy of, and evaluate, my artificial neural network, but I ran into an issue: n_jobs = -1 in cross_val_score was not working.
I am using TensorFlow on my CPU, and my error was:
BrokenProcessPool: A task has failed to un-serialize. Please ensure
that the arguments of the function are all picklable.
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
def build_classifier():
    classifier = Sequential()
    classifier.add(Dense(6, kernel_initializer='uniform', activation='relu', input_dim=11))
    classifier.add(Dense(6, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return classifier
classifier = KerasClassifier(build_fn=build_classifier, batch_size=10, nb_epoch=100)
accuracies = cross_val_score(estimator=classifier, X=x_train, y=y_train, cv=10, n_jobs=-1)
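A commonly suggested workaround, sketched under the assumption that the failure comes from spawn-based multiprocessing (for example on Windows): define build_classifier at module level so it can be pickled, and keep the parallel call under a main guard so worker processes can import the script without re-running it:
if __name__ == '__main__':
    accuracies = cross_val_score(estimator=classifier, X=x_train, y=y_train,
                                 cv=10, n_jobs=-1)
    print(accuracies.mean(), accuracies.std())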

How can I get the score I wanted by using KerasRegressor and sklearn pipeline?

I want to insert a Keras model into a scikit-learn pipeline, but when I use pipeline.score, I am confused. Here is the code:
from keras import models
from keras import layers
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
def build_model():
    model = models.Sequential()
    model.add(layers.Dense(64, activation='relu', input_shape=(train_data.shape[1],)))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(1))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return model
model = KerasRegressor(build_fn=build_model, epochs=90, batch_size=1, verbose=0)
pipe_network = Pipeline([('scl', StandardScaler()), ('clf', model)])
pipe_network.fit(train_data, train_targets)
The model score is:
pipe_network.score(test_data, test_targets)
>>> -12.813292971994802
What is this score? I want to get a result like the output of the evaluate function. How can I do that?
stdsc = StandardScaler()
train_data_std = stdsc.fit_transform(train_data)
test_data_std = stdsc.transform(test_data)
network = build_model()
network.fit(train_data_std, train_targets, epochs=90, batch_size=1, verbose=0)
network.evaluate(test_data_std, test_targets)
>>> [12.681396334779029, 2.479423579047708]
Thank you for your attention.
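As far as I can tell from the keras.wrappers.scikit_learn source (treat the attribute access below as an assumption about that wrapper), KerasRegressor.score returns the negative of the Keras loss, so -12.81 is simply -MSE on the scaled test set. To reproduce evaluate()'s [loss, mae] output through the pipeline:
X_test_std = pipe_network.named_steps['scl'].transform(test_data)
print(pipe_network.named_steps['clf'].model.evaluate(X_test_std, test_targets))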

Python: LSTM-based RNN input error?

I am trying to build a deep learning network based on an LSTM RNN.
Here is what I tried:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import LSTM
import numpy as np
train = np.loadtxt("TrainDatasetFinal.txt", delimiter=",")
test = np.loadtxt("testDatasetFinal.txt", delimiter=",")
y_train = train[:,7]
y_test = test[:,7]
train_spec = train[:,6]
test_spec = test[:,6]
model = Sequential()
model.add(LSTM(32, input_shape=(1415684, 8)))
model.add(LSTM(64, input_dim=1, input_length=1415684, return_sequences=True))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
model.fit(train_spec, y_train, batch_size=2000, nb_epoch=11)
score = model.evaluate(test_spec, y_test, batch_size=2000)
but it gives me the following error:
ValueError: Input 0 is incompatible with layer lstm_2: expected ndim=3, found ndim=2
Here is a sample from the dataset (columns: patient number, time in milliseconds, accelerometer x-axis, y-axis, z-axis, magnitude, spectrogram, label (0 or 1)):
1,15,70,39,-970,947321,596768455815000,0
1,31,70,39,-970,947321,612882670787000,0
1,46,60,49,-960,927601,602179976392000,0
1,62,60,49,-960,927601,808020878060000,0
1,78,50,39,-960,925621,726154800929000,0
In the dataset I am using only the spectrogram as the input feature and the label (0 or 1) as the output.
The total number of training samples is 1,415,684.
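A sketch of the usual fix for this error, under two assumptions: the single spectrogram column is treated as a length-1 sequence, and the first LSTM must return sequences before a second LSTM can consume them (Keras LSTM layers expect 3-D input shaped (samples, time_steps, features)):
train_spec = train_spec.reshape((-1, 1, 1))  # (samples, time_steps=1, features=1)
test_spec = test_spec.reshape((-1, 1, 1))
model = Sequential()
model.add(LSTM(32, input_shape=(1, 1), return_sequences=True))  # emit a sequence for the next LSTM
model.add(LSTM(64))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop')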
