I am trying to build a recurrent neural network to predict cities.
The data I am using is a compilation of 298 addresses with two columns.
Column 1 is the full address which includes the following components:
Street Number, Street Name, City, State and Zip Code
Column 2 is the City.
An example of the dataset I am working with can be seen below:
Address                                              City
34416 Mike Station Davidmouth WY 15806               Davidmouth
7067 Nguyen Well Stevenmouth NE 92527                Stevenmouth
14420 Erika Springs Port Kristenborough MP 81670     Port Kristenborough
704 Steven Corners South George ND 44349             South George
168 Ashley Views Port Brianberg WA 2232              Port Brianberg
Below is the code I ran in Google Colab:
# Import libraries
import collections
import helper
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.utils import pad_sequences
from keras.models import Model
from keras.layers import GRU, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional
from keras.layers import Embedding
from keras.optimizers import Adam
from keras.losses import sparse_categorical_crossentropy
df = pd.read_excel("rnn_mt_addresses.xlsx")
df.head()
# select columns for addresses and cities
addresses_df = df.iloc[:, :-1]
cities_list = df.iloc[:, -1]
# change cities list into a dataframe
cities_df = pd.DataFrame(cities_list, columns=['City'])
addresses = [str(add) for add in addresses_df['Address']]
cities = [str(cit) for cit in cities_df['City']]
# count words in addresses and cities
addresses_words_counter = collections.Counter([word for address in addresses for word in address.split()])
cities_word_counter = collections.Counter([word for city in cities for word in city.split()])
# tokenize addresses
addresses_tokenizer = Tokenizer()
cities_tokenizer = Tokenizer()
addresses_tokenizer.fit_on_texts(addresses)
# addresses vocab size
len(addresses_tokenizer.word_index)
# tokenize cities
cities_tokenizer.fit_on_texts(cities)
# cities vocab size
len(cities_tokenizer.word_index)
# apply sequencing
addresses_sequences = addresses_tokenizer.texts_to_sequences(addresses)
cities_sequences = cities_tokenizer.texts_to_sequences(cities)
# calculate addresses vocabulary size
addresses_vocab_size = len(addresses_tokenizer.word_index)
print(f"The addresses vocabulary size is {len(addresses_tokenizer.word_index)}")
The addresses vocabulary size is 1266
# calculate cities vocabulary size
cities_vocab_size = len(cities_tokenizer.word_index)
print(f"The cities vocabulary size is {len(cities_tokenizer.word_index)}")
The cities vocabulary size is 286
# calculate addresses and cities max length
max_addresses_sequence_length = max([len(address) for address in addresses_sequences])
max_addresses_sequence_length
7
max_cities_sequence_length = max([len(city) for city in cities_sequences])
max_cities_sequence_length
2
# pad cities and addresses
addresses_sequences_pd = pad_sequences(addresses_sequences,maxlen = max_addresses_sequence_length, padding = 'post')
cities_sequences_pd = pad_sequences(cities_sequences, maxlen = max_addresses_sequence_length, padding = 'post')
# reshape addresses_sequences_pd and cities_sequences_pd to 3D tensors
addresses_sequences_pd = addresses_sequences_pd.reshape(addresses_sequences_pd.shape[0], addresses_sequences_pd.shape[1],1)
cities_sequences_pd = cities_sequences_pd.reshape(cities_sequences_pd.shape[0], cities_sequences_pd.shape[1],1)
addresses_sequences_pd.shape
(298, 7, 1)
cities_sequences_pd.shape
(298, 7, 1)
# define logits function
def logits_to_text(logits, tokenizer):
    """
    Turn logits from a neural network into text using the tokenizer
    :param logits: Logits from a neural network
    :param tokenizer: Keras Tokenizer fit on the labels
    :return: String that represents the text of the logits
    """
    index_to_words = {id: word for word, id in tokenizer.word_index.items()}
    index_to_words[0] = '<PAD>'
    return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])
from keras.layers import GRU, Input, Dense, TimeDistributed, Dropout, LSTM
from keras.models import Model, Sequential
from keras.layers import Activation
from keras.optimizers import Adam
from keras.losses import sparse_categorical_crossentropy
# TODO: Build the layers
model = Sequential()
model.add(GRU(128, input_shape=addresses_sequences_pd.shape[1:], return_sequences=True))
model.add(Dropout(0.5))
model.add(GRU(128, return_sequences=True))
model.add(Dropout(0.5))
model.add(TimeDistributed(Dense(256, activation='relu')))
model.add(Dropout(0.5))
model.add(TimeDistributed(Dense(addresses_vocab_size, activation='softmax')))
model.compile(loss=sparse_categorical_crossentropy,optimizer=Adam(),metrics=['accuracy'])
tmp_x = addresses_sequences_pd
# tmp_x = tmp_x.reshape((-1, preproc_cities.shape[-2], 1))
# Fit the model
model.fit(tmp_x, cities_sequences_pd, batch_size=300, epochs=10, validation_split=0.2)
# Print prediction(s)
print(logits_to_text(model.predict(tmp_x[:1])[0], cities_tokenizer))
When I try to predict the first city, it returns this as the prediction:
<PAD> <PAD> <PAD> <PAD> <PAD> <PAD> <PAD>
which is basically index zero, as hardcoded in the logits_to_text function.
The expected prediction is Davidmouth.
I have tried changing the shape of the inputs and the labels (i.e. addresses_sequences_pd and cities_sequences_pd respectively), but that hasn't worked.
I have also tried changing the batch_size and epochs, but that didn't change anything either.
What am I doing wrong?
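Edit: to make it easier to see what I mean, here is a minimal sketch of the direction I suspect the fix lies in (this is only an assumption on my part, not something I have verified). Since the labels come from cities_tokenizer, I suspect the final Dense layer should be sized to the cities vocabulary (plus one for the padding index) rather than the addresses vocabulary, and that the heavily padded city targets may also push the network towards predicting <PAD>:
# Hedged sketch (assumption): size the output to the cities vocabulary and decode with cities_tokenizer
model = Sequential()
model.add(GRU(128, input_shape=addresses_sequences_pd.shape[1:], return_sequences=True))
model.add(Dropout(0.5))
model.add(TimeDistributed(Dense(cities_vocab_size + 1, activation='softmax')))
model.compile(loss=sparse_categorical_crossentropy, optimizer=Adam(), metrics=['accuracy'])
model.fit(addresses_sequences_pd, cities_sequences_pd, batch_size=32, epochs=50, validation_split=0.2)
print(logits_to_text(model.predict(addresses_sequences_pd[:1])[0], cities_tokenizer))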
Related
I'm trying to develop a multitask deep neural network (MTDNN) to make predictions of small-molecule bioactivity against kinase targets, and something is definitely wrong with my model structure, but I can't figure out what.
For my training data (highly imbalanced, with 0 as inactive and 1 as active), I have 423 unique kinase targets (tasks) and over 400k unique compounds. I first calculate ECFP fingerprints from the SMILES strings, and then I randomly split the input data into train, test, and valid sets with an 8:1:1 ratio using RandomStratifiedSplitter from the deepchem package. After training my model on the train set, I want to make predictions on the test set to check model performance.
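For context, this is roughly what that featurization and splitting step looks like (a sketch only; the parameter values and the names df and task_names are placeholders, not my exact code):
# Sketch of the ECFP featurization and 8:1:1 stratified split described above
featurizer = dc.feat.CircularFingerprint(size=1024, radius=2)
X = featurizer.featurize(df["smiles"])                       # df: table of compounds and per-task labels
dataset = dc.data.NumpyDataset(X=X, y=df[task_names].values)
splitter = dc.splits.RandomStratifiedSplitter()
train, valid, test = splitter.train_valid_test_split(dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)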
Here's what my data looks like (screenshot example):
https://i.stack.imgur.com/8Hp36.png
Here's my code:
# Import Packages
import numpy as np
import pandas as pd
import deepchem as dc
from sklearn.metrics import roc_auc_score, roc_curve, auc, confusion_matrix
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import initializers, regularizers
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input, Dropout, Reshape
from tensorflow.keras.optimizers import SGD
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
# Build Model
inputs = keras.Input(shape = (1024, ))
x = keras.layers.Dense(2000, activation='relu', name="dense2000",
kernel_initializer=initializers.RandomNormal(stddev=0.02),
bias_initializer=initializers.Ones(),
kernel_regularizer=regularizers.L2(l2=.0001))(inputs)
x = keras.layers.Dropout(rate=0.25)(x)
x = keras.layers.Dense(500, activation='relu', name='dense500')(x)
x = keras.layers.Dropout(rate=0.25)(x)
x = keras.layers.Dense(846, activation='relu', name='output1')(x)
logits = Reshape([423, 2])(x)
outputs = keras.layers.Softmax(axis=2)(logits)
Model1 = keras.Model(inputs=inputs, outputs=outputs, name='MTDNN')
Model1.summary()
opt = keras.optimizers.SGD(learning_rate=.0003, momentum=0.9)
def loss_function(output, labels):
    loss = tf.nn.softmax_cross_entropy_with_logits(output, labels)
    return loss
loss_fn = loss_function
Model1.compile(loss=loss_fn, optimizer=opt,
metrics=[keras.metrics.Accuracy(),
keras.metrics.AUC(),
keras.metrics.Precision(),
keras.metrics.Recall()])
for train, test, valid in split2:
    trainX = pd.DataFrame(train.X)
    trainy = pd.DataFrame(train.y)
    trainy2 = tf.one_hot(trainy, 2)
    testX = pd.DataFrame(test.X)
    testy = pd.DataFrame(test.y)
    testy2 = tf.one_hot(testy, 2)
    validX = pd.DataFrame(valid.X)
    validy = pd.DataFrame(valid.y)
    validy2 = tf.one_hot(validy, 2)
    history = Model1.fit(x=trainX, y=trainy2,
                         shuffle=True,
                         epochs=10,
                         verbose=1,
                         batch_size=100,
                         validation_data=(validX, validy2))
    y_pred = Model1.predict(testX)
    y_pred2 = y_pred[:, :, 1]
    y_pred3 = np.round(y_pred2)
# Check the # of nonzero in assay
(y_pred3 != 0).sum()  # all 0s
My questions are:
The ROC-AUC and precision-recall values are all extremely high (>0.99), but the test-set predictions are all 0s, with no actives at all. I also used a randomized dataset with the same active:inactive ratio for each task to check whether those values are too good to be true, and it turns out all values are still above 0.99, including the ROC-AUC, which should be around 0.5.
Can anyone help me identify what is wrong with my model and how I should fix it, please?
Can I use built-in functions in sklearn to calculate ROC/accuracy/precision-recall, or should I calculate the metrics manually from the confusion matrix for the multitask setting? Why or why not?
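For reference, this is roughly how I imagined using the sklearn built-ins per task (a sketch only; it assumes testy holds the 0/1 labels with one column per task and y_pred2 the predicted active-class probabilities, as in my code above):
# Sketch: per-task ROC-AUC with sklearn built-ins
per_task_auc = []
for task in range(testy.shape[1]):
    labels = testy.iloc[:, task].values
    scores = y_pred2[:, task]
    if len(np.unique(labels)) > 1:        # ROC-AUC is undefined when only one class is present
        per_task_auc.append(roc_auc_score(labels, scores))
print("mean per-task ROC-AUC:", np.mean(per_task_auc))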
Attached is the link to the file of entities. I want to train a neural network to represent each entity as a vector. Attached is my code for training:
import pandas as pd
import numpy as np
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.models import Model
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Input
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
file_path = '/content/drive/My Drive/Colab Notebooks/Deep Learning/NLP/Data/entities.txt'
df = pd.read_csv(file_path, delimiter = '\t', engine='python', quoting = 3, header = None)
df.columns = ['Entity']
Entity = df['Entity']
X_train, X_test = train_test_split(Entity, test_size = 0.10)
print('Total Entities: {}'.format(len(Entity)))
print('Training Entities: {}'.format(len(X_train)))
print('Test Entities: {}'.format(len(X_test)))
vocab_size = len(Entity)
X_train_encode = [one_hot(d, vocab_size,lower=True, split=' ') for d in X_train]
X_test_encode = [one_hot(d, vocab_size,lower=True, split=' ') for d in X_test]
model = Sequential()
model.add(Embedding(input_length=1,input_dim=vocab_size, output_dim=100))
model.add(Flatten())
model.add(Dense(vocab_size, activation='softmax'))
model.compile(optimizer='adam', loss='mse', metrics=['acc'])
print(model.summary())
model.fit(X_train_encode, X_train_encode, epochs=20, batch_size=1000, verbose=1)
The following error is encountered when I try to execute the code:
Error when checking model input: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 1 array(s), but instead got the following list of 34826 arrays:
You are passing a list of numpy arrays to model.fit. The following code produces lists of arrays for X_train_encode and X_test_encode:
X_train_encode = [one_hot(d, vocab_size,lower=True, split=' ') for d in X_train]
X_test_encode = [one_hot(d, vocab_size,lower=True, split=' ') for d in X_test]
Convert these lists into numpy arrays before passing them to the model.fit method:
X_train_encode = np.array(X_train_encode)
X_test_encode = np.array(X_test_encode)
Also, I don't see the need to one_hot encode X_train and X_test: the embedding layer expects integers (in your case word indexes), not one-hot encoded values of the words' indexes. So if X_train and X_test are arrays of word indexes, you can feed them directly into the model.fit method.
EDIT:
Currently the 'mse' loss is being used. Since the last layer is a softmax layer, a cross-entropy loss is more applicable here. And since the outputs are integer class values (words), sparse categorical cross-entropy should be used as the loss:
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
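Putting these pieces together, a minimal sketch of the adjusted setup might look like the following (it assumes each entity encodes to a single token index; if entities are multi-word you would need pad_sequences and a larger input_length first):
# Sketch combining the suggestions above: array inputs, integer targets,
# and sparse categorical cross-entropy (hyperparameters are placeholders)
X_train_encode = np.array(X_train_encode)            # shape (num_entities, 1)
model = Sequential()
model.add(Embedding(input_dim=vocab_size, output_dim=100, input_length=1))
model.add(Flatten())
model.add(Dense(vocab_size, activation='softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
model.fit(X_train_encode, X_train_encode, epochs=20, batch_size=1000, verbose=1)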
import keras
import keras.backend as K
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.core import Activation
import pandas as pd
import numpy as np
from keras.preprocessing.text import Tokenizer          # for tokenizing text
from keras.preprocessing.sequence import pad_sequences  # for padding sentences with zeros, to make the sentence lengths the same
from keras.utils import to_categorical                  # for one-hot encoding of the labels
from keras.layers import Dense, Input, Flatten, Dropout, BatchNormalization
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Sequential
from sklearn.model_selection import train_test_split
MAX_SEQUENCE_LENGTH = 300
MAX_NB_WORDS = 20000
#Reading the data
raw_data=pd.read_csv("/home/riaz.k/Desktop/TRAIN.csv")
raw_data.head()
# create training and testing vars
train, test = train_test_split(raw_data, test_size=0.3)
train.head()
test.head()
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(train.Procedure)
train_sequences = tokenizer.texts_to_sequences(train.Procedure)
test_sequences = tokenizer.texts_to_sequences(test.Procedure)
word_index = tokenizer.word_index  # dictionary containing words and their index
# print(tokenizer.word_index)
print('Found %s unique tokens.' % len(word_index))
train_data = pad_sequences(train_sequences, maxlen=MAX_SEQUENCE_LENGTH)
test_data = pad_sequences(test_sequences, maxlen=MAX_SEQUENCE_LENGTH)
print(train_data.shape)
print(test_data.shape)
print (word_index)
train_labels = train['dxcode']
test_labels = test['dxcode']
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()  # converts the character array to a numeric array; assigns levels to unique labels
le.fit(train_labels)
le.fit(test_labels)
train_labels = le.transform(train_labels)
test_labels = le.transform(test_labels)
print(le.classes_)
print(np.unique(train_labels, return_counts=True))
print(np.unique(test_labels, return_counts=True))
le.inverse_transform(1)
labels_train = to_categorical(np.asanyarray(train_labels))
labels_test = to_categorical(np.asarray(test_labels))
print('Shape of data tensor:', train_data.shape)
print('Shape of label tensor:', labels_train.shape)
print('Shape of label tensor:', labels_test.shape)
EMBEDDING_DIM = 100
print(MAX_SEQUENCE_LENGTH)
print('Training model.')
model = Sequential()
model.add(Embedding(MAX_NB_WORDS,
EMBEDDING_DIM,
input_length=MAX_SEQUENCE_LENGTH
))
model.add(Dropout(0.2))
model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(23, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['acc'],)
model.fit(train_data, labels_train,
batch_size=32,
epochs=10,
validation_data=(test_data, labels_test))
model.evaluate(test_data, labels_test)
pred = model.predict(test_data)
pred
# print(model.layers)
for layer in model.layers:
    print(layer)
import keras.backend as K
emd = K.function(inputs=[model.layers[0].input],
outputs=[model.layers[0].output])
rbind = np.concatenate((train_data, test_data), axis=0)
print(rbind.shape)
### Submissions file
test_results = model.predict_classes(rbind)
#print(test_results)
test_labels = le.inverse_transform(test_results)
#test_labels = [le.inverse_transform(i) for i in test_results]
submissions_CNN = pd.DataFrame({'id': raw_data['Claimno'], "label": test_labels})
submissions_CNN.to_csv("/home/riaz.k/Desktop/submissions.csv",index=False)
Each text document can be labelled with more than one label, so how can I do multi-label classification on this dataset? I've read a lot of documentation from sklearn, but I can't seem to find the right way to do multi-label classification. Thanks in advance for any help.
Are you getting the error on this line:
train_labels = le.transform(train_labels)
If yes, then it's because in the line just above it you are doing this:
le.fit(test_labels)
This makes the encoder forget the previous data (from the call to fit() on the line above it) and only remember the labels in test_labels. So when a label that is present in train but not in test comes along, it throws this error.
You need to replace the lines:
le.fit(train_labels)
le.fit(test_labels)
with this:
# I am using .tolist() because I observe that your
# train_labels, test_labels are pandas Series objects
le.fit(train_labels.tolist() + test_labels.tolist())
I am feeding variable-size images (278 images in total, of different sizes, 139 in each category) into my CNN model. Since a CNN requires fixed-size images, the solution I found for this was to set input_shape=(None, None, 1) (for the TensorFlow backend and grayscale). But this solution does not work with a Flatten layer, so from there the only solution I found was to use GlobalMaxPooling or GlobalAveragePooling. Using these facts, I am building a CNN model in Keras to train my network with the following code:
import os,cv2
import numpy as np
from sklearn.utils import shuffle
from keras import backend as K
from keras.utils import np_utils
from keras.models import Sequential
from keras.optimizers import SGD,RMSprop,adam
from keras.layers import Conv2D, MaxPooling2D,BatchNormalization,GlobalAveragePooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import regularizers
from keras import initializers
from skimage.io import imread_collection
from keras.preprocessing import image
from keras import Input
import keras
from keras import backend as K
#%%
PATH = os.getcwd()
# Define data path
data_path = PATH+'/current_exp'
data_dir_list = os.listdir(data_path)
img_rows=None
img_cols=None
num_channel=1
# Define the number of classes
num_classes = 2
img_data_list=[]
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    print('Loaded the images of dataset-' + '{}\n'.format(dataset))
    for img in img_list:
        input_img = cv2.imread(data_path + '/' + dataset + '/' + img, 0)
        img_data_list.append(input_img)
img_data = np.array(img_data_list)
if num_channel == 1:
    if K.image_dim_ordering() == 'th':
        img_data = np.expand_dims(img_data, axis=1)
        print(img_data.shape)
    else:
        img_data = np.expand_dims(img_data, axis=4)
        print(img_data.shape)
else:
    if K.image_dim_ordering() == 'th':
        img_data = np.rollaxis(img_data, 3, 1)
        print(img_data.shape)
#%%
num_classes = 2
#Total 278 sample, 139 for 0 category and 139 for category 1
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,),dtype='int64')
labels[0:138]=0
labels[138:]=1
x,y = shuffle(img_data,labels, random_state=2)
y = keras.utils.to_categorical(y, 2)
model = Sequential()
model.add(Conv2D(32,(2,2),input_shape=(None,None,1),activation='tanh',kernel_initializer=initializers.glorot_uniform(seed=100)))
model.add(Conv2D(32, (2,2),activation='tanh'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (2,2),activation='tanh'))
model.add(Conv2D(64, (2,2),activation='tanh'))
model.add(MaxPooling2D())
model.add(Dropout(0.25))
#model.add(Flatten())
model.add(GlobalAveragePooling2D())
model.add(Dense(256,activation='tanh'))
model.add(Dropout(0.25))
model.add(Dense(2,activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='rmsprop',metrics=['accuracy'])
model.fit(x, y,batch_size=1,epochs=5,verbose=1)
But I am getting the following error:
ValueError: Error when checking input: expected conv2d_1_input to have 4 dimensions, but got array with shape (278, 1)
How do I solve it?
In the docs for Conv2D it says that the input tensor has to be in this format:
(samples, channels, rows, cols)
I believe you can't have a variable input size unless your network is fully convolutional.
Maybe what you want to do is to keep it to a fixed input size, and just resize the image to that size before feeding it into your network?
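For example, a minimal sketch of that resize approach (the 128×128 target size is just an assumption, and img_data_list is the list of grayscale images loaded in the question):
# Sketch: resize every image to a fixed size before stacking them into one array
import cv2
import numpy as np

fixed = [cv2.resize(img, (128, 128)) for img in img_data_list]
x = np.array(fixed).reshape(-1, 128, 128, 1).astype('float32') / 255.0  # (samples, rows, cols, channels)
# then use input_shape=(128, 128, 1) in the first Conv2D layer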
Your array with input data cannot have variable dimensions (this is a numpy limitation).
So the array, instead of being a regular 4-dimensional array of numbers, is being created as an array of arrays.
Because of this limitation, you should fit each image individually:
for epoch in range(epochs):
    for img, label in zip(x, y):
        # expand the first dimension of the image to act as a batch dimension
        img = img.reshape((1,) + img.shape)        # print and check there are 4 dimensions, like (1, width, height, 1)
        label = label.reshape((1,) + label.shape)  # print and check there are 2 dimensions, like (1, classes)
        model.train_on_batch(img, label)           # plus any other arguments you need
I'm trying to understand how to use an LSTM to classify a certain dataset that I have.
I researched and found this Keras example using the IMDB dataset:
https://github.com/fchollet/keras/blob/master/examples/imdb_lstm.py
However, I'm confused about how the dataset must be processed for input.
I know Keras has text pre-processing methods, but I'm not sure which to use.
The x contains n lines of text and the y classifies each text by happiness/sadness. Basically, 1.0 means 100% happy and 0.0 means totally sad. The numbers may vary, for example 0.25 and so on.
So my question is: how do I input x and y properly? Do I have to use a bag of words?
Any tip is appreciated!
I coded this below but I keep getting the same error:
# ('Bad input argument to theano function with name ... at index 1(0-based)',
#  'could not convert string to float: negative')
import keras.preprocessing.text
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
print('Loading data...')
import pandas
thedata = pandas.read_csv("dataset/text.csv", sep=', ', delimiter=',', header='infer', names=None)
x = thedata['text']
y = thedata['sentiment']
x = x.iloc[:].values
y = y.iloc[:].values
###################################
tk = keras.preprocessing.text.Tokenizer(nb_words=2000, filters=keras.preprocessing.text.base_filter(), lower=True, split=" ")
tk.fit_on_texts(x)
x = tk.texts_to_sequences(x)
###################################
max_len = 80
print "max_len ", max_len
print('Pad sequences (samples x time)')
x = sequence.pad_sequences(x, maxlen=max_len)
#########################
max_features = 20000
model = Sequential()
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, input_length=max_len, dropout=0.2))
model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
model.fit(x, y=y, batch_size=200, nb_epoch=1, verbose=1, validation_split=0.2, show_accuracy=True, shuffle=True)
# at index 1(0-based)', 'could not convert string to float: negative')
Review how you are using your CSV parser to read the text in. Ensure that the fields are in the format Text, Sentiment if you want to make use of the parser as you've written it in your code.
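For example, something along these lines (a sketch only; the column names text and sentiment are taken from your code, so check them against the actual CSV header):
# Sketch: read the CSV with a single, unambiguous separator and make sure the
# sentiment column is numeric before training
import pandas
thedata = pandas.read_csv("dataset/text.csv", sep=',', header=0)
print(thedata.dtypes)                     # 'sentiment' should be a float column, not object
thedata['sentiment'] = pandas.to_numeric(thedata['sentiment'], errors='coerce')
thedata = thedata.dropna(subset=['sentiment'])
x = thedata['text'].values
y = thedata['sentiment'].values.astype('float32')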