I'm trying to fix an error that (I believe) comes from this line:
K.function([model2.layers[0].input], [model2.layers[-1].output])
The full code can be found here CODE; in this post I show only the problematic part:
inputs2 = Input(shape=(X_train2.shape[1], X_train2.shape[2]))
lstm2 = LSTM(128, return_sequences=True, dropout=0.3)(inputs2, training=True)
lstm2 = LSTM(32, return_sequences=False, dropout=0.3)(lstm2, training=True)
dense2 = Dense(50)(lstm2)
out2 = Dense(1)(dense2)
model2 = Model(inputs2, out2)
model2.compile(loss='mse', optimizer='adam', metrics=['mse'])
history = model2.fit(X_train2, y_train2, epochs=2, batch_size=128, verbose=2, shuffle=True)
# function for stochastic dropout:
def stoc_drop2(R, NN):
    trans_pred = scaler2.transform(XE[len(df_x_train1):].reshape(-1, XE.shape[-1])).reshape(-1, SEQUENCE_SIZE, XE.shape[-1])
    print(">>>", trans_pred.shape)
    NN_pred = NN([trans_pred, R])
    return np.vstack(NN_pred)

NN = K.function([model2.layers[0].input], [model2.layers[-1].output])

for i in tqdm.tqdm(range(0, 100)):
    scores2.append(mean_absolute_error(y_test2, stoc_drop2(0.5, NN)))
When running the code I get the following:
ValueError: Layer "model_1" expects 1 input(s), but it received 2 input tensors. Inputs received: [<tf.Tensor: shape=(16482, 10, 3), dtype=float64, numpy=
array(...)>, <tf.Tensor: shape=(), dtype=float32, numpy=0.5>]
NN expects to receive one input tensor, but the dropout value is also passed:
NN_pred = NN([trans_pred, R])
Partial solution:
I removed R and used just
NN([trans_pred])
I still do not understand how to pass the dropout parameter to the model through K.function().
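Note that the extra list element in the old K.function idiom is the learning phase flag (0 or 1), not the dropout rate; the rate is fixed when the layer is built and cannot be changed through K.function. Also, because your LSTM layers were called with training=True, dropout is already permanently on, so the simplified NN([trans_pred]) is already stochastic. A hedged sketch of both idioms (assuming the preprocessing in stoc_drop2 stays unchanged):

# TF1-style graph mode: pass the learning phase (1 = training -> dropout on)
NN = K.function([model2.input, K.learning_phase()], [model2.output])
NN_pred = NN([trans_pred, 1])

# TF2 eager mode: skip K.function and call the model directly
NN_pred = model2(trans_pred, training=True).numpy()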
I'm a newbie to neural networks. I'm doing text classification research using an MLP model with Keras: an input layer of 900 nodes, 2 hidden layers, and 2 outputs.
The code I use is as follows:
# Split data into training & testing sets (80:20)
Train_X, Test_X, Train_Y, Test_Y = model_selection.train_test_split(dataset['review'],dataset['sentimen'],test_size=0.2, random_state=8)
Encoder = LabelEncoder()
Train_Y = Encoder.fit_transform(Train_Y)
Test_Y = Encoder.transform(Test_Y)  # reuse the encoding fitted on the training labels
Tfidf_vect = TfidfVectorizer(max_features=None)
Tfidf_vect.fit(dataset['review'])
Train_X_Tfidf = Tfidf_vect.transform(Train_X)
Test_X_Tfidf = Tfidf_vect.transform(Test_X)
#ANN Architecture
model = Sequential()
model.add(Dense(units = 100, activation = 'sigmoid', input_shape=(32, 900)))
model.add(Dense(units = 100, activation = 'sigmoid'))
model.add(Dense(units = 2, activation = 'sigmoid'))
opt = Adam(learning_rate=0.001)
model.compile(loss='binary_crossentropy', optimizer=opt,
              metrics=['accuracy'])
print(model.summary())
#Hyperparameter
epochs= 100
batch_size= 32
es = EarlyStopping(monitor="val_loss",mode='min',patience=10)
model_prediction = model.fit(Train_X_Tfidf, Train_Y, epochs=epochs,
                             batch_size=batch_size, verbose=1,
                             validation_split=0.1, callbacks=[es])
But I'm getting this error:
/usr/local/lib/python3.8/dist-packages/keras/engine/data_adapter.py in train_validation_split(arrays, validation_split)
1478 unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]
1479 if unsplitable:
-> 1480 raise ValueError(
1481 "`validation_split` is only supported for Tensors or NumPy "
1482 "arrays, found following types in the input: {}".format(unsplitable))
ValueError: `validation_split` is only supported for Tensors or NumPy arrays, found following types in the input: [<class 'scipy.sparse.csr.csr_matrix'>]
How can I fix it? Thank you so much.
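A hedged sketch of the usual fix: validation_split can only slice Tensors or NumPy arrays, and TfidfVectorizer.transform returns a scipy.sparse matrix, so densify it first (assuming it fits in memory; with max_features=None the dense matrix can be large, in which case pass an explicit validation_data split instead):

Train_X_Tfidf = Train_X_Tfidf.toarray()  # scipy.sparse -> dense NumPy array
Test_X_Tfidf = Test_X_Tfidf.toarray()
model_prediction = model.fit(Train_X_Tfidf, Train_Y, epochs=epochs,
                             batch_size=batch_size, verbose=1,
                             validation_split=0.1, callbacks=[es])

Note also that input_shape should be the per-sample shape, (900,), not (32, 900); the batch size does not belong in input_shape.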
I just need to concatenate a flatten layer and a feature vector in Keras. This is the code:
#custom parameters
n_features = 38
vgg_model = VGGFace(include_top=False, input_shape=(224, 224, 3))
last_layer = vgg_model.get_layer('pool5').output
x = Flatten(name='flatten')(last_layer)
# feature vector
feature_vector = Input(shape = (n_features,))
conc = concatenate([x, feature_vector], axis=1)
layer_intermediate = Dense(128, activation='relu', name='fc6')(conc)
layer_intermediate1 = Dense(32, activation='relu', name='fc7')(layer_intermediate)
out = Dense(5, activation='softmax', name='fc8')(layer_intermediate1)
custom_vgg_model = Model(vgg_model.input, out)
But I'm getting this error:
---> 20 custom_vgg_model = Model(vgg_model.input, out)
ValueError: Graph disconnected: cannot obtain value for tensor Tensor("input_88:0", shape=(?, 38), dtype=float32) at layer "input_88". The following previous layers were accessed without issue: ['input_87', 'conv1_1', 'conv1_2', 'pool1', 'conv2_1', 'conv2_2', 'pool2', 'conv3_1', 'conv3_2', 'conv3_3', 'pool3', 'conv4_1', 'conv4_2', 'conv4_3', 'pool4', 'conv5_1', 'conv5_2', 'conv5_3', 'pool5', 'flatten']
Btw the shape of the flatten layer is (None, 25088)
Since your feature_vector is also an Input, add feature_vector to the inputs when you define the Model:
custom_vgg_model = Model([vgg_model.input,feature_vector], out)
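As a hedged follow-up sketch (images, features, and labels are hypothetical array names): with two inputs, fit and predict also need a list of two arrays, in the same order as the Model's inputs:

# images: (n_samples, 224, 224, 3), features: (n_samples, 38), labels: (n_samples, 5)
custom_vgg_model.compile(loss='categorical_crossentropy', optimizer='adam')
custom_vgg_model.fit([images, features], labels, epochs=10, batch_size=32)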
I am trying to implement a neural network where I merge/concatenate a fully connected neural network with a convolutional neural network. But when I fit the model, I get the following error:
ValueError: All input arrays (x) should have the same number of
samples. Got array shapes: [(1, 100, 60, 4500), (100, 4500)]
I have two different inputs:
image (dimensions 1, 100, 60, 4500), where 1 is the channel, 100 is the number of samples, and 60 x 4500 is the size of each image. This goes to my convolutional neural network.
positions (dimensions 100, 4500), where 100 refers to the samples.
The dimension of my output is (100, 2).
The code for my neural network is:
### Convolutional neural network
b1 = Sequential()
b1.add(Conv2D(128*2, kernel_size=3, activation='relu', data_format='channels_first',
              input_shape=(100, 60, 4500)))
b1.add(Conv2D(128*2, kernel_size=3, activation='relu'))
b1.add(Dropout(0.2))
b1.add(Conv2D(128*2, kernel_size=4, activation='relu'))
b1.add(Dropout(0.2))
b1.add(Flatten())
b1.summary()

### Fully connected feed-forward neural network
b2 = Sequential()
b2.add(Dense(64, input_shape=(4500,), activation='relu'))
b2.add(Dropout(0.1))
b2.summary()

model = Sequential()

### Concatenating the two networks
concat = concatenate([b1.output, b2.output], axis=-1)
x = Dense(256, activation='relu', kernel_initializer='normal')(concat)
x = Dropout(0.25)(x)
output = Dense(2, activation='softmax')(x)

model = Model([b1.input, b2.input], [output])
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
history = model.fit([image, positions], Ytest, batch_size=10,
                    epochs=1,
                    verbose=1)
Also, the reason my 'image' array is 4-dimensional is that it started out as (100, 60, 4500), but then I ran into the following error:
ValueError: Error when checking input: expected conv2d_10_input to have 4 dimensions, but got array with shape (100, 60, 4500)
Upon googling I found out that the layer expects the number of channels as an input too. After I added the channel dimension, this error went away, but then I ran into the error I mentioned in the beginning.
So can someone tell me how to solve that first error? Help would be appreciated.
It is not good practice to mix the Sequential and Functional APIs. You can implement the model like this:
i1 = Input(shape=(1, 60, 4500))
c1 = Conv2D(128*2, kernel_size=3, activation='relu', data_format='channels_first')(i1)
c1 = Conv2D(128*2, kernel_size=3, activation='relu')(c1)
c1 = Dropout(0.2)(c1)
c1 = Conv2D(128*2, kernel_size=4, activation='relu')(c1)
c1 = Dropout(0.2)(c1)
c1 = Flatten()(c1)

i2 = Input(shape=(4500,))
c2 = Dense(64, activation='relu')(i2)  # input_shape is not needed in the Functional API
c2 = Dropout(0.2)(c2)

c = concatenate([c1, c2])
x = Dense(256, activation='relu', kernel_initializer='normal')(c)
x = Dropout(0.25)(x)
output = Dense(2, activation='softmax')(x)

model = Model([i1, i2], [output])
model.summary()
Note that the shape of i1 is (1, 60, 4500): since you set data_format='channels_first' in the Conv2D layers, the channel dimension (1) comes first.
Compile the model like this:
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
Placeholder data:
import numpy as np
X_img = np.zeros((100, 1, 60, 4500))
X_pos = np.ones((100, 4500))
Y = np.zeros((100, 2))
Training:
history = model.fit([X_img, X_pos], Y, batch_size=1,
                    epochs=1,
                    verbose=1)
Your number of samples (batch size) should always be the first dimension, so your data should have shape (100, 1, 60, 4500) for the images and (100, 4500) for the positions. The data_format='channels_first' argument to Conv2D means that the channel axis is the first non-batch dimension.
You also need to change the input shape to (1, 60, 4500) in the first Conv2D layer.
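A hedged one-liner, assuming your existing image array really is (1, 100, 60, 4500) with the channel axis first: you can move the sample axis to the front with np.transpose instead of rebuilding the array:

import numpy as np
image = np.transpose(image, (1, 0, 2, 3))  # (1, 100, 60, 4500) -> (100, 1, 60, 4500)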
I'm working on a sentiment analysis task using the Universal Sentence Encoder (embed_size=512) with a CNN, but I get an error that says: Input 0 is incompatible with layer conv1d_6: expected ndim=3, found ndim=2.
I also want to know whether combining the Universal Sentence Encoder with a CNN this way is right or not.
pickle_in = open("X.pickle", "rb")
X = pickle.load(pickle_in)
X = X.tolist()  # convert X to a list; the embedding code works once the
                # pandas.Series is converted to a list
X = np.array(X, dtype=object)[:, np.newaxis]

pickle_in = open("Y.pickle", "rb")
Y = pickle.load(pickle_in)
Y = np.asarray(pd.get_dummies(Y), dtype=np.int8)

import tensorflow as tf
import tensorflow_hub as hub

module_url = "https://tfhub.dev/google/universal-sentence-encoder-large/3"
embed = hub.Module(module_url)

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15,
                                                    random_state=42)
X_train, X_Val, Y_train, Y_Val = train_test_split(X_train, Y_train, test_size=0.15,
                                                  random_state=42)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)
print(X_Val.shape, Y_Val.shape)
type(Y_test)

embed_size = embed.get_output_info_dict()['default'].get_shape()[1].value

def UniversalEmbedding(x):
    return embed(tf.squeeze(tf.cast(x, tf.string)),
                 signature="default", as_dict=True)["default"]

import keras
seed = 7
np.random.seed(seed)
# Conv1D and Dropout were missing from the original imports
from keras.layers import Input, Dense, concatenate, Activation, Conv1D, Dropout, GlobalMaxPooling1D
from keras import layers
from keras.models import Model

input_text = layers.Input(shape=(1,), dtype=tf.string)
embedding = layers.Lambda(UniversalEmbedding,
                          output_shape=(embed_size,))(input_text)
bigram_branch = Conv1D(filters=64, kernel_size=1, padding='same',
                       activation='relu', strides=1)(embedding)
bigram_branch = GlobalMaxPooling1D()(bigram_branch)
trigram_branch = Conv1D(filters=64, kernel_size=2, padding='same',
                        activation='relu', strides=1)(embedding)
trigram_branch = GlobalMaxPooling1D()(trigram_branch)
fourgram_branch = Conv1D(filters=64, kernel_size=3, padding='same',
                         activation='relu', strides=1)(embedding)
fourgram_branch = GlobalMaxPooling1D()(fourgram_branch)
merged = concatenate([bigram_branch, trigram_branch, fourgram_branch],
                     axis=1)
merged = Dense(512, activation='relu')(merged)
merged = Dropout(0.8)(merged)
merged = Dense(2)(merged)
output = Activation('sigmoid')(merged)
model = Model(inputs=[input_text], outputs=[output])  # was tweet_input, which is undefined
adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None,
                             decay=0.0, amsgrad=False)
model.compile(loss='mean_squared_error',
              optimizer=adam,
              metrics=['accuracy'])
model.summary()
You cannot pass the Universal Sentence Encoder output directly to Conv1D, because Conv1D expects a tensor of shape [batch, sequence, feature] while the output of the Universal Sentence Encoder is [batch, feature]. As stated on tfhub.dev:
The input is variable length English text and the output is a 512
dimensional vector.
How can I fix this?
In my view, the easiest solution is to use ELMo from TensorFlow Hub. With ELMo you can map each sentence to [batch, sequence, feature] and then feed that into the Conv1D.
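A hedged sketch of that idea, using the same TF1-style hub.Module API as the question (assuming the 'elmo' output of this module, which is per-token with shape [batch, max_length, 1024]; only the Lambda layer and its output_shape change):

import tensorflow as tf
import tensorflow_hub as hub
from keras import layers

elmo = hub.Module("https://tfhub.dev/google/elmo/2", trainable=False)

def ElmoEmbedding(x):
    # "elmo" gives per-token embeddings of shape [batch, max_length, 1024],
    # i.e. the [batch, sequence, feature] layout Conv1D expects
    return elmo(tf.squeeze(tf.cast(x, tf.string)),
                signature="default", as_dict=True)["elmo"]

input_text = layers.Input(shape=(1,), dtype=tf.string)
embedding = layers.Lambda(ElmoEmbedding, output_shape=(None, 1024))(input_text)
# ...the Conv1D / GlobalMaxPooling1D branches can stay as in the question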
I think the code will speak for itself, but I trained a model that I now want to use to predict on some new input data. The new input data seems to have the wrong dimensions, though. Below you can see the code and error messages for both the model and the (attempted) prediction.
tokenizer = Tokenizer(num_words=10000)
df = pd.read_csv('/home/paperspace/Sentiment Analysis Dataset.csv', index_col=0,
                 error_bad_lines=False)
y = list(df['Sentiment'])
tokenizer.fit_on_texts(list(df['SentimentText']))
X = tokenizer.texts_to_sequences(list(df['SentimentText']))
X = pad_sequences(X)
print("Done, fitting on texts.")

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, shuffle=True)

model = Sequential()

# Creates the word embeddings.
embedding_vector_dim = 32
model.add(Embedding(10000, embedding_vector_dim, input_length=X.shape[1]))
model.add(Dropout(0.2))
model.add(LSTM(128))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

model.fit(numpy.array(X_train), numpy.array(y_train),
          batch_size=128,
          epochs=1,
          validation_data=(numpy.array(X_test), numpy.array(y_test)))
score, acc = model.evaluate(numpy.array(X_test), numpy.array(y_test),
                            batch_size=128)
model.save('./sentiment_seq.h5')
print('Test score:', score)
print('Test accuracy:', acc)
Now for the prediction attempt and its error message.
text = "this is actually a very bad movie."
tokenizer = Tokenizer()
tokenizer.fit_on_texts(list(text))
X = tokenizer.texts_to_sequences(list(text))
X = pad_sequences(X)
X_flat = np.array([X.flatten()])
model = load_model('sentiment_test.h5')
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
print(model.predict(X, batch_size = 1, verbose = 1))
ValueError: Error when checking : expected embedding_1_input to have shape (None, 116) but got array with shape (1, 38)
So basically, why am I getting this error when the preprocessing is the same for training and predicting, and how can I know what the expected input should be before seeing the error message?
If you're not working with a fixed input length, you should not define an input_length in the embedding layer.
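More concretely, a hedged sketch of the prediction path, assuming you keep the trained width of 116: reuse the tokenizer fitted on the training corpus (persist it alongside the model, e.g. with pickle) instead of fitting a new one, wrap the string in a list (list(text) splits it into single characters), and pad to the same maxlen:

# `tokenizer` is the instance fitted on the training data, reloaded here
text = "this is actually a very bad movie."
seq = tokenizer.texts_to_sequences([text])  # [text], not list(text)
seq = pad_sequences(seq, maxlen=116)        # match the training input_length
print(model.predict(seq, batch_size=1, verbose=1))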