Using the accepted answer here, I'm trying to change a layer's weights with the set_weights() method, but it doesn't seem to work.
Here is the code I used:
from keras.layers import Input
from keras.layers.convolutional import Convolution2D
from keras.models import Model
import numpy as np
print("Building Model...")
inp = Input(shape=(20,20,1))
output = Convolution2D(1, (3,3), padding='same',bias=False)(inp)
model_network=Model(inp, output)
print("Weights before change:")
print (model_network.layers[1].get_weights())
w = np.asarray([
[[[
[2,2,2],
[2,2,2],
[2,2,2]
]]]
])
w=np.reshape(w,np.shape(model_network.layers[1].get_weights()))
#print("W:",w)
model_network.layers[1].set_weights(w)
print("Weights after change:")
print(model_network.layers[1].get_weights())
but the weights remain the same.
I am implementing a custom loss function, as in the code below, for a simple classification task. However, when I run the code I get the error ValueError: No gradients provided for any variable:
import os
os.environ['KERAS_BACKEND'] = "tensorflow"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import statistics as st
import tensorflow as tf
from keras.utils import np_utils
# if the probability is greater than 0.75 then set the value to 1 for buy or sell else set it to None
# convert the y_pred to 0 and 1 using argmax function
# add the two matrices y_pred and y_true
# if value is 2 then set that to 0
# multiply by misclassification matrix
# add the losses to give a unique number
def custom_loss(y_true, y_pred):
    y_pred = y_pred.numpy()
    y_pred_dummy = np.zeros_like(y_pred)
    y_pred_dummy[np.arange(len(y_pred)), y_pred.argmax(1)] = 1
    y_pred = y_pred_dummy
    y_true = y_true.numpy()
    y_final = y_pred + y_true
    y_final[y_final == 2] = 0
    w_array = [[1, 1, 5], [1, 1, 1], [5, 1, 1]]
    return tf.convert_to_tensor(np.sum(np.dot(y_final, w_array)))
model = keras.Sequential()
model.add(layers.Dense(32, input_dim=4, activation='relu'))
model.add(layers.Dense(16, input_dim=4, activation='relu'))
model.add(layers.Dense(8, input_dim=4, activation='relu'))
model.add(layers.Dense(3, activation='softmax'))
model.compile(loss=custom_loss, optimizer='adam', run_eagerly=True)
I do not understand what I am doing incorrectly here. I read through the issues on TensorFlow, and one suggested cause is that the link between the loss function and the input variables is broken. But I am using y_true in the loss function.
Thanks
You cannot use NumPy within a custom loss function. The loss function is part of the computation graph and must operate on tensors, not arrays; NumPy operations do not support backpropagation of gradients.
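If the intent of w_array is to penalize 0<->2 confusions more heavily, one differentiable alternative is to keep everything in TensorFlow ops and weight the cross-entropy by the misclassification cost. This is only a sketch under that assumption, not the single correct fix; note that argmax and item assignment cannot propagate gradients, so they must go:
import tensorflow as tf
# Misclassification cost matrix from the question.
W = tf.constant([[1., 1., 5.],
                 [1., 1., 1.],
                 [5., 1., 1.]])
def custom_loss(y_true, y_pred):
    y_true = tf.cast(y_true, y_pred.dtype)
    # y_true is one-hot, so the matmul selects the cost row of the true
    # class; the sum is the expected cost of the predicted distribution.
    cost = tf.reduce_sum(tf.matmul(y_true, W) * y_pred, axis=-1)
    cce = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
    return cce * cost
model.compile(loss=custom_loss, optimizer='adam')
With tensor-only ops there is also no need for run_eagerly=True.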
I created a Keras/TensorFlow model, much influenced by this guide, which looks like this:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import time
import numpy as np
import sys
from keras import losses
model = keras.Sequential()
model.add(layers.Dense(nodes,activation = tf.keras.activations.relu, input_shape=[len(data_initial.keys())]))
model.add(layers.Dense(64,activation = tf.keras.activations.relu))
model.add(layers.Dropout(0.1, noise_shape=None))
model.add(layers.Dense(1))
model.compile(loss='mse',  # <-------- Here we define the loss function
              optimizer=tf.keras.optimizers.Adam(lr=0.01,
                                                 beta_1=0.01,
                                                 beta_2=0.001,
                                                 epsilon=0.03),
              metrics=['mae', 'mse'])
model.fit(train_data,train_labels,epochs = 200)
It is a regression model, and instead of loss='mse' I would like to use the tf.keras MSE loss together with an L2 regularization term. My questions are:
1. How can I add a predefined regularizer function (I think it is this one) into the model.compile statement?
2. How can I write a completely custom loss function and add it to model.compile?
You can add regularization either as a layer parameter or as a layer.
Using it as a layer parameter looks like this:
model.add(layers.Dense(8,
                       kernel_regularizer=regularizers.l2(0.01),
                       activity_regularizer=regularizers.l1(0.01)))
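Using it as a layer (the other option) can be done with Keras's ActivityRegularization layer, which adds a penalty on the activations flowing into it; a brief sketch:
from keras.layers import ActivityRegularization
model.add(layers.Dense(8))
model.add(ActivityRegularization(l1=0.01, l2=0.01))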
Sample code with the first dense layer regularized and a custom loss function:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import time
import numpy as np
import sys
from keras import losses
from keras import regularizers
import keras.backend as K
model = keras.Sequential()
model.add(layers.Dense(8, activation=tf.keras.activations.relu, input_shape=(8,),
                       kernel_regularizer=regularizers.l2(0.01),
                       activity_regularizer=regularizers.l1(0.01)))
model.add(layers.Dense(4,activation = tf.keras.activations.relu))
model.add(layers.Dropout(0.1, noise_shape=None))
model.add(layers.Dense(1))
def custom_loss(y_true, y_pred):
    return K.mean(K.square(y_true - y_pred))  # mean squared error
model.compile(loss=custom_loss,
              optimizer=tf.keras.optimizers.Adam(lr=0.01,
                                                 beta_1=0.01,
                                                 beta_2=0.001,
                                                 epsilon=0.03),
              metrics=['mae', 'mse'])
model.fit(np.random.randn(10,8),np.random.randn(10,1),epochs = 1)
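If you would rather fold the L2 term into the loss itself (your second question), here is a hedged sketch, assuming the penalty should cover all kernel weights. The factory closes over the model so the loss can reach its weights:
def make_mse_plus_l2(model, l2=0.01):
    def loss(y_true, y_pred):
        mse = K.mean(K.square(y_true - y_pred))
        # Sum of squared kernel weights across all layers.
        l2_term = sum(K.sum(K.square(w))
                      for w in model.trainable_weights if 'kernel' in w.name)
        return mse + l2 * l2_term
    return loss
model.compile(loss=make_mse_plus_l2(model), optimizer='adam', metrics=['mae', 'mse'])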
Attached is the link to the Entities file. I want to train a neural network to represent each entity as a vector. Attached is my code for training:
import pandas as pd
import numpy as np
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.models import Model
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Input
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
file_path = '/content/drive/My Drive/Colab Notebooks/Deep Learning/NLP/Data/entities.txt'
df = pd.read_csv(file_path, delimiter = '\t', engine='python', quoting = 3, header = None)
df.columns = ['Entity']
Entity = df['Entity']
X_train, X_test = train_test_split(Entity, test_size = 0.10)
print('Total Entities: {}'.format(len(Entity)))
print('Training Entities: {}'.format(len(X_train)))
print('Test Entities: {}'.format(len(X_test)))
vocab_size = len(Entity)
X_train_encode = [one_hot(d, vocab_size,lower=True, split=' ') for d in X_train]
X_test_encode = [one_hot(d, vocab_size,lower=True, split=' ') for d in X_test]
model = Sequential()
model.add(Embedding(input_length=1,input_dim=vocab_size, output_dim=100))
model.add(Flatten())
model.add(Dense(vocab_size, activation='softmax'))
model.compile(optimizer='adam', loss='mse', metrics=['acc'])
print(model.summary())
model.fit(X_train_encode, X_train_encode, epochs=20, batch_size=1000, verbose=1)
The following error is encountered when I try to execute the code:
Error when checking model input: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 1 array(s), but instead got the following list of 34826 arrays:
You are passing a list of NumPy arrays to model.fit. The following code produces a list of arrays for X_train_encode and X_test_encode:
X_train_encode = [one_hot(d, vocab_size,lower=True, split=' ') for d in X_train]
X_test_encode = [one_hot(d, vocab_size,lower=True, split=' ') for d in X_test]
Convert these lists into NumPy arrays before passing them to model.fit:
X_train_encode = np.array(X_train_encode)
X_test_encode = np.array(X_test_encode)
Also, I don't see the need to one_hot encode X_train and X_test: the Embedding layer expects integers (in your case, word indexes), not one-hot encoded values of the words' indexes. So if X_train and X_test are arrays of word indexes, you can feed them directly into model.fit.
EDIT:
Currently the 'mse' loss is being used. Since the last layer is a softmax layer, cross-entropy loss is more applicable here. And because the targets are integer class values (word indexes), sparse categorical cross-entropy should be used as the loss:
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
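Putting the suggestions together, a sketch of the corrected calls, assuming each entity encodes to a single integer index:
X_train_encode = np.array(X_train_encode)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
model.fit(X_train_encode, X_train_encode, epochs=20, batch_size=1000, verbose=1)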
I am finding that my model has a tensor of shape (?, 1, 60), and I want to reduce it to (?, 60). How can I do that? I'm not sure whether Reshape or Flatten can be applied with respect to a particular dimension. Any help?
Both layers will work, but in this case I prefer using keras.layers.Flatten. Here is an example:
from keras.layers import Input, Flatten
from keras.models import Model
import numpy as np
a = Input(shape=(1, 60))
b = Flatten()(a)
model = Model(inputs=a, outputs=b)
model.compile('sgd', 'mse')
pred = model.predict(x=np.ones(shape=(2, 1, 60)))
print(pred.shape)
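For completeness, the Reshape equivalent would be:
from keras.layers import Reshape
b = Reshape((60,))(a)
Flatten collapses all non-batch dimensions automatically, while Reshape requires the target shape explicitly; for a (?, 1, 60) tensor both produce the same (?, 60) result.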
I am trying to remove the top layers from a model I have previously trained.
This is the code I use:
import os
import h5py
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Activation, Dropout, Flatten, Dense
# KERAS_BACKEND=theano python
import keras
keras.backend.set_image_dim_ordering("th")
img_width, img_height = 150, 150
data_dir = '//shared_directory/projects/try_CD/data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
nb_epoch = 50
def make_bottleneck_features(model):
    datagen = ImageDataGenerator(rescale=1./255)
    generator = datagen.flow_from_directory(
        data_dir,
        target_size=(img_width, img_height),
        batch_size=32,
        class_mode=None,
        shuffle=False)
    bottleneck_features = model.predict_generator(generator, nb_validation_samples)
    return bottleneck_features
model=keras.models.load_model('/shared_directory/projects/think_exp/CD_M1.h5')
A = make_bottleneck_features(model)
model.summary()
for i in range(6):
    model.pop()
B = make_bottleneck_features(model)
model.summary()
Comparing the results of the two calls to model.summary(), I can see that the 6 topmost layers were indeed removed.
However, the model's output (saved to A and B) does not change after discarding these layers.
What is the source of that discrepancy?
How can I retrieve the output of the desired layer instead of that of the entire model?
Thanks in advance!
You can't drop layers like that; for the removal to take effect, you need to recompile the model (i.e., call model.compile).
But that's not the best way to obtain outputs from intermediate layers. You can instead use K.function (where K is keras.backend) to build a function from the model's input to the output of one of its layers, and then call that function. More details are available in this answer.
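For reference, a minimal sketch of the K.function approach; the layer index -7 here is hypothetical (pick the layer whose output you actually need), and input_batch stands in for your data:
import keras.backend as K
# Build a callable from the model's input to an intermediate layer's output.
get_intermediate = K.function([model.input], [model.layers[-7].output])
# It takes a list of input arrays and returns a list of output arrays.
intermediate_output = get_intermediate([input_batch])[0]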