Keras: Custom loss function: Dimension Issue (None, None)

I am constructing a neural network with Keras and need to define a simple custom loss function. However, I ran into a dimension problem with y_true while creating the custom loss function.
I inserted print statements in the loss function to verify the dimension of y_true (the target variable). They show that the dimension of y_true is (None, None), but it should be (None, 10). The dimension of y_pred is (None, 10).
Can anyone point out where the mistake is? Thanks a lot.
from keras.datasets import mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
from keras import models
from keras import layers
from keras import backend as K  # needed for K.int_shape and K.dot in the custom loss
network = models.Sequential()
network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))
network.add(layers.Dense(10, activation='softmax'))
def customLoss(yTrue, yPred):
    print(yTrue)
    print(yPred)
    print(K.int_shape(yTrue))
    return K.dot(yTrue, yPred)  # K.mean(K.dot(yTrue, yPred), axis=-1)
network.compile(optimizer='rmsprop',
                loss=customLoss,
                metrics=['accuracy'])  # loss='categorical_crossentropy',
train_images = train_images.reshape((60000, 28 * 28))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype('float32') / 255
from keras.utils import to_categorical
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
network.fit(train_images, train_labels, epochs=5, batch_size=128)
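For reference, here is a minimal sketch (my own illustration, not part of the original post, reusing the K import above) of how the loss can be written without relying on the static shape: Keras builds the target placeholder knowing only its rank, so K.int_shape(yTrue) prints (None, None), but the runtime shape is still (batch_size, 10), and an element-wise formulation sidesteps the issue entirely. The sign and scaling would still depend on the intended objective.
def customLoss(yTrue, yPred):
    # Runtime shapes of both tensors are (batch_size, 10) even though the
    # static shape of yTrue is unknown at graph-construction time.
    # Element-wise multiply + sum gives a per-sample dot product without
    # needing the static shape at all.
    return K.mean(K.sum(yTrue * yPred, axis=-1))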

Related

ValueError: Dimensions must be equal (keras)

I'm trying to train an autoencoder but have problems reshaping my X_train to fit it to my model, model().
from tensorflow import keras
from keras.layers import *
from keras.models import Model
from keras.models import Sequential
from keras.optimizers import Adam
from keras.optimizers import RMSprop
from keras.utils import plot_model
import numpy as np
X_train = np.array(X_train, dtype=np.float)
X_test =np.array(X_train, dtype=np.float)
X_train = X_train.reshape(len(X_train), 100,1)
X_test = X_test.reshape(len(X_test), 100,1)
#inputs = Input(shape=(230, 1,100))
epoch = 100
batch = 128
def model():
    m = Sequential()
    # ##m.add(Reshape((,)))
    m.add(Flatten())
    m.add(Dense(512, activation='relu'))
    m.add(Dense(128, activation='relu'))
    m.add(Dense(2, activation='linear'))
    m.add(Dense(128, activation='relu'))
    m.add(Dense(512, activation='relu'))
    m.add(Dense(784, activation='sigmoid'))
    m.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['accuracy'])
    # Fit data to model m
    m.fit(X_train, X_train, batch_size=batch, epochs=epoch)
    m.summary()
    #score = m.evaluate(X_test, Y_test, verbose = 0)
    #print('Test loss:' score[0])
    #print('Test accuracy:', score[1])
    #m.summary()
mod = model()
The dimensions of my data are the following:
X_train = (523, 100,1)
X_test = (523, 100,1)
To fix your issue, change the following:
X_train = X_train.reshape((-1, 100))
X_test = X_test.reshape((-1, 100))
Delete the Flatten layer and use 100 neurons for the last layer as stated in the comments.
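Putting those changes together, a rough sketch of the corrected model (my reconstruction, keeping the original layer sizes and reusing the imports and variables from the question) could look like this: the inputs are flat 100-dimensional vectors, so no Flatten layer is needed, and the output layer has 100 units so the reconstruction matches the input.
X_train = X_train.reshape((-1, 100))
X_test = X_test.reshape((-1, 100))
def model():
    m = Sequential()
    m.add(Dense(512, activation='relu', input_shape=(100,)))
    m.add(Dense(128, activation='relu'))
    m.add(Dense(2, activation='linear'))      # bottleneck
    m.add(Dense(128, activation='relu'))
    m.add(Dense(512, activation='relu'))
    m.add(Dense(100, activation='sigmoid'))   # output size matches the 100-dim input
    m.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['accuracy'])
    m.fit(X_train, X_train, batch_size=batch, epochs=epoch)
    m.summary()
    return m
mod = model()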

Keras VGG16 pretrained model accuracy does not increase

Hi guys, I have a problem with VGG16 in Keras.
I am trying to get higher accuracy, but it does not work.
I only have 46 training samples, 12 classes, and 26 validation samples.
Currently, the highest accuracy I can get is 0.18.
I tried changing the batch size to 2, but the result was worse than I expected.
I don't think I should set the number of training samples higher than the data I actually have.
What should I do to increase the accuracy?
This is my actual code:
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras.layers import Input, Flatten, Dense, Dropout
from keras.models import Model, Sequential
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
# dimensions of our images.
from keras.preprocessing.image import ImageDataGenerator
img_width, img_height = 224, 224
train_data_dir = 'database/train'
validation_data_dir = 'database/validation'
nb_train_samples = 46
nb_validation_samples = 26
epochs = 50
batch_size = 4
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
#Get back the convolutional part of a VGG network trained on ImageNet
vgg_conv = VGG16(weights='imagenet', include_top=True)
vgg_conv.summary()
print('VGG Pretrained Model loaded.')
#Add a layer where input is the output of the second last layer
x = Dense(12, activation='softmax', name='predictions')(vgg_conv.layers[-2].output)
model = Model(input=vgg_conv.input, output=x)
#In the summary, weights and layers from VGG part will be hidden, but they will be fit during the training
model.summary()
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 224,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 224)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
# compile model
# model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizers.RMSprop(lr=2e-4), metrics=['accuracy'])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# Train the model
history = model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples / batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples / batch_size)
# Save the model
model.save('vgg16_pretrained_5.h5')
# Check Performance
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
Since you have 12 classes and only 46 observations, that is roughly 4 observations per class (this is just a guess without even looking at the data set). With this little data, the NN model cannot even learn the pattern of the data and will eventually fail to generalize. So you need more than 2k observations at the very least for better results.

I keep getting a dimension error where it says it's expecting the input to have 4 dimensions but got an array with shape (2062, 64, 64).

x = np.load('/Users/bharddwajvemulapalli/Documents/tensorFlowProjects/Sign-language-digits-dataset 2/X.npy')
y = np.load('/Users/bharddwajvemulapalli/Documents/tensorFlowProjects/Sign-language-digits-dataset 2/Y.npy')
x = x/255
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation,Flatten, Conv2D, MaxPooling2D #dense means fully connected
from tensorflow.keras.callbacks import TensorBoard
print(x.shape) # (2062, 64, 64)
BATCH_SIZE = 32
model = Sequential()
model.add(Conv2D(64,(6,6), input_shape = (64,64,1))) #figure out this input shape parameter to make this work
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(3,3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Flatten())
model.add(Dense(1))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(1)) #output layer
model.add(Activation('sigmoid'))
model.compile(loss = 'sparse_categorical_crossentropy', optimizer ='adam' , metrics = ['accuracy'])
#x = np.arange(8445952)
#x= np.reshape(64,64,1)
model.fit(x,y,batch_size = 32, epochs = 2, validation_split =.1)
The above is my code. I thought that Keras would automatically attach the batch size to (64, 64, 1) so that I would get 4 dimensions, because when I add a fourth dimension myself, I get an error that it wasn't expecting an array of dimension 5.
x must be 4-dimensional, where the 4th dimension is the number of channels. Since in your case there is only 1 channel, you can add the channel dimension with:
if x.ndim == 3:
    x = np.expand_dims(x, axis=-1)
This will add a new dimension at the end.
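As a quick check (my own snippet, using a random array in place of the loaded X.npy), the added axis makes the array line up with the input_shape=(64, 64, 1) declared in the first Conv2D layer:
import numpy as np
x = np.random.rand(2062, 64, 64)    # stand-in for the loaded X.npy array
if x.ndim == 3:
    x = np.expand_dims(x, axis=-1)  # append a trailing channel axis
print(x.shape)                      # (2062, 64, 64, 1): batch, height, width, channels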

Using tf.data.Dataset as training input to Keras model NOT working

I have a simple code, which DOES work, for training a Keras model in Tensorflow using numpy arrays as features and labels. If I then wrap these numpy arrays using tf.data.Dataset.from_tensor_slices in order to train the same Keras model using a tensorflow dataset, I get an error. I haven't been able to figure out why (it may be a tensorflow or keras bug, but I may also be missing something). I'm on python 3, tensorflow is 1.10.0, numpy is 1.14.5, no GPU involved.
OBS1: The possibility of using tf.data.Dataset as a Keras input is shown in https://www.tensorflow.org/guide/keras, under "Input tf.data datasets".
OBS2: In the code below, the code under "#Train with numpy arrays" is being executed, using numpy arrays. If this code is commented out and the code under "#Train with tf.data datasets" is used instead, the error will be reproduced.
OBS3: In line 13, which is commented and starts with "###WORKAROUND 1###", if the comment is removed and the line is used for tf.data.Dataset inputs, the error changes, even though I can't completely understand why.
The complete code is:
import tensorflow as tf
import numpy as np
np.random.seed(1)
tf.set_random_seed(1)
print(tf.__version__)
print(np.__version__)
#Import mnist dataset as numpy arrays
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()#Import
x_train, x_test = x_train / 255.0, x_test / 255.0 #normalizing
###WORKAROUND 1###y_train, y_test = (y_train.astype(dtype='float32'), y_test.astype(dtype='float32'))
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1]*x_train.shape[2])) #reshaping 28 x 28 images to 1D vectors, similar to Flatten layer in Keras
batch_size = 32
#Create a tf.data.Dataset object equivalent to this data
tfdata_dataset_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
tfdata_dataset_train = tfdata_dataset_train.batch(batch_size).repeat()
#Creates model
keras_model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2, seed=1),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
#Compile the model
keras_model.compile(optimizer='adam',
                    loss=tf.keras.losses.sparse_categorical_crossentropy,
                    metrics=['accuracy'])
#Train with numpy arrays
keras_training_history = keras_model.fit(x_train,
                                         y_train,
                                         initial_epoch=0,
                                         epochs=1,
                                         batch_size=batch_size
                                         )
#Train with tf.data datasets
#keras_training_history = keras_model.fit(tfdata_dataset_train,
#                                         initial_epoch=0,
#                                         epochs=1,
#                                         steps_per_epoch=60000//batch_size
#                                         )
print(keras_training_history.history)
The error observed when using tf.data.Dataset as input is:
(...)
ValueError: Tensor conversion requested dtype uint8 for Tensor with dtype float32: 'Tensor("metrics/acc/Cast:0", shape=(?,), dtype=float32)'
During handling of the above exception, another exception occurred:
(...)
TypeError: Input 'y' of 'Equal' Op has type float32 that does not match type uint8 of argument 'x'.
The error when removing the comment from line 13, as commented above in OBS3, is:
(...)
tensorflow.python.framework.errors_impl.InvalidArgumentError: In[0] is not a matrix
[[Node: dense/MatMul = MatMul[T=DT_FLOAT, _class=["loc:#training/Adam/gradients/dense/MatMul_grad/MatMul_1"], transpose_a=false, transpose_b=false, _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_sequential_input_0_0, dense/MatMul/ReadVariableOp)]]
Any help would be appreciated, including comments that you were able to reproduce the errors, so I can report the bug if it is the case.
I just upgraded to TensorFlow 1.10 to execute this code. I think that is the answer, which is also discussed in the other Stack Overflow thread.
This code executes, but only if I remove the normalization, as that line seems to use too much CPU memory (I see messages indicating that). I also reduced the cores.
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, Input
np.random.seed(1)
tf.set_random_seed(1)
batch_size = 128
NUM_CLASSES = 10
print(tf.__version__)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
#x_train, x_test = x_train / 255.0, x_test / 255.0 #normalizing
def tfdata_generator(images, labels, is_training, batch_size=128):
    '''Construct a data generator using tf.Dataset'''
    def preprocess_fn(image, label):
        '''A transformation function to preprocess raw data
        into trainable input. '''
        x = tf.reshape(tf.cast(image, tf.float32), (28, 28, 1))
        y = tf.one_hot(tf.cast(label, tf.uint8), NUM_CLASSES)
        return x, y

    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    if is_training:
        dataset = dataset.shuffle(1000)  # depends on sample size
    # Transform and batch data at the same time
    dataset = dataset.apply(tf.contrib.data.map_and_batch(
        preprocess_fn, batch_size,
        num_parallel_batches=2,  # cpu cores
        drop_remainder=True if is_training else False))
    dataset = dataset.repeat()
    dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
    return dataset
training_set = tfdata_generator(x_train, y_train,is_training=True, batch_size=batch_size)
testing_set = tfdata_generator(x_test, y_test, is_training=False, batch_size=batch_size)
inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, (3, 3), activation='relu', padding='valid')(inputs)
x = MaxPool2D(pool_size=(2, 2))(x)
x = Conv2D(64, (3, 3), activation='relu')(x)
x = MaxPool2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
outputs = Dense(NUM_CLASSES, activation='softmax')(x)
keras_model = tf.keras.Model(inputs, outputs)
#Compile the model
keras_model.compile('adam', 'categorical_crossentropy', metrics=['acc'])
#Train with tf.data datasets
keras_training_history = keras_model.fit(
    training_set.make_one_shot_iterator(),
    steps_per_epoch=len(x_train) // batch_size,
    epochs=5,
    validation_data=testing_set.make_one_shot_iterator(),
    validation_steps=len(x_test) // batch_size,
    verbose=1)
print(keras_training_history.history)
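If the normalization is still wanted, one option (my own sketch, not part of the original answer) is to fold it into preprocess_fn, so the scaling is done per batch element on the float32 tensor instead of materializing a full floating-point copy of the dataset in host memory:
def preprocess_fn(image, label):
    '''Preprocess raw data into trainable input, with normalization folded in.'''
    x = tf.reshape(tf.cast(image, tf.float32), (28, 28, 1)) / 255.0  # scale pixels to [0, 1]
    y = tf.one_hot(tf.cast(label, tf.uint8), NUM_CLASSES)
    return x, y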
Installing the tf-nightly build, together with changing dtypes of some tensors (the error changes after installing tf-nightly), solved the problem, so it is an issue which (hopefully) will be solved in 1.11.
Related material: https://github.com/tensorflow/tensorflow/issues/21894
I am wondering how Keras is able to do 5 epochs when make_one_shot_iterator() only supports iterating once through a dataset?
It could be given something like iterations = len(y_train) * epochs; this is shown here for TF 1.x.
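A small illustration of why this works with the code above (my own sketch): tfdata_generator() calls dataset.repeat() with no count, so the dataset repeats indefinitely and a single one-shot iterator can serve every batch Keras asks for.
# dataset.repeat() with no argument repeats the data indefinitely, so one
# one-shot iterator covers steps_per_epoch * epochs batches:
steps_per_epoch = len(x_train) // batch_size  # 60000 // 128 = 468
total_batches_drawn = steps_per_epoch * 5     # 5 epochs in the fit() call above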
The code from Mohan Radhakrishnan still works in TF 2.x with small corrections (updating objects to their new classes in TF 2.x) to bring it up to date; make_one_shot_iterator() is no longer needed.
# >> author: Mohan Radhakrishnan
import tensorflow as tf
import tensorflow.keras
import numpy as np
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, Input
np.random.seed(1)
tf.random.set_seed(1)
batch_size = 128
NUM_CLASSES = 10
print(tf.__version__)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
#x_train, x_test = x_train / 255.0, x_test / 255.0 #normalizing
def tfdata_generator(images, labels, is_training, batch_size=128):
    '''Construct a data generator using tf.Dataset'''
    def preprocess_fn(image, label):
        '''A transformation function to preprocess raw data
        into trainable input. '''
        x = tf.reshape(tf.cast(image, tf.float32), (28, 28, 1))
        y = tf.one_hot(tf.cast(label, tf.uint8), NUM_CLASSES)
        return x, y

    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    if is_training:
        dataset = dataset.shuffle(1000)  # depends on sample size
    # Transform and batch data at the same time
    dataset = dataset.apply(tf.data.experimental.map_and_batch(
        preprocess_fn, batch_size,
        num_parallel_batches=2,  # cpu cores
        drop_remainder=True if is_training else False))
    dataset = dataset.repeat()
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
training_set = tfdata_generator(x_train, y_train,is_training=True, batch_size=batch_size)
testing_set = tfdata_generator(x_test, y_test, is_training=False, batch_size=batch_size)
inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, (3, 3), activation='relu', padding='valid')(inputs)
x = MaxPool2D(pool_size=(2, 2))(x)
x = Conv2D(64, (3, 3), activation='relu')(x)
x = MaxPool2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
outputs = Dense(NUM_CLASSES, activation='softmax')(x)
keras_model = tf.keras.Model(inputs, outputs)
#Compile the model
keras_model.compile('adam', 'categorical_crossentropy', metrics=['acc'])
#Train with tf.data datasets
# training_set.make_one_shot_iterator() - 'PrefetchDataset' object has no attribute 'make_one_shot_iterator'
keras_training_history = keras_model.fit(
    training_set,
    steps_per_epoch=len(x_train) // batch_size,
    epochs=5,
    validation_data=testing_set,
    validation_steps=len(x_test) // batch_size,
    verbose=1)
print(keras_training_history.history)
No local data loading, just an easy data flow, which is very convenient. Thanks a lot; I hope my corrections are proper.

Error in creating h5 file (hdf file)

For the code below I have saved the model's weights in mnist_weights1234.h5, and I want to create a file like mnist_weights1234.h5 with the same layer configuration.
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import numpy as np
from sklearn.model_selection import train_test_split
batch_size = 128
num_classes = 3
epochs = 1
# input image dimensions
img_rows, img_cols = 28, 28
#Just for reducing data set
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x1_train=x_train[y_train==0]; y1_train=y_train[y_train==0]
x1_test=x_test[y_test==0];y1_test=y_test[y_test==0]
x2_train=x_train[y_train==1];y2_train=y_train[y_train==1]
x2_test=x_test[y_test==1];y2_test=y_test[y_test==1]
x3_train=x_train[y_train==2];y3_train=y_train[y_train==2]
x3_test=x_test[y_test==2];y3_test=y_test[y_test==2]
X=np.concatenate((x1_train,x2_train,x3_train,x1_test,x2_test,x3_test),axis=0)
Y=np.concatenate((y1_train,y2_train,y3_train,y1_test,y2_test,y3_test),axis=0)
# the data, shuffled and split between train and test sets
x_train, x_test, y_train, y_test = train_test_split(X,Y)
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(1, kernel_size=(2, 2),
                 activation='relu',
                 input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(16,16)))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.save_weights('mnist_weights1234.h5')
Now I want to create a file like mnist_weights.h5, so I use the code below and get an error.
hf = h5py.File('mnist_weights12356.h5', 'w')
hf.create_dataset('conv2d_2/conv2d_2/bias', data=weights[0])
hf.create_dataset('conv2d_2/conv2d_2/kernel', data=weights[1])
hf.create_dataset('dense_2/dense_2/bias', data=weights[2])
hf.create_dataset('dense_2/dense_2/kernel', data=weights[3])
hf.create_dataset('flatten_2', data=None)
hf.create_dataset('max_pooling_2d_2', data=None)
hf.close()
But I am getting the following error: TypeError: One of data, shape or dtype must be specified.
How can I solve this problem?
If you want to use weights that are in numpy arrays, simply set the weights in the layers:
model.get_layer('conv2d_2').set_weights([weights[1],weights[0]])
model.get_layer('dense_2').set_weights([weights[3],weights[2]])
If your arrays are stored in files:
array = numpy.load('arrayfile.npy')
You can save the entire model weights as numpy arrays:
numpy.save('weights.npy', model.get_weights())
model.set_weights(numpy.load('weights.npy'))
The error message has your solution. In these lines:
hf.create_dataset('flatten_2', data=None)
hf.create_dataset('max_pooling_2d_2', data=None)
You are giving data equal to None. To create a dataset, the HDF5 library needs a minimum of information; as the error says, you either need to give a dtype (the data type of the dataset's elements), a non-None data parameter (to infer the shape), or a shape parameter. You are giving none of these, so the error is correct.
Just give enough information in the create_dataset call for a dataset to be created.
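A minimal sketch of what that could look like (my own example, assuming weights is the list of numpy arrays from the question; layers such as Flatten and MaxPooling2D have no weights, so their entries are just placeholders with an explicit shape and dtype):
import h5py
with h5py.File('mnist_weights12356.h5', 'w') as hf:
    # shape and dtype are inferred from the numpy arrays
    hf.create_dataset('conv2d_2/conv2d_2/bias', data=weights[0])
    hf.create_dataset('conv2d_2/conv2d_2/kernel', data=weights[1])
    hf.create_dataset('dense_2/dense_2/bias', data=weights[2])
    hf.create_dataset('dense_2/dense_2/kernel', data=weights[3])
    # a weightless layer can be skipped, or stored as an empty dataset
    # with shape and dtype given explicitly instead of data=None
    hf.create_dataset('flatten_2', shape=(0,), dtype='float32')
    hf.create_dataset('max_pooling_2d_2', shape=(0,), dtype='float32')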
