TF 2.x: just for the experience, I tried a simple experimental dataset to show the problem:
import numpy as np
import tensorflow as tf
import keras
from tensorflow.keras.callbacks import LambdaCallback
import tensorflow_datasets as tfds
data, info = tfds.load('iris', split='train[:80%]',
as_supervised=True, with_info=True)
print(info)
features, labels = tuple(zip(*data))
# NB: the generator should yield a dictionary for the inputs, and the output as is.
def gen(x_train, y_train):
    print('generator initiated')
    (x_train, y_train) = tfds.load('iris', shuffle_files=True, as_supervised=True, with_info=True)
    idx = 0
    while True:
        yield tf.transpose([x_train[:32], tf.one_hot(y_train[:32])])
        print('generator yielded a batch %d' % idx)
        idx += 1
train_ds = tf.data.Dataset.from_generator(gen, args=(features, labels),
output_types=(tf.float32, tf.int32),
output_shapes=(tf.TensorShape([32,4]), tf.TensorShape([32,4 ])),
)
# OR
#output_signature=(
# tf.TensorSpec(shape=(4,), dtype=tf.float32),
# tf.TensorSpec(shape=(), dtype=tf.int32)),
#)
# datasetGen = iter(train_ds)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(512, activation='relu', input_shape=(32,4,))) # 4 fields
model.add(tf.keras.layers.Dense(4, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
train_ds= train_ds.batch(32).prefetch(32)
# callbacks=[LambdaCallback(on_epoch_end=generator.on_epoch_end)],
history= model.fit(train_ds, epochs = 7, verbose = 1)
print(history.history['accuracy'])
and I am getting:
In line: yield tf.transpose([x_train[:32], tf.one_hot(y_train[:32])])
TypeError: unhashable type: 'slice'
The problem seems to be here: x_train[:32]?
Question: how do I correct the code (the generator function, the output_signature, the input_shape, or something else) so that the Dataset can be used in the model.fit() method?
(Sorry for the dummy example, but I'd like to test using a generator function with model.fit().)
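(My own reading of the traceback, hedged: tfds.load(...) called inside gen without a split and with with_info=True returns a dict of splits plus an info object, so x_train ends up being a dict, and x_train[:32] indexes that dict with a slice - hence "unhashable type: 'slice'". A minimal sketch of what from_generator seems to want instead - yield one (features, label) pair per step and describe it with a matching output_signature; the names and shapes here are my assumptions:)
def gen(x_arr, y_arr):
    # x_arr: (N, 4) float array, y_arr: (N,) int array - assumed shapes
    for f, l in zip(x_arr, y_arr):
        yield np.float32(f), np.int32(l)

train_ds = (tf.data.Dataset.from_generator(
                gen, args=(features, labels),
                output_signature=(
                    tf.TensorSpec(shape=(4,), dtype=tf.float32),
                    tf.TensorSpec(shape=(), dtype=tf.int32)))
            .batch(32))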
Well, it really was a dummy example of generator use; moreover, tf.data always wins in speed compared with a plain generator. Nevertheless, the following works (the code still needs refactoring, e.g. for organizing pipelines for big data):
import tensorflow as tf
import numpy as np
import pandas as pd
# LOAD DATA
df= pd.read_csv('https://gist.githubusercontent.com/netj/8836201/raw/6f9306ad21398ea43cba4f7d537619d0e07d5ae3/iris.csv', dtype = 'float32', converters = {'variety' : str},
nrows=64, decimal='.')
# df.head()
_features=df.iloc[:,:4].copy()
_labels=df.iloc[:,-1:].copy()
_labels['variety1'] = pd.factorize(_labels['variety'])[0]
_target= _labels['variety1'].astype(np.int64).copy()
_targets= _target[:,np.newaxis]
#print(_features)
print(type(_targets))
# SPLIT for Train & Test
# https://www.kdnuggets.com/2020/07/getting-started-tensorflow2.html
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(_features,_targets, test_size=0.3)
# Typically, we normalize the data when we have a high amount of variance in it.
print(X_train.var())
print(X_test.var())
# Here we can see that both X_train and X_test have very low variance, so no need to normalize the data.
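# (Hedged aside, not used in this run: if the variance were high, one typical
#  choice would be sklearn's StandardScaler, fit on the training split only.)
# from sklearn.preprocessing import StandardScaler
# scaler = StandardScaler().fit(X_train)
# X_train, X_test = scaler.transform(X_train), scaler.transform(X_test)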
# PREPROCESSING
#
# to_categorical
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)
print(y_train[:5,:])
# convert our data to numpy arrays
X_train = X_train.values
X_test = X_test.values
#################################################
#################################################
def gen(_features, _labels):
    x_train = _features
    y_train = _labels
    #print('gen:\n', list(x_train))
    #print('gen:\n', list(y_train))
    idx = 0
    while idx < 64:
        yield x_train[:32], y_train[:32]
        print('generator yielded a batch %d' % idx)
        idx += 1
#################################################
# train_ds <<<<<<<<<<<<<<<<<<<<<<<
train_ds = tf.data.Dataset.from_generator(gen, args=(X_train, y_train),
output_types=(tf.float32, tf.int64),
output_shapes=(tf.TensorShape([32,4]), tf.TensorShape([32, 2 ])),
)
# OR
#output_signature=(
# tf.TensorSpec(shape=(4,), dtype=tf.float32),
# tf.TensorSpec(shape=(), dtype=tf.int32)),
#)
# datasetGen = iter(train_ds)
# print('train_ds:\n',list(train_ds.as_numpy_iterator()))
#################################################
# Model
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense((512), activation='relu', input_shape=(32,4 ))) # 4 fields
model.add(tf.keras.layers.Dense((2), activation='softmax'))
# INSTEAD OF ONE-HOT CAN USE sparse_categorical_crossentropy HERE
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
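# (Hedged alternative to the note above, not used in this run: keep the integer
#  labels - i.e. skip to_categorical - and compile with a sparse loss instead.)
# model.compile(optimizer='rmsprop',
#               loss='sparse_categorical_crossentropy',
#               metrics=['accuracy'])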
train_ds= train_ds.batch(32).prefetch(32)
# callbacks=[LambdaCallback(on_epoch_end=generator.on_epoch_end)],
history= model.fit(train_ds, epochs = 7, verbose = 1)
A validation_ds built from X_test, y_test with tf.data.Dataset.from_tensor_slices() has shape problems: its elements have shape (4,) instead of the model's input shape (32, 4). I think this traces back to the inappropriate generator task from the very beginning... With train_ds, though, the evaluate() and predict() methods work (even if evaluating on the training set is not the point of ML).
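A hedged sketch of how I would probably align the shapes instead (my own variant, not the code above): let the model take single rows, let the generator yield single (row, label) pairs, and let .batch(32) do the batching for both train and test; then the from_tensor_slices output fits directly.
# hypothetical shape-aligned variant:
# model.add(tf.keras.layers.Dense(512, activation='relu', input_shape=(4,)))
# test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(32)
# score = model.evaluate(test_ds, verbose=1)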
##############################################
score = model.evaluate(train_ds, batch_size=32, verbose=1) # test_ds needed
print("Test Accuracy:", score[1])
y_pred = model.predict(train_ds)
print('PREDICTIONS:\n', y_pred)
##############################################
#https://medium.com/@nutanbhogendrasharma/tensorflow-deep-learning-model-with-iris-dataset-8ec344c49f91
#Print actual and predicted value
features, labels = tuple(zip(*train_ds)) # If you need the numpy array version, convert them using np.array(): # https://stackoverflow.com/a/65499385/15893581
actual = np.argmax(labels,axis=-1)
predicted = np.argmax(y_pred,axis=-1)
print(f"Actual: {actual}")
print(f"Predicted: {predicted}")
So an incoming test_ds, for example, would still need to be adapted (although it would be better to adapt the generator function itself, I think), but the overall idea of using a generator in TF 2.x is clear now (worthwhile only for huge data)...
P.S.
Advice on improving the model is welcome here.
I apologize for the dummy question; I'm still a novice in ML, but I needed to connect a generator and training somehow, for the experience.
Finally, I generated an iris-like dataset from a function (really not a quick operation)... The repeat() step still needs some attention, but the code design in general works (for truly random data).
# Importing the tensorflow library
import tensorflow as tf
import numpy as np
import keras
#FeaturesDict({
# 'features': Tensor(shape=(4,), dtype=tf.float32),
# 'label': ClassLabel(shape=(), dtype=tf.int64, num_classes=3),
#})
BATCH_SIZE= 12
EPOCHS = 7
QTY_BATCHES= 10 # to be generated
# The Dataset.from_generator constructor converts the python generator to a fully functional tf.data.Dataset.
def gen():
    for i in range(BATCH_SIZE):
        # should yield a pair Features - Label
        data = np.expand_dims(np.random.sample(4), axis=0)
        label = [np.random.randint(3)]
        yield data, label
train_ds = tf.data.Dataset.from_generator(gen,
(tf.float32, tf.int32),
(tf.TensorShape([None,4]),
tf.TensorShape([ 1])))
# Applying the Dataset.repeat() transformation with no arguments will repeat the input indefinitely.
# The Dataset.repeat transformation concatenates its arguments without signaling the end of one epoch and the beginning of the next epoch. Because of this a Dataset.batch applied after Dataset.repeat will yield batches that straddle epoch boundaries:
train_ds= train_ds.repeat(count= EPOCHS*BATCH_SIZE*QTY_BATCHES).batch(BATCH_SIZE, drop_remainder=True).prefetch(BATCH_SIZE)
NUM_CLASSES= 3
train_ds = train_ds.map(lambda x, y: (x, tf.one_hot(y, depth=NUM_CLASSES)))
for x, y in train_ds:
    print(x)
    print(y)
# Build a simple linear model
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(64, activation='relu', input_shape=(None,4))) # unknown(variable) batch_size, 4 fields
model.add(tf.keras.layers.Dense(3, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# steps_per_epoch = int( np.ceil(x_train.shape[0] / batch_size) )
# The Steps per epoch denote the number of batches to be selected for one epoch. If 500 steps are selected then the network will train for 500 batches to complete one epoch.
history= model.fit(train_ds, batch_size=BATCH_SIZE, epochs= EPOCHS, \
steps_per_epoch= (QTY_BATCHES*BATCH_SIZE)//BATCH_SIZE, \
verbose = 1)
print(history.history['accuracy'])
print(history.history['loss'])
# Keras - Plot training, validation and test set accuracy
# https://stackoverflow.com/questions/41908379/keras-plot-training-validation-and-test-set-accuracy
import keras
from matplotlib import pyplot as plt
plt.plot(history.history['accuracy'])
#plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
#plt.legend(['train', 'val'], loc='upper left')
plt.legend(['train'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
# plt.legend(['train', 'val'], loc='upper left')
plt.legend(['train'], loc='upper left')
plt.show()
OK, I've got a working case for the initial Dataset:
import numpy as np
import tensorflow as tf
import keras
from tensorflow.keras.callbacks import LambdaCallback
import tensorflow_datasets as tfds
data, info = tfds.load('iris', split='train[:100%]', batch_size=10, as_supervised=True, with_info=True)
print(info)
NUM_CLASSES= info.features["label"].num_classes
data = data.map(lambda x, y: (x, tf.one_hot(y, depth=NUM_CLASSES)))
features, labels = tuple(zip(*data))
print(features)
print(labels)
# NB: the generator should yield a dictionary for the inputs, and the output as is.
def gen(x_train, y_train):
    print('generator initiated')
    print(x_train.shape)
    print(y_train.shape)
    idx = 0
    while True:
        yield x_train, y_train
        print('generator yielded a batch %d' % idx)
        idx += 1
train_ds = tf.data.Dataset.from_generator(gen, args=(features, labels),
output_types=(tf.float32, tf.int32),
output_shapes=(tf.TensorShape([None,10,4]), tf.TensorShape([ None, 10, 3 ])),
)
# OR (better! because prev. is Deprecated)
#output_signature=(
# tf.TensorSpec(shape=(4,), dtype=tf.float32),
# tf.TensorSpec(shape=(), dtype=tf.int32)),
#)
#it = iter(train_ds)
#print(it.get_next())
for feature, label in train_ds:
    print("shape of ds_generated: ", feature.shape, label.shape)
    break
#num_val = len(train_ds) # TypeError: The dataset length is unknown. BECAUSE it is FLOW
#print(num_val)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(512, activation='relu', input_shape=(None,10,4))) # 4 fields
model.add(tf.keras.layers.Dense(3, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
train_ds= train_ds.batch(32).prefetch(32)
# callbacks=[LambdaCallback(on_epoch_end=generator.on_epoch_end)],
history= model.fit(train_ds, epochs = 2, steps_per_epoch= 120 // 10, verbose = 1)
print(history.history['accuracy'])
Changes made:
- moved the one-hot encoding out of the gen_func scope;
- split the dataset into features and labels;
- gave the correct input_shape to the model (with matching shape changes in gen_func), i.e. [variable_rows_count_in_batch, batch_size, columns_features];
- set verbose = 1 for readable debugging in a multithreaded environment.
The advice from here - to define a variable batch size with None and to set steps_per_epoch - still does not help if I take split='train[:50%]' and steps_per_epoch = 60 // 10, because of the partially filled LAST batch. The source of the problem in my code IS the gen_func output_shapes - that is clear here, because gen_func really was made dummy for testing purposes...
For real cases, use a logical output and appropriate shapes! A small sketch of the workaround follows.
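(Hedged sketch of the workaround; the row count is just the 50% iris split, 75 examples: either drop the partly filled last batch when batching, or size steps_per_epoch so it is never requested.)
# ds = ds.batch(10, drop_remainder=True)   # every element then has exactly 10 rows
# steps_per_epoch = 75 // 10               # ignore the remainder of the 50% split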
P.S.
Though for 5 epochs I am getting:
Graph execution error: >> ZMQError: Too many open file
AttributeError: '_thread._local' object has no attribute 'event_pipe'
Probably there is not enough memory to finish training. Decreasing the output size in Dense(512, ...) helps (as does decreasing the number of epochs).
Related
Although my code runs fine on Repl.it and gives me results, it fails miserably in the Katacoda testing environment.
I am attaching the Repl.it file here for your review as well; it also contains the question, which is commented just above the code I have written.
Kindly review and let me know what mistakes I am making here.
Repl Link
https://repl.it/repls/WarmRobustOolanguage
I'm also sharing the code below.
The question instructions are in the comments.
#Import two modules sklearn.datasets, and #sklearn.model_selection.
#Import numpy and set random seed to 100.
#Load popular Boston dataset from sklearn.datasets module #and assign it to variable boston.
#Split boston.data into two sets names X_train and X_test. #Also, split boston.target into two sets Y_train and Y_test.
#Hint: Use train_test_split method from #sklearn.model_selection; set random_state to 30.
#Print the shape of X_train dataset.
#Print the shape of X_test dataset.
import sklearn.datasets as datasets
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score
import numpy as np
np.random.seed(100)
max_depth = range(2, 6)
boston = datasets.load_boston()
X_train, X_test, Y_train, Y_test = train_test_split(boston.data, boston.target, random_state=30)
print(X_train.shape)
print(X_test.shape)
#Import required module from sklearn.tree.
#Build a Decision tree Regressor model from X_train set and #Y_train labels, with default parameters. Name the model as #dt_reg.
#Evaluate the model accuracy on training data set and print #it's score.
#Evaluate the model accuracy on testing data set and print it's score.
#Predict the housing price for first two samples of X_test #set and print them.(Hint : Use predict() function)
dt_reg = DecisionTreeRegressor(random_state=1)
dt_reg = dt_reg.fit(X_train, Y_train)
print('Accuracy of Train Data :', cross_val_score(dt_reg, X_train,Y_train, cv=10 ))
print('Accuracy of Test Data :', cross_val_score(dt_reg, X_test,Y_test, cv=10 ))
predicted = dt_reg.predict(X_test[:2])
print(predicted)
#Fit multiple Decision tree regressors on X_train data and #Y_train labels with max_depth parameter value changing from #2 to 5.
#Evaluate each model accuracy on testing data set.
#Hint: Make use of for loop
#Print the max_depth value of the model with highest accuracy.
dt_reg = DecisionTreeRegressor()
random_grid = {'max_depth': max_depth}
dt_random = RandomizedSearchCV(estimator = dt_reg, param_distributions = random_grid,
n_iter = 90, cv = 3, verbose=2, random_state=42, n_jobs = -1)
dt_random.fit(X_train, Y_train)
dt_random.best_params_
def evaluate(model, test_features, test_labels):
    predictions = model.predict(test_features)
    errors = abs(predictions - test_labels)
    mape = 100 * np.mean(errors / test_labels)
    accuracy = 100 - mape
    print('Model Performance')
    print('Average Error: {:0.4f} degrees.'.format(np.mean(errors)))
    print('Accuracy = {:0.2f}%.'.format(accuracy))
    return accuracy
best_random = dt_random.best_estimator_
random_accuracy = evaluate(best_random, X_test,Y_test)
print("Accuracy Scores of the Model ",random_accuracy)
best_parameters = (dt_random.best_params_['max_depth']);
print(best_parameters)
The question is asking for default values. Try to remove random_state=1
Current Line:
dt_reg = DecisionTreeRegressor(random_state=1)
Update Line:
dt_reg = DecisionTreeRegressor()
I think it should work!
# ================================================================================
# Machine Learning Using Scikit-Learn | 3 | Decision Trees
# ================================================================================
import sklearn.datasets as datasets
import sklearn.model_selection as model_selection
import numpy as np
from sklearn.tree import DecisionTreeRegressor
np.random.seed(100)
# Load popular Boston dataset from sklearn.datasets module and assign it to variable boston.
boston = datasets.load_boston()
# print(boston)
# Split boston.data into two sets names X_train and X_test. Also, split boston.target into two sets Y_train and Y_test
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(boston.data, boston.target, random_state=30)
# Print the shape of X_train dataset
print(X_train.shape)
# Print the shape of X_test dataset.
print(X_test.shape)
# Build a Decision tree Regressor model from X_train set and Y_train labels, with default parameters. Name the model as dt_reg
dt_Regressor = DecisionTreeRegressor()
dt_reg = dt_Regressor.fit(X_train, Y_train)
print(dt_reg.score(X_train,Y_train))
print(dt_reg.score(X_test,Y_test))
predicted = dt_reg.predict(X_test[:2])
print(predicted)
# Get the max depth
maxdepth = 2
maxscore = 0
for x in range(2, 6):
    dt_Regressor = DecisionTreeRegressor(max_depth=x)
    dt_reg = dt_Regressor.fit(X_train, Y_train)
    score = dt_reg.score(X_test, Y_test)
    if maxscore < score:
        maxdepth = x
        maxscore = score
print(maxdepth)
I'm trying to train an LSTM to predict the Nth token using the N-1 tokens preceding it.
For each one-hot encoded token, I try to predict the next token. After three layers of LSTM, the results are fed to a Dense layer (wrapped in the TimeDistributed layer wrapper) to re-encode the results into the same one-hot encoding.
Oddly enough, after a few epochs the accuracy (in both train and validation) drops to nearly 0 (bad news), while the loss also drops to nearly 0 (good news?).
Why does this happen?
I know that I cannot expect the loss and accuracy to always go in the opposite directions (seeing as loss uses categorical cross-entropy over all categories, while accuracy uses merely the best or k best categories), but still - this behavior is highly unexpected and unexplainable.
What causes this?
Am I doing something wrong?
How should I change my code to allow my network to progress towards more and more accurate predictions?
My code is as follows:
import numpy as np
import glob
import keras
from keras.models import Sequential
from keras.layers import LSTM, Dense, TimeDistributed,Lambda, Dropout, Activation
from keras.metrics import top_k_categorical_accuracy
from keras.callbacks import ModelCheckpoint
###
import matplotlib
matplotlib.use('Agg') # prevents it from failing when there is no display
import matplotlib.pyplot as plt
import keras.backend as K
###
name='Try_6'
model_designation=str(name)+'_'
train_val_split=0.2 # portion to be placed in validation
train_control_number=0
val_control_number=0
batch_size = 16
def my_3D_top_5(true, pred):
    features_num = int(list(pred.shape)[-1])
    true = K.reshape(true, (-1, features_num))
    pred = K.reshape(pred, (-1, features_num))
    return top_k_categorical_accuracy(true, pred, k=5)

def my_3D_top_10(true, pred):
    features_num = int(list(pred.shape)[-1])
    true = K.reshape(true, (-1, features_num))
    pred = K.reshape(pred, (-1, features_num))
    return top_k_categorical_accuracy(true, pred, k=10)
def basic_LSTM(features_num):
    model = Sequential()
    model.add(LSTM(40, return_sequences=True, input_shape=(None, features_num)))
    model.add(LSTM(40, return_sequences=True))
    model.add(LSTM(40, return_sequences=True))
    model.add(TimeDistributed(Dense(features_num)))
    model.add(Activation('linear'))
    print(model.summary())
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', my_3D_top_5, my_3D_top_10])
    return model
def main():
    input_files = glob.glob('*npy')
    data_list, dim = loader(input_files)
    train_list, val_list = data_spliter(data_list)
    train_list = group_data(train_list, batch_size)
    val_list = group_data(val_list, batch_size)
    filepath = "saved-model-" + model_designation + "-{epoch:02d}.hdf5"
    checkpoint = ModelCheckpoint(filepath, save_best_only=False)
    callbacks_list = [checkpoint]
    model = basic_LSTM(dim)
    history = model.fit_generator(train_generator(train_list), steps_per_epoch=len(train_list), epochs=30, verbose=1, validation_data=val_generator(val_list), validation_steps=len(val_list), callbacks=callbacks_list)
    report(history)
def group_data(data_list, size):  # groups data and elongates it to match
    output = []
    list_of_sizes = []
    for data in data_list:
        list_of_sizes.append(list(data.shape)[1])
    data_list = [x for _, x in sorted(zip(list_of_sizes, data_list), key=lambda pair: pair[0])]
    while len(data_list) > size:
        this = data_list[:size]
        data_list = data_list[size:]
        combined = elongate_and_combine(this)
        output.append(combined)
    combined = elongate_and_combine(data_list)
    output.append(combined)
    return output
def elongate_and_combine(data_list):
    max_length = list(data_list[-1].shape)[1]
    last_element = list.pop(data_list)
    output = last_element
    stop_codon = last_element[0, (max_length - 1), :]
    stop_codon = stop_codon.reshape(1, 1, stop_codon.size)
    for data in data_list:
        size_of_data = list(data.shape)[1]
        while size_of_data < max_length:
            data = np.append(data, stop_codon, axis=1)
            size_of_data = list(data.shape)[1]
        output = np.append(output, data, axis=0)
    return output
def train_generator(data_list):
    while True:
        global train_control_number
        train_control_number = cycle_throught(len(data_list), train_control_number)
        #print (train_control_number)
        this = data_list[train_control_number]
        x_train = this[:, :-1, :]  # all but the last 1
        y_train = this[:, 1:, :]   # all but the first 1
        yield (x_train, y_train)

def val_generator(data_list):
    while True:
        global val_control_number
        val_control_number = cycle_throught(len(data_list), val_control_number)
        #print (val_control_number)
        this = data_list[val_control_number]
        x_train = this[:, :-1, :]  # all but the last 1
        y_train = this[:, 1:, :]   # all but the first 1
        yield (x_train, y_train)
def cycle_throught(total, current):
    current += 1
    if current == total:
        current = 0
    return current
def loader(input_files):
    data_list = []
    for input_file in input_files:
        a = np.load(input_file)
        incoming_shape = list(a.shape)
        requested_shape = [1] + incoming_shape
        a = a.reshape(requested_shape)
        #print (a.shape)
        data_list.append(a)
    return (data_list, incoming_shape[-1])
def data_spliter(input_list):
    val_num = int(len(input_list) * train_val_split)
    validation = input_list[:val_num]
    train = input_list[val_num:]
    return (train, validation)
def report(history):
    print(history.history.keys())
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    acc_5 = history.history['my_3D_top_5']
    val_acc_5 = history.history['val_my_3D_top_5']
    acc_10 = history.history['my_3D_top_10']
    val_acc_10 = history.history['val_my_3D_top_10']
    epochs = range(1, len(acc) + 1)
    fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 6))
    axes[0][0].plot(epochs, acc, 'bo', label='Training acc')
    axes[0][0].plot(epochs, val_acc, 'b', label='Validation acc')
    axes[0][0].set_title('Training and validation accuracy')
    axes[0][0].legend()
    axes[0][1].plot(epochs, loss, 'ro', label='Training loss')
    axes[0][1].plot(epochs, val_loss, 'r', label='Validation loss')
    axes[0][1].set_title('Training and validation loss')
    axes[0][1].legend()
    axes[1][0].plot(epochs, acc_5, 'go', label='Training acc over top 5')
    axes[1][0].plot(epochs, val_acc_5, 'g', label='Validation acc over top 5')
    axes[1][0].set_title('Training and validation accuracy over top 5')
    axes[1][0].legend()
    axes[1][1].plot(epochs, acc_10, 'mo', label='Training acc over top 10')
    axes[1][1].plot(epochs, val_acc_10, 'm', label='Validation acc over top 10')
    axes[1][1].set_title('Training and validation accuracy over top 10')
    axes[1][1].legend()
    fig.tight_layout()
    fig.savefig('fig_' + name + '.png')  # save the figure to file
main()
The reason both accuracy and loss drop to 0 in the graph is that at that point the output becomes nan.
This causes the loss to become nan as well, which apparently matplotlib regards as 0. Accuracy, in this case, is of course zero.
My mistake was using linear rather than softmax activation for the decoding layer.
After replacing
model.add(Activation('linear'))
with
model.add(Activation('softmax'))
the model does not collapse into NaN but rather improves in accuracy.
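For completeness, a sketch of the corrected model function (identical to basic_LSTM above except for the final activation, exactly as described):
def basic_LSTM(features_num):
    model = Sequential()
    model.add(LSTM(40, return_sequences=True, input_shape=(None, features_num)))
    model.add(LSTM(40, return_sequences=True))
    model.add(LSTM(40, return_sequences=True))
    model.add(TimeDistributed(Dense(features_num)))
    model.add(Activation('softmax'))  # was 'linear'
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy', my_3D_top_5, my_3D_top_10])
    return model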
I am trying to write simple ML code to classify the MNIST dataset in TensorFlow 2.0. I didn't use Keras for now, since I just want to use the lower-level API to help me understand how TensorFlow works. However, after I defined the cross entropy, it seems impossible to continue. All the TF 2.0 optimizers were moved to Keras and I don't know how to train a model without Keras in TF 2.0. Is there a way to bypass Keras in TF 2.0?
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
print(train_images.shape)
print(len(train_labels))
print(train_images[1,:,:].shape)
# plt.figure()
# plt.imshow(train_images[0])
# plt.colorbar()
# plt.grid(False)
# plt.show()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
for i in range(1):
    x = tf.constant(train_images[1, :, :].reshape(784), dtype=tf.float32)
    x = tf.reshape(x, [1, 784])
    print(tf.shape(x), tf.shape(W))
    # define the model
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    print(y)
    # correct labels
    y_ = np.zeros(10)
    y_[train_labels[i]] = 1.0
    y_ = tf.constant(y_, dtype=tf.float32)
    y_ = tf.reshape(y_, [1, 10])
    cross_entropy = -tf.reduce_sum(y_ * tf.math.log(y))
    print(cross_entropy)
I don't know how to continue from here.
Backpropagation-based training of models is totally possible in TensorFlow 2.x without using the Keras API. The usage is centered around the tf.GradientTape API and the optimizer objects under the tf.optimizers namespace.
Your example can be modified as follows. Note that this is simplistic code meant to illustrate basic usage in a short snippet; it's not meant to illustrate machine learning best practices in TF2.
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
@tf.function
def my_model(x):
    # This is a hand-rolled logistic regressor.
    y = tf.matmul(x, W) + b
    return tf.nn.softmax(y)

@tf.function
def loss(x, y):
    # This is a hand-rolled categorical cross-entropy loss.
    logits = my_model(x)
    diff = -(y * tf.math.log(logits))
    loss = tf.reduce_mean(diff)
    return loss

# Prepare full-batch training data so shapes match the model above
# (flattened images, one-hot labels).
x_train = tf.reshape(tf.constant(train_images, dtype=tf.float32), [-1, 784])
y_train = tf.one_hot(train_labels, 10)

num_steps = 100  # number of training iterations (pick as needed)
optimizer = tf.optimizers.Adam(learning_rate=1e-3)
for i in range(num_steps):
    # A single training step.
    with tf.GradientTape() as tape:
        # This is atypical, in that you would normally want to do this in
        # mini-batches, instead of using all examples in x_train and y_train
        # at once. But again, this is just a simple example.
        loss_value = loss(x_train, y_train)
    gradients = tape.gradient(loss_value, [W, b])
    optimizer.apply_gradients(zip(gradients, [W, b]))
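A hedged follow-up (my addition, not part of the original answer): once the loop finishes, test accuracy of the hand-rolled model can be checked directly, assuming test_images/test_labels from the load_data call above.
x_test_flat = tf.reshape(tf.constant(test_images, dtype=tf.float32), [-1, 784])
preds = tf.argmax(my_model(x_test_flat), axis=1)           # int64 class indices
correct = tf.equal(preds, tf.cast(test_labels, tf.int64))
print("test accuracy:", float(tf.reduce_mean(tf.cast(correct, tf.float32))))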
I have a simple code, which DOES work, for training a Keras model in Tensorflow using numpy arrays as features and labels. If I then wrap these numpy arrays using tf.data.Dataset.from_tensor_slices in order to train the same Keras model using a tensorflow dataset, I get an error. I haven't been able to figure out why (it may be a tensorflow or keras bug, but I may also be missing something). I'm on python 3, tensorflow is 1.10.0, numpy is 1.14.5, no GPU involved.
OBS1: The possibility of using tf.data.Dataset as a Keras input is shown in https://www.tensorflow.org/guide/keras, under "Input tf.data datasets".
OBS2: In the code below, the code under "#Train with numpy arrays" is being executed, using numpy arrays. If this code is commented and the code under "#Train with tf.data datasets" is used instead, the error will be reproduced.
OBS3: In line 13, which is commented and starts with "###WORKAROUND 1###", if the comment is removed and the line is used for tf.data.Dataset inputs, the error changes, even though I can't completely understand why.
The complete code is:
import tensorflow as tf
import numpy as np
np.random.seed(1)
tf.set_random_seed(1)
print(tf.__version__)
print(np.__version__)
#Import mnist dataset as numpy arrays
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()#Import
x_train, x_test = x_train / 255.0, x_test / 255.0 #normalizing
###WORKAROUND 1###y_train, y_test = (y_train.astype(dtype='float32'), y_test.astype(dtype='float32'))
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1]*x_train.shape[2])) #reshaping 28 x 28 images to 1D vectors, similar to Flatten layer in Keras
batch_size = 32
#Create a tf.data.Dataset object equivalent to this data
tfdata_dataset_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
tfdata_dataset_train = tfdata_dataset_train.batch(batch_size).repeat()
#Creates model
keras_model = tf.keras.models.Sequential([
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.2, seed=1),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
#Compile the model
keras_model.compile(optimizer='adam',
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=['accuracy'])
#Train with numpy arrays
keras_training_history = keras_model.fit(x_train,
y_train,
initial_epoch=0,
epochs=1,
batch_size=batch_size
)
#Train with tf.data datasets
#keras_training_history = keras_model.fit(tfdata_dataset_train,
# initial_epoch=0,
# epochs=1,
# steps_per_epoch=60000//batch_size
# )
print(keras_training_history.history)
The error observed when using tf.data.Dataset as input is:
(...)
ValueError: Tensor conversion requested dtype uint8 for Tensor with dtype float32: 'Tensor("metrics/acc/Cast:0", shape=(?,), dtype=float32)'
During handling of the above exception, another exception occurred:
(...)
TypeError: Input 'y' of 'Equal' Op has type float32 that does not match type uint8 of argument 'x'.
The error when removing the comment from line 13, as commented above in OBS3, is:
(...)
tensorflow.python.framework.errors_impl.InvalidArgumentError: In[0] is not a matrix
[[Node: dense/MatMul = MatMul[T=DT_FLOAT, _class=["loc:#training/Adam/gradients/dense/MatMul_grad/MatMul_1"], transpose_a=false, transpose_b=false, _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_sequential_input_0_0, dense/MatMul/ReadVariableOp)]]
Any help would be appreciated, including comments that you were able to reproduce the errors, so I can report the bug if it is the case.
I just upgraded to TensorFlow 1.10 to execute this code. I think that is the answer, which is also discussed in the other Stack Overflow thread.
This code executes, but only if I remove the normalization, as that line seems to use too much CPU memory (I see messages indicating that). I also reduced the number of cores.
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, Input
np.random.seed(1)
tf.set_random_seed(1)
batch_size = 128
NUM_CLASSES = 10
print(tf.__version__)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
#x_train, x_test = x_train / 255.0, x_test / 255.0 #normalizing
def tfdata_generator(images, labels, is_training, batch_size=128):
    '''Construct a data generator using tf.Dataset'''

    def preprocess_fn(image, label):
        '''A transformation function to preprocess raw data
        into trainable input.'''
        x = tf.reshape(tf.cast(image, tf.float32), (28, 28, 1))
        y = tf.one_hot(tf.cast(label, tf.uint8), NUM_CLASSES)
        return x, y

    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    if is_training:
        dataset = dataset.shuffle(1000)  # depends on sample size

    # Transform and batch data at the same time
    dataset = dataset.apply(tf.contrib.data.map_and_batch(
        preprocess_fn, batch_size,
        num_parallel_batches=2,  # cpu cores
        drop_remainder=True if is_training else False))
    dataset = dataset.repeat()
    dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
    return dataset
training_set = tfdata_generator(x_train, y_train,is_training=True, batch_size=batch_size)
testing_set = tfdata_generator(x_test, y_test, is_training=False, batch_size=batch_size)
inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, (3, 3), activation='relu', padding='valid')(inputs)
x = MaxPool2D(pool_size=(2, 2))(x)
x = Conv2D(64, (3, 3), activation='relu')(x)
x = MaxPool2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
outputs = Dense(NUM_CLASSES, activation='softmax')(x)
keras_model = tf.keras.Model(inputs, outputs)
#Compile the model
keras_model.compile('adam', 'categorical_crossentropy', metrics=['acc'])
#Train with tf.data datasets
keras_training_history = keras_model.fit(
training_set.make_one_shot_iterator(),
steps_per_epoch=len(x_train) // batch_size,
epochs=5,
validation_data=testing_set.make_one_shot_iterator(),
validation_steps=len(x_test) // batch_size,
verbose=1)
print(keras_training_history.history)
Installing the tf-nightly build, together with changing dtypes of some tensors (the error changes after installing tf-nightly), solved the problem, so it is an issue which (hopefully) will be solved in 1.11.
Related material: https://github.com/tensorflow/tensorflow/issues/21894
I am wondering how Keras is able to do 5 epochs when make_one_shot_iterator() only supports iterating once through a dataset.
It could be given something like iterations = len(y_train) * epochs - here shown for TF 1.x.
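A tiny sketch of why this works (hedged): the pipeline ends with .repeat(), so the one-shot iterator never signals end-of-sequence, and Keras simply draws steps_per_epoch batches per epoch.
steps_per_epoch = len(x_train) // batch_size   # batches Keras draws per epoch
total_batches = steps_per_epoch * 5            # over 5 epochs; .repeat() keeps supplying them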
The code from Mohan Radhakrishnan still works in TF 2.x with small corrections for objects that now belong to new classes, to bring the code up to date... No more make_one_shot_iterator() needed.
# >> author: Mohan Radhakrishnan
import tensorflow as tf
import tensorflow.keras
import numpy as np
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, Input
np.random.seed(1)
tf.random.set_seed(1)
batch_size = 128
NUM_CLASSES = 10
print(tf.__version__)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
#x_train, x_test = x_train / 255.0, x_test / 255.0 #normalizing
def tfdata_generator(images, labels, is_training, batch_size=128):
    '''Construct a data generator using tf.Dataset'''

    def preprocess_fn(image, label):
        '''A transformation function to preprocess raw data
        into trainable input.'''
        x = tf.reshape(tf.cast(image, tf.float32), (28, 28, 1))
        y = tf.one_hot(tf.cast(label, tf.uint8), NUM_CLASSES)
        return x, y

    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    if is_training:
        dataset = dataset.shuffle(1000)  # depends on sample size

    # Transform and batch data at the same time
    dataset = dataset.apply(tf.data.experimental.map_and_batch(
        preprocess_fn, batch_size,
        num_parallel_batches=2,  # cpu cores
        drop_remainder=True if is_training else False))
    dataset = dataset.repeat()
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
training_set = tfdata_generator(x_train, y_train,is_training=True, batch_size=batch_size)
testing_set = tfdata_generator(x_test, y_test, is_training=False, batch_size=batch_size)
inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, (3, 3), activation='relu', padding='valid')(inputs)
x = MaxPool2D(pool_size=(2, 2))(x)
x = Conv2D(64, (3, 3), activation='relu')(x)
x = MaxPool2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
outputs = Dense(NUM_CLASSES, activation='softmax')(x)
keras_model = tf.keras.Model(inputs, outputs)
#Compile the model
keras_model.compile('adam', 'categorical_crossentropy', metrics=['acc'])
#Train with tf.data datasets
# training_set.make_one_shot_iterator() - 'PrefetchDataset' object has no attribute 'make_one_shot_iterator'
keras_training_history = keras_model.fit(
training_set,
steps_per_epoch=len(x_train) // batch_size,
epochs=5,
validation_data=testing_set,
validation_steps=len(x_test) // batch_size,
verbose=1)
print(keras_training_history.history)
Not loading data locally, just an easy data flow - that is very convenient. Thanks a lot - I hope my corrections are proper.
I have built a sentiment analyzer using Keras, as a binary classification problem. I am using the IMDB dataset with a GRU.
My code is:
# coding=utf-8
# ==========
# MODEL
# ==========
# imports
from __future__ import print_function
from timeit import default_timer as timer
from datetime import timedelta
from keras.models import Sequential
from keras.preprocessing import sequence
from keras import regularizers
from keras.layers import Dense, Embedding
from keras.layers import GRU, LeakyReLU, Bidirectional
from keras.datasets import imdb
#start a timer
start = timer()
# Hyperparameters
Model_Name = 'my_model.h5'
vocab_size = 5000
maxlen = 1000
batch_size = 512
hidden_layer_size = 2
test_split = 0.3
dropout = 0.1
num_epochs = 1
alpha = 0.2
validation_split = 0.25
l1 = 0.01
l2 = 0.01
# Dataset loading
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(path="imdb.npz",
maxlen=maxlen)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
# Data preprocessing
# Sequence padding
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
# Network building
print('Build model...')
model = Sequential()
model.add(Embedding(vocab_size, hidden_layer_size))
model.add(Bidirectional(GRU(hidden_layer_size, kernel_initializer='uniform', kernel_regularizer=regularizers.l1_l2(l1=l1,l2=l2), dropout=dropout, recurrent_dropout=dropout,return_sequences=True)))
model.add(LeakyReLU())
model.add(Bidirectional(GRU(hidden_layer_size, kernel_initializer='uniform', dropout=dropout, kernel_regularizer=regularizers.l1_l2(l1=l1,l2=l2), recurrent_dropout=dropout)))
model.add(LeakyReLU())
model.add(Dense(1, activation='softmax', kernel_initializer='uniform', kernel_regularizer=regularizers.l1_l2(l1=l1,l2=l2)))
# Compile my model
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
print('Train...')
# Fit the model
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=num_epochs, validation_split=validation_split)
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)
# Create a summary, a plot and print the scores of the model
model.summary()
print('Test score:', score)
print('Test accuracy:', acc)
# Save model architecture, weights, training configuration (loss,optimizer),
# and also the state of the optimizer, so you can resume where you stopped
model.save(Model_Name)
end = timer()
print('Running time: ' + str(timedelta(seconds=(end - start))) + ' in Hours:Minutes:Seconds')
I keep receiving an error message which I don't completely understand:
InvalidArgumentError (see above for traceback): indices[502,665] = 5476 is not in [0, 5000)
[[Node: embedding_1/Gather = Gather[Tindices=DT_INT32, Tparams=DT_FLOAT, validate_indices=true, _device=/job:localhost/replica:0/task:0/device:CPU:0](embedding_1/embeddings/read, embedding_1/Cast)]]
Can anyone help me understand what causes this error and how to solve it?
The error complains about a non-existent word index. That's because you are only limiting the number of Embedding features (i.e. there is a word with index 5476, which is not in the range [0, 5000), where 5000 refers to the vocab_size you have set). To resolve this, you also need to pass vocab_size as the num_words argument of the load_data function, like this:
... = imdb.load_data(num_words=vocab_size, ...)
This way you limit the words to the most frequent ones (i.e. the top vocab_size words by frequency in the dataset), with their indices in the range [0, vocab_size).
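A sketch of the corrected call in context (same hyperparameters as above; as far as I know, Keras maps words outside the top num_words to the out-of-vocabulary index rather than dropping them):
(x_train, y_train), (x_test, y_test) = imdb.load_data(path="imdb.npz",
                                                      num_words=vocab_size,
                                                      maxlen=maxlen)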