I create a Keras model and then try to save that model with pickle.
Program code:
print(base_learners)
# 9) Save the ensemble models
pickle.dump(meta_learner, open('./models/meta_learner.pkl', 'wb'))
pickle.dump(base_learners, open('./models/base_learners.pkl', 'wb'))
pickle.dump(models, open('./models/models.pkl', 'wb'))
When I run the code, I get the following error:
Traceback (most recent call last):
File "MODEL02_ensemble.py", line 265, in <module>
main()
File "MODEL02_ensemble.py", line 246, in main
pickle.dump(base_learners, open('./models/base_learners.pkl', 'wb'))
TypeError: can't pickle _thread.RLock objects
What is wrong?
As far as I can tell, the values stored in the variable look normal.
When I print base_learners:
{'dnn': <keras.engine.sequential.Sequential object at 0x000001C43DDE8EF0>, 'random forest': RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=4, max_features='sqrt', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=2, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=100, n_jobs=-1,
oob_score=False, random_state=42, verbose=0, warm_start=False), 'extra trees': ExtraTreesClassifier(bootstrap=False, class_weight=None, criterion='gini',
max_depth=4, max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=2, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=100, n_jobs=-1,
oob_score=False, random_state=42, verbose=0, warm_start=False)}
I found a solution:
from threading import Thread

# base_learners holds 3 models, so the handler takes three arguments
def handler(model1, model2, model3):
    with open('./models/base_learners.pkl', 'wb') as file:
        pickle.dump(model1, file)
        pickle.dump(model2, file)
        pickle.dump(model3, file)

t = Thread(target=handler, args=tuple(base_learners.values()))
t.start()
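A more common approach (a sketch, assuming the Keras model is the only unpicklable entry in base_learners) is to save the Keras model with its own save() method and pickle the scikit-learn estimators separately:

# Keras models hold thread locks internally, which is why pickle fails;
# the native Keras save format avoids that.
base_learners['dnn'].save('./models/dnn.h5')

# Plain scikit-learn estimators pickle without trouble.
sklearn_learners = {k: v for k, v in base_learners.items() if k != 'dnn'}
pickle.dump(sklearn_learners, open('./models/sklearn_learners.pkl', 'wb'))

The Keras model can later be reloaded with keras.models.load_model('./models/dnn.h5') and put back into the dictionary.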
I am trying to use a tf.keras.utils.Sequence object as input to my Keras model so that I can apply augmentations from the albumentations library that are not available in TensorFlow. But I am getting an error while doing so. (The image pre-processing operations mentioned here are just for clarity.)
import albumentations as A
from tensorflow.keras.utils import Sequence
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPool2D, Dropout
from tensorflow.keras.models import Sequential

TRAIN_DIR = os.path.join('..', 'Data', 'PetImages')

def load_data():
    list_of_fpaths = glob.glob('../Data/PetImages/Cat/*')
    labels = [1] * len(list_of_fpaths)
    temp = glob.glob('../Data/PetImages/Dog/*')
    list_of_fpaths.extend(temp)
    labels.extend([0] * len(temp))
    return list_of_fpaths, labels

# Now list_of_fpaths contains the list of file paths and labels contains
# the corresponding labels

class DataSequence(Sequence):
    def __init__(self, x_set, y_set, batch_size, augmentations):
        self.x, self.y = x_set, y_set
        self.batch_size = batch_size
        self.augment = augmentations

    def __len__(self):
        return int(np.ceil(len(self.x) / float(self.batch_size)))

    def __getitem__(self, idx):
        batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
        a = np.array([
            self.augment(image=plt.imread(file_name))["image"]
            for file_name in batch_x
        ])
        b = np.array(batch_y)
        return a, b

def get_model(input_shape):
    model = Sequential([
        Conv2D(8, 3, activation='relu', input_shape=input_shape),
        MaxPool2D(2),
        Conv2D(16, 3, activation='relu'),
        MaxPool2D(2),
        Conv2D(32, 3, activation='relu'),
        MaxPool2D(2),
        Conv2D(32, 3, activation='relu'),
        MaxPool2D(2),
        Conv2D(32, 3, activation='relu'),
        MaxPool2D(2),
        Flatten(),
        Dense(1024, activation='relu'),
        Dropout(0.3),
        Dense(1, activation='sigmoid')
    ])
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model

ALBUMENTATIONS_TRAIN = A.Compose([
    A.Resize(256, 256),
    # A.Resize(512, 512),
    A.ToFloat(),
    # A.RandomCrop(384, 384, p=0.5),
])

ALBUMENTATIONS_TEST = A.Compose([
    A.ToFloat(),
    A.Resize(256, 256)
])

X, Y = load_data()
train_gen = DataSequence(X, Y, 16, ALBUMENTATIONS_TRAIN)
model = get_model(input_shape=(256, 256, 3))
model.fit(train_gen, epochs=100)
The error that I am getting is:
17/748 [..............................] - ETA: 1:06 - loss: 0.4304 - accuracy: 0.92282020-07-08 13:25:47.751964: W tensorflow/core/framework/op_kernel.cc:1741] Invalid argument: ValueError: could not broadcast input array from shape (256,256,3) into shape (256,256)
Traceback (most recent call last):
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\ops\script_ops.py", line 243, in __call__
ret = func(*args)
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\autograph\impl\api.py", line 309, in wrapper
return func(*args, **kwargs)
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py", line 785, in generator_py_func
values = next(generator_state.get_iterator(iterator_id))
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py", line 801, in wrapped_generator
for data in generator_fn():
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py", line 932, in generator_fn
yield x[i]
File "D:/ACAD/TENSORFLOW/Rough/data_aug_pipeline.py", line 40, in __getitem__
a = np.array([
ValueError: could not broadcast input array from shape (256,256,3) into shape (256,256)
Traceback (most recent call last):
File "D:/ACAD/TENSORFLOW/Rough/data_aug_pipeline.py", line 89, in <module>
model.fit(train_gen,epochs=100)
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training.py", line 66, in _method_wrapper
return method(self, *args, **kwargs)
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training.py", line 848, in fit
tmp_logs = train_function(iterator)
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\eager\def_function.py", line 580, in __call__
result = self._call(*args, **kwds)
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\eager\def_function.py", line 611, in _call
return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\eager\function.py", line 2420, in __call__
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\eager\function.py", line 1661, in _filtered_call
return self._call_flat(
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\eager\function.py", line 1745, in _call_flat
return self._build_call_outputs(self._inference_function.call(
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\eager\function.py", line 593, in call
outputs = execute.execute(
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\eager\execute.py", line 59, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.InvalidArgumentError: 2 root error(s) found.
(0) Invalid argument: ValueError: could not broadcast input array from shape (256,256,3) into shape (256,256)
Traceback (most recent call last):
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\ops\script_ops.py", line 243, in __call__
ret = func(*args)
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\autograph\impl\api.py", line 309, in wrapper
return func(*args, **kwargs)
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py", line 785, in generator_py_func
values = next(generator_state.get_iterator(iterator_id))
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py", line 801, in wrapped_generator
for data in generator_fn():
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py", line 932, in generator_fn
yield x[i]
File "D:/ACAD/TENSORFLOW/Rough/data_aug_pipeline.py", line 40, in __getitem__
a = np.array([
ValueError: could not broadcast input array from shape (256,256,3) into shape (256,256)
[[{{node PyFunc}}]]
[[IteratorGetNext]]
[[IteratorGetNext/_4]]
(1) Invalid argument: ValueError: could not broadcast input array from shape (256,256,3) into shape (256,256)
Traceback (most recent call last):
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\ops\script_ops.py", line 243, in __call__
ret = func(*args)
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\autograph\impl\api.py", line 309, in wrapper
return func(*args, **kwargs)
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py", line 785, in generator_py_func
values = next(generator_state.get_iterator(iterator_id))
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py", line 801, in wrapped_generator
for data in generator_fn():
File "C:\Users\aksha\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py", line 932, in generator_fn
yield x[i]
File "D:/ACAD/TENSORFLOW/Rough/data_aug_pipeline.py", line 40, in __getitem__
a = np.array([
ValueError: could not broadcast input array from shape (256,256,3) into shape (256,256)
[[{{node PyFunc}}]]
[[IteratorGetNext]]
0 successful operations.
0 derived errors ignored. [Op:__inference_train_function_1195]
Function call stack:
train_function -> train_function
Process finished with exit code 1
Please help me to understand what mistake I am making.
Based on the error messages, there is at least one grayscale image in your dataset that was resized to 256x256 and thus cannot fit into your network.
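A minimal guard along those lines (a sketch, assuming images keep being loaded with plt.imread as in the question): promote 2-D grayscale arrays to 3 channels, and drop any alpha channel, before the augmentation runs.

def load_rgb(file_name):
    # plt.imread returns an (H, W) array for grayscale files and an
    # (H, W, 4) array for RGBA files; normalize both to (H, W, 3).
    img = plt.imread(file_name)
    if img.ndim == 2:
        img = np.stack([img] * 3, axis=-1)
    elif img.shape[-1] == 4:
        img = img[..., :3]
    return img

Calling load_rgb(file_name) in place of plt.imread(file_name) inside __getitem__ gives every image a consistent (256, 256, 3) shape after A.Resize.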
I get the ValueError in the title when I try to perform a GridSearchCV on an MLP classifier. Of course I checked whether any np.inf or np.nan values exist in my dataset, but they don't:
print(np.any(np.isnan(X)))
returns False
print(np.all(np.isfinite(X)))
returns True
I also cast all my values to np.float64:
X = X.values.astype(np.float64)
Y = Y.values
My scikit-learn version is 0.22.2.post1 (the latest).
The code I'm trying to execute:
from scipy.stats import randint as sp_randint

hiddenlayers = [(sp_randint.rvs(100, 600, 1), sp_randint.rvs(100, 600, 1),),
                (sp_randint.rvs(100, 600, 1),)]
alpha_range = 10.0 ** np.arange(-2, 1)

param_grid_MLP = [{'solver': ['lbfgs'],
                   'hidden_layer_sizes': hiddenlayers,
                   'activation': ['identity', 'tanh', 'relu', 'logistic'],
                   'alpha': alpha_range},
                  {'solver': ['sgd'],
                   'hidden_layer_sizes': hiddenlayers,
                   'activation': ['identity', 'tanh', 'relu', 'logistic'],
                   'alpha': alpha_range,
                   'learning_rate': ['constant', 'invscaling', 'adaptive']},
                  {'solver': ['adam'],
                   'hidden_layer_sizes': hiddenlayers,
                   'activation': ['identity', 'tanh', 'relu', 'logistic'],
                   'alpha': alpha_range}]

mlp = MLPClassifier(random_state=0)
cross_validation = StratifiedKFold(5)

# scoring = {'AUC': 'roc_auc',
#            'Accuracy': make_scorer(accuracy_score),
#            'Recall': make_scorer(recall_score, pos_label='crafted'),
#            'Precision': make_scorer(precision_score, pos_label='crafted')}
scoring = {'AUC': 'roc_auc',
           'Accuracy': make_scorer(accuracy_score),
           'Recall': make_scorer(recall_score, pos_label='crafted')}

grid_search_MLP = GridSearchCV(estimator=mlp,
                               param_grid=param_grid_MLP,
                               scoring=scoring,
                               cv=cross_validation.split(X_train, y_train),
                               refit='Recall',
                               n_jobs=-1,
                               verbose=True)

grid_search_MLP.fit(X_train, y_train)
print('Best score: {}'.format(grid_search_MLP.best_score_))
print('Best index: {}'.format(grid_search_MLP.best_index_))
print('Best parameters: {}'.format(grid_search_MLP.best_params_))

mlp = grid_search_MLP.best_estimator_
mlp
The full error traceback:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/joblib/externals/loky/process_executor.py", line 418, in _process_worker
r = call_item()
File "/usr/local/lib/python3.7/dist-packages/joblib/externals/loky/process_executor.py", line 272, in __call__
return self.fn(*self.args, **self.kwargs)
File "/usr/local/lib/python3.7/dist-packages/joblib/_parallel_backends.py", line 608, in __call__
return self.func(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/joblib/parallel.py", line 256, in __call__
for func, args, kwargs in self.items]
File "/usr/local/lib/python3.7/dist-packages/joblib/parallel.py", line 256, in <listcomp>
for func, args, kwargs in self.items]
File "/usr/local/lib/python3.7/dist-packages/sklearn/model_selection/_validation.py", line 544, in _fit_and_score
test_scores = _score(estimator, X_test, y_test, scorer)
File "/usr/local/lib/python3.7/dist-packages/sklearn/model_selection/_validation.py", line 591, in _score
scores = scorer(estimator, X_test, y_test)
File "/usr/local/lib/python3.7/dist-packages/sklearn/metrics/_scorer.py", line 87, in __call__
*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/sklearn/metrics/_scorer.py", line 332, in _score
return self._sign * self._score_func(y, y_pred, **self._kwargs)
File "/usr/local/lib/python3.7/dist-packages/sklearn/metrics/_ranking.py", line 369, in roc_auc_score
y_score = check_array(y_score, ensure_2d=False)
File "/usr/local/lib/python3.7/dist-packages/sklearn/utils/validation.py", line 578, in check_array
allow_nan=force_all_finite == 'allow-nan')
File "/usr/local/lib/python3.7/dist-packages/sklearn/utils/validation.py", line 60, in _assert_all_finite
msg_dtype if msg_dtype is not None else X.dtype)
ValueError: Input contains NaN, infinity or a value too large for dtype('float64').
It seems to me that you might have a corrupted or non-numeric value in your array. Try checking whether other types are present in your array before converting it to float. Also try finding the minimum and maximum values in your array; that might help you locate the value which raises the error.
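For example, two quick checks along those lines (a sketch, assuming X was built from a pandas DataFrame named df, as the .values call above suggests):

# An 'object' column would hide non-numeric entries before the cast.
print(df.dtypes)

# Extreme magnitudes can trip the 'value too large for dtype' branch.
print(np.nanmin(X), np.nanmax(X))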
Try giving verbose a large number, or run the three parts of the grid one by one. If you find that sgd is the one causing the problem, it is probably explained here: MLPRegressor error when solver sgd is used.
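A minimal sketch of running the sub-grids one by one (reusing the names defined in the question; verbose=10 prints progress for every fit):

# Fit each solver's sub-grid separately to isolate which one raises the error.
for sub_grid in param_grid_MLP:
    gs = GridSearchCV(estimator=mlp, param_grid=[sub_grid],
                      scoring=scoring, cv=5, refit='Recall', verbose=10)
    gs.fit(X_train, y_train)
    print(sub_grid['solver'], 'finished without error')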
I'm using keras.callbacks.callbacks.EarlyStopping for my deep learning project.
It's a VGG19 model for image classification.
Problem:
When setting restore_best_weights=True, I get:
TypeError: 'NoneType' object is not subscriptable
Stack trace:
Epoch 00003: saving model to ./output/2020-02-17_17-06-25_nomura/vgg19_weights.03-1.099-0.34.hdf5
Restoring model weights from the end of the best epoch
Traceback (most recent call last):
File "wb-det_nomura_nobn.py", line 824, in <module>
dynamic_train(path, _model)
File "wb-det_nomura_nobn.py", line 542, in dynamic_train
verbose=1)
File "C:\Users\owner\Anaconda3\lib\site-packages\keras\engine\training.py", line 1039, in fit
validation_steps=validation_steps)
File "C:\Users\owner\Anaconda3\lib\site-packages\keras\engine\training_arrays.py", line 217, in fit_loop
callbacks.on_epoch_end(epoch, epoch_logs)
File "C:\Users\owner\Anaconda3\lib\site-packages\keras\callbacks.py", line 79, in on_epoch_end
callback.on_epoch_end(epoch, logs)
File "C:\Users\owner\Anaconda3\lib\site-packages\keras\callbacks.py", line 557, in on_epoch_end
self.model.set_weights(self.best_weights)
File "C:\Users\owner\Anaconda3\lib\site-packages\keras\engine\network.py", line 504, in set_weights
layer_weights = weights[:num_param]
TypeError: 'NoneType' object is not subscriptable
And with restore_best_weights=False, there is no problem.
System information:
- keras 2.2.4
- python 3.7.3
installed within Anaconda.
The code is as simple as follows:
es_cb = EarlyStopping(monitor='val_loss', patience=0, verbose=1,
                      mode='auto', baseline=0.05,
                      restore_best_weights=True)
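A possible explanation (an assumption, not confirmed in this thread): in keras 2.2.4 the callback records best_weights only after the monitored metric improves on the current best, and baseline=0.05 initializes that best. If val_loss never drops below the baseline, best_weights stays None and restoring it crashes. A sketch of a configuration that sidesteps the crash by dropping the baseline:

# Without a baseline, the first epoch always registers as an improvement,
# so best_weights is set before any restore is attempted.
es_cb = EarlyStopping(monitor='val_loss', patience=0, verbose=1,
                      mode='auto',
                      restore_best_weights=True)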
I am trying to create a CNN with TensorFlow and Keras using the Sequential API.
The inputs have a (size, 50, 50, 1) shape and the labels have (size,), where size is the number of samples in the dataset.
The problem is that after compilation, when I call the fit method on my model, I get an index error. See the code below:
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, 4, input_shape=(50, 50, 1), activation='relu', name="conv1"))
model.add(tf.keras.layers.Conv2D(64, 3, activation='relu', name="conv2"))
model.add(tf.keras.layers.Conv2D(128, 3, activation='relu', name="conv3"))
model.add(tf.keras.layers.Flatten(name='Flatten'))
model.add(tf.keras.layers.Dense(128, activation='relu', name="d1"))
model.add(tf.keras.layers.Dense(4, activation='softmax', name="output"))

# Compile the model
model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer="Adam",
    metrics=["accuracy"]
)

model.fit(x_trains, y_labels, epochs=5, verbose=2, validation_data=0.33, shuffle=True)
And the error:
2019-09-24 13:59:40.902561: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
Traceback (most recent call last):
File "C:/Users/eloim/Documents/Programmation/Python/CNN_tf/face_train_seq.py", line 49, in <module>
model.fit(x_trains, y_labels, epochs=5, verbose=2, validation_data=0.33, shuffle=True)
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 728, in fit
use_multiprocessing=use_multiprocessing)
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 224, in fit
distribution_strategy=strategy)
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 564, in _process_training_inputs
distribution_strategy=distribution_strategy)
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 606, in _process_inputs
use_multiprocessing=use_multiprocessing)
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py", line 479, in __init__
batch_size=batch_size, shuffle=shuffle, **kwargs)
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py", line 238, in __init__
num_samples = set(int(i.shape[0]) for i in nest.flatten(inputs))
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py", line 238, in <genexpr>
num_samples = set(int(i.shape[0]) for i in nest.flatten(inputs))
IndexError: tuple index out of range
At the line:
model.fit(x_trains, y_labels, epochs=5, verbose=2, validation_data=0.33, shuffle=True)
I do not understand the nature of this error. How can I get rid of it?
Thanks for your help.
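A likely cause, judging from the traceback (num_samples is computed as i.shape[0] over every input, and a bare float has no first dimension): validation_data expects actual data such as an (x_val, y_val) tuple, while a fraction of the training set belongs in validation_split. A sketch of the corrected call:

# validation_split carves 33% off the training arrays for validation;
# validation_data would instead take a tuple such as (x_val, y_val).
model.fit(x_trains, y_labels, epochs=5, verbose=2, validation_split=0.33, shuffle=True)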
I'm implementing linear regression in TensorFlow for the first time. Initially, I tried a linear model, but after a few iterations of training my parameters shot up to infinity. So I changed my model to a quadratic one and tried training again, but after a few epochs the same thing happened.
Hence, the parameter in tf.summary.histogram('Weights', W0) is receiving inf as a value, and similarly for W1 and b.
I wanted to see my parameters in TensorBoard (because I've never worked with it) but I am getting this error.
I asked this question previously, with the slight difference that I was then using a linear model, which gave the same problem. (I didn't know it was due to the parameters going to infinity, because I was running the code in my IPython notebook; when I ran the program in the terminal, the error below was generated, which helped me figure out that the problem was the parameters shooting to infinity.) In the comments section I learned that the code worked on someone else's PC, and his TensorBoard showed the parameters actually reaching infinity.
Here is the link to the question asked earlier.
I hope I've declared Y_ correctly in my program; if not, please correct me!
Here is the code in TensorFlow:
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt

boston = load_boston()
type(boston)
boston.feature_names

bd = pd.DataFrame(data=boston.data, columns=boston.feature_names)
bd['Price'] = pd.DataFrame(data=boston.target)
np.random.shuffle(bd.values)

W0 = tf.Variable(0.3)
W1 = tf.Variable(0.2)
b = tf.Variable(0.1)
#print(bd.shape[1])

tf.summary.histogram('Weights', W0)
tf.summary.histogram('Weights', W1)
tf.summary.histogram('Biases', b)

dataset_input = bd.iloc[:, 0:bd.shape[1] - 1]
#dataset_input.head(2)
dataset_output = bd.iloc[:, bd.shape[1] - 1]
dataset_output = dataset_output.values
dataset_output = dataset_output.reshape((bd.shape[0], 1))
# converted (506,) to (506,1) because in pandas
# the shape was not changing and it was needed later in feed_dict

dataset_input = dataset_input.values  # only dataset_input is in DataFrame form; converting it into np.ndarray
dataset_input = np.array(dataset_input, dtype=np.float32)
# making the datatype float32 for compatibility with the placeholders
dataset_output = np.array(dataset_output, dtype=np.float32)

X = tf.placeholder(tf.float32, shape=(None, bd.shape[1] - 1))
Y = tf.placeholder(tf.float32, shape=(None, 1))
Y_ = W0*X*X + W1*X + b  # Hope this equation is rightly written
#Y_pred = tf.add(tf.multiply(tf.pow(X, pow_i), W), Y_pred)

print(X.shape)
print(Y.shape)

loss = tf.reduce_mean(tf.square(Y_ - Y))
tf.summary.scalar('loss', loss)
optimizer = tf.train.GradientDescentOptimizer(0.001)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

wb_ = []
with tf.Session() as sess:
    summary_merge = tf.summary.merge_all()
    writer = tf.summary.FileWriter("Users/ajay/Documents", sess.graph)
    epochs = 10
    sess.run(init)
    for i in range(epochs):
        s_mer = sess.run(summary_merge, feed_dict={X: dataset_input, Y: dataset_output})  # ERROR________ERROR
        sess.run(train, feed_dict={X: dataset_input, Y: dataset_output})
        # CHANGED
        sess.run(loss, feed_dict={X: dataset_input, Y: dataset_output})
        writer.add_summary(s_mer, i)
        #tf.summary.histogram(name="loss", values=loss)
        if i % 5 == 0:
            print(i, sess.run([W0, W1, b]))
            wb_.append(sess.run([W0, W1, b]))
    print(writer.get_logdir())
    print(writer.close())
I'm getting this error:
/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
(?, 13)
(?, 1)
2018-07-22 02:04:24.826027: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
0 [-3833776.2, -7325.9595, -15.471448]
5 [inf, inf, inf]
Traceback (most recent call last):
File "/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1322, in _do_call
return fn(*args)
File "/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1307, in _run_fn
options, feed_dict, fetch_list, target_list, run_metadata)
File "/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1409, in _call_tf_sessionrun
run_metadata)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Infinity in summary histogram for: Biases
[[Node: Biases = HistogramSummary[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](Biases/tag, Variable_2/read)]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "LR.py", line 75, in <module>
s_mer=sess.run(summary_merge,feed_dict={X: dataset_input, Y: dataset_output}) #ERROR________ERROR
File "/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 900, in run
run_metadata_ptr)
File "/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1135, in _run
feed_dict_tensor, options, run_metadata)
File "/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1316, in _do_run
run_metadata)
File "/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1335, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Infinity in summary histogram for: Biases
[[Node: Biases = HistogramSummary[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](Biases/tag, Variable_2/read)]]
Caused by op 'Biases', defined at:
File "LR.py", line 24, in <module>
tf.summary.histogram('Biases', b)
File "/anaconda3/lib/python3.6/site-packages/tensorflow/python/summary/summary.py", line 187, in histogram
tag=tag, values=values, name=scope)
File "/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/gen_logging_ops.py", line 283, in histogram_summary
"HistogramSummary", tag=tag, values=values, name=name)
File "/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3414, in create_op
op_def=op_def)
File "/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1740, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): Infinity in summary histogram for: Biases
[[Node: Biases = HistogramSummary[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](Biases/tag, Variable_2/read)]]
I believe this is caused by too high a learning rate for gradient descent.
Please refer to Gradient descent explodes if learning rate is too large.
Here the loss is actually getting bigger after each epoch.
I changed
optimizer=tf.train.GradientDescentOptimizer(0.001)
to
optimizer=tf.train.GradientDescentOptimizer(0.0000000001)
Then I printed the loss after each epoch, by changing
sess.run(loss, feed_dict={X:dataset_input,Y:dataset_output})
to
print("loss",sess.run(loss, feed_dict={X:dataset_input,Y:dataset_output}))
in your code. The error was gone. The output was:
(?, 13)
(?, 1)
loss = 44061484.0
0 [-0.08337769, 0.19926739, 0.099998444]
loss = 3373030.2
loss = 258605.05
loss = 20211.799
loss = 1964.4918
loss = 567.7717
5 [-0.0001616638, 0.19942635, 0.099998794]
loss = 460.862
loss = 452.67877
loss = 452.05255
loss = 452.00452
Users/ajay/Documents
None
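As an aside, instead of shrinking the learning rate to 1e-10, a common alternative (a sketch, not tested in this thread) is to standardize the features first, which keeps the gradients bounded at an ordinary learning rate:

# Standardize each feature to zero mean and unit variance; with features on
# comparable scales, gradient descent typically converges at rates like 0.01.
mean = dataset_input.mean(axis=0)
std = dataset_input.std(axis=0)
dataset_input = (dataset_input - mean) / std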