Not all points are within the bounds of the space error in Scikit-Optimize - python-3.x

I am attempting a hyper-parameter optimization task on an LSTM model (pure TensorFlow) using the scikit-optimize package. I am using the Bayesian optimization method with Gaussian processes (gp_minimize) for this. The demo code provided for the function can be found through this link. When I try to run my code I keep getting the below error:
ValueError: Not all points are within the bounds of the space.
My complete code is shown below:
import skopt
from skopt import gp_minimize, forest_minimize
from skopt.space import Real, Categorical, Integer
from skopt.plots import plot_convergence
from skopt.plots import plot_objective, plot_evaluations
from skopt.utils import use_named_args
import csv
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from math import sqrt
import atexit
from time import time, strftime, localtime
from datetime import timedelta
input_size = 1
num_layers = 1
hidden1_activation = tf.nn.relu
hidden2_activation = tf.nn.relu
lstm_activation = tf.nn.relu
columns = ['Sales', 'DayOfWeek', 'SchoolHoliday', 'Promo']
features = len(columns)
fileName = None
column_min_max = None
# fileNames = ['store2_1.csv', 'store85_1.csv', 'store259_1.csv', 'store519_1.csv', 'store725_1.csv', 'store749_1.csv', 'store934_1.csv', 'store1019_1.csv']
# column_min_max_all = [[[0, 11000], [1, 7]], [[0, 17000], [1, 7]], [[0, 23000], [1, 7]], [[0, 14000], [1, 7]], [[0, 14000], [1, 7]], [[0, 15000], [1, 7]], [[0, 17000], [1, 7]], [[0, 25000], [1, 7]]]
fileNames = ['store2_1.csv']
column_min_max_all = [[[0, 11000], [1, 7]]]
num_steps = None
lstm_size = None
batch_size = None
init_learning_rate = 0.01
learning_rate_decay = None
init_epoch = None # 5
max_epoch = None # 100 or 50
hidden1_nodes = None
hidden2_nodes = None
dropout_rate= None
best_accuracy = 0.0
start = None
lstm_num_steps = Categorical(categories=[2,3,4,5,6,7,8,9,10,11,12,13,14], name ='lstm_num_steps')
size = Categorical(categories=[8,16,32,64,128], name ='size')
lstm_hidden1_nodes = Categorical(categories=[4,8,16,32,64], name= 'lstm_hidden1_nodes')
lstm_hidden2_nodes = Categorical(categories=[2,4,8,16,32],name= 'lstm_hidden2_nodes')
lstm_learning_rate_decay = Categorical(categories=[0.99,0.8,0.7], name='lstm_learning_rate_decay')
lstm_max_epoch = Categorical(categories=[60,50,100,120,150,200], name='lstm_max_epoch')
lstm_init_epoch = Categorical(categories=[5, 10, 15, 20],name='lstm_init_epoch')
lstm_batch_size = Categorical(categories=[5, 8, 16, 30, 31, 64] , name = 'lstm_batch_size')
lstm_dropout_rate = Categorical(categories=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9] , name = 'lstm_dropout_rate')
dimensions = [lstm_num_steps, size,lstm_hidden1_nodes, lstm_hidden2_nodes,lstm_init_epoch,lstm_max_epoch,lstm_learning_rate_decay,lstm_batch_size, lstm_dropout_rate]
default_parameters = [5,35,30,15,5,60,0.99,8,0.1]
# def log_dir_name(lstm_num_steps, size,lstm_hidden1_nodes, lstm_hidden2_nodes,lstm_learning_rate,lstm_init_epoch,lstm_max_epoch,lstm_learning_rate_decay,lstm_batch_size):
#
# # The dir-name for the TensorBoard log-dir.
# s = "./19_logs/{1}_{2}_{3}_{4}_{5}_{6}_{7}_{8}_{9}/"
#
# # Insert all the hyper-parameters in the dir-name.
# log_dir = s.format(lstm_num_steps, size,lstm_hidden1_nodes, lstm_hidden2_nodes,lstm_learning_rate,lstm_init_epoch,lstm_max_epoch,lstm_learning_rate_decay,lstm_batch_size)
#
# return log_dir
def secondsToStr(elapsed=None):
    if elapsed is None:
        return strftime("%Y-%m-%d %H:%M:%S", localtime())
    else:
        return str(timedelta(seconds=elapsed))

def log(s, elapsed=None):
    line = "="*40
    print(line)
    print(secondsToStr(), '-', s)
    if elapsed:
        print("Elapsed time:", elapsed)
    print(line)
    print()

def endlog():
    end = time()
    elapsed = end-start
    log("End Program", secondsToStr(elapsed))

def generate_batches(train_X, train_y, batch_size):
    num_batches = int(len(train_X)) // batch_size
    if batch_size * num_batches < len(train_X):
        num_batches += 1
    batch_indices = range(num_batches)
    for j in batch_indices:
        batch_X = train_X[j * batch_size: (j + 1) * batch_size]
        batch_y = train_y[j * batch_size: (j + 1) * batch_size]
        # assert set(map(len, batch_X)) == {num_steps}
        yield batch_X, batch_y

def segmentation(data):
    seq = [price for tup in data[columns].values for price in tup]
    seq = np.array(seq)
    # split into items of features
    seq = [np.array(seq[i * features: (i + 1) * features])
           for i in range(len(seq) // features)]
    # split into groups of num_steps
    X = np.array([seq[i: i + num_steps] for i in range(len(seq) - num_steps)])
    y = np.array([seq[i + num_steps] for i in range(len(seq) - num_steps)])
    # get only sales value
    y = [[y[i][0]] for i in range(len(y))]
    y = np.asarray(y)
    return X, y

def scale(data):
    for i in range(len(column_min_max)):
        data[columns[i]] = (data[columns[i]] - column_min_max[i][0]) / ((column_min_max[i][1]) - (column_min_max[i][0]))
    return data

def rescle(test_pred):
    prediction = [(pred * (column_min_max[0][1] - column_min_max[0][0])) + column_min_max[0][0] for pred in test_pred]
    return prediction
def pre_process():
    store_data = pd.read_csv(fileName)
    # sftp://wso2#192.168.32.11/home/wso2/suleka/salesPred/store2_1.csv
    store_data = store_data.drop(store_data[(store_data.Open == 0) & (store_data.Sales == 0)].index)
    #
    # store_data = store_data.drop(store_data[(store_data.Open != 0) & (store_data.Sales == 0)].index)
    # ---for segmenting original data --------------------------------
    original_data = store_data.copy()
    ## train_size = int(len(store_data) * (1.0 - test_ratio))
    validation_len = len(store_data[(store_data.Month == 6) & (store_data.Year == 2015)].index)
    test_len = len(store_data[(store_data.Month == 7) & (store_data.Year == 2015)].index)
    train_size = int(len(store_data) - (validation_len + test_len))
    train_data = store_data[:train_size]
    validation_data = store_data[(train_size - num_steps): validation_len + train_size]
    test_data = store_data[((validation_len + train_size) - num_steps):]
    original_val_data = validation_data.copy()
    original_test_data = test_data.copy()
    # -------------- processing train data---------------------------------------
    scaled_train_data = scale(train_data)
    train_X, train_y = segmentation(scaled_train_data)
    # -------------- processing validation data---------------------------------------
    scaled_validation_data = scale(validation_data)
    val_X, val_y = segmentation(scaled_validation_data)
    # -------------- processing test data---------------------------------------
    scaled_test_data = scale(test_data)
    test_X, test_y = segmentation(scaled_test_data)
    # ----segmenting original validation data-----------------------------------------------
    nonescaled_val_X, nonescaled_val_y = segmentation(original_val_data)
    # ----segmenting original test data-----------------------------------------------
    nonescaled_test_X, nonescaled_test_y = segmentation(original_test_data)
    return train_X, train_y, test_X, test_y, val_X, val_y, nonescaled_test_y, nonescaled_val_y

def setupRNN(inputs):
    cell = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True, activation=lstm_activation)
    val1, _ = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    val = tf.transpose(val1, [1, 0, 2])
    last = tf.gather(val, int(val.get_shape()[0]) - 1, name="last_lstm_output")
    # hidden layer
    hidden1 = tf.layers.dense(last, units=hidden1_nodes, activation=hidden2_activation)
    hidden2 = tf.layers.dense(hidden1, units=hidden2_nodes, activation=hidden1_activation)
    dropout = tf.layers.dropout(hidden2, rate=dropout_rate, training=True)
    weight = tf.Variable(tf.truncated_normal([hidden2_nodes, input_size]))
    bias = tf.Variable(tf.constant(0.1, shape=[input_size]))
    prediction = tf.matmul(dropout, weight) + bias
    return prediction
# saver = tf.train.Saver()
# saver.save(sess, "checkpoints_sales/sales_pred.ckpt")
@use_named_args(dimensions=dimensions)
def fitness(lstm_num_steps, size, lstm_hidden1_nodes, lstm_hidden2_nodes, lstm_init_epoch, lstm_max_epoch,
            lstm_learning_rate_decay, lstm_batch_size, lstm_dropout_rate):
    global num_steps, lstm_size, hidden2_nodes, hidden2_activation, hidden1_activation, hidden1_nodes, lstm_activation, init_epoch, max_epoch, learning_rate_decay, dropout_rate
    num_steps = lstm_num_steps
    lstm_size = size
    batch_size = lstm_batch_size
    learning_rate_decay = lstm_learning_rate_decay
    init_epoch = lstm_init_epoch
    max_epoch = lstm_max_epoch
    hidden1_nodes = lstm_hidden1_nodes
    hidden2_nodes = lstm_hidden2_nodes
    dropout_rate = lstm_dropout_rate
    # log_dir = log_dir_name(lstm_num_steps, size, lstm_hidden1_nodes, lstm_hidden2_nodes, lstm_learning_rate, lstm_init_epoch, lstm_max_epoch,
    #                        lstm_learning_rate_decay, lstm_batch_size)
    train_X, train_y, test_X, test_y, val_X, val_y, nonescaled_test_y, nonescaled_val_y = pre_process()
    inputs = tf.placeholder(tf.float32, [None, num_steps, features], name="inputs")
    targets = tf.placeholder(tf.float32, [None, input_size], name="targets")
    learning_rate = tf.placeholder(tf.float32, None, name="learning_rate")
    prediction = setupRNN(inputs)
    with tf.name_scope('loss'):
        model_loss = tf.losses.mean_squared_error(targets, prediction)
    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
        train_step = train_step
    # with tf.name_scope('accuracy'):
    #     correct_prediction = tf.sqrt(tf.losses.mean_squared_error(prediction, targets))
    #
    #     accuracy = correct_prediction
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    learning_rates_to_use = [
        init_learning_rate * (
            learning_rate_decay ** max(float(i + 1 - init_epoch), 0.0)
        ) for i in range(max_epoch)]
    for epoch_step in range(max_epoch):
        current_lr = learning_rates_to_use[epoch_step]
        for batch_X, batch_y in generate_batches(train_X, train_y, batch_size):
            train_data_feed = {
                inputs: batch_X,
                targets: batch_y,
                learning_rate: current_lr,
            }
            sess.run(train_step, train_data_feed)
    val_data_feed = {
        inputs: val_X,
        targets: val_y,
        learning_rate: 0.0,
    }
    pred = sess.run(prediction, val_data_feed)
    pred_vals = rescle(pred)
    pred_vals = np.array(pred_vals)
    pred_vals = pred_vals.flatten()
    pred_vals = pred_vals.tolist()
    nonescaled_y = nonescaled_val_y.flatten()
    nonescaled_y = nonescaled_y.tolist()
    val_accuracy = sqrt(mean_squared_error(nonescaled_y, pred_vals))
    global best_accuracy
    if val_accuracy < best_accuracy:
        # Save the new model to harddisk.
        saver = tf.train.Saver()
        saver.save(sess, "checkpoints_sales/sales_pred.ckpt")
        with open("best_configs.csv", "a") as f:
            writer = csv.writer(f)
            writer.writerows(zip([fileName], [num_steps], [lstm_size], [hidden2_nodes], [hidden2_activation], [hidden1_activation], [hidden1_nodes], [lstm_size], [lstm_activation], [init_epoch], [max_epoch], [learning_rate_decay], [dropout_rate], [val_accuracy]))
        # Update the classification accuracy.
        best_accuracy = val_accuracy
    # Clear the Keras session, otherwise it will keep adding new
    # models to the same TensorFlow graph each time we create
    # a model with a different set of hyper-parameters.
    # sess.clear_session()
    sess.close()
    tf.reset_default_graph()
    # NOTE: Scikit-optimize does minimization so it tries to
    # find a set of hyper-parameters with the LOWEST fitness-value.
    # Because we are interested in the HIGHEST classification
    # accuracy, we need to negate this number so it can be minimized.
    return val_accuracy

if __name__ == '__main__':
    start = time()
    for i in range(len(fileNames)):
        fileName = '{}{}'.format('home/suleka/Documents/sales_prediction/', fileNames[i])
        # /home/suleka/Documents/sales_prediction/
        column_min_max = column_min_max_all[i]
        # Bayesian optimization using Gaussian Processes.
        # acq_func -> https://arxiv.org/pdf/1807.02811.pdf
        search_result = gp_minimize(func=fitness,
                                    dimensions=dimensions,
                                    acq_func='EI',  # Expected Improvement.
                                    n_calls=40,
                                    x0=default_parameters)
    atexit.register(endlog)
    log("Start Program")
Shown below is the complete stack trace:
/home/wso2/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from float to np.floating is deprecated. In future, it will be treated as np.float64 == np.dtype(float).type.
  from ._conv import register_converters as _register_converters
auto_LSTM_skopt.py:138: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  data[columns[i]] = (data[columns[i]] - column_min_max[i][0]) / ((column_min_max[i][1]) - (column_min_max[i][0]))
/home/wso2/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py:100: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
  "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
Traceback (most recent call last):
  File "auto_LSTM_skopt.py", line 365, in <module>
    x0=default_parameters)
  File "/home/wso2/anaconda3/lib/python3.6/site-packages/skopt/optimizer/gp.py", line 228, in gp_minimize
    callback=callback, n_jobs=n_jobs)
  File "/home/wso2/anaconda3/lib/python3.6/site-packages/skopt/optimizer/base.py", line 240, in base_minimize
    result = optimizer.tell(x0, y0)
  File "/home/wso2/anaconda3/lib/python3.6/site-packages/skopt/optimizer/optimizer.py", line 432, in tell
    check_x_in_space(x, self.space)
  File "/home/wso2/anaconda3/lib/python3.6/site-packages/skopt/utils.py", line 186, in check_x_in_space
    raise ValueError("Not all points are within the bounds of"
ValueError: Not all points are within the bounds of the space.

The issue is with your size dimension. Every value in default_parameters must lie within the corresponding dimension of the search space; if any value falls outside, skopt throws the "Not all points are within the bounds of the space" error.
You currently have: size = Categorical(categories=[8,16,32,64,128], name='size')
In your default parameters: default_parameters = [5,35,30,15,5,60,0.99,8,0.1]
the second item (representing 'size') has the value 35, which is not among the size categories to search.
FIX 1.
Include 35 in the size space:
size = Categorical(categories=[8,16,32,35,64,128], name='size')
FIX 2.
Change 35 to 32 in default_parameters:
default_parameters = [5,32,30,15,5,60,0.99,8,0.1]
Use either of the fixes above and your code will run like a charm :)
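To catch this before a long optimization run starts, you can also validate x0 against the space up front; skopt's Space supports membership testing with in. A minimal sketch (assuming, as in the question, that every dimension is Categorical):

from skopt.space import Space

space = Space(dimensions)
if default_parameters not in space:
    # flag the offending value(s) by checking each Categorical's categories
    for dim, val in zip(dimensions, default_parameters):
        if val not in dim.categories:
            print('{} = {} is outside the search space'.format(dim.name, val))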

Related

InvalidArgumentError (see above for traceback): logits and labels must be broadcastable: logits_size=[183,2] labels_size=[20,2]

This is learning-based RNA-disease prediction code using a CNN that I downloaded from GitHub. The output is accuracy and AUC values, but the results are very unstable (sometimes 0.3, sometimes 0.8).
I don't know the reason. Since the split into training and validation sets in this code is done by a self-defined function, I wanted to try 10-fold cross-validation instead. However, when I wrote the cross-validation code, the error shown in the title appeared.
This is the code that divides the training set and the verification set in the source code.
def get_data(args):
    input_data, input_label = dh.get_samples(args)
    input_data = standard_scale(input_data)
    dev_sample_percentage = args.dev_percentage
    test_sample_percentage = args.test_percentage
    x = np.array(input_data)
    # Randomly shuffle data
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(input_label)))
    input_data = [x[i] for i in shuffle_indices]
    input_label = [input_label[i] for i in shuffle_indices]
    dev_sample_index = -2 * int(dev_sample_percentage * float(len(input_label)))
    test_sample_index = -1 * int(test_sample_percentage * float(len(input_label)))
    x_train, x_dev, test_data = input_data[:dev_sample_index], input_data[dev_sample_index:test_sample_index], input_data[test_sample_index:]
    y_train, y_dev, test_label = input_label[:dev_sample_index], input_label[dev_sample_index:test_sample_index], input_label[test_sample_index:]
    return x_train, x_dev, test_data, y_train, y_dev, test_label
This is my modified code.
def get_data(args):
    input_data, input_label = dh.get_samples(args)
    input_data = standard_scale(input_data)
    dev_sample_percentage = args.dev_percentage
    test_sample_percentage = args.test_percentage
    x = np.array(input_data)
    y = np.array(input_label)
    kf = KFold(n_splits=10)
    d = kf.split(x)
    for train_idx, test_idx in d:
        x_train = x[train_idx]
        x_dev = x[test_idx]
    l = kf.split(y)
    for train_idx, test_idx in l:
        y_train = y[train_idx]
        y_dev = y[test_idx]
    test_sample_index = -1 * int(test_sample_percentage * float(len(input_label)))
    test_data = input_data[test_sample_index:]
    test_lable = input_label[test_sample_index:]
    return x_train, x_dev, y_train, y_dev, test_data, test_lable
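For reference, a single kf.split call already yields index arrays that slice x and y together, so features and labels stay aligned in every fold without a second split; a minimal sketch with toy arrays (variable names hypothetical):

from sklearn.model_selection import KFold
import numpy as np

x = np.arange(20).reshape(10, 2)  # 10 samples, 2 features
y = np.arange(10)                 # 10 matching labels

kf = KFold(n_splits=5)
for train_idx, test_idx in kf.split(x):
    # the same index arrays index both x and y, keeping pairs aligned
    x_train, x_dev = x[train_idx], x[test_idx]
    y_train, y_dev = y[train_idx], y[test_idx]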
This is a screenshot of the error.
This is the complete code of this part.
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import argparse
import data_helpers as dh
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_recall_curve
from sklearn import metrics
from tensorflow.contrib import learn
import matplotlib.pyplot as plt
import sklearn.preprocessing as prep
from sklearn.metrics import average_precision_score
from sklearn.model_selection import KFold
def parse_args():
    parser = argparse.ArgumentParser(description="Run CNN.")
    ## the input file
    ## disease-gene relationships and miRNA-gene relationships
    parser.add_argument('--input_disease_miRNA', nargs='?', default='..\..\data\CNN\disease-miro-1024-sigmoid.csv',
                        help='Input disease_gene_relationship file')
    parser.add_argument('--input_label', nargs='?', default='..\..\data\CNN\label.csv',
                        help='sample label')
    parser.add_argument('--batch_size', nargs='?', default=64,
                        help='number of samples in one batch')
    parser.add_argument('--training_epochs', nargs='?', default=1,
                        help='number of epochs in SGD')
    parser.add_argument('--display_step', nargs='?', default=10)
    parser.add_argument('--test_percentage', nargs='?', default=0.1,
                        help='percentage of test samples')
    parser.add_argument('--dev_percentage', nargs='?', default=0.1,
                        help='percentage of validation samples')
    parser.add_argument('--L2_norm', nargs='?', default=0.001,
                        help='percentage of validation samples')
    parser.add_argument('--keep_prob', nargs='?', default=0.5,
                        help='keep_prob when using dropout option')
    parser.add_argument('--optimizer', nargs='?', default=tf.train.AdamOptimizer,
                        help='optimizer for learning weights')
    parser.add_argument('--learning_rate', nargs='?', default=1e-3,
                        help='learning rate for the SGD')
    return parser.parse_args()

def standard_scale(X_train):
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    return X_train

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    weights = tf.Variable(initial)
    return weights

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="VALID")

def max_pool_2(x, W):
    return tf.nn.max_pool(x, ksize=W, strides=[1, 10, 1, 1], padding="VALID")
def get_data(args):
    input_data, input_label = dh.get_samples(args)
    input_data = standard_scale(input_data)
    dev_sample_percentage = args.dev_percentage
    test_sample_percentage = args.test_percentage
    x = np.array(input_data)
    y = np.array(input_label)
    kf = KFold(n_splits=10)
    d = kf.split(x)
    for train_idx, test_idx in d:
        x_train = x[train_idx]
        x_dev = x[test_idx]
    l = kf.split(y)
    for train_idx, test_idx in l:
        y_train = y[train_idx]
        y_dev = y[test_idx]
    test_sample_index = -1 * int(test_sample_percentage * float(len(input_label)))
    test_data = input_data[test_sample_index:]
    test_lable = input_label[test_sample_index:]
    return x_train, x_dev, y_train, y_dev, test_data, test_lable
    # # Randomly shuffle data
    # np.random.seed(10)
    # shuffle_indices = np.random.permutation(np.arange(len(input_label)))
    # input_data = [x[i] for i in shuffle_indices]
    # input_label = [input_label[i] for i in shuffle_indices]
    # dev_sample_index = -2 * int(dev_sample_percentage * float(len(input_label)))
    # test_sample_index = -1 * int(test_sample_percentage * float(len(input_label)))
    # x_train, x_dev, test_data = input_data[:dev_sample_index], input_data[dev_sample_index:test_sample_index], input_data[test_sample_index:]
    # y_train, y_dev, test_label = input_label[:dev_sample_index], input_label[dev_sample_index:test_sample_index], input_label[test_sample_index:]
    #
    # return x_train, x_dev, test_data, y_train, y_dev, test_label
def deepnn(x, keep_prob, args):
    with tf.name_scope('reshape'):
        x = tf.reshape(x, [-1, 1024, 1, 1])
    with tf.name_scope('conv_pool'):
        filter_shape = [4, 1, 1, 4]
        W_conv = weight_variable(filter_shape)
        b_conv = bias_variable([4])
        h_conv = tf.nn.relu(conv2d(x, W_conv) + b_conv)
        h_pool = tf.nn.max_pool(h_conv, ksize=[1, 4, 1, 1], strides=[1, 4, 1, 1], padding="VALID")
        # filter_shape2 = [4,1,4,4]
        # W_conv2 = weight_variable(filter_shape2)
        # b_conv2 = bias_variable([4])
        # h_conv2 = tf.nn.relu(conv2d(h_pool, W_conv2) + b_conv2)
        # h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1,4,1,1], strides=[1,4,1,1], padding="VALID")
    regula = tf.contrib.layers.l2_regularizer(args.L2_norm)
    h_input1 = tf.reshape(h_pool, [-1, 255 * 4])
    W_fc1 = weight_variable([255 * 4, 50])
    b_fc1 = bias_variable([50])
    h_input2 = tf.nn.relu(tf.matmul(h_input1, W_fc1) + b_fc1)
    h_keep = tf.nn.dropout(h_input2, keep_prob)
    W_fc2 = weight_variable([50, 2])
    b_fc2 = bias_variable([2])
    h_output = tf.matmul(h_keep, W_fc2) + b_fc2
    regularizer = regula(W_fc1) + regula(W_fc2)
    return h_output, regularizer
def main(args):
    with tf.device('/cpu:0'):
        x_train, x_dev, test_data, y_train, y_dev, test_label = get_data(args)
        input_data = tf.placeholder(tf.float32, [None, 1024])
        input_label = tf.placeholder(tf.float32, [None, 2])
        keep_prob = tf.placeholder(tf.float32)
        y_conv, losses = deepnn(input_data, keep_prob, args)
        y_res = tf.nn.softmax(y_conv)
        with tf.name_scope('loss'):
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=input_label)
            cross_entropy = tf.reduce_mean(cross_entropy)
            los = cross_entropy + losses
        with tf.name_scope('optimizer'):
            optimizer = args.optimizer
            learning_rate = args.learning_rate
            train_step = optimizer(learning_rate).minimize(los)
            # optimizer = tf.train.MomentumOptimizer(learning_rate=0.02, momentum=)
            # train_step = optimizer.minimize(cross_entropy)
        with tf.name_scope('accuracy'):
            predictions = tf.argmax(y_conv, 1)
            correct_predictions = tf.equal(predictions, tf.argmax(input_label, 1))
            correct_predictions = tf.cast(correct_predictions, tf.float32)
            accuracy = tf.reduce_mean(correct_predictions)
        batch_size = args.batch_size
        num_epochs = args.training_epochs
        display_step = args.display_step
        k_p = args.keep_prob
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            batches = dh.batch_iter(list(zip(x_train, y_train)), batch_size, num_epochs)
            for i, batch in enumerate(batches):
                x_batch, y_batch = zip(*batch)  # pull the data in batch by batch
                train_step.run(feed_dict={input_data: x_batch, input_label: y_batch, keep_prob: k_p})
                if i % display_step == 0:
                    loss = sess.run(los, feed_dict={input_data: x_train, input_label: y_train, keep_prob: 1.0})
                    # print('after training loss = %f' % loss)
                    y_predict = sess.run(y_res, feed_dict={input_data: x_dev, input_label: y_dev, keep_prob: 1.0})[:, 1]
                    loss = sess.run(los, feed_dict={input_data: x_dev, input_label: y_dev, keep_prob: 1.0})
                    # print('test loss = %f' % loss)
                    false_positive_rate1, true_positive_rate1, thresholds1 = roc_curve(np.array(y_dev)[:, 1], y_predict)
                    roc_auc1 = auc(false_positive_rate1, true_positive_rate1)
                    # print(roc_auc1)
            ### print(accuracy.eval(feed_dict={input_data: x_train, input_label: y_train, keep_prob: 1.0}))
            print('accuracy=', accuracy.eval(feed_dict={input_data: test_data, input_label: test_label, keep_prob: 1.0}))
            y_predict = sess.run(y_res, feed_dict={input_data: test_data, input_label: test_label, keep_prob: 1.0})[:, 1]
            false_positive_rate1, true_positive_rate1, thresholds1 = roc_curve(np.array(test_label)[:, 1], y_predict)
            roc_auc1 = auc(false_positive_rate1, true_positive_rate1)
            print('roc_auc1=', roc_auc1)
            # plt.figure()
            # lw = 2
            # plt.title("ROC curve of %s (AUC = %.4f)")
            # plt.xlabel("False Positive Rate")
            # plt.ylabel("True Positive Rate")
            # plt.plot(false_positive_rate1, true_positive_rate1)  # use pylab to plot x and y
            # plt.show()  # show the plot on the screen
            #
            # plt.show()
            # np.savetxt("result_fp_tp_md_aver.txt", roc_curve(np.array(test_label)[:, 1], y_predict))
            # precision, recall, _ = precision_recall_curve(np.array(test_label)[:, 1], y_predict)
            # #
            # average_precision = average_precision_score(np.array(test_label)[:, 1], y_predict)
            # #
            # print('Average precision-recall score: {0:0.2f}'.format(average_precision))
            # y_predict[y_predict >= 0.5] = 1
            # y_predict[y_predict < 0.5] = 0
            # print(y_predict)
            # print(metrics.f1_score(np.array(test_label)[:, 1], y_predict))
            # np.savetxt("precision_aver.txt", precision)
            # np.savetxt("recall_aver.txt", recall)

if __name__ == '__main__':
    args = parse_args()
    main(args)
please help me!!! thanks a lot!!!

Running model twice inside same script gives different results in Tensorflow

I have an LSTM and I am running it twice inside my script (getting predictions twice as well). The prediction from the 2nd iteration is different from the prediction of the 1st iteration. But when I run the model manually two times, starting the 2nd run after the first one finishes, both runs give the same result.
Question: I want to get identical predictions in both the 1st and 2nd iterations. How can this be achieved? Shown below is my code.
import tensorflow as tf
import matplotlib as mplt
mplt.use('agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from math import sqrt
import csv
import atexit
from time import time, strftime, localtime
from datetime import timedelta
np.random.seed(1)
tf.set_random_seed(1)
class RNNConfig():
    input_size = 1
    noTimesToRun = 2
    # fileNames = ['store2_1.csv', 'store85_1.csv', 'store259_1.csv', 'store519_1.csv', 'store725_1.csv',
    #              'store749_1.csv',
    #              'store934_1.csv', 'store1019_1.csv']
    # column_min_max_all = [[[0, 11000], [1, 7]], [[0, 17000], [1, 7]], [[0, 23000], [1, 7]], [[0, 14000], [1, 7]],
    #                       [[0, 14000], [1, 7]], [[0, 15000], [1, 7]], [[0, 17000], [1, 7]], [[0, 25000], [1, 7]]]
    columns = ['Sales', 'DayOfWeek', 'SchoolHoliday', 'Promo', 'lagged_Open', 'lagged_promo', 'lagged_SchoolHoliday']
    # fileNames = ['store85_1', 'store519_1', 'store725_1', 'store749_1', 'store165_1', 'store925_1', 'store1089_1', 'store335_1']
    # column_min_max_all = [[[0, 17000], [1, 7]], [[0, 14000], [1, 7]], [[0, 14000], [1, 7]], [[0, 15000], [1, 7]], [[0, 9000], [1, 7]], [[0, 15000], [1, 7]], [[0, 21000], [1, 7]], [[0, 33000], [1, 7]]]
    fileNames = ['store85_1']
    column_min_max_all = [[[0, 17000], [1, 7]]]
    features = len(columns)
    num_steps = None
    lstm_size = None
    batch_size = None
    init_learning_rate = None
    learning_rate_decay = None
    init_epoch = None
    max_epoch = None
    hidden1_nodes = None
    hidden2_nodes = None
    dropout_rate = None
    hidden1_activation = None
    hidden2_activation = None
    lstm_activation = None
    fileName = None
    column_min_max = None
    # plotname = None
    writename = None
    RMSE = None
    MAE = None
    MAPE = None
    RMSPE = None
config = RNNConfig()
def secondsToStr(elapsed=None):
    if elapsed is None:
        return strftime("%Y-%m-%d %H:%M:%S", localtime())
    else:
        return str(timedelta(seconds=elapsed))

def log(s, elapsed=None):
    line = "="*40
    print(line)
    print(secondsToStr(), '-', s)
    if elapsed:
        print("Elapsed time:", elapsed)
    print(line)
    print()

def endlog():
    end = time()
    elapsed = end-start
    log("End Program", secondsToStr(elapsed))

def segmentation(data):
    seq = [price for tup in data[config.columns].values for price in tup]
    seq = np.array(seq)
    # split into items of features
    seq = [np.array(seq[i * config.features: (i + 1) * config.features])
           for i in range(len(seq) // config.features)]
    # split into groups of num_steps
    X = np.array([seq[i: i + config.num_steps] for i in range(len(seq) - config.num_steps)])
    y = np.array([seq[i + config.num_steps] for i in range(len(seq) - config.num_steps)])
    # get only sales value
    y = [[y[i][0]] for i in range(len(y))]
    y = np.asarray(y)
    print(y)
    return X, y

def scale(data):
    for i in range(len(config.column_min_max)):
        data[config.columns[i]] = (data[config.columns[i]] - config.column_min_max[i][0]) / ((config.column_min_max[i][1]) - (config.column_min_max[i][0]))
    return data

def rescle(test_pred):
    prediction = [(pred * (config.column_min_max[0][1] - config.column_min_max[0][0])) + config.column_min_max[0][0] for pred in test_pred]
    return prediction
def pre_process():
    store_data = pd.read_csv(config.fileName)
    store_data['lagged_Open'] = store_data['lagged_Open'].astype(int)
    store_data['lagged_promo'] = store_data['lagged_promo'].astype(int)
    store_data['lagged_SchoolHoliday'] = store_data['lagged_SchoolHoliday'].astype(int)
    #
    # store_data = store_data.drop(store_data[(store_data.Open != 0) & (store_data.Sales == 0)].index)
    # ---for segmenting original data --------------------------------
    # original_data = store_data.copy()
    ## train_size = int(len(store_data) * (1.0 - test_ratio))
    # test_len = len(store_data[(store_data.Month == 7) & (store_data.Year == 2015)].index)
    # train_size = int(len(store_data) - (test_len))
    #
    # train_data = store_data[:train_size]
    # test_data = store_data[(train_size - config.num_steps):]
    # original_test_data = test_data.copy()
    #
    # # -------------- processing train data---------------------------------------
    # scaled_train_data = scale(train_data)
    # train_X, train_y = segmentation(scaled_train_data)
    #
    # # -------------- processing test data---------------------------------------
    # scaled_test_data = scale(test_data)
    # test_X, test_y = segmentation(scaled_test_data)
    #
    # # ----segmenting original test data---------------------------------------------
    # nonescaled_test_X, nonescaled_test_y = segmentation(original_test_data)
    validation_len = len(store_data[(store_data.Month == 6) & (store_data.Year == 2015)].index)
    test_len = len(store_data[(store_data.Month == 7) & (store_data.Year == 2015)].index)
    train_size = int(len(store_data) - (validation_len + test_len))
    train_data = store_data[:train_size]
    validation_data = store_data[(train_size - config.num_steps): validation_len + train_size]
    test_data = store_data[((validation_len + train_size) - config.num_steps):]
    original_val_data = validation_data.copy()
    original_test_data = test_data.copy()
    # -------------- processing train data---------------------------------------
    scaled_train_data = scale(train_data)
    train_X, train_y = segmentation(scaled_train_data)
    # -------------- processing validation data---------------------------------------
    scaled_validation_data = scale(validation_data)
    val_X, val_y = segmentation(scaled_validation_data)
    # -------------- processing test data---------------------------------------
    scaled_test_data = scale(test_data)
    test_X, test_y = segmentation(scaled_test_data)
    # ----segmenting original validation data-----------------------------------------------
    nonescaled_val_X, nonescaled_val_y = segmentation(original_val_data)
    # ----segmenting original test data-----------------------------------------------
    nonescaled_test_X, nonescaled_test_y = segmentation(original_test_data)
    return train_X, train_y, test_X, test_y, val_X, val_y, nonescaled_test_y, nonescaled_val_y
def generate_batches(train_X, train_y, batch_size):
    num_batches = int(len(train_X)) // batch_size
    if batch_size * num_batches < len(train_X):
        num_batches += 1
    batch_indices = range(num_batches)
    for j in batch_indices:
        batch_X = train_X[j * batch_size: (j + 1) * batch_size]
        batch_y = train_y[j * batch_size: (j + 1) * batch_size]
        assert set(map(len, batch_X)) == {config.num_steps}
        yield batch_X, batch_y

def mean_absolute_percentage_error(y_true, y_pred):
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    itemindex = np.where(y_true == 0)
    y_true = np.delete(y_true, itemindex)
    y_pred = np.delete(y_pred, itemindex)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100

def RMSPE(y_true, y_pred):
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    itemindex = np.where(y_true == 0)
    y_true = np.delete(y_true, itemindex)
    y_pred = np.delete(y_pred, itemindex)
    return np.sqrt(np.mean(np.square(((y_true - y_pred) / y_true)), axis=0))
# def plot(true_vals,pred_vals,name):
# fig = plt.figure()
# fig = plt.figure(dpi=100, figsize=(20, 7))
# days = range(len(true_vals))
# plt.plot(days, pred_vals, label='pred sales')
# plt.plot(days, true_vals, label='truth sales')
# plt.legend(loc='upper left', frameon=False)
# plt.xlabel("day")
# plt.ylabel("sales")
# plt.grid(ls='--')
# plt.savefig(name, format='png', bbox_inches='tight', transparent=False)
# plt.close()
def write_results(true_vals, pred_vals, name):
    print("write method")
    # with open(name, "w") as f:
    #     writer = csv.writer(f)
    #     writer.writerows(zip(true_vals, pred_vals))
def train_test():
    train_X, train_y, test_X, test_y, val_X, val_y, nonescaled_test_y, nonescaled_val_y = pre_process()
    # tf.set_random_seed(1)
    inputs = tf.placeholder(tf.float32, [None, config.num_steps, config.features], name="inputs")
    targets = tf.placeholder(tf.float32, [None, config.input_size], name="targets")
    model_learning_rate = tf.placeholder(tf.float32, None, name="learning_rate")
    model_dropout_rate = tf.placeholder_with_default(0.0, shape=())
    global_step = tf.Variable(0, trainable=False)
    model_learning_rate = tf.train.exponential_decay(learning_rate=model_learning_rate, global_step=global_step,
                                                     decay_rate=config.learning_rate_decay,
                                                     decay_steps=config.init_epoch, staircase=False)
    cell = tf.contrib.rnn.LSTMCell(config.lstm_size, state_is_tuple=True, activation=config.lstm_activation,
                                   use_peepholes=True)
    val1, _ = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    val = tf.transpose(val1, [1, 0, 2])
    last = tf.gather(val, int(val.get_shape()[0]) - 1, name="last_lstm_output")
    # hidden layer
    hidden1 = tf.layers.dense(last, units=config.hidden1_nodes, activation=config.hidden2_activation)
    hidden2 = tf.layers.dense(hidden1, units=config.hidden2_nodes, activation=config.hidden1_activation)
    dropout = tf.layers.dropout(hidden2, rate=model_dropout_rate, training=True, seed=1)
    weight = tf.Variable(tf.truncated_normal([config.hidden2_nodes, config.input_size]))
    bias = tf.Variable(tf.constant(0.1, shape=[config.input_size]))
    prediction = tf.nn.relu(tf.matmul(dropout, weight) + bias)
    loss = tf.losses.mean_squared_error(targets, prediction)
    optimizer = tf.train.AdamOptimizer(model_learning_rate)
    minimize = optimizer.minimize(loss, global_step=global_step)
    # --------------------training------------------------------------------------------
    tf.set_random_seed(1)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    iteration = 1
    for epoch_step in range(config.max_epoch):
        for batch_X, batch_y in generate_batches(train_X, train_y, config.batch_size):
            train_data_feed = {
                inputs: batch_X,
                targets: batch_y,
                model_learning_rate: config.init_learning_rate,
                model_dropout_rate: config.dropout_rate
            }
            train_loss, _, value = sess.run([loss, minimize, val1], train_data_feed)
            if iteration % 5 == 0:
                print("Epoch: {}/{}".format(epoch_step, config.max_epoch),
                      "Iteration: {}".format(iteration),
                      "Train loss: {:.6f}".format(train_loss))
            iteration += 1
    saver = tf.train.Saver()
    saver.save(sess, "checkpoints_sales/sales_pred.ckpt")
    # --------------------testing------------------------------------------------------
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints_sales'))
    test_data_feed = {
        inputs: test_X,
    }
    test_pred = sess.run(prediction, test_data_feed)
    # rmsse = sess.run(correct_prediction, test_data_feed)
    pred_vals = rescle(test_pred)
    pred_vals = np.array(pred_vals)
    pred_vals = (np.round(pred_vals, 0)).astype(np.int32)
    pred_vals = pred_vals.flatten()
    pred_vals = pred_vals.tolist()
    nonescaled_y = nonescaled_test_y.flatten()
    nonescaled_y = nonescaled_y.tolist()
    # plot(nonescaled_y, pred_vals, config.plotname)
    # write_results(nonescaled_y, pred_vals, config.writename)
    meanSquaredError = mean_squared_error(nonescaled_y, pred_vals)
    rootMeanSquaredError = sqrt(meanSquaredError)
    print("RMSE:", rootMeanSquaredError)
    mae = mean_absolute_error(nonescaled_y, pred_vals)
    print("MAE:", mae)
    mape = mean_absolute_percentage_error(nonescaled_y, pred_vals)
    print("MAPE:", mape)
    rmse_val = RMSPE(nonescaled_y, pred_vals)
    print("RMSPE:", rmse_val)
    config.RMSE = rootMeanSquaredError
    config.MAE = mae
    config.MAPE = mape
    config.RMSPE = rmse_val
    # sess.close()
    # tf.reset_default_graph()
if __name__ == '__main__':
    start = time()
    for i in range(len(config.fileNames)):
        for j in range(config.noTimesToRun):
            config.fileName = '{}{}{}'.format('/home/suleka/Documents/sales_prediction/', config.fileNames[i], '.csv')
            # /home/suleka/Documents/sales_prediction/
            # '/home/wso2/suleka/salesPred/
            # config.plotname = '{}{}{}'.format('Sales_Prediction_testset_with_zero_bsl_plot_', config.fileNames[i],
            #                                   '.png')
            config.writename = '{}{}{}{}{}'.format('prediction_data/Sales_Prediction_testset_with_zero_bsl_results_', j, '_', config.fileNames[i], '.csv')
            write_file = '{}{}{}{}{}'.format('test_results/test__data_', j, '_', config.fileNames[i], '.csv')
            config.column_min_max = config.column_min_max_all[i]
            hyperparameters = pd.read_csv('allStores_test.csv', header=None, float_precision='round_trip')
            config.num_steps = hyperparameters.iloc[i:, 1].get_values()[0].astype(np.int32)
            config.lstm_size = hyperparameters.iloc[i:, 2].get_values()[0].astype(np.int32)
            config.hidden2_nodes = hyperparameters.iloc[i:, 3].get_values()[0].astype(np.int32)
            config.hidden2_activation = hyperparameters.iloc[i:, 4].get_values()[0]
            config.hidden1_activation = hyperparameters.iloc[i:, 5].get_values()[0]
            config.hidden1_nodes = hyperparameters.iloc[i:, 6].get_values()[0].astype(np.int32)
            config.lstm_activation = hyperparameters.iloc[i:, 7].get_values()[0]
            config.init_epoch = hyperparameters.iloc[i:, 8].get_values()[0].astype(np.int32)
            config.max_epoch = hyperparameters.iloc[i:, 9].get_values()[0].astype(np.int32)
            config.learning_rate_decay = hyperparameters.iloc[i:, 10].get_values()[0].astype(np.float32)
            config.dropout_rate = hyperparameters.iloc[i:, 11].get_values()[0].astype(np.float32)
            config.batch_size = hyperparameters.iloc[i:, 12].get_values()[0].astype(np.int32)
            config.init_learning_rate = hyperparameters.iloc[i:, 13].get_values()[0].astype(np.float32)
            config.hidden1_activation = eval(config.hidden1_activation)
            config.hidden2_activation = eval(config.hidden2_activation)
            config.lstm_activation = eval(config.lstm_activation)
            train_test()
            tf.reset_default_graph()
    atexit.register(endlog)
    log("Start Program")
Make sure that you always do the following, in this order:
# Reset the default graph
tf.reset_default_graph()
# Set the random seed
tf.set_random_seed(seed)
# Build the graph
# ....
# After creating the cell, make sure to initialize it
cell.build(inputs_shape)
# Initialize all variables in the graph
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Train the model
# ....
# Evaluate the model
# ....
In your code, the graph is being reset after the random seed is set: tf.reset_default_graph() wipes the graph-level seed, so every freshly reset graph must be seeded again before you build ops in it.
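A minimal self-contained sketch of the pattern (not the question's model): every call resets the graph first, seeds it, then builds the ops, so repeated runs inside one script produce identical values.

import tensorflow as tf

def build_and_run(seed=1):
    tf.reset_default_graph()      # 1. reset FIRST: this wipes any previous seed
    tf.set_random_seed(seed)      # 2. then seed the fresh graph
    w = tf.random_normal([2, 2])  # 3. build ops only after seeding
    with tf.Session() as sess:
        return sess.run(w)

print(build_and_run())  # both calls print
print(build_and_run())  # identical values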

Input contains NaN, infinity or a value too large for dtype('float64') in Tensorflow

I am trying to train an LSTM, and my model has an exponential learning rate decay and a dropout layer. In order to deactivate the dropout layer when testing and validating, I have created a placeholder for the dropout rate, given it a default value of 1.0, and when training I set it to 0.5. The dropout_rate placeholder value is passed to tf.layers.dropout(). When I run this during validation I get the following error:
ValueError: Input contains NaN, infinity or a value too large for dtype('float64').
Shown below is the stack trace:
Traceback (most recent call last):
  File "/home/suleka/Documents/sales_prediction/SalesPrediction_LSTM_mv.py", line 329, in <module>
    train_test()
  File "/home/suleka/Documents/sales_prediction/SalesPrediction_LSTM_mv.py", line 270, in train_test
    meanSquaredError = mean_squared_error(nonescaled_y, pred_vals)
  File "/home/suleka/anaconda3/lib/python3.6/site-packages/sklearn/metrics/regression.py", line 238, in mean_squared_error
    y_true, y_pred, multioutput)
  File "/home/suleka/anaconda3/lib/python3.6/site-packages/sklearn/metrics/regression.py", line 77, in _check_reg_targets
    y_pred = check_array(y_pred, ensure_2d=False)
  File "/home/suleka/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py", line 453, in check_array
    _assert_all_finite(array)
  File "/home/suleka/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py", line 44, in _assert_all_finite
    " or a value too large for %r." % X.dtype)
ValueError: Input contains NaN, infinity or a value too large for dtype('float64').
When I hard-code the dropout rate as a value in tf.layers.dropout like:
dropout = tf.layers.dropout(last, rate=0.5, training=True)
the code works fine. I am not sure what is happening in the code.
Shown below is my complete code:
import tensorflow as tf
import matplotlib as mplt
mplt.use('agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from math import sqrt
import csv
np.random.seed(1)
tf.set_random_seed(1)
class RNNConfig():
    input_size = 1
    num_steps = 7  # 5
    lstm_size = 64  # 16
    num_layers = 1
    keep_prob = 0.8
    batch_size = 16  # 64
    init_epoch = 15  # 5
    max_epoch = 20  # 100 or 50
    # test_ratio = 0.2
    fileName = 'store2_1.csv'
    graph = tf.Graph()
    column_min_max = [[0, 11000], [1, 7]]
    columns = ['Sales', 'DayOfWeek', 'SchoolHoliday', 'Promo']
    features = len(columns)
    hidden1_nodes = 64
    hidden2_nodes = 8
config = RNNConfig()
def segmentation(data):
    seq = [price for tup in data[config.columns].values for price in tup]
    seq = np.array(seq)
    # split into items of features
    seq = [np.array(seq[i * config.features: (i + 1) * config.features])
           for i in range(len(seq) // config.features)]
    # split into groups of num_steps
    X = np.array([seq[i: i + config.num_steps] for i in range(len(seq) - config.num_steps)])
    y = np.array([seq[i + config.num_steps] for i in range(len(seq) - config.num_steps)])
    # get only sales value
    y = [[y[i][0]] for i in range(len(y))]
    y = np.asarray(y)
    return X, y

def scale(data):
    for i in range(len(config.column_min_max)):
        data[config.columns[i]] = (data[config.columns[i]] - config.column_min_max[i][0]) / ((config.column_min_max[i][1]) - (config.column_min_max[i][0]))
    return data

def rescle(test_pred):
    prediction = [(pred * (config.column_min_max[0][1] - config.column_min_max[0][0])) + config.column_min_max[0][0] for pred in test_pred]
    return prediction
def pre_process():
    store_data = pd.read_csv(config.fileName)
    store_data = store_data.drop(store_data[(store_data.Open == 0) & (store_data.Sales == 0)].index)
    #
    # store_data = store_data.drop(store_data[(store_data.Open != 0) & (store_data.Sales == 0)].index)
    # ---for segmenting original data --------------------------------
    # original_data = store_data.copy()
    ## train_size = int(len(store_data) * (1.0 - test_ratio))
    validation_len = len(store_data[(store_data.Month == 6) & (store_data.Year == 2015)].index)
    test_len = len(store_data[(store_data.Month == 7) & (store_data.Year == 2015)].index)
    train_size = int(len(store_data) - (validation_len + test_len))
    train_data = store_data[:train_size]
    validation_data = store_data[(train_size - config.num_steps): validation_len + train_size]
    test_data = store_data[((validation_len + train_size) - config.num_steps):]
    original_val_data = validation_data.copy()
    original_test_data = test_data.copy()
    # -------------- processing train data---------------------------------------
    scaled_train_data = scale(train_data)
    train_X, train_y = segmentation(scaled_train_data)
    # -------------- processing validation data---------------------------------------
    scaled_validation_data = scale(validation_data)
    val_X, val_y = segmentation(scaled_validation_data)
    # -------------- processing test data---------------------------------------
    scaled_test_data = scale(test_data)
    test_X, test_y = segmentation(scaled_test_data)
    # ----segmenting original validation data-----------------------------------------------
    nonescaled_val_X, nonescaled_val_y = segmentation(original_val_data)
    # ----segmenting original test data---------------------------------------------
    nonescaled_test_X, nonescaled_test_y = segmentation(original_test_data)
    return train_X, train_y, test_X, test_y, val_X, val_y, nonescaled_test_y, nonescaled_val_y
def generate_batches(train_X, train_y, batch_size):
    num_batches = int(len(train_X)) // batch_size
    if batch_size * num_batches < len(train_X):
        num_batches += 1
    batch_indices = range(num_batches)
    for j in batch_indices:
        batch_X = train_X[j * batch_size: (j + 1) * batch_size]
        batch_y = train_y[j * batch_size: (j + 1) * batch_size]
        assert set(map(len, batch_X)) == {config.num_steps}
        yield batch_X, batch_y

def mean_absolute_percentage_error(y_true, y_pred):
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    itemindex = np.where(y_true == 0)
    y_true = np.delete(y_true, itemindex)
    y_pred = np.delete(y_pred, itemindex)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100

def RMSPE(y_true, y_pred):
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    return np.sqrt(np.mean(np.square(((y_true - y_pred) / y_pred)), axis=0))
def plot(true_vals, pred_vals, name):
    fig = plt.figure()
    fig = plt.figure(dpi=100, figsize=(20, 7))
    days = range(len(true_vals))
    plt.plot(days, pred_vals, label='pred sales')
    plt.plot(days, true_vals, label='truth sales')
    plt.legend(loc='upper left', frameon=False)
    plt.xlabel("day")
    plt.ylabel("sales")
    plt.grid(ls='--')
    plt.savefig(name, format='png', bbox_inches='tight', transparent=False)
    plt.close()

def write_results(true_vals, pred_vals, name):
    with open(name, "w") as f:
        writer = csv.writer(f)
        writer.writerows(zip(true_vals, pred_vals))
def train_test():
    train_X, train_y, test_X, test_y, val_X, val_y, nonescaled_test_y, nonescaled_val_y = pre_process()
    # Add nodes to the graph
    with config.graph.as_default():
        tf.set_random_seed(1)
        learning_rate = tf.placeholder(tf.float32, None, name="learning_rate")
        inputs = tf.placeholder(tf.float32, [None, config.num_steps, config.features], name="inputs")
        targets = tf.placeholder(tf.float32, [None, config.input_size], name="targets")
        global_step = tf.Variable(0, trainable=False)
        dropout_rate = tf.placeholder_with_default(1.0, shape=())
        learning_rate = tf.train.exponential_decay(learning_rate=learning_rate, global_step=global_step, decay_rate=0.96, decay_steps=5, staircase=False)
        cell = tf.contrib.rnn.LSTMCell(config.lstm_size, state_is_tuple=True, activation=tf.nn.relu)
        val1, _ = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
        val = tf.transpose(val1, [1, 0, 2])
        last = tf.gather(val, int(val.get_shape()[0]) - 1, name="last_lstm_output")
        # hidden layer
        last = tf.layers.dense(last, units=config.hidden1_nodes, activation=tf.nn.relu)
        last = tf.layers.dense(last, units=config.hidden2_nodes, activation=tf.nn.relu)
        weight = tf.Variable(tf.truncated_normal([config.hidden2_nodes, config.input_size]))
        bias = tf.Variable(tf.constant(0.1, shape=[config.input_size]))
        dropout = tf.layers.dropout(last, rate=dropout_rate, training=True)
        prediction = tf.matmul(dropout, weight) + bias
        loss = tf.losses.mean_squared_error(targets, prediction)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        minimize = optimizer.minimize(loss, global_step=global_step)
        # correct_prediction = tf.sqrt(tf.losses.mean_squared_error(prediction, targets))
    # --------------------training------------------------------------------------------
    with tf.Session(graph=config.graph) as sess:
        tf.set_random_seed(1)
        tf.global_variables_initializer().run()
        iteration = 1
        for epoch_step in range(config.max_epoch):
            for batch_X, batch_y in generate_batches(train_X, train_y, config.batch_size):
                train_data_feed = {
                    inputs: batch_X,
                    targets: batch_y,
                    learning_rate: 0.01,
                    dropout_rate: 0.5
                }
                train_loss, _, value, gs = sess.run([loss, minimize, val1, global_step], train_data_feed)
                if iteration % 5 == 0:
                    print("Epoch: {}/{}".format(epoch_step, config.max_epoch),
                          "Iteration: {}".format(iteration),
                          "Train loss: {:.6f}".format(train_loss))
                iteration += 1
        saver = tf.train.Saver()
        saver.save(sess, "checkpoints_sales/sales_pred.ckpt")
    # --------------------validation------------------------------------------------------
    with tf.Session(graph=config.graph) as sess:
        tf.set_random_seed(1)
        saver.restore(sess, tf.train.latest_checkpoint('checkpoints_sales'))
        test_data_feed = {
            inputs: val_X,
            dropout_rate: 1.0
        }
        test_pred = sess.run(prediction, test_data_feed)
        # rmsse = sess.run(correct_prediction, test_data_feed)
        pred_vals = rescle(test_pred)
        pred_vals = np.array(pred_vals)
        pred_vals = pred_vals.flatten()
        pred_vals = pred_vals.tolist()
        nonescaled_y = nonescaled_val_y.flatten()
        nonescaled_y = nonescaled_y.tolist()
        plot(nonescaled_y, pred_vals, "Sales Prediction VS Truth mv testSet.png")
        write_results(nonescaled_y, pred_vals, "Sales Prediction batch mv results_all validationSet.csv")
        meanSquaredError = mean_squared_error(nonescaled_y, pred_vals)
        rootMeanSquaredError = sqrt(meanSquaredError)
        print("RMSE:", rootMeanSquaredError)
        mae = mean_absolute_error(nonescaled_y, pred_vals)
        print("MAE:", mae)
        mape = mean_absolute_percentage_error(nonescaled_y, pred_vals)
        print("MAPE:", mape)
        rmse_val = RMSPE(nonescaled_y, pred_vals)
        print("RMSPE:", rmse_val)
    # --------------------testing------------------------------------------------------
    with tf.Session(graph=config.graph) as sess:
        tf.set_random_seed(1)
        saver.restore(sess, tf.train.latest_checkpoint('checkpoints_sales'))
        test_data_feed = {
            inputs: test_X,
            dropout_rate: 1.0
        }
        test_pred = sess.run(prediction, test_data_feed)
        # rmsse = sess.run(correct_prediction, test_data_feed)
        pred_vals = rescle(test_pred)
        pred_vals = np.array(pred_vals)
        pred_vals = (np.round(pred_vals, 0)).astype(np.int32)
        pred_vals = pred_vals.flatten()
        pred_vals = pred_vals.tolist()
        nonescaled_y = nonescaled_test_y.flatten()
        nonescaled_y = nonescaled_y.tolist()
        plot(nonescaled_y, pred_vals, "Sales Prediction VS Truth mv testSet.png")
        write_results(nonescaled_y, pred_vals, "Sales Prediction batch mv results_all validationSet.csv")
        meanSquaredError = mean_squared_error(nonescaled_y, pred_vals)
        rootMeanSquaredError = sqrt(meanSquaredError)
        print("RMSE:", rootMeanSquaredError)
        mae = mean_absolute_error(nonescaled_y, pred_vals)
        print("MAE:", mae)
        mape = mean_absolute_percentage_error(nonescaled_y, pred_vals)
        print("MAPE:", mape)
        rmse_val = RMSPE(nonescaled_y, pred_vals)
        print("RMSPE:", rmse_val)
if __name__ == '__main__':
    train_test()
When using tf.layers.dropout, the rate argument tells how much of the data to drop: when you give 1.0, all of the output is gone. Replace 1.0 with 0.0 and it should work.
TensorFlow documentation: https://www.tensorflow.org/api_docs/python/tf/layers/dropout
I am putting this here because even though @Almog's answer was correct, it didn't have the explanation I wanted. So for anyone confused like me:
If you use tf.nn.dropout(), to deactivate the dropout layer you should put keep_prob=1.0, not keep_prob=0.0, as keep_prob means 'the probability that each element is kept'. So keeping it at 1.0 makes sense to deactivate it.
If you are using tf.layers.dropout(), you should put rate=0.0, not rate=1.0, as rate here means 'the dropout rate (should be between 0 and 1); e.g. rate=0.1 would drop out 10% of input units'. So rate=0.0 means that none of the input units are dropped.
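A minimal sketch contrasting the two APIs (TF 1.x): both lines below leave the input untouched, i.e. dropout is effectively off.

import tensorflow as tf

x = tf.ones([1, 4])
nn_off = tf.nn.dropout(x, keep_prob=1.0)                     # keep everything -> dropout off
layers_off = tf.layers.dropout(x, rate=0.0, training=True)   # drop nothing -> dropout off
with tf.Session() as sess:
    print(sess.run([nn_off, layers_off]))  # both print the input unchanged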

ap_uniform_sampler() missing 1 required positional argument: 'high' in Ray Tune package for python

I am trying to use the Ray Tune package for hyperparameter tuning of an LSTM implemented in pure TensorFlow. I am using the HyperBand scheduler and the HyperOptSearch algorithm for this, and I am also using the Trainable class API. When I try to run it I get the following error:
TypeError: ap_uniform_sampler() missing 1 required positional argument: 'high'
Shown below is the stack trace:
FutureWarning: Conversion of the second argument of issubdtype from float to np.floating is deprecated. In future, it will be treated as np.float64 == np.dtype(float).type.
  from ._conv import register_converters as _register_converters
Process STDOUT and STDERR is being redirected to /tmp/ray/session_2018-12-19_09-43-46_5469/logs.
Waiting for redis server at 127.0.0.1:14332 to respond...
Waiting for redis server at 127.0.0.1:25158 to respond...
Starting the Plasma object store with 3.220188364 GB memory using /dev/shm.
Failed to start the UI, you may need to run 'pip install jupyter'.
== Status ==
Using HyperBand: num_stopped=0 total_brackets=0 Round #0:
Resources requested: 0/4 CPUs, 0/0 GPUs
Memory usage on this node: 3.7/8.1 GB
Traceback (most recent call last):
File "/home/suleka/Documents/sales_prediction/auto_LSTM_try3.py", line 398, in <module>
run_experiments(config, search_alg=algo, scheduler=hyperband)
File "/home/suleka/anaconda3/lib/python3.6/site-packages/ray/tune/tune.py", line 108, in run_experiments
runner.step()
File "/home/suleka/anaconda3/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 114, in step
next_trial = self._get_next_trial()
File "/home/suleka/anaconda3/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 254, in _get_next_trial
self._update_trial_queue(blocking=wait_for_trial)
File "/home/suleka/anaconda3/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 330, in _update_trial_queue
trials = self._search_alg.next_trials()
File "/home/suleka/anaconda3/lib/python3.6/site-packages/ray/tune/suggest/suggestion.py", line 67, in next_trials
for trial in self._trial_generator:
File "/home/suleka/anaconda3/lib/python3.6/site-packages/ray/tune/suggest/suggestion.py", line 88, in _generate_trials
suggested_config = self._suggest(trial_id)
File "/home/suleka/anaconda3/lib/python3.6/site-packages/ray/tune/suggest/hyperopt.py", line 81, in _suggest
self.rstate.randint(2**31 - 1))
File "/home/suleka/anaconda3/lib/python3.6/site-packages/hyperopt/tpe.py", line 835, in suggest
= tpe_transform(domain, prior_weight, gamma)
File "/home/suleka/anaconda3/lib/python3.6/site-packages/hyperopt/tpe.py", line 816, in tpe_transform
s_prior_weight
File "/home/suleka/anaconda3/lib/python3.6/site-packages/hyperopt/tpe.py", line 690, in build_posterior
b_post = fn(*b_args, **dict(named_args))
TypeError: ap_uniform_sampler() missing 1 required positional argument: 'high'
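For reference, hyperopt's uniform prior takes both bounds as required positional arguments, so a valid search-space entry looks like the sketch below (the parameter names are hypothetical, since the question's space definition is not shown):

from hyperopt import hp

space = {
    # hp.uniform(label, low, high) -- 'high' is required
    'learning_rate': hp.uniform('learning_rate', 1e-4, 1e-1),
    'dropout_rate': hp.uniform('dropout_rate', 0.1, 0.9),
}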
My code is shown below:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import time
import ray
from ray.tune import grid_search, run_experiments, register_trainable, Trainable
from ray.tune.schedulers import HyperBandScheduler
from tensorflow.examples.tutorials.mnist import input_data
# from ray.tune import sample_from
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from math import sqrt
from ray.tune.suggest import HyperOptSearch
import argparse
from hyperopt import hp
num_steps = 14
lstm_size = 32
batch_size = 8
init_learning_rate = 0.01
learning_rate_decay = 0.99
init_epoch = 5 # 5
max_epoch = 60 # 100 or 50
hidden1_nodes = 30
hidden2_nodes = 15
hidden1_activation = tf.nn.tanh
hidden2_activation = tf.nn.tanh
lstm_activation = tf.nn.relu
input_size = 1
num_layers = 1
column_min_max = [[0, 11000], [1, 7]]
columns = ['Sales', 'DayOfWeek', 'SchoolHoliday', 'Promo']
features = len(columns)
def segmentation(data):
    seq = [price for tup in data[columns].values for price in tup]
    seq = np.array(seq)
    # split into items of features
    seq = [np.array(seq[i * features: (i + 1) * features])
           for i in range(len(seq) // features)]
    # split into groups of num_steps
    X = np.array([seq[i: i + num_steps] for i in range(len(seq) - num_steps)])
    y = np.array([seq[i + num_steps] for i in range(len(seq) - num_steps)])
    # get only sales value
    y = [[y[i][0]] for i in range(len(y))]
    y = np.asarray(y)
    return X, y

def scale(data):
    for i in range(len(column_min_max)):
        data[columns[i]] = (data[columns[i]] - column_min_max[i][0]) / ((column_min_max[i][1]) - (column_min_max[i][0]))
    return data

def rescle(test_pred):
    prediction = [(pred * (column_min_max[0][1] - column_min_max[0][0])) + column_min_max[0][0] for pred in test_pred]
    return prediction
def pre_process():
    store_data = pd.read_csv('/home/suleka/salesPred/store2_1.csv')
    store_data = store_data.drop(store_data[(store_data.Open == 0) & (store_data.Sales == 0)].index)
    #
    # store_data = store_data.drop(store_data[(store_data.Open != 0) & (store_data.Sales == 0)].index)
    # ---for segmenting original data --------------------------------
    original_data = store_data.copy()
    ## train_size = int(len(store_data) * (1.0 - test_ratio))
    validation_len = len(store_data[(store_data.Month == 6) & (store_data.Year == 2015)].index)
    test_len = len(store_data[(store_data.Month == 7) & (store_data.Year == 2015)].index)
    train_size = int(len(store_data) - (validation_len + test_len))
    train_data = store_data[:train_size]
    validation_data = store_data[(train_size - num_steps): validation_len + train_size]
    test_data = store_data[((validation_len + train_size) - num_steps):]
    original_val_data = validation_data.copy()
    original_test_data = test_data.copy()
    # -------------- processing train data---------------------------------------
    scaled_train_data = scale(train_data)
    train_X, train_y = segmentation(scaled_train_data)
    # -------------- processing validation data---------------------------------------
    scaled_validation_data = scale(validation_data)
    val_X, val_y = segmentation(scaled_validation_data)
    # -------------- processing test data---------------------------------------
    scaled_test_data = scale(test_data)
    test_X, test_y = segmentation(scaled_test_data)
    # ----segmenting original validation data-----------------------------------------------
    nonescaled_val_X, nonescaled_val_y = segmentation(original_val_data)
    # ----segmenting original test data-----------------------------------------------
    nonescaled_test_X, nonescaled_test_y = segmentation(original_test_data)
    return train_X, train_y, test_X, test_y, val_X, val_y, nonescaled_test_y, nonescaled_val_y
def generate_batches(train_X, train_y, batch_size):
num_batches = int(len(train_X)) // batch_size
if batch_size * num_batches < len(train_X):
num_batches += 1
batch_indices = range(num_batches)
for j in batch_indices:
batch_X = train_X[j * batch_size: (j + 1) * batch_size]
batch_y = train_y[j * batch_size: (j + 1) * batch_size]
# assert set(map(len, batch_X)) == {num_steps}
yield batch_X, batch_y
def setupRNN(inputs):
cell = tf.contrib.rnn.LSTMCell(lstm_size, state_is_tuple=True, activation=lstm_activation)
val1, _ = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
val = tf.transpose(val1, [1, 0, 2])
last = tf.gather(val, int(val.get_shape()[0]) - 1, name="last_lstm_output")
# hidden layer
hidden1 = tf.layers.dense(last, units=hidden1_nodes, activation=hidden1_activation)
hidden2 = tf.layers.dense(hidden1, units=hidden2_nodes, activation=hidden2_activation)
weight = tf.Variable(tf.truncated_normal([hidden2_nodes, input_size]))
bias = tf.Variable(tf.constant(0.1, shape=[input_size]))
prediction = tf.matmul(hidden2, weight) + bias
return prediction
class TrainMNIST(Trainable):
"""Example MNIST trainable."""
def _setup(self, config):
global num_steps, lstm_size, batch_size, hidden1_nodes, hidden2_nodes, hidden1_activation, hidden2_activation, lstm_activation, init_learning_rate, init_epoch, max_epoch, learning_rate_decay
self.timestep = 0
self.train_X, self.train_y, self.test_X, self.test_y, self.val_X, self.val_y, self.nonescaled_test_y, self.nonescaled_val_y = pre_process()
# read the sampled hyper-parameters first, since the inputs placeholder depends on num_steps
num_steps = config["num_steps"]
lstm_size = config["lstm_size"]
hidden1_nodes = config["hidden1_nodes"]
hidden2_nodes = config["hidden2_nodees"]  # key spelled this way to match the search space below
batch_size = config["batch_size"]
init_learning_rate = config["learning_rate"]
learning_rate_decay = config["learning_rate_decay"]
max_epoch = config["max_epoch"]
init_epoch = config["init_epoch"]
self.inputs = tf.placeholder(tf.float32, [None, num_steps, features], name="inputs")
self.targets = tf.placeholder(tf.float32, [None, input_size], name="targets")
self.learning_rate = tf.placeholder(tf.float32, None, name="learning_rate")
self.prediction = setupRNN(self.inputs)
with tf.name_scope('loss'):
model_loss = tf.losses.mean_squared_error(self.targets, self.prediction)
with tf.name_scope('adam_optimizer'):
train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(model_loss)
self.train_step = train_step
with tf.name_scope('accuracy'):
correct_prediction = tf.sqrt(tf.losses.mean_squared_error(self.prediction, self.targets))
self.accuracy = correct_prediction
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.iterations = 0
self.saver = tf.train.Saver()
def _train(self):
learning_rates_to_use = [
init_learning_rate * (
learning_rate_decay ** max(float(i + 1 - init_epoch), 0.0)
) for i in range(max_epoch)]
for epoch_step in range(max_epoch):
current_lr = learning_rates_to_use[epoch_step]
i = 0
for batch_X, batch_y in generate_batches(self.train_X, self.train_y, batch_size):
train_data_feed = {
self.inputs: batch_X,
self.targets: batch_y,
self.learning_rate: current_lr,
}
self.sess.run(self.train_step, train_data_feed)
val_data_feed = {
self.inputs: self.val_X,
self.targets: self.val_y,
self.learning_rate: 0.0,
}
pred = self.sess.run(self.prediction, val_data_feed)
pred_vals = rescle(pred)
pred_vals = np.array(pred_vals)
pred_vals = pred_vals.flatten()
pred_vals = pred_vals.tolist()
nonescaled_y = self.nonescaled_val_y.flatten()
nonescaled_y = nonescaled_y.tolist()
val_accuracy = sqrt(mean_squared_error(nonescaled_y, pred_vals))
self.iterations += 1
return {"RMSE_loss": val_accuracy}
def _save(self, checkpoint_dir):
return self.saver.save(
self.sess, checkpoint_dir + "/save", global_step=self.iterations)
def _restore(self, path):
return self.saver.restore(self.sess, path)
# !!! Example of using the ray.tune Python API !!!
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--smoke-test', action='store_true', help='Finish quickly for testing')
args, _ = parser.parse_known_args()
ray.init(redirect_output=True)
register_trainable("my_class", TrainMNIST)
space = {
'num_steps': hp.uniform('num_steps', 2, 14),
'lstm_size': hp.uniform('lstm_size', [8,16,32,64,128]),
'hidden1_nodes': hp.choice("hidden1_nodes", [4,8,16,32,64]),
'hidden2_nodees': hp.choice("hidden2_nodees", [2,4,8,16,32]),
'learning_rate': hp.choice("learning_rate", [0.01,0.1,0.5,0.05]),
'learning_rate_decay': hp.choice("learning_rate_decay", [0.99,0.8,0.7]),
'max_epoch': hp.choice("max_epoch", [60,50,100,120,200]),
'init_epoch': hp.choice("init_epoch", [5,10,15,20]),
'batch_size': hp.choice("batch_size", [5,8,16,30,31,64])
}
config = {
"my_exp": {
"run": "exp",
"num_samples": 10 if args.smoke_test else 1000,
"stop": {
'RMSE_loss': 400.00,
'time_total_s': 600,
},
}
}
algo = HyperOptSearch(space, max_concurrent=4, reward_attr="RMSE_loss")
hyperband = HyperBandScheduler(
time_attr="training_iteration", reward_attr="RMSE_loss", max_t=10)
run_experiments(config, search_alg=algo, scheduler=hyperband)
Looks like a simple typographical error here:
'lstm_size': hp.uniform('lstm_size', [8,16,32,64,128]),
It seems you wanted lstm_size to be chosen from the supplied list. In that case you need to use hp.choice here too, just as you did for all the other parameters below 'lstm_size'. hp.uniform takes two positional bounds, hp.uniform(label, low, high), so passing a single list leaves high unset, which is exactly what the TypeError from ap_uniform_sampler() is reporting.
'lstm_size': hp.choice('lstm_size', [8,16,32,64,128])
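For reference, a minimal sketch contrasting the two expressions (plain hyperopt API; num_steps and lstm_size are the names from the question). Note that hp.uniform returns a float, so if num_steps must be an integer you may want hp.choice or hp.quniform there as well:
from hyperopt import hp
space = {
    # hp.uniform(label, low, high) samples a float from [low, high]
    'num_steps': hp.uniform('num_steps', 2, 14),
    # hp.choice(label, options) picks one element from the supplied list
    'lstm_size': hp.choice('lstm_size', [8, 16, 32, 64, 128]),
}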

Rank mismatch error in Tensorflow

I'm working on creating an image classifier that can differentiate between cats and dogs. I have the following code:
import cv2
import os
from tqdm import tqdm
import numpy as np
import tensorflow as tf
img_height = 128
img_width = 128
path = "./train"
# class info
file = os.listdir(path)
index = []
images = []
# image size and channels
channels = 3
n_inputs = img_width * img_height * channels
# First convolutional layer
conv1_fmaps = 96 # Number of feature maps created by this layer
conv1_ksize = 4 # kernel size 4x4
conv1_stride = 2
conv1_pad = "SAME"
# Second convolutional layer
conv2_fmaps = 192
conv2_ksize = 4
conv2_stride = 4
conv2_pad = "SAME"
# Third layer is a pooling layer
pool3_fmaps = conv2_fmaps # Isn't it obvious?
n_fc1 = 192 # Total number of output features
n_outputs = 2
with tf.name_scope("inputs"):
X = tf.placeholder(tf.float32, shape=[None, img_width, img_height, channels], name="X")
X_reshaped = tf.reshape(X, shape=[-1, img_height, img_width, channels])
y = tf.placeholder(tf.int32, shape=[None, 2], name="y")
conv1 = tf.layers.conv2d(X_reshaped, filters=conv1_fmaps, kernel_size=conv1_ksize, strides=conv1_stride, padding=conv1_pad, activation=tf.nn.relu, name="conv1")
conv2 = tf.layers.conv2d(conv1, filters=conv2_fmaps, kernel_size=conv2_ksize, strides=conv2_stride, padding=conv2_pad, activation=tf.nn.relu, name="conv2")
n_epochs = 10
batch_size = 250
with tf.name_scope("pool3"):
pool3 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
pool3_flat = tf.reshape(pool3, shape=[-1, pool3_fmaps * 8 * 8])
with tf.name_scope("fc1"):
fc1 = tf.layers.dense(pool3_flat, n_fc1, activation=tf.nn.relu, name="fc1")
with tf.name_scope("output"):
logits = tf.layers.dense(fc1, n_outputs, name="output")
Y_proba = tf.nn.softmax(logits, name="Y_proba")
with tf.name_scope("train"):
xentropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer()
training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
with tf.name_scope("init_and_save"):
saver = tf.train.Saver()
def next_batch(num):
index = []
images = []
# Data set Creation
print("Creating batch dataset "+str(num+1)+"...")
for f in tqdm(range(num * batch_size, (num+1)*batch_size)):
if "dog" in file[f]:
index.append(np.array([0, 1]))
else:
index.append(np.array([1, 0]))
image = cv2.imread(path + "/" + file[f])
image = cv2.resize(image, (img_width, img_height), interpolation=cv2.INTER_LINEAR)
# image = image.astype(np.float32)
images.append(image)
images = np.array(images, dtype=np.uint8)
images = images.astype('float32')
images = images / 255
print("\nBatch "+str(num+1)+" creation finished.")
# print([images, index])
return [images, index]
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for iteration in range(25000 // batch_size):
X_batch, y_batch = next_batch(iteration)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
print(epoch, "Train accuracy:", acc_train)
save_path = saver.save(sess, "./dogvscat_mnist_model.ckpt")
But I'm getting this error:
ValueError: Rank mismatch: Rank of labels (received 2) should equal rank of logits minus 1 (received 2).
Can anyone point out the problem and help me solve it? I'm totally new to this.
For tf.nn.sparse_softmax_cross_entropy_with_logits, rank(labels) = rank(logits) - 1, so you need to redefine the labels placeholder as follows:
...
y = tf.placeholder(tf.int32, shape=[None], name="y")
...
xentropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=y)
...
X_batch, y_batch = next_batch(iteration)
y_batch = np.argmax(y_batch, axis=1)
Or you can just use tf.nn.softmax_cross_entropy_with_logits without changing the labels placeholder:
xentropy=tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=y)
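Putting the two options together, a minimal sketch (TF 1.x; logits and next_batch come from the question, while the float placeholder and the tf.argmax in the accuracy op are my additions). Note that softmax_cross_entropy_with_logits expects float labels, and that tf.nn.in_top_k needs integer class indices in either case:
# Option 1: integer class indices of shape [batch], rank(labels) = rank(logits) - 1
y = tf.placeholder(tf.int32, shape=[None], name="y")
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
correct = tf.nn.in_top_k(logits, y, 1)
# feed side: collapse the one-hot rows produced by next_batch into indices
X_batch, y_batch = next_batch(iteration)
y_batch = np.argmax(y_batch, axis=1)

# Option 2: keep one-hot labels of shape [batch, 2], but make the placeholder float
y = tf.placeholder(tf.float32, shape=[None, 2], name="y")
xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
correct = tf.nn.in_top_k(logits, tf.argmax(y, 1), 1)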

Resources