How to implement TensorBoard v2 (tf.contrib.summary) with while_loop?

I want to log the loss and accuracy values evaluated inside a nested body function of a while_loop during training. This is the structure: I have a class, and a method of this class builds the graph using a while_loop (build_graph()). Another method calls build_graph() and then runs the session. It works, or it seems to work. However, I would like to use TensorBoard to check whether loss and accuracy are actually improving, but I'm not able to summarize those tensors. I've tried defining a tf.contrib.summary.create_file_writer('summary') and a graph and passing them to build_graph() as parameters, so that the body function can see them. I have checked the list coming from tf.contrib.summary.all_summary_ops() during graph execution, and it isn't empty. However, when I open TensorBoard I get "No dashboards are active for the current data set." The graph doesn't show up either. I am aware that tf.summary does not work in a while_loop, but it seems that tf.contrib.summary does.
Here is a working example:
import tensorflow as tf
import sys
import datamanagement

class myNet:
    def __init__(self):
        self.varlist = ["x", "y"]
        self.data = []
        self.hsize = [10, 10]
        self.batch_size = 10
        self.tr_mainsteps = 1000
        self.learnrate = 0.001
        self.sourcedatafile = "XYfit.csv"  # source file
        # Dataset parameters
        self.seq_params = {'dim': len(self.varlist),
                           'batch_size': self.batch_size,
                           'shuffle': True,
                           'filepath': self.sourcedatafile}
        # Dataset from CSV file
        self.dataset = datamanagement.CSVDataSet(**self.seq_params).finaldataset
        # Iterator on the CSV file
        self.dataiterator = self.dataset.make_initializable_iterator()
        # Optimizer
        self.optim = tf.train.RMSPropOptimizer(learning_rate=self.learnrate)
        # Official creation of the graph
        self.graph = tf.get_default_graph()
        with self.graph.as_default():
            # Writer creation
            self.writer = tf.contrib.summary.create_file_writer('./summary')
            with self.writer.as_default():
                tf.contrib.summary.always_record_summaries()

    def mymodel(self, Zinp, reuse=False):
        # This function builds the graph of the network
        with tf.variable_scope("mymod/net", reuse=reuse):
            h1 = tf.layers.dense(Zinp, self.hsize[0], activation=tf.nn.leaky_relu, name='h1')
            h2 = tf.layers.dense(h1, self.hsize[1], activation=tf.nn.leaky_relu, name='h2')
            out = tf.layers.dense(h2, len(self.varlist), activation=None, name='final')  # None means linear activation
        return out

    def _trainepoch(self, ind):
        with self.writer.as_default():
            # Real data tensor from CSV file
            self.realdata = self.dataiterator.get_next()
            # Random input vector
            self.Znoise = tf.random_uniform([self.batch_size, len(self.varlist)], minval=-1., maxval=1.)
            # Model and output tensor
            self.output = self.mymodel(self.Znoise, reuse=tf.AUTO_REUSE)
            # Loss
            self.loss = tf.losses.mean_squared_error(self.realdata, self.output)
            tf.contrib.summary.scalar("loss", self.loss)
            # Trainable variables
            t_vars = tf.trainable_variables()
            # Evaluation of the weight gradients
            grad = self.optim.compute_gradients(self.loss, var_list=t_vars)
            # Update weights based on gradients
            return self.optim.apply_gradients(grad), tf.contrib.summary.all_summary_ops()

    def _train_buildgraph(self):
        def body(ind, ops):
            train_up, ops = self._trainepoch(ind)
            # Ensure that the update is applied before continuing.
            with tf.control_dependencies([train_up]):
                ind = ind + 1
            return ind, ops

        def cond(ind, ops):
            return ind < self.tr_mainsteps

        return tf.while_loop(cond, body, [tf.constant(0), [tf.Variable(False)]])

    def config_run(self, trepoch=50, testNet=False):
        self.tr_mainsteps = trepoch  # Number of training epochs
        with self.graph.as_default():
            with self.writer.as_default():
                tr_loop, summary_ops = self._train_buildgraph()
        # Graph execution
        with self.graph.as_default():
            with self.writer.as_default():
                with tf.Session() as sess:
                    sess.run(tf.initializers.global_variables())
                    sess.run(self.dataiterator.initializer)
                    tf.contrib.summary.initialize(
                        graph=tf.get_default_graph()
                    )
                    sess.run([summary_ops, tr_loop, summary_ops])

def main(argv):
    hmodel = myNet()
    hmodel.config_run()

if __name__ == "__main__":
    main(sys.argv[1:])
Can someone help me?
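For reference, two details of the tf.contrib.summary API seem worth checking against the example above (an observation, not a verified fix): always_record_summaries() returns a context manager, so calling it without with is a no-op, and in graph mode tf.contrib.summary.initialize() also needs the session. A minimal sketch of the usual graph-mode wiring, with a placeholder loss tensor standing in for the real graph:

import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
writer = tf.contrib.summary.create_file_writer('./summary')
with writer.as_default(), tf.contrib.summary.always_record_summaries():
    # summary ops must be created inside the recording context
    loss_tensor = tf.constant(0.0)  # placeholder for the real loss
    tf.contrib.summary.scalar("loss", loss_tensor, step=global_step)
summary_ops = tf.contrib.summary.all_summary_ops()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # in graph mode, initialize() needs the session as well as the graph
    tf.contrib.summary.initialize(graph=tf.get_default_graph(), session=sess)
    sess.run(summary_ops)
    # flush() returns an op in graph mode, so it has to be run too
    sess.run(tf.contrib.summary.flush())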

Related

What is an efficient way to make a dataset and dataloader for high frequency time series with multiple individuals?

I'm trying to forecast high frequency time series using LSTMs and the PyTorch library. I'm going through the PyTorch tutorial for creating custom datasets and models, and I figured out how to create my Dataset class and my DataLoader. They work fine, but they take too much time to generate one batch.
I want to generate batches of fixed size; each batch contains time series from different individuals, and the input window has the same length as the output window (multi-step prediction).
I think the issue is due to the fact that I'm verifying the windows are correct.
My dataframe has a little more than 3M rows and 6 columns. I have some 100 individuals, and for each individual I have 4 different time series $y_{1}$, $y_{2}$, $y_{3}$ and $y_{4}$. I have no missing values at all, the time steps are consecutive, and every individual has the same time steps.
My code is:
import numpy as np
import torch
from torch.utils.data import Dataset

class TSDataset(Dataset):
    def __init__(self, train_data, unique_column='unique_id', input_length=3840,
                 target_length=3840, targets=['y1', 'y2', 'y3', 'y4'], transform=None):
        self.train_data = train_data
        self.unique_column = unique_column
        self.input_length = input_length
        self.target_length = target_length
        self.total_window_length = input_length + target_length
        self.targets = targets

    def __len__(self):
        return len(self.train_data)

    def verify_time_steps(self, idx):
        change = False
        # Check that the window doesn't overlap several individuals
        num_individuals = self.train_data.iloc[np.arange(idx + self.total_window_length), :][self.unique_column].unique().shape[0]
        if num_individuals != 1:
            change = True
        if idx + self.total_window_length >= len(self.train_data):
            change = True
        return change

    def reshuffle(self):
        return np.random.randint(0, len(self.train_data))

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        change = self.verify_time_steps(idx)
        while change:
            idx = self.reshuffle()
            change = self.verify_time_steps(idx)
        sample = self.train_data.iloc[np.arange(idx, idx + self.input_length), :][self.targets].values
        labels = self.train_data.iloc[np.arange(idx + self.input_length, idx + self.input_length + self.target_length), :][self.targets].values
        sample = torch.from_numpy(sample)
        labels = torch.from_numpy(labels)
        return sample, labels
I've tried using the TimeSeriesDataset from PyTorchForecasting but I had a hard time creating models that suit it.
I've also tried creating the dataset outside, as a numpy array but my RAM can't handle it.
Hope you can help me figure out how to alleviate the computations.
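One possible way to alleviate this (a sketch under stated assumptions, not tested on this data): verify_time_steps slices np.arange(idx + self.total_window_length), i.e. every row from 0 up to the window end, on every access, so the check gets more expensive as idx grows. Since the individuals and time steps are fixed, the set of valid window starts can be computed once up front. Assuming the dataframe is sorted by individual and then by time:

import numpy as np

def valid_starts(df, unique_column, total_window_length):
    # last row index of each contiguous block belonging to one individual
    ids = df[unique_column].to_numpy()
    n = len(ids)
    block_ends = np.flatnonzero(ids[1:] != ids[:-1]).tolist() + [n - 1]
    starts, block_start = [], 0
    for block_end in block_ends:
        # every start whose full window fits inside this block is valid
        last_valid = block_end - total_window_length + 1
        if last_valid >= block_start:
            starts.extend(range(block_start, last_valid + 1))
        block_start = block_end + 1
    return starts

__init__ would then store self.starts = valid_starts(train_data, unique_column, self.total_window_length) once, __len__ would return len(self.starts), and __getitem__ would slice at self.starts[idx], removing the per-sample verification and reshuffling entirely.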

Working with large multiple datasets where each dataset contains multiple values - Pytorch

I'm training a neural network and have, overall, more than 15 GB of data inside a folder. The folder contains multiple pickle files, and each file holds two lists that each contain multiple values.
This looks like the following:
dataset_folder:\
file.pickle
file_2.pickle
...
...
file_n.pickle
Each file_*.pickle contains two variable-length lists (list x and list y).
How do we load all the data to train the model without running into memory issues?
By implementing the custom Dataset class provided by PyTorch. We need to implement three methods so the PyTorch loader can work with our data:
__len__
__getitem__
__init__
Let's go through how to implement each one of them separately.
__init__
def __init__(self):
    # Original data has the following format
    """
    dict_object =
    {
        "x": [],
        "y": []
    }
    """
    DIRECTORY = "data/raw"
    self.dataset_file_name = os.listdir(DIRECTORY)
    self.dataset_file_name_index = 0
    self.dataset_length = 0
    self.prefix_sum_idx = list()
    # Loop over each file and calculate the length of the overall dataset
    # (you might need to check that file_name is actually a file)
    for file_name in os.listdir(DIRECTORY):
        with open(f'{DIRECTORY}/{file_name}', "rb") as openfile:
            dict_object = pickle.load(openfile)
            # x and y are parallel lists, so each file contributes len(x)
            # samples (this assumes len(x) == len(y) in every file)
            curr_page_sum = len(dict_object["x"])
            self.prefix_sum_idx.append(curr_page_sum)
            self.dataset_length += curr_page_sum
    # prefix sum, so we know in which file each global index lives
    for i in range(1, len(self.prefix_sum_idx)):
        self.prefix_sum_idx[i] = self.prefix_sum_idx[i] + self.prefix_sum_idx[i - 1]
    assert self.prefix_sum_idx[-1] == self.dataset_length
    self.x = []
    self.y = []
As you can see above, the main idea is to use a prefix sum to treat the whole dataset as one, so that whenever you need to access a specific index later, you simply look into prefix_sum_idx to see in which file that idx appears.
For example, say we need to access index 150. Thanks to the prefix sum, we now know that index 150 lives in the second .pickle file. We still need a fast mechanism to find where that idx falls within prefix_sum_idx; this is explained in __getitem__.
__getitem__
def read_pickle_file(self, idx):
    file_name = self.dataset_file_name[idx]
    with open(f'{YOUR_DIRECTORY}/{file_name}', "rb") as openfile:
        dict_object = pickle.load(openfile)
    self.x = dict_object['x']
    self.y = dict_object['y']

def __getitem__(self, idx):
    # Similar to C++ std::upper_bound - O(log n)
    temp = bisect.bisect_right(self.prefix_sum_idx, idx)
    self.read_pickle_file(temp)
    # subtract the number of samples held in all preceding files
    local_idx = idx - (self.prefix_sum_idx[temp - 1] if temp > 0 else 0)
    return self.x[local_idx], self.y[local_idx]
Check the bisect_right() docs for details on how it works; in short, it returns the rightmost position at which the given element could be inserted into the sorted list while keeping it sorted. In our approach, we're interested in only one question: "which file should I access in order to get the appropriate data?" More importantly, it answers that in O(log n).
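As a quick illustration with hypothetical file sizes (and showing why the local offset subtracts the previous file's prefix sum):

import bisect

prefix_sum_idx = [100, 180, 300]  # file 0 holds items 0-99, file 1 holds 100-179, file 2 holds 180-299
idx = 150
file_idx = bisect.bisect_right(prefix_sum_idx, idx)  # -> 1, i.e. the second file
local_idx = idx - (prefix_sum_idx[file_idx - 1] if file_idx > 0 else 0)  # -> 50
print(file_idx, local_idx)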
__len__
def __len__(self):
    return self.dataset_length
In order to get the length of our dataset, we loop through each file and accumulate the results, as shown in __init__.
The full code sample goes like this:
import pickle
import torch
import torch.nn as nn
import numpy
import os
import bisect
from torch.utils.data import Dataset, DataLoader
from torch.nn import functional as F

class dataset(Dataset):
    def __init__(self):
        # Original data has the following format
        """
        dict_object =
        {
            "x": [],
            "y": []
        }
        """
        DIRECTORY = "data/raw"
        self.dataset_file_name = os.listdir(DIRECTORY)
        self.dataset_file_name_index = 0
        self.dataset_length = 0
        self.prefix_sum_idx = list()
        # Loop over each file and calculate the length of the overall dataset
        # (you might need to check that file_name is actually a file)
        for file_name in os.listdir(DIRECTORY):
            with open(f'{DIRECTORY}/{file_name}', "rb") as openfile:
                dict_object = pickle.load(openfile)
                # x and y are parallel lists, so each file contributes len(x) samples
                curr_page_sum = len(dict_object["x"])
                self.prefix_sum_idx.append(curr_page_sum)
                self.dataset_length += curr_page_sum
        # prefix sum, so we know in which file each global index lives
        for i in range(1, len(self.prefix_sum_idx)):
            self.prefix_sum_idx[i] = self.prefix_sum_idx[i] + self.prefix_sum_idx[i - 1]
        assert self.prefix_sum_idx[-1] == self.dataset_length
        self.x = []
        self.y = []

    def read_pickle_file(self, idx):
        file_name = self.dataset_file_name[idx]
        with open(f'{YOUR_DIRECTORY}/{file_name}', "rb") as openfile:
            dict_object = pickle.load(openfile)
        self.x = dict_object['x']
        self.y = dict_object['y']

    def __getitem__(self, idx):
        # Similar to C++ std::upper_bound - O(log n)
        temp = bisect.bisect_right(self.prefix_sum_idx, idx)
        self.read_pickle_file(temp)
        local_idx = idx - (self.prefix_sum_idx[temp - 1] if temp > 0 else 0)
        return self.x[local_idx], self.y[local_idx]

    def __len__(self):
        return self.dataset_length

large_dataset = dataset()
train_size = int(0.8 * len(large_dataset))
validation_size = len(large_dataset) - train_size
train_dataset, validation_dataset = torch.utils.data.random_split(large_dataset, [train_size, validation_size])
validation_loader = DataLoader(validation_dataset, batch_size=64, num_workers=4, shuffle=False)
train_loader = DataLoader(train_dataset, batch_size=64, num_workers=4, shuffle=False)
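One optional refinement, not part of the original answer: as written, every __getitem__ reads a pickle file from disk, even when consecutive indices land in the same file. Caching the index of the last file loaded avoids the repeated reads (a sketch; _cached_file_idx is a name introduced here):

    def read_pickle_file(self, idx):
        # skip the disk read if this file's lists are already loaded
        if getattr(self, '_cached_file_idx', None) == idx:
            return
        file_name = self.dataset_file_name[idx]
        with open(f'{YOUR_DIRECTORY}/{file_name}', "rb") as openfile:
            dict_object = pickle.load(openfile)
        self.x = dict_object['x']
        self.y = dict_object['y']
        self._cached_file_idx = idx

How much this helps depends on access order: sequential access hits the same file repeatedly, while after random_split the indices are shuffled, so the cache pays off less often.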

How to apply a function to convert the paths to arrays using cv2 in tensorflow data pipeline?

I'm trying to load two lists containing image paths and their corresponding labels. Something like this:
p0 = ['a','b',....] #paths to images .tif format
p1 = [1,2,3,......] #paths to images .tif format
labels = [0,1,1,...] #corresponding labels w.r.t both the lists
I used tf.data in the following way:
def TFData(p_0, p_1, batch_size, labels=None, is_train=True):
    dset = tf.data.Dataset.from_tensor_slices((p_0, p_1))
    if labels is not None:
        label = tf.data.Dataset.from_tensor_slices(labels)
    AUTO = tf.data.experimental.AUTOTUNE
    final_dset = tf.data.Dataset.zip((dset, label))
    final_dset = final_dset.batch(batch_size, drop_remainder=is_train).prefetch(AUTO)
    return final_dset
This returns:
<PrefetchDataset shapes: (((64,), (64,)), (64,)), types: ((tf.string, tf.string), tf.int32)>
My question is: how do I apply a function that converts the paths to arrays using cv2, since the images are .tif files, such that the result will be:
<PrefetchDataset shapes: (((64,256,256,3), (64,256,256,3)), (64,)), types: ((tf.float64, tf.float64), tf.int32)>
I tried using dataset.map as follows:
def to_array(p_0):
    im_1 = cv2.imread(p_0, 1)
    # im = tfio.experimental.image.decode_tiff(paths)
    im_1 = cv2.resize(im_1, (img_w, img_h))  # img_w = img_h = 256
    im_1 = np.asarray(im_1, dtype=np.float64)
    im_1 /= 255
    return im_1

def parse_fn(p_0):
    [p_0, ] = tf.py_function(to_array, [p_0], [tf.float64])
    return p_0

def TFData(p_0, p_1, batch_size, labels=None, is_train=True):
    dset_1 = tf.data.Dataset.from_tensor_slices(p_0)
    dset_1 = dset_1.map(parse_fn)
    dset_2 = tf.data.Dataset.from_tensor_slices(p_1)
    dset_2 = dset_2.map(parse_fn)
    if labels is not None:
        label = tf.data.Dataset.from_tensor_slices(labels)
    AUTO = tf.data.experimental.AUTOTUNE
    final_dset = tf.data.Dataset.zip((dset_1, dset_2, label))
    final_dset = final_dset.batch(batch_size, drop_remainder=is_train).prefetch(AUTO)
    return final_dset
print(train_data) #where train_data is defined as TFData()
<PrefetchDataset shapes: ((<unknown>, <unknown>), (64,)), types: ((tf.float64, tf.float64), tf.int32)>
This throws an error:
for (t, p), l in train_data.as_numpy_iterator():
    print(t)
    print(p)
    print(l)
    print(type(t))
    break
SystemError: <built-in function imread> returned NULL without setting an error
[[{{node EagerPyFunc}}]] [Op:IteratorGetNext]
Any help will be highly appreciated
I think your problem is in cv2.imread.
Have you checked, outside of these functions, whether it reads and plots the data correctly?
Please try with -1 instead:
im_1 = cv2.imread(p_0, -1)
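It may also be worth checking what to_array actually receives: inside tf.py_function the argument arrives as an EagerTensor, and passing that straight to cv2.imread is a known way to get the SystemError above, because imread expects a plain string path. A hedged sketch of the conversion (img_w and img_h taken from the question's comment):

import cv2
import numpy as np

img_w = img_h = 256  # per the comment in the question

def to_array(p_0):
    path = p_0.numpy().decode('utf-8')  # EagerTensor -> bytes -> str
    im_1 = cv2.imread(path, 1)
    if im_1 is None:  # imread returns None on unreadable paths instead of raising
        raise ValueError('cv2 could not read: ' + path)
    im_1 = cv2.resize(im_1, (img_w, img_h))
    return np.asarray(im_1, dtype=np.float64) / 255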

How to print the detected classes after performing object detection on an image?

I am following the object_detection_tutorial.ipynb tutorial.
Here is the code (I only put the parts which are needed; the rest of the code is the same as the notebook):
my_results = []  # I added this, a list to hold the detected classes
PATH_TO_LABELS = 'D:\\TensorFlow\\models\\research\\object_detection\\data\\oid_v4_label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
PATH_TO_TEST_IMAGES_DIR = pathlib.Path('C:\\Users\\Bhavin\\Desktop\\objects')
TEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob("*.jpg")))
TEST_IMAGE_PATHS
model = load_model()

def run_inference_for_single_image(model, image):
    image = np.asarray(image)
    # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
    input_tensor = tf.convert_to_tensor(image)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis, ...]
    # Run inference
    output_dict = model(input_tensor)
    # All outputs are batch tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(output_dict.pop('num_detections'))
    output_dict = {key: value[0, :num_detections].numpy()
                   for key, value in output_dict.items()}
    output_dict['num_detections'] = num_detections
    # detection_classes should be ints.
    output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)
    # Handle models with masks:
    if 'detection_masks' in output_dict:
        # Reframe the bbox mask to the image size.
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            output_dict['detection_masks'], output_dict['detection_boxes'],
            image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,
                                           tf.uint8)
        output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
    return output_dict

def show_inference(model, image_path):
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = np.array(Image.open(image_path))
    # Actual detection.
    output_dict = run_inference_for_single_image(model, image_np)
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=8)
    name = "Image" + str(i) + ".jpg"
    img = Image.fromarray(image_np)
    plt.imsave(name, image_np)
    my_results.append(output_dict['detection_classes'])  # I added this
    print(my_results)  # I added this
    # img.show()

i = 1
for image_path in TEST_IMAGE_PATHS:
    show_inference(model, image_path)
    i += 1
I checked some related Stack Overflow questions, and the answers had something to do with the category index, but the code and examples used were very different from the tutorial I am following.
The line my_results.append(output_dict['detection_classes']) gives me the output: [array([55], dtype=int64)]
How do I extract the classes of the detected objects?
First, import six.
Then add the get_classes_name_and_scores method below, before def show_inference(model, image_path):.
get_classes_name_and_scores returns a dict like {'name': 'person', 'score': '91%'}:
def get_classes_name_and_scores(
        boxes,
        classes,
        scores,
        category_index,
        max_boxes_to_draw=20,
        min_score_thresh=.9):  # only returns detections scoring above 90%
    display_str = {}
    if not max_boxes_to_draw:
        max_boxes_to_draw = boxes.shape[0]
    for i in range(min(max_boxes_to_draw, boxes.shape[0])):
        if scores is None or scores[i] > min_score_thresh:
            if classes[i] in six.viewkeys(category_index):
                display_str['name'] = category_index[classes[i]]['name']
                display_str['score'] = '{}%'.format(int(100 * scores[i]))
    return display_str
Then, after the call to vis_util.visualize_boxes_and_labels_on_image_array, add:
print(get_classes_name_and_scores(
    output_dict['detection_boxes'],
    output_dict['detection_classes'],
    output_dict['detection_scores'],
    category_index))
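If the goal is a list of all detected labels per image rather than a single dict, a direct lookup over the existing outputs should also work (a sketch reusing the tutorial's category_index; the 0.5 threshold is an arbitrary choice here):

detected_names = [category_index[c]['name']
                  for c, s in zip(output_dict['detection_classes'],
                                  output_dict['detection_scores'])
                  if s > 0.5 and c in category_index]
print(detected_names)  # e.g. ['Person', 'Dog']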

Training procedure

I noticed that sklearn.linear_model.SGDClassifier implements gradient descent for a linear model, so one could say that the class combines the fitting procedure (SGD) and the model (linear model) in one class.
SGD is, however, not specific to linear models, and linear models can be trained with many other optimizers, each with particular pros (memory usage, convergence speed, local-optima avoidance*, ...). One could say that such optimization techniques define how to iterate over the training data, whether to do so online or offline, in which feature dimension to apply an update, and when to stop (possibly based on an error function over a validation set).
In particular, I implemented a model using Theano and wrapped it in a fit/predict interface. Theano is cool because it lets one define a callable that applies gradient descent to one sample or to a set of samples, as well as a callable that returns the error on a validation set. But this coolness is not inherent to Theano; many other models can simply define an update function and an error-evaluation function, which can then be used by different iteration and stopping policies for fitting.
The Theano examples often use minibatches, and the minibatch code is copy-pasted or reimplemented a lot with only minor adjustments that could easily be factored out. So I was hoping that sklearn implements something you initialize with some parameters and an update/error callable to fit 'any model'. Or perhaps there is some good practice on how to do this yourself (especially w.r.t. the interface of the fitter).
Is there anything like this (in sklearn), that is, fitters which do not define the model?
*In the particular case of linear models and an l2 cost function, local optima do not exist of course, but still.
EDIT
Fair enough, this calls for a suggestion. I coded these two classes, which are not 100% clean, but they give an idea of what I mean:
import numpy

class StochasticUpdate():
    def __init__(self, model, update, n_epochs, n_data_points, error=None, test_fraction=None):
        self.update = update
        self.n_epochs = n_epochs
        self.n_data_points = n_data_points
        self.error = error
        self.model = model
        if self.error is None and test_fraction is not None:
            raise ValueError('error parameter must be specified if a test_fraction (value: %s) should be used.' % test_fraction)
        self.do_test = test_fraction is not None
        self.n_train_samples = int(n_data_points * (1 - test_fraction)) if self.do_test else n_data_points
        if self.do_test:
            self.test_range = numpy.arange(self.n_train_samples, n_data_points)
            self.n_test_samples = int(n_data_points * test_fraction)
        self.train_range = numpy.arange(0, self.n_train_samples)

    def fit(self):
        if self.do_test:
            self.test_errors = []
        self.train_errors = []
        self.mean_cost_values = []
        for epoch in range(self.n_epochs):
            order = numpy.random.permutation(self.n_train_samples)
            mean_cost_value = 0
            for i in range(self.n_train_samples):
                mean_cost_value += self.update([order[i]])
            self.mean_cost_values.append(mean_cost_value / self.n_data_points)
            if self.error is not None:
                self.train_errors.append(self.error(self.train_range))
                if self.do_test:
                    self.test_errors.append(self.error(self.test_range))
        return self.model

from math import ceil

class MinibatchStochasticUpdate(StochasticUpdate):
    def __init__(self, model, update, n_epochs, n_data_points, error, batch_size, patience=5000, patience_increase=2,
                 improvement_threshold=0.995, validation_frequency=None, validate_fraction=0.1, test_fraction=None):
        super().__init__(model, update, n_epochs, n_data_points, error=error, test_fraction=test_fraction)
        self.batch_size = batch_size
        self.patience = patience
        self.patience_increase = patience_increase
        self.improvement_threshold = improvement_threshold
        self.n_validation_samples = int(n_data_points * validate_fraction)
        self.validation_range = numpy.arange(self.n_train_samples, self.n_train_samples + self.n_validation_samples)
        self.n_train_batches = int(ceil(self.n_train_samples / self.batch_size))
        self.train_batch_ranges = [
            numpy.arange(minibatch_index * self.batch_size, min((minibatch_index + 1) * self.batch_size, self.n_train_samples))
            for minibatch_index in range(self.n_train_batches)
        ]
        self.validation_frequency = min(self.n_train_batches, patience / 2) if validation_frequency is None else validation_frequency

    def fit(self):
        self.best_validation_error = numpy.inf
        best_params = None
        iteration = 0
        for epoch in range(self.n_epochs):
            for minibatch_index in range(self.n_train_batches):
                self.update(self.train_batch_ranges[minibatch_index])
                if (iteration + 1) % self.validation_frequency == 0:
                    current_validation_error = self.error(self.validation_range)
                    if current_validation_error < self.best_validation_error:
                        if current_validation_error < self.best_validation_error * self.improvement_threshold:
                            self.patience = max(self.patience, iteration * self.patience_increase)
                        best_params = self.model.copy_parameters()
                        self.best_validation_error = current_validation_error
                if iteration >= self.patience:
                    # early stopping: patience exhausted, restore the best parameters
                    self.model.set_parameters(best_params)
                    return self.model
                iteration += 1
        self.model.set_parameters(best_params)
        return self.model
Then, in the model's fit method, one could support different training approaches and stopping criteria like this:
def fit(self, X, y):
    X_shared = theano.shared(X, borrow=True)
    y_shared = theano.shared(y, borrow=True)
    learning_rate = self.training_method_options['learning_rate']
    trainer = {
        'stochastic_gradient_descent': lambda: StochasticUpdate(
            self,
            update=self.update_stochastic_gradient_descent_function(X_shared, y_shared, learning_rate),
            n_epochs=self.training_method_options['n_epochs'],
            n_data_points=X.shape[0],
            error=self.evaluation_function(X_shared, y_shared),
        ),
        'minibatch_gradient_descent': lambda: MinibatchStochasticUpdate(
            self,
            update=self.update_stochastic_gradient_descent_function(X_shared, y_shared, learning_rate),
            n_epochs=self.training_method_options['n_epochs'],
            n_data_points=X.shape[0],
            error=self.evaluation_function(X_shared, y_shared),
            batch_size=self.training_method_options['batch_size'],
        ),
    }[self.training_method]()
    trainer.fit()
    return self
Obviously the hash-map part is hacky and could be done more elegantly using a standardized interface for the two classes above (since the hash maps are still O(N*M) in size for N fitters and M models).
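To make that standardized interface concrete, one option (a sketch, not something sklearn provides) is a single registry keyed by fitter name, with every fitter sharing the (model, update, error, **options) constructor signature, so the mapping grows only with the number of fitters:

FITTERS = {
    'stochastic_gradient_descent': StochasticUpdate,
    'minibatch_gradient_descent': MinibatchStochasticUpdate,
}

def make_trainer(name, model, update, error, **options):
    # options carries fitter-specific settings such as n_epochs,
    # n_data_points, or batch_size, passed through as keyword arguments
    return FITTERS[name](model, update=update, error=error, **options)

The fit method above would then reduce to a single make_trainer call per model, with the fitter-specific options forwarded as keywords.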
