Control the `__getitem__` in custom dataset class based on sampling vector - python-3.x

I have custom datasets, each of which implements the __getitem__ method.
I created the following MUXDataset class that is supposed to select a random dataset and get an item from that dataset:
import random
from typing import List

from torch.utils.data import Dataset

class MUXDataset(Dataset):
    """
    Defines a dataset class that provides a way to read scenes and also visualization tools
    """
    def __init__(self, mux_dataset_params: MultiplexDatasetParams) -> None:
        self._params = mux_dataset_params
        # build the sampling vector from the keyword ('uniform'/'proportional'),
        # or use it directly if an explicit vector was passed in
        self.sampling_vec = (
            self.init_sampling() if isinstance(mux_dataset_params.sampling, str) else mux_dataset_params.sampling
        )

    def init_sampling(self) -> List[float]:
        if self._params.sampling == 'uniform':
            num_datasets = len(self._params.datasets)
            sampling = [1 / num_datasets for _ in self._params.datasets]
        elif self._params.sampling == 'proportional':
            size_vec = [len(_d) for _d in self._params.datasets]
            sampling = [1 - _s / sum(size_vec) for _s in size_vec]
            sampling = [_s / sum(sampling) for _s in sampling]
        else:
            raise ValueError(f'{self._params.sampling} is not supported')
        return sampling

    def __len__(self) -> int:
        return len(self._params.datasets)

    def __getitem__(self, idx):
        curr_ds = self._params.datasets[idx]
        return curr_ds[random.randint(0, len(curr_ds) - 1)]
Now I want the __getitem__ of MUXDataset to be based on the sampling vector, but I couldn't find a way to implement it within the class.
I tried the following inside __getitem__:
def __getitem__(self, idx):
    ds_idx = random.choices(population, weights=self.sampling, k=1)
    curr_ds = self._params.datasets[ds_idx]
    return curr_ds.__getitem__(random.randint(0, len(curr_ds) - 1))
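For reference, a minimal sketch of a weighted __getitem__, assuming self.sampling_vec holds one weight per dataset: random.choices needs an explicit population, and it returns a list of k elements, so the single drawn index has to be unpacked before it can be used for indexing:

def __getitem__(self, idx):
    # draw a dataset index according to the sampling weights;
    # random.choices returns a list of k elements, so take the first
    ds_idx = random.choices(range(len(self._params.datasets)), weights=self.sampling_vec, k=1)[0]
    curr_ds = self._params.datasets[ds_idx]
    # then draw a random item from the chosen dataset
    return curr_ds[random.randint(0, len(curr_ds) - 1)]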

Related

How can I speed up data loading with a PyTorch DataLoader?

I have a dataset of about a million rows. Previously, I read the rows, preprocessed the data, and created a list of rows to be trained on. Then I defined a DataLoader over this data like:
train_dataloader = torch.utils.data.DataLoader(mydata['train'],
    batch_size=node_batch_size, shuffle=shuffle, collate_fn=data_collator)
Preprocessing can be time consuming, so I thought to define an IterableDataset with an __iter__ method. Then I could define my DataLoader like:
train_dataloader = torch.utils.data.DataLoader(myds['train'],
    batch_size=node_batch_size, shuffle=shuffle, collate_fn=data_collator)
However, it seems that before training begins it still calls my preprocessing function and builds the whole iterator, so I didn't gain much speed-up.
Please guide me: how can I get a speed-up in this case?
Here is part of my class:
def __iter__(self):
    iter_start = self.start
    iter_end = self.num_samples
    worker_info = torch.utils.data.get_worker_info()
    if worker_info is None:  # single-process data loading, return the full iterator
        iter_start = self.start
        iter_end = self.num_samples
    else:  # in a worker process
        # split workload
        per_worker = int(math.ceil((self.num_samples - self.start) / float(worker_info.num_workers)))
        worker_id = worker_info.id
        iter_start = self.start + worker_id * per_worker
        iter_end = min(iter_start + per_worker, self.num_samples)
    if self.flat_data:
        return iter(self.flat_data)
    else:
        return iter(self.fill_data(iter_start, iter_end))

def fill_data(self, iter_start, iter_end, show_progress=False):
    flat_data = []
    if iter_end < 0:
        iter_end = self.num_samples
    kk = 0
    dlog.info("========================== SPLIT: %s", self.split_name)
    dlog.info("get data from %s to %s", iter_start, iter_end)
    dlog.info("total rows: %s", len(self.split_df))
    if show_progress:
        pbar = tqdm(total=self.num_samples)
    for index, d in self.split_df.iterrows():
        if kk < iter_start:
            dlog.info("!!!!!!!!! before start %s", iter_start)
            kk += 1
            continue
        rel = d["prefix"]
        ...
        # preprocessing and adding to returned list
I did the preprocessing in the fill_data or __iter__ body. However, I can use map for the preprocessing instead; then the preprocessing is called lazily during training, for every batch, and not before training:
import pandas as pd
import torch

class MyDataset(torch.utils.data.IterableDataset):
    def __init__(self, fname, until=10):
        self.df = pd.read_table("atomic/" + fname)
        self.until = until

    def preproc(self, t):
        prefix, data = t
        text = "Preproc: " + prefix + "|" + data
        print(text)  # to check when it is called
        return text

    def __iter__(self):
        _iter = self.df_iter()
        # map() is lazy, so preproc runs only when items are consumed
        return map(self.preproc, _iter)

    def df_iter(self):
        ret = []
        for idx, row in self.df.iterrows():
            ret.append((row["prefix"], row["input_text"]))
        return iter(ret)
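A quick way to see the lazy behavior (the file name sample.tsv under atomic/ is hypothetical): the "Preproc:" lines are printed while iterating over the DataLoader, batch by batch, rather than when the dataset is constructed.

myds = MyDataset("sample.tsv")  # hypothetical file with 'prefix' and 'input_text' columns
train_dataloader = torch.utils.data.DataLoader(myds, batch_size=4)
for batch in train_dataloader:  # preproc() runs here, as items are consumed
    pass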

Using self in init part of a class in Python

Is there any difference between the following two codes related to initializing a class in Python?
class summation:
    def __init__(self, f, s):
        self.first = f
        self.second = s
        self.summ = self.first + self.second
.
.
.

class summation:
    def __init__(self, f, s):
        self.first = f
        self.second = s
        self.summ = f + s
.
.
.
If there exists any difference, what is that, and which code is preferable?
Edit: I am going to write an artificial neural network with Python (and PyTorch). In fact, the above two snippets are just examples. In the actual case, I have seen in various resources that when self.input = input appears in the initialization of a class, other parts of the class use self.input, not input.
My questions: What are the differences between these two approaches? Why is the use of self.input preferable in my case?
Example: (from https://docs.dgl.ai/en/latest/tutorials/models/1_gnn/4_rgcn.html#sphx-glr-tutorials-models-1-gnn-4-rgcn-py)
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl import DGLGraph
import dgl.function as fn
from functools import partial

class RGCNLayer(nn.Module):
    def __init__(self, in_feat, out_feat, num_rels, num_bases=-1, bias=None,
                 activation=None, is_input_layer=False):
        super(RGCNLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.num_rels = num_rels
        self.num_bases = num_bases
        self.bias = bias
        self.activation = activation
        self.is_input_layer = is_input_layer
        # sanity check
        if self.num_bases <= 0 or self.num_bases > self.num_rels:
            self.num_bases = self.num_rels
        # weight bases in equation (3)
        self.weight = nn.Parameter(torch.Tensor(self.num_bases, self.in_feat,
                                                self.out_feat))
        if self.num_bases < self.num_rels:
            # linear combination coefficients in equation (3)
            self.w_comp = nn.Parameter(torch.Tensor(self.num_rels, self.num_bases))
        # add bias
        if self.bias:
            self.bias = nn.Parameter(torch.Tensor(out_feat))
        # init trainable parameters
        nn.init.xavier_uniform_(self.weight,
                                gain=nn.init.calculate_gain('relu'))
        if self.num_bases < self.num_rels:
            nn.init.xavier_uniform_(self.w_comp,
                                    gain=nn.init.calculate_gain('relu'))
        if self.bias:
            nn.init.xavier_uniform_(self.bias,
                                    gain=nn.init.calculate_gain('relu'))

    def forward(self, g):
        if self.num_bases < self.num_rels:
            # generate all weights from bases (equation (3))
            weight = self.weight.view(self.in_feat, self.num_bases, self.out_feat)
            weight = torch.matmul(self.w_comp, weight).view(self.num_rels,
                                                            self.in_feat, self.out_feat)
        else:
            weight = self.weight
        if self.is_input_layer:
            def message_func(edges):
                # for input layer, matrix multiply can be converted to be
                # an embedding lookup using source node id
                embed = weight.view(-1, self.out_feat)
                index = edges.data['rel_type'] * self.in_feat + edges.src['id']
                return {'msg': embed[index] * edges.data['norm']}
        else:
            def message_func(edges):
                w = weight[edges.data['rel_type']]
                msg = torch.bmm(edges.src['h'].unsqueeze(1), w).squeeze()
                msg = msg * edges.data['norm']
                return {'msg': msg}

        def apply_func(nodes):
            h = nodes.data['h']
            if self.bias:
                h = h + self.bias
            if self.activation:
                h = self.activation(h)
            return {'h': h}

        g.update_all(message_func, fn.sum(msg='msg', out='h'), apply_func)
No, there is no difference between these two approaches in your case, with this level of information. But could there be? Yes, there could, if the attributes have setters or getters that modify values. Later in my answer I'll show you how.
First of all, I prefer using this one:

class summation:
    def __init__(self, f, s):
        self.first = f
        self.second = s

    @property
    def summ(self):
        return self.first + self.second
The above implementation calculates the summation on demand, so when you change self.first or self.second, summ is recalculated automatically. You can access the sum as you did before:
s = summation(1,9)
print(s.summ)
# 10
s.first = 2
s.second = 3
print(s.summ)
# 5
So, how could they be different?
Let's implement them as follows. In the setters I double the inputs to show how setters can affect the results; it's just an imaginary example and not exactly what you wrote:
class summation1:
    def __init__(self, f, s):
        self.first = f
        self.second = s
        self.summ = self.first + self.second

    @property
    def first(self):
        return self.__first

    @first.setter
    def first(self, f):
        self.__first = f * 2

    @property
    def second(self):
        return self.__second

    @second.setter
    def second(self, s):
        self.__second = s * 2

class summation2:
    def __init__(self, f, s):
        self.first = f
        self.second = s
        self.summ = f + s

    @property
    def first(self):
        return self.__first

    @first.setter
    def first(self, f):
        self.__first = f * 2

    @property
    def second(self):
        return self.__second

    @second.setter
    def second(self, s):
        self.__second = s * 2
Now let's take a look at the outputs:
a = 3
b = 2
s1 = summation1(a,b)
s2 = summation2(a,b)
print(s1.summ)
# 10
print(s2.summ)
# 5
So, if you are not sure which of those two to choose, maybe the first approach (summ computed from self.first and self.second) is what you need.

How can I alter/expand the following Python code to also calculate mg (milligrams)?

I want to expand the following code in order to calculate milligrams too. Can someone tell me how?
class weight:
    __metric = {"g": 1,
                "kg": 1000,
                }

    def __init__(self, value, unit="g"):
        self.value = value
        self.unit = unit

    def convert_to_gram(self):
        return self.value * weight._metric[self.unit]

    def __add__(self, other):
        x = self.convert_to_gram() + other.convert_to_gram()
        return weight + (x/weight._metric[self.unit], self.unit)

    def __str__(self):
        return "{} {}".format(self.value, self.unit)
class weight:
    def __init__(self, value, unit="g"):
        """
        This method is initialized when the object is created
        """
        self.value = value
        self.unit = unit
        # each entry is the number of grams in one unit, so that
        # convert_to_gram() can simply multiply: 1 kg = 1000 g, 1 mg = 0.001 g
        self._metric = {"g": 1,
                        "kg": 1000,
                        "mg": 0.001
                        }

    def convert_to_gram(self):
        """
        This method converts self.value to grams based on self.unit
        """
        return self.value * self._metric[self.unit]

    def __add__(self, other):
        """
        The __add__ method is a 'magic' (dunder) method which gets called when we add two objects using the + operator.
        It calls convert_to_gram() on this object and on the 'other' object,
        sums the two results in grams, and converts the sum back to self.unit.
        """
        x = self.convert_to_gram() + other.convert_to_gram()
        return (x / self._metric[self.unit], self.unit)

    def __str__(self):
        return "{} {}".format(self.value, self.unit)
w1 = weight(100, 'mg')  # create object w1
w2 = weight(50, 'mg')  # create object w2
# call convert_to_gram() to convert 100 mg to grams and save the result in result_w1
result_w1 = w1.convert_to_gram()
print(result_w1)
# call convert_to_gram() to convert 50 mg to grams and save the result in result_w2
result_w2 = w2.convert_to_gram()
print(result_w2)
print(w1 + w2)
Gives:
0.1
0.05
(150.00000000000003, 'mg')
The trailing digits in the last line are ordinary floating-point rounding noise; the exact value is 150 mg.
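Because convert_to_gram() now really returns grams, weights with different units can also be added consistently, e.g.:

print(weight(1, 'g') + weight(500, 'mg'))
# (1.5, 'g')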

How can I pass different types of parameters (ex: array) into a functional class?

I am trying to learn how to group functions by class. As an example, I tried to code a generalized least-squares method to find the equation of a best-fitting line through a set of (x, y) coordinates. For my particular case, I chose the simple line y = x + 5, so the slope should be close to 1 and the y-intercept should be close to 5. Running my attempted solution below produces the error TypeError: set_x() takes 1 positional argument but 2 were given, even though I am trying to pass an array of x-points. How can I fix this error?
import numpy as np
from scipy.optimize import minimize

class GeneralizedLeastSquares:
    def __init__(self, residuals=None, parameters=None, x=None, y_true=None, y_fit=None, weights=None, method=None):
        self.residuals = residuals
        self.parameters = parameters
        self.x = x
        self.y_true = y_true
        self.y_fit = y_fit
        self.weights = weights
        self.method = method

    def set_residuals(self, residuals):
        self.residuals = residuals

    def set_parameters(self, parameters):
        self.parameters = parameters
    def set_x(self):
        self.x = x
    def set_y_true(self, y_true):
        self.y_true = y_true

    def set_y_fit(self, y_fit):
        self.y_fit = y_fit

    def set_weights(self, weights):
        self.weights = weights

    def set_method(self, method):
        self.method = method

    def get_residuals(self):
        return [(self.y_true[idx] - self.y_fit[idx])**2 for idx in range(len(self.y_true)) if len(self.y_true) == len(self.y_fit)]

    def get_parameters(self):
        return self.parameters

    def get_x(self):
        return self.x

    def get_y_true(self):
        return self.y_true

    def get_y_fit(self):
        return [self.parameters[0] * self.x[idx] + self.parameters[1] for idx in range(len(self.x))]

    def get_weights(self):
        return self.weights

    def update_weights(self):
        inverse_residuals = [1/self.residuals[idx] for idx in range(len(residuals))]
        inverse_residuals_abs = [abs(inverse_residual) for inverse_residual in inverse_residuals]
        residual_abs_total = sum(inverse_residuals_abs)
        return [inverse_residuals_abs[idx]/residual_abs_total for idx in range(len(inverse_residuals_abs))]

    def get_method(self):
        return self.method

    def get_error_by_residuals(self):
        return sum([self.weights[idx] * self.residuals[idx] for idx in range(len(self.residuals))])

    def get_error_by_std_mean(self):
        return np.std(self.y_true)/np.sqrt(len(self.y_true))

    def get_linear_fit(self):
        """
        """
        if self.parameters == 'estimate':
            slope_init = (self.y_true[-1] - self.y_true[0]) / (self.x[-1] - self.x[0])
            b_init = np.mean([self.y_true[-1] - slope_init * self.x[-1], self.y_true[0] - slope_init * self.x[0]])
            self.parameters = [slope_init, b_init]
        elif not isinstance(self.parameters, (list, np.ndarray)):
            raise ValueError("parameters = 'estimate' or [slope, y-intercept]")
        meths = ['residuals', 'std of mean']
        funcs = [get_error_by_residuals, get_error_by_std_mean]
        func = dict(zip(meths, funcs))[self.method]
        res = minimize(func, x0=self.parameters, args=(self,), method='Nelder-Mead')
        self.parameters = [res.x[0], res.x[1]]
        self.y_fit = get_y_fit(self)
        self.residuals = get_residuals(self)
        self.weights = update_weights(self)
        return self.parameters, self.y_fit, self.residuals, self.weights

x = np.linspace(0, 4, 5)
y_true = np.linspace(5, 9, 5)  ## using slope=1, y-intercept=5
y_actual = np.array([4.8, 6.2, 7, 8.1, 8.9])  ## test data

GLS = GeneralizedLeastSquares()
GLS.set_x(x)
GLS.set_y_true(y_actual)
GLS.set_weights(np.ones(len(x)))
GLS.set_parameters('estimate')
# GLS.set_parameters([1.2, 4.9])
GLS.set_method('residuals')
results = GLS.get_linear_fit()
print(results)
Your method is not taking an argument. It should be:

def set_x(self, x):
    self.x = x
Wrapping properties in get/set methods is a very Java-like / outdated way of doing things, and it is much easier to access the underlying property from outside your class. I.e., rather than GLS.set_x(12), consider the more Pythonic GLS.x = 12. This way you don't have to write a get and a set method for each property.
Also, it might make more sense for the heavy-lifting method of your object, get_linear_fit, to be put in the __call__ method. This way, you can run the regression by just typing GLS() rather than GLS.get_linear_fit(), as sketched below.
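A minimal sketch of that suggestion, assuming the class above:

class GeneralizedLeastSquares:
    # ... all attributes and methods as above ...

    def __call__(self):
        # make the instance callable by delegating to the fitting routine
        return self.get_linear_fit()

results = GLS()  # equivalent to GLS.get_linear_fit()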

AssertionError: Format for classes is `<label> file`

This is a Python script for detecting features in a set of images for an SVM.
import os
import sys
import argparse
import _pickle as cPickle
import json

import cv2
import numpy as np
from sklearn.cluster import KMeans

def build_arg_parser():
    parser = argparse.ArgumentParser(description='Creates features for given images')
    parser.add_argument("--samples", dest="cls", nargs="+", action="append",
                        required=True, help="Folders containing the training images. \
                        The first element needs to be the class label.")
    parser.add_argument("--codebook-file", dest='codebook_file', required=True,
                        help="Base file name to store the codebook")
    parser.add_argument("--feature-map-file", dest='feature_map_file', required=True,
                        help="Base file name to store the feature map")
    parser.add_argument("--scale-image", dest="scale", type=int, default=150,
                        help="Scales the longer dimension of the image down to this size.")
    return parser

def load_input_map(label, input_folder):
    combined_data = []
    if not os.path.isdir(input_folder):
        print("The folder " + input_folder + " doesn't exist")
        raise IOError
    for root, dirs, files in os.walk(input_folder):
        for filename in (x for x in files if x.endswith('.jpg')):
            combined_data.append({'label': label, 'image': os.path.join(root, filename)})
    return combined_data

class FeatureExtractor(object):
    def extract_image_features(self, img):
        kps = DenseDetector().detect(img)
        kps, fvs = SIFTExtractor().compute(img, kps)
        return fvs

    def get_centroids(self, input_map, num_samples_to_fit=10):
        kps_all = []
        count = 0
        cur_label = ''
        for item in input_map:
            if count >= num_samples_to_fit:
                if cur_label != item['label']:
                    count = 0
                else:
                    continue
            count += 1
            if count == num_samples_to_fit:
                print("Built centroids for", item['label'])
            cur_label = item['label']
            img = cv2.imread(item['image'])
            img = resize_to_size(img, 150)
            num_dims = 128
            fvs = self.extract_image_features(img)
            kps_all.extend(fvs)
        kmeans, centroids = Quantizer().quantize(kps_all)
        return kmeans, centroids

    def get_feature_vector(self, img, kmeans, centroids):
        return Quantizer().get_feature_vector(img, kmeans, centroids)

def extract_feature_map(input_map, kmeans, centroids):
    feature_map = []
    for item in input_map:
        temp_dict = {}
        temp_dict['label'] = item['label']
        print("Extracting features for", item['image'])
        img = cv2.imread(item['image'])
        img = resize_to_size(img, 150)
        temp_dict['feature_vector'] = FeatureExtractor().get_feature_vector(
            img, kmeans, centroids)
        if temp_dict['feature_vector'] is not None:
            feature_map.append(temp_dict)
    return feature_map

class Quantizer(object):
    def __init__(self, num_clusters=32):
        self.num_dims = 128
        self.extractor = SIFTExtractor()
        self.num_clusters = num_clusters
        self.num_retries = 10

    def quantize(self, datapoints):
        kmeans = KMeans(self.num_clusters,
                        n_init=max(self.num_retries, 1),
                        max_iter=10, tol=1.0)
        res = kmeans.fit(datapoints)
        centroids = res.cluster_centers_
        return kmeans, centroids

    def normalize(self, input_data):
        sum_input = np.sum(input_data)
        if sum_input > 0:
            return input_data / sum_input
        else:
            return input_data

    def get_feature_vector(self, img, kmeans, centroids):
        kps = DenseDetector().detect(img)
        kps, fvs = self.extractor.compute(img, kps)
        labels = kmeans.predict(fvs)
        fv = np.zeros(self.num_clusters)
        for i, item in enumerate(fvs):
            fv[labels[i]] += 1
        fv_image = np.reshape(fv, ((1, fv.shape[0])))
        return self.normalize(fv_image)

class DenseDetector(object):
    def __init__(self, step_size=20, feature_scale=40, img_bound=20):
        self.detector = cv2.xfeatures2d.SIFT_create("Dense")
        self.detector.setInt("initXyStep", step_size)
        self.detector.setInt("initFeatureScale", feature_scale)
        self.detector.setInt("initImgBound", img_bound)

    def detect(self, img):
        return self.detector.detect(img)

class SIFTExtractor(object):
    def compute(self, image, kps):
        if image is None:
            print("Not a valid image")
            raise TypeError
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        kps, des = cv2.SIFT().compute(gray_image, kps)
        return kps, des

# Resize the shorter dimension to 'new_size'
# while maintaining the aspect ratio
def resize_to_size(input_image, new_size=150):
    h, w = input_image.shape[0], input_image.shape[1]
    ds_factor = new_size / float(h)
    if w < h:
        ds_factor = new_size / float(w)
    new_size = (int(w * ds_factor), int(h * ds_factor))
    return cv2.resize(input_image, new_size)

if __name__ == '__main__':
    args = build_arg_parser().parse_args()
    input_map = []
    for cls in args.cls:
        assert len(cls) >= 2, "Format for classes is `<label> file`"
        label = cls[0]
        input_map += load_input_map(label, cls[1])

    downsample_length = args.scale

    # Building the codebook
    print("===== Building codebook =====")
    kmeans, centroids = FeatureExtractor().get_centroids(input_map)
    if args.codebook_file:
        with open(args.codebook_file, 'w') as f:
            pickle.dump((kmeans, centroids), f)

    # Input data and labels
    print("===== Building feature map =====")
    feature_map = extract_feature_map(input_map, kmeans, centroids)
    if args.feature_map_file:
        with open(args.feature_map_file, 'w') as f:
            pickle.dump(feature_map, f)
I receive the following error:
Traceback (most recent call last):
  File "create_features.py", line 164, in <module>
    assert len(cls) >= 2, ("Format for classes is `<label> file`")
AssertionError: Format for classes is `<label> file`
Any idea of what could be wrong? I'm just following the instructions in 'OpenCV with Python by Example' by Prateek Joshi, pages 494-526.
Assertions are used to check a condition; if the condition isn't satisfied, an AssertionError is thrown. In your case, len(cls) >= 2 isn't satisfied, which means that len(cls) is smaller than 2. cls is one group of arguments passed to the program via --samples, and the first element of this group must be the class label. So when you add an argument (a folder), you must also specify a label for it.
For example, if you choose the label name my_label, you must pass the folder together with that label: my_label my_folder.
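An invocation of the following shape therefore satisfies the assertion (the label and folder names here are hypothetical):

python create_features.py --samples my_label images/my_label --codebook-file codebook.pkl --feature-map-file feature_map.pkl

Because --samples is declared with nargs="+" and action="append", each --samples group arrives as one list, e.g. cls = ['my_label', 'images/my_label'], so len(cls) >= 2 holds and cls[0] is used as the label.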
