I'm building my own 1-NN classifier in Python because I need maximum speed in certain operations: I want to use it inside a genetic algorithm, so every millisecond counts.
I'm trying to implement a leave-one-out test inside my KNN class with numpy, but I get about 50% accuracy with this test. When I try the scikit-learn KNN with the same leave-one-out, it returns about 97% accuracy.
This is my KNN class:
class KNN(object):
"""Documentation for KNK-clasifier"""
def __init__(self):
super(KNN, self).__init__()
# self.args = args
def fit(self, entrenamiento, clases):
self.entrenamiento = np.asarray(entrenamiento)
self.n_examples = len(self.entrenamiento)
self.n_features = len(self.entrenamiento[1])
self.clases = np.asarray(clases)
        self.createDistanceMatrix()
    def createDistanceMatrix(self):
self.distances = np.zeros([len(self.entrenamiento),
len(self.entrenamiento),
len(self.entrenamiento[1])])
for i in range(self.n_examples):
for j in range(self.n_examples):
if i is not j:
self.distances[i][j] = self.distance(self.entrenamiento[i],
self.entrenamiento[j])
else:
self.distances[i][j] = np.full(len(self.entrenamiento[1]),
10000.0)
def distance(self, x, y):
return (x-y)*(x-y)
def predict(self, test, pesos=None):
dist = 100000
class_index = 0
for i in range(self.n_examples):
aux = self.distance(self.entrenamiento[i], test)
if pesos is not None:
aux = pesos*aux
if aux < dist:
dist = aux
class_index = i
return self.clases[class_index]
def leave_one_out(self, pesos=None):
        # DONE: I only need to find the minimum of each column
dist = np.zeros(self.n_examples)
aciertos = 0
for i in range(self.n_examples):
for j in range(self.n_examples):
if pesos is not None:
dist[i] = np.linalg.norm(
np.multiply(self.distances[i][j], pesos))
else:
dist[i] = np.linalg.norm(self.distances[i][j])
if self.clases[i] == self.clases[np.argmin(dist)]:
aciertos = aciertos + 1
return 100*(aciertos/self.n_examples)
where createDistanceMatrix precalculates all the possible (x, y) distances for every feature and saves them in a matrix. This matrix will later be multiplied by a weight vector, which represents the feature-weight learning problem I'm trying to solve. I've spent two days trying to find where the mistake is, but I can't: my classifier doesn't give a decent percentage of correct classifications in leave-one-out.
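To be explicit, the kind of weighted distance I have in mind between two examples is the weighted sum of per-feature squared differences; a small numpy sketch of the idea (not my actual code):

import numpy as np

def weighted_sq_distance(x, y, w=None):
    # per-feature squared differences, optionally scaled by the feature weights
    d = (x - y) ** 2
    if w is not None:
        d = w * d
    return d.sum()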
For the sklearn KNN, this is the leave-one-out that I'm testing:
aciertos = 0
knn = neighbors.KNeighborsClassifier(n_neighbors=1)
start = time.clock()
for i in range(len(train)):
knn.fit(train[1:], cls[1:])
if knn.predict(train[0])[0] == cls[0]:
aciertos = aciertos + 1
train[0], train[-1] = train[-1], train[0]
cls[0], cls[-1] = cls[-1], cls[0]
end = time.clock()
print(str(end - start) + " segundos")
print(str(100*(aciertos/len(train))))
This same loop with my own classifier returns a similar percentage of success.
I don't know if you fixed your problem, but your distance looks wrong. Here is the algorithm from Stanford cs231n:
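Roughly, the fully vectorized version looks like this (a sketch; X is assumed to be the 2-D matrix of test rows and X_train the 2-D matrix of training rows):

import numpy as np

def compute_l2_distances(X, X_train):
    # (a - b)^2 = a^2 + b^2 - 2ab, expanded over all pairs at once
    dists = (
        np.sum(X ** 2, axis=1, keepdims=True)   # shape (num_test, 1)
        + np.sum(X_train ** 2, axis=1)          # shape (num_train,)
        - 2 * X.dot(X_train.T)                  # shape (num_test, num_train)
    )
    return np.sqrt(np.maximum(dists, 0))

The key point is that the distance between two examples is a single number (the per-feature differences are summed), not a vector of per-feature values.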
I have tried using two techniques, but I am getting different results. I just want to be sure about which one to go with.
Method 1:
I tried using
from tmtoolkit.topicmod.evaluate import metric_coherence_gensim
metric_coherence_gensim(measure='u_mass',
top_n=25,
topic_word_distrib = lda.components_,
dtm = dtm,
vocab=np.array([x for x in tfidf_vect.vocabulary_.keys()]),
return_mean = True)
The source mentioned that a decent coherence score should be between -14 and +14. Any explanation of this would also help.
Method 2:
I had to write functions to calculate the score without any built-in library.
import math

def get_umass_score(dt_matrix, i, j):
zo_matrix = (dt_matrix > 0).astype(int)
col_i, col_j = zo_matrix[:, i], zo_matrix[:, j]
col_ij = col_i + col_j
col_ij = (col_ij == 2).astype(int)
Di, Dij = col_i.sum(), col_ij.sum()
return math.log((Dij + 1) / Di)
def get_topic_coherence(dt_matrix, topic, n_top_words):
indexed_topic = zip(topic, range(0, len(topic)))
topic_top = sorted(indexed_topic, key=lambda x: 1 - x[0])[0:n_top_words]
coherence = 0
for j_index in range(0, len(topic_top)):
for i_index in range(0, j_index - 1):
i = topic_top[i_index][1]
j = topic_top[j_index][1]
coherence += get_umass_score(dt_matrix, i, j)
return coherence
def get_average_topic_coherence(dt_matrix, topics, n_top_words):
total_coherence = 0
for i in range(0, len(topics)):
total_coherence += get_topic_coherence(dt_matrix, topics[i], n_top_words)
return total_coherence / len(topics)
I got this from a StackOverflow post (credit to the author who wrote it), but I was getting huge values depending on the value I pass for n_top_words.
Can someone tell me which method is reliable, or is there any better way I can find the coherence score for sklearn LDA models?
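For reference, the same u_mass measure can also be computed with gensim's CoherenceModel directly; a sketch, where dictionary, bow_corpus and top_words_per_topic are placeholders I would still have to build from the sklearn vectorizer and LDA output:

from gensim.models import CoherenceModel

cm = CoherenceModel(topics=top_words_per_topic,  # placeholder: list of lists of top words per topic
                    corpus=bow_corpus,           # placeholder: gensim bag-of-words corpus
                    dictionary=dictionary,       # placeholder: gensim Dictionary
                    coherence='u_mass',
                    topn=25)
print(cm.get_coherence())                        # mean u_mass over the topics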
I have code for a classification decision tree, and I'm trying to convert it into a regression tree.
I understand that I need to change the impurity measure: in classification I have Gini and entropy, while in regression I need to use SSR (the sum of squared residuals).
If I understand correctly, I need to change the information_gain function so that it calculates the SSR.
Can someone help me understand how I should change it?
class DecisionTreeClassifier():
def __init__(self, min_samples_split=2, max_depth=2):
''' constructor '''
# the root of the tree
self.root = None
# stopping conditions
        # if the number of samples drops below min_samples_split we stop and the node becomes a leaf;
        # the same goes for depth
self.min_samples_split = min_samples_split
self.max_depth = max_depth
def build_tree(self, dataset, curr_depth=0):
''' recursive function to build the tree '''
#splitting the features and target
X, Y = dataset[:,:-1], dataset[:,-1]
num_samples, num_features = np.shape(X)
# split until stopping conditions are met
if num_samples>=self.min_samples_split and curr_depth<=self.max_depth:
# find the best split
best_split = self.get_best_split(dataset, num_samples, num_features)
            # check that the information gain is positive; if it equals 0 the node is pure
if best_split["info_gain"]>0:
# recursive left
left_subtree = self.build_tree(best_split["dataset_left"], curr_depth+1)
# recursive right
right_subtree = self.build_tree(best_split["dataset_right"], curr_depth+1)
# return decision node
return Node(best_split["feature_index"], best_split["threshold"],
left_subtree, right_subtree, best_split["info_gain"])
# calculate leaf node
leaf_value = self.calculate_leaf_value(Y)
# return leaf node
return Node(value=leaf_value)
def get_best_split(self, dataset, num_samples, num_features):
''' function to find the best split '''
# dictionary to store the best split
best_split = {}
        # we want to maximize the information gain, so start from a value lower than any possible gain
max_info_gain = -float("inf")
# loop over all the features
for feature_index in range(num_features):
feature_values = dataset[:, feature_index]
# return the unique values of particular feature
possible_thresholds = np.unique(feature_values)
# loop over all the feature values present in the data
for threshold in possible_thresholds:
# get current split
dataset_left, dataset_right = self.split(dataset, feature_index, threshold)
                # check that neither child is empty
if len(dataset_left)>0 and len(dataset_right)>0:
#getting the target values
y, left_y, right_y = dataset[:, -1], dataset_left[:, -1], dataset_right[:, -1]
# y = target values
# compute information gain
curr_info_gain = self.information_gain(y, left_y, right_y, "gini")
                    # once we have the current information gain, check whether it is bigger than
                    # the max information gain so far; if it is, update our best split
if curr_info_gain>max_info_gain:
best_split["feature_index"] = feature_index
best_split["threshold"] = threshold
best_split["dataset_left"] = dataset_left
best_split["dataset_right"] = dataset_right
best_split["info_gain"] = curr_info_gain
max_info_gain = curr_info_gain
# return best split
return best_split
def split(self, dataset, feature_index, threshold):
''' function to split the data '''
# takes the dataset and the feature index and the threshold value and split it to two parts ( left and right child)
# we will split with <> threshold
dataset_left = np.array([row for row in dataset if row[feature_index]<=threshold])
dataset_right = np.array([row for row in dataset if row[feature_index]>threshold])
return dataset_left, dataset_right
def information_gain(self, parent, l_child, r_child, mode="gini"):
''' function to compute information gain '''
# calculate the weights. child/parent
weight_l = len(l_child) / len(parent)
weight_r = len(r_child) / len(parent)
# calculate the Gini
if mode=="gini":
gain = self.gini_index(parent) - (weight_l*self.gini_index(l_child) + weight_r*self.gini_index(r_child))
else:
gain = self.entropy(parent) - (weight_l*self.entropy(l_child) + weight_r*self.entropy(r_child))
return gain
    # for this homework we do not need entropy, but it is nice to have
'''def entropy(self, y):
# function to compute entropy
class_labels = np.unique(y)
entropy = 0
for cls in class_labels:
p_cls = len(y[y == cls]) / len(y)
entropy += -p_cls * np.log2(p_cls)
return entropy'''
def gini_index(self, y):
''' function to compute gini index '''
class_labels = np.unique(y)
gini = 0
for cls in class_labels:
p_cls = len(y[y == cls]) / len(y)
gini += p_cls**2
return 1 - gini
def calculate_leaf_value(self, Y):
''' function to compute leaf node '''
        # find the most frequently occurring element in Y
Y = list(Y)
return max(Y, key=Y.count)
def print_tree(self, tree=None, indent=" "):
''' recursive function to print the tree '''
if not tree:
tree = self.root
if tree.value is not None:
print(tree.value)
else:
print("X_"+str(tree.feature_index), "<=", tree.threshold, "?", tree.info_gain)
print("%sleft:" % (indent), end="")
self.print_tree(tree.left, indent + indent)
print("%sright:" % (indent), end="")
self.print_tree(tree.right, indent + indent)
def fit(self, X, Y):
''' function to train the tree '''
dataset = np.concatenate((X, Y), axis=1)
self.root = self.build_tree(dataset)
def predict(self, X):
''' function to predict new dataset '''
        predictions = [self.make_prediction(x, self.root) for x in X]
        return predictions
def make_prediction(self, x, tree):
''' function to predict a single data point '''
        if tree.value is not None: return tree.value
feature_val = x[tree.feature_index]
if feature_val<=tree.threshold:
return self.make_prediction(x, tree.left)
else:
return self.make_prediction(x, tree.right)
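If I understand the idea, the change would be roughly this (a sketch, not tested): replace the Gini-based gain with variance reduction, which is the per-sample version of the drop in SSR, and make the leaf value the mean of the targets:

import numpy as np

def variance_reduction(self, parent, l_child, r_child):
    ''' regression analogue of information_gain: reduction in variance (SSR / n) '''
    weight_l = len(l_child) / len(parent)
    weight_r = len(r_child) / len(parent)
    return np.var(parent) - (weight_l * np.var(l_child) + weight_r * np.var(r_child))

def calculate_leaf_value(self, Y):
    ''' regression leaf: predict the mean of the targets '''
    return np.mean(Y)

get_best_split would then call self.variance_reduction(y, left_y, right_y) instead of self.information_gain(y, left_y, right_y, "gini"). Is that the right direction?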
I am trying to reimplement the original GAN paper by Ian Goodfellow et al., and I need to show that my implementation achieves the same or similar results as the authors. But I am not sure how to evaluate this metric. I took a look at their implementation, but I got some strange results: in the paper they report 225 ± 2 on MNIST for this metric, while the results I get are below -400000000. I thought that maybe the model is bad, but it generates really good images of MNIST digits.
Can someone tell me what I am doing wrong?
Below is the code I used; I copied part of it from the official implementation.
Note: the valid variable holds images taken from the MNIST dataset.
def get_nll(x, parzen, batch_size=10):
"""
Credit: Yann N. Dauphin
"""
inds = range(x.shape[0])
n_batches = int(numpy.ceil(float(len(inds)) / batch_size))
print("N batches:", n_batches)
times = []
nlls = []
for i in range(n_batches):
begin = time.time()
nll = parzen(x[inds[i::n_batches]])
end = time.time()
times.append(end-begin)
nlls.extend(nll)
if i % 10 == 0:
print(i, numpy.mean(times), numpy.mean(nlls))
return numpy.array(nlls)
def log_mean_exp(a):
"""
Credit: Yann N. Dauphin
"""
max_ = a.max(1)
return max_ + T.log(T.exp(a - max_.dimshuffle(0, 'x')).mean(1))
def cross_validate_sigma(samples, data, sigmas, batch_size):
lls = []
for sigma in sigmas:
print("Sigma:", sigma)
parzen = theano_parzen(samples, sigma)
tmp = get_nll(data, parzen, batch_size = batch_size)
lls.append(numpy.asarray(tmp).mean())
del parzen
gc.collect()
ind = numpy.argmax(lls)
print(max(lls))
return sigmas[ind]
noise = torch.randn((10000, 100), device=device)
gen_model.eval()
gan_out = gen_model(noise)
sigma_range = numpy.logspace(-1., 0., num=10)
sigma = cross_validate_sigma(gan_out.reshape(10000,-1), valid[0:10000], sigma_range, 100)
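For context, theano_parzen (not shown above, it comes from the official code) builds a Gaussian Parzen-window estimator over the generated samples. A rough numpy sketch of the per-example log-likelihood it computes, assuming samples and data are flattened 2-D float arrays, would be:

import numpy as np

def parzen_log_likelihood(data, samples, sigma):
    # log p(x) under a Gaussian Parzen window centred on each generated sample
    d = samples.shape[1]
    diff = (data[:, None, :] - samples[None, :, :]) / sigma   # (n_data, n_samples, d)
    exponents = -0.5 * (diff ** 2).sum(axis=2)
    # log-mean-exp over the samples, plus the Gaussian normalisation constant
    max_e = exponents.max(axis=1, keepdims=True)
    log_mean = max_e[:, 0] + np.log(np.exp(exponents - max_e).mean(axis=1))
    return log_mean - d * np.log(sigma * np.sqrt(2 * np.pi))

This is memory-hungry for 10000 x 10000 pairs, which is why the original code batches over the data rows as get_nll does above.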
I am working with an imbalanced data set that has multiple observations from the same set of users. I want to make sure that I don't have the same users in both the training and test sets while still maintaining the original distribution as much as possible.
I have been trying to combine the GroupKFold and StratifiedKFold functions from sklearn, but I'm at a loss as to how to do it. Does anyone have any ideas on how I could combine these two?
import collections
import random

import numpy as np

def stratified_group_k_fold(X, y, groups, k, seed=None):
"""Source: https://www.kaggle.com/jakubwasikowski/stratified-group-k-fold-cross-validation """
labels_num = np.max(y) + 1
y_counts_per_group = collections.defaultdict(lambda: np.zeros(labels_num))
y_distr = collections.Counter()
for label, g in zip(y, groups):
y_counts_per_group[g][label] += 1
y_distr[label] += 1
y_counts_per_fold = collections.defaultdict(lambda: np.zeros(labels_num))
groups_per_fold = collections.defaultdict(set)
def eval_y_counts_per_fold(y_counts, fold):
y_counts_per_fold[fold] += y_counts
std_per_label = []
for label in range(labels_num):
label_std = np.std([y_counts_per_fold[i][label] / y_distr[label] for i in range(k)])
std_per_label.append(label_std)
y_counts_per_fold[fold] -= y_counts
return np.mean(std_per_label)
groups_and_y_counts = list(y_counts_per_group.items())
random.Random(seed).shuffle(groups_and_y_counts)
for g, y_counts in sorted(groups_and_y_counts, key=lambda x: -np.std(x[1])):
best_fold = None
min_eval = None
for i in range(k):
fold_eval = eval_y_counts_per_fold(y_counts, i)
if min_eval is None or fold_eval < min_eval:
min_eval = fold_eval
best_fold = i
y_counts_per_fold[best_fold] += y_counts
groups_per_fold[best_fold].add(g)
all_groups = set(groups)
for i in range(k):
train_groups = all_groups - groups_per_fold[i]
test_groups = groups_per_fold[i]
train_indices = [i for i, g in enumerate(groups) if g in train_groups]
test_indices = [i for i, g in enumerate(groups) if g in test_groups]
yield train_indices, test_indices
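For reference, I'm calling it roughly like this (X, y and groups are assumed to be numpy arrays of the same length, with y holding integer class labels):

for fold, (train_idx, test_idx) in enumerate(stratified_group_k_fold(X, y, groups, k=5, seed=42)):
    X_train, X_test = X[train_idx], X[test_idx]
    y_train, y_test = y[train_idx], y[test_idx]
    # every group should end up entirely in train or entirely in test for this fold
    assert not (set(groups[train_idx]) & set(groups[test_idx]))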
I've never used the incremental PCA which exists in sklearn and I'm a bit confused about its parameters; I'm not able to find a good explanation of them.
I see that there is batch_size in the constructor, but also, when using the partial_fit method, you can again pass only a part of your data. I've been using the following approach:
n = df.shape[0]
chunk_size = 100000
iterations = n//chunk_size
ipca = IncrementalPCA(n_components=40, batch_size=1000)
for i in range(0, iterations):
ipca.partial_fit(df[i*chunk_size : (i+1)*chunk_size].values)
ipca.partial_fit(df[iterations*chunk_size : n].values)
Now, what I don't understand is the following: when using partial_fit, does batch_size play any role at all, and how are the two related?
Moreover, if both are considered, how should I change their values to trade memory for precision (accept a bigger memory footprint for more precision, or reduce memory consumption at the price of lower accuracy)?
The docs say:
batch_size : int or None, (default=None)
The number of samples to use for each batch. Only used when calling fit...
This param is not used within partial_fit, where the batch-size is controlled by the user.
Bigger batches will increase memory-consumption, smaller ones will decrease it.
This is also written in the docs:
This algorithm has constant memory complexity, on the order of batch_size, enabling use of np.memmap files without loading the entire file into memory.
Despite some checks and parameter-heuristics, the whole fit-function looks like this:
for batch in gen_batches(n_samples, self.batch_size_):
self.partial_fit(X[batch], check_input=False)
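So the two usages are roughly equivalent; a sketch, assuming X is any 2-D array-like whose chunks fit in memory:

import numpy as np
from sklearn.decomposition import IncrementalPCA

# fit() chunks the data internally, using batch_size
ipca_fit = IncrementalPCA(n_components=40, batch_size=1000).fit(X)

# manual chunking with partial_fit; batch_size is ignored here and the
# chunk size you pass is what controls memory use
ipca_partial = IncrementalPCA(n_components=40)
for chunk in np.array_split(X, max(1, len(X) // 1000)):
    ipca_partial.partial_fit(chunk)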
Here is some incremental PCA code based on https://github.com/kevinhughes27/pyIPCA, which is an implementation of the CCIPCA method.
import scipy.sparse as sp
import numpy as np
from scipy import linalg as la
import scipy.sparse as sps
from sklearn import datasets
class CCIPCA:
def __init__(self, n_components, n_features, amnesic=2.0, copy=True):
self.n_components = n_components
self.n_features = n_features
self.copy = copy
self.amnesic = amnesic
self.iteration = 0
self.mean_ = None
self.components_ = None
        self.mean_ = np.zeros([self.n_features], float)
self.components_ = np.ones((self.n_components,self.n_features)) / \
(self.n_features*self.n_components)
def partial_fit(self, u):
n = float(self.iteration)
V = self.components_
# amnesic learning params
if n <= int(self.amnesic):
w1 = float(n+2-1)/float(n+2)
w2 = float(1)/float(n+2)
else:
w1 = float(n+2-self.amnesic)/float(n+2)
w2 = float(1+self.amnesic)/float(n+2)
# update mean
self.mean_ = w1*self.mean_ + w2*u
# mean center u
u = u - self.mean_
# update components
for j in range(0,self.n_components):
if j > n: pass
elif j == n: V[j,:] = u
else:
# update the components
V[j,:] = w1*V[j,:] + w2*np.dot(u,V[j,:])*u / la.norm(V[j,:])
normedV = V[j,:] / la.norm(V[j,:])
normedV = normedV.reshape((self.n_features, 1))
u = u - np.dot(np.dot(u,normedV),normedV.T)
self.iteration += 1
self.components_ = V / la.norm(V)
return
def post_process(self):
self.explained_variance_ratio_ = np.sqrt(np.sum(self.components_**2,axis=1))
idx = np.argsort(-self.explained_variance_ratio_)
self.explained_variance_ratio_ = self.explained_variance_ratio_[idx]
self.components_ = self.components_[idx,:]
self.explained_variance_ratio_ = (self.explained_variance_ratio_ / \
self.explained_variance_ratio_.sum())
for r in range(0,self.components_.shape[0]):
d = np.sqrt(np.dot(self.components_[r,:],self.components_[r,:]))
self.components_[r,:] /= d
You can test it with
import pandas as pd, ccipca
df = pd.read_csv('iris.csv')
df = np.array(df)[:,:4].astype(float)
pca = ccipca.CCIPCA(n_components=2,n_features=4)
S = 10
print(df[0, :])
for i in range(150): pca.partial_fit(df[i, :])
pca.post_process()
The resulting eigenvectors / values will not be exactly the same as with batch PCA. The results are approximate, but they are useful.
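To actually reduce data with the fitted components, a minimal sketch (assuming X is an array with n_features columns):

X_centered = X - pca.mean_
X_reduced = np.dot(X_centered, pca.components_.T)  # project onto the learned components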