I have two lists of numpy vectors and wish to determine whether they represent approximately the same points (but possibly in a different order).
I've found methods such as numpy.testing.assert_allclose but it doesn't allow for possibly different orders. I have also found unittest.TestCase.assertCountEqual but that doesn't work with numpy arrays!
What is my best approach?
import unittest
import numpy as np
first = [np.array([20, 40]), np.array([20, 60])]
second = [np.array([19.8, 59.7]), np.array([20.1, 40.5])]
np.testing.assert_allclose(first, second, atol=2)  # Fails because the orders are different
unittest.TestCase.assertCountEqual(None, first, second)  # Fails because numpy comparisons evaluate element-wise, and because it doesn't allow a tolerance
A nice list iteration approach
In [1047]: res = []
In [1048]: for i in first:
      ...:     for j in second:
      ...:         diff = np.abs(i-j)
      ...:         if np.all(diff<2):
      ...:             res.append((i,j))
In [1049]: res
Out[1049]:
[(array([20, 40]), array([ 20.1, 40.5])),
(array([20, 60]), array([ 19.8, 59.7]))]
Length of res is the number of matches.
Or as list comprehension:
def match(i,j):
    diff = np.abs(i-j)
    return np.all(diff<2)
In [1051]: [(i,j) for i in first for j in second if match(i,j)]
Out[1051]:
[(array([20, 40]), array([ 20.1, 40.5])),
(array([20, 60]), array([ 19.8, 59.7]))]
or with the existing array test:
[(i,j) for i in first for j in second if np.allclose(i,j, atol=2)]
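If you only need a quick yes/no check and are not worried about one point matching several others (the matching-based answer further down handles that rigorously), a minimal sketch along these lines works (roughly_equal_sets is my name, not a library function):
import numpy as np

def roughly_equal_sets(first, second, atol=2):
    # every point in first must have at least one close partner in second
    return len(first) == len(second) and all(
        any(np.allclose(i, j, atol=atol) for j in second) for i in first
    )

print(roughly_equal_sets(first, second))  # True for the example above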
Here you are :)
(idea based on "Euclidean distance between points in two different Numpy arrays, not within")
import numpy as np
import scipy.spatial
first = [np.array([20, 60]), np.array([20, 40])]
second = [np.array([19.8, 59.7]), np.array([20.1, 40.5])]
def pointsProximityCheck(firstListOfPoints, secondListOfPoints, distanceTolerance):
    pointIndex = 0
    maxDistance = 0
    lstIndices = []
    # each row of the cdist result holds the distances from one point of the
    # first list to every point of the second list
    for item in scipy.spatial.distance.cdist(firstListOfPoints, secondListOfPoints):
        currMinDist = min(item)
        if currMinDist > maxDistance:
            maxDistance = currMinDist
        if currMinDist >= distanceTolerance:
            lstIndices.append(pointIndex)
            # print("point with pointIndex [", pointIndex, "] in the first list outside of Tolerance")
        pointIndex += 1
    return (maxDistance, lstIndices)
maxDistance, lstIndicesOfPointsOutOfTolerance = pointsProximityCheck(first, second, distanceTolerance=0.5)
print("maxDistance:", maxDistance, "indicesOfOutOfTolerancePoints", lstIndicesOfPointsOutOfTolerance )
gives as output with distanceTolerance=0.5:
maxDistance: 0.509901951359 indicesOfOutOfTolerancePoints [1]
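For what it's worth, the row loop above can also be collapsed into vectorized calls on the cdist result; a sketch of the same logic (my variation, not the answer's original code):
import numpy as np
import scipy.spatial

dists = scipy.spatial.distance.cdist(first, second)  # all pairwise distances
minDists = dists.min(axis=1)                         # nearest neighbour of each point in first
maxDistance = minDists.max()
lstIndices = np.where(minDists >= 0.5)[0].tolist()   # indices outside the tolerance
print("maxDistance:", maxDistance, "indicesOfOutOfTolerancePoints", lstIndices)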
but possibly in a different order
This is the key requirement. This problem can be treated as a classic problem in graph theory: finding a perfect matching in an unweighted bipartite graph. The Hungarian algorithm is a classic way to solve it.
Here is my implementation.
import numpy as np
def is_matched(first, second):
    checked = np.empty((len(first),), dtype=bool)
    first_matching = [-1] * len(first)
    second_matching = [-1] * len(second)

    # Kuhn's augmenting-path search: try to match first[i]
    def find(i):
        for j, point in enumerate(second):
            if np.allclose(first[i], point, atol=2):
                if not checked[j]:
                    checked[j] = True
                    if second_matching[j] == -1 or find(second_matching[j]):
                        second_matching[j] = i
                        first_matching[i] = j
                        return True
        return False

    def get_max_matching():
        count = 0
        for i in range(len(first)):
            if first_matching[i] == -1:
                checked.fill(False)
                if find(i):
                    count += 1
        return count

    return len(first) == len(second) and get_max_matching() == len(first)
first = [np.array([20, 40]), np.array([20, 60])]
second = [np.array([19.8, 59.7]), np.array([20.1, 40.5])]
print(is_matched(first, second))
# True
first = [np.array([20, 40]), np.array([20, 60])]
second = [np.array([19.8, 59.7]), np.array([20.1, 43.5])]
print(is_matched(first, second))
# False
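A side note, not part of the answer above: if SciPy is available, the same perfect-matching test can be assembled from scipy.optimize.linear_sum_assignment on the pairwise distance matrix. Caveat: this sketch uses a Euclidean distance tolerance, not np.allclose's per-coordinate atol, so it is only approximately equivalent:
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist

def is_matched_scipy(first, second, tol=2):
    if len(first) != len(second):
        return False
    dists = cdist(first, second)               # pairwise Euclidean distances
    rows, cols = linear_sum_assignment(dists)  # minimum-cost perfect matching
    return bool((dists[rows, cols] < tol).all())

print(is_matched_scipy([np.array([20, 40]), np.array([20, 60])],
                       [np.array([19.8, 59.7]), np.array([20.1, 40.5])]))  # True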
Related
I am trying to rule out a possible astrology effect on populations, i.e. to show it is statistically insignificant, but to no avail. I am using Pearson's chi-square test on two distributions of sun signs from two different populations, one of astronaut pilots and the other of celebrities. Something must be wrong, but I have failed to find it, probably on the statistics side.
import numpy as np
import pandas as pd
import ephem
from collections import Counter, namedtuple
import matplotlib.pyplot as plt
from scipy import stats
models = pd.read_csv('models.csv', delimiter=',')
astronauts = pd.read_csv('astronauts.csv', delimiter=',')
models = models.sample(229)
astronauts = astronauts.sample(229)
sun = ephem.Sun()
def get_planet_constellation(planet, dataset):
    person_planet_constellation = []
    for person in dataset['Birth Date']:
        planet.compute(person)
        person_planet_constellation += [ephem.constellation(planet)[1]]
    return person_planet_constellation

def plot_bar_group(planet, data1, data2):
    fig, ax = plt.subplots()
    plt.bar(data1.keys(), data1.values(), alpha=0.5)
    plt.bar(data2.keys(), data2.values(), alpha=0.5)
    plt.legend(['astronauts', 'models'])
    ylabel = 'Percentages of ' + planet.name + ' in constellation'
    ax.set_ylabel(ylabel)
    title = 'Histogram of ' + planet.name + ' in constellation by group'
    ax.set_title(title)
    plt.show()

astronaut_sun_constellation = Counter(
    get_planet_constellation(sun, astronauts))
model_sun_constellation = Counter(get_planet_constellation(sun, models))
plot_bar_group(sun, astronaut_sun_constellation, model_sun_constellation)
a = list(astronaut_sun_constellation.values())
b = list(model_sun_constellation.values())
s = np.array([a, b])
stat, p, dof, expected = stats.chi2_contingency(s)
print(stat, p, dof, expected)
prob = 0.95
critical = stats.chi2.ppf(prob, dof)
if abs(stat) >= critical:
    print('Dependent (reject H0)')
else:
    print('Independent (fail to reject H0)')
# interpret p-value
alpha = 1.0 - prob
if p <= alpha:
    print('Dependent (reject H0)')
else:
    print('Independent (fail to reject H0)')
https://www.dropbox.com/s/w7rye6m5lbihjlh/astronauts.csv
https://www.dropbox.com/s/xlxanr0pxqtxcvv/models.csv
I have eventually found the bug: it was in passing the counters as plain lists to the chi-square function. The counts must be sorted by key first, otherwise chi-square sees a major difference between the two sets of values. All astrology effects now come out insignificant, as expected, at the 0.95 level.
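For illustration, a minimal sketch of that fix as I understand it (my reconstruction, not the poster's exact code): align both counters on the same sorted set of signs before building the contingency table.
# align both counters on a common, sorted list of signs; missing signs count as 0
signs = sorted(set(astronaut_sun_constellation) | set(model_sun_constellation))
a = [astronaut_sun_constellation[sign] for sign in signs]
b = [model_sun_constellation[sign] for sign in signs]
s = np.array([a, b])
stat, p, dof, expected = stats.chi2_contingency(s)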
I have defined my function (the partition function partfunc_E, a function of ionisation energy chiI and temperature T) and have to create a nested for loop: an outer loop over the values of the ionisation energies (chiI, a vector) and an inner loop to evaluate the sum over all energy states for the ion (here I am using 5). The output should be a vector of partition function values, one for each ionisation energy in the chiI array.
(From our instructions): using the ionisation energies of calcium and a temperature of 10,000 K, this should give something like:
[ 1.45605581 1.45648718 1.45648849 1.45648849 1.45648849]
Note: g=1 for all states.
I don't know what I have done wrong, but I am not getting what they expect at all: I am getting an array of length 100 in which all the values are the same. The following code is my attempt.
import matplotlib.pylab as plt
import numpy as np
from astropy import units as u
from astropy import constants as const
%matplotlib inline
k = const.k_B.to('eV*K**-1')
#print(k)
def partfunc_E(chiI, T):
    return g*np.exp(-(chiI/(k*T)))
chiI = [6.1131554, 11.871719, 50.91316, 67.2732, 84.34]*u.eV
T=(10000)*u.K
g= 1
partition_Ca = []
for i in chiI:
    for j in range(0,10,1):
        function = partfunc_E(chiI,T)
        partition_Ca.append(sum(partfunc_E(chiI,T)))
You are doing something like this:
In [336]: x = np.arange(3)
     ...: alist = []
     ...: for i in x:
     ...:     for j in range(2):
     ...:         alist.append(np.exp(x).sum())
     ...:
In [337]: alist
Out[337]:
[11.107337927389695,
11.107337927389695,
11.107337927389695,
11.107337927389695,
11.107337927389695,
11.107337927389695]
this puts the same value into alist each time. The looping does nothing new:
In [338]: np.exp(x).sum()
Out[338]: 11.107337927389695
You could instead use i, the iteration variable in the loop:
In [339]: x = np.arange(3)
     ...: alist = []
     ...: for i in x:
     ...:     alist.append(np.exp(i))
     ...:
In [340]: alist
Out[340]: [1.0, 2.718281828459045, 7.38905609893065]
But since np.exp works with an array, there's no need to iterate.
In [341]: np.exp(x)
Out[341]: array([1. , 2.71828183, 7.3890561 ])
As I demonstrate, you need to experiment with some basic Python loops, and check the results in detail. Don't write a big script and then scratch your head when the results aren't right. Test pieces, and make sure you understand each step.
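Applied to the original problem, the same point holds: partfunc_E already broadcasts over the whole chiI array, so a single call evaluates all five energies at once. A sketch, reusing the question's definitions of chiI, T, g and k:
factors = partfunc_E(chiI, T)  # one Boltzmann factor per entry of chiI, no loops
print(factors)                 # dimensionless Quantity of length 5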
How do I generate random matrices and multiply them efficiently?
This is what I've done:
mat1 = []
for i in range(0, order):
    num1 = random.sample(range(1,10), order)
    print(num1)
    mat1.append(num1)
print()
print("Result of Matrix Multiplication.")
for p in range(len(mat1)):
    for q in range(len(mat2[0])):
        for r in range(len(mat2)):
            res_matrix[p][q] += mat1[p][r] * mat2[r][q]
for res in res_matrix:
    print(res)
You can use a list comprehension to generate res_matrix:
res_matrix = [[0 for i in range(order)] for j in range(order)]
Also, have you heard of numpy? It does this kind of computation (and many more) easily and very fast. This is what your code would become with numpy:
import numpy as np
print("Generate 1st Matrix")
mat1 = np.random.randint(1, 10, size=(order, order))
print(mat1)
print("Generate 2nd Matrix")
mat2 = np.random.randint(1, 10, size=(order, order))
print(mat2)
res_matrix = mat1.dot(mat2)
print("Result of Matrix Multiplication.")
print(res_matrix)
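For example, with an assumed order of 3 (and note that on Python 3.5+ the @ operator does the same as .dot):
import numpy as np

order = 3  # example size, pick whatever you need
mat1 = np.random.randint(1, 10, size=(order, order))
mat2 = np.random.randint(1, 10, size=(order, order))
print(mat1 @ mat2)  # identical to mat1.dot(mat2)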
I have used the following code.
from collections import defaultdict
from random import randint, randrange,choice, shuffle
def random_array(low, high, step, size):
    lst = []
    while len(lst) < size:
        nexts = randrange(low, high, step)
        if nexts in lst:
            continue
        lst.append(nexts)
    return lst

def find_pair_from_two_list(a, b, val):
    b_dict = defaultdict(int)
    for i, v in enumerate(b):
        b_dict[v] = i
    for v in a:
        if (val - v) in b_dict:
            return v, val - v
    return -1, -1
arr1 = random_array(1, 100, 1, 99)
arr2 = random_array(1, 100, 1, 99)
val1 = choice(arr1)
val2 = choice(arr2)
val = val1 + val2
print(find_pair_from_two_list(arr1,arr2, val))
However, if I change the size value in
arr1 = random_array(1, 100, 1, 99)
arr2 = random_array(1, 100, 1, 99)
up to 99, it works instantly; but if I change either size value to 100 or more, it just seems to hang.
I am curious to know why this is happening. It works well up to 99, but what causes it to hang at even 100?
Why is yours slow:
Using arr1 = random_array(1, 100, 1, 100), your method can take a lot of time to draw the last missing numbers, because you draw new random values over and over and discard them whenever they are already inside your result list:
while len(lst) < size:
    nexts = randrange(low, high, step)
    if nexts in lst:
        continue  # discards numbers already drawn
    lst.append(nexts)
return lst
With inputs like this you essentially draw "all" possible numbers until done, and the more your result already contains, the longer it takes to draw another "fitting" one.
You even produce an endless loop if your range(low, high, step) has fewer total values than your size demands. That is exactly what happens in your case: randrange(1, 100, 1) can only ever produce the 99 distinct values 1 to 99, so a size of 100 can never be reached. Another example:
(1, 100, 5, 100)  # => only 20 values in this range with this step -> endless loop
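You can verify those counts directly, which is exactly why size=100 hangs:
print(len(range(1, 100, 1)))  # 99 possible values -> size=100 can never be reached
print(len(range(1, 100, 5)))  # 20 possible values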
Possible simplification (not optimal)
You could simplify and speed up the code:
import random
def random_array(low, high, step, size):
    poss = list(range(low, high, step))  # this does not contain duplicates
    random.shuffle(poss)                 # shuffle it
    return poss[:size]                   # return size (or all) elements from it
print(random_array(1,100,1,10))
This code still returns if you pass it a "wrong" combination, but the resulting list is then shorter than the size you specified.
Even better
jonsharpe's suggestion to use
random.sample(range(low,high,step),size)
like so:
def ra(low, high, step, size):
    return random.sample(range(low, high, step), size)
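A nice side effect, as far as I can tell: for the impossible combination that made your version hang, random.sample fails fast with an exception instead of looping forever:
import random
print(ra(1, 100, 1, 99))  # fine: range(1, 100, 1) holds exactly 99 values
ra(1, 100, 1, 100)        # raises ValueError (sample larger than population)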
Performance test
Performance-wise, random.sample easily outperforms mine for big lists:
import random
def random_array(low, high, step, size):
    poss = list(range(low, high, step))
    random.shuffle(poss)
    return poss[:size]

def ra(low, high, step, size):
    return random.sample(range(low, high, step), size)
import timeit

if __name__ == '__main__':
    # time 10000 draws of 495 randoms from range(1, 1000000, 22)
    print(timeit.timeit("ra(1,1000000,22,495)", setup="from __main__ import ra", number=10000))
    print(timeit.timeit("random_array(1,1000000,22,495)", setup="from __main__ import random_array", number=10000))
Output:
1.1825043768664596 # random.sample(...) of range(...)
92.12594874871951 # mine
The reason is probably that I create actual lists from ranges, while random.sample can work with the range object directly and never builds the full list...
Docs:
https://docs.python.org/3.1/library/random.html
https://docs.python.org/3/library/timeit.html
I have the working code below.
from matplotlib import pyplot as plt
import numpy as np
from matplotlib_venn import venn3, venn3_circles
Gastric_tumor_promoters = set(['DPEP1', 'CDC42BPA', 'GNG4', 'RAPGEFL1', 'MYH7B', 'SLC13A3', 'PHACTR3', 'SMPX', 'NELL2', 'PNMAL1', 'KRT23', 'PCP4', 'LOX', 'CDC42BPA'])
Ovarian_tumor_promoters = set(['ABLIM1','CDC42BPA','VSNL1','LOX','PCP4','SLC13A3'])
Gastric_tumor_suppressors = set(['PLCB4', 'VSNL1', 'TOX3', 'VAV3'])
#Ovarian_tumor_suppressors = set(['VAV3', 'FREM2', 'MYH7B', 'RAPGEFL1', 'SMPX', 'TOX3'])
venn3([Gastric_tumor_promoters,Ovarian_tumor_promoters, Gastric_tumor_suppressors], ('GCPromoters', 'OCPromoters', 'GCSuppressors'))
plt.show()
How can I show the contents of each set in these 3 circles, with the color alpha being 0.6? The circles must be bigger to accommodate all the symbols.
I'm not sure there is a simple way to do this automatically for any possible combination of sets. If you're ready to do some manual tuning in your particular example, start with something like this:
A = set(['DPEP1', 'CDC42BPA', 'GNG4', 'RAPGEFL1', 'MYH7B', 'SLC13A3', 'PHACTR3', 'SMPX', 'NELL2', 'PNMAL1', 'KRT23', 'PCP4', 'LOX', 'CDC42BPA'])
B = set(['ABLIM1','CDC42BPA','VSNL1','LOX','PCP4','SLC13A3'])
C = set(['PLCB4', 'VSNL1', 'TOX3', 'VAV3'])
v = venn3([A,B,C], ('GCPromoters', 'OCPromoters', 'GCSuppressors'))
v.get_label_by_id('100').set_text('\n'.join(A-B-C))
v.get_label_by_id('110').set_text('\n'.join(A&B-C))
v.get_label_by_id('011').set_text('\n'.join(B&C-A))
v.get_label_by_id('001').set_text('\n'.join(C-A-B))
v.get_label_by_id('010').set_text('')
plt.annotate(',\n'.join(B-A-C), xy=v.get_label_by_id('010').get_position() +
             np.array([0, 0.2]), xytext=(-20, 40), ha='center',
             textcoords='offset points',
             bbox=dict(boxstyle='round,pad=0.5', fc='gray', alpha=0.1),
             arrowprops=dict(arrowstyle='->',
                             connectionstyle='arc', color='gray'))
Note that methods like v.get_label_by_id('001') return the matplotlib Text objects, and you are free to configure them to your liking (e.g. you can change font size by calling set_fontsize(8), etc).
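For example, to shrink all region labels at once (a small sketch; subset_labels holds those same Text objects, with None for empty regions):
for label in v.subset_labels:
    if label:  # empty regions have no label object
        label.set_fontsize(8)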
Here is an example which automates the whole thing. It creates a temporary dictionary whose keys are the ids needed by venn and whose values are the intersections of all participating sets for each id.
If you don't want the labels sorted, remove the sorted() call in the second-to-last line.
import math
from matplotlib import pyplot as plt
from matplotlib_venn import venn2, venn3
import numpy as np
# Convert number to indices into binary
# e.g. 5 -> '101' -> [2, 0]
def bits2indices(b):
    l = []
    if b == 0:
        return l
    for i in reversed(range(0, int(math.log(b, 2)) + 1)):
        if b & (1 << i):
            l.append(i)
    return l
# Make dictionary containing venn id's and set intersections
# e.g. d = {'100': {'c', 'b', 'a'}, '010': {'c', 'd', 'e'}, ... }
def set2dict(s):
    d = {}
    for i in range(1, 2**len(s)):
        # Make venn id strings
        key = bin(i)[2:].zfill(len(s))
        key = key[::-1]
        ind = bits2indices(i)
        # Get the participating sets for this id
        participating_sets = [s[x] for x in ind]
        # Get the intersections of those sets
        inter = set.intersection(*participating_sets)
        d[key] = inter
    return d
# Define some sets
a = set(['a', 'b', 'c'])
b = set(['c', 'd', 'e'])
c = set(['e', 'f', 'a'])
s = [a, b, c]
# Create dictionary from sets
d = set2dict(s)
# Plot it
h = venn3(s, ('A', 'B', 'C'))
for k, v in d.items():
    l = h.get_label_by_id(k)
    if l:
        l.set_text('\n'.join(sorted(v)))
plt.show()
Edit:
I'm sorry, I just figured out that the above code does not remove duplicate labels and is therefore wrong: the number of elements shown by venn and the number of labels differed. Here is a new version which removes the wrong duplicates from the other intersections. I guess there is a smarter and more functional way to do that than iterating over all intersections twice...
import math, itertools
from matplotlib import pyplot as plt
from matplotlib_venn import venn2, venn3
import numpy as np
# Generate list index for itertools combinations
def gen_index(n):
    x = -1
    while True:
        while True:
            x = x + 1
            if bin(x).count('1') == n:
                break
        yield x
# Generate all combinations of intersections
def make_intersections(sets):
    l = [None] * 2**len(sets)
    for i in range(1, len(sets) + 1):
        ind = gen_index(i)
        for subset in itertools.combinations(sets, i):
            inter = set.intersection(*subset)
            l[next(ind)] = inter
    return l
# Get weird reversed binary string id for venn
def number2venn_id(x, n_fill):
    id = bin(x)[2:].zfill(n_fill)
    id = id[::-1]
    return id
# Iterate over all combinations and remove duplicates from intersections with
# more sets
def sets2dict(sets):
    l = make_intersections(sets)
    d = {}
    for i in range(1, len(l)):
        d[number2venn_id(i, len(sets))] = l[i]
        for j in range(1, len(l)):
            if bin(j).count('1') < bin(i).count('1'):
                l[j] = l[j] - l[i]
                d[number2venn_id(j, len(sets))] = l[j] - l[i]
    return d
# Define some sets
a = set(['a', 'b', 'c', 'f'])
b = set(['c', 'd', 'e'])
c = set(['e', 'f', 'a'])
sets = [a, b, c]
d = sets2dict(sets)
# Plot it
h = venn3(sets, ('A', 'B', 'C'))
for k, v in d.items():
    l = h.get_label_by_id(k)
    if l:
        l.set_fontsize(12)
        l.set_text('\n'.join(sorted(v)))
# Original for comparison
f = plt.figure(2)
venn3(sets, ('A', 'B', 'C'))
plt.show()
Thanks for the automation, @Vinci! I wonder if you (or somebody else) have written a version that rearranges the content so that the elements stay within the bubble(s), in a random fashion instead of a long list? ... bonus track: re-dimensioning the bubbles if the elements don't fit? ;)