Initial question
I want to calculate the Levenshtein distance between multiple strings, one in a series, the other in a list. I tried my hand at map, zip, etc., but I only got the desired result using a for loop and apply. Is there a way to improve style and, especially, speed?
Here is what I tried. It does what it is supposed to do, but it lacks speed given a large series.
import stringdist
import pandas as pd

strings = ['Hello', 'my', 'Friend', 'I', 'am']
s = pd.Series(data=strings, index=strings)
c = ['me', 'mine', 'Friend']

df = pd.DataFrame()
for w in c:
    df[w] = s.apply(lambda x: stringdist.levenshtein(x, w))
## Result: ##
        me  mine  Friend
Hello    4     5       6
my       1     3       6
Friend   5     4       0
I        2     4       6
am       2     4       6
Solution
Thanks to @Dames and @molybdenum42, I can provide the solution I used, directly beneath the question. For more insight, please check their great answers below.
import stringdist
import numpy as np
import pandas as pd
from itertools import product

strings = ['Hello', 'my', 'Friend', 'I', 'am']
s = pd.Series(data=strings, index=strings)
c = ['me', 'mine', 'Friend']
word_combinations = np.array(list(product(s.values, c)))
vectorized_levenshtein = np.vectorize(stringdist.levenshtein)
result = vectorized_levenshtein(word_combinations[:, 0],
                                word_combinations[:, 1])
result = result.reshape((len(s), len(c)))
df = pd.DataFrame(result, columns=c, index=s)
This results in the desired data frame.
Setup:
import stringdist
import pandas as pd
import numpy as np
import itertools
s = pd.Series(data=['Hello', 'my', 'Friend'],
index=['Hello', 'my', 'Friend'])
c = ['me', 'mine', 'Friend']
Options
option: an easy one-liner
df = pd.DataFrame([s.apply(lambda x: stringdist.levenshtein(x, w)) for w in c],
                  index=c).T
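The dict-comprehension form (timed as test1 in the performance section below) builds the desired orientation directly, with the words of c as columns:
df = pd.DataFrame({w: s.apply(lambda x: stringdist.levenshtein(x, w)) for w in c})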
option: np.fromfunction (thanks to @baccandr)
@np.vectorize
def lavdist(a, b):
    # a indexes into s, b into c
    return stringdist.levenshtein(s.values[a], c[b])

df = pd.DataFrame(np.fromfunction(lavdist, (len(s), len(c)), dtype=int),
                  columns=c, index=s)
option: see @molybdenum42
word_combinations = np.array(list(itertools.product(s.values, c)))
vectorized_levenshtein = np.vectorize(stringdist.levenshtein)
result = vectorized_levenshtein(word_combinations[:, 0], word_combinations[:, 1])
df = pd.DataFrame([word_combinations[:, 0], word_combinations[:, 1], result]).T
df = df.set_index([0, 1])[2].unstack()
(the best) option: modified option 3
word_combinations = np.array(list(itertools.product(s.values, c)))
vectorized_levenshtein = np.vectorize(stringdist.levenshtein)
result = vectorized_levenshtein(word_combinations[:, 0], word_combinations[:, 1])
result = result.reshape((len(s), len(c)))
df = pd.DataFrame(result, columns=c, index=s)
Performance testing:
import timeit
from Levenshtein import distance
import pandas as pd
import numpy as np
import itertools
s = pd.Series(data=['Hello', 'my', 'Friend'],
index=['Hello', 'my', 'Friend'])
c = ['me', 'mine', 'Friend']
test_code0 = """
df = pd.DataFrame()
for w in c:
    df[w] = s.apply(lambda x: distance(x, w))
"""
test_code1 = """
df = pd.DataFrame({w:s.apply(lambda x: distance(x, w)) for w in c})
"""
test_code2 = """
@np.vectorize
def lavdist(a, b):
    return distance(s.values[a], c[b])
df = pd.DataFrame(np.fromfunction(lavdist, (len(s), len(c)), dtype=int),
                  columns=c, index=s)
"""
test_code3 = """
word_combinations = np.array(list(itertools.product(s.values, c)))
vectorized_levenshtein = np.vectorize(distance)
result = vectorized_levenshtein(word_combinations[:, 0], word_combinations[:, 1])
df = pd.DataFrame([word_combinations[:, 0], word_combinations[:, 1], result]).T
df = df.set_index([0, 1])[2].unstack()
"""
test_code4 = """
word_combinations = np.array(list(itertools.product(s.values, c)))
vectorized_levenshtein = np.vectorize(distance)
result = vectorized_levenshtein(word_combinations[:,0], word_combinations[:,1])
result = result.reshape((len(s), len(c)))
df = pd.DataFrame(result, columns=c, index=s)
"""
test_setup = "from __main__ import distance, s, c, pd, np, itertools"
print("test0", timeit.timeit(test_code0, number = 1000, setup = test_setup))
print("test1", timeit.timeit(test_code1, number = 1000, setup = test_setup))
print("test2", timeit.timeit(test_code2, number = 1000, setup = test_setup))
print("test3", timeit.timeit(test_code3, number = 1000, setup = test_setup))
print("test4", timeit.timeit(test_code4, number = 1000, setup = test_setup))
Results
# test0 1.3671939949999796
# test1 0.5982696900009614
# test2 0.3246431229999871
# test3 2.0100400850005826
# test4 0.23796007100099814
The reshape approach (test4) is the fastest, with np.fromfunction (test2) second; building and unstacking an intermediate DataFrame (test3) is the slowest.
Using itertools, you can at least get all the required combinations. Using a vectorized version of stringdist.levenshtein (made using numpy.vectorize()) you can then get your desired result without looping at all, though I haven't tested the performance of the vectorized Levenshtein function.
The code could look something like this:
import stringdist
import numpy as np
import pandas as pd
import itertools
s = pd.Series(["Hello", "my","Friend"])
c = ['me', 'mine', 'Friend']
word_combinations = np.array(list(itertools.product(s.values, c)))
vectorized_levenshtein = np.vectorize(stringdist.levenshtein)
result = vectorized_levenshtein(word_combinations[:,0], word_combinations[:,1])
At this point you have the results in a numpy array, each entry corresponding to one of all the possible combinations of your two initial arrays. If you want to get it into the shape you have in your example, there's some pandas trickery to be done:
df = pd.DataFrame([word_combinations[:,0], word_combinations[:,1], result]).T
### initially looks like: ###
#         0       1  2
# 0   Hello      me  4
# 1   Hello    mine  5
# 2   Hello  Friend  6
# 3      my      me  1
# 4      my    mine  3
# 5      my  Friend  6
# 6  Friend      me  5
# 7  Friend    mine  4
# 8  Friend  Friend  0
df = df.set_index([0,1])[2].unstack()
### Now looks like: ###
#         Friend  Hello  my
# Friend       0      6   6
# me           5      4   1
# mine         4      5   3
Again, I haven't tested the performance of this method, so I recommend checking that out - it should be faster than iteration though.
EDIT:
User @Dames has a better suggestion for making the result all pretty-like:
result = result.reshape((len(s), len(c)))
df = pd.DataFrame(result, columns=c, index=s)
Related
I have this weird Pandas problem: when I use the apply function with values taken from a data frame, it only gets applied to the first row:
import pandas as pd
# main data frame - to be edited
headerData = [['dataA', 'dataB']]
valuesData = [[10, 20], [10, 20]]
dfData = pd.DataFrame(valuesData, columns = headerData)
dfData.to_csv('MainData.csv', index=False)
readMainDataCSV = pd.read_csv('MainData.csv')
print(readMainDataCSV)
#variable data frame - pull values from this to edit main data frame
headerVariables = [['varA', 'varB']]
valuesVariables = [[2, 10]]
dfVariables = pd.DataFrame(valuesVariables, columns = headerVariables)
dfVariables.to_csv('Variables.csv', index=False)
readVariablesCSV = pd.read_csv('Variables.csv')
readVarA = readVariablesCSV['varA']
readVarB = readVariablesCSV['varB']
def formula(x):
    return (x / readVarA) * readVarB
dfFormulaApplied = readMainDataCSV.apply(lambda x: formula(x))
print('\n', dfFormulaApplied)
Output:
   dataA  dataB
0   50.0  100.0
1    NaN    NaN
But when I just use regular variables (not being called from a data frame), it functions just fine:
import pandas as pd
# main data frame - to be edited
headerData = [['dataA', 'dataB']]
valuesData = [[10, 20], [20, 40]]
dfData = pd.DataFrame(valuesData, columns = headerData)
dfData.to_csv('MainData.csv', index=False)
readMainDataCSV = pd.read_csv('MainData.csv')
print(readMainDataCSV)
# variables
readVarA = 2
readVarB = 10
def formula(x):
    return (x / readVarA) * readVarB
dfFormulaApplied = readMainDataCSV.apply(lambda x: formula(x))
print('\n', dfFormulaApplied)
Output:
   dataA  dataB
0   50.0  100.0
1  100.0  200.0
Help please, I'm pulling my hair out.
If you take readVarA and readVarB from the dataframe by selecting a column, you get a pandas Series with an index, and that index is the problem: dividing one Series by another Series with a different index aligns on the index labels, so every row without a matching label comes out as NaN.
You can take the first value from the Series like this:
def formula(x):
    return (x / readVarA[0]) * readVarB[0]
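To see the alignment problem in isolation, here is a minimal sketch with made-up values: a two-row Series divided by a one-row Series only finds a matching index label at 0, so every other row becomes NaN, while a scalar divisor is applied to every row.
import pandas as pd

data = pd.Series([10, 20])   # index labels 0 and 1
var = pd.Series([2])         # index label 0 only
print(data / var)            # 0 -> 5.0, 1 -> NaN (no matching label)
print(data / var.iloc[0])    # 0 -> 5.0, 1 -> 10.0 (scalar applies row-wise)
readVarA[0] works the same way; .iloc[0] just makes the positional intent explicit.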
I am trying to understand how the Kalman filter for non-linear systems works. While searching for an example, I came across this good basic example.
import numpy as np
import pylab as pl
import pandas as pd
from pykalman import UnscentedKalmanFilter
# initialize parameters
def transition_function(state, noise):
    a = np.sin(state[0]) + state[1] * noise[0]
    b = state[1] + noise[1]
    return np.array([a, b])

def observation_function(state, noise):
    C = np.array([[-1, 0.5], [0.2, 0.1]])
    return np.dot(C, state) + noise
transition_covariance = np.eye(2)
random_state = np.random.RandomState(0)
observation_covariance = np.eye(2) + random_state.randn(2, 2) * 0.1
initial_state_mean = [0, 0]
initial_state_covariance = [[1, 0.1], [-0.1, 1]]
# sample from model
kf = UnscentedKalmanFilter(
    transition_function, observation_function,
    transition_covariance, observation_covariance,
    initial_state_mean, initial_state_covariance,
    random_state=random_state
)
states, observations = kf.sample(50, initial_state_mean)
# estimate state with filtering and smoothing
filtered_state_estimates = kf.filter(observations)[0]
smoothed_state_estimates = kf.smooth(observations)[0]
# draw estimates
pl.figure()
lines_true = pl.plot(states, color='b')
lines_filt = pl.plot(filtered_state_estimates, color='r', ls='-')
lines_smooth = pl.plot(smoothed_state_estimates, color='g', ls='-.')
pl.legend((lines_true[0], lines_filt[0], lines_smooth[0]),
          ('true', 'filt', 'smooth'),
          loc='lower left')
pl.show()
This code produces a plot of the true, filtered, and smoothed states (figure not reproduced here).
However, for my experiment I have created a very small time-series dataset with three columns, formatted as follows. The full dataset is attached here for reproducibility.
time      X         Y
0.040662  1.041667  1
0.139757  1.760417  2
0.144357  1.190104  1
0.145341  1.047526  1
0.145401  1.011882  1
0.148465  1.002970  1
...       ...       ...
Instead of using the random values as shown in the code, how can I feed in the data from the attached CSV file? Here is my approach, but it doesn't seem to work out for me, and I would appreciate any help.
df = pd.read_csv('testdata.csv')
pd.set_option('use_inf_as_null', True)
df.dropna(inplace=True)
X = df.drop('Y', axis=1)
y = df['Y']
d1= np.array(X)
d2 = np.array(y)
From the link I shared, here is how you get the CSV data into Numpy Arrays.
import numpy as np
import csv

with open('testdata.csv', 'r') as csvfile:
    r = csv.reader(csvfile, delimiter=',')
    data = [i for i in r]

headings = data.pop(0)
data = np.array([[float(j) for j in i] for i in data])  # np.float is deprecated; use float

T = data.T[0]  # Time
X = data.T[1]  # X
Y = data.T[2]  # Y
print(T)
print(X)
print(Y)
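Since pandas is already in use, read_csv can do the same job in fewer steps. This is a sketch assuming the time/X/Y header shown above; the commented lines indicate where the real observations would replace the sampled ones in the pykalman example:
import pandas as pd

df = pd.read_csv('testdata.csv')           # columns: time, X, Y
T = df['time'].to_numpy()
observations = df[['X', 'Y']].to_numpy()   # shape (n_timesteps, 2)

# with kf built as in the pykalman example above:
# filtered_state_estimates = kf.filter(observations)[0]
# smoothed_state_estimates = kf.smooth(observations)[0]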
The problem statement:
An unnamed tourist got lost in New York. All he has is a map of M metro stations, which shows the coordinates of the stations and his own coordinates, which he saw on a nearby pointer. The tourist is not sure that each of the stations is open, therefore, just in case, he is looking for the nearest N stations. The tourist moves through New York City like every New Yorker, by city-block distance. Help the tourist find these stations.
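"Distance of city quarters" is the Manhattan (taxicab) metric: the sum of the absolute coordinate differences. A minimal sketch of what the code below computes via scipy's cityblock:
def manhattan(p, q):
    # taxicab distance between two 2D points
    return abs(p[0] - q[0]) + abs(p[1] - q[1])

print(manhattan((1, 1), (4.5, 1.2)))  # ~3.7, tourist to station B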
Sample input
5 2
A 1 2
B 4.5 1.2
C 100500 100500
D 100501 100501
E 100502 100502
1 1
Sample output
A B
My code:
import scipy.spatial.distance as d
import math

# finds the N nearest metro stations to the tourist
def find_shortest_N(distance_list, name_list, number_of_stations):
    result = []
    for num in range(0, number_of_stations):
        min_val_index = distance_list.index(min(distance_list))
        result.append(name_list[min_val_index])
        distance_list.pop(min_val_index)
        name_list.pop(min_val_index)
    return result

# returns a list with distances between the tourist and the stations
def calculate_nearest(list_of_coords, tourist_coords):
    distances = []
    for metro_coords in list_of_coords:
        distances.append(math.fabs(d.cityblock(metro_coords, tourist_coords)))
    return distances

station_coords = []
station_names = []
input_stations = input("Input a number of stations: ").split()
input_stations = list(map(int, input_stations))
# all station coordinates and their names
station_M = input_stations[0]
# number of stations a tourist wants to visit
stations_wanted_N = input_stations[1]
# distribute the station names into the station_names list
# and the coordinates into the station_coords list
for data in range(0, station_M):
    str_input = input()
    list_input = str_input.split()
    station_names.append(list_input[0])
    list_input.pop(0)
    list_input = list(map(float, list_input))
    station_coords.append(list_input)

tourist_coordinates = input("Enter tourist position: ").split()
tourist_coordinates = list(map(float, tourist_coordinates))
distance_values = calculate_nearest(station_coords, tourist_coordinates)
result = find_shortest_N(distance_values, station_names, stations_wanted_N)
for name in result:
    print(name, end=" ")
You could also, for example, directly use the cdist function:
import numpy as np
from scipy.spatial.distance import cdist
sample_input = '''
5 2
A 1 2
B 4.5 1.2
C 100500 100500
D 100501 100501
E 100502 100502
1 1
'''
# Parsing the input data:
sample_data = [line.split()
               for line in sample_input.strip().split('\n')]
tourist_coords = np.array(sample_data.pop(), dtype=float)  # takes the last line
nbr_stations, nbr_wanted = [int(n) for n in sample_data.pop(0)]  # takes the first line
stations_coords = np.array([line[1:] for line in sample_data], dtype=float)
stations_names = [line[0] for line in sample_data]
# Computing the distances:
tourist_coords = tourist_coords.reshape(1, 2)  # has to be a 2D array for cdist
distance = cdist(stations_coords, tourist_coords, metric='cityblock')
# Sorting the distances:
sorted_distance = sorted(zip(stations_names, distance), key=lambda x:x[1])
# Result:
result = [name for name, dist in sorted_distance[:nbr_wanted]]
print(result)
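As a side note, when M is large and only N stations are needed, the full sort can be avoided: np.argpartition selects the N smallest distances in linear time (a sketch reusing the arrays from the snippet above):
d_flat = distance.ravel()
idx = np.argpartition(d_flat, nbr_wanted - 1)[:nbr_wanted]  # N smallest, unordered
idx = idx[np.argsort(d_flat[idx])]                          # order those N by distance
print([stations_names[i] for i in idx])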
Use scipy.spatial.KDTree
from scipy.spatial import KDTree

# reuses stations_coords, stations_names, tourist_coords and nbr_wanted
# from the parsing step above
subway_tree = KDTree(stations_coords)
dist, idx = subway_tree.query(tourist_coords.ravel(), nbr_wanted, p=1)  # p=1: Manhattan metric
nearest_stations = [stations_names[i] for i in idx]
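A KDTree pays off when the same station set is queried repeatedly: the tree is built once in roughly O(M log M) time and each query then costs about O(log M), whereas the cdist approach recomputes all M distances on every query.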
I have the working code below.
from matplotlib import pyplot as plt
import numpy as np
from matplotlib_venn import venn3, venn3_circles
Gastric_tumor_promoters = set(['DPEP1', 'CDC42BPA', 'GNG4', 'RAPGEFL1', 'MYH7B', 'SLC13A3', 'PHACTR3', 'SMPX', 'NELL2', 'PNMAL1', 'KRT23', 'PCP4', 'LOX', 'CDC42BPA'])
Ovarian_tumor_promoters = set(['ABLIM1','CDC42BPA','VSNL1','LOX','PCP4','SLC13A3'])
Gastric_tumor_suppressors = set(['PLCB4', 'VSNL1', 'TOX3', 'VAV3'])
#Ovarian_tumor_suppressors = set(['VAV3', 'FREM2', 'MYH7B', 'RAPGEFL1', 'SMPX', 'TOX3'])
venn3([Gastric_tumor_promoters, Ovarian_tumor_promoters, Gastric_tumor_suppressors], ('GCPromoters', 'OCPromoters', 'GCSuppressors'))
plt.show()
How can I show the contents of each set in these 3 circles, with the color alpha being 0.6? The circles must be bigger to accommodate all the symbols.
I'm not sure there is a simple way to do this automatically for any possible combination of sets. If you're ready to do some manual tuning for your particular example, start with something like this:
from matplotlib import pyplot as plt
from matplotlib_venn import venn3
import numpy as np

A = set(['DPEP1', 'CDC42BPA', 'GNG4', 'RAPGEFL1', 'MYH7B', 'SLC13A3', 'PHACTR3', 'SMPX', 'NELL2', 'PNMAL1', 'KRT23', 'PCP4', 'LOX', 'CDC42BPA'])
B = set(['ABLIM1','CDC42BPA','VSNL1','LOX','PCP4','SLC13A3'])
C = set(['PLCB4', 'VSNL1', 'TOX3', 'VAV3'])
v = venn3([A,B,C], ('GCPromoters', 'OCPromoters', 'GCSuppressors'))
v.get_label_by_id('100').set_text('\n'.join(A-B-C))
v.get_label_by_id('110').set_text('\n'.join(A&B-C))
v.get_label_by_id('011').set_text('\n'.join(B&C-A))
v.get_label_by_id('001').set_text('\n'.join(C-A-B))
v.get_label_by_id('010').set_text('')
plt.annotate(',\n'.join(B-A-C),
             xy=v.get_label_by_id('010').get_position() + np.array([0, 0.2]),
             xytext=(-20, 40), ha='center',
             textcoords='offset points',
             bbox=dict(boxstyle='round,pad=0.5', fc='gray', alpha=0.1),
             arrowprops=dict(arrowstyle='->',
                             connectionstyle='arc', color='gray'))
Note that methods like v.get_label_by_id('001') return the matplotlib Text objects, and you are free to configure them to your liking (e.g. you can change font size by calling set_fontsize(8), etc).
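For instance, to shrink several region labels at once (a sketch reusing the region ids from the snippet above):
for region_id in ('100', '110', '011', '001'):
    lbl = v.get_label_by_id(region_id)
    if lbl:  # empty regions have no label object
        lbl.set_fontsize(8)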
Here is an example which automates the whole thing. It creates a temporary dictionary whose keys are the ids needed by venn and whose values are the intersections of all participating sets for that id.
If you don't want the labels sorted, remove the sorted() call in the second-to-last line.
import math
from matplotlib import pyplot as plt
from matplotlib_venn import venn2, venn3
import numpy as np
# Convert number to indices into binary
# e.g. 5 -> '101' > [2, 0]
def bits2indices(b):
    l = []
    if b == 0:
        return l
    for i in reversed(range(0, int(math.log(b, 2)) + 1)):
        if b & (1 << i):
            l.append(i)
    return l
# Make dictionary containing venn id's and set intersections
# e.g. d = {'100': {'c', 'b', 'a'}, '010': {'c', 'd', 'e'}, ... }
def set2dict(s):
    d = {}
    for i in range(1, 2**len(s)):
        # Make venn id strings
        key = bin(i)[2:].zfill(len(s))
        key = key[::-1]
        ind = bits2indices(i)
        # Get the participating sets for this id
        participating_sets = [s[x] for x in ind]
        # Get the intersections of those sets
        inter = set.intersection(*participating_sets)
        d[key] = inter
    return d
# Define some sets
a = set(['a', 'b', 'c'])
b = set(['c', 'd', 'e'])
c = set(['e', 'f', 'a'])
s = [a, b, c]
# Create dictionary from sets
d = set2dict(s)
# Plot it
h = venn3(s, ('A', 'B', 'C'))
for k, v in d.items():
    l = h.get_label_by_id(k)
    if l:
        l.set_text('\n'.join(sorted(v)))
plt.show()
/edit
I'm sorry, I just figured out that the above code does not remove duplicate labels and is therefore wrong: the number of elements shown by venn and the number of labels differed. Here is a new version which removes the wrong duplicates from other intersections. I guess there is a smarter and more functional way to do that than iterating over all intersections twice...
import math, itertools
from matplotlib import pyplot as plt
from matplotlib_venn import venn2, venn3
import numpy as np
# Generate list index for itertools combinations
def gen_index(n):
    x = -1
    while True:
        while True:
            x = x + 1
            if bin(x).count('1') == n:
                break
        yield x
# Generate all combinations of intersections
def make_intersections(sets):
    l = [None] * 2**len(sets)
    for i in range(1, len(sets) + 1):
        ind = gen_index(i)
        for subset in itertools.combinations(sets, i):
            inter = set.intersection(*subset)
            l[next(ind)] = inter
    return l
# Get weird reversed binary string id for venn
def number2venn_id(x, n_fill):
    id = bin(x)[2:].zfill(n_fill)
    id = id[::-1]
    return id
# Iterate over all combinations and remove duplicates from intersections with
# more sets
def sets2dict(sets):
    l = make_intersections(sets)
    d = {}
    for i in range(1, len(l)):
        d[number2venn_id(i, len(sets))] = l[i]
        for j in range(1, len(l)):
            if bin(j).count('1') < bin(i).count('1'):
                l[j] = l[j] - l[i]
                d[number2venn_id(j, len(sets))] = l[j] - l[i]
    return d
# Define some sets
a = set(['a', 'b', 'c', 'f'])
b = set(['c', 'd', 'e'])
c = set(['e', 'f', 'a'])
sets = [a, b, c]
d = sets2dict(sets)
# Plot it
h = venn3(sets, ('A', 'B', 'C'))
for k, v in d.items():
    l = h.get_label_by_id(k)
    if l:
        l.set_fontsize(12)
        l.set_text('\n'.join(sorted(v)))
# Original for comparison
f = plt.figure(2)
venn3(sets, ('A', 'B', 'C'))
plt.show()
Thanks for the automation, @Vinci! I wonder if you (or somebody else) have written a version that rearranges the content so that the elements stay within the bubble(s) in a random fashion instead of a long list? ... bonus track: re-dimensioning the bubbles if the elements do not fit? ;)
What I am trying to do is get bootstrap confidence limits by row, regardless of the number of rows, and make a new dataframe from the output. I can currently do this for the entire dataframe, but not by row. The data I have in my actual program looks similar to what I have below:
   0  1  2
0  1  2  3
1  4  1  4
2  1  2  3
3  4  1  4
I want the new dataframe to look something like this, with the lower and upper confidence limits:
   0  1
0  1  2
1  1  5.5
2  1  4.5
3  1  4.2
The current generated output looks like this:
     0     1
0  2.0  2.75
The Python 3 code below generates a mock dataframe and computes the bootstrap confidence limits for the entire dataframe. The result is a new dataframe with just two values, an upper and a lower confidence limit, rather than four sets of two (one for each row).
import pandas as pd
import numpy as np
import scikits.bootstrap as sci
zz = pd.DataFrame([[[1,2],[2,3],[3,6]],[[4,2],[1,4],[4,6]],
                   [[1,2],[2,3],[3,6]],[[4,2],[1,4],[4,6]]])
print(zz)
x= zz.dtypes
print(x)
a = pd.DataFrame(np.array(zz.values.tolist())[:, :, 0],zz.index, zz.columns)
print(a)
b = sci.ci(a)
b = pd.DataFrame(b)
b = b.T
print(b)
Thank you for any help.
scikits.bootstrap operates by assuming that data samples are arranged by row, not by column. If you want the opposite behavior, just use the transpose, and a statfunction that doesn't combine columns.
import pandas as pd
import numpy as np
import scikits.bootstrap as sci
zz = pd.DataFrame([[[1,2],[2,3],[3,6]],[[4,2],[1,4],[4,6]],
                   [[1,2],[2,3],[3,6]],[[4,2],[1,4],[4,6]]])
print(zz)
x= zz.dtypes
print(x)
a = pd.DataFrame(np.array(zz.values.tolist())[:, :, 0],zz.index, zz.columns)
print(a)
b = sci.ci(a.T, statfunction=lambda x: np.average(x, axis=0))
print(b.T)
Below is the answer I ended up figuring out to create bootstrap ci by row.
import pandas as pd
import numpy as np
import numpy.random as npr
zz = pd.DataFrame([[[1,2],[2,3],[3,6]],[[4,2],[1,4],[4,6]],
                   [[1,2],[2,3],[3,6]],[[4,2],[1,4],[4,6]]])
x= zz.dtypes
a = pd.DataFrame(np.array(zz.values.tolist())[:, :, 0],zz.index, zz.columns)
print(a)
def bootstrap(data, num_samples, statistic, alpha):
    n = len(data)
    idx = npr.randint(0, n, (num_samples, n))
    samples = data[idx]
    stat = np.sort(statistic(samples, 1))
    return (stat[int((alpha/2.0)*num_samples)],
            stat[int((1-alpha/2.0)*num_samples)])
cc = list(a.index.values) # informs generator of the number of rows
def bootbyrow(cc):
    for xx in range(1):
        xx = list(a.index.values)
    for xx in range(len(cc)):
        k = a.apply(lambda y: y[xx])
        k = k.values
        for xx in range(1):
            kk = list(bootstrap(k, 10000, np.mean, 0.05))
        yield list(kk)
abc = pd.DataFrame(list(bootbyrow(cc))) #bootstrap ci by row
# the next 4 lines just show that it's working correctly
a0 = bootstrap((a.loc[0,].values),10000,np.mean,0.05)
a1 = bootstrap((a.loc[1,].values),10000,np.mean,0.05)
a2 = bootstrap((a.loc[2,].values),10000,np.mean,0.05)
a3 = bootstrap((a.loc[3,].values),10000,np.mean,0.05)
print(abc)
print(a0)
print(a1)
print(a2)
print(a3)
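For comparison, here is a more compact sketch of the same per-row idea (a hypothetical helper, not from the original answer): resample each row with replacement, take the mean of every resample, and read the limits off the percentiles of the bootstrap distribution.
import numpy as np
import pandas as pd

def bootstrap_ci_by_row(frame, num_samples=10000, alpha=0.05, seed=None):
    # percentile-bootstrap CI of the row mean, computed independently per row
    rng = np.random.default_rng(seed)
    limits = []
    for _, row in frame.iterrows():
        vals = row.to_numpy()
        idx = rng.integers(0, len(vals), size=(num_samples, len(vals)))
        means = vals[idx].mean(axis=1)
        limits.append(np.percentile(means, [100 * alpha / 2, 100 * (1 - alpha / 2)]))
    return pd.DataFrame(limits, index=frame.index, columns=['lower', 'upper'])

# e.g., with the frame `a` built above: print(bootstrap_ci_by_row(a))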