Using the linear_model perceptron module from sklearn to separate points - python-3.x

I am trying to use this sklearn module for a binary classification problem and my data is clearly linearly separable.
What I don't understand is why the green area of my plot does not include the five red circles.
I have tried varying the number-of-iterations parameter (max_iter) from 100 to 10000, but it does not make any difference.
Here is my code:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Perceptron
def learn_and_display_Perceptron(datafile):
    # read the data from the given file
    data = np.loadtxt(datafile)
    n, d = data.shape
    x = data[:, 0:2]
    y = data[:, 2]
    clf = Perceptron(max_iter=10000)
    clf.fit(x, y)
    sv = np.zeros(n, dtype=bool)  # all-False array
    notsv = np.logical_not(sv)    # all-True array
    # Determine the x1- and x2-limits of the plot
    x1min = min(x[:, 0]) - 1
    x1max = max(x[:, 0]) + 1
    x2min = min(x[:, 1]) - 1
    x2max = max(x[:, 1]) + 1
    plt.xlim(x1min, x1max)
    plt.ylim(x2min, x2max)
    # Plot the data points, enlarging those that are support vectors
    plt.plot(x[(y==1)*notsv, 0], x[(y==1)*notsv, 1], 'ro')
    plt.plot(x[(y==1)*sv, 0], x[(y==1)*sv, 1], 'ro', markersize=10)
    plt.plot(x[(y==-1)*notsv, 0], x[(y==-1)*notsv, 1], 'k^')
    plt.plot(x[(y==-1)*sv, 0], x[(y==-1)*sv, 1], 'k^', markersize=10)
    # Construct a grid of points and evaluate the classifier at each grid point
    grid_spacing = 0.05
    xx1, xx2 = np.meshgrid(np.arange(x1min, x1max, grid_spacing), np.arange(x2min, x2max, grid_spacing))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    Z = clf.predict(grid)
    # Quantize the values to -1, -0.5, 0, 0.5, 1 for display purposes
    for i in range(len(Z)):
        Z[i] = min(Z[i], 1.0)
        Z[i] = max(Z[i], -1.0)
        if (Z[i] > 0.0) and (Z[i] < 1.0):
            Z[i] = 0.5
        if (Z[i] < 0.0) and (Z[i] > -1.0):
            Z[i] = -0.5
    # Show boundary and margin using a color plot
    Z = Z.reshape(xx1.shape)
    plt.pcolormesh(xx1, xx2, Z, cmap=plt.cm.PRGn, vmin=-2, vmax=2, shading='auto')
    plt.show()
My data file, data_1.txt, can be found here: https://github.com/bluetail14/MyCourserapractice/tree/main/Edx
What can I change in my code to adjust the green/purple borderline to include the five red circles?

Nice code. You need to change the eta0 value,
clf = Perceptron(max_iter=10000, eta0=0.1)
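For context (my note, not part of the original answer): in scikit-learn's Perceptron, eta0 is the constant by which the updates are multiplied, i.e. the learning rate. A sketch of the change; the tol=None and random_state arguments are additional suggestions of mine, not the answerer's. tol=None disables the default early-stopping tolerance so the model really trains for the full max_iter passes:

clf = Perceptron(max_iter=10000, eta0=0.1, tol=None, random_state=0)
clf.fit(x, y)

Whether this fully separates the classes still depends on the data and the run, so treat it as a suggestion rather than a guaranteed fix.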

Related

How to compute the distance of data points to decision boundary when using the EllipticEnvelope of sklearn?

How can I compute the Euclidean distance to the decision boundary of the EllipticEnvelope? Here is my code:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.model_selection import train_test_split
feature, output = "temperature", "consumption"
data = pd.DataFrame(np.random.normal(0,15, size=(2355,2)), columns=[feature, output])
X = data[[feature, output]]
X_train, X_test = train_test_split(X, shuffle=True, train_size=0.8)
model = EllipticEnvelope(contamination=0.18)
model.fit(X_train)
# extract the model predictions
y_pred = pd.Series(model.predict(X), index=X.index, name="anomaly")
# define the meshgrid : X = (u,v).T
u_min, u_max = X_train.iloc[:, 0].min() - 1.5, X_train.iloc[:, 0].max() + 1.5
v_min, v_max = X_train.iloc[:, 1].min() - 1.5, X_train.iloc[:, 1].max() + 1.5
n_points = 500
u = np.linspace(u_min, u_max, n_points)
v = np.linspace(v_min, v_max, n_points)
U, V = np.meshgrid(u, v)
# evaluate the decision function on the meshgrid
W = model.decision_function(np.c_[U.ravel(), V.ravel()])
W = W.reshape(U.shape)
plt.figure(figsize=(20,6))
a = plt.contour(U, V, W, levels=[0], linewidths=2, colors="black")
b = plt.scatter(X.loc[y_pred == 1].iloc[:, 0], X.loc[y_pred == 1].iloc[:, 1], c="yellowgreen", edgecolors='k')
c = plt.scatter(X.loc[y_pred == -1].iloc[:, 0], X.loc[y_pred == -1].iloc[:, 1], c="tomato", edgecolors='k')
plt.legend([a.collections[0], b, c], ['learned frontier', 'regular observations', 'abnormal observations'], bbox_to_anchor=(1.05, 1))
plt.axis('tight')
plt.show()
Edits
I am able to get the decision boundary points using the following code. Now the problem can be solved by computing the distance numerically.
for item in a.collections:
    for i in item.get_paths():
        v = i.vertices
        x = v[:, 0]
        y = v[:, 1]
I have an obvious solution: get all data points d and compute the Euclidean distance between d and each boundary point e = (x, y). But it is a brute-force technique. :D I will continue my research!
Another solution would be to fit an ellipse and compute the distance using the formula described by @epiliam here: https://math.stackexchange.com/questions/3670465/calculate-distance-from-point-to-ellipse-edge
I will provide one solution tomorrow based on the brute force. It seems to work well for small datasets (n_rows < 10000). I did not test larger ones.
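A minimal sketch of that brute-force idea (my addition, not from the original post), assuming a is the contour object and X the DataFrame from the code above: collect every vertex of the learned frontier, then take the minimum Euclidean distance from each observation to those vertices using NumPy broadcasting.

# gather every vertex of the learned frontier
boundary = np.concatenate([path.vertices
                           for item in a.collections
                           for path in item.get_paths()])
points = X.to_numpy()                        # shape (n_samples, 2)
# pairwise Euclidean distances, shape (n_samples, n_boundary_points)
dists = np.linalg.norm(points[:, None, :] - boundary[None, :, :], axis=2)
min_dist = pd.Series(dists.min(axis=1), index=X.index, name="distance_to_frontier")

This stores the full pairwise distance matrix in memory, so it is only practical for moderately sized datasets, which matches the "brute-force" caveat above.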

How to plot support vectors for support vector regression?

I am trying to solve hard margin support vector regression and plot hyperplane and support vectors for a dataset.
As you know, hard margin is solved with the below assumption:
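(The image with the assumption is missing here. Judging from the constraint checked in the code below, the hard-margin condition is presumably that every training point i satisfies $-\epsilon \le y_i - (\mathbf{w}\cdot\mathbf{x}_i + b) \le \epsilon$.)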
I solved the problem, but when I want to plot the decision boundaries and support vectors, I face the problem below: all of the points should be located between the two decision boundaries, and the support vectors should be drawn on the decision boundaries. Can you help me find the problem?
Here is the full code:
import pandas as pd
import numpy as np
from pandas import DataFrame
from sklearn import metrics
Data = pd.read_csv("Data.txt",delimiter="\t")
X=Data['waterlevel(x)'].values
y=Data['Area(y)'].values
# Plot Data
import matplotlib.pyplot as plt
fig,ax = plt.subplots(1, 1,constrained_layout=True,figsize=(8, 4))
ax.plot(X, y,'k.')
ax.set_title('Urmia lake Area versus Level')
ax.set_xlabel('Water level (M)',fontsize=15)
ax.set_ylabel('Area (km^2)',fontsize=15)
#plt.axis([0, 25, 0, 25])
plt.grid(True)
plt.show()
# find max and min values of predictor variables (here X) to use it to specify initial values of w and b
max_feature_value=np.amax(X)
min_feature_value=np.amin(X)
w_optimum = max_feature_value*0.5
w = [w_optimum for i in range(1)] # w should be a vector with the dimension of the independent features (here: 1)
wt_b=w
b_sum=0
for i in range(X.shape[0]):
    b_sum += y[i] - np.dot(wt_b, X[i])
b_ini = b_sum/len(X)
b_step_size_lower = 0.9
b_step_size_upper = 0.2
b_multiple = 500 # step size for b
b_range = np.arange((b_ini*b_step_size_lower), -b_ini*b_step_size_upper, b_multiple)
print(len(b_range))
# Estimate w and b using stochastic gradient descent and trial and error
l_rate=0.1
n_epoch = 250
epsilon=150 # acceptable error
length_Wvector_list=[]
for i in range(len(b_range)):
    correctly_regressed = True
    for j in range(X.shape[0]):
        print(i, j, wt_b, b_range[i])
        if (y[j] - (np.dot(wt_b, X[j]) + b_range[i]) > epsilon) or (y[j] - (np.dot(wt_b, X[j]) + b_range[i]) < -epsilon):
            correctly_regressed = False
            wt_b = np.asarray(wt_b) - l_rate
    if correctly_regressed:
        length_Wvector_list.append([wt_b[0], wt_b, b_range[i]])
    if wt_b[0] < 0:
        wt_b = w
        break
norms = sorted([n for n in length_Wvector_list])
wt_b=norms[0][1]
b=norms[0][2]
# Predict using the optimized values of w and b
y_predict=[]
for i in range(X.shape[0]):
    y_hat = np.dot(wt_b, X[i]) + b
    y_predict.append(y_hat)
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y, y_predict)))
print('Coefficient of determination (R2):', metrics.r2_score(y, y_predict))
# plot
fig,ax = plt.subplots(1, 1,figsize=(8, 5.2))
ax.scatter(y, y_predict, cmap='K', edgecolor='b',linewidth='0.5',alpha=1, label='testing points',marker='o', s=12)
ax.set_xlabel('Observed Area(km $^{2}$)',fontsize=14)
ax.set_ylabel('Simulated Area(km $^{2}$)',fontsize=14)
# find support vectors
positive_instances=[]
negative_instances=[]
for i in range(X.shape[0]):
    y_pre = np.dot(wt_b, X[i]) + b
    if y[i] - y_pre <= epsilon:
        positive_instances.append([y[i] - y_pre, [X[i], y[i]]])
    elif y[i] - y_pre >= -epsilon:
        negative_instances.append([y[i] - y_pre, [X[i], y[i]]])
len(positive_instances)+len(negative_instances)
sort_positive=sorted([n for n in positive_instances])
sort_negative=sorted([n for n in negative_instances])
positive_support_vector=sort_positive[0][1]
negative_support_vector=sort_negative[-1][1]
model_support_vectors=np.stack((positive_support_vector,negative_support_vector),axis=-1)
# visualize the data-set
colors = {1:'r',-1:'b'}
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.scatter(X,y,marker='o',c=y)
# plot support vectors
ax.scatter(model_support_vectors[0, :],model_support_vectors[1, :],s=200, linewidth=1,facecolors='none', edgecolors='b')
# hyperplane = x.w+b
# 0 = x.w+b
# psv = epsilon
# nsv = -epsilon
# dec = 0
def hyperplane_value(x, w, b, e):
    return np.dot(w, x) + b + e
datarange = (min_feature_value*1.,max_feature_value*1.)
hyp_x_min = datarange[0]
hyp_x_max = datarange[1]
# (w.x+b) = epsilon
# positive support vector hyperplane
psv1 = hyperplane_value(hyp_x_min, wt_b, b, epsilon)
psv2 = hyperplane_value(hyp_x_max, wt_b, b, epsilon)
ax.plot([hyp_x_min,hyp_x_max],[psv1,psv2], 'k')
# (w.x+b) = -epsilon
# negative support vector hyperplane
nsv1 = hyperplane_value(hyp_x_min, wt_b, b, -epsilon)
nsv2 = hyperplane_value(hyp_x_max, wt_b, b, -epsilon)
ax.plot([hyp_x_min,hyp_x_max],[nsv1,nsv2], 'k')
# (w.x+b) = 0
# positive support vector hyperplane
db1 = hyperplane_value(hyp_x_min, wt_b, b, 0)
db2 = hyperplane_value(hyp_x_max, wt_b, b, 0)
ax.plot([hyp_x_min,hyp_x_max],[db1,db2], 'y--')
#plt.axis([-5,10,-12,-1])
plt.show()
I improved the program and the problem was solved. As you can see, decision boundaries and support vectors are drawn in the correct position.
Here is the full code:
import pandas as pd
import numpy as np
from pandas import DataFrame
from sklearn import metrics
Data = pd.read_csv("Data.txt",delimiter="\t")
X=Data['waterlevel(x)'].values
y=Data['Area(y)'].values
# Plot Data
import matplotlib.pyplot as plt
fig,ax = plt.subplots(1, 1,constrained_layout=True,figsize=(8, 4))
ax.plot(X, y,'k.')
ax.set_title('Urmia lake Area versus Level')
ax.set_xlabel('Water level (M)',fontsize=15)
ax.set_ylabel('Area (km^2)',fontsize=15)
#plt.axis([0, 25, 0, 25])
plt.grid(True)
plt.show()
# find max and min values of predictor variables (here X) to use it to specify initial values of w and b
max_feature_value=np.amax(X)
min_feature_value=np.amin(X)
w_optimum = max_feature_value*0.5
w = [w_optimum for i in range(1)] # w should be a vector with the dimension of the independent features (here: 1)
wt_b=w
b_sum=0
for i in range(X.shape[0]):
    b_sum += y[i] - np.dot(wt_b, X[i])
b_ini = b_sum/len(X)
b_step_size_lower = 0.9
b_step_size_upper = 0.1
b_multiple = 500 # step size for b
b_range = np.arange((b_ini*b_step_size_lower), -b_ini*b_step_size_upper, b_multiple)
print(len(b_range))
# Estimate w and b using stochastic gradient descent and trial and error
l_rate=0.1
n_epoch = 250
epsilon=500 # acceptable error
length_Wvector_list=[]
for i in range(len(b_range)):
    print(i)
    optimized = False
    while not optimized:
        correctly_regressed = True
        for j in range(X.shape[0]):
            # every data point should satisfy the constraint |yi - (np.dot(w_t, xi) + b)| <= epsilon
            if (y[j] - (np.dot(wt_b, X[j]) + b_range[i]) > epsilon) or (y[j] - (np.dot(wt_b, X[j]) + b_range[i]) < -epsilon):
                correctly_regressed = False
                wt_b = np.asarray(wt_b) - l_rate
        if correctly_regressed:
            length_Wvector_list.append([wt_b[0], wt_b, b_range[i]])  # store w, b for the minimum magnitude; the magnitude (length) of the vector w_t is called the norm
            optimized = True
        if wt_b[0] < 0:
            optimized = True
            wt_b_temp = wt_b
            wt_b = w
norms = sorted([n for n in length_Wvector_list])
wt_b=norms[0][1]
b=norms[0][2]
# Predict using the optimized values of w and b
y_predict=[]
for i in range(X.shape[0]):
    y_hat = np.dot(wt_b, X[i]) + b
    y_predict.append(y_hat)
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y, y_predict)))
print('Coefficient of determination (R2):', metrics.r2_score(y, y_predict))
# plot
fig,ax = plt.subplots(1, 1,figsize=(8, 5.2))
ax.scatter(y, y_predict, cmap='K', edgecolor='b',linewidth='0.5',alpha=1, label='testing points',marker='o', s=12)
ax.set_xlabel('Observed Area(km $^{2}$)',fontsize=14)
ax.set_ylabel('Simulated Area(km $^{2}$)',fontsize=14)
ax.set_xlim([min(y)-100, max(y)+100])
ax.set_ylim([min(y)-100, max(y)+100])
# find support vectors
positive_instances=[]
negative_instances=[]
for i in range(X.shape[0]):
    y_pre = np.dot(wt_b, X[i]) + b
    if (y[i] - y_pre > 0) and (y[i] - y_pre <= epsilon):
        positive_instances.append([y[i] - y_pre, [X[i], y[i]]])
    elif (y[i] - y_pre < 0) and (y[i] - y_pre >= -epsilon):
        negative_instances.append([y[i] - y_pre, [X[i], y[i]]])
len(positive_instances)+len(negative_instances)
sort_positive=sorted([n for n in positive_instances])
sort_negative=sorted([n for n in negative_instances])
positive_support_vector=sort_positive[-1][1]
negative_support_vector=sort_negative[0][1]
model_support_vectors=np.stack((positive_support_vector,negative_support_vector),axis=-1)
# visualize the data-set
colors = {1:'r',-1:'b'}
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.scatter(X,y,marker='o',c=y)
# plot support vectors
ax.scatter(model_support_vectors[0, :],model_support_vectors[1, :],s=200, linewidth=1,facecolors='none', edgecolors='b')
# hyperplane = x.w+b
# 0 = x.w+b
# psv = epsilon
# nsv = -epsilon
# dec = 0
def hyperplane_value(x, w, b, e):
    return np.dot(w, x) + b + e
datarange = (min_feature_value*1.,max_feature_value*1.)
hyp_x_min = datarange[0]
hyp_x_max = datarange[1]
# (w.x+b) = epsilon
# positive support vector hyperplane
psv1 = hyperplane_value(hyp_x_min, wt_b, b, epsilon)
psv2 = hyperplane_value(hyp_x_max, wt_b, b, epsilon)
ax.plot([hyp_x_min,hyp_x_max],[psv1,psv2], 'k')
# (w.x+b) = -epsilon
# negative support vector hyperplane
nsv1 = hyperplane_value(hyp_x_min, wt_b, b, -epsilon)
nsv2 = hyperplane_value(hyp_x_max, wt_b, b, -epsilon)
ax.plot([hyp_x_min,hyp_x_max],[nsv1,nsv2], 'k')
# (w.x+b) = 0
# positive support vector hyperplane
db1 = hyperplane_value(hyp_x_min, wt_b, b, 0)
db2 = hyperplane_value(hyp_x_max, wt_b, b, 0)
ax.plot([hyp_x_min,hyp_x_max],[db1,db2], 'y--')
#plt.axis([-5,10,-12,-1])
plt.show()

Sklearn BIC criterion : differents optimum values of k for clustering

I want to determine the best value of k (number of clusters) for the KMeans algo and a dataset.
I found a resource in the documentation of sklearn: Gaussian Mixture Model Selection using the BIC criterion.
I found an example of code on the site that I adapted to my dataset.
But each run of this code gives a different optimal value of k. Why?
Here is the code:
import numpy as np
import pandas as pd
import itertools
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)
# Number of samples per component
n_samples = 440
path = 'C:/Users/Lionel/Downloads'
file = 'Wholesale customers data.csv'
data = pd.read_csv(path + '/'+file)
X = np.array(data.iloc[:,2 :])
lowest_bic = np.infty
bic = []
n_components_range = range(1, 12)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
    for n_components in n_components_range:
        # Fit a Gaussian mixture with EM
        gmm = mixture.GaussianMixture(n_components=n_components,
                                      covariance_type=cv_type)
        gmm.fit(X)
        bic.append(gmm.bic(X))
        if bic[-1] < lowest_bic:
            lowest_bic = bic[-1]
            best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
clf = best_gmm
print(clf)
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
#spl = plt.plot()
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
    xpos = np.array(n_components_range) + .2 * (i - 2)
    bars.append(plt.bar(xpos, bic[i * len(n_components_range):
                                  (i + 1) * len(n_components_range)],
                        width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, cov, color) in enumerate(zip(clf.means_, clf.covariances_,
                                           color_iter)):
    v, w = linalg.eigh(cov)
    if not np.any(Y_ == i):
        continue
    plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
    # Plot an ellipse to show the Gaussian component
    angle = np.arctan2(w[0][1], w[0][0])
    angle = 180. * angle / np.pi  # convert to degrees
    v = 2. * np.sqrt(2.) * np.sqrt(v)
    ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(.5)
    splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
Here the link to my dataset :
https://drive.google.com/open?id=1yMw1rMh12ml6Lh3yrL6WDLbEnLM-SmiN
Have you an explanation for this behaviour ?
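A likely explanation (my note, the original thread has no answer here): GaussianMixture is fitted with EM from a random initialization, so each run can converge to a different local optimum and therefore report a different BIC winner. Fixing the seed and/or keeping the best of several initializations makes the selection reproducible; in the sketch below only the n_init and random_state arguments are added to the posted code.

gmm = mixture.GaussianMixture(n_components=n_components,
                              covariance_type=cv_type,
                              n_init=10,       # keep the best of 10 EM runs
                              random_state=0)  # fix the seed for reproducibility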

Trapezoidal wave in Python

How do I generate a trapezoidal wave in Python?
I looked into the modules such as SciPy and NumPy, but in vain. Is there a module such as the scipy.signal.gaussian which returns an array of values representing the Gaussian function wave?
I generated this using the trapezoidal kernel of Astropy, Trapezoid1DKernel(30, slope=1.0). I want to implement this in Python without using Astropy.
While the width and the slope are sufficient to define a triangular signal, you would need a third parameter for a trapezoidal signal: the amplitude.
Using those three parameters, you can easily adjust the scipy.signal.sawtooth function to give you a trapezoidal shape by truncating and offsetting the triangular-shaped function.
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
def trapzoid_signal(t, width=2., slope=1., amp=1., offs=0):
    a = slope*width*signal.sawtooth(2*np.pi*t/width, width=0.5)/4.
    a[a>amp/2.] = amp/2.
    a[a<-amp/2.] = -amp/2.
    return a + amp/2. + offs
t = np.linspace(0, 6, 501)
plt.plot(t,trapzoid_signal(t, width=2, slope=2, amp=1.), label="width=2, slope=2, amp=1")
plt.plot(t,trapzoid_signal(t, width=4, slope=1, amp=0.6), label="width=4, slope=1, amp=0.6")
plt.legend( loc=(0.25,1.015))
plt.show()
Note that you may also like to define a phase, depending on the use case.
In order to define a single pulse, you might want to modify the function a bit and supply an array which ranges over [0,width].
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
def trapzoid_signal(t, width=2., slope=1., amp=1., offs=0):
    a = slope*width*signal.sawtooth(2*np.pi*t/width, width=0.5)/4.
    a += slope*width/4.
    a[a>amp] = amp
    return a + offs
for w, s, a in zip([2, 5], [2, 1], [1, 0.6]):
    t = np.linspace(0, w, 501)
    l = "width={}, slope={}, amp={}".format(w, s, a)
    plt.plot(t, trapzoid_signal(t, width=w, slope=s, amp=a), label=l)
plt.legend( loc="upper right")
plt.show()
From the SciPy website it looks like this isn't included (they currently have sawtooth and square, but not trapezoid). As a generalised version of the C example, the following will do what you want:
import numpy as np
import matplotlib.pyplot as plt
def trapezoidalWave(xin, width=1., slope=1.):
    x = xin % (4*width)
    if x <= width:
        # Ascending line
        return x*slope
    elif x <= 2.*width:
        # Top horizontal line
        return width*slope
    elif x <= 3.*width:
        # Descending line
        return 3.*width*slope - x*slope
    elif x <= 4*width:
        # Bottom horizontal line
        return 0.

x = np.linspace(0., 20, 1000)
for i in x:
    plt.plot(i, trapezoidalWave(i), 'k.')
    plt.plot(i, trapezoidalWave(i, 1.5, 2.), 'r.')
plt.show()
which looks like,
This can be done more elegantly with Heaviside functions which allow you to use NumPy arrays,
import numpy as np
import matplotlib.pyplot as plt
def H(x):
    return 0.5 * (np.sign(x) + 1)

def trapWave(xin, width=1., slope=1.):
    x = xin % (4*width)
    y = ((H(x)-H(x-width))*x*slope +
         (H(x-width)-H(x-2.*width))*width*slope +
         (H(x-2.*width)-H(x-3.*width))*(3.*width*slope - x*slope))
    return y
x = np.linspace(0.,20,1000)
plt.plot(x, trapWave(x))
plt.plot(x, trapWave(x, 1.5, 2.))
plt.show()
For this example, the Heaviside version is about 20 times faster!
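A quick way to check that speed claim yourself (a sketch I added, assuming the trapezoidalWave and trapWave functions defined above; the exact ratio depends on the machine):

import timeit
import numpy as np

x = np.linspace(0., 20, 1000)
loop_time = timeit.timeit(lambda: [trapezoidalWave(i) for i in x], number=100)
vec_time = timeit.timeit(lambda: trapWave(x), number=100)
print("scalar loop: %.3fs, vectorised Heaviside: %.3fs" % (loop_time, vec_time))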
The example below shows how to get the points and display the resulting wave.
Equation based on reply: Equation for trapezoidal wave equation
import math
import numpy as np
import matplotlib.pyplot as plt
def get_wave_point(x, a, m, l, c):
    # Equation from: https://stackoverflow.com/questions/11041498/equation-for-trapezoidal-wave-equation
    # a/pi(arcsin(sin((pi/m)x+l))+arccos(cos((pi/m)x+l)))-a/2+c
    # a is the amplitude
    # m is the period
    # l is the horizontal transition
    # c is the vertical transition
    point = a/math.pi*(math.asin(math.sin((math.pi/m)*x+l))+math.acos(math.cos((math.pi/m)*x+l)))-a/2+c
    return point
print('Testing wave')
x = np.linspace(0., 10, 1000)
listofpoints = []
for i in x:
    plt.plot(i, get_wave_point(i, 5, 2, 50, 20), 'k.')
    listofpoints.append(get_wave_point(i, 5, 2, 50, 20))
print('List of points : {} '.format(listofpoints))
plt.show()
The whole credit goes to @ImportanceOfBeingErnest. I am just making some edits to his code, which just made my day.
from scipy import signal
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
def trapzoid_signal(t, width=2., slope=1., amp=1., offs=0):
    a = slope*width*signal.sawtooth(2*np.pi*t/width, width=0.5)/4.
    a += slope*width/4.
    a[a>amp] = amp
    return a + offs
for w, s, a in zip([32], [1], [0.0322]):
    t = np.linspace(0, w, 34)
    plt.plot(t, trapzoid_signal(t, width=w, slope=s, amp=a))
plt.show()
The result:
I'll throw a very late hat into this ring, namely, a function using only numpy that produces a single (symmetric) trapezoid at a desired location, with all the usual parameters. Also posted here
import numpy as np
def trapezoid(x, center=0, slope=1, width=1, height=1, offset=0):
    """
    For given array x, returns a (symmetric) trapezoid with plateau at y=h (or -h if
    slope is negative), centered at center value of "x".
    Note: Negative widths and heights just converted to 0

    Parameters
    ----------
    x : array_like
        array of x values at which the trapezoid should be evaluated
    center : float
        x coordinate of the center of the (symmetric) trapezoid
    slope : float
        slope of the sides of the trapezoid
    width : float
        width of the plateau of the trapezoid
    height : float
        (positive) vertical distance between the base and plateau of the trapezoid
    offset : array_like
        vertical shift (either single value or the same shape as x) to add to y before returning

    Returns
    -------
    y : array_like
        y value(s) of trapezoid with above parameters, evaluated at x
    """
    # ---------- input checking ----------
    if width < 0: width = 0
    if height < 0: height = 0

    x = np.asarray(x)

    slope_negative = slope < 0
    slope = np.abs(slope)  # Do all calculations with positive slope, invert at end if necessary

    # ---------- Calculation ----------
    y = np.zeros_like(x)
    mask_left = x - center < -width/2.0
    mask_right = x - center > width/2.0

    y[mask_left] = slope*(x[mask_left] - center + width/2.0)
    y[mask_right] = -slope*(x[mask_right] - center - width/2.0)

    y += height       # Shift plateau up to y=h
    y[y < 0] = 0      # cut off below zero (so that trapezoid flattens off at "offset")
    if slope_negative: y = -y  # invert non-plateau

    return y + offset
Which outputs something like
import matplotlib.pyplot as plt
plt.style.use("seaborn-colorblind")
x = np.linspace(-5,5,1000)
for i in range(1, 4):
    plt.plot(x, trapezoid(x, center=0, slope=1, width=i, height=i, offset=0), label=f"width = height = {i}\nslope=1")
plt.plot(x, trapezoid(x, center=0, slope=-1, width=2.5, height=1, offset=0), label=f"width = height = 1.5,\nslope=-1")
plt.ylim((-2.5,3.5))
plt.legend(frameon=False, loc='lower center', ncol=2)
Example output:

Recreating decision-boundary plot in python with scikit-learn and matplotlib

I found this wonderful graph in the post Variation on "How to plot decision boundary of a k-nearest neighbor classifier from Elements of Statistical Learning?". In this example k-NN is used to classify data into three classes. I especially enjoy that it features the probability of class membership as an indication of the "confidence".
R and ggplot seem to do a great job. I wonder whether this can be re-created in Python? My initial thought tends towards scikit-learn and matplotlib. Here is the iris example from scikit-learn:
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
    clf.fit(X, y)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title("3-Class classification (k = %i, weights = '%s')"
              % (n_neighbors, weights))
plt.show()
This produces a graph in a sense very similar:
I have three questions:
How can I introduce the confidence to the plot?
How can I plot the decision-boundaries with a connected line?
Let's say I have a new observation, how can I introduce it to the plot and plot if it is classified correctly?
I stumbled upon your question about a year ago, and loved the plot -- I just never got around to answering it, until now. Hopefully the code comments below are self-explanatory enough (I also blogged about it, if you want more details). Maybe four years too late, haha.
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from matplotlib.lines import Line2D
from matplotlib.ticker import MaxNLocator
from sklearn import neighbors
iris = datasets.load_iris()
x = iris.data[:,0:2]
y = iris.target
# create the x0, x1 feature
x0 = x[:,0]
x1 = x[:,1]
# set main parameters for KNN plot
N_NEIGHBORS = 15 # KNN number of neighbors
H = 0.1 # mesh stepsize
PROB_DOT_SCALE = 40 # modifier to scale the probability dots
PROB_DOT_SCALE_POWER = 3 # exponential used to increase/decrease size of prob dots
TRUE_DOT_SIZE = 50 # size of the true labels
PAD = 1.0 # how much to "pad" around the true labels
clf = neighbors.KNeighborsClassifier(N_NEIGHBORS, weights='uniform')
clf.fit(x, y)
# find the min/max points for both x0 and x1 features
# these min/max values will be used to set the bounds
# for the plot
x0_min, x0_max = np.round(x0.min())-PAD, np.round(x0.max()+PAD)
x1_min, x1_max = np.round(x1.min())-PAD, np.round(x1.max()+PAD)
# create 1D arrays representing the range of probability data points
# on both the x0 and x1 axes.
x0_axis_range = np.arange(x0_min,x0_max, H)
x1_axis_range = np.arange(x1_min,x1_max, H)
# create meshgrid between the two axis ranges
xx0, xx1 = np.meshgrid(x0_axis_range, x1_axis_range)
# put the xx in the same dimensional format as the original x
# because it's easier to work with that way (at least for me)
# * shape will be: [no_dots, no_dimensions]
# where no_dimensions = 2 (x0 and x1 axis)
xx = np.reshape(np.stack((xx0.ravel(),xx1.ravel()),axis=1),(-1,2))
yy_hat = clf.predict(xx) # prediction of all the little dots
yy_prob = clf.predict_proba(xx) # probability of each dot being
# the predicted color
yy_size = np.max(yy_prob, axis=1)
# make figure
plt.style.use('seaborn-whitegrid') # set style because it looks nice
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8,6), dpi=150)
# establish colors and colormap
# * color blind colors, from
# https://towardsdatascience.com/two-simple-steps-to-create-colorblind-friendly-data-visualizations-2ed781a167ec
redish = '#d73027'
orangeish = '#fc8d59'
yellowish = '#fee090'
blueish = '#4575b4'
colormap = np.array([redish,blueish,orangeish])
# plot all the little dots, position defined by the xx values, color
# defined by the knn predictions (yy_hat), and size defined by the
# probability of that color (yy_prob)
# * because the yy_hat values are either 0, 1, 2, we can use
# these as values to index into the colormap array
# * size of dots (the probability) increases exponentially (^3), so that there is
# a nice difference between different probabilities. I'm sure there is a more
# elegant way to do this though...
# * linewidths=0 so that there are no "edges" around the dots
ax.scatter(xx[:,0], xx[:,1], c=colormap[yy_hat], alpha=0.4,
s=PROB_DOT_SCALE*yy_size**PROB_DOT_SCALE_POWER, linewidths=0,)
# plot the contours
# * we have to reshape the yy_hat to get it into a
# 2D dimensional format, representing both the x0
# and x1 axis
# * the number of levels and color scheme was manually tuned
# to make sense for this data. Would probably change, for
# instance, if there were 4, or 5 (etc.) classes
ax.contour(x0_axis_range, x1_axis_range,
np.reshape(yy_hat,(xx0.shape[0],-1)),
levels=3, linewidths=1,
colors=[redish,blueish, blueish,orangeish,])
# plot the original x values.
# * zorder is 3 so that the dots appear above all the other dots
ax.scatter(x[:,0], x[:,1], c=colormap[y], s=TRUE_DOT_SIZE, zorder=3,
linewidths=0.7, edgecolor='k')
# create legends
x_min, x_max = ax.get_xlim()
y_min, y_max = ax.get_ylim()
# set x-y labels
ax.set_ylabel(r"$x_1$")
ax.set_xlabel(r"$x_0$")
# create class legend
# Line2D properties: https://matplotlib.org/stable/api/_as_gen/matplotlib.lines.Line2D.html
# about size of scatter plot points: https://stackoverflow.com/a/47403507/9214620
legend_class = []
for flower_class, color in zip(['c', 's', 'v'], [blueish, redish, orangeish]):
    legend_class.append(Line2D([0], [0], marker='o', label=flower_class, ls='None',
                               markerfacecolor=color, markersize=np.sqrt(TRUE_DOT_SIZE),
                               markeredgecolor='k', markeredgewidth=0.7))
# iterate over each of the probabilities to create prob legend
prob_values = [0.4, 0.6, 0.8, 1.0]
legend_prob = []
for prob in prob_values:
    legend_prob.append(Line2D([0], [0], marker='o', label=prob, ls='None', alpha=0.8,
                              markerfacecolor='grey',
                              markersize=np.sqrt(PROB_DOT_SCALE*prob**PROB_DOT_SCALE_POWER),
                              markeredgecolor='k', markeredgewidth=0))
legend1 = ax.legend(handles=legend_class, loc='center',
bbox_to_anchor=(1.05, 0.35),
frameon=False, title='class')
legend2 = ax.legend(handles=legend_prob, loc='center',
bbox_to_anchor=(1.05, 0.65),
frameon=False, title='prob', )
ax.add_artist(legend1) # add legend back after it disappears
ax.set_yticks(np.arange(x1_min,x1_max, 1)) # I don't like the decimals
ax.grid(False) # remove gridlines (inherited from 'seaborn-whitegrid' style)
# only use integers for axis tick labels
# from: https://stackoverflow.com/a/34880501/9214620
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
# set the aspect ratio to 1, for looks
ax.set_aspect(1)
# remove first ticks from axis labels, for looks
# from: https://stackoverflow.com/a/19503828/9214620
ax.set_xticks(ax.get_xticks()[1:-1])
ax.set_yticks(np.arange(x1_min,x1_max, 1)[1:])
plt.show()
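As for the third question (classifying a brand-new observation and showing whether it lands in the right region), the answer above does not cover it explicitly. A minimal sketch, assuming the clf, ax and colormap objects from the code above and a made-up measurement; add it just before plt.show():

new_obs = np.array([[5.0, 3.5]])              # hypothetical new flower measurement
new_pred = clf.predict(new_obs)[0]            # predicted class: 0, 1 or 2
new_prob = clf.predict_proba(new_obs).max()   # probability of that prediction
ax.scatter(new_obs[:, 0], new_obs[:, 1], marker='*', s=200,
           color=colormap[new_pred], edgecolor='k', zorder=4)
print("predicted class %d with probability %.2f" % (new_pred, new_prob))
# if the true label is known, correctness is simply new_pred == true_label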
