Having Problems With Implementing the Perceptron Algorithm in Python - python-3.x

I'm having trouble debugging the following code: for some reason the perceptron stops updating itself after a couple of steps, with random values as the weights. I have tried not using a class and trimming everything down to the bare minimum, but I still had the same problem. I have also checked Perceptron.guess(), and it works just fine, so I'm guessing the main problem is with the train function itself. I am kind of new to Python programming, so any help would be appreciated, guys.
import random
import matplotlib.pyplot as plt
import numpy as np

#-----Function of the line that separates the two different data types-----#
def f(x):
    return x

#-----Activation function-----#
def act(x):
    if x >= 0:
        return 1.0
    return 0.0

class Point:
    def __init__(self, x, y):
        self.X = x
        self.Y = y
        if y > f(x):
            self.Target = 1.0
        else:
            self.Target = 0.0

class Perceptron:
    def __init__(self, n, actFunc=act, lr=0.2):
        self.Weights = [0 for i in range(n)]
        self.ActFunc = actFunc
        self.LR = lr

    def guess(self, inputs):
        valSum = 0
        for i in range(len(inputs)):
            valSum += self.Weights[i] * inputs[i]
        return self.ActFunc(valSum)

    def train(self, inputs, target):
        cal = self.guess(inputs)
        err = target - cal
        for i in range(0, len(self.Weights)):
            self.Weights[i] += self.LR * err * inputs[i]

    def printWeights(self):
        for i in range(len(self.Weights)):
            print("WEIGHT[" + str(i) + "] = " + str(self.Weights[i]))
        print("")

    def lineFunc(self):
        # y = w0 + w1x + w2y
        # (1 - w2)y = w0 + w1x
        # y = w0/(1 - w2) + w1/(1 - w2)x
        w0 = self.Weights[0]
        w1 = self.Weights[1]
        w2 = self.Weights[2]
        return (str(w0 / (1 - w2)) + " + " + str(w1 / (1 - w2)) + " * x")

#-----INITIALISING DATA-----#
brain = Perceptron(3)
n = 20
points = [Point(random.uniform(-10, 10), random.uniform(-10, 10)) for x in range(n)]
t = 1000

#-----Training-----#
for i in range(t):
    point = points[random.randrange(0, n)]
    brain.train([1, point.X, point.Y], point.Target)
    brain.printWeights()
print(brain.lineFunc())

I did find the problem myself. There was a bug in the lineFunc() method: the return value was wrong, and it should have been:
return (str(-w0/w2) + " + " + str(-w1/w2) + " * x")
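For context, the corrected expression follows from setting the weighted sum to zero at the decision boundary: w0 + w1*x + w2*y = 0, which solves to y = -w0/w2 - (w1/w2)*x (the derivation in the original comment mistakenly treated the weighted sum as equal to y). A minimal corrected version of the method, as a sketch keeping the original names:

def lineFunc(self):
    # Decision boundary: w0 + w1*x + w2*y = 0  =>  y = -w0/w2 - (w1/w2)*x
    w0, w1, w2 = self.Weights
    return str(-w0 / w2) + " + " + str(-w1 / w2) + " * x"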

Related

How to remove inplace operation error in Pytorch?

I get this error from the following Pytorch code:
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.DoubleTensor [3]] is at version 10; expected version 9 instead.
As far as I can see, the code does not have in-place operations.
import torch

device = torch.device('cpu')

class MesNet(torch.nn.Module):
    def __init__(self):
        super(MesNet, self).__init__()
        self.cov_lin = torch.nn.Sequential(torch.nn.Linear(6, 5)).double()

    def forward(self, u):
        z_cov = self.cov_lin(u.transpose(0, 2).squeeze(-1))
        return z_cov

class UpdateModel(torch.nn.Module):
    def __init__(self):
        torch.nn.Module.__init__(self)
        self.P_dim = 18
        self.Id3 = torch.eye(3).double()

    def run_KF(self):
        N = 10
        u = torch.randn(N, 6).double()
        v = torch.zeros(N, 3).double()
        model = MesNet()
        measurements_covs_l = model(u.t().unsqueeze(0))
        # remember to remove this afterwards
        torch.autograd.set_detect_anomaly(True)
        for i in range(1, N):
            v[i] = self.update_pos(v[i].detach(), measurements_covs_l[i-1])
        criterion = torch.nn.MSELoss(reduction="sum")
        targ = torch.rand(10, 3).double()
        loss = criterion(v, targ)
        loss = torch.mean(loss)
        loss.backward()
        return v, p

    def update_pos(self, v, measurement_cov):
        Omega = torch.eye(3).double()
        H = torch.ones((5, self.P_dim)).double()
        R = torch.diag(measurement_cov)
        Kt = H.t().mm(torch.inverse(R))
        # it is indicating inplace error even with this:
        # Kt = H.t().mm(R)
        dx = Kt.mv(torch.ones(5).double())
        dR = self.trans(dx[:9].clone())
        v_up = dR.mv(v)
        return v_up

    def trans(self, xi):
        phi = xi[:3].clone()
        angle = torch.norm(phi.clone())
        if angle.abs().lt(1e-10):
            skew_phi = torch.eye(3).double()
            J = self.Id3 + 0.5 * skew_phi
            Rot = self.Id3 + skew_phi
        else:
            axis = phi / angle
            skew_axis = torch.eye(3).double()
            s = torch.sin(angle)
            c = torch.cos(angle)
            Rot = c * self.Id3
        return Rot

net = UpdateModel()
net.run_KF()
I think the issue is that you are overwriting the elements of v in place.
You could instead collect the results in an auxiliary list v_ inside the loop, then stack it into a tensor:

v_ = [v[0]]
for i in range(1, N):
    v_.append(self.update_pos(v[i].detach(), measurements_covs_l[i-1]))
v = torch.stack(v_)
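To illustrate the pattern in isolation (a minimal self-contained sketch; f below is a hypothetical stand-in for update_pos, not code from the question):

import torch

w = torch.ones(3, requires_grad=True)

def f(prev):
    # Hypothetical stand-in for update_pos: any differentiable step.
    return prev * w

# Accumulate results in a Python list instead of writing rows of a
# preallocated tensor in place, then stack once at the end.
rows = [torch.ones(3)]
for i in range(1, 5):
    rows.append(f(rows[-1]))
v = torch.stack(rows)  # built out-of-place, so autograd can track every step
v.sum().backward()     # gradients flow back through the whole chain
print(w.grad)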

convert Tensorflow1 to Tensorflow 2

There is code written with TensorFlow 1 at this link:
https://github.com/carlthome/tensorflow-convlstm-cell/blob/master/cell.py
I want to use this class as a layer in tf.keras, so it should be rewritten for TensorFlow 2.
How can I do that?
This is the code:
import tensorflow as tf

class ConvLSTMCell(tf.nn.rnn_cell.RNNCell):
    """A LSTM cell with convolutions instead of multiplications.

    Reference:
      Xingjian, S. H. I., et al. "Convolutional LSTM network: A machine
      learning approach for precipitation nowcasting." Advances in Neural
      Information Processing Systems. 2015.
    """

    def __init__(self, shape, filters, kernel, forget_bias=1.0, activation=tf.tanh, normalize=True, peephole=True, data_format='channels_last', reuse=None):
        super(ConvLSTMCell, self).__init__(_reuse=reuse)
        self._kernel = kernel
        self._filters = filters
        self._forget_bias = forget_bias
        self._activation = activation
        self._normalize = normalize
        self._peephole = peephole
        if data_format == 'channels_last':
            self._size = tf.TensorShape(shape + [self._filters])
            self._feature_axis = self._size.ndims
            self._data_format = None
        elif data_format == 'channels_first':
            self._size = tf.TensorShape([self._filters] + shape)
            self._feature_axis = 0
            self._data_format = 'NC'
        else:
            raise ValueError('Unknown data_format')

    @property
    def state_size(self):
        return tf.nn.rnn_cell.LSTMStateTuple(self._size, self._size)

    @property
    def output_size(self):
        return self._size

    def call(self, x, state):
        c, h = state
        x = tf.concat([x, h], axis=self._feature_axis)
        n = x.shape[-1].value
        m = 4 * self._filters if self._filters > 1 else 4
        W = tf.get_variable('kernel', self._kernel + [n, m])
        y = tf.nn.convolution(x, W, 'SAME', data_format=self._data_format)
        if not self._normalize:
            y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
        j, i, f, o = tf.split(y, 4, axis=self._feature_axis)
        if self._peephole:
            i += tf.get_variable('W_ci', c.shape[1:]) * c
            f += tf.get_variable('W_cf', c.shape[1:]) * c
        if self._normalize:
            j = tf.contrib.layers.layer_norm(j)
            i = tf.contrib.layers.layer_norm(i)
            f = tf.contrib.layers.layer_norm(f)
        f = tf.sigmoid(f + self._forget_bias)
        i = tf.sigmoid(i)
        c = c * f + i * self._activation(j)
        if self._peephole:
            o += tf.get_variable('W_co', c.shape[1:]) * c
        if self._normalize:
            o = tf.contrib.layers.layer_norm(o)
            c = tf.contrib.layers.layer_norm(c)
        o = tf.sigmoid(o)
        h = o * self._activation(c)
        state = tf.nn.rnn_cell.LSTMStateTuple(c, h)
        return h, state
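No TensorFlow 2 port was posted here, but as a hedged starting point: tf.keras ships a built-in convolutional LSTM layer, tf.keras.layers.ConvLSTM2D, which covers the core of this cell (convolutional gates). To my knowledge it does not expose the peephole connections or layer normalization used above, so it is an approximate replacement rather than a line-for-line conversion. A minimal sketch with illustrative shapes:

import tensorflow as tf

# Rough TF2 equivalent of the cell above, minus peepholes and layer norm.
model = tf.keras.Sequential([
    tf.keras.layers.ConvLSTM2D(
        filters=12,                      # corresponds to `filters` above
        kernel_size=(3, 3),              # corresponds to `kernel` above
        padding='same',
        data_format='channels_last',
        return_sequences=True,           # emit the full output sequence
        input_shape=(None, 16, 16, 1),   # (time, height, width, channels); example values
    ),
])
model.summary()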

Matplotlib animation not showing (Gradient Descent Test)

I tried to create a matplotlib animation to practice using gradient descent for linear regression, but I can't get the animation to work.
I managed to get the animation to appear by calling anim.show(), but this raised an AttributeError because the animation class has no such method. I have no idea why it nevertheless makes the animation show up.
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation

def main():
    # Initialize Dataset
    X = 10 * np.random.rand(50)
    y = 8 * X + 1 + 2.5 * np.random.randn(50)
    model = LinearRegression()
    model.train(X, y)
    model.animate(X, y)

class LinearRegression():
    # Using Gradient Descent for Linear Regression
    def __init__(self, learning_rate=0.001, epochs=100):
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.a_0 = 0
        self.a_1 = 0
        self.w_list = []

    def train(self, X, y):
        n = X.shape[0]
        for i in range(self.epochs):
            self.w_list.append([self.a_0, self.a_1])
            y_train = self.a_0 + self.a_1 * X
            error = y - y_train  # Whether you use y_train - y or y - y_train will make a difference
            mse = np.sum(error ** 2) / n
            self.a_0 -= -2/n * np.sum(error) * self.learning_rate
            self.a_1 -= -2/n * np.sum(error * X) * self.learning_rate
            # if i % 10 == 0:
            #     print("MSE", str(i) + ":", mse)
        self.w_list = np.array(self.w_list)

    def animate(self, X, y):
        fig, ax = plt.subplots()
        ax.scatter(X, y)
        plot_range = np.array(range(int(min(X)) - 1, int(max(X)) + 3))
        a_0, a_1 = self.w_list[0,]
        y_plot = plot_range * a_1 + a_0
        ln, = ax.plot(plot_range, y_plot, color="red", label="Best Fit")

        def animator(frame):
            a_0, a_1 = self.w_list[frame,]
            y_plot = plot_range * a_1 + a_0
            ln.set_data(plot_range, y_plot)

        print("Launching Animation")
        anim = animation.FuncAnimation(fig, func=animator, frames=self.epochs)
        anim.show()

if __name__ == "__main__":
    main()
You need to call plt.show() to open the plot window. The rest of the code is identical to the question; only the last line of animate changes, replacing anim.show() with plt.show():

        print("Launching Animation")
        anim = animation.FuncAnimation(fig, func=animator, frames=self.epochs)
        plt.show()
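One caveat worth adding (not part of the original answer, but a known matplotlib gotcha): the FuncAnimation object must stay referenced while the window is open, or it can be garbage-collected and the animation freezes; assigning it to a variable that outlives the call, as above, is enough. A minimal self-contained sketch of the same pattern:

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots()
x = np.linspace(0, 2 * np.pi, 100)
ln, = ax.plot(x, np.sin(x))

def update(frame):
    # Shift the sine wave a little on every frame.
    ln.set_ydata(np.sin(x + 0.1 * frame))
    return ln,

# Keep a reference to the animation object, then open the window.
anim = animation.FuncAnimation(fig, update, frames=100, interval=30)
plt.show()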

Backpropagation (matrixis not aligned)

Well, the problem is with delta1. I've checked the math a couple of times and it seems good to me; everything should be correct with delta2, but it doesn't line up with W2 transposed. Here is the backpropagation:
def backward(self, X, Y):
    X = np.array(X)
    Y = np.array(Y)
    delta2 = -(Y - self.yHat) * self.deriv_sigmoid(self.a2)
    dJdW2 = np.dot(self.a2.T, delta2)
    delta1 = np.dot(delta2, self.W2.T) * self.deriv_sigmoid(self.a1)
    dJdW1 = np.dot(X.T, delta1)
    return dJdW1, dJdW2
Here is the forward propagation:

def forward(self, X):
    self.X = X
    self.a1 = np.dot(self.W1, X)
    self.Z1 = self.sigmoid(self.a1)
    self.a2 = np.dot(self.W2, self.Z1)
    self.yHat = self.sigmoid(self.a2)
    return self.yHat
And here is the file from which I call it:

NN = nn.Neural_Network(2, 3, 1)
X = [[1], [1]]
Y = [[1]]
yHat = NN.forward(X)
dJdW1, dJdW2 = NN.backward(X, Y)

I've tried swapping the arguments in np.dot(), but everything seems to be correct. Here is the full code: https://hastebin.com/ikijahecaz.py
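No answer was recorded for this question, but one observation worth hedging: the forward pass uses the column-vector convention (a1 = W1 · X), while the backward pass borrows formulas from the row-vector convention (X · W1), so the shapes cannot align. A minimal sketch of the backward pass kept consistently in the column-vector convention (the standalone helpers and the 2-3-1 shapes are illustrative, not taken from the linked code):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def deriv_sigmoid(x):
    s = sigmoid(x)
    return s * (1.0 - s)

# A 2-3-1 network with column-vector inputs (illustrative shapes).
W1 = np.random.randn(3, 2)    # hidden x input
W2 = np.random.randn(1, 3)    # output x hidden
X = np.array([[1.0], [1.0]])  # (2, 1)
Y = np.array([[1.0]])         # (1, 1)

# Forward pass, matching the question's convention a = W . x
a1 = np.dot(W1, X)            # (3, 1)
Z1 = sigmoid(a1)
a2 = np.dot(W2, Z1)           # (1, 1)
yHat = sigmoid(a2)

# Backward pass in the same convention: every delta is a column vector.
delta2 = -(Y - yHat) * deriv_sigmoid(a2)           # (1, 1)
dJdW2 = np.dot(delta2, Z1.T)                       # (1, 3), matches W2
delta1 = np.dot(W2.T, delta2) * deriv_sigmoid(a1)  # (3, 1)
dJdW1 = np.dot(delta1, X.T)                        # (3, 2), matches W1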

A weird error with updates in theano

I designed a variable net, but ran into some problems with Theano. The general idea is that different inputs get different nets with the same parameters, something like a recursive neural network with an auto-encoder.
There are two cases in my code: one case runs combine_feat_gt1_1() if c > 1, the other runs combine_feat_gt1_0().
It is weird that the code runs without bugs if I comment out updates=updates (the train_test Theano function in the code), which is not what I expected. However, if I uncomment updates=updates, an error occurs (the train_test_bug Theano function in the code). The latter is what I'd like to implement.
I have already spent some days on this bug. Who can help me? I would appreciate it.
import os
import sys
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from theano.ifelse import ifelse

class Test(object):
    def __init__(
        self,
        numpy_rng,
        input=None,
        output=None,
        n_output=6,
        n_input=3,
        n_group=2,
        W_r=None,
        b_r=None
    ):
        self.n_output = n_output
        self.n_input = n_input
        self.n_group = n_group
        if not W_r:
            initial_W_r = numpy.asarray(
                numpy_rng.uniform(
                    low=-4 * numpy.sqrt(6. / (n_input + n_input)),
                    high=4 * numpy.sqrt(6. / (n_input + n_input)),
                    size=(n_input, n_input)
                ),
                dtype=theano.config.floatX
            )
            W_r = theano.shared(value=initial_W_r, name='W_r', borrow=True)
        if not b_r:
            b_r = theano.shared(
                value=numpy.zeros(
                    n_input,
                    dtype=theano.config.floatX
                ),
                borrow=True
            )
        self.W_r = W_r
        self.b_r = b_r
        if input is None:
            self.x = T.tensor4(name='input', dtype=theano.config.floatX)
        else:
            self.x = input
        if output is None:
            self.y = T.matrix(name='output', dtype=theano.config.floatX)
        else:
            self.y = output
        self.params = [self.W_r, self.b_r]

    def get_output_values(self, input):
        a, b, c, d = input.shape

        def recusive(x_t, h_tm1, wr, hr):
            h_t = T.dot(h_tm1, wr) + T.dot(x_t, wr) + hr
            return h_t

        def combine_recusive(data):
            hidden, _ = theano.scan(fn=recusive,
                                    sequences=data[1:],
                                    outputs_info=data[0],
                                    non_sequences=[self.W_r, self.b_r],
                                    n_steps=data[1:].shape[0],
                                    strict=True)
            return hidden[-1]

        def combine_feat_gt1_1(input):
            feats, _ = theano.scan(fn=combine_recusive,
                                   sequences=input[0],
                                   outputs_info=None,
                                   n_steps=input[0].shape[0])
            recusive_flag = T.ones(1)
            return T.reshape(feats, (1, -1))  # concatenation

        def combine_feat_gt1_0(input):
            feats = input[0]
            recusive_flag = T.zeros(1)
            return T.reshape(feats, (1, -1))  # concatenation

        feat = ifelse(T.gt(c, 1), combine_feat_gt1_1(input), combine_feat_gt1_0(input))
        # debug code snippet
        self.debug_ifelse = theano.function([input], T.gt(c, 1))
        self.debug_1_0 = theano.function([input], ifelse(T.gt(c, 1), 1, 0))
        return feat

    def get_cost_updates(self):
        learning_rate = 0.1
        self.y_given_x = self.get_output_values(self.x)
        cost = T.sum((self.y_given_x - self.y) ** 2)
        gparams = T.grad(cost, self.params)
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
        ]
        return (cost, updates)

if __name__ == "__main__":
    toy_data = numpy.array([[[[1, 1, 1], [2, 2, 2]], [[3, 4, 5], [4, 5, 6]]]], dtype=theano.config.floatX)
    lable = numpy.array([[1, 2, 3, 4, 5, 6]], dtype=theano.config.floatX)
    toy_data2 = numpy.array([[[[1, 1, 1]], [[3, 4, 5]]]], dtype=theano.config.floatX)
    lable2 = numpy.array([[6, 5, 4, 3, 2, 1]], dtype=theano.config.floatX)
    x = T.tensor4('x', dtype=theano.config.floatX)
    y = T.matrix('y', dtype=theano.config.floatX)
    newX = T.tensor4(dtype=x.dtype)
    newY = T.matrix(dtype=y.dtype)
    rng = numpy.random.RandomState(123)
    test = Test(
        numpy_rng=rng,
        input=x,
        output=y,
        n_group=2,
        n_input=3,
        n_output=6
    )
    cost, updates = test.get_cost_updates()
    train_test = theano.function(
        [newX, newY],
        cost,
        # updates=updates,
        givens={
            x: newX,
            y: newY
        }
    )
    train_test_bug = theano.function(
        [newX, newY],
        cost,
        updates=updates,
        givens={
            x: newX,
            y: newY
        }
    )
    print train_test(toy_data, lable)
    print train_test(toy_data2, lable2)
    # code with bug
    # print train_test_bug(toy_data, lable)
    # print train_test_bug(toy_data2, lable2)
EDIT (by @danielrenshaw)
I've cut the code down to a simpler demonstration of the problem.
The cause is in the gradient computation of a double-nested scan expression. The problem disappears when a modified inner-most recursive expression is used (see the comments in the first function below).
import numpy
import theano
import theano.tensor as tt
import theano.ifelse

def inner_scan_step(x_t_t, h_tm1, w):
    # Fails when using this recursive expression
    h_t = tt.dot(h_tm1, w) + x_t_t
    # No failure when using this recursive expression
    # h_t = h_tm1 + tt.dot(x_t_t, w)
    return h_t

def outer_scan_step(x_t, w):
    h, _ = theano.scan(inner_scan_step,
                       sequences=[x_t[1:]],
                       outputs_info=[x_t[0]],
                       non_sequences=[w],
                       strict=True)
    return h[-1]

def get_outputs(x, w):
    features, _ = theano.scan(outer_scan_step,
                              sequences=[x],
                              non_sequences=[w],
                              strict=True)
    return tt.grad(features.sum(), w)

def main():
    theano.config.compute_test_value = 'raise'
    x_value = numpy.arange(12, dtype=theano.config.floatX).reshape((2, 2, 3))
    x = tt.tensor3()
    x.tag.test_value = x_value
    w = theano.shared(value=numpy.ones((3, 3), dtype=theano.config.floatX), borrow=True)
    f = theano.function(inputs=[x], outputs=get_outputs(x, w))
    print f(x_value)

if __name__ == "__main__":
    main()
I solved the problem in the version edited by danielrenshaw: when I pass h0 as outputs_info, it works. Before that I used the first element of the sequence as outputs_info, which I think caused the error. But I still cannot solve my original problem.
import numpy
import theano
import theano.tensor as tt
import theano.ifelse

def inner_scan_step(x_t_t, h_tm1, w):
    # Fails when using this recursive expression
    h_t = tt.dot(h_tm1, w) + x_t_t
    # No failure when using this recursive expression
    # h_t = h_tm1 + tt.dot(x_t_t, w)
    return h_t

def outer_scan_step(x_t, w, h0):
    h, _ = theano.scan(inner_scan_step,
                       sequences=[x_t],
                       outputs_info=[h0],
                       non_sequences=[w],
                       strict=True)
    return h[-1]

def get_outputs(x, w, h0):
    features, _ = theano.scan(outer_scan_step,
                              sequences=[x],
                              non_sequences=[w, h0],
                              strict=True)
    return tt.grad(features.sum(), w)

def main():
    theano.config.compute_test_value = 'raise'
    x_value = numpy.arange(12, dtype=theano.config.floatX).reshape((2, 2, 3))
    x = tt.tensor3()
    x.tag.test_value = x_value
    w = theano.shared(value=numpy.ones((3, 3), dtype=theano.config.floatX), borrow=True)
    h0 = theano.shared(value=numpy.zeros(3, dtype=theano.config.floatX), borrow=True)
    f = theano.function(inputs=[x], outputs=get_outputs(x, w, h0))
    print f(x_value)

if __name__ == "__main__":
    main()
I've encountered the same issue and fixed it by setting optimizer=fast_compile in THEANO_FLAGS. I guess this is a bug in Theano.
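For reference, a sketch of how that flag can be applied (assuming the script is named script.py; both forms are standard Theano configuration, but treat the exact invocation as illustrative):

# From the shell, for a single run:
#   THEANO_FLAGS='optimizer=fast_compile' python script.py

# Or programmatically, before any functions are compiled:
import theano
theano.config.optimizer = 'fast_compile'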
