Function in a class requires inputs from another function in the class - python-3.x

I am pretty new to Python and am trying to make an option pricing class with three functions: call, put, and graph. The call and put functions work fine, but I can't figure out the graph function. I want p.append to get values from the call function, holding all the variables constant except for S0, which should take the value i.
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt

class Option():
    def __init__(self, S0, K, T, r, sigma, start, stop, N):
        self.S0 = S0
        self.K = K
        self.T = T
        self.r = r
        self.sigma = sigma
        self.start = start
        self.stop = stop
        self.N = N

    def call(self):
        d1 = (np.log(self.S0/self.K) + \
             (self.r + 0.5*self.sigma**2)*self.T)/(self.sigma*np.sqrt(self.T))
        d2 = d1 - self.sigma*np.sqrt(self.T)
        price = (self.S0 * norm.cdf(d1, 0.0, 1.0) - \
                self.K * np.exp(-self.r * self.T) * norm.cdf(d2, 0.0, 1.0))
        return price

    def put(self):
        d1 = (np.log(self.S0/self.K) + \
             (self.r + 0.5*self.sigma**2)*self.T)/(self.sigma*np.sqrt(self.T))
        d2 = d1 - self.sigma*np.sqrt(self.T)
        price = (self.K * np.exp(-self.r * self.T) * norm.cdf(-d2, 0.0, 1.0) - \
                self.S0 * norm.cdf(-d1, 0.0, 1.0))
        return price

    def graphCall(self):
        S = np.linspace(self.start, self.stop, self.N)
        p = []
        for i in S:
            p.append()  # <-- stuck here: how do I price the call at S0 = i?
        plt.plot(S, p)

x = Option(100, 50, 3, 0.05, 0.40, 100, 200, 500)
print(x.call())
x.graphCall()

You could use self.S0 as the default value for calls to call and put, but allow other arguments as well:
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt

class Option():
    def __init__(self, S0, K, T, r, sigma, start, stop, N):
        self.S0 = S0
        self.K = K
        self.T = T
        self.r = r
        self.sigma = sigma
        self.start = start
        self.stop = stop
        self.N = N

    def call(self, s=None):
        if s is None:
            s = self.S0
        d1 = (np.log(s/self.K) + \
             (self.r + 0.5*self.sigma**2)*self.T)/(self.sigma*np.sqrt(self.T))
        d2 = d1 - self.sigma*np.sqrt(self.T)
        price = (s * norm.cdf(d1, 0.0, 1.0) - \
                self.K * np.exp(-self.r * self.T) * norm.cdf(d2, 0.0, 1.0))
        return price

    def put(self, s=None):
        if s is None:
            s = self.S0
        d1 = (np.log(s/self.K) + \
             (self.r + 0.5*self.sigma**2)*self.T)/(self.sigma*np.sqrt(self.T))
        d2 = d1 - self.sigma*np.sqrt(self.T)
        price = (self.K * np.exp(-self.r * self.T) * norm.cdf(-d2, 0.0, 1.0) - \
                s * norm.cdf(-d1, 0.0, 1.0))
        return price

    def graphCall(self):
        # The formulas above are NumPy-vectorized, so the whole price
        # grid S can be passed in one call instead of looping.
        S = np.linspace(self.start, self.stop, self.N)
        plt.plot(S, self.call(S))
        plt.show()

x = Option(100, 50, 3, 0.05, 0.40, 100, 200, 500)
print(x.call())
x.graphCall()
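As a quick sanity check (my own sketch, not part of the answer above): the call and put methods should satisfy European put-call parity, C - P = S0 - K*exp(-r*T):

# Sanity check (sketch): Black-Scholes prices obey put-call parity.
x = Option(100, 50, 3, 0.05, 0.40, 100, 200, 500)
lhs = x.call() - x.put()
rhs = x.S0 - x.K * np.exp(-x.r * x.T)
print(lhs, rhs)  # the two numbers should match to floating-point precision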


I'm getting an error message and am not sure why: "PIL.UnidentifiedImageError: cannot identify image file 'Maze.png'"

I've been trying to create a program which generates a perfect maze and outputs a .png file, which I then want to display in a GUI. But I receive this message: PIL.UnidentifiedImageError: cannot identify image file 'Maze.png'
All Relevant Code:
Maze_Prep.py
import random

class Cell:
    wall_pairs = {'N': 'S', 'S': 'N', 'E': 'W', 'W': 'E'}

    def __init__(self, x, y):
        self.x, self.y = x, y
        self.walls = {'N': True, 'S': True, 'E': True, 'W': True}

    def has_all_walls(self):
        return all(self.walls.values())

    def knock_down_wall(self, other, wall):
        self.walls[wall] = False
        other.walls[Cell.wall_pairs[wall]] = False

class Maze:
    def __init__(self, nx, ny, ix=0, iy=0):
        self.nx, self.ny = nx, ny
        self.ix, self.iy = ix, iy
        self.maze_map = [[Cell(x, y) for y in range(ny)] for x in range(nx)]
        self.add_begin_end = False

    def cell_at(self, x, y):
        return self.maze_map[x][y]

    def write_svg(self, filename):
        aspect_ratio = self.nx / self.ny
        padding = 100
        height = 250
        width = int(height * aspect_ratio)
        scy, scx = height / self.ny, width / self.nx

        def write_wall(ww_f, ww_x1, ww_y1, ww_x2, ww_y2):
            print('<line x1="{}" y1="{}" x2="{}" y2="{}"/>'
                  .format(ww_x1, ww_y1, ww_x2, ww_y2), file=ww_f)

        def add_cell_rect(f, x, y, colour):
            pad = 5
            print(f'<rect x="{scx*x+pad}" y="{scy*y+pad}" width="{scx-2*pad}"'
                  f' height="{scy-2*pad}" style="fill:{colour}" />', file=f)

        with open(filename, 'w') as f:
            print('<?xml version="1.0" encoding="utf-8"?>', file=f)
            print('<svg xmlns="http://www.w3.org/2000/svg"', file=f)
            print('    xmlns:xlink="http://www.w3.org/1999/xlink"', file=f)
            print('    width="{:d}" height="{:d}" style="background-color:white" viewBox="{} {} {} {}">'
                  .format(width + 2 * padding, height + 2 * padding,
                          -padding, -padding, width + 2 * padding, height + 2 * padding),
                  file=f)
            print('<defs>\n<style type="text/css"><![CDATA[', file=f)
            print('line {', file=f)
            print('    stroke: #000000;\n    stroke-linecap: square;', file=f)
            print('    stroke-width: 10;\n}', file=f)
            print(']]></style>\n</defs>', file=f)
            for x in range(self.nx):
                for y in range(self.ny):
                    if self.cell_at(x, y).walls['S']:
                        x1, y1, x2, y2 = x * scx, (y + 1) * scy, (x + 1) * scx, (y + 1) * scy
                        write_wall(f, x1, y1, x2, y2)
                    if self.cell_at(x, y).walls['E']:
                        x1, y1, x2, y2 = (x + 1) * scx, y * scy, (x + 1) * scx, (y + 1) * scy
                        write_wall(f, x1, y1, x2, y2)
            print('<line x1="0" y1="0" x2="{}" y2="0"/>'.format(width), file=f)
            print('<line x1="0" y1="0" x2="0" y2="{}"/>'.format(height), file=f)
            if self.add_begin_end:
                add_cell_rect(f, 0, -1, 'green')
                add_cell_rect(f, self.nx - 1, self.ny, 'red')
            print('</svg>', file=f)

    def find_valid_neighbours(self, cell):
        delta = [('W', (-1, 0)),
                 ('E', (1, 0)),
                 ('S', (0, 1)),
                 ('N', (0, -1))]
        neighbours = []
        for direction, (dx, dy) in delta:
            x2, y2 = cell.x + dx, cell.y + dy
            if (0 <= x2 < self.nx) and (0 <= y2 < self.ny):
                neighbour = self.cell_at(x2, y2)
                if neighbour.has_all_walls():
                    neighbours.append((direction, neighbour))
        return neighbours

    def make_maze(self):
        n = self.nx * self.ny
        cell_stack = []
        current_cell = self.cell_at(self.ix, self.iy)
        nv = 1
        while nv < n:
            neighbours = self.find_valid_neighbours(current_cell)
            if not neighbours:
                current_cell = cell_stack.pop()
                continue
            direction, next_cell = random.choice(neighbours)
            current_cell.knock_down_wall(next_cell, direction)
            cell_stack.append(current_cell)
            current_cell = next_cell
            nv += 1
Maze.py
from Maze_Prep import Maze
import time
import aspose.words as aw
from tkinter import *
from PIL import ImageTk,Image
# Maze dimensions (ncols, nrows)
nx, ny = 10, 10
# Maze entry position
ix, iy = 0, 0
maze = Maze(nx, ny, ix, iy)
maze.add_begin_end = True
maze.make_maze()
print('Maze Loaded!')
maze.write_svg('Maze.svg')
# Converts .svg to .png
doc = aw.Document()
builder = aw.DocumentBuilder(doc)
shape = builder.insert_image("Maze.svg")
shape.image_data.save("Maze.png")
Maze_GUI.py
from tkinter import *
from PIL import ImageTk,Image
import time
root = Tk()
root.title("Maze")
myImg = ImageTk.PhotoImage(Image.open('Maze.png'))
myLabel = Label(image=myImg)
myLabel.pack()
root.mainloop()
Any help would be appreciated!
I tried running a .png downloaded from the web and it worked fine, so I'm not sure why this one won't work. Try running it yourselves.
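One way to narrow this down (my own diagnostic sketch, not from the post): PIL raises UnidentifiedImageError when a file's header doesn't match any image format it knows, so it's worth checking whether the aspose.words conversion actually wrote a valid PNG:

# Diagnostic sketch: inspect the first bytes of the file PIL rejects.
# A real PNG always starts with the 8-byte signature b'\x89PNG\r\n\x1a\n';
# anything else means the conversion produced a corrupt or different format.
with open('Maze.png', 'rb') as fh:
    print(fh.read(8))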

Qt OpenGL: How to change the quaternion rotation point?

I'm trying to implement a rotation around a point. The problem is that the rotation point doesn't change when I pan the camera. I've already found some useful info about changing the rotation point by converting the quaternion to a rotation matrix and multiplying with transformation matrices, as in T(x, y, z) * R(q) * T(-x, -y, -z), but I don't use any of those: all transformations are performed on a single QMatrix4x4 using its rotate and lookAt methods. Are there any other solutions available?
Here's my camera class:
class PlyViewportCamera:
    def __init__(self):
        self.__projectionMatrix = QMatrix4x4()
        self.__viewMatrix = QMatrix4x4()
        self.__clipRange = (0.1, 1000.0)
        self.__fov = 45
        self.__camEye = QVector3D(0.0, 5.0, -10.0)
        self.__camTarget = QVector3D(0.0, 0.0, 0.0)
        self.__camUp = QVector3D(0.0, 1.0, 0.0)
        self.__viewRotation = QQuaternion()
        self.__xRotation = QQuaternion()
        self.__yRotation = QQuaternion()

    def __rotateX(self, rotation: QQuaternion):
        self.__xRotation = rotation * self.__xRotation
        self.__viewRotation = self.__xRotation * self.__yRotation
        self.__viewRotation.normalize()

    def __rotateY(self, rotation: QQuaternion):
        self.__yRotation = rotation * self.__yRotation
        self.__viewRotation = self.__xRotation * self.__yRotation
        self.__viewRotation.normalize()

    def rotate(self, p_start: QVector2D, p_end: QVector2D):
        prev_rotation = self.__viewRotation
        div_factor = 10
        diff = p_end - p_start
        angle_x = diff.y() / div_factor
        angle_y = diff.x() / div_factor
        self.__rotateX(QQuaternion.fromAxisAndAngle(1.0, 0.0, 0.0, angle_x))
        self.__rotateY(QQuaternion.fromAxisAndAngle(0.0, 1.0, 0.0, angle_y))
        self.__viewRotation = QQuaternion.slerp(prev_rotation, self.__viewRotation, 0.6)
        self.__viewRotation.normalize()

    def pan(self, start: QVector2D, end: QVector2D):
        delta = end - start
        transform = QVector3D(delta.x() / 50, delta.y() / 50, 0.0)
        self.__camEye += transform
        self.__camTarget += transform

    def updateCamera(self):
        self.__viewMatrix.setToIdentity()
        self.__viewMatrix.lookAt(self.__camEye, self.__camTarget, self.__camUp)
        self.__viewMatrix.rotate(self.__viewRotation)

    def setProjection(self, w: int, h: int):
        aspect_ratio = w / h
        self.__projectionMatrix.setToIdentity()
        self.__projectionMatrix.perspective(self.__fov, aspect_ratio, *self.__clipRange)

    @property
    def projectionMatrix(self):
        return self.__projectionMatrix

    @property
    def viewMatrix(self):
        return self.__viewMatrix
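One option that stays within a single QMatrix4x4 is the T(p) * R(q) * T(-p) idea mentioned above. A minimal sketch (not the original code), assuming the camera target, which pan() already moves, is the intended pivot, replacing updateCamera:

def updateCamera(self):
    self.__viewMatrix.setToIdentity()
    self.__viewMatrix.lookAt(self.__camEye, self.__camTarget, self.__camUp)
    self.__viewMatrix.translate(self.__camTarget)    # T(p)
    self.__viewMatrix.rotate(self.__viewRotation)    # R(q)
    self.__viewMatrix.translate(-self.__camTarget)   # T(-p): net effect rotates about camTarget

This way the quaternion rotation is applied about the (panned) target instead of the world origin.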

NumPy implementation for regression using a NN

I am implementing my own neural network model for regression using only NumPy, and I'm getting really weird results when testing the model on m > 1 samples (for m = 1 it works fine). It seems like the model collapses and predicts only a single value for the whole batch:
Input:
X [[ 7.62316802 -6.12433912]
   [ 1.11048966  4.97509421]]
Expected Output:
Y [[16.47952332 12.50288412]]
Model Output:
y_hat [[10.42446234 10.42446234]]
Any idea what might cause this issue?
My code:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# np.seterr(all=None, divide=None, over=None, under=None, invalid=None)

data_x = np.random.uniform(0, 10, size=(2, 1))
data_y = (2 * data_x).sum(axis=0, keepdims=True)
# data_y = data_x[0, :] ** 2 + data_x[1, :] ** 2
# data_y = data_y.reshape((1, -1))

# # fig = plt.figure()
# # ax = fig.add_subplot(111, projection='3d')
# # ax.scatter(data_x[0, :], data_x[1, :], data_y)
# # plt.show()

memory = dict()
nn_architecture = [
    {"input_dim": 2, "output_dim": 6, "activation": "sigmoid", "bias": True},
    {"input_dim": 6, "output_dim": 4, "activation": "sigmoid", "bias": True},
    {"input_dim": 4, "output_dim": 1, "activation": "relu", "bias": True}
]

def init_network_parameters(nn_architecture):
    parameters = []
    for idx, layer in enumerate(nn_architecture):
        layer_params = {}
        input_dim, output_dim, activation, bias = layer.values()
        W = np.random.uniform(0, 1, (output_dim, input_dim))
        B = np.zeros((output_dim, 1))
        if bias:
            B = np.ones((output_dim, 1))
        activation_func = identity
        backward_activation_func = identity_backward
        if activation == 'sigmoid':
            activation_func = sigmoid
            backward_activation_func = sigmoid_backward
        elif activation == 'relu':
            activation_func = relu
            backward_activation_func = relu_backward
        else:
            print(f"Activation function set to identity for layer {idx}")
        layer_params["W"] = W
        layer_params["B"] = B
        layer_params["activation"] = activation_func
        layer_params["backward_activation"] = backward_activation_func
        layer_params["bias"] = bias
        parameters.append(layer_params)
    return parameters

def identity(z):
    return z

def sigmoid(z):
    return np.clip(1 / (1 + np.exp(-z)), -100, 100)

def relu(z):
    output = np.array(z, copy=True)
    output[z <= 0] = 0
    return output

def identity_backward(z, dA):
    return dA

def sigmoid_backward(z, dA):
    return np.clip(z * (1 - z) * dA, -100, 100)

def relu_backward(z, dA):
    output = np.ones(z.shape)
    output[z <= 0] = 0
    return output * dA

def forward_single_layer(prev_A, parameters, idx):
    W = parameters["W"]
    B = parameters["B"]
    activation = parameters["activation"]
    if parameters["bias"]:
        curr_Z = W.dot(prev_A) + B
    else:
        curr_Z = W.dot(prev_A)
    curr_A = activation(curr_Z)
    memory[f"Z{idx+1}"] = curr_Z
    memory[f"A{idx+1}"] = curr_A
    return curr_Z, curr_A

def forward(X, parameters):
    prev_A = X
    memory["A0"] = prev_A
    for idx, layer_params in enumerate(parameters):
        curr_Z, prev_A = forward_single_layer(prev_A=prev_A, parameters=layer_params, idx=idx)
    return prev_A

def criteria(y_hat, y):
    assert y_hat.shape == y.shape
    n = y_hat.shape[0]
    m = y_hat.shape[1]
    loss = np.sum(y_hat - y, axis=1) / m
    dA = (y_hat - y) / m
    return loss, dA

def backward_single_layer(prev_A, dA, curr_W, curr_Z, backward_activation, idx):
    m = prev_A.shape[1]
    dZ = backward_activation(z=curr_Z, dA=dA)
    dW = np.dot(dZ, prev_A.T) / m
    dB = np.sum(dZ, axis=1, keepdims=True) / m
    dA = np.dot(curr_W.T, dZ)
    return dA, dW, dB

def backpropagation(parameters, dA):
    grads = {}
    for idx in reversed(range(len(parameters))):
        layer = parameters[idx]
        prev_A = memory[f"A{idx}"]
        curr_Z = memory[f"Z{idx+1}"]
        curr_W = layer["W"]
        backward_activation = layer["backward_activation"]
        dA, dW, dB = backward_single_layer(prev_A, dA, curr_W, curr_Z, backward_activation, idx)
        grads[f"W{idx}"] = dW
        grads[f"B{idx}"] = dB
    return grads

def update_params(parameters, grads, lr=0.001):
    new_params = []
    for idx, layer in enumerate(parameters):
        layer["W"] -= lr * grads[f"W{idx}"]
        layer["B"] -= lr * grads[f"B{idx}"]
        new_params.append(layer)
    return new_params

X = np.random.uniform(-10, 10, (2, 2))
Y = 2 * X[0, :] + X[1, :] ** 2
Y = Y.reshape((1, X.shape[1]))

parameters = init_network_parameters(nn_architecture)
n_epochs = 1000
lr = 0.01
loss_history = []
for i in range(n_epochs):
    y_hat = forward(X, parameters)
    loss, dA = criteria(y_hat, Y)
    loss_history.append(loss)
    grads = backpropagation(parameters, dA)
    parameters = update_params(parameters, grads, lr)
    if not i % 10:
        print(f"Epoch {i}/{n_epochs} loss={loss}")

print("X", X)
print("Y", Y)
print("y_hat", y_hat)
It turned out there wasn't a problem with my implementation, just overfitting.
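For anyone debugging a similar setup: a finite-difference gradient check (my own sketch, reusing the forward, criteria, and backpropagation functions above) is a quick way to rule out backprop bugs before concluding it's overfitting. Note that dA = (y_hat - y) / m is the gradient of the mean squared error 0.5 * sum((y_hat - y)**2) / m, so that is the loss to check against; also note backward_single_layer divides by m again, so expect the analytic value to be scaled accordingly:

# Sketch: finite-difference check of the gradient for one weight.
def mse(y_hat, y):
    return 0.5 * np.sum((y_hat - y) ** 2) / y.shape[1]

eps = 1e-6
y_hat = forward(X, parameters)
_, dA = criteria(y_hat, Y)
analytic = backpropagation(parameters, dA)["W0"][0, 0]

W = parameters[0]["W"]
orig = W[0, 0]
W[0, 0] = orig + eps
loss_plus = mse(forward(X, parameters), Y)
W[0, 0] = orig - eps
loss_minus = mse(forward(X, parameters), Y)
W[0, 0] = orig

numeric = (loss_plus - loss_minus) / (2 * eps)
print(numeric, analytic)  # a structured mismatch here points to a backprop bug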

SyntaxError: invalid syntax. Does someone know what causes this?

I'm trying to run the code below, but I get the error shown here. The complete error is:

line 27
    return np.exp(-1.0)*self.rf*self.T)*average
SyntaxError: invalid syntax
import numpy as np
import math
import time

class optionPricing:
    def __init__(self, S0, E, T, rf, sigma, interations):
        self.S0 = S0
        self.E = E
        self.T = T
        self.rf = rf
        self.sigma = sigma
        self.interations = interations

    def call_option_simulation(self):
        option_data = np.zeros([self.interations, 2])
        rand = np.random.normal(0, 1, [1, self.interations])
        stock_price = self.S0*np.exp(self.T*(self.rf - 0.5*self.sigma**2)+self.sigma*np.sqrt(self.T)*rand)
        option_data[:,1] = stock_price - self.E
        average = np.sum(np.amax(option_data, axis=1))/float(self.interations)
        return np.exp(-1.0)*self.rf*self.T)*average  # <- line 27: the SyntaxError

    def put_option_simulation(self):
        option_data = np.zeros([self.interations, 2])
        rand = np.random.normal(0, 1, [1, self.interations])
        stock_price = self.S0 * np.exp(self.T * (self.rf - 0.5 * self.sigma ** 2) + self.sigma * np.sqrt(self.T) * rand)
        option_data[:, 1] = self.E - stock_price
        average = np.sum(np.amax(option_data, axis=1)) / float(self.interations)
        return np.exp(-1.0) * self.rf * self.T) * average

if __name__ == "__name__":
    S0 = 100                 # underlying stock price at t=0
    E = 100                  # strike price
    T = 1                    # time to maturity
    rf = 0.05                # risk-free rate
    sigma = 0.2              # volatility of the underlying stock
    interations = 10000000   # number of iterations in the Monte Carlo simulation
    model = optionPricing(S0, E, T, rf, sigma, interations)
    print("call option price with monte-carlo approach: ", model.call_option_simulation())
    ptint("put option price with monte-carlo approach: ", model.put_option_simulation())
There is one opening bracket and two closing brackets in return np.exp(-1.0)*self.rf*self.T)*average. The exponent should presumably close after self.T, i.e. np.exp(-1.0*self.rf*self.T)*average; the return in put_option_simulation has the same problem. (As written, the script also never prints anything: the guard compares __name__ against "__name__" instead of "__main__", and the last line misspells print as ptint.)
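A sketch of the corrected line, assuming the intent is the usual Monte Carlo discounting of the average payoff back to t = 0:

# discount the average payoff at the risk-free rate over the maturity
return np.exp(-1.0 * self.rf * self.T) * average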

Why can't I get this Runge-Kutta solver to converge as the time step decreases?

For reasons, I need to implement the Runge-Kutta4 method in PyTorch (so no, I'm not going to use scipy.odeint). I tried and I get weird results on the simplest test case, solving x'=x with x(0)=1 (analytical solution: x=exp(t)). Basically, as I reduce the time step, I cannot get the numerical error to go down. I'm able to do it with a simpler Euler method, but not with the Runge-Kutta 4 method, which makes me suspect some floating point issue here (maybe I'm missing some hidden conversion from double precision to single)?
import torch
import numpy as np
import matplotlib.pyplot as plt

def Euler(f, IC, time_grid):
    y0 = torch.tensor([IC])
    time_grid = time_grid.to(y0[0])
    values = y0
    for i in range(0, time_grid.shape[0] - 1):
        t_i = time_grid[i]
        t_next = time_grid[i+1]
        y_i = values[i]
        dt = t_next - t_i
        dy = f(t_i, y_i) * dt
        y_next = y_i + dy
        y_next = y_next.unsqueeze(0)
        values = torch.cat((values, y_next), dim=0)
    return values

def RungeKutta4(f, IC, time_grid):
    y0 = torch.tensor([IC])
    time_grid = time_grid.to(y0[0])
    values = y0
    for i in range(0, time_grid.shape[0] - 1):
        t_i = time_grid[i]
        t_next = time_grid[i+1]
        y_i = values[i]
        dt = t_next - t_i
        dtd2 = 0.5 * dt
        f1 = f(t_i, y_i)
        f2 = f(t_i + dtd2, y_i + dtd2 * f1)
        f3 = f(t_i + dtd2, y_i + dtd2 * f2)
        f4 = f(t_next, y_i + dt * f3)
        dy = 1/6 * dt * (f1 + 2 * (f2 + f3) + f4)
        y_next = y_i + dy
        y_next = y_next.unsqueeze(0)
        values = torch.cat((values, y_next), dim=0)
    return values

# differential equation
def f(T, X):
    return X

# initial condition
IC = 1.

# integration interval
def integration_interval(steps, ND=1):
    return torch.linspace(0, ND, steps)

# analytical solution
def analytical_solution(t_range):
    return np.exp(t_range)

# test a numerical method
def test_method(method, t_range, analytical_solution):
    numerical_solution = method(f, IC, t_range)
    L_inf_err = torch.dist(numerical_solution, analytical_solution, float('inf'))
    return L_inf_err

if __name__ == '__main__':
    Euler_error = np.array([0., 0., 0.])
    RungeKutta4_error = np.array([0., 0., 0.])
    indices = np.arange(1, Euler_error.shape[0]+1)
    n_steps = np.power(10, indices)
    for i, n in np.ndenumerate(n_steps):
        t_range = integration_interval(steps=n)
        solution = analytical_solution(t_range)
        Euler_error[i] = test_method(Euler, t_range, solution).numpy()
        RungeKutta4_error[i] = test_method(RungeKutta4, t_range, solution).numpy()
    plots_path = "./plots"
    a = plt.figure()
    plt.xscale('log')
    plt.yscale('log')
    plt.plot(n_steps, Euler_error, label="Euler error", linestyle='-')
    plt.plot(n_steps, RungeKutta4_error, label="RungeKutta 4 error", linestyle='-.')
    plt.legend()
    plt.savefig(plots_path + "/errors.png")
The result: [log-log plot of L-infinity error vs. number of steps; the Euler error decreases monotonically while the RungeKutta4 error decreases and then rises again.]
As you can see, the Euler method converges (slowly, as expected of a first-order method). However, the RungeKutta4 method does not converge as the time step gets smaller and smaller. The error goes down initially, and then up again. What's the issue here?
The reason is indeed a floating point precision issue. torch defaults to single precision, so once the truncation error becomes small enough, the total error is essentially determined by the roundoff error, and reducing the truncation error further by increasing the number of steps (i.e., decreasing the time step) doesn't lead to any decrease in the total error.
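To make this quantitative (a standard heuristic, not part of the original answer): for a method of order p with step size h and machine epsilon eps, the truncation error shrinks like C1 * h^p while the accumulated roundoff grows like C2 * eps / h, so the total error behaves roughly like E(h) ≈ C1 * h^p + C2 * eps / h. This is minimized at a finite step size; refining the grid past that point makes the error climb again, and a higher-order method (larger p) hits the roundoff floor at a much larger h, which is exactly the RungeKutta4 curve above.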
To fix this, we need to enforce double-precision 64-bit floats for all floating point torch tensors and numpy arrays. Note that the right way to do this is to use respectively torch.float64 and np.float64 rather than, e.g., torch.double and np.double, because the former are fixed-size float types (always 64-bit), while the latter depend on the machine and/or compiler. Here's the fixed code:
import torch
import numpy as np
import matplotlib.pyplot as plt

def Euler(f, IC, time_grid):
    y0 = torch.tensor([IC], dtype=torch.float64)
    time_grid = time_grid.to(y0[0])
    values = y0
    for i in range(0, time_grid.shape[0] - 1):
        t_i = time_grid[i]
        t_next = time_grid[i+1]
        y_i = values[i]
        dt = t_next - t_i
        dy = f(t_i, y_i) * dt
        y_next = y_i + dy
        y_next = y_next.unsqueeze(0)
        values = torch.cat((values, y_next), dim=0)
    return values

def RungeKutta4(f, IC, time_grid):
    y0 = torch.tensor([IC], dtype=torch.float64)
    time_grid = time_grid.to(y0[0])
    values = y0
    for i in range(0, time_grid.shape[0] - 1):
        t_i = time_grid[i]
        t_next = time_grid[i+1]
        y_i = values[i]
        dt = t_next - t_i
        dtd2 = 0.5 * dt
        f1 = f(t_i, y_i)
        f2 = f(t_i + dtd2, y_i + dtd2 * f1)
        f3 = f(t_i + dtd2, y_i + dtd2 * f2)
        f4 = f(t_next, y_i + dt * f3)
        dy = 1/6 * dt * (f1 + 2 * (f2 + f3) + f4)
        y_next = y_i + dy
        y_next = y_next.unsqueeze(0)
        values = torch.cat((values, y_next), dim=0)
    return values

# differential equation
def f(T, X):
    return X

# initial condition
IC = 1.

# integration interval
def integration_interval(steps, ND=1):
    return torch.linspace(0, ND, steps, dtype=torch.float64)

# analytical solution
def analytical_solution(t_range):
    return np.exp(t_range, dtype=np.float64)

# test a numerical method
def test_method(method, t_range, analytical_solution):
    numerical_solution = method(f, IC, t_range)
    L_inf_err = torch.dist(numerical_solution, analytical_solution, float('inf'))
    return L_inf_err

if __name__ == '__main__':
    Euler_error = np.array([0., 0., 0.], dtype=np.float64)
    RungeKutta4_error = np.array([0., 0., 0.], dtype=np.float64)
    indices = np.arange(1, Euler_error.shape[0]+1)
    n_steps = np.power(10, indices)
    for i, n in np.ndenumerate(n_steps):
        t_range = integration_interval(steps=n)
        solution = analytical_solution(t_range)
        Euler_error[i] = test_method(Euler, t_range, solution).numpy()
        RungeKutta4_error[i] = test_method(RungeKutta4, t_range, solution).numpy()
    plots_path = "./plots"
    a = plt.figure()
    plt.xscale('log')
    plt.yscale('log')
    plt.plot(n_steps, Euler_error, label="Euler error", linestyle='-')
    plt.plot(n_steps, RungeKutta4_error, label="RungeKutta 4 error", linestyle='-.')
    plt.legend()
    plt.savefig(plots_path + "/errors.png")
Result: [log-log error plot; both curves now decrease monotonically as the number of steps grows.]
Now, as we decrease the time step, the error of the RungeKutta4 approximation decreases at the correct rate.
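As a quick follow-up check (my own sketch, reusing the Euler_error and RungeKutta4_error arrays from the script above): the step counts are 10, 100, 1000, so the step size shrinks by roughly a factor of 10 between runs, and the empirical convergence order can be read off as log10 of the ratio of successive errors:

# Sketch: estimate the empirical order p from err ~ C * dt^p.
# Successive dt values differ by roughly 10x, so p ≈ log10(err_i / err_{i+1}).
p_euler = np.log10(Euler_error[:-1] / Euler_error[1:])
p_rk4 = np.log10(RungeKutta4_error[:-1] / RungeKutta4_error[1:])
print(p_euler)  # expected near 1 for Euler
print(p_rk4)    # expected near 4 for RK4, until roundoff takes over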
