Cython: define arguments of unknown types - python-3.x

In the process of learning Cython, I am working through the example shown here on how to solve ordinary differential equations. I copy it here to make things simpler:
import numpy as np
cimport numpy as np

cdef class Problem:
    cpdef double rhs(self, double u, double t):
        return 0

cdef class Problem1(Problem):
    cpdef double rhs(self, double u, double t):
        return -u + 1  # u = 1 - exp(-t)

from math import exp

cdef class Problem2(Problem):
    cpdef double rhs(self, double u, double t):
        return -u + exp(-2*t)

cdef class ODEMethod:
    cpdef double advance(self, np.ndarray u, int n,
                         np.ndarray t, Problem p):
        return 0

cdef class Method_RK2(ODEMethod):
    cpdef double advance(self, np.ndarray u, int n,
                         np.ndarray t, Problem p):
        cdef double K1, K2, unew, dt
        dt = t[n+1] - t[n]
        K1 = dt*p.rhs(u[n], t[n])
        K2 = dt*p.rhs(u[n] + 0.5*K1, t[n] + 0.5*dt)
        unew = u[n] + K2
        return unew

# Create names compatible with ode0.py
RK2 = Method_RK2()
problem1 = Problem1()
problem2 = Problem2()

cpdef solver(Problem f, double I, np.ndarray t, ODEMethod method):
    cdef int N = len(t) - 1
    cdef np.ndarray u = np.zeros(N+1, dtype=np.float)
    u[0] = I
    cdef int n
    for n in range(N):
        u[n+1] = method.advance(u, n, t, f)
    return u, t
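For context, this module is driven from Python roughly like this; the module name ode1 is an assumption:

# hypothetical usage, assuming the module above compiles as ode1
import numpy as np
from ode1 import solver, problem1, RK2

t = np.linspace(0, 3, 31)
u, t = solver(problem1, 0.0, t, RK2)  # I = u(0) = 0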
The class Problem is meant to keep the code general. However, I would like to make it more general still by accepting additional arguments whose number and types are not known in advance, similar to *args in pure Python, for example:
class Problem:
    def __init__(self, *args):
        self.args = args
Is there a way to do this or even a better one?
Thanks in advance!
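One possible approach, as a sketch: __init__ of a cdef class is still an ordinary def method, so it can already take *args; the tuple just has to be declared as an attribute. The subclass Problem3 here is hypothetical:

cdef class Problem:
    cdef tuple args  # attributes of a cdef class must be declared

    def __init__(self, *args):
        # __init__ remains a Python-level def method, so *args works here
        self.args = args

    cpdef double rhs(self, double u, double t):
        return 0

cdef class Problem3(Problem):
    # hypothetical subclass using the stored extra arguments
    cpdef double rhs(self, double u, double t):
        return -self.args[0] * u + self.args[1]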

Related

Functions intersection approximation

Let f1 and f2 be two functions on the range [a, b], and maxerr the required approximation. Both are differentiable and continuous on this range. I should return an iterable Xs of approximate intersection points, such that:
∀x∈Xs, |f_1(x) - f_2(x)| < maxerr.
The signature of the function for example should be:
def intersection(self, f1: callable, f2: callable, a: float, b: float, maxerr=0.001) -> callable:
What is the most efficient way to do that without using a library method that finds the intersection directly?
Notes:
Python 3.7
Forbidden built-in functions: finding roots and intersections of functions, interpolation, integration, matrix decomposition, eigenvectors and solving linear systems.
Right now my code is the following:
import random
import time

def intersection_recursive(f1, f2, a, b, maxerr, X, start_time, timeout, side_flag):
    f = lambda t: f1(t) - f2(t)  # plain callables can't be subtracted directly
    startX = a
    endX = b
    while not f(startX) * f(endX) < 0 and time.time() < start_time + timeout:
        startX = random.uniform(a, b)
        endX = random.uniform(startX, b)
    mid = (startX + endX) / 2
    while not abs(f(mid)) < maxerr and time.time() < start_time + timeout:
        if f(startX) * f(mid) < 0:  # opposite signs: root lies in [startX, mid]
            endX = mid
        else:
            startX = mid
        mid = (startX + endX) / 2
    if abs(f(mid)) < maxerr:
        X.append(mid)
    else:
        return X
    if side_flag:
        return intersection_recursive(f1, f2, a, mid, maxerr, X, start_time, timeout, not side_flag)
    else:
        return intersection_recursive(f1, f2, mid, b, maxerr, X, start_time, timeout, not side_flag)

def intersection(self, f1: callable, f2: callable, a: float, b: float, maxerr=0.001) -> callable:
    timeout = 10
    X = []
    start_time = time.time()
    intersection_recursive(f1, f2, a, b, maxerr, X, start_time, timeout, True)
    return X
The answer below is to the original question, where no assumptions about the functions are made...
Without any additional information about the functions, the no-free-lunch theorem applies, and any search strategy is as valid as any other.
That said, a simple quasirandom sequence covers all of [a, b] uniformly at every detail level, given enough time.
By the way, I don't think your function signature is correct; it should return an iterable.
from typing import Callable, Iterable

def intersection(f1: Callable[[float], float],
                 f2: Callable[[float], float],
                 a: float, b: float,
                 maxerr: float = 0.001) -> Iterable[float]:
    a, b = sorted([a, b])
    invphi = 2 / (1 + 5**0.5)
    t = 0.5
    while True:
        x = a + (b-a)*t
        if abs(f1(x) - f2(x)) < maxerr:
            yield x
        t = (t + invphi) % 1.0
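Since this version is a generator, a caller pulls as many hits as it wants; a quick sketch with hypothetical f1 and f2:

from itertools import islice

f1 = lambda x: x**2 - 1
f2 = lambda x: 0.0
first_two = list(islice(intersection(f1, f2, -2, 2), 2))  # two approximate roots near -1 and 1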

Using self in init part of a class in Python

Is there any difference between the following two codes related to initializing a class in Python?
class summation:
    def __init__(self, f, s):
        self.first = f
        self.second = s
        self.summ = self.first + self.second
.
.
.
class summation:
    def __init__(self, f, s):
        self.first = f
        self.second = s
        self.summ = f + s
.
.
.
If there is any difference, what is it, and which code is preferable?
Edit: I am going to write an artificial neural network in Python (with PyTorch). The two codes above are just examples. In practice, I have seen in various resources that when self.input = input appears in a class's initializer, other parts of the class use self.input, not input.
My questions: What are the differences between these two approaches? Why is the use of self.input preferable, in my case?
Example: (from https://docs.dgl.ai/en/latest/tutorials/models/1_gnn/4_rgcn.html#sphx-glr-tutorials-models-1-gnn-4-rgcn-py)
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl import DGLGraph
import dgl.function as fn
from functools import partial

class RGCNLayer(nn.Module):
    def __init__(self, in_feat, out_feat, num_rels, num_bases=-1, bias=None,
                 activation=None, is_input_layer=False):
        super(RGCNLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.num_rels = num_rels
        self.num_bases = num_bases
        self.bias = bias
        self.activation = activation
        self.is_input_layer = is_input_layer
        # sanity check
        if self.num_bases <= 0 or self.num_bases > self.num_rels:
            self.num_bases = self.num_rels
        # weight bases in equation (3)
        self.weight = nn.Parameter(torch.Tensor(self.num_bases, self.in_feat,
                                                self.out_feat))
        if self.num_bases < self.num_rels:
            # linear combination coefficients in equation (3)
            self.w_comp = nn.Parameter(torch.Tensor(self.num_rels, self.num_bases))
        # add bias
        if self.bias:
            self.bias = nn.Parameter(torch.Tensor(out_feat))
        # init trainable parameters
        nn.init.xavier_uniform_(self.weight,
                                gain=nn.init.calculate_gain('relu'))
        if self.num_bases < self.num_rels:
            nn.init.xavier_uniform_(self.w_comp,
                                    gain=nn.init.calculate_gain('relu'))
        if self.bias:
            nn.init.xavier_uniform_(self.bias,
                                    gain=nn.init.calculate_gain('relu'))

    def forward(self, g):
        if self.num_bases < self.num_rels:
            # generate all weights from bases (equation (3))
            weight = self.weight.view(self.in_feat, self.num_bases, self.out_feat)
            weight = torch.matmul(self.w_comp, weight).view(self.num_rels,
                                                            self.in_feat, self.out_feat)
        else:
            weight = self.weight
        if self.is_input_layer:
            def message_func(edges):
                # for input layer, matrix multiply can be converted to be
                # an embedding lookup using source node id
                embed = weight.view(-1, self.out_feat)
                index = edges.data['rel_type'] * self.in_feat + edges.src['id']
                return {'msg': embed[index] * edges.data['norm']}
        else:
            def message_func(edges):
                w = weight[edges.data['rel_type']]
                msg = torch.bmm(edges.src['h'].unsqueeze(1), w).squeeze()
                msg = msg * edges.data['norm']
                return {'msg': msg}

        def apply_func(nodes):
            h = nodes.data['h']
            if self.bias:
                h = h + self.bias
            if self.activation:
                h = self.activation(h)
            return {'h': h}

        g.update_all(message_func, fn.sum(msg='msg', out='h'), apply_func)
No, there is no difference between these two approaches in your case, with this level of information. But could there be? Yes, there could, if the classes modify their setters or getters. Later in my answer I'll show you how.
First of all, I prefer using this one:
class summation:
    def __init__(self, f, s):
        self.first = f
        self.second = s

    @property
    def summ(self):
        return self.first + self.second
The above implementation calculates the summation on demand: whenever you change self.first or self.second, summ is recalculated automatically. You can access the sum as you did before.
s = summation(1,9)
print(s.summ)
# 10
s.first = 2
s.second = 3
print(s.summ)
# 5
So, how could they be different?
Let's implement them as follows. In the setters I doubled the inputs to show how setters can affect the results; it's just an imaginary example and not exactly what you wrote.
class summation1:
    def __init__(self, f, s):
        self.first = f
        self.second = s
        self.summ = self.first + self.second

    @property
    def first(self):
        return self.__first

    @first.setter
    def first(self, f):
        self.__first = f*2

    @property
    def second(self):
        return self.__second

    @second.setter
    def second(self, s):
        self.__second = s*2

class summation2:
    def __init__(self, f, s):
        self.first = f
        self.second = s
        self.summ = f + s

    @property
    def first(self):
        return self.__first

    @first.setter
    def first(self, f):
        self.__first = f*2

    @property
    def second(self):
        return self.__second

    @second.setter
    def second(self, s):
        self.__second = s*2
Now let's take a look at the outputs:
a = 3
b = 2
s1 = summation1(a,b)
s2 = summation2(a,b)
print(s1.summ)
# 10
print(s2.summ)
# 5
So, if you are not sure which of those two to choose, maybe the first approach (the property-based one) is what you need.

Product feature optimization with constraints

I have trained a LightGBM model on a learning-to-rank dataset. The model predicts the relevance score of a sample, so the higher the prediction the better. Now that the model is trained, I would like to find the values of some features that give me the highest prediction score.
So, let's say I have features u, v, w, x, y, z, and the features I would like to optimize over are x, y, z.
maximize f(u,v,w,x,y,z) w.r.t features x,y,z where f is a lightgbm model
subject to constraints :
y = Ax + b
z = 4 if y < thresh_a else 4-0.5 if y >= thresh_b else 4-0.3
thresh_m < x <= thresh_n
The numbers are randomly made up but constraints are linear.
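Read literally, the nested ternary for z unrolls to the following (a sketch using the made-up thresholds):

def z_rule(y, thresh_a=5, thresh_b=10):
    # literal reading of z = 4 if y < thresh_a else 4-0.5 if y >= thresh_b else 4-0.3
    if y < thresh_a:
        return 4
    elif y >= thresh_b:
        return 4 - 0.5
    else:
        return 4 - 0.3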
A plot of the objective function with respect to x (image omitted) shows that the function is very spiky and non-smooth. I also don't have gradient information, as f is a LightGBM model.
Using Nathan's answer, I wrote the following class:
class ProductOptimization:
    def __init__(self, estimator, features_to_change, row_fixed_values,
                 bnds=None):
        self.estimator = estimator
        self.features_to_change = features_to_change
        self.row_fixed_values = row_fixed_values
        self.bounds = bnds

    def get_sample(self, x):
        new_values = {k: v for k, v in zip(self.features_to_change, x)}
        return self.row_fixed_values.replace({k: {self.row_fixed_values[k].iloc[0]: v}
                                              for k, v in new_values.items()})

    def _call_model(self, x):
        pred = self.estimator.predict(self.get_sample(x))
        return pred[0]

    def constraint1(self, vector):
        x = vector[0]
        y = vector[2]
        return  # some float value

    def constraint2(self, vector):
        x = vector[0]
        y = vector[3]
        return  # some float value

    def optimize_slsqp(self, initial_values):
        con1 = {'type': 'eq', 'fun': self.constraint1}
        con2 = {'type': 'eq', 'fun': self.constraint2}
        cons = [con1, con2]
        result = minimize(fun=self._call_model,
                          x0=np.array(initial_values),
                          method='SLSQP',
                          bounds=self.bounds,
                          constraints=cons)
        return result
The results I get are always around the initial guess, and I think that's because of the non-smoothness of the function and the absence of gradient information, which is important for the SLSQP optimizer. Any advice on how to deal with this kind of problem?
It's been a good minute since I last wrote any serious code, so I apologize if it's not entirely clear what everything does; please feel free to ask for more explanation.
The imports:
from sklearn.ensemble import GradientBoostingRegressor
import numpy as np
from scipy.optimize import minimize
from copy import copy
First I define a new class that allows me to easily redefine values. This class has 5 inputs:
value: this is the 'base' value. In your equation y = Ax + b, it's the b part.
minimum: the minimum value this type will evaluate as.
maximum: the maximum value this type will evaluate as.
multipliers: the first tricky one. It's a list of [InputType, multiplier] pairs. In your example y = Ax + b you would have [[x, A]]; if the equation were y = Ax + Bz + Cd, it would be [[x, A], [z, B], [d, C]].
relations: the most tricky one. It's a list of four-item entries: the first item is the other InputType; the second is min for an upper boundary or max for a lower boundary; the third is the value of the boundary; and the fourth is the output value connected to it.
Watch out: if you define your input values too strangely, I'm sure there will be weird behaviour.
class InputType:
    def __init__(self, value=0, minimum=-1e99, maximum=1e99, multipliers=[], relations=[]):
        """
        :param float value: base value
        :param float minimum: value can never be lower than x
        :param float maximum: value can never be higher than y
        :param multipliers: [[InputType, multiplier], [InputType, multiplier]]
        :param relations: [[InputType, min, threshold, output_value], [InputType, max, threshold, output_value]]
        """
        self.val = value
        self.min = minimum
        self.max = maximum
        self.multipliers = multipliers
        self.relations = relations

    def reset_val(self, value):
        self.val = value

    def evaluate(self):
        """
        - relations to other variables are done first; if there are none, the rest is evaluated
        - at most self.max
        - at least self.min
        - self.val + i_x * w_x
          i_x is input i, w_x is multiplier (weight) of i
        """
        for term, min_max, value, output_value in self.relations:
            # check for each term if it falls outside of the expected range
            if min_max(term.evaluate(), value) != term.evaluate():
                return self.return_value(output_value)
        output_value = self.val + sum([i[0].evaluate() * i[1] for i in self.multipliers])
        return self.return_value(output_value)

    def return_value(self, output_value):
        return min(self.max, max(self.min, output_value))
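As a quick sanity check of the class above, wiring up y = Ax + b with hypothetical numbers:

x = InputType(value=3)
y = InputType(value=2, multipliers=[[x, 5]])  # y = 5*x + 2
print(y.evaluate())  # 5*3 + 2 = 17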
Using this, you can fix the input types sent from the optimizer, as shown in _call_model:
class Example:
    def __init__(self, lst_args):
        self.lst_args = lst_args
        self.X = np.random.random((10000, len(lst_args)))
        self.y = self.get_y()
        self.clf = GradientBoostingRegressor()
        self.fit()

    def get_y(self):
        # sum of squares, is minimum at x = [0, 0, 0, 0, 0 ... ]
        return np.array([[self._func(i)] for i in self.X])

    def _func(self, i):
        return sum(i * i)

    def fit(self):
        self.clf.fit(self.X, self.y)

    def optimize(self):
        x0 = [0.5 for i in self.lst_args]
        initial_simplex = self._get_simplex(x0, 0.1)
        result = minimize(fun=self._call_model,
                          x0=np.array(x0),
                          method='Nelder-Mead',
                          options={'xatol': 0.1,
                                   'initial_simplex': np.array(initial_simplex)})
        return result

    def _get_simplex(self, x0, step):
        simplex = []
        for i in range(len(x0)):
            point = copy(x0)
            point[i] -= step
            simplex.append(point)
        point2 = copy(x0)
        point2[-1] += step
        simplex.append(point2)
        return simplex

    def _call_model(self, x):
        print(x, type(x))
        for i, value in enumerate(x):
            self.lst_args[i].reset_val(value)
        input_x = np.array([i.evaluate() for i in self.lst_args])
        prediction = self.clf.predict([input_x])
        return prediction[0]
I can define your problem as shown below (be sure to define the inputs in the same order as the final list, otherwise not all the values will get updated correctly in the optimizer!):
A = 5
b = 2
thresh_a = 5
thresh_b = 10
thresh_c = 10.1
thresh_m = 4
thresh_n = 6
u = InputType()
v = InputType()
w = InputType()
x = InputType(minimum=thresh_m, maximum=thresh_n)
y = InputType(value = b, multipliers=([[x, A]]))
z = InputType(relations=[[y, max, thresh_a, 4], [y, min, thresh_b, 3.5], [y, max, thresh_c, 3.7]])
example = Example([u, v, w, x, y, z])
Calling the results:
result = example.optimize()
for i, value in enumerate(result.x):
    example.lst_args[i].reset_val(value)
print(f"final values are at: {[i.evaluate() for i in example.lst_args]}: {result.fun}")

Cython: Calling an extension type cdef method from a cdef function called by python

I'm trying to write a Cython module that calculates pairwise distances as part of a larger class of locality sensitive hashes. Instead of writing code for each type and each distance metric, I am attempting to create one cdef function that takes various extension types that inherit from Metric:
cdef class Metric:
    def __init__(self):
        pass

cdef class Euclidean(Metric):
    cdef numeric c_evaluate(self, numeric[:] x, numeric[:] y, int dims):
        ....

cdef numeric[:,:] pairwise(numeric[:] x, numeric[:] y, Metric func, bint symmetric):
    ...
    dm[i,j] = func.c_evaluate(x, y, dims)
    ...
To access this function from Python:
def py_pairwise(numeric[:,:] x, numeric[:,:] y, str func, bint symmetric = 1, **kwargs):
    cdef Metric mfunc = to_Metric(func, **kwargs)
    return pairwise(x, y, mfunc, symmetric)
However, I keep getting the error that "c_distance.[Metric] object has no attribute 'c_evaluate'". I'm wondering whether the c_evaluate method is inaccessible because the object is created in Python code through the Python function to_Metric, though I thought def and cdef functions were supposed to be able to call each other freely within a Cython module. The method works if I change c_evaluate to a cpdef method, but I'm not sure whether that fixes the problem by allowing the cdef object to pass through Python to Cython or simply uses the slower Python method. Any suggestions? (I'm not at my home computer, so I don't have all the code right now; I will update later/on request.)
Edit: That typo isn't in the original functions (there could still be others):
ctypedef fused floating:
    float
    double

cdef class Euclidean(Metric):
    cdef public floating c_evaluate(self, floating[:] x, floating[:] y, int dims):
        cdef int i
        cdef floating tmp, d = 0
        for i in range(dims):
            tmp = x[i] - y[i]
            d += tmp*tmp
        return sqrt(d)

#@cython.boundscheck(False)
#@cython.wraparound(False)
def py_pairwise(numeric[:,::1] x, numeric[:,::1] y, str metric, bint symmetric, **kwargs):
    cdef Metric func = to_Metric(metric, **kwargs)
    return pairwise(x, y, func, symmetric)
cdef numeric[:,::1] pairwise(numeric[:,::1] x, numeric[:,::1] y, Metric met, bint symmetric):
    cdef int n, m, k, i, j
    n = x.shape[0]
    m = y.shape[0]
    dims = x.shape[1]
    if numeric in floating:
        mdtype = np.float
    else:
        mdtype = np.int
    #mdtype = np.float
    cdef numeric[:,::1] dm = (np.empty((n, m), dtype=mdtype)).fill(0)
    if symmetric:
        interval = lambda i, n, m: range(i+1, m)
    else:
        interval = lambda i, n, m: range(m)
    for i in range(n):
        for j in interval(i, n, m):
            dm[i,j] = met.c_evaluate(x[i,:], y[j,:], dims)
    return np.asarray(dm)
Also, to_Metric:
def to_Metric(str m, **kwargs):
    if len(kwargs) == 0:
        if m == 'euclidean':
            met = Euclidean()
        elif m in {'cos', 'cosine'}:
            met = Cosine()
        elif m in {'hamming', 'matching'}:
            met = Hamming()
        else:
            raise ValueError('Unrecognized metric {}'.format('\''+m+'\''))
    else:
        if m in {'pnorm', 'p-norm'}:
            met = Pnorm(kwargs['p'])
        elif m == 'maximal':
            met = Maximal(kwargs['m1'], kwargs['m2'], kwargs['sep'])
        else:
            raise ValueError('Unrecognized metric {}'.format('\''+m+'\''))
    return met
The issue is that c_evaluate is associated with the class Euclidean, and because of this it can only be used with objects that are known to be of type Euclidean. However, in pairwise you declare the type of met to be Metric.
Because you declared c_evaluate as cdef, it can only be found at compile time. If you want c_evaluate to be found at runtime like a standard Python function, you should declare it as def.
If you need the function to be found at compile time (which makes calling it quicker), then you should either declare c_evaluate on the Metric base class, or make pairwise take only a Euclidean object.
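A minimal sketch of the first option, using a concrete double signature for simplicity (mixing fused types into an overridden cdef method adds complications, so a fixed signature keeps the override straightforward):

from libc.math cimport sqrt

cdef class Metric:
    # declaring c_evaluate on the base class gives every Metric subclass
    # a compile-time-visible slot that pairwise can call through
    cdef double c_evaluate(self, double[:] x, double[:] y, int dims):
        return 0

cdef class Euclidean(Metric):
    cdef double c_evaluate(self, double[:] x, double[:] y, int dims):
        cdef int i
        cdef double tmp, d = 0
        for i in range(dims):
            tmp = x[i] - y[i]
            d += tmp * tmp
        return sqrt(d)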

Numpy in Cython, no improvement

I am writing a simple function in Cython using NumPy, but it seems that Cython is producing a ton of Python API calls when converting to C++. Could anyone help me with the problem? I did not find anything more in the Cython docs.
operations.pyx:
import numpy as np
cimport numpy as np
import cython
cimport cython

@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
def diff(np.ndarray[np.float64_t, ndim=2] a,
         np.ndarray[np.float64_t, ndim=2] b):
    cdef int cols = 100
    cdef int rows = 100
    for _ in range(1000):
        for i in range(rows):
            b[i, 0] = (a[i, 1] - a[i, cols - 1]) / 2
        for i in range(1, cols - 1):
            b[:, i] = (a[:, i + 1] - a[:, i - 1]) / 2
        for i in range(rows):
            b[i, cols - 1] = (a[i, 0] - a[i, cols - 2]) / 2
    return
I get almost the same speed in Python and Cython, and if I change the column selection (:) to an explicit loop, it becomes much worse (5x slower). Could someone show me where the error might be?
HTML output from the Cython annotation: (screenshot omitted)
The loops use i and j (and _) as Python objects; try cdef-ing them, for example here:
cdef int cols = 100
cdef int rows = 100
cdef int i = 0
cdef int j = 0
Since you do not do any operations on _, I think Cython handles it correctly and it doesn't need to be cdef'd, but you could try (anyway, it is just one line).
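For reference, a sketch of diff with every loop index typed (same imports and decorators as operations.pyx). Here the middle slice assignment is also unrolled into an explicit inner loop so that all three loops run in C; this is a deliberate change from the original and worth benchmarking:

@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
def diff(np.ndarray[np.float64_t, ndim=2] a,
         np.ndarray[np.float64_t, ndim=2] b):
    cdef int cols = 100
    cdef int rows = 100
    cdef int i, j, _
    for _ in range(1000):
        for i in range(rows):
            b[i, 0] = (a[i, 1] - a[i, cols - 1]) / 2
        for j in range(1, cols - 1):
            for i in range(rows):
                # central difference, elementwise instead of a column slice
                b[i, j] = (a[i, j + 1] - a[i, j - 1]) / 2
        for i in range(rows):
            b[i, cols - 1] = (a[i, 0] - a[i, cols - 2]) / 2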
