Implementing self-attention - PyTorch

I am trying to implement self-attention in PyTorch.
I need to calculate the following expressions:
the similarity function S (2-dimensional), P (2-dimensional), and C'.
S[i][j] = W1 * x1[i] + W2 * x1[j] + W3 * x1[i] * x1[j]
P[i][j] = exp(S[i][j]) / sum over j of exp(S[i][j])
Basically, each row of P is a softmax over the corresponding row of S.
C'[i] = sum over j of P[i][j] * x1[j]
I tried the following code using for loops:
for i in range(self.dim):
    for j in range(self.dim):
        S[i][j] = self.W1 * x1[i] + self.W2 * x1[j] + self.W3 * x1[i] * x1[j]

for i in range(self.dim):
    for j in range(self.dim):
        P[i][j] = torch.exp(S[i][j]) / torch.sum(torch.exp(S[i]))

# attend
for i in range(self.dim):
    out[i] = 0
    for j in range(self.dim):
        out[i] += P[i][j] * x1[j]
Is there any faster way to implement this in PyTorch?
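Yes: the nested loops can be replaced with broadcasting. A minimal vectorized sketch, assuming (as in the loop version above) that W1, W2, W3 are scalar parameters and x1 is a 1-D tensor of length dim:

import torch

def self_attention(x1, W1, W2, W3):
    # S[i][j] = W1*x1[i] + W2*x1[j] + W3*x1[i]*x1[j], built via broadcasting
    S = W1 * x1.unsqueeze(1) + W2 * x1.unsqueeze(0) + W3 * torch.outer(x1, x1)
    P = torch.softmax(S, dim=1)  # row-wise softmax, same as the explicit exp/sum
    return P @ x1                # out[i] = sum_j P[i][j] * x1[j]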

Here is an example of self-attention that I implemented in Dual Attention for HSI imagery:
import torch
from torch.nn import Module, Conv2d, Parameter, Softmax

class PAM_Module(Module):
    """ Position attention module https://github.com/junfu1115/DANet/blob/master/encoding/nn/attention.py"""
    # Ref from SAGAN
    def __init__(self, in_dim):
        super(PAM_Module, self).__init__()
        self.chanel_in = in_dim
        self.query_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.key_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
        self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma = Parameter(torch.zeros(1))  # learnable residual weight, starts at 0
        self.softmax = Softmax(dim=-1)

    def forward(self, x):
        """
        inputs :
            x : input feature maps (B x C x H x W)
        returns :
            out : attention value + input feature
            attention : B x (HxW) x (HxW)
        """
        m_batchsize, C, height, width = x.size()
        # queries: (B, HW, C//8); keys: (B, C//8, HW)
        proj_query = self.query_conv(x).view(m_batchsize, -1, width*height).permute(0, 2, 1)
        proj_key = self.key_conv(x).view(m_batchsize, -1, width*height)
        energy = torch.bmm(proj_query, proj_key)  # (B, HW, HW) similarity scores
        attention = self.softmax(energy)
        proj_value = self.value_conv(x).view(m_batchsize, -1, width*height)
        out = torch.bmm(proj_value, attention.permute(0, 2, 1))
        out = out.view(m_batchsize, C, height, width)
        out = self.gamma*out + x  # residual connection
        # out = F.avg_pool2d(out, out.size()[2:4])
        return out
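A quick usage sketch (the shapes here are illustrative assumptions, not from the original post):

pam = PAM_Module(in_dim=64)
x = torch.randn(2, 64, 32, 32)  # B x C x H x W
out = pam(x)                    # same shape as x: (2, 64, 32, 32)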

Related

PyTorch model tracing not working: We don't have an op for aten::fill_

I am stuck tracing a PyTorch model; this specific module fails with the following error:
RuntimeError: 0INTERNAL ASSERT FAILED at "../torch/csrc/jit/ir/alias_analysis.cpp":611, please report a bug to PyTorch. We don't have an op for aten::fill_ but it isn't a special case. Argument types: Tensor, bool,
Candidates:
aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> (Tensor(a!))
aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> (Tensor(a!))
Here is the reduced code example to reproduce the bug:
import torch
import torch.nn.functional as F
import torch.nn as nn
class SurroundPattern(nn.Module):
def __init__(self, crop_size=1./2):
super(SurroundPattern, self).__init__()
self.crop_size = crop_size
def forward(self, x, s):
H,W = x.shape[2:]
crop_h = (int(H / 2 - self.crop_size / 2 * H), int(H / 2 + self.crop_size / 2 * H))
crop_w = (int(W / 2 - self.crop_size / 2 * W), int(W / 2 + self.crop_size / 2 * W))
x_mask = torch.zeros(H,W,device=x.device, dtype=torch.bool)
x_mask[crop_h[0] : crop_h[1], crop_w[0] : crop_w[1]] = True
inside_indices = torch.where(x_mask)
inside_part = x[:, :, inside_indices[0], inside_indices[1]]
inside_feat = inside_part.mean(2)
outside_indices = torch.where(~x_mask)
outside_part = x[:, :, outside_indices[0], outside_indices[1]]
outside_feat = outside_part.mean(2)
fused = torch.stack([inside_feat, outside_feat], dim=2).unsqueeze(3)
if s is None:
return fused
SH,SW = s.shape[2:]
crop_sh = (int(SH / 2 - self.crop_size / 2 * SH), int(SH / 2 + self.crop_size / 2 * SH))
crop_sw = (int(SW / 2 - self.crop_size / 2 * SW), int(SW / 2 + self.crop_size / 2 * SW))
s_mask = torch.zeros(SH, SW, device=s.device, dtype=torch.bool)
s_mask[crop_sh[0] : crop_sh[1], crop_sw[0] : crop_sw[1]] = True
s_inside_indices = torch.where(s_mask)
inside_sal = s[:, :, s_inside_indices[0], s_inside_indices[1]].flatten(1)
s_outside_indices = torch.where(~s_mask)
outside_sal = s[:, :, s_outside_indices[0], s_outside_indices[1]].flatten(1)
if outside_sal.shape != inside_sal.shape:
outside_sal = F.adaptive_max_pool1d(outside_sal.unsqueeze(1), output_size=784)
outside_sal = outside_sal.squeeze(1)
fused_sal = torch.stack([inside_sal, outside_sal], dim=2).unsqueeze(3)
return fused, fused_sal
x = torch.randn(2, 512, 7, 7)
s = torch.randn(2, 1, 56, 56)
patt = SurroundPattern()
traced_cell = torch.jit.trace(patt, (x, s))
print(traced_cell)
How do I figure out where exactly the problem is? Is there a way to fix it with other functions?
Thank you!
The problem is that you try to fill a bool tensor, which is apparently not yet supported in JIT (or is a bug).
Replacing this:
x_mask = torch.zeros(H, W, device=x.device, dtype=torch.bool)
x_mask[crop_h[0] : crop_h[1], crop_w[0] : crop_w[1]] = True
with:
x_mask = torch.zeros(H, W, device=x.device)
x_mask[crop_h[0] : crop_h[1], crop_w[0] : crop_w[1]] = 1
should resolve the error. This is of course suboptimal compared to the target tensor type, but you should still be able to perform any other operation you would otherwise do with a torch.BoolTensor.
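A sketch of the workaround in the context of the module above (the comparisons rebuild the boolean result from the float mask, so the downstream indexing is unchanged):

x_mask = torch.zeros(H, W, device=x.device)
x_mask[crop_h[0] : crop_h[1], crop_w[0] : crop_w[1]] = 1
inside_indices = torch.where(x_mask > 0)    # same indices as torch.where(bool_mask)
outside_indices = torch.where(x_mask == 0)  # same indices as torch.where(~bool_mask)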

Convert TensorFlow 1 to TensorFlow 2

There is code written with TensorFlow 1 at this link:
https://github.com/carlthome/tensorflow-convlstm-cell/blob/master/cell.py
I want to use this class as a layer in tf.keras, so it should be written for TensorFlow version 2.
How can I do it?
This is the code:
import tensorflow as tf

class ConvLSTMCell(tf.nn.rnn_cell.RNNCell):
    """A LSTM cell with convolutions instead of multiplications.
    Reference:
      Xingjian, S. H. I., et al. "Convolutional LSTM network: A machine learning approach
      for precipitation nowcasting." Advances in Neural Information Processing Systems. 2015.
    """

    def __init__(self, shape, filters, kernel, forget_bias=1.0, activation=tf.tanh,
                 normalize=True, peephole=True, data_format='channels_last', reuse=None):
        super(ConvLSTMCell, self).__init__(_reuse=reuse)
        self._kernel = kernel
        self._filters = filters
        self._forget_bias = forget_bias
        self._activation = activation
        self._normalize = normalize
        self._peephole = peephole
        if data_format == 'channels_last':
            self._size = tf.TensorShape(shape + [self._filters])
            self._feature_axis = self._size.ndims
            self._data_format = None
        elif data_format == 'channels_first':
            self._size = tf.TensorShape([self._filters] + shape)
            self._feature_axis = 0
            self._data_format = 'NC'
        else:
            raise ValueError('Unknown data_format')

    @property
    def state_size(self):
        return tf.nn.rnn_cell.LSTMStateTuple(self._size, self._size)

    @property
    def output_size(self):
        return self._size

    def call(self, x, state):
        c, h = state
        x = tf.concat([x, h], axis=self._feature_axis)
        n = x.shape[-1].value
        m = 4 * self._filters if self._filters > 1 else 4
        W = tf.get_variable('kernel', self._kernel + [n, m])
        y = tf.nn.convolution(x, W, 'SAME', data_format=self._data_format)
        if not self._normalize:
            y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
        j, i, f, o = tf.split(y, 4, axis=self._feature_axis)
        if self._peephole:
            i += tf.get_variable('W_ci', c.shape[1:]) * c
            f += tf.get_variable('W_cf', c.shape[1:]) * c
        if self._normalize:
            j = tf.contrib.layers.layer_norm(j)
            i = tf.contrib.layers.layer_norm(i)
            f = tf.contrib.layers.layer_norm(f)
        f = tf.sigmoid(f + self._forget_bias)
        i = tf.sigmoid(i)
        c = c * f + i * self._activation(j)
        if self._peephole:
            o += tf.get_variable('W_co', c.shape[1:]) * c
        if self._normalize:
            o = tf.contrib.layers.layer_norm(o)
            c = tf.contrib.layers.layer_norm(c)
        o = tf.sigmoid(o)
        h = o * self._activation(c)
        state = tf.nn.rnn_cell.LSTMStateTuple(c, h)
        return h, state
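A minimal sketch of a TensorFlow 2 approach, assuming the built-in Keras layer covers your use case: tf.keras.layers.ConvLSTM2D implements the same Xingjian et al. 2015 cell (without the peephole and layer-norm options of the snippet above), so often no manual port is needed:

import tensorflow as tf

layer = tf.keras.layers.ConvLSTM2D(
    filters=32,
    kernel_size=(3, 3),
    padding='same',
    return_sequences=True,  # return the full sequence, like an RNN layer
)
x = tf.random.normal((2, 10, 64, 64, 3))  # (batch, time, H, W, channels)
y = layer(x)                              # (2, 10, 64, 64, 32)

If you do need the peephole or normalization variants, the cell logic would have to be reimplemented as a subclass of tf.keras.layers.Layer, replacing tf.get_variable with self.add_weight and tf.contrib.layers.layer_norm with tf.keras.layers.LayerNormalization.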

Why can't I get this Runge-Kutta solver to converge as the time step decreases?

For reasons, I need to implement the Runge-Kutta 4 method in PyTorch (so no, I'm not going to use scipy.odeint). I tried, and I get weird results on the simplest test case: solving x' = x with x(0) = 1 (analytical solution: x = exp(t)). Basically, as I reduce the time step, I cannot get the numerical error to go down. I'm able to do it with the simpler Euler method, but not with the Runge-Kutta 4 method, which makes me suspect some floating point issue here (maybe I'm missing some hidden conversion from double precision to single?).
import torch
import numpy as np
import matplotlib.pyplot as plt

def Euler(f, IC, time_grid):
    y0 = torch.tensor([IC])
    time_grid = time_grid.to(y0[0])
    values = y0
    for i in range(0, time_grid.shape[0] - 1):
        t_i = time_grid[i]
        t_next = time_grid[i+1]
        y_i = values[i]
        dt = t_next - t_i
        dy = f(t_i, y_i) * dt
        y_next = y_i + dy
        y_next = y_next.unsqueeze(0)
        values = torch.cat((values, y_next), dim=0)
    return values

def RungeKutta4(f, IC, time_grid):
    y0 = torch.tensor([IC])
    time_grid = time_grid.to(y0[0])
    values = y0
    for i in range(0, time_grid.shape[0] - 1):
        t_i = time_grid[i]
        t_next = time_grid[i+1]
        y_i = values[i]
        dt = t_next - t_i
        dtd2 = 0.5 * dt
        f1 = f(t_i, y_i)
        f2 = f(t_i + dtd2, y_i + dtd2 * f1)
        f3 = f(t_i + dtd2, y_i + dtd2 * f2)
        f4 = f(t_next, y_i + dt * f3)
        dy = 1/6 * dt * (f1 + 2 * (f2 + f3) + f4)
        y_next = y_i + dy
        y_next = y_next.unsqueeze(0)
        values = torch.cat((values, y_next), dim=0)
    return values

# differential equation
def f(T, X):
    return X

# initial condition
IC = 1.

# integration interval
def integration_interval(steps, ND=1):
    return torch.linspace(0, ND, steps)

# analytical solution
def analytical_solution(t_range):
    return np.exp(t_range)

# test a numerical method
def test_method(method, t_range, analytical_solution):
    numerical_solution = method(f, IC, t_range)
    L_inf_err = torch.dist(numerical_solution, analytical_solution, float('inf'))
    return L_inf_err

if __name__ == '__main__':
    Euler_error = np.array([0., 0., 0.])
    RungeKutta4_error = np.array([0., 0., 0.])
    indices = np.arange(1, Euler_error.shape[0]+1)
    n_steps = np.power(10, indices)
    for i, n in np.ndenumerate(n_steps):
        t_range = integration_interval(steps=n)
        solution = analytical_solution(t_range)
        Euler_error[i] = test_method(Euler, t_range, solution).numpy()
        RungeKutta4_error[i] = test_method(RungeKutta4, t_range, solution).numpy()
    plots_path = "./plots"
    a = plt.figure()
    plt.xscale('log')
    plt.yscale('log')
    plt.plot(n_steps, Euler_error, label="Euler error", linestyle='-')
    plt.plot(n_steps, RungeKutta4_error, label="RungeKutta 4 error", linestyle='-.')
    plt.legend()
    plt.savefig(plots_path + "/errors.png")
The result (log-log plot of the L-infinity error versus the number of steps):
As you can see, the Euler method converges (slowly, as expected of a first order method). However, the Runge-Kutta4 method does not converge as the time step gets smaller and smaller. The error goes down initially, and then up again. What's the issue here?
The reason is indeed a floating point precision issue. torch defaults to single precision, so once the truncation error becomes small enough, the total error is basically determined by the roundoff error, and reducing the truncation error further by increasing the number of steps (i.e., decreasing the time step) doesn't lead to any decrease in the total error.
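A quick way to see the roundoff floor of each precision is to print the machine epsilon:

import torch
print(torch.finfo(torch.float32).eps)  # ~1.19e-07
print(torch.finfo(torch.float64).eps)  # ~2.22e-16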
To fix this, we need to enforce double-precision 64-bit floats for all floating-point torch tensors and NumPy arrays. The code below spells this out as torch.float64 and np.float64; note that in current PyTorch and NumPy, torch.double and np.double are simply aliases for these 64-bit types, so either spelling works. Here's the fixed code:
import torch
import numpy as np
import matplotlib.pyplot as plt

def Euler(f, IC, time_grid):
    y0 = torch.tensor([IC], dtype=torch.float64)
    time_grid = time_grid.to(y0[0])
    values = y0
    for i in range(0, time_grid.shape[0] - 1):
        t_i = time_grid[i]
        t_next = time_grid[i+1]
        y_i = values[i]
        dt = t_next - t_i
        dy = f(t_i, y_i) * dt
        y_next = y_i + dy
        y_next = y_next.unsqueeze(0)
        values = torch.cat((values, y_next), dim=0)
    return values

def RungeKutta4(f, IC, time_grid):
    y0 = torch.tensor([IC], dtype=torch.float64)
    time_grid = time_grid.to(y0[0])
    values = y0
    for i in range(0, time_grid.shape[0] - 1):
        t_i = time_grid[i]
        t_next = time_grid[i+1]
        y_i = values[i]
        dt = t_next - t_i
        dtd2 = 0.5 * dt
        f1 = f(t_i, y_i)
        f2 = f(t_i + dtd2, y_i + dtd2 * f1)
        f3 = f(t_i + dtd2, y_i + dtd2 * f2)
        f4 = f(t_next, y_i + dt * f3)
        dy = 1/6 * dt * (f1 + 2 * (f2 + f3) + f4)
        y_next = y_i + dy
        y_next = y_next.unsqueeze(0)
        values = torch.cat((values, y_next), dim=0)
    return values

# differential equation
def f(T, X):
    return X

# initial condition
IC = 1.

# integration interval
def integration_interval(steps, ND=1):
    return torch.linspace(0, ND, steps, dtype=torch.float64)

# analytical solution
def analytical_solution(t_range):
    return np.exp(t_range, dtype=np.float64)

# test a numerical method
def test_method(method, t_range, analytical_solution):
    numerical_solution = method(f, IC, t_range)
    L_inf_err = torch.dist(numerical_solution, analytical_solution, float('inf'))
    return L_inf_err

if __name__ == '__main__':
    Euler_error = np.array([0., 0., 0.], dtype=np.float64)
    RungeKutta4_error = np.array([0., 0., 0.], dtype=np.float64)
    indices = np.arange(1, Euler_error.shape[0]+1)
    n_steps = np.power(10, indices)
    for i, n in np.ndenumerate(n_steps):
        t_range = integration_interval(steps=n)
        solution = analytical_solution(t_range)
        Euler_error[i] = test_method(Euler, t_range, solution).numpy()
        RungeKutta4_error[i] = test_method(RungeKutta4, t_range, solution).numpy()
    plots_path = "./plots"
    a = plt.figure()
    plt.xscale('log')
    plt.yscale('log')
    plt.plot(n_steps, Euler_error, label="Euler error", linestyle='-')
    plt.plot(n_steps, RungeKutta4_error, label="RungeKutta 4 error", linestyle='-.')
    plt.legend()
    plt.savefig(plots_path + "/errors.png")
Result (the same log-log error plot, for the double-precision code):
Now, as we decrease the time step, the error of the RungeKutta4 approximation decreases at the expected fourth-order rate.
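As an aside (an alternative, not part of the answer above): calling torch.set_default_dtype(torch.float64) once at the top of the script makes new floating-point tensors default to 64 bits, which avoids threading dtype= through every tensor constructor:

import torch
torch.set_default_dtype(torch.float64)
print(torch.tensor([1.]).dtype)  # torch.float64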

TypeError: Cannot cast array data from dtype('O') to dtype('int64') according to the rule 'safe'

When I run code below, I get:
TypeError: Cannot cast array data from dtype('O') to dtype('int64') according to the rule 'safe'
But I don't know where dtype('O') and dtype('int64') come from, or where the cast happens. Does anyone know where to look?
import collections
import numpy as np
import math
import pandas as pd

def pre_prob(y):
    y_dict = collections.Counter(y)
    pre_probab = np.ones(2)
    for i in range(0, 2):
        pre_probab[i] = y_dict[i]/y.shape[0]
    return pre_probab

def mean_var(X, y):
    n_features = X.shape[1]
    m = np.ones((2, n_features))
    v = np.ones((2, n_features))
    n_0 = np.bincount(y)[np.nonzero(np.bincount(y))[0]][0]
    x0 = np.ones((n_0, n_features))
    x1 = np.ones((X.shape[0] - n_0, n_features))
    k = 0
    for i in range(0, X.shape[0]):
        if y[i] == 0:
            x0[k] = X[i]
            k = k + 1
    k = 0
    for i in range(0, X.shape[0]):
        if y[i] == 1:
            x1[k] = X[i]
            k = k + 1
    for j in range(0, n_features):
        m[0][j] = np.mean(x0.T[j])
        v[0][j] = np.var(x0.T[j])*(n_0/(n_0 - 1))
        m[1][j] = np.mean(x1.T[j])
        v[1][j] = np.var(x1.T[j])*((X.shape[0]-n_0)/((X.shape[0] - n_0) - 1))
    return m, v  # mean and variance

def prob_feature_class(m, v, x):
    n_features = m.shape[1]
    pfc = np.ones(2)
    for i in range(0, 2):
        product = 1
        for j in range(0, n_features):
            product = product * (1/math.sqrt(2*3.14*v[i][j])) * math.exp(-0.5 * pow((x[j] - m[i][j]), 2)/v[i][j])
        pfc[i] = product
    return pfc

def GNB(X, y, x):
    m, v = mean_var(X, y)
    pfc = prob_feature_class(m, v, x)
    pre_probab = pre_prob(y)
    pcf = np.ones(2)
    total_prob = 0
    for i in range(0, 2):
        total_prob = total_prob + (pfc[i] * pre_probab[i])
    for i in range(0, 2):
        pcf[i] = (pfc[i] * pre_probab[i])/total_prob
    prediction = int(pcf.argmax())
    return m, v, pre_probab, pfc, pcf, prediction
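A likely cause (an assumption, since the calling code isn't shown in the question): np.bincount requires an integer array, and it raises exactly this TypeError when y arrives with object dtype, e.g. as a pandas Series holding Python objects. A minimal fix sketch is to convert y before use:

import numpy as np
y = np.asarray(y).astype(np.int64)  # ensure integer dtype before np.bincount
counts = np.bincount(y)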

Backpropagation with a self-defined loss function and network

I want to create a CNN network for object localization (it is given that there is only one object). For this I am using some general layers, and at the end I want to get the corners nearest to and farthest from the origin. I am also using a self-defined loss function, which is (100 - intersection over union in %). The loss is not converging. What may be the problem? Will backpropagation work with this network, or is it some other problem? Below is the code:
Network:
import torch
import torch.nn as nn
import torch.nn.functional as F

class convnet(nn.Module):
    def __init__(self):
        super(convnet, self).__init__()
        self.conv1 = nn.Conv2d(1, 4, kernel_size=5)
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=3)
        self.conv2 = nn.Conv2d(4, 8, kernel_size=5)
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=3)
        self.conv3 = nn.Conv2d(8, 16, kernel_size=5)
        self.pool3 = nn.MaxPool2d(kernel_size=3, stride=3)
        self.fc1 = nn.Linear(5040, 1000)
        self.fc2 = nn.Linear(1000, 84)
        self.fc3 = nn.Linear(84, 4)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = F.relu(self.conv2(x))
        x = self.pool2(x)
        x = F.relu(self.conv3(x))
        x = self.pool3(x)
        x = x.view(-1, 5040)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        a = self.sigmoid(self.fc3(x))
        c = torch.zeros(a.shape[0], 2)
        for idx, x in enumerate(a):
            d1 = x[0] ** 2 + x[1] ** 2
            d2 = x[2] ** 2 + x[3] ** 2
            d3 = x[0] ** 2 + x[3] ** 2
            d4 = x[2] ** 2 + x[1] ** 2
            dmin = min(d1, d2, d3, d4)
            if d1 == dmin:
                c[idx] = torch.tensor([x[0], x[1]])
            elif d2 == dmin:
                c[idx] = torch.tensor([x[2], x[3]])
            elif d3 == dmin:
                c[idx] = torch.tensor([x[0], x[3]])
            elif d4 == dmin:
                c[idx] = torch.tensor([x[2], x[1]])
        m = torch.tensor([[640, 480, 640, 480]]).type(torch.DoubleTensor).cuda()
        return c*m

    def sigmoid(self, z):
        return 1/(1+torch.exp(-z))
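One thing worth checking (an observation, not from the original post): c[idx] = torch.tensor([x[0], x[1]]) copies the coordinate values into a brand-new tensor with no autograd history, so no gradient can flow back through the corner selection into the network. torch.stack keeps the graph intact; a hypothetical fix sketch for one branch:

if d1 == dmin:
    c[idx] = torch.stack([x[0], x[1]])  # keeps grad history, unlike torch.tensor([...])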
Loss function:
def iou(box_a, box_b):
    A = box_a.size(0)
    B = box_b.size(0)
    max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
                       box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
    min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
                       box_b[:, :2].unsqueeze(0).expand(A, B, 2))
    inter = torch.clamp((max_xy - min_xy), min=0)
    inter = inter[:, :, 0] * inter[:, :, 1]
    area_a = ((box_a[:, 2]-box_a[:, 0]) *
              (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter)
    area_b = ((box_b[:, 2]-box_b[:, 0]) *
              (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter)
    union = area_a + area_b - inter
    return ((inter / union)*100/float(A*A)).sum()

def criterion(output, labels):
    return (100 - iou(output, labels))
You can check full code here: link
