NameError in Python for self in the for loop - python-3.x

class Support_Vector_Machine():
    def __init__(self, visualization=True):
        self.visualization = visualization
        self.colors = {1: 'r', -1: 'b'}
        if self.visualization:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1, 1, 1)

    # train
    def fit(self, data):
        self.data = data
        opt_dict = {}
        transforms = [[1, 1],
                      [-1, -1], [1, -1], [-1, 1]]
        all_data = []

    for yi in self.data:
        for featureset in self.data[yi]:
            for feature in featureset:
                all_data.append(feature)

    self.max_feature_value = max(all_data)
    self.min_feature_value = min(all_data)
    all_data = None
In the above code, I am trying to loop a variable yi through self.data, which throws a NameError.
Help me resolve the error.
It says:

    line 21, in Support_Vector_Machine
        for yi in self.data:
    NameError: name 'self' is not defined

In Python, indentation matters. The whole for block is misindented: it sits at class-body level, where the name self is not defined, because self only exists inside a method that receives it as a parameter. Indent the block so it belongs to fit:
import matplotlib.pyplot as plt

class Support_Vector_Machine():
    def __init__(self, visualization=True):
        self.visualization = visualization
        self.colors = {1: 'r', -1: 'b'}
        if self.visualization:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1, 1, 1)

    # train
    def fit(self, data):
        self.data = data
        opt_dict = {}
        transforms = [[1, 1],
                      [-1, -1], [1, -1], [-1, 1]]
        all_data = []
        for yi in self.data:
            for featureset in self.data[yi]:
                for feature in featureset:
                    all_data.append(feature)
        self.max_feature_value = max(all_data)
        self.min_feature_value = min(all_data)
        all_data = None
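To see why the misindented version fails: the body of a class runs once, at class-definition time, and no self exists there; self is only bound when a method is called on an instance. A tiny made-up illustration:

class Demo:
    greeting = "hello"        # class-body code runs at definition time; no `self` here
    # print(self.greeting)    # uncommenting this raises NameError: name 'self' is not defined

    def method(self):
        return self.greeting  # fine: `self` is the method's first parameter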

Related

Class raises "name 'smth' is not defined" error, though I've already defined it by 'self.smth = ~~~imputer()'?

I am currently getting a "name 'imputer' is not defined" error.
The thing is, in the __init__(self) method I have already defined 'imputer' by declaring 'self.imputer = IterativeImputer(max_iter=10)'.
Can anyone explain why this happens?
The whole code is as follows:
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer
from sklearn.pipeline import Pipeline

away_defencePressure_idx = 15

class IterImputer(BaseEstimator, TransformerMixin):
    def __init__(self):
        self.imputer = IterativeImputer(max_iter=10)

    def fit(self, X, y=None):
        imputer.fit(X)  # NameError: 'imputer' is not defined
        return self

    def transform(self, X, y=None):
        imputed = imputer.transform(X)
        X.T[away_defencePressure_idx] = imputed.T[away_defencePressure_idx]
        return X

p = Pipeline([
    ('imputerA', IterImputer())
])
p.fit(X)  # X is the asker's feature matrix, defined elsewhere
You defined self.imputer but never a bare name imputer. Inside fit and transform, Python resolves imputer as a local or global name and finds nothing; instance attributes have to be reached through self. Just add self. in front of it:
def fit(self, X, y=None):
    self.imputer.fit(X)
    return self

def transform(self, X, y=None):
    imputed = self.imputer.transform(X)
    X.T[away_defencePressure_idx] = imputed.T[away_defencePressure_idx]
    return X
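A self-contained sketch of the lookup rule (the Counter class is illustrative, not from the question): a bare name inside a method is resolved as a local or global variable, never as an instance attribute.

class Counter:
    def __init__(self):
        self.count = 0       # instance attribute, stored on the object

    def broken(self):
        return count         # NameError: `count` is looked up as a local/global name

    def working(self):
        return self.count    # attribute access must go through `self`

c = Counter()
print(c.working())           # prints 0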

Learnable scalar weight in PyTorch

I have two neural networks running in parallel. Each gives a feature map of the same size, say Nx1. Now I want a weighted average of these embeddings, like w1 * embed1 + w2 * embed2. I have tried these (1, 2), but the weights are not updating. Any help would be appreciated. Here is how I am trying to do it:
class LinearWeightedAvg(nn.Module):
    def __init__(self, n_inputs):
        super(LinearWeightedAvg, self).__init__()
        self.weight1 = Variable(torch.randn(1), requires_grad=True).cuda()
        self.weight2 = Variable(torch.randn(1), requires_grad=True).cuda()

    def forward(self, inp_embed):
        return self.weight1 * inp_embed[0] + self.weight2 * inp_embed[1]

class EmbedBranch(nn.Module):
    def __init__(self, feat_dim, embedding_dim):
        super(EmbedBranch, self).__init__()
        fc_layer1 = fc_layer

    def forward(self, x):
        x = self.fc_layer1(x)
        return x

class EmbeddingNetwork(nn.Module):
    def __init__(self, args, N):
        super(EmbeddingNetwork, self).__init__()
        embedding_dim = N
        self.embed1 = EmbedBranch(N, N)
        self.embed2 = EmbedBranch(N, N)
        self.comb_branch = LinearWeightedAvg(metric_dim)
        self.args = args
        if args.cuda:
            self.cuda()

    def forward(self, emb1, emb2):
        embeds1 = self.text_branch(emb1)
        embeds2 = self.image_branch(emb2)
        combined = self.comb_branch([embeds1, embeds2])
        return combined

    def train_forward(self, embed1, embed2):
        combined = self(embed1, embed2)

embeds = model.train_forward(embed1, embed2)
loss = loss_func(embeds, labels)
running_loss.update(loss.data.item())
optimizer.zero_grad()
loss.backward()
Also, I want the weights to stay within the 0-1 range.
Thanks,
You should use self.weightx = torch.nn.Parameter(your_initial_tensor) to register a tensor as a learnable parameter of the model. A plain tensor created in __init__ is never registered, so model.parameters() does not see it and the optimizer cannot update it.
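A minimal sketch of the registered version (the sigmoid squashing is an assumption on my part to satisfy the 0-1 requirement, not something the answer prescribes):

import torch
import torch.nn as nn

class LinearWeightedAvg(nn.Module):
    def __init__(self):
        super().__init__()
        # nn.Parameter registers the tensors with the module, so
        # model.parameters() (and hence the optimizer) will update them.
        self.weight1 = nn.Parameter(torch.randn(1))
        self.weight2 = nn.Parameter(torch.randn(1))

    def forward(self, inp_embed):
        # Assumption: sigmoid keeps the effective weights in (0, 1).
        w1 = torch.sigmoid(self.weight1)
        w2 = torch.sigmoid(self.weight2)
        return w1 * inp_embed[0] + w2 * inp_embed[1]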

Using Captum with PyTorch Lightning?

So I tried to use Captum with PyTorch Lightning. I am having issues when passing the Module to Captum, since it seems to do weird reshaping of the tensors.
For example, in the minimal example below, the Lightning code works fine on its own.
But when I use IntegratedGradients with n_steps >= 1, I get an issue.
The code of the LightningModule is not that important, I would say; I wonder more about the code line at the very bottom.
Does anyone know how to work around this?
from captum.attr import IntegratedGradients
from torch import nn, optim, rand, sum as tsum, reshape, device
import torch.nn.functional as F
from pytorch_lightning import seed_everything, LightningModule, Trainer
from torch.utils.data import DataLoader, Dataset

SAMPLE_DIM = 3

class CustomDataset(Dataset):
    def __init__(self, samples=42):
        self.dataset = rand(samples, SAMPLE_DIM).cuda().float() * 2 - 1

    def __getitem__(self, index):
        return (self.dataset[index], (tsum(self.dataset[index]) > 0).cuda().float())

    def __len__(self):
        return self.dataset.size()[0]

class OurModel(LightningModule):
    def __init__(self):
        super(OurModel, self).__init__()
        # Network layers
        self.linear = nn.Linear(SAMPLE_DIM, 2048)
        self.linear2 = nn.Linear(2048, 1)
        self.output = nn.Sigmoid()
        # Hyper-parameters, that we will auto-tune using lightning!
        self.lr = 0.001
        self.batch_size = 512

    def forward(self, x):
        x = self.linear(x)
        x = self.linear2(x)
        output = self.output(x)
        return reshape(output, (-1,))

    def configure_optimizers(self):
        return optim.Adam(self.parameters(), lr=self.lr)

    def train_dataloader(self):
        loader = DataLoader(CustomDataset(samples=1000), batch_size=self.batch_size, shuffle=True)
        return loader

    def training_step(self, batch, batch_nb):
        x, y = batch
        loss = F.binary_cross_entropy(self(x), y)
        return {'loss': loss, 'log': {'train_loss': loss}}

if __name__ == '__main__':
    seed_everything(42)
    device = device("cuda")
    model = OurModel().to(device)
    trainer = Trainer(max_epochs=2, min_epochs=1, auto_lr_find=False,
                      progress_bar_refresh_rate=10)
    trainer.fit(model)
    # ok Now the Problem
    test_input = CustomDataset(samples=1).__getitem__(0)[0].requires_grad_()
    ig = IntegratedGradients(model)
    attr, delta = ig.attribute(test_input, target=1, return_convergence_delta=True)
The solution was to wrap the forward function. Make sure that the shape going into model.forward() is correct!
# Solution is this wrapper function
import torch

def modified_f(in_vec):
    # Shape here is wrong
    print("IN:", in_vec.size())
    x = torch.reshape(in_vec, (int(in_vec.size()[0] / SAMPLE_DIM), SAMPLE_DIM))
    print("x:", x.size())
    res = model.forward(x)
    print("res:", res.size())
    res = torch.reshape(res, (res.size()[0], 1))
    print("res2:", res.size())
    return res

ig = IntegratedGradients(modified_f)
# STEP_AMOUNT is the desired number of integration steps, defined elsewhere
attr, delta = ig.attribute(test_input, return_convergence_delta=True, n_steps=STEP_AMOUNT)

TypeError: __init__() missing 1 required positional argument: 'axis' - how can I solve it?

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.scale as mscale
import matplotlib.transforms as mtransforms

class CustomScale(mscale.ScaleBase):
    name = 'custom'

    def __init__(self, axis, **kwargs):
        mscale.ScaleBase.__init__(self)
        self.thresh = None  # thresh

    def get_transform(self):
        return self.CustomTransform(self.thresh)

    def set_default_locators_and_formatters(self, axis):
        pass

    class CustomTransform(mtransforms.Transform):
        input_dims = 1
        output_dims = 1
        is_separable = True

        def __init__(self, thresh):
            mtransforms.Transform.__init__(self)
            self.thresh = thresh

        def transform_non_affine(self, a):
            return np.log(1 + a)

        def inverted(self):
            return CustomScale.InvertedCustomTransform(self.thresh)

    class InvertedCustomTransform(mtransforms.Transform):
        input_dims = 1
        output_dims = 1
        is_separable = True

        def __init__(self, thresh):
            mtransforms.Transform.__init__(self)
            self.thresh = thresh

        def transform_non_affine(self, a):
            return np.exp(a) - 1

        def inverted(self):
            return CustomScale.CustomTransform(self.thresh)

# Now that the Scale class has been defined, it must be registered so
# that ``matplotlib`` can find it.
mscale.register_scale(CustomScale)

z = [0, 0.1, 0.3, 0.9, 1, 2, 5]
thick = [20, 40, 20, 60, 37, 32, 21]

fig = plt.figure(figsize=(8, 5))
ax1 = fig.add_subplot(111)
ax1.plot(z, thick, marker='o', linewidth=2, c='k')
plt.xlabel(r'$\rm{redshift}$', size=16)
plt.ylabel(r'$\rm{thickness\ (kpc)}$', size=16)
plt.gca().set_yscale('custom')
plt.show()
It always gives me an error like this: TypeError: __init__() missing 1 required positional argument: 'axis'.
I don't know how to fix this. If I try:
...
plt.gca().set_yscale('custom', axis=1)
...
I get: TypeError: scale_factory() got multiple values for argument 'axis'
In the __init__ call, you're missing axis. Instead of:
mscale.ScaleBase.__init__(self)
it should be:
mscale.ScaleBase.__init__(self, axis)
I think you're missing thresh as well in the __init__ calls to CustomTransform and InvertedCustomTransform.
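A minimal sketch of the corrected constructor (assuming a matplotlib version whose ScaleBase.__init__ accepts the axis argument):

def __init__(self, axis, **kwargs):
    # Forward `axis` to the base class instead of dropping it.
    mscale.ScaleBase.__init__(self, axis)
    self.thresh = None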

I can't figure out what's wrong with the Python code below. It keeps giving me AttributeError: 'Dog' object has no attribute '_Dog__name'

class Animal:
    __name = None
    __height = 0
    __weight = 0
    __sound = 0

    # A constructor is called to set up / initialize an object
    def __init__(self, name, height, weight, sound):
        self.__name = name
        self.__height = height
        self.__weight = weight
        self.__sound = sound

    def set_name(self, name):
        self.__name = name

    def get_name(self):
        return self.__name

    def set_height(self, height):
        self.__height = height

    def get_height(self, height):
        return self.__height

    def set_weight(self, weight):
        self.__weight = weight

    def get_weight(self):
        return self.__weight

    def set_sound(self, sound):
        self.__sound = sound

    def get_sound(self):
        return self.__sound

    def get_type(self):
        print("Animal")

    def toString(self):
        return "{} is {} cm tall and {} kilograms in weight and makes the sound {}".format(self.__name, self.__height,
                                                                                           self.__weight, self.__sound)

cat = Animal('Whiskers', 33, 20, 'meow')
print(cat.get_name())
print(cat.toString())
class Dog(Animal):
    __owner = ""

    def __init__(self, name, height, weight, sound, owner):
        self.__owner = owner
        super(Dog, self).__init__(name, height, weight, sound)

    def set_owner(self, owner):
        self.__owner = owner

    def get_owner(self):
        return self.__owner

    def get_type(self):
        print("dog")

    def toString(self):
        return "{} is {} cm tall and weighs {}. He says {} and his owner is {}".format(self.__name,
                                                                                       self.__height,
                                                                                       self.__weight,
                                                                                       self.__sound,
                                                                                       self.__owner)

Spot = Dog("Spot", 45, 77, "Ruff", "Amit")
print(Spot.toString())
Here the class Animal is being called in to use its attributes. I saw this in a tutorial video; it seems to run fine in the video but not when I try it.
The issue is "name mangling" (cf. https://docs.python.org/2/tutorial/classes.html#private-variables-and-class-local-references). A double-underscore attribute is rewritten by the compiler to _ClassName__attr using the class in which the access appears, so inside Dog, self.__name becomes self._Dog__name, while the attribute was actually stored as _Animal__name. If you wish to access superclass variables with double underscores, you should prefix them with the defining class's name:
def toString(self):
    return "{} is {} cm tall and weighs {}. He says {} and his owner is {}".\
        format(self._Animal__name,
               self._Animal__height,
               self._Animal__weight,
               self._Animal__sound,
               self.__owner)
Alternatively, you could call the getters inherited from Animal, such as self.get_name().
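A minimal, runnable illustration of the mangling rule (Base and Child are made-up names for the demo):

class Base:
    def __init__(self):
        self.__secret = 42          # stored on the instance as _Base__secret

class Child(Base):
    def peek(self):
        return self.__secret        # compiled as self._Child__secret -> AttributeError
    def peek_mangled(self):
        return self._Base__secret   # the explicit mangled name works

print(Child().peek_mangled())       # prints 42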
