I used the FLAVA model example code from this link:
https://huggingface.co/docs/transformers/model_doc/flava#transformers.FlavaModel.forward.example
But I am getting the following error:
'FlavaModelOutput' object has no attribute 'contrastive_logits_per_image'
I tried using the FlavaForPreTraining model instead, so the updated code was:
from PIL import Image
import requests
from transformers import FlavaProcessor, FlavaForPreTraining

model = FlavaForPreTraining.from_pretrained("facebook/flava-full")
processor = FlavaProcessor.from_pretrained("facebook/flava-full")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(
    text=["a photo of a cat"],
    images=image,
    return_tensors="pt",
    padding=True,
    return_codebook_pixels=True,
)
inputs.update({"input_ids_masked": inputs.input_ids})

outputs = model(**inputs)
logits_per_image = outputs.contrastive_logits_per_image  # this is the image-text similarity score
probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
but I'm still getting this error:
/usr/local/lib/python3.7/dist-packages/transformers/modeling_utils.py:714: FutureWarning: The `device` argument is deprecated and will be removed in v5 of Transformers.
"The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-44-bdb428b8184a> in <module>()
----> 1 outputs = model(**inputs)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/transformers/models/flava/modeling_flava.py in forward(self, input_ids, input_ids_masked, pixel_values, codebook_pixel_values, attention_mask, token_type_ids, bool_masked_pos, position_ids, image_attention_mask, skip_unmasked_multimodal_encoder, mlm_labels, mim_labels, itm_labels, output_attentions, output_hidden_states, return_dict, return_loss)
1968 if mim_labels is not None:
1969 mim_labels = self._resize_to_2d(mim_labels)
-> 1970 bool_masked_pos = self._resize_to_2d(bool_masked_pos)
1971 mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
1972
/usr/local/lib/python3.7/dist-packages/transformers/models/flava/modeling_flava.py in _resize_to_2d(self, x)
1765
1766 def _resize_to_2d(self, x: torch.Tensor):
-> 1767 if x.dim() > 2:
1768 x = x.view(x.size(0), -1)
1769 return x
AttributeError: 'NoneType' object has no attribute 'dim'
Can anyone suggest what's going wrong?
FLAVA's author here.
Can you please add the following arguments to your processor call:
return_codebook_pixels=True, return_image_mask=True
Here is an example colab if you want to see how to call FLAVA model: https://colab.research.google.com/drive/1c3l4r4cEA5oXfq9uXhrJibddwRkcBxzP?usp=sharing#scrollTo=xtkrSjfhCdv-
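For reference, a minimal sketch of the corrected call with those two arguments added (untested, using the same model and processor as above):

inputs = processor(
    text=["a photo of a cat"],
    images=image,
    return_tensors="pt",
    padding=True,
    return_codebook_pixels=True,  # codebook pixels for the image targets
    return_image_mask=True,       # supplies bool_masked_pos, which was None in the traceback
)
inputs.update({"input_ids_masked": inputs.input_ids})

outputs = model(**inputs)
logits_per_image = outputs.contrastive_logits_per_image
probs = logits_per_image.softmax(dim=1)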
I'm building an MLP model for a project and ran into the following error. I'm new to DL and PyTorch; I know the error is in the hidden layer, but I'm confused about the hidden layer's input and output features and why it is trying to multiply with a 1x1 matrix. Any help would be appreciated.
Traceback:
RuntimeError Traceback (most recent call last)
Input In [92], in <cell line: 22>()
22 for epoch in range(1, num_epochs+1):
23 print(f"\nEpoch: {epoch}/{num_epochs}")
---> 25 train(model, device, train_loader, optimizer, criterion, epoch)
26 test(model, device, test_loader, criterion, mode = "Test")
Input In [90], in train(model, device, train_loader, optimizer, criterion, epoch)
13 optimizer.zero_grad()
15 # pass the batch to the model and assign the output to variable named y_pred
---> 16 y_pred = model(batch_idx)
18 # calculate the loss (use CrossEntropyLoss in pytorch)
19 loss = criterion(y_pred, target)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
Input In [89], in MLP.forward(self, x)
23 print(x.size())
24 print(x.size())
---> 25 x = self.relu(self.hidden(x))
26 print(x.size())
27 x = self.relu(self.classifier(x))
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\nn\modules\linear.py:114, in Linear.forward(self, input)
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x1 and 512x22400)
Below is my code:
import torch
import torch.nn as nn

class MLP(nn.Module):
    def __init__(self, num_features, num_classes, num_hidden):
        super(MLP, self).__init__()
        # define a linear layer with output channels as 32
        self.hidden = nn.Linear(num_hidden, num_features * 70 * 32)
        self.relu = torch.nn.ReLU()
        # define a linear layer with output features corresponding to the number of classes
        self.classifier = nn.Linear(num_features * 70 * 32, 5)

    def forward(self, x):
        x = torch.tensor(x).unsqueeze(dim=0)
        x = self.relu(self.hidden(x))
        out = self.relu(self.classifier(x))
        return out

num_hidden = 512
num_features = 10
classes = [0, 1, 2, 3, 4]
num_classes = len(classes)
I was trying to make a neural network in PyTorch; however, I ran into the error below. I'm still new to this topic, so I'm not sure how to go about solving it.
Code:
import torch
import torch.nn as nn
import torch.nn.functional as F

class ANN_Model(nn.Module):
    def __init__(self, input_features=8, hidden1=8, hidden2=200, hidden3=200, hidden4=300,
                 hidden5=300, hidden6=400, hidden7=400, hidden8=300, hidden9=300, out_features=2):
        super().__init__()
        self.f_connected1 = nn.Linear(input_features, hidden1)
        self.f_connected2 = nn.Linear(hidden1, hidden2)
        self.f_connected2 = nn.Linear(hidden2, hidden3)
        self.f_connected2 = nn.Linear(hidden3, hidden4)
        self.f_connected2 = nn.Linear(hidden4, hidden5)
        self.f_connected2 = nn.Linear(hidden5, hidden6)
        self.f_connected2 = nn.Linear(hidden6, hidden7)
        self.f_connected2 = nn.Linear(hidden7, hidden8)
        self.f_connected2 = nn.Linear(hidden8, hidden9)
        self.out = nn.Linear(hidden9, out_features)

    def forward(self, x):
        x = F.relu(self.f_connected1(x))
        x = F.relu(self.f_connected2(x))
        x = F.relu(self.f_connected3(x))
        x = F.relu(self.f_connected4(x))
        x = F.relu(self.f_connected5(x))
        x = F.relu(self.f_connected6(x))
        x = F.relu(self.f_connected7(x))
        x = F.relu(self.f_connected8(x))
        x = F.relu(self.f_connected9(x))
        x = self.out(x)
        return x
loss_function = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

epochs = 500
final_losses = []
for i in range(epochs):
    i = i + 1
    y_pred = model.forward(X_train)
    loss = loss_function(y_pred, y_train)
    final_losses.append(loss.item())
    if i % 10 == 1:
        print("Epoch number: {} and the loss: {}".format(i, loss.item()))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Input In [13], in <cell line: 3>()
3 for i in range(epochs):
4 i = i + 1
----> 5 y_pred = model.forward(X_train)
6 loss=loss_function(y_pred, y_train)
7 final_losses.append(loss.item())
Input In [8], in ANN_Model.forward(self, x)
14 def forward(self,x):
15 x=F.relu(self.f_connected1(x))
---> 16 x=F.relu(self.f_connected2(x))
17 x=F.relu(self.f_connected3(x))
18 x=F.relu(self.f_connected4(x))
File ~/miniconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
File ~/miniconda3/lib/python3.9/site-packages/torch/nn/modules/linear.py:114, in Linear.forward(self, input)
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (691x8 and 300x300)
I found it: in your model's constructor __init__, every layer after the first is assigned to self.f_connected2, so only the last assignment survives and that layer expects input of shape (batch_size, 300). The attributes f_connected3 through f_connected9 are never created.
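A sketch of the corrected constructor, giving each layer its own attribute name so that f_connected3 through f_connected9 actually exist (same sizes as in the question):

self.f_connected1 = nn.Linear(input_features, hidden1)
self.f_connected2 = nn.Linear(hidden1, hidden2)
self.f_connected3 = nn.Linear(hidden2, hidden3)
self.f_connected4 = nn.Linear(hidden3, hidden4)
self.f_connected5 = nn.Linear(hidden4, hidden5)
self.f_connected6 = nn.Linear(hidden5, hidden6)
self.f_connected7 = nn.Linear(hidden6, hidden7)
self.f_connected8 = nn.Linear(hidden7, hidden8)
self.f_connected9 = nn.Linear(hidden8, hidden9)
self.out = nn.Linear(hidden9, out_features)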
My model looks like the following:
class RankingModel(nn.Module):
    def __init__(self, conf: Dict[Text, Any], **kwargs: Any):
        super(RankingModel, self).__init__()
        self.conf = deepcopy(conf)
        ......

    def forward(self, **_features):  # the model input is a torch.utils.data.Dataset()
        ### model body part.
        return prob
Then I train my model using:
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=training_dataset,
    eval_dataset=valid_dataset,
    compute_metrics=compute_metrics_for_binary_classification,
    callbacks=[callback],
)
trainer.train()
Then I predict the result with the model.
test_predict = model(x_test)
I get the error:
TypeError Traceback (most recent call last)
Input In [18], in <cell line: 9>()
17 x_test.from_dict(feature_test)
18 x_test.set_format(tensor_type="torch")
---> 20 test_predict = model(x_test) # trainer.predict(x_test).predictions
21 if np.argmax(test_predict) < 5:
22 recall_counter = recall_counter + 1
File ~/miniconda3/lib/python3.8/site-packages/torch/nn/modules/module.py:1051, in Module._call_impl(self, *input, **kwargs)
1047 # If we don't have any hooks, we want to skip the rest of the logic in
1048 # this function, and just call forward.
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
TypeError: forward() takes 1 positional argument but 2 were given
But all is OK if I predict the result through:
test_predict = trainer.predict(x_test).predictions
Why can't I use model(x_test) to get the inference result? Could you please give me any suggestions? Thanks.
Your forward expects keyword arguments, like forward(data=myarray), because you used a double asterisk when defining it and declared no positional argument.
Either use def forward(self, input, **kwargs), which would read the first argument of the call positionally and treat the remaining ones as kwargs,
or call it with:
model(keyword=x_test), and then inside your forward function access it with _features['keyword'].
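A short sketch of both options (the argument names here are placeholders, not taken from your code):

# Option 1: declare a positional parameter, keep **kwargs for anything else
def forward(self, features, **kwargs):
    ...

test_predict = model(x_test)  # now works positionally

# Option 2: keep forward(self, **_features) and call with a keyword
test_predict = model(keyword=x_test)  # read inside forward as _features['keyword']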
I'm trying to understand what is wrong with the following GNN model implemented in PyTorch:
import torch
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv

class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = SAGEConv(dataset.num_features,
                             dataset.num_classes,
                             aggr="max")  # max, mean, add ...

    def forward():
        x = self.conv(data.x, data.edge_index)
        return F.log_softmax(x, dim=1)
but I get the following error when trying to run a training loop:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-34-f3ee8050af6a> in <module>
1 best_val_acc = test_acc = 0
2 for epoch in range(1,100):
----> 3 train()
4 _, val_acc, tmp_test_acc = test()
5 if val_acc > best_val_acc:
<ipython-input-14-64df4e2a24f9> in train()
2 model.train()
3 optimizer.zero_grad()
----> 4 F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward()
5 optimizer.step()
6
~\anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
TypeError: forward() takes 0 positional arguments but 1 was given
I'm adding more details, as requested, on how I call the model:
def train():
    model.train()
    optimizer.zero_grad()
    F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward()
    optimizer.step()

def test():
    model.eval()
    logits, accs = model(), []
    for _, mask in data('train_mask', 'val_mask', 'test_mask'):
        pred = logits[mask].max(1)[1]
        acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()
        accs.append(acc)
    return accs
The function torch.nn.Module.forward must take at least one argument: self. In your case it needs two: self and your input data.
def forward(self, data):  # <-
    x = self.conv(data.x, data.edge_index)
    return F.log_softmax(x, dim=1)
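Note that the train() and test() functions above call model() with no arguments, so with this fix those calls presumably need the data passed in as well, e.g.:

F.nll_loss(model(data)[data.train_mask], data.y[data.train_mask]).backward()  # in train()
logits, accs = model(data), []  # in test()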
I am doing text classification using a linear SVC model from sklearn. Now I want to visualize which words/tokens have the highest impact on the classification decision by using SHAP (https://github.com/slundberg/shap).
Right now this does not work, because I am getting an error that seems to originate from the vectorizer step in the pipeline I have defined. What's wrong here?
Is my general approach to using SHAP in this case correct?
import shap
from sklearn import svm
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer

x_Train, x_Test, y_Train, y_Test = train_test_split(
    df_all['PDFText'], df_all['class'], test_size=0.2, random_state=1234
)

pipeline = Pipeline([
    (
        'tfidv',
        TfidfVectorizer(
            ngram_range=(1, 3),
            analyzer='word',
            strip_accents=ascii,
            use_idf=True,
            sublinear_tf=True,
            max_features=6000,
            min_df=2,
            max_df=1.0
        )
    ),
    (
        'lin_svc',
        svm.SVC(
            C=1.0,
            probability=True,
            kernel='linear'
        )
    )
])
pipeline.fit(x_Train, y_Train)

shap.initjs()
explainer = shap.KernelExplainer(pipeline.predict_proba, x_Train)
shap_values = explainer.shap_values(x_Test, nsamples=100)
shap.force_plot(explainer.expected_value[0], shap_values[0][0, :], x_Test.iloc[0, :])
This is the error message I get:
Provided model function fails when applied to the provided data set.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-81-4bca63616b3b> in <module>
3
4 # use Kernel SHAP to explain test set predictions
----> 5 explainer = shap.KernelExplainer(pipeline.predict_proba, x_Train)
6 shap_values = explainer.shap_values(x_Test, nsamples=100)
7
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\shap\explainers\kernel.py in __init__(self, model, data, link, **kwargs)
95 self.keep_index_ordered = kwargs.get("keep_index_ordered", False)
96 self.data = convert_to_data(data, keep_index=self.keep_index)
---> 97 model_null = match_model_to_data(self.model, self.data)
98
99 # enforce our current input type limitations
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\shap\common.py in match_model_to_data(model, data)
80 out_val = model.f(data.convert_to_df())
81 else:
---> 82 out_val = model.f(data.data)
83 except:
84 print("Provided model function fails when applied to the provided data set.")
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\utils\metaestimators.py in <lambda>(*args, **kwargs)
116
117 # lambda, but not partial, allows help() to work with update_wrapper
--> 118 out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
119 # update the docstring of the returned function
120 update_wrapper(out, self.fn)
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\pipeline.py in predict_proba(self, X)
379 for name, transform in self.steps[:-1]:
380 if transform is not None:
--> 381 Xt = transform.transform(Xt)
382 return self.steps[-1][-1].predict_proba(Xt)
383
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\feature_extraction\text.py in transform(self, raw_documents, copy)
1631 check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
1632
-> 1633 X = super(TfidfVectorizer, self).transform(raw_documents)
1634 return self._tfidf.transform(X, copy=False)
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\feature_extraction\text.py in transform(self, raw_documents)
1084
1085 # use the same matrix-building strategy as fit_transform
-> 1086 _, X = self._count_vocab(raw_documents, fixed_vocab=True)
1087 if self.binary:
1088 X.data.fill(1)
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\feature_extraction\text.py in _count_vocab(self, raw_documents, fixed_vocab)
940 for doc in raw_documents:
941 feature_counter = {}
--> 942 for feature in analyze(doc):
943 try:
944 feature_idx = vocabulary[feature]
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\feature_extraction\text.py in <lambda>(doc)
326 tokenize)
327 return lambda doc: self._word_ngrams(
--> 328 tokenize(preprocess(self.decode(doc))), stop_words)
329
330 else:
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\feature_extraction\text.py in <lambda>(x)
254
255 if self.lowercase:
--> 256 return lambda x: strip_accents(x.lower())
257 else:
258 return strip_accents
AttributeError: 'numpy.ndarray' object has no attribute 'lower'
KernelExplainer expects to receive a classification model (or its prediction function) applied to already-vectorized data as the first argument. Please check the use of Pipeline with SHAP following the link.
In your case, you can use the Pipeline as follows:
x_Train = pipeline.named_steps['tfidv'].fit_transform(x_Train)
explainer = shap.KernelExplainer(pipeline.named_steps['lin_svc'].predict_proba, x_Train)
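To finish the explanation step under the same approach, the test set presumably needs the same transformation before calling shap_values (untested sketch; KernelExplainer may also require dense arrays, in which case convert with .toarray()):

x_Test_t = pipeline.named_steps['tfidv'].transform(x_Test)  # vectorizer is already fitted
shap_values = explainer.shap_values(x_Test_t, nsamples=100)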