Problem with Graph Neural Network in PyTorch Geometric

I'm trying to understand what is wrong with the following GNN model implemented in PyTorch Geometric:
class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = SAGEConv(dataset.num_features,
                             dataset.num_classes,
                             aggr="max")  # max, mean, add ...

    def forward():
        x = self.conv(data.x, data.edge_index)
        return F.log_softmax(x, dim=1)
but I get the following error when trying to run a training loop:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-34-f3ee8050af6a> in <module>
1 best_val_acc = test_acc = 0
2 for epoch in range(1,100):
----> 3 train()
4 _, val_acc, tmp_test_acc = test()
5 if val_acc > best_val_acc:
<ipython-input-14-64df4e2a24f9> in train()
2 model.train()
3 optimizer.zero_grad()
----> 4 F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward()
5 optimizer.step()
6
~\anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
TypeError: forward() takes 0 positional arguments but 1 was given
As requested, here are more details on how I call the model:
def train():
    model.train()
    optimizer.zero_grad()
    F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward()
    optimizer.step()

def test():
    model.eval()
    logits, accs = model(), []
    for _, mask in data('train_mask', 'val_mask', 'test_mask'):
        pred = logits[mask].max(1)[1]
        acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()
        accs.append(acc)
    return accs

The torch.nn.Module.forward method must take at least one argument: self. In your case it should take two: self and your input data.
def forward(self, data):  # <-
    x = self.conv(data.x, data.edge_index)
    return F.log_softmax(x, dim=1)
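Note that train() and test() above call model() with no arguments, so once forward takes data those calls have to pass the graph as well. A minimal sketch, assuming the same model, data and optimizer objects as in the snippets above:

def train():
    model.train()
    optimizer.zero_grad()
    out = model(data)  # forward(self, data) now expects the graph
    F.nll_loss(out[data.train_mask], data.y[data.train_mask]).backward()
    optimizer.step()

@torch.no_grad()
def test():
    model.eval()
    logits, accs = model(data), []
    for _, mask in data('train_mask', 'val_mask', 'test_mask'):
        pred = logits[mask].max(1)[1]
        accs.append(pred.eq(data.y[mask]).sum().item() / mask.sum().item())
    return accs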

Related

RuntimeError: stack expects a non-empty TensorList

First of all, thank you. I tried to train a model with PyTorch but got the following error:
RuntimeError: stack expects a non-empty TensorList. I am trying to build a model that extracts features from a point cloud using deep learning in PyTorch. Could anyone help with this?
Thanks!
def training_loop(gpu, training_dataloader, model, loss_fn, optimizer):
losses = []
correct = 0
batch_results = dict()
conf_mat = np.zeros((10,10))
for batch_n, batch in enumerate(training_dataloader): #batch[batch, pos, ptr, y]
batch_size = int(batch.batch.size()[0] / sample_points)
if dimensionality == 3:
# Input dim [:,3] for your geometry x,y,z
X = batch.pos.cuda(non_blocking=True).view(batch_size, sample_points, -1) + torch.normal(
torch.zeros(batch_size, sample_points, dimensionality), torch.full((batch_size, sample_points,
dimensionality), fill_value=0.1)).cuda(gpu)
else:
# Input dim [:,6] for your geometry x,y,z and normals nx,ny,nz
X = torch.cat((batch.pos.cuda(non_blocking=True), batch.normal.cuda(non_blocking=True)), 1).view(batch_size, sample_points, -1) + torch.normal(
torch.zeros(batch_size, sample_points, dimensionality), torch.full((batch_size, sample_points,
dimensionality), fill_value=0.1)).cuda(gpu)
y = batch.y.cuda(non_blocking=True).flatten() #size (batch_size) --> torch.Size([8])
# Compute predictions
pred = model(None, X) #size (batch_size,classes) --> torch.Size([8, 10])
if overall_classes_loss:
# weighted CE Loss over all classes
loss = loss_fn(pred, y)
else:
# weighted batchwise Loss
sample_count = np.array([[x, batch.y.tolist().count(x)] for x in batch.y])[:,1]
batch_weights = 1. / sample_count
batch_weights = torch.from_numpy(batch_weights)
batch_weights = batch_weights.double()
loss = element_weighted_loss(pred, batch.y, batch_weights, gpu)
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
print(f"Loss: {loss}")
tensor_list_y = [torch.ones_like(y) for _ in range(dist.get_world_size())]
tensor_list_pred = [torch.ones_like(y) for _ in range(dist.get_world_size())]
torch.distributed.all_gather(tensor_list_y, y, group=None, async_op=False)
torch.distributed.all_gather(tensor_list_pred, pred.argmax(1), group=None, async_op=False)
tensor_list_y = torch.cat(tensor_list_y)
tensor_list_pred = torch.cat(tensor_list_pred)
# Confusion Matrix
conf_mat += confusion_matrix(tensor_list_y.cpu().detach().numpy(), tensor_list_pred.cpu().detach().numpy(), labels=np.arange(0,10))
# Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.item())
# Save batch predictions
batch_results[batch_n] = {'true':tensor_list_y, 'pred':tensor_list_pred}
if verbosity == True:
print(f"\n\nTRAIN on GPU:{gpu}: True Label {y} - Prediction {pred.argmax(1)} - Loss {loss}")
truevalue = '\t\t'.join(classes[items] for items in y.tolist())
predvalues = '\t\t'.join(classes[items] for items in pred.argmax(1).tolist())
print(f"INFO on GPU:{gpu}: TRAIN - True Value\t {truevalue}")
print(f"INFO on GPU:{gpu}: TRAIN - Predictions\t {predvalues}")
if batch_n % 25 == 0:
torch.distributed.reduce(loss, 0)
"""if gpu == 0:
# print predictions and true values
#truevalue = '\t\t'.join(classes[items] for items in y.tolist())
#predvalues = '\t\t'.join(classes[items] for items in pred.argmax(1).tolist())
#print(f"\n\nINFO on GPU{gpu}: TRAIN - True Value\t {truevalue}")
#print(f"INFO on GPU{gpu}: TRAIN - Predictions\t {predvalues}")
#print("INFO: TRAIN - Correctness\t", pred.argmax(1) == y)
#print(f"INFO: TRAIN - Single Batch Test Accuracy {correct * 100 / batch_size}\n\n")
loss, current = loss.item(), batch_n * len(X)
#print(f"loss: {loss:>7f}")"""
#print(f"conf_mat: {conf_mat}")
#print(f"batch_results: {batch_results}")
return torch.tensor(losses, device=f"cuda:{gpu}"), torch.tensor(correct, device=f"cuda:{gpu}"), batch_results, conf_mat
def train_optimisation(gpu, gpus, training_dataloader, test_dataloader, model, loss_fn, optimizer, scheduler, dir_path, initial_epoch):
epoch_losses = []
training_accuracies = []
test_losses = []
test_accuracies = []
learning_rates = []
counter = 0 #early stopping counter
batchwise_results = dict()
# Learning Rate Scheduler
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=20)
for i in range(initial_epoch, initial_epoch + epochs):
if gpu == 0:
if initial_epoch > 0:
print(f"\n\nEpoch {i}\n-------------------------------")
else:
print(f"\n\nEpoch {i + 1}\n-------------------------------")
# TRAIN
losses, training_accuracy, train_batch_result, train_conf_mat = training_loop(gpu, training_dataloader, model, loss_fn, optimizer)
average_loss = torch.mean(losses)
torch.distributed.reduce(average_loss, 0, torch.distributed.ReduceOp.SUM)
torch.distributed.reduce(training_accuracy, 0, torch.distributed.ReduceOp.SUM)
# TEST
test_accuracy, test_loss, test_batch_result, test_conf_mat = test_loop(gpu, test_dataloader, model, loss_fn)
torch.distributed.reduce(test_accuracy, 0, torch.distributed.ReduceOp.SUM)
torch.distributed.reduce(test_loss, 0, torch.distributed.ReduceOp.SUM)
# save results
batchwise_results[i] = {'train':train_batch_result, 'test':test_batch_result}
if gpu == 0: # the following operations are performed only by the process running in the first gpu
average_loss = average_loss / torch.tensor(gpus, dtype=torch.float) # average loss among all gpus
test_accuracy = test_accuracy / torch.tensor(len(test_dataloader.dataset),
dtype=torch.float) * torch.tensor(100.0)
training_accuracy = training_accuracy / torch.tensor(len(training_dataloader.dataset),
dtype=torch.float) * torch.tensor(100.0)
test_loss = test_loss / torch.tensor(gpus, dtype=torch.float)
epoch_losses.append(average_loss.item())
training_accuracies.append(training_accuracy.item())
test_losses.append(test_loss.item())
test_accuracies.append(test_accuracy.item())
learning_rates.append((optimizer.param_groups[0])["lr"])
print(f"\nBatch size: {batch_size * int(gpus)}")
print(f"average Training Loss: {average_loss.item():.6f}")
print(f"average Test Loss: {test_loss.item():.6f}")
print(f"\naverage Training Acc: {training_accuracy.item():.6f}")
print(f"average Test Acc: {test_accuracy.item():.6f}")
printLearningRate(optimizer)
scheduler.step(test_loss)
"""# stepwise learning rate decay
if average_loss.item() <= 0.35:
for param_group in optimizer.param_groups:
print("Learning rate changed to 0.007")
param_group['lr'] = 0.007
if average_loss.item() <= 0.30:
for param_group in optimizer.param_groups:
print("Learning rate changed to 0.004")
param_group['lr'] = 0.004"""
# saving model checkpoint
save_checkpoint(model, optimizer, scheduler, i, epoch_losses, training_accuracies, test_losses, test_accuracies, learning_rates,
os.path.join(dir_path, f"epoch{i}.pth"), {key: value for key, value in batchwise_results[i].items() if key == 'train'}, {key: value for key, value in batchwise_results[i].items() if key == 'test'}, train_conf_mat, test_conf_mat)
#TODO: implement ONNX Export
# early stopping scheduler
if early_stopping(test_losses) == True:
counter += 1
print(f"Early Stopping counter: {counter} of {patience}")
else:
counter += 0
if counter < patience:
pass
else:
print("\n\nEarly Stopping activated")
print(f"Training stopped at Epoch{i + 1}")
dist.destroy_process_group()
exit()
class DistributedWeightedSampler(Sampler):
def __init__(self, dataset, data_source: Optional[Sized], num_replicas: Optional[int] = None,
rank: Optional[int] = None, shuffle: bool = True, seed: int = 0, replacement: bool = True):
super().__init__(data_source)
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
if rank >= num_replicas or rank < 0:
raise ValueError("Invalid rank {}, rank should be in the interval [0, {}]".format(rank, num_replicas - 1))
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
self.seed = seed
self.replacement = replacement #sample can be drown again in that row if True
def calculate_weights(self, targets):
class_sample_count = np.array([len(np.where(self.dataset.data.y == t)[0]) for t in np.unique(self.dataset.data.y)])
weight = 1. / class_sample_count
samples_weight = np.array([weight[t] for t in self.dataset.data.y])
samples_weight = torch.from_numpy(samples_weight)
samples_weigth = samples_weight.double()
return samples_weigth
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
# get targets (you can alternatively pass them in __init__, if this op is expensive)
# data.data.y == labels
targets = self.dataset.data.y
targets = targets[self.rank:self.total_size:self.num_replicas]
#assert len(targets) == self.num_samples
weights = self.calculate_weights(targets)
weighted_indices = torch.multinomial(weights, self.num_samples, self.replacement).tolist()
return iter(weighted_indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
def train(gpu, gpus, world_size):
torch.manual_seed(0)
torch.cuda.set_device(gpu)
try:
#dist.init_process_group(backend='nccl', world_size=world_size, rank=gpu) #for distributed GPU training
dist.init_process_group(backend='gloo', world_size=world_size, rank=gpu) #as a fallback option
except RuntimeError:
print("\n\nINFO:RuntimeError is raised >> Used gloo backend instead of nccl!\n")
dist.init_process_group(backend='gloo', world_size=world_size, rank=gpu) #as a fallback option
dir_path = None
if gpu == 0:
dir_path = "stackgraphConvPool3DPnet"
createdir(dir_path)
training_number = next_training_number(dir_path)
dir_path = os.path.join(dir_path, f"train{training_number}")
createdir(dir_path)
#save hyper-parameters in txt protocol file
save_hyperparameters(dir_path, 'hyperparameters.txt')
print("\nINFO: Protocol File saved successfully . . .")
#copy crucial py-files in current train folder
#shutil.copy2(os.path.basename('__file__'), dir_path)
#shutil.copy2('stackGraphConvPool3DPnet.py', dir_path)
#shutil.copy2('shrinkingunit.py', dir_path)
#shutil.copy2('utilities.py', dir_path)
#print("\nINFO: Script Files copied successfully . . .")
model = Classifier(shrinkingLayers, mlpClassifier)
torch.cuda.set_device(gpu)
model.cuda(gpu)
#setting up optimizer
if optimizer_str == "SGD":
optimizer = torch.optim.SGD(model.parameters(), learning_rate, momentum=momentum, weight_decay=weight_decay)
elif optimizer_str == "RMSprop":
optimizer = torch.optim.RMSprop(model.parameters(), learning_rate, weight_decay=weight_decay)
else:
optimizer = torch.optim.Adam(model.parameters(), learning_rate, weight_decay=weight_decay)
# single-program multiple-data training paradigm (Distributed Data-Parallel Training)
model = DDP(model, device_ids=[gpu])
if dimensionality == 3:
training_data = ModelNet("ModelNet10_train_data", transform=lambda x: NormalizeScale()(SamplePoints(num=sample_points)(x)))
else:
training_data = ModelNet("ModelNet10_train_data", transform=lambda x: NormalizeScale()(NormalizeRotation()(SamplePoints(num=sample_points, remove_faces=True, include_normals=True)(x))))
training_sampler = DistributedWeightedSampler(training_data, data_source=None, num_replicas=world_size) #weight unbalanced classes by 1/cls_count
training_dataloader = DataLoader(dataset=training_data, batch_size=batch_size, shuffle=data_shuffle, num_workers=0,
pin_memory=True, sampler=training_sampler)
if dimensionality == 3:
test_data = ModelNet("ModelNet10_test_data", train=False, transform=lambda x: NormalizeScale()(SamplePoints(num=sample_points)(x)))
else:
test_data = ModelNet("ModelNet10_test_data", train=False, transform=lambda x: NormalizeScale()(NormalizeRotation()(SamplePoints(num=sample_points, remove_faces=True, include_normals=True)(x))))
test_sampler = DistributedWeightedSampler(test_data,data_source=None, num_replicas=world_size) #weight unbalanced classes by 1/cls_count
test_dataloader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=data_shuffle, num_workers=0,
pin_memory=True, sampler=test_sampler)
"""# save sampled data for later result visualisation
try:
#export_path = os.path.join("stackgraphConvPool3DPnet", "train" + str(next_training_number("stackgraphConvPool3DPnet")-1))
#export_sampled_data(training_dataloader, os.path.join(export_path, "train_sampledPoints.pth"))
#export_sampled_data(test_dataloader, os.path.join(export_path, "test_sampledPoints.pth"))
print("\nINFO: Sampled 3D data successfully saved . . .")
except Exception as e:
print(f"\nERROR: Sampled 3D data could not saved successfully . . . - this process does not executed - caused by {e}")"""
# weighted CE Loss over all Classes C
class_sample_count = np.array([len(np.where(training_data.data.y == t)[0]) for t in np.unique(training_data.data.y)])
weight = 1. / class_sample_count
weight = torch.from_numpy(weight)
weight = weight.float()
loss_fn = nn.CrossEntropyLoss(weight=weight).cuda(gpu)
# continue training from certain checkpoint
continue_from_scratch = True if args.resume is None else False
if continue_from_scratch:
if gpu == 0:
print("\nINFO: Train from scratch has started . . .")
train_optimisation(gpu, gpus, training_dataloader, test_dataloader, model, loss_fn, optimizer, None, dir_path, 0)
else:
checkpoint_path = "stackgraphConvPool3DPnet/" + args.resume
if gpu == 0:
print(f"\nINFO: Train has started from certain checkpoint {checkpoint_path.split('/')[2].split('.')[0]} in {checkpoint_path.split('/')[1]} . . .")
model.load_state_dict(torch.load(checkpoint_path)['model_state_dict'], strict=False)
optimizer.load_state_dict(torch.load(checkpoint_path)['optimizer_state_dict'])
final_epoch = (torch.load("stackgraphConvPool3DPnet/" + args.resume)['epoch'])+1
train_optimisation(gpu, gpus, training_dataloader, test_dataloader, model, loss_fn, optimizer, None, dir_path, final_epoch)
INFO: Train from scratch has started . . .
Epoch 1
-------------------------------
Exception in thread Exception in thread Thread-8:
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\lib\threading.py", line 973, in _bootstrap_inner
Thread-7:
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\lib\threading.py", line 973, in _bootstrap_inner
self.run()
File "C:\ProgramData\Anaconda3\lib\threading.py", line 910, in run
self.run()
File "C:\ProgramData\Anaconda3\lib\threading.py", line 910, in run
self._target(*self._args, **self._kwargs)
File ~\Desktop\Forum\unit.py", line 615, in kmeansAppender
self._target(*self._args, **self._kwargs)
File ~\Desktop\Forum\unit.py", line 615, in kmeansAppender
x, y, z = module(input)
File "C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py", line 1130, in _call_impl
x, y, z = module(input)
File "C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File ~\Desktop\Forum\unit.py", line 148, in forward
labels = np.apply_along_axis(lambda x: x + (i*self.k), axis=0, arr=kmeans.labels_)
AttributeError: 'KMeans' object has no attribute 'labels_'
return forward_call(*input, **kwargs)
File ~\Desktop\Forum\unit.py", line 148, in forward
labels = np.apply_along_axis(lambda x: x + (i*self.k), axis=0, arr=kmeans.labels_)
AttributeError: 'KMeans' object has no attribute 'labels_'
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Input In [1], in <cell line: 720>()
734 #change state
735 if args.train_state == True:
--> 736 train(args.local_rank, gpus, world_size)
737 else:
738 infer(args.local_rank, gpus, world_size, args.checkpoint, args.data)
Input In [1], in train(gpu, gpus, world_size)
672 if gpu == 0:
673 print("\nINFO: Train from scratch has started . . .")
--> 674 train_optimisation(gpu, gpus, training_dataloader, test_dataloader, model, loss_fn, optimizer, None, dir_path, 0)
675 else:
676 checkpoint_path = "stackgraphConvPool3DPnet/" + args.resume
Input In [1], in train_optimisation(gpu, gpus, training_dataloader, test_dataloader, model, loss_fn, optimizer, scheduler, dir_path, initial_epoch)
454 print(f"\n\nEpoch {i + 1}\n-------------------------------")
456 # TRAIN
--> 457 losses, training_accuracy, train_batch_result, train_conf_mat = training_loop(gpu, training_dataloader, model, loss_fn, optimizer)
458 average_loss = torch.mean(losses)
459 torch.distributed.reduce(average_loss, 0, torch.distributed.ReduceOp.SUM)
Input In [1], in training_loop(gpu, training_dataloader, model, loss_fn, optimizer)
249 y = batch.y.cuda(non_blocking=True).flatten() #size (batch_size) --> torch.Size([8])
251 # Compute predictions
--> 252 pred = model(None, X) #size (batch_size,classes) --> torch.Size([8, 10])
254 if overall_classes_loss:
255 # weighted CE Loss over all classes
256 loss = loss_fn(pred, y)
File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\parallel\distributed.py:1008, in DistributedDataParallel.forward(self, *inputs, **kwargs)
1004 if self._join_config.enable:
1005 # Notify joined ranks whether they should sync in backwards pass or not.
1006 self._check_global_requires_backward_grad_sync(is_joined_rank=False)
-> 1008 output = self._run_ddp_forward(*inputs, **kwargs)
1010 # sync params according to location (before/after forward) user
1011 # specified as part of hook, if hook was specified.
1012 if self._check_sync_bufs_post_fwd():
File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\parallel\distributed.py:969, in DistributedDataParallel._run_ddp_forward(self, *inputs, **kwargs)
962 if self.device_ids:
963 inputs, kwargs = _to_kwargs(
964 inputs,
965 kwargs,
966 self.device_ids[0],
967 self.use_side_stream_for_tensor_copies
968 )
--> 969 return module_to_run(*inputs[0], **kwargs[0])
970 else:
971 return module_to_run(*inputs, **kwargs)
File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
File ~\Desktop\Forum\unit.py:657, in Classifier.forward(self, x, pos)
655 feature_matrix_batch = pos.unsqueeze(0)
656 # feature_matrix_batch size = (1,N,I,D) where N=batch number, I=members, D=member dimensionality
--> 657 output = self.neuralNet(feature_matrix_batch)
658 # output size = (S,N,D) where S= stack size, N=batch number, D'=member dimensionality
659 output = torch.mean(output, dim=0)
File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\container.py:139, in Sequential.forward(self, input)
137 def forward(self, input):
138 for module in self:
--> 139 input = module(input)
140 return input
File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
File ~\Desktop\Forum\unit.py:448, in ShrinkingUnitStack.forward(self, feature_matrix_batch)
446 feature_matrix_batch = self.selfCorrStack(feature_matrix_batch)
447 # feature_matrix_batch size = (S',N,I,D) where S'=stack_size, N=batch number, I=members, D=member dimensionality
--> 448 feature_matrix_batch_, conv_feature_matrix_batch, cluster_index = self.kmeansConvStack(feature_matrix_batch)
449 feature_matrix_batch = self.localAdaptFeaAggreStack(feature_matrix_batch, conv_feature_matrix_batch)
450 output = self.graphMaxPoolStack(feature_matrix_batch, cluster_index)
File C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
File ~\Desktop\Forum\unit.py:519, in KMeansConvStack.forward(self, feature_matrix_batch)
517 def forward(self, feature_matrix_batch: torch.Tensor):
518 # feature_matrix_batch size = (S,N,I,D) where S=stack size, N=batch number, I=members, D=member dimensionality
--> 519 feature_matrix_batch, conv_feature_matrix_batch, cluster_index = kmeansConvThreader(self.kmeansConvStack,
520 feature_matrix_batch)
521 # feature_matrix_batch size = (S,N,I,D) where where S=stack_size, N=batch number, I=members, D=member dimensionality
522 # conv_feature_matrix_batch size = (S,N,I,D) where where S=stack_size, N=batch number, I=members, D=member dimensionality
523 # cluster_index size = (S,M) where S=stack_size, M=N*I
524 return feature_matrix_batch, conv_feature_matrix_batch, cluster_index
File ~\Desktop\Forum\unit.py:611, in kmeansConvThreader(modules, input_tensor)
609 list2_append = list(map(lambda x: x[1], list2_append))
610 list3_append = list(map(lambda x: x[1], list3_append))
--> 611 return torch.stack(list1_append), torch.stack(list2_append), torch.stack(list3_append)
RuntimeError: stack expects a non-empty TensorList
def forward(self, feature_matrix_batch):
    # feature_matrix_batch size = (N,I,D) where N=batch number, I=members, D=member dimensionality
    N, I, D = feature_matrix_batch.size()
    clusters = []
    for i, feature_matrix in enumerate(feature_matrix_batch):
        kmeans = KMeans(n_clusters=self.k, init=self.kmeansInit, n_init=self.n_init)
        labels = np.apply_along_axis(lambda x: x + (i*self.k), axis=0, arr=kmeans.labels_)
def kmeansConvThreader(modules, input_tensor):
    list1_append = []
    list2_append = []
    list3_append = []
    threads = []
    for i, t in enumerate(input_tensor):
        threads.append(
            Thread(target=kmeansAppender, args=(modules[i], t, list1_append, list2_append, list3_append, i)))
    [t.start() for t in threads]
    [t.join() for t in threads]
    list1_append.sort()
    list2_append.sort()
    list3_append.sort()
    list1_append = list(map(lambda x: x[1], list1_append))
    list2_append = list(map(lambda x: x[1], list2_append))
    list3_append = list(map(lambda x: x[1], list3_append))
    return torch.stack(list1_append), torch.stack(list2_append), torch.stack(list3_append)
AttributeError: 'KMeans' object has no attribute 'labels_'
RuntimeError: stack expects a non-empty TensorList
Thanks for your help
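For what it's worth, the two tracebacks point at one root cause: scikit-learn's KMeans only exposes labels_ after fit (or fit_predict) has been called, so reading kmeans.labels_ right after constructing the estimator raises the AttributeError inside each worker thread. The threads die before appending anything, the three lists stay empty, and torch.stack then receives an empty TensorList. A minimal sketch of the idea; the exact array handed to KMeans depends on the surrounding code and is an assumption here:

# hypothetical fix sketch: fit the estimator before reading labels_
kmeans = KMeans(n_clusters=self.k, init=self.kmeansInit, n_init=self.n_init)
points = feature_matrix.detach().cpu().numpy()  # (I, D) array for this batch element
kmeans.fit(points)                              # populates kmeans.labels_
labels = np.apply_along_axis(lambda x: x + (i*self.k), axis=0, arr=kmeans.labels_)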

RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x1 and 512x22400)

I'm building an MLP model for a project and ran into the following error. I'm new to DL and PyTorch. I know the error is in the hidden layer, but I'm confused about the input and output features of that layer and why it is trying to multiply with a 1x1 matrix. Any help would be appreciated.
Traceback:
RuntimeError Traceback (most recent call last)
Input In [92], in <cell line: 22>()
22 for epoch in range(1, num_epochs+1):
23 print(f"\nEpoch: {epoch}/{num_epochs}")
---> 25 train(model, device, train_loader, optimizer, criterion, epoch)
26 test(model, device, test_loader, criterion, mode = "Test")
Input In [90], in train(model, device, train_loader, optimizer, criterion, epoch)
13 optimizer.zero_grad()
15 # pass the batch to the model and assign the output to variable named y_pred
---> 16 y_pred = model(batch_idx)
18 # calculate the loss (use CrossEntropyLoss in pytorch)
19 loss = criterion(y_pred, target)
File ~\AppData\Local\Programs\Python\Python310\lib\site-
packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or
_global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
Input In [89], in MLP.forward(self, x)
23 print(x.size())
24 print(x.size())
---> 25 x = self.relu(self.hidden(x))
26 print(x.size())
27 x = self.relu(self.classifier(x))
File ~\AppData\Local\Programs\Python\Python310\lib\site-
packages\torch\nn\modules\module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or
_global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
File ~\AppData\Local\Programs\Python\Python310\lib\site-
packages\torch\nn\modules\linear.py:114, in Linear.forward(self, input)
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x1 and 512x22400)
Below is my code:
import torch.nn as nn

class MLP(nn.Module):
    def __init__(self, num_features, num_classes, num_hidden):
        super(MLP, self).__init__()
        # define a linear layer with output channels as 32
        self.hidden = nn.Linear(num_hidden, num_features*70*32)
        self.relu = torch.nn.ReLU()
        # define a linear layer with output features corresponding to the number of classes
        self.classifier = nn.Linear(num_features*70*32, 5)

    def forward(self, x):
        x = torch.tensor(x).unsqueeze(dim=0)
        x = self.relu(self.hidden(x))
        out = self.relu(self.classifier(x))
        return out

num_hidden = 512
num_features = 10
classes = [0, 1, 2, 3, 4]
num_classes = len(classes)
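Two things stand out from the traceback and the code. First, the training loop calls model(batch_idx), so x inside forward is the integer batch index; torch.tensor(x).unsqueeze(dim=0) then produces a 1x1 tensor, which is the mat1 in the error. Second, self.hidden = nn.Linear(num_hidden, num_features*70*32) gives a 512x22400 weight, and nn.Linear expects its in_features (512 here) to match the width of the incoming batch. A hedged sketch of the usual pattern, assuming each sample from the loader is a flat 512-wide feature vector (the real data shape is not shown in the question, so the sizes below are placeholders):

import torch.nn as nn

class MLP(nn.Module):
    def __init__(self, in_features=512, hidden_units=22400, num_classes=5):
        super().__init__()
        self.hidden = nn.Linear(in_features, hidden_units)  # in_features must equal the sample width
        self.relu = nn.ReLU()
        self.classifier = nn.Linear(hidden_units, num_classes)

    def forward(self, x):               # x: (batch, in_features)
        x = self.relu(self.hidden(x))
        return self.classifier(x)       # logits: (batch, num_classes)

# in the training loop, pass the batch itself rather than its index:
# y_pred = model(data)    # not model(batch_idx)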

pytorch error in multiplying matrices in neural network

I was trying to make a neural network in PyTorch; however, I ran into the error below. I'm still new to this topic, so I'm not sure how to go about solving it.
Code:
class ANN_Model(nn.Module):
    def __init__(self,input_features=8,hidden1=8,hidden2=200,hidden3=200,hidden4=300,hidden5=300,hidden6=400,hidden7=400,hidden8=300,hidden9=300,out_features=2):
        super().__init__()
        self.f_connected1=nn.Linear(input_features,hidden1)
        self.f_connected2=nn.Linear(hidden1,hidden2)
        self.f_connected2=nn.Linear(hidden2,hidden3)
        self.f_connected2=nn.Linear(hidden3,hidden4)
        self.f_connected2=nn.Linear(hidden4,hidden5)
        self.f_connected2=nn.Linear(hidden5,hidden6)
        self.f_connected2=nn.Linear(hidden6,hidden7)
        self.f_connected2=nn.Linear(hidden7,hidden8)
        self.f_connected2=nn.Linear(hidden8,hidden9)
        self.out=nn.Linear(hidden9,out_features)

    def forward(self,x):
        x=F.relu(self.f_connected1(x))
        x=F.relu(self.f_connected2(x))
        x=F.relu(self.f_connected3(x))
        x=F.relu(self.f_connected4(x))
        x=F.relu(self.f_connected5(x))
        x=F.relu(self.f_connected6(x))
        x=F.relu(self.f_connected7(x))
        x=F.relu(self.f_connected8(x))
        x=F.relu(self.f_connected9(x))
        x=self.out(x)
        return x

loss_function = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr = 0.01)

epochs = 500
final_losses = []
for i in range(epochs):
    i = i + 1
    y_pred = model.forward(X_train)
    loss=loss_function(y_pred, y_train)
    final_losses.append(loss.item())
    if i%10==1:
        print("Epoch number: {} and the loss: {}".format(i, loss.item()))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Input In [13], in <cell line: 3>()
3 for i in range(epochs):
4 i = i + 1
----> 5 y_pred = model.forward(X_train)
6 loss=loss_function(y_pred, y_train)
7 final_losses.append(loss.item())
Input In [8], in ANN_Model.forward(self, x)
14 def forward(self,x):
15 x=F.relu(self.f_connected1(x))
---> 16 x=F.relu(self.f_connected2(x))
17 x=F.relu(self.f_connected3(x))
18 x=F.relu(self.f_connected4(x))
File ~/miniconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1130, in Module._call_impl(self, *input, **kwargs)
1126 # If we don't have any hooks, we want to skip the rest of the logic in
1127 # this function, and just call forward.
1128 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1129 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130 return forward_call(*input, **kwargs)
1131 # Do not call functions when jit is used
1132 full_backward_hooks, non_full_backward_hooks = [], []
File ~/miniconda3/lib/python3.9/site-packages/torch/nn/modules/linear.py:114, in Linear.forward(self, input)
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (691x8 and 300x300)
I found it: in your model's constructor __init__, every layer after the first is assigned to self.f_connected2, so each assignment overwrites the previous one. The last assignment wins, leaving self.f_connected2 = nn.Linear(hidden8, hidden9) = nn.Linear(300, 300), which expects an input of width 300 while it receives the (691, 8) output of f_connected1, hence the shape error. Note also that f_connected3 through f_connected9 are never defined, so forward would fail on those next.
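A sketch of one way to write the constructor so that each layer gets its own registered attribute (sizes taken from the question's defaults); an nn.Sequential over the same list of sizes would work equally well:

import torch.nn as nn
import torch.nn.functional as F

class ANN_Model(nn.Module):
    def __init__(self, input_features=8, out_features=2):
        super().__init__()
        sizes = [input_features, 8, 200, 200, 300, 300, 400, 400, 300, 300]
        # one Linear per consecutive pair of sizes, kept in a ModuleList so all of them are registered
        self.hidden = nn.ModuleList([nn.Linear(a, b) for a, b in zip(sizes[:-1], sizes[1:])])
        self.out = nn.Linear(sizes[-1], out_features)

    def forward(self, x):
        for layer in self.hidden:
            x = F.relu(layer(x))
        return self.out(x)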

Pytorch : Expected all tensors on same device

I have moved my model and my inputs to the same device, but I still get the runtime error:
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat1 in method wrapper_addmm)
Here is my code.
First, my model implementation:
import torch
import torch.nn.functional as F

class Net(torch.nn.Module):
    def __init__(self, n_hiddens, n_feature=2, n_output=1):
        super().__init__()
        self.hiddens = []
        n_hidden_in = n_feature
        for n_hidden in n_hiddens:
            self.hiddens.append(torch.nn.Linear(n_hidden_in, n_hidden))  # hidden layer
            n_hidden_in = n_hidden
        self.predict = torch.nn.Linear(n_hidden, n_output)  # output layer

    def forward(self, x):
        for hidden in self.hiddens:
            x = F.relu(hidden(x))  # activation function for hidden layer
        x = self.predict(x)  # linear output
        return x
Then I define my dataloaders. Here, X and y are numpy arrays
from torch.utils.data import TensorDataset, DataLoader
# Split training/test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state= 42)
X_train_tensor = torch.from_numpy(X_train)
y_train_tensor = torch.from_numpy(y_train)
X_test_tensor = torch.from_numpy(X_test)
y_test_tensor = torch.from_numpy(y_test)
train_dataset = TensorDataset(X_train_tensor, y_train_tensor) # create your datset
train_dataloader = DataLoader(train_dataset, batch_size= 1000) # create your dataloader
test_dataset = TensorDataset(X_test_tensor, y_test_tensor) # create your datset
test_dataloader = DataLoader(test_dataset, batch_size= 1000) # create your dataloader
Here I train my model. The error occurs during the line "outputs = regressor(inputs)"
NUM_EPOCHS = 2000
BATCH_SIZE = 1000
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f"Device used : {device}")

# 1 hidden layer
total_num_nodes = 256
regressor = Net(n_hiddens=[total_num_nodes]).to(device)
optimizer = torch.optim.SGD(regressor.parameters(), lr=0.2, momentum=0.1, nesterov=True)
loss_func = torch.nn.MSELoss()  # this is for regression mean squared loss

for epoch in range(NUM_EPOCHS):
    running_loss = 0.0
    for i, data in enumerate(train_dataloader, 0):
        inputs, values = data
        inputs = inputs.float().to(device)
        values = values.float().to(device)
        optimizer.zero_grad()  # clear gradients for next train
        print(f"Input device is : cuda:{inputs.get_device()}")
        print(f"Target value device is : cuda:{values.get_device()}")
        print(f"Is model on cuda ? : {next(regressor.parameters()).is_cuda}")
        outputs = regressor(inputs)  # <-- This is where I have the error
        loss = loss_func(outputs, values)
        loss.backward()  # backpropagation, compute gradients
        optimizer.step()  # apply gradients
Here are the outputs of my print statements :
Device used : cuda:0
Input device is : cuda:0
Target value device is : cuda:0
Is model on cuda ? :True
This should mean that my model and my tensors are all on the same device, so why do I still get this error?
The error log is:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-6-5234b830bebc> in <module>()
24 print(f"Target value device is : cuda:{values.get_device()}")
25 print(f"Is model on cuda ? : {next(regressor.parameters()).is_cuda}")
---> 26 outputs = regressor(inputs)
27 loss = loss_func(outputs, values)
28 loss.backward() # backpropagation, compute gradients
4 frames
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
<ipython-input-4-56c54b30b771> in forward(self, x)
16 def forward(self, x):
17 for hidden in self.hiddens :
---> 18 x = F.relu(hidden(x)) # activation function for hidden layer
19 x = self.predict(x) # linear output
20 return x
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/linear.py in forward(self, input)
101
102 def forward(self, input: Tensor) -> Tensor:
--> 103 return F.linear(input, self.weight, self.bias)
104
105 def extra_repr(self) -> str:
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
1846 if has_torch_function_variadic(input, weight, bias):
1847 return handle_torch_function(linear, (input, weight, bias), input, weight, bias=bias)
-> 1848 return torch._C._nn.linear(input, weight, bias)
1849
1850
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat1 in method wrapper_addmm)
Thank you very much
TL;DR: use nn.ModuleList instead of a plain Python list to store the hidden layers in Net.
All your hidden layers are stored in a simple Python list, self.hiddens, in Net. When you move your model to the GPU with .to(device), PyTorch has no way of knowing that the elements of this plain list should also be moved to the same device.
However, if you make self.hiddens an nn.ModuleList, PyTorch knows to treat every element of this special list as an nn.Module and recursively moves them to the same device as Net.
See these answers 1, 2, 3 for more details.
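A minimal sketch of the constructor with that change; everything else in Net can stay the same:

import torch

class Net(torch.nn.Module):
    def __init__(self, n_hiddens, n_feature=2, n_output=1):
        super().__init__()
        self.hiddens = torch.nn.ModuleList()  # registered submodules, so .to(device) moves them too
        n_hidden_in = n_feature
        for n_hidden in n_hiddens:
            self.hiddens.append(torch.nn.Linear(n_hidden_in, n_hidden))
            n_hidden_in = n_hidden
        self.predict = torch.nn.Linear(n_hidden_in, n_output)  # output layer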

RuntimeError: all elements of input should be between 0 and 1

I want to use an RNN with BiLSTM layers in PyTorch on protein embeddings. It worked with a linear layer, but when I use the BiLSTM I get a runtime error. Sorry if it's not clear; it's my first post, and I would be grateful if someone could help me.
from collections import Counter, OrderedDict
from typing import Optional
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F # noqa
from deepchain import log
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_class_weight
from torch import Tensor, nn
num_layers=2
hidden_size=256
from torch.utils.data import DataLoader, TensorDataset
def classification_dataloader_from_numpy(
x: np.ndarray, y: np.array, batch_size: int = 32
) -> DataLoader:
"""Build a dataloader from numpy for classification problem
This dataloader is use only for classification. It detects automatically the class of
the problem (binary or multiclass classification)
Args:
x (np.ndarray): [description]
y (np.array): [description]
batch_size (int, optional): [description]. Defaults to None.
Returns:
DataLoader: [description]
"""
n_class: int = len(np.unique(y))
if n_class > 2:
log.info("This is a classification problem with %s classes", n_class)
else:
log.info("This is a binary classification problem")
# y is float for binary classification, int for multiclass
y_tensor = torch.tensor(y).long() if len(np.unique(y)) > 2 else torch.tensor(y).float()
tensor_set = TensorDataset(torch.tensor(x).float(), y_tensor)
loader = DataLoader(tensor_set, batch_size=batch_size)
return loader
class RNN(pl.LightningModule):
"""A `pytorch` based deep learning model"""
def __init__(self, input_shape: int, n_class: int, num_layers, n_neurons: int = 128, lr: float = 1e-3):
super(RNN,self).__init__()
self.lr = lr
self.n_neurons=n_neurons
self.num_layers=num_layers
self.input_shape = input_shape
self.output_shape = 1 if n_class <= 2 else n_class
self.activation = nn.Sigmoid() if n_class <= 2 else nn.Softmax(dim=-1)
self.lstm = nn.LSTM(self.input_shape, self.n_neurons, num_layers, batch_first=True, bidirectional=True)
self.fc= nn.Linear(self.n_neurons, self.output_shape)
def forward(self, x):
h0=torch.zeros(self.num_layers, x_size(0), self.n_neurons).to(device)
c0=torch.zeros(self.num_layers, x_size(0), self.n_neurons).to(device)
out, _=self.lstm(x,(h0, c0))
out=self.fc(out[:, -1, :])
return self.fc(x)
def training_step(self, batch, batch_idx):
"""training_step defined the train loop. It is independent of forward"""
x, y = batch
y_hat = self.fc(x).squeeze()
y = y.squeeze()
if self.output_shape > 1:
y_hat = torch.log(y_hat)
loss = self.loss(y_hat, y)
self.log("train_loss", loss, on_epoch=True, on_step=False)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
"""training_step defined the train loop. It is independent of forward"""
x, y = batch
y_hat = self.fc(x).squeeze()
y = y.squeeze()
if self.output_shape > 1:
y_hat = torch.log(y_hat)
loss = self.loss(y_hat, y)
self.log("val_loss", loss, on_epoch=True, on_step=False)
return {"val_loss": loss}
def configure_optimizers(self):
"""(Optional) Configure training optimizers."""
return torch.optim.Adam(self.parameters(),lr=self.lr)
def compute_class_weight(self, y: np.array, n_class: int):
"""Compute class weight for binary/multiple classification
If n_class=2, only compute weights for the positve class.
If n>2, compute for all classes.
Args:
y ([np.array]):vector of int represented the class
n_class (int) : number fo class to use
"""
if n_class == 2:
class_count: typing.Counter = Counter(y)
cond_binary = (0 in class_count) and (1 in class_count)
assert cond_binary, "Must have O and 1 class for binary classification"
weight = class_count[0] / class_count[1]
else:
weight = compute_class_weight(class_weight="balanced", classes=np.unique(y), y=y)
return torch.tensor(weight).float()
def fit(
self,
x: np.ndarray,
y: np.array,
epochs: int = 10,
batch_size: int = 32,
class_weight: Optional[str] = None,
validation_data: bool = True,
**kwargs
):
assert isinstance(x, np.ndarray), "X should be a numpy array"
assert isinstance(y, np.ndarray), "y should be a numpy array"
assert class_weight in (
None,
"balanced",
), "the only choice available for class_weight is 'balanced'"
n_class = len(np.unique(y))
weight = None
self.input_shape = x.shape[1]
self.output_shape = 1 if n_class <= 2 else n_class
self.activation = nn.Sigmoid() if n_class <= 2 else nn.Softmax(dim=-1)
if class_weight == "balanced":
weight = self.compute_class_weight(y, n_class)
self.loss = nn.NLLLoss(weight) if self.output_shape > 1 else nn.BCELoss(weight)
if validation_data:
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2)
train_loader = classification_dataloader_from_numpy(
x_train, y_train, batch_size=batch_size
)
val_loader = classification_dataloader_from_numpy(x_val, y_val, batch_size=batch_size)
else:
train_loader = classification_dataloader_from_numpy(x, y, batch_size=batch_size)
val_loader = None
self.trainer = pl.Trainer(max_epochs=epochs, **kwargs)
self.trainer.fit(self, train_loader, val_loader)
def predict(self, x):
"""Run inference on data."""
if self.output_shape is None:
log.warning("Model is not fitted. Can't do predict")
return
return self.forward(x).detach().numpy()
def save(self, path: str):
"""Save the state dict model with torch"""
torch.save(self.fc.state_dict(), path)
log.info("Save state_dict parameters in model.pt")
def load_state_dict(self, state_dict: "OrderedDict[str, Tensor]", strict: bool = False):
"""Load state_dict saved parameters
Args:
state_dict (OrderedDict[str, Tensor]): state_dict tensor
strict (bool, optional): [description]. Defaults to False.
"""
self.fc.load_state_dict(state_dict, strict=strict)
self.fc.eval()
mlp = RNN(input_shape=1024, n_neurons=1024, num_layers=2, n_class=2)
mlp.fit(embeddings_train, np.array(y_train),validation_data=(embeddings_test, np.array(y_test)), epochs=30)
mlp.save("model.pt")
These are the errors that occurred. I really need help and remain at your disposal for further information.
Error 1
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-154-e5fde11a675c> in <module>
1 # init MLP model, train it on the data, then save model
2 mlp = RNN(input_shape=1024, n_neurons=1024, num_layers=2, n_class=2)
----> 3 mlp.fit(embeddings_train, np.array(y_train),validation_data=(embeddings_test, np.array(y_test)), epochs=30)
4 mlp.save("model.pt")
<ipython-input-153-a8d51af53bb5> in fit(self, x, y, epochs, batch_size, class_weight, validation_data, **kwargs)
134 val_loader = None
135 self.trainer = pl.Trainer(max_epochs=epochs, **kwargs)
--> 136 self.trainer.fit(self, train_loader, val_loader)
137 def predict(self, x):
138 """Run inference on data."""
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
456 )
457
--> 458 self._run(model)
459
460 assert self.state.stopped
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in _run(self, model)
754
755 # dispatch `start_training` or `start_evaluating` or `start_predicting`
--> 756 self.dispatch()
757
758 # plugin will finalized fitting (e.g. ddp_spawn will load trained model)
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in dispatch(self)
795 self.accelerator.start_predicting(self)
796 else:
--> 797 self.accelerator.start_training(self)
798
799 def run_stage(self):
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in start_training(self, trainer)
94
95 def start_training(self, trainer: 'pl.Trainer') -> None:
---> 96 self.training_type_plugin.start_training(trainer)
97
98 def start_evaluating(self, trainer: 'pl.Trainer') -> None:
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in start_training(self, trainer)
142 def start_training(self, trainer: 'pl.Trainer') -> None:
143 # double dispatch to initiate the training loop
--> 144 self._results = trainer.run_stage()
145
146 def start_evaluating(self, trainer: 'pl.Trainer') -> None:
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_stage(self)
805 if self.predicting:
806 return self.run_predict()
--> 807 return self.run_train()
808
809 def _pre_training_routine(self):
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_train(self)
840 self.progress_bar_callback.disable()
841
--> 842 self.run_sanity_check(self.lightning_module)
843
844 self.checkpoint_connector.has_trained = False
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
1105
1106 # run eval step
-> 1107 self.run_evaluation()
1108
1109 self.on_sanity_check_end()
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, on_epoch)
960 # lightning module methods
961 with self.profiler.profile("evaluation_step_and_end"):
--> 962 output = self.evaluation_loop.evaluation_step(batch, batch_idx, dataloader_idx)
963 output = self.evaluation_loop.evaluation_step_end(output)
964
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, batch, batch_idx, dataloader_idx)
172 model_ref._current_fx_name = "validation_step"
173 with self.trainer.profiler.profile("validation_step"):
--> 174 output = self.trainer.accelerator.validation_step(args)
175
176 # capture any logged information
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in validation_step(self, args)
224
225 with self.precision_plugin.val_step_context(), self.training_type_plugin.val_step_context():
--> 226 return self.training_type_plugin.validation_step(*args)
227
228 def test_step(self, args: List[Union[Any, int]]) -> Optional[STEP_OUTPUT]:
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in validation_step(self, *args, **kwargs)
159
160 def validation_step(self, *args, **kwargs):
--> 161 return self.lightning_module.validation_step(*args, **kwargs)
162
163 def test_step(self, *args, **kwargs):
<ipython-input-153-a8d51af53bb5> in validation_step(self, batch, batch_idx)
78 if self.output_shape > 1:
79 y_hat = torch.log(y_hat)
---> 80 loss = self.loss(y_hat, y)
81 self.log("val_loss", loss, on_epoch=True, on_step=False)
82 return {"val_loss": loss}
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
887 result = self._slow_forward(*input, **kwargs)
888 else:
--> 889 result = self.forward(*input, **kwargs)
890 for hook in itertools.chain(
891 _global_forward_hooks.values(),
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/loss.py in forward(self, input, target)
611 def forward(self, input: Tensor, target: Tensor) -> Tensor:
612 assert self.weight is None or isinstance(self.weight, Tensor)
--> 613 return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)
614
615
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/functional.py in binary_cross_entropy(input, target, weight, size_average, reduce, reduction)
2760 weight = weight.expand(new_size)
2761
-> 2762 return torch._C._nn.binary_cross_entropy(input, target, weight, reduction_enum)
2763
2764
RuntimeError: all elements of input should be between 0 and 1
Error 2
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-139-b7e8b13763ef> in <module>
1 # Model evaluation
----> 2 y_pred = mlp(embeddings_val).squeeze().detach().numpy()
3 model_evaluation_accuracy(np.array(y_val), y_pred)
/opt/conda/envs/bio-transformers/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
887 result = self._slow_forward(*input, **kwargs)
888 else:
--> 889 result = self.forward(*input, **kwargs)
890 for hook in itertools.chain(
891 _global_forward_hooks.values(),
<ipython-input-136-e2fc535640ab> in forward(self, x)
55 self.fc= nn.Linear(self.hidden_size, self.output_shape)
56 def forward(self, x):
---> 57 h0=torch.zeros(self.num_layers, x_size(0), self.hidden_size).to(device)
58 c0=torch.zeros(self.num_layers, x_size(0), self.hidden_size).to(device)
59 out, _=self.lstm(x,(h0, c0))
NameError: name 'x_size' is not defined
I am adding this as an answer because it would be too hard to put in a comment.
The main problem you have is with the BCE loss. BCE loss expects p(y=1), so your output should be between 0 and 1. If you want to work with logits instead (which is also more numerically stable), you should use nn.BCEWithLogitsLoss.
As you mention in one of the comments, you are using the sigmoid activation, but something about your forward function looks off to me. Mainly, the last line of your forward function is
return self.fc(x)
This does not apply the sigmoid activation. Moreover, you are only using the input x to produce the output, so the LSTM outputs are just being discarded. I think it would be a good idea to add some print statements or breakpoints to make sure the intermediate outputs are what you expect them to be.
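To make the two options concrete, here is a small standalone sketch of how the final activation and the loss pair up (not tied to the poster's class):

import torch
import torch.nn as nn

logits = torch.randn(4, 1)  # raw scores from a final Linear layer
target = torch.tensor([[0.], [1.], [1.], [0.]])

# Option 1: keep raw logits and let the loss apply the sigmoid internally
loss_logits = nn.BCEWithLogitsLoss()(logits, target)

# Option 2: apply the sigmoid yourself, then use plain BCELoss (inputs must lie in [0, 1])
probs = torch.sigmoid(logits)
loss_probs = nn.BCELoss()(probs, target)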
I got the error RuntimeError: all elements of input should be between 0 and 1 because my x data had NaN entries.
I just bumped into this myself. It looks like both you and I missed adding a sigmoid function at the end of the forward function. This update should fix your problem.
def forward(self, x):
    # note: the original x_size(0) raises a NameError; it should be x.size(0)
    h0 = torch.zeros(self.num_layers, x.size(0), self.n_neurons).to(device)
    c0 = torch.zeros(self.num_layers, x.size(0), self.n_neurons).to(device)
    out, _ = self.lstm(x, (h0, c0))
    out = self.fc(out[:, -1, :])
    return torch.sigmoid(out)
