Hi, I'm training my PyTorch model on a remote server.
The whole job is managed by Slurm.
My problem is that training becomes extremely slow after the first epoch.
I checked GPU utilization.
During the first epoch, utilization looked like the image below.
I can see the GPU was being utilized.
But from the second epoch on, the utilization percentage is almost zero.
My dataloader code looks like this:
class img2selfie_dataset(Dataset):
    def __init__(self, path, transform, csv_file, cap_vec):
        self.path = path
        self.transformer = transform
        self.images = [path + item for item in list(csv_file['file_name'])]
        self.smiles_list = cap_vec

    def __getitem__(self, idx):
        img = Image.open(self.images[idx])
        img = self.transformer(img)
        label = self.smiles_list[idx]
        label = torch.Tensor(label)
        return img, label.type(torch.LongTensor)

    def __len__(self):
        return len(self.images)
My DataLoaders are defined like this:
train_data_set = img2selfie_dataset(train_path, preprocess, train_dataset, train_cap_vec)
train_loader = DataLoader(train_data_set, batch_size = 256, num_workers = 2, pin_memory = True)
val_data_set = img2selfie_dataset(train_path, preprocess, val_dataset, val_cap_vec)
val_loader = DataLoader(val_data_set, batch_size = 256, num_workers = 2, pin_memory = True)
My training loop is defined like this:
train_loss = []
valid_loss = []
epochs = 20
best_loss = 1e5
for epoch in range(1, epochs + 1):
    print('Epoch {}/{}'.format(epoch, epochs))
    print('-' * 10)
    epoch_train_loss, epoch_valid_loss = train(encoder_model, transformer_decoder, train_loader, val_loader, criterion, optimizer)
    train_loss.append(epoch_train_loss)
    valid_loss.append(epoch_valid_loss)
    if len(valid_loss) > 1:
        if valid_loss[-1] < best_loss:
            print(f"valid loss on this {epoch} is better than previous one, saving model.....")
            torch.save(encoder_model.state_dict(), 'model/encoder_model.pickle')
            torch.save(transformer_decoder.state_dict(), 'model/decoder_model.pickle')
            best_loss = valid_loss[-1]
            print(best_loss)
    print(f'Epoch : [{epoch}] Train Loss : [{train_loss[-1]:.5f}], Valid Loss : [{valid_loss[-1]:.5f}]')
In my opinion, if this problem came from my code, it wouldn't have hit 100% utilization in the first epoch.
I fixed this issue by moving my training data onto the local drive.
My remote server's (a school server) policy is to store personal data on a NAS,
and file I/O from the NAS put a heavy load on the network.
It was also affected by other users' file I/O on the NAS.
After I moved the training data onto the local drive, everything was fine.
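For anyone with a similar setup, this is roughly what I ended up doing at the start of the job. It is only a sketch: the SLURM_TMPDIR variable and the paths are specific to my cluster, so yours may differ.

# Copy the dataset from the NAS to node-local storage once at job start,
# then point the Dataset at the local copy.
import os
import shutil

nas_path = '/nas/username/img2selfie/'  # hypothetical NAS location
local_path = os.path.join(os.environ.get('SLURM_TMPDIR', '/tmp'), 'img2selfie/')

if not os.path.isdir(local_path):
    shutil.copytree(nas_path, local_path)  # one-time copy over the network

train_data_set = img2selfie_dataset(local_path, preprocess, train_dataset, train_cap_vec)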
I am trying to find the training/validation accuracy and loss of my model for each epoch as I train it, in order to find the best epoch to use from now on. I appreciate that there is a lot of information on this, but the topic is very new to me and I find it difficult to pick out the right answer for my situation.
I assume that I need to add one or two bits to the train_one_epoch() and evaluate() functions in order to do this?
My model setup is:
model = torchvision.models.detection.fasterrcnn_resnet50_fpn_v2(weights=models.detection.FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
model.to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.02, momentum=0.9, weight_decay=0.0001)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20,40], gamma=0.1)
And my training function is:
epochs = 50
for epoch in range(epochs):
    train_one_epoch(model, optimizer, train_data_loader, device, epoch, print_freq=20)
    lr_scheduler.step()
    evaluate(model, val_data_loader, device=device)
    print("\n\n")
torch.save(model, f'./Models/trained_{ds}_model_Epoch{epochs}_LR0_02.pt')
I am using coco-like annotations, for example:
{'boxes': tensor([[316.9700, 242.5500, 464.1000, 442.1700], [ 39.2200, 172.6700, 169.8400, 430.9600]]), 'labels': tensor([2, 2]), 'image_id': tensor(1416), 'area': tensor([29370.1094, 33738.3789]), 'iscrowd': tensor([0, 0])}
The train_one_epoch and evaluate functions are from torchvision's 'engine.py'.
It seems like TensorBoard would be a good tool for this, but I don't really know how to use it.
The engine.py is:
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, scaler=None):
    model.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.6f}"))
    header = f"Epoch: [{epoch}]"

    lr_scheduler = None
    if epoch == 0:
        warmup_factor = 1.0 / 1000
        warmup_iters = min(1000, len(data_loader) - 1)
        lr_scheduler = torch.optim.lr_scheduler.LinearLR(
            optimizer, start_factor=warmup_factor, total_iters=warmup_iters
        )

    for images, targets in metric_logger.log_every(data_loader, print_freq, header):
        images = list(image.to(device) for image in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        with torch.cuda.amp.autocast(enabled=scaler is not None):
            loss_dict = model(images, targets)
            losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()

        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training")
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        if scaler is not None:
            scaler.scale(losses).backward()
            scaler.step(optimizer)
            scaler.update()
        else:
            losses.backward()
            optimizer.step()

        if lr_scheduler is not None:
            lr_scheduler.step()

        metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])

    return metric_logger
The evaluate function is:
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=" ")
    header = "Test:"

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for images, targets in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in images)

        if torch.cuda.is_available():
            torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)

        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
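What I have in mind is something like the sketch below. The meter name "loss" comes from the update call in train_one_epoch above, while the "bbox" key and the stats[0] index are my guesses about what the returned CocoEvaluator exposes:

# Rough sketch only: log per-epoch training loss and validation mAP to TensorBoard.
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/fasterrcnn_experiment")  # log directory name is arbitrary

for epoch in range(epochs):
    metric_logger = train_one_epoch(model, optimizer, train_data_loader, device, epoch, print_freq=20)
    lr_scheduler.step()
    coco_evaluator = evaluate(model, val_data_loader, device=device)

    # train_one_epoch returns its MetricLogger; 'loss' is the meter updated inside the loop
    writer.add_scalar("train/loss", metric_logger.meters["loss"].global_avg, epoch)
    # evaluate returns the CocoEvaluator; stats[0] should be mAP@[0.5:0.95] for boxes
    writer.add_scalar("val/mAP", coco_evaluator.coco_eval["bbox"].stats[0], epoch)

writer.close()

Running tensorboard --logdir runs afterwards should then show both curves per epoch, if I understand the tooling correctly.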
The machine I am using for training has 4 GPUs. I am "moving" the classifier, the loss function and the tensors to the GPU. But when I run nvidia-smi on the machine while training is ongoing, I see that GPU utilization is very low (3%) on one GPU and 0 on the others.
The questions I have are:
1. Is there an easier way to ask PyTorch to use the GPU (and as many GPUs as are available) without my having to sprinkle .to(device) all over the place?
2. Is there something other than .to(device) that is needed to use the GPU?
3. Is there a way to see whether training is happening on the CPU vs. the GPU, or is running nvidia-smi on the machine and looking at GPU utilization the only way?
4. How do I interpret a GPU utilization of 3% in nvidia-smi? Does it mean the CPU is being used in many places? If so, is there a way to debug what is making the training use the CPU?
5. Will setting num_workers in the DataLoader to the number of available cores be enough to use multiple GPUs? Is there any generic way to automatically find the number of GPUs available? (A small sketch of the kind of check I mean follows this list.)
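To make questions 3 and 5 concrete, this is roughly the kind of check I mean; it is only a sketch and not part of my training script:

# Quick checks for device visibility and placement (sketch only).
import torch

print(torch.cuda.is_available())      # is a CUDA device visible at all?
print(torch.cuda.device_count())      # how many GPUs PyTorch can see
print(next(clf.parameters()).device)  # which device the model weights currently live on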
Code used to train:
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)
torch.backends.cudnn.deterministic = True

start_time = time.time()
clf = MLP(len(X_training[0]), hidden_size=[100, 100, 100, 100, 100])

# Move to GPU if available
use_gpu = torch.cuda.is_available()
device = torch.device('cuda' if use_gpu else 'cpu')

# Define the loss function and optimizer
optimizer = torch.optim.Adam(clf.parameters(), lr=8e-4)
clf = clf.to(device)
loss_function = nn.BCELoss()
loss_function = loss_function.to(device)

# Run the training loop
# per_epoch_precision = []
# per_epoch_recall = []
for epoch in range(0, 150):
    # Set current loss value
    current_loss = 0.0

    dataset = MyDataset(X_training, y_training, use_gpu)
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_gpu else {}
    trainloader = torch.utils.data.DataLoader(dataset, batch_size=10000, shuffle=True, **kwargs)

    # Iterate over the DataLoader for training data
    clf.train()  # set to train mode
    for i, data in enumerate(trainloader):
        # Get inputs
        inputs, targets = data
        inputs = inputs.to(device)
        targets = targets.to(device)

        # Zero the gradients
        optimizer.zero_grad()

        # Perform forward pass
        outputs = clf(inputs)

        # Compute loss
        targets = targets.float().unsqueeze(1)
        loss = loss_function(outputs, targets)

        # Perform backward pass
        loss.backward()

        # Perform optimization
        optimizer.step()

        # Print statistics
        current_loss += loss.item()
        if i % 20000 == 19999:
            print("Loss after mini-batch %5d: %.3f" % (i + 1, current_loss / 500))
            current_loss = 0.0

# Process is complete.
print("Training process has finished.")
print(f"Train time is {time.time() - start_time}")
class MyDataset(Dataset):
    def __init__(self, x, y, use_gpu=False):
        x = x.astype(np.float32)
        self.x_train = torch.from_numpy(x)
        self.y_train = torch.from_numpy(y.values)
        if use_gpu:
            device = torch.device("cuda")
            self.x_train.to(device)
            self.y_train.to(device)
        # self.y_train = torch.LongTensor(y.values, dtype=torch.int)

    def __len__(self):
        return len(self.y_train)

    def __getitem__(self, idx):
        return self.x_train[idx], self.y_train[idx]
class MLP(nn.Module):
    def __init__(self, input_size, hidden_size, act_fn=nn.ReLU(), use_dropout=False, drop_rate=0.25):
        super(MLP, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size

        self.layers = nn.Sequential()
        if use_dropout:
            self.layers.append(nn.Dropout(p=drop_rate))
        self.layers.append(nn.Linear(self.input_size, self.hidden_size[0]))
        self.layers.append(act_fn)
        for i in range(1, len(hidden_size)):
            if use_dropout:
                self.layers.append(nn.Dropout(p=drop_rate))
            self.layers.append(nn.Linear(self.hidden_size[i - 1], self.hidden_size[i]))
            self.layers.append(act_fn)

        if use_dropout:
            self.layers.append(nn.Dropout(p=drop_rate))
        self.layers.append(nn.Linear(self.hidden_size[-1], 1))
        self.layers.append(nn.Sigmoid())

    def forward(self, x):
        return self.layers(x)
I am writing a classifier that takes a surname and predicts the language it belongs to. I found that small batch sizes (256 and below) perform poorly compared to large batch sizes (2048 and above). Could someone give me some insight into why this is happening and how to fix it? Thank you.
Training code:
def indices_to_packed(names, input_size):
    names = [F.one_hot(item, input_size).float() for item in names]
    names_packed = pack_sequence(names, enforce_sorted=False)
    return names_packed

def infer(model, data, labels, lengths, device):
    data_packed = indices_to_packed(data, model.rnn.input_size)
    data_packed, labels, lengths = data_packed.to(device), labels.to(device), lengths.to(device)
    preds = model(data_packed, lengths)
    loss = loss_fn(preds, labels)
    return loss, preds

results = {}
epochs = 100
for BATCH_SIZE in [4096, 2048, 256]:
    train_loader = data.DataLoader(train_data, BATCH_SIZE, sampler=train_sampler, collate_fn=partial(my_collate, input_size=input_size, output_size=output_size))
    val_loader = data.DataLoader(val_data, BATCH_SIZE, sampler=val_sampler, collate_fn=partial(my_collate, input_size=input_size, output_size=output_size))

    model = LSTM(input_size, HIDDEN_SIZE, NUM_LAYERS, DROPOUT, output_size)
    optimizer = torch.optim.Adam(model.parameters())
    model.to(device)

    train_losses = []
    val_losses = []
    cur_losses = {}
    duration = 0

    for epoch in range(epochs):
        start = time.time()
        train_loss = 0
        model.train()
        # Using PackedSequence
        for names, langs, lengths in train_loader:
            optimizer.zero_grad()
            loss, _ = infer(model, names, langs, lengths, device)
            loss.backward()
            optimizer.step()
            train_loss += loss
        train_loss /= len(train_data)
        train_losses.append(train_loss.cpu().detach().numpy())

        model.eval()
        val_loss = 0
        with torch.no_grad():
            for names, langs, lengths in val_loader:
                loss, _ = infer(model, names, langs, lengths, device)
                val_loss += loss
            val_loss /= len(val_data)
        val_losses.append(val_loss.cpu().detach().numpy())

        cur_duration = time.time() - start
        duration += cur_duration

        log_line = (f"BATCH_SIZE: {BATCH_SIZE} epoch: {epoch} train loss: "
                    f"{train_loss:.5f} val loss: {val_loss:.5f}")
        print(log_line)

    cur_losses["train_losses"] = train_losses
    cur_losses["val_losses"] = val_losses
    results[BATCH_SIZE] = {"losses": cur_losses, "duration": duration, "model": model}
Model:
class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, dropout, output_size):
        super().__init__()
        self.rnn = nn.LSTM(input_size, hidden_size, num_layers, dropout=DROPOUT)
        self.linear = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, x, lengths):
        lstm_out, _ = self.rnn(x)
        # https://discuss.pytorch.org/t/get-each-sequences-last-item-from-packed-sequence/41118/7
        sum_batch_sizes = torch.cat((
            torch.zeros(2, dtype=torch.int64),
            torch.cumsum(lstm_out.batch_sizes, 0)
        ))
        sorted_lengths = lengths[lstm_out.sorted_indices]
        last_seq_idxs = sum_batch_sizes[sorted_lengths] + torch.arange(lengths.size(0))
        last_seq_items = lstm_out.data[last_seq_idxs]
        lstm_last_out = last_seq_items[lstm_out.unsorted_indices]
        linear_out = self.linear(lstm_last_out)
        softmax_out = self.softmax(linear_out)
        return softmax_out
Losses with different batch sizes:
It looks like the issue is how the loss is calculated.
The train_loss += loss line accumulates the per-batch loss. When the batch size is larger, there are fewer steps per epoch. The code normalizes this by dividing by the length of the training data, train_loss /= len(train_data), but it should probably take the batch size into account: train_loss /= (len(train_data) / BATCH_SIZE).
The same applies to the validation loss, but the effect is different, probably because the validation set is smaller than the training data.
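As a concrete sketch, the same normalization can be written by dividing by len(train_loader), which counts batches rather than samples (this just rewrites the loop from the question):

train_loss = 0.0
for names, langs, lengths in train_loader:
    optimizer.zero_grad()
    loss, _ = infer(model, names, langs, lengths, device)
    loss.backward()
    optimizer.step()
    train_loss += loss.item()    # .item() keeps a plain float instead of a graph-attached tensor
train_loss /= len(train_loader)  # len(train_loader) == number of batches per epoch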
I'm working on a waste/garbage detector for a personal project. I rely on TensorFlow (in Python 3) to train a model on my own dataset.
I have a script that creates and trains a model from scratch. Then, I freeze the checkpoints to get a PB file for detection.
The code I have for the detection (found here) requires two files to work: the previous PB file and a labelmap.txt.
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = 'frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = 'label_map.pbtxt'
I know what a labelmap.txt looks like, and it is actually quite simple to write one myself, but I don't know how to generate it, because it links each class to an ID and that ID is unknown to me.
I tried searching on the Internet; when people mention labelmap.txt, it usually involves TFRecords. However, I don't use TFRecords for my project; I extract each region of interest and save them in subfolders, one subfolder per class (can, bottle...).
As I am new to TensorFlow, I may have misunderstood something in the training process. Do you have any leads, so I can check whether my model is accurate by testing it? I can provide some code if you need it.
Thanks in advance.
The labelmap.pbtxt file maps the IDs used internally in the network to the label names. You cannot simply generate one after training. You need to make sure you use the same ID-to-label mapping that was used during training, or you might get incorrect results.
If you used the training instructions for the TensorFlow object_detection models, then you will have generated this labelmap file at some point, and you can simply reuse it.
Check out the steps you used to train the network or post them here.
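For illustration only: if the class list really just comes from subfolder names on disk, a label map can be written out with a few lines like the sketch below. The IDs here (sorted folder names, index + 1) are an assumption and must match whatever mapping was actually used during training:

# Write a label_map.pbtxt from a list of class names (IDs start at 1 by convention).
import os

data_path = 'dataset/'  # hypothetical: one subfolder per class
all_classes = sorted(os.listdir(data_path))

with open('label_map.pbtxt', 'w') as f:
    for idx, name in enumerate(all_classes, start=1):
        f.write("item {\n")
        f.write("  id: %d\n" % idx)
        f.write("  name: '%s'\n" % name)
        f.write("}\n")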
Before training, I gathered and labelled thousands of images, extracted each labelled area, resized them and split them into different folders according to their classes.
There are several files involved in the training step. I originally retrieved the code from this repository and added the possibility to resume training.
trainer.py
import os

import tensorflow as tf

import model_architecture
from utils import utils
from build_model import model_tools

# Images directory.
data_path = os.path.join('dataset' + os.sep)  # contains subfolders, one per item
all_classes = os.listdir(data_path)
number_of_classes = len(all_classes)

# Images dimensions.
height = 64
width = 64

# Checkpoints directory.
output_dir = os.path.join(os.pardir + os.sep, 'checkpoints' + os.sep)
model_pattern = 'model.ckpt'
model_base_path = os.path.join(output_dir, model_pattern)
meta_file_path = model_base_path + '.meta'

# Training params.
color_channels = 3
start = 0
epochs = 5
batch_size = 10
batch_counter = 0

# Create Placeholders for images and labels.
images_ph = tf.placeholder(tf.float32, shape=[None, height, width, color_channels])
labels_ph = tf.placeholder(tf.float32, shape=[None, number_of_classes])


def trainer(network, number_of_images):
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=network, labels=labels_ph)
    cost = tf.reduce_mean(cross_entropy)
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    tf.summary.scalar('cost', cost)
    tf.add_to_collection('optimizer', optimizer)

    global_step = tf.Variable(0, name='global_step', trainable=False)
    saver = tf.train.Saver()

    # Launch the graph in a session
    with tf.Session() as sess:
        # Initialize all variables.
        tf.global_variables_initializer().run()

        # Read checkpoints directory.
        ckpt = tf.train.get_checkpoint_state(output_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Reloading existing model.')
        else:
            init = tf.global_variables_initializer()
            sess.run(init)
            print('Creating a new model.')

        # Get last epoch index.
        start = global_step.eval()

        writer = tf.summary.FileWriter(output_dir, graph=tf.get_default_graph())
        merged = tf.summary.merge_all()
        saver = tf.train.Saver(write_version=tf.train.SaverDef.V2, max_to_keep=5)
        counter = 0

        # Training.
        for epoch in range(start, epochs):
            tools = utils()
            for batch in range(int(number_of_images / batch_size)):
                counter += 1
                images, labels = tools.batch_dispatch()
                if images is None:
                    break
                loss, summary = sess.run([cost, merged], feed_dict={images_ph: images, labels_ph: labels})
                sess.run(optimizer, feed_dict={images_ph: images, labels_ph: labels})
                print('Epoch number {epoch} batch {batch} complete - loss {loss}'.format(
                    epoch=epoch, batch=batch, loss=loss))
                writer.add_summary(summary, counter)
            global_step.assign(epoch).eval()

            # Save progression.
            saver.save(sess, model_base_path, global_step=epoch)


# Main program.
if __name__ == '__main__':
    tools = utils()
    model = model_tools()
    network = model_architecture.generate_model(images_ph, number_of_classes)
    number_of_images = sum([len(files) for r, d, files in os.walk('dataset')])
    trainer(network, number_of_images)
model_tools.py
import tensorflow as tf  # required by the tf.* calls below


class model_tools:

    def add_weights(self, shape):
        return tf.Variable(tf.truncated_normal(shape=shape, stddev=0.05))

    def add_biases(self, shape):
        return tf.Variable(tf.constant(0.05, shape=shape))

    def conv_layer(self, layer, kernel, input_shape, output_shape, stride_size):
        weights = self.add_weights([kernel, kernel, input_shape, output_shape])
        biases = self.add_biases([output_shape])
        stride = [1, stride_size, stride_size, 1]
        layer = tf.nn.conv2d(layer, weights, strides=stride, padding='SAME') + biases
        return layer

    def pooling_layer(self, layer, kernel_size, stride_size):
        kernel = [1, kernel_size, kernel_size, 1]
        stride = [1, stride_size, stride_size, 1]
        return tf.nn.max_pool(layer, ksize=kernel, strides=stride, padding='SAME')

    def flattening_layer(self, layer):
        input_size = layer.get_shape().as_list()
        new_size = input_size[-1] * input_size[-2] * input_size[-3]
        return tf.reshape(layer, [-1, new_size]), new_size

    def fully_connected_layer(self, layer, input_shape, output_shape):
        weights = self.add_weights([input_shape, output_shape])
        biases = self.add_biases([output_shape])
        layer = tf.matmul(layer, weights) + biases
        return layer

    def activation_layer(self, layer):
        return tf.nn.relu(layer)
utils.py
import cv2
import random


class utils:
    image_count = []
    count_buffer = []
    class_buffer = all_classes[:]

    def __init__(self):
        self.image_count = []
        self.count_buffer = []
        for i in os.walk(data_path):
            if len(i[2]):
                self.image_count.append(len(i[2]))
        self.count_buffer = self.image_count[:]

    def batch_dispatch(self, batch_size=batch_size):
        global batch_counter
        if sum(self.count_buffer):
            class_name = random.choice(self.class_buffer)
            choice_index = all_classes.index(class_name)
            choice_count = self.count_buffer[choice_index]
            if choice_count == 0:
                class_name = all_classes[self.count_buffer.index(max(self.count_buffer))]
                choice_index = all_classes.index(class_name)
                choice_count = self.count_buffer[choice_index]

            slicer = batch_size if batch_size < choice_count else choice_count
            img_ind = self.image_count[choice_index] - choice_count
            indices = [img_ind, img_ind + slicer]

            images = self.generate_images(class_name, indices)
            labels = self.generate_labels(class_name, slicer)

            self.count_buffer[choice_index] = self.count_buffer[choice_index] - slicer
        else:
            images, labels = (None,) * 2
        return images, labels

    def generate_labels(self, class_name, number_of_samples):
        one_hot_labels = [0] * number_of_classes
        one_hot_labels[all_classes.index(class_name)] = 1
        one_hot_labels = [one_hot_labels] * number_of_samples
        return one_hot_labels

    def generate_images(self, class_name, indices):
        batch_images = []
        choice_folder = os.path.join(data_path, class_name)
        selected_images = os.listdir(choice_folder)[indices[0]:indices[1]]
        for image in selected_images:
            img = cv2.imread(os.path.join(choice_folder, image))
            batch_images.append(img)
        return batch_images
model_architecture.py contains the structure of the 3-layer image classifier.
When I run trainer.py, I get a checkpoints folder filled with meta and index files. It seems correct.
As for exporting the model, I'm stuck because I don't know what to pass as the pipeline config path parameter.
python3 export_inference_graph.py \
--input_type image_tensor \
--trained_checkpoint_prefix "/home/user/model/model.ckpt-4" \
--pipeline_config_path ???? \
--output_directory /home/user/exports/
To get the PB file, I used this:
checkpoint_location = 'checkpoints/model.ckpt-0'
export_dir = 'frozen/'
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    loader = tf.train.import_meta_graph(checkpoint_location + '.meta')
    loader.restore(sess, checkpoint_location)
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    builder.add_meta_graph_and_variables(sess,
                                         [tf.saved_model.tag_constants.TRAINING],
                                         strip_default_attrs=True)
    builder.add_meta_graph([tf.saved_model.tag_constants.SERVING], strip_default_attrs=True)
    builder.save()
It creates a saved_model.pb file but not a labelmap.pbtxt.
Should I completely change the way I train my model?
I am training skip-gram word embeddings using the famous model described in https://arxiv.org/abs/1310.4546. I want to train it in PyTorch, but I am getting errors and I can't figure out where they are coming from. Below I have provided my model class, training loop, and batching method. Does anyone have any insight into what's going on?
I am getting an error on the output = loss(data, target) line. It is complaining about <class 'torch.LongTensor'>, which is weird because CrossEntropyLoss takes a long tensor. The output shape might also be wrong: it is torch.Size([1000, 100, 1000]) after the feed-forward.
I have my model defined as:
import torch
import torch.nn as nn

torch.manual_seed(1)


class SkipGram(nn.Module):
    def __init__(self, vocab_size, embedding_dim):
        super(SkipGram, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.hidden_layer = nn.Linear(embedding_dim, vocab_size)

    # Loss needs to be input: (minibatch (N), C) target: (minibatch, 1), each label is a class
    # Calculate loss in training
    def forward(self, x):
        embeds = self.embeddings(x)
        x = self.hidden_layer(embeds)
        return x
My training is defined as:
import torch.optim as optim
from torch.autograd import Variable

net = SkipGram(1000, 300)
optimizer = optim.SGD(net.parameters(), lr=0.01)

batch_size = 100
size = len(train_ints)
batches = batch_index_gen(batch_size, size)
inputs, targets = build_tensor_from_batch_index(batches[0], train_ints)

for i in range(100):
    running_loss = 0.0
    for batch_idx, batch in enumerate(batches):
        data, target = build_tensor_from_batch_index(batch, train_ints)
        # if (torch.cuda.is_available()):
        #     data, target = data.cuda(), target.cuda()
        #     net = net.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = net.forward(data)
        loss = nn.CrossEntropyLoss()
        output = loss(data, target)
        output.backward()
        optimizer.step()
        running_loss += loss.data[0]
    optimizer.step()
    print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
        i, batch_idx * len(batch_size), len(size),
        100. * (batch_idx * len(batch_size)) / len(size), loss.data[0]))
If useful, my batching is:
def build_tensor_from_batch_index(index, train_ints):
    minibatch = []
    for i in range(index[0], index[1]):
        input_arr = np.zeros((1000, 1), dtype=np.int)
        target_arr = np.zeros((1000, 1), dtype=np.int)
        input_index, target_index = train_ints[i]
        input_arr[input_index] = 1
        target_arr[input_index] = 1
        input_tensor = torch.from_numpy(input_arr)
        target_tensor = torch.from_numpy(target_arr)
        minibatch.append((input_tensor, target_tensor))
    # Concatenate all tensors into a minibatch
    # x = [tensor[0] for tensor in minibatch]
    # print(x)
    input_minibatch = torch.cat([tensor[0] for tensor in minibatch], 1)
    target_minibatch = torch.cat([tensor[1] for tensor in minibatch], 1)
    # target_minibatch = minibatch[0][1]
    return input_minibatch, target_minibatch
I'm not sure about that since I did not read the paper, but it seems weird that you are computing the loss with the original data and the targets:

output = loss(data, target)

Considering that the output of the network is output = net.forward(data), I think you should compute your loss as:

error = loss(output, target)

If this doesn't help, briefly point out what the paper says about the loss function.
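For reference, here is a tiny sketch of the shapes nn.CrossEntropyLoss expects; nothing in it comes from your pipeline, and the sizes are just made up to match a 1000-word vocabulary. The LongTensor error and the [1000, 100, 1000] output shape both suggest your inputs don't match this pattern:

# Minimal shape check for nn.CrossEntropyLoss (sketch only).
import torch
import torch.nn as nn

loss = nn.CrossEntropyLoss()
logits = torch.randn(100, 1000)           # float scores, shape (batch, num_classes)
targets = torch.randint(0, 1000, (100,))  # long class indices, shape (batch,)
print(loss(logits, targets))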