How to efficiently batch-process in huggingface? - python-3.x

I am using the Hugging Face transformers library to find whether a sentence is well-formed or not. I am using a masked language model, XLM-R. I first tokenize my sentence, then mask each word of the sentence one by one, then run the masked sentences through the model and take the probability assigned to the original word at each masked position.
import copy
import math

import torch
from tqdm import tqdm

def calculate_scores(sent, model, tokenizer, device, print_pred=False, maskval=False):
    dic = {}
    ls = tokenizer.batch_encode_plus(sent)
    input_list = ls.input_ids
    with torch.no_grad():
        for i in tqdm(range(len(input_list))):
            item = input_list[i]
            real_input = item
            attmask = [1] * len(item)
            seg = [0] * len(item)
            seglist = [seg]
            masked_list = [real_input]
            attlist = [attmask]
            # one extra copy of the sentence per maskable position
            for j in range(1, len(item) - 1):
                input = copy.deepcopy(real_input)
                input[j] = 50264  # hard-coded mask token id; tokenizer.mask_token_id is safer
                masked_list.append(input)
                attlist.append(attmask)
                seglist.append(seg)
            inid = torch.tensor(masked_list)
            segtensor = torch.tensor(seglist)
            atttensor = torch.tensor(attlist)  # built but never passed to the model
            inid = inid.to(device)
            segtensor = segtensor.to(device)
            # note: depending on the transformers version, the second positional
            # argument may be attention_mask rather than token_type_ids, so
            # passing keyword arguments explicitly is safer
            output = model(inid, segtensor)
            predictions_logits = output.logits
            predictions = torch.softmax(predictions_logits, dim=2)
            ppscore = 0
            # row j holds the copy in which position j is masked
            for j in range(1, len(item) - 1):
                ppscore = ppscore + math.log(predictions[j, j, item[j]], 2)
            try:
                score = math.pow(2, (-1 / (len(item) - 2)) * ppscore)
                dic[sent[i]] = score
            except (OverflowError, ValueError):
                print(sent[i])
                dic[sent[i]] = 10000000
    return dic
To explain the code quickly: the function calculate_scores takes sent, a list of sentences, as input. I first batch-encode this list of sentences. Then, for each encoded sentence, I generate copies in which exactly one token is masked and the rest are left unmasked. I feed these copies to the model, read off the probability of the original token at each masked position, and compute the perplexity from those probabilities.
But this is not a very good way of utilizing the GPU, since each forward pass only contains the masked copies of a single sentence. I want to process multiple sentences at once, but at the same time I also need a separate perplexity score for each sentence. How would I go about doing this?
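One way to approach this (a minimal sketch, not a drop-in replacement for the code above) is to flatten the masked copies of all sentences into one list, run that list through the model in fixed-size padded chunks, and regroup the log-probabilities by sentence afterwards. The function name batched_pseudo_perplexity and the chunk_size parameter are mine; model, tokenizer and device are assumed to be the same objects as in the question.
import math
import torch

def batched_pseudo_perplexity(sents, model, tokenizer, device, chunk_size=64):
    enc = tokenizer.batch_encode_plus(sents)
    mask_id = tokenizer.mask_token_id
    pad_id = tokenizer.pad_token_id

    # one row per (sentence, masked position), with bookkeeping for regrouping
    rows, owners, positions, gold_ids = [], [], [], []
    for s_idx, ids in enumerate(enc.input_ids):
        for pos in range(1, len(ids) - 1):          # skip the special tokens at both ends
            masked = list(ids)
            masked[pos] = mask_id
            rows.append(masked)
            owners.append(s_idx)                    # which sentence this row belongs to
            positions.append(pos)                   # which token is masked
            gold_ids.append(ids[pos])               # the original token id

    log2_sums = [0.0] * len(sents)
    with torch.no_grad():
        for start in range(0, len(rows), chunk_size):
            chunk = rows[start:start + chunk_size]
            max_len = max(len(r) for r in chunk)
            input_ids = torch.tensor(
                [r + [pad_id] * (max_len - len(r)) for r in chunk], device=device)
            attn = torch.tensor(
                [[1] * len(r) + [0] * (max_len - len(r)) for r in chunk], device=device)
            logits = model(input_ids=input_ids, attention_mask=attn).logits
            log_probs = torch.log_softmax(logits, dim=-1)
            for k in range(len(chunk)):
                i = start + k
                lp = log_probs[k, positions[i], gold_ids[i]].item()
                log2_sums[owners[i]] += lp / math.log(2)   # convert nats to bits

    scores = {}
    for s_idx, sentence in enumerate(sents):
        n = len(enc.input_ids[s_idx]) - 2
        scores[sentence] = math.pow(2, -log2_sums[s_idx] / n) if n > 0 else float("inf")
    return scores
Because each row carries its own (sentence, position) bookkeeping, chunk_size can be tuned to the GPU memory independently of how many sentences or how many masked copies there are.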

Related

PyTorch seq2seq MT model: how to get the translation results from the output tensor?

I am trying to implement my own MT engine; I am following the steps in https://github.com/bentrevett/pytorch-seq2seq/blob/master/1%20-%20Sequence%20to%20Sequence%20Learning%20with%20Neural%20Networks.ipynb
SRC = Field(tokenize=tokenize_en,
            init_token='<sos>',
            eos_token='<eos>',
            lower=True)
TRG = Field(tokenize=tokenize_de,
            init_token='<sos>',
            eos_token='<eos>',
            lower=True)
After training the model, the notebook only shows how to evaluate in batches, but I want to feed a single string and get its translation. For example, I want my model to translate the input "Boys" and get the German translation.
savedfilemodelpath = './pretrained_model/2020-09-27en-de.pth'
model.load_state_dict(torch.load(savedfilemodelpath))
model.eval()
inputstring = 'Boys'
processed = SRC.process([SRC.preprocess(inputstring)]).to(device)
output = model(processed, processed)
output_dim = output.shape[-1]
outputs = output[1:].view(-1, output_dim)
for item in outputs:
    print('item shape is {} and item.argmax is {}, and words is {}'.format(item.shape, item.argmax(), TRG.vocab.itos[item.argmax()]))
So my question is: is it right to get the translation results as follows?
First: convert the string to a tensor
inputstring = 'Boys'
processed=SRC.process([SRC.preprocess(inputstring)]).to(device)
Second: send the tensor to the model. Since the model's forward takes a TRG argument, I have to pass a tensor for it; is there a way to avoid passing the TRG tensor?
output=model(processed,processed)
output_dim = output.shape[-1]
outputs = output[1:].view(-1, output_dim)
Third: from the returned tensor, I use argmax to get the translation results. Is that right?
Or how can I get the correct translation results?
for item in outputs:
    print('item shape is {} and item.argmax is {}, and words is {}'.format(item.shape, item.argmax(), TRG.vocab.itos[item.argmax()+1]))
I got the answer from the translate_sentence function below. Really thanks to Aladdin Persson.
def translate_sentence(model, sentence, SRC, TRG, device, max_length=50):
    # Create tokens using spacy, everything lower-cased (which is what our vocab is)
    if type(sentence) == str:
        tokens = [token.text.lower() for token in spacy_en(sentence)]
    else:
        tokens = [token.lower() for token in sentence]
    # Add <sos> and <eos> at the beginning and end respectively
    tokens.insert(0, SRC.init_token)
    tokens.append(SRC.eos_token)
    # Go through each English token and convert it to an index
    text_to_indices = [SRC.vocab.stoi[token] for token in tokens]
    # Convert to a tensor of shape (src_len, 1)
    sentence_tensor = torch.LongTensor(text_to_indices).unsqueeze(1).to(device)
    # Build encoder hidden and cell state
    with torch.no_grad():
        hidden, cell = model.encoder(sentence_tensor)
    outputs = [TRG.vocab.stoi["<sos>"]]
    for _ in range(max_length):
        previous_word = torch.LongTensor([outputs[-1]]).to(device)
        with torch.no_grad():
            output, hidden, cell = model.decoder(previous_word, hidden, cell)
            best_guess = output.argmax(1).item()
        outputs.append(best_guess)
        # Model predicts it's the end of the sentence
        if output.argmax(1).item() == TRG.vocab.stoi["<eos>"]:
            break
    translated_sentence = [TRG.vocab.itos[idx] for idx in outputs]
    # remove start token
    return translated_sentence[1:]
And the translation is not generated all at once. Actually, it is generated one token at a time, and the decoder is called several times.
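For completeness, this is how the helper above can be called for the single string from the question (assuming spacy_en, model, SRC, TRG and device are already set up as in the training script):
model.eval()
tokens = translate_sentence(model, 'Boys', SRC, TRG, device, max_length=50)
print(' '.join(tokens))  # German output tokens; the returned list ends with '<eos>'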

How to predict a character with a character-based RNN model?

I want to create a prediction function which completes a part of a "sentence".
The model used here is a character-based RNN (LSTM). What are the steps we should follow?
I tried this, but I can't give the sentence as input:
def generate(self) -> Tuple[List[Token], torch.tensor]:
    start_symbol_idx = self.vocab.get_token_index(START_SYMBOL, 'tokens')
    end_symbol_idx = self.vocab.get_token_index(END_SYMBOL, 'tokens')
    padding_symbol_idx = self.vocab.get_token_index(DEFAULT_PADDING_TOKEN, 'tokens')
    log_likelihood = 0.
    words = []
    state = (torch.zeros(1, 1, self.hidden_size), torch.zeros(1, 1, self.hidden_size))
    word_idx = start_symbol_idx
    for i in range(self.max_len):
        tokens = torch.tensor([[word_idx]])
        embeddings = self.embedder({'tokens': tokens})
        output, state = self.rnn._module(embeddings, state)
        output = self.hidden2out(output)
        log_prob = torch.log_softmax(output[0, 0], dim=0)
        dist = torch.exp(log_prob)
        word_idx = start_symbol_idx
        # resample until we draw something other than <START> or padding
        while word_idx in {start_symbol_idx, padding_symbol_idx}:
            word_idx = torch.multinomial(
                dist, num_samples=1, replacement=False).item()
        log_likelihood += log_prob[word_idx]
        if word_idx == end_symbol_idx:
            break
        token = Token(text=self.vocab.get_token_from_index(word_idx, 'tokens'))
        words.append(token)
    return words, log_likelihood, start_symbol_idx
Here are two tutorials on how to use machine learning libraries to generate text: TensorFlow and PyTorch.
This code snippet is part of the AllenNLP "language model" tutorial; the generate function is defined there to compute the probability of tokens and build a sequence of tokens by sampling from the model's output distribution. The full code is in this Colab notebook, which you can refer to: https://colab.research.google.com/github/mhagiwara/realworldnlp/blob/master/examples/generation/lm.ipynb#scrollTo=8AU8pwOWgKxE
After training the language model, you can use this function like this:
for _ in range(50):
    tokens, _ = model.generate()
    print(''.join(token.text for token in tokens))
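The generate() method above always starts from START_SYMBOL, which is why a sentence prefix cannot be supplied. One possible adaptation (a sketch, not part of the tutorial) is to first feed the characters of the given prefix through the RNN to build up the hidden state, and only then start sampling. It reuses the same attributes as generate() (self.vocab, self.embedder, self.rnn, self.hidden2out, self.hidden_size, self.max_len); the method name complete is hypothetical.
def complete(self, prefix: str):
    # a sketch: condition the character LSTM on `prefix`, then sample a continuation
    start_idx = self.vocab.get_token_index(START_SYMBOL, 'tokens')
    end_idx = self.vocab.get_token_index(END_SYMBOL, 'tokens')
    pad_idx = self.vocab.get_token_index(DEFAULT_PADDING_TOKEN, 'tokens')
    state = (torch.zeros(1, 1, self.hidden_size), torch.zeros(1, 1, self.hidden_size))
    words = [Token(text=ch) for ch in prefix]
    # run <START> and the prefix characters through the network
    output = None
    for idx in [start_idx] + [self.vocab.get_token_index(ch, 'tokens') for ch in prefix]:
        tokens = torch.tensor([[idx]])
        embeddings = self.embedder({'tokens': tokens})
        output, state = self.rnn._module(embeddings, state)
    # then sample the continuation, the same way generate() does
    for _ in range(self.max_len):
        log_prob = torch.log_softmax(self.hidden2out(output)[0, 0], dim=0)
        dist = torch.exp(log_prob)
        word_idx = start_idx
        while word_idx in {start_idx, pad_idx}:
            word_idx = torch.multinomial(dist, num_samples=1).item()
        if word_idx == end_idx:
            break
        words.append(Token(text=self.vocab.get_token_from_index(word_idx, 'tokens')))
        tokens = torch.tensor([[word_idx]])
        embeddings = self.embedder({'tokens': tokens})
        output, state = self.rnn._module(embeddings, state)
    return words
Calling model.complete('some prefi') would then return the prefix characters followed by the sampled completion.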

How to simplify text comparison for a big data set where the text meaning is the same but not exact - deduplicating text data

I have a text data set (different menu items like chocolate, cake, coke, etc.) of around 1.8 million records belonging to 6 different categories (A, B, C, D, E, F). One of the categories has around 700k records. Many menu items appear in categories to which they don't belong; for example, cake belongs to category 'A' but is found in categories 'B' and 'C' as well.
I want to identify those misclassified items and report them, but the challenge is that the item name is not always spelled correctly because it is free-form human-typed text. For example, chocolate might be entered as hot chclt, sweet choklate, chocolat, etc. There can also be items like chocolate cake ;)
To handle this, I tried a simple method using cosine similarity to compare items category-wise and identify those anomalies, but it takes a lot of time since I am comparing each item against 1.8 million records (sample code shown below). Can anyone suggest a better way to deal with this problem?
# Function
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

def cos_similarity(a, b):
    X = a
    Y = b
    # tokenization
    X_list = word_tokenize(X)
    Y_list = word_tokenize(Y)
    # sw contains the list of stopwords
    sw = stopwords.words('english')
    l1 = []
    l2 = []
    # remove stop words from the strings
    X_set = {w for w in X_list if w not in sw}
    Y_set = {w for w in Y_list if w not in sw}
    # form a set containing keywords of both strings
    rvector = X_set.union(Y_set)
    for w in rvector:
        if w in X_set: l1.append(1)  # create a vector
        else: l1.append(0)
        if w in Y_set: l2.append(1)
        else: l2.append(0)
    c = 0
    # cosine formula
    for i in range(len(rvector)):
        c += l1[i] * l2[i]
    if float((sum(l1) * sum(l2)) ** 0.5) > 0:
        cosine = c / float((sum(l1) * sum(l2)) ** 0.5)
    else:
        cosine = 0
    return cosine
# Base code
cos_sim_list = []
for i in category_B.index:
    ln_cosdegree = 0
    ln_degsem = []
    for j in category_A.index:
        ln_j = str(category_A['item_name'][j])
        ln_i = str(category_B['item_name'][i])
        degreeOfSimilarity = cos_similarity(ln_j, ln_i)
        if degreeOfSimilarity > 0.5:
            cos_sim_list.append([ln_j, ln_i, degreeOfSimilarity])
Consider that the text is already cleaned.
I used NearestNeighbors and cosine similarity to solve this case. Although I run the code multiple times to compare category by category, it is still effective because of the small number of categories. Please suggest if a better solution is available.
import time

import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors

# character n-gram analyzer; the original post does not show this helper,
# so a typical definition is assumed here
def ngrams(string, n=3):
    string = str(string)
    return [string[i:i + n] for i in range(len(string) - n + 1)]

cat_A_clean = category_A['item_name'].unique()
print('Vectorizing the data - this could take a few minutes for large datasets...')
vectorizer = TfidfVectorizer(min_df=1, analyzer=ngrams, lowercase=False)
tfidf = vectorizer.fit_transform(cat_A_clean)
print('Vectorizing completed...')

nbrs = NearestNeighbors(n_neighbors=1, n_jobs=-1).fit(tfidf)
unique_B = set(category_B['item_name'].values)

def getNearestN(query):
    queryTFIDF_ = vectorizer.transform(query)
    distances, indices = nbrs.kneighbors(queryTFIDF_)
    return distances, indices

t1 = time.time()
print('getting nearest n...')
distances, indices = getNearestN(unique_B)
t = time.time() - t1
print("COMPLETED IN:", t)

unique_B = list(unique_B)
print('finding matches...')
matches = []
for i, j in enumerate(indices):
    # cat_A_clean is already a plain array of names, so index it directly
    temp = [round(distances[i][0], 2), cat_A_clean[j[0]], unique_B[i]]
    matches.append(temp)
print('Building data frame...')
matches = pd.DataFrame(matches, columns=['Match confidence (lower is better)', 'ITEM_A', 'ITEM_B'])
print('Done')
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def clean_string(text):
    text = str(text)
    text = text.lower()
    return text

def cosine_sim_vectors(vec1, vec2):
    vec1 = vec1.reshape(1, -1)
    vec2 = vec2.reshape(1, -1)
    return cosine_similarity(vec1, vec2)[0][0]

def cos_similarity(sentences):
    cleaned = list(map(clean_string, sentences))
    print(cleaned)
    vectorizer = CountVectorizer().fit_transform(cleaned)
    vectors = vectorizer.toarray()
    print(vectors)
    return cosine_sim_vectors(vectors[0], vectors[1])

cos_sim_list = []
for ind in matches.index:
    a = matches['Match confidence (lower is better)'][ind]
    b = matches['ITEM_A'][ind]
    c = matches['ITEM_B'][ind]
    degreeOfSimilarity = cos_similarity([b, c])
    cos_sim_list.append([a, b, c, degreeOfSimilarity])

How do I extract the x co-ordinate of a point using Python?

I'm trying to build an NMF model for topic extraction. For re-training the model, I have to pass a parameter to the NMF function, and for that I need the x co-ordinate of a point that the algorithm returns. Here is the code for reference:
no_features = 1000
no_topics = 9
print ('Old number of topics: ', no_topics)
tfidf_vectorizer = TfidfVectorizer(max_df = 0.95, min_df = 2, max_features = no_features, stop_words = 'english')
tfidf = tfidf_vectorizer.fit_transform(documents)
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
no_topics = tfidf.shape
print('New number of topics :', no_topics)
# nmf = NMF(n_components = no_topics, random_state = 1, alpha = .1, l1_ratio = .5, init = 'nndsvd').fit(tfidf)
On the third-to-last line, tfidf.shape assigns a tuple (3, 1000) to the variable no_topics; however, I want that variable to be set to only the x co-ordinate, i.e. 3.
How can I extract just the x co-ordinate from that tuple?
You can select the first value with no_topics[0]:
print('New number of topics : {}'.format(no_topics[0]))
You can also do slicing on your matrix tfidf with:
topics = tfidf[0,:]
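If only the first coordinate is needed, the shape tuple can also be unpacked directly (a small illustration using the tfidf matrix from the question):
n_rows, n_features = tfidf.shape  # here (3, 1000); the first element is the x co-ordinate
print('New number of topics :', n_rows)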

Why do I need to load my training data before loading my Keras model?

I'm building a chatbot with Keras. I followed a guide on how to build a contextual chatbot with a bag-of-words model.
The model is trained on a set of unique words, and learns to identify categories linked to certain words.
The documents list is a list of tuples with the words and their respective category or 'tag'.
Here is the link: https://medium.com/@ferrygunawan/build-facebook-messenger-contextual-chatbot-with-tensorflow-and-keras-4f8cc79438cf
After the model is built, I have a small script where I receive user input (a sentence) and that sentence is classified.
The classify function receives the user's input (the sentence) and the list of tokenized words, which is the same list of words the model was trained on.
My question is: do I need to load the training data before I load the model? And if so, how can I avoid this and load the model and classify a sentence without loading all of the training data again?
def classify(sentence, words):
    # generate probabilities for the model
    p = bow(sentence, words)
    print(p)
    d = len(p)
    f = len(documents) - 2
    a = np.zeros([f, d])
    tot = np.vstack((p, a))
    results = model.predict(tot)[0]
    return results

def bow(sentence, words):
    # tokenize the pattern
    sentence_words = clean_up_sentence(sentence)
    # bag of words
    print(words)
    bag = [0] * len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
    return np.array(bag)

def clean_up_sentence(sentence):
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words
# This is how I load the data; it is loaded in the same way in the script where the model is built
with open('teste.csv', 'r', encoding='utf-8-sig', newline='') as csv_file:
    csvReader = csv.DictReader(csv_file, delimiter=';', quoting=csv.QUOTE_MINIMAL)
    data = list(csvReader)

intents = {}
intents['intents'] = data
words = []
classes = []
documents = []
ignore_words = ['?']
for intent in intents['intents']:
    pattern = intent['patterns']
    w = nltk.word_tokenize(pattern)
    words.extend(w)
    documents.append((w, intent['tag']))
    if intent['tag'] not in classes:
        classes.append(intent['tag'])
words = [stemmer.stem(w.lower()) for w in words if w not in ignore_words]
words = sorted(list(set(words)))
# remove duplicates in the list
classes = sorted(list(set(classes)))

model = keras.models.load_model("model_test.h5")
sentence = input()
classify(sentence, words)
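One common way to avoid re-reading teste.csv at inference time (a sketch, not from the linked guide; the file name training_metadata.pkl is hypothetical) is to pickle the preprocessed vocabulary together with the model when training, and load it back before classifying:
import pickle

# in the training script, right after words, classes and documents are built:
with open('training_metadata.pkl', 'wb') as f:
    pickle.dump({'words': words, 'classes': classes, 'documents': documents}, f)

# in the inference script, instead of parsing the CSV again:
with open('training_metadata.pkl', 'rb') as f:
    meta = pickle.load(f)
words, classes, documents = meta['words'], meta['classes'], meta['documents']

model = keras.models.load_model("model_test.h5")
sentence = input()
print(classify(sentence, words))
The model itself never needs the raw training data at prediction time; classify() only needs the words list (to build the bag-of-words vector) and the length of documents, so saving those objects alongside model_test.h5 is enough.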
