document clustering in python - python-3.x

I am new to both Python and scikit-learn. I am going to cluster a bunch of text files (news article bodies), and I am using the following code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import nltk, sklearn, string, os
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cluster import KMeans

# Preprocessing text with NLTK package
token_dict = {}
stemmer = PorterStemmer()

def stem_tokens(tokens, stemmer):
    stemmed = []
    for item in tokens:
        stemmed.append(stemmer.stem(item))
    return stemmed

def tokenize(text):
    tokens = nltk.word_tokenize(text)
    stems = stem_tokens(tokens, stemmer)
    return stems
###########################################################################
# Loading and preprocessing text data
print("\n Loading text dataset:")
path = 'n'
for subdir, dirs, files in os.walk(path):
    for i, f in enumerate(files):
        if f != '.DS_Store':
            file_path = subdir + os.path.sep + f
            shakes = open(file_path, 'r')
            text = shakes.read()
            lowers = text.lower()
            no_punctuation = lowers.translate(string.punctuation)
            token_dict[f] = no_punctuation
###########################################################################
true_k = 3 # *
print("\n Performing stemming and tokenization...")
vectorizer = TfidfVectorizer(tokenizer=tokenize, encoding='latin-1',
                             stop_words='english')
X = vectorizer.fit_transform(token_dict.values())
print("n_samples: %d, n_features: %d" % X.shape)
print()
###############################################################################
# Do the actual clustering
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1)
y=km.fit(X)
print(km)
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
    print("Cluster %d:" % i, end='')
    for ind in order_centroids[i, :10]:
        print(' %s' % terms[ind], end='')
    print()
This code gets the top words per cluster. But how can I tell which original text files belong to cluster 0, cluster 1, or cluster 2?

To explain a bit more: you can store the cluster allocations using the following:
clusters = km.labels_.tolist()
This list will be ordered the same as the dict you passed to your vectorizer.
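For example, a minimal sketch (assuming Python 3.7+, where dicts preserve insertion order, so token_dict.keys() lines up with the values you vectorized) that pairs each file name with its cluster:
for filename, cluster_id in zip(token_dict.keys(), clusters):
    print('%s -> cluster %d' % (filename, cluster_id))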
I just put together a guide to document clustering you might find helpful. Let me know if I can explain anything in more detail: http://brandonrose.org/clustering

Related

Converting TFRECORD file to text data

I have converted a .txt file to TFRecords, with some changes to it. But now I want to convert it back, or read the same file, so I can understand my data, which has now changed. I am doing this for my knowledge graph project.
import numpy as np
import os
import tensorflow as tf
import tqdm
import pdb
import glob
import time
import sys
import re
import argparse
import fastBPE
import platform
use_py3 = platform.python_version()[0] == '3'
parser = argparse.ArgumentParser(description='TensorFlow code for creating TFRecords data')
parser.add_argument('--text_file', type=str, required=True,
                    help='location of text file to convert to TFRecords')
parser.add_argument('--control_code', type=str, required=True,
                    help='control code to use for this file. must be in the vocabulary, else it will error out.')
parser.add_argument('--sequence_len', type=int, required=True,
                    help='sequence length of model being fine-tuned (256 or 512)')
args = parser.parse_args()
path_to_train_file = fname = args.text_file
domain = [args.control_code]
train_text = open(path_to_train_file, 'rb').read().decode(encoding='utf-8')
bpe = fastBPE.fastBPE('../codes', '../vocab')
tokenized_train_text = bpe.apply([train_text.encode('ascii', errors='ignore') if not use_py3 else train_text])[0] # will NOT work for non-English texts
# if you want to run non-english text, please tokenize separately using ./fast applybpe and then run this script on the .bpe file with utf8 encoding
tokenized_train_text = re.findall(r'\S+|\n', tokenized_train_text)
tokenized_train_text = list(filter(lambda x: x != u'##', tokenized_train_text))
# load the vocabulary from file
vocab = open('../vocab').read().decode(encoding='utf-8').split('\n') if not use_py3 else open('../vocab', encoding='utf-8').read().split('\n')
vocab = list(map(lambda x: x.split(' ')[0], vocab)) + ['<unk>'] + ['\n']
print ('{} unique words'.format(len(vocab)))
if args.control_code not in vocab:
    print('Provided control code is not in the vocabulary')
    print('Please provide a different one; refer to the vocab file for allowable tokens')
    sys.exit(1)
# Creating a mapping from unique characters to indices
word2idx = {u:i for i, u in enumerate(vocab)}
idx2word = np.array(vocab)
seq_length = args.sequence_len-1
def numericalize(x):
    count = 0
    for i in x:
        if i not in word2idx:
            print(i)
            count += 1
    return count > 1, [word2idx.get(i, word2idx['<unk>']) for i in x]
tfrecords_fname = fname.lower()+'.tfrecords'
total = 0
skipped = 0
with tf.io.TFRecordWriter(tfrecords_fname) as writer:
    for i in tqdm.tqdm(range(0, len(tokenized_train_text), seq_length)):
        flag_input, inputs = numericalize(domain + tokenized_train_text[i:i+seq_length])
        flag_output, outputs = numericalize(tokenized_train_text[i:i+seq_length+1])
        total += 1
        if flag_input or flag_output:
            skipped += 1
            continue
        if len(inputs) != seq_length+1 or len(outputs) != seq_length+1:
            break
        example_proto = tf.train.Example(features=tf.train.Features(feature={
            'input': tf.train.Feature(int64_list=tf.train.Int64List(value=inputs)),
            'output': tf.train.Feature(int64_list=tf.train.Int64List(value=outputs))}))
        writer.write(example_proto.SerializeToString())
print('Done')
print('Skipped', skipped, 'of', total)
This is my code. I am open to any changes to it, except for the part that converts to TFRecords; I just want to be able to read the data back as text.
Read the TFRecord with a TFRecordDataset.
Then iterate through the TFRecordDataset and for each element, write to a new text file or print out the results.
https://www.tensorflow.org/api_docs/python/tf/data/TFRecordDataset
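A minimal sketch of that approach (my own assumption-laden example, not from the original answer: it assumes the 'input'/'output' int64 features written by the script above, a hypothetical file name, and idx2word from the writer script to turn ids back into tokens):
import tensorflow as tf

feature_description = {
    'input': tf.io.VarLenFeature(tf.int64),
    'output': tf.io.VarLenFeature(tf.int64),
}

def parse_example(serialized):
    # parse one serialized tf.train.Example back into tensors
    parsed = tf.io.parse_single_example(serialized, feature_description)
    # VarLenFeature yields sparse tensors; densify to get plain id sequences
    return {k: tf.sparse.to_dense(v) for k, v in parsed.items()}

dataset = tf.data.TFRecordDataset('train.txt.tfrecords')  # hypothetical file name
for record in dataset.map(parse_example):
    ids = record['input'].numpy()
    # idx2word (from the writer script above) maps ids back to tokens
    print(' '.join(idx2word[i] for i in ids))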

How to iterate on keras Dataset and edit content

I am working on this movie classification problem
https://www.tensorflow.org/tutorials/keras/text_classification
In this example, text files (12,500 files with movie reviews) are read and a batched dataset is prepared as below:
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
    'aclImdb/train',
    batch_size=batch_size,
    validation_split=0.2,
    subset='training',
    seed=seed)
At the time of standardization:
def custom_standardization(input_data):
    lowercase = tf.strings.lower(input_data)
    stripped_html = tf.strings.regex_replace(lowercase, '<br />', ' ')
    # I WANT TO REMOVE STOP WORDS HERE, CAN I DO IT?
    return tf.strings.regex_replace(stripped_html, '[%s]' % re.escape(string.punctuation), '')
Problem: I understand that I have the training dataset with labels in the variable 'raw_train_ds'. Now I want to iterate over this dataset, remove stop words from the movie review text, and store the result back in the same variable. I tried to do it in the function 'custom_standardization', but it gives a type error.
I also tried to use tf.strings.as_strings, but it returns this error:
InvalidArgumentError: Value for attr 'T' of string is not in the list of allowed values: int8, int16, int32, int64
Can someone please help with this, or simply explain how to remove stop words from the batched dataset?
It looks like right now TensorFlow does not have built-in support for stop-word removal, just basic standardization (lowercasing & punctuation stripping). The TextVectorization layer used in the tutorial supports a custom standardization callback, but I couldn't find any stop-word examples.
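For what it's worth, here is a rough sketch of what such a callback could look like (my own assumption-laden example, not from the original answer), folding the NLTK stop-word list into one whole-word regex that tf.strings.regex_replace can apply inside the graph:
import re
import string
import tensorflow as tf
from nltk.corpus import stopwords

stop_words = stopwords.words("english")
# one whole-word alternation, e.g. r'\b(a|about|above|...)\b'
stopword_pattern = r'\b(' + '|'.join(stop_words) + r')\b'

def custom_standardization(input_data):
    lowercase = tf.strings.lower(input_data)
    stripped_html = tf.strings.regex_replace(lowercase, '<br />', ' ')
    # crude in-graph stop-word removal; may leave extra whitespace behind
    no_stopwords = tf.strings.regex_replace(stripped_html, stopword_pattern, ' ')
    return tf.strings.regex_replace(no_stopwords, '[%s]' % re.escape(string.punctuation), '')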
Since the tutorial downloads the IMDB dataset and reads the text files from disk, you can just do the standardization manually with Python before reading them. This modifies the text files themselves, but you can then read the files in normally using tf.keras.preprocessing.text_dataset_from_directory, and the entries will already have the stop words removed.
#!/usr/bin/env python3
import pathlib
import re
from bs4 import BeautifulSoup
from nltk.corpus import stopwords

stop_words = set(stopwords.words("english"))

def cleanup_text_files_in_folder(folder_name):
    text_files = []
    for file_path in pathlib.Path(folder_name).glob('*.txt'):
        text_files.append(str(file_path))
    print(f'Found {len(text_files)} files in {folder_name}')
    # Give some kind of status
    i = 0
    for text_file in text_files:
        replace_file_contents(text_file)
        i += 1
        if i % 1000 == 0:
            print("No of files processed =", i)
    return text_files

def replace_file_contents(input_file):
    """
    This will read in the contents of the text file, process it (clean up, remove stop words)
    and overwrite the new 'processed' output to that same file
    """
    with open(input_file, 'r') as file:
        file_data = file.read()
    file_data = process_text_adv(file_data)
    with open(input_file, 'w') as file:
        file.write(file_data)

def process_text_adv(text):
    # review without HTML tags
    text = BeautifulSoup(text, features="html.parser").get_text()
    # review without punctuation and numbers (re.UNICODE must be passed as flags, not count)
    text = re.sub(r'[^\w\s]', '', text, flags=re.UNICODE)
    # lowercase
    text = text.lower()
    # simple split
    text = text.split()
    swords = set(stopwords.words("english"))  # conversion into set for fast searching
    text = [w for w in text if w not in swords]
    # joining of splitted paragraph by spaces and return
    return " ".join(text)

if __name__ == "__main__":
    # Download & untar dataset beforehand, then running this would modify the text files
    # in place. Back up the originals if that's a concern.
    cleanup_text_files_in_folder('aclImdb/train/pos/')
    cleanup_text_files_in_folder('aclImdb/train/neg/')
    cleanup_text_files_in_folder('aclImdb/test/pos/')
    cleanup_text_files_in_folder('aclImdb/test/neg/')
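After running the cleanup, the dataset can be loaded exactly as in the tutorial and the entries should already be free of stop words. A short check (a sketch; the batch_size and seed values here are placeholders I chose):
import tensorflow as tf

raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
    'aclImdb/train',
    batch_size=32,          # assumed value
    validation_split=0.2,
    subset='training',
    seed=42)                # assumed value
for text_batch, label_batch in raw_train_ds.take(1):
    # print the start of one review; stop words should already be gone
    print(text_batch.numpy()[0][:200])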

Jupyter Kernel dies/Spyder console stops while training custom NER model in Spacy 2.0.11

I was trying to train a custom NER model in spaCy. Initially I had installed the latest spaCy version, but I was getting the following error during training:
ValueError: [E103] Trying to set conflicting doc.ents: A token can only be part of one entity, so make sure the entities you're setting don't overlap.
After that I installed spacy==2.0.11 and tried running my code. When I have around 10 rows of data to train on, the model works fine and it saves to my output directory. But with more data (5K rows), which is the original training data, my Jupyter kernel dies, or when I run it in Spyder, the console just exits!!
I understand that the deprecated version of spaCy is not throwing the value error, but it is still of no use, as I am unable to train my model.
Sample data:
CarryBag 09038820815c.txt
Stopperneedle 0903882080f4.txt
Foilbags 09038820819.txt
I have around 700 files like this with data to be tagged, and in each file multiple entities need tagging.
Code for reference:
import spacy
# import en_core_web_sm
import re
import csv
from spacy.matcher import PhraseMatcher
import plac
from pathlib import Path
import random
# Function to convert PhraseMatcher return value to string indexes
def str_index_conversion(lbl, doc, matchitem):
    o_one = len(str(doc[0:matchitem[1]]))
    subdoc = doc[matchitem[1]:matchitem[2]]
    o_two = o_one + len(str(subdoc))
    return (o_one, o_two, lbl)
# nlp = spacy.load('en')
nlp = spacy.load('en_core_web_sm')
if 'ner' not in nlp.pipe_names:
    ner = nlp.create_pipe('ner')
    nlp.add_pipe(ner)
else:
    ner = nlp.get_pipe('ner')
ner.add_label('PRODUCT')
DIR = 'D:/Docs/'
matcher = PhraseMatcher(nlp.vocab)
list_str_index = []
to_train_ents = []
with open(r'D:\ner_dummy_pack.csv', newline='', encoding='utf-8') as myFile:
    reader = csv.reader(myFile)
    for row in reader:
        try:
            product = row[0].lower()
            #print('K---'+ product)
            filename = row[1]
            file = open(DIR+filename, "r", encoding='utf-8')
            print(file)
            filecontents = file.read()
            for s in filecontents:
                filecontents = re.sub(r'\s+', ' ', filecontents)
                filecontents = re.sub(r'^https?:\/\/.*[\r\n]*', '', filecontents, flags=re.MULTILINE)
                filecontents = re.sub(r"http\S+", "", filecontents)
                filecontents = re.sub(r"[-\"#/#;:<>?{}*`• ?+=~|$.!‘?“”?,_]", " ", filecontents)
                filecontents = re.sub(r'\d+', '', filecontents)  # removing all numbers
                filecontents = re.sub(' +', ' ', filecontents)
                #filecontents = filecontents.encode().decode('unicode-escape')
                filecontents = ''.join([line.lower() for line in filecontents])
                if "," in product:
                    product_patterns = product.split(',')
                    product_patterns = [i.strip() for i in product_patterns]
                    for elem in product_patterns:
                        matcher.add('PRODUCT', None, nlp(elem))
                else:
                    matcher.add('PRODUCT', None, nlp(product))
                print(filecontents)
                doc = nlp(filecontents)
                matches = matcher(doc)
                #print(matches)
                list_str_index = [str_index_conversion('PRODUCT', doc, x) for x in matches]
                to_train_ents.append((filecontents, dict(entities=list_str_index)))
                break
        except Exception as e:
            print(e)
            pass

to_train_entsfinal = to_train_ents
def main(model=None, output_dir=None, n_iter=100):
    # nlp.vocab.vectors.name = 'spacy_pretrained_vectors'
    optimizer = nlp.begin_training()
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
    with nlp.disable_pipes(*other_pipes):  # only train NER
        for itn in range(10):
            losses = {}
            random.shuffle(to_train_entsfinal)
            for item in to_train_entsfinal:
                nlp.update([item[0]],
                           [item[1]],
                           sgd=optimizer,
                           drop=0.50,
                           losses=losses)
            print(losses)
            print("OUTTTTT")
    if output_dir is None:
        output_dir = "C:\\Users\\APRIL"
    noutput_dir = Path(output_dir)
    if not noutput_dir.exists():
        noutput_dir.mkdir()
    #nlp.meta['name'] = new_model_name
    nlp.to_disk(output_dir)
    random.shuffle(to_train_entsfinal)

if __name__ == '__main__':
    main()
Can anyone help me solve this? Even when I removed conflicting entities in a sample of 10+ rows, for example:
Blister abc.txt
Blisterpack abc.txt
Blisters abc.txt
the same issue happens and the model does not train.
Suggested changes:
def main(model=None, output_dir=None, n_iter=100):
    top_memory_precentage_use = 75  # or what ever number you choose

    def handle_memory(ruler):
        if psutil.virtual_memory().percent < top_memory_precentage_use:
            dump_ruler_nonascii(ruler)
            ruler = nlp.begin_training()  # or just init the nlp object again
        return ruler

    # This fitted for my use case
    def dump_ruler_nonascii(ruler):
        path = Path(os.path.join(self.data_path, 'config.jsonl'))
        pattern = ruler.patterns
        with open(path, "a", encoding="utf-8") as f:
            for line in pattern:
                f.write(json.dumps(line, ensure_ascii=False) + "\n")
        return ruler

    # nlp.vocab.vectors.name = 'spacy_pretrained_vectors'
    optimizer = nlp.begin_training()
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
    with nlp.disable_pipes(*other_pipes):  # only train NER
        for itn in range(10):
            losses = {}
            random.shuffle(to_train_entsfinal)
            for item in to_train_entsfinal:
                nlp.update([item[0]],
                           [item[1]],
                           sgd=optimizer,
                           drop=0.50,
                           losses=losses)
            print(losses)
            print("OUTTTTT")
    if output_dir is None:
        output_dir = "C:\\Users\\APRIL"
    noutput_dir = Path(output_dir)
    if not noutput_dir.exists():
        noutput_dir.mkdir()
    #nlp.meta['name'] = new_model_name
    nlp.to_disk(output_dir)
    random.shuffle(to_train_entsfinal)

if __name__ == '__main__':
    main()
It is hard to tell you exactly why it is happening, but I can supply two helper functions for your training loop that you can adapt to your use case. In my case I was writing patterns, and I checked the memory use every iteration.
# add the following imports
import psutil
import os

top_memory_precentage_use = 75  # or what ever number you choose

def handle_memory(ruler):
    if psutil.virtual_memory().percent < top_memory_precentage_use:
        dump_ruler_nonascii(ruler)
        ruler = nlp.begin_training()  # or just init the nlp object again
    return ruler

# This fitted for my use case
def dump_ruler_nonascii(ruler):
    path = Path(os.path.join(self.data_path, 'config.jsonl'))
    pattern = ruler.patterns
    with open(path, "a", encoding="utf-8") as f:
        for line in pattern:
            f.write(json.dumps(line, ensure_ascii=False) + "\n")

What is the math behind TfidfVectorizer?

I am trying to understand the math behind the TfidfVectorizer. I used this tutorial, but my code is a little bit changed. The tutorial also says at the end that the values differ slightly because sklearn uses a smoothed version of idf and various other little optimizations.
I want to be able to use TfidfVectorizer but also calculate the same simple example by hand.
Here is my whole code:
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
def main():
    documentA = 'the man went out for a walk'
    documentB = 'the children sat around the fire'
    corpus = [documentA, documentB]
    bagOfWordsA = documentA.split(' ')
    bagOfWordsB = documentB.split(' ')
    uniqueWords = set(bagOfWordsA).union(set(bagOfWordsB))

    print('----------- compare word count -------------------')
    numOfWordsA = dict.fromkeys(uniqueWords, 0)
    for word in bagOfWordsA:
        numOfWordsA[word] += 1
    numOfWordsB = dict.fromkeys(uniqueWords, 0)
    for word in bagOfWordsB:
        numOfWordsB[word] += 1

    tfA = computeTF(numOfWordsA, bagOfWordsA)
    tfB = computeTF(numOfWordsB, bagOfWordsB)
    print(pd.DataFrame([tfA, tfB]))

    CV = CountVectorizer(stop_words=None, token_pattern='(?u)\\b\\w\\w*\\b')
    cv_ft = CV.fit_transform(corpus)
    tt = TfidfTransformer(use_idf=False, norm='l1')
    t = tt.fit_transform(cv_ft)
    print(pd.DataFrame(t.todense().tolist(), columns=CV.get_feature_names()))

    print('----------- compare idf -------------------')
    idfs = computeIDF([numOfWordsA, numOfWordsB])
    print(pd.DataFrame([idfs]))

    tfidfA = computeTFIDF(tfA, idfs)
    tfidfB = computeTFIDF(tfB, idfs)
    print(pd.DataFrame([tfidfA, tfidfB]))

    ttf = TfidfTransformer(use_idf=True, smooth_idf=False, norm=None)
    f = ttf.fit_transform(cv_ft)
    print(pd.DataFrame(f.todense().tolist(), columns=CV.get_feature_names()))

    print('----------- TfidfVectorizer -------------------')
    vectorizer = TfidfVectorizer(smooth_idf=False, use_idf=True, stop_words=None, token_pattern='(?u)\\b\\w\\w*\\b', norm=None)
    vectors = vectorizer.fit_transform([documentA, documentB])
    feature_names = vectorizer.get_feature_names()
    print(pd.DataFrame(vectors.todense().tolist(), columns=feature_names))

def computeTF(wordDict, bagOfWords):
    tfDict = {}
    bagOfWordsCount = len(bagOfWords)
    for word, count in wordDict.items():
        tfDict[word] = count / float(bagOfWordsCount)
    return tfDict

def computeIDF(documents):
    import math
    N = len(documents)
    idfDict = dict.fromkeys(documents[0].keys(), 0)
    for document in documents:
        for word, val in document.items():
            if val > 0:
                idfDict[word] += 1
    for word, val in idfDict.items():
        idfDict[word] = math.log(N / float(val))
    return idfDict

def computeTFIDF(tfBagOfWords, idfs):
    tfidf = {}
    for word, val in tfBagOfWords.items():
        tfidf[word] = val * idfs[word]
    return tfidf

if __name__ == "__main__":
    main()
I can compare the calculation of term frequency, and both results look the same. But when I calculate the IDF and then the TF-IDF, there are differences between the code from the website and TfidfVectorizer (I also tried the combination of CountVectorizer and TfidfTransformer, to be sure it returns the same results as TfidfVectorizer does).
Code Tf-Idf results vs. TfidfVectorizer Tf-Idf results: (output tables omitted)
Can anybody help me with code that would return the same results as TfidfVectorizer, or a setting of TfidfVectorizer that would return the same results as the code above?
Here is my improvisation of your code to reproduce the TfidfVectorizer output for your data.
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from IPython.display import display

documentA = 'the man went out for a walk'
documentB = 'the children sat around the fire'
corpus = [documentA, documentB]
bagOfWordsA = documentA.split(' ')
bagOfWordsB = documentB.split(' ')
uniqueWords = set(bagOfWordsA).union(set(bagOfWordsB))

print('----------- compare word count -------------------')
numOfWordsA = dict.fromkeys(uniqueWords, 0)
for word in bagOfWordsA:
    numOfWordsA[word] += 1
numOfWordsB = dict.fromkeys(uniqueWords, 0)
for word in bagOfWordsB:
    numOfWordsB[word] += 1

series_A = pd.Series(numOfWordsA)
series_B = pd.Series(numOfWordsB)
df = pd.concat([series_A, series_B], axis=1).T
df = df.reindex(sorted(df.columns), axis=1)
display(df)

tf_df = df.divide(df.sum(1), axis='index')

n_d = 1 + tf_df.shape[0]
df_d_t = 1 + (tf_df.values > 0).sum(0)
idf = np.log(n_d / df_d_t) + 1

pd.DataFrame(df.values * idf,
             columns=df.columns)

tfidf = TfidfVectorizer(token_pattern='(?u)\\b\\w\\w*\\b', norm=None)
pd.DataFrame(tfidf.fit_transform(corpus).todense(),
             columns=tfidf.get_feature_names())
For more details on the implementation, refer to the documentation here.
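For reference, with smooth_idf=True (the default) sklearn computes idf(t) = ln((1 + n) / (1 + df(t))) + 1, and with smooth_idf=False it computes idf(t) = ln(n / df(t)) + 1; the resulting tf-idf rows are then L2-normalised unless norm=None. A small sketch comparing both variants for this two-document corpus (the df values are read off the example above: 'man' appears in 1 document, 'the' in 2):
import numpy as np

n = 2                        # two documents in the corpus
df_t = np.array([1, 2])      # document frequency of e.g. 'man' and 'the'
idf_smooth = np.log((1 + n) / (1 + df_t)) + 1   # TfidfVectorizer(smooth_idf=True)
idf_plain = np.log(n / df_t) + 1                # TfidfVectorizer(smooth_idf=False)
print(idf_smooth, idf_plain)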

Having issues computing the average of compound sentiment values for each text file in a folder

# below is the sentiment analysis code written for sentence-level analysis
import glob
import os
import nltk.data
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk import sentiment
from nltk import word_tokenize

# Next, VADER is initialized so I can use it within the Python script
sid = SentimentIntensityAnalyzer()

# I will also initialize the 'english.pickle' function and give it a short name
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')

# Each of the text files is listed from the folder cnn_articles
files = glob.glob(os.path.join(os.getcwd(), 'cnn_articles', '*.txt'))
text = []

# iterate over the list getting each file
for file in files:
    # open the file and then call .read() to get the text
    with open(file) as f:
        text.append(f.read())
text_str = "\n".join(text)

# This breaks up the paragraph into a list of strings.
sentences = tokenizer.tokenize(text_str)
sent = 0.0
count = 0

# Iterating through the list of sentences and extracting the compound scores
for sentence in sentences:
    count += 1
    scores = sid.polarity_scores(sentence)
    sent += scores['compound']  # Adding up the overall compound sentiment
    # print(sent, file=open('cnn_compound.txt', 'a'))

if count != 0:
    sent = float(sent / count)
    print(sent, file=open('cnn_compound.txt', 'a'))
With these lines of code, I have been able to get the average of all the compound sentiment values across all the text files. What I really want is the average compound sentiment value for each text file, such that if I have 10 text files in the folder, I will have 10 floating-point values, one for each text file, so that I can plot these values against each other. Kindly assist me, as I am very new to Python.
# below is the sentiment analysis code written for sentence-level analysis
import os, string, glob, pandas as pd, numpy as np
import nltk.data
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk import sentiment
from nltk import word_tokenize

# Next, VADER is initialized so I can use it within the Python script
sid = SentimentIntensityAnalyzer()
exclude = set(string.punctuation)

# I will also initialize the 'english.pickle' function and give it a short name
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')

# Each of the text files is listed from the folder cnn_articles
files = glob.glob(os.path.join(os.getcwd(), 'cnn_articles', '*.txt'))
text = []
cnt = 0

# iterate over the list, scoring each file separately
for file in files:
    f = open(file).read().split('.')
    cnt += 1
    count = len(f)
    sent = 0.0  # reset the running total for each article
    for sentence in f:
        if sentence not in exclude:
            scores = sid.polarity_scores(sentence)
            sent += scores['compound']
    average = round((sent / count), 4)
    t = [cnt, average]
    text.append(t)

df = pd.DataFrame(text, columns=['Article Number', 'Average Value'])
#df.to_csv(r'Result.txt', header=True, index=None, sep='\t', mode='w')
df.to_csv('cnn_result.csv', index=None)
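As a hypothetical follow-up (not part of the original answer), the per-article averages written to cnn_result.csv could then be plotted against each other, e.g.:
import pandas as pd
import matplotlib.pyplot as plt

# load the per-article averages produced above and plot them as a bar chart
df = pd.read_csv('cnn_result.csv')
df.plot(x='Article Number', y='Average Value', kind='bar')
plt.ylabel('Average compound sentiment')
plt.show()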
