Loading pickle NotFittedError: TfidfVectorizer - Vocabulary wasn't fitted - python-3.x

multilabel classification
I am trying to build a multi-label text classifier using scikit-learn/pandas/OneVsRestClassifier/logistic regression. Building and evaluating the model works, but attempting to classify new sample text does not.
Scenario 1:
I build and evaluate the model, then save it as sample.pkl and restart my kernel. When I load the saved model (sample.pkl) and run a prediction on sample text, I get the error:
NotFittedError: TfidfVectorizer - Vocabulary wasn't fitted.
Inference code:
import os
import re
import csv
import json
import pickle
import collections
from collections import Counter
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm import tqdm
import matplotlib.pyplot as plt
import nltk
from nltk.corpus import stopwords
from sklearn.metrics import f1_score  # performance metric
from sklearn.multiclass import OneVsRestClassifier  # binary relevance
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import MultiLabelBinarizer
stop_words = set(stopwords.words('english'))
def cleanHtml(sentence):
    """ remove HTML tags """
    cleanr = re.compile('<.*?>')
    cleantext = re.sub(cleanr, ' ', str(sentence))
    return cleantext

def cleanPunc(sentence):
    """ clean the word of any punctuation or special characters """
    cleaned = re.sub(r'[?|!|\'|"|#]', r'', sentence)
    cleaned = re.sub(r'[.|,|)|(|\|/]', r' ', cleaned)
    cleaned = cleaned.strip()
    cleaned = cleaned.replace("\n", " ")
    return cleaned

def keepAlpha(sentence):
    """ keep only alphabetic characters """
    alpha_sent = ""
    for word in sentence.split():
        alpha_word = re.sub('[^a-z A-Z]+', ' ', word)
        alpha_sent += alpha_word
        alpha_sent += " "
    alpha_sent = alpha_sent.strip()
    return alpha_sent

def remove_stopwords(text):
    """ remove stop words """
    no_stopword_text = [w for w in text.split() if not w in stop_words]
    return ' '.join(no_stopword_text)
test1 = pd.read_csv("C:\\Users\\abc\\Downloads\\test1.csv")
test1.columns
test1.head()
siNo  plot                                 movie_name         genre_new
1     The story begins with Hannah...      sing               [drama, teen]
2     Debbie's favorite band is Dream..    the biggest fan    [drama]
3     This story of a Zulu family is ..    come back, africa  [drama, Documentary]
Getting the error
I get the error when running inference on sample text:
def infer_tags(q):
    q = cleanHtml(q)
    q = cleanPunc(q)
    q = keepAlpha(q)
    q = remove_stopwords(q)
    multilabel_binarizer = MultiLabelBinarizer()
    tfidf_vectorizer = TfidfVectorizer()
    q_vec = tfidf_vectorizer.transform([q])
    q_pred = clf.predict(q_vec)
    return multilabel_binarizer.inverse_transform(q_pred)

for i in range(5):
    print(i)
    k = test1.sample(1).index[0]
    print("Movie: ", test1['movie_name'][k], "\nPredicted genre: ", infer_tags(test1['plot'][k]))
    print("Actual genre: ", test1['genre_new'][k], "\n")
Solved
I solved it by also saving the fitted TfidfVectorizer and MultiLabelBinarizer to pickle files:
import joblib  # in older scikit-learn this was `from sklearn.externals import joblib`
pickle.dump(tfidf_vectorizer, open("tfidf_vectorizer.pickle", "wb"))
pickle.dump(multilabel_binarizer, open("multibinirizer_vectorizer.pickle", "wb"))
vectorizer = joblib.load('/abc/downloads/tfidf_vectorizer.pickle')
multilabel_binarizer = joblib.load('/abc/downloads/multibinirizer_vectorizer.pickle')

def infer_tags(q):
    q = cleanHtml(q)
    q = cleanPunc(q)
    q = keepAlpha(q)
    q = remove_stopwords(q)
    q_vec = vectorizer.transform([q])
    q_pred = rf_model.predict(q_vec)
    return multilabel_binarizer.inverse_transform(q_pred)
I went through the question below and that gave me the solution:
How do I store a TfidfVectorizer for future use in scikit-learn?

This happens because you are only dumping the classifier into the pickle, not the vectorizer.
During inference, when you call tfidf_vectorizer = TfidfVectorizer(), you create a brand-new vectorizer that was never fitted on the training vocabulary, which is what raises the error.
What you should do is dump both the classifier and the fitted vectorizer to pickle at training time, and load them both during inference.
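A minimal sketch of that idea (clf, tfidf_vectorizer and multilabel_binarizer are assumed to be the already-fitted objects from training; the file names are illustrative):
import pickle

# at training time: persist every fitted object, not just the classifier
with open("tfidf_vectorizer.pkl", "wb") as f:
    pickle.dump(tfidf_vectorizer, f)
with open("multilabel_binarizer.pkl", "wb") as f:
    pickle.dump(multilabel_binarizer, f)
with open("classifier.pkl", "wb") as f:
    pickle.dump(clf, f)

# at inference time (e.g. after a kernel restart): load them back instead of re-creating them
with open("tfidf_vectorizer.pkl", "rb") as f:
    tfidf_vectorizer = pickle.load(f)
with open("multilabel_binarizer.pkl", "rb") as f:
    multilabel_binarizer = pickle.load(f)
with open("classifier.pkl", "rb") as f:
    clf = pickle.load(f)

q_vec = tfidf_vectorizer.transform(["some cleaned sample text"])
print(multilabel_binarizer.inverse_transform(clf.predict(q_vec)))
Wrapping the vectorizer and classifier in a single sklearn.pipeline.Pipeline and pickling that one object is another common way to keep them together.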

Related

Rouge-L score very low

I use the Hugging Face evaluate API to calculate the ROUGE scores of summarization results. The ROUGE-1 and ROUGE-2 scores are fine, but I find my ROUGE-L score is very low compared to the results in papers.
For example, on the elife dataset, the baseline model lead-k's ROUGE scores are 34.12 / 6.73 / 32.06, while mine are 37.18 / 7.97 / 15.05. Apparently, something goes wrong with my calculation.
Here is my code:
import evaluate
import transformers
import os
import torch
from datasets import list_datasets, load_dataset
import nltk
import numpy as np
rouge = evaluate.load('rouge')
elife = load_dataset('tomasg25/scientific_lay_summarisation', 'elife')
print(elife)
"""
lexsum = load_dataset('allenai/multi_lexsum')
print(lexsum)
"""
refs = []
predicts_lead3 = []
predicts_leadk = []
for text in elife['test']['summary']:
    refs.append(text)
for text in elife['test']['article']:
    predicts_lead3.append(' '.join(nltk.sent_tokenize(text)[:3]))
    predicts_leadk.append(' '.join(text.split(' ')[:383]))
result_3 = rouge.compute(predictions=predicts_lead3, references=refs)
print("lead 3 results:")
print(result_3)
result_k = rouge.compute(predictions=predicts_leadk, references=refs)
print("lead k results:")
print(result_k)
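One possible explanation for the gap (an assumption on my part, not something confirmed in the question): the evaluate ROUGE metric reports both rougeL (sentence-level) and rougeLsum (summary-level). Papers usually report the summary-level score, and rougeLsum expects the sentences of each prediction and reference to be separated by newlines. A minimal sketch of that variant:
# join sentences with "\n" so rougeLsum is computed at the summary level
preds_nl = ["\n".join(nltk.sent_tokenize(p)) for p in predicts_lead3]
refs_nl = ["\n".join(nltk.sent_tokenize(r)) for r in refs]
result = rouge.compute(predictions=preds_nl, references=refs_nl)
print(result["rougeLsum"])  # compare this value with the ROUGE-L numbers reported in papers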

'ascii' codec can't encode characters

I use Python 3.8 on Windows. Here is my code:
from numpy import mean
from numpy import std
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.feature_selection import RFE
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import Pipeline
from matplotlib import pyplot
# get a list of models to evaluate
def get_models():
    models = dict()
    for i in range(2, 29):
        rfe = RFE(estimator=DecisionTreeClassifier(), n_features_to_select=i)
        model = DecisionTreeClassifier()
        models[str(i)] = Pipeline(steps=[('s', rfe), ('m', model)])
    return models

# evaluate a given model using cross-validation
def evaluate_model(model, x, y):
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=7)
    scores = cross_val_score(model, x, y, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')
    return scores

# get the models to evaluate
models = get_models()
# evaluate the models and store results
results, names = list(), list()
for name, model in models.items():
    scores = evaluate_model(model, x, y)
    results.append(scores)
    names.append(name)
    print('>%s %.3f (%.3f)' % (name, mean(scores), std(scores)))
# plot model performance for comparison
pyplot.boxplot(results, labels=names, showmeans=True)
pyplot.show()
The problem is:
D:\software\anaconda3\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py in _send(self, cmd, name, rtype)
202
203 def _send(self, cmd, name, rtype):
--> 204 msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii')
205 if len(name) > 512:
206 # posix guarantees that writes to a pipe of less than PIPE_BUF
UnicodeEncodeError: 'ascii' codec can't encode characters in position 18-19: ordinal not in range(128)
I have tried many solutions, such as adding:
import sys
import imp
imp.reload(sys)
And I believe there are no Chinese characters in my dataset.
The thing is, if I remove the n_jobs=-1 parameter from the cross_val_score call, the code works, but it runs very slowly because only one core is used. Any solution for that?
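One workaround that is often suggested for this loky/joblib error on Windows (assuming the non-ASCII characters come from the user/temp path that joblib encodes for its resource tracker, which is not confirmed here) is to point joblib at an ASCII-only temporary folder before running the parallel code, for example:
import os
os.makedirs(r"C:\temp\joblib", exist_ok=True)
os.environ["JOBLIB_TEMP_FOLDER"] = r"C:\temp\joblib"  # ASCII-only path for joblib's temporary files
# ...then call cross_val_score(..., n_jobs=-1) as before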

Keras Model Training with Azure Machine Learning

I have trained a multiclass-classification model locally using Keras. I am attempting to migrate this so that it can be trained and run in Azure Machine Learning Studio (AML).
I have provided the sections of code below which are used in AML: the main AML code and the training script (EnsemblingModel.py). From the main AML code, the training script is invoked via the ScriptRunConfig object (src).
Please note that I have also uploaded the dataset on which the model should be trained directly to AML; it is registered under the name 'test_data'.
However, an error is returned when executing the line RunDetails(run).show() from the main AML code. The error is:
Error occurred: User program failed with FileNotFoundError: [Errno 2] No such file or directory: 'test_data'
This error message refers to the following line from the EnsemblingModel.py script:
dataframe = pd.read_csv("test_data", header=None)
I understand that the script is unable to load the data and I have therefore tried changing the code, for example:
dataframe = dataset.get_by_name(ws, name='test_data')
Which returned the following error:
Error occurred: User program failed with NameError: name 'dataset' is not defined
How do I change this so that the script can read and load the data and training can commence? Maybe I am going about this completely the wrong way, so any advice is welcome.
I have consulted the various Microsoft documentation pages as well as the GitHub Azure guides here, but there seem to be limited examples.
I am new to AML, so if anyone has any resources for using it alongside Keras, that would also be appreciated.
Main AML Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import azureml
from azureml.core import Experiment
from azureml.core import Environment
from azureml.core import Dataset
from azureml.core import Workspace, Run
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group, sep='\n')
from azureml.core import Experiment
script_folder = './TestingModel1'
os.makedirs(script_folder, exist_ok=True)
exp = Experiment(workspace=ws, name='TestingModel1')
dataset = Dataset.get_by_name(ws, name='test_data')
dataframe = dataset.to_pandas_dataframe()
df = dataframe.values
cluster_name = "cpu-cluster"
try:
    compute_target = ComputeTarget(workspace=ws, name=cluster_name)
    print('Found existing compute target')
except ComputeTargetException:
    print('Creating a new compute target...')
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',
                                                           max_nodes=4)
    compute_target = ComputeTarget.create(ws, cluster_name, compute_config)
    compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
compute_targets = ws.compute_targets
for name, ct in compute_targets.items():
    print(name, ct.type, ct.provisioning_state)
from azureml.core import Environment
keras_env = Environment.from_conda_specification(name = 'keras-2.3.1', file_path = './conda_dependencies.yml')
# Specify a GPU base image
#keras_env.docker.enabled = True
keras_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.0-cudnn7-ubuntu18.04'
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory=script_folder,
                      script='EnsemblingModel.py',
                      compute_target=compute_target,
                      environment=keras_env)
run = exp.submit(src)
from azureml.widgets import RunDetails
RunDetails(run).show()
Ensembling Model Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#KerasLibraries
from keras import callbacks
from keras.layers.normalization import BatchNormalization
from keras.layers import Activation
from keras.layers import Dropout
from keras.optimizers import SGD
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
#tensorFlow
import tensorflow as tf
#SKLearnLibraries
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from azureml.core import Run
# In[3]:
dataframe = pd.read_csv("test_data", header=None)        # original attempt -> FileNotFoundError
# dataframe = dataset.get_by_name(ws, name='test_data')  # second attempt -> NameError: name 'dataset' is not defined
dataset = dataframe.values
# In[4]:
X = dataset[:,0:22].astype(float)
y = dataset[:,22]
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(y)
encoded_y = encoder.transform(y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np_utils.to_categorical(encoded_y)
print(dummy_y.shape)
#print(X.shape)
#print(X)
import sys
np.set_printoptions(threshold=sys.maxsize)
dummy_y_new = dummy_y[0:42,:]
print(dummy_y_new)
#dataset
# In[5]:
earlystopping = callbacks.EarlyStopping(monitor="val_loss",
                                        mode="min", patience=125,
                                        restore_best_weights=True)
#define Keras
model1 = Sequential()
model1.add(Dense(50, input_dim=22))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(0.5,input_shape=(50,)))
model1.add(Dense(50))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(0.5,input_shape=(50,)))
model1.add(Dense(8, activation='softmax'))
#compile the keras model
model1.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
# fit the keras model on the dataset
model1.fit(X, dummy_y, validation_split=0.25, epochs=10000, batch_size=100, verbose=1, callbacks=[earlystopping])
_, accuracy3 = model1.evaluate(X, dummy_y, verbose=0)
print('Accuracy: %.2f' % (accuracy3*100))
predict_dataset = tf.convert_to_tensor([
    [1,5,1,0.459,0.322,0.041,0.002,0.103,0.032,0.041,14,0.404,0.284,0.052,0.008,0.128,0.044,0.037,0.043,54,0,155],
])
predictions = model1(predict_dataset, training=False)
predictions2 = predictions.numpy()
print(predictions2)
print(type(predictions2))
I have resolved the above issue by adding an argument to the ScriptRunConfig code:
test_data_ds = Dataset.get_by_name(ws, name='test_data')
src = ScriptRunConfig(source_directory=script_folder,
                      script='EnsemblingModel.py',
                      # pass the dataset as an input with the friendly name 'test_data'
                      arguments=['--input-data', test_data_ds.as_named_input('test_data')],
                      compute_target=compute_target,
                      environment=keras_env)
As well as the following to the modelling script itself:
import argparse
from azureml.core import Dataset, Run
parser = argparse.ArgumentParser()
parser.add_argument("--input-data", type=str)
args = parser.parse_args()
run = Run.get_context()
ws = run.experiment.workspace
# get the input dataset by ID
dataset = Dataset.get_by_id(ws, id=args.input_data)
# load the TabularDataset to pandas DataFrame
df = dataset.to_pandas_dataframe()
dataset = df.values
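As an aside (a sketch based on my own assumptions rather than on the original post): because the script obtains the workspace from the run context, the registered dataset can also be fetched by name inside EnsemblingModel.py, without passing it as a script argument, provided it is registered in the same workspace:
from azureml.core import Dataset, Run

run = Run.get_context()
ws = run.experiment.workspace                         # workspace of the submitted run
dataset = Dataset.get_by_name(ws, name='test_data')   # fetch the registered dataset by name
df = dataset.to_pandas_dataframe()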
For anyone curious, more information can be found here:

Deal with Out of vocabulary word with Gensim pretrained GloVe

I am working on an NLP assignment and loaded the GloVe vectors provided by Gensim:
import gensim.downloader
glove_vectors = gensim.downloader.load('glove-twitter-25')
I am trying to get the word embedding for each word in a sentence, but some of them are not in the vocabulary.
What is the best way to deal with it working with the Gensim API?
Thanks!
Load the model:
import gensim.downloader as api
model = api.load("glove-twitter-25") # load glove vectors
# model.most_similar("cat") # show words that similar to word 'cat'
There is a very simple way to find out whether a word exists in the model's vocabulary:
print('Word exists') if word in model.wv.vocab else print('Word does not exist')
Apart from that, I have used the following logic to create a sentence embedding (25-dim) from N tokens:
from __future__ import print_function, division
import os
import re
import sys
import regex
import numpy as np
from functools import partial
from fuzzywuzzy import process
from Levenshtein import ratio as lev_ratio
import gensim
import tempfile

def vocab_check(model, word):
    # weight a word by its similarity to the closest matching word returned by the model
    similar_words = model.most_similar(word)
    match_ratio = 0.
    match_word = ''
    for sim_word, sim_score in similar_words:
        ratio = lev_ratio(word, sim_word)
        if ratio > match_ratio:
            match_ratio = ratio  # keep the best Levenshtein ratio seen so far
            match_word = sim_word
    if match_word == '':
        return similar_words[0][1]
    return model.similarity(word, match_word)

def sentence2vector(model, sent, dim=25):
    words = sent.split(' ')
    emb = [model[w.strip()] for w in words]
    weights = [1. if w in model.wv.vocab else vocab_check(model, w) for w in words]
    if len(emb) == 0:
        sent_vec = np.zeros(dim, dtype=np.float16)
    else:
        sent_vec = np.dot(weights, emb)
    sent_vec = sent_vec.astype("float16")
    return sent_vec
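A note if you are on gensim 4.x (an assumption; the code above targets the older 3.x API): the vocab attribute was removed from KeyedVectors, and membership checks go through key_to_index instead. A minimal sketch:
import gensim.downloader as api

glove_vectors = api.load("glove-twitter-25")

word = "cat"
# gensim >= 4.0: KeyedVectors no longer exposes .vocab / .wv.vocab
if word in glove_vectors.key_to_index:
    print(glove_vectors[word])  # 25-dim embedding
else:
    print(f"'{word}' is out of vocabulary")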

When I applied RandomForest in Python, ValueError: Found input variables with inconsistent numbers of samples: [2883, 1236]

File "D:\Users\Watson Rockstar\Anaconda3\lib\site-packages\sklearn\utils\validation.py", line 205, in check_consistent_length
" samples: %r" % [int(l) for l in lengths])
ValueError:
Found input variables with inconsistent numbers of samples: [2883, 1236]
The dataset has 4119 rows in total; Xtrain has shape (2883, 18) and Xtest has shape (1236, 18).
I have tried to use LabelEncoder and OneHotEncoder to solve the problem, but it did not help:
# Ignore the warnings
import warnings
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
# data visualisation and manipulation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
import missingno as msno
#configure
# sets matplotlib to inline and displays graphs below the corressponding cell.
#import the necessary modelling algos.
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
#preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score
telebanking = pd.read_csv('bank-additional.csv')
telebank = telebanking.drop(['duration','default'],axis =1)
def transform(feature):
    le = LabelEncoder()
    telebank[feature] = le.fit_transform(telebank[feature])
    print(le.classes_)

cat_telebank = telebank.select_dtypes(include='object')
cat_telebank.columns
for col in cat_telebank.columns:
    transform(col)
scaler=StandardScaler()
scaled_telebank=scaler.fit_transform(telebank.drop('y',axis=1))
X=scaled_telebank
Y=telebank['y'].as_matrix()
Xtrain,Xtest,Ytrain,Ytest = train_test_split(X,Y,test_size=0.3)
def compare(model):
    clf = model
    clf.fit(Xtrain, Ytrain)
    pred = clf.predict(Xtrain)
    acc.append(accuracy_score(pred, Ytest))
    prec.append(precision_score(pred, Ytest))
    rec.append(recall_score(pred, Ytest))
    auroc.append(roc_auc_score(pred, Ytest))
acc=[]
prec=[]
rec=[]
auroc=[]
models=[RandomForestClassifier(),DecisionTreeClassifier()]
model_names=['RandomForestClassifier','DecisionTreeClassifier']
for model in range(len(models)):
    compare(models[model])
d={'Modelling Algo':model_names,'Accuracy':acc,'Precision':prec,'Recall':rec,'Area Under ROC Curve':auroc}
met_telebank=pd.DataFrame(d)
met_telebank
That traceback is the detail of the first error.
The error comes from the compare function, not from train_test_split. The line
pred = clf.predict(Xtrain)
should be
pred = clf.predict(Xtest)
As written, the metrics compare 2883 predictions made on Xtrain against the 1236 labels in Ytest, which is exactly the inconsistent-samples error. (Note that train_test_split already returns values in the order Xtrain, Xtest, Ytrain, Ytest, so the unpacking line itself is correct.)
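A minimal sketch of the corrected function (same variable names as above; note that scikit-learn metrics expect the arguments in (y_true, y_pred) order):
def compare(model):
    clf = model
    clf.fit(Xtrain, Ytrain)
    pred = clf.predict(Xtest)                 # predict on the test split, not the training split
    acc.append(accuracy_score(Ytest, pred))
    prec.append(precision_score(Ytest, pred))
    rec.append(recall_score(Ytest, pred))
    auroc.append(roc_auc_score(Ytest, pred))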
