I have a DataFrame with 2 columns of strings, imported from a TSV file. Both columns need to be converted to ASCII. (This is because I want to pass the text through a CountVectorizer and TfidfTransformer pipeline in scikit-learn.)
I have gone through dozens of posts, both on Stack Overflow and elsewhere, but cannot figure this one out. My code is below, including some of the things I have tried.
Any suggestions to make this work?
# tried adding encoding="utf-8", does not work
df = pd.read_csv(questions, usecols = [3, 4, 5], nrows = 10, header=0, sep="\t")
y = df["is_duplicate"].values
X = df.drop("is_duplicate", axis=1).values
for col in X:
    X = X.encode('utf-8')  # does not work
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=21, stratify=y)
def flat_list(my_list):
    return [str(item) for sublist in my_list for item in sublist]

def transform_data(trans_obj_list, dataset_splits):
    X_train = dataset_splits[0].astype(str)
    X_train = flat_list(X_train)
    for trfs in trans_obj_list:
        transformed_vector = trfs().fit(X_train)
    for x in range(0, len(dataset_splits)):
        dataset_splits[x] = flat_list(dataset_splits[x].astype(str))
    return dataset_splits

new_X_train, new_X_test = transform_data([CountVectorizer, TfidfTransformer],
                                         [X_train, X_test])
You need to call X.str.encode(..) instead of X.encode(..) like this:
for col in X:
    X = X.str.encode('utf-8')
I found an answer to my question in this question: How do I use encode (Python 3) to fix non-ascii code for CSV import in Pandas?
file_obj = open(file_name, encoding="utf-8")
master = pd.read_csv(file_obj)
I just used "ascii" instead of "utf-8" for my case.
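Putting the two pieces together for the TSV in the question, here is a minimal sketch. questions is the path variable from the question; the choice of "utf-8" for reading and errors="ignore" for the ASCII conversion are assumptions you may need to adjust for your data:
import pandas as pd

with open(questions, encoding="utf-8") as file_obj:
    df = pd.read_csv(file_obj, usecols=[3, 4, 5], nrows=10, header=0, sep="\t")

# Force the text columns down to ASCII (and back to str so scikit-learn accepts them).
for col in df.columns.drop("is_duplicate"):
    df[col] = df[col].str.encode("ascii", errors="ignore").str.decode("ascii")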
I am doing multi-class classification using ML. After preprocessing the data, I am using the train_test_split function to divide the data into training and testing datasets. Is there a way to know how many samples from each class are present in the training and testing datasets? For example:
Class    No. of Training Samples    No. of Testing Samples
a        30                         5
b        20                         10
c        25                         5
My Code:
classes = ['a','b','c']
def pp():
    data_list = []
    for index, label in enumerate(classes):
        class_list = []
        if label == 'silence':
            silence_path = os.path.join(C["dire"], 'silence')
            if not os.path.exists(silence_path):
                os.mkdir(silence_path)
            silence_stride = 2000
            # sample_rate = 16000
            folder = os.path.join(C["dire"], '_background_noise_')
            for file_ in os.listdir(folder):
                if '.wav' in file_:
                    load_path = os.path.join(folder, file_)
                    sample_rate, y = wavfile.read(load_path)
                    for i in range(0, len(y) - sample_rate, silence_stride):
                        file_path = "silence/{}_{}.wav".format(file_[:-4], i)
                        y_slice = y[i:i + sample_rate]
                        wavfile.write(os.path.join(C["dire"], file_path), sample_rate, y_slice)
                        class_list.append(file_path)
        else:
            folder = os.path.join(C["dire"], label)
            for file_ in os.listdir(folder):
                file_path = '{}/{}'.format(label, file_)
                class_list.append(file_path)
        random.shuffle(class_list)
        data_list.append(class_list)

    X = []
    Y = []
    preemphasis = 0.985
    print("Feature Extraction Started")
    for i, class_list in enumerate(data_list):
        for j, samples in enumerate(class_list):
            if samples.endswith('.wav'):
                sample_rate, audio = wavfile.read(os.path.join(C["dire"], samples))
                if audio.size < sample_rate:
                    audio = np.pad(audio, (sample_rate - audio.size, 0), mode="constant")
                coeff = mfccforconfidence.mfcc(audio, sample_rate, preemphasis)
                X.append(coeff)
                # print(X)
                if samples.split('/')[0] in classes:
                    Y.append(samples.split('/')[0])
                elif samples.split('/')[0] == '_background_noise_':
                    Y.append('silence')

    A = np.zeros((len(X), X[0].shape[0], X[0][0].shape[0]), dtype='object')
    for i in range(0, len(X)):
        A[i] = np.array(X[i])  # Converting list X into array A
    # print(A.shape)
    end1 = time.time()
    print("Time taken for feature extraction:{}sec".format(end1 - start))

    MLB = MultiLabelBinarizer()  # one hot encoding for converting labels into binary form
    MLB.fit(pd.Series(Y).fillna("missing").str.split(', '))
    Y_MLB = MLB.transform(pd.Series(Y).fillna("missing").str.split(', '))
    MLB.classes_  # same as the classes array
    print(Y_MLB.shape)
    Y = Y_MLB

    X = tf.keras.utils.normalize(X)
    X_train, X_valtest, Y_train, Y_valtest = train_test_split(X, Y, test_size=0.2, random_state=37)
    X_val, X_test, Y_val, Y_test = train_test_split(X_valtest, Y_valtest, test_size=0.5, random_state=37)
    print(X_train.shape, X_val.shape, X_test.shape, Y_train.shape, Y_val.shape, Y_test.shape)
So, basically, I am using ML for audio classification. After extracting the features, I divide the data into training and testing datasets.
I hope this piece of code is useful for answering the question.
If you have a "3D numpy array", here's a demonstration of one way you could do it.
import numpy as np
from random import randint,choices
# Create some data
my_data = np.array(list(zip(
    (randint(0, 100) for _ in range(100)),
    (choices(["a", "b", "c"], k=100)),
    (randint(0, 100) for _ in range(100))
)))
# Show the first 5 elements
print(my_data[0:5,:])
# [['69' 'a' '38']
# ['18' 'c' '73']
# ['57' 'a' '50']
# ['35' 'a' '60']
# ['52' 'b' '1']]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(my_data[:,[0,1]], my_data[:,2])
from collections import Counter
print(Counter(X_train[:,1]))
# Counter({'c': 31, 'b': 26, 'a': 18})
print(Counter(X_train[:,1])["a"])
# 18
print(Counter(X_test[:,1]))
# Counter({'b': 12, 'c': 7, 'a': 6})
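If, as in the question, the class labels end up one-hot encoded in Y before the split, the per-class counts can also be read straight off the split targets. A minimal sketch, assuming Y_train and Y_test are the one-hot arrays produced by the MultiLabelBinarizer in the question:
# Each column of the one-hot matrix corresponds to one entry of MLB.classes_,
# so summing a column gives the number of samples of that class in the split.
print(dict(zip(MLB.classes_, Y_train.sum(axis=0))))
print(dict(zip(MLB.classes_, Y_test.sum(axis=0))))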
I'm trying to use a list as a value in a pandas.DataFrame, but I'm getting an exception when I call the adapt function on the Normalization layer with the NumPy array.
This is the error:
ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type list).
and this is the code:
import pandas as pd
import numpy as np
# Make NumPy printouts easier to read.
np.set_printoptions(precision=3, suppress=True)
import tensorflow as tf
from tensorflow.keras import layers
data = [[45.975, 45.81, 45.715, 45.52, 45.62, 45.65, 4],
        [55.67, 55.975, 55.97, 56.27, 56.23, 56.275, 5],
        [86.87, 86.925, 86.85, 85.78, 86.165, 86.165, 3],
        [64.3, 64.27, 64.285, 64.29, 64.325, 64.245, 6],
        [35.655, 35.735, 35.66, 35.69, 35.665, 35.63, 5]
        ]
lables = [0, 1, 0, 1, 1]

def do():
    d_1 = None
    for l, d in zip(lables, data):
        if d_1 is None:
            d_1 = pd.DataFrame({'lable': l, 'close_price': [d]})
        else:
            d_1 = d_1.append({'lable': l, 'close_price': d}, ignore_index=True)

    dataset = d_1.copy()
    print(dataset.isna().sum())
    dataset = dataset.dropna()
    print(dataset.keys())

    train_dataset = dataset.sample(frac=0.8, random_state=0)
    test_dataset = dataset.drop(train_dataset.index)
    print(train_dataset.describe().transpose())

    train_features = train_dataset.copy()
    test_features = test_dataset.copy()
    train_labels = train_features.pop('lable')
    test_labels = test_features.pop('lable')
    print(train_dataset.describe().transpose()[['mean', 'std']])

    normalizer = tf.keras.layers.Normalization(axis=-1)
    ar = np.array(train_features)
    normalizer.adapt(ar)
    print(normalizer.mean.numpy())

    first = np.array(train_features[:1])
    with np.printoptions(precision=2, suppress=True):
        print('First example:', first)
        print()
        print('Normalized:', normalizer(first).numpy())

    diraction = np.array(train_features)
    diraction_normalizer = layers.Normalization(input_shape=[1, ], axis=None)
    diraction_normalizer.adapt(diraction)

    diraction_model = tf.keras.Sequential([
        diraction_normalizer,
        layers.Dense(units=1)
    ])
    print(diraction_model.summary())
    print(diraction_model.predict(diraction[:10]))

    diraction_model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=0.1),
        loss='mean_absolute_error')

    print(train_features['close_price'])

    history = diraction_model.fit(
        train_features['close_price'],
        train_labels,
        epochs=100,
        # Suppress logging.
        verbose=0,
        # Calculate validation results on 20% of the training data.
        validation_split=0.2)

    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch
    print(hist.tail())

    test_results = {}
    test_results['diraction_model'] = diraction_model.evaluate(
        test_features,
        test_labels, verbose=0)

    x = tf.linspace(0.0, 250, 251)
    y = diraction_model.predict(x)
    print("end")

def main():
    do()

if __name__ == "__main__":
    main()
I don't think it is usual practice to squeeze all your features into a single column.
A quick fix is to put the following line
train_features = np.array(train_features['close_price'].to_list())
before
normalizer = tf.keras.layers.Normalization(axis=-1)
to get rid of the error. But because train_features then changes from a DataFrame into a np.array, your subsequent code may break, so you will need to adjust that as well.
If I were you, however, I would have constructed the DataFrame this way
df = pd.DataFrame(data)
df['label'] = lables
Please consider.
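To make that concrete, here is a minimal sketch of the suggested construction, reusing the data and lables lists from the question and adapting the Normalization layer on the resulting wide DataFrame; the float32 cast and the column handling are assumptions:
import numpy as np
import pandas as pd
import tensorflow as tf

df = pd.DataFrame(data)   # one column per price value instead of one column holding lists
df['label'] = lables

features = df.drop(columns='label')
labels = df['label']

normalizer = tf.keras.layers.Normalization(axis=-1)
normalizer.adapt(np.array(features, dtype='float32'))  # plain 2-D float array, so adapt() works
print(normalizer.mean.numpy())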
I am trying to make my classification accept a text (string) instead of just a number. The data is a collection of pulled articles, and I want the classification algorithm to show which ones to proceed with and which ones to drop. Passing a number works just fine, but it is not very intuitive, even though I know the number represents a relationship to one of the two classes I am working with.
How do I change the logic in the algorithm so that it accepts text as the search criterion rather than an anonymous number picked from the 'Unique_id' column? The columns are 'Title', 'Abstract', 'Relevant', 'Label', and 'Unique_id'. The reason for concatenating DataFrames at the end is that I want to compare results. Finally, it should be noted that the 'Label' column holds a list of keywords, so basically I want the algorithm to read from that column.
I did try changing index_col='Unique_id' to index_col='Label' when reading the data sources, but that did not work out either.
An example of what I want:
print("\nPrint KNN1")
print(get_closest_neighs1('search word'), "\n")
print("\nPrint KNN2")
print(get_closest_neighs2('search word'), "\n")
print("\nPrint KNN3")
print(get_closest_neighs3('search word'), "\n")
This is the full code (see the end of the script for how the example above runs today, using a number to identify the nearest neighbours):
import pandas as pd
print("\nPerforming Analysis using Text Classification")
data = pd.read_csv('File_1_coltest_demo.csv', sep=';', encoding="ISO-8859-1").dropna()
data['Unique_id'] = data.groupby(['Title', 'Abstract', 'Relevant']).ngroup()
data.to_csv('File_2_coltest_demo_KNN.csv', sep=';', encoding="ISO-8859-1", index=False)
data1 = pd.read_csv('File_2_coltest_demo_KNN.csv', sep=';', encoding="ISO-8859-1", index_col='Unique_id')
data2 = pd.DataFrame(data1, columns=['Abstract', 'Relevant'])
data2.to_csv('File_3_coltest_demo_KNN_reduced.csv', sep=';', encoding="ISO-8859-1", index=False)
print("\nData top 25 items")
print(data2.head(25))
print("\nData info")
print(data2.info())
print("\nData columns")
print(data2.columns)
from sklearn.feature_extraction.text import CountVectorizer
from nltk.tokenize import RegexpTokenizer
token = RegexpTokenizer(r'[a-zA-Z0-9]+')
cv = CountVectorizer(lowercase=True, stop_words='english', ngram_range=(1, 1), tokenizer=token.tokenize)
text_counts = cv.fit_transform(data2['Abstract'])
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    text_counts, data2['Abstract'], test_size=0.5, random_state=1)
print("\nTF IDF")
from sklearn.feature_extraction.text import TfidfVectorizer
tf = TfidfVectorizer()
text_tf = tf.fit_transform(data2['Abstract'])
print(text_tf)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    text_tf, data2['Abstract'], test_size=0.3, random_state=123)
from sklearn.neighbors import NearestNeighbors
import pandas as pd
nbrs = NearestNeighbors(n_neighbors=20, metric='euclidean').fit(text_tf)
def get_closest_neighs1(Abstract):
    row = data2.index.get_loc(Abstract)
    distances, indices = nbrs.kneighbors(text_tf.getrow(row))
    names_similar = pd.Series(indices.flatten()).map(data2.reset_index()['Abstract'])
    result = pd.DataFrame({'distance1': distances.flatten(), 'Abstract': names_similar})  # 'Unique_id' : names_similar,
    return result

def get_closest_neighs2(Unique_id):
    row = data2.index.get_loc(Unique_id)
    distances, indices = nbrs.kneighbors(text_tf.getrow(row))
    names_similar = pd.Series(indices.flatten()).map(data2.reset_index()['Unique_id'])
    result1 = pd.DataFrame({'Distance': distances.flatten() / 10, 'Unique_id': names_similar})  # 'Unique_id' : names_similar,
    return result1

def get_closest_neighs3(Relevant):
    row = data2.index.get_loc(Relevant)
    distances, indices = nbrs.kneighbors(text_tf.getrow(row))
    names_similar = pd.Series(indices.flatten()).map(data2.reset_index()['Relevant'])
    result2 = pd.DataFrame({'distance2': distances.flatten(), 'Relevant': names_similar})  # 'Unique_id' : names_similar,
    return result2
print("\nPrint KNN1")
print(get_closest_neighs1(114), "\n")
print("\nPrint KNN2")
print(get_closest_neighs2(114), "\n")
print("\nPrint KNN3")
print(get_closest_neighs3(114), "\n")
data3 = pd.DataFrame(get_closest_neighs1(114))
data4 = pd.DataFrame(get_closest_neighs2(114))
data5 = pd.DataFrame(get_closest_neighs3(114))
del data5['distance2']
data6 = pd.concat([data3, data4, data5], axis=1).reindex(data3.index)
del data6['distance1']
data6.to_csv('File_4_coltest_demo_KNN_results.csv', sep=';', encoding="ISO-8859-1", index=False)
If I understand you right, you are trying to do this:
You have vectorised all your documents by their "Abstract" field, so documents whose abstracts have similar word distributions should be nearby in TF-IDF space.
You want to find the nearest neighbours of a document that contains the search keyword.
Therefore you need to search the original corpus for the first (or all) documents that contain that keyword, find the index of that document (or those documents), and then find their neighbours.
If there are multiple documents with that keyword, you would need to sort the index list and merge the overall results somehow, possibly with some weighting.
If this is true, then the keyword search/lookup isn't really "inside" the model; it is just preselecting a document from the corpus. Once you have the document index, you can perform the KNN search (repeatedly).
I'm not hugely familiar with Pandas, but I've done this kind of thing "manually" before e.g. by keeping the document titles in a separate array, with a map to the indexes.
I would imagine you would need to replace your data2.index.get_loc() calls with an iteration over the column values for "Label" and do a simple string search on each. Or does Pandas provide search functions within the corpus?
e.g. https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html#pandas.DataFrame.query
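To make that concrete, here is a minimal, hypothetical sketch that preselects documents by keyword and reuses the nbrs model and text_tf matrix from the question. It assumes data1 (which still has the 'Label' column) is row-aligned with text_tf; the helper name and the keyword are illustrative only:
import numpy as np

def get_closest_neighs_by_keyword(keyword, data, tfidf_matrix, nbrs):
    # Rows whose 'Label' column mentions the keyword (assumes 'Label' holds keyword strings).
    mask = data['Label'].str.contains(keyword, case=False, na=False)
    results = []
    for row in np.flatnonzero(mask.to_numpy()):
        distances, indices = nbrs.kneighbors(tfidf_matrix.getrow(row))
        results.append(pd.DataFrame({
            'distance': distances.flatten(),
            'Abstract': pd.Series(indices.flatten()).map(data.reset_index()['Abstract']),
        }))
    # One frame per matching document; merge or weight them however suits your comparison.
    return pd.concat(results, ignore_index=True) if results else pd.DataFrame()

print(get_closest_neighs_by_keyword('some keyword', data1, text_tf, nbrs))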
X_train_1, X_test_1, y_train_1, y_test = train_test_split(x, y,
                                                           test_size=.3)
X_train_sam, y_train_sam = ADASYN(random_state=42).fit_sample(X_train_1, y_train_1)
type(X_train_1)
# pandas.core.frame.DataFrame
X_train_1.shape
# (1668, 353)
type(X_train_sam)
# numpy.ndarray
X_train_sam.shape
# (2698, 353)
How can I convert X_train_sam back to a DataFrame so that it has the same columns as X_train_1, keeps the original indices, and assigns new indices to the added (synthetic) rows?
Something like this:
result = pd.DataFrame(X_train_sam)
result.columns = X_train_1.columns
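If you also want the index handled the way the question describes, one possible sketch follows. It assumes, as imbalanced-learn's over-samplers normally do, that the resampled array contains the original rows first, followed by the synthetic ones, and that X_train_1 has an integer index; verify both for your version and data:
import pandas as pd

result = pd.DataFrame(X_train_sam, columns=X_train_1.columns)

# Keep the original labels for the original rows and give the synthetic rows
# fresh integer labels starting after the largest existing label.
n_orig = len(X_train_1)
n_new = len(result) - n_orig
start = int(pd.Index(X_train_1.index).max()) + 1
result.index = list(X_train_1.index) + list(range(start, start + n_new))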
I am trying to load the IMDB dataset in Python. I want to pad the sequences so that each sequence is of the same length. I am currently doing it with NumPy. What is a good way to do it in TensorFlow with tf.pad? I saw the example given here, but I don't know how to apply it to a 2-D matrix.
Here is my current code
import numpy as np
import tensorflow as tf
from keras.datasets import imdb
max_features = 5000
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
def padSequence(dataset, max_length):
    dataset_p = []
    for x in dataset:
        if len(x) <= max_length:
            dataset_p.append(np.pad(x, pad_width=(0, max_length - len(x)), mode='constant', constant_values=0))
        else:
            dataset_p.append(x[0:max_length])
    return np.array(dataset_p)  # was np.array(x_train_p), which is not defined inside the function

max_length = max(len(x) for x in x_train)
x_train_p = padSequence(x_train, max_length)
x_test_p = padSequence(x_test, max_length)
print("input x shape: ", x_train_p.shape)
Can someone please help?
I am using TensorFlow 1.0.
In response to the comment:
The padding dimensions are given by
# 'paddings' is [[1, 1,], [2, 2]].
I have a 2-D matrix where every row has a different length. I want to be able to pad the rows to equal length. In my padSequence(dataset, max_length) function, I get the length of every row with the len(x) function. Should I just do the same with tf? Or is there a way to do it like the Keras function
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
If you want to use tf.pad, as far as I can tell you have to iterate over each row.
The code will be something like this:
max_length = 250
number_of_samples = 5
padded_data = np.ndarray(shape=[number_of_samples, max_length], dtype=np.int32)
sess = tf.InteractiveSession()
for i in range(number_of_samples):
    reviewToBePadded = dataSet[i]  # dataSet: your NumPy array of reviews
    paddings = [[0, 0], [0, max_length - len(reviewToBePadded)]]
    data_tf = tf.convert_to_tensor(reviewToBePadded, tf.int32)
    data_tf = tf.reshape(data_tf, [1, len(reviewToBePadded)])
    data_tf = tf.pad(data_tf, paddings, 'CONSTANT')
    padded_data[i] = data_tf.eval()
print(padded_data)
sess.close()
I'm new to Python, so this is possibly not the best code, but I just want to explain the concept.
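For comparison, the Keras helper mentioned in the question avoids the per-row session work entirely. A minimal sketch using the IMDB data loaded above; the maxlen value and the 'post' padding/truncating choices are assumptions:
import numpy as np
from keras.preprocessing.sequence import pad_sequences

maxlen = 250  # illustrative cut-off; use max(len(x) for x in x_train) to pad to the longest review
x_train_p = pad_sequences(x_train, maxlen=maxlen, padding='post', truncating='post', value=0)
x_test_p = pad_sequences(x_test, maxlen=maxlen, padding='post', truncating='post', value=0)
print("input x shape:", x_train_p.shape)  # (num_samples, maxlen)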