Convert categorical features to numeric features using one-hot encoding
dataset = pd.read_csv('bank.csv',index_col=0)
X = dataset.iloc[:,:].values
Z = pd.DataFrame(X)
print(Z)
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
labelencoder_X = LabelEncoder()
X[:,0] = labelencoder_X.fit_transform(X[:,0])
Z = pd.DataFrame(X)
print(Z)
But this can only convert a single column. How can I convert more columns, like columns 1, 2, 3 and more, together?
I tried changing '0' to '0:', but then I get the error "ValueError: bad input shape (11162, 16)".
And if I change X[:,0] to X[:,1,2,3...], I get the error "IndexError: too many indices for array".
I have a function that can do the job for you:
# Own implementation of One Hot Encoding - Data Transformation
def convert_to_binary(df, column_to_convert):
    categories = list(df[column_to_convert].drop_duplicates())
    for category in categories:
        cat_name = str(category).replace(" ", "_").replace("(", "").replace(")", "").replace("/", "_").replace("-", "").lower()
        col_name = column_to_convert[:5] + '_' + cat_name[:10]
        df[col_name] = 0
        df.loc[(df[column_to_convert] == category), col_name] = 1
    return df
# One Hot Encoding
print("One Hot Encoding categorical data...")
columns_to_convert = ['col1', 'col2']  # enter the names of the columns you want to one hot encode
for column in df_all.columns:  # or loop over columns_to_convert to limit the conversion
    if df_all[column].dtype == 'category':
        df_all = convert_to_binary(df=df_all, column_to_convert=column)
        df_all.drop(column, axis=1, inplace=True)
print("One Hot Encoding categorical data...completed")
Make sure you enter your list of column names in columns_to_convert (and loop over that list instead) if you don't want all categorical variables to be converted.
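As an alternative sketch (my own, not part of the answer above), pandas can one-hot encode several columns in a single call with get_dummies; the column names 'job', 'marital' and 'education' below are placeholders for whichever categorical columns your bank.csv actually contains:
import pandas as pd

dataset = pd.read_csv('bank.csv', index_col=0)
# one-hot encode the listed columns and keep all other columns unchanged
encoded = pd.get_dummies(dataset, columns=['job', 'marital', 'education'])
print(encoded.head())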
I wanted to create a program to convert CSV files to DXF (AutoCAD), but the CSV file sometimes comes with a header and sometimes without one, and there are cells that cannot be empty, such as the coordinates. I also noticed that after excluding some of the inputs the value is nan or NaN, and it was necessary to get rid of those rows. So I offer my answer below; please share your opinions on how to implement a better method.
sample input
output
solution
import string
import pandas

def pandas_clean_csv(csv_file):
    """
    Function pandas_clean_csv Documentation
    - I got help from this site, it may help you as well:
      Get the row with the largest number of missing data for more documentation
      https://moonbooks.org/Articles/How-to-filter-missing-data-NAN-or-NULL-values-in-a-pandas-DataFrame-/
    """
    try:
        if not csv_file.endswith('.csv'):
            raise TypeError("Be sure you select .csv file")
        # get punctuation marks as list !"#$%&'()*+,-./:;<=>?#[\]^_`{|}~
        punctuations_list = [mark for mark in string.punctuation]
        # import csv file and read it by pandas
        data_frame = pandas.read_csv(
            filepath_or_buffer=csv_file,
            header=None,
            skip_blank_lines=True,
            error_bad_lines=True,
            encoding='utf8',
            na_values=punctuations_list
        )
        # if elevation column is NaN convert it to 0
        data_frame[3] = data_frame.iloc[:, [3]].fillna(0)
        # if Description column is NaN convert it to -
        data_frame[4] = data_frame.iloc[:, [4]].fillna('-')
        # select coordinates columns
        coord_columns = data_frame.iloc[:, [1, 2]]
        # convert coordinates columns to numeric type
        coord_columns = coord_columns.apply(pandas.to_numeric, errors='coerce', axis=1)
        # find rows with missing data
        index_with_nan = coord_columns.index[coord_columns.isnull().any(axis=1)]
        # remove rows with missing data
        data_frame.drop(index_with_nan, 0, inplace=True)
        # iterate data frame as tuple data
        output_clean_csv = data_frame.itertuples(index=False)
        return output_clean_csv
    except Exception as E:
        print(f"Error: {E}")
        exit(1)
out_data = pandas_clean_csv('csv_files/version2_bad_headers.csv')
for i in out_data:
    print(i[0], i[1], i[2], i[3], i[4])
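For comparison, here is a shorter sketch of the same cleaning steps using plain pandas idioms (my own, assuming the same column layout: columns 1-2 are coordinates, 3 is elevation, 4 is description; 'points.csv' is a hypothetical file name):
import pandas

df = pandas.read_csv('points.csv', header=None, skip_blank_lines=True)
df[3] = df[3].fillna(0)    # missing elevation -> 0
df[4] = df[4].fillna('-')  # missing description -> '-'
df[[1, 2]] = df[[1, 2]].apply(pandas.to_numeric, errors='coerce')
df = df.dropna(subset=[1, 2])  # drop rows whose coordinates are not usable numbers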
Here you can download my test CSV files.
So, I'm trying to generate some fake random data of a given dimension size. Essentially, I want a dataframe in which the data has a uniform random distribution. The data consist of both continuous and categorical values. I've written the following code, but it doesn't work the way I want it to.
import random
import pandas as pd
import time
from datetime import datetime
# declare global variables
adv_name = ['soft toys', 'kitchenware', 'electronics',
'mobile phones', 'laptops']
adv_loc = ['location_1', 'location_2', 'location_3',
'location_4', 'location_5']
adv_prod = ['baby product', 'kitchenware', 'electronics',
'mobile phones', 'laptops']
adv_size = [1, 2, 3, 4, 10]
adv_layout = ['static', 'dynamic'] # advertisement layout type on website
# adv_date, start_time, end_time = []
num = 10 # the given dimension
# define function to generate random advert locations
def rand_shuf_loc(str_lst, num):
    lst = adv_loc
    # using list comprehension
    rand_shuf_str = [item for item in lst for i in range(num)]
    return rand_shuf_str
# define function to generate random advert names
def rand_shuf_prod(loc_list, num):
    rand_shuf_str = [item for item in loc_list for i in range(num)]
    random.shuffle(rand_shuf_str)
    return rand_shuf_str
# define function to generate random impression and click data
def rand_clic_impr(num):
    rand_impr_lst = []
    click_lst = []
    for i in range(num):
        rand_impr_lst.append(random.randint(0, 100))
        click_lst.append(random.randint(0, 100))
    return {'rand_impr_lst': rand_impr_lst, 'rand_click_lst': click_lst}
# define function to generate random product price and discount
def rand_prod_price_discount(num):
    prod_price_lst = []   # advertised product price
    prod_discnt_lst = []  # advertised product discount
    for i in range(num):
        prod_price_lst.append(random.randint(10, 100))
        prod_discnt_lst.append(random.randint(10, 100))
    return {'prod_price_lst': prod_price_lst, 'prod_discnt_lst': prod_discnt_lst}
def rand_prod_click_timestamp(stime, etime, num):
    prod_clik_tmstmp = []
    frmt = '%d-%m-%Y %H:%M:%S'
    for i in range(num):
        rtime = int(random.random()*86400)
        hours = int(rtime/3600)
        minutes = int((rtime - hours*3600)/60)
        seconds = rtime - hours*3600 - minutes*60
        time_string = '%02d:%02d:%02d' % (hours, minutes, seconds)
        prod_clik_tmstmp.append(time_string)
    time_stmp = [item for item in prod_clik_tmstmp for i in range(num)]
    return {'prod_clik_tmstmp_lst': time_stmp}
def main():
    print('generating data...')
    # print('generating random geographic coordinates...')
    # get the impressions and click data
    impression = rand_clic_impr(num)
    clicks = rand_clic_impr(num)
    product_price = rand_prod_price_discount(num)
    product_discount = rand_prod_price_discount(num)
    prod_clik_tmstmp = rand_prod_click_timestamp("20-01-2018 13:30:00",
                                                 "23-01-2018 04:50:34", num)
    lst_dict = {"ad_loc": rand_shuf_loc(adv_loc, num),
                "prod": rand_shuf_prod(adv_prod, num),
                "imprsn": impression['rand_impr_lst'],
                "cliks": clicks['rand_click_lst'],
                "prod_price": product_price['prod_price_lst'],
                "prod_discnt": product_discount['prod_discnt_lst'],
                "prod_clik_stmp": prod_clik_tmstmp['prod_clik_tmstmp_lst']}
    fake_data = pd.DataFrame.from_dict(lst_dict, orient="index")
    res = fake_data.apply(lambda x: x.fillna(0)
                          if x.dtype.kind in 'biufc'
                          # where 'biufc' means boolean, integer,
                          # unsigned integer, float & complex data types
                          else x.fillna(random.randint(0, 100)))
    print(res.transpose())
    res.to_csv("fake_data.csv", sep=",")

# invoke the main function
if __name__ == "__main__":
    main()
Problem 1
When I execute the above code snippet, it prints fine, but when written to CSV format it is positioned horizontally; i.e., it looks like this... How do I position it vertically when writing to the CSV file? What I want is 7 columns (see the lst_dict variable above) with n number of rows.
Problem 2
I don't understand why random data is generated only for the first 50 columns while the remaining columns are filled with numerical values.
To answer your first question, transpose res and assign it back before printing and saving, i.e. replace
print(res.transpose())
with
res = res.transpose()
print(res)
so that res.to_csv("fake_data.csv", sep=",") then writes the 7 columns vertically.
To answer your second question, look at the length of the output of the method
rand_shuf_loc()
It, as well as the other helper functions, only produces a list of 50 items.
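A quick check (my own) shows where the 50 comes from:
adv_loc = ['location_1', 'location_2', 'location_3', 'location_4', 'location_5']
num = 10
print(len([item for item in adv_loc for i in range(num)]))  # 5 * 10 = 50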
The creation of res using the method
fake_data.apply
replaces all NaN values with a random number, so it also fills the columns without any predefined values with numbers.
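A minimal, self-contained sketch of the transpose-before-saving fix (my own; the small lst_dict here stands in for the real columns):
import pandas as pd

lst_dict = {"a": [1, 2, 3], "b": [4, 5, 6]}       # stand-in for the real columns
frame = pd.DataFrame.from_dict(lst_dict, orient="index")
frame = frame.transpose()                         # now 2 columns and 3 rows
frame.to_csv("fake_data_vertical.csv", sep=",")   # written vertically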
I am solving a problem where I pull the data for all the ProductIDs and then iterate through the dataframe, looking at each unique ProductID, to perform a set of functions.
Here, item is the ProductID/Item number:
# looping through the big dataframe to get a dataframe pertaining to the unique ID
for item in df2['Item Nbr'].unique():
    # fetch item data
    df = df2.loc[df2['Item Nbr'] == item]
And then I have a set of custom-made Python functions.
So, when I get through the first loop (for one ProductID) it all works great, but when the loop moves on to the next ProductID, even though I am certain that the data it pulls out is correct, I get this error:
Found array with 0 feature(s) (shape=(268215, 0)) while a minimum of 1 is required by StandardScaler.
However, the X_train and y_train shapes are: (268215, 6) and (268215,).
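For reference, the error itself just means scikit-learn received a feature matrix with zero columns; a minimal, self-contained reproduction (my own sketch, not the project code) is:
import numpy as np
from sklearn.preprocessing import StandardScaler

X_empty = np.empty((5, 0))        # same pattern as the (268215, 0) shape in the error
try:
    StandardScaler().fit(X_empty)
except ValueError as err:
    print(err)                    # "Found array with 0 feature(s) ... required by StandardScaler."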
Code snippet (extra information):
It is a huge file to show in full, but the initial big dataframe has
[362988 rows x 7 columns] - for the first product and
[268215 rows x 7 columns] - for the second product
Expansion of the code:
The big dataframe with two unique product IDs:
biqQueryData = get_item_data(verbose=True)
Iterate over each unique product ID to extract the subset of the dataframe that pertains to it:
for item in biqQueryData['Item Nbr'].unique():
    df = biqQueryData.loc[biqQueryData['Item Nbr'] == item]
    try:
        df_model = model_all_stores(df, item, n_jobs=n_jobs,
                                    train_model=train_model,
                                    test_model=test_model,
                                    tune_model=tune_model,
                                    export_model=export_model,
                                    output=export_demand)
The function model_all_stores:
def model_all_stores(df_raw, item_nbr, n_jobs=1, train_model=False,
                     test_model=False, export_model=False, output=False,
                     tune_model=False):
    """Models demand for specified item.

    Predict the demand of specified item for all stores. Does not
    filter for predict hidden demand (the function get_hidden_demand
    should be used for this.)

    Output: data frame output
    """
    # ML model hyperparameters
    impute_with = 'median'
    n_estimators = 100
    min_samples_split = 3
    min_samples_leaf = 3
    max_depth = None

    # load data and subset traited and valid
    dfnew = subset_traited_valid(df_raw)

    # get known demand
    df_ma = get_demand(dfnew)

    # impute missing sales data
    median_sales = df_ma['Sales Qty'].median()
    df_ma['Sales Qty'] = df_ma['Sales Qty'].fillna(median_sales)

    # add moving average features
    df_ma = df_ma.sort_values('Gregorian Days')
    window_list = [7 * x for x in [1, 2, 4, 8, 16, 52]]
    for w in window_list:
        grouped = df_ma.groupby('Store Nbr')['Sales Qty'].shift(1)
        rolling = grouped.rolling(window=w, min_periods=1).mean()
        df_ma['MA' + str(w)] = rolling.reset_index(0, drop=True)

    X_full = df_ma.loc[:, 'MA7':].values
    # print(X_full.shape)

    # use full data if not testing/tuning
    rows_for_model = df_ma['Known Demand'].notnull()
    X = df_ma.loc[rows_for_model, 'MA7':].values
    y = df_ma.loc[rows_for_model, 'Known Demand'].values
    X_train, y_train = X, y
    print(X_train.shape, y_train.shape)

    if train_model:
        # instantiate model components
        imputer = Imputer(missing_values='NaN', strategy=impute_with, axis=0)
        scale = StandardScaler()
        pca = PCA()
        forest = RandomForestRegressor(n_estimators=n_estimators,
                                       max_features='sqrt',
                                       min_samples_split=min_samples_split,
                                       min_samples_leaf=min_samples_leaf,
                                       max_depth=max_depth,
                                       criterion='mse',
                                       random_state=42,
                                       warm_start=True,
                                       n_jobs=n_jobs)
        # pipeline for model
        pipeline_steps = [('imputer', imputer),
                          ('scale', scale),
                          ('pca', pca),
                          ('forest', forest)]
        regr = Pipeline(pipeline_steps)
        regr.fit(X_train, y_train)
It fails here, at the final regr.fit call.
Snippet of data:
biqQueryData (the entire dataframe):
364174,1084,2019-12-12,,,,0.0
.....
364174,1084,2019-12-13,,,,0.0
188880,397752,19421,2020-02-04,2.0,1.0,1.0,0.0
.....
188881,397752,19421,2020-02-05,2.0,1.0,1.0,0.0
Subset DF 1:
364174,1084,2019-12-12,,,,0.0
.....
364174,1084,2019-12-13,,,,0.0
Subset DF 2:
188880,397752,19421,2020-02-04,2.0,1.0,1.0,0.0
.....
188881,397752,19421,2020-02-05,2.0,1.0,1.0,0.0
Any help here would be great! Thank you
I have several .txt files with 140k+ lines each. They all have three types of data, which are a mix of string and floats:
- 7 col
- 14 col
- 18 col
What is the best and fastest way to parse such data?
I tried to use numpy.genfromtxt with usecols=np.arange(0,7), but that obviously cuts out the 14- and 18-column data.
# for 7 col data
load = np.genfromtxt(filename, dtype=None, names=('day', 'tod', 'condition', 'code', 'type', 'state', 'timing'), usecols=np.arange(0,7))
I would like to parse the data as efficiently as possible.
The solution is rather simple and intuitive. We check whether the number of columns in each row is equal to one of the specified numbers and append the row to the matching array. For better analysis/modification of the data, we can then convert the arrays to Pandas DataFrames or NumPy arrays as desired; below I show the conversion to DataFrames. The numbers of columns in my dataset are 7, 14 and 18. I want my data labeled, so I use Pandas' columns argument to apply labels from a list.
import pandas as pd

filename = "textfile.txt"

array1, array2, array3 = [], [], []  # raw lines with 7, 14 and 18 columns
labels_array1 = []  # fill with 7 column labels
labels_array2 = []  # fill with 14 column labels
labels_array3 = []  # fill with 18 column labels

with open(filename, "r") as f:
    lines = f.readlines()
    for line in lines:
        num_items = len(line.split())
        if num_items == 7:
            array1.append(line.rstrip())
        elif num_items == 14:
            array2.append(line.rstrip())
        elif num_items == 18:
            array3.append(line.rstrip())
        else:
            print("Detected a line with a different number of columns:", num_items)

df1 = pd.DataFrame([sub.split() for sub in array1], columns=labels_array1)
df2 = pd.DataFrame([sub.split() for sub in array2], columns=labels_array2)
df3 = pd.DataFrame([sub.split() for sub in array3], columns=labels_array3)
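If NumPy arrays are preferred instead of DataFrames (a sketch of my own, reusing the array1/array2/array3 lists built above):
import numpy as np

arr7 = np.array([sub.split() for sub in array1])   # shape (n_rows, 7), string dtype
arr14 = np.array([sub.split() for sub in array2])  # shape (n_rows, 14)
arr18 = np.array([sub.split() for sub in array3])  # shape (n_rows, 18)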
torchtext is able to read a file with some columns, each one corresponding to a field. What if I want to create a new column (which I will use as a feature)? For example, imagine the file has two columns, text and target, and I want to extract some information from the text and generate a new feature (e.g. whether it contains certain words). Can I do this directly with torchtext, or do I need to do it in the file beforehand?
Thanks!
It can be done.
import string
import pandas as pd
import torch
import torchtext

# device used later when building the BucketIterator
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def postprocessing(arr, vocab, pad_token):
    # required to pad the sequence
    max_len = max([len(a) for a in arr])
    l = []
    for a in arr:
        res = max_len - len(a)
        if res > 0:
            a.extend([[pad_token]*len(a[0])]*res)
        l.append(a)
    return l
def featurization(text_list):
    # creates character level features
    # text_list is a list of characters.
    features = []
    for ch in text_list:
        l = []
        l.append(1 if ch.isupper() else 0)
        l.append(1 if ch in string.digits else 0)
        l.append(1 if ch in string.punctuation else 0)
        features.append(l)
    return features
temp_data = pd.read_csv("../data/processed/data.csv")
The step below is necessary to keep only the columns we want to process, and the column order matters.
temp_data.loc[:,["text","label"]].to_csv("temp.csv",index=False)
Create the Text, Feature, and Target fields. Here I am tokenizing a sentence into characters.
TEXT = torchtext.data.Field(sequential=True, use_vocab=True,
tokenize=lambda x: list(x), include_lengths=True,
batch_first=True)
LABEL_PAD_TOKEN=-1
FEAT = torchtext.data.LabelField(use_vocab=False,batch_first=True,preprocessing=featurization,
pad_token=None,postprocessing=lambda x, _:postprocessing(x,_,LABEL_PAD_TOKEN))
LABELS = torchtext.data.Field(use_vocab=False,pad_token=LABEL_PAD_TOKEN,unk_token=None,
batch_first=True,dtype=torch.int64,tokenize=lambda x: list(x),
preprocessing=lambda x:[eval(i) for i in x])
In the TabularDataset, the correct field order should be given matching the temp.csv column order.
train_data = torchtext.data.TabularDataset(path="temp.csv",format="csv",skip_header=True,
fields=[(("text","feat"),(TEXT,FEAT)),
("labels",LABELS)])
TEXT.build_vocab(train_data)
train_data,valid_data = train_data.split() # create train val
Build the iterator
train_iter, valid_iter = torchtext.data.BucketIterator.splits(
    (train_data, valid_data), batch_size=2, device=device,
    sort_within_batch=True, sort_key=lambda x: len(x.text))
a = next(iter(train_iter))
a.feat.shape, a.text[0].shape # printing the shape
(torch.Size([2, 36, 3]), torch.Size([2, 36]))
Next, you can pass the text to the embedding layer, whose input is [batch_size, seq_len]
and which will output [batch_size, seq_len, emb_dim].
The features have the shape [batch_size, seq_len, 3] because we have 3 features.
Concatenate both of these on the last dimension, giving [batch_size, seq_len, emb_dim+3], and pass the result to either an LSTM or a CNN.
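A minimal sketch of that concatenation (my own; the embedding size of 8 is an arbitrary choice), reusing the batch a from the iterator above:
emb = torch.nn.Embedding(len(TEXT.vocab), 8).to(device)
text_emb = emb(a.text[0])                                 # [batch_size, seq_len, 8]
combined = torch.cat([text_emb, a.feat.float()], dim=-1)  # [batch_size, seq_len, 8 + 3]
print(combined.shape)                                     # e.g. torch.Size([2, 36, 11])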