NumPy np.chararray to ndarray - python-3.x

I have the following np.chararray
array = np.chararray(shape=(5))
initialize_array(array)
# Gives out the following array of type S1
# [b'1' b'0' b'1' b'0' b'1']
How can I cast this array to an ndarray? I wish for an array like
[ 1 0 1 0 1 ] # dtype = int
Is this possible via some function that I am not aware of? Or should I do it by "hand"?
Using astype like:
new_ndarray = array.astype("int")
Raises a ValueError:
ValueError: Can only create a chararray from string data.
MCVE
#!/usr/bin/python3
import numpy as np
char_array = np.chararray(shape=(5))
char_array[:] = [b'1',b'0',b'1',b'0',b'1']
nd_array = char_array.astype("int")

You can do:
import numpy as np
array = np.chararray(shape=(5))
array[:] = [b'1', b'0', b'1', b'0', b'1']
array_int = np.array(array, dtype=np.int32)
print(array_int)
# [1 0 1 0 1]
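If you would rather keep the astype call, note that the ValueError comes from astype trying to return another chararray; converting to a plain ndarray first should also work. A minimal sketch of that alternative, using the same data as above:
new_ndarray = np.asarray(array).astype(int)  # np.asarray drops the chararray subclass
print(new_ndarray)
# [1 0 1 0 1]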

Related

Replacing a string value in Python

I have a column named "status" full of string values either "legitimate" or "phishing". I'm trying to convert them into a 0 for "legitimate" or 1 for "phishing". Currently my approach is to replace "legitimate" with a string value of "0", and "phishing" with a string value of "1", then convert the strings "0" and "1" to the int values 0 and 1. I'm getting the error:
TypeError: '(0, status legitimate
Name: 0, dtype: object)' is an invalid key
With the following code, what am I doing wrong?
df2 = pd.read_csv('dataset_phishing.csv', usecols=[87], dtype=str)
leg = 'legitimate'
phi = 'phishing'
for i in df2.iterrows():
    if df2[i] == leg:
        df2[i].replace('legitimate', '0')
    elif df2[i] == phi:
        df2[i].replace('phishing', '1')
Here iterrows() gives you a tuple, which can't be used as an index; that's why you get that error. Here is a simple solution:
import pandas as pd
df2 = pd.DataFrame([["legitimate"], ["phishing"]], columns=["status"])
leg = 'legitimate'
phi = 'phishing'
for i in range(len(df2)):
    # use .loc so the assignment modifies df2 itself (chained indexing may write to a copy)
    df2.loc[i, "status"] = '1' if df2.loc[i, "status"] == phi else '0'
print(df2)
Here is a more pythonic way to do this:
import pandas as pd
import numpy as np
df2=pd.DataFrame([["legitimate"],["phishing"]],columns=["status"])
leg = 'legitimate'
phi = 'phishing'
df2["status"]=np.where(df2["status"]==phi,'1','0')
print(df2)
Hope this helps you
Here is another way to do this:
import pandas as pd
import numpy as np
data = {'status': ["legitimate", "phishing"]}
df = pd.DataFrame(data)
leg = 'legitimate'
phi = 'phishing'
df.loc[df["status"] == leg, "status"] = 0
df.loc[df["status"] == phi, "status"] = 1
print(df)
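If you prefer to avoid the two boolean masks, Series.map with a dictionary is another common one-liner; a small sketch along the same lines, using the same column and labels as the example above:
import pandas as pd
data = {'status': ["legitimate", "phishing"]}
df = pd.DataFrame(data)
# map each label directly to its integer code
df["status"] = df["status"].map({"legitimate": 0, "phishing": 1})
print(df)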

Apply function to pandas series given varying arguments

Initial question
I want to calculate the Levenshtein distance between multiple strings, one in a series, the other in a list. I tried my hand at map, zip, etc., but I only got the desired result using a for loop and apply. Is there a way to improve style and especially speed?
Here is what I tried; it does what it is supposed to do, but it lacks speed given a large series.
import stringdist
import pandas as pd

strings = ['Hello', 'my', 'Friend', 'I', 'am']
s = pd.Series(data=strings, index=strings)
c = ['me', 'mine', 'Friend']
df = pd.DataFrame()
for w in c:
    df[w] = s.apply(lambda x: stringdist.levenshtein(x, w))
## Result: ##
        me  mine  Friend
Hello    4     5       6
my       1     3       6
Friend   5     4       0
I        2     4       6
am       2     4       6
Solution
Thanks to @Dames and @molybdenum42, I can provide the solution I used, directly beneath the question. For more insights, please check their great answers below.
import stringdist
import numpy as np
import pandas as pd
from itertools import product

strings = ['Hello', 'my', 'Friend', 'I', 'am']
s = pd.Series(data=strings, index=strings)
c = ['me', 'mine', 'Friend']
word_combinations = np.array(list(product(s.values, c)))
vectorized_levenshtein = np.vectorize(stringdist.levenshtein)
result = vectorized_levenshtein(word_combinations[:, 0],
                                word_combinations[:, 1])
result = result.reshape((len(s), len(c)))
df = pd.DataFrame(result, columns=c, index=s)
This results in the desired data frame.
Setup:
import stringdist
import pandas as pd
import numpy as np
import itertools
s = pd.Series(data=['Hello', 'my', 'Friend'],
              index=['Hello', 'my', 'Friend'])
c = ['me', 'mine', 'Friend']
Options
Option 1: an easy one-liner
df = pd.DataFrame([s.apply(lambda x: stringdist.levenshtein(x, w)) for w in c])
Option 2: np.fromfunction (thanks to @baccandr)
@np.vectorize
def lavdist(a, b):
    return stringdist.levenshtein(c[a], s[b])

df = pd.DataFrame(np.fromfunction(lavdist, (len(c), len(s)), dtype=int),
                  columns=c, index=s)
Option 3: see @molybdenum42
word_combinations = np.array(list(itertools.product(s.values, c)))
vectorized_levenshtein = np.vectorize(stringdist.levenshtein)
result = vectorized_levenshtein(word_combinations[:,0], word_combinations[:,1])
df = pd.DataFrame([word_combinations[:,0], word_combinations[:,1], result]).T
df = df.set_index([0,1])[2].unstack()
Option 4 (the best): modified Option 3
word_combinations = np.array(list(itertools.product(s.values, c)))
vectorized_levenshtein = np.vectorize(stringdist.levenshtein)
result = vectorized_levenshtein(word_combinations[:,0], word_combinations[:,1])
result = result.reshape((len(s), len(c)))
df = pd.DataFrame(result, columns=c, index=s)
Performance testing:
import timeit
from Levenshtein import distance
import pandas as pd
import numpy as np
import itertools
s = pd.Series(data=['Hello', 'my', 'Friend'],
              index=['Hello', 'my', 'Friend'])
c = ['me', 'mine', 'Friend']
test_code0 = """
df = pd.DataFrame()
for w in c:
    df[w] = s.apply(lambda x: distance(x, w))
"""
test_code1 = """
df = pd.DataFrame({w:s.apply(lambda x: distance(x, w)) for w in c})
"""
test_code2 = """
@np.vectorize
def lavdist(a, b):
    return distance(c[a], s[b])

df = pd.DataFrame(np.fromfunction(lavdist, (len(c), len(s)), dtype=int),
                  columns=c, index=s)
"""
test_code3 = """
word_combinations = np.array(list(itertools.product(s.values, c)))
vectorized_levenshtein = np.vectorize(distance)
result = vectorized_levenshtein(word_combinations[:,0], word_combinations[:,1])
df = pd.DataFrame([word_combinations[:,1], word_combinations[:,1], result])
df = df.set_index([0,1])[2] #.unstack() produces error
"""
test_code4 = """
word_combinations = np.array(list(itertools.product(s.values, c)))
vectorized_levenshtein = np.vectorize(distance)
result = vectorized_levenshtein(word_combinations[:,0], word_combinations[:,1])
result = result.reshape((len(s), len(c)))
df = pd.DataFrame(result, columns=c, index=s)
"""
test_setup = "from __main__ import distance, s, c, pd, np, itertools"
print("test0", timeit.timeit(test_code0, number = 1000, setup = test_setup))
print("test1", timeit.timeit(test_code1, number = 1000, setup = test_setup))
print("test2", timeit.timeit(test_code2, number = 1000, setup = test_setup))
print("test3", timeit.timeit(test_code3, number = 1000, setup = test_setup))
print("test4", timeit.timeit(test_code4, number = 1000, setup = test_setup))
Results
# results
# test0 1.3671939949999796
# test1 0.5982696900009614
# test2 0.3246431229999871
# test3 2.0100400850005826
# test4 0.23796007100099814
Using itertools, you can at least get all the required combinations. Using a vectorized version of stringdist.levenshtein (made using numpy.vectorize()) you can then get your desired result without looping at all, though I haven't tested the performance of the vectorized levenshtein function.
The code could look something like this:
import stringdist
import numpy as np
import pandas as pd
import itertools
s = pd.Series(["Hello", "my","Friend"])
c = ['me', 'mine', 'Friend']
word_combinations = np.array(list(itertools.product(s.values, c)))
vectorized_levenshtein = np.vectorize(stringdist.levenshtein)
result = vectorized_levenshtein(word_combinations[:,0], word_combinations[:,1])
At this point you have the results in a numpy array, each entry corresponding to one of all the possible combinations of your two initial arrays. If you want to get it into the shape you have in your example, there's some pandas trickery to be done:
df = pd.DataFrame([word_combinations[:,0], word_combinations[:,1], result]).T
### initially looks like: ###
#         0       1  2
# 0   Hello      me  4
# 1   Hello    mine  5
# 2   Hello  Friend  6
# 3      my      me  1
# 4      my    mine  3
# 5      my  Friend  6
# 6  Friend      me  5
# 7  Friend    mine  4
# 8  Friend  Friend  0
df = df.set_index([0,1])[2].unstack()
### Now looks like: ###
#         Friend  Hello  my
# Friend       0      6   6
# me           5      4   1
# mine         4      5   3
Again, I haven't tested the performance of this method, so I recommend checking that out - it should be faster than iteration though.
EDIT:
User @Dames has a better suggestion for making the result all pretty-like:
result = result.reshape((len(s), len(c)))
df = pd.DataFrame(result, columns=c, index=s)

Use of datetime timedelta with numpy 3d array

I have a 3D array with the count of the number of days past a benchmark date (e.g., 01.01.2000). I am interested in the actual day of year (DOY: 1-365/366) rather than the total number of days past a given date.
For a single value, the syntax below works:
import numpy as np
import datetime
data = 1595
date = datetime.datetime(2000,1,1,0,0) + datetime.timedelta(data -1)
date.timetuple().tm_yday
134
However, I am having issues with using a 3D array.
import numpy as np
import datetime
data = np.random.randint(5, size = (2,2,2))
data = data + 1595
data
array([[[1596, 1595],
        [1599, 1599]],

       [[1596, 1599],
        [1595, 1595]]])
#Function
def Int_to_DOY(int_array):
    date_ = datetime.datetime(2000,1,1,0,0) + datetime.timedelta(int_array - 1)
    return date_.timetuple().tm_yday

doy_data = data * 0 #Empty array
for i in range(2):
    doy_data[:, :, i] = Int_to_DOY(data[:, :, i])
Here is the error message; I am not able to figure this out.
TypeError: unsupported type for timedelta days component: numpy.ndarray
Thanks for your help.
import numpy as np
import datetime

data = np.random.randint(5, size=(2,2,2))
data = data + 1595

#Function
def Int_to_DOY(int_array):
    date_ = datetime.datetime(2000,1,1,0,0) + datetime.timedelta(int(int_array) - 1)
    return date_.timetuple().tm_yday

doy_data = data.flatten()
for i in range(len(doy_data)):
    doy_data[i] = Int_to_DOY(doy_data[i])
doy_data = doy_data.reshape((2,2,2))
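If you want to avoid the Python-level loop entirely, NumPy's own datetime64/timedelta64 arithmetic can compute the day of year for the whole array at once; a minimal sketch of that alternative, reusing the sample data from the question:
import numpy as np

data = np.array([[[1596, 1595],
                  [1599, 1599]],
                 [[1596, 1599],
                  [1595, 1595]]])

base = np.datetime64('2000-01-01')
dates = base + (data - 1).astype('timedelta64[D]')                    # calendar dates
year_starts = dates.astype('datetime64[Y]').astype('datetime64[D]')   # Jan 1 of each year
doy = (dates - year_starts).astype(int) + 1                           # 1-based day of year
print(doy)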
Since you tagged pandas:
import numpy as np
import pandas as pd

data = np.array([[[1596, 1595],
                  [1599, 1599]],
                 [[1596, 1599],
                  [1595, 1595]]])
s = pd.to_datetime('2000-01-01') + pd.to_timedelta(data.ravel(), unit='D')
s.dayofyear.values.reshape(data.shape) - 1
Output:
array([[[135, 134],
        [138, 138]],

       [[135, 138],
        [134, 134]]], dtype=int64)

can't convert expression to float problem

I am trying to use the "subs" function for a differential equation,
but I get the error: "can't convert expression to float".
I tried checking the types of the arrays, but they are all float.
import sympy as sym
from sympy.integrals import inverse_laplace_transform
from sympy.abc import s,t,y
import numpy as np
U = 1
G =(s+1)/(s*(s+2))
Y = G*U
y = inverse_laplace_transform(Y, s, t)
tm = np.linspace(0,2,3)
y_val = np.zeros(len(tm))
for i in range(len(tm)):
    y_val[i] = y.subs(t, tm[i])
print(y)
print(y_val)
line 17
y_val[i] = y.subs(t, tm[i])
TypeError: can't convert expression to float
The issue here is that, because tm[0] == 0, the evaluated y in the first iteration of your loop is Heaviside(0), which has no defined real value by default (see https://docs.sympy.org/latest/modules/functions/special.html#heaviside). This is because you have
from sympy.functions import exp, Heaviside
assert y == Heaviside(t) / 2 + exp(-2 * t) * Heaviside(t) / 2
The simplest workaround here is defining a linear space excluding 0, for instance
epsilon = 1e-15
tm = np.linspace(epsilon, 2, 3)
With y_val = np.zeros(len(tm)), the default datatype of the array is float. After modifying the code, you find that one of the y_val elements is an object (an unevaluated SymPy expression), not a float. You can use a list as a placeholder, or you can specify the datatype of the numpy array as object:
import sympy as sym
from sympy.integrals import inverse_laplace_transform
from sympy.abc import s,t,y
import numpy as np
U = 1
G =(s+1)/(s*(s+2))
Y = G*U
y = inverse_laplace_transform(Y, s, t)
tm = np.linspace(0,2,3)
# y_val = [0 for _ in range(len(tm))]
y_val = np.zeros(len(tm), dtype=object)
for i in range(len(tm)):
    y_val[i] = y.subs(t, tm[i])
print(y_val)
result: [Heaviside(0.0) 0.567667641618306 0.509157819444367]
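If you eventually need plain floats (for plotting or further numerics), another option is to turn the expression into a NumPy function with sympy.lambdify. A minimal sketch of that idea; the explicit mapping of Heaviside onto numpy.heaviside (with value 0.5 at 0) is an assumption here, not something taken from the answer above:
import numpy as np
import sympy as sym
from sympy.abc import t

# the inverse transform from the question: Heaviside(t)/2 + exp(-2*t)*Heaviside(t)/2
y = sym.Heaviside(t) / 2 + sym.exp(-2 * t) * sym.Heaviside(t) / 2
# supply a numeric Heaviside so lambdify does not choke on it
f = sym.lambdify(t, y, modules=[{'Heaviside': lambda x: np.heaviside(x, 0.5)}, 'numpy'])
tm = np.linspace(0, 2, 3)
y_val = f(tm)   # ordinary float array, directly plottable
print(y_val)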
I have a similar problem and your answers work for me, but I still need to put the data into a graph. I modified my problem for this question:
import sympy as sym
from sympy.integrals import inverse_laplace_transform
from sympy.abc import s,t,y
import numpy as np
import matplotlib.pyplot as plt
Y = (5*(1 - 5*s))/(s*(4*(s**2) + s + 1))*(1/s)
y = inverse_laplace_transform(Y, s, t)
tm = np.linspace(1e-15, 20, 100)
y_val = np.zeros(len(tm), dtype=object)
for i in range(len(tm)):
    y_val[i] = y.subs(t, tm[i])
plt.plot(y_val, tm)
plt.show()
Running this code I got same error:
TypeError: can't convert expression to float

Goodness of fit always being zero despite taking random data?

I'm trying to write code that generates random data and computes the goodness of fit, but I don't understand why the chi-squared statistic is always zero. May I have a fix for this? As an attempted fix, I tried playing around with different types to see if I got any resulting changes in the output, and I've also tried changing the parameters of the loop in question.
from scipy import stats
import math
import random
import numpy
import scipy
import numpy as np
def Linear_Chi2_Generate(observed_values = [], expected_values = []):
    #===============================================================#
    # !!!!!!! Generation of Data !!!!!!!!!! #
    #===============================================================#
    for i in range(0,12):
        a = random.randint(-10,10)
        b = random.randint(-10,10)
        y = a * (b + i)
        observed_values.append(y)
    #######################################################################################
    # !!! Array Setup !!!! #
    # ***Had the Array types converted to floats before computing Chi2*** #
    #######################################################################################
    t_s = 0
    o_v = np.array(observed_values)
    e_v = np.array(expected_values)
    o_v_f = o_v.astype(float)
    e_v_f = o_v.astype(float)
    z_o_e_v_f = zip(o_v.astype(float), e_v.astype(float))
    ######################################################################################
    for i in z_o_e_v_f:
        t_s += [((o_v_f)-(e_v_f))]**2/(e_v_f) # Computs the Chi2 Stat !
    ######################################################################################
    print("Observed Values ", o_v_f)
    print("Expected Values" , e_v_f)
    df=len(o_v_f)-1
    print("Our goodness of fit for our linear function", stats.chi2.cdf(t_s,df))
    return t_s

Linear_Chi2_Generate()
In your original code, e_v_f = o_v.astype(float) made o_v_f and e_v_f end up the same. There was also an issue in the for loop. I have edited your code a bit; see if it does what you are looking for:
from scipy import stats
import math
import random
import numpy
import scipy
import numpy as np
def Linear_Chi2_Generate(observed_values = [], expected_values = []):
    #===============================================================#
    # !!!!!!! Generation of Data !!!!!!!!!! #
    #===============================================================#
    for i in range(0,12):
        a_o = random.randint(-10,10)
        b_o = random.randint(-10,10)
        y_o = a_o * (b_o + i)
        observed_values.append(y_o)
        # a_e = random.randint(-10,10)
        # b_e = random.randint(-10,10)
        # y_e = a_e * (b_e + i)
        expected_values.append(y_o + 5)
    #######################################################################################
    # !!! Array Setup !!!! #
    # ***Had the Array types converted to floats before computing Chi2*** #
    #######################################################################################
    t_s = 0
    o_v = np.array(observed_values)
    e_v = np.array(expected_values)
    o_v_f = o_v.astype(float)
    e_v_f = e_v.astype(float)
    z_o_e_v_f = zip(o_v.astype(float), e_v.astype(float))
    ######################################################################################
    for o, e in z_o_e_v_f:
        t_s += (o - e) **2 / e # Computes the Chi2 Stat !
    ######################################################################################
    print("Observed Values ", o_v_f)
    print("Expected Values" , e_v_f)
    df=len(o_v_f)-1
    print("Our goodness of fit for our linear function", stats.chi2.cdf(t_s,df))
    return t_s

Linear_Chi2_Generate()
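As a side note, scipy.stats also ships a ready-made chisquare helper that returns the statistic and the p-value in one call, which avoids writing the summation loop by hand. A minimal sketch with hypothetical non-negative counts (the test is only meaningful for positive expected frequencies, which the random data above does not guarantee):
from scipy import stats
import numpy as np

# hypothetical observed/expected counts, just to illustrate the call
observed = np.array([18., 22., 19., 21.])
expected = np.array([20., 20., 20., 20.])
chi2_stat, p_value = stats.chisquare(f_obs=observed, f_exp=expected)
print("Chi2 statistic:", chi2_stat)
print("p-value:", p_value)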
