Featuretools TypeError: unhashable type: 'Int64Index'

I am trying to create an entity set from 3 dataframes, and while doing so I am getting the error: TypeError: unhashable type: 'Int64Index'
I have searched the Internet for similar issues but could not find anything related to datetime types. Please note that none of the columns of df_raw_view_logs are unique, so no column can be used as the index; that is why dataframe.index is being passed instead.
These are the dtypes of the dataframe that throws the error when I use one of its columns as the time index.
df_raw_view_logs.dtypes
server_time datetime64[ns]
device_type int8
session_id int64
user_id int64
item_id int64
dtype: object
import featuretools as ft

es = ft.EntitySet()
es = es.entity_from_dataframe(entity_id="train",
                              dataframe=df_es_train,
                              index=df_es_train.index,
                              time_index="impression_time",
                              )
es = es.entity_from_dataframe(entity_id="viewlogs",
                              dataframe=df_es_view_logs,
                              index=df_es_view_logs.index,
                              time_index="server_time",
                              )
es = es.entity_from_dataframe(entity_id="itemdata",
                              dataframe=df_es_item_data,
                              index=df_es_item_data.index,
                              )
new_relationship = ft.Relationship(es["train"]["user_id"],
                                   es["viewlogs"]["user_id"])
es = es.add_relationship(new_relationship)
new_relationship_1 = ft.Relationship(es["viewlogs"]["item_id"],
                                     es["itemdata"]["item_id"])
es = es.add_relationship(new_relationship_1)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-32-81425e9b87c5> in <module>
9 dataframe=df_es_view_logs,
10 index=df_es_view_logs.index,
---> 11 time_index="server_time",
12 )
13
D:\Anaconda3\envs\fastai\lib\site-packages\featuretools\entityset\entityset.py in entity_from_dataframe(self, entity_id, dataframe, index, variable_types, make_index, time_index, secondary_time_index, already_sorted)
495 secondary_time_index=secondary_time_index,
496 already_sorted=already_sorted,
--> 497 make_index=make_index)
498 self.entity_dict[entity.id] = entity
499 self.reset_data_description()
D:\Anaconda3\envs\fastai\lib\site-packages\featuretools\entityset\entity.py in __init__(self, id, df, entityset, variable_types, index, time_index, secondary_time_index, last_time_index, already_sorted, make_index, verbose)
67 """
68 _validate_entity_params(id, df, time_index)
---> 69 created_index, index, df = _create_index(index, make_index, df)
70
71 self.id = id
D:\Anaconda3\envs\fastai\lib\site-packages\featuretools\entityset\entity.py in _create_index(index, make_index, df)
547 # Case 3: user wanted to make index but column already exists
548 raise RuntimeError("Cannot make index: index variable already present")
--> 549 elif index not in df.columns:
550 if not make_index:
551 # Case 4: user names index, it is not in df. does not specify
D:\Anaconda3\envs\fastai\lib\site-packages\pandas\core\indexes\base.py in __contains__(self, key)
3917 #Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
3918 def __contains__(self, key):
-> 3919 hash(key)
3920 try:
3921 return key in self._engine
D:\Anaconda3\envs\fastai\lib\site-packages\pandas\core\indexes\base.py in __hash__(self)
3932
3933 def __hash__(self):
-> 3934 raise TypeError("unhashable type: %r" % type(self).__name__)
3935
3936 def __setitem__(self, key, value):
TypeError: unhashable type: 'Int64Index'

This is erroring because the index argument is supposed to be a string naming the column in your DataFrame that serves as the index, not the index values themselves.
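A minimal sketch of two common fixes, assuming the dataframe names from the question (the id column names view_log_id and train_id are made up for illustration): either promote the positional index to a real column and pass its name, or have Featuretools create an id column via make_index=True.

import featuretools as ft

es = ft.EntitySet()

# Option 1: turn the positional index into a named column, then pass the name.
df_es_view_logs = df_es_view_logs.reset_index().rename(columns={"index": "view_log_id"})
es = es.entity_from_dataframe(entity_id="viewlogs",
                              dataframe=df_es_view_logs,
                              index="view_log_id",  # a column name, not index values
                              time_index="server_time")

# Option 2: ask Featuretools to generate a fresh id column.
es = es.entity_from_dataframe(entity_id="train",
                              dataframe=df_es_train,
                              index="train_id",  # does not exist yet; created by make_index
                              make_index=True,
                              time_index="impression_time")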

Related

Value Error: index must be monotonic increasing or decreasing while using resample('M') function on datetime values

I am stuck at this point in my code. I am trying to expand each startdate/enddate pair into multiple rows, one per month, and for that I am using the resample function to sample the dates on a monthly basis. The sample code looks like this:
PS: A lot of the BCA_REF, STARTDATE, ENDDATE values are repeated and are not unique, owing to the use case.
import pandas as pd

df = pd.DataFrame(
    data=[['abc', '2018-08-01', '2025-07-31'], ['abc', '2018-08-01', '2025-07-31'],
          ['xyz', '2017-04-01', '2017-04-01'], ['xyz', '2017-04-01', '2017-04-01'],
          ['pqr', '2016-05-16', '2017-10-15']],
    columns=['BCA_REF', 'STARTDATE', 'ENDDATE']
)
df['STARTDATE'] = pd.to_datetime(df['STARTDATE'])
df['ENDDATE'] = pd.to_datetime(df['ENDDATE'])
df_start_end = df.melt(id_vars=['BCA_REF'], value_vars=['STARTDATE', 'ENDDATE'], value_name='date')
df_new = (
    df_start_end.groupby(['BCA_REF'])
    .apply(lambda x: x.drop_duplicates('date').set_index('date')
           .resample('M').pad())
    .drop(columns=['BCA_REF', 'variable'])
    .reset_index()
)
After I run this for 40K such rows, it gives me the following error
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipykernel_15069/2048245652.py in <module>
4 merged_final_new = (
5 mf_start_end.groupby(['BCA_REF'])
----> 6 .apply(lambda x: x.drop_duplicates('date').set_index('date')
7 .resample('M').pad())
8 # .drop(columns=['BCA_REF','variable'])
~/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in apply(self, func, *args, **kwargs)
1273 with option_context("mode.chained_assignment", None):
1274 try:
-> 1275 result = self._python_apply_general(f, self._selected_obj)
1276 except TypeError:
1277 # gh-20949
~/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in _python_apply_general(self, f, data)
1307 data after applying f
1308 """
-> 1309 keys, values, mutated = self.grouper.apply(f, data, self.axis)
1310
1311 return self._wrap_applied_output(
~/.local/lib/python3.7/site-packages/pandas/core/groupby/ops.py in apply(self, f, data, axis)
850 # group might be modified
851 group_axes = group.axes
--> 852 res = f(group)
853 if not _is_indexed_like(res, group_axes, axis):
854 mutated = True
/tmp/ipykernel_15069/2048245652.py in <lambda>(x)
5 mf_start_end.groupby(['BCA_REF'])
6 .apply(lambda x: x.drop_duplicates('date').set_index('date')
----> 7 .resample('M').pad())
8 # .drop(columns=['BCA_REF','variable'])
9 # .reset_index()
~/.local/lib/python3.7/site-packages/pandas/core/resample.py in pad(self, limit)
507 DataFrame.fillna: Fill NA/NaN values using the specified method.
508 """
--> 509 return self._upsample("pad", limit=limit)
510
511 ffill = pad
~/.local/lib/python3.7/site-packages/pandas/core/resample.py in _upsample(self, method, limit, fill_value)
1204 else:
1205 result = obj.reindex(
-> 1206 res_index, method=method, limit=limit, fill_value=fill_value
1207 )
1208
~/.local/lib/python3.7/site-packages/pandas/util/_decorators.py in wrapper(*args, **kwargs)
322 #wraps(func)
323 def wrapper(*args, **kwargs) -> Callable[..., Any]:
--> 324 return func(*args, **kwargs)
325
326 kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
~/.local/lib/python3.7/site-packages/pandas/core/frame.py in reindex(self, *args, **kwargs)
4770 kwargs.pop("axis", None)
4771 kwargs.pop("labels", None)
-> 4772 return super().reindex(**kwargs)
4773
4774 #deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
~/.local/lib/python3.7/site-packages/pandas/core/generic.py in reindex(self, *args, **kwargs)
4817 # perform the reindex on the axes
4818 return self._reindex_axes(
-> 4819 axes, level, limit, tolerance, method, fill_value, copy
4820 ).__finalize__(self, method="reindex")
4821
~/.local/lib/python3.7/site-packages/pandas/core/frame.py in _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy)
4596 if index is not None:
4597 frame = frame._reindex_index(
-> 4598 index, method, copy, level, fill_value, limit, tolerance
4599 )
4600
~/.local/lib/python3.7/site-packages/pandas/core/frame.py in _reindex_index(self, new_index, method, copy, level, fill_value, limit, tolerance)
4612 ):
4613 new_index, indexer = self.index.reindex(
-> 4614 new_index, method=method, level=level, limit=limit, tolerance=tolerance
4615 )
4616 return self._reindex_with_indexers(
~/.local/lib/python3.7/site-packages/pandas/core/indexes/base.py in reindex(self, target, method, level, limit, tolerance)
3824 if self._index_as_unique:
3825 indexer = self.get_indexer(
-> 3826 target, method=method, limit=limit, tolerance=tolerance
3827 )
3828 else:
~/.local/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_indexer(self, target, method, limit, tolerance)
3484 )
3485
-> 3486 return self._get_indexer(target, method, limit, tolerance)
3487
3488 def _get_indexer(
~/.local/lib/python3.7/site-packages/pandas/core/indexes/base.py in _get_indexer(self, target, method, limit, tolerance)
3506
3507 if method in ["pad", "backfill"]:
-> 3508 indexer = self._get_fill_indexer(target, method, limit, tolerance)
3509 elif method == "nearest":
3510 indexer = self._get_nearest_indexer(target, limit, tolerance)
~/.local/lib/python3.7/site-packages/pandas/core/indexes/base.py in _get_fill_indexer(self, target, method, limit, tolerance)
3582 indexer = engine_method(target_values, limit)
3583 else:
-> 3584 indexer = self._get_fill_indexer_searchsorted(target, method, limit)
3585 if tolerance is not None and len(self):
3586 indexer = self._filter_indexer_tolerance(target_values, indexer, tolerance)
~/.local/lib/python3.7/site-packages/pandas/core/indexes/base.py in _get_fill_indexer_searchsorted(self, target, method, limit)
3606 indexer = self.get_indexer(target)
3607 nonexact = indexer == -1
-> 3608 indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side)
3609 if side == "left":
3610 # searchsorted returns "indices into a sorted array such that,
~/.local/lib/python3.7/site-packages/pandas/core/indexes/base.py in _searchsorted_monotonic(self, label, side)
5763 return len(self) - pos
5764
-> 5765 raise ValueError("index must be monotonic increasing or decreasing")
5766
5767 def get_slice_bound(self, label, side: str_t, kind=None) -> int:
ValueError: index must be monotonic increasing or decreasing
I looked for solutions to this error; people suggested using sort_index()/sort_values() on the 'date' column, but it still does not work. I believe the issue is with the resample function.
Any help would be appreciated. Thank you.
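No answer was recorded for this question, but here is one plausible cause and a hedged sketch of a fix: .resample('M').pad() reindexes each group against a monthly DatetimeIndex, which requires the group's own index to be monotonic. If any BCA_REF has an ENDDATE earlier than its STARTDATE (easy to miss across 40K rows), that group's index is unsorted after set_index('date') and reindexing raises exactly this ValueError. Sorting inside the lambda, so each group's index is guaranteed monotonic, should address it:

df_new = (
    df_start_end.groupby(['BCA_REF'])
    .apply(lambda x: x.drop_duplicates('date')
           .sort_values('date')  # guarantee a monotonic per-group DatetimeIndex
           .set_index('date')
           .resample('M').pad())
    .drop(columns=['BCA_REF', 'variable'])
    .reset_index()
)

# To hunt for offending rows in the raw data:
# bad = df[df['ENDDATE'] < df['STARTDATE']]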

IndexError multiprocessing.Pool

I'm getting an IndexError using multiprocessing to process parts of a pandas DataFrame in parallel. vacancies is a pandas DataFrame containing several vacancies, of which one column is the raw text.
import pickle
import numpy as np
import pandas as pd

def addSkillRelevance(vacancies):
    skills = pickle.load(open("skills.pkl", "rb"))
    vacancies['skill'] = ''
    vacancies['skillcount'] = 0
    vacancies['all_skills_in_vacancy'] = ''
    new_vacancies = pd.DataFrame(columns=vacancies.columns)
    for vacancy_index, vacancy_row in vacancies.iterrows():
        # Create a df for which each row is a found skill (with the other attributes of the vacancy)
        per_vacancy_df = pd.DataFrame(columns=vacancies.columns)
        all_skills_in_vacancy = []
        skillcount = 0
        for skill_index, skill_row in skills.iterrows():
            # Making the search for the skill in the text body a bit smarter
            spaceafter = ' ' + skill_row['txn_skill_name'] + ' '
            newlineafter = ' ' + skill_row['txn_skill_name'] + '\n'
            tabafter = ' ' + skill_row['txn_skill_name'] + '\t'
            # Statement that returns true if we find a variation of the skill in the text body
            if ((spaceafter in vacancies.at[vacancy_index, 'body']) or
                    (newlineafter in vacancies.at[vacancy_index, 'body']) or
                    (tabafter in vacancies.at[vacancy_index, 'body'])):
                # Adding the skill to the list of skills found in the vacancy
                all_skills_in_vacancy.append(skill_row['txn_skill_name'])
                # Increasing the skillcount
                skillcount += 1
                # Adding the skill to the row
                vacancies.at[vacancy_index, 'skill'] = skill_row['txn_skill_name']
                # Add a row to the vacancy df where 1 row means 1 skill
                per_vacancy_df = per_vacancy_df.append(vacancies.iloc[vacancy_index])
        # Adding the list of all found skills in the vacancy to each (skill) row
        per_vacancy_df['all_skills_in_vacancy'] = str(all_skills_in_vacancy)
        per_vacancy_df['skillcount'] = skillcount
        # Adds the individual vacancy df to a new vacancy df
        new_vacancies = new_vacancies.append(per_vacancy_df)
    return new_vacancies
def executeSkillScript(vacancies):
    from multiprocessing import Pool
    vacancies = vacancies.head(100298)
    num_workers = 47
    pool = Pool(num_workers)
    vacancy_splits = np.array_split(vacancies, num_workers)
    results_list = pool.map(addSkillRelevance, vacancy_splits)
    new_vacancies = pd.concat(results_list, axis=0)
    pool.close()
    pool.join()

executeSkillScript(vacancies)
The function addSkillRelevance() takes in a pandas DataFrame and outputs a pandas DataFrame (with more columns). For some reason, after finishing all the multiprocessing, I get an IndexError on results_list = pool.map(addSkillRelevance,vacancy_splits). I'm quite stuck as I don't know how to handle the error. Does anyone have tips as to why the IndexError is occurring?
The error:
IndexError Traceback (most recent call last)
<ipython-input-11-7cb04a51c051> in <module>()
----> 1 executeSkillScript(vacancies)
<ipython-input-9-5195d46f223f> in executeSkillScript(vacancies)
14
15 vacancy_splits = np.array_split(vacancies, num_workers)
---> 16 results_list = pool.map(addSkillRelevance,vacancy_splits)
17 new_vacancies = pd.concat(results_list, axis=0)
18
~/anaconda3/envs/amazonei_tensorflow_p36/lib/python3.6/multiprocessing/pool.py in map(self, func, iterable, chunksize)
264 in a list that is returned.
265 '''
--> 266 return self._map_async(func, iterable, mapstar, chunksize).get()
267
268 def starmap(self, func, iterable, chunksize=None):
~/anaconda3/envs/amazonei_tensorflow_p36/lib/python3.6/multiprocessing/pool.py in get(self, timeout)
642 return self._value
643 else:
--> 644 raise self._value
645
646 def _set(self, i, obj):
IndexError: single positional indexer is out-of-bounds
As per the suggestion, the error is coming from this line:
per_vacancy_df = per_vacancy_df.append(vacancies.iloc[vacancy_index])
The error occurs because vacancy_index is an index label, not a position. iterrows() yields labels, and after np.array_split each chunk keeps its original labels, so a label like 90000 is out of bounds when used positionally with .iloc on a chunk of roughly 2,000 rows.
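A minimal sketch of the fix under that reading: select by label with .loc, or simply reuse the row that iterrows() already yields. Either line below replaces the failing one inside addSkillRelevance:

# vacancy_index is an index *label*, so select by label, not by position:
per_vacancy_df = per_vacancy_df.append(vacancies.loc[vacancy_index])

# Or skip the lookup entirely; iterrows() already hands us the row:
per_vacancy_df = per_vacancy_df.append(vacancy_row)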

How to impute values in a column and overwrite existing values

I'm trying to learn machine learning, and I need to fill in the missing values for the cleaning stage of the workflow. I have 13 columns and need to impute values for 8 of them. One column is called Dependents, and I want to fill in the blanks with the word missing and change the cells that do contain data as follows: 1 to one, 2 to two, 3 to three and 3+ to threePlus.
I'm running the program in Anaconda and the name of the dataframe is train.
train.columns
this gives me
Index(['Loan_ID', 'Gender', 'Married', 'Dependents', 'Education',
'Self_Employed', 'ApplicantIncome', 'CoapplicantIncome', 'LoanAmount',
'Loan_Amount_Term', 'Credit_History', 'Property_Area', 'Loan_Status'],
dtype='object')
Next:
print("Dependents")
print(train['Dependents'].unique())
this gives me
Dependents
['0' '1' '2' '3+' nan]
Now I try imputing values as stated:
def impute_dependent():
    my_dict = {'1': 'one', '2': 'two', '3': 'three', '3+': 'threePlus'}
    return train.Dependents.map(my_dict).fillna('missing')

def convert_data(dataset):
    temp_data = dataset.copy()
    temp_data['Dependents'] = temp_data[['Dependents']].apply(impute_dependent, axis=1)
    return temp_data
this gives the error
TypeError Traceback (most recent call last)
<ipython-input-46-ccb1a5ea7edd> in <module>()
4 return temp_data
5
----> 6 train_dataset = convert_data(train)
7 #test_dataset = convert_data(test)
<ipython-input-46-ccb1a5ea7edd> in convert_data(dataset)
1 def convert_data(dataset):
2 temp_data = dataset.copy()
----> 3 temp_data['Dependents'] = temp_data[['Dependents']].apply(impute_dependent,axis=1)
4 return temp_data
5
D:\Anaconda2\lib\site-packages\pandas\core\frame.py in apply(self, func, axis, broadcast, raw, reduce, result_type, args, **kwds)
6002 args=args,
6003 kwds=kwds)
-> 6004 return op.get_result()
6005
6006 def applymap(self, func):
D:\Anaconda2\lib\site-packages\pandas\core\apply.py in get_result(self)
140 return self.apply_raw()
141
--> 142 return self.apply_standard()
143
144 def apply_empty_result(self):
D:\Anaconda2\lib\site-packages\pandas\core\apply.py in apply_standard(self)
246
247 # compute the result using the series generator
--> 248 self.apply_series_generator()
249
250 # wrap results
D:\Anaconda2\lib\site-packages\pandas\core\apply.py in apply_series_generator(self)
275 try:
276 for i, v in enumerate(series_gen):
--> 277 results[i] = self.f(v)
278 keys.append(v.name)
279 except Exception as e:
TypeError: ('impute_dependent() takes 0 positional arguments but 1 was given', 'occurred at index 0')
I expected one, two, three and threePlus to replace the existing values, and missing to fill in the blanks.
Would this do?
import numpy as np

my_dict = {'1': 'one', '2': 'two', '3': 'three', '3+': 'threePlus', np.nan: 'missing'}

def convert_data(dataset):
    temp_data = dataset.copy()
    temp_data.Dependents = temp_data.Dependents.map(my_dict)
    return temp_data
As a side note, part of your problem might be the use of apply: essentially, apply passes data through a function and puts in what comes out. I might be wrong, but I think your function needs to take the input given by apply, e.g.:
def impute_dependent(dep):
    my_dict = {'1': 'one', '2': 'two', '3': 'three', '3+': 'threePlus', np.nan: 'missing'}
    return my_dict.get(dep, dep)  # .get avoids a KeyError on unmapped values such as '0'

df.dependents = df.dependents.apply(impute_dependent)
This way, for every value in df.dependents, apply will take that value and give it to impute_dependent as an argument, then take the returned value as output. As is, when I trial your code I get an error because impute_dependent takes no arguments.
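To make the apply semantics concrete, a small illustrative sketch (the data is made up): DataFrame.apply with axis=1 hands the function each row as a Series, while Series.apply hands it one scalar at a time. A zero-argument function fails in both cases, because apply always passes something in.

import pandas as pd

df = pd.DataFrame({'Dependents': ['1', '3+']})

# DataFrame.apply(axis=1): the function receives each row as a Series.
print(df[['Dependents']].apply(lambda row: row['Dependents'], axis=1))

# Series.apply: the function receives one scalar value at a time.
print(df['Dependents'].apply(lambda value: value))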

Getting Type Error Expected Strings or Bytes Like Object

I am working on a dataset with tweets, and I am trying to find the mentions of other users in a tweet; a tweet can mention no users, a single user, or multiple users.
Here is the head of the DataFrame:
The following is the function that I created to extract the list of mentions in a tweet:
import re

def getMention(text):
    mention = re.findall('(^|[^#\w])#(\w{1,15})', text)
    if len(mention) > 0:
        return [x[1] for x in mention]
    else:
        return None
I'm trying to create a new column in the DataFrame and apply the function with the following code:
df['mention'] = df['text'].apply(getMention)
On running this code I get the following error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-43-426da09a8770> in <module>
----> 1 df['mention'] = df['text'].apply(getMention)
~/anaconda3_501/lib/python3.6/site-packages/pandas/core/series.py in apply(self, func, convert_dtype, args, **kwds)
3192 else:
3193 values = self.astype(object).values
-> 3194 mapped = lib.map_infer(values, f, convert=convert_dtype)
3195
3196 if len(mapped) and isinstance(mapped[0], Series):
pandas/_libs/src/inference.pyx in pandas._libs.lib.map_infer()
<ipython-input-42-d27373022afd> in getMention(text)
1 def getMention(text):
2
----> 3 mention = re.findall('(^|[^#\w])#(\w{1,15})', text)
4 if len(mention) > 0:
5 return [x[1] for x in mention]
~/anaconda3_501/lib/python3.6/re.py in findall(pattern, string, flags)
220
221 Empty matches are included in the result."""
--> 222 return _compile(pattern, flags).findall(string)
223
224 def finditer(pattern, string, flags=0):
TypeError: expected string or bytes-like object
I can't comment (not enough rep), so here's what I suggest to troubleshoot the error.
It seems findall raises an exception because text is not a string, so you might want to check which type text actually is, using this:
def getMention(text):
    print(type(text))
    mention = re.findall(r'(^|[^#\w])#(\w{1,15})', text)
    if len(mention) > 0:
        return [x[1] for x in mention]
    else:
        return None
(or use the debugger, if you know how)
And if text can be converted to a string, maybe try this?
def getMention(text):
    mention = re.findall(r'(^|[^#\w])#(\w{1,15})', str(text))
    if len(mention) > 0:
        return [x[1] for x in mention]
    else:
        return None
P.S.: don't forget the r'...' prefix on your regex, so that backslash escapes are not interpreted by Python before they reach the regex engine.
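For completeness, the usual culprit here is NaN in the text column (pandas represents missing strings as float NaN). A sketch that skips non-strings may be cleaner than str(text), which would turn NaN into the literal string 'nan':

import re

def getMention(text):
    # NaN (a float) and other non-strings cannot be searched; treat as "no mentions".
    if not isinstance(text, str):
        return None
    mention = re.findall(r'(^|[^#\w])#(\w{1,15})', text)
    return [x[1] for x in mention] if mention else None

# df['mention'] = df['text'].apply(getMention)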

How to iterate over window objects to add them to a DataFrame?

I have an object that seems to be a window object, EWM [com=9.5,min_periods=0,adjust=True,ignore_na=False,axis=0]. It was created from predictions_df_list["prices"] and is meant to hold dates as the index and the exponentially weighted average of prices as the values. I wanted to assign it to a dataframe column, predictions_df_list['ewma'], yet it raised a NotImplementedError during type inference:
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
<ipython-input-21-b1286fe39d1c> in <module>
---> 59 predictions_df_list['ewma'] = pd.DataFrame.ewm(predictions_df_list["prices"], span=20) #pd.DataFrame.ewma
60 predictions_df_list['actual_value'] = test['prices']
61 predictions_df_list['actual_value_ewma'] = pd.DataFrame.ewm(predictions_df_list["actual_value"], span=20)
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\frame.py in __setitem__(self, key, value)
3117 else:
3118 # set column
-> 3119 self._set_item(key, value)
3120
3121 def _setitem_slice(self, key, value):
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\frame.py in _set_item(self, key, value)
3192
3193 self._ensure_valid_index(value)
-> 3194 value = self._sanitize_column(key, value)
3195 NDFrame._set_item(self, key, value)
3196
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\frame.py in _sanitize_column(self, key, value, broadcast)
3385 value = _sanitize_index(value, self.index, copy=False)
3386
-> 3387 elif isinstance(value, Index) or is_sequence(value):
3388 from pandas.core.series import _sanitize_index
3389
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\dtypes\inference.py in is_sequence(obj)
470
471 try:
--> 472 iter(obj) # Can iterate over it.
473 len(obj) # Has a length associated with it.
474 return not isinstance(obj, string_and_binary_types)
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\window.py in __iter__(self)
184 def __iter__(self):
185 url = 'https://github.com/pandas-dev/pandas/issues/11704'
--> 186 raise NotImplementedError('See issue #11704 {url}'.format(url=url))
187
188 def _get_index(self, index=None):
NotImplementedError: See issue #11704 https://github.com/pandas-dev/pandas/issues/11704
When looking for documentation on window objects, it seemed to me that they were Python 2 objects. Anyway, here is the predictions_df_list["prices"] series I am working with, for reproducing the error:
2007-11-01 14021.1
2007-11-02 13825.1
2007-11-03 13533.1
2007-11-04 14021.1
2007-11-05 13345.1
2007-11-06 12578.1
2007-11-07 14021.1
2007-11-08 13533.1
2007-11-09 12678.1
2007-11-10 12578.1
2007-11-11 14021.1
2007-11-12 13825.1
2007-11-13 13533.1
2007-11-14 12661.1
2007-11-15 13320.1
2007-11-16 12678.1
2007-11-17 12775.1
2007-11-18 13533.1
2007-11-19 13868.1
2007-11-20 12581.1
2007-11-21 13345.1
2007-11-22 13533.1
2007-11-23 12678.1
2007-11-24 13533.1
2007-11-25 12684.1
2007-11-26 13825.1
2007-11-27 14021.1
2007-11-28 14021.1
2007-11-29 12678.1
2007-11-30 12578.1
...
2007-12-02 13320.1
2007-12-03 12661.1
2007-12-04 13533.1
2007-12-05 12578.1
2007-12-06 13533.1
2007-12-07 13533.1
2007-12-08 14021.1
2007-12-09 12639.1
2007-12-10 12661.1
2007-12-11 13345.1
2007-12-12 12578.1
2007-12-13 14021.1
2007-12-14 13345.1
2007-12-15 13533.1
2007-12-16 12895.1
2007-12-17 13686.1
2007-12-18 14052.1
2007-12-19 14021.1
2007-12-20 13686.1
2007-12-21 12730.1
2007-12-22 13686.1
2007-12-23 12586.1
2007-12-24 12741.1
2007-12-25 12678.1
2007-12-26 13533.1
2007-12-27 12775.1
2007-12-28 12578.1
2007-12-29 12661.1
2007-12-30 12895.1
2007-12-31 12639.1
Freq: D, Name: prices, Length: 61, dtype: float64
Your ewma values can be obtained by taking the EWM object you have and calling .mean() on it:
df['ewm'] = df['values'].ewm(alpha=0.001).mean()
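Applied to the names in the question (assuming predictions_df_list is a DataFrame and keeping span=20 from the failing line), the fix would look like the sketch below; .mean() materializes the lazy window object into an ordinary Series that can be assigned to a column:

predictions_df_list['ewma'] = predictions_df_list['prices'].ewm(span=20).mean()
predictions_df_list['actual_value_ewma'] = predictions_df_list['actual_value'].ewm(span=20).mean()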
