PhraseMatcher to match in a different token attribute - nlp

We would like to match a set of phrases using PhraseMatcher. However, we would like to match not only on the verbatim text but also on a normalized version of the input: for instance, lowercased, with accents removed, etc.
We have tried to add a custom attribute to the Token and use it in the init of the PhraseMatcher, but it did not work.
We could transform the text using a custom pipeline component, but we want to keep the original text so we can still use other spaCy components.
def deaccent(text):
    ...
    return modified_text

def get_normalization(doc):
    return deaccent(doc.text)

Token.set_extension('get_norm', getter=get_normalization)

patterns_ = [{"label": "TECH", "pattern": "java"}]
ruler = EntityRuler(nlp, phrase_matcher_attr="get_norm")
ruler.add_patterns(patterns_)
nlp.add_pipe(ruler)
What is the way to do this?

Since EntityRuler is based on PhraseMatcher, I copy here a working example with spaCy v2.2.0. Follow the comments to see how to work with the "NORM" attribute of the tokens.
At the end, you can see how the word "FÁCIL" matches the pattern "facil", since it has been normalized.
import re
import spacy
from unicodedata import normalize
from spacy.matcher import PhraseMatcher
from spacy.tokens import Span
from spacy.lang.es import Spanish
# Custom pipeline component that overwrites the "norm" attribute of each token
class Deaccentuate(object):
    def __init__(self, nlp):
        self._nlp = nlp

    def __call__(self, doc):
        for token in doc:
            token.norm_ = self.deaccent(token.lower_)  # write the norm_ attribute!
        return doc

    @staticmethod
    def deaccent(text):
        """Remove accentuation from the given string"""
        text = re.sub(
            r"([^n\u0300-\u036f]|n(?!\u0303(?![\u0300-\u036f])))[\u0300-\u036f]+", r"\1",
            normalize("NFD", text), 0, re.I
        )
        return normalize("NFC", text)


nlp = Spanish()

# Add the component to the pipeline
custom_component = Deaccentuate(nlp)
nlp.add_pipe(custom_component, first=True, name='normalizer')

# Initialize the matcher with the patterns to be matched
matcher = PhraseMatcher(nlp.vocab, attr="NORM")  # match on the NORM attribute of the tokens
patterns_ = nlp.pipe(['facil', 'dificil'])
matcher.add('MY_ENTITY', None, *patterns_)

# Run an example and print the results
doc = nlp("esto es un ejemplo FÁCIL")
matches = matcher(doc)
for match_id, start, end in matches:
    span = Span(doc, start, end, label=match_id)
    print("MATCHED: " + span.text)
This bug was fixed in release v2.1.8: https://github.com/explosion/spaCy/issues/4002
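For completeness: once a component like Deaccentuate above writes token.norm_, the EntityRuler approach from the question should also work if you point it at the built-in "NORM" attribute instead of a custom extension. A minimal sketch (the accented example input is made up):

from spacy.pipeline import EntityRuler

# Assumes nlp already contains a component (like Deaccentuate) that fills token.norm_.
ruler = EntityRuler(nlp, phrase_matcher_attr="NORM")
ruler.add_patterns([{"label": "TECH", "pattern": "java"}])
nlp.add_pipe(ruler)

doc = nlp("programando en JÁVA")  # hypothetical accented input
print([(ent.text, ent.label_) for ent in doc.ents])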

Related

Lemmatize df column

I am trying to lemmatize content in a df but the function I wrote isn't working. Prior to trying to lemmatize, the data in the column looked like this.
Then I ran the following code:
import nltk
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer

# Init the Wordnet Lemmatizer
lemmatizer = WordNetLemmatizer()

def lemmatize_text(text):
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(w) for w in text]

df['content'] = df["content"].apply(lemmatize_text)
print(df.content)
Now the content column looks like this:
I'm not sure what I did wrong; I am just trying to lemmatize the data in the content column. Any help would be greatly appreciated.
You are lemmatizing each character instead of each word. Your function should look like this instead:
def lemmatize_text(text):
    lemmatizer = WordNetLemmatizer()
    return ' '.join([lemmatizer.lemmatize(w) for w in text.split(' ')])
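If the column text contains punctuation, splitting on spaces is fairly crude; a variant of the same fix using NLTK's tokenizer (punkt resource required) could look like this, with df being the same dataframe as in the question:

import nltk
nltk.download('punkt')
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

lemmatizer = WordNetLemmatizer()

def lemmatize_text(text):
    # Tokenize first so punctuation does not stick to the words being lemmatized.
    return ' '.join(lemmatizer.lemmatize(w) for w in word_tokenize(text))

df['content'] = df['content'].apply(lemmatize_text)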

Django REST API AttributeError - The serializer field might be named incorrectly and not match any attribute or key on the `list` instance

We are trying to build Django REST API endpoints for an existing Django website. The site uses Django 3.1, MySQL 8.0 and Python 3.7. I am new to Django REST framework, so I need help with what is probably a common issue. Details below.
I am getting the following error message -
AttributeError at /analysisapis/constituents
Got AttributeError when attempting to get a value for field strings on serializer ConstituentNameLists.
The serializer field might be named incorrectly and not match any attribute or key on the list instance.
Original exception text was: 'list' object has no attribute 'strings'.
My model is as follows.
Note: this is a common model that is also used by the Django website.
from django.db import models, connection
from django.urls import reverse
from . import queries as queries
# Create your models here.
class ISMASymbol(models.Manager):
'''
Used to Manage the Symbol
'''
def __init__(self):
'''
Initialize the Class instance
'''
self.symbol = 'NIFTY50'
self.tablename = 'NIFTY50'
self.exists = 2
self.message = 'NIFTY50 exists.'
self.close = 0
self.change = 0
self.returns = 0
self.band = 0
self.upperband = 0
self.lowerband = 0
self.lotsize = {0:0, 1:0, 2:0}
self.stepvalue = 0
self.weekly_expiry = True
self.weekly_future = False
self.isIndex = True
self.cashstocks = []
self.derivativestocks = []
self.indexlist = []
self.companyname = 'NIFTY 50'
self.has_fno = True
# ... SOME ADDITIONAL FUNCTIONS ...
def get_symbol_list(self):
'''
Creates a Symbol List for all Cash and Derivatives
Used to Create Sitemap
'''
symbol_query = queries.symbol_list
derivative_query = queries.derivative_list
index_query = queries.index_list
with connection.cursor() as cursor:
# All Stock List
cursor.execute(symbol_query)
for item in cursor.fetchall():
self.cashstocks.append(item[0])
# Derivative List
cursor.execute(derivative_query)
for item in cursor.fetchall():
self.derivativestocks.append(item[0])
# Index List
cursor.execute(index_query)
for item in cursor.fetchall():
self.indexlist.append(item[0])
cursor.close()
def get_cash_stocks(self):
'''
Returns all stocks listed on NSE
'''
return self.cashstocks
def get_derivative_stocks(self):
'''
Returns all derivative stocks listed on NSE
'''
return self.derivativestocks
class Meta:
proxy = True
My common function from the business layer.
Note: this common function is called from the Django website as well.
def get_constituents(indexname):
    '''
    Returns the Constituent details based on Index details
    '''
    constituents = None
    # Check for Index and accordingly populating Symbol List
    if (indexname == 'ALLSTOCKS'):
        constituents = symbol.get_cash_stocks()
    elif (indexname == 'DERIVATIVESONLY'):
        constituents = symbol.get_derivative_stocks()
    else:
        indexstocklist = get_index_constituents(indexname)
        constituents = indexstocklist
    return constituents
My API View used to return just the list -
from rest_framework.views import APIView
from rest_framework.response import Response
from analysis.business import get_constituents
from .serializers import ConstituentNameLists
class ConstituentList(APIView):
    '''
    Returns the List of Constituents
    '''
    def get(self, request, indexname='ALLSTOCKS'):
        '''
        Returns the Constituent List
        '''
        # Capturing Inputs in Appropriate Cases
        indexname = indexname.upper()
        constituents = get_constituents(indexname=indexname)
        print(constituents)
        serializer = ConstituentNameLists(constituents)
        return Response(serializer.data)
My Serializer for the list -
from rest_framework import serializers
class ConstituentNameLists(serializers.Serializer):
    '''
    Returns Index Constituents
    '''
    strings = serializers.ListField(
        child=serializers.CharField(max_length=100)
    )
And this is the list that I am trying to serialize, which is generating the above error:
['NIFTYINFRA', 'NIFTYALPHA50', 'NIFTYPHARMA', 'NIFTY50TR2XLEV', 'NIFTYMIDCAP100', 'NIFTYCPSE', 'NIFTY50TR1XINV', 'NIFTYCONSUMPTION', 'NIFTY50PR1XINV', 'INDIAVIX']
Please help me resolve this issue.
You do not need to serialize the list of strings coming out of the business layer, since it consists of native Python data types. So the code in the view can simply be as follows:
class ConstituentList(APIView):
    def get(self, request, indexname='ALLSTOCKS'):
        # Capturing Inputs in Appropriate Cases
        indexname = indexname.upper()
        constituents = get_constituents(indexname=indexname)
        return Response(constituents)
Thus, no serialization is needed for native datatypes.
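If you do want to keep the ConstituentNameLists serializer (for instance to validate or document the payload), the original AttributeError can also be avoided by wrapping the list in a dict whose key matches the field name, since the serializer looks for a strings attribute or key on the instance it is given. A sketch of that variant:

class ConstituentList(APIView):
    def get(self, request, indexname='ALLSTOCKS'):
        indexname = indexname.upper()
        constituents = get_constituents(indexname=indexname)
        # Wrap the plain list under the key the serializer field expects.
        serializer = ConstituentNameLists({'strings': constituents})
        return Response(serializer.data)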

How to apply template method pattern in Python data science process while not knowing exactly the number of repeating steps

I would like to apply the template method pattern in a data science project where I need to select or identify target subjects from a large pool of original subjects. I will create tags based on different characteristics of these subjects, e.g., age, sex, disease status, etc.
I would prefer this code to be reusable for future projects of a similar nature, but every project is somewhat different, and the criteria for selecting subjects into the final filtered pool vary from one project to another. How do I structure subject_selection_steps in such a way that it is flexible and customizable based on project needs? Currently I have only included three tags in my code, but I may need more or fewer in different projects.
import sys
from abc import ABC, abstractmethod
import pandas as pd
import datetime
import ctypes
import numpy as np
import random
import pysnooper
import var_creator.var_creator as vc
import feature_tagger.feature_tagger as ft
import data_descriptor.data_descriptor as dd
import data_transformer.data_transformer as dt
import helper_functions.helper_functions as hf
import sec1_data_preparation as data_prep
import sec2_prepped_data_import as prepped_data_import
class SubjectGrouping(ABC):
def __init__(self):
pass
def subject_selection_steps(self):
self._pandas_output_setting()
self.run_data_preparation()
self.import_processed_main_data()
self.inject_test_data()
self.create_all_subject_list()
self.CREATE_TAG1()
self.FILTER_SUBJECT_BY_TAG1()
self.CREATE_TAG2()
self.FILTER_SUBJECT_BY_TAG2()
self.CREATE_TAG3()
self.FILTER_SUBJECT_BY_TAG3()
self.finalize_data()
def _pandas_output_setting(self):
'''Set pandas output display setting'''
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 180)
@abstractmethod
def run_data_preparation(self):
'''Run data_preparation_steps from base class'''
pass
@abstractmethod
def import_processed_main_data(self):
'''Import processed main data'''
pass
def inject_test_data(self):
'''For unit tests: inject mock cases that are guaranteed to fulfill/fail the defined subject selection criteria'''
pass
def create_all_subject_list(self):
'''Gather all the unique subject ids from all datasets and create a full subject list'''
pass
def CREATE_TAG1(self): pass
def FILTER_SUBJECT_BY_TAG1(self): pass
def CREATE_TAG2(self): pass
def FILTER_SUBJECT_BY_TAG2(self): pass
def CREATE_TAG3(self): pass
def FILTER_SUBJECT_BY_TAG3(self): pass
def finalize_data(self):
pass
class SubjectGrouping_Project1(SubjectGrouping, data_prep.DataPreparation_Project1):
def __init__(self):
self.df_dad = None
self.df_pc = None
self.df_nacrs = None
self.df_pin = None
self.df_reg = None
self.df_final_subject_group1 = None
self.df_final_subject_group2 = None
self.df_final_subject_group3 = None
self.control_panel = {
'save_file_switch': False, # WARNING: Will overwrite existing files
'df_subsampling_switch': True, # WARNING: Only switch to True when testing
'df_subsampling_n': 8999,
'random_seed': 888,
'df_remove_dup_switch': True,
'parse_date_switch': True,
'result_printout_switch': True,
'comp_loc': 'office',
'show_df_n_switch': False, # To be implemented. Show df length before and after record removal
'done_switch': False,
}
def run_data_preparation(self):
self.data_preparation_steps()
def import_processed_main_data(self):
x = prepped_data_import.PreppedDataImport_Project1()
x.data_preparation_steps()
x.prepped_data_import_steps()
df_dict = x.return_all_dfs()
self.df_d, self.df_p, self.df_n, self.df_p, self.df_r = (df_dict['DF_D'], df_dict['DF_P'],
df_dict['DF_N'], df_dict['DF_P'], df_dict['DF_R'])
del x
if __name__=='__main__':
x = SubjectGrouping_Project1()
x.subject_selection_steps()
Consider a Filter Pattern. It allows you to filter a list of objects based on defined criteria, and you can easily introduce a new filter at a later point with minimal changes to your code.
Create a Criteria interface or abstract class:
class Criteria():
    def filter(self, items):
        raise NotImplementedError("Should have implemented this")
and have each of your filters extend the Criteria class. Let's say one of the filters is an age filter:
class AgeFilter(Criteria):
    def __init__(self, age=20):
        self.age = age

    def filter(self, items):
        filteredList = []
        for item in items:
            if item.age > self.age:
                filteredList.append(item)  # add to the filteredList
        return filteredList
Similarly, you can define other filters like DiseaseFilter or GenderFilter by extending the Criteria interface.
You can also combine filters with logical operations by defining And and Or filters. For example:
class AndFilter(Criteria):
    def __init__(self, filter1, filter2):
        self.filter1 = filter1
        self.filter2 = filter2

    def filter(self, items):
        filteredList1 = self.filter1.filter(items)
        filteredList2 = self.filter2.filter(filteredList1)
        return filteredList2
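An OrFilter, mentioned above alongside the And filter, could be sketched the same way (hypothetical, mirroring AndFilter):

class OrFilter(Criteria):
    def __init__(self, filter1, filter2):
        self.filter1 = filter1
        self.filter2 = filter2

    def filter(self, items):
        # Union of both filters' results, keeping order and avoiding duplicates.
        first = self.filter1.filter(items)
        second = self.filter2.filter(items)
        return first + [item for item in second if item not in first]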
Assuming you have already defined your filters, your subject_selection_steps method would then look like this:
def subject_selection_steps(self):
    # define list of filters
    filterList = [ageFilter1, maleFilter, MalariaAndJaundiceFilter]
    result = personList
    for criteria in filterList:
        result = criteria.filter(result)
    return result
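To connect this back to the template-method question: a concrete project class could then just declare its own list of filters, so adding or removing selection criteria never touches the shared workflow. A rough sketch (class and filter names are hypothetical):

class SubjectGrouping_Project2(SubjectGrouping):
    def get_filters(self):
        # Each project overrides only this method; the shared
        # subject_selection_steps() loops over whatever list it returns.
        return [
            AgeFilter(age=40),
            AndFilter(DiseaseFilter('malaria'), GenderFilter('male')),
        ]

    def subject_selection_steps(self):
        subjects = self.create_all_subject_list()
        for criteria in self.get_filters():
            subjects = criteria.filter(subjects)
        return subjects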

Error assigning label ID to span: not in StringStore

Since I'm very new to spaCy and Python, I'm trying here to create a user-defined label to recognize phrase matches. While parsing, I'm unable to assign the hash value to the label, and I encounter "Error while assigning label Id XXXX to a span object".
import spacy
from spacy.matcher import PhraseMatcher
from spacy.tokens import Span

class EntityMatcher(object):
    name = 'entity_matcher'

    def __init__(self, nlp, terms, label):
        patterns = [nlp(text) for text in terms]
        self.matcher = PhraseMatcher(nlp.vocab)
        self.matcher.add(label, None, *patterns)

    def __call__(self, doc):
        matches = self.matcher(doc)
        for match_id, start, end in matches:
            span = Span(doc, start, end, label=match_id)
            doc.ents = list(doc.ents) + [span]
        return doc

nlp = spacy.load('en_core_web_sm')
terms = (u'cat', u'dog', u'tree kangaroo', u'giant sea spider')
entity_matcher = EntityMatcher(nlp, terms, 'ANIMAL')
nlp.add_pipe(entity_matcher, after='ner')
print(nlp.pipe_names)  # the components in the pipeline

doc = nlp(u"This is a text about Barack Obama and a tree kangaroo")
print([(ent.text, ent.label_) for ent in doc.ents])
[Error]
  File "new.py", line 17, in __call__
    span = Span(doc, start, end, label=match_id)
  File "span.pyx", line 62, in spacy.tokens.span.Span.__cinit__
ValueError: [E084] Error assigning label ID 893087899 to span: not in StringStore.
Try to add the label to the nlp vocabulary.
from spacy.strings import StringStore
animal_hash = StringStore([u'ANIMAL']) # <-- match id
nlp.vocab.strings.add('ANIMAL')
I got a segmentation fault when I used after='ner'. I removed the parameter, like this:
nlp.add_pipe(entity_matcher)
With no other changes to your code, I got the output:
[('Barack Obama', 'PERSON'), ('tree kangaroo', 'ANIMAL')]
I couldn't reproduce your error. My spaCy version is 2.0.2.
I noticed the same error when using the small model en_core_web_sm with spaCy 2.0.16. However, just switching to the bigger model en_core_web_md solved it for me.
Simple solution to get the hash:
nlp.vocab.strings['ANIMAL']
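A variant that keeps the fix inside the component, so the label is registered in the vocabulary at construction time (a sketch based on the question's EntityMatcher):

class EntityMatcher(object):
    name = 'entity_matcher'

    def __init__(self, nlp, terms, label):
        # Registering the label up front guarantees its hash exists in the
        # StringStore when the Span is built later.
        self.label_id = nlp.vocab.strings.add(label)
        patterns = [nlp(text) for text in terms]
        self.matcher = PhraseMatcher(nlp.vocab)
        self.matcher.add(label, None, *patterns)

    def __call__(self, doc):
        matches = self.matcher(doc)
        spans = [Span(doc, start, end, label=self.label_id)
                 for match_id, start, end in matches]
        doc.ents = list(doc.ents) + spans
        return doc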

Serialize a custom transformer using python to be used within a Pyspark ML pipeline

I found the same discussion in the comments section of Create a custom Transformer in PySpark ML, but there is no clear answer. There is also an unresolved JIRA corresponding to that: https://issues.apache.org/jira/browse/SPARK-17025.
Given that there is no option provided by Pyspark ML pipeline for saving a custom transformer written in python, what are the other options to get it done? How can I implement the _to_java method in my python class that returns a compatible java object?
As of Spark 2.3.0 there's a much, much better way to do this.
Simply extend DefaultParamsWritable and DefaultParamsReadable and your class will automatically have write and read methods that will save your params and will be used by the PipelineModel serialization system.
The docs were not really clear, and I had to do a bit of source reading to understand how deserialization works:
PipelineModel.read instantiates a PipelineModelReader
PipelineModelReader loads metadata and checks if language is 'Python'. If it's not, then the typical JavaMLReader is used (what most of these answers are designed for)
Otherwise, PipelineSharedReadWrite is used, which calls DefaultParamsReader.loadParamsInstance
loadParamsInstance will find class from the saved metadata. It will instantiate that class and call .load(path) on it. You can extend DefaultParamsReader and get the DefaultParamsReader.load method automatically. If you do have specialized deserialization logic you need to implement, I would look at that load method as a starting place.
On the opposite side:
PipelineModel.write will check if all stages are Java (implement JavaMLWritable). If so, the typical JavaMLWriter is used (what most of these answers are designed for)
Otherwise, PipelineWriter is used, which checks that all stages implement MLWritable and calls PipelineSharedReadWrite.saveImpl
PipelineSharedReadWrite.saveImpl will call .write().save(path) on each stage.
You can extend DefaultParamsWriter to get the DefaultParamsWritable.write method that saves metadata for your class and params in the right format. If you have custom serialization logic you need to implement, I would look at that and DefaultParamsWriter as a starting point.
Ok, so finally, you have a pretty simple transformer that extends Params and all your parameters are stored in the typical Params fashion:
from pyspark import keyword_only
from pyspark.ml import Transformer
from pyspark.ml.param.shared import HasOutputCols, Param, Params
from pyspark.ml.util import DefaultParamsReadable, DefaultParamsWritable
from pyspark.sql.functions import lit  # for the dummy _transform

class SetValueTransformer(
    Transformer, HasOutputCols, DefaultParamsReadable, DefaultParamsWritable,
):
    value = Param(
        Params._dummy(),
        "value",
        "value to fill",
    )

    @keyword_only
    def __init__(self, outputCols=None, value=0.0):
        super(SetValueTransformer, self).__init__()
        self._setDefault(value=0.0)
        kwargs = self._input_kwargs
        self._set(**kwargs)

    @keyword_only
    def setParams(self, outputCols=None, value=0.0):
        """
        setParams(self, outputCols=None, value=0.0)
        Sets params for this SetValueTransformer.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def setValue(self, value):
        """
        Sets the value of :py:attr:`value`.
        """
        return self._set(value=value)

    def getValue(self):
        """
        Gets the value of :py:attr:`value` or its default value.
        """
        return self.getOrDefault(self.value)

    def _transform(self, dataset):
        for col in self.getOutputCols():
            dataset = dataset.withColumn(col, lit(self.getValue()))
        return dataset
Now we can use it:
from pyspark.ml import Pipeline, PipelineModel

svt = SetValueTransformer(outputCols=["a", "b"], value=123.0)

p = Pipeline(stages=[svt])
df = sc.parallelize([(1, None), (2, 1.0), (3, 0.5)]).toDF(["key", "value"])
pm = p.fit(df)
pm.transform(df).show()
pm.write().overwrite().save('/tmp/example_pyspark_pipeline')
pm2 = PipelineModel.load('/tmp/example_pyspark_pipeline')
print('matches?', pm2.stages[0].extractParamMap() == pm.stages[0].extractParamMap())
pm2.transform(df).show()
Result:
+---+-----+-----+-----+
|key|value| a| b|
+---+-----+-----+-----+
| 1| null|123.0|123.0|
| 2| 1.0|123.0|123.0|
| 3| 0.5|123.0|123.0|
+---+-----+-----+-----+
matches? True
+---+-----+-----+-----+
|key|value| a| b|
+---+-----+-----+-----+
| 1| null|123.0|123.0|
| 2| 1.0|123.0|123.0|
| 3| 0.5|123.0|123.0|
+---+-----+-----+-----+
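One caveat that is easy to miss with this approach: DefaultParamsWriter records the stage's fully qualified Python class name in the saved metadata, so the custom class must be importable under that same module path in the session that loads the pipeline back. A minimal sketch, assuming SetValueTransformer lives in a module named my_transformers (the module name is hypothetical):

from pyspark.ml import PipelineModel
# my_transformers.py must be on the Python path of the loading session,
# so that the class path stored in the metadata can be resolved.
import my_transformers  # hypothetical module containing SetValueTransformer

pm2 = PipelineModel.load('/tmp/example_pyspark_pipeline')
print(type(pm2.stages[0]))  # e.g. <class 'my_transformers.SetValueTransformer'>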
I am not sure this is the best approach, but I too need the ability to save custom Estimators, Transformers and Models that I have created in Pyspark, and also to support their use in the Pipeline API with persistence. Custom Pyspark Estimators, Transformers and Models may be created and used in the Pipeline API but cannot be saved. This poses an issue in production when the model training takes longer than an event prediction cycle.
In general, Pyspark Estimators, Transformers and Models are just wrappers around the Java or Scala equivalents and the Pyspark wrappers just marshal the parameters to and from Java via py4j. Any persisting of the model is then done on the Java side. Because of this current structure, this limits Custom Pyspark Estimators, Transformers and Models to living only in the python world.
In a previous attempt, I was able to save a single Pyspark model by using pickle/dill serialization. This worked well, but still did not allow saving or loading it back from within the Pipeline API. Another SO post pointed me to the OneVsRest classifier, and I inspected its _to_java and _from_java methods; they do all the heavy lifting on the Pyspark side. After looking at them I thought: if one had a way to save the pickle dump into an existing, savable Java object, then it should be possible to save a custom Pyspark Estimator, Transformer or Model with the Pipeline API.
To that end, I found StopWordsRemover to be the ideal object to hijack, because it has an attribute, stopwords, that is a list of strings. The dill.dumps method returns a pickled representation of the object as a string. The plan was to turn the string into a list and then set the stopwords parameter of a StopWordsRemover to this list. Although it is a list of strings, I found that some of the characters would not marshal to the Java object, so the characters get converted to integers and the integers to strings. This all works great for saving a single instance, and also when saving within a Pipeline, because the Pipeline dutifully calls the _to_java method of my Python class (we are still on the Pyspark side, so this works). But coming back to Pyspark from Java did not work in the Pipeline API.
Because I am hiding my Python object in a StopWordsRemover instance, the Pipeline, when coming back to Pyspark, does not know anything about my hidden class; it only knows it has a StopWordsRemover instance. Ideally, it would be great to subclass Pipeline and PipelineModel, but alas this brings us back to trying to serialize a Python object. To combat this, I created a PysparkPipelineWrapper that takes a Pipeline or PipelineModel and just scans the stages, looking for a coded ID in the stopwords list (remember, this is just the pickled bytes of my Python object), which tells it to unwrap the list back into my instance and store it in the stage it came from. Below is code that shows how this all works.
For any Custom Pyspark Estimator, Transformer and Model, just inherit from Identifiable, PysparkReaderWriter, MLReadable, MLWritable. Then when loading a Pipeline and PipelineModel, pass such through PysparkPipelineWrapper.unwrap(pipeline).
This method does not address using the Pyspark code in Java or Scala, but at least we can save and load Custom Pyspark Estimators, Transformers and Models and work with Pipeline API.
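The core trick, independent of Spark, is just round-tripping a dill pickle through a list of decimal strings that can ride along as stop words. A standalone sketch of that encode/decode step (written for Python 3, where iterating over bytes yields ints; the answer's own code below is Python 2):

import dill

class Dummy(object):
    def __init__(self, n):
        self.n = n

obj = Dummy(42)

# Encode: dill bytes -> list of decimal strings (what ends up stored as "stop words").
as_strings = [str(b) for b in dill.dumps(obj)]

# Decode: decimal strings -> bytes -> the original object.
restored = dill.loads(bytes([int(s) for s in as_strings]))
print(restored.n)  # 42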
import dill
from pyspark.ml import Transformer, Pipeline, PipelineModel
from pyspark.ml.param import Param, Params
from pyspark.ml.util import Identifiable, MLReadable, MLWritable, JavaMLReader, JavaMLWriter
from pyspark.ml.feature import StopWordsRemover
from pyspark.ml.wrapper import JavaParams
from pyspark.context import SparkContext
from pyspark.sql import Row
class PysparkObjId(object):
"""
A class to specify constants used to identify and set up python
Estimators, Transformers and Models so they can be serialized on their
own and from within a Pipeline or PipelineModel.
"""
def __init__(self):
super(PysparkObjId, self).__init__()
@staticmethod
def _getPyObjId():
return '4c1740b00d3c4ff6806a1402321572cb'
@staticmethod
def _getCarrierClass(javaName=False):
return 'org.apache.spark.ml.feature.StopWordsRemover' if javaName else StopWordsRemover
class PysparkPipelineWrapper(object):
"""
A class to facilitate converting the stages of a Pipeline or PipelineModel
that were saved from PysparkReaderWriter.
"""
def __init__(self):
super(PysparkPipelineWrapper, self).__init__()
@staticmethod
def unwrap(pipeline):
if not (isinstance(pipeline, Pipeline) or isinstance(pipeline, PipelineModel)):
raise TypeError("Cannot recognize a pipeline of type %s." % type(pipeline))
stages = pipeline.getStages() if isinstance(pipeline, Pipeline) else pipeline.stages
for i, stage in enumerate(stages):
if (isinstance(stage, Pipeline) or isinstance(stage, PipelineModel)):
stages[i] = PysparkPipelineWrapper.unwrap(stage)
if isinstance(stage, PysparkObjId._getCarrierClass()) and stage.getStopWords()[-1] == PysparkObjId._getPyObjId():
swords = stage.getStopWords()[:-1] # strip the id
lst = [chr(int(d)) for d in swords]
dmp = ''.join(lst)
py_obj = dill.loads(dmp)
stages[i] = py_obj
if isinstance(pipeline, Pipeline):
pipeline.setStages(stages)
else:
pipeline.stages = stages
return pipeline
class PysparkReaderWriter(object):
"""
A mixin class so custom pyspark Estimators, Transformers and Models may
support saving and loading directly or be saved within a Pipeline or PipelineModel.
"""
def __init__(self):
super(PysparkReaderWriter, self).__init__()
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
def read(cls):
"""Returns an MLReader instance for our clarrier class."""
return JavaMLReader(PysparkObjId._getCarrierClass())
@classmethod
def load(cls, path):
"""Reads an ML instance from the input path, a shortcut of `read().load(path)`."""
swr_java_obj = cls.read().load(path)
return cls._from_java(swr_java_obj)
@classmethod
def _from_java(cls, java_obj):
"""
Get the dummy stopwords that are the characters of the dill dump plus our guid
and convert, via dill, back to our python instance.
"""
swords = java_obj.getStopWords()[:-1] # strip the id
lst = [chr(int(d)) for d in swords] # convert from string integer list to bytes
dmp = ''.join(lst)
py_obj = dill.loads(dmp)
return py_obj
def _to_java(self):
"""
Convert this instance to a dill dump, then to a list of strings with the unicode integer values of each character.
Use this list as a set of dummy stopwords and store them in a StopWordsRemover instance
:return: Java object equivalent to this instance.
"""
dmp = dill.dumps(self)
pylist = [str(ord(d)) for d in dmp] # convert byes to string integer list
pylist.append(PysparkObjId._getPyObjId()) # add our id so PysparkPipelineWrapper can id us.
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
java_array = sc._gateway.new_array(java_class, len(pylist))
for i in xrange(len(pylist)):
java_array[i] = pylist[i]
_java_obj = JavaParams._new_java_obj(PysparkObjId._getCarrierClass(javaName=True), self.uid)
_java_obj.setStopWords(java_array)
return _java_obj
class HasFake(Params):
def __init__(self):
super(HasFake, self).__init__()
self.fake = Param(self, "fake", "fake param")
def getFake(self):
return self.getOrDefault(self.fake)
class MockTransformer(Transformer, HasFake, Identifiable):
def __init__(self):
super(MockTransformer, self).__init__()
self.dataset_count = 0
def _transform(self, dataset):
self.dataset_count = dataset.count()
return dataset
class MyTransformer(MockTransformer, Identifiable, PysparkReaderWriter, MLReadable, MLWritable):
def __init__(self):
super(MyTransformer, self).__init__()
def make_a_dataframe(sc):
df = sc.parallelize([Row(name='Alice', age=5, height=80), Row(name='Alice', age=5, height=80), Row(name='Alice', age=10, height=80)]).toDF()
return df
def test1():
trA = MyTransformer()
trA.dataset_count = 999
print trA.dataset_count
trA.save('test.trans')
trB = MyTransformer.load('test.trans')
print trB.dataset_count
def test2():
trA = MyTransformer()
pipeA = Pipeline(stages=[trA])
print type(pipeA)
pipeA.save('testA.pipe')
pipeAA = PysparkPipelineWrapper.unwrap(Pipeline.load('testA.pipe'))
stagesAA = pipeAA.getStages()
trAA = stagesAA[0]
print trAA.dataset_count
def test3():
dfA = make_a_dataframe(sc)
trA = MyTransformer()
pipeA = Pipeline(stages=[trA]).fit(dfA)
print type(pipeA)
pipeA.save('testB.pipe')
pipeAA = PysparkPipelineWrapper.unwrap(PipelineModel.load('testB.pipe'))
stagesAA = pipeAA.stages
trAA = stagesAA[0]
print trAA.dataset_count
dfB = pipeAA.transform(dfA)
dfB.show()
I couldn't get @dmbaker's ingenious solution to work using Python 2 on Spark 2.2.0; I kept getting pickling errors. After several blind alleys I got a working solution by modifying his (her?) idea to write and read the parameter values as strings into StopWordsRemover's stop words directly.
Here's the base class you need if you want to save and load your own estimators or transformers:
from pyspark import SparkContext
from pyspark.ml.feature import StopWordsRemover
from pyspark.ml.util import Identifiable, MLWritable, JavaMLWriter, MLReadable, JavaMLReader
from pyspark.ml.wrapper import JavaWrapper, JavaParams
class PysparkReaderWriter(Identifiable, MLReadable, MLWritable):
"""
A base class for custom pyspark Estimators and Models to support saving and loading directly
or within a Pipeline or PipelineModel.
"""
def __init__(self):
super(PysparkReaderWriter, self).__init__()
@staticmethod
def _getPyObjIdPrefix():
return "_ThisIsReallyA_"
@classmethod
def _getPyObjId(cls):
return PysparkReaderWriter._getPyObjIdPrefix() + cls.__name__
def getParamsAsListOfStrings(self):
raise NotImplementedError("PysparkReaderWriter.getParamsAsListOfStrings() not implemented for instance: %r" % self)
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
def _to_java(self):
# Convert all our parameters to strings:
paramValuesAsStrings = self.getParamsAsListOfStrings()
# Append our own type-specific id so PysparkPipelineLoader can detect this algorithm when unwrapping us.
paramValuesAsStrings.append(self._getPyObjId())
# Convert the parameter values to a Java array:
sc = SparkContext._active_spark_context
java_array = JavaWrapper._new_java_array(paramValuesAsStrings, sc._gateway.jvm.java.lang.String)
# Create a Java (Scala) StopWordsRemover and give it the parameters as its stop words.
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.feature.StopWordsRemover", self.uid)
_java_obj.setStopWords(java_array)
return _java_obj
@classmethod
def _from_java(cls, java_obj):
# Get the stop words, ignoring the id at the end:
stopWords = java_obj.getStopWords()[:-1]
return cls.createAndInitialisePyObj(stopWords)
@classmethod
def createAndInitialisePyObj(cls, paramsAsListOfStrings):
raise NotImplementedError("PysparkReaderWriter.createAndInitialisePyObj() not implemented for type: %r" % cls)
@classmethod
def read(cls):
"""Returns an MLReader instance for our clarrier class."""
return JavaMLReader(StopWordsRemover)
@classmethod
def load(cls, path):
"""Reads an ML instance from the input path, a shortcut of `read().load(path)`."""
swr_java_obj = cls.read().load(path)
return cls._from_java(swr_java_obj)
Your own pyspark algorithm must then inherit from PysparkReaderWriter and override the getParamsAsListOfStrings() method which saves your parameters to a list of strings. Your algorithm must also override the createAndInitialisePyObj() method for converting a list of strings back into your parameters. Behind the scenes the parameters are converted to and from the stop words used by StopWordsRemover.
Example estimator with 3 parameters of different type:
from pyspark.ml.param.shared import Param, Params, TypeConverters
from pyspark.ml.base import Estimator
class MyEstimator(Estimator, PysparkReaderWriter):
def __init__(self):
super(MyEstimator, self).__init__()
# 3 sample parameters, deliberately of different types:
stringParam = Param(Params._dummy(), "stringParam", "A dummy string parameter", typeConverter=TypeConverters.toString)
def setStringParam(self, value):
return self._set(stringParam=value)
def getStringParam(self):
return self.getOrDefault(self.stringParam)
listOfStringsParam = Param(Params._dummy(), "listOfStringsParam", "A dummy list of strings.", typeConverter=TypeConverters.toListString)
def setListOfStringsParam(self, value):
return self._set(listOfStringsParam=value)
def getListOfStringsParam(self):
return self.getOrDefault(self.listOfStringsParam)
intParam = Param(Params._dummy(), "intParam", "A dummy int parameter.", typeConverter=TypeConverters.toInt)
def setIntParam(self, value):
return self._set(intParam=value)
def getIntParam(self):
return self.getOrDefault(self.intParam)
def _fit(self, dataset):
model = MyModel()
# Just some changes to verify we can modify the model (and also it's something we can expect to see when restoring it later):
model.setAnotherStringParam(self.getStringParam() + " World!")
model.setAnotherListOfStringsParam(self.getListOfStringsParam() + ["E", "F"])
model.setAnotherIntParam(self.getIntParam() + 10)
return model
def getParamsAsListOfStrings(self):
paramValuesAsStrings = []
paramValuesAsStrings.append(self.getStringParam()) # Parameter is already a string
paramValuesAsStrings.append(','.join(self.getListOfStringsParam())) # ...convert from a list of strings
paramValuesAsStrings.append(str(self.getIntParam())) # ...convert from an int
return paramValuesAsStrings
@classmethod
def createAndInitialisePyObj(cls, paramsAsListOfStrings):
# Convert back into our parameters. Make sure you do this in the same order you saved them!
py_obj = cls()
py_obj.setStringParam(paramsAsListOfStrings[0])
py_obj.setListOfStringsParam(paramsAsListOfStrings[1].split(","))
py_obj.setIntParam(int(paramsAsListOfStrings[2]))
return py_obj
Example Model (also a Transformer) which has 3 different parameters:
from pyspark.ml.base import Model
class MyModel(Model, PysparkReaderWriter):
def __init__(self):
super(MyModel, self).__init__()
# 3 sample parameters, deliberately of different types:
anotherStringParam = Param(Params._dummy(), "anotherStringParam", "A dummy string parameter", typeConverter=TypeConverters.toString)
def setAnotherStringParam(self, value):
return self._set(anotherStringParam=value)
def getAnotherStringParam(self):
return self.getOrDefault(self.anotherStringParam)
anotherListOfStringsParam = Param(Params._dummy(), "anotherListOfStringsParam", "A dummy list of strings.", typeConverter=TypeConverters.toListString)
def setAnotherListOfStringsParam(self, value):
return self._set(anotherListOfStringsParam=value)
def getAnotherListOfStringsParam(self):
return self.getOrDefault(self.anotherListOfStringsParam)
anotherIntParam = Param(Params._dummy(), "anotherIntParam", "A dummy int parameter.", typeConverter=TypeConverters.toInt)
def setAnotherIntParam(self, value):
return self._set(anotherIntParam=value)
def getAnotherIntParam(self):
return self.getOrDefault(self.anotherIntParam)
def _transform(self, dataset):
# Dummy transform code:
return dataset.withColumn('age2', dataset.age + self.getAnotherIntParam())
def getParamsAsListOfStrings(self):
paramValuesAsStrings = []
paramValuesAsStrings.append(self.getAnotherStringParam()) # Parameter is already a string
paramValuesAsStrings.append(','.join(self.getAnotherListOfStringsParam())) # ...convert from a list of strings
paramValuesAsStrings.append(str(self.getAnotherIntParam())) # ...convert from an int
return paramValuesAsStrings
@classmethod
def createAndInitialisePyObj(cls, paramsAsListOfStrings):
# Convert back into our parameters. Make sure you do this in the same order you saved them!
py_obj = cls()
py_obj.setAnotherStringParam(paramsAsListOfStrings[0])
py_obj.setAnotherListOfStringsParam(paramsAsListOfStrings[1].split(","))
py_obj.setAnotherIntParam(int(paramsAsListOfStrings[2]))
return py_obj
Below is a sample test case showing how you can save and load your model. It's similar for the estimator so I omit that for brevity.
def createAModel():
m = MyModel()
m.setAnotherStringParam("Boo!")
m.setAnotherListOfStringsParam(["P", "Q", "R"])
m.setAnotherIntParam(77)
return m
def testSaveLoadModel():
modA = createAModel()
print(modA.explainParams())
savePath = "/whatever/path/you/want"
#modA.save(savePath) # Can't overwrite, so...
modA.write().overwrite().save(savePath)
modB = MyModel.load(savePath)
print(modB.explainParams())
testSaveLoadModel()
Output:
anotherIntParam: A dummy int parameter. (current: 77)
anotherListOfStringsParam: A dummy list of strings. (current: ['P', 'Q', 'R'])
anotherStringParam: A dummy string parameter (current: Boo!)
anotherIntParam: A dummy int parameter. (current: 77)
anotherListOfStringsParam: A dummy list of strings. (current: [u'P', u'Q', u'R'])
anotherStringParam: A dummy string parameter (current: Boo!)
Notice how the parameters have come back in as unicode strings. This may or may not make a difference to your underlying algorithm that you implement in _transform() (or _fit() for the estimator). So be aware of this.
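If unicode strings are a problem for your Python 2 code, one option (not part of the original answer) is to coerce the types as the parameters are restored, for example:

@classmethod
def createAndInitialisePyObj(cls, paramsAsListOfStrings):
    # Coerce the unicode values handed back from Java into plain str (Python 2).
    py_obj = cls()
    py_obj.setAnotherStringParam(str(paramsAsListOfStrings[0]))
    py_obj.setAnotherListOfStringsParam([str(s) for s in paramsAsListOfStrings[1].split(",")])
    py_obj.setAnotherIntParam(int(paramsAsListOfStrings[2]))
    return py_obj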
Finally, because the Scala algorithm behind the scenes is really a StopWordsRemover, you need to unwrap it back into your own class when loading the Pipeline or PipelineModel from disk. Here's the utility class that does this unwrapping:
from pyspark.ml import Pipeline, PipelineModel
from pyspark.ml.feature import StopWordsRemover
class PysparkPipelineLoader(object):
"""
A class to facilitate converting the stages of a Pipeline or PipelineModel
that were saved from PysparkReaderWriter.
"""
def __init__(self):
super(PysparkPipelineLoader, self).__init__()
@staticmethod
def unwrap(thingToUnwrap, customClassList):
if not (isinstance(thingToUnwrap, Pipeline) or isinstance(thingToUnwrap, PipelineModel)):
raise TypeError("Cannot recognize an object of type %s." % type(thingToUnwrap))
stages = thingToUnwrap.getStages() if isinstance(thingToUnwrap, Pipeline) else thingToUnwrap.stages
for i, stage in enumerate(stages):
if (isinstance(stage, Pipeline) or isinstance(stage, PipelineModel)):
stages[i] = PysparkPipelineLoader.unwrap(stage, customClassList)  # recurse with the same class list
if isinstance(stage, StopWordsRemover) and stage.getStopWords()[-1].startswith(PysparkReaderWriter._getPyObjIdPrefix()):
lastWord = stage.getStopWords()[-1]
className = lastWord[len(PysparkReaderWriter._getPyObjIdPrefix()):]
stopWords = stage.getStopWords()[:-1] # Strip the id
# Create and initialise the appropriate class:
py_obj = None
for clazz in customClassList:
if clazz.__name__ == className:
py_obj = clazz.createAndInitialisePyObj(stopWords)
if py_obj is None:
raise TypeError("I don't know how to create an instance of type: %s" % className)
stages[i] = py_obj
if isinstance(thingToUnwrap, Pipeline):
thingToUnwrap.setStages(stages)
else:
# PipelineModel
thingToUnwrap.stages = stages
return thingToUnwrap
Test for saving and loading a pipeline:
def testSaveAndLoadUnfittedPipeline():
estA = createAnEstimator()
#print(estA.explainParams())
pipelineA = Pipeline(stages=[estA])
savePath = "/whatever/path/you/want"
#pipelineA.save(savePath) # Can't overwrite, so...
pipelineA.write().overwrite().save(savePath)
pipelineReloaded = PysparkPipelineLoader.unwrap(Pipeline.load(savePath), [MyEstimator])
estB = pipelineReloaded.getStages()[0]
print(estB.explainParams())
testSaveAndLoadUnfittedPipeline()
Output:
intParam: A dummy int parameter. (current: 42)
listOfStringsParam: A dummy list of strings. (current: [u'A', u'B', u'C', u'D'])
stringParam: A dummy string parameter (current: Hello)
Test for saving and loading a pipeline model:
from pyspark.sql import Row
def make_a_dataframe(sc):
df = sc.parallelize([Row(name='Alice', age=5, height=80), Row(name='Bob', age=7, height=85), Row(name='Chris', age=10, height=90)]).toDF()
return df
def testSaveAndLoadPipelineModel():
dfA = make_a_dataframe(sc)
estA = createAnEstimator()
#print(estA.explainParams())
pipelineModelA = Pipeline(stages=[estA]).fit(dfA)
savePath = "/whatever/path/you/want"
#pipelineModelA.save(savePath) # Can't overwrite, so...
pipelineModelA.write().overwrite().save(savePath)
pipelineModelReloaded = PysparkPipelineLoader.unwrap(PipelineModel.load(savePath), [MyModel])
modB = pipelineModelReloaded.stages[0]
print(modB.explainParams())
dfB = pipelineModelReloaded.transform(dfA)
dfB.show()
testSaveAndLoadPipelineModel()
Output:
anotherIntParam: A dummy int parameter. (current: 52)
anotherListOfStringsParam: A dummy list of strings. (current: [u'A', u'B', u'C', u'D', u'E', u'F'])
anotherStringParam: A dummy string parameter (current: Hello World!)
+---+------+-----+----+
|age|height| name|age2|
+---+------+-----+----+
| 5| 80|Alice| 57|
| 7| 85| Bob| 59|
| 10| 90|Chris| 62|
+---+------+-----+----+
When unwrapping a pipeline or pipeline model you have to pass in a list of the classes that correspond to your own pyspark algorithms that are masquerading as StopWordsRemover objects in the saved pipeline or pipeline model. The last stop word in your saved object is used to identify your own class's name and then createAndInitialisePyObj() is called to create an instance of your class and initialise its parameters with the remaining stop words.
Various refinements could be made. But hopefully this will enable you to save and load custom estimators and transformers, both inside and outside pipelines, until SPARK-17025 is resolved and available to you.
Similar to the working answer by @dmbaker, I wrapped my custom transformer, called Aggregator, inside a built-in Spark transformer, in this example Binarizer, though I'm sure you can inherit from other transformers too. That allowed my custom transformer to inherit the methods necessary for serialization.
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorAssembler, Binarizer
from pyspark.ml.regression import LinearRegression

class Aggregator(Binarizer):
    """A huge hack to allow serialization of custom transformer."""

    def transform(self, input_df):
        agg_df = input_df\
            .groupBy('channel_id')\
            .agg({
                'foo': 'avg',
                'bar': 'avg',
            })\
            .withColumnRenamed('avg(foo)', 'avg_foo')\
            .withColumnRenamed('avg(bar)', 'avg_bar')
        return agg_df

# Create pipeline stages.
aggregator = Aggregator()
vector_assembler = VectorAssembler(...)
linear_regression = LinearRegression()

# Create pipeline.
pipeline = Pipeline(stages=[aggregator, vector_assembler, linear_regression])

# Train.
pipeline_model = pipeline.fit(input_df)

# Save model file to S3.
pipeline_model.save('s3n://example')
@dmbaker's solution didn't work for me. I believe that is because of the Python version (2.x versus 3.x). I made some updates to his solution and it now works on Python 3. My setup is listed below:
python: 3.6.3
spark: 2.2.1
dill: 0.2.7.1
class PysparkObjId(object):
"""
A class to specify constants used to identify and set up python
Estimators, Transformers and Models so they can be serialized on their
own and from within a Pipeline or PipelineModel.
"""
def __init__(self):
super(PysparkObjId, self).__init__()
@staticmethod
def _getPyObjId():
return '4c1740b00d3c4ff6806a1402321572cb'
@staticmethod
def _getCarrierClass(javaName=False):
return 'org.apache.spark.ml.feature.StopWordsRemover' if javaName else StopWordsRemover
class PysparkPipelineWrapper(object):
"""
A class to facilitate converting the stages of a Pipeline or PipelineModel
that were saved from PysparkReaderWriter.
"""
def __init__(self):
super(PysparkPipelineWrapper, self).__init__()
@staticmethod
def unwrap(pipeline):
if not (isinstance(pipeline, Pipeline) or isinstance(pipeline, PipelineModel)):
raise TypeError("Cannot recognize a pipeline of type %s." % type(pipeline))
stages = pipeline.getStages() if isinstance(pipeline, Pipeline) else pipeline.stages
for i, stage in enumerate(stages):
if (isinstance(stage, Pipeline) or isinstance(stage, PipelineModel)):
stages[i] = PysparkPipelineWrapper.unwrap(stage)
if isinstance(stage, PysparkObjId._getCarrierClass()) and stage.getStopWords()[-1] == PysparkObjId._getPyObjId():
swords = stage.getStopWords()[:-1] # strip the id
# convert stop words to int
swords = [int(d) for d in swords]
# get the byte value of all ints
lst = [x.to_bytes(length=1, byteorder='big') for x in
swords] # convert from string integer list to bytes
# return the first byte and concatenates all the others
dmp = lst[0]
for byte_counter in range(1, len(lst)):
dmp = dmp + lst[byte_counter]
py_obj = dill.loads(dmp)
stages[i] = py_obj
if isinstance(pipeline, Pipeline):
pipeline.setStages(stages)
else:
pipeline.stages = stages
return pipeline
class PysparkReaderWriter(object):
"""
A mixin class so custom pyspark Estimators, Transformers and Models may
support saving and loading directly or be saved within a Pipeline or PipelineModel.
"""
def __init__(self):
super(PysparkReaderWriter, self).__init__()
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
def read(cls):
"""Returns an MLReader instance for our clarrier class."""
return JavaMLReader(PysparkObjId._getCarrierClass())
@classmethod
def load(cls, path):
"""Reads an ML instance from the input path, a shortcut of `read().load(path)`."""
swr_java_obj = cls.read().load(path)
return cls._from_java(swr_java_obj)
@classmethod
def _from_java(cls, java_obj):
"""
Get the dummy stopwords that are the characters of the dill dump plus our guid
and convert, via dill, back to our python instance.
"""
swords = java_obj.getStopWords()[:-1] # strip the id
lst = [x.to_bytes(length=1, byteorder='big') for x in swords] # convert from string integer list to bytes
dmp = lst[0]
for i in range(1, len(lst)):
dmp = dmp + lst[i]
py_obj = dill.loads(dmp)
return py_obj
def _to_java(self):
"""
Convert this instance to a dill dump, then to a list of strings with the unicode integer values of each character.
Use this list as a set of dummy stopwords and store them in a StopWordsRemover instance
:return: Java object equivalent to this instance.
"""
dmp = dill.dumps(self)
pylist = [str(int(d)) for d in dmp] # convert bytes to string integer list
pylist.append(PysparkObjId._getPyObjId()) # add our id so PysparkPipelineWrapper can id us.
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
java_array = sc._gateway.new_array(java_class, len(pylist))
for i in range(len(pylist)):
java_array[i] = pylist[i]
_java_obj = JavaParams._new_java_obj(PysparkObjId._getCarrierClass(javaName=True), self.uid)
_java_obj.setStopWords(java_array)
return _java_obj
class HasFake(Params):
def __init__(self):
super(HasFake, self).__init__()
self.fake = Param(self, "fake", "fake param")
def getFake(self):
return self.getOrDefault(self.fake)
class CleanText(Transformer, HasInputCol, HasOutputCol, Identifiable, PysparkReaderWriter, MLReadable, MLWritable):
@keyword_only
def __init__(self, inputCol=None, outputCol=None):
super(CleanText, self).__init__()
kwargs = self._input_kwargs
self.setParams(**kwargs)
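    # NOTE: the snippet above ends mid-class. A plausible completion, assuming
    # CleanText follows the usual HasInputCol/HasOutputCol pattern (this part is
    # not from the original answer), would add setParams and a _transform:
    @keyword_only
    def setParams(self, inputCol=None, outputCol=None):
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _transform(self, dataset):
        # Placeholder transform; the real text-cleaning logic would go here.
        return dataset.withColumn(self.getOutputCol(), dataset[self.getInputCol()])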
I wrote some base classes to make this easier. Basically, I abstract all the complication of the code and initialisation into some base classes that expose a much simpler API for building custom ones. This includes taking care of the serialisation/deserialisation problem and of saving and loading SparkML objects. You can then just concentrate on the __init__ and transform/fit functions. You can find a full explanation with examples here.
