How could I create a docstring decorator in the presence of properties?

I have a collection of ever more specialized classes which correspond to collections of the same kind of data (temperature, density, etc.) but for different drifts; for example, one subclass has dimensions (nx, ny) and a different subclass has dimensions (ncv). I want to reflect that in the docstrings, for better documentation with Sphinx.
After reading many very useful threads here on Stack Overflow, I have arrived at this model:
import numpy as np
from functools import wraps

def class_decorator(cls):
    clsdict = {}
    mro = cls.mro()
    mro.reverse()
    for tmp in mro[1:]:  # Ignore the object class parent.
        clsdict.update(tmp.__dict__)
    for name, method in clsdict.items():
        if hasattr(method, '__og_doc__'):
            try:
                method.__doc__ = method.__og_doc__.format(**clsdict)
            except:
                pass
        else:
            try:
                method.__og_doc__ = method.__doc__
                method.__doc__ = method.__doc__.format(**clsdict)
            except:
                pass
    return cls

def mark_documentation(fn):
    if not hasattr(fn, '__og_doc__'):
        try:
            fn.__og_doc__ = fn.__doc__
        except:
            pass
    @wraps(fn)
    def wrapped(*args, **kwargs):
        return fn(*args, **kwargs)
    return wrapped

def documented_property(fn):
    if not hasattr(fn, '__og_doc__'):
        try:
            fn.__og_doc__ = fn.__doc__
        except:
            pass
    @wraps(fn)
    def wrapped(*args, **kwargs):
        return fn(*args, **kwargs)
    prp = property(wrapped)
    prp.__og_doc__ = fn.__og_doc__
    return prp
@class_decorator
class Base(object):
    _GRID_DIM = 'nx, ny'
    _TYPE = 'BaseData'

    def __init__(self, name):
        self.name = name

    def shape(self):
        """ This docstring contains the type '{_TYPE}' of class."""
        print('Simple')

    def operation(self, a, b, oper=np.sum, **kwargs):
        """ Test for functions with args and kwargs in {_TYPE}"""
        return oper([a, b])

    @classmethod
    def help(cls, var):
        try:
            print(getattr(cls, var).__doc__)
        except:
            print("No docstring yet.")

@class_decorator
class Advanced(Base):
    _GRID_DIM = 'ncv'
    _TYPE = 'AdvancedData'

    def __init__(self, name):
        super().__init__(name)

    @property
    @mark_documentation
    # @documented_property
    def arkansas(self):
        """({_GRID_DIM}, ns): Size of Arkansaw."""
        return 'Yeah'
I am aiming to get the correctly formatted docstring when I call the help method or use Sphinx, so that:
>>> adv = Advanced('ADV')
>>> adv.help("arkansas")
(ncv, ns): Size of Arkansaw.
>>> adv.help("operation")
Test for functions with args and kwargs in AdvancedData
I have managed to make it work so far, except for properties, because I assigned __og_doc__ to the function, but the property does not have that attribute. My last attempt at monkeypatching this, documented_property, fails because property is immutable (as expected), and I cannot come up with any way to avoid this roadblock.
Is there any way around this problem?
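One possible way around the immutability (a minimal sketch of my own, not from the original thread): the built-in property rejects new attributes, but instances of a trivial Python-level subclass of property get a __dict__, so __og_doc__ can be attached to them:

class DocumentedProperty(property):
    # Plain subclass: its instances accept arbitrary attributes
    # such as __og_doc__, unlike the built-in property.
    pass

def documented_property(fn):
    prp = DocumentedProperty(fn)
    prp.__og_doc__ = fn.__doc__
    return prp

Since property.__doc__ is writable on instances in CPython, the method.__doc__ = method.__og_doc__.format(**clsdict) assignment in class_decorator should then work for these objects as well.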

Related

Handling staticmethods while working with metaclasses

In the code below, I am using a metaclass along with a decorator to decorate all the user-defined methods.
It works for all instance methods, but in the case of staticmethods it fails due to the self argument. To avoid that, I am using a try/except block, which solves the problem, but in one of my projects it's not working out.
Is there a better way of decorating the output of a staticmethod via a function decorator enclosed in a metaclass?
from functools import wraps
import types

def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(self, *args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        try:
            result = input_function(self, *args, **kwargs)
        except:
            result = input_function(*args, **kwargs)
        return result
    return wrapper

class DecoratorMetaClass(type):
    def __new__(meta, name, bases, class_dict):
        klass = super().__new__(meta, name, bases, class_dict)
        for key in dir(klass):
            value = getattr(klass, key)
            if isinstance(value, types.FunctionType) and "__" not in key:
                wrapped = decorator_function(value)
                setattr(klass, key, wrapped)
        return klass

class InterfaceClass(metaclass=DecoratorMetaClass):
    def function(self, test_parameter=1):
        print(f"function - Test Parameter= {test_parameter}")

    @staticmethod
    def static_function(test_parameter=1):
        print(f"static_function - Test Parameter= {test_parameter}")

class UserClass(InterfaceClass, metaclass=DecoratorMetaClass):
    def __init__(self):
        pass

    def function_2(self, test_parameter=1):
        print(f"function_2 - Test Parameter= {test_parameter}")

instance = UserClass()
instance.function(test_parameter=2)
instance.function_2(test_parameter=2)
instance.static_function(test_parameter=2)
print(isinstance(instance, InterfaceClass))
PS: I am not using a class decorator because it causes the isinstance checks to fail.
Explanation
The major problem comes down to the methods' parameters. You were almost there.
You have to make the decorator's arguments compatible with your methods' parameters.
You can change the signature of the wrapper function from wrapper(self, *args, **kwargs) to wrapper(*args, **kwargs), and then just call result = input_function(*args, **kwargs). You don't need the try/except block for this decorator:
def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(*args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        return input_function(*args, **kwargs)
    return wrapper
Ideally, you should add *args (variable positional arguments) and **kwargs (variable keyword arguments) to the methods to make them compatible with your decorator.
In this case, I added *args before test_parameter=1 in static_function in InterfaceClass:
class InterfaceClass(metaclass=DecoratorMetaClass):
    @staticmethod
    def static_function(*args, test_parameter=1):
        print(f"static_function - Test Parameter= {test_parameter}")
Runnable Code
from functools import wraps
import types

def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(*args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        return input_function(*args, **kwargs)
    return wrapper

class DecoratorMetaClass(type):
    def __new__(meta, name, bases, class_dict):
        klass = super().__new__(meta, name, bases, class_dict)
        for key in dir(klass):
            value = getattr(klass, key)
            if isinstance(value, types.FunctionType) and "__" not in key:
                wrapped = decorator_function(value)
                setattr(klass, key, wrapped)
        return klass

class InterfaceClass(metaclass=DecoratorMetaClass):
    def function(self, test_parameter=1):
        print(f"function - Test Parameter= {test_parameter}")

    @staticmethod
    def static_function(*args, test_parameter=1):
        print(f"static_function - Test Parameter= {test_parameter}")

class UserClass(InterfaceClass, metaclass=DecoratorMetaClass):
    def __init__(self):
        pass

    def function_2(self, test_parameter=1):
        print(f"function_2 - Test Parameter= {test_parameter}")

instance = UserClass()
instance.function(test_parameter=2)
instance.function_2(test_parameter=2)
instance.static_function(test_parameter=2)
UserClass.static_function(test_parameter=3)
print(isinstance(instance, InterfaceClass))
Output
function - Test Parameter= 999
function_2 - Test Parameter= 999
static_function - Test Parameter= 999
static_function - Test Parameter= 999
True
Addressing OP's comment
Assuming test_parameter is always passed as a keyword argument, write decorator_function as follows:
def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(*args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        try:
            result = input_function(*args, **kwargs)
        except TypeError:
            result = input_function(**kwargs)
        return result
    return wrapper
This way you don't need to change the methods' signatures.
If you also call the functions with positional arguments, you will need to check the type of the first argument inserted into args; things get complicated and error-prone.
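One caveat worth noting (a hypothetical illustration of my own, not from the original answer): except TypeError also catches TypeErrors raised inside the method body, in which case the fallback re-calls the function with the wrong arguments and masks the original error.

class Demo(metaclass=DecoratorMetaClass):
    def method(self, test_parameter=1):
        return int(None)  # raises TypeError *inside* the body

Demo().method()
# The except TypeError branch retries input_function(**kwargs) without self,
# so the traceback complains about a missing positional argument
# instead of the real int(None) error.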

Python Class, Operator Overloading

Recently, while getting hands-on with Python class concepts, I came upon an observation I was not able to understand.
When I create an instance of the class below interactively (Python console), I also get a Finding __len__ line in the output.
class MyClass(object):
    counter = 0
    data = 'Class Variable'

    def __init__(self):
        self.counter += 1
        self.value = -1

    def __str__(self):
        return "Instance {} is the {} instance".format(self.__class__.__name__, self.counter)

    def __getattr__(self, item):
        print(f'Finding {item}')
        return self.__dict__.get(item, f'Attr {item} not available, {self.__dict__}')

    def __setattr__(self, key, value):
        if key not in self.__dict__:
            self.__dict__[key] = value

    def __delattr__(self, item):
        print(f'Deleting attr: {item}')
        if item in self.__dict__:
            del self.__dict__[item]
        else:
            print(f'Cannot find {item} in {self.__dict__}')

if __name__ == '__main__':
    inst = MyClass()
    print(inst.id)
But running it as a top-level module doesn't add this additional line to the output.
Can someone help me understand why the Finding __len__ output is displayed interactively?
Below is an interactive session:
>>> import WS1
>>> x = WS1.MyClass()
Finding __len__
>>> x.name = 'Yathin'
Finding __len__

Inheritance in iterable implementation of python's multiprocessing.Queue

I found the default implementation of Python's multiprocessing.Queue lacking, in that it's not iterable like any other collection. So I went about creating a 'subclass' of it, adding the feature in. As you can see from the code below, it's not a proper subclass, as multiprocessing.Queue isn't a direct class itself but a factory function, and the real underlying class is multiprocessing.queues.Queue. I don't have the understanding, nor the effort to expend, necessary to mimic the factory function just so I can inherit from the class properly, so I simply had the new class create its own instance from the factory and treat it as the superclass. Here is the code:
from multiprocessing import Queue, Value, Lock
import queue

class QueueClosed(Exception):
    pass

class IterableQueue:
    def __init__(self, maxsize=0):
        self.closed = Value('b', False)
        self.close_lock = Lock()
        self.queue = Queue(maxsize)

    def close(self):
        with self.close_lock:
            self.closed.value = True
            self.queue.close()

    def put(self, elem, block=True, timeout=None):
        with self.close_lock:
            if self.closed.value:
                raise QueueClosed()
            else:
                self.queue.put(elem, block, timeout)

    def put_nowait(self, elem):
        self.put(elem, False)

    def get(self, block=True):
        if not block:
            return self.queue.get_nowait()
        elif self.closed.value:
            try:
                return self.queue.get_nowait()
            except queue.Empty:
                return None
        else:
            val = None
            while not self.closed.value:
                try:
                    val = self.queue.get_nowait()
                    break
                except queue.Empty:
                    pass
            return val

    def get_nowait(self):
        return self.queue.get_nowait()

    def join_thread(self):
        return self.queue.join_thread()

    def __iter__(self):
        return self

    def __next__(self):
        val = self.get()
        if val is None:
            raise StopIteration()
        else:
            return val

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
This allows me to instantiate an IterableQueue object just like a normal multiprocessing.Queue, put elements into it like normal, and then, inside child consumers, simply loop over it like so:
from iterable_queue import IterableQueue
from multiprocessing import Process, cpu_count
import os

def fib(n):
    if n < 2:
        return n
    return fib(n-1) + fib(n-2)

def consumer(queue):
    print(f"[{os.getpid()}] Consuming")
    for i in queue:
        print(f"[{os.getpid()}] < {i}")
        n = fib(i)
        print(f"[{os.getpid()}] {i} > {n}")
    print(f"[{os.getpid()}] Closing")

def producer():
    print("Enqueueing")
    with IterableQueue() as queue:
        procs = [Process(target=consumer, args=(queue,)) for _ in range(cpu_count())]
        [p.start() for p in procs]
        [queue.put(i) for i in range(36)]
    print("Finished")

if __name__ == "__main__":
    producer()
and it works almost seamlessly; the consumers exit the loop once the queue has been closed, but only after exhausting all remaining elements. However, I was unsatisfied with the lack of inherited methods. In an attempt to mimic actual inheritance behavior, I tried adding the following method to the class:
def __getattr__(self, name):
    if name in self.__dict__:
        return self.__dict__[name]
    else:
        return getattr(self.queue, name)
However, this fails when instances of the IterableQueue class are manipulated inside child multiprocessing.Process processes, as the class's __dict__ property is not preserved within them. I attempted to remedy this in a hacky manner by replacing the class's default __dict__ with a multiprocessing.Manager().dict(), like so:
def __init__(self, maxsize=0):
    self.closed = Value('b', False)
    self.close_lock = Lock()
    self.queue = Queue(maxsize)
    self.__dict__ = Manager().dict(self.__dict__)
However, on doing so I received an error stating RuntimeError: Synchronized objects should only be shared between processes through inheritance. So my question is: how should I go about inheriting from the Queue class properly, such that the subclass has inherited access to all of its properties? In addition, while the queue is empty but not closed, the consumers all sit in a busy loop instead of a true IO block, taking up valuable CPU resources. If you have any suggestions on concurrency and race-condition issues I might run into with this code, or on how I might solve the busy-loop issue, I'd be willing to take those as well.
Based on code provided by MisterMiyagi, I created this general-purpose IterableQueue class, which can accept arbitrary input, blocks properly, and does not hang on queue close:
from multiprocessing.queues import Queue
from multiprocessing import get_context

class QueueClosed(Exception):
    pass

class IterableQueue(Queue):
    def __init__(self, maxsize=0, *, ctx=None):
        super().__init__(
            maxsize=maxsize,
            ctx=ctx if ctx is not None else get_context()
        )

    def close(self):
        super().put((None, False))
        super().close()

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return self.get()
        except QueueClosed:
            raise StopIteration

    def get(self, *args, **kwargs):
        result, is_open = super().get(*args, **kwargs)
        if not is_open:
            super().put((None, False))
            raise QueueClosed
        return result

    def put(self, val, *args, **kwargs):
        super().put((val, True), *args, **kwargs)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
The multiprocessing.Queue wrapper only serves to supply the default context:
def Queue(self, maxsize=0):
    '''Returns a queue object'''
    from .queues import Queue
    return Queue(maxsize, ctx=self.get_context())
When inheriting, you can replicate this in the __init__ method. This allows you to inherit the entire Queue behaviour. You only need to add the iterator methods:
import time
from multiprocessing.queues import Queue
from multiprocessing import get_context

class IterableQueue(Queue):
    """
    ``multiprocessing.Queue`` that can be iterated to ``get`` values

    :param sentinel: signal that no more items will be received
    """
    def __init__(self, maxsize=0, *, ctx=None, sentinel=None):
        self.sentinel = sentinel
        super().__init__(
            maxsize=maxsize,
            ctx=ctx if ctx is not None else get_context()
        )

    def close(self):
        self.put(self.sentinel)
        # wait until the buffer is flushed...
        while self._buffer:
            time.sleep(0.01)
        # ...before shutting down the sender
        super().close()

    def __iter__(self):
        return self

    def __next__(self):
        result = self.get()
        if result == self.sentinel:
            # re-queue the sentinel for other listeners
            self.put(result)
            raise StopIteration
        return result
Note that the sentinel indicating end-of-queue is compared by equality, because identity is not preserved across processes. The often-used queue.Queue sentinel, object(), does not work properly with this.
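For illustration, a minimal usage sketch of this class (my own example, not from the original answer; the consumer function and item count are made up):

from multiprocessing import Process

def consumer(queue):
    # Iteration stops once the sentinel comes out of the queue.
    for item in queue:
        print(item)

if __name__ == "__main__":
    queue = IterableQueue()
    proc = Process(target=consumer, args=(queue,))
    proc.start()
    for i in range(5):
        queue.put(i)
    queue.close()  # enqueues the sentinel and flushes the buffer
    proc.join()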

Why doesn't __get__ method of metaclass get called?

I have this class Op:
class Pipeable(type):
    def __get__(self, instance, owner):
        def pipe_within(*args, **kwargs):
            return self(*args, op=instance, **kwargs)
        print('piping...')
        return pipe_within

class Op(metaclass=Pipeable):
    def __init__(self, op=None):
        if op is not None:
            print('piped!')
            self.op = op
        self.__dict__[type(self).__name__] = type(self)
I expect the Op class itself to work as a descriptor, because its metaclass has a __get__ method, but the code
op = Op().Op()
doesn't invoke Op.__get__. Why?
It is hard to tell what you really want here. But a metaclass that adds a property to itself at every new class might work better for whatever you want.
As far as I can understand your code, older classes won't be populated with references to the newer ones as you create them (while the newer ones, in turn, do get references to the others).
On second thought, dynamically creating properties inside __new__ seems hacky; you can instead implement the metaclass's __getattr__ and __dir__ methods, for much less convoluted code.
The simple version works for classes, but not for their instances, because instances do not trigger the __getattr__ on the metaclass:
class Pipeable(type):
    _classes = {}

    def __new__(metacls, name, bases, namespace, **kwds):
        cls = type.__new__(metacls, name, bases, namespace)
        metacls._classes[name] = cls
        return cls

    def __getattr__(cls, attr):
        classes = cls.__class__._classes
        if attr not in classes:
            raise AttributeError
        def pipe_within(*args, **kwargs):
            return cls(*args, op=classes[attr], **kwargs)
        print('piping...')
        return pipe_within

    def __dir__(cls):
        regular = super().__dir__()
        return sorted(regular + list(cls.__class__._classes.keys()))

class Op(metaclass=Pipeable):
    def __init__(self, op=None):
        if op is not None:
            print('piped!')
            self.op = op

Op.Op()
(Note as well that over time I have picked up this parameter-naming convention for metaclasses: since most of their methods take the class created with them in place of what would be "self" in ordinary classes, I find this naming easier to follow. It is not mandatory, and not necessarily "correct", though.)
But then, we can make it work for instances by creating __dir__ and __getattr__ directly on the created classes as well. The catch is that the classes you are creating may already have a __getattr__ or a custom __dir__, even in their superclasses, and those have to be wrapped. And then we don't want to re-wrap our own __getattr__ and __dir__, so some extra care is needed:
class Pipeable(type):
    _classes = {}

    def __new__(metacls, name, bases, namespace, **kwds):
        cls = type.__new__(metacls, name, bases, namespace)
        metacls._classes[name] = cls

        original__getattr__ = getattr(cls, "__getattr__", None)
        if hasattr(original__getattr__, "_metapipping"):
            # Do not wrap our own (metaclass) implementation of __getattr__.
            original__getattr__ = None
        original__dir__ = getattr(cls, "__dir__")  # Exists on "object", so it is always found.

        # These two functions have to be nested so they can get the
        # values of the original "__getattr__" and "__dir__" from
        # the closure. These values could be set on the created class, alternatively.
        def __getattr__(self, attr):
            if original__getattr__:
                # If it is desired that normal attribute lookup have
                # less precedence than these injected operators,
                # move this "if" block down.
                try:
                    value = original__getattr__(self, attr)
                except AttributeError:
                    pass
                else:
                    return value
            classes = self.__class__.__class__._classes
            if attr not in classes:
                raise AttributeError
            def pipe_within(*args, **kwargs):
                return cls(*args, op=classes[attr], **kwargs)
            print('piping...')
            return pipe_within
        __getattr__._pipping = True

        def __dir__(self):
            regular = original__dir__(self)
            return sorted(regular + list(self.__class__.__class__._classes.keys()))
        __dir__._pipping = True

        if not original__getattr__ or not hasattr(original__getattr__, "_pipping"):
            cls.__getattr__ = __getattr__
        if not hasattr(original__dir__, "_pipping"):
            cls.__dir__ = __dir__

        return cls

    def __getattr__(cls, attr):
        classes = cls.__class__._classes
        if attr not in classes:
            raise AttributeError
        def pipe_within(*args, **kwargs):
            return cls(*args, op=classes[attr], **kwargs)
        print('piping...')
        return pipe_within
    __getattr__._metapipping = True

    def __dir__(cls):
        regular = super().__dir__()
        return sorted(regular + list(cls.__class__._classes.keys()))

class Op(metaclass=Pipeable):
    def __init__(self, op=None):
        if op is not None:
            print('piped!')

Op().Op()
So, this ended up being lengthy, but it "does the right thing" by ensuring all classes and instances in the hierarchy can see each other, regardless of creation order.
Also, what makes up most of the complexity is correctly wrapping other possible customizations of __getattr__ and __dir__ in the class hierarchy. If you don't have any customization of those, this can be an order of magnitude simpler:
class Pipeable(type):
    _classes = {}

    def __new__(metacls, name, bases, namespace, **kwds):
        cls = type.__new__(metacls, name, bases, namespace)
        metacls._classes[name] = cls

        def __getattr__(self, attr):
            classes = self.__class__.__class__._classes
            if attr not in classes:
                raise AttributeError
            def pipe_within(*args, **kwargs):
                return cls(*args, op=classes[attr], **kwargs)
            print('piping...')
            return pipe_within

        def __dir__(self):
            regular = object.__dir__(self)
            return sorted(regular + list(self.__class__.__class__._classes.keys()))

        cls.__getattr__ = __getattr__
        cls.__dir__ = __dir__
        return cls

    def __getattr__(cls, attr):
        classes = cls.__class__._classes
        if attr not in classes:
            raise AttributeError
        def pipe_within(*args, **kwargs):
            return cls(*args, op=classes[attr], **kwargs)
        print('piping...')
        return pipe_within

    def __dir__(cls):
        regular = super().__dir__()
        return sorted(regular + list(cls.__class__._classes.keys()))
To get it to work, the descriptor must be a class attribute, not an instance attribute.
This code does what was desired.
class Pipeable(type):
    _instances = {}

    def __new__(cls, name, bases, namespace, **kwds):
        namespace.update(cls._instances)
        instance = type.__new__(cls, name, bases, namespace)
        cls._instances[name] = instance
        for inst in cls._instances.values():
            setattr(inst, name, instance)
        return instance

    def __get__(self, instance, owner):
        def pipe_within(*args, **kwargs):
            return self(*args, op=instance, **kwargs)
        print('piping...')
        return pipe_within

class Op(metaclass=Pipeable):
    def __init__(self, op=None):
        if op is not None:
            print('piped!')
            self.op = op

Op().Op()
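The rule can be seen in isolation with a minimal sketch (the names here are made up for illustration): the descriptor protocol only applies to objects found on the type, never to objects stored in an instance's __dict__.

class Descr:
    def __get__(self, instance, owner):
        return "via __get__"

class Holder:
    on_class = Descr()  # class attribute: __get__ IS invoked on access

obj = Holder()
obj.__dict__["on_instance"] = Descr()  # instance attribute: __get__ is NOT invoked

print(obj.on_class)     # -> via __get__
print(obj.on_instance)  # -> <__main__.Descr object at 0x...>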

Apply decorator to all methods of subclasses for timeit

I have a method decorator that looks like this:
def debug_run(fn):
    from functools import wraps
    @wraps(fn)
    def wrapper(self, *args, **kw):
        # log some stuff
        # timeit fn
        res = fn(self, *args, **kw)
        return res
    return wrapper
Right now I apply it to each method that I want to debug. Now I'm trying to apply it to all of a class's methods using a class decorator.
Rather than doing this:
class A():
    @debug_run
    def f(self):
        pass
I do this:
@decallmethods(debug_run)
class A():
    def f(self):
        pass

def decallmethods(decorator):
    def dectheclass(cls):
        for name, m in inspect.getmembers(cls, inspect.ismethod):
            if name in getattr(cls, 'METHODS_TO_INSPECT', []):
                setattr(cls, name, decorator(m))
        return cls
    return dectheclass
Applying the decorator to the base class does not work as expected: no log appears on the console. Now I wonder whether this approach is the right one, or whether I should use something else (apply the debug decorator to selected methods from the base class down to all subclasses).
[EDIT]
Finally found why no logs were printed:
Why is there a difference between inspect.ismethod and inspect.isfunction from python 2 -> 3?
Here is a complete example reflecting my code:
import inspect
import time
import logging as logger
from functools import wraps

logger.basicConfig(format='LOGGER - %(asctime)s %(message)s', level=logger.DEBUG)

def debug_run(fn):
    @wraps(fn)
    def wrapper(self, *args, **kw):
        logger.debug(
            "call method %s of instance %s with %r and %s "
            % (fn.__name__, self, args, kw))
        time1 = time.time()
        res = fn(self, *args, **kw)
        time2 = time.time()
        logger.debug(
            "%s function %0.3f ms" % (fn, (time2 - time1) * 1000.0))
        return res
    return wrapper

def decallmethods(decorator):
    def dectheclass(cls):
        for name, m in inspect.getmembers(
                cls, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x)):
            methods_to_inspect = getattr(cls, 'METHODS_TO_INSPECT', [])
            if name in methods_to_inspect:
                setattr(cls, name, decorator(m))
        return cls
    return dectheclass

class B(object):
    METHODS_TO_INSPECT = ["bfoo1", "bfoo2", "foo"]

    def __str__(self):
        return "%s:%s" % (repr(self), id(self))

    def bfoo1(self):
        pass

    def bfoo2(self):
        pass

    def foo(self):
        pass

    def run(self):
        print("print - Base run doing nothing")

class C(object):
    pass

@decallmethods(debug_run)
class A(B, C):
    METHODS_TO_INSPECT = ["bfoo1", "bfoo2", "foo", "run"]

    def foo(self):
        print("print - A foo")

    def run(self):
        self.bfoo1()
        self.bfoo2()
        self.foo()

a = A()
b = B()
a.run()
b.run()
In this case, applying decallmethods to B will not affect A, so I must apply it to both A and B, and thus to all subclasses of B.
Is it possible to have a mechanism that permits applying decallmethods to all subclass methods?
Look at this:
How can I decorate all functions of a class without typing it over and over for each method added? Python
delnan has a good answer; just add this rule to it:
if name in getattr(cls, 'METHODS_TO_INSPECT', []):
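A sketch of what that combination could look like (my adaptation of the linked metaclass approach to the METHODS_TO_INSPECT filter used above, not code from the linked answer; it assumes the debug_run defined earlier). Because every subclass of B is created through the same metaclass, each one gets its listed methods wrapped automatically:

import inspect

class DebugRunMeta(type):
    """Metaclass variant of decallmethods: wraps the methods listed in
    METHODS_TO_INSPECT with debug_run on every class it creates."""
    def __new__(mcs, name, bases, namespace):
        cls = super().__new__(mcs, name, bases, namespace)
        for attr, value in namespace.items():
            if inspect.isfunction(value) and attr in getattr(cls, 'METHODS_TO_INSPECT', []):
                setattr(cls, attr, debug_run(value))
        return cls

class B(metaclass=DebugRunMeta):
    METHODS_TO_INSPECT = ["bfoo1", "foo"]

    def bfoo1(self):
        pass

class A(B):  # created through DebugRunMeta as well, so its own methods get wrapped too
    METHODS_TO_INSPECT = ["foo"]

    def foo(self):
        print("print - A foo")

A().foo()    # logged by debug_run
A().bfoo1()  # logged too: B.bfoo1 was wrapped when B was created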
