I am trying to understand how to test a context manager with pytest. I created a class and need to count how many times the static method do_some_stuff was called:
class Iterator():
    def __init__(self):
        pass

    @staticmethod
    def do_some_stuff():
        pass

    def __enter__(self):
        return [i for i in range(10)]

    def __exit__(self, *args):
        return True

iterator = Iterator()

def f(iterator):
    with iterator as i:
        for _ in i:
            iterator.do_some_stuff()
I have created a py.test file and need to check that the function was called 10 times, but my solution isn't working:
import pytest
from unittest.mock import MagicMock

@pytest.fixture
def iterator():
    return MagicMock(spec=Iterator)

def test_f(iterator):
    f(iterator)
    assert iterator.do_some_stuff.call_count == 10
Thanks in advance
The reason your code doesn't work is that MagicMock(spec=Iterator) replaces the __enter__ method of your Iterator class with a MagicMock object; see the MagicMock documentation. This means that in your test, the value of i in function f is a MagicMock object instead of list(range(10)), so the code inside the for loop is never executed.
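You can verify this in a REPL (a quick check, assuming the Iterator class above is in scope):

from unittest.mock import MagicMock

m = MagicMock(spec=Iterator)
with m as i:
    print(type(i))   # <class 'unittest.mock.MagicMock'>, not a list
    print(list(i))   # [] -- iterating a MagicMock yields nothing by default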
To make it work, you will probably only want to mock the do_some_stuff method:
import pytest
from unittest.mock import Mock

@pytest.fixture
def iterator():
    it = Iterator()
    it.do_some_stuff = Mock()
    return it

def test_f(iterator):
    f(iterator)
    assert iterator.do_some_stuff.call_count == 10
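An equivalent approach, if you prefer not to assign the mock by hand, is patch.object (a sketch under the same assumptions, with Iterator and f in scope; not part of the original answer):

from unittest.mock import patch

def test_f_with_patch():
    it = Iterator()
    with patch.object(it, 'do_some_stuff') as mocked:
        f(it)
    assert mocked.call_count == 10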
In brief, I have a DataFormatter class that has two possible states, train or infer, and should act similarly to the many sklearn classes that have fit and transform functions: if the mode is train, I want to store in self.metadata a list of the method calls and args that were made, so that they can simply be reapplied verbatim and in order at infer time.
So minimally, I have:
import inspect

class DataFormatter:
    def __init__(self, mode, data=None):
        self.mode = mode
        self.data = data
        self.metadata = []

    # The decorator function: broken, but something like--
    def meta(self, f, *args):
        def wrapper(*args):
            return f(*args)
        if self.mode == 'train':
            print('caching metadata')
            meta = {
                f.__name__: {
                    param: arg for param, arg in zip(
                        inspect.getfullargspec(f).args, args)}}
            self.metadata.append(meta)
        return wrapper

    @meta
    def drop(self, cols):
        self.data = self.data.drop(cols)
Then if I use:
formatter = DataFormatter('train', my_data)
formatter.drop(['col1', 'col5'])
print(formatter.metadata)
...I would like to get:
[{'drop': {'cols': ['col1', 'col5']}}]
I have tried various permutations and placements of self, and pulling the decorator func outside the class altogether, but no luck so far.
@kindall says, "You can't get self at decoration time because the decorator is applied at function definition time. No self exists yet; in fact, the class doesn't exist yet." (Possible to create a @synchronized decorator that's aware of a method's object?), so I'm not even sure if this is possible...
The function that will "see" self is the decorated one, and it is represented by the function you call "wrapper":
import inspect

# The decorator function: should be outside the class body
def meta(f):
    def wrapper(self, *args):
        # call the decorated function:
        # (but you could run other code, including inspecting
        # and modifying arguments, here as well)
        result = f(self, *args)
        # now your original method has run, and you have
        # access to self:
        if self.mode == 'train':
            print('caching metadata')
            meta = {
                f.__name__: {
                    # skip 'self' in the argspec so the parameter names
                    # line up with the positional args received here
                    param: arg for param, arg in zip(
                        inspect.getfullargspec(f).args[1:], args)}}
            self.metadata.append(meta)
        return result
    return wrapper

class DataFormatter:
    def __init__(self, mode, data=None):
        self.mode = mode
        self.data = data
        self.metadata = []

    @meta
    def drop(self, cols):
        self.data = self.data.drop(cols)
That will work.
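For example, with a minimal stand-in for the data object (FakeData is hypothetical, just to make the snippet self-contained):

class FakeData:
    def drop(self, cols):
        return self  # stand-in for a real DataFrame

formatter = DataFormatter('train', FakeData())
formatter.drop(['col1', 'col5'])
print(formatter.metadata)  # [{'drop': {'cols': ['col1', 'col5']}}]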
In the code below, I am using a metaclass along with a decorator to decorate all the user-defined methods.
It works for all instance methods, but for staticmethods it fails because of the self argument; to avoid that I am using a try/except block, which solves the problem here, but in one of my projects it's not working out.
Is there a better way of decorating the output of a staticmethod via a function decorator enclosed in a metaclass?
from functools import wraps
import types

def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(self, *args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        try:
            result = input_function(self, *args, **kwargs)
        except:
            result = input_function(*args, **kwargs)
        return result
    return wrapper

class DecoratorMetaClass(type):
    def __new__(meta, name, bases, class_dict):
        klass = super().__new__(meta, name, bases, class_dict)
        for key in dir(klass):
            value = getattr(klass, key)
            if isinstance(value, types.FunctionType) and "__" not in key:
                wrapped = decorator_function(value)
                setattr(klass, key, wrapped)
        return klass

class InterfaceClass(metaclass=DecoratorMetaClass):
    def function(self, test_parameter=1):
        print(f"function - Test Parameter= {test_parameter}")

    @staticmethod
    def static_function(test_parameter=1):
        print(f"static_function - Test Parameter= {test_parameter}")

class UserClass(InterfaceClass, metaclass=DecoratorMetaClass):
    def __init__(self):
        pass

    def function_2(self, test_parameter=1):
        print(f"function_2 - Test Parameter= {test_parameter}")

instance = UserClass()
instance.function(test_parameter=2)
instance.function_2(test_parameter=2)
instance.static_function(test_parameter=2)
print(isinstance(instance, InterfaceClass))
PS: I am not using a class decorator because it causes the isinstance checks to fail.
Explanation
The major problem comes down to the methods' parameters; you were almost there.
You have to make the decorator's arguments compatible with your methods' parameters.
You can change the signature of the function wrapper from wrapper(self, *args, **kwargs) to wrapper(*args, **kwargs) and then just return input_function(*args, **kwargs). You don't need the try/except block for this decorator:
def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(*args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        return input_function(*args, **kwargs)
    return wrapper
Ideally, you should add *args (variable positional arguments) and **kwargs (variable keyword arguments) to the methods to make them compatible with your decorator.
In this case I added *args before test_parameter=1 in static_function of InterfaceClass:
class InterfaceClass(metaclass=DecoratorMetaClass):
    @staticmethod
    def static_function(*args, test_parameter=1):
        print(f"static_function - Test Parameter= {test_parameter}")
Runnable Code
from functools import wraps
import types

def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(*args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        return input_function(*args, **kwargs)
    return wrapper

class DecoratorMetaClass(type):
    def __new__(meta, name, bases, class_dict):
        klass = super().__new__(meta, name, bases, class_dict)
        for key in dir(klass):
            value = getattr(klass, key)
            if isinstance(value, types.FunctionType) and "__" not in key:
                wrapped = decorator_function(value)
                setattr(klass, key, wrapped)
        return klass

class InterfaceClass(metaclass=DecoratorMetaClass):
    def function(self, test_parameter=1):
        print(f"function - Test Parameter= {test_parameter}")

    @staticmethod
    def static_function(*args, test_parameter=1):
        print(f"static_function - Test Parameter= {test_parameter}")

class UserClass(InterfaceClass, metaclass=DecoratorMetaClass):
    def __init__(self):
        pass

    def function_2(self, test_parameter=1):
        print(f"function_2 - Test Parameter= {test_parameter}")

instance = UserClass()
instance.function(test_parameter=2)
instance.function_2(test_parameter=2)
instance.static_function(test_parameter=2)
UserClass.static_function(test_parameter=3)
print(isinstance(instance, InterfaceClass))
Output
function - Test Parameter= 999
function_2 - Test Parameter= 999
static_function - Test Parameter= 999
static_function - Test Parameter= 999
True
Addressing OP's comment
Considering test_parameter is always a named parameter, write decorator_function as follows:
def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(*args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        try:
            result = input_function(*args, **kwargs)
        except TypeError:
            result = input_function(**kwargs)
        return result
    return wrapper
This way you don't need to change the methods' signatures.
If you also call the functions with positional arguments, you will need to check the type of the first argument inserted into args; things get complicated and error-prone.
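A way to avoid that guessing altogether (a sketch, not part of the original answer): detect staticmethods at class-creation time in the metaclass and re-wrap them, so no receiver ever reaches the wrapper:

import types
from functools import wraps

def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(*args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        return input_function(*args, **kwargs)
    return wrapper

class DecoratorMetaClass(type):
    def __new__(meta, name, bases, class_dict):
        for key, value in list(class_dict.items()):
            if key.startswith("__"):
                continue
            if isinstance(value, staticmethod):
                # unwrap, decorate, and re-wrap as a staticmethod
                class_dict[key] = staticmethod(
                    decorator_function(value.__func__))
            elif isinstance(value, types.FunctionType):
                class_dict[key] = decorator_function(value)
        return super().__new__(meta, name, bases, class_dict)

class InterfaceClass(metaclass=DecoratorMetaClass):
    @staticmethod
    def static_function(test_parameter=1):
        print(f"static_function - Test Parameter= {test_parameter}")

InterfaceClass.static_function(test_parameter=2)    # prints 999
InterfaceClass().static_function(test_parameter=2)  # prints 999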
I have a simple iteration object that uses a generator, but the __iter__() method of Obj cannot get an iterator from Generator().__init__():
#!/usr/bin/env python3

class Obj():
    def __init__(self, word):
        self.word = word

    def __iter__(self):
        return Generator(self.word)

class Generator():
    def __init__(self, word):
        for l in word:
            yield l

obj = Obj('qwe')
it = iter(obj)
print(it.__next__())
print(it.__next__())
print(it.__next__())
I expect console output to be 'qwe'.
__init__ cannot return or yield a non-None value. Because Generator.__init__ contains a yield, it is a generator function, so Generator(...) returns a generator object from __init__ and raises a TypeError instead of producing a usable iterator. Obj.__iter__ is the method that should be yielding values, and you can probably get rid of the Generator class altogether, unless it is used for other things that you haven't shown in your posted snippet.
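To see the failure concretely (a minimal check, separate from your snippet):

class Weird:
    def __init__(self):
        yield 1

Weird()  # TypeError: __init__() should return None, not 'generator'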
class Obj():
    def __init__(self, word):
        self.word = word

    def __iter__(self):
        for l in self.word:
            yield l

obj = Obj('qwe')
it = iter(obj)
print(next(it))
print(next(it))
print(next(it))
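If you do want to keep the generator separate, a sketch is to use a plain generator function instead of a class, since calling a generator function already returns an iterator:

def generate(word):
    for letter in word:
        yield letter

class Obj():
    def __init__(self, word):
        self.word = word

    def __iter__(self):
        return generate(self.word)

it = iter(Obj('qwe'))
print(next(it))  # q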
I found the default implementation of Python's multiprocessing.Queue lacking, in that it's not iterable like any other collection. So I went about creating a 'subclass' of it that adds the feature. As you can see from the code below, it's not a proper subclass: multiprocessing.Queue isn't a direct class itself but a factory function, and the real underlying class is multiprocessing.queues.Queue. I don't have the understanding nor the effort to expend necessary to mimic the factory function just so I can inherit from the class properly, so I simply had the new class create its own instance from the factory and treat it as the superclass. Here is the code:
from multiprocessing import Queue, Value, Lock
import queue

class QueueClosed(Exception):
    pass

class IterableQueue:
    def __init__(self, maxsize=0):
        self.closed = Value('b', False)
        self.close_lock = Lock()
        self.queue = Queue(maxsize)

    def close(self):
        with self.close_lock:
            self.closed.value = True
            self.queue.close()

    def put(self, elem, block=True, timeout=None):
        with self.close_lock:
            if self.closed.value:
                raise QueueClosed()
            else:
                self.queue.put(elem, block, timeout)

    def put_nowait(self, elem):
        self.put(elem, False)

    def get(self, block=True):
        if not block:
            return self.queue.get_nowait()
        elif self.closed.value:
            try:
                return self.queue.get_nowait()
            except queue.Empty:
                return None
        else:
            val = None
            while not self.closed.value:
                try:
                    val = self.queue.get_nowait()
                    break
                except queue.Empty:
                    pass
            return val

    def get_nowait(self):
        return self.queue.get_nowait()

    def join_thread(self):
        return self.queue.join_thread()

    def __iter__(self):
        return self

    def __next__(self):
        val = self.get()
        if val is None:
            raise StopIteration()
        else:
            return val

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
This allows me to instantiate an IterableQueue object just like a normal multiprocessing.Queue, put elements into it as normal, and then inside child consumers simply loop over it like so:
from iterable_queue import IterableQueue
from multiprocessing import Process, cpu_count
import os

def fib(n):
    if n < 2:
        return n
    return fib(n-1) + fib(n-2)

def consumer(queue):
    print(f"[{os.getpid()}] Consuming")
    for i in queue:
        print(f"[{os.getpid()}] < {i}")
        n = fib(i)
        print(f"[{os.getpid()}] {i} > {n}")
    print(f"[{os.getpid()}] Closing")

def producer():
    print("Enqueueing")
    with IterableQueue() as queue:
        procs = [Process(target=consumer, args=(queue,)) for _ in range(cpu_count())]
        [p.start() for p in procs]
        [queue.put(i) for i in range(36)]
    print("Finished")

if __name__ == "__main__":
    producer()
and it works almost seamlessly; the consumers exit the loop once the queue has been closed, but only after exhausting all remaining elements. However, I was unsatisfied with the lack of inherited methods. In an attempt to mimic actual inheritance behavior, I tried adding the following __getattr__ method to the class:
def __getattr__(self, name):
    if name in self.__dict__:
        return self.__dict__[name]
    else:
        return getattr(self.queue, name)
However, this fails when instances of the IterableQueue class are manipulated inside child multiprocessing.Process processes, as the class's __dict__ property is not preserved within them. I attempted to remedy this in a hacky manner by replacing the class's default __dict__ with a multiprocessing.Manager().dict(), like so:
def __init__(self, maxsize=0):
    self.closed = Value('b', False)
    self.close_lock = Lock()
    self.queue = Queue(maxsize)
    self.__dict__ = Manager().dict(self.__dict__)
However, on doing so I received an error stating RuntimeError: Synchronized objects should only be shared between processes through inheritance. So my question is: how should I go about inheriting from the Queue class properly, such that the subclass has inherited access to all of its properties? In addition, while the queue is empty but not closed, the consumers all sit in a busy loop instead of a true IO block, taking up valuable CPU resources. If you have any suggestions on concurrency and race-condition issues I might run into with this code, or on how I might solve the busy-loop issue, I'd be willing to take those as well.
Based on code provided by MisterMiyagi, I created this general-purpose IterableQueue class, which can accept arbitrary input, blocks properly, and does not hang on queue close:
from multiprocessing.queues import Queue
from multiprocessing import get_context

class QueueClosed(Exception):
    pass

class IterableQueue(Queue):
    def __init__(self, maxsize=0, *, ctx=None):
        super().__init__(
            maxsize=maxsize,
            ctx=ctx if ctx is not None else get_context()
        )

    def close(self):
        super().put((None, False))
        super().close()

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return self.get()
        except QueueClosed:
            raise StopIteration

    def get(self, *args, **kwargs):
        result, is_open = super().get(*args, **kwargs)
        if not is_open:
            super().put((None, False))
            raise QueueClosed
        return result

    def put(self, val, *args, **kwargs):
        super().put((val, True), *args, **kwargs)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
The multiprocessing.Queue wrapper only serves to supply the default context:
def Queue(self, maxsize=0):
    '''Returns a queue object'''
    from .queues import Queue
    return Queue(maxsize, ctx=self.get_context())
When inheriting, you can replicate this in the __init__ method. This allows you to inherit the entire Queue behaviour. You only need to add the iterator methods:
import time

from multiprocessing.queues import Queue
from multiprocessing import get_context

class IterableQueue(Queue):
    """
    ``multiprocessing.Queue`` that can be iterated to ``get`` values

    :param sentinel: signal that no more items will be received
    """
    def __init__(self, maxsize=0, *, ctx=None, sentinel=None):
        self.sentinel = sentinel
        super().__init__(
            maxsize=maxsize,
            ctx=ctx if ctx is not None else get_context()
        )

    def close(self):
        self.put(self.sentinel)
        # wait until buffer is flushed...
        while self._buffer:
            time.sleep(0.01)
        # ...before shutting down the sender
        super().close()

    def __iter__(self):
        return self

    def __next__(self):
        result = self.get()
        if result == self.sentinel:
            # re-queue sentinel for other listeners
            self.put(result)
            raise StopIteration
        return result
Note that the sentinel indicating end-of-queue is compared by equality, because identity is not preserved across processes. The often-used queue.Queue sentinel, object(), does not work properly with this.
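A sketch of how this might be used (illustrative names; note that multiprocessing.queues.Queue defines a custom __getstate__, so the extra sentinel attribute only reaches child processes under the fork start method; under spawn you would also need to override __getstate__/__setstate__):

from multiprocessing import Process

def consumer(queue):
    for item in queue:  # stops cleanly once the sentinel arrives
        print("got", item)

if __name__ == "__main__":
    queue = IterableQueue()
    proc = Process(target=consumer, args=(queue,))
    proc.start()
    for i in range(5):
        queue.put(i)
    queue.close()  # sends the sentinel, flushes, then closes
    proc.join()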
I have a method decorator that looks like this:
def debug_run(fn):
    from functools import wraps

    @wraps(fn)
    def wrapper(self, *args, **kw):
        # log some stuff
        # timeit fn
        res = fn(self, *args, **kw)
        return res
    return wrapper
Right now I apply it to each method that I want to debug. Now I'm trying to apply it to all class methods using a class decorator.
Rather than doing
class A():
    @debug_run
    def f(self):
        pass
I do
@decallmethods(debug_run)
class A():
    def f(self):
        pass

def decallmethods(decorator):
    def dectheclass(cls):
        for name, m in inspect.getmembers(cls, inspect.ismethod):
            if name in getattr(cls, 'METHODS_TO_INSPECT', []):
                setattr(cls, name, decorator(m))
        return cls
    return dectheclass
Trying to apply the decorator to the base class does not work as expected: no log appears on the console. Now I wonder whether this approach is the right one, or whether I should use something else (apply the debug decorator to selected methods from the base class down to all subclasses).
[EDIT]
I finally found out why no logs were printed:
Why is there a difference between inspect.ismethod and inspect.isfunction from python 2 -> 3?
Here is a complete example reflecting my code:
import inspect
import time
import logging as logger
from functools import wraps

logger.basicConfig(format='LOGGER - %(asctime)s %(message)s', level=logger.DEBUG)

def debug_run(fn):
    @wraps(fn)
    def wrapper(self, *args, **kw):
        logger.debug(
            "call method %s of instance %s with %r and %s "
            % (fn.__name__, self, args, kw))
        time1 = time.time()
        res = fn(self, *args, **kw)
        time2 = time.time()
        logger.debug(
            "%s function %0.3f ms" % (fn, (time2-time1)*1000.0))
        return res
    return wrapper

def decallmethods(decorator):
    def dectheclass(cls):
        for name, m in inspect.getmembers(
                cls, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x)):
            methods_to_inspect = getattr(cls, 'METHODS_TO_INSPECT', [])
            if name in methods_to_inspect:
                setattr(cls, name, decorator(m))
        return cls
    return dectheclass

class B(object):
    METHODS_TO_INSPECT = ["bfoo1", "bfoo2", "foo"]

    def __str__(self):
        return "%s:%s" % (repr(self), id(self))

    def bfoo1(self):
        pass

    def bfoo2(self):
        pass

    def foo(self):
        pass

    def run(self):
        print("print - Base run doing nothing")

class C(object):
    pass

@decallmethods(debug_run)
class A(B, C):
    METHODS_TO_INSPECT = ["bfoo1", "bfoo2", "foo", "run"]

    def foo(self):
        print("print - A foo")

    def run(self):
        self.bfoo1()
        self.bfoo2()
        self.foo()

a = A()
b = B()
a.run()
b.run()
In this case, applying decallmethods to B will not affect A, so I must apply it to both A and B, and thus to all subclasses of B.
Is it possible to have a mechanism that permits applying decallmethods to all subclass methods?
Look at this: How can I decorate all functions of a class without typing it over and over for each method added? (Python)
delnan has a good answer; just add this rule to his answer:

if name in getattr(cls, 'METHODS_TO_INSPECT', []):
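For the subclass question specifically, one possible mechanism (a sketch, assuming the debug_run decorator from the example above; __init_subclass__ requires Python 3.6+) is to let a common base class decorate each subclass as it is defined:

import inspect

class DebuggedBase:
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # decorate only methods defined directly on this subclass;
        # inherited ones were already wrapped on the parent
        for name in getattr(cls, 'METHODS_TO_INSPECT', []):
            attr = cls.__dict__.get(name)
            if inspect.isfunction(attr):
                setattr(cls, name, debug_run(attr))

class B(DebuggedBase):
    METHODS_TO_INSPECT = ["bfoo1", "foo"]

    def bfoo1(self):
        pass

    def foo(self):
        pass

class A(B):
    METHODS_TO_INSPECT = ["foo", "run"]

    def foo(self):
        print("print - A foo")

    def run(self):
        self.bfoo1()
        self.foo()

A().run()  # logs bfoo1 (wrapped on B) plus foo and run (wrapped on A)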