Python: Pseudo Enums - Classes as enums - how to avoid cyclic import - python-3.x

I want to create a pseudo enum in my Python project.
The values are actually classes.
# file my_enums.py
from my_class1 import MyClass1
from my_class2 import MyClass2
from my_class3 import MyClass3

class MyEnum:
    MY_CLASS_1 = MyClass1
    MY_CLASS_2 = MyClass2
    MY_CLASS_3 = MyClass3

# file my_class1.py
from my_enums import MyEnum

class MyClass1:
    def foo(self, x):
        print(isinstance(x, MyEnum.MY_CLASS_2))
Doing this results in a cyclic import error.
I want to be able to use the MyEnum values with isinstance and to import the enum into modules that define some of those classes.
Is there a way to do so?
Solution:
# file my_enums.py
from my_class1 import MyClass1
from my_class2 import MyClass2
from my_class3 import MyClass3

class MyEnum:
    MY_CLASS_1 = None
    MY_CLASS_2 = None
    MY_CLASS_3 = None

    @classmethod
    def define(cls):
        cls.MY_CLASS_1 = MyClass1
        cls.MY_CLASS_2 = MyClass2
        cls.MY_CLASS_3 = MyClass3

MyEnum.define()

The thing to remember is that when a module is loaded, it is executed -- but only top-level statements and the immediate interior of top-level classes; the bodies of functions and methods are not evaluated until they are actually called.
# example module

CONSTANT = 7                 # top-level, executed

def a_func(value=CONSTANT):  # top-level, executed
    return value + 9         # body, not executed

class a_class(metaclass=SomeMeta):  # top-level, executed (and an error, as
                                    # SomeMeta has not been defined nor imported)
    CLS_CONSTANT = 3                # top-level class body, executed
    def a_method(self):             # executed
        return self.CLS_CONSTANT + FUTURE_CONSTANT  # method body, not executed

FUTURE_CONSTANT = 11
So in your example you need to make sure MyEnum is not used anywhere in my_class1.py that will be executed during import, and to put the import of my_enums.py at the very end of the module. Then, when my_enums.py is executed during its import, it will be able to import my_class1, which will, at that point, have its classes defined.
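For concreteness, here is a minimal sketch of that arrangement (assuming the module names my_class1.py and my_enums.py from the question):

# file my_class1.py
class MyClass1:
    def foo(self, x):
        # MyEnum is only touched inside a method body, which is not
        # executed during import, so the import cycle is harmless here
        print(isinstance(x, my_enums.MyEnum.MY_CLASS_2))

# the import goes at the very end of the module
import my_enums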

Related

Require overriding method to call super()

I'd like to force certain methods in child classes to invoke the method they're overriding.
@abstractmethod can require that certain methods be implemented; I'd like a similar behavior (i.e., if the overriding method doesn't call super(), don't execute it and complain to the user).
Example:
class Foo:
    @must_call_super
    def i_do_things(self):
        print('called')

class Good(Foo):
    def i_do_things(self):
        # super().i_do_things() is called; will run.
        super().i_do_things()
        print('called as well')

class Bad(Foo):
    def i_do_things(self):
        # should complain that super().i_do_things isn't called here
        print('called as well')

# should work fine
good = Good()
# should error
bad = Bad()
Thanks for sending me down the rabbit hole.
Below is my solution to this problem. It uses a metaclass, ast, and some hacking to detect whether a child class calls super().some_func() in its version of the some_func method.
Core classes
These should be controlled by the developer.
import inspect
import ast
import textwrap

class Analyzer(ast.NodeVisitor):
    def __init__(self, ast_sig: str):
        self.func_exists = False
        self.sig = ast_sig

    def visit_Call(self, node):
        """Traverse the ast tree. Once a node's signature matches the given
        method call's signature, we consider that the method call exists.
        """
        # print(ast.dump(node))
        if ast.dump(node) == self.sig:
            self.func_exists = True
        self.generic_visit(node)

class FooMeta(type):
    # _ast_sig_super_methods stores the ast signature of any method for which
    # a `super().method()` call must be made in its overridden version in an
    # inherited child. One can add more methods and their associated ast sigs
    # to this dict.
    _ast_sig_super_methods = {
        'i_do_things': "Call(func=Attribute(value=Call(func=Name(id='super', ctx=Load()), args=[], keywords=[]), attr='i_do_things', ctx=Load()), args=[], keywords=[])",
    }

    def __new__(cls, name, bases, dct):
        # cls   = FooMeta
        # name  = current class name
        # bases = any parents of the current class
        # dct   = namespace dict of the current class
        for method, ast_sig in FooMeta._ast_sig_super_methods.items():
            if name != 'Foo' and method in dct:  # desired method in subclass
                source = inspect.getsource(dct[method])     # get source code
                formatted_source = textwrap.dedent(source)  # correct indentation
                tree = ast.parse(formatted_source)          # obtain ast tree
                analyzer = Analyzer(ast_sig)
                analyzer.visit(tree)
                if not analyzer.func_exists:
                    raise RuntimeError(f'super().{method} is not called in {name}.{method}!')
        return super().__new__(cls, name, bases, dct)

class Foo(metaclass=FooMeta):
    def i_do_things(self):
        print('called')
Usage and Effect
This part is written by other people, on whom we want to impose the rule that super().i_do_things() must be called in the overridden version in their inherited classes.
Good
class Good(Foo):
    def i_do_things(self):
        # super().i_do_things() is called; will run.
        super().i_do_things()
        print('called as well')

good = Good()
good.i_do_things()
# output:
# called
# called as well
Bad
class Bad(Foo):
    def i_do_things(self):
        # should complain that super().i_do_things isn't called here
        print('called as well')

# Error output:
# RuntimeError: super().i_do_things is not called in Bad.i_do_things!
Secretly Bad
class Good(Foo):
    def i_do_things(self):
        # super().i_do_things() is called; will run.
        super().i_do_things()
        print('called as well')

class SecretlyBad(Good):
    def i_do_things(self):
        # shall also complain that super().i_do_things isn't called
        print('called as well')

# Error output:
# RuntimeError: super().i_do_things is not called in SecretlyBad.i_do_things!
Note
Since FooMeta runs when the inherited classes are defined, not when they are instantiated, the error is raised before Bad().i_do_things() or SecretlyBad().i_do_things() is ever called. This is not exactly the requirement stated by the OP, but it does achieve the same end goal.
To obtain the ast signature of super().i_do_things(), we can uncomment the print statement in Analyzer, analyze the source code of Good.i_do_things, and inspect from there.
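For example, a quick way to list candidate signatures without the metaclass in the way (a sketch; run it against the Good class defined above):

import ast
import inspect
import textwrap

# dump every Call node in Good.i_do_things; the entry for
# super().i_do_things() is the string to store in _ast_sig_super_methods
source = textwrap.dedent(inspect.getsource(Good.i_do_things))
for node in ast.walk(ast.parse(source)):
    if isinstance(node, ast.Call):
        print(ast.dump(node))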

Conflict between mix-ins for abstract dataclasses

1. A problem with dataclass mix-ins, solved
To make abstract dataclasses that type-check under mypy, I've been breaking them into two classes, one that contains the abstract methods and one that contains the data members, as explained in this answer. The abstract class inherits from the dataclass. This runs into a problem, though, when another abstract-class-and-dataclass pair inherits from the first one: the "ancestor" dataclass's fields get wiped out by the "descendant". For example:
from dataclasses import dataclass
from abc import ABC, abstractmethod

@dataclass
class ADataclassMixin:
    a_field: int = 1

class A(ADataclassMixin, ABC):
    @abstractmethod
    def method(self):
        pass

@dataclass
# class BDataclassMixin(A):  # works but fails mypy 0.931 type-check
class BDataclassMixin:  # fails
    b_field: int = 2

class B(BDataclassMixin, A):
    def method(self):
        return self

o = B(a_field=5)
The last line fails, yielding this error message:
TypeError: BDataclassMixin.__init__() got an unexpected keyword argument 'a_field'
B's method-resolution order (B.__mro__) is (B, BDataclassMixin, A, ADataclassMixin, ABC, object), as expected. But a_field is not found.
A solution, shown in the commented-out line above, is to put the ancestor class explicitly in the descendant dataclass's declaration: class BDataclassMixin(A) instead of class BDataclassMixin. This fails type-checking, though, because a dataclass can only be a concrete class.
2. A problem with that solution, unsolved
The above solution breaks down if we add a third class, inheriting from B:
@dataclass
# class CDataclassMixin:  # fails
class CDataclassMixin(A):  # fails
# class CDataclassMixin(B, A):  # works but fails type-check
    c_field: int = 3

class C(CDataclassMixin, B):
    def method(self):
        return "C's result"

o = C(b_field=5)
Now, C has a_field and c_field but has lost b_field.
I have found that if I declare CDataclassMixin explicitly to inherit from B and A (in that order), b_field will be in the resulting class along with a_field and c_field. However, explicitly stating the inheritance hierarchy in every mix-in defeats the purpose of mix-ins, which is to be able to code them independently of all the other mix-ins and to combine them easily any way you like.
What is the correct way to make abstract dataclass mix-ins, so that classes that inherit from them include all the dataclass fields?
The correct solution is to abandon the DataclassMixin classes and simply make the abstract classes into dataclasses, like this:
@dataclass  # type: ignore[misc]
class A(ABC):
    a_field: int = 1

    @abstractmethod
    def method(self):
        pass

@dataclass  # type: ignore[misc]
class B(A):
    b_field: int = 2

@dataclass
class C(B):
    c_field: int = 3

    def method(self):
        return self
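A quick check of this hierarchy (a sketch; all three fields now reach C's generated __init__):

o = C(a_field=5, b_field=6, c_field=7)
print((o.a_field, o.b_field, o.c_field))  # (5, 6, 7)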
The reason for the failures is that, as explained in the documentation on dataclasses, the complete set of fields in a dataclass is determined when the @dataclass decorator is applied, not when the dataclass is inherited from. The internal code that generates the dataclass's __init__ function can only examine the MRO of the dataclass as it is declared on its own, not as it is mixed into another class.
It's necessary to add # type: ignore[misc] to each abstract dataclass's @dataclass line, not because the solution is wrong but because mypy is wrong. It is mypy, not Python, that requires dataclasses to be concrete. As explained by ilevkivskyi in mypy issue 5374, the problem is that mypy wants a dataclass to be a Type object and for every Type object to be capable of being instantiated. This is a known problem and awaits a resolution.
The behavior in the question and in the solution is exactly how dataclasses should behave. And, happily, abstract dataclasses that inherit this way (the ordinary way) can be mixed into other classes willy-nilly no differently than other mix-ins.
Putting the mixin as the last base class works without error:
from dataclasses import dataclass
from abc import ABC, abstractmethod

@dataclass
class ADataclassMixin:
    a_field: int = 1

class A(ABC, ADataclassMixin):
    @abstractmethod
    def method(self):
        pass

@dataclass
class BDataclassMixin:
    b_field: int = 2

class B(A, BDataclassMixin):
    def method(self):
        return self

o = B(a_field=5)
print((o.a_field, o.b_field))  # (5, 2)

Retaining a variable created during module import in python

I am trying to populate a dictionary mapping function names to the functions contained in another file, of the form:
{'fn_a': <function fn_a at 0x000002239BDCB510>, 'fn_b': <function fn_b at 0x000002239BDCB268>}.
I'm currently attempting to do it with a decorator, so that the dictionary is populated when the file containing the functions (definitions.py) is imported, as follows. The problem is that the dictionary is cleared once the import is complete.
definitions.py:
from main import formatter

@formatter
def fn_a(arg):
    return arg

@formatter
def fn_b(arg):
    return arg

main.py:
available_functions = {}

def formatter(func):
    # work out the function name and write it to func_name
    func_name = str(func).split()[1]
    available_functions[func_name] = func
    return func

import definitions
How can I keep the dictionary populated with values after the module import is finished?
I was able to solve the problem using FunctionType from the types module to collect the available functions from the imported module. It doesn't solve the problem within the conditions I specified above, but it does work.
from types import FunctionType

available_functions = {}

def formatter(func):
    # work out the function name and write it to func_name
    # (func.__name__ would be a simpler alternative)
    # global available_functions
    func_name = str(func).split()[1]
    available_functions[func_name] = func
    return func

import definitions

funcs = [getattr(definitions, a) for a in dir(definitions)
         if isinstance(getattr(definitions, a), FunctionType)]
for i in funcs:
    formatter(i)
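The underlying issue is the circular import: when main.py runs as a script it is the __main__ module, so `from main import formatter` in definitions.py executes main.py a second time and builds a second, separate available_functions dict. One way to avoid this entirely (a sketch; registry.py is a hypothetical module name) is to keep the registry in its own module that both files import:

# registry.py -- holds the shared registry; imported by both sides
available_functions = {}

def formatter(func):
    available_functions[func.__name__] = func
    return func

# definitions.py
from registry import formatter

@formatter
def fn_a(arg):
    return arg

# main.py
import definitions
from registry import available_functions
print(available_functions)  # {'fn_a': <function fn_a at 0x...>}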

How do you annotate the type of an abstract class with mypy?

I'm writing a library where I need a method that takes a (potentially) abstract type, and returns an instance of a concrete subtype of that type:
# script.py
from typing import Type, TypeVar
from abc import ABC, abstractmethod

class AbstractClass(ABC):
    @abstractmethod
    def abstract_method(self):
        pass

T = TypeVar('T', bound=AbstractClass)

def f(c: Type[T]) -> T:
    # find a concrete implementation of c based on
    # environment configuration
    ...

f(AbstractClass)  # doesn't type check
Running mypy script.py yields:
error: Only concrete class can be given where "Type[AbstractClass]" is expected
I don't understand this error message and am having a hard time finding any documentation for it. Is there any way to annotate the function so that mypy will type check this?
As a side note, PyCharm's type checker, which is what I use the most, type checks f with no errors.
It does appear that mypy is a bit biased against using an abstract base class this way, though as you demonstrate there are valid use cases.
You can work around this by making your factory function a class method on your abstract class. If stylistically you'd like to have a top-level function as a factory, then you can create an alias to the class method.
from typing import TYPE_CHECKING
from abc import ABC, abstractmethod

class AbstractClass(ABC):
    @abstractmethod
    def abstract_method(self):
        raise NotImplementedError

    @classmethod
    def make_concrete(cls) -> 'AbstractClass':
        """
        find concrete implementation based on environment configuration
        """
        return A()

class A(AbstractClass):
    def abstract_method(self):
        print("a")

# make alias
f = AbstractClass.make_concrete
x = f()
if TYPE_CHECKING:
    reveal_type(x)  # AbstractClass
Note that, without more work, mypy cannot know which concrete class is created by the factory function, it will only know that it is compatible with AbstractClass, as demonstrated by the output of reveal_type.
Alternately, if you're willing to give up the runtime checking provided by abc.ABC, you can get something even closer to your original design:
from typing import TYPE_CHECKING
from abc import abstractmethod

class AbstractClass:  # do NOT inherit from abc.ABC
    @abstractmethod
    def abstract_method(self):
        raise NotImplementedError

class A(AbstractClass):
    def abstract_method(self):
        print("a")

class Bad(AbstractClass):
    pass

def f() -> AbstractClass:
    """
    find concrete implementation based on environment configuration
    """
    pass

b = Bad()  # mypy displays an error here: Cannot instantiate abstract class 'Bad' with abstract attribute 'abstract_method'
x = f()
if TYPE_CHECKING:
    reveal_type(x)  # AbstractClass
This works because mypy checks methods marked with @abstractmethod even if the class does not inherit from abc.ABC. But be warned that if you execute the program with plain Python, you will no longer get a runtime error when instantiating the Bad class without implementing its abstract methods.
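If you'd rather keep the original f(c: Type[T]) signature, a third option is to silence the check at each call site. In recent mypy versions this diagnostic carries the type-abstract error code (an assumption worth verifying against your mypy version; older releases may need a bare # type: ignore):

concrete = f(AbstractClass)  # type: ignore[type-abstract]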

Python, mocking and wrapping methods without instantating objects

I want to mock a method of a class and use wraps, so that it is actually called, but I can inspect the arguments passed to it. I have seen in several places (here, for example) that the usual way to do that is as follows (adapted to show my point):
from unittest import TestCase
from unittest.mock import patch

class Potato(object):
    def foo(self, n):
        return self.bar(n)

    def bar(self, n):
        return n + 2

class PotatoTest(TestCase):
    spud = Potato()

    @patch.object(Potato, 'foo', wraps=spud.foo)
    def test_something(self, mock):
        forty_two = self.spud.foo(n=40)
        mock.assert_called_once_with(n=40)
        self.assertEqual(forty_two, 42)
However, this instantiates the class Potato, in order to bind the mock to the instance method spud.foo.
What I need is to mock the method foo in all instances of Potato, with the mock wrapping the original method. I.e., I need the following:
from unittest import TestCase
from unittest.mock import patch

class Potato(object):
    def foo(self, n):
        return self.bar(n)

    def bar(self, n):
        return n + 2

class PotatoTest(TestCase):
    @patch.object(Potato, 'foo', wraps=Potato.foo)
    def test_something(self, mock):
        self.spud = Potato()
        forty_two = self.spud.foo(n=40)
        mock.assert_called_once_with(n=40)
        self.assertEqual(forty_two, 42)
This of course doesn't work. I get the error:
TypeError: foo() missing 1 required positional argument: 'self'
It works, however, if wraps is not used, so the problem is not in the mock itself but in the way it calls the wrapped function. For example, this works (but of course I had to "fake" the returned value, because now Potato.foo is never actually run):
from unittest import TestCase
from unittest.mock import patch

class Potato(object):
    def foo(self, n):
        return self.bar(n)

    def bar(self, n):
        return n + 2

class PotatoTest(TestCase):
    @patch.object(Potato, 'foo', return_value=42)  # , wraps=Potato.foo
    def test_something(self, mock):
        self.spud = Potato()
        forty_two = self.spud.foo(n=40)
        mock.assert_called_once_with(n=40)
        self.assertEqual(forty_two, 42)
This works, but it does not run the original function, which I need to run because the return value is used elsewhere (and I cannot fake it from the test).
Can it be done?
Note: the actual reason behind my need is that I'm testing a REST API with webtest. From the tests I perform WSGI requests to some paths, and my framework instantiates some classes and uses their methods to fulfill the request. I want to capture the parameters sent to those methods so I can make asserts about them in my tests.
In short, you can't do this using Mock instances alone.
patch.object creates a Mock for the specified attribute (Potato.foo), i.e. it replaces Potato.foo with a single Mock the moment it is called. Therefore, there is no way to pass instances to the Mock, as the mock is created before any instances are. To my knowledge, getting instance information to the Mock at runtime is also very difficult.
To illustrate:
from unittest.mock import MagicMock, patch

class MyMock(MagicMock):
    def __init__(self, *a, **kw):
        super(MyMock, self).__init__(*a, **kw)
        print('Created Mock instance a={}, kw={}'.format(a, kw))

with patch.object(Potato, 'foo', new_callable=MyMock, wraps=Potato.foo):
    print('no instances created')
    spud = Potato()
    print('instance created')

The output is:
Created Mock instance a=(), kw={'name': 'foo', 'wraps': <function Potato.foo at 0x7f5d9bfddea0>}
no instances created
instance created
I would suggest monkey-patching your class in order to add the Mock to the correct location.
from unittest.mock import MagicMock

class PotatoTest(TestCase):
    def test_something(self):
        old_foo = Potato.foo
        try:
            # note: do not also set return_value here; an explicit
            # return_value would stop the wrapped function being called
            mock = MagicMock(wraps=Potato.foo)
            Potato.foo = lambda *a, **kw: mock(*a, **kw)
            self.spud = Potato()
            forty_two = self.spud.foo(n=40)
            mock.assert_called_once_with(self.spud, n=40)  # now needs the self instance
            self.assertEqual(forty_two, 42)
        finally:
            Potato.foo = old_foo
Note that using assert_called_once_with is now slightly different: because the mock wraps the function looked up on the class, it also receives the instance as its first argument.
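As a side note (a sketch, not from the original answer): TestCase.addCleanup can replace the try/finally for restoring the patched attribute:

class PotatoTest(TestCase):
    def test_something(self):
        # register the restore first; Potato.foo is still the original here
        self.addCleanup(setattr, Potato, 'foo', Potato.foo)
        mock = MagicMock(wraps=Potato.foo)
        Potato.foo = lambda *a, **kw: mock(*a, **kw)
        spud = Potato()
        self.assertEqual(spud.foo(n=40), 42)
        mock.assert_called_once_with(spud, n=40)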
Do you control creation of Potato instances, or at least have access to these instances after creating them? You should, else you'd not be able to check particular arg lists.
If so, you can wrap methods of individual instances using
spud = dig_out_a_potato()
with mock.patch.object(spud, "foo", wraps=spud.foo) as mock_spud:
# do your thing.
mock_spud.assert_called...
Your question looks identical to "python mock - patching a method without obstructing implementation" to me. https://stackoverflow.com/a/72446739/9230828 implements what you want (except that it uses a with statement instead of a decorator). wrap_object.py:
# Copyright (C) 2022, Benjamin Drung <bdrung@posteo.de>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import contextlib
import typing
import unittest.mock

@contextlib.contextmanager
def wrap_object(
    target: object, attribute: str
) -> typing.Generator[unittest.mock.MagicMock, None, None]:
    """Wrap the named member on an object with a mock object.

    wrap_object() can be used as a context manager. Inside the
    body of the with statement, the attribute of the target is
    wrapped with a :class:`unittest.mock.MagicMock` object. When
    the with statement exits the patch is undone.

    The instance argument 'self' of the wrapped attribute is
    intentionally not logged in the MagicMock call. Therefore
    wrap_object() can be used to check all calls to the object,
    but not differentiate between different instances.
    """
    mock = unittest.mock.MagicMock()
    real_attribute = getattr(target, attribute)

    def mocked_attribute(self, *args, **kwargs):
        mock.__call__(*args, **kwargs)
        return real_attribute(self, *args, **kwargs)

    with unittest.mock.patch.object(target, attribute, mocked_attribute):
        yield mock
Then you can write the following unit test:
from unittest import TestCase
from wrap_object import wrap_object

class Potato:
    def foo(self, n):
        return self.bar(n)

    def bar(self, n):
        return n + 2

class PotatoTest(TestCase):
    def test_something(self):
        with wrap_object(Potato, 'foo') as mock:
            self.spud = Potato()
            forty_two = self.spud.foo(n=40)
            mock.assert_called_once_with(n=40)
            self.assertEqual(forty_two, 42)
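Because the wrapper deliberately drops self, calls from every instance land on the same mock; a small sketch of what that implies:

with wrap_object(Potato, 'foo') as mock:
    Potato().foo(n=1)
    Potato().foo(n=2)
    # both calls are recorded, but nothing identifies which instance made them
    assert mock.call_count == 2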
