Python unittest class issue - python-3.x

I'm trying to write a unit test class in Python, but I feel like I'm missing something fundamental, as it's not doing what I would expect. Here is my class:
from unittest import TestCase

class MyTestClass(TestCase):
    def __init__(self):
        self.file_name = None

    def setUp(self):
        self.file_name = 'give this file a name'
        return self.file_name

    def test_a_file_name(self):
        assert self.file_name == 'give this file a name', 'fail'

tester = MyTestClass()
tester.setUp()
tester.test_a_file_name()
I would expect the test to pass when running this, but I'm getting an __init__() takes 1 positional argument but 2 were given error and I can't see why.

When running unittest.main, your class that inherits from TestCase gets handed the name of the test method to call. As such, you need to allow your class to accept that argument and pass it on to the parent class's __init__.
from unittest import TestCase, main

class MyTestClass(TestCase):
    # accept arbitrary positional and keyword arguments
    def __init__(self, *args, **kwargs):
        self.file_name = None
        # pass them on to the parent
        super().__init__(*args, **kwargs)

    def setUp(self):
        self.file_name = 'give this file a name'
        return self.file_name

    def test_a_file_name(self):
        assert self.file_name == 'give this file a name', 'fail'

if __name__ == '__main__':
    main()
As you noticed, you also don't need to handle instantiation and method calling. unittest.main() will do that for you.
In the future, if you ever get an error about arguments, a helpful debugging tip is to throw in *args and **kwargs and print them to see what is being handed in that you're not handling.
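For example, a minimal sketch (the DebugArgsTestCase name is made up for illustration, not from the answer):

from unittest import TestCase

class DebugArgsTestCase(TestCase):  # hypothetical name, for illustration only
    def __init__(self, *args, **kwargs):
        # print whatever the test runner hands in before forwarding it
        print('positional:', args, 'keyword:', kwargs)
        super().__init__(*args, **kwargs)

Run under unittest.main(), this prints the test method name the runner passes to the constructor, e.g. positional: ('test_a_file_name',) keyword: {}.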

Related

How could I create a docstring decorator in the presence of properties?

I have a collection of ever more specialized classes which correspond to collections of the same kind of data (temperature, density, etc.) but for different drifts; for example, one subclass has dimensions (nx, ny) and a different subclass has dimensions (ncv). I want to reflect that in the docstrings to get better documentation with Sphinx.
After reading many very useful threads here on Stack Overflow, I have arrived at this model:
import numpy as np
from functools import wraps

def class_decorator(cls):
    import ipdb; ipdb.set_trace()
    clsdict = {}
    mro = cls.mro()
    mro.reverse()
    for tmp in mro[1:]:  # ignore the object class parent
        clsdict.update(tmp.__dict__)
    for name, method in clsdict.items():
        if hasattr(method, '__og_doc__'):
            try:
                method.__doc__ = method.__og_doc__.format(**clsdict)
            except:
                pass
        else:
            try:
                method.__og_doc__ = method.__doc__
                method.__doc__ = method.__doc__.format(**clsdict)
            except:
                pass
    return cls

def mark_documentation(fn):
    if not hasattr(fn, '__og_doc__'):
        try:
            fn.__og_doc__ = fn.__doc__
        except:
            pass
    @wraps(fn)
    def wrapped(*args, **kwargs):
        return fn(*args, **kwargs)
    return wrapped

def documented_property(fn):
    if not hasattr(fn, '__og_doc__'):
        try:
            fn.__og_doc__ = fn.__doc__
        except:
            pass
    @wraps(fn)
    def wrapped(*args, **kwargs):
        return fn(*args, **kwargs)
    prp = property(wrapped)
    prp.__og_doc__ = fn.__og_doc__
    return prp

@class_decorator
class Base(object):
    _GRID_DIM = 'nx, ny'
    _TYPE = 'BaseData'

    def __init__(self, name):
        self.name = name

    def shape(self):
        """ This docstring contains the type '{_TYPE}' of class."""
        print('Simple')

    def operation(self, a, b, oper=np.sum, **kwargs):
        """ Test for functions with args and kwargs in {_TYPE}"""
        return oper([a, b])

    @classmethod
    def help(cls, var):
        try:
            print(getattr(cls, var).__doc__)
        except:
            print("No docstring yet.")

@class_decorator
class Advanced(Base):
    _GRID_DIM = 'ncv'
    _TYPE = 'AdvancedData'

    def __init__(self, name):
        super().__init__(name)

    @property
    @mark_documentation
    # @documented_property
    def arkansas(self):
        """({_GRID_DIM}, ns): Size of Arkansaw."""
        return 'Yeah'
I am aiming to get the correctly formatted docstring when I call the help method or I use Sphinx, so that:
> adv = Advanced('ADV')
> adv.help("arkansas")
(ncv, ns): Size of Arkansaw.
> adv.help("operation")
Test for functions with args and kwargs in AdvancedData
I have managed to make it work so far, except for properties, because I assigned __og_doc__ to the function, but the property does not have that attribute. My last attempt at monkeypatching this, documented_property, fails because property is immutable (as expected), and I cannot come up with any way to avoid this roadblock.
Is there any way around this problem?
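One possible direction (a sketch of my own, not an answer from the original thread): although a property object is immutable, a new property can be built from its fget with an explicit doc argument, storing __og_doc__ on the getter function instead of on the property:

def format_property_doc(prop, clsdict):
    """Return a new property whose docstring is formatted against clsdict."""
    fget = prop.fget
    og_doc = getattr(fget, '__og_doc__', fget.__doc__)
    if og_doc is None:
        return prop
    fget.__og_doc__ = og_doc  # keep the unformatted template on the getter
    # property objects are immutable, so build a fresh one carrying the new docstring
    return property(fget, prop.fset, prop.fdel, doc=og_doc.format(**clsdict))

Inside class_decorator one could then branch on isinstance(method, property) and do setattr(cls, name, format_property_doc(method, clsdict)) instead of mutating the property in place.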

Handling staticmethods while working with metaclasses

In the code below, I am using a metaclass along with a decorator to decorate all the user-defined methods.
It works for all instance methods, but for staticmethods it fails because of the self argument. To avoid that, I am using a try/except block, which solves the problem here, but in one of my projects it's not working out.
Is there a better way of decorating a staticmethod via a function decorator enclosed in a metaclass?
from functools import wraps
import types

def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(self, *args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        try:
            result = input_function(self, *args, **kwargs)
        except:
            result = input_function(*args, **kwargs)
        return result
    return wrapper

class DecoratorMetaClass(type):
    def __new__(meta, name, bases, class_dict):
        klass = super().__new__(meta, name, bases, class_dict)
        for key in dir(klass):
            value = getattr(klass, key)
            if isinstance(value, types.FunctionType) and "__" not in key:
                wrapped = decorator_function(value)
                setattr(klass, key, wrapped)
        return klass

class InterfaceClass(metaclass=DecoratorMetaClass):
    def function(self, test_parameter=1):
        print(f"function - Test Parameter= {test_parameter}")

    @staticmethod
    def static_function(test_parameter=1):
        print(f"static_function - Test Parameter= {test_parameter}")

class UserClass(InterfaceClass, metaclass=DecoratorMetaClass):
    def __init__(self):
        pass

    def function_2(self, test_parameter=1):
        print(f"function_2 - Test Parameter= {test_parameter}")

instance = UserClass()
instance.function(test_parameter=2)
instance.function_2(test_parameter=2)
instance.static_function(test_parameter=2)
print(isinstance(instance, InterfaceClass))
PS: I am not using a class decorator because it causes the isinstance checks to fail.
Explanation
The major problem comes down to the methods' parameters. You were almost there.
You have to make the decorator's arguments compatible with your methods' parameters.
You can change the signature of the wrapper function from wrapper(self, *args, **kwargs) to wrapper(*args, **kwargs) and just call result = input_function(*args, **kwargs). You don't need the try/except block for this decorator:
def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(*args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        return input_function(*args, **kwargs)
    return wrapper
Ideally, you should add *args (variable positional arguments) and **kwargs (variable keyword arguments) to the methods to make them compatible with your decorator.
In this case, I added *args before test_parameter=1 in static_function of InterfaceClass:
class InterfaceClass(metaclass=DecoratorMetaClass):
    @staticmethod
    def static_function(*args, test_parameter=1):
        print(f"static_function - Test Parameter= {test_parameter}")
Runnable Code
from functools import wraps
import types

def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(*args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        return input_function(*args, **kwargs)
    return wrapper

class DecoratorMetaClass(type):
    def __new__(meta, name, bases, class_dict):
        klass = super().__new__(meta, name, bases, class_dict)
        for key in dir(klass):
            value = getattr(klass, key)
            if isinstance(value, types.FunctionType) and "__" not in key:
                wrapped = decorator_function(value)
                setattr(klass, key, wrapped)
        return klass

class InterfaceClass(metaclass=DecoratorMetaClass):
    def function(self, test_parameter=1):
        print(f"function - Test Parameter= {test_parameter}")

    @staticmethod
    def static_function(*args, test_parameter=1):
        print(f"static_function - Test Parameter= {test_parameter}")

class UserClass(InterfaceClass, metaclass=DecoratorMetaClass):
    def __init__(self):
        pass

    def function_2(self, test_parameter=1):
        print(f"function_2 - Test Parameter= {test_parameter}")

instance = UserClass()
instance.function(test_parameter=2)
instance.function_2(test_parameter=2)
instance.static_function(test_parameter=2)
UserClass.static_function(test_parameter=3)
print(isinstance(instance, InterfaceClass))
Output
function - Test Parameter= 999
function_2 - Test Parameter= 999
static_function - Test Parameter= 999
static_function - Test Parameter= 999
True
Addressing OP's comment
Assuming test_parameter is always passed as a keyword argument, write decorator_function as follows:
def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(*args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        try:
            result = input_function(*args, **kwargs)
        except TypeError:
            result = input_function(**kwargs)
        return result
    return wrapper
This way you don't need to change the methods' signatures.
If you also call the functions with positional arguments, you will need to check the type of the first argument in args; things get complicated and error-prone. A sketch of an alternative that sidesteps the problem follows.
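Another option (a sketch of my own, not part of the original answer): have the metaclass inspect class_dict and treat staticmethod objects explicitly, so neither the decorator signature nor the method signatures need to change:

from functools import wraps
import types

def decorator_function(input_function):
    @wraps(input_function)
    def wrapper(*args, **kwargs):
        if kwargs.get("test_parameter"):
            kwargs["test_parameter"] = 999
        return input_function(*args, **kwargs)
    return wrapper

class DecoratorMetaClass(type):
    def __new__(meta, name, bases, class_dict):
        klass = super().__new__(meta, name, bases, class_dict)
        for key, value in class_dict.items():
            if "__" in key:
                continue
            if isinstance(value, staticmethod):
                # unwrap, decorate, and re-wrap so it stays a staticmethod
                setattr(klass, key, staticmethod(decorator_function(value.__func__)))
            elif isinstance(value, types.FunctionType):
                setattr(klass, key, decorator_function(value))
        return klass

Because each class in the hierarchy is processed by the same metaclass, methods inherited from InterfaceClass are already wrapped by the time UserClass is created, so only the attributes defined in each class_dict need to be handled.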

How to raise an error with decorator for unit testing?

I would like to do something in my unit test before it fails, using a decorator to do so.
Here is my code:
import requests
import unittest
import test

class ExceptionHandler(object):
    def __init__(self, f):
        self.f = f

    def __call__(self, *args, **kwargs):
        try:
            self.f(*args, **kwargs)
        except Exception as err:
            print('do smth')
            raise err

class Testing(unittest.TestCase):
    @ExceptionHandler
    def test_connection_200(self):
        r = requests.get("http://www.google.com")
        self.assertEqual(r.status_code, 400)

if __name__ == '__main__':
    unittest.main(verbosity=2)
But it throws:
TypeError: test_connection_200() missing 1 required positional argument: 'self'
How can I do something when my test fails and then keep the normal failing behavior of unittest?
Edit:
I would like to do something before my test fails, like writing a log, and then continue the normal process of failing.
If possible with a decorator.
Edit 2: the solution, thanks to @Thymen:
import requests
import unittest
import test

class ExceptionHandler(object):
    def __init__(self, f):
        self.f = f

    def __call__(self, *args, **kwargs):
        try:
            self.f(*args, **kwargs)
        except Exception as err:
            print('do smth')
            raise err

class Testing(unittest.TestCase):
    def test_connection_200(self):
        @ExceptionHandler
        def test_connection_bis(self):
            r = requests.get("https://www.google.com")
            print(r.status_code)
            self.assertEqual(r.status_code, 400)
        test_connection_bis(self)

if __name__ == '__main__':
    unittest.main(verbosity=2)
My comment may not have been clear, so here is the solution in code:
class Testing(unittest.TestCase):
    def test_connection_200(self):
        @ExceptionHandler
        def test_connection():
            r = requests.get("http://www.google.com")
            self.assertEqual(r.status_code, 400)
        with self.assertRaises(AssertionError):
            test_connection()
The reason this works is that there is no dependency between the call for the test (test_connection_200) and the actual functionality that you are trying to test (the ExceptionHandler).
Edit
The lines

    with self.assertRaises(AssertionError):
        test_connection()

check whether test_connection() raises an AssertionError. If it does not raise this error, the test will fail.
If you want the test to fail (because of the AssertionError), you can remove the with statement and just call test_connection(). This will make the test fail directly.
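For completeness, a function-based decorator would also achieve the original goal directly (a sketch, not part of the answer; the exception_handler name is made up). A plain function is a descriptor, so the wrapped test is bound to the instance like any other method, which avoids the missing 'self' TypeError; the decorator can log and then re-raise:

import functools
import logging
import unittest
import requests

def exception_handler(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            # do something before the test fails, e.g. write a log entry
            logging.exception("test %s failed", f.__name__)
            raise  # re-raise so unittest still reports the failure normally
    return wrapper

class Testing(unittest.TestCase):
    @exception_handler
    def test_connection_200(self):
        r = requests.get("http://www.google.com")
        self.assertEqual(r.status_code, 400)

if __name__ == '__main__':
    unittest.main(verbosity=2)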

Pytests with context manager

I am trying to understand how to test a context manager with pytest.
I created a class and need to count how many times the static method do_some_stuff was called:
class Iterator():
    def __init__(self):
        pass

    @staticmethod
    def do_some_stuff():
        pass

    def __enter__(self):
        return [i for i in range(10)]

    def __exit__(self, *args):
        return True

iterator = Iterator()

def f(iterator):
    with iterator as i:
        for _ in i:
            iterator.do_some_stuff()
I have created a pytest file and need to check that the function was called 10 times, but my solution isn't working:
import pytest
from unittest.mock import MagicMock

@pytest.fixture
def iterator():
    return MagicMock(spec=Iterator)

def test_f(iterator):
    f(iterator)
    assert (iterator.do_some_stuff.call_count == 10)
Thanks in advance
The reason your code doesn't work is that MagicMock(spec=Iterator) replaces the __enter__ method of your Iterator class with a MagicMock object (see the MagicMock documentation). This means that in your test, the value of i in function f is a MagicMock object instead of list(range(10)), so the code inside the for loop is never executed.
To make it work, you will probably only want to mock the do_some_stuff method:
from unittest.mock import Mock

@pytest.fixture
def iterator():
    it = Iterator()
    it.do_some_stuff = Mock()
    return it

def test_f(iterator):
    f(iterator)
    assert (iterator.do_some_stuff.call_count == 10)
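An equivalent option (a sketch under the same setup, not from the original answer) is to patch the method only for the duration of the test with unittest.mock.patch.object, which also restores the original attribute afterwards:

from unittest import mock

def test_f_with_patch():
    it = Iterator()
    # replace do_some_stuff on this instance only while the with-block is active
    with mock.patch.object(it, "do_some_stuff") as mocked:
        f(it)
    assert mocked.call_count == 10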

Apply decorator to all method of sub classes for timeit

I have a method decorator that looks like this:
def debug_run(fn):
    from functools import wraps
    @wraps(fn)
    def wrapper(self, *args, **kw):
        # log some stuff
        # timeit fn
        res = fn(self, *args, **kw)
        return res
    return wrapper
Until now I have applied it to each method that I want to debug. Now I'm trying to apply it to all class methods using a class decorator.
Rather than doing this:
class A():
    @debug_run
    def f(self):
        pass
I do this:
@decallmethods(debug_run)
class A():
    def f(self):
        pass

def decallmethods(decorator):
    def dectheclass(cls):
        for name, m in inspect.getmembers(cls, inspect.ismethod):
            if name in getattr(cls, 'METHODS_TO_INSPECT', []):
                setattr(cls, name, decorator(m))
        return cls
    return dectheclass
Trying to apply the decorator to the base class is not working as expected: no log appears in the console. Now I wonder whether this approach is the right one or whether I should use something else (apply the debug decorator to selected methods from the base class to all subclasses).
[EDIT]
Finally found why no logs were printed:
Why is there a difference between inspect.ismethod and inspect.isfunction from python 2 -> 3?
Here is a complete example reflecting my code:
import inspect
import time
import logging as logger
from functools import wraps

logger.basicConfig(format='LOGGER - %(asctime)s %(message)s', level=logger.DEBUG)

def debug_run(fn):
    @wraps(fn)
    def wrapper(self, *args, **kw):
        logger.debug(
            "call method %s of instance %s with %r and %s "
            % (fn.__name__, self, args, kw))
        time1 = time.time()
        res = fn(self, *args, **kw)
        time2 = time.time()
        logger.debug(
            "%s function %0.3f ms" % (fn, (time2 - time1) * 1000.0))
        return res
    return wrapper

def decallmethods(decorator):
    def dectheclass(cls):
        for name, m in inspect.getmembers(
                cls, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x)):
            methods_to_inspect = getattr(cls, 'METHODS_TO_INSPECT', [])
            if name in methods_to_inspect:
                setattr(cls, name, decorator(m))
        return cls
    return dectheclass

class B(object):
    METHODS_TO_INSPECT = ["bfoo1", "bfoo2", "foo"]

    def __str__(self):
        return "%s:%s" % (repr(self), id(self))

    def bfoo1(self):
        pass

    def bfoo2(self):
        pass

    def foo(self):
        pass

    def run(self):
        print("print - Base run doing nothing")

class C(object):
    pass

@decallmethods(debug_run)
class A(B, C):
    METHODS_TO_INSPECT = ["bfoo1", "bfoo2", "foo", "run"]

    def foo(self):
        print("print - A foo")

    def run(self):
        self.bfoo1()
        self.bfoo2()
        self.foo()

a = A()
b = B()
a.run()
b.run()
In this case, applying decallmethods to B will not affect A, so I must apply it to both A and B, and thus to all subclasses of B.
Is it possible to have a mechanism that applies decallmethods to the methods of all subclasses?
Look at this:
How can I decorate all functions of a class without typing it over and over for each method added? Python
delnan has a good answer; just add this rule to his answer:
if name in getattr(cls, 'METHODS_TO_INSPECT', []):
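If the goal is to have every subclass decorated automatically, one way (a sketch of my own, adapting the metaclass idea from the linked answer to the METHODS_TO_INSPECT rule) is to move the logic into a metaclass, so each subclass is processed as soon as it is defined:

class DebugMeta(type):
    def __new__(meta, name, bases, namespace):
        cls = super().__new__(meta, name, bases, namespace)
        for attr in getattr(cls, 'METHODS_TO_INSPECT', []):
            method = namespace.get(attr)
            # only wrap methods defined on this class; inherited ones were
            # already wrapped when their defining class was created
            if callable(method):
                setattr(cls, attr, debug_run(method))
        return cls

class B(object, metaclass=DebugMeta):
    METHODS_TO_INSPECT = ["bfoo1", "bfoo2", "foo"]
    ...

class A(B):  # picks up DebugMeta automatically, no @decallmethods needed
    METHODS_TO_INSPECT = ["bfoo1", "bfoo2", "foo", "run"]
    ...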
