Why does 'yield' in a test case let the test pass - python-3.x

I am trying to understand the following Python code snippet, taken from the SublimeText3 plugin development unit testing examples:
def test_delayed_insert(self):
    sublime.set_timeout(
        lambda: self.view.run_command("delayed_insert_hello_world"),
        100)
    # `delayed_insert_hello_world` will be executed after the timeout;
    # `yield 1000` yields the runtime to the main thread and resumes
    # execution one second later
    yield 1000
    row = self.getRow(0)
    self.assertEqual(row, "hello world")
How can this work? If unittest does not support calling the test as a generator, the code would not be executed, right?
My current understanding is that the test framework takes the yielded value x and suspends itself for x ms, in this case 1000 ms, similar to the semantics of yield/sleep for C++ threads. However, that would require unittest to actually use the yielded value in this way.
If it does, why does the following test not fail (tried within ST3, i.e. Python 3.3.6, and with Python 3.5.2)?
from unittest import TestCase

class TestMe(TestCase):
    def test_failtest(self):
        self.assertTrue(False)
        yield 0
Running it gives:
~$ python3 -m unittest test_me.py --verbose
test_failtest (test_me.TestMe) ... ok
----------------------------------------------------------------------
Ran 1 test in 0.000s
OK

TL;DR: the code you linked to subclasses a custom TestCase class (DeferrableTestCase), which may alter the "normal" behavior of unittest.
Let's take a step back and not use unittest.
def foo():
    assert True is False

foo()
This raises an AssertionError as expected. However,
def foo():
    assert True is False
    yield

foo()

does not.
The reason is that the yield keyword turns foo into a generator function, so calling foo() merely creates a generator object without executing the function body.
In order to get the AssertionError we would have to consume the generator that foo returns:
def foo():
    assert True is False
    yield

next(foo())

Traceback (most recent call last):
  File "main.py", line 48, in <module>
    next(foo())
  File "main.py", line 45, in foo
    assert True is False
AssertionError
This behavior is inherent to generators and not related to how unittest or assert work:
def foo():
    1/0
    yield

gen = foo()
print('No exception yet')
next(gen)
Outputs:

No exception yet
Traceback (most recent call last):
  File "main.py", line 50, in <module>
    next(gen)
  File "main.py", line 45, in foo
    1/0
ZeroDivisionError: division by zero
However, the code you linked to subclasses a custom TestCase class (DeferrableTestCase), which may alter this behavior, for example by detecting that a test method returned a generator and driving that generator itself.
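A minimal sketch of how such a runner might consume a generator-based test, sleeping on each yielded value, could look like this (hypothetical code for illustration; run_deferrable is not the actual DeferrableTestCase implementation):

import inspect
import time

def run_deferrable(test_method):
    # Call the test. If it is a generator function, the body has not
    # run yet; we only get a generator object back.
    result = test_method()
    if inspect.isgenerator(result):
        # Exhausting the generator executes the test body; each yielded
        # value is interpreted as a delay in milliseconds.
        for delay_ms in result:
            time.sleep((delay_ms or 0) / 1000.0)

Because such a runner exhausts the generator, the assertions after the yield actually run, whereas the stock unittest runner never iterates the generator and therefore reports ok.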

Related

Deadlock when running celery tests under pytest with xdist

If I run without xdist involved, like this:
pytest --disable-warnings --verbose -s test_celery_chords.py
Works just fine. I see the DB created, the tasks run and it exits as expected.
If I run with xdist involved (-n 2), like this:
pytest --disable-warnings --verbose -n 2 -s test_celery_chords.py
I end up with a hung process (and sometimes these messages):
Destroying old test database for alias 'default'...
Chord callback '4c7664ce-89e0-475e-81a7-4973929d2256' raised: ValueError('4c7664ce-89e0-475e-81a7-4973929d2256')
Traceback (most recent call last):
  File "/Users/bob/.virtualenv/testme/lib/python3.10/site-packages/celery/backends/base.py", line 1019, in on_chord_part_return
    raise ValueError(gid)
ValueError: 4c7664ce-89e0-475e-81a7-4973929d2256
(the same chord-callback traceback repeats several more times)
[gw0] ERROR test_celery_chords.py::test_chords Destroying test database for alias 'default'...
The only way to end it is with ^C.
These are my two tests (essentially the same test). The DB isn't needed for these tasks (simple add and average example tests) but will be needed for the other Django tests that do use the DB.
def test_chords(transactional_db, celery_app, celery_worker, celery_not_eager):
    celery_app.config_from_object("django.conf:settings", namespace="CELERY")
    task = do_average.delay()
    results = task.get()
    assert task.state == "SUCCESS"
    assert len(results[0][1][1]) == 10

def test_chord_differently(transactional_db, celery_app, celery_worker, celery_not_eager):
    celery_app.config_from_object("django.conf:settings", namespace="CELERY")
    task = do_average.delay()
    results = task.get()
    assert task.state == "SUCCESS"
    assert len(results[0][1][1]) == 10
and the tasks (which shouldn't matter):
import time
from typing import List

from celery import chord, shared_task

@shared_task
def _add(x: int, y: int) -> int:
    print(f"{x} + {y} {time.time()}")
    return x + y

@shared_task
def _average(numbers: List[int]) -> float:
    print(f"AVERAGING {sum(numbers)} / {len(numbers)}")
    return sum(numbers) / len(numbers)

@shared_task
def do_average():
    tasks = [_add.s(i, i) for i in range(10)]
    print(f"Creating chord of {len(tasks)} tasks at {time.time()}")
    return chord(tasks)(_average.s())
using this conftest.py:

@pytest.fixture
def celery_not_eager(settings):
    settings.CELERY_TASK_ALWAYS_EAGER = False
    settings.CELERY_TASK_EAGER_PROPAGATES = False
pytest --fixtures shows:

celery_app -- .../python3.10/site-packages/celery/contrib/pytest.py:173
    Fixture creating a Celery application instance.
celery_worker -- .../python3.10/site-packages/celery/contrib/pytest.py:195
    Fixture: Start worker in a thread, stop it when the test returns.
Using:

django==4.1.2
pytest-celery==0.0.0
pytest-cov==3.0.0
pytest-django==4.5.2
pytest-xdist==2.5.0
While I have not solved this, I have found a workaround of sorts: decorating the test class with @pytest.mark.xdist_group(name="celery") lets me do the following:
@pytest.mark.xdist_group(name="celery")
@override_settings(CELERY_TASK_ALWAYS_EAGER=False)
@override_settings(CELERY_TASK_EAGER_PROPAGATES=False)
class SyncTaskTestCase2(TransactionTestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.celery_worker = start_worker(app, perform_ping_check=False)
        cls.celery_worker.__enter__()
        print(f"Celery Worker started {time.time()}")

    @classmethod
    def tearDownClass(cls):
        print(f"Tearing down Superclass {time.time()}")
        super().tearDownClass()
        print(f"Tore down Superclass {time.time()}")
        cls.celery_worker.__exit__(None, None, None)
        print(f"Celery Worker torn down {time.time()}")

    def test_success(self):
        print(f"Starting test at {time.time()}")
        self.task = do_average_in_chord.delay()
        self.task.get()
        print(f"Finished Averaging at {time.time()}")
        assert self.task.successful()
This, combined with the command-line option --dist loadgroup, forces all of the "celery" group to be run on the same worker process, which prevents the deadlock and allows --numprocesses 10 to run to completion.
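For example, the full invocation might look like this (same test file as above):

pytest --disable-warnings -n 10 --dist loadgroup test_celery_chords.py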
The biggest drawback here is the 9-second penalty for tearing down the Celery worker, which pushes you toward putting all of your Celery tests into one class.
# This accomplishes the same thing as the unittest above WITHOUT wrapping a class around the tests; it also eliminates the 9-second teardown wait.
@pytest.mark.xdist_group(name="celery")
@pytest.mark.django_db  # Why do I need this and transactional_db???
def test_averaging_in_a_chord(
    transactional_db,
    celery_session_app,
    celery_session_worker,
    use_actual_celery_worker,
):
    task = do_average_in_chord.delay()
    task.get()
    assert task.successful()
You do need this in your conftest.py:
from typing import Type
import time

import pytest
from pytest_django.fixtures import SettingsWrapper

from celery import Celery
from celery.contrib.testing.worker import start_worker


@pytest.fixture(scope="function")
def use_actual_celery_worker(settings: SettingsWrapper) -> SettingsWrapper:
    """Turns off CELERY_TASK_ALWAYS_EAGER and CELERY_TASK_EAGER_PROPAGATES for a single test."""
    settings.CELERY_TASK_ALWAYS_EAGER = False
    settings.CELERY_TASK_EAGER_PROPAGATES = False
    return settings


@pytest.fixture(scope="session")
def celery_session_worker(celery_session_app: Celery):
    """Re-implemented this so that my celery app gets used. This keeps the priority queue stuff
    the same as it is in production. If BROKER_BACKEND is set to "memory" then rabbit shouldn't
    be involved anyway."""
    celery_worker = start_worker(
        celery_session_app, perform_ping_check=False, shutdown_timeout=0.5
    )
    celery_worker.__enter__()
    yield celery_worker
    # Shut the worker down immediately so that we don't have a 9-second wait for the timeout.
    celery_session_app.control.shutdown()
    print(f"Tearing down Celery Worker {time.time()}")
    celery_worker.__exit__(None, None, None)
    print(f"Celery Worker torn down {time.time()}")


@pytest.fixture(scope="session")
def celery_session_app() -> Celery:
    """Get the app you would regularly use for celery tasks and return it. This ensures all of
    your default app options mirror what you use at runtime."""
    from workshop.celery import app

    yield app

Optionally passing parameters onto another function with jit

I am attempting to JIT-compile a Python function and use an optional argument to change the arguments of another function call.
I think where jit might be tripping up is that the default value of the optional argument is None, and jit doesn't know how to handle that, or at least doesn't know how to handle it when it changes to a numpy array. See below for a rough overview:
@jit(nopython=True)
def foo(otherFunc, arg1, optionalArg=None):
    if optionalArg is not None:
        out = otherFunc(arg1, optionalArg)
    else:
        out = otherFunc(arg1)
    return out
where optionalArg is either None or a NumPy array.
One solution would be to turn this into three functions as shown below, but this feels kinda janky and I don't like it, especially because speed is very important for this task.
def foo(otherFunc, arg1, optionalArg=None):
    if optionalArg is not None:
        out = func1(otherFunc, arg1, optionalArg)
    else:
        out = func2(otherFunc, arg1)
    return out

@jit(nopython=True)
def func1(otherFunc, arg1, optionalArg):
    out = otherFunc(arg1, optionalArg)
    return out

@jit(nopython=True)
def func2(otherFunc, arg1):
    out = otherFunc(arg1)
    return out
Note that other things happen besides just calling otherFunc, which is what makes jit worth using here, but I'm almost certain the problem is not there, since this was working before the optionalArg portion was added, so I have decided not to include it.
For those of you who are curious, it's a fourth-order Runge-Kutta implementation with optional extra parameters to pass to the differential equation. If you want to see the whole thing, just ask.
The traceback is rather long but here is some of it:
inte.rk4(de2,y0,0.001,200,vals=np.ones(4))

Traceback (most recent call last):
  File "<ipython-input-38-478197aa6a1a>", line 1, in <module>
    inte.rk4(de2,y0,0.001,200,vals=np.ones(4))
  File "C:\Users\Alex\Anaconda3\lib\site-packages\numba\dispatcher.py", line 350, in _compile_for_args
    error_rewrite(e, 'typing')
  File "C:\Users\Alex\Anaconda3\lib\site-packages\numba\dispatcher.py", line 317, in error_rewrite
    reraise(type(e), e, None)
  File "C:\Users\Alex\Anaconda3\lib\site-packages\numba\six.py", line 658, in reraise
    raise value.with_traceback(tb)
TypingError: Internal error at <numba.typeinfer.CallConstraint object at 0x00000258E168C358>:
This continues...
inte.rk4 is the equivalent of foo, and de2 is otherFunc; y0, 0.001, and 200 are just values that I swapped out for arg1 in my problem description above, and vals is optionalArg.
A similar thing happens when I try to run this with the vals parameter omitted:
ysExp=inte.rk4(deExp,y0,0.001,200)

Traceback (most recent call last):
  File "<ipython-input-39-7dde4bcbdc2f>", line 1, in <module>
    ysExp=inte.rk4(deExp,y0,0.001,200)
  File "C:\Users\Alex\Anaconda3\lib\site-packages\numba\dispatcher.py", line 350, in _compile_for_args
    error_rewrite(e, 'typing')
  File "C:\Users\Alex\Anaconda3\lib\site-packages\numba\dispatcher.py", line 317, in error_rewrite
    reraise(type(e), e, None)
  File "C:\Users\Alex\Anaconda3\lib\site-packages\numba\six.py", line 658, in reraise
    raise value.with_traceback(tb)
TypingError: Internal error at <numba.typeinfer.CallConstraint object at 0x00000258E048EA90>:
This continues...
As shown in the documentation here, you can specify optional types explicitly in Numba. For example (this is the same example from the documentation):
>>> @jit((optional(intp),))
... def f(x):
...     return x is not None
...
>>> f(0)
True
>>> f(None)
False
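Applied to an array-valued optional argument, a minimal sketch might look like this (scale_sum and its signature are my own illustration, assuming a float64 array):

import numpy as np
from numba import njit, optional, float64

# Explicitly typed signature: the second argument is either a float64
# array or None.
@njit((float64[:], optional(float64[:])))
def scale_sum(arr, weights):
    if weights is None:
        return arr.sum()
    return (arr * weights).sum()

arr = np.arange(4.0)
print(scale_sum(arr, None))        # 6.0
print(scale_sum(arr, np.ones(4)))  # 6.0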
Additionally, based on the conversation in this GitHub issue, you can use the following workaround to implement an optional keyword argument. I have modified the code from the solution provided in the GitHub issue to suit your example:
from collections import OrderedDict

import numpy as np
from numba import jitclass, int32, njit

np_arr = np.asarray([1, 2])

spec = OrderedDict()
spec['x'] = int32

@jitclass(spec)
class Foo(object):
    def __init__(self, x):
        self.x = x

    def otherFunc(self, optionalArg):
        if optionalArg is None:
            return self.x + 10
        else:
            return len(optionalArg)

@njit
def useOtherFunc(arg1, optArg):
    foo = Foo(arg1)
    print(foo.otherFunc(optArg))

arg1 = 5
useOtherFunc(arg1, np_arr)  # Output: 2
useOtherFunc(arg1, None)    # Output: 15
See this colab notebook for the example shown above.

multiprocessing.Pool cannot return OrderedDict subclass with additional argument

I am trying to have a simple subclass of OrderedDict that gets created by a Pool worker and then returned.
It seems that, when the created object is returned to the pool, the pickling process tries to re-instantiate it and fails because of the additional required argument to the __init__ function.
This is a minimal (non) working example:
from collections import OrderedDict
from multiprocessing import Pool

class Obj1(OrderedDict):
    def __init__(self, x, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.x = x

def task(x):
    obj1 = Obj1(x)
    return obj1

if __name__ == '__main__':
    with Pool(1) as pool:
        for x in pool.imap_unordered(task, (1, 2, 3)):
            print(x.x)
If I run this I get the following error:
Exception in thread Thread-3:
Traceback (most recent call last):
  File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
    self.run()
  File "/usr/lib/python3.6/threading.py", line 864, in run
    self._target(*self._args, **self._kwargs)
  File "/usr/lib/python3.6/multiprocessing/pool.py", line 463, in _handle_results
    task = get()
  File "/usr/lib/python3.6/multiprocessing/connection.py", line 251, in recv
    return _ForkingPickler.loads(buf.getbuffer())
TypeError: __init__() missing 1 required positional argument: 'x'
Again, this fails when the task function returns its result to the pool, and I guess the object gets pickled?
If I replace OrderedDict with a plain dict it works flawlessly...
I have a workaround that uses kwargs and retrieves the attribute of interest, but I am stumped about the error itself. Any ideas?
You can define __getstate__() and __setstate__() methods for your class.
In those functions you can make sure that x is handled as well. For example:
def __getstate__(self):
    # materialize the items; a dict view itself is not picklable
    return self.x, tuple(self.items())

def __setstate__(self, state):
    self.x = state[0]
    self.update(state[1])
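One caveat: OrderedDict defines its own __reduce__, which recreates the object by calling the class with no arguments, so depending on the Python version the __getstate__/__setstate__ pair above may never be consulted during unpickling. An alternative sketch, plainly a different technique, is to override __reduce__ so that x reaches __init__ directly:

import pickle
from collections import OrderedDict

class Obj1(OrderedDict):
    def __init__(self, x, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.x = x

    def __reduce__(self):
        # Recreate the object as Obj1(x) and replay the mapping items
        # afterwards, instead of OrderedDict's default no-argument call.
        return (self.__class__, (self.x,), None, None, iter(self.items()))

if __name__ == '__main__':
    obj = Obj1(42, a=1)
    clone = pickle.loads(pickle.dumps(obj))
    print(clone.x, dict(clone))  # 42 {'a': 1}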
BTW, since CPython 3.6 there is usually no reason to use OrderedDict, because plain dictionaries preserve insertion order. That was originally an implementation detail of CPython; in Python 3.7 it was made part of the language.

Prevent custom assert from showing in traceback of python unittest

I'd like to add some custom assert methods to a TestCase. As a simple example, I've just put one inside the test class below. It works as expected, but when the output is generated the traceback includes the custom assert in the output.
What is the step necessary to make it behave like assertEqual()? The code for assertEqual is in TestCase, but the actual line that raises the assertion does not appear in the traceback. What do I need to do to make test_something2's output look more like test_something1's?
import unittest

class TestCustomAssert(unittest.TestCase):
    def assertSomething(self, s):
        self.assertEqual(s, 'something')

    def test_something1(self):
        self.assertEqual('foo', 'something')

    def test_something2(self):
        self.assertSomething('foo')

if __name__ == '__main__':
    unittest.main()
Output
python3 custom_assert.py
FF
======================================================================
FAIL: test_something1 (__main__.TestCustomAssert)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "custom_assert.py", line 8, in test_something1
    self.assertEqual('foo', 'something')
AssertionError: 'foo' != 'something'
- foo
+ something
======================================================================
FAIL: test_something2 (__main__.TestCustomAssert)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "custom_assert.py", line 10, in test_something2
    self.assertSomething('foo')
  File "custom_assert.py", line 6, in assertSomething
    self.assertEqual(s, 'something')
AssertionError: 'foo' != 'something'
- foo
+ something
----------------------------------------------------------------------
Ran 2 tests in 0.000s
FAILED (failures=2)
unittest doesn't print tracebacks from frames that have __unittest=True in their globals.
From unittest.result:
def _is_relevant_tb_level(self, tb):
    return '__unittest' in tb.tb_frame.f_globals
So, if you make a helper module, you can emulate that behavior:
helper.py:
__unittest = True

def assert_stuff(s):
    assert s == 'something', "%s is not something" % s
Now you can call this helper from your test case.
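For example, a hypothetical test module using the helper:

import unittest
from helper import assert_stuff

class TestCustomAssert(unittest.TestCase):
    def test_something(self):
        # The frame inside helper.py is hidden from the failure traceback.
        assert_stuff('foo')

if __name__ == '__main__':
    unittest.main()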
I guess one could make a very neat decorator that makes such magic more automatic, without a helper module, but IMHO you shouldn't put effort into reducing the traceback anyway.
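For what it's worth, here is a hedged sketch of such a decorator: it rebuilds the function with a copy of its globals in which __unittest is set, so the frame is skipped by _is_relevant_tb_level (hide_from_unittest_traceback is my own hypothetical helper):

import functools
import types

def hide_from_unittest_traceback(fn):
    # Rebuild fn with a copy of its globals that sets __unittest = True,
    # so unittest's _is_relevant_tb_level() skips this frame.
    hidden_globals = dict(fn.__globals__, __unittest=True)
    hidden = types.FunctionType(
        fn.__code__, hidden_globals, fn.__name__, fn.__defaults__, fn.__closure__
    )
    return functools.wraps(fn)(hidden)

Decorating assertSomething with it should then keep that frame out of the failure output.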

How to manipulate Python unittest output '.', E and F

Python unittest prints '.', E, or F for ok, error, or fail. We can suppress this by setting verbosity = 0 or redirecting output to /dev/null, but how can we change it? I mean I want to write PASS, FAIL, or ERROR after every test instead of ., E, or F, without using the verbosity flag -v.
I am looking for the following kind of output:
test one
PASS
test two
FAIL
test three
PASS
test four
ERROR
Thanks
Zubair
So using extra verbosity (-v) on the command line gets the desired output format:
Passing the -v option to your test script will instruct unittest.main() to enable a higher level of verbosity and produce the following output:
test_isupper (__main__.TestStringMethods) ... ok
test_split (__main__.TestStringMethods) ... ok
test_upper (__main__.TestStringMethods) ... ok
----------------------------------------------------------------------
Ran 3 tests in 0.001s
OK
(https://docs.python.org/3/library/unittest.html#basic-example)
But we can also set the verbosity level from code:
You can run tests with more detailed information by passing in the verbosity argument:
if __name__ == '__main__':
    unittest.main(verbosity=2)
(https://docs.python.org/3/library/unittest.html#unittest.main)
You can define your own test runner and result class to customize almost everything. If you just want to change how test names are printed (just the method name, without the class) and change ok to PASS, it's enough to override the startTest and addSuccess methods of the standard TextTestResult:
import unittest

class Test(unittest.TestCase):
    def test_pass(self):
        self.assertTrue(True)

    def test_fail(self):
        self.assertTrue(False)

    def test_error(self):
        x = 1 / 0

class MyTestResult(unittest.TextTestResult):
    def __init__(self, stream, descriptions, verbosity):
        super().__init__(stream, descriptions, verbosity)
        self.stream = stream
        self.verbosity = verbosity

    def addSuccess(self, test):
        if self.verbosity > 1:
            self.stream.writeln("PASS")
        else:
            super().addSuccess(test)

    def startTest(self, test):
        unittest.TestResult.startTest(self, test)
        if self.verbosity > 1:
            self.stream.write(f"{test._testMethodName} ")
            self.stream.flush()

if __name__ == "__main__":
    unittest.main(testRunner=unittest.TextTestRunner(resultclass=MyTestResult, verbosity=2))
Output:
test_error ERROR
test_fail FAIL
test_pass PASS
======================================================================
ERROR: test_error (__main__.Test)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "test.py", line 12, in test_error
    x = 1 / 0
ZeroDivisionError: division by zero
======================================================================
FAIL: test_fail (__main__.Test)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "test.py", line 9, in test_fail
    self.assertTrue(False)
AssertionError: False is not true
----------------------------------------------------------------------
Ran 3 tests in 0.001s
FAILED (failures=1, errors=1)
instead of the standard output:
test_error (__main__.Test) ... ERROR
test_fail (__main__.Test) ... FAIL
test_pass (__main__.Test) ... ok
...
