class Hand:
    """A blackjack hand: raw card ranks plus the current score bookkeeping."""

    def __init__(self, cards, total, soft_ace_count):
        # cards holds raw ranks: 1 = ace, 11/12/13 = face cards.
        self.cards = cards
        self.total = total
        self.soft_ace_count = soft_ace_count

    def __str__(self):
        return f"{self.cards},{self.total},{self.soft_ace_count}"

    def add_card(self):
        """Draw one card via get_card() and rescore the hand."""
        self.cards.append(get_card())
        self.score()

    def is_blackjack(self):
        """True for a natural: exactly two cards totalling 21."""
        return self.total == 21 and len(self.cards) == 2

    def is_bust(self):
        """True once the scored total exceeds 21."""
        return 21 < self.total

    def score(self):
        """Recompute total and soft_ace_count from the raw card ranks."""
        running = 0
        for rank in self.cards:
            running += min(rank, 10)  # face cards (11-13) count as 10
        self.soft_ace_count = 0
        # Promote one ace from 1 to 11 when that cannot bust the hand.
        if running <= 11 and 1 in self.cards:
            running += 10
            self.soft_ace_count = 1
        self.total = running
I am trying to write a unittest for this 'Hand' class. Do I need an __init__-style setup for this particular unittest?
Here is part of the code of my unittest. Thank you so much if anyone could help me with this. I just started in Python. Any suggestions would help. Thank you. Please ignore the indentation.
class hand(unittest.TestCase):
    """Unit tests for the Hand class.

    Do not override __init__ on a TestCase: unittest instantiates test
    classes itself (passing the test-method name), so per-test state
    belongs in setUp(), which runs before every test method.
    """

    def setUp(self):
        # Fresh fixtures for each test. Hand is expected to be imported
        # from the module under test (e.g. ``from hand import Hand``).
        self.cards = []
        self.total = 0
        self.soft_ace_count = 0
        self.hand = Hand(self.cards, self.total, self.soft_ace_count)
Assuming that you have your class in a module named hand.py, you can place your unittest script in the same directory. It could look like this:
import unittest
from hand import Hand
class hand(unittest.TestCase):
    """Unit tests for the Hand class imported from hand.py."""

    def test_init(self):
        """A hand built from an empty card list starts fully zeroed."""
        h = Hand([], 0, 0)
        self.assertEqual(h.total, 0)
        self.assertEqual(h.soft_ace_count, 0)
        self.assertEqual(len(h.cards), 0)

    # def test_blackjack(self):
    #     ...
    # def test_score(self):
    #     ...
    # def test_OTHERTHINGS(self):
    #     ...


if __name__ == '__main__':
    unittest.main()
If you name your unittest script, for example, unittesthand.py, you can run it via "python unittesthand.py". You will get the following result:
> .
----------------------------------------------------------------------
Ran 1 test in 0.000s
OK
The unittest component has discovered and executed all methods whose names start with "test" — in this example, the "test_init" method. This is NOT the initialization of the test, but the unittest for the initialization of class Hand.
Now change for example the code to "self.assertEqual(h.total, 1)", and you will see that the unittest component reports an error.
Now you can create additional cases for the other methods. You can also create a base test case with code that is always executed before and after the unittests, and so on...
Related
I have a fixture that wraps all of my tests, for example checking the GPU memory usage:
import pytest
import nvidia_smi
def gpu_memory_used():
    """Return the number of bytes of GPU memory currently in use.

    Initializes NVML, asserts that exactly one GPU is present, reads its
    memory counters, and shuts NVML down again before returning.
    """
    nvidia_smi.nvmlInit()
    n_devices = nvidia_smi.nvmlDeviceGetCount()
    assert n_devices == 1, 'Should be 1 GPU'
    device = nvidia_smi.nvmlDeviceGetHandleByIndex(0)
    # Capture the value before shutting NVML back down.
    used = nvidia_smi.nvmlDeviceGetMemoryInfo(device).used
    nvidia_smi.nvmlShutdown()
    return used
@pytest.fixture(autouse=True)
def check_gpu_memory():
    """Autouse fixture that fails any test which leaks GPU memory.

    NOTE(review): the source had '#pytest.fixture' — almost certainly a
    markdown-mangled '@pytest.fixture' decorator; restored here.
    """
    memory_used_before_test = gpu_memory_used()
    yield  # the wrapped test runs here
    memory_used_after_test = gpu_memory_used()
    assert memory_used_after_test == memory_used_before_test
I'm wondering is it possible to run the yield in a subprocess and return the PASS/FAIL of the test to the fixture -> Important to note that I want the subprocess to be closed once done.
Thanks
I am testing a class that in turn uses open. I want to patch open on the test class for simplicity, but set read_data per test. So something like this:
@mock.patch('builtins.open', unittest.mock.mock_open())
class test_TheClassUnderTest:
    # NOTE(review): the surrounding question states this approach does NOT
    # work — read_data cannot be injected by attribute assignment after
    # mock_open() has been constructed. ('#mock.patch' restored to
    # '@mock.patch'; markdown mangled the decorator.)
    def test_MakeSureAMethodWorks(self):
        open().read_data = "custom data for the test"
I confirmed that tests work when I set read_data in the patch decorator:
@mock.patch('builtins.open', unittest.mock.mock_open(read_data="acid burn and zero cooooooool"))
class test_TheClassUnderTest(unittest.TestCase):
    # Decorator restored from the mangled '#mock.patch' form; read_data is
    # fixed for the whole class here, which is the variant that works.
    def test_MakeSureAMethodWorks(self):
        # ... (exercise the object under test, which reads via open())
        # assertEquals is a deprecated alias; assertEqual is the API.
        self.assertEqual(object.a_method(), expected_value)
I've tried setting the return values of the mock returned from open():
open().read.return_value = "custom data for the test"
open().readline.return_value = "custom data for the test"
but it's not working. Is there a way to set the value of read_data after the mock_open() constructor has been called or, more generally, another way to do this?
You don't need to mock the .return_value of open().read. Using mock_open(read_data="fake data") is enough.
E.g.
main.py:
def main(file_path):
    """Return every line of *file_path* as a list, newlines included."""
    with open(file_path) as handle:
        # readlines() yields exactly the lines iteration would collect.
        return handle.readlines()
test_main.py:
import unittest
from unittest.mock import mock_open, patch
from main import main
class TestMain(unittest.TestCase):
    """Verify that main() reads its data through the patched open()."""

    def test_main(self):
        fake = mock_open(read_data="custom data for the test\nsecond line")
        with patch('builtins.open', fake) as patched_open:
            result = main('fake/file/path')
        self.assertEqual(result, ['custom data for the test\n', 'second line'])
        patched_open.assert_called_once_with('fake/file/path')


if __name__ == '__main__':
    unittest.main()
test result:
⚡ coverage run /Users/dulin/workspace/github.com/mrdulin/python-codelab/src/stackoverflow/65533628/test_main.py && coverage report -m --include='./src/**'
.
----------------------------------------------------------------------
Ran 1 test in 0.010s
OK
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------
src/stackoverflow/65533628/main.py 6 0 100%
src/stackoverflow/65533628/test_main.py 12 0 100%
-----------------------------------------------------------------------
TOTAL 18 0 100%
I know how I can test whether an injected object was called with a specific argument. But in my case the injected object will create an object, that object will create another object, and I want to test whether that last object was called with the right argument.
In the example below the question would be whether c.drive was called with 100 as its argument:
class car:
    """A drivable vehicle (PEP 8 would name this Car)."""

    def drive(self, distance):
        # distance is accepted but unused; the method only prints.
        print("so fast")
class car_shop:
    """Factory that sells car instances."""

    def buy_car(self):
        """Return a brand-new car."""
        return car()
class shop_shop:
    """Factory-of-factories: sells car_shop instances."""

    def buy_shop(self):
        """Return a new car_shop."""
        return car_shop()
class processor:
    """Drives a car obtained through a chain of shop factories."""

    def __init__(self, sshop):
        # sshop: factory whose buy_shop() yields a car shop.
        self.sshop = sshop

    def run(self):
        """Buy a shop, buy a car from it, and drive it 100 units."""
        car_store = self.sshop.buy_shop()
        vehicle = car_store.buy_car()
        vehicle.drive(100)
def main():
    """Wire up the shop chain and run the processor once."""
    proc = processor(shop_shop())
    proc.run()


if __name__ == "__main__":
    main()
is there a way to test that?
Since this was requested here my approach for testing these objects:
import pytest
from unittest.mock import Mock
from object_returns_object_test_for_arguments import processor, shop_shop
@pytest.fixture
def mock_shop_shop():
    """A shop_shop stand-in; spec= keeps its attribute surface honest.

    NOTE(review): '#pytest.fixture' restored to '@pytest.fixture' —
    markdown mangled the decorator.
    """
    return Mock(spec=shop_shop)
def test_processor_car_called_with_100(mock_shop_shop):
    """run() must drive the purchased car exactly once, with 100."""
    proc = processor(mock_shop_shop)
    proc.run()
    # The car is the return value of buy_car() on the return value of
    # buy_shop(); child mocks are reached via .return_value, not by
    # attribute names such as .car_shop.car (which a spec'd mock would
    # reject anyway).
    drive = mock_shop_shop.buy_shop.return_value.buy_car.return_value.drive
    # assert_called_once_with returns None and raises on failure —
    # wrapping it in a bare ``assert`` would always fail.
    drive.assert_called_once_with(100)
    assert drive.call_count == 1
If using just the code shown in the question, you only have to mock car.drive. This could be done for example this way:
from unittest import mock
from object_returns_object_test_for_arguments import processor, shop_shop
@mock.patch('object_returns_object_test_for_arguments.car.drive')
def test_processor_car_called_with_100(drive_mock):
    """Patching car.drive directly lets us assert on its arguments.

    ('#mock.patch' restored to '@mock.patch'; markdown mangled it.)
    """
    proc = processor(shop_shop())
    proc.run()
    drive_mock.assert_called_once_with(100)
As I don't know your real code, you may have to mock more stuff.
As an aside: class names in Python are written upper-case, camelcase-style by default.
I have a situation which I could not find on Stack Overflow:
some_module.py:
class SomeModule():
    # a/b/c deliberately take no parameters: bring_them_altogether reaches
    # them through ``cls``, which yields the plain functions, so they are
    # invoked with zero arguments.
    def a(): pass
    def b(): pass
    def c(): pass

    # '#classmethod' restored to '@classmethod' (markdown-mangled
    # decorator); without it, calling via the class would fail.
    @classmethod
    def bring_them_altogether(cls):
        """Invoke a(), b() and c() in order."""
        cls.a()
        cls.b()
        cls.c()
I am writing a unittest and would like to test the method bring_them_altogether() by determining that methods a(), b() & c() are all called once.
In my testcase i have this:
test.py:
@patch('<...>.some_module.SomeModule', autospec=True)
def test_bring_them_altogether(self, mock_class):
    # NOTE(review): '#patch' restored to '@patch' (mangled decorator);
    # '<...>' is the asker's path placeholder. mock_class.called only
    # becomes True when the mock itself is called (i.e. SomeModule(...)
    # is instantiated) — invoking a method on it does not flip it,
    # which is why this assertion sees False.
    mock_class.bring_them_altogether()
    self.assertTrue(mock_class.called)  # reported as False
I want to know how to write a proper test case for bring_them_altogether() as i seem to be having some issues getting the output i want.
On that note, I want to know how to determine that a mocked-method has been called X - number of times.
Thank you for pointing me in the right direction
You should use patch.object to patch a, b, c methods of SomeModule class. Check if they are called or not after executing the bring_them_altogether method.
E.g.
some_module.py:
class SomeModule():
    # a/b/c take no parameters: bring_them_altogether reaches them through
    # ``cls``, which yields the plain functions, so they are invoked with
    # zero arguments.
    def a(): pass
    def b(): pass
    def c(): pass

    # '#classmethod' restored to '@classmethod' (markdown-mangled
    # decorator).
    @classmethod
    def bring_them_altogether(cls):
        """Invoke a(), b() and c() in order."""
        cls.a()
        cls.b()
        cls.c()
test_some_module.py:
import unittest
from unittest.mock import patch
from some_module import SomeModule
class TestSomeModule(unittest.TestCase):
    """bring_them_altogether must call a, b and c exactly once each."""

    # The three '#patch.object' lines were markdown-mangled decorators;
    # restored to '@patch.object'. Stacked decorators apply bottom-up,
    # so the mock arguments arrive in the order a, b, c.
    @patch.object(SomeModule, 'c')
    @patch.object(SomeModule, 'b')
    @patch.object(SomeModule, 'a')
    def test_bring_them_altogether(self, mock_a, mock_b, mock_c):
        SomeModule.bring_them_altogether()
        mock_a.assert_called_once()
        mock_b.assert_called_once()
        mock_c.assert_called_once()


if __name__ == '__main__':
    unittest.main()
unit test result with coverage report:
.
----------------------------------------------------------------------
Ran 1 test in 0.001s
OK
Name Stmts Miss Cover Missing
------------------------------------------------------------------------------
src/stackoverflow/58950420/some_module.py 8 0 100%
src/stackoverflow/58950420/test_some_module.py 13 0 100%
------------------------------------------------------------------------------
TOTAL 21 0 100%
I am trying to parametrize a test which is being generated from the
cmdline options in conftest.py.
#!/usr/bin/env python
import pytest
import test
def pytest_addoption(parser):
    """Register the --low / --high integer command line options."""
    for flag, text in (("--low", "low"), ("--high", "high")):
        parser.addoption(flag, action="store", type=int, help=text)
#pytest.fixture(scope="session", autouse=True)
def user(request):
return request.config.getoption("low")
#pytest.fixture(scope="session", autouse=True)
def rang(request):
return request.config.getoption("high")
#test_file.py
def data(low, high):
    """Return the integers low, low+1, ..., high-1 as a list."""
    return [*range(low, high)]
#pytest.mark.parametrize("num", data(10, 20))
def test(num):
assert num < 1000
I would like run a command like "pytest --low=10 --high=100 test_file.py". Code is working fine with #pytest.mark.parametrize("num", data(x, y)) for the range of values between x and y. I don't want to provide any values in parametrization other than low and high. If I code something like #pytest.mark.parametrize("num", data(low, high)), it throws an error. Is there any way I can get this parametrization work? I know code works when we generate list outside of a method. But I want to write a method for generating list and use that list inside parametrization.
Is there any way I can access these low and high cmdline options anywhere in the test_file.py ?
You can parametrize the test using the pytest_generate_tests hook. Withing the hook, you will have access to the command line args.
# conftest.py
def pytest_addoption(parser):
    """Expose --low and --high as integer command line options."""
    common = dict(action="store", type=int)
    parser.addoption("--low", help="low", **common)
    parser.addoption("--high", help="high", **common)
def pytest_generate_tests(metafunc):
    """Parametrize 'num' over [--low, --high) read from the CLI options."""
    if 'num' not in metafunc.fixturenames:
        return  # only tests that request 'num' are parametrized
    get = metafunc.config.getoption
    metafunc.parametrize('num', range(get('low'), get('high')))
# test_file.py
def test_spam(num):
    # num is injected per-case by the pytest_generate_tests hook above;
    # the bare truthiness check fails only for num == 0.
    assert num
Another possibility is to access the args via pytest.config, although note that this is a deprecated feature that will be removed soon:
import pytest
def data():
    """Build the parametrization range from the --low/--high CLI options.

    NOTE: relies on the global ``pytest.config``, which is deprecated
    (and removed in modern pytest) — prefer the pytest_generate_tests
    hook shown above.
    """
    lo = pytest.config.getoption('low')
    hi = pytest.config.getoption('high')
    return list(range(lo, hi))
@pytest.mark.parametrize('num', data())
def test_spam(num):
    # Decorator restored from the mangled '#pytest.mark.parametrize'.
    assert num