#!/usr/bin/python
#pylint: disable-msg=C0111
"""Unit Tests for autotest.client.common_lib.test"""

__author__ = 'gps@google.com (Gregory P. Smith)'

import json
import tempfile
import unittest
import common
from autotest_lib.client.common_lib import test
from autotest_lib.client.common_lib.test_utils import mock
from autotest_lib.client.common_lib import error as common_lib_error

class TestTestCase(unittest.TestCase):
    class _neutered_base_test(test.base_test):
        """A child class of base_test that skips the real constructor."""
        def __init__(self, *args, **kwargs):
            class MockJob(object):
                pass
            class MockProfilerManager(object):
                def active(self):
                    return False
                def present(self):
                    return True
            self.job = MockJob()
            self.job.default_profile_only = False
            self.job.profilers = MockProfilerManager()
            self.job.test_retry = 0
            self._new_keyval = False
            self.iteration = 0
            self.before_iteration_hooks = []
            self.after_iteration_hooks = []


    def setUp(self):
        self.god = mock.mock_god()
        self.test = self._neutered_base_test()


    def tearDown(self):
        self.god.unstub_all()



class Test_base_test_execute(TestTestCase):
    # Test the various behaviors of the base_test.execute() method.
    def setUp(self):
        TestTestCase.setUp(self)
        self.god.stub_function(self.test, 'run_once_profiling')
        self.god.stub_function(self.test, 'postprocess')
        self.god.stub_function(self.test, 'process_failed_constraints')


    def test_call_run_once(self):
        # setup
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.god.stub_function(self.test, 'run_once')
        self.god.stub_function(self.test, 'postprocess_iteration')
        self.god.stub_function(self.test, 'analyze_perf_constraints')
        before_hook = self.god.create_mock_function('before_hook')
        after_hook = self.god.create_mock_function('after_hook')
        self.test.register_before_iteration_hook(before_hook)
        self.test.register_after_iteration_hook(after_hook)

        # tests the test._call_run_once implementation
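        # Note: the positional arguments passed below are assumed to map to
        # _call_run_once(constraints, profile_only, postprocess_profiled_run,
        # args, dargs) in base_test; the leading empty list is the constraints.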
        self.test.drop_caches_between_iterations.expect_call()
        before_hook.expect_call(self.test)
        self.test.run_once.expect_call(1, 2, arg='val')
        self.test.postprocess_iteration.expect_call()
        self.test.analyze_perf_constraints.expect_call([])
        after_hook.expect_call(self.test)
        self.test._call_run_once([], False, None, (1, 2), {'arg': 'val'})
        self.god.check_playback()


    def test_call_run_once_with_exception(self):
        # setup
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.god.stub_function(self.test, 'run_once')
        before_hook = self.god.create_mock_function('before_hook')
        after_hook = self.god.create_mock_function('after_hook')
        self.test.register_before_iteration_hook(before_hook)
        self.test.register_after_iteration_hook(after_hook)
        error = Exception('fail')

        # tests the test._call_run_once implementation
        self.test.drop_caches_between_iterations.expect_call()
        before_hook.expect_call(self.test)
        self.test.run_once.expect_call(1, 2, arg='val').and_raises(error)
        after_hook.expect_call(self.test)
        try:
            self.test._call_run_once([], False, None, (1, 2), {'arg': 'val'})
        except Exception:
            pass
        self.god.check_playback()


    def _setup_failed_test_calls(self, fail_count, error):
        """
        Set up failed test calls for use with call_run_once_with_retry.

        @param fail_count: The number of times to mock a failure.
        @param error: The error to raise on each failure.
        """
        self.god.stub_function(self.test.job, 'record')
        self.god.stub_function(self.test, '_call_run_once')
        # Set up the expected failing calls to _call_run_once.
        for run in xrange(0, fail_count):
            self.test._call_run_once.expect_call([], False, None, (1, 2),
                                                 {'arg': 'val'}).and_raises(
                                                          error)
            info_str = 'Run %s failed with %s' % (run, error)
            # On the final run we do not emit this message.
            if run != self.test.job.test_retry and isinstance(error,
                                               common_lib_error.TestFailRetry):
                self.test.job.record.expect_call('INFO', None, None, info_str)


    def test_call_run_once_with_retry_exception(self):
        """
        Test call_run_once_with_retry simulating a test that will always fail.
        """
        self.test.job.test_retry = 5
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.god.stub_function(self.test, 'run_once')
        before_hook = self.god.create_mock_function('before_hook')
        after_hook = self.god.create_mock_function('after_hook')
        self.test.register_before_iteration_hook(before_hook)
        self.test.register_after_iteration_hook(after_hook)
        error = common_lib_error.TestFailRetry('fail')
        self._setup_failed_test_calls(self.test.job.test_retry + 1, error)
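        # With test_retry failures plus one more, every retry attempt is
        # exhausted, so _call_run_once_with_retry is expected to re-raise the
        # final error (which the except clause below swallows and verifies).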
        try:
            self.test._call_run_once_with_retry([], False, None, (1, 2),
                                                {'arg': 'val'})
        except Exception as err:
            if err is not error:
                raise
        self.god.check_playback()


    def test_call_run_once_with_retry_exception_unretryable(self):
        """
        Test call_run_once_with_retry simulating a test that will always fail
        with a non-retryable exception.
        """
        self.test.job.test_retry = 5
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.god.stub_function(self.test, 'run_once')
        before_hook = self.god.create_mock_function('before_hook')
        after_hook = self.god.create_mock_function('after_hook')
        self.test.register_before_iteration_hook(before_hook)
        self.test.register_after_iteration_hook(after_hook)
        error = common_lib_error.TestFail('fail')
        self._setup_failed_test_calls(1, error)
        try:
            self.test._call_run_once_with_retry([], False, None, (1, 2),
                                                {'arg': 'val'})
        except Exception as err:
            if err is not error:
                raise
        self.god.check_playback()


    def test_call_run_once_with_retry_exception_and_pass(self):
        """
        Test call_run_once_with_retry simulating a test that fails at first
        and later passes.
        """
        # Stubbed out for the write_keyval call.
        self.test.outputdir = '/tmp'
        self.test.job._tap = None

        num_to_fail = 2
        self.test.job.test_retry = 5
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.god.stub_function(self.test, 'run_once')
        before_hook = self.god.create_mock_function('before_hook')
        after_hook = self.god.create_mock_function('after_hook')
        self.god.stub_function(self.test, '_call_run_once')
        self.test.register_before_iteration_hook(before_hook)
        self.test.register_after_iteration_hook(after_hook)
        self.god.stub_function(self.test.job, 'record')
        # Set up the expected failing calls.
        error = common_lib_error.TestFailRetry('fail')
        self._setup_failed_test_calls(num_to_fail, error)
        # Passing call
        self.test._call_run_once.expect_call([], False, None, (1, 2),
                                             {'arg': 'val'})
        self.test._call_run_once_with_retry([], False, None, (1, 2),
                                            {'arg': 'val'})
        self.god.check_playback()


    def _expect_call_run_once(self):
        self.test._call_run_once.expect_call((), False, None, (), {})


    def test_execute_test_length(self):
        # test that test_length overrides iterations and works.
        self.god.stub_function(self.test, '_call_run_once')

        self._expect_call_run_once()
        self._expect_call_run_once()
        self._expect_call_run_once()
        self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()

        fake_time = iter(xrange(4)).next
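        # The fake clock yields 0, 1, 2, 3 on successive calls; assuming
        # execute() samples _get_time once per iteration, exactly three
        # iterations fit before the elapsed time reaches test_length=3.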
        self.test.execute(iterations=1, test_length=3, _get_time=fake_time)
        self.god.check_playback()


    def test_execute_iterations(self):
        # test that iterations works.
        self.god.stub_function(self.test, '_call_run_once')

        iterations = 2
        for _ in range(iterations):
            self._expect_call_run_once()
        self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()

        self.test.execute(iterations=iterations)
        self.god.check_playback()


    def _mock_calls_for_execute_no_iterations(self):
        self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()


    def test_execute_iteration_zero(self):
        # test that iterations=0 works.
        self._mock_calls_for_execute_no_iterations()

        self.test.execute(iterations=0)
        self.god.check_playback()


    def test_execute_profile_only(self):
        # test that profile_only=True works.
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.test.drop_caches_between_iterations.expect_call()
        self.test.run_once_profiling.expect_call(None)
        self.test.drop_caches_between_iterations.expect_call()
        self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()
        self.test.execute(profile_only=True, iterations=2)
        self.god.check_playback()


    def test_execute_default_profile_only(self):
        # test that job.default_profile_only=True works.
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        for _ in xrange(3):
            self.test.drop_caches_between_iterations.expect_call()
            self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()
        self.test.job.default_profile_only = True
        self.test.execute(iterations=3)
        self.god.check_playback()


    def test_execute_postprocess_profiled_false(self):
        # test that postprocess_profiled_run=False works
        self.god.stub_function(self.test, '_call_run_once')

        self.test._call_run_once.expect_call((), False, False, (), {})
        self.test.run_once_profiling.expect_call(False)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()

        self.test.execute(postprocess_profiled_run=False, iterations=1)
        self.god.check_playback()


    def test_execute_postprocess_profiled_true(self):
        # test that postprocess_profiled_run=True works
        self.god.stub_function(self.test, '_call_run_once')

        self.test._call_run_once.expect_call((), False, True, (), {})
        self.test.run_once_profiling.expect_call(True)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()

        self.test.execute(postprocess_profiled_run=True, iterations=1)
        self.god.check_playback()


    def test_output_single_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", 1, units="ms", higher_is_better=True)
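        # A single scalar is expected to be written to results-chart.json as
        # type "scalar", with higher_is_better=True recorded as
        # improvement_direction "up" (see the expected dict below).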

        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {"units": "ms", "type": "scalar",
                           "value": 1, "improvement_direction": "up"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_perf_value_twice(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", 1, units="ms", higher_is_better=True)
        self.test.output_perf_value("Test", 2, units="ms", higher_is_better=True)

        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values", "values": [1, 2],
                           "improvement_direction": "up"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_perf_value_three_times(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", 1, units="ms",
                                    higher_is_better=True)
        self.test.output_perf_value("Test", 2, units="ms", higher_is_better=True)
        self.test.output_perf_value("Test", 3, units="ms", higher_is_better=True)

        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values", "values": [1, 2, 3],
                           "improvement_direction": "up"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_list_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", [1, 2, 3], units="ms",
                                    higher_is_better=False)

        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values", "values": [1, 2, 3],
                           "improvement_direction": "down"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_then_list_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", 1, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", [4, 3, 2], units="ms",
                                    higher_is_better=False)
        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values",
                           "values": [1, 4, 3, 2],
                           "improvement_direction": "down"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_list_then_list_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", [1, 2, 3], units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", [4, 3, 2], units="ms",
                                    higher_is_better=False)
        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values",
                           "values": [1, 2, 3, 4, 3, 2],
                           "improvement_direction": "down"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_perf_value_input_string(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", u'-0.34', units="ms",
                                    higher_is_better=True)

        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {"units": "ms", "type": "scalar",
                           "value": -0.34, "improvement_direction": "up"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_perf_value_input_list_of_string(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", [0, u'-0.34', 1], units="ms",
                                    higher_is_better=True)

        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values",
                           "values": [0, -0.34, 1],
                           "improvement_direction": "up"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))

    def test_chart_supplied(self):
        self.test.resultsdir = tempfile.mkdtemp()

        test_data = [("tcp_tx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
                     ("tcp_tx", "ch006_mode11B_none", "BT_streaming_audiofile", 5),
                     ("tcp_tx", "ch006_mode11B_none", "BT_disconnected_again", 0),
                     ("tcp_rx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
                     ("tcp_rx", "ch006_mode11B_none", "BT_streaming_audiofile", 8),
                     ("tcp_rx", "ch006_mode11B_none", "BT_disconnected_again", 0),
                     ("udp_tx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
                     ("udp_tx", "ch006_mode11B_none", "BT_streaming_audiofile", 6),
                     ("udp_tx", "ch006_mode11B_none", "BT_disconnected_again", 0),
                     ("udp_rx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
                     ("udp_rx", "ch006_mode11B_none", "BT_streaming_audiofile", 8),
                     ("udp_rx", "ch006_mode11B_none", "BT_streaming_audiofile", 9),
                     ("udp_rx", "ch006_mode11B_none", "BT_disconnected_again", 0)]


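        # Every entry shares the same graph keyword ("ch006_mode11B_none_drop"),
        # so all descriptions are expected to land in one chart; the duplicated
        # udp_rx/BT_streaming_audiofile entry should be merged into a single
        # "list_of_scalar_values" description (see the expected dict below).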
        for (config_tag, ap_config_tag, bt_tag, drop) in test_data:
            self.test.output_perf_value(config_tag + '_' + bt_tag + '_drop',
                                        drop, units='percent_drop',
                                        higher_is_better=False,
                                        graph=ap_config_tag + '_drop')
        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {
          "ch006_mode11B_none_drop": {
            "udp_tx_BT_streaming_audiofile_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 6.0,
              "improvement_direction": "down"
            },
            "udp_rx_BT_disconnected_again_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "tcp_tx_BT_disconnected_again_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "tcp_rx_BT_streaming_audiofile_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 8.0,
              "improvement_direction": "down"
            },
            "udp_tx_BT_connected_but_not_streaming_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "tcp_tx_BT_connected_but_not_streaming_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "udp_tx_BT_disconnected_again_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "tcp_tx_BT_streaming_audiofile_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 5.0,
              "improvement_direction": "down"
            },
            "tcp_rx_BT_connected_but_not_streaming_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "udp_rx_BT_connected_but_not_streaming_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "udp_rx_BT_streaming_audiofile_drop": {
              "units": "percent_drop",
              "type": "list_of_scalar_values",
              "values": [
                8.0,
                9.0
              ],
              "improvement_direction": "down"
            },
            "tcp_rx_BT_disconnected_again_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            }
          }
        }
        self.maxDiff = None
        self.assertDictEqual(expected_result, json.loads(f.read()))

if __name__ == '__main__':
    unittest.main()