Home | History | Annotate | Download | only in common_lib
      1 #!/usr/bin/python
      2 #pylint: disable-msg=C0111
      3 """Unit Tests for autotest.client.common_lib.test"""
      4 
      5 __author__ = 'gps (at] google.com (Gregory P. Smith)'
      6 
      7 import json
      8 import tempfile
      9 import unittest
     10 import common
     11 from autotest_lib.client.common_lib import test
     12 from autotest_lib.client.common_lib.test_utils import mock
     13 
     14 class TestTestCase(unittest.TestCase):
     15     class _neutered_base_test(test.base_test):
     16         """A child class of base_test to avoid calling the constructor."""
     17         def __init__(self, *args, **kwargs):
     18             class MockJob(object):
     19                 pass
     20             class MockProfilerManager(object):
     21                 def active(self):
     22                     return False
     23                 def present(self):
     24                     return True
     25             self.job = MockJob()
     26             self.job.default_profile_only = False
     27             self.job.profilers = MockProfilerManager()
     28             self.job.test_retry = 0
     29             self.job.fast = False
     30             self._new_keyval = False
     31             self.iteration = 0
     32             self.tagged_testname = 'neutered_base_test'
     33             self.before_iteration_hooks = []
     34             self.after_iteration_hooks = []
     35 
     36 
     37     def setUp(self):
     38         self.god = mock.mock_god()
     39         self.test = self._neutered_base_test()
     40 
     41 
     42     def tearDown(self):
     43         self.god.unstub_all()
     44 
     45 
     46 
     47 class Test_base_test_execute(TestTestCase):
     48     # Test the various behaviors of the base_test.execute() method.
     49     def setUp(self):
     50         TestTestCase.setUp(self)
     51         self.god.stub_function(self.test, 'run_once_profiling')
     52         self.god.stub_function(self.test, 'postprocess')
     53         self.god.stub_function(self.test, 'process_failed_constraints')
     54 
     55 
     56     def test_call_run_once(self):
     57         # setup
     58         self.god.stub_function(self.test, 'drop_caches_between_iterations')
     59         self.god.stub_function(self.test, 'run_once')
     60         self.god.stub_function(self.test, 'postprocess_iteration')
     61         self.god.stub_function(self.test, 'analyze_perf_constraints')
     62         before_hook = self.god.create_mock_function('before_hook')
     63         after_hook = self.god.create_mock_function('after_hook')
     64         self.test.register_before_iteration_hook(before_hook)
     65         self.test.register_after_iteration_hook(after_hook)
     66 
     67         # tests the test._call_run_once implementation
     68         self.test.drop_caches_between_iterations.expect_call()
     69         before_hook.expect_call(self.test)
     70         self.test.run_once.expect_call(1, 2, arg='val')
     71         self.test.postprocess_iteration.expect_call()
     72         self.test.analyze_perf_constraints.expect_call([])
     73         after_hook.expect_call(self.test)
     74         self.test._call_run_once([], False, None, (1, 2), {'arg': 'val'})
     75         self.god.check_playback()
     76 
     77 
     78     def test_call_run_once_with_exception(self):
     79         # setup
     80         self.god.stub_function(self.test, 'drop_caches_between_iterations')
     81         self.god.stub_function(self.test, 'run_once')
     82         before_hook = self.god.create_mock_function('before_hook')
     83         after_hook = self.god.create_mock_function('after_hook')
     84         self.test.register_before_iteration_hook(before_hook)
     85         self.test.register_after_iteration_hook(after_hook)
     86         error = Exception('fail')
     87 
     88         # tests the test._call_run_once implementation
     89         self.test.drop_caches_between_iterations.expect_call()
     90         before_hook.expect_call(self.test)
     91         self.test.run_once.expect_call(1, 2, arg='val').and_raises(error)
     92         after_hook.expect_call(self.test)
     93         try:
     94             self.test._call_run_once([], False, None, (1, 2), {'arg': 'val'})
     95         except:
     96             pass
     97         self.god.check_playback()
     98 
     99 
    100     def _expect_call_run_once(self):
    101         self.test._call_run_once.expect_call((), False, None, (), {})
    102 
    103 
    104     def test_execute_test_length(self):
    105         # test that test_length overrides iterations and works.
    106         self.god.stub_function(self.test, '_call_run_once')
    107 
    108         self._expect_call_run_once()
    109         self._expect_call_run_once()
    110         self._expect_call_run_once()
    111         self.test.run_once_profiling.expect_call(None)
    112         self.test.postprocess.expect_call()
    113         self.test.process_failed_constraints.expect_call()
    114 
    115         fake_time = iter(xrange(4)).next
    116         self.test.execute(iterations=1, test_length=3, _get_time=fake_time)
    117         self.god.check_playback()
    118 
    119 
    120     def test_execute_iterations(self):
    121         # test that iterations works.
    122         self.god.stub_function(self.test, '_call_run_once')
    123 
    124         iterations = 2
    125         for _ in range(iterations):
    126             self._expect_call_run_once()
    127         self.test.run_once_profiling.expect_call(None)
    128         self.test.postprocess.expect_call()
    129         self.test.process_failed_constraints.expect_call()
    130 
    131         self.test.execute(iterations=iterations)
    132         self.god.check_playback()
    133 
    134 
    135     def _mock_calls_for_execute_no_iterations(self):
    136         self.test.run_once_profiling.expect_call(None)
    137         self.test.postprocess.expect_call()
    138         self.test.process_failed_constraints.expect_call()
    139 
    140 
    141     def test_execute_iteration_zero(self):
    142         # test that iterations=0 works.
    143         self._mock_calls_for_execute_no_iterations()
    144 
    145         self.test.execute(iterations=0)
    146         self.god.check_playback()
    147 
    148 
    149     def test_execute_profile_only(self):
    150         # test that profile_only=True works.
    151         self.god.stub_function(self.test, 'drop_caches_between_iterations')
    152         self.test.drop_caches_between_iterations.expect_call()
    153         self.test.run_once_profiling.expect_call(None)
    154         self.test.drop_caches_between_iterations.expect_call()
    155         self.test.run_once_profiling.expect_call(None)
    156         self.test.postprocess.expect_call()
    157         self.test.process_failed_constraints.expect_call()
    158         self.test.execute(profile_only=True, iterations=2)
    159         self.god.check_playback()
    160 
    161 
    162     def test_execute_default_profile_only(self):
    163         # test that profile_only=True works.
    164         self.god.stub_function(self.test, 'drop_caches_between_iterations')
    165         for _ in xrange(3):
    166             self.test.drop_caches_between_iterations.expect_call()
    167             self.test.run_once_profiling.expect_call(None)
    168         self.test.postprocess.expect_call()
    169         self.test.process_failed_constraints.expect_call()
    170         self.test.job.default_profile_only = True
    171         self.test.execute(iterations=3)
    172         self.god.check_playback()
    173 
    174 
    175     def test_execute_postprocess_profiled_false(self):
    176         # test that postprocess_profiled_run=False works
    177         self.god.stub_function(self.test, '_call_run_once')
    178 
    179         self.test._call_run_once.expect_call((), False, False, (), {})
    180         self.test.run_once_profiling.expect_call(False)
    181         self.test.postprocess.expect_call()
    182         self.test.process_failed_constraints.expect_call()
    183 
    184         self.test.execute(postprocess_profiled_run=False, iterations=1)
    185         self.god.check_playback()
    186 
    187 
    188     def test_execute_postprocess_profiled_true(self):
    189         # test that postprocess_profiled_run=True works
    190         self.god.stub_function(self.test, '_call_run_once')
    191 
    192         self.test._call_run_once.expect_call((), False, True, (), {})
    193         self.test.run_once_profiling.expect_call(True)
    194         self.test.postprocess.expect_call()
    195         self.test.process_failed_constraints.expect_call()
    196 
    197         self.test.execute(postprocess_profiled_run=True, iterations=1)
    198         self.god.check_playback()
    199 
    200 
    201     def test_output_single_perf_value(self):
    202         self.test.resultsdir = tempfile.mkdtemp()
    203 
    204         self.test.output_perf_value("Test", 1, units="ms", higher_is_better=True)
    205 
    206         f = open(self.test.resultsdir + "/results-chart.json")
    207         expected_result = {"Test": {"summary": {"units": "ms", "type": "scalar",
    208                            "value": 1, "improvement_direction": "up"}}}
    209         self.assertDictEqual(expected_result, json.loads(f.read()))
    210 
    211 
    212     def test_output_single_perf_value_twice(self):
    213         self.test.resultsdir = tempfile.mkdtemp()
    214 
    215         self.test.output_perf_value("Test", 1, units="ms", higher_is_better=True)
    216         self.test.output_perf_value("Test", 2, units="ms", higher_is_better=True)
    217 
    218         f = open(self.test.resultsdir + "/results-chart.json")
    219         expected_result = {"Test": {"summary": {"units": "ms",
    220                            "type": "list_of_scalar_values", "values": [1, 2],
    221                            "improvement_direction": "up"}}}
    222         self.assertDictEqual(expected_result, json.loads(f.read()))
    223 
    224 
    225     def test_output_single_perf_value_three_times(self):
    226         self.test.resultsdir = tempfile.mkdtemp()
    227 
    228         self.test.output_perf_value("Test", 1, units="ms",
    229                                     higher_is_better=True)
    230         self.test.output_perf_value("Test", 2, units="ms", higher_is_better=True)
    231         self.test.output_perf_value("Test", 3, units="ms", higher_is_better=True)
    232 
    233         f = open(self.test.resultsdir + "/results-chart.json")
    234         expected_result = {"Test": {"summary": {"units": "ms",
    235                            "type": "list_of_scalar_values", "values": [1, 2, 3],
    236                            "improvement_direction": "up"}}}
    237         self.assertDictEqual(expected_result, json.loads(f.read()))
    238 
    239 
    240     def test_output_list_perf_value(self):
    241         self.test.resultsdir = tempfile.mkdtemp()
    242 
    243         self.test.output_perf_value("Test", [1, 2, 3], units="ms",
    244                                     higher_is_better=False)
    245 
    246         f = open(self.test.resultsdir + "/results-chart.json")
    247         expected_result = {"Test": {"summary": {"units": "ms",
    248                            "type": "list_of_scalar_values", "values": [1, 2, 3],
    249                            "improvement_direction": "down"}}}
    250         self.assertDictEqual(expected_result, json.loads(f.read()))
    251 
    252 
    253     def test_output_single_then_list_perf_value(self):
    254         self.test.resultsdir = tempfile.mkdtemp()
    255         self.test.output_perf_value("Test", 1, units="ms",
    256                                     higher_is_better=False)
    257         self.test.output_perf_value("Test", [4, 3, 2], units="ms",
    258                                     higher_is_better=False)
    259         f = open(self.test.resultsdir + "/results-chart.json")
    260         expected_result = {"Test": {"summary": {"units": "ms",
    261                            "type": "list_of_scalar_values",
    262                            "values": [1, 4, 3, 2],
    263                            "improvement_direction": "down"}}}
    264         self.assertDictEqual(expected_result, json.loads(f.read()))
    265 
    266 
    267     def test_output_list_then_list_perf_value(self):
    268         self.test.resultsdir = tempfile.mkdtemp()
    269         self.test.output_perf_value("Test", [1, 2, 3], units="ms",
    270                                     higher_is_better=False)
    271         self.test.output_perf_value("Test", [4, 3, 2], units="ms",
    272                                     higher_is_better=False)
    273         f = open(self.test.resultsdir + "/results-chart.json")
    274         expected_result = {"Test": {"summary": {"units": "ms",
    275                            "type": "list_of_scalar_values",
    276                            "values": [1, 2, 3, 4, 3, 2],
    277                            "improvement_direction": "down"}}}
    278         self.assertDictEqual(expected_result, json.loads(f.read()))
    279 
    280 
    281     def test_output_single_perf_value_input_string(self):
    282         self.test.resultsdir = tempfile.mkdtemp()
    283 
    284         self.test.output_perf_value("Test", u'-0.34', units="ms",
    285                                     higher_is_better=True)
    286 
    287         f = open(self.test.resultsdir + "/results-chart.json")
    288         expected_result = {"Test": {"summary": {"units": "ms", "type": "scalar",
    289                            "value": -0.34, "improvement_direction": "up"}}}
    290         self.assertDictEqual(expected_result, json.loads(f.read()))
    291 
    292 
    293     def test_output_single_perf_value_input_list_of_string(self):
    294         self.test.resultsdir = tempfile.mkdtemp()
    295 
    296         self.test.output_perf_value("Test", [0, u'-0.34', 1], units="ms",
    297                                     higher_is_better=True)
    298 
    299         f = open(self.test.resultsdir + "/results-chart.json")
    300         expected_result = {"Test": {"summary": {"units": "ms",
    301                            "type": "list_of_scalar_values",
    302                            "values": [0, -0.34, 1],
    303                            "improvement_direction": "up"}}}
    304         self.assertDictEqual(expected_result, json.loads(f.read()))
    305 
    306     def test_output_list_then_replace_list_perf_value(self):
    307         self.test.resultsdir = tempfile.mkdtemp()
    308         self.test.output_perf_value("Test", [1, 2, 3], units="ms",
    309                                     higher_is_better=False)
    310         self.test.output_perf_value("Test", [4, 5, 6], units="ms",
    311                                     higher_is_better=False,
    312                                     replace_existing_values=True)
    313         f = open(self.test.resultsdir + "/results-chart.json")
    314         expected_result = {"Test": {"summary": {"units": "ms",
    315                            "type": "list_of_scalar_values",
    316                            "values": [4, 5, 6],
    317                            "improvement_direction": "down"}}}
    318         self.assertDictEqual(expected_result, json.loads(f.read()))
    319 
    320     def test_output_single_then_replace_list_perf_value(self):
    321         self.test.resultsdir = tempfile.mkdtemp()
    322         self.test.output_perf_value("Test", 3, units="ms",
    323                                     higher_is_better=False)
    324         self.test.output_perf_value("Test", [4, 5, 6], units="ms",
    325                                     higher_is_better=False,
    326                                     replace_existing_values=True)
    327         f = open(self.test.resultsdir + "/results-chart.json")
    328         expected_result = {"Test": {"summary": {"units": "ms",
    329                            "type": "list_of_scalar_values",
    330                            "values": [4, 5, 6],
    331                            "improvement_direction": "down"}}}
    332         self.assertDictEqual(expected_result, json.loads(f.read()))
    333 
    334     def test_output_list_then_replace_single_perf_value(self):
    335         self.test.resultsdir = tempfile.mkdtemp()
    336         self.test.output_perf_value("Test", [1,2,3], units="ms",
    337                                     higher_is_better=False)
    338         self.test.output_perf_value("Test", 4, units="ms",
    339                                     higher_is_better=False,
    340                                     replace_existing_values=True)
    341         f = open(self.test.resultsdir + "/results-chart.json")
    342         expected_result = {"Test": {"summary": {"units": "ms",
    343                            "type": "scalar",
    344                            "value": 4,
    345                            "improvement_direction": "down"}}}
    346         self.assertDictEqual(expected_result, json.loads(f.read()))
    347 
    348     def test_output_single_then_replace_single_perf_value(self):
    349         self.test.resultsdir = tempfile.mkdtemp()
    350         self.test.output_perf_value("Test", 1, units="ms",
    351                                     higher_is_better=False)
    352         self.test.output_perf_value("Test", 2, units="ms",
    353                                     higher_is_better=False,
    354                                     replace_existing_values=True)
    355         f = open(self.test.resultsdir + "/results-chart.json")
    356         expected_result = {"Test": {"summary": {"units": "ms",
    357                            "type": "scalar",
    358                            "value": 2,
    359                            "improvement_direction": "down"}}}
    360         self.assertDictEqual(expected_result, json.loads(f.read()))
    361 
    362     def test_output_perf_then_replace_certain_perf_value(self):
    363         self.test.resultsdir = tempfile.mkdtemp()
    364         self.test.output_perf_value("Test1", 1, units="ms",
    365                                     higher_is_better=False)
    366         self.test.output_perf_value("Test2", 2, units="ms",
    367                                     higher_is_better=False)
    368         self.test.output_perf_value("Test3", 3, units="ms",
    369                                     higher_is_better=False)
    370         self.test.output_perf_value("Test2", -1, units="ms",
    371                                     higher_is_better=False,
    372                                     replace_existing_values=True)
    373         f = open(self.test.resultsdir + "/results-chart.json")
    374         expected_result = {"Test1": {"summary":
    375                                        {"units": "ms",
    376                                         "type": "scalar",
    377                                         "value": 1,
    378                                         "improvement_direction": "down"}},
    379                            "Test2": {"summary":
    380                                        {"units": "ms",
    381                                         "type": "scalar",
    382                                         "value": -1,
    383                                         "improvement_direction": "down"}},
    384                            "Test3": {"summary":
    385                                        {"units": "ms",
    386                                         "type": "scalar",
    387                                         "value": 3,
    388                                         "improvement_direction": "down"}}}
    389         self.assertDictEqual(expected_result, json.loads(f.read()))
    390 
    391     def test_chart_supplied(self):
    392         self.test.resultsdir = tempfile.mkdtemp()
    393 
    394         test_data = [("tcp_tx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
    395                      ("tcp_tx", "ch006_mode11B_none", "BT_streaming_audiofile", 5),
    396                      ("tcp_tx", "ch006_mode11B_none", "BT_disconnected_again", 0),
    397                      ("tcp_rx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
    398                      ("tcp_rx", "ch006_mode11B_none", "BT_streaming_audiofile", 8),
    399                      ("tcp_rx", "ch006_mode11B_none", "BT_disconnected_again", 0),
    400                      ("udp_tx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
    401                      ("udp_tx", "ch006_mode11B_none", "BT_streaming_audiofile", 6),
    402                      ("udp_tx", "ch006_mode11B_none", "BT_disconnected_again", 0),
    403                      ("udp_rx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
    404                      ("udp_rx", "ch006_mode11B_none", "BT_streaming_audiofile", 8),
    405                      ("udp_rx", "ch006_mode11B_none", "BT_streaming_audiofile", 9),
    406                      ("udp_rx", "ch006_mode11B_none", "BT_disconnected_again", 0)]
    407 
    408 
    409         for (config_tag, ap_config_tag, bt_tag, drop) in test_data:
    410           self.test.output_perf_value(config_tag + '_' + bt_tag + '_drop',
    411                                       drop, units='percent_drop',
    412                                       higher_is_better=False,
    413                                       graph=ap_config_tag + '_drop')
    414         f = open(self.test.resultsdir + "/results-chart.json")
    415         expected_result = {
    416           "ch006_mode11B_none_drop": {
    417             "udp_tx_BT_streaming_audiofile_drop": {
    418               "units": "percent_drop",
    419               "type": "scalar",
    420               "value": 6.0,
    421               "improvement_direction": "down"
    422             },
    423             "udp_rx_BT_disconnected_again_drop": {
    424               "units": "percent_drop",
    425               "type": "scalar",
    426               "value": 0.0,
    427               "improvement_direction": "down"
    428             },
    429             "tcp_tx_BT_disconnected_again_drop": {
    430               "units": "percent_drop",
    431               "type": "scalar",
    432               "value": 0.0,
    433               "improvement_direction": "down"
    434             },
    435             "tcp_rx_BT_streaming_audiofile_drop": {
    436               "units": "percent_drop",
    437               "type": "scalar",
    438               "value": 8.0,
    439               "improvement_direction": "down"
    440             },
    441             "udp_tx_BT_connected_but_not_streaming_drop": {
    442               "units": "percent_drop",
    443               "type": "scalar",
    444               "value": 0.0,
    445               "improvement_direction": "down"
    446             },
    447             "tcp_tx_BT_connected_but_not_streaming_drop": {
    448               "units": "percent_drop",
    449               "type": "scalar",
    450               "value": 0.0,
    451               "improvement_direction": "down"
    452             },
    453             "udp_tx_BT_disconnected_again_drop": {
    454               "units": "percent_drop",
    455               "type": "scalar",
    456               "value": 0.0,
    457               "improvement_direction": "down"
    458             },
    459             "tcp_tx_BT_streaming_audiofile_drop": {
    460               "units": "percent_drop",
    461               "type": "scalar",
    462               "value": 5.0,
    463               "improvement_direction": "down"
    464             },
    465             "tcp_rx_BT_connected_but_not_streaming_drop": {
    466               "units": "percent_drop",
    467               "type": "scalar",
    468               "value": 0.0,
    469               "improvement_direction": "down"
    470             },
    471             "udp_rx_BT_connected_but_not_streaming_drop": {
    472               "units": "percent_drop",
    473               "type": "scalar",
    474               "value": 0.0,
    475               "improvement_direction": "down"
    476             },
    477             "udp_rx_BT_streaming_audiofile_drop": {
    478               "units": "percent_drop",
    479               "type": "list_of_scalar_values",
    480               "values": [
    481                 8.0,
    482                 9.0
    483               ],
    484               "improvement_direction": "down"
    485             },
    486             "tcp_rx_BT_disconnected_again_drop": {
    487               "units": "percent_drop",
    488               "type": "scalar",
    489               "value": 0.0,
    490               "improvement_direction": "down"
    491             }
    492           }
    493         }
    494         self.maxDiff = None
    495         self.assertDictEqual(expected_result, json.loads(f.read()))
    496 
# Allow running this module directly as a standalone test script.
if __name__ == '__main__':
    unittest.main()
    499