#!/usr/bin/python
#pylint: disable-msg=C0111
"""Unit Tests for autotest.client.common_lib.test"""

__author__ = 'gps@google.com (Gregory P. Smith)'

import json
import tempfile
import unittest
import common
from autotest_lib.client.common_lib import test
from autotest_lib.client.common_lib.test_utils import mock
from autotest_lib.client.common_lib import error as common_lib_error


class TestTestCase(unittest.TestCase):
    class _neutered_base_test(test.base_test):
        """A child class of base_test to avoid calling the constructor."""
        def __init__(self, *args, **kwargs):
            class MockJob(object):
                pass
            class MockProfilerManager(object):
                def active(self):
                    return False
                def present(self):
                    return True
            self.job = MockJob()
            self.job.default_profile_only = False
            self.job.profilers = MockProfilerManager()
            self.job.test_retry = 0
            self.job.fast = False
            self._new_keyval = False
            self.iteration = 0
            self.tagged_testname = 'neutered_base_test'
            self.before_iteration_hooks = []
            self.after_iteration_hooks = []


    def setUp(self):
        self.god = mock.mock_god()
        self.test = self._neutered_base_test()


    def tearDown(self):
        self.god.unstub_all()


class Test_base_test_execute(TestTestCase):
    # Test the various behaviors of the base_test.execute() method.
    def setUp(self):
        TestTestCase.setUp(self)
        self.god.stub_function(self.test, 'run_once_profiling')
        self.god.stub_function(self.test, 'postprocess')
        self.god.stub_function(self.test, 'process_failed_constraints')


    def test_call_run_once(self):
        # setup
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.god.stub_function(self.test, 'run_once')
        self.god.stub_function(self.test, 'postprocess_iteration')
        self.god.stub_function(self.test, 'analyze_perf_constraints')
        before_hook = self.god.create_mock_function('before_hook')
        after_hook = self.god.create_mock_function('after_hook')
        self.test.register_before_iteration_hook(before_hook)
        self.test.register_after_iteration_hook(after_hook)

        # tests the test._call_run_once implementation
        self.test.drop_caches_between_iterations.expect_call()
        before_hook.expect_call(self.test)
        self.test.run_once.expect_call(1, 2, arg='val')
        self.test.postprocess_iteration.expect_call()
        self.test.analyze_perf_constraints.expect_call([])
        after_hook.expect_call(self.test)
        self.test._call_run_once([], False, None, (1, 2), {'arg': 'val'})
        self.god.check_playback()


    def test_call_run_once_with_exception(self):
        # setup
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.god.stub_function(self.test, 'run_once')
        before_hook = self.god.create_mock_function('before_hook')
        after_hook = self.god.create_mock_function('after_hook')
        self.test.register_before_iteration_hook(before_hook)
        self.test.register_after_iteration_hook(after_hook)
        error = Exception('fail')

        # tests the test._call_run_once implementation
        self.test.drop_caches_between_iterations.expect_call()
        before_hook.expect_call(self.test)
        self.test.run_once.expect_call(1, 2, arg='val').and_raises(error)
        after_hook.expect_call(self.test)
        try:
            self.test._call_run_once([], False, None, (1, 2), {'arg': 'val'})
        except:
            pass
        self.god.check_playback()


    def _setup_failed_test_calls(self, fail_count, error):
        """
        Set up failed test calls for use with call_run_once_with_retry.

        @param fail_count: The number of times to mock a failure.
        @param error: The error to raise while failing.
        """
        self.god.stub_function(self.test.job, 'record')
        self.god.stub_function(self.test, '_call_run_once')
        # tests the test._call_run_once implementation
        for run in xrange(0, fail_count):
            self.test._call_run_once.expect_call(
                    [], False, None, (1, 2), {'arg': 'val'}).and_raises(error)
            info_str = 'Run %s failed with %s' % (run, error)
            # On the final run we do not emit this message.
            if (run != self.test.job.test_retry and
                    isinstance(error, common_lib_error.TestFailRetry)):
                self.test.job.record.expect_call('INFO', None, None, info_str)


    def test_call_run_once_with_retry_exception(self):
        """
        Test call_run_once_with_retry simulating a test that will always fail.
        """
        self.test.job.test_retry = 5
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.god.stub_function(self.test, 'run_once')
        before_hook = self.god.create_mock_function('before_hook')
        after_hook = self.god.create_mock_function('after_hook')
        self.test.register_before_iteration_hook(before_hook)
        self.test.register_after_iteration_hook(after_hook)
        error = common_lib_error.TestFailRetry('fail')
        self._setup_failed_test_calls(self.test.job.test_retry+1, error)
        try:
            self.test._call_run_once_with_retry([], False, None, (1, 2),
                                                {'arg': 'val'})
        except Exception as err:
            if err != error:
                raise
        self.god.check_playback()


    def test_call_run_once_with_retry_exception_unretryable(self):
        """
        Test call_run_once_with_retry simulating a test that will always fail
        with a non-retryable exception.
        """
        self.test.job.test_retry = 5
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.god.stub_function(self.test, 'run_once')
        before_hook = self.god.create_mock_function('before_hook')
        after_hook = self.god.create_mock_function('after_hook')
        self.test.register_before_iteration_hook(before_hook)
        self.test.register_after_iteration_hook(after_hook)
        error = common_lib_error.TestFail('fail')
        self._setup_failed_test_calls(1, error)
        try:
            self.test._call_run_once_with_retry([], False, None, (1, 2),
                                                {'arg': 'val'})
        except Exception as err:
            if err != error:
                raise
        self.god.check_playback()


    def test_call_run_once_with_retry_exception_and_pass(self):
        """
        Test call_run_once_with_retry simulating a test that fails at first
        and later passes.
        """
        # Stubbed out for the write_keyval call.
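        # (Assumption: write_keyval writes a keyval file under outputdir, so
        # outputdir presumably just needs to point at a real, writable path.)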
        self.test.outputdir = '/tmp'

        num_to_fail = 2
        self.test.job.test_retry = 5
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.god.stub_function(self.test, 'run_once')
        before_hook = self.god.create_mock_function('before_hook')
        after_hook = self.god.create_mock_function('after_hook')
        self.god.stub_function(self.test, '_call_run_once')
        self.test.register_before_iteration_hook(before_hook)
        self.test.register_after_iteration_hook(after_hook)
        self.god.stub_function(self.test.job, 'record')
        # tests the test._call_run_once implementation
        error = common_lib_error.TestFailRetry('fail')
        self._setup_failed_test_calls(num_to_fail, error)
        # Passing call
        self.test._call_run_once.expect_call([], False, None, (1, 2),
                                             {'arg': 'val'})
        self.test._call_run_once_with_retry([], False, None, (1, 2),
                                            {'arg': 'val'})
        self.god.check_playback()


    def _expect_call_run_once(self):
        self.test._call_run_once.expect_call((), False, None, (), {})


    def test_execute_test_length(self):
        # test that test_length overrides iterations and works.
        self.god.stub_function(self.test, '_call_run_once')

        self._expect_call_run_once()
        self._expect_call_run_once()
        self._expect_call_run_once()
        self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()

        # The fake clock returns 0, 1, 2, 3 on successive calls, so exactly
        # three iterations fit within test_length=3.
        fake_time = iter(xrange(4)).next
        self.test.execute(iterations=1, test_length=3, _get_time=fake_time)
        self.god.check_playback()


    def test_execute_iterations(self):
        # test that iterations works.
        self.god.stub_function(self.test, '_call_run_once')

        iterations = 2
        for _ in range(iterations):
            self._expect_call_run_once()
        self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()

        self.test.execute(iterations=iterations)
        self.god.check_playback()


    def _mock_calls_for_execute_no_iterations(self):
        self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()


    def test_execute_iteration_zero(self):
        # test that iterations=0 works.
        self._mock_calls_for_execute_no_iterations()

        self.test.execute(iterations=0)
        self.god.check_playback()


    def test_execute_profile_only(self):
        # test that profile_only=True works.
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.test.drop_caches_between_iterations.expect_call()
        self.test.run_once_profiling.expect_call(None)
        self.test.drop_caches_between_iterations.expect_call()
        self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()
        self.test.execute(profile_only=True, iterations=2)
        self.god.check_playback()


    def test_execute_default_profile_only(self):
        # test that job.default_profile_only=True works.
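        # With default_profile_only set on the job, every iteration is
        # expected to be profiled (drop caches + run_once_profiling once per
        # iteration) rather than running a single profiling pass at the end.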
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        for _ in xrange(3):
            self.test.drop_caches_between_iterations.expect_call()
            self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()
        self.test.job.default_profile_only = True
        self.test.execute(iterations=3)
        self.god.check_playback()


    def test_execute_postprocess_profiled_false(self):
        # test that postprocess_profiled_run=False works
        self.god.stub_function(self.test, '_call_run_once')

        self.test._call_run_once.expect_call((), False, False, (), {})
        self.test.run_once_profiling.expect_call(False)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()

        self.test.execute(postprocess_profiled_run=False, iterations=1)
        self.god.check_playback()


    def test_execute_postprocess_profiled_true(self):
        # test that postprocess_profiled_run=True works
        self.god.stub_function(self.test, '_call_run_once')

        self.test._call_run_once.expect_call((), False, True, (), {})
        self.test.run_once_profiling.expect_call(True)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()

        self.test.execute(postprocess_profiled_run=True, iterations=1)
        self.god.check_playback()


    def test_output_single_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", 1, units="ms",
                                    higher_is_better=True)

        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {
                "units": "ms", "type": "scalar", "value": 1,
                "improvement_direction": "up"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_perf_value_twice(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", 1, units="ms",
                                    higher_is_better=True)
        self.test.output_perf_value("Test", 2, units="ms",
                                    higher_is_better=True)

        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {
                "units": "ms", "type": "list_of_scalar_values",
                "values": [1, 2], "improvement_direction": "up"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_perf_value_three_times(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", 1, units="ms",
                                    higher_is_better=True)
        self.test.output_perf_value("Test", 2, units="ms",
                                    higher_is_better=True)
        self.test.output_perf_value("Test", 3, units="ms",
                                    higher_is_better=True)

        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {
                "units": "ms", "type": "list_of_scalar_values",
                "values": [1, 2, 3], "improvement_direction": "up"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_list_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", [1, 2, 3], units="ms",
                                    higher_is_better=False)

        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {
                "units": "ms", "type": "list_of_scalar_values",
                "values": [1, 2, 3], "improvement_direction": "down"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_then_list_perf_value(self):
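        # Logging a scalar and then a list under the same description is
        # expected to coerce the summary into a single "list_of_scalar_values"
        # entry, with the new values appended to the existing one.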
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", 1, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", [4, 3, 2], units="ms",
                                    higher_is_better=False)
        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {
                "units": "ms", "type": "list_of_scalar_values",
                "values": [1, 4, 3, 2], "improvement_direction": "down"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_list_then_list_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", [1, 2, 3], units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", [4, 3, 2], units="ms",
                                    higher_is_better=False)
        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {
                "units": "ms", "type": "list_of_scalar_values",
                "values": [1, 2, 3, 4, 3, 2],
                "improvement_direction": "down"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_perf_value_input_string(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", u'-0.34', units="ms",
                                    higher_is_better=True)

        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {
                "units": "ms", "type": "scalar", "value": -0.34,
                "improvement_direction": "up"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_perf_value_input_list_of_string(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", [0, u'-0.34', 1], units="ms",
                                    higher_is_better=True)

        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {
                "units": "ms", "type": "list_of_scalar_values",
                "values": [0, -0.34, 1], "improvement_direction": "up"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_list_then_replace_list_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", [1, 2, 3], units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", [4, 5, 6], units="ms",
                                    higher_is_better=False,
                                    replace_existing_values=True)
        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {
                "units": "ms", "type": "list_of_scalar_values",
                "values": [4, 5, 6], "improvement_direction": "down"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_then_replace_list_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", 3, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", [4, 5, 6], units="ms",
                                    higher_is_better=False,
                                    replace_existing_values=True)
        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {
                "units": "ms", "type": "list_of_scalar_values",
                "values": [4, 5, 6], "improvement_direction": "down"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_list_then_replace_single_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", [1, 2, 3], units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", 4, units="ms",
                                    higher_is_better=False,
                                    replace_existing_values=True)
        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {
                "units": "ms", "type": "scalar", "value": 4,
                "improvement_direction": "down"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_then_replace_single_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", 1, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", 2, units="ms",
                                    higher_is_better=False,
                                    replace_existing_values=True)
        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {"Test": {"summary": {
                "units": "ms", "type": "scalar", "value": 2,
                "improvement_direction": "down"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_perf_then_replace_certain_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test1", 1, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test2", 2, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test3", 3, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test2", -1, units="ms",
                                    higher_is_better=False,
                                    replace_existing_values=True)
        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {
            "Test1": {"summary": {"units": "ms", "type": "scalar", "value": 1,
                                  "improvement_direction": "down"}},
            "Test2": {"summary": {"units": "ms", "type": "scalar", "value": -1,
                                  "improvement_direction": "down"}},
            "Test3": {"summary": {"units": "ms", "type": "scalar", "value": 3,
                                  "improvement_direction": "down"}}}
        self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_chart_supplied(self):
        self.test.resultsdir = tempfile.mkdtemp()

        test_data = [
            ("tcp_tx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
            ("tcp_tx", "ch006_mode11B_none", "BT_streaming_audiofile", 5),
            ("tcp_tx", "ch006_mode11B_none", "BT_disconnected_again", 0),
            ("tcp_rx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
            ("tcp_rx", "ch006_mode11B_none", "BT_streaming_audiofile", 8),
            ("tcp_rx", "ch006_mode11B_none", "BT_disconnected_again", 0),
            ("udp_tx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
            ("udp_tx", "ch006_mode11B_none", "BT_streaming_audiofile", 6),
            ("udp_tx", "ch006_mode11B_none", "BT_disconnected_again", 0),
            ("udp_rx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
            ("udp_rx", "ch006_mode11B_none", "BT_streaming_audiofile", 8),
            ("udp_rx", "ch006_mode11B_none", "BT_streaming_audiofile", 9),
            ("udp_rx", "ch006_mode11B_none", "BT_disconnected_again", 0)]

        for (config_tag, ap_config_tag, bt_tag, drop) in test_data:
            self.test.output_perf_value(config_tag + '_' + bt_tag + '_drop',
                                        drop, units='percent_drop',
                                        higher_is_better=False,
                                        graph=ap_config_tag + '_drop')
        f = open(self.test.resultsdir + "/results-chart.json")
        expected_result = {
            "ch006_mode11B_none_drop": {
                "udp_tx_BT_streaming_audiofile_drop": {
                    "units": "percent_drop",
                    "type": "scalar",
                    "value": 6.0,
                    "improvement_direction": "down"
                },
                "udp_rx_BT_disconnected_again_drop": {
                    "units": "percent_drop",
                    "type": "scalar",
                    "value": 0.0,
                    "improvement_direction": "down"
                },
"tcp_tx_BT_disconnected_again_drop": { 526 "units": "percent_drop", 527 "type": "scalar", 528 "value": 0.0, 529 "improvement_direction": "down" 530 }, 531 "tcp_rx_BT_streaming_audiofile_drop": { 532 "units": "percent_drop", 533 "type": "scalar", 534 "value": 8.0, 535 "improvement_direction": "down" 536 }, 537 "udp_tx_BT_connected_but_not_streaming_drop": { 538 "units": "percent_drop", 539 "type": "scalar", 540 "value": 0.0, 541 "improvement_direction": "down" 542 }, 543 "tcp_tx_BT_connected_but_not_streaming_drop": { 544 "units": "percent_drop", 545 "type": "scalar", 546 "value": 0.0, 547 "improvement_direction": "down" 548 }, 549 "udp_tx_BT_disconnected_again_drop": { 550 "units": "percent_drop", 551 "type": "scalar", 552 "value": 0.0, 553 "improvement_direction": "down" 554 }, 555 "tcp_tx_BT_streaming_audiofile_drop": { 556 "units": "percent_drop", 557 "type": "scalar", 558 "value": 5.0, 559 "improvement_direction": "down" 560 }, 561 "tcp_rx_BT_connected_but_not_streaming_drop": { 562 "units": "percent_drop", 563 "type": "scalar", 564 "value": 0.0, 565 "improvement_direction": "down" 566 }, 567 "udp_rx_BT_connected_but_not_streaming_drop": { 568 "units": "percent_drop", 569 "type": "scalar", 570 "value": 0.0, 571 "improvement_direction": "down" 572 }, 573 "udp_rx_BT_streaming_audiofile_drop": { 574 "units": "percent_drop", 575 "type": "list_of_scalar_values", 576 "values": [ 577 8.0, 578 9.0 579 ], 580 "improvement_direction": "down" 581 }, 582 "tcp_rx_BT_disconnected_again_drop": { 583 "units": "percent_drop", 584 "type": "scalar", 585 "value": 0.0, 586 "improvement_direction": "down" 587 } 588 } 589 } 590 self.maxDiff = None 591 self.assertDictEqual(expected_result, json.loads(f.read())) 592 593 if __name__ == '__main__': 594 unittest.main() 595