# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit tests for run_perf_tests."""

import StringIO
import datetime
import json
import re
import unittest

from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.port.driver import DriverOutput
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner


class MainTest(unittest.TestCase):
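    # create_runner builds a PerfTestsRunner against a TestPort backed by a mock
    # filesystem, pre-populated with the inspector/, Bindings/ and Parser/
    # directories under the PerformanceTests root.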
    def create_runner(self, args=[]):
        options, parsed_args = PerfTestsRunner._parse_args(args)
        test_port = TestPort(host=MockHost(), options=options)
        runner = PerfTestsRunner(args=args, port=test_port)
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
        return runner, test_port

    def _add_file(self, runner, dirname, filename, content=True):
        dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
        runner._host.filesystem.maybe_make_directory(dirname)
        runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content

    def test_collect_tests(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'inspector', 'a_file.html', 'a content')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)

    def _collect_tests_and_sort_test_name(self, runner):
        return sorted([test.test_name() for test in runner._collect_tests()])

    def test_collect_tests_with_multiple_files(self):
        runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])

        def add_file(filename):
            port.host.filesystem.files[runner._host.filesystem.join(runner._base_path, filename)] = 'some content'

        add_file('test1.html')
        add_file('test2.html')
        add_file('test3.html')
        port.host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
        self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])

    def test_collect_tests_with_skipped_list(self):
        runner, port = self.create_runner()

        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])

    def test_collect_tests_with_skipped_list_and_files(self):
        runner, port = self.create_runner(args=['Suite/Test1.html', 'Suite/SkippedTest1.html', 'SkippedSuite/Test1.html'])

        self._add_file(runner, 'SkippedSuite', 'Test1.html')
        self._add_file(runner, 'SkippedSuite', 'Test2.html')
        self._add_file(runner, 'Suite', 'Test1.html')
        self._add_file(runner, 'Suite', 'Test2.html')
        self._add_file(runner, 'Suite', 'SkippedTest1.html')
        self._add_file(runner, 'Suite', 'SkippedTest2.html')
        port.skipped_perf_tests = lambda: ['Suite/SkippedTest1.html', 'Suite/SkippedTest2.html', 'SkippedSuite']
        self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner),
            ['SkippedSuite/Test1.html', 'Suite/SkippedTest1.html', 'Suite/Test1.html'])

    def test_collect_tests_with_ignored_skipped_list(self):
        runner, port = self.create_runner(args=['--force'])

        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])

    def test_default_args(self):
        runner, port = self.create_runner()
        options, args = PerfTestsRunner._parse_args([])
        self.assertTrue(options.build)
        self.assertEqual(options.time_out_ms, 600 * 1000)
        self.assertTrue(options.generate_results)
        self.assertTrue(options.show_results)
        self.assertTrue(options.use_skipped_list)
        self.assertEqual(options.repeat, 1)
        self.assertEqual(options.test_runner_count, DEFAULT_TEST_RUNNER_COUNT)

    def test_parse_args(self):
        runner, port = self.create_runner()
        options, args = PerfTestsRunner._parse_args([
                '--build-directory=folder42',
                '--platform=platform42',
                '--builder-name', 'webkit-mac-1',
                '--build-number=56',
                '--time-out-ms=42',
                '--no-show-results',
                '--reset-results',
                '--output-json-path=a/output.json',
                '--slave-config-json-path=a/source.json',
                '--test-results-server=somehost',
                '--additional-drt-flag=--enable-threaded-parser',
                '--additional-drt-flag=--awesomesauce',
                '--repeat=5',
                '--test-runner-count=5',
                '--debug'])
        self.assertTrue(options.build)
        self.assertEqual(options.build_directory, 'folder42')
        self.assertEqual(options.platform, 'platform42')
        self.assertEqual(options.builder_name, 'webkit-mac-1')
        self.assertEqual(options.build_number, '56')
        self.assertEqual(options.time_out_ms, '42')
        self.assertEqual(options.configuration, 'Debug')
        self.assertFalse(options.show_results)
        self.assertTrue(options.reset_results)
        self.assertEqual(options.output_json_path, 'a/output.json')
        self.assertEqual(options.slave_config_json_path, 'a/source.json')
        self.assertEqual(options.test_results_server, 'somehost')
        self.assertEqual(options.additional_drt_flag, ['--enable-threaded-parser', '--awesomesauce'])
        self.assertEqual(options.repeat, 5)
        self.assertEqual(options.test_runner_count, 5)

    def test_upload_json(self):
        runner, port = self.create_runner()
        port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'

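        # MockFileUploader stands in for the real file uploader: it records which
        # methods were called and lets each sub-case control whether the upload
        # raises or what response body it returns.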
        class MockFileUploader:
            called = []
            upload_single_text_file_throws = False
            upload_single_text_file_return_value = None

            @classmethod
            def reset(cls):
                cls.called = []
                cls.upload_single_text_file_throws = False
                cls.upload_single_text_file_return_value = None

            def __init__(mock, url, timeout):
                self.assertEqual(url, 'https://some.host/some/path')
                self.assertTrue(isinstance(timeout, int) and timeout)
                mock.called.append('FileUploader')

            def upload_single_text_file(mock, filesystem, content_type, filename):
                self.assertEqual(filesystem, port.host.filesystem)
                self.assertEqual(content_type, 'application/json')
                self.assertEqual(filename, 'some.json')
                mock.called.append('upload_single_text_file')
                if mock.upload_single_text_file_throws:
                    raise Exception
                return mock.upload_single_text_file_return_value

        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('OK')
        self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])

        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('Some error')
        output = OutputCapture()
        output.capture_output()
        self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        _, _, logs = output.restore_output()
        self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got a bad response:\nSome error\n')

        # An exception raised by upload_single_text_file shouldn't blow up _upload_json.
        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_throws = True
        self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])

        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "OK"}')
        self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])

        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "SomethingHasFailed", "failureStored": false}')
        output = OutputCapture()
        output.capture_output()
        self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        _, _, logs = output.restore_output()
        serialized_json = json.dumps({'status': 'SomethingHasFailed', 'failureStored': False}, indent=4)
        self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got an error:\n%s\n' % serialized_json)


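# The *TestData classes below pair canned driver output ("text") with the log
# lines ("output") and aggregated results the runner is expected to produce
# from it.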
class InspectorPassTestData:
    text = 'RESULT group_name: test_name= 42 ms'
    output = """Running inspector/pass.html (2 of 2)
RESULT group_name: test_name= 42 ms
Finished: 0.1 s

"""


class EventTargetWrapperTestData:
    text = """Running 20 times
Ignoring warm-up run (1502)
1504
1505
1510
1504
1507
1509
1510
1487
1488
1472
1472
1488
1473
1472
1475
1487
1486
1486
1475
1471

Time:
values 1486, 1471, 1510, 1505, 1478, 1490 ms
avg 1490 ms
median 1488 ms
stdev 15.13935 ms
min 1471 ms
max 1510 ms
"""

    output = """Running Bindings/event-target-wrapper.html (1 of 2)
RESULT Bindings: event-target-wrapper: Time= 1490.0 ms
median= 1488.0 ms, stdev= 14.11751 ms, min= 1471.0 ms, max= 1510.0 ms
Finished: 0.1 s

"""

    results = {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
        'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}}


class SomeParserTestData:
    text = """Running 20 times
Ignoring warm-up run (1115)

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50861 ms
min 1080 ms
max 1120 ms
"""

    output = """Running Parser/some-parser.html (2 of 2)
RESULT Parser: some-parser: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
Finished: 0.1 s

"""


class MemoryTestData:
    text = """Running 20 times
Ignoring warm-up run (1115)

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50861 ms
min 1080 ms
max 1120 ms

JS Heap:
values 825000, 811000, 848000, 837000, 829000 bytes
avg 830000 bytes
median 829000 bytes
stdev 13784.04875 bytes
min 811000 bytes
max 848000 bytes

Malloc:
values 529000, 511000, 548000, 536000, 521000 bytes
avg 529000 bytes
median 529000 bytes
stdev 14124.44689 bytes
min 511000 bytes
max 548000 bytes
"""

    output = """Running 1 tests
Running Parser/memory-test.html (1 of 1)
RESULT Parser: memory-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
RESULT Parser: memory-test: JSHeap= 830000.0 bytes
median= 829000.0 bytes, stdev= 12649.11064 bytes, min= 811000.0 bytes, max= 848000.0 bytes
RESULT Parser: memory-test: Malloc= 529000.0 bytes
median= 529000.0 bytes, stdev= 12961.48139 bytes, min= 511000.0 bytes, max= 548000.0 bytes
Finished: 0.1 s
"""

    results = {'current': [[1080, 1120, 1095, 1101, 1104]] * 4}
    js_heap_results = {'current': [[825000, 811000, 848000, 837000, 829000]] * 4}
    malloc_results = {'current': [[529000, 511000, 548000, 536000, 521000]] * 4}


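# TestDriver fakes the test driver: run_test() returns canned DriverOutput
# chosen by the test file name, so no real DumpRenderTree is launched.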
class TestDriver:
    def run_test(self, driver_input, stop_when_done):
        text = ''
        timeout = False
        crash = False
        if driver_input.test_name.endswith('pass.html'):
            text = InspectorPassTestData.text
        elif driver_input.test_name.endswith('timeout.html'):
            timeout = True
        elif driver_input.test_name.endswith('failed.html'):
            text = None
        elif driver_input.test_name.endswith('tonguey.html'):
            text = 'we are not expecting an output from perf tests but RESULT blablabla'
        elif driver_input.test_name.endswith('crash.html'):
            crash = True
        elif driver_input.test_name.endswith('event-target-wrapper.html'):
            text = EventTargetWrapperTestData.text
        elif driver_input.test_name.endswith('some-parser.html'):
            text = SomeParserTestData.text
        elif driver_input.test_name.endswith('memory-test.html'):
            text = MemoryTestData.text
        return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

    def start(self):
        """do nothing"""

    def stop(self):
        """do nothing"""


class IntegrationTest(unittest.TestCase):
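    # _normalize_output and _load_output_json make log and JSON comparisons
    # stable: run durations become "Finished: 0.1 s" and stdev values are
    # truncated to five decimal places before comparing.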
    def _normalize_output(self, log):
        return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log))

    def _load_output_json(self, runner):
        json_content = runner._host.filesystem.read_text_file(runner._output_json_path())
        return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content))

    def create_runner(self, args=[], driver_class=TestDriver):
        options, parsed_args = PerfTestsRunner._parse_args(args)
        test_port = TestPort(host=MockHost(), options=options)
        test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()

        runner = PerfTestsRunner(args=args, port=test_port)
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')

        return runner, test_port

    def run_test(self, test_name):
        runner, port = self.create_runner()
        tests = [ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name))]
        return runner._run_tests_set(tests) == 0

    def test_run_passing_test(self):
        self.assertTrue(self.run_test('pass.html'))

    def test_run_silent_test(self):
        self.assertFalse(self.run_test('silent.html'))

    def test_run_failed_test(self):
        self.assertFalse(self.run_test('failed.html'))

    def test_run_tonguey_test(self):
        self.assertFalse(self.run_test('tonguey.html'))

    def test_run_timeout_test(self):
        self.assertFalse(self.run_test('timeout.html'))

    def test_run_crash_test(self):
        self.assertFalse(self.run_test('crash.html'))

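    # _tests_for_runner builds test objects from names: paths under inspector/
    # become ChromiumStylePerfTest instances, everything else a plain PerfTest.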
    def _tests_for_runner(self, runner, test_names):
        filesystem = runner._host.filesystem
        tests = []
        for test in test_names:
            path = filesystem.join(runner._base_path, test)
            if test.startswith('inspector/'):
                tests.append(ChromiumStylePerfTest(runner._port, test, path))
            else:
                tests.append(PerfTest(runner._port, test, path))
        return tests

    def test_run_test_set(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, len(tests) - 1)
        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)

    def test_run_test_set_kills_drt_per_run(self):

        class TestDriverWithStopCount(TestDriver):
            stop_count = 0

            def stop(self):
                TestDriverWithStopCount.stop_count += 1

        runner, port = self.create_runner(driver_class=TestDriverWithStopCount)

        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        unexpected_result_count = runner._run_tests_set(tests)

        self.assertEqual(TestDriverWithStopCount.stop_count, 6)

    def test_run_test_set_for_parser_tests(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self._normalize_output(log), EventTargetWrapperTestData.output + SomeParserTestData.output)

    def test_run_memory_test(self):
        runner, port = self.create_runner_and_setup_results_template()
        runner._timestamp = 123456789
        port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner.run()
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n')
        parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests']
        self.assertEqual(parser_tests['memory-test']['metrics']['Time'], MemoryTestData.results)
        self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results)
        self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results)

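    # Shared harness for the JSON-output tests: writes two perf test files, stubs
    # out _upload_json and the timestamps, runs the runner, and checks the exit
    # code, the uploaded flag and (optionally) the produced logs.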
    def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1, compare_logs=True):
        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')

        uploaded = [False]

        def mock_upload_json(hostname, json_path, host_path=None):
            # FIXME: Get rid of the hard-coded perf.webkit.org once we've completed the transition.
            self.assertIn(hostname, ['some.host'])
            self.assertIn(json_path, ['/mock-checkout/output.json'])
            self.assertIn(host_path, [None, '/api/report'])
            uploaded[0] = upload_succeeds
            return upload_succeeds

        runner._upload_json = mock_upload_json
        runner._timestamp = 123456789
        runner._utc_timestamp = datetime.datetime(2013, 2, 8, 15, 19, 37, 460000)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), expected_exit_code)
        finally:
            stdout, stderr, logs = output_capture.restore_output()

        if not expected_exit_code and compare_logs:
            expected_logs = ''
            for i in xrange(repeat):
                runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else ''
                expected_logs += 'Running 2 tests%s\n' % runs + EventTargetWrapperTestData.output + InspectorPassTestData.output
            if results_shown:
                expected_logs += 'MOCK: user.open_url: file://...\n'
            self.assertEqual(self._normalize_output(logs), expected_logs)

        self.assertEqual(uploaded[0], upload_succeeds)

        return logs

    _event_target_wrapper_and_inspector_results = {
        "Bindings":
            {"url": "http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings",
            "tests": {"event-target-wrapper": EventTargetWrapperTestData.results}}}

    def test_run_with_json_output(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])

        filesystem = port.host.filesystem
        self.assertTrue(filesystem.isfile(runner._output_json_path()))
        self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))

    def test_run_with_description(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host', '--description', 'some description'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "description": "some description",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])

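    # Writes a minimal results-template.html and a stub jQuery file so that
    # runner.run() can generate the results page; the runner substitutes the
    # %...% placeholders with the checkout path and the results JSON.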
    def create_runner_and_setup_results_template(self, args=[]):
        runner, port = self.create_runner(args)
        filesystem = port.host.filesystem
        filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
            'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
            '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
        filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
        return runner, port

    def test_run_respects_no_results(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host', '--no-results'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, results_shown=False)
        self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))

    def test_run_generates_json_by_default(self):
        runner, port = self.create_runner_and_setup_results_template()
        filesystem = port.host.filesystem
        output_json_path = runner._output_json_path()
        results_page_path = filesystem.splitext(output_json_path)[0] + '.html'

        self.assertFalse(filesystem.isfile(output_json_path))
        self.assertFalse(filesystem.isfile(results_page_path))

        self._test_run_with_json_output(runner, port.host.filesystem)

        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])

        self.assertTrue(filesystem.isfile(output_json_path))
        self.assertTrue(filesystem.isfile(results_page_path))

    def test_run_merges_output_by_default(self):
        runner, port = self.create_runner_and_setup_results_template()
        filesystem = port.host.filesystem
        output_json_path = runner._output_json_path()

        filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')

        self._test_run_with_json_output(runner, port.host.filesystem)

        self.assertEqual(self._load_output_json(runner), [{"previous": "results"}, {
            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
        self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))

    def test_run_respects_reset_results(self):
        runner, port = self.create_runner_and_setup_results_template(args=["--reset-results"])
        filesystem = port.host.filesystem
        output_json_path = runner._output_json_path()

        filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')

        self._test_run_with_json_output(runner, port.host.filesystem)

        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
        self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))

    def test_run_generates_and_show_results_page(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        page_shown = []
        port.show_results_html_file = lambda path: page_shown.append(path)
        filesystem = port.host.filesystem
        self._test_run_with_json_output(runner, filesystem, results_shown=False)

        expected_entry = {"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}

        self.maxDiff = None
        self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
        self.assertEqual(self._load_output_json(runner), [expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
            'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
            '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
        self.assertEqual(page_shown[0], '/mock-checkout/output.html')

        self._test_run_with_json_output(runner, filesystem, results_shown=False)
        self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
        self.assertEqual(self._load_output_json(runner), [expected_entry, expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
            'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
            '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))

    def test_run_respects_no_show_results(self):
        show_results_html_file = lambda path: page_shown.append(path)

        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        page_shown = []
        port.show_results_html_file = show_results_html_file
        self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
        self.assertEqual(page_shown[0], '/mock-checkout/output.html')

        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--no-show-results'])
        page_shown = []
        port.show_results_html_file = show_results_html_file
        self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
        self.assertEqual(page_shown, [])

    def test_run_with_bad_output_json(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
        port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)

    def test_run_with_slave_config_json(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}, "builderKey": "value"}])

    def test_run_with_bad_slave_config_json(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
        logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
        self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)

    def test_run_with_multiple_repositories(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host'])
        port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        self.assertEqual(self._load_output_json(runner), [{
            "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"webkit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"},
            "some": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])

    def test_run_with_upload_json(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])

        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertEqual(generated_json[0]['platform'], 'platform1')
        self.assertEqual(generated_json[0]['builderName'], 'builder1')
        self.assertEqual(generated_json[0]['buildNumber'], 123)

        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)

    def test_run_with_upload_json_should_generate_perf_webkit_json(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123',
            '--slave-config-json-path=/mock-checkout/slave-config.json'])
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value1"}')

        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertTrue(isinstance(generated_json, list))
        self.assertEqual(len(generated_json), 1)

        output = generated_json[0]
        self.maxDiff = None
        self.assertEqual(output['platform'], 'platform1')
        self.assertEqual(output['buildNumber'], 123)
        self.assertEqual(output['buildTime'], '2013-02-08T15:19:37.460000')
        self.assertEqual(output['builderName'], 'builder1')
        self.assertEqual(output['builderKey'], 'value1')
        self.assertEqual(output['revisions'], {'blink': {'revision': '5678', 'timestamp': '2013-02-01 08:48:05 +0000'}})
        self.assertEqual(output['tests'].keys(), ['Bindings'])
        self.assertEqual(sorted(output['tests']['Bindings'].keys()), ['tests', 'url'])
        self.assertEqual(output['tests']['Bindings']['url'], 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings')
        self.assertEqual(output['tests']['Bindings']['tests'].keys(), ['event-target-wrapper'])
        self.assertEqual(output['tests']['Bindings']['tests']['event-target-wrapper'], {
            'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
            'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}})

    def test_run_with_repeat(self):
        self.maxDiff = None
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host', '--repeat', '5'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True, repeat=5)
        self.assertEqual(self._load_output_json(runner), [
            {"buildTime": "2013-02-08T15:19:37.460000",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
            {"buildTime": "2013-02-08T15:19:37.460000",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
            {"buildTime": "2013-02-08T15:19:37.460000",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
            {"buildTime": "2013-02-08T15:19:37.460000",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
            {"buildTime": "2013-02-08T15:19:37.460000",
            "tests": self._event_target_wrapper_and_inspector_results,
            "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])

    def test_run_with_test_runner_count(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--test-runner-count=3'])
        self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertTrue(isinstance(generated_json, list))
        self.assertEqual(len(generated_json), 1)

        output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current']
        self.assertEqual(len(output), 3)
        expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0]
        for metrics in output:
            self.assertEqual(metrics, expectedMetrics)
    764