#
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function

import difflib
import filecmp
import glob
import imp
import multiprocessing
import os
import posixpath
import re
import shutil
import subprocess

import adb
import ndk
import util

# pylint: disable=no-self-use


def _get_jobs_arg():
    return '-j{}'.format(multiprocessing.cpu_count() * 2)


def _make_subtest_name(test, case):
    return '.'.join([test, case])


def _scan_test_suite(suite_dir, test_class, *args):
    tests = []
    for dentry in os.listdir(suite_dir):
        path = os.path.join(suite_dir, dentry)
        if os.path.isdir(path):
            tests.append(test_class.from_dir(path, *args))
    return tests


class TestRunner(object):
    def __init__(self):
        self.tests = {}

    def add_suite(self, name, path, test_class, *args):
        if name in self.tests:
            raise KeyError('suite {} already exists'.format(name))
        self.tests[name] = _scan_test_suite(path, test_class, *args)

    def _fixup_expected_failure(self, result, config, bug):
        if isinstance(result, Failure):
            return ExpectedFailure(result.test_name, config, bug)
        elif isinstance(result, Success):
            return UnexpectedSuccess(result.test_name, config, bug)
        else:  # Skipped, UnexpectedSuccess, or ExpectedFailure.
            return result

    def _run_test(self, test, out_dir, test_filters):
        if not test_filters.filter(test.name):
            return []

        config = test.check_unsupported()
        if config is not None:
            message = 'test unsupported for {}'.format(config)
            return [Skipped(test.name, message)]

        results = test.run(out_dir, test_filters)
        config, bug = test.check_broken()
        if config is None:
            return results

        # We need to check each individual test case for pass/fail and change
        # it to either an ExpectedFailure or an UnexpectedSuccess as necessary.
        return [self._fixup_expected_failure(r, config, bug) for r in results]

    def run(self, out_dir, test_filters):
        results = {suite: [] for suite in self.tests.keys()}
        for suite, tests in self.tests.items():
            test_results = []
            for test in tests:
                test_results.extend(self._run_test(test, out_dir,
                                                   test_filters))
            results[suite] = test_results
        return results


def _maybe_color(text, color, do_color):
    return util.color_string(text, color) if do_color else text


class TestResult(object):
    def __init__(self, test_name):
        self.test_name = test_name

    def __repr__(self):
        return self.to_string(colored=False)

    def passed(self):
        raise NotImplementedError

    def failed(self):
        raise NotImplementedError

    def to_string(self, colored=False):
        raise NotImplementedError


class Failure(TestResult):
    def __init__(self, test_name, message):
        super(Failure, self).__init__(test_name)
        self.message = message

    def passed(self):
        return False

    def failed(self):
        return True

    def to_string(self, colored=False):
        label = _maybe_color('FAIL', 'red', colored)
        return '{} {}: {}'.format(label, self.test_name, self.message)


class Success(TestResult):
    def passed(self):
        return True

    def failed(self):
        return False

    def to_string(self, colored=False):
        label = _maybe_color('PASS', 'green', colored)
        return '{} {}'.format(label, self.test_name)


class Skipped(TestResult):
    def __init__(self, test_name, reason):
        super(Skipped, self).__init__(test_name)
        self.reason = reason

    def passed(self):
        return False

    def failed(self):
        return False

    def to_string(self, colored=False):
        label = _maybe_color('SKIP', 'yellow', colored)
        return '{} {}: {}'.format(label, self.test_name, self.reason)


class ExpectedFailure(TestResult):
    def __init__(self, test_name, config, bug):
        super(ExpectedFailure, self).__init__(test_name)
        self.config = config
        self.bug = bug

    def passed(self):
        return True

    def failed(self):
        return False

    def to_string(self, colored=False):
        label = _maybe_color('KNOWN FAIL', 'yellow', colored)
        return '{} {}: known failure for {} ({})'.format(
            label, self.test_name, self.config, self.bug)


class UnexpectedSuccess(TestResult):
    def __init__(self, test_name, config, bug):
        super(UnexpectedSuccess, self).__init__(test_name)
        self.config = config
        self.bug = bug

    def passed(self):
        return False

    def failed(self):
        return True

    def to_string(self, colored=False):
        label = _maybe_color('SHOULD FAIL', 'red', colored)
        return '{} {}: unexpected success for {} ({})'.format(
            label, self.test_name, self.config, self.bug)


class Test(object):
    def __init__(self, name, test_dir):
        self.name = name
        self.test_dir = test_dir
        self.config = self.get_test_config()

    def get_test_config(self):
        return TestConfig.from_test_dir(self.test_dir)

    def run(self, out_dir, test_filters):
        raise NotImplementedError

    def check_broken(self):
        return self.config.match_broken(self.abi, self.platform,
                                        self.toolchain)

    def check_unsupported(self):
        return self.config.match_unsupported(self.abi, self.platform,
                                             self.toolchain)

    def check_subtest_broken(self, name):
        return self.config.match_broken(self.abi, self.platform,
                                        self.toolchain, subtest=name)

    def check_subtest_unsupported(self, name):
        return self.config.match_unsupported(self.abi, self.platform,
                                             self.toolchain, subtest=name)


class AwkTest(Test):
    def __init__(self, name, test_dir, script):
        super(AwkTest, self).__init__(name, test_dir)
        self.script = script

    @classmethod
    def from_dir(cls, test_dir):
        test_name = os.path.basename(test_dir)
        script_name = test_name + '.awk'
        script = os.path.join(ndk.NDK_ROOT, 'build/awk', script_name)
        if not os.path.isfile(script):
            msg = '{} missing test script: {}'.format(test_name, script)
            raise RuntimeError(msg)

        # Check that all of our test cases are valid.
        for test_case in glob.glob(os.path.join(test_dir, '*.in')):
            golden_path = re.sub(r'\.in$', '.out', test_case)
            if not os.path.isfile(golden_path):
                msg = '{} missing output: {}'.format(test_name, golden_path)
                raise RuntimeError(msg)
        return cls(test_name, test_dir, script)

    # Awk tests only run in a single configuration. Disabling them per ABI,
    # platform, or toolchain has no meaning. Stub out the checks.
    def check_broken(self):
        return None, None

    def check_unsupported(self):
        return None

    def run(self, out_dir, test_filters):
        results = []
        for test_case in glob.glob(os.path.join(self.test_dir, '*.in')):
            golden_path = re.sub(r'\.in$', '.out', test_case)
            result = self.run_case(out_dir, test_case, golden_path,
                                   test_filters)
            if result is not None:
                results.append(result)
        return results

    def run_case(self, out_dir, test_case, golden_out_path, test_filters):
        case_name = os.path.splitext(os.path.basename(test_case))[0]
        name = _make_subtest_name(self.name, case_name)

        if not test_filters.filter(name):
            return None

        out_path = os.path.join(out_dir, os.path.basename(golden_out_path))

        with open(test_case, 'r') as test_in, open(out_path, 'w') as out_file:
            awk_path = ndk.get_tool('awk')
            print('{} -f {} < {} > {}'.format(
                awk_path, self.script, test_case, out_path))
            rc = subprocess.call([awk_path, '-f', self.script], stdin=test_in,
                                 stdout=out_file)
            if rc != 0:
                return Failure(name, 'awk failed')

        if filecmp.cmp(out_path, golden_out_path):
            return Success(name)
        else:
            with open(out_path) as out_file:
                out_lines = out_file.readlines()
            with open(golden_out_path) as golden_out_file:
                golden_lines = golden_out_file.readlines()
            diff = ''.join(difflib.unified_diff(
                golden_lines, out_lines, fromfile='expected', tofile='actual'))
            message = 'output does not match expected:\n\n' + diff
            return Failure(name, message)


def _prep_build_dir(src_dir, out_dir):
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    shutil.copytree(src_dir, out_dir)


class TestConfig(object):
    """Describes the status of a test.

    Each test directory can contain a "test_config.py" file that describes
    the configurations a test is not expected to pass for. Previously this
    information could be captured in one of two places: the Application.mk
    file, or a BROKEN_BUILD/BROKEN_RUN file.

    Application.mk was used to state that a test was only to be run for a
    specific platform version, specific toolchain, or a set of ABIs.
    Unfortunately Application.mk could only specify a single toolchain or
    platform, not a set.

    BROKEN_BUILD/BROKEN_RUN files were too general. An empty file meant the
    test should always be skipped regardless of configuration. Any change that
    would put a test in that situation should be reverted immediately. These
    also didn't make it clear if the test was actually broken (and thus should
    be fixed) or just not applicable.

    A test_config.py file is more flexible. It is a Python module that defines
    at least one function by the same name as one in TestConfig.NullTestConfig.
    If a function is not defined, the null implementation (not broken,
    supported) will be used.
    """

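    # As an illustration only, a hypothetical test_config.py might look like
    # the following. The function names mirror NullTestConfig below; the ABI,
    # toolchain, and bug values are made-up examples, not real entries:
    #
    #     def match_broken(abi, platform, toolchain, subtest=None):
    #         if abi == 'armeabi':
    #             return abi, 'http://b/12345678'
    #         return None, None
    #
    #     def match_unsupported(abi, platform, toolchain, subtest=None):
    #         if toolchain == 'clang':
    #             return toolchain
    #         return None
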
    class NullTestConfig(object):
        def __init__(self):
            pass

        # pylint: disable=unused-argument
        @staticmethod
        def match_broken(abi, platform, toolchain, subtest=None):
            """Tests if a given configuration is known broken.

            A broken test is a known failing test that should be fixed.

            Any test with a non-empty broken section requires a "bug" entry
            with a link to either an internal bug (http://b/BUG_NUMBER) or a
            public bug (http://b.android.com/BUG_NUMBER).

            These tests will still be built and run. If the test succeeds, it
            will be reported as an error.

            Returns: A tuple of (broken_configuration, bug) or (None, None).
            """
            return None, None

        @staticmethod
        def match_unsupported(abi, platform, toolchain, subtest=None):
            """Tests if a given configuration is unsupported.

            An unsupported test is a test that does not make sense to run for
            a given configuration. Testing x86 assembler on MIPS, for example.

            These tests will not be built or run.

            Returns: The string unsupported_configuration or None.
            """
            return None
        # pylint: enable=unused-argument

    def __init__(self, file_path):

        # Note that this namespace isn't actually meaningful from our side;
        # it's only what the loaded module's __name__ gets set to.
        dirname = os.path.dirname(file_path)
        namespace = '.'.join([dirname, 'test_config'])

        try:
            self.module = imp.load_source(namespace, file_path)
        except IOError:
            self.module = None

        try:
            self.match_broken = self.module.match_broken
        except AttributeError:
            self.match_broken = self.NullTestConfig.match_broken

        try:
            self.match_unsupported = self.module.match_unsupported
        except AttributeError:
            self.match_unsupported = self.NullTestConfig.match_unsupported

    @classmethod
    def from_test_dir(cls, test_dir):
        path = os.path.join(test_dir, 'test_config.py')
        return cls(path)


class DeviceTestConfig(TestConfig):
    """Specialization of test_config.py that includes device API level.

    We need to mark some tests as broken or unsupported based on what device
    they are running on, as opposed to just what they were built for.
    """
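    # A sketch of what such a test_config.py could contain. How
    # device_platform is encoded and the particular values checked here are
    # assumptions for illustration, not taken from any real test:
    #
    #     def match_unsupported(abi, platform, device_platform, toolchain,
    #                           subtest=None):
    #         if device_platform in ('android-9', 'android-10'):
    #             return device_platform
    #         return None
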
    class NullTestConfig(object):
        def __init__(self):
            pass

        # pylint: disable=unused-argument
        @staticmethod
        def match_broken(abi, platform, device_platform, toolchain,
                         subtest=None):
            return None, None

        @staticmethod
        def match_unsupported(abi, platform, device_platform, toolchain,
                              subtest=None):
            return None
        # pylint: enable=unused-argument


def _run_build_sh_test(test_name, build_dir, test_dir, build_flags, abi,
                       platform, toolchain):
    _prep_build_dir(test_dir, build_dir)
    with util.cd(build_dir):
        build_cmd = ['sh', 'build.sh', _get_jobs_arg()] + build_flags
        test_env = dict(os.environ)
        if abi is not None:
            test_env['APP_ABI'] = abi
        if platform is not None:
            test_env['APP_PLATFORM'] = platform
        assert toolchain is not None
        test_env['NDK_TOOLCHAIN_VERSION'] = toolchain
        rc, out = util.call_output(build_cmd, env=test_env)
        if rc == 0:
            return Success(test_name)
        else:
            return Failure(test_name, out)


def _run_ndk_build_test(test_name, build_dir, test_dir, build_flags, abi,
                        platform, toolchain):
    _prep_build_dir(test_dir, build_dir)
    with util.cd(build_dir):
        args = [
            'APP_ABI=' + abi,
            'NDK_TOOLCHAIN_VERSION=' + toolchain,
            _get_jobs_arg(),
        ]
        if platform is not None:
            args.append('APP_PLATFORM=' + platform)
        rc, out = ndk.build(build_flags + args)
        if rc == 0:
            return Success(test_name)
        else:
            return Failure(test_name, out)


class PythonBuildTest(Test):
    """A test that is implemented by test.py.

    A test.py test has a test.py file in its root directory. This module
    contains a run_test function which returns a tuple of `(boolean_success,
    string_failure_message)` and takes the following kwargs (all of which
    default to None):

    abi: ABI to test as a string.
    platform: Platform to build against as a string.
    toolchain: Toolchain to use as a string.
    build_flags: Additional build flags, as a list of strings, that should be
                 passed to ndk-build if it is invoked.
    """
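    # A minimal sketch of a conforming test.py, assuming it reuses the ndk
    # helper module from this repository; the body below is illustrative
    # only, not an existing test:
    #
    #     import ndk
    #
    #     def run_test(abi=None, platform=None, toolchain=None,
    #                  build_flags=None):
    #         if build_flags is None:
    #             build_flags = []
    #         rc, out = ndk.build(build_flags + ['APP_ABI=' + abi])
    #         return rc == 0, out
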
    def __init__(self, name, test_dir, abi, platform, toolchain, build_flags):
        super(PythonBuildTest, self).__init__(name, test_dir)
        self.abi = abi
        self.platform = platform
        self.toolchain = toolchain
        self.build_flags = build_flags

    def run(self, out_dir, _):
        build_dir = os.path.join(out_dir, self.name)
        print('Running build test: {}'.format(self.name))
        _prep_build_dir(self.test_dir, build_dir)
        with util.cd(build_dir):
            module = imp.load_source('test', 'test.py')
            success, failure_message = module.run_test(
                abi=self.abi, platform=self.platform, toolchain=self.toolchain,
                build_flags=self.build_flags)
            if success:
                return [Success(self.name)]
            else:
                return [Failure(self.name, failure_message)]


class ShellBuildTest(Test):
    def __init__(self, name, test_dir, abi, platform, toolchain, build_flags):
        super(ShellBuildTest, self).__init__(name, test_dir)
        self.abi = abi
        self.platform = platform
        self.toolchain = toolchain
        self.build_flags = build_flags

    def run(self, out_dir, _):
        build_dir = os.path.join(out_dir, self.name)
        print('Running build test: {}'.format(self.name))
        if os.name == 'nt':
            reason = 'build.sh tests are not supported on Windows'
            return [Skipped(self.name, reason)]
        return [_run_build_sh_test(self.name, build_dir, self.test_dir,
                                   self.build_flags, self.abi, self.platform,
                                   self.toolchain)]


class NdkBuildTest(Test):
    def __init__(self, name, test_dir, abi, platform, toolchain, build_flags):
        super(NdkBuildTest, self).__init__(name, test_dir)
        self.abi = abi
        self.platform = platform
        self.toolchain = toolchain
        self.build_flags = build_flags

    def run(self, out_dir, _):
        build_dir = os.path.join(out_dir, self.name)
        print('Running build test: {}'.format(self.name))
        return [_run_ndk_build_test(self.name, build_dir, self.test_dir,
                                    self.build_flags, self.abi,
                                    self.platform, self.toolchain)]


class BuildTest(object):
    @classmethod
    def from_dir(cls, test_dir, abi, platform, toolchain, build_flags):
        test_name = os.path.basename(test_dir)

        if os.path.isfile(os.path.join(test_dir, 'test.py')):
            return PythonBuildTest(test_name, test_dir, abi, platform,
                                   toolchain, build_flags)
        elif os.path.isfile(os.path.join(test_dir, 'build.sh')):
            return ShellBuildTest(test_name, test_dir, abi, platform,
                                  toolchain, build_flags)
        else:
            return NdkBuildTest(test_name, test_dir, abi, platform,
                                toolchain, build_flags)


def _copy_test_to_device(build_dir, device_dir, abi, test_filters, test_name):
    abi_dir = os.path.join(build_dir, 'libs', abi)
    if not os.path.isdir(abi_dir):
        raise RuntimeError('No libraries for {}'.format(abi))

    test_cases = []
    for test_file in os.listdir(abi_dir):
        if test_file in ('gdbserver', 'gdb.setup'):
            continue

        # Anything that is not a shared library is a test executable: it is
        # subject to filtering and must be made executable on the device.
        file_is_lib = True
        if not test_file.endswith('.so'):
            file_is_lib = False
            case_name = _make_subtest_name(test_name, test_file)
            if not test_filters.filter(case_name):
                continue
            test_cases.append(test_file)

        # TODO(danalbert): Libs with the same name will clobber each other.
        # This was the case with the old shell based script too. I'm trying not
        # to change too much in the translation.
        lib_path = os.path.join(abi_dir, test_file)
        print('\tPushing {} to {}...'.format(lib_path, device_dir))
        adb.push(lib_path, device_dir)

        # Binaries pushed from Windows may not have execute permissions.
        if not file_is_lib:
            file_path = posixpath.join(device_dir, test_file)
            adb.shell('chmod +x ' + file_path)

        # TODO(danalbert): Sync data.
        # The libc++ tests contain a DATA file that lists test names and their
        # dependencies on file system data. These files need to be copied to
        # the device.

    if len(test_cases) == 0:
        raise RuntimeError('Could not find any test executables.')

    return test_cases


class DeviceTest(Test):
    def __init__(self, name, test_dir, abi, platform, device_platform,
                 toolchain, build_flags):
        super(DeviceTest, self).__init__(name, test_dir)
        self.abi = abi
        self.platform = platform
        self.device_platform = device_platform
        self.toolchain = toolchain
        self.build_flags = build_flags

    @classmethod
    def from_dir(cls, test_dir, abi, platform, device_platform, toolchain,
                 build_flags):
        test_name = os.path.basename(test_dir)
        return cls(test_name, test_dir, abi, platform, device_platform,
                   toolchain, build_flags)

    def get_test_config(self):
        return DeviceTestConfig.from_test_dir(self.test_dir)

    def check_broken(self):
        return self.config.match_broken(self.abi, self.platform,
                                        self.device_platform,
                                        self.toolchain)

    def check_unsupported(self):
        return self.config.match_unsupported(self.abi, self.platform,
                                             self.device_platform,
                                             self.toolchain)

    def check_subtest_broken(self, name):
        return self.config.match_broken(self.abi, self.platform,
                                        self.device_platform,
                                        self.toolchain, subtest=name)

    def check_subtest_unsupported(self, name):
        return self.config.match_unsupported(self.abi, self.platform,
                                             self.device_platform,
                                             self.toolchain, subtest=name)

    def run(self, out_dir, test_filters):
        print('Running device test: {}'.format(self.name))
        build_dir = os.path.join(out_dir, self.name)
        build_result = _run_ndk_build_test(self.name, build_dir, self.test_dir,
                                           self.build_flags, self.abi,
                                           self.platform, self.toolchain)
        if not build_result.passed():
            return [build_result]

        device_dir = posixpath.join('/data/local/tmp/ndk-tests', self.name)

        # We have to use `ls foo || mkdir foo` because Gingerbread was lacking
        # `mkdir -p`, the -d check for directory existence, stat, dirname, and
        # every other thing I could think of to implement this aside from ls.
        result, out = adb.shell('ls {0} || mkdir {0}'.format(device_dir))
        if result != 0:
            raise RuntimeError('mkdir failed:\n' + '\n'.join(out))

        results = []
        try:
            test_cases = _copy_test_to_device(
                build_dir, device_dir, self.abi, test_filters, self.name)
            for case in test_cases:
                case_name = _make_subtest_name(self.name, case)
                if not test_filters.filter(case_name):
                    continue

                config = self.check_subtest_unsupported(case)
                if config is not None:
                    message = 'test unsupported for {}'.format(config)
                    results.append(Skipped(case_name, message))
                    continue

                cmd = 'cd {} && LD_LIBRARY_PATH={} ./{}'.format(
                    device_dir, device_dir, case)
                print('\tExecuting {}...'.format(case_name))
                result, out = adb.shell(cmd)

                config, bug = self.check_subtest_broken(case)
                if config is None:
                    if result == 0:
                        results.append(Success(case_name))
                    else:
                        results.append(Failure(case_name, '\n'.join(out)))
                else:
                    if result == 0:
                        results.append(UnexpectedSuccess(case_name, config,
                                                         bug))
                    else:
                        results.append(ExpectedFailure(case_name, config, bug))
            return results
        finally:
            adb.shell('rm -r {}'.format(device_dir))