# brillo_HWRandom (Autotest server-side test)
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import collections
import logging
import os
import tempfile
from autotest_lib.client.common_lib import error
from autotest_lib.server import test
from autotest_lib.server import utils

     13 
     14 class brillo_HWRandom(test.test):
     15     """Tests that /dev/hw_random is present and passes basic tests."""
     16     version = 1
     17 
     18     # Basic info for a dieharder test.
     19     TestInfo = collections.namedtuple('TestInfo', 'number custom_args')
     20 
     21     # Basic results of a dieharder test.
     22     TestResult = collections.namedtuple('TestResult', 'test_name assessment')
     23 
     24     # Results of a test suite run.
     25     TestSuiteResult = collections.namedtuple('TestSuiteResult',
     26                                              'num_weak num_failed full_output')
     27 
     28     # A list of dieharder tests that can be reasonably constrained to run within
     29     # a sample space of <= 10MB, and the arguments to constrain them. These have
     30     # been applied somewhat naively and over time these can be tweaked if a test
     31     # has a problematic failure rate. In general, since there is only so much
     32     # that can be done within the constraints these tests should be viewed as a
     33     # sanity check and not as a measure of entropy quality. If a hardware RNG
     34     # repeatedly fails this test, it has a big problem and should not be used.
     35     _TEST_LIST = [
     36         TestInfo(number=0, custom_args=['-p', '50']),
     37         TestInfo(number=1, custom_args=['-p', '50', '-t', '50000']),
     38         TestInfo(number=2, custom_args=['-p', '50', '-t', '1000']),
     39         TestInfo(number=3, custom_args=['-p', '50', '-t', '5000']),
     40         TestInfo(number=8, custom_args=['-p', '40']),
     41         TestInfo(number=10, custom_args=[]),
     42         TestInfo(number=11, custom_args=[]),
     43         TestInfo(number=12, custom_args=[]),
     44         TestInfo(number=15, custom_args=['-p', '50', '-t', '50000']),
     45         TestInfo(number=16, custom_args=['-p', '50', '-t', '7000']),
     46         TestInfo(number=17, custom_args=['-p', '50', '-t', '20000']),
     47         TestInfo(number=100, custom_args=['-p', '50', '-t', '50000']),
     48         TestInfo(number=101, custom_args=['-p', '50', '-t', '50000']),
     49         TestInfo(number=102, custom_args=['-p', '50', '-t', '50000']),
     50         TestInfo(number=200, custom_args=['-p', '20', '-t', '20000',
     51                                           '-n', '3']),
     52         TestInfo(number=202, custom_args=['-p', '20', '-t', '20000']),
     53         TestInfo(number=203, custom_args=['-p', '50', '-t', '50000']),
     54         TestInfo(number=204, custom_args=['-p', '200']),
     55         TestInfo(number=205, custom_args=['-t', '512000']),
     56         TestInfo(number=206, custom_args=['-t', '40000', '-n', '64']),
     57         TestInfo(number=207, custom_args=['-t', '300000']),
     58         TestInfo(number=208, custom_args=['-t', '400000']),
     59         TestInfo(number=209, custom_args=['-t', '2000000']),
     60     ]
     61 
     62     def _run_dieharder_test(self, input_file, test_number, custom_args=None):
     63         """Runs a specific dieharder test (locally) and returns the assessment.
     64 
     65         @param input_file: The name of the file containing the data to be tested
     66         @param test_number: A dieharder test number specifying which test to run
     67         @param custom_args: Optional additional arguments for the test
     68 
     69         @returns A list of TestResult
     70 
     71         @raise TestError: An error occurred running the test.
     72         """
     73         command = ['dieharder',
     74                    '-g', '201',
     75                    '-D', 'test_name',
     76                    '-D', 'ntuple',
     77                    '-D', 'assessment',
     78                    '-D', '32768',  # no_whitespace
     79                    '-c', ',',
     80                    '-d', str(test_number),
     81                    '-f', input_file]
     82         if custom_args:
     83             command.extend(custom_args)
     84         command_result = utils.run(command)
     85         if command_result.stderr != '':
     86             raise error.TestError('Error running dieharder: %s' %
     87                                   command_result.stderr.rstrip())
     88         output = command_result.stdout.splitlines()
     89         results = []
     90         for line in output:
     91             fields = line.split(',')
     92             if len(fields) != 3:
     93                 raise error.TestError(
     94                     'dieharder: unexpected output: %s' % line)
     95             results.append(self.TestResult(
     96                 test_name='%s[%s]' % (fields[0], fields[1]),
     97                 assessment=fields[2]))
     98         return results
     99 
    100 
    101     def _run_all_dieharder_tests(self, input_file):
    102         """Runs all the dieharder tests in _TEST_LIST, continuing on failure.
    103 
    104         @param input_file: The name of the file containing the data to be tested
    105 
    106         @returns TestSuiteResult
    107 
    108         @raise TestError: An error occurred running the test.
    109         """
    110         weak = 0
    111         failed = 0
    112         full_output = 'Test Results:\n'
    113         for test_info in self._TEST_LIST:
    114             results = self._run_dieharder_test(input_file,
    115                                                test_info.number,
    116                                                test_info.custom_args)
    117             for test_result in results:
    118                 logging.info('%s: %s', test_result.test_name,
    119                              test_result.assessment)
    120                 full_output += '  %s: %s\n' % test_result
    121                 if test_result.assessment == 'WEAK':
    122                     weak += 1
    123                 elif test_result.assessment == 'FAILED':
    124                     failed += 1
    125                 elif test_result.assessment != 'PASSED':
    126                     raise error.TestError(
    127                         'Unexpected output: %s' % full_output)
    128         logging.info('Total: %d, Weak: %d, Failed: %d',
    129                      len(self._TEST_LIST), weak, failed)
    130         return self.TestSuiteResult(weak, failed, full_output)
    131 
    132     def run_once(self, host=None):
    133         """Runs the test.
    134 
    135         @param host: A host object representing the DUT.
    136 
    137         @raise TestError: An error occurred running the test.
    138         @raise TestFail: The test ran without error but failed.
    139         """
    140         # Grab 10MB of data from /dev/hw_random.
    141         dut_file = '/data/local/tmp/hw_random_output'
    142         host.run('dd count=20480 if=/dev/hw_random of=%s' % dut_file)
    143         with tempfile.NamedTemporaryFile() as local_file:
    144             host.get_file(dut_file, local_file.name)
    145             output_size = os.stat(local_file.name).st_size
    146             if output_size != 0xA00000:
    147                 raise error.TestError(
    148                     'Unexpected output length: %d (expecting %d)',
    149                     output_size, 0xA00000)
    150             # Run the data through each test (even if one fails).
    151             result = self._run_all_dieharder_tests(local_file.name)
    152             if result.num_failed > 0 or result.num_weak > 5:
    153                 raise error.TestFail(result.full_output)
    154