# Home | History | Annotate | Download | only in update_payload
      1 # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
      2 # Use of this source code is governed by a BSD-style license that can be
      3 # found in the LICENSE file.
      4 
      5 """Verifying the integrity of a Chrome OS update payload.
      6 
      7 This module is used internally by the main Payload class for verifying the
      8 integrity of an update payload. The interface for invoking the checks is as
      9 follows:
     10 
     11   checker = PayloadChecker(payload)
     12   checker.Run(...)
     13 """
     14 
     15 from __future__ import print_function
     16 
     17 import array
     18 import base64
     19 import hashlib
     20 import itertools
     21 import os
     22 import subprocess
     23 
     24 import common
     25 import error
     26 import format_utils
     27 import histogram
     28 import update_metadata_pb2
     29 
     30 
     31 #
     32 # Constants.
     33 #
     34 
# Identifiers for the individually disableable checks; callers pass these
# via PayloadChecker's disabled_tests argument.
_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
_CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
_CHECK_PAYLOAD_SIG = 'payload-sig'
# The full set of checks that may be disabled.
CHECKS_TO_DISABLE = (
    _CHECK_DST_PSEUDO_EXTENTS,
    _CHECK_MOVE_SAME_SRC_DST_BLOCK,
    _CHECK_PAYLOAD_SIG,
)

# Recognized payload types (see PayloadChecker.payload_type).
_TYPE_FULL = 'full'
_TYPE_DELTA = 'delta'

# Block size assumed when the caller does not supply one (bytes).
_DEFAULT_BLOCK_SIZE = 4096

# Default public key used for verifying payload signatures, expected to live
# next to this module.
_DEFAULT_PUBKEY_BASE_NAME = 'update-payload-key.pub.pem'
_DEFAULT_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
                                         _DEFAULT_PUBKEY_BASE_NAME)

# Maps each supported minor version to the payload types allowed to use it.
_SUPPORTED_MINOR_VERSIONS = {
    0: (_TYPE_FULL,),
    1: (_TYPE_DELTA,),
    2: (_TYPE_DELTA,),
    3: (_TYPE_DELTA,),
    4: (_TYPE_DELTA,),
}

# 2 GiB; presumably a usable-partition-size cap applied to older delta
# payloads — TODO confirm against the (unseen) code that reads it.
_OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024
     63 
     64 #
     65 # Helper functions.
     66 #
     67 
     68 def _IsPowerOfTwo(val):
     69   """Returns True iff val is a power of two."""
     70   return val > 0 and (val & (val - 1)) == 0
     71 
     72 
     73 def _AddFormat(format_func, value):
     74   """Adds a custom formatted representation to ordinary string representation.
     75 
     76   Args:
     77     format_func: A value formatter.
     78     value: Value to be formatted and returned.
     79 
     80   Returns:
     81     A string 'x (y)' where x = str(value) and y = format_func(value).
     82   """
     83   ret = str(value)
     84   formatted_str = format_func(value)
     85   if formatted_str:
     86     ret += ' (%s)' % formatted_str
     87   return ret
     88 
     89 
def _AddHumanReadableSize(size):
  """Adds a human readable representation to a byte size value.

  Thin wrapper over _AddFormat using format_utils.BytesToHumanReadable as
  the formatter, e.g. producing strings like '4096 (4 KiB)'.
  """
  return _AddFormat(format_utils.BytesToHumanReadable, size)
     93 
     94 
     95 #
     96 # Payload report generator.
     97 #
     98 
class _PayloadReport(object):
  """A payload report generator.

  A report is essentially a sequence of nodes, which represent data points. It
  is initialized to have a "global", untitled section. A node may be a
  sub-report itself.
  """

  # Report nodes: Field, sub-report, section.
  class Node(object):
    """A report node interface."""

    @staticmethod
    def _Indent(indent, line):
      """Indents a line by a given indentation amount.

      Args:
        indent: The indentation amount.
        line: The line content (string).

      Returns:
        The properly indented line (string).
      """
      # '%*s' with an empty string emits `indent` spaces of padding.
      return '%*s%s' % (indent, '', line)

    def GenerateLines(self, base_indent, sub_indent, curr_section):
      """Generates the report lines for this node.

      Args:
        base_indent: Base indentation for each line.
        sub_indent: Additional indentation for sub-nodes.
        curr_section: The current report section object.

      Returns:
        A pair consisting of a list of properly indented report lines and a new
        current section object.
      """
      raise NotImplementedError

  class FieldNode(Node):
    """A field report node, representing a (name, value) pair."""

    def __init__(self, name, value, linebreak, indent):
      super(_PayloadReport.FieldNode, self).__init__()
      self.name = name          # Field name; may be empty/None for bare values.
      self.value = value        # Arbitrary value; rendered via str().
      self.linebreak = linebreak  # Whether value starts on its own line.
      self.indent = indent      # Extra indent applied to the value lines.

    def GenerateLines(self, base_indent, sub_indent, curr_section):
      """Generates a properly formatted 'name : value' entry."""
      report_output = ''
      if self.name:
        # Pad the name to the widest field name in the current section so the
        # ':' separators line up (width tracked by AddField).
        report_output += self.name.ljust(curr_section.max_field_name_len) + ' :'
      value_lines = str(self.value).splitlines()
      # NOTE(review): an empty string value makes splitlines() return [], so
      # value_lines[0] below would raise IndexError — confirm callers never
      # pass empty values.
      if self.linebreak and self.name:
        # Value goes on its own line(s), each shifted by self.indent.
        report_output += '\n' + '\n'.join(
            ['%*s%s' % (self.indent, '', line) for line in value_lines])
      else:
        if self.name:
          report_output += ' '
        report_output += '%*s' % (self.indent, '')
        # Continuation lines of a multi-line value are aligned under the
        # first character of the value.
        cont_line_indent = len(report_output)
        indented_value_lines = [value_lines[0]]
        indented_value_lines.extend(['%*s%s' % (cont_line_indent, '', line)
                                     for line in value_lines[1:]])
        report_output += '\n'.join(indented_value_lines)

      report_lines = [self._Indent(base_indent, line + '\n')
                      for line in report_output.split('\n')]
      return report_lines, curr_section

  class SubReportNode(Node):
    """A sub-report node, representing a nested report."""

    def __init__(self, title, report):
      super(_PayloadReport.SubReportNode, self).__init__()
      self.title = title
      self.report = report  # A nested _PayloadReport instance.

    def GenerateLines(self, base_indent, sub_indent, curr_section):
      """Recurse with indentation."""
      report_lines = [self._Indent(base_indent, self.title + ' =>\n')]
      # The nested report manages its own sections, so curr_section is
      # returned unchanged.
      report_lines.extend(self.report.GenerateLines(base_indent + sub_indent,
                                                    sub_indent))
      return report_lines, curr_section

  class SectionNode(Node):
    """A section header node."""

    def __init__(self, title=None):
      super(_PayloadReport.SectionNode, self).__init__()
      self.title = title
      # Widest field name added while this section is current; used by
      # FieldNode to align the ':' separators.
      self.max_field_name_len = 0

    def GenerateLines(self, base_indent, sub_indent, curr_section):
      """Dump a title line, return self as the (new) current section."""
      report_lines = []
      if self.title:
        report_lines.append(self._Indent(base_indent,
                                         '=== %s ===\n' % self.title))
      return report_lines, self

  def __init__(self):
    self.report = []  # Ordered sequence of nodes.
    # The untitled global section doubles as the initial current section.
    self.last_section = self.global_section = self.SectionNode()
    self.is_finalized = False

  def GenerateLines(self, base_indent, sub_indent):
    """Generates the lines in the report, properly indented.

    Args:
      base_indent: The indentation used for root-level report lines.
      sub_indent: The indentation offset used for sub-reports.

    Returns:
      A list of indented report lines.
    """
    report_lines = []
    curr_section = self.global_section
    for node in self.report:
      # Section nodes replace curr_section; other nodes pass it through.
      node_report_lines, curr_section = node.GenerateLines(
          base_indent, sub_indent, curr_section)
      report_lines.extend(node_report_lines)

    return report_lines

  def Dump(self, out_file, base_indent=0, sub_indent=2):
    """Dumps the report to a file.

    Args:
      out_file: File object to output the content to.
      base_indent: Base indentation for report lines.
      sub_indent: Added indentation for sub-reports.
    """
    report_lines = self.GenerateLines(base_indent, sub_indent)
    # Flag reports dumped before Finalize() so partial output is evident.
    if report_lines and not self.is_finalized:
      report_lines.append('(incomplete report)\n')

    for line in report_lines:
      out_file.write(line)

  def AddField(self, name, value, linebreak=False, indent=0):
    """Adds a field/value pair to the payload report.

    Args:
      name: The field's name.
      value: The field's value.
      linebreak: Whether the value should be printed on a new line.
      indent: Amount of extra indent for each line of the value.
    """
    assert not self.is_finalized
    # Keep the current section's alignment width up to date.
    if name and self.last_section.max_field_name_len < len(name):
      self.last_section.max_field_name_len = len(name)
    self.report.append(self.FieldNode(name, value, linebreak, indent))

  def AddSubReport(self, title):
    """Adds and returns a sub-report with a title."""
    assert not self.is_finalized
    sub_report = self.SubReportNode(title, type(self)())
    self.report.append(sub_report)
    # Return the nested _PayloadReport so the caller can populate it.
    return sub_report.report

  def AddSection(self, title):
    """Adds a new section title."""
    assert not self.is_finalized
    self.last_section = self.SectionNode(title)
    self.report.append(self.last_section)

  def Finalize(self):
    """Seals the report, marking it as complete."""
    self.is_finalized = True
    271 
    272 
    273 #
    274 # Payload verification.
    275 #
    276 
    277 class PayloadChecker(object):
    278   """Checking the integrity of an update payload.
    279 
    280   This is a short-lived object whose purpose is to isolate the logic used for
    281   verifying the integrity of an update payload.
    282   """
    283 
    284   def __init__(self, payload, assert_type=None, block_size=0,
    285                allow_unhashed=False, disabled_tests=()):
    286     """Initialize the checker.
    287 
    288     Args:
    289       payload: The payload object to check.
    290       assert_type: Assert that payload is either 'full' or 'delta' (optional).
    291       block_size: Expected filesystem / payload block size (optional).
    292       allow_unhashed: Allow operations with unhashed data blobs.
    293       disabled_tests: Sequence of tests to disable.
    294     """
    295     if not payload.is_init:
    296       raise ValueError('Uninitialized update payload.')
    297 
    298     # Set checker configuration.
    299     self.payload = payload
    300     self.block_size = block_size if block_size else _DEFAULT_BLOCK_SIZE
    301     if not _IsPowerOfTwo(self.block_size):
    302       raise error.PayloadError(
    303           'Expected block (%d) size is not a power of two.' % self.block_size)
    304     if assert_type not in (None, _TYPE_FULL, _TYPE_DELTA):
    305       raise error.PayloadError('Invalid assert_type value (%r).' %
    306                                assert_type)
    307     self.payload_type = assert_type
    308     self.allow_unhashed = allow_unhashed
    309 
    310     # Disable specific tests.
    311     self.check_dst_pseudo_extents = (
    312         _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests)
    313     self.check_move_same_src_dst_block = (
    314         _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests)
    315     self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests
    316 
    317     # Reset state; these will be assigned when the manifest is checked.
    318     self.sigs_offset = 0
    319     self.sigs_size = 0
    320     self.old_rootfs_fs_size = 0
    321     self.old_kernel_fs_size = 0
    322     self.new_rootfs_fs_size = 0
    323     self.new_kernel_fs_size = 0
    324     self.minor_version = None
    325 
    326   @staticmethod
    327   def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str,
    328                  msg_name=None, linebreak=False, indent=0):
    329     """Adds an element from a protobuf message to the payload report.
    330 
    331     Checks to see whether a message contains a given element, and if so adds
    332     the element value to the provided report. A missing mandatory element
    333     causes an exception to be raised.
    334 
    335     Args:
    336       msg: The message containing the element.
    337       name: The name of the element.
    338       report: A report object to add the element name/value to.
    339       is_mandatory: Whether or not this element must be present.
    340       is_submsg: Whether this element is itself a message.
    341       convert: A function for converting the element value for reporting.
    342       msg_name: The name of the message object (for error reporting).
    343       linebreak: Whether the value report should induce a line break.
    344       indent: Amount of indent used for reporting the value.
    345 
    346     Returns:
    347       A pair consisting of the element value and the generated sub-report for
    348       it (if the element is a sub-message, None otherwise). If the element is
    349       missing, returns (None, None).
    350 
    351     Raises:
    352       error.PayloadError if a mandatory element is missing.
    353     """
    354     if not msg.HasField(name):
    355       if is_mandatory:
    356         raise error.PayloadError('%smissing mandatory %s %r.' %
    357                                  (msg_name + ' ' if msg_name else '',
    358                                   'sub-message' if is_submsg else 'field',
    359                                   name))
    360       return None, None
    361 
    362     value = getattr(msg, name)
    363     if is_submsg:
    364       return value, report and report.AddSubReport(name)
    365     else:
    366       if report:
    367         report.AddField(name, convert(value), linebreak=linebreak,
    368                         indent=indent)
    369       return value, None
    370 
    371   @staticmethod
    372   def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str,
    373                            linebreak=False, indent=0):
    374     """Adds a mandatory field; returning first component from _CheckElem."""
    375     return PayloadChecker._CheckElem(msg, field_name, report, True, False,
    376                                      convert=convert, msg_name=msg_name,
    377                                      linebreak=linebreak, indent=indent)[0]
    378 
    379   @staticmethod
    380   def _CheckOptionalField(msg, field_name, report, convert=str,
    381                           linebreak=False, indent=0):
    382     """Adds an optional field; returning first component from _CheckElem."""
    383     return PayloadChecker._CheckElem(msg, field_name, report, False, False,
    384                                      convert=convert, linebreak=linebreak,
    385                                      indent=indent)[0]
    386 
    387   @staticmethod
    388   def _CheckMandatorySubMsg(msg, submsg_name, report, msg_name):
    389     """Adds a mandatory sub-message; wrapper for _CheckElem."""
    390     return PayloadChecker._CheckElem(msg, submsg_name, report, True, True,
    391                                      msg_name)
    392 
    393   @staticmethod
    394   def _CheckOptionalSubMsg(msg, submsg_name, report):
    395     """Adds an optional sub-message; wrapper for _CheckElem."""
    396     return PayloadChecker._CheckElem(msg, submsg_name, report, False, True)
    397 
    398   @staticmethod
    399   def _CheckPresentIff(val1, val2, name1, name2, obj_name):
    400     """Checks that val1 is None iff val2 is None.
    401 
    402     Args:
    403       val1: first value to be compared.
    404       val2: second value to be compared.
    405       name1: name of object holding the first value.
    406       name2: name of object holding the second value.
    407       obj_name: Name of the object containing these values.
    408 
    409     Raises:
    410       error.PayloadError if assertion does not hold.
    411     """
    412     if None in (val1, val2) and val1 is not val2:
    413       present, missing = (name1, name2) if val2 is None else (name2, name1)
    414       raise error.PayloadError('%r present without %r%s.' %
    415                                (present, missing,
    416                                 ' in ' + obj_name if obj_name else ''))
    417 
    418   @staticmethod
    419   def _Run(cmd, send_data=None):
    420     """Runs a subprocess, returns its output.
    421 
    422     Args:
    423       cmd: Sequence of command-line argument for invoking the subprocess.
    424       send_data: Data to feed to the process via its stdin.
    425 
    426     Returns:
    427       A tuple containing the stdout and stderr output of the process.
    428     """
    429     run_process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
    430                                    stdout=subprocess.PIPE)
    431     try:
    432       result = run_process.communicate(input=send_data)
    433     finally:
    434       exit_code = run_process.wait()
    435 
    436     if exit_code:
    437       raise RuntimeError('Subprocess %r failed with code %r.' %
    438                          (cmd, exit_code))
    439 
    440     return result
    441 
    442   @staticmethod
    443   def _CheckSha256Signature(sig_data, pubkey_file_name, actual_hash, sig_name):
    444     """Verifies an actual hash against a signed one.
    445 
    446     Args:
    447       sig_data: The raw signature data.
    448       pubkey_file_name: Public key used for verifying signature.
    449       actual_hash: The actual hash digest.
    450       sig_name: Signature name for error reporting.
    451 
    452     Raises:
    453       error.PayloadError if signature could not be verified.
    454     """
    455     if len(sig_data) != 256:
    456       raise error.PayloadError(
    457           '%s: signature size (%d) not as expected (256).' %
    458           (sig_name, len(sig_data)))
    459     signed_data, _ = PayloadChecker._Run(
    460         ['openssl', 'rsautl', '-verify', '-pubin', '-inkey', pubkey_file_name],
    461         send_data=sig_data)
    462 
    463     if len(signed_data) != len(common.SIG_ASN1_HEADER) + 32:
    464       raise error.PayloadError('%s: unexpected signed data length (%d).' %
    465                                (sig_name, len(signed_data)))
    466 
    467     if not signed_data.startswith(common.SIG_ASN1_HEADER):
    468       raise error.PayloadError('%s: not containing standard ASN.1 prefix.' %
    469                                sig_name)
    470 
    471     signed_hash = signed_data[len(common.SIG_ASN1_HEADER):]
    472     if signed_hash != actual_hash:
    473       raise error.PayloadError(
    474           '%s: signed hash (%s) different from actual (%s).' %
    475           (sig_name, common.FormatSha256(signed_hash),
    476            common.FormatSha256(actual_hash)))
    477 
    478   @staticmethod
    479   def _CheckBlocksFitLength(length, num_blocks, block_size, length_name,
    480                             block_name=None):
    481     """Checks that a given length fits given block space.
    482 
    483     This ensures that the number of blocks allocated is appropriate for the
    484     length of the data residing in these blocks.
    485 
    486     Args:
    487       length: The actual length of the data.
    488       num_blocks: The number of blocks allocated for it.
    489       block_size: The size of each block in bytes.
    490       length_name: Name of length (used for error reporting).
    491       block_name: Name of block (used for error reporting).
    492 
    493     Raises:
    494       error.PayloadError if the aforementioned invariant is not satisfied.
    495     """
    496     # Check: length <= num_blocks * block_size.
    497     if length > num_blocks * block_size:
    498       raise error.PayloadError(
    499           '%s (%d) > num %sblocks (%d) * block_size (%d).' %
    500           (length_name, length, block_name or '', num_blocks, block_size))
    501 
    502     # Check: length > (num_blocks - 1) * block_size.
    503     if length <= (num_blocks - 1) * block_size:
    504       raise error.PayloadError(
    505           '%s (%d) <= (num %sblocks - 1 (%d)) * block_size (%d).' %
    506           (length_name, length, block_name or '', num_blocks - 1, block_size))
    507 
    508   def _CheckManifestMinorVersion(self, report):
    509     """Checks the payload manifest minor_version field.
    510 
    511     Args:
    512       report: The report object to add to.
    513 
    514     Raises:
    515       error.PayloadError if any of the checks fail.
    516     """
    517     self.minor_version = self._CheckOptionalField(self.payload.manifest,
    518                                                   'minor_version', report)
    519     if self.minor_version in _SUPPORTED_MINOR_VERSIONS:
    520       if self.payload_type not in _SUPPORTED_MINOR_VERSIONS[self.minor_version]:
    521         raise error.PayloadError(
    522             'Minor version %d not compatible with payload type %s.' %
    523             (self.minor_version, self.payload_type))
    524     elif self.minor_version is None:
    525       raise error.PayloadError('Minor version is not set.')
    526     else:
    527       raise error.PayloadError('Unsupported minor version: %d' %
    528                                self.minor_version)
    529 
  def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0):
    """Checks the payload manifest.

    Determines whether this is a full or delta payload (from the presence of
    old_{kernel,rootfs}_info), validates partition size/hash fields, and
    records the results on instance attributes (payload_type, sigs_offset,
    sigs_size, {old,new}_{kernel,rootfs}_fs_size, minor_version). Note: this
    method returns nothing; results are exposed via those attributes.

    Args:
      report: A report object to add to.
      rootfs_part_size: Size of the rootfs partition in bytes (0 = unknown,
          which skips the partition-fit checks).
      kernel_part_size: Size of the kernel partition in bytes (0 = unknown).

    Raises:
      error.PayloadError if any of the checks fail.
    """
    manifest = self.payload.manifest
    report.AddSection('manifest')

    # Check: block_size must exist and match the expected value.
    actual_block_size = self._CheckMandatoryField(manifest, 'block_size',
                                                  report, 'manifest')
    if actual_block_size != self.block_size:
      raise error.PayloadError('Block_size (%d) not as expected (%d).' %
                               (actual_block_size, self.block_size))

    # Check: signatures_offset <==> signatures_size (both or neither).
    self.sigs_offset = self._CheckOptionalField(manifest, 'signatures_offset',
                                                report)
    self.sigs_size = self._CheckOptionalField(manifest, 'signatures_size',
                                              report)
    self._CheckPresentIff(self.sigs_offset, self.sigs_size,
                          'signatures_offset', 'signatures_size', 'manifest')

    # Check: old_kernel_info <==> old_rootfs_info (both or neither); their
    # presence is what distinguishes a delta payload from a full one.
    oki_msg, oki_report = self._CheckOptionalSubMsg(manifest,
                                                    'old_kernel_info', report)
    ori_msg, ori_report = self._CheckOptionalSubMsg(manifest,
                                                    'old_rootfs_info', report)
    self._CheckPresentIff(oki_msg, ori_msg, 'old_kernel_info',
                          'old_rootfs_info', 'manifest')
    if oki_msg:  # equivalently, ori_msg
      # Assert/mark delta payload.
      if self.payload_type == _TYPE_FULL:
        raise error.PayloadError(
            'Apparent full payload contains old_{kernel,rootfs}_info.')
      self.payload_type = _TYPE_DELTA

      # Check: {size, hash} present in old_{kernel,rootfs}_info.
      self.old_kernel_fs_size = self._CheckMandatoryField(
          oki_msg, 'size', oki_report, 'old_kernel_info')
      self._CheckMandatoryField(oki_msg, 'hash', oki_report, 'old_kernel_info',
                                convert=common.FormatSha256)
      self.old_rootfs_fs_size = self._CheckMandatoryField(
          ori_msg, 'size', ori_report, 'old_rootfs_info')
      self._CheckMandatoryField(ori_msg, 'hash', ori_report, 'old_rootfs_info',
                                convert=common.FormatSha256)

      # Check: old_{kernel,rootfs} size must fit in respective partition
      # (only when the partition size is known, i.e. nonzero).
      if kernel_part_size and self.old_kernel_fs_size > kernel_part_size:
        raise error.PayloadError(
            'Old kernel content (%d) exceed partition size (%d).' %
            (self.old_kernel_fs_size, kernel_part_size))
      if rootfs_part_size and self.old_rootfs_fs_size > rootfs_part_size:
        raise error.PayloadError(
            'Old rootfs content (%d) exceed partition size (%d).' %
            (self.old_rootfs_fs_size, rootfs_part_size))
    else:
      # Assert/mark full payload.
      if self.payload_type == _TYPE_DELTA:
        raise error.PayloadError(
            'Apparent delta payload missing old_{kernel,rootfs}_info.')
      self.payload_type = _TYPE_FULL

    # Check: new_kernel_info present; contains {size, hash}.
    nki_msg, nki_report = self._CheckMandatorySubMsg(
        manifest, 'new_kernel_info', report, 'manifest')
    self.new_kernel_fs_size = self._CheckMandatoryField(
        nki_msg, 'size', nki_report, 'new_kernel_info')
    self._CheckMandatoryField(nki_msg, 'hash', nki_report, 'new_kernel_info',
                              convert=common.FormatSha256)

    # Check: new_rootfs_info present; contains {size, hash}.
    nri_msg, nri_report = self._CheckMandatorySubMsg(
        manifest, 'new_rootfs_info', report, 'manifest')
    self.new_rootfs_fs_size = self._CheckMandatoryField(
        nri_msg, 'size', nri_report, 'new_rootfs_info')
    self._CheckMandatoryField(nri_msg, 'hash', nri_report, 'new_rootfs_info',
                              convert=common.FormatSha256)

    # Check: new_{kernel,rootfs} size must fit in respective partition.
    if kernel_part_size and self.new_kernel_fs_size > kernel_part_size:
      raise error.PayloadError(
          'New kernel content (%d) exceed partition size (%d).' %
          (self.new_kernel_fs_size, kernel_part_size))
    if rootfs_part_size and self.new_rootfs_fs_size > rootfs_part_size:
      raise error.PayloadError(
          'New rootfs content (%d) exceed partition size (%d).' %
          (self.new_rootfs_fs_size, rootfs_part_size))

    # Check: minor_version makes sense for the payload type. This check should
    # run after the payload type has been set.
    self._CheckManifestMinorVersion(report)
    632 
    633   def _CheckLength(self, length, total_blocks, op_name, length_name):
    634     """Checks whether a length matches the space designated in extents.
    635 
    636     Args:
    637       length: The total length of the data.
    638       total_blocks: The total number of blocks in extents.
    639       op_name: Operation name (for error reporting).
    640       length_name: Length name (for error reporting).
    641 
    642     Raises:
    643       error.PayloadError is there a problem with the length.
    644     """
    645     # Check: length is non-zero.
    646     if length == 0:
    647       raise error.PayloadError('%s: %s is zero.' % (op_name, length_name))
    648 
    649     # Check that length matches number of blocks.
    650     self._CheckBlocksFitLength(length, total_blocks, self.block_size,
    651                                '%s: %s' % (op_name, length_name))
    652 
    653   def _CheckExtents(self, extents, usable_size, block_counters, name,
    654                     allow_pseudo=False, allow_signature=False):
    655     """Checks a sequence of extents.
    656 
    657     Args:
    658       extents: The sequence of extents to check.
    659       usable_size: The usable size of the partition to which the extents apply.
    660       block_counters: Array of counters corresponding to the number of blocks.
    661       name: The name of the extent block.
    662       allow_pseudo: Whether or not pseudo block numbers are allowed.
    663       allow_signature: Whether or not the extents are used for a signature.
    664 
    665     Returns:
    666       The total number of blocks in the extents.
    667 
    668     Raises:
    669       error.PayloadError if any of the entailed checks fails.
    670     """
    671     total_num_blocks = 0
    672     for ex, ex_name in common.ExtentIter(extents, name):
    673       # Check: Mandatory fields.
    674       start_block = PayloadChecker._CheckMandatoryField(ex, 'start_block',
    675                                                         None, ex_name)
    676       num_blocks = PayloadChecker._CheckMandatoryField(ex, 'num_blocks', None,
    677                                                        ex_name)
    678       end_block = start_block + num_blocks
    679 
    680       # Check: num_blocks > 0.
    681       if num_blocks == 0:
    682         raise error.PayloadError('%s: extent length is zero.' % ex_name)
    683 
    684       if start_block != common.PSEUDO_EXTENT_MARKER:
    685         # Check: Make sure we're within the partition limit.
    686         if usable_size and end_block * self.block_size > usable_size:
    687           raise error.PayloadError(
    688               '%s: extent (%s) exceeds usable partition size (%d).' %
    689               (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
    690 
    691         # Record block usage.
    692         for i in xrange(start_block, end_block):
    693           block_counters[i] += 1
    694       elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
    695         # Pseudo-extents must be allowed explicitly, or otherwise be part of a
    696         # signature operation (in which case there has to be exactly one).
    697         raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
    698 
    699       total_num_blocks += num_blocks
    700 
    701     return total_num_blocks
    702 
    703   def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name):
    704     """Specific checks for REPLACE/REPLACE_BZ operations.
    705 
    706     Args:
    707       op: The operation object from the manifest.
    708       data_length: The length of the data blob associated with the operation.
    709       total_dst_blocks: Total number of blocks in dst_extents.
    710       op_name: Operation name for error reporting.
    711 
    712     Raises:
    713       error.PayloadError if any check fails.
    714     """
    715     # Check: Does not contain src extents.
    716     if op.src_extents:
    717       raise error.PayloadError('%s: contains src_extents.' % op_name)
    718 
    719     # Check: Contains data.
    720     if data_length is None:
    721       raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
    722 
    723     if op.type == common.OpType.REPLACE:
    724       PayloadChecker._CheckBlocksFitLength(data_length, total_dst_blocks,
    725                                            self.block_size,
    726                                            op_name + '.data_length', 'dst')
    727     else:
    728       # Check: data_length must be smaller than the alotted dst blocks.
    729       if data_length >= total_dst_blocks * self.block_size:
    730         raise error.PayloadError(
    731             '%s: data_length (%d) must be less than allotted dst block '
    732             'space (%d * %d).' %
    733             (op_name, data_length, total_dst_blocks, self.block_size))
    734 
  def _CheckMoveOperation(self, op, data_offset, total_src_blocks,
                          total_dst_blocks, op_name):
    """Specific checks for MOVE operations.

    A MOVE shuffles existing blocks within the partition and therefore must
    not carry a data blob; additionally, no block may be "moved" onto itself.

    Args:
      op: The operation object from the manifest.
      data_offset: The offset of a data blob for the operation.
      total_src_blocks: Total number of blocks in src_extents.
      total_dst_blocks: Total number of blocks in dst_extents.
      op_name: Operation name for error reporting.

    Raises:
      error.PayloadError if any check fails.
    """
    # Check: No data_{offset,length}.
    if data_offset is not None:
      raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)

    # Check: total_src_blocks == total_dst_blocks.
    if total_src_blocks != total_dst_blocks:
      raise error.PayloadError(
          '%s: total src blocks (%d) != total dst blocks (%d).' %
          (op_name, total_src_blocks, total_dst_blocks))

    # Check: For all i, i-th src block index != i-th dst block index.
    # Walk the src and dst extent lists in lockstep; on each pass, advance by
    # the overlap of the current src/dst extent pair.
    i = 0
    src_extent_iter = iter(op.src_extents)
    dst_extent_iter = iter(op.dst_extents)
    src_extent = dst_extent = None
    src_idx = src_num = dst_idx = dst_num = 0
    while i < total_src_blocks:
      # Get the next source extent, if needed.
      if not src_extent:
        try:
          src_extent = src_extent_iter.next()
        except StopIteration:
          raise error.PayloadError('%s: ran out of src extents (%d/%d).' %
                                   (op_name, i, total_src_blocks))
        src_idx = src_extent.start_block
        src_num = src_extent.num_blocks

      # Get the next dest extent, if needed.
      if not dst_extent:
        try:
          dst_extent = dst_extent_iter.next()
        except StopIteration:
          raise error.PayloadError('%s: ran out of dst extents (%d/%d).' %
                                   (op_name, i, total_dst_blocks))
        dst_idx = dst_extent.start_block
        dst_num = dst_extent.num_blocks

      # Check: start block is not 0. See crbug/480751; there are still versions
      # of update_engine which fail when seeking to 0 in PReadAll and PWriteAll,
      # so we need to fail payloads that try to MOVE to/from block 0.
      if src_idx == 0 or dst_idx == 0:
        raise error.PayloadError(
            '%s: MOVE operation cannot have extent with start block 0' %
            op_name)

      if self.check_move_same_src_dst_block and src_idx == dst_idx:
        raise error.PayloadError(
            '%s: src/dst block number %d is the same (%d).' %
            (op_name, i, src_idx))

      # Advance past the overlapping run; only the extent(s) fully consumed
      # get refilled from their iterator on the next pass.
      advance = min(src_num, dst_num)
      i += advance

      src_idx += advance
      src_num -= advance
      if src_num == 0:
        src_extent = None

      dst_idx += advance
      dst_num -= advance
      if dst_num == 0:
        dst_extent = None

    # Make sure we've exhausted all src/dst extents.
    if src_extent:
      raise error.PayloadError('%s: excess src blocks.' % op_name)
    if dst_extent:
      raise error.PayloadError('%s: excess dst blocks.' % op_name)
    817 
    818   def _CheckAnyDiffOperation(self, data_length, total_dst_blocks, op_name):
    819     """Specific checks for BSDIFF, SOURCE_BSDIFF and IMGDIFF operations.
    820 
    821     Args:
    822       data_length: The length of the data blob associated with the operation.
    823       total_dst_blocks: Total number of blocks in dst_extents.
    824       op_name: Operation name for error reporting.
    825 
    826     Raises:
    827       error.PayloadError if any check fails.
    828     """
    829     # Check: data_{offset,length} present.
    830     if data_length is None:
    831       raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
    832 
    833     # Check: data_length is strictly smaller than the alotted dst blocks.
    834     if data_length >= total_dst_blocks * self.block_size:
    835       raise error.PayloadError(
    836           '%s: data_length (%d) must be smaller than allotted dst space '
    837           '(%d * %d = %d).' %
    838           (op_name, data_length, total_dst_blocks, self.block_size,
    839            total_dst_blocks * self.block_size))
    840 
    841   def _CheckSourceCopyOperation(self, data_offset, total_src_blocks,
    842                                 total_dst_blocks, op_name):
    843     """Specific checks for SOURCE_COPY.
    844 
    845     Args:
    846       data_offset: The offset of a data blob for the operation.
    847       total_src_blocks: Total number of blocks in src_extents.
    848       total_dst_blocks: Total number of blocks in dst_extents.
    849       op_name: Operation name for error reporting.
    850 
    851     Raises:
    852       error.PayloadError if any check fails.
    853     """
    854     # Check: No data_{offset,length}.
    855     if data_offset is not None:
    856       raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
    857 
    858     # Check: total_src_blocks == total_dst_blocks.
    859     if total_src_blocks != total_dst_blocks:
    860       raise error.PayloadError(
    861           '%s: total src blocks (%d) != total dst blocks (%d).' %
    862           (op_name, total_src_blocks, total_dst_blocks))
    863 
    864   def _CheckAnySourceOperation(self, op, total_src_blocks, op_name):
    865     """Specific checks for SOURCE_* operations.
    866 
    867     Args:
    868       op: The operation object from the manifest.
    869       total_src_blocks: Total number of blocks in src_extents.
    870       op_name: Operation name for error reporting.
    871 
    872     Raises:
    873       error.PayloadError if any check fails.
    874     """
    875     # Check: total_src_blocks != 0.
    876     if total_src_blocks == 0:
    877       raise error.PayloadError('%s: no src blocks in a source op.' % op_name)
    878 
    879     # Check: src_sha256_hash present in minor version >= 3.
    880     if self.minor_version >= 3 and op.src_sha256_hash is None:
    881       raise error.PayloadError('%s: source hash missing.' % op_name)
    882 
  def _CheckOperation(self, op, op_name, is_last, old_block_counters,
                      new_block_counters, old_usable_size, new_usable_size,
                      prev_data_offset, allow_signature, blob_hash_counts):
    """Checks a single update operation.

    Validates the operation's extents, its data blob (presence, contiguity
    and hash), and then dispatches to the type-specific checker appropriate
    for the payload's minor version.

    Args:
      op: The operation object.
      op_name: Operation name string for error reporting.
      is_last: Whether this is the last operation in the sequence.
      old_block_counters: Arrays of block read counters.
      new_block_counters: Arrays of block write counters.
      old_usable_size: The overall usable size for src data in bytes.
      new_usable_size: The overall usable size for dst data in bytes.
      prev_data_offset: Offset of last used data bytes.
      allow_signature: Whether this may be a signature operation.
      blob_hash_counts: Counters for hashed/unhashed blobs.

    Returns:
      The amount of data blob associated with the operation.

    Raises:
      error.PayloadError if any check has failed.
    """
    # Check extents.
    total_src_blocks = self._CheckExtents(
        op.src_extents, old_usable_size, old_block_counters,
        op_name + '.src_extents', allow_pseudo=True)
    # A signature blob is only recognized as the last REPLACE operation of a
    # sequence that is allowed to carry one.
    allow_signature_in_extents = (allow_signature and is_last and
                                  op.type == common.OpType.REPLACE)
    total_dst_blocks = self._CheckExtents(
        op.dst_extents, new_usable_size, new_block_counters,
        op_name + '.dst_extents',
        allow_pseudo=(not self.check_dst_pseudo_extents),
        allow_signature=allow_signature_in_extents)

    # Check: data_offset present <==> data_length present.
    data_offset = self._CheckOptionalField(op, 'data_offset', None)
    data_length = self._CheckOptionalField(op, 'data_length', None)
    self._CheckPresentIff(data_offset, data_length, 'data_offset',
                          'data_length', op_name)

    # Check: At least one dst_extent.
    if not op.dst_extents:
      raise error.PayloadError('%s: dst_extents is empty.' % op_name)

    # Check {src,dst}_length, if present.
    if op.HasField('src_length'):
      self._CheckLength(op.src_length, total_src_blocks, op_name, 'src_length')
    if op.HasField('dst_length'):
      self._CheckLength(op.dst_length, total_dst_blocks, op_name, 'dst_length')

    if op.HasField('data_sha256_hash'):
      blob_hash_counts['hashed'] += 1

      # Check: Operation carries data.
      if data_offset is None:
        raise error.PayloadError(
            '%s: data_sha256_hash present but no data_{offset,length}.' %
            op_name)

      # Check: Hash verifies correctly.
      # pylint cannot find the method in hashlib, for some reason.
      # pylint: disable=E1101
      actual_hash = hashlib.sha256(self.payload.ReadDataBlob(data_offset,
                                                             data_length))
      if op.data_sha256_hash != actual_hash.digest():
        raise error.PayloadError(
            '%s: data_sha256_hash (%s) does not match actual hash (%s).' %
            (op_name, common.FormatSha256(op.data_sha256_hash),
             common.FormatSha256(actual_hash.digest())))
    elif data_offset is not None:
      # An unhashed data blob: only tolerated for the signature operation or
      # when the checker was explicitly configured to allow unhashed blobs.
      if allow_signature_in_extents:
        blob_hash_counts['signature'] += 1
      elif self.allow_unhashed:
        blob_hash_counts['unhashed'] += 1
      else:
        raise error.PayloadError('%s: unhashed operation not allowed.' %
                                 op_name)

    if data_offset is not None:
      # Check: Contiguous use of data section.
      if data_offset != prev_data_offset:
        raise error.PayloadError(
            '%s: data offset (%d) not matching amount used so far (%d).' %
            (op_name, data_offset, prev_data_offset))

    # Type-specific checks; each operation type is gated on the minor
    # version(s) in which it is permitted.
    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
    elif op.type == common.OpType.MOVE and self.minor_version == 1:
      self._CheckMoveOperation(op, data_offset, total_src_blocks,
                               total_dst_blocks, op_name)
    elif op.type == common.OpType.BSDIFF and self.minor_version == 1:
      self._CheckAnyDiffOperation(data_length, total_dst_blocks, op_name)
    elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2:
      self._CheckSourceCopyOperation(data_offset, total_src_blocks,
                                     total_dst_blocks, op_name)
      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
    elif op.type == common.OpType.SOURCE_BSDIFF and self.minor_version >= 2:
      self._CheckAnyDiffOperation(data_length, total_dst_blocks, op_name)
      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
    elif op.type == common.OpType.IMGDIFF and self.minor_version >= 4:
      self._CheckAnyDiffOperation(data_length, total_dst_blocks, op_name)
      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
    else:
      raise error.PayloadError(
          'Operation %s (type %d) not allowed in minor version %d' %
          (op_name, op.type, self.minor_version))
    return data_length if data_length is not None else 0
    992 
    993   def _SizeToNumBlocks(self, size):
    994     """Returns the number of blocks needed to contain a given byte size."""
    995     return (size + self.block_size - 1) / self.block_size
    996 
    997   def _AllocBlockCounters(self, total_size):
    998     """Returns a freshly initialized array of block counters.
    999 
   1000     Note that the generated array is not portable as is due to byte-ordering
   1001     issues, hence it should not be serialized.
   1002 
   1003     Args:
   1004       total_size: The total block size in bytes.
   1005 
   1006     Returns:
   1007       An array of unsigned short elements initialized to zero, one for each of
   1008       the blocks necessary for containing the partition.
   1009     """
   1010     return array.array('H',
   1011                        itertools.repeat(0, self._SizeToNumBlocks(total_size)))
   1012 
  def _CheckOperations(self, operations, report, base_name, old_fs_size,
                       new_fs_size, new_usable_size, prev_data_offset,
                       allow_signature):
    """Checks a sequence of update operations.

    Verifies each operation in turn while accumulating per-type counts, blob
    sizes and block-usage counters, then emits summary statistics and
    histograms into the report.

    Args:
      operations: The sequence of operations to check.
      report: The report object to add to.
      base_name: The name of the operation block.
      old_fs_size: The old filesystem size in bytes.
      new_fs_size: The new filesystem size in bytes.
      new_usable_size: The overall usable size of the new partition in bytes.
      prev_data_offset: Offset of last used data bytes.
      allow_signature: Whether this sequence may contain signature operations.

    Returns:
      The total data blob size used.

    Raises:
      error.PayloadError if any of the checks fails.
    """
    # The total size of data blobs used by operations scanned thus far.
    total_data_used = 0
    # Counts of specific operation types.
    op_counts = {
        common.OpType.REPLACE: 0,
        common.OpType.REPLACE_BZ: 0,
        common.OpType.MOVE: 0,
        common.OpType.BSDIFF: 0,
        common.OpType.SOURCE_COPY: 0,
        common.OpType.SOURCE_BSDIFF: 0,
        common.OpType.IMGDIFF: 0,
    }
    # Total blob sizes for each operation type.
    op_blob_totals = {
        common.OpType.REPLACE: 0,
        common.OpType.REPLACE_BZ: 0,
        # MOVE operations don't have blobs.
        common.OpType.BSDIFF: 0,
        # SOURCE_COPY operations don't have blobs.
        common.OpType.SOURCE_BSDIFF: 0,
        common.OpType.IMGDIFF: 0,
    }
    # Counts of hashed vs unhashed operations.
    blob_hash_counts = {
        'hashed': 0,
        'unhashed': 0,
    }
    if allow_signature:
      blob_hash_counts['signature'] = 0

    # Allocate old and new block counters.
    # NOTE(review): the old (read) counters are sized by new_usable_size as
    # well -- confirm this is intentional rather than using an old-partition
    # size here.
    old_block_counters = (self._AllocBlockCounters(new_usable_size)
                          if old_fs_size else None)
    new_block_counters = self._AllocBlockCounters(new_usable_size)

    # Process and verify each operation.
    op_num = 0
    for op, op_name in common.OperationIter(operations, base_name):
      op_num += 1

      # Check: Type is valid.
      if op.type not in op_counts.keys():
        raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type))
      op_counts[op.type] += 1

      is_last = op_num == len(operations)
      # For full payloads (old_fs_size == 0) there is no source partition, so
      # the src usable size passed down is zero.
      curr_data_used = self._CheckOperation(
          op, op_name, is_last, old_block_counters, new_block_counters,
          new_usable_size if old_fs_size else 0, new_usable_size,
          prev_data_offset + total_data_used, allow_signature,
          blob_hash_counts)
      if curr_data_used:
        op_blob_totals[op.type] += curr_data_used
        total_data_used += curr_data_used

    # Report totals and breakdown statistics.
    report.AddField('total operations', op_num)
    report.AddField(
        None,
        histogram.Histogram.FromCountDict(op_counts,
                                          key_names=common.OpType.NAMES),
        indent=1)
    report.AddField('total blobs', sum(blob_hash_counts.values()))
    report.AddField(None,
                    histogram.Histogram.FromCountDict(blob_hash_counts),
                    indent=1)
    report.AddField('total blob size', _AddHumanReadableSize(total_data_used))
    report.AddField(
        None,
        histogram.Histogram.FromCountDict(op_blob_totals,
                                          formatter=_AddHumanReadableSize,
                                          key_names=common.OpType.NAMES),
        indent=1)

    # Report read/write histograms.
    if old_block_counters:
      report.AddField('block read hist',
                      histogram.Histogram.FromKeyList(old_block_counters),
                      linebreak=True, indent=1)

    # Only the blocks covered by the new filesystem are expected to be
    # written; trim the counters to that prefix for the histogram.
    new_write_hist = histogram.Histogram.FromKeyList(
        new_block_counters[:self._SizeToNumBlocks(new_fs_size)])
    report.AddField('block write hist', new_write_hist, linebreak=True,
                    indent=1)

    # Check: Full update must write each dst block once.
    if self.payload_type == _TYPE_FULL and new_write_hist.GetKeys() != [1]:
      raise error.PayloadError(
          '%s: not all blocks written exactly once during full update.' %
          base_name)

    return total_data_used
   1126 
  def _CheckSignatures(self, report, pubkey_file_name):
    """Checks a payload's signature block.

    Parses the signatures blob, verifies it matches the trailing (fake)
    signature operation, and checks each signature against the hash of all
    payload data preceding the signature blob.

    Args:
      report: The report object to add to.
      pubkey_file_name: Public key used for signature verification.

    Raises:
      error.PayloadError if the signature block is malformed or a signature
      fails to verify.
    """
    sigs_raw = self.payload.ReadDataBlob(self.sigs_offset, self.sigs_size)
    sigs = update_metadata_pb2.Signatures()
    sigs.ParseFromString(sigs_raw)
    report.AddSection('signatures')

    # Check: At least one signature present.
    # pylint cannot see through the protobuf object, it seems.
    # pylint: disable=E1101
    if not sigs.signatures:
      raise error.PayloadError('Signature block is empty.')

    # The signature blob is referenced by a trailing pseudo-operation in the
    # last non-empty operations section.
    last_ops_section = (self.payload.manifest.kernel_install_operations or
                        self.payload.manifest.install_operations)
    fake_sig_op = last_ops_section[-1]
    # Check: signatures_{offset,size} must match the last (fake) operation.
    if not (fake_sig_op.type == common.OpType.REPLACE and
            self.sigs_offset == fake_sig_op.data_offset and
            self.sigs_size == fake_sig_op.data_length):
      raise error.PayloadError(
          'Signatures_{offset,size} (%d+%d) does not match last operation '
          '(%d+%d).' %
          (self.sigs_offset, self.sigs_size, fake_sig_op.data_offset,
           fake_sig_op.data_length))

    # Compute the checksum of all data up to signature blob.
    # TODO(garnold) we're re-reading the whole data section into a string
    # just to compute the checksum; instead, we could do it incrementally as
    # we read the blobs one-by-one, under the assumption that we're reading
    # them in order (which currently holds). This should be reconsidered.
    payload_hasher = self.payload.manifest_hasher.copy()
    common.Read(self.payload.payload_file, self.sigs_offset,
                offset=self.payload.data_offset, hasher=payload_hasher)

    for sig, sig_name in common.SignatureIter(sigs.signatures, 'signatures'):
      sig_report = report.AddSubReport(sig_name)

      # Check: Signature contains mandatory fields.
      self._CheckMandatoryField(sig, 'version', sig_report, sig_name)
      self._CheckMandatoryField(sig, 'data', None, sig_name)
      sig_report.AddField('data len', len(sig.data))

      # Check: Signatures pertains to actual payload hash.
      if sig.version == 1:
        self._CheckSha256Signature(sig.data, pubkey_file_name,
                                   payload_hasher.digest(), sig_name)
      else:
        raise error.PayloadError('Unknown signature version (%d).' %
                                 sig.version)
   1177 
  def Run(self, pubkey_file_name=None, metadata_sig_file=None,
          rootfs_part_size=0, kernel_part_size=0, report_out_file=None):
    """Checker entry point, invoking all checks.

    Args:
      pubkey_file_name: Public key used for signature verification.
      metadata_sig_file: Metadata signature, if verification is desired.
      rootfs_part_size: The size of rootfs partitions in bytes (default: infer
                        based on payload type and version).
      kernel_part_size: The size of kernel partitions in bytes (default: use
                        reported filesystem size).
      report_out_file: File object to dump the report to.

    Raises:
      error.PayloadError if payload verification failed.
    """
    if not pubkey_file_name:
      pubkey_file_name = _DEFAULT_PUBKEY_FILE_NAME

    report = _PayloadReport()

    # Get payload file size.
    # Seek to end-of-file (whence=2) to learn the total size, then rewind.
    self.payload.payload_file.seek(0, 2)
    payload_file_size = self.payload.payload_file.tell()
    self.payload.ResetFile()

    try:
      # Check metadata signature (if provided).
      if metadata_sig_file:
        metadata_sig = base64.b64decode(metadata_sig_file.read())
        self._CheckSha256Signature(metadata_sig, pubkey_file_name,
                                   self.payload.manifest_hasher.digest(),
                                   'metadata signature')

      # Part 1: Check the file header.
      report.AddSection('header')
      # Check: Payload version is valid.
      if self.payload.header.version != 1:
        raise error.PayloadError('Unknown payload version (%d).' %
                                 self.payload.header.version)
      report.AddField('version', self.payload.header.version)
      report.AddField('manifest len', self.payload.header.manifest_len)

      # Part 2: Check the manifest.
      self._CheckManifest(report, rootfs_part_size, kernel_part_size)
      assert self.payload_type, 'payload type should be known by now'

      # Infer the usable partition size when validating rootfs operations:
      # - If rootfs partition size was provided, use that.
      # - Otherwise, if this is an older delta (minor version < 2), stick with
      #   a known constant size. This is necessary because older deltas may
      #   exceed the filesystem size when moving data blocks around.
      # - Otherwise, use the encoded filesystem size.
      new_rootfs_usable_size = self.new_rootfs_fs_size
      if rootfs_part_size:
        new_rootfs_usable_size = rootfs_part_size
      elif self.payload_type == _TYPE_DELTA and self.minor_version in (None, 1):
        new_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE

      # Part 3: Examine rootfs operations.
      # TODO(garnold)(chromium:243559) only default to the filesystem size if
      # no explicit size provided *and* the partition size is not embedded in
      # the payload; see issue for more details.
      report.AddSection('rootfs operations')
      total_blob_size = self._CheckOperations(
          self.payload.manifest.install_operations, report,
          'install_operations', self.old_rootfs_fs_size,
          self.new_rootfs_fs_size, new_rootfs_usable_size, 0, False)

      # Part 4: Examine kernel operations.
      # TODO(garnold)(chromium:243559) as above.
      # Kernel ops start where rootfs blobs ended; only this (last) sequence
      # may carry a signature operation.
      report.AddSection('kernel operations')
      total_blob_size += self._CheckOperations(
          self.payload.manifest.kernel_install_operations, report,
          'kernel_install_operations', self.old_kernel_fs_size,
          self.new_kernel_fs_size,
          kernel_part_size if kernel_part_size else self.new_kernel_fs_size,
          total_blob_size, True)

      # Check: Operations data reach the end of the payload file.
      used_payload_size = self.payload.data_offset + total_blob_size
      if used_payload_size != payload_file_size:
        raise error.PayloadError(
            'Used payload size (%d) different from actual file size (%d).' %
            (used_payload_size, payload_file_size))

      # Part 5: Handle payload signatures message.
      if self.check_payload_sig and self.sigs_size:
        self._CheckSignatures(report, pubkey_file_name)

      # Part 6: Summary.
      report.AddSection('summary')
      report.AddField('update type', self.payload_type)

      report.Finalize()
    finally:
      # Always dump whatever was collected, even if a check raised.
      if report_out_file:
        report.Dump(report_out_file)
   1276