Home | History | Annotate | Download | only in update_payload
      1 # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
      2 # Use of this source code is governed by a BSD-style license that can be
      3 # found in the LICENSE file.
      4 
      5 """Verifying the integrity of a Chrome OS update payload.
      6 
      7 This module is used internally by the main Payload class for verifying the
      8 integrity of an update payload. The interface for invoking the checks is as
      9 follows:
     10 
     11   checker = PayloadChecker(payload)
     12   checker.Run(...)
     13 """
     14 
     15 from __future__ import print_function
     16 
     17 import array
     18 import base64
     19 import hashlib
     20 import itertools
     21 import os
     22 import subprocess
     23 
     24 from update_payload import common
     25 from update_payload import error
     26 from update_payload import format_utils
     27 from update_payload import histogram
     28 from update_payload import update_metadata_pb2
     29 
     30 
     31 #
     32 # Constants.
     33 #
     34 
# Names of the individual checks that callers may switch off by listing them
# in the |disabled_tests| argument of PayloadChecker.__init__.
_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
_CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
_CHECK_PAYLOAD_SIG = 'payload-sig'
# Public tuple of all disablable check names.
CHECKS_TO_DISABLE = (
    _CHECK_DST_PSEUDO_EXTENTS,
    _CHECK_MOVE_SAME_SRC_DST_BLOCK,
    _CHECK_PAYLOAD_SIG,
)

# Payload type tags, as used for PayloadChecker.payload_type / assert_type.
_TYPE_FULL = 'full'
_TYPE_DELTA = 'delta'

# Block size (bytes) assumed when the caller does not provide one.
_DEFAULT_BLOCK_SIZE = 4096

# Default public key for signature verification, located in this module's
# directory.
_DEFAULT_PUBKEY_BASE_NAME = 'update-payload-key.pub.pem'
_DEFAULT_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
                                         _DEFAULT_PUBKEY_BASE_NAME)

# Supported minor versions, mapped to the payload types allowed to use them.
_SUPPORTED_MINOR_VERSIONS = {
    0: (_TYPE_FULL,),
    1: (_TYPE_DELTA,),
    2: (_TYPE_DELTA,),
    3: (_TYPE_DELTA,),
    4: (_TYPE_DELTA,),
    5: (_TYPE_DELTA,),
}

# 2 GiB; presumably the usable partition size assumed for old-style delta
# payloads — its use is not visible in this chunk, confirm against the
# operation checks below.
_OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024
     64 
     65 #
     66 # Helper functions.
     67 #
     68 
     69 def _IsPowerOfTwo(val):
     70   """Returns True iff val is a power of two."""
     71   return val > 0 and (val & (val - 1)) == 0
     72 
     73 
     74 def _AddFormat(format_func, value):
     75   """Adds a custom formatted representation to ordinary string representation.
     76 
     77   Args:
     78     format_func: A value formatter.
     79     value: Value to be formatted and returned.
     80 
     81   Returns:
     82     A string 'x (y)' where x = str(value) and y = format_func(value).
     83   """
     84   ret = str(value)
     85   formatted_str = format_func(value)
     86   if formatted_str:
     87     ret += ' (%s)' % formatted_str
     88   return ret
     89 
     90 
     91 def _AddHumanReadableSize(size):
     92   """Adds a human readable representation to a byte size value."""
     93   return _AddFormat(format_utils.BytesToHumanReadable, size)
     94 
     95 
     96 #
     97 # Payload report generator.
     98 #
     99 
    100 class _PayloadReport(object):
    101   """A payload report generator.
    102 
    103   A report is essentially a sequence of nodes, which represent data points. It
    104   is initialized to have a "global", untitled section. A node may be a
    105   sub-report itself.
    106   """
    107 
    108   # Report nodes: Field, sub-report, section.
    109   class Node(object):
    110     """A report node interface."""
    111 
    112     @staticmethod
    113     def _Indent(indent, line):
    114       """Indents a line by a given indentation amount.
    115 
    116       Args:
    117         indent: The indentation amount.
    118         line: The line content (string).
    119 
    120       Returns:
    121         The properly indented line (string).
    122       """
    123       return '%*s%s' % (indent, '', line)
    124 
    125     def GenerateLines(self, base_indent, sub_indent, curr_section):
    126       """Generates the report lines for this node.
    127 
    128       Args:
    129         base_indent: Base indentation for each line.
    130         sub_indent: Additional indentation for sub-nodes.
    131         curr_section: The current report section object.
    132 
    133       Returns:
    134         A pair consisting of a list of properly indented report lines and a new
    135         current section object.
    136       """
    137       raise NotImplementedError
    138 
    139   class FieldNode(Node):
    140     """A field report node, representing a (name, value) pair."""
    141 
    142     def __init__(self, name, value, linebreak, indent):
    143       super(_PayloadReport.FieldNode, self).__init__()
    144       self.name = name
    145       self.value = value
    146       self.linebreak = linebreak
    147       self.indent = indent
    148 
    149     def GenerateLines(self, base_indent, sub_indent, curr_section):
    150       """Generates a properly formatted 'name : value' entry."""
    151       report_output = ''
    152       if self.name:
    153         report_output += self.name.ljust(curr_section.max_field_name_len) + ' :'
    154       value_lines = str(self.value).splitlines()
    155       if self.linebreak and self.name:
    156         report_output += '\n' + '\n'.join(
    157             ['%*s%s' % (self.indent, '', line) for line in value_lines])
    158       else:
    159         if self.name:
    160           report_output += ' '
    161         report_output += '%*s' % (self.indent, '')
    162         cont_line_indent = len(report_output)
    163         indented_value_lines = [value_lines[0]]
    164         indented_value_lines.extend(['%*s%s' % (cont_line_indent, '', line)
    165                                      for line in value_lines[1:]])
    166         report_output += '\n'.join(indented_value_lines)
    167 
    168       report_lines = [self._Indent(base_indent, line + '\n')
    169                       for line in report_output.split('\n')]
    170       return report_lines, curr_section
    171 
    172   class SubReportNode(Node):
    173     """A sub-report node, representing a nested report."""
    174 
    175     def __init__(self, title, report):
    176       super(_PayloadReport.SubReportNode, self).__init__()
    177       self.title = title
    178       self.report = report
    179 
    180     def GenerateLines(self, base_indent, sub_indent, curr_section):
    181       """Recurse with indentation."""
    182       report_lines = [self._Indent(base_indent, self.title + ' =>\n')]
    183       report_lines.extend(self.report.GenerateLines(base_indent + sub_indent,
    184                                                     sub_indent))
    185       return report_lines, curr_section
    186 
    187   class SectionNode(Node):
    188     """A section header node."""
    189 
    190     def __init__(self, title=None):
    191       super(_PayloadReport.SectionNode, self).__init__()
    192       self.title = title
    193       self.max_field_name_len = 0
    194 
    195     def GenerateLines(self, base_indent, sub_indent, curr_section):
    196       """Dump a title line, return self as the (new) current section."""
    197       report_lines = []
    198       if self.title:
    199         report_lines.append(self._Indent(base_indent,
    200                                          '=== %s ===\n' % self.title))
    201       return report_lines, self
    202 
    203   def __init__(self):
    204     self.report = []
    205     self.last_section = self.global_section = self.SectionNode()
    206     self.is_finalized = False
    207 
    208   def GenerateLines(self, base_indent, sub_indent):
    209     """Generates the lines in the report, properly indented.
    210 
    211     Args:
    212       base_indent: The indentation used for root-level report lines.
    213       sub_indent: The indentation offset used for sub-reports.
    214 
    215     Returns:
    216       A list of indented report lines.
    217     """
    218     report_lines = []
    219     curr_section = self.global_section
    220     for node in self.report:
    221       node_report_lines, curr_section = node.GenerateLines(
    222           base_indent, sub_indent, curr_section)
    223       report_lines.extend(node_report_lines)
    224 
    225     return report_lines
    226 
    227   def Dump(self, out_file, base_indent=0, sub_indent=2):
    228     """Dumps the report to a file.
    229 
    230     Args:
    231       out_file: File object to output the content to.
    232       base_indent: Base indentation for report lines.
    233       sub_indent: Added indentation for sub-reports.
    234     """
    235     report_lines = self.GenerateLines(base_indent, sub_indent)
    236     if report_lines and not self.is_finalized:
    237       report_lines.append('(incomplete report)\n')
    238 
    239     for line in report_lines:
    240       out_file.write(line)
    241 
    242   def AddField(self, name, value, linebreak=False, indent=0):
    243     """Adds a field/value pair to the payload report.
    244 
    245     Args:
    246       name: The field's name.
    247       value: The field's value.
    248       linebreak: Whether the value should be printed on a new line.
    249       indent: Amount of extra indent for each line of the value.
    250     """
    251     assert not self.is_finalized
    252     if name and self.last_section.max_field_name_len < len(name):
    253       self.last_section.max_field_name_len = len(name)
    254     self.report.append(self.FieldNode(name, value, linebreak, indent))
    255 
    256   def AddSubReport(self, title):
    257     """Adds and returns a sub-report with a title."""
    258     assert not self.is_finalized
    259     sub_report = self.SubReportNode(title, type(self)())
    260     self.report.append(sub_report)
    261     return sub_report.report
    262 
    263   def AddSection(self, title):
    264     """Adds a new section title."""
    265     assert not self.is_finalized
    266     self.last_section = self.SectionNode(title)
    267     self.report.append(self.last_section)
    268 
    269   def Finalize(self):
    270     """Seals the report, marking it as complete."""
    271     self.is_finalized = True
    272 
    273 
    274 #
    275 # Payload verification.
    276 #
    277 
    278 class PayloadChecker(object):
    279   """Checking the integrity of an update payload.
    280 
    281   This is a short-lived object whose purpose is to isolate the logic used for
    282   verifying the integrity of an update payload.
    283   """
    284 
    285   def __init__(self, payload, assert_type=None, block_size=0,
    286                allow_unhashed=False, disabled_tests=()):
    287     """Initialize the checker.
    288 
    289     Args:
    290       payload: The payload object to check.
    291       assert_type: Assert that payload is either 'full' or 'delta' (optional).
    292       block_size: Expected filesystem / payload block size (optional).
    293       allow_unhashed: Allow operations with unhashed data blobs.
    294       disabled_tests: Sequence of tests to disable.
    295     """
    296     if not payload.is_init:
    297       raise ValueError('Uninitialized update payload.')
    298 
    299     # Set checker configuration.
    300     self.payload = payload
    301     self.block_size = block_size if block_size else _DEFAULT_BLOCK_SIZE
    302     if not _IsPowerOfTwo(self.block_size):
    303       raise error.PayloadError(
    304           'Expected block (%d) size is not a power of two.' % self.block_size)
    305     if assert_type not in (None, _TYPE_FULL, _TYPE_DELTA):
    306       raise error.PayloadError('Invalid assert_type value (%r).' %
    307                                assert_type)
    308     self.payload_type = assert_type
    309     self.allow_unhashed = allow_unhashed
    310 
    311     # Disable specific tests.
    312     self.check_dst_pseudo_extents = (
    313         _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests)
    314     self.check_move_same_src_dst_block = (
    315         _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests)
    316     self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests
    317 
    318     # Reset state; these will be assigned when the manifest is checked.
    319     self.sigs_offset = 0
    320     self.sigs_size = 0
    321     self.old_rootfs_fs_size = 0
    322     self.old_kernel_fs_size = 0
    323     self.new_rootfs_fs_size = 0
    324     self.new_kernel_fs_size = 0
    325     self.minor_version = None
    326 
    327   @staticmethod
    328   def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str,
    329                  msg_name=None, linebreak=False, indent=0):
    330     """Adds an element from a protobuf message to the payload report.
    331 
    332     Checks to see whether a message contains a given element, and if so adds
    333     the element value to the provided report. A missing mandatory element
    334     causes an exception to be raised.
    335 
    336     Args:
    337       msg: The message containing the element.
    338       name: The name of the element.
    339       report: A report object to add the element name/value to.
    340       is_mandatory: Whether or not this element must be present.
    341       is_submsg: Whether this element is itself a message.
    342       convert: A function for converting the element value for reporting.
    343       msg_name: The name of the message object (for error reporting).
    344       linebreak: Whether the value report should induce a line break.
    345       indent: Amount of indent used for reporting the value.
    346 
    347     Returns:
    348       A pair consisting of the element value and the generated sub-report for
    349       it (if the element is a sub-message, None otherwise). If the element is
    350       missing, returns (None, None).
    351 
    352     Raises:
    353       error.PayloadError if a mandatory element is missing.
    354     """
    355     if not msg.HasField(name):
    356       if is_mandatory:
    357         raise error.PayloadError('%smissing mandatory %s %r.' %
    358                                  (msg_name + ' ' if msg_name else '',
    359                                   'sub-message' if is_submsg else 'field',
    360                                   name))
    361       return None, None
    362 
    363     value = getattr(msg, name)
    364     if is_submsg:
    365       return value, report and report.AddSubReport(name)
    366     else:
    367       if report:
    368         report.AddField(name, convert(value), linebreak=linebreak,
    369                         indent=indent)
    370       return value, None
    371 
    372   @staticmethod
    373   def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str,
    374                            linebreak=False, indent=0):
    375     """Adds a mandatory field; returning first component from _CheckElem."""
    376     return PayloadChecker._CheckElem(msg, field_name, report, True, False,
    377                                      convert=convert, msg_name=msg_name,
    378                                      linebreak=linebreak, indent=indent)[0]
    379 
    380   @staticmethod
    381   def _CheckOptionalField(msg, field_name, report, convert=str,
    382                           linebreak=False, indent=0):
    383     """Adds an optional field; returning first component from _CheckElem."""
    384     return PayloadChecker._CheckElem(msg, field_name, report, False, False,
    385                                      convert=convert, linebreak=linebreak,
    386                                      indent=indent)[0]
    387 
    388   @staticmethod
    389   def _CheckMandatorySubMsg(msg, submsg_name, report, msg_name):
    390     """Adds a mandatory sub-message; wrapper for _CheckElem."""
    391     return PayloadChecker._CheckElem(msg, submsg_name, report, True, True,
    392                                      msg_name)
    393 
    394   @staticmethod
    395   def _CheckOptionalSubMsg(msg, submsg_name, report):
    396     """Adds an optional sub-message; wrapper for _CheckElem."""
    397     return PayloadChecker._CheckElem(msg, submsg_name, report, False, True)
    398 
    399   @staticmethod
    400   def _CheckPresentIff(val1, val2, name1, name2, obj_name):
    401     """Checks that val1 is None iff val2 is None.
    402 
    403     Args:
    404       val1: first value to be compared.
    405       val2: second value to be compared.
    406       name1: name of object holding the first value.
    407       name2: name of object holding the second value.
    408       obj_name: Name of the object containing these values.
    409 
    410     Raises:
    411       error.PayloadError if assertion does not hold.
    412     """
    413     if None in (val1, val2) and val1 is not val2:
    414       present, missing = (name1, name2) if val2 is None else (name2, name1)
    415       raise error.PayloadError('%r present without %r%s.' %
    416                                (present, missing,
    417                                 ' in ' + obj_name if obj_name else ''))
    418 
    419   @staticmethod
    420   def _Run(cmd, send_data=None):
    421     """Runs a subprocess, returns its output.
    422 
    423     Args:
    424       cmd: Sequence of command-line argument for invoking the subprocess.
    425       send_data: Data to feed to the process via its stdin.
    426 
    427     Returns:
    428       A tuple containing the stdout and stderr output of the process.
    429     """
    430     run_process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
    431                                    stdout=subprocess.PIPE)
    432     try:
    433       result = run_process.communicate(input=send_data)
    434     finally:
    435       exit_code = run_process.wait()
    436 
    437     if exit_code:
    438       raise RuntimeError('Subprocess %r failed with code %r.' %
    439                          (cmd, exit_code))
    440 
    441     return result
    442 
    443   @staticmethod
    444   def _CheckSha256Signature(sig_data, pubkey_file_name, actual_hash, sig_name):
    445     """Verifies an actual hash against a signed one.
    446 
    447     Args:
    448       sig_data: The raw signature data.
    449       pubkey_file_name: Public key used for verifying signature.
    450       actual_hash: The actual hash digest.
    451       sig_name: Signature name for error reporting.
    452 
    453     Raises:
    454       error.PayloadError if signature could not be verified.
    455     """
    456     if len(sig_data) != 256:
    457       raise error.PayloadError(
    458           '%s: signature size (%d) not as expected (256).' %
    459           (sig_name, len(sig_data)))
    460     signed_data, _ = PayloadChecker._Run(
    461         ['openssl', 'rsautl', '-verify', '-pubin', '-inkey', pubkey_file_name],
    462         send_data=sig_data)
    463 
    464     if len(signed_data) != len(common.SIG_ASN1_HEADER) + 32:
    465       raise error.PayloadError('%s: unexpected signed data length (%d).' %
    466                                (sig_name, len(signed_data)))
    467 
    468     if not signed_data.startswith(common.SIG_ASN1_HEADER):
    469       raise error.PayloadError('%s: not containing standard ASN.1 prefix.' %
    470                                sig_name)
    471 
    472     signed_hash = signed_data[len(common.SIG_ASN1_HEADER):]
    473     if signed_hash != actual_hash:
    474       raise error.PayloadError(
    475           '%s: signed hash (%s) different from actual (%s).' %
    476           (sig_name, common.FormatSha256(signed_hash),
    477            common.FormatSha256(actual_hash)))
    478 
    479   @staticmethod
    480   def _CheckBlocksFitLength(length, num_blocks, block_size, length_name,
    481                             block_name=None):
    482     """Checks that a given length fits given block space.
    483 
    484     This ensures that the number of blocks allocated is appropriate for the
    485     length of the data residing in these blocks.
    486 
    487     Args:
    488       length: The actual length of the data.
    489       num_blocks: The number of blocks allocated for it.
    490       block_size: The size of each block in bytes.
    491       length_name: Name of length (used for error reporting).
    492       block_name: Name of block (used for error reporting).
    493 
    494     Raises:
    495       error.PayloadError if the aforementioned invariant is not satisfied.
    496     """
    497     # Check: length <= num_blocks * block_size.
    498     if length > num_blocks * block_size:
    499       raise error.PayloadError(
    500           '%s (%d) > num %sblocks (%d) * block_size (%d).' %
    501           (length_name, length, block_name or '', num_blocks, block_size))
    502 
    503     # Check: length > (num_blocks - 1) * block_size.
    504     if length <= (num_blocks - 1) * block_size:
    505       raise error.PayloadError(
    506           '%s (%d) <= (num %sblocks - 1 (%d)) * block_size (%d).' %
    507           (length_name, length, block_name or '', num_blocks - 1, block_size))
    508 
    509   def _CheckManifestMinorVersion(self, report):
    510     """Checks the payload manifest minor_version field.
    511 
    512     Args:
    513       report: The report object to add to.
    514 
    515     Raises:
    516       error.PayloadError if any of the checks fail.
    517     """
    518     self.minor_version = self._CheckOptionalField(self.payload.manifest,
    519                                                   'minor_version', report)
    520     if self.minor_version in _SUPPORTED_MINOR_VERSIONS:
    521       if self.payload_type not in _SUPPORTED_MINOR_VERSIONS[self.minor_version]:
    522         raise error.PayloadError(
    523             'Minor version %d not compatible with payload type %s.' %
    524             (self.minor_version, self.payload_type))
    525     elif self.minor_version is None:
    526       raise error.PayloadError('Minor version is not set.')
    527     else:
    528       raise error.PayloadError('Unsupported minor version: %d' %
    529                                self.minor_version)
    530 
  def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0):
    """Checks the payload manifest.

    As a side effect, sets self.payload_type (full vs delta), the signature
    block location (sigs_offset/sigs_size) and the old/new kernel and rootfs
    filesystem sizes.

    Args:
      report: A report object to add to.
      rootfs_part_size: Size of the rootfs partition in bytes; 0 means
          unknown, which skips the partition-fit checks.
      kernel_part_size: Size of the kernel partition in bytes; 0 likewise
          skips the fit checks.

    Returns:
      None.

    Raises:
      error.PayloadError if any of the checks fail.
    """
    manifest = self.payload.manifest
    report.AddSection('manifest')

    # Check: block_size must exist and match the expected value.
    actual_block_size = self._CheckMandatoryField(manifest, 'block_size',
                                                  report, 'manifest')
    if actual_block_size != self.block_size:
      raise error.PayloadError('Block_size (%d) not as expected (%d).' %
                               (actual_block_size, self.block_size))

    # Check: signatures_offset <==> signatures_size (both or neither).
    self.sigs_offset = self._CheckOptionalField(manifest, 'signatures_offset',
                                                report)
    self.sigs_size = self._CheckOptionalField(manifest, 'signatures_size',
                                              report)
    self._CheckPresentIff(self.sigs_offset, self.sigs_size,
                          'signatures_offset', 'signatures_size', 'manifest')

    # Check: old_kernel_info <==> old_rootfs_info (a delta carries both,
    # a full payload carries neither).
    oki_msg, oki_report = self._CheckOptionalSubMsg(manifest,
                                                    'old_kernel_info', report)
    ori_msg, ori_report = self._CheckOptionalSubMsg(manifest,
                                                    'old_rootfs_info', report)
    self._CheckPresentIff(oki_msg, ori_msg, 'old_kernel_info',
                          'old_rootfs_info', 'manifest')
    if oki_msg:  # equivalently, ori_msg
      # Assert/mark delta payload; an explicit 'full' assertion contradicts
      # the presence of old partition info.
      if self.payload_type == _TYPE_FULL:
        raise error.PayloadError(
            'Apparent full payload contains old_{kernel,rootfs}_info.')
      self.payload_type = _TYPE_DELTA

      # Check: {size, hash} present in old_{kernel,rootfs}_info.
      self.old_kernel_fs_size = self._CheckMandatoryField(
          oki_msg, 'size', oki_report, 'old_kernel_info')
      self._CheckMandatoryField(oki_msg, 'hash', oki_report, 'old_kernel_info',
                                convert=common.FormatSha256)
      self.old_rootfs_fs_size = self._CheckMandatoryField(
          ori_msg, 'size', ori_report, 'old_rootfs_info')
      self._CheckMandatoryField(ori_msg, 'hash', ori_report, 'old_rootfs_info',
                                convert=common.FormatSha256)

      # Check: old_{kernel,rootfs} size must fit in respective partition
      # (skipped when the partition size is unknown, i.e. zero).
      if kernel_part_size and self.old_kernel_fs_size > kernel_part_size:
        raise error.PayloadError(
            'Old kernel content (%d) exceed partition size (%d).' %
            (self.old_kernel_fs_size, kernel_part_size))
      if rootfs_part_size and self.old_rootfs_fs_size > rootfs_part_size:
        raise error.PayloadError(
            'Old rootfs content (%d) exceed partition size (%d).' %
            (self.old_rootfs_fs_size, rootfs_part_size))
    else:
      # Assert/mark full payload.
      if self.payload_type == _TYPE_DELTA:
        raise error.PayloadError(
            'Apparent delta payload missing old_{kernel,rootfs}_info.')
      self.payload_type = _TYPE_FULL

    # Check: new_kernel_info present; contains {size, hash}.
    nki_msg, nki_report = self._CheckMandatorySubMsg(
        manifest, 'new_kernel_info', report, 'manifest')
    self.new_kernel_fs_size = self._CheckMandatoryField(
        nki_msg, 'size', nki_report, 'new_kernel_info')
    self._CheckMandatoryField(nki_msg, 'hash', nki_report, 'new_kernel_info',
                              convert=common.FormatSha256)

    # Check: new_rootfs_info present; contains {size, hash}.
    nri_msg, nri_report = self._CheckMandatorySubMsg(
        manifest, 'new_rootfs_info', report, 'manifest')
    self.new_rootfs_fs_size = self._CheckMandatoryField(
        nri_msg, 'size', nri_report, 'new_rootfs_info')
    self._CheckMandatoryField(nri_msg, 'hash', nri_report, 'new_rootfs_info',
                              convert=common.FormatSha256)

    # Check: new_{kernel,rootfs} size must fit in respective partition.
    if kernel_part_size and self.new_kernel_fs_size > kernel_part_size:
      raise error.PayloadError(
          'New kernel content (%d) exceed partition size (%d).' %
          (self.new_kernel_fs_size, kernel_part_size))
    if rootfs_part_size and self.new_rootfs_fs_size > rootfs_part_size:
      raise error.PayloadError(
          'New rootfs content (%d) exceed partition size (%d).' %
          (self.new_rootfs_fs_size, rootfs_part_size))

    # Check: minor_version makes sense for the payload type. This check must
    # run after the payload type has been set above.
    self._CheckManifestMinorVersion(report)
    633 
    634   def _CheckLength(self, length, total_blocks, op_name, length_name):
    635     """Checks whether a length matches the space designated in extents.
    636 
    637     Args:
    638       length: The total length of the data.
    639       total_blocks: The total number of blocks in extents.
    640       op_name: Operation name (for error reporting).
    641       length_name: Length name (for error reporting).
    642 
    643     Raises:
    644       error.PayloadError is there a problem with the length.
    645     """
    646     # Check: length is non-zero.
    647     if length == 0:
    648       raise error.PayloadError('%s: %s is zero.' % (op_name, length_name))
    649 
    650     # Check that length matches number of blocks.
    651     self._CheckBlocksFitLength(length, total_blocks, self.block_size,
    652                                '%s: %s' % (op_name, length_name))
    653 
    654   def _CheckExtents(self, extents, usable_size, block_counters, name,
    655                     allow_pseudo=False, allow_signature=False):
    656     """Checks a sequence of extents.
    657 
    658     Args:
    659       extents: The sequence of extents to check.
    660       usable_size: The usable size of the partition to which the extents apply.
    661       block_counters: Array of counters corresponding to the number of blocks.
    662       name: The name of the extent block.
    663       allow_pseudo: Whether or not pseudo block numbers are allowed.
    664       allow_signature: Whether or not the extents are used for a signature.
    665 
    666     Returns:
    667       The total number of blocks in the extents.
    668 
    669     Raises:
    670       error.PayloadError if any of the entailed checks fails.
    671     """
    672     total_num_blocks = 0
    673     for ex, ex_name in common.ExtentIter(extents, name):
    674       # Check: Mandatory fields.
    675       start_block = PayloadChecker._CheckMandatoryField(ex, 'start_block',
    676                                                         None, ex_name)
    677       num_blocks = PayloadChecker._CheckMandatoryField(ex, 'num_blocks', None,
    678                                                        ex_name)
    679       end_block = start_block + num_blocks
    680 
    681       # Check: num_blocks > 0.
    682       if num_blocks == 0:
    683         raise error.PayloadError('%s: extent length is zero.' % ex_name)
    684 
    685       if start_block != common.PSEUDO_EXTENT_MARKER:
    686         # Check: Make sure we're within the partition limit.
    687         if usable_size and end_block * self.block_size > usable_size:
    688           raise error.PayloadError(
    689               '%s: extent (%s) exceeds usable partition size (%d).' %
    690               (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
    691 
    692         # Record block usage.
    693         for i in xrange(start_block, end_block):
    694           block_counters[i] += 1
    695       elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
    696         # Pseudo-extents must be allowed explicitly, or otherwise be part of a
    697         # signature operation (in which case there has to be exactly one).
    698         raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
    699 
    700       total_num_blocks += num_blocks
    701 
    702     return total_num_blocks
    703 
    704   def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name):
    705     """Specific checks for REPLACE/REPLACE_BZ operations.
    706 
    707     Args:
    708       op: The operation object from the manifest.
    709       data_length: The length of the data blob associated with the operation.
    710       total_dst_blocks: Total number of blocks in dst_extents.
    711       op_name: Operation name for error reporting.
    712 
    713     Raises:
    714       error.PayloadError if any check fails.
    715     """
    716     # Check: Does not contain src extents.
    717     if op.src_extents:
    718       raise error.PayloadError('%s: contains src_extents.' % op_name)
    719 
    720     # Check: Contains data.
    721     if data_length is None:
    722       raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
    723 
    724     if op.type == common.OpType.REPLACE:
    725       PayloadChecker._CheckBlocksFitLength(data_length, total_dst_blocks,
    726                                            self.block_size,
    727                                            op_name + '.data_length', 'dst')
    728     else:
    729       # Check: data_length must be smaller than the alotted dst blocks.
    730       if data_length >= total_dst_blocks * self.block_size:
    731         raise error.PayloadError(
    732             '%s: data_length (%d) must be less than allotted dst block '
    733             'space (%d * %d).' %
    734             (op_name, data_length, total_dst_blocks, self.block_size))
    735 
    736   def _CheckMoveOperation(self, op, data_offset, total_src_blocks,
    737                           total_dst_blocks, op_name):
    738     """Specific checks for MOVE operations.
    739 
    740     Args:
    741       op: The operation object from the manifest.
    742       data_offset: The offset of a data blob for the operation.
    743       total_src_blocks: Total number of blocks in src_extents.
    744       total_dst_blocks: Total number of blocks in dst_extents.
    745       op_name: Operation name for error reporting.
    746 
    747     Raises:
    748       error.PayloadError if any check fails.
    749     """
    750     # Check: No data_{offset,length}.
    751     if data_offset is not None:
    752       raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
    753 
    754     # Check: total_src_blocks == total_dst_blocks.
    755     if total_src_blocks != total_dst_blocks:
    756       raise error.PayloadError(
    757           '%s: total src blocks (%d) != total dst blocks (%d).' %
    758           (op_name, total_src_blocks, total_dst_blocks))
    759 
    760     # Check: For all i, i-th src block index != i-th dst block index.
    761     i = 0
    762     src_extent_iter = iter(op.src_extents)
    763     dst_extent_iter = iter(op.dst_extents)
    764     src_extent = dst_extent = None
    765     src_idx = src_num = dst_idx = dst_num = 0
    766     while i < total_src_blocks:
    767       # Get the next source extent, if needed.
    768       if not src_extent:
    769         try:
    770           src_extent = src_extent_iter.next()
    771         except StopIteration:
    772           raise error.PayloadError('%s: ran out of src extents (%d/%d).' %
    773                                    (op_name, i, total_src_blocks))
    774         src_idx = src_extent.start_block
    775         src_num = src_extent.num_blocks
    776 
    777       # Get the next dest extent, if needed.
    778       if not dst_extent:
    779         try:
    780           dst_extent = dst_extent_iter.next()
    781         except StopIteration:
    782           raise error.PayloadError('%s: ran out of dst extents (%d/%d).' %
    783                                    (op_name, i, total_dst_blocks))
    784         dst_idx = dst_extent.start_block
    785         dst_num = dst_extent.num_blocks
    786 
    787       # Check: start block is not 0. See crbug/480751; there are still versions
    788       # of update_engine which fail when seeking to 0 in PReadAll and PWriteAll,
    789       # so we need to fail payloads that try to MOVE to/from block 0.
    790       if src_idx == 0 or dst_idx == 0:
    791         raise error.PayloadError(
    792             '%s: MOVE operation cannot have extent with start block 0' %
    793             op_name)
    794 
    795       if self.check_move_same_src_dst_block and src_idx == dst_idx:
    796         raise error.PayloadError(
    797             '%s: src/dst block number %d is the same (%d).' %
    798             (op_name, i, src_idx))
    799 
    800       advance = min(src_num, dst_num)
    801       i += advance
    802 
    803       src_idx += advance
    804       src_num -= advance
    805       if src_num == 0:
    806         src_extent = None
    807 
    808       dst_idx += advance
    809       dst_num -= advance
    810       if dst_num == 0:
    811         dst_extent = None
    812 
    813     # Make sure we've exhausted all src/dst extents.
    814     if src_extent:
    815       raise error.PayloadError('%s: excess src blocks.' % op_name)
    816     if dst_extent:
    817       raise error.PayloadError('%s: excess dst blocks.' % op_name)
    818 
    819   def _CheckZeroOperation(self, op, op_name):
    820     """Specific checks for ZERO operations.
    821 
    822     Args:
    823       op: The operation object from the manifest.
    824       op_name: Operation name for error reporting.
    825 
    826     Raises:
    827       error.PayloadError if any check fails.
    828     """
    829     # Check: Does not contain src extents, data_length and data_offset.
    830     if op.src_extents:
    831       raise error.PayloadError('%s: contains src_extents.' % op_name)
    832     if op.data_length:
    833       raise error.PayloadError('%s: contains data_length.' % op_name)
    834     if op.data_offset:
    835       raise error.PayloadError('%s: contains data_offset.' % op_name)
    836 
    837   def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name):
    838     """Specific checks for BSDIFF, SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
    839        operations.
    840 
    841     Args:
    842       op: The operation.
    843       data_length: The length of the data blob associated with the operation.
    844       total_dst_blocks: Total number of blocks in dst_extents.
    845       op_name: Operation name for error reporting.
    846 
    847     Raises:
    848       error.PayloadError if any check fails.
    849     """
    850     # Check: data_{offset,length} present.
    851     if data_length is None:
    852       raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
    853 
    854     # Check: data_length is strictly smaller than the alotted dst blocks.
    855     if data_length >= total_dst_blocks * self.block_size:
    856       raise error.PayloadError(
    857           '%s: data_length (%d) must be smaller than allotted dst space '
    858           '(%d * %d = %d).' %
    859           (op_name, data_length, total_dst_blocks, self.block_size,
    860            total_dst_blocks * self.block_size))
    861 
    862     # Check the existence of src_length and dst_length for legacy bsdiffs.
    863     if (op.type == common.OpType.BSDIFF or
    864         (op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3)):
    865       if not op.HasField('src_length') or not op.HasField('dst_length'):
    866         raise error.PayloadError('%s: require {src,dst}_length.' % op_name)
    867     else:
    868       if op.HasField('src_length') or op.HasField('dst_length'):
    869         raise error.PayloadError('%s: unneeded {src,dst}_length.' % op_name)
    870 
    871   def _CheckSourceCopyOperation(self, data_offset, total_src_blocks,
    872                                 total_dst_blocks, op_name):
    873     """Specific checks for SOURCE_COPY.
    874 
    875     Args:
    876       data_offset: The offset of a data blob for the operation.
    877       total_src_blocks: Total number of blocks in src_extents.
    878       total_dst_blocks: Total number of blocks in dst_extents.
    879       op_name: Operation name for error reporting.
    880 
    881     Raises:
    882       error.PayloadError if any check fails.
    883     """
    884     # Check: No data_{offset,length}.
    885     if data_offset is not None:
    886       raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
    887 
    888     # Check: total_src_blocks == total_dst_blocks.
    889     if total_src_blocks != total_dst_blocks:
    890       raise error.PayloadError(
    891           '%s: total src blocks (%d) != total dst blocks (%d).' %
    892           (op_name, total_src_blocks, total_dst_blocks))
    893 
    894   def _CheckAnySourceOperation(self, op, total_src_blocks, op_name):
    895     """Specific checks for SOURCE_* operations.
    896 
    897     Args:
    898       op: The operation object from the manifest.
    899       total_src_blocks: Total number of blocks in src_extents.
    900       op_name: Operation name for error reporting.
    901 
    902     Raises:
    903       error.PayloadError if any check fails.
    904     """
    905     # Check: total_src_blocks != 0.
    906     if total_src_blocks == 0:
    907       raise error.PayloadError('%s: no src blocks in a source op.' % op_name)
    908 
    909     # Check: src_sha256_hash present in minor version >= 3.
    910     if self.minor_version >= 3 and op.src_sha256_hash is None:
    911       raise error.PayloadError('%s: source hash missing.' % op_name)
    912 
  def _CheckOperation(self, op, op_name, is_last, old_block_counters,
                      new_block_counters, old_usable_size, new_usable_size,
                      prev_data_offset, allow_signature, blob_hash_counts):
    """Checks a single update operation.

    Args:
      op: The operation object.
      op_name: Operation name string for error reporting.
      is_last: Whether this is the last operation in the sequence.
      old_block_counters: Arrays of block read counters.
      new_block_counters: Arrays of block write counters.
      old_usable_size: The overall usable size for src data in bytes.
      new_usable_size: The overall usable size for dst data in bytes.
      prev_data_offset: Offset of last used data bytes.
      allow_signature: Whether this may be a signature operation.
      blob_hash_counts: Counters for hashed/unhashed blobs.

    Returns:
      The amount of data blob associated with the operation.

    Raises:
      error.PayloadError if any check has failed.
    """
    # Check extents.
    total_src_blocks = self._CheckExtents(
        op.src_extents, old_usable_size, old_block_counters,
        op_name + '.src_extents', allow_pseudo=True)
    # A signature blob is encoded as a trailing REPLACE operation; only then
    # may its dst_extents be a signature pseudo-extent.
    allow_signature_in_extents = (allow_signature and is_last and
                                  op.type == common.OpType.REPLACE)
    total_dst_blocks = self._CheckExtents(
        op.dst_extents, new_usable_size, new_block_counters,
        op_name + '.dst_extents',
        allow_pseudo=(not self.check_dst_pseudo_extents),
        allow_signature=allow_signature_in_extents)

    # Check: data_offset present <==> data_length present.
    data_offset = self._CheckOptionalField(op, 'data_offset', None)
    data_length = self._CheckOptionalField(op, 'data_length', None)
    self._CheckPresentIff(data_offset, data_length, 'data_offset',
                          'data_length', op_name)

    # Check: At least one dst_extent.
    if not op.dst_extents:
      raise error.PayloadError('%s: dst_extents is empty.' % op_name)

    # Check {src,dst}_length, if present.
    if op.HasField('src_length'):
      self._CheckLength(op.src_length, total_src_blocks, op_name, 'src_length')
    if op.HasField('dst_length'):
      self._CheckLength(op.dst_length, total_dst_blocks, op_name, 'dst_length')

    if op.HasField('data_sha256_hash'):
      blob_hash_counts['hashed'] += 1

      # Check: Operation carries data.
      if data_offset is None:
        raise error.PayloadError(
            '%s: data_sha256_hash present but no data_{offset,length}.' %
            op_name)

      # Check: Hash verifies correctly.
      actual_hash = hashlib.sha256(self.payload.ReadDataBlob(data_offset,
                                                             data_length))
      if op.data_sha256_hash != actual_hash.digest():
        raise error.PayloadError(
            '%s: data_sha256_hash (%s) does not match actual hash (%s).' %
            (op_name, common.FormatSha256(op.data_sha256_hash),
             common.FormatSha256(actual_hash.digest())))
    elif data_offset is not None:
      # Blob present but unhashed: permitted only for the signature pseudo-op
      # or when the checker was explicitly configured to allow it.
      if allow_signature_in_extents:
        blob_hash_counts['signature'] += 1
      elif self.allow_unhashed:
        blob_hash_counts['unhashed'] += 1
      else:
        raise error.PayloadError('%s: unhashed operation not allowed.' %
                                 op_name)

    if data_offset is not None:
      # Check: Contiguous use of data section.
      if data_offset != prev_data_offset:
        raise error.PayloadError(
            '%s: data offset (%d) not matching amount used so far (%d).' %
            (op_name, data_offset, prev_data_offset))

    # Type-specific checks. Each operation type is gated on the minor
    # version(s) in which it is permitted.
    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
    elif op.type == common.OpType.MOVE and self.minor_version == 1:
      self._CheckMoveOperation(op, data_offset, total_src_blocks,
                               total_dst_blocks, op_name)
    elif op.type == common.OpType.ZERO and self.minor_version >= 4:
      self._CheckZeroOperation(op, op_name)
    elif op.type == common.OpType.BSDIFF and self.minor_version == 1:
      self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
    elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2:
      self._CheckSourceCopyOperation(data_offset, total_src_blocks,
                                     total_dst_blocks, op_name)
      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
    elif op.type == common.OpType.SOURCE_BSDIFF and self.minor_version >= 2:
      self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
    elif op.type == common.OpType.BROTLI_BSDIFF and self.minor_version >= 4:
      self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
    elif op.type == common.OpType.PUFFDIFF and self.minor_version >= 5:
      self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
      self._CheckAnySourceOperation(op, total_src_blocks, op_name)
    else:
      raise error.PayloadError(
          'Operation %s (type %d) not allowed in minor version %d' %
          (op_name, op.type, self.minor_version))
    # Blob-less operations (e.g. MOVE, SOURCE_COPY, ZERO) consume no data.
    return data_length if data_length is not None else 0
   1025 
   1026   def _SizeToNumBlocks(self, size):
   1027     """Returns the number of blocks needed to contain a given byte size."""
   1028     return (size + self.block_size - 1) / self.block_size
   1029 
   1030   def _AllocBlockCounters(self, total_size):
   1031     """Returns a freshly initialized array of block counters.
   1032 
   1033     Note that the generated array is not portable as is due to byte-ordering
   1034     issues, hence it should not be serialized.
   1035 
   1036     Args:
   1037       total_size: The total block size in bytes.
   1038 
   1039     Returns:
   1040       An array of unsigned short elements initialized to zero, one for each of
   1041       the blocks necessary for containing the partition.
   1042     """
   1043     return array.array('H',
   1044                        itertools.repeat(0, self._SizeToNumBlocks(total_size)))
   1045 
  def _CheckOperations(self, operations, report, base_name, old_fs_size,
                       new_fs_size, old_usable_size, new_usable_size,
                       prev_data_offset, allow_signature):
    """Checks a sequence of update operations.

    Args:
      operations: The sequence of operations to check.
      report: The report object to add to.
      base_name: The name of the operation block.
      old_fs_size: The old filesystem size in bytes.
      new_fs_size: The new filesystem size in bytes.
      old_usable_size: The overall usable size of the old partition in bytes.
      new_usable_size: The overall usable size of the new partition in bytes.
      prev_data_offset: Offset of last used data bytes.
      allow_signature: Whether this sequence may contain signature operations.

    Returns:
      The total data blob size used.

    Raises:
      error.PayloadError if any of the checks fails.
    """
    # The total size of data blobs used by operations scanned thus far.
    total_data_used = 0
    # Counts of specific operation types.
    op_counts = {
        common.OpType.REPLACE: 0,
        common.OpType.REPLACE_BZ: 0,
        common.OpType.MOVE: 0,
        common.OpType.ZERO: 0,
        common.OpType.BSDIFF: 0,
        common.OpType.SOURCE_COPY: 0,
        common.OpType.SOURCE_BSDIFF: 0,
        common.OpType.PUFFDIFF: 0,
        common.OpType.BROTLI_BSDIFF: 0,
    }
    # Total blob sizes for each operation type.
    op_blob_totals = {
        common.OpType.REPLACE: 0,
        common.OpType.REPLACE_BZ: 0,
        # MOVE operations don't have blobs.
        common.OpType.BSDIFF: 0,
        # SOURCE_COPY operations don't have blobs.
        common.OpType.SOURCE_BSDIFF: 0,
        common.OpType.PUFFDIFF: 0,
        common.OpType.BROTLI_BSDIFF: 0,
    }
    # Counts of hashed vs unhashed operations.
    blob_hash_counts = {
        'hashed': 0,
        'unhashed': 0,
    }
    if allow_signature:
      blob_hash_counts['signature'] = 0

    # Allocate old and new block counters.
    # Old-side (read) counters are only tracked when there is an old
    # filesystem to read from; otherwise they remain None.
    old_block_counters = (self._AllocBlockCounters(old_usable_size)
                          if old_fs_size else None)
    new_block_counters = self._AllocBlockCounters(new_usable_size)

    # Process and verify each operation.
    op_num = 0
    for op, op_name in common.OperationIter(operations, base_name):
      op_num += 1

      # Check: Type is valid.
      if op.type not in op_counts.keys():
        raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type))
      op_counts[op.type] += 1

      # Only the last operation may carry a signature blob (see
      # _CheckOperation), hence is_last is passed down explicitly.
      is_last = op_num == len(operations)
      curr_data_used = self._CheckOperation(
          op, op_name, is_last, old_block_counters, new_block_counters,
          old_usable_size, new_usable_size,
          prev_data_offset + total_data_used, allow_signature,
          blob_hash_counts)
      # Only blob-carrying operations contribute to data-section accounting.
      if curr_data_used:
        op_blob_totals[op.type] += curr_data_used
        total_data_used += curr_data_used

    # Report totals and breakdown statistics.
    report.AddField('total operations', op_num)
    report.AddField(
        None,
        histogram.Histogram.FromCountDict(op_counts,
                                          key_names=common.OpType.NAMES),
        indent=1)
    report.AddField('total blobs', sum(blob_hash_counts.values()))
    report.AddField(None,
                    histogram.Histogram.FromCountDict(blob_hash_counts),
                    indent=1)
    report.AddField('total blob size', _AddHumanReadableSize(total_data_used))
    report.AddField(
        None,
        histogram.Histogram.FromCountDict(op_blob_totals,
                                          formatter=_AddHumanReadableSize,
                                          key_names=common.OpType.NAMES),
        indent=1)

    # Report read/write histograms.
    if old_block_counters:
      report.AddField('block read hist',
                      histogram.Histogram.FromKeyList(old_block_counters),
                      linebreak=True, indent=1)

    # Only the filesystem-sized prefix of the write counters is histogrammed;
    # blocks beyond new_fs_size are outside the filesystem proper.
    new_write_hist = histogram.Histogram.FromKeyList(
        new_block_counters[:self._SizeToNumBlocks(new_fs_size)])
    report.AddField('block write hist', new_write_hist, linebreak=True,
                    indent=1)

    # Check: Full update must write each dst block once.
    if self.payload_type == _TYPE_FULL and new_write_hist.GetKeys() != [1]:
      raise error.PayloadError(
          '%s: not all blocks written exactly once during full update.' %
          base_name)

    return total_data_used
   1163 
   1164   def _CheckSignatures(self, report, pubkey_file_name):
   1165     """Checks a payload's signature block."""
   1166     sigs_raw = self.payload.ReadDataBlob(self.sigs_offset, self.sigs_size)
   1167     sigs = update_metadata_pb2.Signatures()
   1168     sigs.ParseFromString(sigs_raw)
   1169     report.AddSection('signatures')
   1170 
   1171     # Check: At least one signature present.
   1172     if not sigs.signatures:
   1173       raise error.PayloadError('Signature block is empty.')
   1174 
   1175     last_ops_section = (self.payload.manifest.kernel_install_operations or
   1176                         self.payload.manifest.install_operations)
   1177     fake_sig_op = last_ops_section[-1]
   1178     # Check: signatures_{offset,size} must match the last (fake) operation.
   1179     if not (fake_sig_op.type == common.OpType.REPLACE and
   1180             self.sigs_offset == fake_sig_op.data_offset and
   1181             self.sigs_size == fake_sig_op.data_length):
   1182       raise error.PayloadError(
   1183           'Signatures_{offset,size} (%d+%d) does not match last operation '
   1184           '(%d+%d).' %
   1185           (self.sigs_offset, self.sigs_size, fake_sig_op.data_offset,
   1186            fake_sig_op.data_length))
   1187 
   1188     # Compute the checksum of all data up to signature blob.
   1189     # TODO(garnold) we're re-reading the whole data section into a string
   1190     # just to compute the checksum; instead, we could do it incrementally as
   1191     # we read the blobs one-by-one, under the assumption that we're reading
   1192     # them in order (which currently holds). This should be reconsidered.
   1193     payload_hasher = self.payload.manifest_hasher.copy()
   1194     common.Read(self.payload.payload_file, self.sigs_offset,
   1195                 offset=self.payload.data_offset, hasher=payload_hasher)
   1196 
   1197     for sig, sig_name in common.SignatureIter(sigs.signatures, 'signatures'):
   1198       sig_report = report.AddSubReport(sig_name)
   1199 
   1200       # Check: Signature contains mandatory fields.
   1201       self._CheckMandatoryField(sig, 'version', sig_report, sig_name)
   1202       self._CheckMandatoryField(sig, 'data', None, sig_name)
   1203       sig_report.AddField('data len', len(sig.data))
   1204 
   1205       # Check: Signatures pertains to actual payload hash.
   1206       if sig.version == 1:
   1207         self._CheckSha256Signature(sig.data, pubkey_file_name,
   1208                                    payload_hasher.digest(), sig_name)
   1209       else:
   1210         raise error.PayloadError('Unknown signature version (%d).' %
   1211                                  sig.version)
   1212 
   1213   def Run(self, pubkey_file_name=None, metadata_sig_file=None,
   1214           rootfs_part_size=0, kernel_part_size=0, report_out_file=None):
   1215     """Checker entry point, invoking all checks.
   1216 
   1217     Args:
   1218       pubkey_file_name: Public key used for signature verification.
   1219       metadata_sig_file: Metadata signature, if verification is desired.
   1220       rootfs_part_size: The size of rootfs partitions in bytes (default: infer
   1221                         based on payload type and version).
   1222       kernel_part_size: The size of kernel partitions in bytes (default: use
   1223                         reported filesystem size).
   1224       report_out_file: File object to dump the report to.
   1225 
   1226     Raises:
   1227       error.PayloadError if payload verification failed.
   1228     """
   1229     if not pubkey_file_name:
   1230       pubkey_file_name = _DEFAULT_PUBKEY_FILE_NAME
   1231 
   1232     report = _PayloadReport()
   1233 
   1234     # Get payload file size.
   1235     self.payload.payload_file.seek(0, 2)
   1236     payload_file_size = self.payload.payload_file.tell()
   1237     self.payload.ResetFile()
   1238 
   1239     try:
   1240       # Check metadata signature (if provided).
   1241       if metadata_sig_file:
   1242         metadata_sig = base64.b64decode(metadata_sig_file.read())
   1243         self._CheckSha256Signature(metadata_sig, pubkey_file_name,
   1244                                    self.payload.manifest_hasher.digest(),
   1245                                    'metadata signature')
   1246 
   1247       # Part 1: Check the file header.
   1248       report.AddSection('header')
   1249       # Check: Payload version is valid.
   1250       if self.payload.header.version != 1:
   1251         raise error.PayloadError('Unknown payload version (%d).' %
   1252                                  self.payload.header.version)
   1253       report.AddField('version', self.payload.header.version)
   1254       report.AddField('manifest len', self.payload.header.manifest_len)
   1255 
   1256       # Part 2: Check the manifest.
   1257       self._CheckManifest(report, rootfs_part_size, kernel_part_size)
   1258       assert self.payload_type, 'payload type should be known by now'
   1259 
   1260       # Infer the usable partition size when validating rootfs operations:
   1261       # - If rootfs partition size was provided, use that.
   1262       # - Otherwise, if this is an older delta (minor version < 2), stick with
   1263       #   a known constant size. This is necessary because older deltas may
   1264       #   exceed the filesystem size when moving data blocks around.
   1265       # - Otherwise, use the encoded filesystem size.
   1266       new_rootfs_usable_size = self.new_rootfs_fs_size
   1267       old_rootfs_usable_size = self.old_rootfs_fs_size
   1268       if rootfs_part_size:
   1269         new_rootfs_usable_size = rootfs_part_size
   1270         old_rootfs_usable_size = rootfs_part_size
   1271       elif self.payload_type == _TYPE_DELTA and self.minor_version in (None, 1):
   1272         new_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
   1273         old_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
   1274 
   1275       # Part 3: Examine rootfs operations.
   1276       # TODO(garnold)(chromium:243559) only default to the filesystem size if
   1277       # no explicit size provided *and* the partition size is not embedded in
   1278       # the payload; see issue for more details.
   1279       report.AddSection('rootfs operations')
   1280       total_blob_size = self._CheckOperations(
   1281           self.payload.manifest.install_operations, report,
   1282           'install_operations', self.old_rootfs_fs_size,
   1283           self.new_rootfs_fs_size, old_rootfs_usable_size,
   1284           new_rootfs_usable_size, 0, False)
   1285 
   1286       # Part 4: Examine kernel operations.
   1287       # TODO(garnold)(chromium:243559) as above.
   1288       report.AddSection('kernel operations')
   1289       total_blob_size += self._CheckOperations(
   1290           self.payload.manifest.kernel_install_operations, report,
   1291           'kernel_install_operations', self.old_kernel_fs_size,
   1292           self.new_kernel_fs_size,
   1293           kernel_part_size if kernel_part_size else self.old_kernel_fs_size,
   1294           kernel_part_size if kernel_part_size else self.new_kernel_fs_size,
   1295           total_blob_size, True)
   1296 
   1297       # Check: Operations data reach the end of the payload file.
   1298       used_payload_size = self.payload.data_offset + total_blob_size
   1299       if used_payload_size != payload_file_size:
   1300         raise error.PayloadError(
   1301             'Used payload size (%d) different from actual file size (%d).' %
   1302             (used_payload_size, payload_file_size))
   1303 
   1304       # Part 5: Handle payload signatures message.
   1305       if self.check_payload_sig and self.sigs_size:
   1306         self._CheckSignatures(report, pubkey_file_name)
   1307 
   1308       # Part 6: Summary.
   1309       report.AddSection('summary')
   1310       report.AddField('update type', self.payload_type)
   1311 
   1312       report.Finalize()
   1313     finally:
   1314       if report_out_file:
   1315         report.Dump(report_out_file)
   1316