      1 # Copyright (C) 2008 The Android Open Source Project
      2 #
      3 # Licensed under the Apache License, Version 2.0 (the "License");
      4 # you may not use this file except in compliance with the License.
      5 # You may obtain a copy of the License at
      6 #
      7 #      http://www.apache.org/licenses/LICENSE-2.0
      8 #
      9 # Unless required by applicable law or agreed to in writing, software
     10 # distributed under the License is distributed on an "AS IS" BASIS,
     11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 # See the License for the specific language governing permissions and
     13 # limitations under the License.
     14 
     15 from __future__ import print_function
     16 
     17 import collections
     18 import copy
     19 import errno
     20 import fnmatch
     21 import getopt
     22 import getpass
     23 import gzip
     24 import imp
     25 import json
     26 import logging
     27 import logging.config
     28 import os
     29 import platform
     30 import re
     31 import shlex
     32 import shutil
     33 import string
     34 import subprocess
     35 import sys
     36 import tempfile
     37 import threading
     38 import time
     39 import zipfile
     40 from hashlib import sha1, sha256
     41 
     42 import blockimgdiff
     43 import sparse_img
     44 
     45 logger = logging.getLogger(__name__)
     46 
     47 
     48 class Options(object):
     49   def __init__(self):
     50     base_out_path = os.getenv('OUT_DIR_COMMON_BASE')
     51     if base_out_path is None:
     52       base_search_path = "out"
     53     else:
     54       base_search_path = os.path.join(base_out_path,
     55                                       os.path.basename(os.getcwd()))
     56 
     57     platform_search_path = {
     58         "linux2": os.path.join(base_search_path, "host/linux-x86"),
     59         "darwin": os.path.join(base_search_path, "host/darwin-x86"),
     60     }
     61 
     62     self.search_path = platform_search_path.get(sys.platform)
     63     self.signapk_path = "framework/signapk.jar"  # Relative to search_path
     64     self.signapk_shared_library_path = "lib64"   # Relative to search_path
     65     self.extra_signapk_args = []
     66     self.java_path = "java"  # Use the one on the path by default.
     67     self.java_args = ["-Xmx2048m"]  # The default JVM args.
     68     self.public_key_suffix = ".x509.pem"
     69     self.private_key_suffix = ".pk8"
     70     # use otatools built boot_signer by default
     71     self.boot_signer_path = "boot_signer"
     72     self.boot_signer_args = []
     73     self.verity_signer_path = None
     74     self.verity_signer_args = []
     75     self.verbose = False
     76     self.tempfiles = []
     77     self.device_specific = None
     78     self.extras = {}
     79     self.info_dict = None
     80     self.source_info_dict = None
     81     self.target_info_dict = None
     82     self.worker_threads = None
     83     # Stash size cannot exceed cache_size * threshold.
     84     self.cache_size = None
     85     self.stash_threshold = 0.8
     86 
     87 
     88 OPTIONS = Options()
     89 
     90 # The block size that's used across the releasetools scripts.
     91 BLOCK_SIZE = 4096
     92 
     93 # Values for "certificate" in apkcerts that mean special things.
     94 SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
     95 
     96 # The partitions allowed to be signed by AVB (Android verified boot 2.0).
     97 AVB_PARTITIONS = ('boot', 'recovery', 'system', 'vendor', 'product',
     98                   'product_services', 'dtbo', 'odm')
     99 
    100 # Partitions that should have their care_map added to META/care_map.pb
    101 PARTITIONS_WITH_CARE_MAP = ('system', 'vendor', 'product', 'product_services',
    102                             'odm')
    103 
    104 
    105 class ErrorCode(object):
    106   """Define error_codes for failures that happen during the actual
    107   update package installation.
    108 
    109   Error codes 0-999 are reserved for failures before the package
    110   installation (i.e. low battery, package verification failure).
    111   Detailed code in 'bootable/recovery/error_code.h' """
    112 
    113   SYSTEM_VERIFICATION_FAILURE = 1000
    114   SYSTEM_UPDATE_FAILURE = 1001
    115   SYSTEM_UNEXPECTED_CONTENTS = 1002
    116   SYSTEM_NONZERO_CONTENTS = 1003
    117   SYSTEM_RECOVER_FAILURE = 1004
    118   VENDOR_VERIFICATION_FAILURE = 2000
    119   VENDOR_UPDATE_FAILURE = 2001
    120   VENDOR_UNEXPECTED_CONTENTS = 2002
    121   VENDOR_NONZERO_CONTENTS = 2003
    122   VENDOR_RECOVER_FAILURE = 2004
    123   OEM_PROP_MISMATCH = 3000
    124   FINGERPRINT_MISMATCH = 3001
    125   THUMBPRINT_MISMATCH = 3002
    126   OLDER_BUILD = 3003
    127   DEVICE_MISMATCH = 3004
    128   BAD_PATCH_FILE = 3005
    129   INSUFFICIENT_CACHE_SPACE = 3006
    130   TUNE_PARTITION_FAILURE = 3007
    131   APPLY_PATCH_FAILURE = 3008
    132 
    133 
    134 class ExternalError(RuntimeError):
    135   pass
    136 
    137 
    138 def InitLogging():
    139   DEFAULT_LOGGING_CONFIG = {
    140       'version': 1,
    141       'disable_existing_loggers': False,
    142       'formatters': {
    143           'standard': {
    144               'format':
    145                   '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
    146               'datefmt': '%Y-%m-%d %H:%M:%S',
    147           },
    148       },
    149       'handlers': {
    150           'default': {
    151               'class': 'logging.StreamHandler',
    152               'formatter': 'standard',
    153           },
    154       },
    155       'loggers': {
    156           '': {
    157               'handlers': ['default'],
    158               'level': 'WARNING',
    159               'propagate': True,
    160           }
    161       }
    162   }
    163   env_config = os.getenv('LOGGING_CONFIG')
    164   if env_config:
    165     with open(env_config) as f:
    166       config = json.load(f)
    167   else:
    168     config = DEFAULT_LOGGING_CONFIG
    169 
    170     # Increase the logging level for verbose mode.
    171     if OPTIONS.verbose:
    172       config = copy.deepcopy(DEFAULT_LOGGING_CONFIG)
    173       config['loggers']['']['level'] = 'INFO'
    174 
    175   logging.config.dictConfig(config)
    176 
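        # A minimal sketch of a JSON file that LOGGING_CONFIG may point to; the
        # keys follow the standard logging.config.dictConfig schema (the values
        # here are illustrative):
        #   {
        #     "version": 1,
        #     "handlers": {"default": {"class": "logging.StreamHandler"}},
        #     "loggers": {"": {"handlers": ["default"], "level": "DEBUG"}}
        #   }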
    177 
    178 def Run(args, verbose=None, **kwargs):
    179   """Creates and returns a subprocess.Popen object.
    180 
    181   Args:
    182     args: The command represented as a list of strings.
    183     verbose: Whether the commands should be shown. Defaults to the global
    184         verbosity if unspecified.
    185     kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
    186         stdin, etc. stdout and stderr will default to subprocess.PIPE and
    187         subprocess.STDOUT respectively unless caller specifies any of them.
    188 
    189   Returns:
    190     A subprocess.Popen object.
    191   """
    192   if 'stdout' not in kwargs and 'stderr' not in kwargs:
    193     kwargs['stdout'] = subprocess.PIPE
    194     kwargs['stderr'] = subprocess.STDOUT
    195   # Don't log anything if the caller explicitly says so.
    196   if verbose is not False:
    197     logger.info("  Running: \"%s\"", " ".join(args))
    198   return subprocess.Popen(args, **kwargs)
    199 
    200 
    201 def RunAndWait(args, verbose=None, **kwargs):
    202   """Runs the given command waiting for it to complete.
    203 
    204   Args:
    205     args: The command represented as a list of strings.
    206     verbose: Whether the commands should be shown. Defaults to the global
    207         verbosity if unspecified.
    208     kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
    209         stdin, etc. stdout and stderr will default to subprocess.PIPE and
    210         subprocess.STDOUT respectively unless caller specifies any of them.
    211 
    212   Raises:
    213     ExternalError: On non-zero exit from the command.
    214   """
    215   proc = Run(args, verbose=verbose, **kwargs)
    216   proc.wait()
    217 
    218   if proc.returncode != 0:
    219     raise ExternalError(
    220         "Failed to run command '{}' (exit code {})".format(
    221             args, proc.returncode))
    222 
    223 
    224 def RunAndCheckOutput(args, verbose=None, **kwargs):
    225   """Runs the given command and returns the output.
    226 
    227   Args:
    228     args: The command represented as a list of strings.
    229     verbose: Whether the commands should be shown. Defaults to the global
    230         verbosity if unspecified.
    231     kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
    232         stdin, etc. stdout and stderr will default to subprocess.PIPE and
    233         subprocess.STDOUT respectively unless caller specifies any of them.
    234 
    235   Returns:
    236     The output string.
    237 
    238   Raises:
    239     ExternalError: On non-zero exit from the command.
    240   """
    241   proc = Run(args, verbose=verbose, **kwargs)
    242   output, _ = proc.communicate()
    243   # Don't log anything if the caller explicitly says so.
    244   if verbose is not False:
    245     logger.info("%s", output.rstrip())
    246   if proc.returncode != 0:
    247     raise ExternalError(
    248         "Failed to run command '{}' (exit code {}):\n{}".format(
    249             args, proc.returncode, output))
    250   return output
    251 
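        # A minimal usage sketch of the helpers above (the commands are
        # illustrative):
        #   output = RunAndCheckOutput(["ls", "-l"])  # raises ExternalError on
        #                                             # non-zero exit
        #   RunAndWait(["true"])  # waits for completion; raises on failure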
    252 
    253 def RoundUpTo4K(value):
    254   rounded_up = value + 4095
    255   return rounded_up - (rounded_up % 4096)
    256 
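        # For example, RoundUpTo4K(1) == 4096, RoundUpTo4K(4096) == 4096 and
        # RoundUpTo4K(4097) == 8192.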
    257 
    258 def CloseInheritedPipes():
    259   """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
    260   before doing other work."""
    261   if platform.system() != "Darwin":
    262     return
    263   for d in range(3, 1025):
    264     try:
    265       stat = os.fstat(d)
    266       if stat is not None:
    267         pipebit = stat[0] & 0x1000  # st_mode & S_IFIFO: set for pipes
    268         if pipebit != 0:
    269           os.close(d)
    270     except OSError:
    271       pass
    272 
    273 
    274 def LoadInfoDict(input_file, repacking=False):
    275   """Loads the key/value pairs from the given input target_files.
    276 
    277   It reads the `META/misc_info.txt` file in the target_files input, does sanity
    278   checks and returns the parsed key/value pairs for the given build. It's
    279   usually called early when working on input target_files files, e.g. when
    280   generating OTAs, or signing builds. Note that the function may be called
    281   against an old target_files file (i.e. from past dessert releases). So the
    282   property parsing needs to be backward compatible.
    283 
    284   In a `META/misc_info.txt`, a few properties are stored as links to the files
    285   in the PRODUCT_OUT directory. It works fine with the build system. However,
    286   they are no longer available when (re)generating images from a target_files zip.
    287   When `repacking` is True, redirect these properties to the actual files in the
    288   unzipped directory.
    289 
    290   Args:
    291     input_file: The input target_files file, which could be an open
    292         zipfile.ZipFile instance, or a str for the dir that contains the files
    293         unzipped from a target_files file.
    294     repacking: Whether it's trying to repack a target_files file after loading the
    295         info dict (default: False). If so, it will rewrite a few loaded
    296         properties (e.g. selinux_fc, root_dir) to point to the actual files in
    297         the target_files file. When doing repacking, `input_file` must be a dir.
    298 
    299   Returns:
    300     A dict that contains the parsed key/value pairs.
    301 
    302   Raises:
    303     AssertionError: On invalid input arguments.
    304     ValueError: On malformed input values.
    305   """
    306   if repacking:
    307     assert isinstance(input_file, str), \
    308         "input_file must be a path str when doing repacking"
    309 
    310   def read_helper(fn):
    311     if isinstance(input_file, zipfile.ZipFile):
    312       return input_file.read(fn)
    313     else:
    314       path = os.path.join(input_file, *fn.split("/"))
    315       try:
    316         with open(path) as f:
    317           return f.read()
    318       except IOError as e:
    319         if e.errno != errno.ENOENT: raise  # Don't mask unexpected errors.
    320         raise KeyError(fn)
    321 
    322   try:
    323     d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
    324   except KeyError:
    325     raise ValueError("Failed to find META/misc_info.txt in input target-files")
    326 
    327   if "recovery_api_version" not in d:
    328     raise ValueError("Failed to find 'recovery_api_version'")
    329   if "fstab_version" not in d:
    330     raise ValueError("Failed to find 'fstab_version'")
    331 
    332   if repacking:
    333     # We carry a copy of file_contexts.bin under META/. If not available, search
    334     # BOOT/RAMDISK/. Note that sometimes we may need a different file to build
    335     # images than the one running on the device; in that case, the one used for
    336     # image generation must have been copied to META/.
    337     fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    338     fc_config = os.path.join(input_file, "META", fc_basename)
    339     assert os.path.exists(fc_config)
    340 
    341     d["selinux_fc"] = fc_config
    342 
    343     # Similarly we need to redirect "root_dir", and "root_fs_config".
    344     d["root_dir"] = os.path.join(input_file, "ROOT")
    345     d["root_fs_config"] = os.path.join(
    346         input_file, "META", "root_filesystem_config.txt")
    347 
    348     # Redirect {system,vendor}_base_fs_file.
    349     if "system_base_fs_file" in d:
    350       basename = os.path.basename(d["system_base_fs_file"])
    351       system_base_fs_file = os.path.join(input_file, "META", basename)
    352       if os.path.exists(system_base_fs_file):
    353         d["system_base_fs_file"] = system_base_fs_file
    354       else:
    355         logger.warning(
    356             "Failed to find system base fs file: %s", system_base_fs_file)
    357         del d["system_base_fs_file"]
    358 
    359     if "vendor_base_fs_file" in d:
    360       basename = os.path.basename(d["vendor_base_fs_file"])
    361       vendor_base_fs_file = os.path.join(input_file, "META", basename)
    362       if os.path.exists(vendor_base_fs_file):
    363         d["vendor_base_fs_file"] = vendor_base_fs_file
    364       else:
    365         logger.warning(
    366             "Failed to find vendor base fs file: %s", vendor_base_fs_file)
    367         del d["vendor_base_fs_file"]
    368 
    369   def makeint(key):
    370     if key in d:
    371       d[key] = int(d[key], 0)
    372 
    373   makeint("recovery_api_version")
    374   makeint("blocksize")
    375   makeint("system_size")
    376   makeint("vendor_size")
    377   makeint("userdata_size")
    378   makeint("cache_size")
    379   makeint("recovery_size")
    380   makeint("boot_size")
    381   makeint("fstab_version")
    382 
    383   # We changed recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab to
    384   # ../RAMDISK/system/etc/recovery.fstab. LoadInfoDict() has to handle both
    385   # cases, since it may load the info_dict from an old build (e.g. when
    386   # generating incremental OTAs from that build).
    387   system_root_image = d.get("system_root_image") == "true"
    388   if d.get("no_recovery") != "true":
    389     recovery_fstab_path = "RECOVERY/RAMDISK/system/etc/recovery.fstab"
    390     if isinstance(input_file, zipfile.ZipFile):
    391       if recovery_fstab_path not in input_file.namelist():
    392         recovery_fstab_path = "RECOVERY/RAMDISK/etc/recovery.fstab"
    393     else:
    394       path = os.path.join(input_file, *recovery_fstab_path.split("/"))
    395       if not os.path.exists(path):
    396         recovery_fstab_path = "RECOVERY/RAMDISK/etc/recovery.fstab"
    397     d["fstab"] = LoadRecoveryFSTab(
    398         read_helper, d["fstab_version"], recovery_fstab_path, system_root_image)
    399 
    400   elif d.get("recovery_as_boot") == "true":
    401     recovery_fstab_path = "BOOT/RAMDISK/system/etc/recovery.fstab"
    402     if isinstance(input_file, zipfile.ZipFile):
    403       if recovery_fstab_path not in input_file.namelist():
    404         recovery_fstab_path = "BOOT/RAMDISK/etc/recovery.fstab"
    405     else:
    406       path = os.path.join(input_file, *recovery_fstab_path.split("/"))
    407       if not os.path.exists(path):
    408         recovery_fstab_path = "BOOT/RAMDISK/etc/recovery.fstab"
    409     d["fstab"] = LoadRecoveryFSTab(
    410         read_helper, d["fstab_version"], recovery_fstab_path, system_root_image)
    411 
    412   else:
    413     d["fstab"] = None
    414 
    415   # Tries to load the build props for all partitions with care_map, including
    416   # system and vendor.
    417   for partition in PARTITIONS_WITH_CARE_MAP:
    418     partition_prop = "{}.build.prop".format(partition)
    419     d[partition_prop] = LoadBuildProp(
    420         read_helper, "{}/build.prop".format(partition.upper()))
    421     # Some partitions might use /<partition>/etc/build.prop as the new path.
    422     # TODO: try new path first when majority of them switch to the new path.
    423     if not d[partition_prop]:
    424       d[partition_prop] = LoadBuildProp(
    425           read_helper, "{}/etc/build.prop".format(partition.upper()))
    426   d["build.prop"] = d["system.build.prop"]
    427 
    428   # Set up the salt (based on fingerprint or thumbprint) that will be used when
    429   # adding AVB footer.
    430   if d.get("avb_enable") == "true":
    431     fp = None
    432     if "build.prop" in d:
    433       build_prop = d["build.prop"]
    434       if "ro.build.fingerprint" in build_prop:
    435         fp = build_prop["ro.build.fingerprint"]
    436       elif "ro.build.thumbprint" in build_prop:
    437         fp = build_prop["ro.build.thumbprint"]
    438     if fp:
    439       d["avb_salt"] = sha256(fp).hexdigest()
    440 
    441   return d
    442 
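        # A minimal usage sketch for LoadInfoDict (the zip name is
        # illustrative):
        #   with zipfile.ZipFile("target_files.zip") as input_zip:
        #     OPTIONS.info_dict = LoadInfoDict(input_zip)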
    443 
    444 def LoadBuildProp(read_helper, prop_file):
    445   try:
    446     data = read_helper(prop_file)
    447   except KeyError:
    448     logger.warning("Failed to read %s", prop_file)
    449     data = ""
    450   return LoadDictionaryFromLines(data.split("\n"))
    451 
    452 
    453 def LoadDictionaryFromLines(lines):
    454   d = {}
    455   for line in lines:
    456     line = line.strip()
    457     if not line or line.startswith("#"):
    458       continue
    459     if "=" in line:
    460       name, value = line.split("=", 1)
    461       d[name] = value
    462   return d
    463 
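        # For example, LoadDictionaryFromLines(["# comment", "a=b", "c=d=e"])
        # returns {'a': 'b', 'c': 'd=e'}.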
    464 
    465 def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
    466                       system_root_image=False):
    467   class Partition(object):
    468     def __init__(self, mount_point, fs_type, device, length, context):
    469       self.mount_point = mount_point
    470       self.fs_type = fs_type
    471       self.device = device
    472       self.length = length
    473       self.context = context
    474 
    475   try:
    476     data = read_helper(recovery_fstab_path)
    477   except KeyError:
    478     logger.warning("Failed to find %s", recovery_fstab_path)
    479     data = ""
    480 
    481   assert fstab_version == 2
    482 
    483   d = {}
    484   for line in data.split("\n"):
    485     line = line.strip()
    486     if not line or line.startswith("#"):
    487       continue
    488 
    489     # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    490     pieces = line.split()
    491     if len(pieces) != 5:
    492       raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
    493 
    494     # Ignore entries that are managed by vold.
    495     options = pieces[4]
    496     if "voldmanaged=" in options:
    497       continue
    498 
    499     # It's a good line, parse it.
    500     length = 0
    501     options = options.split(",")
    502     for i in options:
    503       if i.startswith("length="):
    504         length = int(i[7:])
    505       else:
    506         # Ignore all unknown options in the unified fstab.
    507         continue
    508 
    509     mount_flags = pieces[3]
    510     # Honor the SELinux context if present.
    511     context = None
    512     for i in mount_flags.split(","):
    513       if i.startswith("context="):
    514         context = i
    515 
    516     mount_point = pieces[1]
    517     d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
    518                                device=pieces[0], length=length, context=context)
    519 
    520   # / is used for the system mount point when the root directory is included in
    521   # system. Other areas assume system is always at "/system" so point /system
    522   # at /.
    523   if system_root_image:
    524     assert "/system" not in d and "/" in d
    525     d["/system"] = d["/"]
    526   return d
    527 
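        # For example, an fstab v2 line such as (device path illustrative)
        #   /dev/block/by-name/system  /system  ext4  ro,barrier=1  wait
        # yields d["/system"] with fs_type "ext4", length 0 and context None.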
    528 
    529 def DumpInfoDict(d):
    530   for k, v in sorted(d.items()):
    531     logger.info("%-25s = (%s) %s", k, type(v).__name__, v)
    532 
    533 
    534 def AppendAVBSigningArgs(cmd, partition):
    535   """Append signing arguments for avbtool."""
    536   # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
    537   key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
    538   algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
    539   if key_path and algorithm:
    540     cmd.extend(["--key", key_path, "--algorithm", algorithm])
    541   avb_salt = OPTIONS.info_dict.get("avb_salt")
    542   # make_vbmeta_image doesn't like "--salt" (and it's not needed).
    543   if avb_salt and not partition.startswith("vbmeta"):
    544     cmd.extend(["--salt", avb_salt])
    545 
    546 
    547 def GetAvbChainedPartitionArg(partition, info_dict, key=None):
    548   """Constructs and returns the arg to build or verify a chained partition.
    549 
    550   Args:
    551     partition: The partition name.
    552     info_dict: The info dict to look up the key info and rollback index
    553         location.
    554     key: The key to be used for building or verifying the partition. Defaults to
    555         the key listed in info_dict.
    556 
    557   Returns:
    558     A string of form "partition:rollback_index_location:key" that can be used to
    559     build or verify vbmeta image.
    560   """
    561   if key is None:
    562     key = info_dict["avb_" + partition + "_key_path"]
    563   pubkey_path = ExtractAvbPublicKey(key)
    564   rollback_index_location = info_dict[
    565       "avb_" + partition + "_rollback_index_location"]
    566   return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)
    567 
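        # For example, with partition "system", rollback index location 1, and
        # a public key extracted to /tmp/system.avbpubkey (path illustrative),
        # the returned arg is "system:1:/tmp/system.avbpubkey".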
    568 
    569 def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
    570                         has_ramdisk=False, two_step_image=False):
    571   """Build a bootable image from the specified sourcedir.
    572 
    573   Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
    574   'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
    575   we are building a two-step special image (i.e. building a recovery image to
    576   be loaded into /boot in two-step OTAs).
    577 
    578   Return the image data, or None if sourcedir does not appear to contain files
    579   for building the requested image.
    580   """
    581 
    582   def make_ramdisk():
    583     ramdisk_img = tempfile.NamedTemporaryFile()
    584 
    585     if os.access(fs_config_file, os.F_OK):
    586       cmd = ["mkbootfs", "-f", fs_config_file,
    587              os.path.join(sourcedir, "RAMDISK")]
    588     else:
    589       cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    590     p1 = Run(cmd, stdout=subprocess.PIPE)
    591     p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
    592 
    593     p2.wait()
    594     p1.wait()
    595     assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    596     assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
    597 
    598     return ramdisk_img
    599 
    600   if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    601     return None
    602 
    603   if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    604     return None
    605 
    606   if info_dict is None:
    607     info_dict = OPTIONS.info_dict
    608 
    609   img = tempfile.NamedTemporaryFile()
    610 
    611   if has_ramdisk:
    612     ramdisk_img = make_ramdisk()
    613 
    614   # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
    615   mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
    616 
    617   cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
    618 
    619   fn = os.path.join(sourcedir, "second")
    620   if os.access(fn, os.F_OK):
    621     cmd.append("--second")
    622     cmd.append(fn)
    623 
    624   fn = os.path.join(sourcedir, "dtb")
    625   if os.access(fn, os.F_OK):
    626     cmd.append("--dtb")
    627     cmd.append(fn)
    628 
    629   fn = os.path.join(sourcedir, "cmdline")
    630   if os.access(fn, os.F_OK):
    631     cmd.append("--cmdline")
    632     cmd.append(open(fn).read().rstrip("\n"))
    633 
    634   fn = os.path.join(sourcedir, "base")
    635   if os.access(fn, os.F_OK):
    636     cmd.append("--base")
    637     cmd.append(open(fn).read().rstrip("\n"))
    638 
    639   fn = os.path.join(sourcedir, "pagesize")
    640   if os.access(fn, os.F_OK):
    641     cmd.append("--pagesize")
    642     cmd.append(open(fn).read().rstrip("\n"))
    643 
    644   args = info_dict.get("mkbootimg_args")
    645   if args and args.strip():
    646     cmd.extend(shlex.split(args))
    647 
    648   args = info_dict.get("mkbootimg_version_args")
    649   if args and args.strip():
    650     cmd.extend(shlex.split(args))
    651 
    652   if has_ramdisk:
    653     cmd.extend(["--ramdisk", ramdisk_img.name])
    654 
    655   img_unsigned = None
    656   if info_dict.get("vboot"):
    657     img_unsigned = tempfile.NamedTemporaryFile()
    658     cmd.extend(["--output", img_unsigned.name])
    659   else:
    660     cmd.extend(["--output", img.name])
    661 
    662   # "boot" or "recovery", without extension.
    663   partition_name = os.path.basename(sourcedir).lower()
    664 
    665   if partition_name == "recovery":
    666     if info_dict.get("include_recovery_dtbo") == "true":
    667       fn = os.path.join(sourcedir, "recovery_dtbo")
    668       cmd.extend(["--recovery_dtbo", fn])
    669     if info_dict.get("include_recovery_acpio") == "true":
    670       fn = os.path.join(sourcedir, "recovery_acpio")
    671       cmd.extend(["--recovery_acpio", fn])
    672 
    673   RunAndCheckOutput(cmd)
    674 
    675   if (info_dict.get("boot_signer") == "true" and
    676       info_dict.get("verity_key")):
    677     # Hard-code the path as "/boot" for two-step special recovery image (which
    678     # will be loaded into /boot during the two-step OTA).
    679     if two_step_image:
    680       path = "/boot"
    681     else:
    682       path = "/" + partition_name
    683     cmd = [OPTIONS.boot_signer_path]
    684     cmd.extend(OPTIONS.boot_signer_args)
    685     cmd.extend([path, img.name,
    686                 info_dict["verity_key"] + ".pk8",
    687                 info_dict["verity_key"] + ".x509.pem", img.name])
    688     RunAndCheckOutput(cmd)
    689 
    690   # Sign the image if vboot is non-empty.
    691   elif info_dict.get("vboot"):
    692     path = "/" + partition_name
    693     img_keyblock = tempfile.NamedTemporaryFile()
    694     # We have switched from the prebuilt futility binary to using the tool
    695     # (futility-host) built from the source. Override the setting in the old
    696     # TF.zip.
    697     futility = info_dict["futility"]
    698     if futility.startswith("prebuilts/"):
    699       futility = "futility-host"
    700     cmd = [info_dict["vboot_signer_cmd"], futility,
    701            img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
    702            info_dict["vboot_key"] + ".vbprivk",
    703            info_dict["vboot_subkey"] + ".vbprivk",
    704            img_keyblock.name,
    705            img.name]
    706     RunAndCheckOutput(cmd)
    707 
    708     # Clean up the temp files.
    709     img_unsigned.close()
    710     img_keyblock.close()
    711 
    712   # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
    713   if info_dict.get("avb_enable") == "true":
    714     avbtool = info_dict["avb_avbtool"]
    715     part_size = info_dict[partition_name + "_size"]
    716     cmd = [avbtool, "add_hash_footer", "--image", img.name,
    717            "--partition_size", str(part_size), "--partition_name",
    718            partition_name]
    719     AppendAVBSigningArgs(cmd, partition_name)
    720     args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
    721     if args and args.strip():
    722       cmd.extend(shlex.split(args))
    723     RunAndCheckOutput(cmd)
    724 
    725   img.seek(0, os.SEEK_SET)  # The arguments are (offset, whence).
    726   data = img.read()
    727 
    728   if has_ramdisk:
    729     ramdisk_img.close()
    730   img.close()
    731 
    732   return data
    733 
    734 
    735 def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
    736                      info_dict=None, two_step_image=False):
    737   """Return a File object with the desired bootable image.
    738 
    739   Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
    740   otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
    741   the source files in 'unpack_dir'/'tree_subdir'."""
    742 
    743   prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
    744   if os.path.exists(prebuilt_path):
    745     logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
    746     return File.FromLocalFile(name, prebuilt_path)
    747 
    748   prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
    749   if os.path.exists(prebuilt_path):
    750     logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
    751     return File.FromLocalFile(name, prebuilt_path)
    752 
    753   logger.info("building image from target_files %s...", tree_subdir)
    754 
    755   if info_dict is None:
    756     info_dict = OPTIONS.info_dict
    757 
    758   # With system_root_image == "true", we don't pack a ramdisk into the boot
    759   # image, unless "recovery_as_boot" is specified, in which case we carry the
    760   # ramdisk for recovery.
    761   has_ramdisk = (info_dict.get("system_root_image") != "true" or
    762                  prebuilt_name != "boot.img" or
    763                  info_dict.get("recovery_as_boot") == "true")
    764 
    765   fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
    766   data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
    767                              os.path.join(unpack_dir, fs_config),
    768                              info_dict, has_ramdisk, two_step_image)
    769   if data:
    770     return File(name, data)
    771   return None
    772 
    773 
    774 def Gunzip(in_filename, out_filename):
    775   """Gunzips the given gzip compressed file to a given output file."""
    776   with gzip.open(in_filename, "rb") as in_file, \
    777        open(out_filename, "wb") as out_file:
    778     shutil.copyfileobj(in_file, out_file)
    779 
    780 
    781 def UnzipToDir(filename, dirname, patterns=None):
    782   """Unzips the archive to the given directory.
    783 
    784   Args:
    785     filename: The name of the zip file to unzip.
    786     dirname: Where the unzipped files will land.
    787     patterns: Files to unzip from the archive. If omitted, will unzip the entire
    788         archive. Non-matching patterns will be filtered out. If there's no match
    789         after the filtering, no file will be unzipped.
    790   """
    791   cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    792   if patterns is not None:
    793     # Filter out non-matching patterns. unzip will complain otherwise.
    794     with zipfile.ZipFile(filename) as input_zip:
    795       names = input_zip.namelist()
    796     filtered = [
    797         pattern for pattern in patterns if fnmatch.filter(names, pattern)]
    798 
    799     # There are no matching files. Don't unzip anything.
    800     if not filtered:
    801       return
    802     cmd.extend(filtered)
    803 
    804   RunAndCheckOutput(cmd)
    805 
    806 
    807 def UnzipTemp(filename, pattern=None):
    808   """Unzips the given archive into a temporary directory and returns the name.
    809 
    810   Args:
    811     filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into
    812         a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
    813 
    814     pattern: Files to unzip from the archive. If omitted, will unzip the entire
    815         archive.
    816 
    817   Returns:
    818     The name of the temporary directory.
    819   """
    820 
    821   tmp = MakeTempDir(prefix="targetfiles-")
    822   m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
    823   if m:
    824     UnzipToDir(m.group(1), tmp, pattern)
    825     UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), pattern)
    826     filename = m.group(1)
    827   else:
    828     UnzipToDir(filename, tmp, pattern)
    829 
    830   return tmp
    831 
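        # For example, UnzipTemp("target_files.zip", ["IMAGES/*"]) (names
        # illustrative) extracts only the entries under IMAGES/ into a fresh
        # temp dir and returns that dir.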
    832 
    833 def GetUserImage(which, tmpdir, input_zip,
    834                  info_dict=None,
    835                  allow_shared_blocks=None,
    836                  hashtree_info_generator=None,
    837                  reset_file_map=False):
    838   """Returns an Image object suitable for passing to BlockImageDiff.
    839 
    840   This function loads the specified image from the given path. If the specified
    841   image is sparse, it also performs additional processing for OTA purpose. For
    842   example, it always adds block 0 to clobbered blocks list. It also detects
    843   files that cannot be reconstructed from the block list, for which we should
    844   avoid applying imgdiff.
    845 
    846   Args:
    847     which: The partition name.
    848     tmpdir: The directory that contains the prebuilt image and block map file.
    849     input_zip: The target-files ZIP archive.
    850     info_dict: The dict to be looked up for relevant info.
    851     allow_shared_blocks: If image is sparse, whether having shared blocks is
    852         allowed. If none, it is looked up from info_dict.
    853     hashtree_info_generator: If present and image is sparse, generates the
    854         hashtree_info for this sparse image.
    855     reset_file_map: If true and image is sparse, reset file map before returning
    856         the image.
    857   Returns:
    858     An Image object. If it is a sparse image and reset_file_map is False, the
    859     image will have file_map info loaded.
    860   """
    861   if info_dict is None:
    862     info_dict = LoadInfoDict(input_zip)
    863 
    864   is_sparse = info_dict.get("extfs_sparse_flag")
    865 
    866   # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
    867   # shared blocks (i.e. some blocks will show up in multiple files' block
    868   # list). We can only allocate such shared blocks to the first "owner", and
    869   # disable imgdiff for all later occurrences.
    870   if allow_shared_blocks is None:
    871     allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"
    872 
    873   if is_sparse:
    874     img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
    875                          hashtree_info_generator)
    876     if reset_file_map:
    877       img.ResetFileMap()
    878     return img
    879   else:
    880     return GetNonSparseImage(which, tmpdir, hashtree_info_generator)
    881 
    882 
    883 def GetNonSparseImage(which, tmpdir, hashtree_info_generator=None):
    884   """Returns a Image object suitable for passing to BlockImageDiff.
    885 
    886   This function loads the specified non-sparse image from the given path.
    887 
    888   Args:
    889     which: The partition name.
    890     tmpdir: The directory that contains the prebuilt image and block map file.
    891   Returns:
    892     An Image object.
    893   """
    894   path = os.path.join(tmpdir, "IMAGES", which + ".img")
    895   mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
    896 
    897   # The image and map files must have been created prior to calling
    898   # ota_from_target_files.py (since LMP).
    899   assert os.path.exists(path) and os.path.exists(mappath)
    900 
    901   return blockimgdiff.FileImage(
    902       path, hashtree_info_generator=hashtree_info_generator)
    903 
    904 def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
    905                    hashtree_info_generator=None):
    906   """Returns a SparseImage object suitable for passing to BlockImageDiff.
    907 
    908   This function loads the specified sparse image from the given path, and
    909   performs additional processing for OTA purpose. For example, it always adds
    910   block 0 to clobbered blocks list. It also detects files that cannot be
    911   reconstructed from the block list, for which we should avoid applying imgdiff.
    912 
    913   Args:
    914     which: The partition name, e.g. "system", "vendor".
    915     tmpdir: The directory that contains the prebuilt image and block map file.
    916     input_zip: The target-files ZIP archive.
    917     allow_shared_blocks: Whether having shared blocks is allowed.
    918     hashtree_info_generator: If present, generates the hashtree_info for this
    919         sparse image.
    920   Returns:
    921     A SparseImage object, with file_map info loaded.
    922   """
    923   path = os.path.join(tmpdir, "IMAGES", which + ".img")
    924   mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
    925 
    926   # The image and map files must have been created prior to calling
    927   # ota_from_target_files.py (since LMP).
    928   assert os.path.exists(path) and os.path.exists(mappath)
    929 
    930   # In ext4 filesystems, block 0 might be changed even when mounted R/O. We add
    931   # it to clobbered_blocks so that it will be written to the target
    932   # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
    933   clobbered_blocks = "0"
    934 
    935   image = sparse_img.SparseImage(
    936       path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks,
    937       hashtree_info_generator=hashtree_info_generator)
    938 
    939   # block.map may contain fewer blocks, because mke2fs may skip allocating blocks
    940   # if they contain all zeros. We can't reconstruct such a file from its block
    941   # list. Tag such entries accordingly. (Bug: 65213616)
    942   for entry in image.file_map:
    943     # Skip artificial names, such as "__ZERO", "__NONZERO-1".
    944     if not entry.startswith('/'):
    945       continue
    946 
    947     # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the
    948     # filename listed in system.map may contain an additional leading slash
    949     # (i.e. "//system/framework/am.jar"). Using lstrip to get consistent
    950     # results.
    951     arcname = entry.replace(which, which.upper(), 1).lstrip('/')
    952 
    953     # Special-case handling: files not under /system (e.g. "/sbin/charger")
    954     # are packed under ROOT/ in a target_files.zip.
    955     if which == 'system' and not arcname.startswith('SYSTEM'):
    956       arcname = 'ROOT/' + arcname
    957 
    958     assert arcname in input_zip.namelist(), \
    959         "Failed to find the ZIP entry for {}".format(entry)
    960 
    961     info = input_zip.getinfo(arcname)
    962     ranges = image.file_map[entry]
    963 
    964     # If a RangeSet has been tagged as using shared blocks while loading the
    965     # image, check the original block list to determine its completeness. Note
    966     # that the 'incomplete' flag would be tagged to the original RangeSet only.
    967     if ranges.extra.get('uses_shared_blocks'):
    968       ranges = ranges.extra['uses_shared_blocks']
    969 
    970     if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
    971       ranges.extra['incomplete'] = True
    972 
    973   return image
    974 
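        # For example, a 5000-byte file occupies RoundUpTo4K(5000) == 8192
        # bytes (two blocks), so if its block map lists only one 4096-byte
        # block, the entry above is tagged as incomplete.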
    975 
    976 def GetKeyPasswords(keylist):
    977   """Given a list of keys, prompt the user to enter passwords for
    978   those which require them.  Return a {key: password} dict.  password
    979   will be None if the key has no password."""
    980 
    981   no_passwords = []
    982   need_passwords = []
    983   key_passwords = {}
    984   devnull = open("/dev/null", "w+b")
    985   for k in sorted(keylist):
    986     # We don't need a password for things that aren't really keys.
    987     if k in SPECIAL_CERT_STRINGS:
    988       no_passwords.append(k)
    989       continue
    990 
    991     p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
    992              "-inform", "DER", "-nocrypt"],
    993             stdin=devnull.fileno(),
    994             stdout=devnull.fileno(),
    995             stderr=subprocess.STDOUT)
    996     p.communicate()
    997     if p.returncode == 0:
    998       # Definitely an unencrypted key.
    999       no_passwords.append(k)
   1000     else:
   1001       p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
   1002                "-inform", "DER", "-passin", "pass:"],
   1003               stdin=devnull.fileno(),
   1004               stdout=devnull.fileno(),
   1005               stderr=subprocess.PIPE)
   1006       _, stderr = p.communicate()
   1007       if p.returncode == 0:
   1008         # Encrypted key with empty string as password.
   1009         key_passwords[k] = ''
   1010       elif stderr.startswith('Error decrypting key'):
   1011         # Definitely encrypted key.
   1012         # It would have said "Error reading key" if it didn't parse correctly.
   1013         need_passwords.append(k)
   1014       else:
   1015         # Potentially, a type of key that openssl doesn't understand.
   1016         # We'll let the routines in signapk.jar handle it.
   1017         no_passwords.append(k)
   1018   devnull.close()
   1019 
   1020   key_passwords.update(PasswordManager().GetPasswords(need_passwords))
   1021   key_passwords.update(dict.fromkeys(no_passwords))
   1022   return key_passwords
   1023 
   1024 
   1025 def GetMinSdkVersion(apk_name):
   1026   """Gets the minSdkVersion declared in the APK.
   1027 
   1028   It calls 'aapt' to query the embedded minSdkVersion from the given APK file.
   1029   This can be either a decimal number (API Level) or a codename.
   1030 
   1031   Args:
   1032     apk_name: The APK filename.
   1033 
   1034   Returns:
   1035     The parsed SDK version string.
   1036 
   1037   Raises:
   1038     ExternalError: On failing to obtain the min SDK version.
   1039   """
   1040   proc = Run(
   1041       ["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE,
   1042       stderr=subprocess.PIPE)
   1043   stdoutdata, stderrdata = proc.communicate()
   1044   if proc.returncode != 0:
   1045     raise ExternalError(
   1046         "Failed to obtain minSdkVersion: aapt return code {}:\n{}\n{}".format(
   1047             proc.returncode, stdoutdata, stderrdata))
   1048 
   1049   for line in stdoutdata.split("\n"):
   1050     # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'.
   1051     m = re.match(r'sdkVersion:\'([^\']*)\'', line)
   1052     if m:
   1053       return m.group(1)
   1054   raise ExternalError("No minSdkVersion returned by aapt")
   1055 
   1056 
   1057 def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
   1058   """Returns the minSdkVersion declared in the APK as a number (API Level).
   1059 
   1060   If minSdkVersion is set to a codename, it is translated to a number using the
   1061   provided map.
   1062 
   1063   Args:
   1064     apk_name: The APK filename.
   1065     codename_to_api_level_map: A dict mapping codenames to API Levels.

   1066   Returns:
   1067     The parsed SDK version number.
   1068 
   1069   Raises:
   1070     ExternalError: On failing to get the min SDK version number.
   1071   """
   1072   version = GetMinSdkVersion(apk_name)
   1073   try:
   1074     return int(version)
   1075   except ValueError:
   1076     # Not a decimal number. Codename?
   1077     if version in codename_to_api_level_map:
   1078       return codename_to_api_level_map[version]
   1079     else:
   1080       raise ExternalError(
   1081           "Unknown minSdkVersion: '{}'. Known codenames: {}".format(
   1082               version, codename_to_api_level_map))
   1083 
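        # For example, GetMinSdkVersionInt("app.apk", {"Q": 29}) (filename
        # illustrative) returns 29 for an APK whose minSdkVersion is the
        # codename "Q".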
   1084 
   1085 def SignFile(input_name, output_name, key, password, min_api_level=None,
   1086              codename_to_api_level_map=None, whole_file=False,
   1087              extra_signapk_args=None):
   1088   """Sign the input_name zip/jar/apk, producing output_name.  Use the
   1089   given key and password (the latter may be None if the key does not
   1090   have a password).
   1091 
   1092   If whole_file is true, use the "-w" option to SignApk to embed a
   1093   signature that covers the whole file in the archive comment of the
   1094   zip file.
   1095 
   1096   min_api_level is the API Level (int) of the oldest platform this file may end
   1097   up on. If not specified for an APK, the API Level is obtained by interpreting
   1098   the minSdkVersion attribute of the APK's AndroidManifest.xml.
   1099 
   1100   codename_to_api_level_map is needed to translate the codename which may be
   1101   encountered as the APK's minSdkVersion.
   1102 
   1103   Caller may optionally specify extra args to be passed to SignApk, which
   1104   defaults to OPTIONS.extra_signapk_args if omitted.
   1105   """
   1106   if codename_to_api_level_map is None:
   1107     codename_to_api_level_map = {}
   1108   if extra_signapk_args is None:
   1109     extra_signapk_args = OPTIONS.extra_signapk_args
   1110 
   1111   java_library_path = os.path.join(
   1112       OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
   1113 
   1114   cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
   1115          ["-Djava.library.path=" + java_library_path,
   1116           "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
   1117          extra_signapk_args)
   1118   if whole_file:
   1119     cmd.append("-w")
   1120 
   1121   min_sdk_version = min_api_level
   1122   if min_sdk_version is None:
   1123     if not whole_file:
   1124       min_sdk_version = GetMinSdkVersionInt(
   1125           input_name, codename_to_api_level_map)
   1126   if min_sdk_version is not None:
   1127     cmd.extend(["--min-sdk-version", str(min_sdk_version)])
   1128 
   1129   cmd.extend([key + OPTIONS.public_key_suffix,
   1130               key + OPTIONS.private_key_suffix,
   1131               input_name, output_name])
   1132 
   1133   proc = Run(cmd, stdin=subprocess.PIPE)
   1134   if password is not None:
   1135     password += "\n"
   1136   stdoutdata, _ = proc.communicate(password)
   1137   if proc.returncode != 0:
   1138     raise ExternalError(
   1139         "Failed to run signapk.jar: return code {}:\n{}".format(
   1140             proc.returncode, stdoutdata))
   1141 
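        # A minimal usage sketch for SignFile (the key path is illustrative
        # and must have matching .x509.pem/.pk8 files next to it):
        #   key = "build/target/product/security/testkey"
        #   passwords = GetKeyPasswords([key])
        #   SignFile("unsigned.apk", "signed.apk", key, passwords[key])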
   1142 
   1143 def CheckSize(data, target, info_dict):
   1144   """Checks the data string passed against the max size limit.
   1145 
   1146   For non-AVB images, raise exception if the data is too big. Print a warning
   1147   if the data is nearing the maximum size.
   1148 
   1149   For AVB images, the actual image size should be identical to the limit.
   1150 
   1151   Args:
   1152     data: A string that contains all the data for the partition.
   1153     target: The partition name. The ".img" suffix is optional.
   1154     info_dict: The dict to be looked up for relevant info.
   1155   """
   1156   if target.endswith(".img"):
   1157     target = target[:-4]
   1158   mount_point = "/" + target
   1159 
   1160   fs_type = None
   1161   limit = None
   1162   if info_dict["fstab"]:
   1163     if mount_point == "/userdata":
   1164       mount_point = "/data"
   1165     p = info_dict["fstab"][mount_point]
   1166     fs_type = p.fs_type
   1167     device = p.device
   1168     if "/" in device:
   1169       device = device[device.rfind("/")+1:]
   1170     limit = info_dict.get(device + "_size")
   1171   if not fs_type or not limit:
   1172     return
   1173 
   1174   size = len(data)
   1175   # target could be 'userdata' or 'cache'. They should follow the non-AVB image
   1176   # path.
   1177   if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
   1178     if size != limit:
   1179       raise ExternalError(
   1180           "Mismatching image size for %s: expected %d actual %d" % (
   1181               target, limit, size))
   1182   else:
   1183     pct = float(size) * 100.0 / limit
   1184     msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
   1185     if pct >= 99.0:
   1186       raise ExternalError(msg)
   1187     elif pct >= 95.0:
   1188       logger.warning("\n  WARNING: %s\n", msg)
   1189     else:
   1190       logger.info("  %s", msg)
   1191 
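        # For example, on the non-AVB path a 95 MiB image against a 100 MiB
        # limit logs a warning (95.00% of limit); at 99% or above an
        # ExternalError is raised.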
   1192 
   1193 def ReadApkCerts(tf_zip):
   1194   """Parses the APK certs info from a given target-files zip.
   1195 
   1196   Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns a
   1197   tuple with the following elements: (1) a dictionary that maps packages to
   1198   certs (based on the "certificate" and "private_key" attributes in the file;
   1199   (2) a string representing the extension of compressed APKs in the target files
   1200   (e.g ".gz", ".bro").
   1201 
   1202   Args:
   1203     tf_zip: The input target_files ZipFile (already open).
   1204 
   1205   Returns:
   1206     (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
   1207         the extension string of compressed APKs (e.g. ".gz"), or None if there's
   1208         no compressed APKs.
   1209   """
   1210   certmap = {}
   1211   compressed_extension = None
   1212 
   1213   # META/apkcerts.txt contains the info for _all_ the packages known at build
   1214   # time. Filter out the ones that are not installed.
   1215   installed_files = set()
   1216   for name in tf_zip.namelist():
   1217     basename = os.path.basename(name)
   1218     if basename:
   1219       installed_files.add(basename)
   1220 
   1221   for line in tf_zip.read("META/apkcerts.txt").split("\n"):
   1222     line = line.strip()
   1223     if not line:
   1224       continue
   1225     m = re.match(
   1226         r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
   1227         r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*)")?$',
   1228         line)
   1229     if not m:
   1230       continue
   1231 
   1232     matches = m.groupdict()
   1233     cert = matches["CERT"]
   1234     privkey = matches["PRIVKEY"]
   1235     name = matches["NAME"]
   1236     this_compressed_extension = matches["COMPRESSED"]
   1237 
   1238     public_key_suffix_len = len(OPTIONS.public_key_suffix)
   1239     private_key_suffix_len = len(OPTIONS.private_key_suffix)
   1240     if cert in SPECIAL_CERT_STRINGS and not privkey:
   1241       certmap[name] = cert
   1242     elif (cert.endswith(OPTIONS.public_key_suffix) and
   1243           privkey.endswith(OPTIONS.private_key_suffix) and
   1244           cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
   1245       certmap[name] = cert[:-public_key_suffix_len]
   1246     else:
   1247       raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)
   1248 
   1249     if not this_compressed_extension:
   1250       continue
   1251 
   1252     # Only count the installed files.
   1253     filename = name + '.' + this_compressed_extension
   1254     if filename not in installed_files:
   1255       continue
   1256 
   1257     # Make sure that all the values in the compression map have the same
   1258     # extension. We don't support multiple compression methods in the same
   1259     # system image.
   1260     if compressed_extension:
   1261       if this_compressed_extension != compressed_extension:
   1262         raise ValueError(
   1263             "Multiple compressed extensions: {} vs {}".format(
   1264                 compressed_extension, this_compressed_extension))
   1265     else:
   1266       compressed_extension = this_compressed_extension
   1267 
   1268   return (certmap,
   1269           ("." + compressed_extension) if compressed_extension else None)
   1270 
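        # A sample META/apkcerts.txt line (written as one line; paths are
        # illustrative):
        #   name="Foo.apk" certificate="security/platform.x509.pem"
        #   private_key="security/platform.pk8"
        # which maps "Foo.apk" to the key stem "security/platform".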
   1271 
   1272 COMMON_DOCSTRING = """
   1273 Global options
   1274 
   1275   -p  (--path) <dir>
   1276       Prepend <dir>/bin to the list of places to search for binaries run by this
   1277       script, and expect to find jars in <dir>/framework.
   1278 
   1279   -s  (--device_specific) <file>
   1280       Path to the Python module containing device-specific releasetools code.
   1281 
   1282   -x  (--extra) <key=value>
   1283       Add a key/value pair to the 'extras' dict, which device-specific extension
   1284       code may look at.
   1285 
   1286   -v  (--verbose)
   1287       Show command lines being executed.
   1288 
   1289   -h  (--help)
   1290       Display this usage message and exit.
   1291 """
   1292 
   1293 def Usage(docstring):
   1294   print(docstring.rstrip("\n"))
   1295   print(COMMON_DOCSTRING)
   1296 
   1297 
   1298 def ParseOptions(argv,
   1299                  docstring,
   1300                  extra_opts="", extra_long_opts=(),
   1301                  extra_option_handler=None):
   1302   """Parse the options in argv and return any arguments that aren't
   1303   flags.  docstring is the calling module's docstring, to be displayed
   1304   for errors and -h.  extra_opts and extra_long_opts are for flags
   1305   defined by the caller, which are processed by passing them to
   1306   extra_option_handler."""
   1307 
   1308   try:
   1309     opts, args = getopt.getopt(
   1310         argv, "hvp:s:x:" + extra_opts,
   1311         ["help", "verbose", "path=", "signapk_path=",
   1312          "signapk_shared_library_path=", "extra_signapk_args=",
   1313          "java_path=", "java_args=", "public_key_suffix=",
   1314          "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
   1315          "verity_signer_path=", "verity_signer_args=", "device_specific=",
   1316          "extra="] +
   1317         list(extra_long_opts))
   1318   except getopt.GetoptError as err:
   1319     Usage(docstring)
   1320     print("**", str(err), "**")
   1321     sys.exit(2)
   1322 
   1323   for o, a in opts:
   1324     if o in ("-h", "--help"):
   1325       Usage(docstring)
   1326       sys.exit()
   1327     elif o in ("-v", "--verbose"):
   1328       OPTIONS.verbose = True
   1329     elif o in ("-p", "--path"):
   1330       OPTIONS.search_path = a
   1331     elif o in ("--signapk_path",):
   1332       OPTIONS.signapk_path = a
   1333     elif o in ("--signapk_shared_library_path",):
   1334       OPTIONS.signapk_shared_library_path = a
   1335     elif o in ("--extra_signapk_args",):
   1336       OPTIONS.extra_signapk_args = shlex.split(a)
   1337     elif o in ("--java_path",):
   1338       OPTIONS.java_path = a
   1339     elif o in ("--java_args",):
   1340       OPTIONS.java_args = shlex.split(a)
   1341     elif o in ("--public_key_suffix",):
   1342       OPTIONS.public_key_suffix = a
   1343     elif o in ("--private_key_suffix",):
   1344       OPTIONS.private_key_suffix = a
   1345     elif o in ("--boot_signer_path",):
   1346       OPTIONS.boot_signer_path = a
   1347     elif o in ("--boot_signer_args",):
   1348       OPTIONS.boot_signer_args = shlex.split(a)
   1349     elif o in ("--verity_signer_path",):
   1350       OPTIONS.verity_signer_path = a
   1351     elif o in ("--verity_signer_args",):
   1352       OPTIONS.verity_signer_args = shlex.split(a)
   1353     elif o in ("-s", "--device_specific"):
   1354       OPTIONS.device_specific = a
   1355     elif o in ("-x", "--extra"):
   1356       key, value = a.split("=", 1)
   1357       OPTIONS.extras[key] = value
   1358     else:
   1359       if extra_option_handler is None or not extra_option_handler(o, a):
   1360         assert False, "unknown option \"%s\"" % (o,)
   1361 
   1362   if OPTIONS.search_path:
   1363     os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
   1364                           os.pathsep + os.environ["PATH"])
   1365 
   1366   return args
   1367 
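# Example (illustrative sketch, not part of this module): a calling script
# typically wires its own flags through extra_opts/extra_long_opts and an
# extra_option_handler. The -w/--wipe_user_data flag below is hypothetical.
#
#   def option_handler(o, a):
#     if o in ("-w", "--wipe_user_data"):
#       OPTIONS.extras["wipe_user_data"] = True
#       return True
#     return False
#
#   args = ParseOptions(sys.argv[1:], __doc__,
#                       extra_opts="w",
#                       extra_long_opts=["wipe_user_data"],
#                       extra_option_handler=option_handler)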
   1368 
   1369 def MakeTempFile(prefix='tmp', suffix=''):
   1370   """Make a temp file and add it to the list of things to be deleted
   1371   when Cleanup() is called.  Return the filename."""
   1372   fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
   1373   os.close(fd)
   1374   OPTIONS.tempfiles.append(fn)
   1375   return fn
   1376 
   1377 
   1378 def MakeTempDir(prefix='tmp', suffix=''):
   1379   """Makes a temporary dir that will be cleaned up with a call to Cleanup().
   1380 
   1381   Returns:
   1382     The absolute pathname of the new directory.
   1383   """
   1384   dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
   1385   OPTIONS.tempfiles.append(dir_name)
   1386   return dir_name
   1387 
   1388 
   1389 def Cleanup():
   1390   for i in OPTIONS.tempfiles:
   1391     if os.path.isdir(i):
   1392       shutil.rmtree(i, ignore_errors=True)
   1393     else:
   1394       os.remove(i)
   1395   del OPTIONS.tempfiles[:]
   1396 
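# Example (illustrative only): create temp paths through the helpers above so
# a single Cleanup() call removes everything at exit. Names are placeholders.
#
#   staging_dir = MakeTempDir(prefix='targetfiles-')
#   patch_path = MakeTempFile(prefix='patch-', suffix='.p')
#   try:
#     pass  # do work under staging_dir; write results to patch_path
#   finally:
#     Cleanup()  # removes both, plus anything else registered meanwhile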
   1397 
   1398 class PasswordManager(object):
   1399   def __init__(self):
   1400     self.editor = os.getenv("EDITOR")
   1401     self.pwfile = os.getenv("ANDROID_PW_FILE")
   1402 
   1403   def GetPasswords(self, items):
   1404     """Get passwords corresponding to each string in 'items',
   1405     returning a dict.  (The dict may have keys in addition to the
   1406     values in 'items'.)
   1407 
   1408     Uses the passwords in $ANDROID_PW_FILE if available, letting the
   1409     user edit that file to add more needed passwords.  If no editor is
    1410     available, or $ANDROID_PW_FILE isn't defined, prompts the user
   1411     interactively in the ordinary way.
   1412     """
   1413 
   1414     current = self.ReadFile()
   1415 
   1416     first = True
   1417     while True:
   1418       missing = []
   1419       for i in items:
   1420         if i not in current or not current[i]:
   1421           missing.append(i)
   1422       # Are all the passwords already in the file?
   1423       if not missing:
   1424         return current
   1425 
   1426       for i in missing:
   1427         current[i] = ""
   1428 
   1429       if not first:
   1430         print("key file %s still missing some passwords." % (self.pwfile,))
   1431         answer = raw_input("try to edit again? [y]> ").strip()
   1432         if answer and answer[0] not in 'yY':
   1433           raise RuntimeError("key passwords unavailable")
   1434       first = False
   1435 
   1436       current = self.UpdateAndReadFile(current)
   1437 
   1438   def PromptResult(self, current): # pylint: disable=no-self-use
   1439     """Prompt the user to enter a value (password) for each key in
    1440     'current' whose value is false.  Returns a new dict with all the
   1441     values.
   1442     """
   1443     result = {}
   1444     for k, v in sorted(current.iteritems()):
   1445       if v:
   1446         result[k] = v
   1447       else:
   1448         while True:
   1449           result[k] = getpass.getpass(
   1450               "Enter password for %s key> " % k).strip()
   1451           if result[k]:
   1452             break
   1453     return result
   1454 
   1455   def UpdateAndReadFile(self, current):
   1456     if not self.editor or not self.pwfile:
   1457       return self.PromptResult(current)
   1458 
   1459     f = open(self.pwfile, "w")
   1460     os.chmod(self.pwfile, 0o600)
   1461     f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
   1462     f.write("# (Additional spaces are harmless.)\n\n")
   1463 
   1464     first_line = None
   1465     sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
   1466     for i, (_, k, v) in enumerate(sorted_list):
   1467       f.write("[[[  %s  ]]] %s\n" % (v, k))
   1468       if not v and first_line is None:
   1469         # position cursor on first line with no password.
   1470         first_line = i + 4
   1471     f.close()
   1472 
   1473     RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])
   1474 
   1475     return self.ReadFile()
   1476 
   1477   def ReadFile(self):
   1478     result = {}
   1479     if self.pwfile is None:
   1480       return result
   1481     try:
   1482       f = open(self.pwfile, "r")
   1483       for line in f:
   1484         line = line.strip()
   1485         if not line or line[0] == '#':
   1486           continue
   1487         m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
   1488         if not m:
   1489           logger.warning("Failed to parse password file: %s", line)
   1490         else:
   1491           result[m.group(2)] = m.group(1)
   1492       f.close()
   1493     except IOError as e:
   1494       if e.errno != errno.ENOENT:
   1495         logger.exception("Error reading password file:")
   1496     return result
   1497 
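# For reference, the $ANDROID_PW_FILE format written by UpdateAndReadFile()
# and parsed by ReadFile() looks like the following (the key names and
# passwords are made-up examples):
#
#   # Enter key passwords between the [[[ ]]] brackets.
#   # (Additional spaces are harmless.)
#
#   [[[  hunter2  ]]] build/target/product/security/releasekey
#   [[[           ]]] build/target/product/security/platform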
   1498 
   1499 def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
   1500              compress_type=None):
   1501   import datetime
   1502 
   1503   # http://b/18015246
   1504   # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
   1505   # for files larger than 2GiB. We can work around this by adjusting their
   1506   # limit. Note that `zipfile.writestr()` will not work for strings larger than
   1507   # 2GiB. The Python interpreter sometimes rejects strings that large (though
   1508   # it isn't clear to me exactly what circumstances cause this).
   1509   # `zipfile.write()` must be used directly to work around this.
   1510   #
   1511   # This mess can be avoided if we port to python3.
   1512   saved_zip64_limit = zipfile.ZIP64_LIMIT
   1513   zipfile.ZIP64_LIMIT = (1 << 32) - 1
   1514 
   1515   if compress_type is None:
   1516     compress_type = zip_file.compression
   1517   if arcname is None:
   1518     arcname = filename
   1519 
   1520   saved_stat = os.stat(filename)
   1521 
   1522   try:
   1523     # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
   1524     # file to be zipped and reset it when we're done.
   1525     os.chmod(filename, perms)
   1526 
   1527     # Use a fixed timestamp so the output is repeatable.
   1528     # Note: Use of fromtimestamp rather than utcfromtimestamp here is
   1529     # intentional. zip stores datetimes in local time without a time zone
   1530     # attached, so we need "epoch" but in the local time zone to get 2009/01/01
   1531     # in the zip archive.
   1532     local_epoch = datetime.datetime.fromtimestamp(0)
   1533     timestamp = (datetime.datetime(2009, 1, 1) - local_epoch).total_seconds()
   1534     os.utime(filename, (timestamp, timestamp))
   1535 
   1536     zip_file.write(filename, arcname=arcname, compress_type=compress_type)
   1537   finally:
   1538     os.chmod(filename, saved_stat.st_mode)
   1539     os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
   1540     zipfile.ZIP64_LIMIT = saved_zip64_limit
   1541 
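# Example (illustrative): because ZipWrite() pins both the permissions and
# the timestamp, two runs over identical inputs produce byte-identical
# archives. 'out.zip' and 'boot.img' are placeholder names.
#
#   with zipfile.ZipFile('out.zip', 'w', allowZip64=True) as z:
#     ZipWrite(z, 'boot.img', arcname='IMAGES/boot.img', perms=0o644,
#              compress_type=zipfile.ZIP_DEFLATED)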
   1542 
   1543 def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
   1544                 compress_type=None):
   1545   """Wrap zipfile.writestr() function to work around the zip64 limit.
   1546 
   1547   Even with the ZIP64_LIMIT workaround, it won't allow writing a string
   1548   longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
   1549   when calling crc32(bytes).
   1550 
   1551   But it still works fine to write a shorter string into a large zip file.
   1552   We should use ZipWrite() whenever possible, and only use ZipWriteStr()
   1553   when we know the string won't be too long.
   1554   """
   1555 
   1556   saved_zip64_limit = zipfile.ZIP64_LIMIT
   1557   zipfile.ZIP64_LIMIT = (1 << 32) - 1
   1558 
   1559   if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
   1560     zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
   1561     zinfo.compress_type = zip_file.compression
   1562     if perms is None:
   1563       perms = 0o100644
   1564   else:
   1565     zinfo = zinfo_or_arcname
   1566 
   1567   # If compress_type is given, it overrides the value in zinfo.
   1568   if compress_type is not None:
   1569     zinfo.compress_type = compress_type
   1570 
    1571   # If perms is given, it takes priority.
   1572   if perms is not None:
   1573     # If perms doesn't set the file type, mark it as a regular file.
   1574     if perms & 0o770000 == 0:
   1575       perms |= 0o100000
   1576     zinfo.external_attr = perms << 16
   1577 
   1578   # Use a fixed timestamp so the output is repeatable.
   1579   zinfo.date_time = (2009, 1, 1, 0, 0, 0)
   1580 
   1581   zip_file.writestr(zinfo, data)
   1582   zipfile.ZIP64_LIMIT = saved_zip64_limit
   1583 
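# Note on the encoding above: a zip entry's external_attr stores the POSIX
# mode in its upper 16 bits, so a regular file with mode 0o644 is recorded as
# 0o100644 << 16. A minimal sketch of what ZipWriteStr() sets up (the entry
# name is hypothetical):
#
#   zinfo = zipfile.ZipInfo('META/misc_info.txt')
#   zinfo.external_attr = 0o100644 << 16    # -rw-r--r--, regular file
#   zinfo.date_time = (2009, 1, 1, 0, 0, 0) # fixed, for repeatability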
   1584 
   1585 def ZipDelete(zip_filename, entries):
   1586   """Deletes entries from a ZIP file.
   1587 
   1588   Since deleting entries from a ZIP file is not supported, it shells out to
   1589   'zip -d'.
   1590 
   1591   Args:
   1592     zip_filename: The name of the ZIP file.
   1593     entries: The name of the entry, or the list of names to be deleted.
   1594 
   1595   Raises:
   1596     AssertionError: In case of non-zero return from 'zip'.
   1597   """
   1598   if isinstance(entries, basestring):
   1599     entries = [entries]
   1600   cmd = ["zip", "-d", zip_filename] + entries
   1601   RunAndCheckOutput(cmd)
   1602 
   1603 
   1604 def ZipClose(zip_file):
   1605   # http://b/18015246
   1606   # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
   1607   # central directory.
   1608   saved_zip64_limit = zipfile.ZIP64_LIMIT
   1609   zipfile.ZIP64_LIMIT = (1 << 32) - 1
   1610 
   1611   zip_file.close()
   1612 
   1613   zipfile.ZIP64_LIMIT = saved_zip64_limit
   1614 
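# Example (illustrative): ZipDelete() requires the Info-ZIP 'zip' tool on
# PATH since it shells out to 'zip -d'; entry names are placeholders.
#
#   ZipDelete('target_files.zip',
#             ['META/care_map.txt', 'IMAGES/system.img'])  # raises on failure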
   1615 
   1616 class DeviceSpecificParams(object):
   1617   module = None
   1618   def __init__(self, **kwargs):
   1619     """Keyword arguments to the constructor become attributes of this
   1620     object, which is passed to all functions in the device-specific
   1621     module."""
   1622     for k, v in kwargs.iteritems():
   1623       setattr(self, k, v)
   1624     self.extras = OPTIONS.extras
   1625 
   1626     if self.module is None:
   1627       path = OPTIONS.device_specific
   1628       if not path:
   1629         return
   1630       try:
   1631         if os.path.isdir(path):
   1632           info = imp.find_module("releasetools", [path])
   1633         else:
   1634           d, f = os.path.split(path)
   1635           b, x = os.path.splitext(f)
   1636           if x == ".py":
   1637             f = b
   1638           info = imp.find_module(f, [d])
   1639         logger.info("loaded device-specific extensions from %s", path)
   1640         self.module = imp.load_module("device_specific", *info)
   1641       except ImportError:
   1642         logger.info("unable to load device-specific module; assuming none")
   1643 
   1644   def _DoCall(self, function_name, *args, **kwargs):
   1645     """Call the named function in the device-specific module, passing
   1646     the given args and kwargs.  The first argument to the call will be
    1647     the DeviceSpecificParams object itself.  If there is no module, or the
   1648     module does not define the function, return the value of the
   1649     'default' kwarg (which itself defaults to None)."""
   1650     if self.module is None or not hasattr(self.module, function_name):
   1651       return kwargs.get("default")
   1652     return getattr(self.module, function_name)(*((self,) + args), **kwargs)
   1653 
   1654   def FullOTA_Assertions(self):
   1655     """Called after emitting the block of assertions at the top of a
   1656     full OTA package.  Implementations can add whatever additional
   1657     assertions they like."""
   1658     return self._DoCall("FullOTA_Assertions")
   1659 
   1660   def FullOTA_InstallBegin(self):
   1661     """Called at the start of full OTA installation."""
   1662     return self._DoCall("FullOTA_InstallBegin")
   1663 
   1664   def FullOTA_GetBlockDifferences(self):
   1665     """Called during full OTA installation and verification.
    1666     Implementations should return a list of BlockDifference objects
    1667     describing the update on each additional partition.
   1668     """
   1669     return self._DoCall("FullOTA_GetBlockDifferences")
   1670 
   1671   def FullOTA_InstallEnd(self):
   1672     """Called at the end of full OTA installation; typically this is
   1673     used to install the image for the device's baseband processor."""
   1674     return self._DoCall("FullOTA_InstallEnd")
   1675 
   1676   def IncrementalOTA_Assertions(self):
   1677     """Called after emitting the block of assertions at the top of an
   1678     incremental OTA package.  Implementations can add whatever
   1679     additional assertions they like."""
   1680     return self._DoCall("IncrementalOTA_Assertions")
   1681 
   1682   def IncrementalOTA_VerifyBegin(self):
   1683     """Called at the start of the verification phase of incremental
   1684     OTA installation; additional checks can be placed here to abort
   1685     the script before any changes are made."""
   1686     return self._DoCall("IncrementalOTA_VerifyBegin")
   1687 
   1688   def IncrementalOTA_VerifyEnd(self):
   1689     """Called at the end of the verification phase of incremental OTA
   1690     installation; additional checks can be placed here to abort the
   1691     script before any changes are made."""
   1692     return self._DoCall("IncrementalOTA_VerifyEnd")
   1693 
   1694   def IncrementalOTA_InstallBegin(self):
   1695     """Called at the start of incremental OTA installation (after
   1696     verification is complete)."""
   1697     return self._DoCall("IncrementalOTA_InstallBegin")
   1698 
   1699   def IncrementalOTA_GetBlockDifferences(self):
   1700     """Called during incremental OTA installation and verification.
   1701     Implementation should return a list of BlockDifference objects describing
   1702     the update on each additional partitions.
   1703     """
   1704     return self._DoCall("IncrementalOTA_GetBlockDifferences")
   1705 
   1706   def IncrementalOTA_InstallEnd(self):
   1707     """Called at the end of incremental OTA installation; typically
   1708     this is used to install the image for the device's baseband
   1709     processor."""
   1710     return self._DoCall("IncrementalOTA_InstallEnd")
   1711 
   1712   def VerifyOTA_Assertions(self):
   1713     return self._DoCall("VerifyOTA_Assertions")
   1714 
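# Example (illustrative sketch): a minimal device-specific module, loaded via
# -s/--device_specific, only needs to define the hooks it cares about. Each
# hook receives the DeviceSpecificParams instance as its first argument; the
# 'script' attribute below is a hypothetical keyword arg supplied by the
# caller when constructing DeviceSpecificParams.
#
#   # releasetools.py in the device directory:
#   def FullOTA_InstallEnd(info):
#     info.script.Print("Flashing hypothetical baseband image...")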
   1715 
   1716 class File(object):
   1717   def __init__(self, name, data, compress_size=None):
   1718     self.name = name
   1719     self.data = data
   1720     self.size = len(data)
   1721     self.compress_size = compress_size or self.size
   1722     self.sha1 = sha1(data).hexdigest()
   1723 
   1724   @classmethod
   1725   def FromLocalFile(cls, name, diskname):
   1726     f = open(diskname, "rb")
   1727     data = f.read()
   1728     f.close()
   1729     return File(name, data)
   1730 
   1731   def WriteToTemp(self):
   1732     t = tempfile.NamedTemporaryFile()
   1733     t.write(self.data)
   1734     t.flush()
   1735     return t
   1736 
   1737   def WriteToDir(self, d):
   1738     with open(os.path.join(d, self.name), "wb") as fp:
   1739       fp.write(self.data)
   1740 
   1741   def AddToZip(self, z, compression=None):
   1742     ZipWriteStr(z, self.name, self.data, compress_type=compression)
   1743 
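# Example (illustrative): loading an image from disk and adding it to an
# output zip; the paths and the 'output_zip' handle are placeholders.
#
#   img = File.FromLocalFile('boot.img', 'unpacked/IMAGES/boot.img')
#   logger.info("%s: %d bytes, sha1 %s", img.name, img.size, img.sha1)
#   img.AddToZip(output_zip, compression=zipfile.ZIP_DEFLATED)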
   1744 
   1745 DIFF_PROGRAM_BY_EXT = {
   1746     ".gz" : "imgdiff",
   1747     ".zip" : ["imgdiff", "-z"],
   1748     ".jar" : ["imgdiff", "-z"],
   1749     ".apk" : ["imgdiff", "-z"],
   1750     ".img" : "imgdiff",
   1751     }
   1752 
   1753 
   1754 class Difference(object):
   1755   def __init__(self, tf, sf, diff_program=None):
   1756     self.tf = tf
   1757     self.sf = sf
   1758     self.patch = None
   1759     self.diff_program = diff_program
   1760 
   1761   def ComputePatch(self):
   1762     """Compute the patch (as a string of data) needed to turn sf into
   1763     tf.  Returns the same tuple as GetPatch()."""
   1764 
   1765     tf = self.tf
   1766     sf = self.sf
   1767 
   1768     if self.diff_program:
   1769       diff_program = self.diff_program
   1770     else:
   1771       ext = os.path.splitext(tf.name)[1]
   1772       diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
   1773 
   1774     ttemp = tf.WriteToTemp()
   1775     stemp = sf.WriteToTemp()
   1776 
   1779     try:
   1780       ptemp = tempfile.NamedTemporaryFile()
   1781       if isinstance(diff_program, list):
   1782         cmd = copy.copy(diff_program)
   1783       else:
   1784         cmd = [diff_program]
   1785       cmd.append(stemp.name)
   1786       cmd.append(ttemp.name)
   1787       cmd.append(ptemp.name)
   1788       p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   1789       err = []
   1790       def run():
   1791         _, e = p.communicate()
   1792         if e:
   1793           err.append(e)
   1794       th = threading.Thread(target=run)
   1795       th.start()
   1796       th.join(timeout=300)   # 5 mins
   1797       if th.is_alive():
   1798         logger.warning("diff command timed out")
   1799         p.terminate()
   1800         th.join(5)
   1801         if th.is_alive():
   1802           p.kill()
   1803           th.join()
   1804 
   1805       if p.returncode != 0:
   1806         logger.warning("Failure running %s:\n%s\n", diff_program, "".join(err))
   1807         self.patch = None
   1808         return None, None, None
   1809       diff = ptemp.read()
   1810     finally:
   1811       ptemp.close()
   1812       stemp.close()
   1813       ttemp.close()
   1814 
   1815     self.patch = diff
   1816     return self.tf, self.sf, self.patch
   1817 
   1818 
   1819   def GetPatch(self):
   1820     """Returns a tuple of (target_file, source_file, patch_data).
   1821 
   1822     patch_data may be None if ComputePatch hasn't been called, or if
   1823     computing the patch failed.
   1824     """
   1825     return self.tf, self.sf, self.patch
   1826 
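# Example (illustrative): computing a single patch. The diff program is
# picked from DIFF_PROGRAM_BY_EXT based on the target's extension unless one
# is passed explicitly; 'target_file' and 'source_file' are File objects.
#
#   d = Difference(target_file, source_file)
#   tf, sf, patch = d.ComputePatch()
#   if patch is None:
#     logger.warning("patch generation failed for %s", target_file.name)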
   1827 
   1828 def ComputeDifferences(diffs):
   1829   """Call ComputePatch on all the Difference objects in 'diffs'."""
   1830   logger.info("%d diffs to compute", len(diffs))
   1831 
   1832   # Do the largest files first, to try and reduce the long-pole effect.
   1833   by_size = [(i.tf.size, i) for i in diffs]
   1834   by_size.sort(reverse=True)
   1835   by_size = [i[1] for i in by_size]
   1836 
   1837   lock = threading.Lock()
   1838   diff_iter = iter(by_size)   # accessed under lock
   1839 
   1840   def worker():
   1841     try:
   1842       lock.acquire()
   1843       for d in diff_iter:
   1844         lock.release()
   1845         start = time.time()
   1846         d.ComputePatch()
   1847         dur = time.time() - start
   1848         lock.acquire()
   1849 
   1850         tf, sf, patch = d.GetPatch()
   1851         if sf.name == tf.name:
   1852           name = tf.name
   1853         else:
   1854           name = "%s (%s)" % (tf.name, sf.name)
   1855         if patch is None:
   1856           logger.error("patching failed! %40s", name)
   1857         else:
   1858           logger.info(
   1859               "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
   1860               tf.size, 100.0 * len(patch) / tf.size, name)
   1861       lock.release()
   1862     except Exception:
   1863       logger.exception("Failed to compute diff from worker")
   1864       raise
   1865 
   1866   # start worker threads; wait for them all to finish.
   1867   threads = [threading.Thread(target=worker)
   1868              for i in range(OPTIONS.worker_threads)]
   1869   for th in threads:
   1870     th.start()
   1871   while threads:
   1872     threads.pop().join()
   1873 
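# Example (illustrative): OPTIONS.worker_threads must be set before calling
# ComputeDifferences(); 'file_pairs' is an assumed list of (target, source)
# File object tuples.
#
#   import multiprocessing
#   OPTIONS.worker_threads = multiprocessing.cpu_count()
#   ComputeDifferences([Difference(tf, sf) for (tf, sf) in file_pairs])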
   1874 
   1875 class BlockDifference(object):
   1876   def __init__(self, partition, tgt, src=None, check_first_block=False,
   1877                version=None, disable_imgdiff=False):
   1878     self.tgt = tgt
   1879     self.src = src
   1880     self.partition = partition
   1881     self.check_first_block = check_first_block
   1882     self.disable_imgdiff = disable_imgdiff
   1883 
   1884     if version is None:
   1885       version = max(
   1886           int(i) for i in
   1887           OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
   1888     assert version >= 3
   1889     self.version = version
   1890 
   1891     b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
   1892                                     version=self.version,
   1893                                     disable_imgdiff=self.disable_imgdiff)
   1894     self.path = os.path.join(MakeTempDir(), partition)
   1895     b.Compute(self.path)
   1896     self._required_cache = b.max_stashed_size
   1897     self.touched_src_ranges = b.touched_src_ranges
   1898     self.touched_src_sha1 = b.touched_src_sha1
   1899 
   1900     # On devices with dynamic partitions, for new partitions,
   1901     # src is None but OPTIONS.source_info_dict is not.
   1902     if OPTIONS.source_info_dict is None:
   1903       is_dynamic_build = OPTIONS.info_dict.get(
   1904           "use_dynamic_partitions") == "true"
   1905       is_dynamic_source = False
   1906     else:
   1907       is_dynamic_build = OPTIONS.source_info_dict.get(
   1908           "use_dynamic_partitions") == "true"
   1909       is_dynamic_source = partition in shlex.split(
   1910           OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())
   1911 
   1912     is_dynamic_target = partition in shlex.split(
   1913         OPTIONS.info_dict.get("dynamic_partition_list", "").strip())
   1914 
   1915     # For dynamic partitions builds, check partition list in both source
   1916     # and target build because new partitions may be added, and existing
   1917     # partitions may be removed.
   1918     is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)
   1919 
   1920     if is_dynamic:
   1921       self.device = 'map_partition("%s")' % partition
   1922     else:
   1923       if OPTIONS.source_info_dict is None:
   1924         _, device_path = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
   1925       else:
   1926         _, device_path = GetTypeAndDevice("/" + partition,
   1927                                           OPTIONS.source_info_dict)
   1928       self.device = '"%s"' % device_path
   1929 
   1930   @property
   1931   def required_cache(self):
   1932     return self._required_cache
   1933 
   1934   def WriteScript(self, script, output_zip, progress=None,
   1935                   write_verify_script=False):
   1936     if not self.src:
   1937       # write the output unconditionally
   1938       script.Print("Patching %s image unconditionally..." % (self.partition,))
   1939     else:
   1940       script.Print("Patching %s image after verification." % (self.partition,))
   1941 
   1942     if progress:
   1943       script.ShowProgress(progress, 0)
   1944     self._WriteUpdate(script, output_zip)
   1945 
   1946     if write_verify_script:
   1947       self.WritePostInstallVerifyScript(script)
   1948 
   1949   def WriteStrictVerifyScript(self, script):
   1950     """Verify all the blocks in the care_map, including clobbered blocks.
   1951 
   1952     This differs from the WriteVerifyScript() function: a) it prints different
   1953     error messages; b) it doesn't allow half-way updated images to pass the
   1954     verification."""
   1955 
   1956     partition = self.partition
   1957     script.Print("Verifying %s..." % (partition,))
   1958     ranges = self.tgt.care_map
   1959     ranges_str = ranges.to_string_raw()
   1960     script.AppendExtra(
   1961         'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
   1962         'ui_print("%s has unexpected contents.");' % (
   1963             self.device, ranges_str,
   1964             self.tgt.TotalSha1(include_clobbered_blocks=True),
   1965             self.partition))
   1966     script.AppendExtra("")
   1967 
   1968   def WriteVerifyScript(self, script, touched_blocks_only=False):
   1969     partition = self.partition
   1970 
   1971     # full OTA
   1972     if not self.src:
   1973       script.Print("Image %s will be patched unconditionally." % (partition,))
   1974 
   1975     # incremental OTA
   1976     else:
   1977       if touched_blocks_only:
   1978         ranges = self.touched_src_ranges
   1979         expected_sha1 = self.touched_src_sha1
   1980       else:
   1981         ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
   1982         expected_sha1 = self.src.TotalSha1()
   1983 
   1984       # No blocks to be checked, skipping.
   1985       if not ranges:
   1986         return
   1987 
   1988       ranges_str = ranges.to_string_raw()
   1989       script.AppendExtra(
   1990           'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
   1991           'package_extract_file("%s.transfer.list"), "%s.new.dat", '
   1992           '"%s.patch.dat")) then' % (
   1993               self.device, ranges_str, expected_sha1,
   1994               self.device, partition, partition, partition))
   1995       script.Print('Verified %s image...' % (partition,))
   1996       script.AppendExtra('else')
   1997 
   1998       if self.version >= 4:
   1999 
   2000         # Bug: 21124327
   2001         # When generating incrementals for the system and vendor partitions in
   2002         # version 4 or newer, explicitly check the first block (which contains
   2003         # the superblock) of the partition to see if it's what we expect. If
   2004         # this check fails, give an explicit log message about the partition
   2005         # having been remounted R/W (the most likely explanation).
   2006         if self.check_first_block:
   2007           script.AppendExtra('check_first_block(%s);' % (self.device,))
   2008 
   2009         # If version >= 4, try block recovery before abort update
   2010         if partition == "system":
   2011           code = ErrorCode.SYSTEM_RECOVER_FAILURE
   2012         else:
   2013           code = ErrorCode.VENDOR_RECOVER_FAILURE
   2014         script.AppendExtra((
   2015             'ifelse (block_image_recover({device}, "{ranges}") && '
   2016             'block_image_verify({device}, '
   2017             'package_extract_file("{partition}.transfer.list"), '
   2018             '"{partition}.new.dat", "{partition}.patch.dat"), '
   2019             'ui_print("{partition} recovered successfully."), '
   2020             'abort("E{code}: {partition} partition fails to recover"));\n'
   2021             'endif;').format(device=self.device, ranges=ranges_str,
   2022                              partition=partition, code=code))
   2023 
   2024       # Abort the OTA update. Note that the incremental OTA cannot be applied
   2025       # even if it may match the checksum of the target partition.
   2026       # a) If version < 3, operations like move and erase will make changes
   2027       #    unconditionally and damage the partition.
   2028       # b) If version >= 3, it won't even reach here.
   2029       else:
   2030         if partition == "system":
   2031           code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
   2032         else:
   2033           code = ErrorCode.VENDOR_VERIFICATION_FAILURE
   2034         script.AppendExtra((
   2035             'abort("E%d: %s partition has unexpected contents");\n'
   2036             'endif;') % (code, partition))
   2037 
   2038   def WritePostInstallVerifyScript(self, script):
   2039     partition = self.partition
   2040     script.Print('Verifying the updated %s image...' % (partition,))
   2041     # Unlike pre-install verification, clobbered_blocks should not be ignored.
   2042     ranges = self.tgt.care_map
   2043     ranges_str = ranges.to_string_raw()
   2044     script.AppendExtra(
   2045         'if range_sha1(%s, "%s") == "%s" then' % (
   2046             self.device, ranges_str,
   2047             self.tgt.TotalSha1(include_clobbered_blocks=True)))
   2048 
   2049     # Bug: 20881595
   2050     # Verify that extended blocks are really zeroed out.
   2051     if self.tgt.extended:
   2052       ranges_str = self.tgt.extended.to_string_raw()
   2053       script.AppendExtra(
   2054           'if range_sha1(%s, "%s") == "%s" then' % (
   2055               self.device, ranges_str,
   2056               self._HashZeroBlocks(self.tgt.extended.size())))
   2057       script.Print('Verified the updated %s image.' % (partition,))
   2058       if partition == "system":
   2059         code = ErrorCode.SYSTEM_NONZERO_CONTENTS
   2060       else:
   2061         code = ErrorCode.VENDOR_NONZERO_CONTENTS
   2062       script.AppendExtra(
   2063           'else\n'
   2064           '  abort("E%d: %s partition has unexpected non-zero contents after '
   2065           'OTA update");\n'
   2066           'endif;' % (code, partition))
   2067     else:
   2068       script.Print('Verified the updated %s image.' % (partition,))
   2069 
   2070     if partition == "system":
   2071       code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
   2072     else:
   2073       code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
   2074 
   2075     script.AppendExtra(
   2076         'else\n'
   2077         '  abort("E%d: %s partition has unexpected contents after OTA '
   2078         'update");\n'
   2079         'endif;' % (code, partition))
   2080 
   2081   def _WriteUpdate(self, script, output_zip):
   2082     ZipWrite(output_zip,
   2083              '{}.transfer.list'.format(self.path),
   2084              '{}.transfer.list'.format(self.partition))
   2085 
   2086     # For full OTA, compress the new.dat with brotli with quality 6 to reduce
    2087     # its size. Quality 9 almost triples the compression time but doesn't
    2088     # reduce the size much further. For a typical 1.8G system.new.dat:
   2089     #                       zip  | brotli(quality 6)  | brotli(quality 9)
   2090     #   compressed_size:    942M | 869M (~8% reduced) | 854M
   2091     #   compression_time:   75s  | 265s               | 719s
   2092     #   decompression_time: 15s  | 25s                | 25s
   2093 
   2094     if not self.src:
   2095       brotli_cmd = ['brotli', '--quality=6',
   2096                     '--output={}.new.dat.br'.format(self.path),
   2097                     '{}.new.dat'.format(self.path)]
   2098       print("Compressing {}.new.dat with brotli".format(self.partition))
   2099       RunAndCheckOutput(brotli_cmd)
   2100 
   2101       new_data_name = '{}.new.dat.br'.format(self.partition)
   2102       ZipWrite(output_zip,
   2103                '{}.new.dat.br'.format(self.path),
   2104                new_data_name,
   2105                compress_type=zipfile.ZIP_STORED)
   2106     else:
   2107       new_data_name = '{}.new.dat'.format(self.partition)
   2108       ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
   2109 
   2110     ZipWrite(output_zip,
   2111              '{}.patch.dat'.format(self.path),
   2112              '{}.patch.dat'.format(self.partition),
   2113              compress_type=zipfile.ZIP_STORED)
   2114 
   2115     if self.partition == "system":
   2116       code = ErrorCode.SYSTEM_UPDATE_FAILURE
   2117     else:
   2118       code = ErrorCode.VENDOR_UPDATE_FAILURE
   2119 
   2120     call = ('block_image_update({device}, '
   2121             'package_extract_file("{partition}.transfer.list"), '
   2122             '"{new_data_name}", "{partition}.patch.dat") ||\n'
   2123             '  abort("E{code}: Failed to update {partition} image.");'.format(
   2124                 device=self.device, partition=self.partition,
   2125                 new_data_name=new_data_name, code=code))
   2126     script.AppendExtra(script.WordWrap(call))
   2127 
   2128   def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
   2129     data = source.ReadRangeSet(ranges)
   2130     ctx = sha1()
   2131 
   2132     for p in data:
   2133       ctx.update(p)
   2134 
   2135     return ctx.hexdigest()
   2136 
   2137   def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
   2138     """Return the hash value for all zero blocks."""
   2139     zero_block = '\x00' * 4096
   2140     ctx = sha1()
   2141     for _ in range(num_blocks):
   2142       ctx.update(zero_block)
   2143 
   2144     return ctx.hexdigest()
   2145 
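# Example (illustrative): emitting a full-OTA block update for the system
# partition. 'system_tgt' would be a sparse_img.SparseImage built from the
# target files, and 'script'/'output_zip' come from the calling OTA script;
# OPTIONS.info_dict must already be loaded.
#
#   block_diff = BlockDifference("system", system_tgt, src=None)
#   block_diff.WriteScript(script, output_zip, progress=0.9,
#                          write_verify_script=True)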
   2146 
   2147 DataImage = blockimgdiff.DataImage
   2148 EmptyImage = blockimgdiff.EmptyImage
   2149 
   2150 # map recovery.fstab's fs_types to mount/format "partition types"
   2151 PARTITION_TYPES = {
   2152     "ext4": "EMMC",
   2153     "emmc": "EMMC",
   2154     "f2fs": "EMMC",
   2155     "squashfs": "EMMC"
   2156 }
   2157 
   2158 
   2159 def GetTypeAndDevice(mount_point, info):
   2160   fstab = info["fstab"]
   2161   if fstab:
   2162     return (PARTITION_TYPES[fstab[mount_point].fs_type],
   2163             fstab[mount_point].device)
   2164   else:
   2165     raise KeyError
   2166 
   2167 
   2168 def ParseCertificate(data):
   2169   """Parses and converts a PEM-encoded certificate into DER-encoded.
   2170 
   2171   This gives the same result as `openssl x509 -in <filename> -outform DER`.
   2172 
   2173   Returns:
   2174     The decoded certificate string.
   2175   """
   2176   cert_buffer = []
   2177   save = False
   2178   for line in data.split("\n"):
   2179     if "--END CERTIFICATE--" in line:
   2180       break
   2181     if save:
   2182       cert_buffer.append(line)
   2183     if "--BEGIN CERTIFICATE--" in line:
   2184       save = True
   2185   cert = "".join(cert_buffer).decode('base64')
   2186   return cert
   2187 
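# Example (illustrative): the DER blob returned by ParseCertificate() matches
# `openssl x509 -in cert.x509.pem -outform DER`; the filename is a
# placeholder.
#
#   with open('cert.x509.pem') as f:
#     der = ParseCertificate(f.read())
#   logger.info("cert sha256: %s", sha256(der).hexdigest())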
   2188 
   2189 def ExtractPublicKey(cert):
   2190   """Extracts the public key (PEM-encoded) from the given certificate file.
   2191 
   2192   Args:
   2193     cert: The certificate filename.
   2194 
   2195   Returns:
   2196     The public key string.
   2197 
   2198   Raises:
   2199     AssertionError: On non-zero return from 'openssl'.
   2200   """
   2201   # The behavior with '-out' is different between openssl 1.1 and openssl 1.0.
   2202   # While openssl 1.1 writes the key into the given filename followed by '-out',
   2203   # openssl 1.0 (both of 1.0.1 and 1.0.2) doesn't. So we collect the output from
   2204   # stdout instead.
   2205   cmd = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert]
   2206   proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   2207   pubkey, stderrdata = proc.communicate()
   2208   assert proc.returncode == 0, \
   2209       'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata)
   2210   return pubkey
   2211 
   2212 
   2213 def ExtractAvbPublicKey(key):
   2214   """Extracts the AVB public key from the given public or private key.
   2215 
   2216   Args:
   2217     key: The input key file, which should be PEM-encoded public or private key.
   2218 
   2219   Returns:
   2220     The path to the extracted AVB public key file.
   2221   """
   2222   output = MakeTempFile(prefix='avb-', suffix='.avbpubkey')
   2223   RunAndCheckOutput(
   2224       ['avbtool', 'extract_public_key', "--key", key, "--output", output])
   2225   return output
   2226 
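# Example (illustrative): both helpers shell out, so 'openssl' and 'avbtool'
# must be on PATH; the key filenames are placeholders.
#
#   pem_pubkey = ExtractPublicKey('testkey.x509.pem')     # PEM string
#   avbpubkey_path = ExtractAvbPublicKey('testkey.pem')   # temp .avbpubkey file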
   2227 
   2228 def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
   2229                       info_dict=None):
   2230   """Generates the recovery-from-boot patch and writes the script to output.
   2231 
   2232   Most of the space in the boot and recovery images is just the kernel, which is
   2233   identical for the two, so the resulting patch should be efficient. Add it to
   2234   the output zip, along with a shell script that is run from init.rc on first
   2235   boot to actually do the patching and install the new recovery image.
   2236 
   2237   Args:
   2238     input_dir: The top-level input directory of the target-files.zip.
   2239     output_sink: The callback function that writes the result.
   2240     recovery_img: File object for the recovery image.
    2241     boot_img: File object for the boot image.
   2242     info_dict: A dict returned by common.LoadInfoDict() on the input
   2243         target_files. Will use OPTIONS.info_dict if None has been given.
   2244   """
   2245   if info_dict is None:
   2246     info_dict = OPTIONS.info_dict
   2247 
   2248   full_recovery_image = info_dict.get("full_recovery_image") == "true"
   2249 
   2250   if full_recovery_image:
   2251     output_sink("etc/recovery.img", recovery_img.data)
   2252 
   2253   else:
   2254     system_root_image = info_dict.get("system_root_image") == "true"
   2255     path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
   2256     # With system-root-image, boot and recovery images will have mismatching
   2257     # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
   2258     # to handle such a case.
   2259     if system_root_image:
   2260       diff_program = ["bsdiff"]
   2261       bonus_args = ""
   2262       assert not os.path.exists(path)
   2263     else:
   2264       diff_program = ["imgdiff"]
   2265       if os.path.exists(path):
   2266         diff_program.append("-b")
   2267         diff_program.append(path)
   2268         bonus_args = "--bonus /system/etc/recovery-resource.dat"
   2269       else:
   2270         bonus_args = ""
   2271 
   2272     d = Difference(recovery_img, boot_img, diff_program=diff_program)
   2273     _, _, patch = d.ComputePatch()
   2274     output_sink("recovery-from-boot.p", patch)
   2275 
   2276   try:
   2277     # The following GetTypeAndDevice()s need to use the path in the target
   2278     # info_dict instead of source_info_dict.
   2279     boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
   2280     recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
   2281   except KeyError:
   2282     return
   2283 
   2284   if full_recovery_image:
   2285     sh = """#!/system/bin/sh
   2286 if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
   2287   applypatch \\
   2288           --flash /system/etc/recovery.img \\
   2289           --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
   2290       log -t recovery "Installing new recovery image: succeeded" || \\
   2291       log -t recovery "Installing new recovery image: failed"
   2292 else
   2293   log -t recovery "Recovery image already installed"
   2294 fi
   2295 """ % {'type': recovery_type,
   2296        'device': recovery_device,
   2297        'sha1': recovery_img.sha1,
   2298        'size': recovery_img.size}
   2299   else:
   2300     sh = """#!/system/bin/sh
   2301 if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
   2302   applypatch %(bonus_args)s \\
   2303           --patch /system/recovery-from-boot.p \\
   2304           --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
   2305           --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
   2306       log -t recovery "Installing new recovery image: succeeded" || \\
   2307       log -t recovery "Installing new recovery image: failed"
   2308 else
   2309   log -t recovery "Recovery image already installed"
   2310 fi
   2311 """ % {'boot_size': boot_img.size,
   2312        'boot_sha1': boot_img.sha1,
   2313        'recovery_size': recovery_img.size,
   2314        'recovery_sha1': recovery_img.sha1,
   2315        'boot_type': boot_type,
   2316        'boot_device': boot_device,
   2317        'recovery_type': recovery_type,
   2318        'recovery_device': recovery_device,
   2319        'bonus_args': bonus_args}
   2320 
   2321   # The install script location moved from /system/etc to /system/bin
   2322   # in the L release.
   2323   sh_location = "bin/install-recovery.sh"
   2324 
   2325   logger.info("putting script in %s", sh_location)
   2326 
   2327   output_sink(sh_location, sh)
   2328 
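# Example (illustrative): output_sink simply receives (relative_path, data)
# pairs; a minimal sketch that stages files under an assumed 'system_dir':
#
#   def output_sink(fn, data):
#     with open(os.path.join(system_dir, fn), "wb") as f:
#       f.write(data)
#
#   MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img)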
   2329 
   2330 class DynamicPartitionUpdate(object):
   2331   def __init__(self, src_group=None, tgt_group=None, progress=None,
   2332                block_difference=None):
   2333     self.src_group = src_group
   2334     self.tgt_group = tgt_group
   2335     self.progress = progress
   2336     self.block_difference = block_difference
   2337 
   2338   @property
   2339   def src_size(self):
   2340     if not self.block_difference:
   2341       return 0
   2342     return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)
   2343 
   2344   @property
   2345   def tgt_size(self):
   2346     if not self.block_difference:
   2347       return 0
   2348     return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)
   2349 
   2350   @staticmethod
   2351   def _GetSparseImageSize(img):
   2352     if not img:
   2353       return 0
   2354     return img.blocksize * img.total_blocks
   2355 
   2356 
   2357 class DynamicGroupUpdate(object):
   2358   def __init__(self, src_size=None, tgt_size=None):
   2359     # None: group does not exist. 0: no size limits.
   2360     self.src_size = src_size
   2361     self.tgt_size = tgt_size
   2362 
   2363 
   2364 class DynamicPartitionsDifference(object):
   2365   def __init__(self, info_dict, block_diffs, progress_dict=None,
   2366                source_info_dict=None):
   2367     if progress_dict is None:
   2368       progress_dict = dict()
   2369 
   2370     self._remove_all_before_apply = False
   2371     if source_info_dict is None:
   2372       self._remove_all_before_apply = True
   2373       source_info_dict = dict()
   2374 
   2375     block_diff_dict = {e.partition:e for e in block_diffs}
   2376     assert len(block_diff_dict) == len(block_diffs), \
   2377         "Duplicated BlockDifference object for {}".format(
   2378             [partition for partition, count in
   2379              collections.Counter(e.partition for e in block_diffs).items()
   2380              if count > 1])
   2381 
   2382     self._partition_updates = collections.OrderedDict()
   2383 
   2384     for p, block_diff in block_diff_dict.items():
   2385       self._partition_updates[p] = DynamicPartitionUpdate()
   2386       self._partition_updates[p].block_difference = block_diff
   2387 
   2388     for p, progress in progress_dict.items():
   2389       if p in self._partition_updates:
   2390         self._partition_updates[p].progress = progress
   2391 
   2392     tgt_groups = shlex.split(info_dict.get(
   2393         "super_partition_groups", "").strip())
   2394     src_groups = shlex.split(source_info_dict.get(
   2395         "super_partition_groups", "").strip())
   2396 
   2397     for g in tgt_groups:
   2398       for p in shlex.split(info_dict.get(
   2399           "super_%s_partition_list" % g, "").strip()):
   2400         assert p in self._partition_updates, \
   2401             "{} is in target super_{}_partition_list but no BlockDifference " \
   2402             "object is provided.".format(p, g)
   2403         self._partition_updates[p].tgt_group = g
   2404 
   2405     for g in src_groups:
   2406       for p in shlex.split(source_info_dict.get(
   2407           "super_%s_partition_list" % g, "").strip()):
   2408         assert p in self._partition_updates, \
   2409             "{} is in source super_{}_partition_list but no BlockDifference " \
   2410             "object is provided.".format(p, g)
   2411         self._partition_updates[p].src_group = g
   2412 
   2413     target_dynamic_partitions = set(shlex.split(info_dict.get(
   2414         "dynamic_partition_list", "").strip()))
   2415     block_diffs_with_target = set(p for p, u in self._partition_updates.items()
   2416                                   if u.tgt_size)
   2417     assert block_diffs_with_target == target_dynamic_partitions, \
   2418         "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
   2419             list(target_dynamic_partitions), list(block_diffs_with_target))
   2420 
   2421     source_dynamic_partitions = set(shlex.split(source_info_dict.get(
   2422         "dynamic_partition_list", "").strip()))
   2423     block_diffs_with_source = set(p for p, u in self._partition_updates.items()
   2424                                   if u.src_size)
   2425     assert block_diffs_with_source == source_dynamic_partitions, \
   2426         "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
   2427             list(source_dynamic_partitions), list(block_diffs_with_source))
   2428 
   2429     if self._partition_updates:
   2430       logger.info("Updating dynamic partitions %s",
   2431                   self._partition_updates.keys())
   2432 
   2433     self._group_updates = collections.OrderedDict()
   2434 
   2435     for g in tgt_groups:
   2436       self._group_updates[g] = DynamicGroupUpdate()
   2437       self._group_updates[g].tgt_size = int(info_dict.get(
   2438           "super_%s_group_size" % g, "0").strip())
   2439 
   2440     for g in src_groups:
   2441       if g not in self._group_updates:
   2442         self._group_updates[g] = DynamicGroupUpdate()
   2443       self._group_updates[g].src_size = int(source_info_dict.get(
   2444           "super_%s_group_size" % g, "0").strip())
   2445 
   2446     self._Compute()
   2447 
   2448   def WriteScript(self, script, output_zip, write_verify_script=False):
   2449     script.Comment('--- Start patching dynamic partitions ---')
   2450     for p, u in self._partition_updates.items():
   2451       if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
   2452         script.Comment('Patch partition %s' % p)
   2453         u.block_difference.WriteScript(script, output_zip, progress=u.progress,
   2454                                        write_verify_script=False)
   2455 
   2456     op_list_path = MakeTempFile()
   2457     with open(op_list_path, 'w') as f:
   2458       for line in self._op_list:
   2459         f.write('{}\n'.format(line))
   2460 
   2461     ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")
   2462 
   2463     script.Comment('Update dynamic partition metadata')
   2464     script.AppendExtra('assert(update_dynamic_partitions('
   2465                        'package_extract_file("dynamic_partitions_op_list")));')
   2466 
   2467     if write_verify_script:
   2468       for p, u in self._partition_updates.items():
   2469         if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
   2470           u.block_difference.WritePostInstallVerifyScript(script)
   2471           script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
   2472 
   2473     for p, u in self._partition_updates.items():
   2474       if u.tgt_size and u.src_size <= u.tgt_size:
   2475         script.Comment('Patch partition %s' % p)
   2476         u.block_difference.WriteScript(script, output_zip, progress=u.progress,
   2477                                        write_verify_script=write_verify_script)
   2478         if write_verify_script:
   2479           script.AppendExtra('unmap_partition("%s");' % p) # ignore errors
   2480 
   2481     script.Comment('--- End patching dynamic partitions ---')
   2482 
   2483   def _Compute(self):
   2484     self._op_list = list()
   2485 
   2486     def append(line):
   2487       self._op_list.append(line)
   2488 
   2489     def comment(line):
   2490       self._op_list.append("# %s" % line)
   2491 
   2492     if self._remove_all_before_apply:
   2493       comment('Remove all existing dynamic partitions and groups before '
   2494               'applying full OTA')
   2495       append('remove_all_groups')
   2496 
   2497     for p, u in self._partition_updates.items():
   2498       if u.src_group and not u.tgt_group:
   2499         append('remove %s' % p)
   2500 
   2501     for p, u in self._partition_updates.items():
   2502       if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
   2503         comment('Move partition %s from %s to default' % (p, u.src_group))
   2504         append('move %s default' % p)
   2505 
   2506     for p, u in self._partition_updates.items():
   2507       if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
   2508         comment('Shrink partition %s from %d to %d' %
   2509                 (p, u.src_size, u.tgt_size))
   2510         append('resize %s %s' % (p, u.tgt_size))
   2511 
   2512     for g, u in self._group_updates.items():
   2513       if u.src_size is not None and u.tgt_size is None:
   2514         append('remove_group %s' % g)
   2515       if (u.src_size is not None and u.tgt_size is not None and
   2516           u.src_size > u.tgt_size):
   2517         comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
   2518         append('resize_group %s %d' % (g, u.tgt_size))
   2519 
   2520     for g, u in self._group_updates.items():
   2521       if u.src_size is None and u.tgt_size is not None:
   2522         comment('Add group %s with maximum size %d' % (g, u.tgt_size))
   2523         append('add_group %s %d' % (g, u.tgt_size))
   2524       if (u.src_size is not None and u.tgt_size is not None and
   2525           u.src_size < u.tgt_size):
   2526         comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
   2527         append('resize_group %s %d' % (g, u.tgt_size))
   2528 
   2529     for p, u in self._partition_updates.items():
   2530       if u.tgt_group and not u.src_group:
   2531         comment('Add partition %s to group %s' % (p, u.tgt_group))
   2532         append('add %s %s' % (p, u.tgt_group))
   2533 
   2534     for p, u in self._partition_updates.items():
   2535       if u.tgt_size and u.src_size < u.tgt_size:
   2536         comment('Grow partition %s from %d to %d' % (p, u.src_size, u.tgt_size))
   2537         append('resize %s %d' % (p, u.tgt_size))
   2538 
   2539     for p, u in self._partition_updates.items():
   2540       if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
   2541         comment('Move partition %s from default to %s' %
   2542                 (p, u.tgt_group))
   2543         append('move %s %s' % (p, u.tgt_group))
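# For illustration, a full-OTA _Compute() pass (source_info_dict is None, so
# everything is removed first) might emit an op list like the following; the
# group name and sizes are made up:
#
#   # Remove all existing dynamic partitions and groups before applying full OTA
#   remove_all_groups
#   # Add group example_group with maximum size 4294967296
#   add_group example_group 4294967296
#   # Add partition system to group example_group
#   add system example_group
#   # Grow partition system from 0 to 3221225472
#   resize system 3221225472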
   2544