      1 # Copyright (C) 2008 The Android Open Source Project
      2 #
      3 # Licensed under the Apache License, Version 2.0 (the "License");
      4 # you may not use this file except in compliance with the License.
      5 # You may obtain a copy of the License at
      6 #
      7 #      http://www.apache.org/licenses/LICENSE-2.0
      8 #
      9 # Unless required by applicable law or agreed to in writing, software
     10 # distributed under the License is distributed on an "AS IS" BASIS,
     11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 # See the License for the specific language governing permissions and
     13 # limitations under the License.
     14 
     15 from __future__ import print_function
     16 
     17 import copy
     18 import errno
     19 import getopt
     20 import getpass
     21 import imp
     22 import os
     23 import platform
     24 import re
     25 import shlex
     26 import shutil
     27 import subprocess
     28 import sys
     29 import tempfile
     30 import threading
     31 import time
     32 import zipfile
     33 
     34 import blockimgdiff
     35 
     36 from hashlib import sha1 as sha1
     37 
     38 
     39 class Options(object):
     40   def __init__(self):
     41     platform_search_path = {
     42         "linux2": "out/host/linux-x86",
     43         "darwin": "out/host/darwin-x86",
     44     }
     45 
     46     self.search_path = platform_search_path.get(sys.platform, None)
     47     self.signapk_path = "framework/signapk.jar"  # Relative to search_path
     48     self.signapk_shared_library_path = "lib64"   # Relative to search_path
     49     self.extra_signapk_args = []
     50     self.java_path = "java"  # Use the one on the path by default.
     51     self.java_args = ["-Xmx2048m"]  # The default JVM args.
     52     self.public_key_suffix = ".x509.pem"
     53     self.private_key_suffix = ".pk8"
      54     # Use the boot_signer built from otatools by default.
     55     self.boot_signer_path = "boot_signer"
     56     self.boot_signer_args = []
     57     self.verity_signer_path = None
     58     self.verity_signer_args = []
     59     self.verbose = False
     60     self.tempfiles = []
     61     self.device_specific = None
     62     self.extras = {}
     63     self.info_dict = None
     64     self.source_info_dict = None
     65     self.target_info_dict = None
     66     self.worker_threads = None
     67     # Stash size cannot exceed cache_size * threshold.
     68     self.cache_size = None
     69     self.stash_threshold = 0.8
     70 
     71 
     72 OPTIONS = Options()
     73 
     74 
     75 # Values for "certificate" in apkcerts that mean special things.
     76 SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
     77 
     78 class ErrorCode(object):
     79   """Define error_codes for failures that happen during the actual
     80   update package installation.
     81 
     82   Error codes 0-999 are reserved for failures before the package
     83   installation (i.e. low battery, package verification failure).
     84   Detailed code in 'bootable/recovery/error_code.h' """
     85 
     86   SYSTEM_VERIFICATION_FAILURE = 1000
     87   SYSTEM_UPDATE_FAILURE = 1001
     88   SYSTEM_UNEXPECTED_CONTENTS = 1002
     89   SYSTEM_NONZERO_CONTENTS = 1003
     90   SYSTEM_RECOVER_FAILURE = 1004
     91   VENDOR_VERIFICATION_FAILURE = 2000
     92   VENDOR_UPDATE_FAILURE = 2001
     93   VENDOR_UNEXPECTED_CONTENTS = 2002
     94   VENDOR_NONZERO_CONTENTS = 2003
     95   VENDOR_RECOVER_FAILURE = 2004
     96   OEM_PROP_MISMATCH = 3000
     97   FINGERPRINT_MISMATCH = 3001
     98   THUMBPRINT_MISMATCH = 3002
     99   OLDER_BUILD = 3003
    100   DEVICE_MISMATCH = 3004
    101   BAD_PATCH_FILE = 3005
    102   INSUFFICIENT_CACHE_SPACE = 3006
    103   TUNE_PARTITION_FAILURE = 3007
    104   APPLY_PATCH_FAILURE = 3008
    105 
    106 class ExternalError(RuntimeError):
    107   pass
    108 
    109 
    110 def Run(args, **kwargs):
    111   """Create and return a subprocess.Popen object, printing the command
    112   line on the terminal if -v was specified."""
    113   if OPTIONS.verbose:
    114     print("  running: ", " ".join(args))
    115   return subprocess.Popen(args, **kwargs)
    116 
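# Illustrative sketch (not part of the original module): Run() is a thin
# wrapper around subprocess.Popen, so callers pass the usual Popen keyword
# arguments and drive the returned process themselves.  The tool and file
# names below are hypothetical.
#
#   p = Run(["unzip", "-l", "target_files.zip"], stdout=subprocess.PIPE)
#   output, _ = p.communicate()
#   assert p.returncode == 0, "unzip failed"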
    117 
    118 def CloseInheritedPipes():
    119   """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
    120   before doing other work."""
    121   if platform.system() != "Darwin":
    122     return
    123   for d in range(3, 1025):
    124     try:
    125       stat = os.fstat(d)
    126       if stat is not None:
    127         pipebit = stat[0] & 0x1000
    128         if pipebit != 0:
    129           os.close(d)
    130     except OSError:
    131       pass
    132 
    133 
    134 def LoadInfoDict(input_file, input_dir=None):
    135   """Read and parse the META/misc_info.txt key/value pairs from the
    136   input target files and return a dict."""
    137 
    138   def read_helper(fn):
    139     if isinstance(input_file, zipfile.ZipFile):
    140       return input_file.read(fn)
    141     else:
    142       path = os.path.join(input_file, *fn.split("/"))
    143       try:
    144         with open(path) as f:
    145           return f.read()
    146       except IOError as e:
    147         if e.errno == errno.ENOENT:
    148           raise KeyError(fn)
    149 
    150   try:
    151     d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
    152   except KeyError:
    153     raise ValueError("can't find META/misc_info.txt in input target-files")
    154 
    155   assert "recovery_api_version" in d
    156   assert "fstab_version" in d
    157 
    158   # A few properties are stored as links to the files in the out/ directory.
    159   # It works fine with the build system. However, they are no longer available
    160   # when (re)generating from target_files zip. If input_dir is not None, we
    161   # are doing repacking. Redirect those properties to the actual files in the
    162   # unzipped directory.
    163   if input_dir is not None:
    164     # We carry a copy of file_contexts.bin under META/. If not available,
    165     # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    166     # to build images than the one running on device, such as when enabling
    167     # system_root_image. In that case, we must have the one for image
    168     # generation copied to META/.
    169     fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    170     fc_config = os.path.join(input_dir, "META", fc_basename)
    171     if d.get("system_root_image") == "true":
    172       assert os.path.exists(fc_config)
    173     if not os.path.exists(fc_config):
    174       fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
    175       if not os.path.exists(fc_config):
    176         fc_config = None
    177 
    178     if fc_config:
    179       d["selinux_fc"] = fc_config
    180 
    181     # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    182     if d.get("system_root_image") == "true":
    183       d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
    184       d["ramdisk_fs_config"] = os.path.join(
    185           input_dir, "META", "root_filesystem_config.txt")
    186 
    187     # Redirect {system,vendor}_base_fs_file.
    188     if "system_base_fs_file" in d:
    189       basename = os.path.basename(d["system_base_fs_file"])
    190       system_base_fs_file = os.path.join(input_dir, "META", basename)
    191       if os.path.exists(system_base_fs_file):
    192         d["system_base_fs_file"] = system_base_fs_file
    193       else:
    194         print("Warning: failed to find system base fs file: %s" % (
    195             system_base_fs_file,))
    196         del d["system_base_fs_file"]
    197 
    198     if "vendor_base_fs_file" in d:
    199       basename = os.path.basename(d["vendor_base_fs_file"])
    200       vendor_base_fs_file = os.path.join(input_dir, "META", basename)
    201       if os.path.exists(vendor_base_fs_file):
    202         d["vendor_base_fs_file"] = vendor_base_fs_file
    203       else:
    204         print("Warning: failed to find vendor base fs file: %s" % (
    205             vendor_base_fs_file,))
    206         del d["vendor_base_fs_file"]
    207 
    208   try:
    209     data = read_helper("META/imagesizes.txt")
    210     for line in data.split("\n"):
    211       if not line:
    212         continue
    213       name, value = line.split(" ", 1)
    214       if not value:
    215         continue
    216       if name == "blocksize":
    217         d[name] = value
    218       else:
    219         d[name + "_size"] = value
    220   except KeyError:
    221     pass
    222 
    223   def makeint(key):
    224     if key in d:
    225       d[key] = int(d[key], 0)
    226 
    227   makeint("recovery_api_version")
    228   makeint("blocksize")
    229   makeint("system_size")
    230   makeint("vendor_size")
    231   makeint("userdata_size")
    232   makeint("cache_size")
    233   makeint("recovery_size")
    234   makeint("boot_size")
    235   makeint("fstab_version")
    236 
    237   system_root_image = d.get("system_root_image", None) == "true"
    238   if d.get("no_recovery", None) != "true":
    239     recovery_fstab_path = "RECOVERY/RAMDISK/etc/recovery.fstab"
    240     d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
    241         recovery_fstab_path, system_root_image)
    242   elif d.get("recovery_as_boot", None) == "true":
    243     recovery_fstab_path = "BOOT/RAMDISK/etc/recovery.fstab"
    244     d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
    245         recovery_fstab_path, system_root_image)
    246   else:
    247     d["fstab"] = None
    248 
    249   d["build.prop"] = LoadBuildProp(read_helper)
    250   return d
    251 
    252 
    253 def LoadBuildProp(read_helper):
    254   try:
    255     data = read_helper("SYSTEM/build.prop")
    256   except KeyError:
    257     print("Warning: could not find SYSTEM/build.prop in %s" % (zip,))
    258     data = ""
    259   return LoadDictionaryFromLines(data.split("\n"))
    260 
    261 
    262 def LoadDictionaryFromLines(lines):
    263   d = {}
    264   for line in lines:
    265     line = line.strip()
    266     if not line or line.startswith("#"):
    267       continue
    268     if "=" in line:
    269       name, value = line.split("=", 1)
    270       d[name] = value
    271   return d
    272 
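# Illustrative sketch (not part of the original module): LoadDictionaryFromLines()
# parses "name=value" lines, skipping blanks and "#" comments.  The property
# values below are examples only.
#
#   lines = [
#       "# build properties",
#       "ro.build.id=NRD90M",
#       "ro.build.version.sdk=25",
#   ]
#   d = LoadDictionaryFromLines(lines)
#   # d == {"ro.build.id": "NRD90M", "ro.build.version.sdk": "25"}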
    273 
    274 def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
    275                       system_root_image=False):
    276   class Partition(object):
    277     def __init__(self, mount_point, fs_type, device, length, context):
    278       self.mount_point = mount_point
    279       self.fs_type = fs_type
    280       self.device = device
    281       self.length = length
    282       self.context = context
    283 
    284   try:
    285     data = read_helper(recovery_fstab_path)
    286   except KeyError:
    287     print("Warning: could not find {}".format(recovery_fstab_path))
    288     data = ""
    289 
    290   assert fstab_version == 2
    291 
    292   d = {}
    293   for line in data.split("\n"):
    294     line = line.strip()
    295     if not line or line.startswith("#"):
    296       continue
    297 
    298     # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    299     pieces = line.split()
    300     if len(pieces) != 5:
    301       raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
    302 
    303     # Ignore entries that are managed by vold.
    304     options = pieces[4]
    305     if "voldmanaged=" in options:
    306       continue
    307 
    308     # It's a good line, parse it.
    309     length = 0
    310     options = options.split(",")
    311     for i in options:
    312       if i.startswith("length="):
    313         length = int(i[7:])
    314       else:
    315         # Ignore all unknown options in the unified fstab.
    316         continue
    317 
    318     mount_flags = pieces[3]
    319     # Honor the SELinux context if present.
    320     context = None
    321     for i in mount_flags.split(","):
    322       if i.startswith("context="):
    323         context = i
    324 
    325     mount_point = pieces[1]
    326     d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
    327                                device=pieces[0], length=length, context=context)
    328 
    329   # / is used for the system mount point when the root directory is included in
    330   # system. Other areas assume system is always at "/system" so point /system
    331   # at /.
    332   if system_root_image:
    333     assert "/system" not in d and "/" in d
    334     d["/system"] = d["/"]
    335   return d
    336 
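# Illustrative sketch (not part of the original module): a version-2
# recovery.fstab line and the Partition it maps to.  The device path is made
# up.  Entries whose fs_mgr flags contain "voldmanaged=" are skipped.
#
#   /dev/block/bootdevice/by-name/userdata /data ext4 noatime,nosuid,nodev wait,check,length=-16384
#
# becomes d["/data"] with fs_type "ext4",
# device "/dev/block/bootdevice/by-name/userdata", length -16384, context None.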
    337 
    338 def DumpInfoDict(d):
    339   for k, v in sorted(d.items()):
    340     print("%-25s = (%s) %s" % (k, type(v).__name__, v))
    341 
    342 
    343 def AppendAVBSigningArgs(cmd):
    344   """Append signing arguments for avbtool."""
    345   keypath = OPTIONS.info_dict.get("board_avb_key_path", None)
    346   algorithm = OPTIONS.info_dict.get("board_avb_algorithm", None)
    347   if not keypath or not algorithm:
    348     algorithm = "SHA256_RSA4096"
    349     keypath = "external/avb/test/data/testkey_rsa4096.pem"
    350   cmd.extend(["--key", keypath, "--algorithm", algorithm])
    351 
    352 
    353 def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
    354                         has_ramdisk=False, two_step_image=False):
    355   """Build a bootable image from the specified sourcedir.
    356 
    357   Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
    358   'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
    359   we are building a two-step special image (i.e. building a recovery image to
    360   be loaded into /boot in two-step OTAs).
    361 
    362   Return the image data, or None if sourcedir does not appear to contain
    363   files for building the requested image.
    364   """
    365 
    366   def make_ramdisk():
    367     ramdisk_img = tempfile.NamedTemporaryFile()
    368 
    369     if os.access(fs_config_file, os.F_OK):
    370       cmd = ["mkbootfs", "-f", fs_config_file,
    371              os.path.join(sourcedir, "RAMDISK")]
    372     else:
    373       cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    374     p1 = Run(cmd, stdout=subprocess.PIPE)
    375     p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
    376 
    377     p2.wait()
    378     p1.wait()
    379     assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    380     assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
    381 
    382     return ramdisk_img
    383 
    384   if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    385     return None
    386 
    387   if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    388     return None
    389 
    390   if info_dict is None:
    391     info_dict = OPTIONS.info_dict
    392 
    393   img = tempfile.NamedTemporaryFile()
    394 
    395   if has_ramdisk:
    396     ramdisk_img = make_ramdisk()
    397 
    398   # Use MKBOOTIMG from the environment, or "mkbootimg" if it is empty or unset.
    399   mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
    400 
    401   cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
    402 
    403   fn = os.path.join(sourcedir, "second")
    404   if os.access(fn, os.F_OK):
    405     cmd.append("--second")
    406     cmd.append(fn)
    407 
    408   fn = os.path.join(sourcedir, "cmdline")
    409   if os.access(fn, os.F_OK):
    410     cmd.append("--cmdline")
    411     cmd.append(open(fn).read().rstrip("\n"))
    412 
    413   fn = os.path.join(sourcedir, "base")
    414   if os.access(fn, os.F_OK):
    415     cmd.append("--base")
    416     cmd.append(open(fn).read().rstrip("\n"))
    417 
    418   fn = os.path.join(sourcedir, "pagesize")
    419   if os.access(fn, os.F_OK):
    420     cmd.append("--pagesize")
    421     cmd.append(open(fn).read().rstrip("\n"))
    422 
    423   args = info_dict.get("mkbootimg_args", None)
    424   if args and args.strip():
    425     cmd.extend(shlex.split(args))
    426 
    427   args = info_dict.get("mkbootimg_version_args", None)
    428   if args and args.strip():
    429     cmd.extend(shlex.split(args))
    430 
    431   if has_ramdisk:
    432     cmd.extend(["--ramdisk", ramdisk_img.name])
    433 
    434   img_unsigned = None
    435   if info_dict.get("vboot", None):
    436     img_unsigned = tempfile.NamedTemporaryFile()
    437     cmd.extend(["--output", img_unsigned.name])
    438   else:
    439     cmd.extend(["--output", img.name])
    440 
    441   p = Run(cmd, stdout=subprocess.PIPE)
    442   p.communicate()
    443   assert p.returncode == 0, "mkbootimg of %s image failed" % (
    444       os.path.basename(sourcedir),)
    445 
    446   if (info_dict.get("boot_signer", None) == "true" and
    447       info_dict.get("verity_key", None)):
    448     # Hard-code the path as "/boot" for two-step special recovery image (which
    449     # will be loaded into /boot during the two-step OTA).
    450     if two_step_image:
    451       path = "/boot"
    452     else:
    453       path = "/" + os.path.basename(sourcedir).lower()
    454     cmd = [OPTIONS.boot_signer_path]
    455     cmd.extend(OPTIONS.boot_signer_args)
    456     cmd.extend([path, img.name,
    457                 info_dict["verity_key"] + ".pk8",
    458                 info_dict["verity_key"] + ".x509.pem", img.name])
    459     p = Run(cmd, stdout=subprocess.PIPE)
    460     p.communicate()
    461     assert p.returncode == 0, "boot_signer of %s image failed" % path
    462 
    463   # Sign the image if vboot is non-empty.
    464   elif info_dict.get("vboot", None):
    465     path = "/" + os.path.basename(sourcedir).lower()
    466     img_keyblock = tempfile.NamedTemporaryFile()
    467     # We have switched from the prebuilt futility binary to using the tool
    468     # (futility-host) built from the source. Override the setting in the old
    469     # TF.zip.
    470     futility = info_dict["futility"]
    471     if futility.startswith("prebuilts/"):
    472       futility = "futility-host"
    473     cmd = [info_dict["vboot_signer_cmd"], futility,
    474            img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
    475            info_dict["vboot_key"] + ".vbprivk",
    476            info_dict["vboot_subkey"] + ".vbprivk",
    477            img_keyblock.name,
    478            img.name]
    479     p = Run(cmd, stdout=subprocess.PIPE)
    480     p.communicate()
    481     assert p.returncode == 0, "vboot_signer of %s image failed" % path
    482 
    483     # Clean up the temp files.
    484     img_unsigned.close()
    485     img_keyblock.close()
    486 
    487   # AVB: if enabled, calculate and add hash to boot.img.
    488   if info_dict.get("board_avb_enable", None) == "true":
    489     avbtool = os.getenv('AVBTOOL') or "avbtool"
    490     part_size = info_dict.get("boot_size", None)
    491     cmd = [avbtool, "add_hash_footer", "--image", img.name,
    492            "--partition_size", str(part_size), "--partition_name", "boot"]
    493     AppendAVBSigningArgs(cmd)
    494     args = info_dict.get("board_avb_boot_add_hash_footer_args", None)
    495     if args and args.strip():
    496       cmd.extend(shlex.split(args))
    497     p = Run(cmd, stdout=subprocess.PIPE)
    498     p.communicate()
    499     assert p.returncode == 0, "avbtool add_hash_footer of %s failed" % (
    500         os.path.basename(sourcedir),)
    501 
    502   img.seek(0, os.SEEK_SET)
    503   data = img.read()
    504 
    505   if has_ramdisk:
    506     ramdisk_img.close()
    507   img.close()
    508 
    509   return data
    510 
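# Illustrative sketch (not part of the original module): _BuildBootableImage()
# expects an unpacked target-files subtree such as BOOT/ containing "kernel",
# optionally "cmdline", "base", "pagesize", "second" and a RAMDISK/ directory.
# GetBootableImage() below is the usual entry point; a direct call with
# hypothetical paths might look like:
#
#   data = _BuildBootableImage(
#       "/tmp/targetfiles-XXXX/BOOT",
#       "/tmp/targetfiles-XXXX/META/boot_filesystem_config.txt",
#       info_dict=OPTIONS.info_dict, has_ramdisk=True)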
    511 
    512 def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
    513                      info_dict=None, two_step_image=False):
    514   """Return a File object with the desired bootable image.
    515 
    516   Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
    517   otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
    518   the source files in 'unpack_dir'/'tree_subdir'."""
    519 
    520   prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
    521   if os.path.exists(prebuilt_path):
    522     print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,))
    523     return File.FromLocalFile(name, prebuilt_path)
    524 
    525   prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
    526   if os.path.exists(prebuilt_path):
    527     print("using prebuilt %s from IMAGES..." % (prebuilt_name,))
    528     return File.FromLocalFile(name, prebuilt_path)
    529 
    530   print("building image from target_files %s..." % (tree_subdir,))
    531 
    532   if info_dict is None:
    533     info_dict = OPTIONS.info_dict
    534 
    535   # When system_root_image is "true", we don't pack a ramdisk into the boot
    536   # image, unless "recovery_as_boot" is specified, in which case the boot
    537   # image carries the recovery ramdisk.
    538   has_ramdisk = (info_dict.get("system_root_image") != "true" or
    539                  prebuilt_name != "boot.img" or
    540                  info_dict.get("recovery_as_boot") == "true")
    541 
    542   fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
    543   data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
    544                              os.path.join(unpack_dir, fs_config),
    545                              info_dict, has_ramdisk, two_step_image)
    546   if data:
    547     return File(name, data)
    548   return None
    549 
    550 
    551 def UnzipTemp(filename, pattern=None):
    552   """Unzip the given archive into a temporary directory and return the name.
    553 
    554   If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
    555   temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
    556 
    557   Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
    558   main file), open for reading.
    559   """
    560 
    561   tmp = tempfile.mkdtemp(prefix="targetfiles-")
    562   OPTIONS.tempfiles.append(tmp)
    563 
    564   def unzip_to_dir(filename, dirname):
    565     cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    566     if pattern is not None:
    567       cmd.extend(pattern)
    568     p = Run(cmd, stdout=subprocess.PIPE)
    569     p.communicate()
    570     if p.returncode != 0:
    571       raise ExternalError("failed to unzip input target-files \"%s\"" %
    572                           (filename,))
    573 
    574   m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
    575   if m:
    576     unzip_to_dir(m.group(1), tmp)
    577     unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    578     filename = m.group(1)
    579   else:
    580     unzip_to_dir(filename, tmp)
    581 
    582   return tmp, zipfile.ZipFile(filename, "r")
    583 
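# Illustrative sketch (not part of the original module): typical use of
# UnzipTemp(); the archive name and patterns are hypothetical.  The temp dir
# is registered in OPTIONS.tempfiles and removed by Cleanup().
#
#   unzip_dir, input_zip = UnzipTemp("target_files.zip", ["META/*", "IMAGES/*"])
#   misc_info = LoadInfoDict(input_zip, unzip_dir)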
    584 
    585 def GetKeyPasswords(keylist):
    586   """Given a list of keys, prompt the user to enter passwords for
    587   those which require them.  Return a {key: password} dict.  password
    588   will be None if the key has no password."""
    589 
    590   no_passwords = []
    591   need_passwords = []
    592   key_passwords = {}
    593   devnull = open("/dev/null", "w+b")
    594   for k in sorted(keylist):
    595     # We don't need a password for things that aren't really keys.
    596     if k in SPECIAL_CERT_STRINGS:
    597       no_passwords.append(k)
    598       continue
    599 
    600     p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
    601              "-inform", "DER", "-nocrypt"],
    602             stdin=devnull.fileno(),
    603             stdout=devnull.fileno(),
    604             stderr=subprocess.STDOUT)
    605     p.communicate()
    606     if p.returncode == 0:
    607       # Definitely an unencrypted key.
    608       no_passwords.append(k)
    609     else:
    610       p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
    611                "-inform", "DER", "-passin", "pass:"],
    612               stdin=devnull.fileno(),
    613               stdout=devnull.fileno(),
    614               stderr=subprocess.PIPE)
    615       _, stderr = p.communicate()
    616       if p.returncode == 0:
    617         # Encrypted key with empty string as password.
    618         key_passwords[k] = ''
    619       elif stderr.startswith('Error decrypting key'):
    620         # Definitely encrypted key.
    621         # It would have said "Error reading key" if it didn't parse correctly.
    622         need_passwords.append(k)
    623       else:
    624         # Potentially, a type of key that openssl doesn't understand.
    625         # We'll let the routines in signapk.jar handle it.
    626         no_passwords.append(k)
    627   devnull.close()
    628 
    629   key_passwords.update(PasswordManager().GetPasswords(need_passwords))
    630   key_passwords.update(dict.fromkeys(no_passwords, None))
    631   return key_passwords
    632 
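# Illustrative sketch (not part of the original module): the key names below
# are hypothetical.  Keys that need no password map to None.
#
#   passwords = GetKeyPasswords(["build/target/product/security/testkey",
#                                "vendor/example/keys/platform"])
#   # e.g. {"build/target/product/security/testkey": None,
#   #       "vendor/example/keys/platform": "secret"}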
    633 
    634 def GetMinSdkVersion(apk_name):
    635   """Get the minSdkVersion delared in the APK. This can be both a decimal number
    636   (API Level) or a codename.
    637   """
    638 
    639   p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
    640   output, _ = p.communicate()
    641   if p.returncode != 0:
    642     raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
    643         % (p.returncode,))
    644 
    645   for line in output.split("\n"):
    646     # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    647     m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    648     if m:
    649       return m.group(1)
    650   raise ExternalError("No minSdkVersion returned by aapt")
    651 
    652 
    653 def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
    654   """Get the minSdkVersion declared in the APK as a number (API Level). If
    655   minSdkVersion is set to a codename, it is translated to a number using the
    656   provided map.
    657   """
    658 
    659   version = GetMinSdkVersion(apk_name)
    660   try:
    661     return int(version)
    662   except ValueError:
    663     # Not a decimal number. Codename?
    664     if version in codename_to_api_level_map:
    665       return codename_to_api_level_map[version]
    666     else:
    667       raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
    668                           % (version, codename_to_api_level_map))
    669 
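# Illustrative sketch (not part of the original module): translating a
# codename minSdkVersion into an API level.  The map contents and APK name
# are examples only.
#
#   codename_map = {"N": 24, "O": 26}
#   api_level = GetMinSdkVersionInt("Example.apk", codename_map)
#   # returns 24 whether the APK declares sdkVersion:'N' or sdkVersion:'24'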
    670 
    671 def SignFile(input_name, output_name, key, password, min_api_level=None,
    672     codename_to_api_level_map=dict(),
    673     whole_file=False):
    674   """Sign the input_name zip/jar/apk, producing output_name.  Use the
    675   given key and password (the latter may be None if the key does not
    676   have a password).
    677 
    678   If whole_file is true, use the "-w" option to SignApk to embed a
    679   signature that covers the whole file in the archive comment of the
    680   zip file.
    681 
    682   min_api_level is the API Level (int) of the oldest platform this file may end
    683   up on. If not specified for an APK, the API Level is obtained by interpreting
    684   the minSdkVersion attribute of the APK's AndroidManifest.xml.
    685 
    686   codename_to_api_level_map is needed to translate the codename which may be
    687   encountered as the APK's minSdkVersion.
    688   """
    689 
    690   java_library_path = os.path.join(
    691       OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
    692 
    693   cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
    694          ["-Djava.library.path=" + java_library_path,
    695           "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
    696          OPTIONS.extra_signapk_args)
    697   if whole_file:
    698     cmd.append("-w")
    699 
    700   min_sdk_version = min_api_level
    701   if min_sdk_version is None:
    702     if not whole_file:
    703       min_sdk_version = GetMinSdkVersionInt(
    704           input_name, codename_to_api_level_map)
    705   if min_sdk_version is not None:
    706     cmd.extend(["--min-sdk-version", str(min_sdk_version)])
    707 
    708   cmd.extend([key + OPTIONS.public_key_suffix,
    709               key + OPTIONS.private_key_suffix,
    710               input_name, output_name])
    711 
    712   p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    713   if password is not None:
    714     password += "\n"
    715   p.communicate(password)
    716   if p.returncode != 0:
    717     raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
    718 
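# Illustrative sketch (not part of the original module): signing an APK with
# a key pair named "releasekey", so "releasekey.x509.pem" / "releasekey.pk8"
# are picked up via the configured suffixes.  All names are hypothetical.
#
#   SignFile("Example.apk", "Example-signed.apk", "releasekey", password=None,
#            codename_to_api_level_map={"O": 26})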
    719 
    720 def CheckSize(data, target, info_dict):
    721   """Check the data string passed against the max size limit, if
    722   any, for the given target.  Raise exception if the data is too big.
    723   Print a warning if the data is nearing the maximum size."""
    724 
    725   if target.endswith(".img"):
    726     target = target[:-4]
    727   mount_point = "/" + target
    728 
    729   fs_type = None
    730   limit = None
    731   if info_dict["fstab"]:
    732     if mount_point == "/userdata":
    733       mount_point = "/data"
    734     p = info_dict["fstab"][mount_point]
    735     fs_type = p.fs_type
    736     device = p.device
    737     if "/" in device:
    738       device = device[device.rfind("/")+1:]
    739     limit = info_dict.get(device + "_size", None)
    740   if not fs_type or not limit:
    741     return
    742 
    743   size = len(data)
    744   pct = float(size) * 100.0 / limit
    745   msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
    746   if pct >= 99.0:
    747     raise ExternalError(msg)
    748   elif pct >= 95.0:
    749     print("\n  WARNING: %s\n" % (msg,))
    750   elif OPTIONS.verbose:
    751     print("  ", msg)
    752 
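# Illustrative sketch (not part of the original module): the limit comes from
# the "<device>_size" entry in the info dict.  For example, with
# system_size = 1073741824, a 1,063,256,064-byte image is about 99.02% of the
# limit, so CheckSize() raises ExternalError; between 95% and 99% it only
# prints a warning.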
    753 
    754 def ReadApkCerts(tf_zip):
    755   """Given a target_files ZipFile, parse the META/apkcerts.txt file
    756   and return a {package: cert} dict."""
    757   certmap = {}
    758   for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    759     line = line.strip()
    760     if not line:
    761       continue
    762     m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
    763                  r'private_key="(.*)"$', line)
    764     if m:
    765       name, cert, privkey = m.groups()
    766       public_key_suffix_len = len(OPTIONS.public_key_suffix)
    767       private_key_suffix_len = len(OPTIONS.private_key_suffix)
    768       if cert in SPECIAL_CERT_STRINGS and not privkey:
    769         certmap[name] = cert
    770       elif (cert.endswith(OPTIONS.public_key_suffix) and
    771             privkey.endswith(OPTIONS.private_key_suffix) and
    772             cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
    773         certmap[name] = cert[:-public_key_suffix_len]
    774       else:
    775         raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
    776   return certmap
    777 
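# Illustrative sketch (not part of the original module): a META/apkcerts.txt
# line such as (package name is hypothetical)
#
#   name="Example.apk" certificate="build/target/product/security/platform.x509.pem" private_key="build/target/product/security/platform.pk8"
#
# yields certmap["Example.apk"] == "build/target/product/security/platform",
# i.e. the shared key stem with the suffixes stripped.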
    778 
    779 COMMON_DOCSTRING = """
    780   -p  (--path)  <dir>
    781       Prepend <dir>/bin to the list of places to search for binaries
    782       run by this script, and expect to find jars in <dir>/framework.
    783 
    784   -s  (--device_specific) <file>
    785       Path to the python module containing device-specific
    786       releasetools code.
    787 
    788   -x  (--extra)  <key=value>
    789       Add a key/value pair to the 'extras' dict, which device-specific
    790       extension code may look at.
    791 
    792   -v  (--verbose)
    793       Show command lines being executed.
    794 
    795   -h  (--help)
    796       Display this usage message and exit.
    797 """
    798 
    799 def Usage(docstring):
    800   print(docstring.rstrip("\n"))
    801   print(COMMON_DOCSTRING)
    802 
    803 
    804 def ParseOptions(argv,
    805                  docstring,
    806                  extra_opts="", extra_long_opts=(),
    807                  extra_option_handler=None):
    808   """Parse the options in argv and return any arguments that aren't
    809   flags.  docstring is the calling module's docstring, to be displayed
    810   for errors and -h.  extra_opts and extra_long_opts are for flags
    811   defined by the caller, which are processed by passing them to
    812   extra_option_handler."""
    813 
    814   try:
    815     opts, args = getopt.getopt(
    816         argv, "hvp:s:x:" + extra_opts,
    817         ["help", "verbose", "path=", "signapk_path=",
    818          "signapk_shared_library_path=", "extra_signapk_args=",
    819          "java_path=", "java_args=", "public_key_suffix=",
    820          "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
    821          "verity_signer_path=", "verity_signer_args=", "device_specific=",
    822          "extra="] +
    823         list(extra_long_opts))
    824   except getopt.GetoptError as err:
    825     Usage(docstring)
    826     print("**", str(err), "**")
    827     sys.exit(2)
    828 
    829   for o, a in opts:
    830     if o in ("-h", "--help"):
    831       Usage(docstring)
    832       sys.exit()
    833     elif o in ("-v", "--verbose"):
    834       OPTIONS.verbose = True
    835     elif o in ("-p", "--path"):
    836       OPTIONS.search_path = a
    837     elif o in ("--signapk_path",):
    838       OPTIONS.signapk_path = a
    839     elif o in ("--signapk_shared_library_path",):
    840       OPTIONS.signapk_shared_library_path = a
    841     elif o in ("--extra_signapk_args",):
    842       OPTIONS.extra_signapk_args = shlex.split(a)
    843     elif o in ("--java_path",):
    844       OPTIONS.java_path = a
    845     elif o in ("--java_args",):
    846       OPTIONS.java_args = shlex.split(a)
    847     elif o in ("--public_key_suffix",):
    848       OPTIONS.public_key_suffix = a
    849     elif o in ("--private_key_suffix",):
    850       OPTIONS.private_key_suffix = a
    851     elif o in ("--boot_signer_path",):
    852       OPTIONS.boot_signer_path = a
    853     elif o in ("--boot_signer_args",):
    854       OPTIONS.boot_signer_args = shlex.split(a)
    855     elif o in ("--verity_signer_path",):
    856       OPTIONS.verity_signer_path = a
    857     elif o in ("--verity_signer_args",):
    858       OPTIONS.verity_signer_args = shlex.split(a)
    859     elif o in ("-s", "--device_specific"):
    860       OPTIONS.device_specific = a
    861     elif o in ("-x", "--extra"):
    862       key, value = a.split("=", 1)
    863       OPTIONS.extras[key] = value
    864     else:
    865       if extra_option_handler is None or not extra_option_handler(o, a):
    866         assert False, "unknown option \"%s\"" % (o,)
    867 
    868   if OPTIONS.search_path:
    869     os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
    870                           os.pathsep + os.environ["PATH"])
    871 
    872   return args
    873 
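# Illustrative sketch (not part of the original module): how a calling script
# typically plugs its own flags into ParseOptions().  The option names are
# hypothetical; the handler returns True for options it consumed.
#
#   def option_handler(o, a):
#     if o in ("-b", "--board_config"):
#       pass  # deal with the value in 'a'
#     else:
#       return False
#     return True
#
#   args = ParseOptions(sys.argv[1:], __doc__,
#                       extra_opts="b:",
#                       extra_long_opts=["board_config="],
#                       extra_option_handler=option_handler)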
    874 
    875 def MakeTempFile(prefix='tmp', suffix=''):
    876   """Make a temp file and add it to the list of things to be deleted
    877   when Cleanup() is called.  Return the filename."""
    878   fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
    879   os.close(fd)
    880   OPTIONS.tempfiles.append(fn)
    881   return fn
    882 
    883 
    884 def Cleanup():
    885   for i in OPTIONS.tempfiles:
    886     if os.path.isdir(i):
    887       shutil.rmtree(i)
    888     else:
    889       os.remove(i)
    890 
    891 
    892 class PasswordManager(object):
    893   def __init__(self):
    894     self.editor = os.getenv("EDITOR", None)
    895     self.pwfile = os.getenv("ANDROID_PW_FILE", None)
    896 
    897   def GetPasswords(self, items):
    898     """Get passwords corresponding to each string in 'items',
    899     returning a dict.  (The dict may have keys in addition to the
    900     values in 'items'.)
    901 
    902     Uses the passwords in $ANDROID_PW_FILE if available, letting the
    903     user edit that file to add more needed passwords.  If no editor is
    904   available, or $ANDROID_PW_FILE isn't defined, prompts the user
    905     interactively in the ordinary way.
    906     """
    907 
    908     current = self.ReadFile()
    909 
    910     first = True
    911     while True:
    912       missing = []
    913       for i in items:
    914         if i not in current or not current[i]:
    915           missing.append(i)
    916       # Are all the passwords already in the file?
    917       if not missing:
    918         return current
    919 
    920       for i in missing:
    921         current[i] = ""
    922 
    923       if not first:
    924         print("key file %s still missing some passwords." % (self.pwfile,))
    925         answer = raw_input("try to edit again? [y]> ").strip()
    926         if answer and answer[0] not in 'yY':
    927           raise RuntimeError("key passwords unavailable")
    928       first = False
    929 
    930       current = self.UpdateAndReadFile(current)
    931 
    932   def PromptResult(self, current): # pylint: disable=no-self-use
    933     """Prompt the user to enter a value (password) for each key in
    934   'current' whose value is false.  Returns a new dict with all the
    935     values.
    936     """
    937     result = {}
    938     for k, v in sorted(current.iteritems()):
    939       if v:
    940         result[k] = v
    941       else:
    942         while True:
    943           result[k] = getpass.getpass(
    944               "Enter password for %s key> " % k).strip()
    945           if result[k]:
    946             break
    947     return result
    948 
    949   def UpdateAndReadFile(self, current):
    950     if not self.editor or not self.pwfile:
    951       return self.PromptResult(current)
    952 
    953     f = open(self.pwfile, "w")
    954     os.chmod(self.pwfile, 0o600)
    955     f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    956     f.write("# (Additional spaces are harmless.)\n\n")
    957 
    958     first_line = None
    959     sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    960     for i, (_, k, v) in enumerate(sorted_list):
    961       f.write("[[[  %s  ]]] %s\n" % (v, k))
    962       if not v and first_line is None:
    963         # position cursor on first line with no password.
    964         first_line = i + 4
    965     f.close()
    966 
    967     p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    968     _, _ = p.communicate()
    969 
    970     return self.ReadFile()
    971 
    972   def ReadFile(self):
    973     result = {}
    974     if self.pwfile is None:
    975       return result
    976     try:
    977       f = open(self.pwfile, "r")
    978       for line in f:
    979         line = line.strip()
    980         if not line or line[0] == '#':
    981           continue
    982         m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
    983         if not m:
    984           print("failed to parse password file: ", line)
    985         else:
    986           result[m.group(2)] = m.group(1)
    987       f.close()
    988     except IOError as e:
    989       if e.errno != errno.ENOENT:
    990         print("error reading password file: ", str(e))
    991     return result
    992 
    993 
    994 def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
    995              compress_type=None):
    996   import datetime
    997 
    998   # http://b/18015246
    999   # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
   1000   # for files larger than 2GiB. We can work around this by adjusting their
   1001   # limit. Note that `zipfile.writestr()` will not work for strings larger than
   1002   # 2GiB. The Python interpreter sometimes rejects strings that large (though
   1003   # it isn't clear to me exactly what circumstances cause this).
   1004   # `zipfile.write()` must be used directly to work around this.
   1005   #
   1006   # This mess can be avoided if we port to python3.
   1007   saved_zip64_limit = zipfile.ZIP64_LIMIT
   1008   zipfile.ZIP64_LIMIT = (1 << 32) - 1
   1009 
   1010   if compress_type is None:
   1011     compress_type = zip_file.compression
   1012   if arcname is None:
   1013     arcname = filename
   1014 
   1015   saved_stat = os.stat(filename)
   1016 
   1017   try:
   1018     # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
   1019     # file to be zipped and reset it when we're done.
   1020     os.chmod(filename, perms)
   1021 
   1022     # Use a fixed timestamp so the output is repeatable.
   1023     epoch = datetime.datetime.fromtimestamp(0)
   1024     timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
   1025     os.utime(filename, (timestamp, timestamp))
   1026 
   1027     zip_file.write(filename, arcname=arcname, compress_type=compress_type)
   1028   finally:
   1029     os.chmod(filename, saved_stat.st_mode)
   1030     os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
   1031     zipfile.ZIP64_LIMIT = saved_zip64_limit
   1032 
   1033 
   1034 def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
   1035                 compress_type=None):
   1036   """Wrap zipfile.writestr() function to work around the zip64 limit.
   1037 
   1038   Even with the ZIP64_LIMIT workaround, it won't allow writing a string
   1039   longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
   1040   when calling crc32(bytes).
   1041 
   1042   But it still works fine to write a shorter string into a large zip file.
   1043   We should use ZipWrite() whenever possible, and only use ZipWriteStr()
   1044   when we know the string won't be too long.
   1045   """
   1046 
   1047   saved_zip64_limit = zipfile.ZIP64_LIMIT
   1048   zipfile.ZIP64_LIMIT = (1 << 32) - 1
   1049 
   1050   if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
   1051     zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
   1052     zinfo.compress_type = zip_file.compression
   1053     if perms is None:
   1054       perms = 0o100644
   1055   else:
   1056     zinfo = zinfo_or_arcname
   1057 
   1058   # If compress_type is given, it overrides the value in zinfo.
   1059   if compress_type is not None:
   1060     zinfo.compress_type = compress_type
   1061 
   1062   # If perms is given, it has a priority.
   1063   if perms is not None:
   1064     # If perms doesn't set the file type, mark it as a regular file.
   1065     if perms & 0o770000 == 0:
   1066       perms |= 0o100000
   1067     zinfo.external_attr = perms << 16
   1068 
   1069   # Use a fixed timestamp so the output is repeatable.
   1070   zinfo.date_time = (2009, 1, 1, 0, 0, 0)
   1071 
   1072   zip_file.writestr(zinfo, data)
   1073   zipfile.ZIP64_LIMIT = saved_zip64_limit
   1074 
   1075 
   1076 def ZipClose(zip_file):
   1077   # http://b/18015246
   1078   # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
   1079   # central directory.
   1080   saved_zip64_limit = zipfile.ZIP64_LIMIT
   1081   zipfile.ZIP64_LIMIT = (1 << 32) - 1
   1082 
   1083   zip_file.close()
   1084 
   1085   zipfile.ZIP64_LIMIT = saved_zip64_limit
   1086 
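# Illustrative sketch (not part of the original module): ZipWrite(),
# ZipWriteStr() and ZipClose() are meant to be used together so every path
# through the zip64 workaround is covered.  File names and contents are
# hypothetical.
#
#   output_zip = zipfile.ZipFile("update.zip", "w", zipfile.ZIP_DEFLATED)
#   ZipWrite(output_zip, "system.new.dat", arcname="system.new.dat")
#   ZipWriteStr(output_zip, "META-INF/com/android/metadata", "post-timestamp=0\n")
#   ZipClose(output_zip)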
   1087 
   1088 class DeviceSpecificParams(object):
   1089   module = None
   1090   def __init__(self, **kwargs):
   1091     """Keyword arguments to the constructor become attributes of this
   1092     object, which is passed to all functions in the device-specific
   1093     module."""
   1094     for k, v in kwargs.iteritems():
   1095       setattr(self, k, v)
   1096     self.extras = OPTIONS.extras
   1097 
   1098     if self.module is None:
   1099       path = OPTIONS.device_specific
   1100       if not path:
   1101         return
   1102       try:
   1103         if os.path.isdir(path):
   1104           info = imp.find_module("releasetools", [path])
   1105         else:
   1106           d, f = os.path.split(path)
   1107           b, x = os.path.splitext(f)
   1108           if x == ".py":
   1109             f = b
   1110           info = imp.find_module(f, [d])
   1111         print("loaded device-specific extensions from", path)
   1112         self.module = imp.load_module("device_specific", *info)
   1113       except ImportError:
   1114         print("unable to load device-specific module; assuming none")
   1115 
   1116   def _DoCall(self, function_name, *args, **kwargs):
   1117     """Call the named function in the device-specific module, passing
   1118     the given args and kwargs.  The first argument to the call will be
   1119     the DeviceSpecific object itself.  If there is no module, or the
   1120     module does not define the function, return the value of the
   1121     'default' kwarg (which itself defaults to None)."""
   1122     if self.module is None or not hasattr(self.module, function_name):
   1123       return kwargs.get("default", None)
   1124     return getattr(self.module, function_name)(*((self,) + args), **kwargs)
   1125 
   1126   def FullOTA_Assertions(self):
   1127     """Called after emitting the block of assertions at the top of a
   1128     full OTA package.  Implementations can add whatever additional
   1129     assertions they like."""
   1130     return self._DoCall("FullOTA_Assertions")
   1131 
   1132   def FullOTA_InstallBegin(self):
   1133     """Called at the start of full OTA installation."""
   1134     return self._DoCall("FullOTA_InstallBegin")
   1135 
   1136   def FullOTA_InstallEnd(self):
   1137     """Called at the end of full OTA installation; typically this is
   1138     used to install the image for the device's baseband processor."""
   1139     return self._DoCall("FullOTA_InstallEnd")
   1140 
   1141   def IncrementalOTA_Assertions(self):
   1142     """Called after emitting the block of assertions at the top of an
   1143     incremental OTA package.  Implementations can add whatever
   1144     additional assertions they like."""
   1145     return self._DoCall("IncrementalOTA_Assertions")
   1146 
   1147   def IncrementalOTA_VerifyBegin(self):
   1148     """Called at the start of the verification phase of incremental
   1149     OTA installation; additional checks can be placed here to abort
   1150     the script before any changes are made."""
   1151     return self._DoCall("IncrementalOTA_VerifyBegin")
   1152 
   1153   def IncrementalOTA_VerifyEnd(self):
   1154     """Called at the end of the verification phase of incremental OTA
   1155     installation; additional checks can be placed here to abort the
   1156     script before any changes are made."""
   1157     return self._DoCall("IncrementalOTA_VerifyEnd")
   1158 
   1159   def IncrementalOTA_InstallBegin(self):
   1160     """Called at the start of incremental OTA installation (after
   1161     verification is complete)."""
   1162     return self._DoCall("IncrementalOTA_InstallBegin")
   1163 
   1164   def IncrementalOTA_InstallEnd(self):
   1165     """Called at the end of incremental OTA installation; typically
   1166     this is used to install the image for the device's baseband
   1167     processor."""
   1168     return self._DoCall("IncrementalOTA_InstallEnd")
   1169 
   1170   def VerifyOTA_Assertions(self):
   1171     return self._DoCall("VerifyOTA_Assertions")
   1172 
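# Illustrative sketch (not part of the original module): a minimal
# device-specific releasetools module, loaded via -s/--device_specific.  The
# hook name matches the _DoCall() wrappers above; the attribute names on
# 'info' (input_zip, output_zip, script) and the radio image path are
# assumptions about what the calling OTA script supplies.
#
#   # vendor/example/device/releasetools.py  (hypothetical)
#   import common
#
#   def FullOTA_InstallEnd(info):
#     # 'info' is the DeviceSpecificParams object passed as the first argument.
#     radio_img = info.input_zip.read("RADIO/radio.img")
#     common.ZipWriteStr(info.output_zip, "radio.img", radio_img)
#     info.script.Print("Writing radio image...")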
   1173 class File(object):
   1174   def __init__(self, name, data, compress_size = None):
   1175     self.name = name
   1176     self.data = data
   1177     self.size = len(data)
   1178     self.compress_size = compress_size or self.size
   1179     self.sha1 = sha1(data).hexdigest()
   1180 
   1181   @classmethod
   1182   def FromLocalFile(cls, name, diskname):
   1183     f = open(diskname, "rb")
   1184     data = f.read()
   1185     f.close()
   1186     return File(name, data)
   1187 
   1188   def WriteToTemp(self):
   1189     t = tempfile.NamedTemporaryFile()
   1190     t.write(self.data)
   1191     t.flush()
   1192     return t
   1193 
   1194   def WriteToDir(self, d):
   1195     with open(os.path.join(d, self.name), "wb") as fp:
   1196       fp.write(self.data)
   1197 
   1198   def AddToZip(self, z, compression=None):
   1199     ZipWriteStr(z, self.name, self.data, compress_type=compression)
   1200 
   1201 DIFF_PROGRAM_BY_EXT = {
   1202     ".gz" : "imgdiff",
   1203     ".zip" : ["imgdiff", "-z"],
   1204     ".jar" : ["imgdiff", "-z"],
   1205     ".apk" : ["imgdiff", "-z"],
   1206     ".img" : "imgdiff",
   1207     }
   1208 
   1209 class Difference(object):
   1210   def __init__(self, tf, sf, diff_program=None):
   1211     self.tf = tf
   1212     self.sf = sf
   1213     self.patch = None
   1214     self.diff_program = diff_program
   1215 
   1216   def ComputePatch(self):
   1217     """Compute the patch (as a string of data) needed to turn sf into
   1218     tf.  Returns the same tuple as GetPatch()."""
   1219 
   1220     tf = self.tf
   1221     sf = self.sf
   1222 
   1223     if self.diff_program:
   1224       diff_program = self.diff_program
   1225     else:
   1226       ext = os.path.splitext(tf.name)[1]
   1227       diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
   1228 
   1229     ttemp = tf.WriteToTemp()
   1230     stemp = sf.WriteToTemp()
   1231 
   1232     ext = os.path.splitext(tf.name)[1]
   1233 
   1234     try:
   1235       ptemp = tempfile.NamedTemporaryFile()
   1236       if isinstance(diff_program, list):
   1237         cmd = copy.copy(diff_program)
   1238       else:
   1239         cmd = [diff_program]
   1240       cmd.append(stemp.name)
   1241       cmd.append(ttemp.name)
   1242       cmd.append(ptemp.name)
   1243       p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   1244       err = []
   1245       def run():
   1246         _, e = p.communicate()
   1247         if e:
   1248           err.append(e)
   1249       th = threading.Thread(target=run)
   1250       th.start()
   1251       th.join(timeout=300)   # 5 mins
   1252       if th.is_alive():
   1253         print("WARNING: diff command timed out")
   1254         p.terminate()
   1255         th.join(5)
   1256         if th.is_alive():
   1257           p.kill()
   1258           th.join()
   1259 
   1260       if err or p.returncode != 0:
   1261         print("WARNING: failure running %s:\n%s\n" % (
   1262             diff_program, "".join(err)))
   1263         self.patch = None
   1264         return None, None, None
   1265       diff = ptemp.read()
   1266     finally:
   1267       ptemp.close()
   1268       stemp.close()
   1269       ttemp.close()
   1270 
   1271     self.patch = diff
   1272     return self.tf, self.sf, self.patch
   1273 
   1274 
   1275   def GetPatch(self):
   1276     """Return a tuple (target_file, source_file, patch_data).
   1277     patch_data may be None if ComputePatch hasn't been called, or if
   1278     computing the patch failed."""
   1279     return self.tf, self.sf, self.patch
   1280 
   1281 
   1282 def ComputeDifferences(diffs):
   1283   """Call ComputePatch on all the Difference objects in 'diffs'."""
   1284   print(len(diffs), "diffs to compute")
   1285 
   1286   # Do the largest files first, to try and reduce the long-pole effect.
   1287   by_size = [(i.tf.size, i) for i in diffs]
   1288   by_size.sort(reverse=True)
   1289   by_size = [i[1] for i in by_size]
   1290 
   1291   lock = threading.Lock()
   1292   diff_iter = iter(by_size)   # accessed under lock
   1293 
   1294   def worker():
   1295     try:
   1296       lock.acquire()
   1297       for d in diff_iter:
   1298         lock.release()
   1299         start = time.time()
   1300         d.ComputePatch()
   1301         dur = time.time() - start
   1302         lock.acquire()
   1303 
   1304         tf, sf, patch = d.GetPatch()
   1305         if sf.name == tf.name:
   1306           name = tf.name
   1307         else:
   1308           name = "%s (%s)" % (tf.name, sf.name)
   1309         if patch is None:
   1310           print("patching failed!                                  %s" % (name,))
   1311         else:
   1312           print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
   1313               dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
   1314       lock.release()
   1315     except Exception as e:
   1316       print(e)
   1317       raise
   1318 
   1319   # start worker threads; wait for them all to finish.
   1320   threads = [threading.Thread(target=worker)
   1321              for i in range(OPTIONS.worker_threads)]
   1322   for th in threads:
   1323     th.start()
   1324   while threads:
   1325     threads.pop().join()
   1326 
   1327 
   1328 class BlockDifference(object):
   1329   def __init__(self, partition, tgt, src=None, check_first_block=False,
   1330                version=None, disable_imgdiff=False):
   1331     self.tgt = tgt
   1332     self.src = src
   1333     self.partition = partition
   1334     self.check_first_block = check_first_block
   1335     self.disable_imgdiff = disable_imgdiff
   1336 
   1337     if version is None:
   1338       version = 1
   1339       if OPTIONS.info_dict:
   1340         version = max(
   1341             int(i) for i in
   1342             OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
   1343     assert version >= 3
   1344     self.version = version
   1345 
   1346     b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
   1347                                     version=self.version,
   1348                                     disable_imgdiff=self.disable_imgdiff)
   1349     tmpdir = tempfile.mkdtemp()
   1350     OPTIONS.tempfiles.append(tmpdir)
   1351     self.path = os.path.join(tmpdir, partition)
   1352     b.Compute(self.path)
   1353     self._required_cache = b.max_stashed_size
   1354     self.touched_src_ranges = b.touched_src_ranges
   1355     self.touched_src_sha1 = b.touched_src_sha1
   1356 
   1357     if src is None:
   1358       _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
   1359     else:
   1360       _, self.device = GetTypeAndDevice("/" + partition,
   1361                                         OPTIONS.source_info_dict)
   1362 
   1363   @property
   1364   def required_cache(self):
   1365     return self._required_cache
   1366 
   1367   def WriteScript(self, script, output_zip, progress=None):
   1368     if not self.src:
   1369       # write the output unconditionally
   1370       script.Print("Patching %s image unconditionally..." % (self.partition,))
   1371     else:
   1372       script.Print("Patching %s image after verification." % (self.partition,))
   1373 
   1374     if progress:
   1375       script.ShowProgress(progress, 0)
   1376     self._WriteUpdate(script, output_zip)
   1377     if OPTIONS.verify:
   1378       self._WritePostInstallVerifyScript(script)
   1379 
   1380   def WriteStrictVerifyScript(self, script):
   1381     """Verify all the blocks in the care_map, including clobbered blocks.
   1382 
   1383     This differs from the WriteVerifyScript() function: a) it prints different
   1384     error messages; b) it doesn't allow half-way updated images to pass the
   1385     verification."""
   1386 
   1387     partition = self.partition
   1388     script.Print("Verifying %s..." % (partition,))
   1389     ranges = self.tgt.care_map
   1390     ranges_str = ranges.to_string_raw()
   1391     script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
   1392                        'ui_print("    Verified.") || '
   1393                        'ui_print("\\"%s\\" has unexpected contents.");' % (
   1394                        self.device, ranges_str,
   1395                        self.tgt.TotalSha1(include_clobbered_blocks=True),
   1396                        self.device))
   1397     script.AppendExtra("")
   1398 
   1399   def WriteVerifyScript(self, script, touched_blocks_only=False):
   1400     partition = self.partition
   1401 
   1402     # full OTA
   1403     if not self.src:
   1404       script.Print("Image %s will be patched unconditionally." % (partition,))
   1405 
   1406     # incremental OTA
   1407     else:
   1408       if touched_blocks_only:
   1409         ranges = self.touched_src_ranges
   1410         expected_sha1 = self.touched_src_sha1
   1411       else:
   1412         ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
   1413         expected_sha1 = self.src.TotalSha1()
   1414 
   1415       # No blocks to be checked, skipping.
   1416       if not ranges:
   1417         return
   1418 
   1419       ranges_str = ranges.to_string_raw()
   1420       script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
   1421                           'block_image_verify("%s", '
   1422                           'package_extract_file("%s.transfer.list"), '
   1423                           '"%s.new.dat", "%s.patch.dat")) then') % (
   1424                           self.device, ranges_str, expected_sha1,
   1425                           self.device, partition, partition, partition))
   1426       script.Print('Verified %s image...' % (partition,))
   1427       script.AppendExtra('else')
   1428 
   1429       if self.version >= 4:
   1430 
   1431         # Bug: 21124327
   1432         # When generating incrementals for the system and vendor partitions in
   1433         # version 4 or newer, explicitly check the first block (which contains
   1434         # the superblock) of the partition to see if it's what we expect. If
   1435         # this check fails, give an explicit log message about the partition
   1436         # having been remounted R/W (the most likely explanation).
   1437         if self.check_first_block:
   1438           script.AppendExtra('check_first_block("%s");' % (self.device,))
   1439 
        # If version >= 4, try block recovery before aborting the update.
   1441         if partition == "system":
   1442           code = ErrorCode.SYSTEM_RECOVER_FAILURE
   1443         else:
   1444           code = ErrorCode.VENDOR_RECOVER_FAILURE
   1445         script.AppendExtra((
   1446             'ifelse (block_image_recover("{device}", "{ranges}") && '
   1447             'block_image_verify("{device}", '
   1448             'package_extract_file("{partition}.transfer.list"), '
   1449             '"{partition}.new.dat", "{partition}.patch.dat"), '
   1450             'ui_print("{partition} recovered successfully."), '
   1451             'abort("E{code}: {partition} partition fails to recover"));\n'
   1452             'endif;').format(device=self.device, ranges=ranges_str,
   1453                              partition=partition, code=code))
   1454 
      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if the partition happens to match the checksum of the target:
      # a) if version < 3, operations like move and erase make changes
      #    unconditionally and would damage the partition;
      # b) if version >= 3, execution won't even reach here.
   1460       else:
   1461         if partition == "system":
   1462           code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
   1463         else:
   1464           code = ErrorCode.VENDOR_VERIFICATION_FAILURE
   1465         script.AppendExtra((
   1466             'abort("E%d: %s partition has unexpected contents");\n'
   1467             'endif;') % (code, partition))
   1468 
   1469   def _WritePostInstallVerifyScript(self, script):
   1470     partition = self.partition
   1471     script.Print('Verifying the updated %s image...' % (partition,))
   1472     # Unlike pre-install verification, clobbered_blocks should not be ignored.
   1473     ranges = self.tgt.care_map
   1474     ranges_str = ranges.to_string_raw()
   1475     script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
   1476                        self.device, ranges_str,
   1477                        self.tgt.TotalSha1(include_clobbered_blocks=True)))
   1478 
   1479     # Bug: 20881595
   1480     # Verify that extended blocks are really zeroed out.
   1481     if self.tgt.extended:
   1482       ranges_str = self.tgt.extended.to_string_raw()
   1483       script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
   1484                          self.device, ranges_str,
   1485                          self._HashZeroBlocks(self.tgt.extended.size())))
   1486       script.Print('Verified the updated %s image.' % (partition,))
   1487       if partition == "system":
   1488         code = ErrorCode.SYSTEM_NONZERO_CONTENTS
   1489       else:
   1490         code = ErrorCode.VENDOR_NONZERO_CONTENTS
   1491       script.AppendExtra(
   1492           'else\n'
   1493           '  abort("E%d: %s partition has unexpected non-zero contents after '
   1494           'OTA update");\n'
   1495           'endif;' % (code, partition))
   1496     else:
   1497       script.Print('Verified the updated %s image.' % (partition,))
   1498 
   1499     if partition == "system":
   1500       code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
   1501     else:
   1502       code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
   1503 
   1504     script.AppendExtra(
   1505         'else\n'
   1506         '  abort("E%d: %s partition has unexpected contents after OTA '
   1507         'update");\n'
   1508         'endif;' % (code, partition))
   1509 
   1510   def _WriteUpdate(self, script, output_zip):
   1511     ZipWrite(output_zip,
   1512              '{}.transfer.list'.format(self.path),
   1513              '{}.transfer.list'.format(self.partition))
   1514     ZipWrite(output_zip,
   1515              '{}.new.dat'.format(self.path),
   1516              '{}.new.dat'.format(self.partition))
   1517     ZipWrite(output_zip,
   1518              '{}.patch.dat'.format(self.path),
   1519              '{}.patch.dat'.format(self.partition),
   1520              compress_type=zipfile.ZIP_STORED)
   1521 
   1522     if self.partition == "system":
   1523       code = ErrorCode.SYSTEM_UPDATE_FAILURE
   1524     else:
   1525       code = ErrorCode.VENDOR_UPDATE_FAILURE
   1526 
   1527     call = ('block_image_update("{device}", '
   1528             'package_extract_file("{partition}.transfer.list"), '
   1529             '"{partition}.new.dat", "{partition}.patch.dat") ||\n'
   1530             '  abort("E{code}: Failed to update {partition} image.");'.format(
   1531                 device=self.device, partition=self.partition, code=code))
   1532     script.AppendExtra(script.WordWrap(call))
   1533 
   1534   def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
   1535     data = source.ReadRangeSet(ranges)
   1536     ctx = sha1()
   1537 
   1538     for p in data:
   1539       ctx.update(p)
   1540 
   1541     return ctx.hexdigest()
   1542 
   1543   def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
   1544     """Return the hash value for all zero blocks."""
   1545     zero_block = '\x00' * 4096
   1546     ctx = sha1()
   1547     for _ in range(num_blocks):
   1548       ctx.update(zero_block)
   1549 
   1550     return ctx.hexdigest()
   1551 
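# A minimal usage sketch for BlockDifference (illustrative only: "system_tgt"
# and "system_src" stand for the target/source system images, e.g.
# sparse_img.SparseImage objects, and "script"/"output_zip" for the edify
# script generator and output zip that ota_from_target_files passes around):
#
#   block_diff = BlockDifference("system", system_tgt, src=system_src,
#                                check_first_block=True)
#   block_diff.WriteVerifyScript(script, touched_blocks_only=True)
#   block_diff.WriteScript(script, output_zip, progress=0.8)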
   1552 
   1553 DataImage = blockimgdiff.DataImage
   1554 
   1555 # map recovery.fstab's fs_types to mount/format "partition types"
   1556 PARTITION_TYPES = {
   1557     "ext4": "EMMC",
   1558     "emmc": "EMMC",
   1559     "f2fs": "EMMC",
   1560     "squashfs": "EMMC"
   1561 }
   1562 
def GetTypeAndDevice(mount_point, info):
  """Return a (partition_type, device) tuple for the given mount point,
  based on the fstab parsed into the given info dict. The fs_type is mapped
  through PARTITION_TYPES. Raises KeyError if the info dict has no fstab."""
  fstab = info["fstab"]
  if fstab:
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  else:
    raise KeyError
   1570 
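# Usage sketch for GetTypeAndDevice (illustrative; the device path depends on
# the board's recovery.fstab):
#
#   typ, device = GetTypeAndDevice("/system", OPTIONS.info_dict)
#   # e.g. typ == "EMMC", device == "/dev/block/bootdevice/by-name/system"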
   1571 
def ParseCertificate(data):
  """Parse a PEM-format certificate and return its DER-encoded contents,
  i.e. the base64-decoded body between the BEGIN/END CERTIFICATE markers."""
  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # str.decode('base64') is Python 2 only.
  cert = "".join(cert).decode('base64')
  return cert
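
# Usage sketch for ParseCertificate (illustrative; "platform.x509.pem" is a
# hypothetical key file):
#
#   with open("platform.x509.pem") as f:
#     der_cert = ParseCertificate(f.read())
#   # der_cert now holds the raw DER bytes of the certificate.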
   1585 
   1586 def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
   1587                       info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Write it via output_sink, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info_dict should be the dictionary returned
  by common.LoadInfoDict() on the input target_files.
  """
   1599 
   1600   if info_dict is None:
   1601     info_dict = OPTIONS.info_dict
   1602 
   1603   full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
   1604   system_root_image = info_dict.get("system_root_image", None) == "true"
   1605 
   1606   if full_recovery_image:
   1607     output_sink("etc/recovery.img", recovery_img.data)
   1608 
   1609   else:
   1610     diff_program = ["imgdiff"]
   1611     path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
   1612     if os.path.exists(path):
   1613       diff_program.append("-b")
   1614       diff_program.append(path)
   1615       bonus_args = "-b /system/etc/recovery-resource.dat"
   1616     else:
   1617       bonus_args = ""
   1618 
   1619     d = Difference(recovery_img, boot_img, diff_program=diff_program)
   1620     _, _, patch = d.ComputePatch()
   1621     output_sink("recovery-from-boot.p", patch)
   1622 
   1623   try:
    # The following GetTypeAndDevice()s need to use the device paths in the
    # target info_dict instead of source_info_dict.
   1626     boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
   1627     recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
   1628   except KeyError:
   1629     return
   1630 
   1631   if full_recovery_image:
   1632     sh = """#!/system/bin/sh
   1633 if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
   1634   applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
   1635 else
   1636   log -t recovery "Recovery image already installed"
   1637 fi
   1638 """ % {'type': recovery_type,
   1639        'device': recovery_device,
   1640        'sha1': recovery_img.sha1,
   1641        'size': recovery_img.size}
   1642   else:
   1643     sh = """#!/system/bin/sh
   1644 if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
   1645   applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
   1646 else
   1647   log -t recovery "Recovery image already installed"
   1648 fi
   1649 """ % {'boot_size': boot_img.size,
   1650        'boot_sha1': boot_img.sha1,
   1651        'recovery_size': recovery_img.size,
   1652        'recovery_sha1': recovery_img.sha1,
   1653        'boot_type': boot_type,
   1654        'boot_device': boot_device,
   1655        'recovery_type': recovery_type,
   1656        'recovery_device': recovery_device,
   1657        'bonus_args': bonus_args}
   1658 
   1659   # The install script location moved from /system/etc to /system/bin
   1660   # in the L release.  Parse init.*.rc files to find out where the
   1661   # target-files expects it to be, and put it there.
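  # A matching service line typically looks like the following (illustrative;
  # the exact path varies by release):
  #     service flash_recovery /system/bin/install-recovery.sh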
   1662   sh_location = "etc/install-recovery.sh"
   1663   found = False
   1664   if system_root_image:
   1665     init_rc_dir = os.path.join(input_dir, "ROOT")
   1666   else:
   1667     init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
   1668   init_rc_files = os.listdir(init_rc_dir)
   1669   for init_rc_file in init_rc_files:
   1670     if (not init_rc_file.startswith('init.') or
   1671         not init_rc_file.endswith('.rc')):
   1672       continue
   1673 
   1674     with open(os.path.join(init_rc_dir, init_rc_file)) as f:
   1675       for line in f:
   1676         m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
   1677         if m:
   1678           sh_location = m.group(1)
   1679           found = True
   1680           break
   1681 
   1682     if found:
   1683       break
   1684 
   1685   print("putting script in", sh_location)
   1686 
   1687   output_sink(sh_location, sh)
   1688
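# A minimal usage sketch for MakeRecoveryPatch (illustrative only;
# "unpacked_dir" is assumed to be an unpacked target-files directory, and
# "recovery_img"/"boot_img" File objects for the two images; the hypothetical
# sink below simply writes files under the unpacked SYSTEM/ directory):
#
#   def output_sink(fn, data):
#     with open(os.path.join(unpacked_dir, "SYSTEM", fn), "wb") as f:
#       f.write(data)
#
#   MakeRecoveryPatch(unpacked_dir, output_sink, recovery_img, boot_img,
#                     info_dict=OPTIONS.info_dict)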