#!/usr/bin/env python
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Given a target-files zipfile, produces an OTA package that installs
that build. An incremental OTA is produced if -i is given, otherwise
a full OTA is produced.

Usage: ota_from_target_files [flags] input_target_files output_ota_package

  -k (--package_key) <key> Key to use to sign the package (default is
      the value of default_system_dev_certificate from the input
      target-files's META/misc_info.txt, or
      "build/target/product/security/testkey" if that value is not
      specified).

      For incremental OTAs, the default value is based on the source
      target-files, not the target build.

  -i (--incremental_from) <file>
      Generate an incremental OTA using the given target-files zip as
      the starting build.

  --full_radio
      When generating an incremental OTA, always include a full copy of the
      radio image. This option is only meaningful when -i is specified,
      because a full radio is always included in a full OTA if applicable.

  --full_bootloader
      Similar to --full_radio. When generating an incremental OTA, always
      include a full copy of the bootloader image.

  --verify
      Remount and verify the checksums of the files written to the system and
      vendor (if used) partitions. Non-A/B incremental OTAs only.

  -o (--oem_settings) <main_file[,additional_files...]>
      Comma-separated list of files used to specify the expected OEM-specific
      properties on the OEM partition of the intended device. Multiple expected
      values can be used by providing multiple files. Only the first dict will
      be used to compute the fingerprint, while the rest will be used to assert
      OEM-specific properties.

  --oem_no_mount
      For devices with OEM-specific properties but without an OEM partition,
      do not mount the OEM partition in the updater-script. This should be
      very rarely used, since it's expected to have a dedicated OEM partition
      for OEM-specific properties. Only meaningful when -o is specified.

  --wipe_user_data
      Generate an OTA package that will wipe the user data partition
      when installed.

  --downgrade
      Intentionally generate an incremental OTA that updates from a newer build
      to an older one (e.g. downgrading from P preview back to O MR1).
      "ota-downgrade=yes" will be set in the package metadata file. A data wipe
      will always be enforced when using this flag, so "ota-wipe=yes" will also
      be included in the metadata file. The update-binary in the source build
      will be used in the OTA package, unless the --binary flag is specified.
      Please also check the comment for --override_timestamp below.

  --override_timestamp
      Intentionally generate an incremental OTA that updates from a newer build
      to an older one (based on timestamp comparison), by setting the downgrade
      flag in the package metadata.
      This differs from the --downgrade flag in that we don't enforce a data
      wipe, because we know for sure this is NOT an actual downgrade case, but
      two builds that happen to be cut in a reverse order (e.g. from two
      branches). A legit use case is that we cut a new build C (after having A
      and B), but want to enforce an update path of A -> C -> B. Specifying
      --downgrade may not help, since that would enforce a data wipe for the
      C -> B update.

      We used to set a fake timestamp in the package metadata for this flow. But
      now we consolidate the two cases (i.e. an actual downgrade, or a downgrade
      based on timestamp) with the same "ota-downgrade=yes" flag, with the
      difference being whether "ota-wipe=yes" is set.

  -e (--extra_script) <file>
      Insert the contents of file at the end of the update script.

  -2 (--two_step)
      Generate a 'two-step' OTA package, where recovery is updated
      first, so that any changes made to the system partition are done
      using the new recovery (new kernel, etc.).

  --include_secondary
      Additionally include the payload for secondary slot images (default:
      False). Only meaningful when generating A/B OTAs.

      By default, an A/B OTA package doesn't contain the images for the
      secondary slot (e.g. system_other.img). Specifying this flag allows
      generating a separate payload that will install secondary slot images.

      Such a package needs to be applied in a two-stage manner, with a reboot
      in-between. During the first stage, the updater applies the primary
      payload only. Upon finishing, it reboots the device into the newly updated
      slot. It then continues to install the secondary payload to the inactive
      slot, but without switching the active slot at the end (needs the matching
      support in update_engine, i.e. SWITCH_SLOT_ON_REBOOT flag).

      Due to the special install procedure, the secondary payload will always
      be generated as a full payload.

  --block
      Generate a block-based OTA for a non-A/B device. We have deprecated the
      support for file-based OTAs since O. Block-based OTAs will be used by
      default for all non-A/B devices. Keeping this flag here to not break
      existing callers.

  -b (--binary) <file>
      Use the given binary as the update-binary in the output package,
      instead of the binary in the build's target_files. Use for
      development only.

  -t (--worker_threads) <int>
      Specifies the number of worker-threads that will be used when
      generating patches for incremental updates (defaults to 3).

  --stash_threshold <float>
      Specifies the threshold that will be used to compute the maximum
      allowed stash size (defaults to 0.8).

  --log_diff <file>
      Generate a log file that shows the differences in the source and target
      builds for an incremental package. This option is only meaningful when
      -i is specified.

  --payload_signer <signer>
      Specify the signer when signing the payload and metadata for A/B OTAs.
      By default (i.e. without this flag), it calls 'openssl pkeyutl' to sign
      with the package private key. If the private key cannot be accessed
      directly, a payload signer that knows how to do that should be specified.
      The signer will be supplied with "-inkey <path_to_key>",
      "-in <input_file>" and "-out <output_file>" parameters.

  --payload_signer_args <args>
      Specify the arguments needed for the payload signer.

  --skip_postinstall
      Skip the postinstall hooks when generating an A/B OTA package (default:
      False). Note that this discards ALL the hooks, including non-optional
      ones. Should only be used if the caller knows it's safe to do so (e.g. all
      the postinstall work is to dexopt apps and a data wipe will happen
      immediately after). Only meaningful when generating A/B OTAs.
"""

from __future__ import print_function

import multiprocessing
import os.path
import shlex
import shutil
import struct
import subprocess
import sys
import tempfile
import zipfile

import common
import edify_generator

if sys.hexversion < 0x02070000:
  print("Python 2.7 or newer is required.", file=sys.stderr)
  sys.exit(1)


OPTIONS = common.OPTIONS
OPTIONS.package_key = None
OPTIONS.incremental_source = None
OPTIONS.verify = False
OPTIONS.patch_threshold = 0.95
OPTIONS.wipe_user_data = False
OPTIONS.downgrade = False
OPTIONS.extra_script = None
OPTIONS.worker_threads = multiprocessing.cpu_count() // 2
if OPTIONS.worker_threads == 0:
  OPTIONS.worker_threads = 1
OPTIONS.two_step = False
OPTIONS.include_secondary = False
OPTIONS.no_signing = False
OPTIONS.block_based = True
OPTIONS.updater_binary = None
OPTIONS.oem_source = None
OPTIONS.oem_no_mount = False
OPTIONS.full_radio = False
OPTIONS.full_bootloader = False
# Stash size cannot exceed cache_size * threshold.
OPTIONS.cache_size = None
OPTIONS.stash_threshold = 0.8
OPTIONS.log_diff = None
OPTIONS.payload_signer = None
OPTIONS.payload_signer_args = []
OPTIONS.extracted_input = None
OPTIONS.key_passwords = []
OPTIONS.skip_postinstall = False


METADATA_NAME = 'META-INF/com/android/metadata'
POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
UNZIP_PATTERN = ['IMAGES/*', 'META/*']


class BuildInfo(object):
  """A class that holds the information for a given build.

  This class wraps up the property querying for a given source or target build.
  It abstracts away the logic of handling OEM-specific properties, and caches
  the commonly used properties such as fingerprint.

  There are two types of info dicts: a) the build-time info dict, which is
  generated at build time (i.e. included in a target_files zip); b) the OEM
  info dict, which is specified at package generation time (via the command
  line argument '--oem_settings'). If a build doesn't use OEM-specific
  properties (i.e. doesn't have "oem_fingerprint_properties" in its build-time
  info dict), all the queries are answered based on the build-time info dict
  only. Otherwise, when using OEM-specific properties, some of them are
  calculated from the two info dicts.

  Users can query properties similar to using a dict() (e.g. info['fstab']), or
  query build properties via GetBuildProp() or GetVendorBuildProp().

  Attributes:
    info_dict: The build-time info dict.
    is_ab: Whether it's a build that uses A/B OTA.
    oem_dicts: A list of OEM dicts.
    oem_props: A list of OEM properties that should be read from OEM dicts; None
        if the build doesn't use any OEM-specific property.
    fingerprint: The fingerprint of the build, which would be calculated based
        on OEM properties if applicable.
    device: The device name, which could come from OEM dicts if applicable.
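
  Example (an illustrative sketch only, mirroring how WriteFullOTAPackage()
  constructs its BuildInfo; the queried keys are ones used elsewhere in this
  file):

    target_info = BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
    fstab = target_info["fstab"]                                # dict-style query
    timestamp = target_info.GetBuildProp("ro.build.date.utc")  # build property
    print(target_info.fingerprint, target_info.device)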
244 """ 245 246 def __init__(self, info_dict, oem_dicts): 247 """Initializes a BuildInfo instance with the given dicts. 248 249 Arguments: 250 info_dict: The build-time info dict. 251 oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note 252 that it always uses the first dict to calculate the fingerprint or the 253 device name. The rest would be used for asserting OEM properties only 254 (e.g. one package can be installed on one of these devices). 255 """ 256 self.info_dict = info_dict 257 self.oem_dicts = oem_dicts 258 259 self._is_ab = info_dict.get("ab_update") == "true" 260 self._oem_props = info_dict.get("oem_fingerprint_properties") 261 262 if self._oem_props: 263 assert oem_dicts, "OEM source required for this build" 264 265 # These two should be computed only after setting self._oem_props. 266 self._device = self.GetOemProperty("ro.product.device") 267 self._fingerprint = self.CalculateFingerprint() 268 269 @property 270 def is_ab(self): 271 return self._is_ab 272 273 @property 274 def device(self): 275 return self._device 276 277 @property 278 def fingerprint(self): 279 return self._fingerprint 280 281 @property 282 def oem_props(self): 283 return self._oem_props 284 285 def __getitem__(self, key): 286 return self.info_dict[key] 287 288 def get(self, key, default=None): 289 return self.info_dict.get(key, default) 290 291 def GetBuildProp(self, prop): 292 """Returns the inquired build property.""" 293 try: 294 return self.info_dict.get("build.prop", {})[prop] 295 except KeyError: 296 raise common.ExternalError("couldn't find %s in build.prop" % (prop,)) 297 298 def GetVendorBuildProp(self, prop): 299 """Returns the inquired vendor build property.""" 300 try: 301 return self.info_dict.get("vendor.build.prop", {})[prop] 302 except KeyError: 303 raise common.ExternalError( 304 "couldn't find %s in vendor.build.prop" % (prop,)) 305 306 def GetOemProperty(self, key): 307 if self.oem_props is not None and key in self.oem_props: 308 return self.oem_dicts[0][key] 309 return self.GetBuildProp(key) 310 311 def CalculateFingerprint(self): 312 if self.oem_props is None: 313 return self.GetBuildProp("ro.build.fingerprint") 314 return "%s/%s/%s:%s" % ( 315 self.GetOemProperty("ro.product.brand"), 316 self.GetOemProperty("ro.product.name"), 317 self.GetOemProperty("ro.product.device"), 318 self.GetBuildProp("ro.build.thumbprint")) 319 320 def WriteMountOemScript(self, script): 321 assert self.oem_props is not None 322 recovery_mount_options = self.info_dict.get("recovery_mount_options") 323 script.Mount("/oem", recovery_mount_options) 324 325 def WriteDeviceAssertions(self, script, oem_no_mount): 326 # Read the property directly if not using OEM properties. 327 if not self.oem_props: 328 script.AssertDevice(self.device) 329 return 330 331 # Otherwise assert OEM properties. 332 if not self.oem_dicts: 333 raise common.ExternalError( 334 "No OEM file provided to answer expected assertions") 335 336 for prop in self.oem_props.split(): 337 values = [] 338 for oem_dict in self.oem_dicts: 339 if prop in oem_dict: 340 values.append(oem_dict[prop]) 341 if not values: 342 raise common.ExternalError( 343 "The OEM file is missing the property %s" % (prop,)) 344 script.AssertOemProperty(prop, values, oem_no_mount) 345 346 347 class PayloadSigner(object): 348 """A class that wraps the payload signing works. 

  When generating a Payload, hashes of the payload and metadata files will be
  signed with the device key, either by calling an external payload signer or
  by calling openssl with the package key. This class provides a unified
  interface, so that callers can just call PayloadSigner.Sign().

  If an external payload signer has been specified (OPTIONS.payload_signer), it
  calls the signer with the provided args (OPTIONS.payload_signer_args). Note
  that the signing key should be provided as part of the payload_signer_args.
  Otherwise without an external signer, it uses the package key
  (OPTIONS.package_key) and calls openssl for the signing works.
  """

  def __init__(self):
    if OPTIONS.payload_signer is None:
      # Prepare the payload signing key.
      private_key = OPTIONS.package_key + OPTIONS.private_key_suffix
      pw = OPTIONS.key_passwords[OPTIONS.package_key]

      cmd = ["openssl", "pkcs8", "-in", private_key, "-inform", "DER"]
      cmd.extend(["-passin", "pass:" + pw] if pw else ["-nocrypt"])
      signing_key = common.MakeTempFile(prefix="key-", suffix=".key")
      cmd.extend(["-out", signing_key])

      get_signing_key = common.Run(cmd, verbose=False, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
      stdoutdata, _ = get_signing_key.communicate()
      assert get_signing_key.returncode == 0, \
          "Failed to get signing key: {}".format(stdoutdata)

      self.signer = "openssl"
      self.signer_args = ["pkeyutl", "-sign", "-inkey", signing_key,
                          "-pkeyopt", "digest:sha256"]
    else:
      self.signer = OPTIONS.payload_signer
      self.signer_args = OPTIONS.payload_signer_args

  def Sign(self, in_file):
    """Signs the given input file. Returns the output filename."""
    out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
    cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
    signing = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdoutdata, _ = signing.communicate()
    assert signing.returncode == 0, \
        "Failed to sign the input file: {}".format(stdoutdata)
    return out_file


class Payload(object):
  """Manages the creation and the signing of an A/B OTA Payload."""

  PAYLOAD_BIN = 'payload.bin'
  PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt'
  SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
  SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'

  def __init__(self, secondary=False):
    """Initializes a Payload instance.

    Args:
      secondary: Whether it's generating a secondary payload (default: False).
    """
    # The place where the output from the subprocess should go.
    self._log_file = sys.stdout if OPTIONS.verbose else subprocess.PIPE
    self.payload_file = None
    self.payload_properties = None
    self.secondary = secondary

  def Generate(self, target_file, source_file=None, additional_args=None):
    """Generates a payload from the given target-files zip(s).

    Args:
      target_file: The filename of the target build target-files zip.
      source_file: The filename of the source build target-files zip; or None if
          generating a full OTA.
      additional_args: A list of additional args that should be passed to the
          brillo_update_payload script; or None.
426 """ 427 if additional_args is None: 428 additional_args = [] 429 430 payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin") 431 cmd = ["brillo_update_payload", "generate", 432 "--payload", payload_file, 433 "--target_image", target_file] 434 if source_file is not None: 435 cmd.extend(["--source_image", source_file]) 436 cmd.extend(additional_args) 437 p = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT) 438 stdoutdata, _ = p.communicate() 439 assert p.returncode == 0, \ 440 "brillo_update_payload generate failed: {}".format(stdoutdata) 441 442 self.payload_file = payload_file 443 self.payload_properties = None 444 445 def Sign(self, payload_signer): 446 """Generates and signs the hashes of the payload and metadata. 447 448 Args: 449 payload_signer: A PayloadSigner() instance that serves the signing work. 450 451 Raises: 452 AssertionError: On any failure when calling brillo_update_payload script. 453 """ 454 assert isinstance(payload_signer, PayloadSigner) 455 456 # 1. Generate hashes of the payload and metadata files. 457 payload_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin") 458 metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin") 459 cmd = ["brillo_update_payload", "hash", 460 "--unsigned_payload", self.payload_file, 461 "--signature_size", "256", 462 "--metadata_hash_file", metadata_sig_file, 463 "--payload_hash_file", payload_sig_file] 464 p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT) 465 p1.communicate() 466 assert p1.returncode == 0, "brillo_update_payload hash failed" 467 468 # 2. Sign the hashes. 469 signed_payload_sig_file = payload_signer.Sign(payload_sig_file) 470 signed_metadata_sig_file = payload_signer.Sign(metadata_sig_file) 471 472 # 3. Insert the signatures back into the payload file. 473 signed_payload_file = common.MakeTempFile(prefix="signed-payload-", 474 suffix=".bin") 475 cmd = ["brillo_update_payload", "sign", 476 "--unsigned_payload", self.payload_file, 477 "--payload", signed_payload_file, 478 "--signature_size", "256", 479 "--metadata_signature_file", signed_metadata_sig_file, 480 "--payload_signature_file", signed_payload_sig_file] 481 p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT) 482 p1.communicate() 483 assert p1.returncode == 0, "brillo_update_payload sign failed" 484 485 # 4. Dump the signed payload properties. 486 properties_file = common.MakeTempFile(prefix="payload-properties-", 487 suffix=".txt") 488 cmd = ["brillo_update_payload", "properties", 489 "--payload", signed_payload_file, 490 "--properties_file", properties_file] 491 p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT) 492 p1.communicate() 493 assert p1.returncode == 0, "brillo_update_payload properties failed" 494 495 if self.secondary: 496 with open(properties_file, "a") as f: 497 f.write("SWITCH_SLOT_ON_REBOOT=0\n") 498 499 if OPTIONS.wipe_user_data: 500 with open(properties_file, "a") as f: 501 f.write("POWERWASH=1\n") 502 503 self.payload_file = signed_payload_file 504 self.payload_properties = properties_file 505 506 def WriteToZip(self, output_zip): 507 """Writes the payload to the given zip. 508 509 Args: 510 output_zip: The output ZipFile instance. 
511 """ 512 assert self.payload_file is not None 513 assert self.payload_properties is not None 514 515 if self.secondary: 516 payload_arcname = Payload.SECONDARY_PAYLOAD_BIN 517 payload_properties_arcname = Payload.SECONDARY_PAYLOAD_PROPERTIES_TXT 518 else: 519 payload_arcname = Payload.PAYLOAD_BIN 520 payload_properties_arcname = Payload.PAYLOAD_PROPERTIES_TXT 521 522 # Add the signed payload file and properties into the zip. In order to 523 # support streaming, we pack them as ZIP_STORED. So these entries can be 524 # read directly with the offset and length pairs. 525 common.ZipWrite(output_zip, self.payload_file, arcname=payload_arcname, 526 compress_type=zipfile.ZIP_STORED) 527 common.ZipWrite(output_zip, self.payload_properties, 528 arcname=payload_properties_arcname, 529 compress_type=zipfile.ZIP_STORED) 530 531 532 def SignOutput(temp_zip_name, output_zip_name): 533 pw = OPTIONS.key_passwords[OPTIONS.package_key] 534 535 common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw, 536 whole_file=True) 537 538 539 def _LoadOemDicts(oem_source): 540 """Returns the list of loaded OEM properties dict.""" 541 if not oem_source: 542 return None 543 544 oem_dicts = [] 545 for oem_file in oem_source: 546 with open(oem_file) as fp: 547 oem_dicts.append(common.LoadDictionaryFromLines(fp.readlines())) 548 return oem_dicts 549 550 551 def _WriteRecoveryImageToBoot(script, output_zip): 552 """Find and write recovery image to /boot in two-step OTA. 553 554 In two-step OTAs, we write recovery image to /boot as the first step so that 555 we can reboot to there and install a new recovery image to /recovery. 556 A special "recovery-two-step.img" will be preferred, which encodes the correct 557 path of "/boot". Otherwise the device may show "device is corrupt" message 558 when booting into /boot. 559 560 Fall back to using the regular recovery.img if the two-step recovery image 561 doesn't exist. Note that rebuilding the special image at this point may be 562 infeasible, because we don't have the desired boot signer and keys when 563 calling ota_from_target_files.py. 564 """ 565 566 recovery_two_step_img_name = "recovery-two-step.img" 567 recovery_two_step_img_path = os.path.join( 568 OPTIONS.input_tmp, "IMAGES", recovery_two_step_img_name) 569 if os.path.exists(recovery_two_step_img_path): 570 recovery_two_step_img = common.GetBootableImage( 571 recovery_two_step_img_name, recovery_two_step_img_name, 572 OPTIONS.input_tmp, "RECOVERY") 573 common.ZipWriteStr( 574 output_zip, recovery_two_step_img_name, recovery_two_step_img.data) 575 print("two-step package: using %s in stage 1/3" % ( 576 recovery_two_step_img_name,)) 577 script.WriteRawImage("/boot", recovery_two_step_img_name) 578 else: 579 print("two-step package: using recovery.img in stage 1/3") 580 # The "recovery.img" entry has been written into package earlier. 
    script.WriteRawImage("/boot", "recovery.img")


def HasRecoveryPatch(target_files_zip):
  namelist = [name for name in target_files_zip.namelist()]
  return ("SYSTEM/recovery-from-boot.p" in namelist or
          "SYSTEM/etc/recovery.img" in namelist)


def HasVendorPartition(target_files_zip):
  try:
    target_files_zip.getinfo("VENDOR/")
    return True
  except KeyError:
    return False


def HasTrebleEnabled(target_files_zip, target_info):
  return (HasVendorPartition(target_files_zip) and
          target_info.GetBuildProp("ro.treble.enabled") == "true")


def WriteFingerprintAssertion(script, target_info, source_info):
  source_oem_props = source_info.oem_props
  target_oem_props = target_info.oem_props

  if source_oem_props is None and target_oem_props is None:
    script.AssertSomeFingerprint(
        source_info.fingerprint, target_info.fingerprint)
  elif source_oem_props is not None and target_oem_props is not None:
    script.AssertSomeThumbprint(
        target_info.GetBuildProp("ro.build.thumbprint"),
        source_info.GetBuildProp("ro.build.thumbprint"))
  elif source_oem_props is None and target_oem_props is not None:
    script.AssertFingerprintOrThumbprint(
        source_info.fingerprint,
        target_info.GetBuildProp("ro.build.thumbprint"))
  else:
    script.AssertFingerprintOrThumbprint(
        target_info.fingerprint,
        source_info.GetBuildProp("ro.build.thumbprint"))


def AddCompatibilityArchiveIfTrebleEnabled(target_zip, output_zip, target_info,
                                           source_info=None):
  """Adds compatibility info into the output zip for a Treble-enabled target.

  Metadata used for on-device compatibility verification is retrieved from
  target_zip, then added to compatibility.zip, which in turn is added to the
  output_zip archive.

  The compatibility archive should only be included for devices that have
  enabled Treble support.

  Args:
    target_zip: Zip file containing the source files to be included for OTA.
    output_zip: Zip file that will be sent for OTA.
    target_info: The BuildInfo instance that holds the target build info.
    source_info: The BuildInfo instance that holds the source build info, if
        generating an incremental OTA; None otherwise.
  """

  def AddCompatibilityArchive(system_updated, vendor_updated):
    """Adds compatibility info based on the system/vendor update status.

    Args:
      system_updated: If True, the system image will be updated and therefore
          its metadata should be included.
      vendor_updated: If True, the vendor image will be updated and therefore
          its metadata should be included.
    """
    # Determine what metadata we need. File names are relative to META/.
    compatibility_files = []
    vendor_metadata = ("vendor_manifest.xml", "vendor_matrix.xml")
    system_metadata = ("system_manifest.xml", "system_matrix.xml")
    if vendor_updated:
      compatibility_files += vendor_metadata
    if system_updated:
      compatibility_files += system_metadata

    # Create the new archive.
    compatibility_archive = tempfile.NamedTemporaryFile()
    compatibility_archive_zip = zipfile.ZipFile(
        compatibility_archive, "w", compression=zipfile.ZIP_DEFLATED)

    # Add metadata.
    for file_name in compatibility_files:
      target_file_name = "META/" + file_name

      if target_file_name in target_zip.namelist():
        data = target_zip.read(target_file_name)
        common.ZipWriteStr(compatibility_archive_zip, file_name, data)

    # Ensure files are written before we copy into output_zip.
    compatibility_archive_zip.close()

    # Only add the archive if we have any compatibility info.
    if compatibility_archive_zip.namelist():
      common.ZipWrite(output_zip, compatibility_archive.name,
                      arcname="compatibility.zip",
                      compress_type=zipfile.ZIP_STORED)

  # Will only proceed if the target has enabled Treble support (as well as
  # having a /vendor partition).
  if not HasTrebleEnabled(target_zip, target_info):
    return

  # We don't support OEM thumbprints in the Treble world (which calculates
  # fingerprints in a different way, as shown in CalculateFingerprint()).
  assert not target_info.oem_props

  # A full OTA carries the info for both system and vendor.
  if source_info is None:
    AddCompatibilityArchive(True, True)
    return

  assert not source_info.oem_props

  source_fp = source_info.fingerprint
  target_fp = target_info.fingerprint
  system_updated = source_fp != target_fp

  source_fp_vendor = source_info.GetVendorBuildProp(
      "ro.vendor.build.fingerprint")
  target_fp_vendor = target_info.GetVendorBuildProp(
      "ro.vendor.build.fingerprint")
  vendor_updated = source_fp_vendor != target_fp_vendor

  AddCompatibilityArchive(system_updated, vendor_updated)


def WriteFullOTAPackage(input_zip, output_file):
  target_info = BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)

  # We don't know what version it will be installed on top of. We expect the
  # API just won't change very often. Similarly for fstab, it might have
  # changed in the target build.
  target_api_version = target_info["recovery_api_version"]
  script = edify_generator.EdifyGenerator(target_api_version, target_info)

  if target_info.oem_props and not OPTIONS.oem_no_mount:
    target_info.WriteMountOemScript(script)

  metadata = GetPackageMetadata(target_info)

  if not OPTIONS.no_signing:
    staging_file = common.MakeTempFile(suffix='.zip')
  else:
    staging_file = output_file

  output_zip = zipfile.ZipFile(
      staging_file, "w", compression=zipfile.ZIP_DEFLATED)

  device_specific = common.DeviceSpecificParams(
      input_zip=input_zip,
      input_version=target_api_version,
      output_zip=output_zip,
      script=script,
      input_tmp=OPTIONS.input_tmp,
      metadata=metadata,
      info_dict=OPTIONS.info_dict)

  assert HasRecoveryPatch(input_zip)

  # Assertions (e.g. downgrade check, device properties check).
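  # The timestamp assertion below makes the updater refuse to install this
  # package on top of a build that is newer than the one the package carries.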
  ts = target_info.GetBuildProp("ro.build.date.utc")
  ts_text = target_info.GetBuildProp("ro.build.date")
  script.AssertOlderBuild(ts, ts_text)

  target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
  device_specific.FullOTA_Assertions()

  # Two-step package strategy (in chronological order, which is *not*
  # the order in which the generated script has things):
  #
  # if stage is not "2/3" or "3/3":
  #    write recovery image to boot partition
  #    set stage to "2/3"
  #    reboot to boot partition and restart recovery
  # else if stage is "2/3":
  #    write recovery image to recovery partition
  #    set stage to "3/3"
  #    reboot to recovery partition and restart recovery
  # else:
  #    (stage must be "3/3")
  #    set stage to ""
  #    do normal full package installation:
  #       wipe and install system, boot image, etc.
  #       set up system to update recovery partition on first boot
  #    complete script normally
  #    (allow recovery to mark itself finished and reboot)

  recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
                                         OPTIONS.input_tmp, "RECOVERY")
  if OPTIONS.two_step:
    if not target_info.get("multistage_support"):
      assert False, "two-step packages not supported by this build"
    fs = target_info["fstab"]["/misc"]
    assert fs.fs_type.upper() == "EMMC", \
        "two-step packages only supported on devices with EMMC /misc partitions"
    bcb_dev = {"bcb_dev": fs.device}
    common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data)
    script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)

    # Stage 2/3: Write recovery image to /recovery (currently running /boot).
    script.Comment("Stage 2/3")
    script.WriteRawImage("/recovery", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
reboot_now("%(bcb_dev)s", "recovery");
else if get_stage("%(bcb_dev)s") == "3/3" then
""" % bcb_dev)

    # Stage 3/3: Make changes.
    script.Comment("Stage 3/3")

  # Dump fingerprints
  script.Print("Target: {}".format(target_info.fingerprint))

  device_specific.FullOTA_InstallBegin()

  system_progress = 0.75

  if OPTIONS.wipe_user_data:
    system_progress -= 0.1
  if HasVendorPartition(input_zip):
    system_progress -= 0.1

  script.ShowProgress(system_progress, 0)

  # See the notes in WriteBlockIncrementalOTAPackage().
  allow_shared_blocks = target_info.get('ext4_share_dup_blocks') == "true"

  # Full OTA is done as an "incremental" against an empty source image. This
  # has the effect of writing new data from the package to the entire
  # partition, but lets us reuse the updater code that writes incrementals to
  # do it.
  system_tgt = common.GetSparseImage("system", OPTIONS.input_tmp, input_zip,
                                     allow_shared_blocks)
  system_tgt.ResetFileMap()
  system_diff = common.BlockDifference("system", system_tgt, src=None)
  system_diff.WriteScript(script, output_zip)

  boot_img = common.GetBootableImage(
      "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")

  if HasVendorPartition(input_zip):
    script.ShowProgress(0.1, 0)

    vendor_tgt = common.GetSparseImage("vendor", OPTIONS.input_tmp, input_zip,
                                       allow_shared_blocks)
    vendor_tgt.ResetFileMap()
    vendor_diff = common.BlockDifference("vendor", vendor_tgt)
    vendor_diff.WriteScript(script, output_zip)

  AddCompatibilityArchiveIfTrebleEnabled(input_zip, output_zip, target_info)

  common.CheckSize(boot_img.data, "boot.img", target_info)
  common.ZipWriteStr(output_zip, "boot.img", boot_img.data)

  script.ShowProgress(0.05, 5)
  script.WriteRawImage("/boot", "boot.img")

  script.ShowProgress(0.2, 10)
  device_specific.FullOTA_InstallEnd()

  if OPTIONS.extra_script is not None:
    script.AppendExtra(OPTIONS.extra_script)

  script.UnmountAll()

  if OPTIONS.wipe_user_data:
    script.ShowProgress(0.1, 10)
    script.FormatPartition("/data")

  if OPTIONS.two_step:
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
""" % bcb_dev)
    script.AppendExtra("else\n")

    # Stage 1/3: Nothing to verify for full OTA. Write recovery image to /boot.
    script.Comment("Stage 1/3")
    _WriteRecoveryImageToBoot(script, output_zip)

    script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
endif;
endif;
""" % bcb_dev)

  script.SetProgress(1)
  script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
  metadata["ota-required-cache"] = str(script.required_cache)

  # We haven't written the metadata entry, which will be done in
  # FinalizeMetadata.
  common.ZipClose(output_zip)

  needed_property_files = (
      NonAbOtaPropertyFiles(),
  )
  FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)


def WriteMetadata(metadata, output_zip):
  value = "".join(["%s=%s\n" % kv for kv in sorted(metadata.iteritems())])
  common.ZipWriteStr(output_zip, METADATA_NAME, value,
                     compress_type=zipfile.ZIP_STORED)


def HandleDowngradeMetadata(metadata, target_info, source_info):
  # Only incremental OTAs are allowed to reach here.
  assert OPTIONS.incremental_source is not None

  post_timestamp = target_info.GetBuildProp("ro.build.date.utc")
  pre_timestamp = source_info.GetBuildProp("ro.build.date.utc")
  is_downgrade = long(post_timestamp) < long(pre_timestamp)

  if OPTIONS.downgrade:
    if not is_downgrade:
      raise RuntimeError(
          "--downgrade or --override_timestamp specified but no downgrade "
          "detected: pre: %s, post: %s" % (pre_timestamp, post_timestamp))
    metadata["ota-downgrade"] = "yes"
  else:
    if is_downgrade:
      raise RuntimeError(
          "Downgrade detected based on timestamp check: pre: %s, post: %s. "
          "Need to specify --override_timestamp OR --downgrade to allow "
          "building the incremental." % (pre_timestamp, post_timestamp))


def GetPackageMetadata(target_info, source_info=None):
  """Generates and returns the metadata dict.

  It generates a dict() that contains the info to be written into an OTA
  package (META-INF/com/android/metadata).
  It also handles the detection of downgrade / data wipe based on the global
  options.

  Args:
    target_info: The BuildInfo instance that holds the target build info.
    source_info: The BuildInfo instance that holds the source build info, or
        None if generating a full OTA.

  Returns:
    A dict to be written into the package metadata entry.
  """
  assert isinstance(target_info, BuildInfo)
  assert source_info is None or isinstance(source_info, BuildInfo)

  metadata = {
      'post-build' : target_info.fingerprint,
      'post-build-incremental' : target_info.GetBuildProp(
          'ro.build.version.incremental'),
      'post-sdk-level' : target_info.GetBuildProp(
          'ro.build.version.sdk'),
      'post-security-patch-level' : target_info.GetBuildProp(
          'ro.build.version.security_patch'),
  }

  if target_info.is_ab:
    metadata['ota-type'] = 'AB'
    metadata['ota-required-cache'] = '0'
  else:
    metadata['ota-type'] = 'BLOCK'

  if OPTIONS.wipe_user_data:
    metadata['ota-wipe'] = 'yes'

  is_incremental = source_info is not None
  if is_incremental:
    metadata['pre-build'] = source_info.fingerprint
    metadata['pre-build-incremental'] = source_info.GetBuildProp(
        'ro.build.version.incremental')
    metadata['pre-device'] = source_info.device
  else:
    metadata['pre-device'] = target_info.device

  # Use the actual post-timestamp, even for a downgrade case.
  metadata['post-timestamp'] = target_info.GetBuildProp('ro.build.date.utc')

  # Detect downgrades and set up downgrade flags accordingly.
  if is_incremental:
    HandleDowngradeMetadata(metadata, target_info, source_info)

  return metadata


class PropertyFiles(object):
  """A class that computes the property-files string for an OTA package.

  A property-files string is a comma-separated string that contains the
  offset/size info for an OTA package. The entries, which must be ZIP_STORED,
  can be fetched directly with the package URL along with the offset/size info.
  These strings can be used for streaming A/B OTAs, or for allowing an updater
  to download the package metadata entry directly, without paying the cost of
  downloading the entire package.

  Computing the final property-files string requires two passes, because signing
  the whole package (with signapk.jar) will possibly reorder the ZIP entries,
  which in turn would invalidate previously computed ZIP entry offset/size
  values.

  This class provides functions to be called for each pass. The general flow is
  as follows.

    property_files = PropertyFiles()
    # The first pass, which writes placeholders before doing initial signing.
    property_files.Compute()
    SignOutput()

    # The second pass, by replacing the placeholders with actual data.
    property_files.Finalize()
    SignOutput()

  And the caller can additionally verify the final result.

    property_files.Verify()
  """

  def __init__(self):
    self.name = None
    self.required = ()
    self.optional = ()

  def Compute(self, input_zip):
    """Computes and returns a property-files string with placeholders.

    We reserve extra space for the offset and size of the metadata entry itself,
    although we don't know the final values until the package gets signed.

    Args:
      input_zip: The input ZIP file.

    Returns:
      A string with placeholders for the metadata offset/size info, e.g.
1021 "payload.bin:679:343,payload_properties.txt:378:45,metadata: ". 1022 """ 1023 return self._GetPropertyFilesString(input_zip, reserve_space=True) 1024 1025 class InsufficientSpaceException(Exception): 1026 pass 1027 1028 def Finalize(self, input_zip, reserved_length): 1029 """Finalizes a property-files string with actual METADATA offset/size info. 1030 1031 The input ZIP file has been signed, with the ZIP entries in the desired 1032 place (signapk.jar will possibly reorder the ZIP entries). Now we compute 1033 the ZIP entry offsets and construct the property-files string with actual 1034 data. Note that during this process, we must pad the property-files string 1035 to the reserved length, so that the METADATA entry size remains the same. 1036 Otherwise the entries' offsets and sizes may change again. 1037 1038 Args: 1039 input_zip: The input ZIP file. 1040 reserved_length: The reserved length of the property-files string during 1041 the call to Compute(). The final string must be no more than this 1042 size. 1043 1044 Returns: 1045 A property-files string including the metadata offset/size info, e.g. 1046 "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379 ". 1047 1048 Raises: 1049 InsufficientSpaceException: If the reserved length is insufficient to hold 1050 the final string. 1051 """ 1052 result = self._GetPropertyFilesString(input_zip, reserve_space=False) 1053 if len(result) > reserved_length: 1054 raise self.InsufficientSpaceException( 1055 'Insufficient reserved space: reserved={}, actual={}'.format( 1056 reserved_length, len(result))) 1057 1058 result += ' ' * (reserved_length - len(result)) 1059 return result 1060 1061 def Verify(self, input_zip, expected): 1062 """Verifies the input ZIP file contains the expected property-files string. 1063 1064 Args: 1065 input_zip: The input ZIP file. 1066 expected: The property-files string that's computed from Finalize(). 1067 1068 Raises: 1069 AssertionError: On finding a mismatch. 1070 """ 1071 actual = self._GetPropertyFilesString(input_zip) 1072 assert actual == expected, \ 1073 "Mismatching streaming metadata: {} vs {}.".format(actual, expected) 1074 1075 def _GetPropertyFilesString(self, zip_file, reserve_space=False): 1076 """Constructs the property-files string per request.""" 1077 1078 def ComputeEntryOffsetSize(name): 1079 """Computes the zip entry offset and size.""" 1080 info = zip_file.getinfo(name) 1081 offset = info.header_offset + len(info.FileHeader()) 1082 size = info.file_size 1083 return '%s:%d:%d' % (os.path.basename(name), offset, size) 1084 1085 tokens = [] 1086 tokens.extend(self._GetPrecomputed(zip_file)) 1087 for entry in self.required: 1088 tokens.append(ComputeEntryOffsetSize(entry)) 1089 for entry in self.optional: 1090 if entry in zip_file.namelist(): 1091 tokens.append(ComputeEntryOffsetSize(entry)) 1092 1093 # 'META-INF/com/android/metadata' is required. We don't know its actual 1094 # offset and length (as well as the values for other entries). So we reserve 1095 # 15-byte as a placeholder ('offset:length'), which is sufficient to cover 1096 # the space for metadata entry. Because 'offset' allows a max of 10-digit 1097 # (i.e. ~9 GiB), with a max of 4-digit for the length. Note that all the 1098 # reserved space serves the metadata entry only. 
    if reserve_space:
      tokens.append('metadata:' + ' ' * 15)
    else:
      tokens.append(ComputeEntryOffsetSize(METADATA_NAME))

    return ','.join(tokens)

  def _GetPrecomputed(self, input_zip):
    """Computes the additional tokens to be included into the property-files.

    This applies to tokens without actual ZIP entries, such as
    payload_metadata.bin. We want to expose the offset/size to updaters, so
    that they can download the payload metadata directly with the info.

    Args:
      input_zip: The input zip file.

    Returns:
      A list of strings (tokens) to be added to the property-files string.
    """
    # pylint: disable=no-self-use
    # pylint: disable=unused-argument
    return []


class StreamingPropertyFiles(PropertyFiles):
  """A subclass for computing the property-files for streaming A/B OTAs."""

  def __init__(self):
    super(StreamingPropertyFiles, self).__init__()
    self.name = 'ota-streaming-property-files'
    self.required = (
        # payload.bin and payload_properties.txt must exist.
        'payload.bin',
        'payload_properties.txt',
    )
    self.optional = (
        # care_map.txt is available only if dm-verity is enabled.
        'care_map.txt',
        # compatibility.zip is available only if the target supports Treble.
        'compatibility.zip',
    )


class AbOtaPropertyFiles(StreamingPropertyFiles):
  """The property-files for A/B OTA that includes payload_metadata.bin info.

  Since P, we expose one more token (aka property-file), in addition to the ones
  for streaming A/B OTA, for a virtual entry of 'payload_metadata.bin'.
  'payload_metadata.bin' is the header part of a payload ('payload.bin'), which
  doesn't exist as a separate ZIP entry, but can be used to verify if the
  payload can be applied on the given device.

  For backward compatibility, we keep both of the 'ota-streaming-property-files'
  and the newly added 'ota-property-files' in P. The new token will only be
  available in 'ota-property-files'.
  """

  def __init__(self):
    super(AbOtaPropertyFiles, self).__init__()
    self.name = 'ota-property-files'

  def _GetPrecomputed(self, input_zip):
    offset, size = self._GetPayloadMetadataOffsetAndSize(input_zip)
    return ['payload_metadata.bin:{}:{}'.format(offset, size)]

  @staticmethod
  def _GetPayloadMetadataOffsetAndSize(input_zip):
    """Computes the offset and size of the payload metadata for a given package.

    (From system/update_engine/update_metadata.proto)
    A delta update file contains all the deltas needed to update a system from
    one specific version to another specific version. The update format is
    represented by this struct pseudocode:

    struct delta_update_file {
      char magic[4] = "CrAU";
      uint64 file_format_version;
      uint64 manifest_size;  // Size of protobuf DeltaArchiveManifest

      // Only present if format_version > 1:
      uint32 metadata_signature_size;

      // The Bzip2 compressed DeltaArchiveManifest
      char manifest[manifest_size];

      // The signature of the metadata (from the beginning of the payload up to
      // this location, not including the signature itself). This is a
      // serialized Signatures message.
      char metadata_signature_message[metadata_signature_size];

      // Data blobs for files, no specific format.
      // The specific offset and length of each data blob are recorded in the
      // DeltaArchiveManifest.
      struct {
        char data[];
      } blobs[];

      // These two are not signed:
      uint64 payload_signatures_message_size;
      char payload_signatures_message[];
    };

    'payload_metadata.bin' contains all the bytes from the beginning of the
    payload, till the end of 'metadata_signature_message'.
    """
    payload_info = input_zip.getinfo('payload.bin')
    payload_offset = payload_info.header_offset + len(payload_info.FileHeader())
    payload_size = payload_info.file_size

    with input_zip.open('payload.bin', 'r') as payload_fp:
      header_bin = payload_fp.read(24)

    # network byte order (big-endian)
    header = struct.unpack("!IQQL", header_bin)

    # 'CrAU'
    magic = header[0]
    assert magic == 0x43724155, "Invalid magic: {:x}".format(magic)

    manifest_size = header[2]
    metadata_signature_size = header[3]
    metadata_total = 24 + manifest_size + metadata_signature_size
    assert metadata_total < payload_size

    return (payload_offset, metadata_total)


class NonAbOtaPropertyFiles(PropertyFiles):
  """The property-files for non-A/B OTA.

  For non-A/B OTA, the property-files string contains the info for the METADATA
  entry, with which a system updater can fetch the package metadata prior to
  downloading the entire package.
  """

  def __init__(self):
    super(NonAbOtaPropertyFiles, self).__init__()
    self.name = 'ota-property-files'


def FinalizeMetadata(metadata, input_file, output_file, needed_property_files):
  """Finalizes the metadata and signs an A/B OTA package.

  In order to stream an A/B OTA package, we need 'ota-streaming-property-files'
  that contains the offsets and sizes for the ZIP entries. An example
  property-files string is as follows.

    "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379"

  The OTA server can pass down this string, in addition to the package URL, to
  the system update client. The system update client can then fetch individual
  ZIP entries (ZIP_STORED) directly at the given offset of the URL.

  Args:
    metadata: The metadata dict for the package.
    input_file: The input ZIP filename that doesn't contain the package METADATA
        entry yet.
    output_file: The final output ZIP filename.
    needed_property_files: The list of PropertyFiles' to be generated.
  """

  def ComputeAllPropertyFiles(input_file, needed_property_files):
    # Write the current metadata entry with placeholders.
    with zipfile.ZipFile(input_file) as input_zip:
      for property_files in needed_property_files:
        metadata[property_files.name] = property_files.Compute(input_zip)
      namelist = input_zip.namelist()

    if METADATA_NAME in namelist:
      common.ZipDelete(input_file, METADATA_NAME)
    output_zip = zipfile.ZipFile(input_file, 'a')
    WriteMetadata(metadata, output_zip)
    common.ZipClose(output_zip)

    if OPTIONS.no_signing:
      return input_file

    prelim_signing = common.MakeTempFile(suffix='.zip')
    SignOutput(input_file, prelim_signing)
    return prelim_signing

  def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
    with zipfile.ZipFile(prelim_signing) as prelim_signing_zip:
      for property_files in needed_property_files:
        metadata[property_files.name] = property_files.Finalize(
            prelim_signing_zip, len(metadata[property_files.name]))

  # SignOutput(), which in turn calls signapk.jar, will possibly reorder the
  # ZIP entries, as well as padding the entry headers. We do a preliminary
  # signing (with an incomplete metadata entry) to allow that to happen. Then
  # compute the ZIP entry offsets, write back the final metadata and do the
  # final signing.
  prelim_signing = ComputeAllPropertyFiles(input_file, needed_property_files)
  try:
    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)
  except PropertyFiles.InsufficientSpaceException:
    # Even with the preliminary signing, the entry orders may change
    # dramatically, which leads to insufficiently reserved space during the
    # first call to ComputeAllPropertyFiles(). In that case, we redo all the
    # preliminary signing works, based on the already ordered ZIP entries, to
    # address the issue.
    prelim_signing = ComputeAllPropertyFiles(
        prelim_signing, needed_property_files)
    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)

  # Replace the METADATA entry.
  common.ZipDelete(prelim_signing, METADATA_NAME)
  output_zip = zipfile.ZipFile(prelim_signing, 'a')
  WriteMetadata(metadata, output_zip)
  common.ZipClose(output_zip)

  # Re-sign the package after updating the metadata entry.
  if OPTIONS.no_signing:
    output_file = prelim_signing
  else:
    SignOutput(prelim_signing, output_file)

  # Reopen the final signed zip to double check the streaming metadata.
  with zipfile.ZipFile(output_file) as output_zip:
    for property_files in needed_property_files:
      property_files.Verify(output_zip, metadata[property_files.name].strip())


def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_file):
  target_info = BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
  source_info = BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)

  target_api_version = target_info["recovery_api_version"]
  source_api_version = source_info["recovery_api_version"]
  if source_api_version == 0:
    print("WARNING: generating edify script for a source that "
          "can't install it.")

  script = edify_generator.EdifyGenerator(
      source_api_version, target_info, fstab=source_info["fstab"])

  if target_info.oem_props or source_info.oem_props:
    if not OPTIONS.oem_no_mount:
      source_info.WriteMountOemScript(script)

  metadata = GetPackageMetadata(target_info, source_info)

  if not OPTIONS.no_signing:
    staging_file = common.MakeTempFile(suffix='.zip')
  else:
    staging_file = output_file

  output_zip = zipfile.ZipFile(
      staging_file, "w", compression=zipfile.ZIP_DEFLATED)

  device_specific = common.DeviceSpecificParams(
      source_zip=source_zip,
      source_version=source_api_version,
      target_zip=target_zip,
      target_version=target_api_version,
      output_zip=output_zip,
      script=script,
      metadata=metadata,
      info_dict=source_info)

  source_boot = common.GetBootableImage(
      "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", source_info)
  target_boot = common.GetBootableImage(
      "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT", target_info)
  updating_boot = (not OPTIONS.two_step and
                   (source_boot.data != target_boot.data))

  target_recovery = common.GetBootableImage(
      "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")

  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
  # shared blocks (i.e. some blocks will show up in multiple files' block
  # list). We can only allocate such shared blocks to the first "owner", and
  # disable imgdiff for all later occurrences.
  allow_shared_blocks = (source_info.get('ext4_share_dup_blocks') == "true" or
                         target_info.get('ext4_share_dup_blocks') == "true")
  system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip,
                                     allow_shared_blocks)
  system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip,
                                     allow_shared_blocks)

  blockimgdiff_version = max(
      int(i) for i in target_info.get("blockimgdiff_versions", "1").split(","))
  assert blockimgdiff_version >= 3

  # Check the first block of the source system partition for remount R/W only
  # if the filesystem is ext4.
  system_src_partition = source_info["fstab"]["/system"]
  check_first_block = system_src_partition.fs_type == "ext4"
  # Disable using imgdiff for squashfs. 'imgdiff -z' expects input files to be
  # in zip formats. However with squashfs, a) all files are compressed in LZ4;
  # b) the blocks listed in block map may not contain all the bytes for a given
  # file (because they're rounded to be 4K-aligned).
  system_tgt_partition = target_info["fstab"]["/system"]
  disable_imgdiff = (system_src_partition.fs_type == "squashfs" or
                     system_tgt_partition.fs_type == "squashfs")
  system_diff = common.BlockDifference("system", system_tgt, system_src,
                                       check_first_block,
                                       version=blockimgdiff_version,
                                       disable_imgdiff=disable_imgdiff)

  if HasVendorPartition(target_zip):
    if not HasVendorPartition(source_zip):
      raise RuntimeError("can't generate incremental that adds /vendor")
    vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip,
                                       allow_shared_blocks)
    vendor_tgt = common.GetSparseImage("vendor", OPTIONS.target_tmp, target_zip,
                                       allow_shared_blocks)

    # Check first block of vendor partition for remount R/W only if
    # disk type is ext4
    vendor_partition = source_info["fstab"]["/vendor"]
    check_first_block = vendor_partition.fs_type == "ext4"
    disable_imgdiff = vendor_partition.fs_type == "squashfs"
    vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
                                         check_first_block,
                                         version=blockimgdiff_version,
                                         disable_imgdiff=disable_imgdiff)
  else:
    vendor_diff = None

  AddCompatibilityArchiveIfTrebleEnabled(
      target_zip, output_zip, target_info, source_info)

  # Assertions (e.g. device properties check).
  target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
  device_specific.IncrementalOTA_Assertions()

  # Two-step incremental package strategy (in chronological order,
  # which is *not* the order in which the generated script has
  # things):
  #
  # if stage is not "2/3" or "3/3":
  #    do verification on current system
  #    write recovery image to boot partition
  #    set stage to "2/3"
  #    reboot to boot partition and restart recovery
  # else if stage is "2/3":
  #    write recovery image to recovery partition
  #    set stage to "3/3"
  #    reboot to recovery partition and restart recovery
  # else:
  #    (stage must be "3/3")
  #    perform update:
  #       patch system files, etc.
  #       force full install of new boot image
  #       set up system to update recovery partition on first boot
  #    complete script normally
  #    (allow recovery to mark itself finished and reboot)

  if OPTIONS.two_step:
    if not source_info.get("multistage_support"):
      assert False, "two-step packages not supported by this build"
    fs = source_info["fstab"]["/misc"]
    assert fs.fs_type.upper() == "EMMC", \
        "two-step packages only supported on devices with EMMC /misc partitions"
    bcb_dev = {"bcb_dev" : fs.device}
    common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
    script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)

    # Stage 2/3: Write recovery image to /recovery (currently running /boot).
    script.Comment("Stage 2/3")
    script.AppendExtra("sleep(20);\n")
    script.WriteRawImage("/recovery", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
reboot_now("%(bcb_dev)s", "recovery");
else if get_stage("%(bcb_dev)s") != "3/3" then
""" % bcb_dev)

  # Stage 1/3: (a) Verify the current system.
  script.Comment("Stage 1/3")

  # Dump fingerprints
  script.Print("Source: {}".format(source_info.fingerprint))
  script.Print("Target: {}".format(target_info.fingerprint))

  script.Print("Verifying current system...")

  device_specific.IncrementalOTA_VerifyBegin()

  WriteFingerprintAssertion(script, target_info, source_info)

  # Check the required cache size (i.e. stashed blocks).
  size = []
  if system_diff:
    size.append(system_diff.required_cache)
  if vendor_diff:
    size.append(vendor_diff.required_cache)

  if updating_boot:
    boot_type, boot_device = common.GetTypeAndDevice("/boot", source_info)
    d = common.Difference(target_boot, source_boot)
    _, _, d = d.ComputePatch()
    if d is None:
      include_full_boot = True
      common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
    else:
      include_full_boot = False

      print("boot target: %d source: %d diff: %d" % (
          target_boot.size, source_boot.size, len(d)))

      common.ZipWriteStr(output_zip, "patch/boot.img.p", d)

      script.PatchCheck("%s:%s:%d:%s:%d:%s" %
                        (boot_type, boot_device,
                         source_boot.size, source_boot.sha1,
                         target_boot.size, target_boot.sha1))

      size.append(target_boot.size)

  if size:
    script.CacheFreeSpaceCheck(max(size))

  device_specific.IncrementalOTA_VerifyEnd()

  if OPTIONS.two_step:
    # Stage 1/3: (b) Write recovery image to /boot.
    _WriteRecoveryImageToBoot(script, output_zip)

    script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
else
""" % bcb_dev)

    # Stage 3/3: Make changes.
    script.Comment("Stage 3/3")

  # Verify the existing partitions.
  system_diff.WriteVerifyScript(script, touched_blocks_only=True)
  if vendor_diff:
    vendor_diff.WriteVerifyScript(script, touched_blocks_only=True)

  script.Comment("---- start making changes here ----")

  device_specific.IncrementalOTA_InstallBegin()

  system_diff.WriteScript(script, output_zip,
                          progress=0.8 if vendor_diff else 0.9)

  if vendor_diff:
    vendor_diff.WriteScript(script, output_zip, progress=0.1)

  if OPTIONS.two_step:
    common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
    script.WriteRawImage("/boot", "boot.img")
    print("writing full boot image (forced by two-step mode)")

  if not OPTIONS.two_step:
    if updating_boot:
      if include_full_boot:
        print("boot image changed; including full.")
        script.Print("Installing boot image...")
        script.WriteRawImage("/boot", "boot.img")
      else:
        # Produce the boot image by applying a patch to the current
        # contents of the boot partition, and write it back to the
        # partition.
        print("boot image changed; including patch.")
        script.Print("Patching boot image...")
        script.ShowProgress(0.1, 10)
        script.ApplyPatch("%s:%s:%d:%s:%d:%s"
                          % (boot_type, boot_device,
                             source_boot.size, source_boot.sha1,
                             target_boot.size, target_boot.sha1),
                          "-",
                          target_boot.size, target_boot.sha1,
                          source_boot.sha1, "patch/boot.img.p")
    else:
      print("boot image unchanged; skipping.")

  # Do device-specific installation (e.g. write radio image).
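  # (IncrementalOTA_InstallEnd, like the other IncrementalOTA_* calls above,
  # dispatches to the device-specific releasetools extension selected in
  # main(); if the extension doesn't define the hook, the call is effectively
  # a no-op.)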
  device_specific.IncrementalOTA_InstallEnd()

  if OPTIONS.extra_script is not None:
    script.AppendExtra(OPTIONS.extra_script)

  if OPTIONS.wipe_user_data:
    script.Print("Erasing user data...")
    script.FormatPartition("/data")

  if OPTIONS.two_step:
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
endif;
endif;
""" % bcb_dev)

  script.SetProgress(1)
  # For downgrade OTAs, we prefer to use the update-binary from the source
  # build, which is actually newer than the one in the target build.
  if OPTIONS.downgrade:
    script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary)
  else:
    script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
  metadata["ota-required-cache"] = str(script.required_cache)

  # We haven't written the metadata entry yet, which will be handled in
  # FinalizeMetadata().
  common.ZipClose(output_zip)

  # Sign the generated zip package unless no_signing is specified.
  needed_property_files = (
      NonAbOtaPropertyFiles(),
  )
  FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)


def GetTargetFilesZipForSecondaryImages(input_file, skip_postinstall=False):
  """Returns a target-files.zip file for generating secondary payload.

  Although the original target-files.zip already contains secondary slot
  images (i.e. IMAGES/system_other.img), we need to rename the files to the
  ones without _other suffix. Note that we cannot instead modify the names in
  META/ab_partitions.txt, because there are no matching partitions on device.

  For the partitions that don't have secondary images, the ones for primary
  slot will be used. This is to ensure that we always have valid boot, vbmeta,
  and bootloader images in the inactive slot.

  Args:
    input_file: The input target-files.zip file.
    skip_postinstall: Whether to skip copying the postinstall config file.

  Returns:
    The filename of the target-files.zip for generating secondary payload.
  """
  target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
  target_zip = zipfile.ZipFile(target_file, 'w', allowZip64=True)

  input_tmp = common.UnzipTemp(input_file, UNZIP_PATTERN)
  with zipfile.ZipFile(input_file, 'r') as input_zip:
    infolist = input_zip.infolist()

  for info in infolist:
    unzipped_file = os.path.join(input_tmp, *info.filename.split('/'))
    if info.filename == 'IMAGES/system_other.img':
      common.ZipWrite(target_zip, unzipped_file, arcname='IMAGES/system.img')

    # Primary images and friends need to be skipped explicitly.
    elif info.filename in ('IMAGES/system.img',
                           'IMAGES/system.map'):
      pass

    # Skip copying the postinstall config if requested.
    elif skip_postinstall and info.filename == POSTINSTALL_CONFIG:
      pass

    elif info.filename.startswith(('META/', 'IMAGES/')):
      common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)

  common.ZipClose(target_zip)

  return target_file


def GetTargetFilesZipWithoutPostinstallConfig(input_file):
  """Returns a target-files.zip that does not contain postinstall_config.txt.

  This allows the brillo_update_payload script to skip writing all the
  postinstall hooks in the generated payload.
  The input target-files.zip file will be duplicated, with
  'META/postinstall_config.txt' skipped. If input_file doesn't contain the
  postinstall_config.txt entry, the input file will be returned.

  Args:
    input_file: The input target-files.zip filename.

  Returns:
    The filename of target-files.zip that doesn't contain postinstall config.
  """
  # We should only make a copy if postinstall_config entry exists.
  with zipfile.ZipFile(input_file, 'r') as input_zip:
    if POSTINSTALL_CONFIG not in input_zip.namelist():
      return input_file

  target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
  shutil.copyfile(input_file, target_file)
  common.ZipDelete(target_file, POSTINSTALL_CONFIG)
  return target_file


def WriteABOTAPackageWithBrilloScript(target_file, output_file,
                                      source_file=None):
  """Generates an Android OTA package that has A/B update payload."""
  # Stage the output zip package for package signing.
  if not OPTIONS.no_signing:
    staging_file = common.MakeTempFile(suffix='.zip')
  else:
    staging_file = output_file
  output_zip = zipfile.ZipFile(staging_file, "w",
                               compression=zipfile.ZIP_DEFLATED)

  if source_file is not None:
    target_info = BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
    source_info = BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
  else:
    target_info = BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
    source_info = None

  # Metadata to comply with Android OTA package format.
  metadata = GetPackageMetadata(target_info, source_info)

  if OPTIONS.skip_postinstall:
    target_file = GetTargetFilesZipWithoutPostinstallConfig(target_file)

  # Generate payload.
  payload = Payload()

  # Enforce a max timestamp this payload can be applied on top of.
  if OPTIONS.downgrade:
    max_timestamp = source_info.GetBuildProp("ro.build.date.utc")
  else:
    max_timestamp = metadata["post-timestamp"]
  additional_args = ["--max_timestamp", max_timestamp]

  payload.Generate(target_file, source_file, additional_args)

  # Sign the payload.
  payload_signer = PayloadSigner()
  payload.Sign(payload_signer)

  # Write the payload into output zip.
  payload.WriteToZip(output_zip)

  # Generate and include the secondary payload that installs secondary images
  # (e.g. system_other.img).
  if OPTIONS.include_secondary:
    # We always include a full payload for the secondary slot, even when
    # building an incremental OTA. See the comments for "--include_secondary".
    secondary_target_file = GetTargetFilesZipForSecondaryImages(
        target_file, OPTIONS.skip_postinstall)
    secondary_payload = Payload(secondary=True)
    secondary_payload.Generate(secondary_target_file,
                               additional_args=additional_args)
    secondary_payload.Sign(payload_signer)
    secondary_payload.WriteToZip(output_zip)

  # If dm-verity is supported for the device, copy the contents of care_map
  # into the A/B OTA package.
  target_zip = zipfile.ZipFile(target_file, "r")
  if (target_info.get("verity") == "true" or
      target_info.get("avb_enable") == "true"):
    care_map_path = "META/care_map.txt"
    namelist = target_zip.namelist()
    if care_map_path in namelist:
      care_map_data = target_zip.read(care_map_path)
      # In order to support streaming, care_map.txt needs to be packed as
      # ZIP_STORED.
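      # (Streaming clients locate the entry by byte offset and size and read
      # it in place, which only works if the bytes are stored uncompressed.)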
      common.ZipWriteStr(output_zip, "care_map.txt", care_map_data,
                         compress_type=zipfile.ZIP_STORED)
    else:
      print("Warning: cannot find care map file in target_file package")

  AddCompatibilityArchiveIfTrebleEnabled(
      target_zip, output_zip, target_info, source_info)

  common.ZipClose(target_zip)

  # We haven't written the metadata entry yet, which will be handled in
  # FinalizeMetadata().
  common.ZipClose(output_zip)

  # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it covers
  # all the info of the latter. However, system updaters and OTA servers need
  # time to switch to the new flag. We keep both flags for the P timeframe,
  # and will remove StreamingPropertyFiles in a later release.
  needed_property_files = (
      AbOtaPropertyFiles(),
      StreamingPropertyFiles(),
  )
  FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)


def main(argv):

  def option_handler(o, a):
    if o in ("-k", "--package_key"):
      OPTIONS.package_key = a
    elif o in ("-i", "--incremental_from"):
      OPTIONS.incremental_source = a
    elif o == "--full_radio":
      OPTIONS.full_radio = True
    elif o == "--full_bootloader":
      OPTIONS.full_bootloader = True
    elif o == "--wipe_user_data":
      OPTIONS.wipe_user_data = True
    elif o == "--downgrade":
      OPTIONS.downgrade = True
      OPTIONS.wipe_user_data = True
    elif o == "--override_timestamp":
      OPTIONS.downgrade = True
    elif o in ("-o", "--oem_settings"):
      OPTIONS.oem_source = a.split(',')
    elif o == "--oem_no_mount":
      OPTIONS.oem_no_mount = True
    elif o in ("-e", "--extra_script"):
      OPTIONS.extra_script = a
    elif o in ("-t", "--worker_threads"):
      if a.isdigit():
        OPTIONS.worker_threads = int(a)
      else:
        raise ValueError("Cannot parse value %r for option %r - only "
                         "integers are allowed." % (a, o))
    elif o in ("-2", "--two_step"):
      OPTIONS.two_step = True
    elif o == "--include_secondary":
      OPTIONS.include_secondary = True
    elif o == "--no_signing":
      OPTIONS.no_signing = True
    elif o == "--verify":
      OPTIONS.verify = True
    elif o == "--block":
      OPTIONS.block_based = True
    elif o in ("-b", "--binary"):
      OPTIONS.updater_binary = a
    elif o == "--stash_threshold":
      try:
        OPTIONS.stash_threshold = float(a)
      except ValueError:
        raise ValueError("Cannot parse value %r for option %r - expecting "
                         "a float" % (a, o))
    elif o == "--log_diff":
      OPTIONS.log_diff = a
    elif o == "--payload_signer":
      OPTIONS.payload_signer = a
    elif o == "--payload_signer_args":
      OPTIONS.payload_signer_args = shlex.split(a)
    elif o == "--extracted_input_target_files":
      OPTIONS.extracted_input = a
    elif o == "--skip_postinstall":
      OPTIONS.skip_postinstall = True
    else:
      return False
    return True

  args = common.ParseOptions(argv, __doc__,
                             extra_opts="b:k:i:d:e:t:2o:",
                             extra_long_opts=[
                                 "package_key=",
                                 "incremental_from=",
                                 "full_radio",
                                 "full_bootloader",
                                 "wipe_user_data",
                                 "downgrade",
                                 "override_timestamp",
                                 "extra_script=",
                                 "worker_threads=",
                                 "two_step",
                                 "include_secondary",
                                 "no_signing",
                                 "block",
                                 "binary=",
                                 "oem_settings=",
                                 "oem_no_mount",
                                 "verify",
                                 "stash_threshold=",
                                 "log_diff=",
                                 "payload_signer=",
                                 "payload_signer_args=",
                                 "extracted_input_target_files=",
                                 "skip_postinstall",
                             ], extra_option_handler=option_handler)

  if len(args) != 2:
    common.Usage(__doc__)
    sys.exit(1)

  if OPTIONS.downgrade:
    # We should only allow downgrading incrementals (as opposed to full OTAs).
    # Otherwise the device could be rolled back from an arbitrary build with
    # this full OTA package.
    if OPTIONS.incremental_source is None:
      raise ValueError("Cannot generate downgradable full OTAs")

  # Load the build info dicts from the zip directly or the extracted input
  # directory. We don't need to unzip the entire target-files zips, because
  # they won't be needed for A/B OTAs (brillo_update_payload does that on its
  # own). When loading the info dicts, we don't need to provide the second
  # parameter to common.LoadInfoDict(). Specifying the second parameter allows
  # replacing some properties with their actual paths, such as 'selinux_fc'
  # and 'ramdisk_dir', which won't be used during OTA generation.
  if OPTIONS.extracted_input is not None:
    OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.extracted_input)
  else:
    with zipfile.ZipFile(args[0], 'r') as input_zip:
      OPTIONS.info_dict = common.LoadInfoDict(input_zip)

  if OPTIONS.verbose:
    print("--- target info ---")
    common.DumpInfoDict(OPTIONS.info_dict)

  # Load the source build dict if applicable.
  if OPTIONS.incremental_source is not None:
    OPTIONS.target_info_dict = OPTIONS.info_dict
    with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
      OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)

    if OPTIONS.verbose:
      print("--- source info ---")
      common.DumpInfoDict(OPTIONS.source_info_dict)

  # Load OEM dicts if provided.
  OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)

  ab_update = OPTIONS.info_dict.get("ab_update") == "true"

  # Use the default key to sign the package if not specified with package_key.
  # A package key is needed for A/B updates, so always define one if an A/B
  # update is being created.
  if not OPTIONS.no_signing or ab_update:
    if OPTIONS.package_key is None:
      OPTIONS.package_key = OPTIONS.info_dict.get(
          "default_system_dev_certificate",
          "build/target/product/security/testkey")
    # Get signing keys
    OPTIONS.key_passwords = common.GetKeyPasswords([OPTIONS.package_key])

  if ab_update:
    WriteABOTAPackageWithBrilloScript(
        target_file=args[0],
        output_file=args[1],
        source_file=OPTIONS.incremental_source)

    print("done.")
    return

  # Sanity check the loaded info dicts first.
  if OPTIONS.info_dict.get("no_recovery") == "true":
    raise common.ExternalError(
        "--- target build has specified no recovery ---")

  # Non-A/B OTAs rely on /cache partition to store temporary files.
  cache_size = OPTIONS.info_dict.get("cache_size")
  if cache_size is None:
    print("--- can't determine the cache partition size ---")
  OPTIONS.cache_size = cache_size

  if OPTIONS.extra_script is not None:
    OPTIONS.extra_script = open(OPTIONS.extra_script).read()

  if OPTIONS.extracted_input is not None:
    OPTIONS.input_tmp = OPTIONS.extracted_input
  else:
    print("unzipping target target-files...")
    OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN)
  OPTIONS.target_tmp = OPTIONS.input_tmp

  # If the caller explicitly specified the device-specific extensions path via
  # -s / --device_specific, use that. Otherwise, use META/releasetools.py if it
  # is present in the target target_files. Otherwise, take the path of the file
  # from 'tool_extensions' in the info dict and look for that in the local
  # filesystem, relative to the current directory.
  if OPTIONS.device_specific is None:
    from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
    if os.path.exists(from_input):
      print("(using device-specific extensions from target_files)")
      OPTIONS.device_specific = from_input
    else:
      OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")

  if OPTIONS.device_specific is not None:
    OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific)

  # Generate a full OTA.
  if OPTIONS.incremental_source is None:
    with zipfile.ZipFile(args[0], 'r') as input_zip:
      WriteFullOTAPackage(
          input_zip,
          output_file=args[1])

  # Generate an incremental OTA.
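  # (Non-A/B incrementals also need the source target-files unzipped, since
  # the block differences are computed from the source and target sparse
  # images.)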
  else:
    print("unzipping source target-files...")
    OPTIONS.source_tmp = common.UnzipTemp(
        OPTIONS.incremental_source, UNZIP_PATTERN)
    with zipfile.ZipFile(args[0], 'r') as input_zip, \
        zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
      WriteBlockIncrementalOTAPackage(
          input_zip,
          source_zip,
          output_file=args[1])

    if OPTIONS.log_diff:
      with open(OPTIONS.log_diff, 'w') as out_file:
        import target_files_diff
        target_files_diff.recursiveDiff(
            '', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file)

  print("done.")


if __name__ == '__main__':
  try:
    common.CloseInheritedPipes()
    main(sys.argv[1:])
  except common.ExternalError as e:
    print("\n ERROR: %s\n" % (e,))
    sys.exit(1)
  finally:
    common.Cleanup()