1 #!/usr/bin/env python 2 # 3 # Copyright (C) 2008 The Android Open Source Project 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 """ 18 Given a target-files zipfile, produces an OTA package that installs 19 that build. An incremental OTA is produced if -i is given, otherwise 20 a full OTA is produced. 21 22 Usage: ota_from_target_files [flags] input_target_files output_ota_package 23 24 --board_config <file> 25 Deprecated. 26 27 -k (--package_key) <key> Key to use to sign the package (default is 28 the value of default_system_dev_certificate from the input 29 target-files's META/misc_info.txt, or 30 "build/target/product/security/testkey" if that value is not 31 specified). 32 33 For incremental OTAs, the default value is based on the source 34 target-file, not the target build. 35 36 -i (--incremental_from) <file> 37 Generate an incremental OTA using the given target-files zip as 38 the starting build. 39 40 --full_radio 41 When generating an incremental OTA, always include a full copy of 42 radio image. This option is only meaningful when -i is specified, 43 because a full radio is always included in a full OTA if applicable. 44 45 --full_bootloader 46 Similar to --full_radio. When generating an incremental OTA, always 47 include a full copy of bootloader image. 48 49 -v (--verify) 50 Remount and verify the checksums of the files written to the 51 system and vendor (if used) partitions. Incremental builds only. 52 53 -o (--oem_settings) <file> 54 Use the file to specify the expected OEM-specific properties 55 on the OEM partition of the intended device. 56 57 --oem_no_mount 58 For devices with OEM-specific properties but without an OEM partition, 59 do not mount the OEM partition in the updater-script. This should be 60 very rarely used, since it's expected to have a dedicated OEM partition 61 for OEM-specific properties. Only meaningful when -o is specified. 62 63 -w (--wipe_user_data) 64 Generate an OTA package that will wipe the user data partition 65 when installed. 66 67 -n (--no_prereq) 68 Omit the timestamp prereq check normally included at the top of 69 the build scripts (used for developer OTA packages which 70 legitimately need to go back and forth). 71 72 --downgrade 73 Intentionally generate an incremental OTA that updates from a newer 74 build to an older one (based on timestamp comparison). "post-timestamp" 75 will be replaced by "ota-downgrade=yes" in the metadata file. A data 76 wipe will always be enforced, so "ota-wipe=yes" will also be included in 77 the metadata file. The update-binary in the source build will be used in 78 the OTA package, unless --binary flag is specified. 79 80 -e (--extra_script) <file> 81 Insert the contents of file at the end of the update script. 82 83 -a (--aslr_mode) <on|off> 84 Specify whether to turn on ASLR for the package (on by default). 85 86 -2 (--two_step) 87 Generate a 'two-step' OTA package, where recovery is updated 88 first, so that any changes made to the system partition are done 89 using the new recovery (new kernel, etc.). 90 91 --block 92 Generate a block-based OTA if possible. Will fall back to a 93 file-based OTA if the target_files is older and doesn't support 94 block-based OTAs. 95 96 -b (--binary) <file> 97 Use the given binary as the update-binary in the output package, 98 instead of the binary in the build's target_files. Use for 99 development only. 100 101 -t (--worker_threads) <int> 102 Specifies the number of worker-threads that will be used when 103 generating patches for incremental updates (defaults to 3). 104 105 --stash_threshold <float> 106 Specifies the threshold that will be used to compute the maximum 107 allowed stash size (defaults to 0.8). 108 109 --gen_verify 110 Generate an OTA package that verifies the partitions. 111 112 --log_diff <file> 113 Generate a log file that shows the differences in the source and target 114 builds for an incremental package. This option is only meaningful when 115 -i is specified. 116 117 --payload_signer <signer> 118 Specify the signer when signing the payload and metadata for A/B OTAs. 119 By default (i.e. without this flag), it calls 'openssl pkeyutl' to sign 120 with the package private key. If the private key cannot be accessed 121 directly, a payload signer that knows how to do that should be specified. 122 The signer will be supplied with "-inkey <path_to_key>", 123 "-in <input_file>" and "-out <output_file>" parameters. 124 125 --payload_signer_args <args> 126 Specify the arguments needed for payload signer. 127 """ 128 129 import sys 130 131 if sys.hexversion < 0x02070000: 132 print >> sys.stderr, "Python 2.7 or newer is required." 133 sys.exit(1) 134 135 import multiprocessing 136 import os 137 import subprocess 138 import shlex 139 import tempfile 140 import zipfile 141 142 import common 143 import edify_generator 144 import sparse_img 145 146 OPTIONS = common.OPTIONS 147 OPTIONS.package_key = None 148 OPTIONS.incremental_source = None 149 OPTIONS.verify = False 150 OPTIONS.require_verbatim = set() 151 OPTIONS.prohibit_verbatim = set(("system/build.prop",)) 152 OPTIONS.patch_threshold = 0.95 153 OPTIONS.wipe_user_data = False 154 OPTIONS.omit_prereq = False 155 OPTIONS.downgrade = False 156 OPTIONS.extra_script = None 157 OPTIONS.aslr_mode = True 158 OPTIONS.worker_threads = multiprocessing.cpu_count() // 2 159 if OPTIONS.worker_threads == 0: 160 OPTIONS.worker_threads = 1 161 OPTIONS.two_step = False 162 OPTIONS.no_signing = False 163 OPTIONS.block_based = False 164 OPTIONS.updater_binary = None 165 OPTIONS.oem_source = None 166 OPTIONS.oem_no_mount = False 167 OPTIONS.fallback_to_full = True 168 OPTIONS.full_radio = False 169 OPTIONS.full_bootloader = False 170 # Stash size cannot exceed cache_size * threshold. 171 OPTIONS.cache_size = None 172 OPTIONS.stash_threshold = 0.8 173 OPTIONS.gen_verify = False 174 OPTIONS.log_diff = None 175 OPTIONS.payload_signer = None 176 OPTIONS.payload_signer_args = [] 177 178 def MostPopularKey(d, default): 179 """Given a dict, return the key corresponding to the largest 180 value. Returns 'default' if the dict is empty.""" 181 x = [(v, k) for (k, v) in d.iteritems()] 182 if not x: 183 return default 184 x.sort() 185 return x[-1][1] 186 187 188 def IsSymlink(info): 189 """Return true if the zipfile.ZipInfo object passed in represents a 190 symlink.""" 191 return (info.external_attr >> 16) & 0o770000 == 0o120000 192 193 def IsRegular(info): 194 """Return true if the zipfile.ZipInfo object passed in represents a 195 regular file.""" 196 return (info.external_attr >> 16) & 0o770000 == 0o100000 197 198 def ClosestFileMatch(src, tgtfiles, existing): 199 """Returns the closest file match between a source file and list 200 of potential matches. The exact filename match is preferred, 201 then the sha1 is searched for, and finally a file with the same 202 basename is evaluated. Rename support in the updater-binary is 203 required for the latter checks to be used.""" 204 205 result = tgtfiles.get("path:" + src.name) 206 if result is not None: 207 return result 208 209 if not OPTIONS.target_info_dict.get("update_rename_support", False): 210 return None 211 212 if src.size < 1000: 213 return None 214 215 result = tgtfiles.get("sha1:" + src.sha1) 216 if result is not None and existing.get(result.name) is None: 217 return result 218 result = tgtfiles.get("file:" + src.name.split("/")[-1]) 219 if result is not None and existing.get(result.name) is None: 220 return result 221 return None 222 223 class ItemSet(object): 224 def __init__(self, partition, fs_config): 225 self.partition = partition 226 self.fs_config = fs_config 227 self.ITEMS = {} 228 229 def Get(self, name, is_dir=False): 230 if name not in self.ITEMS: 231 self.ITEMS[name] = Item(self, name, is_dir=is_dir) 232 return self.ITEMS[name] 233 234 def GetMetadata(self, input_zip): 235 # The target_files contains a record of what the uid, 236 # gid, and mode are supposed to be. 237 output = input_zip.read(self.fs_config) 238 239 for line in output.split("\n"): 240 if not line: 241 continue 242 columns = line.split() 243 name, uid, gid, mode = columns[:4] 244 selabel = None 245 capabilities = None 246 247 # After the first 4 columns, there are a series of key=value 248 # pairs. Extract out the fields we care about. 249 for element in columns[4:]: 250 key, value = element.split("=") 251 if key == "selabel": 252 selabel = value 253 if key == "capabilities": 254 capabilities = value 255 256 i = self.ITEMS.get(name, None) 257 if i is not None: 258 i.uid = int(uid) 259 i.gid = int(gid) 260 i.mode = int(mode, 8) 261 i.selabel = selabel 262 i.capabilities = capabilities 263 if i.is_dir: 264 i.children.sort(key=lambda i: i.name) 265 266 # Set metadata for the files generated by this script. For full recovery 267 # image at system/etc/recovery.img, it will be taken care by fs_config. 268 i = self.ITEMS.get("system/recovery-from-boot.p", None) 269 if i: 270 i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o644, None, None 271 i = self.ITEMS.get("system/etc/install-recovery.sh", None) 272 if i: 273 i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o544, None, None 274 275 276 class Item(object): 277 """Items represent the metadata (user, group, mode) of files and 278 directories in the system image.""" 279 def __init__(self, itemset, name, is_dir=False): 280 self.itemset = itemset 281 self.name = name 282 self.uid = None 283 self.gid = None 284 self.mode = None 285 self.selabel = None 286 self.capabilities = None 287 self.is_dir = is_dir 288 self.descendants = None 289 self.best_subtree = None 290 291 if name: 292 self.parent = itemset.Get(os.path.dirname(name), is_dir=True) 293 self.parent.children.append(self) 294 else: 295 self.parent = None 296 if self.is_dir: 297 self.children = [] 298 299 def Dump(self, indent=0): 300 if self.uid is not None: 301 print "%s%s %d %d %o" % ( 302 " " * indent, self.name, self.uid, self.gid, self.mode) 303 else: 304 print "%s%s %s %s %s" % ( 305 " " * indent, self.name, self.uid, self.gid, self.mode) 306 if self.is_dir: 307 print "%s%s" % (" "*indent, self.descendants) 308 print "%s%s" % (" "*indent, self.best_subtree) 309 for i in self.children: 310 i.Dump(indent=indent+1) 311 312 def CountChildMetadata(self): 313 """Count up the (uid, gid, mode, selabel, capabilities) tuples for 314 all children and determine the best strategy for using set_perm_recursive 315 and set_perm to correctly chown/chmod all the files to their desired 316 values. Recursively calls itself for all descendants. 317 318 Returns a dict of {(uid, gid, dmode, fmode, selabel, capabilities): count} 319 counting up all descendants of this node. (dmode or fmode may be None.) 320 Also sets the best_subtree of each directory Item to the (uid, gid, dmode, 321 fmode, selabel, capabilities) tuple that will match the most descendants of 322 that Item. 323 """ 324 325 assert self.is_dir 326 key = (self.uid, self.gid, self.mode, None, self.selabel, 327 self.capabilities) 328 self.descendants = {key: 1} 329 d = self.descendants 330 for i in self.children: 331 if i.is_dir: 332 for k, v in i.CountChildMetadata().iteritems(): 333 d[k] = d.get(k, 0) + v 334 else: 335 k = (i.uid, i.gid, None, i.mode, i.selabel, i.capabilities) 336 d[k] = d.get(k, 0) + 1 337 338 # Find the (uid, gid, dmode, fmode, selabel, capabilities) 339 # tuple that matches the most descendants. 340 341 # First, find the (uid, gid) pair that matches the most 342 # descendants. 343 ug = {} 344 for (uid, gid, _, _, _, _), count in d.iteritems(): 345 ug[(uid, gid)] = ug.get((uid, gid), 0) + count 346 ug = MostPopularKey(ug, (0, 0)) 347 348 # Now find the dmode, fmode, selabel, and capabilities that match 349 # the most descendants with that (uid, gid), and choose those. 350 best_dmode = (0, 0o755) 351 best_fmode = (0, 0o644) 352 best_selabel = (0, None) 353 best_capabilities = (0, None) 354 for k, count in d.iteritems(): 355 if k[:2] != ug: 356 continue 357 if k[2] is not None and count >= best_dmode[0]: 358 best_dmode = (count, k[2]) 359 if k[3] is not None and count >= best_fmode[0]: 360 best_fmode = (count, k[3]) 361 if k[4] is not None and count >= best_selabel[0]: 362 best_selabel = (count, k[4]) 363 if k[5] is not None and count >= best_capabilities[0]: 364 best_capabilities = (count, k[5]) 365 self.best_subtree = ug + ( 366 best_dmode[1], best_fmode[1], best_selabel[1], best_capabilities[1]) 367 368 return d 369 370 def SetPermissions(self, script): 371 """Append set_perm/set_perm_recursive commands to 'script' to 372 set all permissions, users, and groups for the tree of files 373 rooted at 'self'.""" 374 375 self.CountChildMetadata() 376 377 def recurse(item, current): 378 # current is the (uid, gid, dmode, fmode, selabel, capabilities) tuple 379 # that the current item (and all its children) have already been set to. 380 # We only need to issue set_perm/set_perm_recursive commands if we're 381 # supposed to be something different. 382 if item.is_dir: 383 if current != item.best_subtree: 384 script.SetPermissionsRecursive("/"+item.name, *item.best_subtree) 385 current = item.best_subtree 386 387 if item.uid != current[0] or item.gid != current[1] or \ 388 item.mode != current[2] or item.selabel != current[4] or \ 389 item.capabilities != current[5]: 390 script.SetPermissions("/"+item.name, item.uid, item.gid, 391 item.mode, item.selabel, item.capabilities) 392 393 for i in item.children: 394 recurse(i, current) 395 else: 396 if item.uid != current[0] or item.gid != current[1] or \ 397 item.mode != current[3] or item.selabel != current[4] or \ 398 item.capabilities != current[5]: 399 script.SetPermissions("/"+item.name, item.uid, item.gid, 400 item.mode, item.selabel, item.capabilities) 401 402 recurse(self, (-1, -1, -1, -1, None, None)) 403 404 405 def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None): 406 """Copies files for the partition in the input zip to the output 407 zip. Populates the Item class with their metadata, and returns a 408 list of symlinks. output_zip may be None, in which case the copy is 409 skipped (but the other side effects still happen). substitute is an 410 optional dict of {output filename: contents} to be output instead of 411 certain input files. 412 """ 413 414 symlinks = [] 415 416 partition = itemset.partition 417 418 for info in input_zip.infolist(): 419 prefix = partition.upper() + "/" 420 if info.filename.startswith(prefix): 421 basefilename = info.filename[len(prefix):] 422 if IsSymlink(info): 423 symlinks.append((input_zip.read(info.filename), 424 "/" + partition + "/" + basefilename)) 425 else: 426 import copy 427 info2 = copy.copy(info) 428 fn = info2.filename = partition + "/" + basefilename 429 if substitute and fn in substitute and substitute[fn] is None: 430 continue 431 if output_zip is not None: 432 if substitute and fn in substitute: 433 data = substitute[fn] 434 else: 435 data = input_zip.read(info.filename) 436 common.ZipWriteStr(output_zip, info2, data) 437 if fn.endswith("/"): 438 itemset.Get(fn[:-1], is_dir=True) 439 else: 440 itemset.Get(fn) 441 442 symlinks.sort() 443 return symlinks 444 445 446 def SignOutput(temp_zip_name, output_zip_name): 447 key_passwords = common.GetKeyPasswords([OPTIONS.package_key]) 448 pw = key_passwords[OPTIONS.package_key] 449 450 common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw, 451 whole_file=True) 452 453 454 def AppendAssertions(script, info_dict, oem_dict=None): 455 oem_props = info_dict.get("oem_fingerprint_properties") 456 if oem_props is None or len(oem_props) == 0: 457 device = GetBuildProp("ro.product.device", info_dict) 458 script.AssertDevice(device) 459 else: 460 if oem_dict is None: 461 raise common.ExternalError( 462 "No OEM file provided to answer expected assertions") 463 for prop in oem_props.split(): 464 if oem_dict.get(prop) is None: 465 raise common.ExternalError( 466 "The OEM file is missing the property %s" % prop) 467 script.AssertOemProperty(prop, oem_dict.get(prop)) 468 469 470 def HasRecoveryPatch(target_files_zip): 471 namelist = [name for name in target_files_zip.namelist()] 472 return ("SYSTEM/recovery-from-boot.p" in namelist or 473 "SYSTEM/etc/recovery.img" in namelist) 474 475 def HasVendorPartition(target_files_zip): 476 try: 477 target_files_zip.getinfo("VENDOR/") 478 return True 479 except KeyError: 480 return False 481 482 def GetOemProperty(name, oem_props, oem_dict, info_dict): 483 if oem_props is not None and name in oem_props: 484 return oem_dict[name] 485 return GetBuildProp(name, info_dict) 486 487 488 def CalculateFingerprint(oem_props, oem_dict, info_dict): 489 if oem_props is None: 490 return GetBuildProp("ro.build.fingerprint", info_dict) 491 return "%s/%s/%s:%s" % ( 492 GetOemProperty("ro.product.brand", oem_props, oem_dict, info_dict), 493 GetOemProperty("ro.product.name", oem_props, oem_dict, info_dict), 494 GetOemProperty("ro.product.device", oem_props, oem_dict, info_dict), 495 GetBuildProp("ro.build.thumbprint", info_dict)) 496 497 498 def GetImage(which, tmpdir, info_dict): 499 # Return an image object (suitable for passing to BlockImageDiff) 500 # for the 'which' partition (most be "system" or "vendor"). If a 501 # prebuilt image and file map are found in tmpdir they are used, 502 # otherwise they are reconstructed from the individual files. 503 504 assert which in ("system", "vendor") 505 506 path = os.path.join(tmpdir, "IMAGES", which + ".img") 507 mappath = os.path.join(tmpdir, "IMAGES", which + ".map") 508 if os.path.exists(path) and os.path.exists(mappath): 509 print "using %s.img from target-files" % (which,) 510 # This is a 'new' target-files, which already has the image in it. 511 512 else: 513 print "building %s.img from target-files" % (which,) 514 515 # This is an 'old' target-files, which does not contain images 516 # already built. Build them. 517 518 mappath = tempfile.mkstemp()[1] 519 OPTIONS.tempfiles.append(mappath) 520 521 import add_img_to_target_files 522 if which == "system": 523 path = add_img_to_target_files.BuildSystem( 524 tmpdir, info_dict, block_list=mappath) 525 elif which == "vendor": 526 path = add_img_to_target_files.BuildVendor( 527 tmpdir, info_dict, block_list=mappath) 528 529 # Bug: http://b/20939131 530 # In ext4 filesystems, block 0 might be changed even being mounted 531 # R/O. We add it to clobbered_blocks so that it will be written to the 532 # target unconditionally. Note that they are still part of care_map. 533 clobbered_blocks = "0" 534 535 return sparse_img.SparseImage(path, mappath, clobbered_blocks) 536 537 538 def WriteFullOTAPackage(input_zip, output_zip): 539 # TODO: how to determine this? We don't know what version it will 540 # be installed on top of. For now, we expect the API just won't 541 # change very often. Similarly for fstab, it might have changed 542 # in the target build. 543 script = edify_generator.EdifyGenerator(3, OPTIONS.info_dict) 544 545 oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties") 546 recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options") 547 oem_dict = None 548 if oem_props is not None and len(oem_props) > 0: 549 if OPTIONS.oem_source is None: 550 raise common.ExternalError("OEM source required for this build") 551 if not OPTIONS.oem_no_mount: 552 script.Mount("/oem", recovery_mount_options) 553 oem_dict = common.LoadDictionaryFromLines( 554 open(OPTIONS.oem_source).readlines()) 555 556 metadata = { 557 "post-build": CalculateFingerprint(oem_props, oem_dict, 558 OPTIONS.info_dict), 559 "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, 560 OPTIONS.info_dict), 561 "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict), 562 } 563 564 device_specific = common.DeviceSpecificParams( 565 input_zip=input_zip, 566 input_version=OPTIONS.info_dict["recovery_api_version"], 567 output_zip=output_zip, 568 script=script, 569 input_tmp=OPTIONS.input_tmp, 570 metadata=metadata, 571 info_dict=OPTIONS.info_dict) 572 573 has_recovery_patch = HasRecoveryPatch(input_zip) 574 block_based = OPTIONS.block_based and has_recovery_patch 575 576 metadata["ota-type"] = "BLOCK" if block_based else "FILE" 577 578 if not OPTIONS.omit_prereq: 579 ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict) 580 ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict) 581 script.AssertOlderBuild(ts, ts_text) 582 583 AppendAssertions(script, OPTIONS.info_dict, oem_dict) 584 device_specific.FullOTA_Assertions() 585 586 # Two-step package strategy (in chronological order, which is *not* 587 # the order in which the generated script has things): 588 # 589 # if stage is not "2/3" or "3/3": 590 # write recovery image to boot partition 591 # set stage to "2/3" 592 # reboot to boot partition and restart recovery 593 # else if stage is "2/3": 594 # write recovery image to recovery partition 595 # set stage to "3/3" 596 # reboot to recovery partition and restart recovery 597 # else: 598 # (stage must be "3/3") 599 # set stage to "" 600 # do normal full package installation: 601 # wipe and install system, boot image, etc. 602 # set up system to update recovery partition on first boot 603 # complete script normally 604 # (allow recovery to mark itself finished and reboot) 605 606 recovery_img = common.GetBootableImage("recovery.img", "recovery.img", 607 OPTIONS.input_tmp, "RECOVERY") 608 if OPTIONS.two_step: 609 if not OPTIONS.info_dict.get("multistage_support", None): 610 assert False, "two-step packages not supported by this build" 611 fs = OPTIONS.info_dict["fstab"]["/misc"] 612 assert fs.fs_type.upper() == "EMMC", \ 613 "two-step packages only supported on devices with EMMC /misc partitions" 614 bcb_dev = {"bcb_dev": fs.device} 615 common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data) 616 script.AppendExtra(""" 617 if get_stage("%(bcb_dev)s") == "2/3" then 618 """ % bcb_dev) 619 script.WriteRawImage("/recovery", "recovery.img") 620 script.AppendExtra(""" 621 set_stage("%(bcb_dev)s", "3/3"); 622 reboot_now("%(bcb_dev)s", "recovery"); 623 else if get_stage("%(bcb_dev)s") == "3/3" then 624 """ % bcb_dev) 625 626 # Dump fingerprints 627 script.Print("Target: %s" % CalculateFingerprint( 628 oem_props, oem_dict, OPTIONS.info_dict)) 629 630 device_specific.FullOTA_InstallBegin() 631 632 system_progress = 0.75 633 634 if OPTIONS.wipe_user_data: 635 system_progress -= 0.1 636 if HasVendorPartition(input_zip): 637 system_progress -= 0.1 638 639 # Place a copy of file_contexts.bin into the OTA package which will be used 640 # by the recovery program. 641 if "selinux_fc" in OPTIONS.info_dict: 642 WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip) 643 644 recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options") 645 646 system_items = ItemSet("system", "META/filesystem_config.txt") 647 script.ShowProgress(system_progress, 0) 648 649 if block_based: 650 # Full OTA is done as an "incremental" against an empty source 651 # image. This has the effect of writing new data from the package 652 # to the entire partition, but lets us reuse the updater code that 653 # writes incrementals to do it. 654 system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict) 655 system_tgt.ResetFileMap() 656 system_diff = common.BlockDifference("system", system_tgt, src=None) 657 system_diff.WriteScript(script, output_zip) 658 else: 659 script.FormatPartition("/system") 660 script.Mount("/system", recovery_mount_options) 661 if not has_recovery_patch: 662 script.UnpackPackageDir("recovery", "/system") 663 script.UnpackPackageDir("system", "/system") 664 665 symlinks = CopyPartitionFiles(system_items, input_zip, output_zip) 666 script.MakeSymlinks(symlinks) 667 668 boot_img = common.GetBootableImage( 669 "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT") 670 671 if not block_based: 672 def output_sink(fn, data): 673 common.ZipWriteStr(output_zip, "recovery/" + fn, data) 674 system_items.Get("system/" + fn) 675 676 common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, 677 recovery_img, boot_img) 678 679 system_items.GetMetadata(input_zip) 680 system_items.Get("system").SetPermissions(script) 681 682 if HasVendorPartition(input_zip): 683 vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt") 684 script.ShowProgress(0.1, 0) 685 686 if block_based: 687 vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict) 688 vendor_tgt.ResetFileMap() 689 vendor_diff = common.BlockDifference("vendor", vendor_tgt) 690 vendor_diff.WriteScript(script, output_zip) 691 else: 692 script.FormatPartition("/vendor") 693 script.Mount("/vendor", recovery_mount_options) 694 script.UnpackPackageDir("vendor", "/vendor") 695 696 symlinks = CopyPartitionFiles(vendor_items, input_zip, output_zip) 697 script.MakeSymlinks(symlinks) 698 699 vendor_items.GetMetadata(input_zip) 700 vendor_items.Get("vendor").SetPermissions(script) 701 702 common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict) 703 common.ZipWriteStr(output_zip, "boot.img", boot_img.data) 704 705 script.ShowProgress(0.05, 5) 706 script.WriteRawImage("/boot", "boot.img") 707 708 script.ShowProgress(0.2, 10) 709 device_specific.FullOTA_InstallEnd() 710 711 if OPTIONS.extra_script is not None: 712 script.AppendExtra(OPTIONS.extra_script) 713 714 script.UnmountAll() 715 716 if OPTIONS.wipe_user_data: 717 script.ShowProgress(0.1, 10) 718 script.FormatPartition("/data") 719 720 if OPTIONS.two_step: 721 script.AppendExtra(""" 722 set_stage("%(bcb_dev)s", ""); 723 """ % bcb_dev) 724 script.AppendExtra("else\n") 725 script.WriteRawImage("/boot", "recovery.img") 726 script.AppendExtra(""" 727 set_stage("%(bcb_dev)s", "2/3"); 728 reboot_now("%(bcb_dev)s", ""); 729 endif; 730 endif; 731 """ % bcb_dev) 732 733 script.SetProgress(1) 734 script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary) 735 metadata["ota-required-cache"] = str(script.required_cache) 736 WriteMetadata(metadata, output_zip) 737 738 739 def WritePolicyConfig(file_name, output_zip): 740 common.ZipWrite(output_zip, file_name, os.path.basename(file_name)) 741 742 743 def WriteMetadata(metadata, output_zip): 744 common.ZipWriteStr(output_zip, "META-INF/com/android/metadata", 745 "".join(["%s=%s\n" % kv 746 for kv in sorted(metadata.iteritems())])) 747 748 749 def LoadPartitionFiles(z, partition): 750 """Load all the files from the given partition in a given target-files 751 ZipFile, and return a dict of {filename: File object}.""" 752 out = {} 753 prefix = partition.upper() + "/" 754 for info in z.infolist(): 755 if info.filename.startswith(prefix) and not IsSymlink(info): 756 basefilename = info.filename[len(prefix):] 757 fn = partition + "/" + basefilename 758 data = z.read(info.filename) 759 out[fn] = common.File(fn, data) 760 return out 761 762 763 def GetBuildProp(prop, info_dict): 764 """Return the fingerprint of the build of a given target-files info_dict.""" 765 try: 766 return info_dict.get("build.prop", {})[prop] 767 except KeyError: 768 raise common.ExternalError("couldn't find %s in build.prop" % (prop,)) 769 770 771 def AddToKnownPaths(filename, known_paths): 772 if filename[-1] == "/": 773 return 774 dirs = filename.split("/")[:-1] 775 while len(dirs) > 0: 776 path = "/".join(dirs) 777 if path in known_paths: 778 break 779 known_paths.add(path) 780 dirs.pop() 781 782 783 def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip): 784 # TODO(tbao): We should factor out the common parts between 785 # WriteBlockIncrementalOTAPackage() and WriteIncrementalOTAPackage(). 786 source_version = OPTIONS.source_info_dict["recovery_api_version"] 787 target_version = OPTIONS.target_info_dict["recovery_api_version"] 788 789 if source_version == 0: 790 print ("WARNING: generating edify script for a source that " 791 "can't install it.") 792 script = edify_generator.EdifyGenerator( 793 source_version, OPTIONS.target_info_dict, 794 fstab=OPTIONS.source_info_dict["fstab"]) 795 796 oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties") 797 recovery_mount_options = OPTIONS.source_info_dict.get( 798 "recovery_mount_options") 799 oem_dict = None 800 if oem_props is not None and len(oem_props) > 0: 801 if OPTIONS.oem_source is None: 802 raise common.ExternalError("OEM source required for this build") 803 if not OPTIONS.oem_no_mount: 804 script.Mount("/oem", recovery_mount_options) 805 oem_dict = common.LoadDictionaryFromLines( 806 open(OPTIONS.oem_source).readlines()) 807 808 metadata = { 809 "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, 810 OPTIONS.source_info_dict), 811 "ota-type": "BLOCK", 812 } 813 814 post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict) 815 pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict) 816 is_downgrade = long(post_timestamp) < long(pre_timestamp) 817 818 if OPTIONS.downgrade: 819 metadata["ota-downgrade"] = "yes" 820 if not is_downgrade: 821 raise RuntimeError("--downgrade specified but no downgrade detected: " 822 "pre: %s, post: %s" % (pre_timestamp, post_timestamp)) 823 else: 824 if is_downgrade: 825 # Non-fatal here to allow generating such a package which may require 826 # manual work to adjust the post-timestamp. A legit use case is that we 827 # cut a new build C (after having A and B), but want to enfore the 828 # update path of A -> C -> B. Specifying --downgrade may not help since 829 # that would enforce a data wipe for C -> B update. 830 print("\nWARNING: downgrade detected: pre: %s, post: %s.\n" 831 "The package may not be deployed properly. " 832 "Try --downgrade?\n" % (pre_timestamp, post_timestamp)) 833 metadata["post-timestamp"] = post_timestamp 834 835 device_specific = common.DeviceSpecificParams( 836 source_zip=source_zip, 837 source_version=source_version, 838 target_zip=target_zip, 839 target_version=target_version, 840 output_zip=output_zip, 841 script=script, 842 metadata=metadata, 843 info_dict=OPTIONS.source_info_dict) 844 845 source_fp = CalculateFingerprint(oem_props, oem_dict, 846 OPTIONS.source_info_dict) 847 target_fp = CalculateFingerprint(oem_props, oem_dict, 848 OPTIONS.target_info_dict) 849 metadata["pre-build"] = source_fp 850 metadata["post-build"] = target_fp 851 metadata["pre-build-incremental"] = GetBuildProp( 852 "ro.build.version.incremental", OPTIONS.source_info_dict) 853 metadata["post-build-incremental"] = GetBuildProp( 854 "ro.build.version.incremental", OPTIONS.target_info_dict) 855 856 source_boot = common.GetBootableImage( 857 "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", 858 OPTIONS.source_info_dict) 859 target_boot = common.GetBootableImage( 860 "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT") 861 updating_boot = (not OPTIONS.two_step and 862 (source_boot.data != target_boot.data)) 863 864 target_recovery = common.GetBootableImage( 865 "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY") 866 867 system_src = GetImage("system", OPTIONS.source_tmp, OPTIONS.source_info_dict) 868 system_tgt = GetImage("system", OPTIONS.target_tmp, OPTIONS.target_info_dict) 869 870 blockimgdiff_version = 1 871 if OPTIONS.info_dict: 872 blockimgdiff_version = max( 873 int(i) for i in 874 OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(",")) 875 876 # Check the first block of the source system partition for remount R/W only 877 # if the filesystem is ext4. 878 system_src_partition = OPTIONS.source_info_dict["fstab"]["/system"] 879 check_first_block = system_src_partition.fs_type == "ext4" 880 # Disable using imgdiff for squashfs. 'imgdiff -z' expects input files to be 881 # in zip formats. However with squashfs, a) all files are compressed in LZ4; 882 # b) the blocks listed in block map may not contain all the bytes for a given 883 # file (because they're rounded to be 4K-aligned). 884 system_tgt_partition = OPTIONS.target_info_dict["fstab"]["/system"] 885 disable_imgdiff = (system_src_partition.fs_type == "squashfs" or 886 system_tgt_partition.fs_type == "squashfs") 887 system_diff = common.BlockDifference("system", system_tgt, system_src, 888 check_first_block, 889 version=blockimgdiff_version, 890 disable_imgdiff=disable_imgdiff) 891 892 if HasVendorPartition(target_zip): 893 if not HasVendorPartition(source_zip): 894 raise RuntimeError("can't generate incremental that adds /vendor") 895 vendor_src = GetImage("vendor", OPTIONS.source_tmp, 896 OPTIONS.source_info_dict) 897 vendor_tgt = GetImage("vendor", OPTIONS.target_tmp, 898 OPTIONS.target_info_dict) 899 900 # Check first block of vendor partition for remount R/W only if 901 # disk type is ext4 902 vendor_partition = OPTIONS.source_info_dict["fstab"]["/vendor"] 903 check_first_block = vendor_partition.fs_type == "ext4" 904 disable_imgdiff = vendor_partition.fs_type == "squashfs" 905 vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src, 906 check_first_block, 907 version=blockimgdiff_version, 908 disable_imgdiff=disable_imgdiff) 909 else: 910 vendor_diff = None 911 912 AppendAssertions(script, OPTIONS.target_info_dict, oem_dict) 913 device_specific.IncrementalOTA_Assertions() 914 915 # Two-step incremental package strategy (in chronological order, 916 # which is *not* the order in which the generated script has 917 # things): 918 # 919 # if stage is not "2/3" or "3/3": 920 # do verification on current system 921 # write recovery image to boot partition 922 # set stage to "2/3" 923 # reboot to boot partition and restart recovery 924 # else if stage is "2/3": 925 # write recovery image to recovery partition 926 # set stage to "3/3" 927 # reboot to recovery partition and restart recovery 928 # else: 929 # (stage must be "3/3") 930 # perform update: 931 # patch system files, etc. 932 # force full install of new boot image 933 # set up system to update recovery partition on first boot 934 # complete script normally 935 # (allow recovery to mark itself finished and reboot) 936 937 if OPTIONS.two_step: 938 if not OPTIONS.source_info_dict.get("multistage_support", None): 939 assert False, "two-step packages not supported by this build" 940 fs = OPTIONS.source_info_dict["fstab"]["/misc"] 941 assert fs.fs_type.upper() == "EMMC", \ 942 "two-step packages only supported on devices with EMMC /misc partitions" 943 bcb_dev = {"bcb_dev": fs.device} 944 common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data) 945 script.AppendExtra(""" 946 if get_stage("%(bcb_dev)s") == "2/3" then 947 """ % bcb_dev) 948 script.AppendExtra("sleep(20);\n") 949 script.WriteRawImage("/recovery", "recovery.img") 950 script.AppendExtra(""" 951 set_stage("%(bcb_dev)s", "3/3"); 952 reboot_now("%(bcb_dev)s", "recovery"); 953 else if get_stage("%(bcb_dev)s") != "3/3" then 954 """ % bcb_dev) 955 956 # Dump fingerprints 957 script.Print("Source: %s" % CalculateFingerprint( 958 oem_props, oem_dict, OPTIONS.source_info_dict)) 959 script.Print("Target: %s" % CalculateFingerprint( 960 oem_props, oem_dict, OPTIONS.target_info_dict)) 961 962 script.Print("Verifying current system...") 963 964 device_specific.IncrementalOTA_VerifyBegin() 965 966 if oem_props is None: 967 # When blockimgdiff version is less than 3 (non-resumable block-based OTA), 968 # patching on a device that's already on the target build will damage the 969 # system. Because operations like move don't check the block state, they 970 # always apply the changes unconditionally. 971 if blockimgdiff_version <= 2: 972 script.AssertSomeFingerprint(source_fp) 973 else: 974 script.AssertSomeFingerprint(source_fp, target_fp) 975 else: 976 if blockimgdiff_version <= 2: 977 script.AssertSomeThumbprint( 978 GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) 979 else: 980 script.AssertSomeThumbprint( 981 GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict), 982 GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) 983 984 # Check the required cache size (i.e. stashed blocks). 985 size = [] 986 if system_diff: 987 size.append(system_diff.required_cache) 988 if vendor_diff: 989 size.append(vendor_diff.required_cache) 990 991 if updating_boot: 992 boot_type, boot_device = common.GetTypeAndDevice( 993 "/boot", OPTIONS.source_info_dict) 994 d = common.Difference(target_boot, source_boot) 995 _, _, d = d.ComputePatch() 996 if d is None: 997 include_full_boot = True 998 common.ZipWriteStr(output_zip, "boot.img", target_boot.data) 999 else: 1000 include_full_boot = False 1001 1002 print "boot target: %d source: %d diff: %d" % ( 1003 target_boot.size, source_boot.size, len(d)) 1004 1005 common.ZipWriteStr(output_zip, "patch/boot.img.p", d) 1006 1007 script.PatchCheck("%s:%s:%d:%s:%d:%s" % 1008 (boot_type, boot_device, 1009 source_boot.size, source_boot.sha1, 1010 target_boot.size, target_boot.sha1)) 1011 size.append(target_boot.size) 1012 1013 if size: 1014 script.CacheFreeSpaceCheck(max(size)) 1015 1016 device_specific.IncrementalOTA_VerifyEnd() 1017 1018 if OPTIONS.two_step: 1019 script.WriteRawImage("/boot", "recovery.img") 1020 script.AppendExtra(""" 1021 set_stage("%(bcb_dev)s", "2/3"); 1022 reboot_now("%(bcb_dev)s", ""); 1023 else 1024 """ % bcb_dev) 1025 1026 # Verify the existing partitions. 1027 system_diff.WriteVerifyScript(script, touched_blocks_only=True) 1028 if vendor_diff: 1029 vendor_diff.WriteVerifyScript(script, touched_blocks_only=True) 1030 1031 script.Comment("---- start making changes here ----") 1032 1033 device_specific.IncrementalOTA_InstallBegin() 1034 1035 system_diff.WriteScript(script, output_zip, 1036 progress=0.8 if vendor_diff else 0.9) 1037 1038 if vendor_diff: 1039 vendor_diff.WriteScript(script, output_zip, progress=0.1) 1040 1041 if OPTIONS.two_step: 1042 common.ZipWriteStr(output_zip, "boot.img", target_boot.data) 1043 script.WriteRawImage("/boot", "boot.img") 1044 print "writing full boot image (forced by two-step mode)" 1045 1046 if not OPTIONS.two_step: 1047 if updating_boot: 1048 if include_full_boot: 1049 print "boot image changed; including full." 1050 script.Print("Installing boot image...") 1051 script.WriteRawImage("/boot", "boot.img") 1052 else: 1053 # Produce the boot image by applying a patch to the current 1054 # contents of the boot partition, and write it back to the 1055 # partition. 1056 print "boot image changed; including patch." 1057 script.Print("Patching boot image...") 1058 script.ShowProgress(0.1, 10) 1059 script.ApplyPatch("%s:%s:%d:%s:%d:%s" 1060 % (boot_type, boot_device, 1061 source_boot.size, source_boot.sha1, 1062 target_boot.size, target_boot.sha1), 1063 "-", 1064 target_boot.size, target_boot.sha1, 1065 source_boot.sha1, "patch/boot.img.p") 1066 else: 1067 print "boot image unchanged; skipping." 1068 1069 # Do device-specific installation (eg, write radio image). 1070 device_specific.IncrementalOTA_InstallEnd() 1071 1072 if OPTIONS.extra_script is not None: 1073 script.AppendExtra(OPTIONS.extra_script) 1074 1075 if OPTIONS.wipe_user_data: 1076 script.Print("Erasing user data...") 1077 script.FormatPartition("/data") 1078 metadata["ota-wipe"] = "yes" 1079 1080 if OPTIONS.two_step: 1081 script.AppendExtra(""" 1082 set_stage("%(bcb_dev)s", ""); 1083 endif; 1084 endif; 1085 """ % bcb_dev) 1086 1087 script.SetProgress(1) 1088 # For downgrade OTAs, we prefer to use the update-binary in the source 1089 # build that is actually newer than the one in the target build. 1090 if OPTIONS.downgrade: 1091 script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary) 1092 else: 1093 script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary) 1094 metadata["ota-required-cache"] = str(script.required_cache) 1095 WriteMetadata(metadata, output_zip) 1096 1097 1098 def WriteVerifyPackage(input_zip, output_zip): 1099 script = edify_generator.EdifyGenerator(3, OPTIONS.info_dict) 1100 1101 oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties") 1102 recovery_mount_options = OPTIONS.info_dict.get( 1103 "recovery_mount_options") 1104 oem_dict = None 1105 if oem_props is not None and len(oem_props) > 0: 1106 if OPTIONS.oem_source is None: 1107 raise common.ExternalError("OEM source required for this build") 1108 script.Mount("/oem", recovery_mount_options) 1109 oem_dict = common.LoadDictionaryFromLines( 1110 open(OPTIONS.oem_source).readlines()) 1111 1112 target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.info_dict) 1113 metadata = { 1114 "post-build": target_fp, 1115 "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, 1116 OPTIONS.info_dict), 1117 "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict), 1118 } 1119 1120 device_specific = common.DeviceSpecificParams( 1121 input_zip=input_zip, 1122 input_version=OPTIONS.info_dict["recovery_api_version"], 1123 output_zip=output_zip, 1124 script=script, 1125 input_tmp=OPTIONS.input_tmp, 1126 metadata=metadata, 1127 info_dict=OPTIONS.info_dict) 1128 1129 AppendAssertions(script, OPTIONS.info_dict, oem_dict) 1130 1131 script.Print("Verifying device images against %s..." % target_fp) 1132 script.AppendExtra("") 1133 1134 script.Print("Verifying boot...") 1135 boot_img = common.GetBootableImage( 1136 "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT") 1137 boot_type, boot_device = common.GetTypeAndDevice( 1138 "/boot", OPTIONS.info_dict) 1139 script.Verify("%s:%s:%d:%s" % ( 1140 boot_type, boot_device, boot_img.size, boot_img.sha1)) 1141 script.AppendExtra("") 1142 1143 script.Print("Verifying recovery...") 1144 recovery_img = common.GetBootableImage( 1145 "recovery.img", "recovery.img", OPTIONS.input_tmp, "RECOVERY") 1146 recovery_type, recovery_device = common.GetTypeAndDevice( 1147 "/recovery", OPTIONS.info_dict) 1148 script.Verify("%s:%s:%d:%s" % ( 1149 recovery_type, recovery_device, recovery_img.size, recovery_img.sha1)) 1150 script.AppendExtra("") 1151 1152 system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict) 1153 system_tgt.ResetFileMap() 1154 system_diff = common.BlockDifference("system", system_tgt, src=None) 1155 system_diff.WriteStrictVerifyScript(script) 1156 1157 if HasVendorPartition(input_zip): 1158 vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict) 1159 vendor_tgt.ResetFileMap() 1160 vendor_diff = common.BlockDifference("vendor", vendor_tgt, src=None) 1161 vendor_diff.WriteStrictVerifyScript(script) 1162 1163 # Device specific partitions, such as radio, bootloader and etc. 1164 device_specific.VerifyOTA_Assertions() 1165 1166 script.SetProgress(1.0) 1167 script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary) 1168 metadata["ota-required-cache"] = str(script.required_cache) 1169 WriteMetadata(metadata, output_zip) 1170 1171 1172 def WriteABOTAPackageWithBrilloScript(target_file, output_file, 1173 source_file=None): 1174 """Generate an Android OTA package that has A/B update payload.""" 1175 1176 # Setup signing keys. 1177 if OPTIONS.package_key is None: 1178 OPTIONS.package_key = OPTIONS.info_dict.get( 1179 "default_system_dev_certificate", 1180 "build/target/product/security/testkey") 1181 1182 # A/B updater expects a signing key in RSA format. Gets the key ready for 1183 # later use in step 3, unless a payload_signer has been specified. 1184 if OPTIONS.payload_signer is None: 1185 cmd = ["openssl", "pkcs8", 1186 "-in", OPTIONS.package_key + OPTIONS.private_key_suffix, 1187 "-inform", "DER", "-nocrypt"] 1188 rsa_key = common.MakeTempFile(prefix="key-", suffix=".key") 1189 cmd.extend(["-out", rsa_key]) 1190 p1 = common.Run(cmd, stdout=subprocess.PIPE) 1191 p1.wait() 1192 assert p1.returncode == 0, "openssl pkcs8 failed" 1193 1194 # Stage the output zip package for package signing. 1195 temp_zip_file = tempfile.NamedTemporaryFile() 1196 output_zip = zipfile.ZipFile(temp_zip_file, "w", 1197 compression=zipfile.ZIP_DEFLATED) 1198 1199 # Metadata to comply with Android OTA package format. 1200 oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties", None) 1201 oem_dict = None 1202 if oem_props: 1203 if OPTIONS.oem_source is None: 1204 raise common.ExternalError("OEM source required for this build") 1205 oem_dict = common.LoadDictionaryFromLines( 1206 open(OPTIONS.oem_source).readlines()) 1207 1208 metadata = { 1209 "post-build": CalculateFingerprint(oem_props, oem_dict, 1210 OPTIONS.info_dict), 1211 "post-build-incremental" : GetBuildProp("ro.build.version.incremental", 1212 OPTIONS.info_dict), 1213 "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, 1214 OPTIONS.info_dict), 1215 "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict), 1216 "ota-required-cache": "0", 1217 "ota-type": "AB", 1218 } 1219 1220 if source_file is not None: 1221 metadata["pre-build"] = CalculateFingerprint(oem_props, oem_dict, 1222 OPTIONS.source_info_dict) 1223 metadata["pre-build-incremental"] = GetBuildProp( 1224 "ro.build.version.incremental", OPTIONS.source_info_dict) 1225 1226 # 1. Generate payload. 1227 payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin") 1228 cmd = ["brillo_update_payload", "generate", 1229 "--payload", payload_file, 1230 "--target_image", target_file] 1231 if source_file is not None: 1232 cmd.extend(["--source_image", source_file]) 1233 p1 = common.Run(cmd, stdout=subprocess.PIPE) 1234 p1.wait() 1235 assert p1.returncode == 0, "brillo_update_payload generate failed" 1236 1237 # 2. Generate hashes of the payload and metadata files. 1238 payload_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin") 1239 metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin") 1240 cmd = ["brillo_update_payload", "hash", 1241 "--unsigned_payload", payload_file, 1242 "--signature_size", "256", 1243 "--metadata_hash_file", metadata_sig_file, 1244 "--payload_hash_file", payload_sig_file] 1245 p1 = common.Run(cmd, stdout=subprocess.PIPE) 1246 p1.wait() 1247 assert p1.returncode == 0, "brillo_update_payload hash failed" 1248 1249 # 3. Sign the hashes and insert them back into the payload file. 1250 signed_payload_sig_file = common.MakeTempFile(prefix="signed-sig-", 1251 suffix=".bin") 1252 signed_metadata_sig_file = common.MakeTempFile(prefix="signed-sig-", 1253 suffix=".bin") 1254 # 3a. Sign the payload hash. 1255 if OPTIONS.payload_signer is not None: 1256 cmd = [OPTIONS.payload_signer] 1257 cmd.extend(OPTIONS.payload_signer_args) 1258 else: 1259 cmd = ["openssl", "pkeyutl", "-sign", 1260 "-inkey", rsa_key, 1261 "-pkeyopt", "digest:sha256"] 1262 cmd.extend(["-in", payload_sig_file, 1263 "-out", signed_payload_sig_file]) 1264 1265 p1 = common.Run(cmd, stdout=subprocess.PIPE) 1266 p1.wait() 1267 assert p1.returncode == 0, "openssl sign payload failed" 1268 1269 # 3b. Sign the metadata hash. 1270 if OPTIONS.payload_signer is not None: 1271 cmd = [OPTIONS.payload_signer] 1272 cmd.extend(OPTIONS.payload_signer_args) 1273 else: 1274 cmd = ["openssl", "pkeyutl", "-sign", 1275 "-inkey", rsa_key, 1276 "-pkeyopt", "digest:sha256"] 1277 cmd.extend(["-in", metadata_sig_file, 1278 "-out", signed_metadata_sig_file]) 1279 p1 = common.Run(cmd, stdout=subprocess.PIPE) 1280 p1.wait() 1281 assert p1.returncode == 0, "openssl sign metadata failed" 1282 1283 # 3c. Insert the signatures back into the payload file. 1284 signed_payload_file = common.MakeTempFile(prefix="signed-payload-", 1285 suffix=".bin") 1286 cmd = ["brillo_update_payload", "sign", 1287 "--unsigned_payload", payload_file, 1288 "--payload", signed_payload_file, 1289 "--signature_size", "256", 1290 "--metadata_signature_file", signed_metadata_sig_file, 1291 "--payload_signature_file", signed_payload_sig_file] 1292 p1 = common.Run(cmd, stdout=subprocess.PIPE) 1293 p1.wait() 1294 assert p1.returncode == 0, "brillo_update_payload sign failed" 1295 1296 # 4. Dump the signed payload properties. 1297 properties_file = common.MakeTempFile(prefix="payload-properties-", 1298 suffix=".txt") 1299 cmd = ["brillo_update_payload", "properties", 1300 "--payload", signed_payload_file, 1301 "--properties_file", properties_file] 1302 p1 = common.Run(cmd, stdout=subprocess.PIPE) 1303 p1.wait() 1304 assert p1.returncode == 0, "brillo_update_payload properties failed" 1305 1306 if OPTIONS.wipe_user_data: 1307 with open(properties_file, "a") as f: 1308 f.write("POWERWASH=1\n") 1309 metadata["ota-wipe"] = "yes" 1310 1311 # Add the signed payload file and properties into the zip. 1312 common.ZipWrite(output_zip, properties_file, arcname="payload_properties.txt") 1313 common.ZipWrite(output_zip, signed_payload_file, arcname="payload.bin", 1314 compress_type=zipfile.ZIP_STORED) 1315 WriteMetadata(metadata, output_zip) 1316 1317 # If dm-verity is supported for the device, copy contents of care_map 1318 # into A/B OTA package. 1319 if OPTIONS.info_dict.get("verity") == "true": 1320 target_zip = zipfile.ZipFile(target_file, "r") 1321 care_map_path = "META/care_map.txt" 1322 namelist = target_zip.namelist() 1323 if care_map_path in namelist: 1324 care_map_data = target_zip.read(care_map_path) 1325 common.ZipWriteStr(output_zip, "care_map.txt", care_map_data) 1326 else: 1327 print "Warning: cannot find care map file in target_file package" 1328 common.ZipClose(target_zip) 1329 1330 # Sign the whole package to comply with the Android OTA package format. 1331 common.ZipClose(output_zip) 1332 SignOutput(temp_zip_file.name, output_file) 1333 temp_zip_file.close() 1334 1335 1336 class FileDifference(object): 1337 def __init__(self, partition, source_zip, target_zip, output_zip): 1338 self.deferred_patch_list = None 1339 print "Loading target..." 1340 self.target_data = target_data = LoadPartitionFiles(target_zip, partition) 1341 print "Loading source..." 1342 self.source_data = source_data = LoadPartitionFiles(source_zip, partition) 1343 1344 self.verbatim_targets = verbatim_targets = [] 1345 self.patch_list = patch_list = [] 1346 diffs = [] 1347 self.renames = renames = {} 1348 known_paths = set() 1349 largest_source_size = 0 1350 1351 matching_file_cache = {} 1352 for fn, sf in source_data.items(): 1353 assert fn == sf.name 1354 matching_file_cache["path:" + fn] = sf 1355 if fn in target_data.keys(): 1356 AddToKnownPaths(fn, known_paths) 1357 # Only allow eligibility for filename/sha matching 1358 # if there isn't a perfect path match. 1359 if target_data.get(sf.name) is None: 1360 matching_file_cache["file:" + fn.split("/")[-1]] = sf 1361 matching_file_cache["sha:" + sf.sha1] = sf 1362 1363 for fn in sorted(target_data.keys()): 1364 tf = target_data[fn] 1365 assert fn == tf.name 1366 sf = ClosestFileMatch(tf, matching_file_cache, renames) 1367 if sf is not None and sf.name != tf.name: 1368 print "File has moved from " + sf.name + " to " + tf.name 1369 renames[sf.name] = tf 1370 1371 if sf is None or fn in OPTIONS.require_verbatim: 1372 # This file should be included verbatim 1373 if fn in OPTIONS.prohibit_verbatim: 1374 raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,)) 1375 print "send", fn, "verbatim" 1376 tf.AddToZip(output_zip) 1377 verbatim_targets.append((fn, tf.size, tf.sha1)) 1378 if fn in target_data.keys(): 1379 AddToKnownPaths(fn, known_paths) 1380 elif tf.sha1 != sf.sha1: 1381 # File is different; consider sending as a patch 1382 diffs.append(common.Difference(tf, sf)) 1383 else: 1384 # Target file data identical to source (may still be renamed) 1385 pass 1386 1387 common.ComputeDifferences(diffs) 1388 1389 for diff in diffs: 1390 tf, sf, d = diff.GetPatch() 1391 path = "/".join(tf.name.split("/")[:-1]) 1392 if d is None or len(d) > tf.size * OPTIONS.patch_threshold or \ 1393 path not in known_paths: 1394 # patch is almost as big as the file; don't bother patching 1395 # or a patch + rename cannot take place due to the target 1396 # directory not existing 1397 tf.AddToZip(output_zip) 1398 verbatim_targets.append((tf.name, tf.size, tf.sha1)) 1399 if sf.name in renames: 1400 del renames[sf.name] 1401 AddToKnownPaths(tf.name, known_paths) 1402 else: 1403 common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d) 1404 patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest())) 1405 largest_source_size = max(largest_source_size, sf.size) 1406 1407 self.largest_source_size = largest_source_size 1408 1409 def EmitVerification(self, script): 1410 so_far = 0 1411 for tf, sf, _, _ in self.patch_list: 1412 if tf.name != sf.name: 1413 script.SkipNextActionIfTargetExists(tf.name, tf.sha1) 1414 script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1) 1415 so_far += sf.size 1416 return so_far 1417 1418 def EmitExplicitTargetVerification(self, script): 1419 for fn, _, sha1 in self.verbatim_targets: 1420 if fn[-1] != "/": 1421 script.FileCheck("/"+fn, sha1) 1422 for tf, _, _, _ in self.patch_list: 1423 script.FileCheck(tf.name, tf.sha1) 1424 1425 def RemoveUnneededFiles(self, script, extras=()): 1426 file_list = ["/" + i[0] for i in self.verbatim_targets] 1427 file_list += ["/" + i for i in self.source_data 1428 if i not in self.target_data and i not in self.renames] 1429 file_list += list(extras) 1430 # Sort the list in descending order, which removes all the files first 1431 # before attempting to remove the folder. (Bug: 22960996) 1432 script.DeleteFiles(sorted(file_list, reverse=True)) 1433 1434 def TotalPatchSize(self): 1435 return sum(i[1].size for i in self.patch_list) 1436 1437 def EmitPatches(self, script, total_patch_size, so_far): 1438 self.deferred_patch_list = deferred_patch_list = [] 1439 for item in self.patch_list: 1440 tf, sf, _, _ = item 1441 if tf.name == "system/build.prop": 1442 deferred_patch_list.append(item) 1443 continue 1444 if sf.name != tf.name: 1445 script.SkipNextActionIfTargetExists(tf.name, tf.sha1) 1446 script.ApplyPatch("/" + sf.name, "-", tf.size, tf.sha1, sf.sha1, 1447 "patch/" + sf.name + ".p") 1448 so_far += tf.size 1449 script.SetProgress(so_far / total_patch_size) 1450 return so_far 1451 1452 def EmitDeferredPatches(self, script): 1453 for item in self.deferred_patch_list: 1454 tf, sf, _, _ = item 1455 script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, 1456 "patch/" + sf.name + ".p") 1457 script.SetPermissions("/system/build.prop", 0, 0, 0o644, None, None) 1458 1459 def EmitRenames(self, script): 1460 if len(self.renames) > 0: 1461 script.Print("Renaming files...") 1462 for src, tgt in self.renames.iteritems(): 1463 print "Renaming " + src + " to " + tgt.name 1464 script.RenameFile(src, tgt.name) 1465 1466 1467 def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip): 1468 target_has_recovery_patch = HasRecoveryPatch(target_zip) 1469 source_has_recovery_patch = HasRecoveryPatch(source_zip) 1470 1471 if (OPTIONS.block_based and 1472 target_has_recovery_patch and 1473 source_has_recovery_patch): 1474 return WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip) 1475 1476 source_version = OPTIONS.source_info_dict["recovery_api_version"] 1477 target_version = OPTIONS.target_info_dict["recovery_api_version"] 1478 1479 if source_version == 0: 1480 print ("WARNING: generating edify script for a source that " 1481 "can't install it.") 1482 script = edify_generator.EdifyGenerator( 1483 source_version, OPTIONS.target_info_dict, 1484 fstab=OPTIONS.source_info_dict["fstab"]) 1485 1486 oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties") 1487 recovery_mount_options = OPTIONS.source_info_dict.get( 1488 "recovery_mount_options") 1489 oem_dict = None 1490 if oem_props is not None and len(oem_props) > 0: 1491 if OPTIONS.oem_source is None: 1492 raise common.ExternalError("OEM source required for this build") 1493 if not OPTIONS.oem_no_mount: 1494 script.Mount("/oem", recovery_mount_options) 1495 oem_dict = common.LoadDictionaryFromLines( 1496 open(OPTIONS.oem_source).readlines()) 1497 1498 metadata = { 1499 "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict, 1500 OPTIONS.source_info_dict), 1501 "ota-type": "FILE", 1502 } 1503 1504 post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict) 1505 pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict) 1506 is_downgrade = long(post_timestamp) < long(pre_timestamp) 1507 1508 if OPTIONS.downgrade: 1509 metadata["ota-downgrade"] = "yes" 1510 if not is_downgrade: 1511 raise RuntimeError("--downgrade specified but no downgrade detected: " 1512 "pre: %s, post: %s" % (pre_timestamp, post_timestamp)) 1513 else: 1514 if is_downgrade: 1515 # Non-fatal here to allow generating such a package which may require 1516 # manual work to adjust the post-timestamp. A legit use case is that we 1517 # cut a new build C (after having A and B), but want to enfore the 1518 # update path of A -> C -> B. Specifying --downgrade may not help since 1519 # that would enforce a data wipe for C -> B update. 1520 print("\nWARNING: downgrade detected: pre: %s, post: %s.\n" 1521 "The package may not be deployed properly. " 1522 "Try --downgrade?\n" % (pre_timestamp, post_timestamp)) 1523 metadata["post-timestamp"] = post_timestamp 1524 1525 device_specific = common.DeviceSpecificParams( 1526 source_zip=source_zip, 1527 source_version=source_version, 1528 target_zip=target_zip, 1529 target_version=target_version, 1530 output_zip=output_zip, 1531 script=script, 1532 metadata=metadata, 1533 info_dict=OPTIONS.source_info_dict) 1534 1535 system_diff = FileDifference("system", source_zip, target_zip, output_zip) 1536 script.Mount("/system", recovery_mount_options) 1537 if HasVendorPartition(target_zip): 1538 vendor_diff = FileDifference("vendor", source_zip, target_zip, output_zip) 1539 script.Mount("/vendor", recovery_mount_options) 1540 else: 1541 vendor_diff = None 1542 1543 target_fp = CalculateFingerprint(oem_props, oem_dict, 1544 OPTIONS.target_info_dict) 1545 source_fp = CalculateFingerprint(oem_props, oem_dict, 1546 OPTIONS.source_info_dict) 1547 1548 if oem_props is None: 1549 script.AssertSomeFingerprint(source_fp, target_fp) 1550 else: 1551 script.AssertSomeThumbprint( 1552 GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict), 1553 GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict)) 1554 1555 metadata["pre-build"] = source_fp 1556 metadata["post-build"] = target_fp 1557 metadata["pre-build-incremental"] = GetBuildProp( 1558 "ro.build.version.incremental", OPTIONS.source_info_dict) 1559 metadata["post-build-incremental"] = GetBuildProp( 1560 "ro.build.version.incremental", OPTIONS.target_info_dict) 1561 1562 source_boot = common.GetBootableImage( 1563 "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", 1564 OPTIONS.source_info_dict) 1565 target_boot = common.GetBootableImage( 1566 "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT") 1567 updating_boot = (not OPTIONS.two_step and 1568 (source_boot.data != target_boot.data)) 1569 1570 source_recovery = common.GetBootableImage( 1571 "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY", 1572 OPTIONS.source_info_dict) 1573 target_recovery = common.GetBootableImage( 1574 "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY") 1575 updating_recovery = (source_recovery.data != target_recovery.data) 1576 1577 # Here's how we divide up the progress bar: 1578 # 0.1 for verifying the start state (PatchCheck calls) 1579 # 0.8 for applying patches (ApplyPatch calls) 1580 # 0.1 for unpacking verbatim files, symlinking, and doing the 1581 # device-specific commands. 1582 1583 AppendAssertions(script, OPTIONS.target_info_dict, oem_dict) 1584 device_specific.IncrementalOTA_Assertions() 1585 1586 # Two-step incremental package strategy (in chronological order, 1587 # which is *not* the order in which the generated script has 1588 # things): 1589 # 1590 # if stage is not "2/3" or "3/3": 1591 # do verification on current system 1592 # write recovery image to boot partition 1593 # set stage to "2/3" 1594 # reboot to boot partition and restart recovery 1595 # else if stage is "2/3": 1596 # write recovery image to recovery partition 1597 # set stage to "3/3" 1598 # reboot to recovery partition and restart recovery 1599 # else: 1600 # (stage must be "3/3") 1601 # perform update: 1602 # patch system files, etc. 1603 # force full install of new boot image 1604 # set up system to update recovery partition on first boot 1605 # complete script normally 1606 # (allow recovery to mark itself finished and reboot) 1607 1608 if OPTIONS.two_step: 1609 if not OPTIONS.source_info_dict.get("multistage_support", None): 1610 assert False, "two-step packages not supported by this build" 1611 fs = OPTIONS.source_info_dict["fstab"]["/misc"] 1612 assert fs.fs_type.upper() == "EMMC", \ 1613 "two-step packages only supported on devices with EMMC /misc partitions" 1614 bcb_dev = {"bcb_dev": fs.device} 1615 common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data) 1616 script.AppendExtra(""" 1617 if get_stage("%(bcb_dev)s") == "2/3" then 1618 """ % bcb_dev) 1619 script.AppendExtra("sleep(20);\n") 1620 script.WriteRawImage("/recovery", "recovery.img") 1621 script.AppendExtra(""" 1622 set_stage("%(bcb_dev)s", "3/3"); 1623 reboot_now("%(bcb_dev)s", "recovery"); 1624 else if get_stage("%(bcb_dev)s") != "3/3" then 1625 """ % bcb_dev) 1626 1627 # Dump fingerprints 1628 script.Print("Source: %s" % (source_fp,)) 1629 script.Print("Target: %s" % (target_fp,)) 1630 1631 script.Print("Verifying current system...") 1632 1633 device_specific.IncrementalOTA_VerifyBegin() 1634 1635 script.ShowProgress(0.1, 0) 1636 so_far = system_diff.EmitVerification(script) 1637 if vendor_diff: 1638 so_far += vendor_diff.EmitVerification(script) 1639 1640 size = [] 1641 if system_diff.patch_list: 1642 size.append(system_diff.largest_source_size) 1643 if vendor_diff: 1644 if vendor_diff.patch_list: 1645 size.append(vendor_diff.largest_source_size) 1646 1647 if updating_boot: 1648 d = common.Difference(target_boot, source_boot) 1649 _, _, d = d.ComputePatch() 1650 print "boot target: %d source: %d diff: %d" % ( 1651 target_boot.size, source_boot.size, len(d)) 1652 1653 common.ZipWriteStr(output_zip, "patch/boot.img.p", d) 1654 1655 boot_type, boot_device = common.GetTypeAndDevice( 1656 "/boot", OPTIONS.source_info_dict) 1657 1658 script.PatchCheck("%s:%s:%d:%s:%d:%s" % 1659 (boot_type, boot_device, 1660 source_boot.size, source_boot.sha1, 1661 target_boot.size, target_boot.sha1)) 1662 so_far += source_boot.size 1663 size.append(target_boot.size) 1664 1665 if size: 1666 script.CacheFreeSpaceCheck(max(size)) 1667 1668 device_specific.IncrementalOTA_VerifyEnd() 1669 1670 if OPTIONS.two_step: 1671 script.WriteRawImage("/boot", "recovery.img") 1672 script.AppendExtra(""" 1673 set_stage("%(bcb_dev)s", "2/3"); 1674 reboot_now("%(bcb_dev)s", ""); 1675 else 1676 """ % bcb_dev) 1677 1678 script.Comment("---- start making changes here ----") 1679 1680 device_specific.IncrementalOTA_InstallBegin() 1681 1682 if OPTIONS.two_step: 1683 common.ZipWriteStr(output_zip, "boot.img", target_boot.data) 1684 script.WriteRawImage("/boot", "boot.img") 1685 print "writing full boot image (forced by two-step mode)" 1686 1687 script.Print("Removing unneeded files...") 1688 system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",)) 1689 if vendor_diff: 1690 vendor_diff.RemoveUnneededFiles(script) 1691 1692 script.ShowProgress(0.8, 0) 1693 total_patch_size = 1.0 + system_diff.TotalPatchSize() 1694 if vendor_diff: 1695 total_patch_size += vendor_diff.TotalPatchSize() 1696 if updating_boot: 1697 total_patch_size += target_boot.size 1698 1699 script.Print("Patching system files...") 1700 so_far = system_diff.EmitPatches(script, total_patch_size, 0) 1701 if vendor_diff: 1702 script.Print("Patching vendor files...") 1703 so_far = vendor_diff.EmitPatches(script, total_patch_size, so_far) 1704 1705 if not OPTIONS.two_step: 1706 if updating_boot: 1707 # Produce the boot image by applying a patch to the current 1708 # contents of the boot partition, and write it back to the 1709 # partition. 1710 script.Print("Patching boot image...") 1711 script.ApplyPatch("%s:%s:%d:%s:%d:%s" 1712 % (boot_type, boot_device, 1713 source_boot.size, source_boot.sha1, 1714 target_boot.size, target_boot.sha1), 1715 "-", 1716 target_boot.size, target_boot.sha1, 1717 source_boot.sha1, "patch/boot.img.p") 1718 so_far += target_boot.size 1719 script.SetProgress(so_far / total_patch_size) 1720 print "boot image changed; including." 1721 else: 1722 print "boot image unchanged; skipping." 1723 1724 system_items = ItemSet("system", "META/filesystem_config.txt") 1725 if vendor_diff: 1726 vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt") 1727 1728 if updating_recovery: 1729 # Recovery is generated as a patch using both the boot image 1730 # (which contains the same linux kernel as recovery) and the file 1731 # /system/etc/recovery-resource.dat (which contains all the images 1732 # used in the recovery UI) as sources. This lets us minimize the 1733 # size of the patch, which must be included in every OTA package. 1734 # 1735 # For older builds where recovery-resource.dat is not present, we 1736 # use only the boot image as the source. 1737 1738 if not target_has_recovery_patch: 1739 def output_sink(fn, data): 1740 common.ZipWriteStr(output_zip, "recovery/" + fn, data) 1741 system_items.Get("system/" + fn) 1742 1743 common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink, 1744 target_recovery, target_boot) 1745 script.DeleteFiles(["/system/recovery-from-boot.p", 1746 "/system/etc/recovery.img", 1747 "/system/etc/install-recovery.sh"]) 1748 print "recovery image changed; including as patch from boot." 1749 else: 1750 print "recovery image unchanged; skipping." 1751 1752 script.ShowProgress(0.1, 10) 1753 1754 target_symlinks = CopyPartitionFiles(system_items, target_zip, None) 1755 if vendor_diff: 1756 target_symlinks.extend(CopyPartitionFiles(vendor_items, target_zip, None)) 1757 1758 temp_script = script.MakeTemporary() 1759 system_items.GetMetadata(target_zip) 1760 system_items.Get("system").SetPermissions(temp_script) 1761 if vendor_diff: 1762 vendor_items.GetMetadata(target_zip) 1763 vendor_items.Get("vendor").SetPermissions(temp_script) 1764 1765 # Note that this call will mess up the trees of Items, so make sure 1766 # we're done with them. 1767 source_symlinks = CopyPartitionFiles(system_items, source_zip, None) 1768 if vendor_diff: 1769 source_symlinks.extend(CopyPartitionFiles(vendor_items, source_zip, None)) 1770 1771 target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks]) 1772 source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks]) 1773 1774 # Delete all the symlinks in source that aren't in target. This 1775 # needs to happen before verbatim files are unpacked, in case a 1776 # symlink in the source is replaced by a real file in the target. 1777 1778 # If a symlink in the source will be replaced by a regular file, we cannot 1779 # delete the symlink/file in case the package gets applied again. For such 1780 # a symlink, we prepend a sha1_check() to detect if it has been updated. 1781 # (Bug: 23646151) 1782 replaced_symlinks = dict() 1783 if system_diff: 1784 for i in system_diff.verbatim_targets: 1785 replaced_symlinks["/%s" % (i[0],)] = i[2] 1786 if vendor_diff: 1787 for i in vendor_diff.verbatim_targets: 1788 replaced_symlinks["/%s" % (i[0],)] = i[2] 1789 1790 if system_diff: 1791 for tf in system_diff.renames.values(): 1792 replaced_symlinks["/%s" % (tf.name,)] = tf.sha1 1793 if vendor_diff: 1794 for tf in vendor_diff.renames.values(): 1795 replaced_symlinks["/%s" % (tf.name,)] = tf.sha1 1796 1797 always_delete = [] 1798 may_delete = [] 1799 for dest, link in source_symlinks: 1800 if link not in target_symlinks_d: 1801 if link in replaced_symlinks: 1802 may_delete.append((link, replaced_symlinks[link])) 1803 else: 1804 always_delete.append(link) 1805 script.DeleteFiles(always_delete) 1806 script.DeleteFilesIfNotMatching(may_delete) 1807 1808 if system_diff.verbatim_targets: 1809 script.Print("Unpacking new system files...") 1810 script.UnpackPackageDir("system", "/system") 1811 if vendor_diff and vendor_diff.verbatim_targets: 1812 script.Print("Unpacking new vendor files...") 1813 script.UnpackPackageDir("vendor", "/vendor") 1814 1815 if updating_recovery and not target_has_recovery_patch: 1816 script.Print("Unpacking new recovery...") 1817 script.UnpackPackageDir("recovery", "/system") 1818 1819 system_diff.EmitRenames(script) 1820 if vendor_diff: 1821 vendor_diff.EmitRenames(script) 1822 1823 script.Print("Symlinks and permissions...") 1824 1825 # Create all the symlinks that don't already exist, or point to 1826 # somewhere different than what we want. Delete each symlink before 1827 # creating it, since the 'symlink' command won't overwrite. 1828 to_create = [] 1829 for dest, link in target_symlinks: 1830 if link in source_symlinks_d: 1831 if dest != source_symlinks_d[link]: 1832 to_create.append((dest, link)) 1833 else: 1834 to_create.append((dest, link)) 1835 script.DeleteFiles([i[1] for i in to_create]) 1836 script.MakeSymlinks(to_create) 1837 1838 # Now that the symlinks are created, we can set all the 1839 # permissions. 1840 script.AppendScript(temp_script) 1841 1842 # Do device-specific installation (eg, write radio image). 1843 device_specific.IncrementalOTA_InstallEnd() 1844 1845 if OPTIONS.extra_script is not None: 1846 script.AppendExtra(OPTIONS.extra_script) 1847 1848 # Patch the build.prop file last, so if something fails but the 1849 # device can still come up, it appears to be the old build and will 1850 # get set the OTA package again to retry. 1851 script.Print("Patching remaining system files...") 1852 system_diff.EmitDeferredPatches(script) 1853 1854 if OPTIONS.wipe_user_data: 1855 script.Print("Erasing user data...") 1856 script.FormatPartition("/data") 1857 metadata["ota-wipe"] = "yes" 1858 1859 if OPTIONS.two_step: 1860 script.AppendExtra(""" 1861 set_stage("%(bcb_dev)s", ""); 1862 endif; 1863 endif; 1864 """ % bcb_dev) 1865 1866 if OPTIONS.verify and system_diff: 1867 script.Print("Remounting and verifying system partition files...") 1868 script.Unmount("/system") 1869 script.Mount("/system", recovery_mount_options) 1870 system_diff.EmitExplicitTargetVerification(script) 1871 1872 if OPTIONS.verify and vendor_diff: 1873 script.Print("Remounting and verifying vendor partition files...") 1874 script.Unmount("/vendor") 1875 script.Mount("/vendor", recovery_mount_options) 1876 vendor_diff.EmitExplicitTargetVerification(script) 1877 1878 # For downgrade OTAs, we prefer to use the update-binary in the source 1879 # build that is actually newer than the one in the target build. 1880 if OPTIONS.downgrade: 1881 script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary) 1882 else: 1883 script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary) 1884 1885 metadata["ota-required-cache"] = str(script.required_cache) 1886 WriteMetadata(metadata, output_zip) 1887 1888 1889 def main(argv): 1890 1891 def option_handler(o, a): 1892 if o == "--board_config": 1893 pass # deprecated 1894 elif o in ("-k", "--package_key"): 1895 OPTIONS.package_key = a 1896 elif o in ("-i", "--incremental_from"): 1897 OPTIONS.incremental_source = a 1898 elif o == "--full_radio": 1899 OPTIONS.full_radio = True 1900 elif o == "--full_bootloader": 1901 OPTIONS.full_bootloader = True 1902 elif o in ("-w", "--wipe_user_data"): 1903 OPTIONS.wipe_user_data = True 1904 elif o in ("-n", "--no_prereq"): 1905 OPTIONS.omit_prereq = True 1906 elif o == "--downgrade": 1907 OPTIONS.downgrade = True 1908 OPTIONS.wipe_user_data = True 1909 elif o in ("-o", "--oem_settings"): 1910 OPTIONS.oem_source = a 1911 elif o == "--oem_no_mount": 1912 OPTIONS.oem_no_mount = True 1913 elif o in ("-e", "--extra_script"): 1914 OPTIONS.extra_script = a 1915 elif o in ("-a", "--aslr_mode"): 1916 if a in ("on", "On", "true", "True", "yes", "Yes"): 1917 OPTIONS.aslr_mode = True 1918 else: 1919 OPTIONS.aslr_mode = False 1920 elif o in ("-t", "--worker_threads"): 1921 if a.isdigit(): 1922 OPTIONS.worker_threads = int(a) 1923 else: 1924 raise ValueError("Cannot parse value %r for option %r - only " 1925 "integers are allowed." % (a, o)) 1926 elif o in ("-2", "--two_step"): 1927 OPTIONS.two_step = True 1928 elif o == "--no_signing": 1929 OPTIONS.no_signing = True 1930 elif o == "--verify": 1931 OPTIONS.verify = True 1932 elif o == "--block": 1933 OPTIONS.block_based = True 1934 elif o in ("-b", "--binary"): 1935 OPTIONS.updater_binary = a 1936 elif o in ("--no_fallback_to_full",): 1937 OPTIONS.fallback_to_full = False 1938 elif o == "--stash_threshold": 1939 try: 1940 OPTIONS.stash_threshold = float(a) 1941 except ValueError: 1942 raise ValueError("Cannot parse value %r for option %r - expecting " 1943 "a float" % (a, o)) 1944 elif o == "--gen_verify": 1945 OPTIONS.gen_verify = True 1946 elif o == "--log_diff": 1947 OPTIONS.log_diff = a 1948 elif o == "--payload_signer": 1949 OPTIONS.payload_signer = a 1950 elif o == "--payload_signer_args": 1951 OPTIONS.payload_signer_args = shlex.split(a) 1952 else: 1953 return False 1954 return True 1955 1956 args = common.ParseOptions(argv, __doc__, 1957 extra_opts="b:k:i:d:wne:t:a:2o:", 1958 extra_long_opts=[ 1959 "board_config=", 1960 "package_key=", 1961 "incremental_from=", 1962 "full_radio", 1963 "full_bootloader", 1964 "wipe_user_data", 1965 "no_prereq", 1966 "downgrade", 1967 "extra_script=", 1968 "worker_threads=", 1969 "aslr_mode=", 1970 "two_step", 1971 "no_signing", 1972 "block", 1973 "binary=", 1974 "oem_settings=", 1975 "oem_no_mount", 1976 "verify", 1977 "no_fallback_to_full", 1978 "stash_threshold=", 1979 "gen_verify", 1980 "log_diff=", 1981 "payload_signer=", 1982 "payload_signer_args=", 1983 ], extra_option_handler=option_handler) 1984 1985 if len(args) != 2: 1986 common.Usage(__doc__) 1987 sys.exit(1) 1988 1989 if OPTIONS.downgrade: 1990 # Sanity check to enforce a data wipe. 1991 if not OPTIONS.wipe_user_data: 1992 raise ValueError("Cannot downgrade without a data wipe") 1993 1994 # We should only allow downgrading incrementals (as opposed to full). 1995 # Otherwise the device may go back from arbitrary build with this full 1996 # OTA package. 1997 if OPTIONS.incremental_source is None: 1998 raise ValueError("Cannot generate downgradable full OTAs - consider" 1999 "using --omit_prereq?") 2000 2001 # Load the dict file from the zip directly to have a peek at the OTA type. 2002 # For packages using A/B update, unzipping is not needed. 2003 input_zip = zipfile.ZipFile(args[0], "r") 2004 OPTIONS.info_dict = common.LoadInfoDict(input_zip) 2005 common.ZipClose(input_zip) 2006 2007 ab_update = OPTIONS.info_dict.get("ab_update") == "true" 2008 2009 if ab_update: 2010 if OPTIONS.incremental_source is not None: 2011 OPTIONS.target_info_dict = OPTIONS.info_dict 2012 source_zip = zipfile.ZipFile(OPTIONS.incremental_source, "r") 2013 OPTIONS.source_info_dict = common.LoadInfoDict(source_zip) 2014 common.ZipClose(source_zip) 2015 2016 if OPTIONS.verbose: 2017 print "--- target info ---" 2018 common.DumpInfoDict(OPTIONS.info_dict) 2019 2020 if OPTIONS.incremental_source is not None: 2021 print "--- source info ---" 2022 common.DumpInfoDict(OPTIONS.source_info_dict) 2023 2024 WriteABOTAPackageWithBrilloScript( 2025 target_file=args[0], 2026 output_file=args[1], 2027 source_file=OPTIONS.incremental_source) 2028 2029 print "done." 2030 return 2031 2032 if OPTIONS.extra_script is not None: 2033 OPTIONS.extra_script = open(OPTIONS.extra_script).read() 2034 2035 print "unzipping target target-files..." 2036 OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0]) 2037 2038 OPTIONS.target_tmp = OPTIONS.input_tmp 2039 OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.target_tmp) 2040 2041 if OPTIONS.verbose: 2042 print "--- target info ---" 2043 common.DumpInfoDict(OPTIONS.info_dict) 2044 2045 # If the caller explicitly specified the device-specific extensions 2046 # path via -s/--device_specific, use that. Otherwise, use 2047 # META/releasetools.py if it is present in the target target_files. 2048 # Otherwise, take the path of the file from 'tool_extensions' in the 2049 # info dict and look for that in the local filesystem, relative to 2050 # the current directory. 2051 2052 if OPTIONS.device_specific is None: 2053 from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py") 2054 if os.path.exists(from_input): 2055 print "(using device-specific extensions from target_files)" 2056 OPTIONS.device_specific = from_input 2057 else: 2058 OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions", None) 2059 2060 if OPTIONS.device_specific is not None: 2061 OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific) 2062 2063 if OPTIONS.info_dict.get("no_recovery") == "true": 2064 raise common.ExternalError( 2065 "--- target build has specified no recovery ---") 2066 2067 # Use the default key to sign the package if not specified with package_key. 2068 if not OPTIONS.no_signing: 2069 if OPTIONS.package_key is None: 2070 OPTIONS.package_key = OPTIONS.info_dict.get( 2071 "default_system_dev_certificate", 2072 "build/target/product/security/testkey") 2073 2074 # Set up the output zip. Create a temporary zip file if signing is needed. 2075 if OPTIONS.no_signing: 2076 if os.path.exists(args[1]): 2077 os.unlink(args[1]) 2078 output_zip = zipfile.ZipFile(args[1], "w", 2079 compression=zipfile.ZIP_DEFLATED) 2080 else: 2081 temp_zip_file = tempfile.NamedTemporaryFile() 2082 output_zip = zipfile.ZipFile(temp_zip_file, "w", 2083 compression=zipfile.ZIP_DEFLATED) 2084 2085 # Non A/B OTAs rely on /cache partition to store temporary files. 2086 cache_size = OPTIONS.info_dict.get("cache_size", None) 2087 if cache_size is None: 2088 print "--- can't determine the cache partition size ---" 2089 OPTIONS.cache_size = cache_size 2090 2091 # Generate a verify package. 2092 if OPTIONS.gen_verify: 2093 WriteVerifyPackage(input_zip, output_zip) 2094 2095 # Generate a full OTA. 2096 elif OPTIONS.incremental_source is None: 2097 WriteFullOTAPackage(input_zip, output_zip) 2098 2099 # Generate an incremental OTA. It will fall back to generate a full OTA on 2100 # failure unless no_fallback_to_full is specified. 2101 else: 2102 print "unzipping source target-files..." 2103 OPTIONS.source_tmp, source_zip = common.UnzipTemp( 2104 OPTIONS.incremental_source) 2105 OPTIONS.target_info_dict = OPTIONS.info_dict 2106 OPTIONS.source_info_dict = common.LoadInfoDict(source_zip, 2107 OPTIONS.source_tmp) 2108 if OPTIONS.verbose: 2109 print "--- source info ---" 2110 common.DumpInfoDict(OPTIONS.source_info_dict) 2111 try: 2112 WriteIncrementalOTAPackage(input_zip, source_zip, output_zip) 2113 if OPTIONS.log_diff: 2114 out_file = open(OPTIONS.log_diff, 'w') 2115 import target_files_diff 2116 target_files_diff.recursiveDiff('', 2117 OPTIONS.source_tmp, 2118 OPTIONS.input_tmp, 2119 out_file) 2120 out_file.close() 2121 except ValueError: 2122 if not OPTIONS.fallback_to_full: 2123 raise 2124 print "--- failed to build incremental; falling back to full ---" 2125 OPTIONS.incremental_source = None 2126 WriteFullOTAPackage(input_zip, output_zip) 2127 2128 common.ZipClose(output_zip) 2129 2130 # Sign the generated zip package unless no_signing is specified. 2131 if not OPTIONS.no_signing: 2132 SignOutput(temp_zip_file.name, args[1]) 2133 temp_zip_file.close() 2134 2135 print "done." 2136 2137 2138 if __name__ == '__main__': 2139 try: 2140 common.CloseInheritedPipes() 2141 main(sys.argv[1:]) 2142 except common.ExternalError as e: 2143 print 2144 print " ERROR: %s" % (e,) 2145 print 2146 sys.exit(1) 2147 finally: 2148 common.Cleanup() 2149