#!/usr/bin/env python
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Given a target-files zipfile, produces an OTA package that installs
that build.  An incremental OTA is produced if -i is given, otherwise
a full OTA is produced.

Usage:  ota_from_target_files [flags] input_target_files output_ota_package

  --board_config  <file>
      Deprecated.

  -k  (--package_key)  <key>
      Key to use to sign the package (default is the value of
      default_system_dev_certificate from the input target-files's
      META/misc_info.txt, or "build/target/product/security/testkey"
      if that value is not specified).

      For incremental OTAs, the default value is based on the source
      target-files, not the target build.

  -i  (--incremental_from)  <file>
      Generate an incremental OTA using the given target-files zip as
      the starting build.

  --full_radio
      When generating an incremental OTA, always include a full copy of
      the radio image. This option is only meaningful when -i is specified,
      because a full radio is always included in a full OTA if applicable.

  --full_bootloader
      Similar to --full_radio. When generating an incremental OTA, always
      include a full copy of the bootloader image.

  -v  (--verify)
      Remount and verify the checksums of the files written to the
      system and vendor (if used) partitions. Incremental builds only.

  -o  (--oem_settings)  <file>
      Use the file to specify the expected OEM-specific properties
      on the OEM partition of the intended device.

  --oem_no_mount
      For devices with OEM-specific properties but without an OEM partition,
      do not mount the OEM partition in the updater-script. This should be
      very rarely used, since it's expected to have a dedicated OEM partition
      for OEM-specific properties. Only meaningful when -o is specified.

  -w  (--wipe_user_data)
      Generate an OTA package that will wipe the user data partition
      when installed.

  -n  (--no_prereq)
      Omit the timestamp prereq check normally included at the top of
      the build scripts (used for developer OTA packages which
      legitimately need to go back and forth).

  --downgrade
      Intentionally generate an incremental OTA that updates from a newer
      build to an older one (based on timestamp comparison). "post-timestamp"
      will be replaced by "ota-downgrade=yes" in the metadata file. A data
      wipe will always be enforced, so "ota-wipe=yes" will also be included in
      the metadata file. The update-binary in the source build will be used in
      the OTA package, unless the --binary flag is specified.

  -e  (--extra_script)  <file>
      Insert the contents of file at the end of the update script.

  -a  (--aslr_mode)  <on|off>
      Specify whether to turn on ASLR for the package (on by default).

  -2  (--two_step)
      Generate a 'two-step' OTA package, where recovery is updated
      first, so that any changes made to the system partition are done
      using the new recovery (new kernel, etc.).

  --block
      Generate a block-based OTA if possible. Will fall back to a
      file-based OTA if the target_files is older and doesn't support
      block-based OTAs.

  -b  (--binary)  <file>
      Use the given binary as the update-binary in the output package,
      instead of the binary in the build's target_files. Use for
      development only.

  -t  (--worker_threads)  <int>
      Specifies the number of worker-threads that will be used when
      generating patches for incremental updates (defaults to 3).

  --stash_threshold  <float>
      Specifies the threshold that will be used to compute the maximum
      allowed stash size (defaults to 0.8).

  --gen_verify
      Generate an OTA package that verifies the partitions.

  --log_diff  <file>
      Generate a log file that shows the differences in the source and target
      builds for an incremental package. This option is only meaningful when
      -i is specified.
"""
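
# Illustrative invocations (output names are placeholders; the exact set of
# flags that applies depends on the build and its releasetools setup):
#
#   # Full OTA, signed with the default test key:
#   ota_from_target_files target_files.zip full-ota.zip
#
#   # Block-based incremental OTA between two builds:
#   ota_from_target_files --block -i previous-target_files.zip \
#       target_files.zip incremental-ota.zip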

import sys

if sys.hexversion < 0x02070000:
  print >> sys.stderr, "Python 2.7 or newer is required."
  sys.exit(1)

import multiprocessing
import os
import subprocess
import tempfile
import zipfile

import common
import edify_generator
import sparse_img

OPTIONS = common.OPTIONS
OPTIONS.package_key = None
OPTIONS.incremental_source = None
OPTIONS.verify = False
OPTIONS.require_verbatim = set()
OPTIONS.prohibit_verbatim = set(("system/build.prop",))
OPTIONS.patch_threshold = 0.95
OPTIONS.wipe_user_data = False
OPTIONS.omit_prereq = False
OPTIONS.downgrade = False
OPTIONS.extra_script = None
OPTIONS.aslr_mode = True
OPTIONS.worker_threads = multiprocessing.cpu_count() // 2
if OPTIONS.worker_threads == 0:
  OPTIONS.worker_threads = 1
OPTIONS.two_step = False
OPTIONS.no_signing = False
OPTIONS.block_based = False
OPTIONS.updater_binary = None
OPTIONS.oem_source = None
OPTIONS.oem_no_mount = False
OPTIONS.fallback_to_full = True
OPTIONS.full_radio = False
OPTIONS.full_bootloader = False
# Stash size cannot exceed cache_size * threshold.
OPTIONS.cache_size = None
OPTIONS.stash_threshold = 0.8
OPTIONS.gen_verify = False
OPTIONS.log_diff = None


def MostPopularKey(d, default):
  """Given a dict, return the key corresponding to the largest
  value.  Returns 'default' if the dict is empty."""
  x = [(v, k) for (k, v) in d.iteritems()]
  if not x:
    return default
  x.sort()
  return x[-1][1]


def IsSymlink(info):
  """Return true if the zipfile.ZipInfo object passed in represents a
  symlink."""
  return (info.external_attr >> 16) & 0o770000 == 0o120000


def IsRegular(info):
  """Return true if the zipfile.ZipInfo object passed in represents a
  regular file."""
  return (info.external_attr >> 16) & 0o770000 == 0o100000


def ClosestFileMatch(src, tgtfiles, existing):
  """Returns the closest file match between a source file and a list
  of potential matches.  The exact filename match is preferred,
  then the sha1 is searched for, and finally a file with the same
  basename is evaluated.
  Rename support in the updater-binary is required for the latter checks
  to be used."""

  result = tgtfiles.get("path:" + src.name)
  if result is not None:
    return result

  if not OPTIONS.target_info_dict.get("update_rename_support", False):
    return None

  if src.size < 1000:
    return None

  result = tgtfiles.get("sha1:" + src.sha1)
  if result is not None and existing.get(result.name) is None:
    return result
  result = tgtfiles.get("file:" + src.name.split("/")[-1])
  if result is not None and existing.get(result.name) is None:
    return result
  return None


class ItemSet(object):
  def __init__(self, partition, fs_config):
    self.partition = partition
    self.fs_config = fs_config
    self.ITEMS = {}

  def Get(self, name, is_dir=False):
    if name not in self.ITEMS:
      self.ITEMS[name] = Item(self, name, is_dir=is_dir)
    return self.ITEMS[name]

  def GetMetadata(self, input_zip):
    # The target_files contains a record of what the uid,
    # gid, and mode are supposed to be.
    output = input_zip.read(self.fs_config)

    for line in output.split("\n"):
      if not line:
        continue
      columns = line.split()
      name, uid, gid, mode = columns[:4]
      selabel = None
      capabilities = None

      # After the first 4 columns, there are a series of key=value
      # pairs.  Extract out the fields we care about.
      for element in columns[4:]:
        key, value = element.split("=")
        if key == "selabel":
          selabel = value
        if key == "capabilities":
          capabilities = value

      i = self.ITEMS.get(name, None)
      if i is not None:
        i.uid = int(uid)
        i.gid = int(gid)
        i.mode = int(mode, 8)
        i.selabel = selabel
        i.capabilities = capabilities
        if i.is_dir:
          i.children.sort(key=lambda i: i.name)

    # Set metadata for the files generated by this script. A full recovery
    # image at system/etc/recovery.img is taken care of by fs_config.
    i = self.ITEMS.get("system/recovery-from-boot.p", None)
    if i:
      i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o644, None, None
    i = self.ITEMS.get("system/etc/install-recovery.sh", None)
    if i:
      i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o544, None, None


class Item(object):
  """Items represent the metadata (user, group, mode) of files and
  directories in the system image."""
  def __init__(self, itemset, name, is_dir=False):
    self.itemset = itemset
    self.name = name
    self.uid = None
    self.gid = None
    self.mode = None
    self.selabel = None
    self.capabilities = None
    self.is_dir = is_dir
    self.descendants = None
    self.best_subtree = None

    if name:
      self.parent = itemset.Get(os.path.dirname(name), is_dir=True)
      self.parent.children.append(self)
    else:
      self.parent = None
    if self.is_dir:
      self.children = []

  def Dump(self, indent=0):
    if self.uid is not None:
      print "%s%s %d %d %o" % (
          " " * indent, self.name, self.uid, self.gid, self.mode)
    else:
      print "%s%s %s %s %s" % (
          " " * indent, self.name, self.uid, self.gid, self.mode)
    if self.is_dir:
      print "%s%s" % (" "*indent, self.descendants)
      print "%s%s" % (" "*indent, self.best_subtree)
      for i in self.children:
        i.Dump(indent=indent+1)

  def CountChildMetadata(self):
    """Count up the (uid, gid, mode, selabel, capabilities) tuples for
    all children and determine the best strategy for using set_perm_recursive
    and set_perm to correctly chown/chmod all the files to their desired
    values.  Recursively calls itself for all descendants.

    Returns a dict of {(uid, gid, dmode, fmode, selabel, capabilities): count}
    counting up all descendants of this node.  (dmode or fmode may be None.)
    Also sets the best_subtree of each directory Item to the (uid, gid, dmode,
    fmode, selabel, capabilities) tuple that will match the most descendants of
    that Item.
    """

    assert self.is_dir
    key = (self.uid, self.gid, self.mode, None, self.selabel,
           self.capabilities)
    self.descendants = {key: 1}
    d = self.descendants
    for i in self.children:
      if i.is_dir:
        for k, v in i.CountChildMetadata().iteritems():
          d[k] = d.get(k, 0) + v
      else:
        k = (i.uid, i.gid, None, i.mode, i.selabel, i.capabilities)
        d[k] = d.get(k, 0) + 1

    # Find the (uid, gid, dmode, fmode, selabel, capabilities)
    # tuple that matches the most descendants.

    # First, find the (uid, gid) pair that matches the most
    # descendants.
    ug = {}
    for (uid, gid, _, _, _, _), count in d.iteritems():
      ug[(uid, gid)] = ug.get((uid, gid), 0) + count
    ug = MostPopularKey(ug, (0, 0))

    # Now find the dmode, fmode, selabel, and capabilities that match
    # the most descendants with that (uid, gid), and choose those.
    best_dmode = (0, 0o755)
    best_fmode = (0, 0o644)
    best_selabel = (0, None)
    best_capabilities = (0, None)
    for k, count in d.iteritems():
      if k[:2] != ug:
        continue
      if k[2] is not None and count >= best_dmode[0]:
        best_dmode = (count, k[2])
      if k[3] is not None and count >= best_fmode[0]:
        best_fmode = (count, k[3])
      if k[4] is not None and count >= best_selabel[0]:
        best_selabel = (count, k[4])
      if k[5] is not None and count >= best_capabilities[0]:
        best_capabilities = (count, k[5])
    self.best_subtree = ug + (
        best_dmode[1], best_fmode[1], best_selabel[1], best_capabilities[1])

    return d

  def SetPermissions(self, script):
    """Append set_perm/set_perm_recursive commands to 'script' to
    set all permissions, users, and groups for the tree of files
    rooted at 'self'."""

    self.CountChildMetadata()

    def recurse(item, current):
      # current is the (uid, gid, dmode, fmode, selabel, capabilities) tuple
      # that the current item (and all its children) have already been set
      # to.  We only need to issue set_perm/set_perm_recursive commands if
      # we're supposed to be something different.
      if item.is_dir:
        if current != item.best_subtree:
          script.SetPermissionsRecursive("/"+item.name, *item.best_subtree)
          current = item.best_subtree

        if item.uid != current[0] or item.gid != current[1] or \
           item.mode != current[2] or item.selabel != current[4] or \
           item.capabilities != current[5]:
          script.SetPermissions("/"+item.name, item.uid, item.gid,
                                item.mode, item.selabel, item.capabilities)

        for i in item.children:
          recurse(i, current)
      else:
        if item.uid != current[0] or item.gid != current[1] or \
           item.mode != current[3] or item.selabel != current[4] or \
           item.capabilities != current[5]:
          script.SetPermissions("/"+item.name, item.uid, item.gid,
                                item.mode, item.selabel, item.capabilities)

    recurse(self, (-1, -1, -1, -1, None, None))


def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None):
  """Copies files for the partition in the input zip to the output
  zip.  Populates the Item class with their metadata, and returns a
  list of symlinks.  output_zip may be None, in which case the copy is
  skipped (but the other side effects still happen).  substitute is an
  optional dict of {output filename: contents} to be output instead of
  certain input files.
398 """ 399 400 symlinks = [] 401 402 partition = itemset.partition 403 404 for info in input_zip.infolist(): 405 prefix = partition.upper() + "/" 406 if info.filename.startswith(prefix): 407 basefilename = info.filename[len(prefix):] 408 if IsSymlink(info): 409 symlinks.append((input_zip.read(info.filename), 410 "/" + partition + "/" + basefilename)) 411 else: 412 import copy 413 info2 = copy.copy(info) 414 fn = info2.filename = partition + "/" + basefilename 415 if substitute and fn in substitute and substitute[fn] is None: 416 continue 417 if output_zip is not None: 418 if substitute and fn in substitute: 419 data = substitute[fn] 420 else: 421 data = input_zip.read(info.filename) 422 common.ZipWriteStr(output_zip, info2, data) 423 if fn.endswith("/"): 424 itemset.Get(fn[:-1], is_dir=True) 425 else: 426 itemset.Get(fn) 427 428 symlinks.sort() 429 return symlinks 430 431 432 def SignOutput(temp_zip_name, output_zip_name): 433 key_passwords = common.GetKeyPasswords([OPTIONS.package_key]) 434 pw = key_passwords[OPTIONS.package_key] 435 436 common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw, 437 whole_file=True) 438 439 440 def AppendAssertions(script, info_dict, oem_dict=None): 441 oem_props = info_dict.get("oem_fingerprint_properties") 442 if oem_props is None or len(oem_props) == 0: 443 device = GetBuildProp("ro.product.device", info_dict) 444 script.AssertDevice(device) 445 else: 446 if oem_dict is None: 447 raise common.ExternalError( 448 "No OEM file provided to answer expected assertions") 449 for prop in oem_props.split(): 450 if oem_dict.get(prop) is None: 451 raise common.ExternalError( 452 "The OEM file is missing the property %s" % prop) 453 script.AssertOemProperty(prop, oem_dict.get(prop)) 454 455 456 def HasRecoveryPatch(target_files_zip): 457 namelist = [name for name in target_files_zip.namelist()] 458 return ("SYSTEM/recovery-from-boot.p" in namelist or 459 "SYSTEM/etc/recovery.img" in namelist) 460 461 def HasVendorPartition(target_files_zip): 462 try: 463 target_files_zip.getinfo("VENDOR/") 464 return True 465 except KeyError: 466 return False 467 468 def GetOemProperty(name, oem_props, oem_dict, info_dict): 469 if oem_props is not None and name in oem_props: 470 return oem_dict[name] 471 return GetBuildProp(name, info_dict) 472 473 474 def CalculateFingerprint(oem_props, oem_dict, info_dict): 475 if oem_props is None: 476 return GetBuildProp("ro.build.fingerprint", info_dict) 477 return "%s/%s/%s:%s" % ( 478 GetOemProperty("ro.product.brand", oem_props, oem_dict, info_dict), 479 GetOemProperty("ro.product.name", oem_props, oem_dict, info_dict), 480 GetOemProperty("ro.product.device", oem_props, oem_dict, info_dict), 481 GetBuildProp("ro.build.thumbprint", info_dict)) 482 483 484 def GetImage(which, tmpdir, info_dict): 485 # Return an image object (suitable for passing to BlockImageDiff) 486 # for the 'which' partition (most be "system" or "vendor"). If a 487 # prebuilt image and file map are found in tmpdir they are used, 488 # otherwise they are reconstructed from the individual files. 489 490 assert which in ("system", "vendor") 491 492 path = os.path.join(tmpdir, "IMAGES", which + ".img") 493 mappath = os.path.join(tmpdir, "IMAGES", which + ".map") 494 if os.path.exists(path) and os.path.exists(mappath): 495 print "using %s.img from target-files" % (which,) 496 # This is a 'new' target-files, which already has the image in it. 

  else:
    print "building %s.img from target-files" % (which,)

    # This is an 'old' target-files, which does not contain images
    # already built.  Build them.

    mappath = tempfile.mkstemp()[1]
    OPTIONS.tempfiles.append(mappath)

    import add_img_to_target_files
    if which == "system":
      path = add_img_to_target_files.BuildSystem(
          tmpdir, info_dict, block_list=mappath)
    elif which == "vendor":
      path = add_img_to_target_files.BuildVendor(
          tmpdir, info_dict, block_list=mappath)

  # Bug: http://b/20939131
  # In ext4 filesystems, block 0 might be changed even when mounted R/O.
  # We add it to clobbered_blocks so that it will be written to the target
  # unconditionally.  Note that they are still part of care_map.
  clobbered_blocks = "0"

  return sparse_img.SparseImage(path, mappath, clobbered_blocks)


def WriteFullOTAPackage(input_zip, output_zip):
  # TODO: how to determine this?  We don't know what version it will
  # be installed on top of.  For now, we expect the API just won't
  # change very often.  Similarly for fstab, it might have changed
  # in the target build.
  script = edify_generator.EdifyGenerator(3, OPTIONS.info_dict)

  oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties")
  recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options")
  oem_dict = None
  if oem_props is not None and len(oem_props) > 0:
    if OPTIONS.oem_source is None:
      raise common.ExternalError("OEM source required for this build")
    if not OPTIONS.oem_no_mount:
      script.Mount("/oem", recovery_mount_options)
    oem_dict = common.LoadDictionaryFromLines(
        open(OPTIONS.oem_source).readlines())

  metadata = {
      "post-build": CalculateFingerprint(oem_props, oem_dict,
                                         OPTIONS.info_dict),
      "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
                                   OPTIONS.info_dict),
      "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
  }

  device_specific = common.DeviceSpecificParams(
      input_zip=input_zip,
      input_version=OPTIONS.info_dict["recovery_api_version"],
      output_zip=output_zip,
      script=script,
      input_tmp=OPTIONS.input_tmp,
      metadata=metadata,
      info_dict=OPTIONS.info_dict)

  has_recovery_patch = HasRecoveryPatch(input_zip)
  block_based = OPTIONS.block_based and has_recovery_patch

  metadata["ota-type"] = "BLOCK" if block_based else "FILE"

  if not OPTIONS.omit_prereq:
    ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict)
    ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict)
    script.AssertOlderBuild(ts, ts_text)

  AppendAssertions(script, OPTIONS.info_dict, oem_dict)
  device_specific.FullOTA_Assertions()

  # Two-step package strategy (in chronological order, which is *not*
  # the order in which the generated script has things):
  #
  # if stage is not "2/3" or "3/3":
  #    write recovery image to boot partition
  #    set stage to "2/3"
  #    reboot to boot partition and restart recovery
  # else if stage is "2/3":
  #    write recovery image to recovery partition
  #    set stage to "3/3"
  #    reboot to recovery partition and restart recovery
  # else:
  #    (stage must be "3/3")
  #    set stage to ""
  #    do normal full package installation:
  #       wipe and install system, boot image, etc.
  #       set up system to update recovery partition on first boot
  #    complete script normally
  #    (allow recovery to mark itself finished and reboot)

  recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
                                         OPTIONS.input_tmp, "RECOVERY")
  if OPTIONS.two_step:
    if not OPTIONS.info_dict.get("multistage_support", None):
      assert False, "two-step packages not supported by this build"
    fs = OPTIONS.info_dict["fstab"]["/misc"]
    assert fs.fs_type.upper() == "EMMC", \
        "two-step packages only supported on devices with EMMC /misc partitions"
    bcb_dev = {"bcb_dev": fs.device}
    common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data)
    script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)
    script.WriteRawImage("/recovery", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
reboot_now("%(bcb_dev)s", "recovery");
else if get_stage("%(bcb_dev)s") == "3/3" then
""" % bcb_dev)

  # Dump fingerprints
  script.Print("Target: %s" % CalculateFingerprint(
      oem_props, oem_dict, OPTIONS.info_dict))

  device_specific.FullOTA_InstallBegin()

  system_progress = 0.75

  if OPTIONS.wipe_user_data:
    system_progress -= 0.1
  if HasVendorPartition(input_zip):
    system_progress -= 0.1

  # Place a copy of file_contexts.bin into the OTA package which will be used
  # by the recovery program.
  if "selinux_fc" in OPTIONS.info_dict:
    WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip)

  recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options")

  system_items = ItemSet("system", "META/filesystem_config.txt")
  script.ShowProgress(system_progress, 0)

  if block_based:
    # Full OTA is done as an "incremental" against an empty source
    # image.  This has the effect of writing new data from the package
    # to the entire partition, but lets us reuse the updater code that
    # writes incrementals to do it.
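    # (ResetFileMap() below drops the per-file block map so the whole image
    # is treated as one opaque blob of new data, and BlockDifference(...,
    # src=None) then emits plain writes for it instead of computing patches.)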
    system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict)
    system_tgt.ResetFileMap()
    system_diff = common.BlockDifference("system", system_tgt, src=None)
    system_diff.WriteScript(script, output_zip)
  else:
    script.FormatPartition("/system")
    script.Mount("/system", recovery_mount_options)
    if not has_recovery_patch:
      script.UnpackPackageDir("recovery", "/system")
    script.UnpackPackageDir("system", "/system")

    symlinks = CopyPartitionFiles(system_items, input_zip, output_zip)
    script.MakeSymlinks(symlinks)

  boot_img = common.GetBootableImage(
      "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")

  if not block_based:
    def output_sink(fn, data):
      common.ZipWriteStr(output_zip, "recovery/" + fn, data)
      system_items.Get("system/" + fn)

    common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink,
                             recovery_img, boot_img)

    system_items.GetMetadata(input_zip)
    system_items.Get("system").SetPermissions(script)

  if HasVendorPartition(input_zip):
    vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
    script.ShowProgress(0.1, 0)

    if block_based:
      vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict)
      vendor_tgt.ResetFileMap()
      vendor_diff = common.BlockDifference("vendor", vendor_tgt)
      vendor_diff.WriteScript(script, output_zip)
    else:
      script.FormatPartition("/vendor")
      script.Mount("/vendor", recovery_mount_options)
      script.UnpackPackageDir("vendor", "/vendor")

      symlinks = CopyPartitionFiles(vendor_items, input_zip, output_zip)
      script.MakeSymlinks(symlinks)

      vendor_items.GetMetadata(input_zip)
      vendor_items.Get("vendor").SetPermissions(script)

  common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict)
  common.ZipWriteStr(output_zip, "boot.img", boot_img.data)

  script.ShowProgress(0.05, 5)
  script.WriteRawImage("/boot", "boot.img")

  script.ShowProgress(0.2, 10)
  device_specific.FullOTA_InstallEnd()

  if OPTIONS.extra_script is not None:
    script.AppendExtra(OPTIONS.extra_script)

  script.UnmountAll()

  if OPTIONS.wipe_user_data:
    script.ShowProgress(0.1, 10)
    script.FormatPartition("/data")

  if OPTIONS.two_step:
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
""" % bcb_dev)
    script.AppendExtra("else\n")
    script.WriteRawImage("/boot", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
endif;
endif;
""" % bcb_dev)

  script.SetProgress(1)
  script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
  metadata["ota-required-cache"] = str(script.required_cache)
  WriteMetadata(metadata, output_zip)


def WritePolicyConfig(file_name, output_zip):
  common.ZipWrite(output_zip, file_name, os.path.basename(file_name))


def WriteMetadata(metadata, output_zip):
  common.ZipWriteStr(output_zip, "META-INF/com/android/metadata",
                     "".join(["%s=%s\n" % kv
                              for kv in sorted(metadata.iteritems())]))


def LoadPartitionFiles(z, partition):
  """Load all the files from the given partition in a given target-files
  ZipFile, and return a dict of {filename: File object}."""
  out = {}
  prefix = partition.upper() + "/"
  for info in z.infolist():
    if info.filename.startswith(prefix) and not IsSymlink(info):
      basefilename = info.filename[len(prefix):]
      fn = partition + "/" + basefilename
      data = z.read(info.filename)
      out[fn] = common.File(fn, data)
  return out


def GetBuildProp(prop, info_dict):
  """Return the value of the given build property from a target-files
  info_dict."""
  try:
    return info_dict.get("build.prop", {})[prop]
  except KeyError:
    raise common.ExternalError("couldn't find %s in build.prop" % (prop,))


def AddToKnownPaths(filename, known_paths):
  if filename[-1] == "/":
    return
  dirs = filename.split("/")[:-1]
  while len(dirs) > 0:
    path = "/".join(dirs)
    if path in known_paths:
      break
    known_paths.add(path)
    dirs.pop()


def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
  # TODO(tbao): We should factor out the common parts between
  # WriteBlockIncrementalOTAPackage() and WriteIncrementalOTAPackage().
  source_version = OPTIONS.source_info_dict["recovery_api_version"]
  target_version = OPTIONS.target_info_dict["recovery_api_version"]

  if source_version == 0:
    print ("WARNING: generating edify script for a source that "
           "can't install it.")
  script = edify_generator.EdifyGenerator(
      source_version, OPTIONS.target_info_dict,
      fstab=OPTIONS.source_info_dict["fstab"])

  oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties")
  recovery_mount_options = OPTIONS.source_info_dict.get(
      "recovery_mount_options")
  oem_dict = None
  if oem_props is not None and len(oem_props) > 0:
    if OPTIONS.oem_source is None:
      raise common.ExternalError("OEM source required for this build")
    if not OPTIONS.oem_no_mount:
      script.Mount("/oem", recovery_mount_options)
    oem_dict = common.LoadDictionaryFromLines(
        open(OPTIONS.oem_source).readlines())

  metadata = {
      "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
                                   OPTIONS.source_info_dict),
      "ota-type": "BLOCK",
  }

  post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
  pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
  is_downgrade = long(post_timestamp) < long(pre_timestamp)

  if OPTIONS.downgrade:
    metadata["ota-downgrade"] = "yes"
    if not is_downgrade:
      raise RuntimeError("--downgrade specified but no downgrade detected: "
                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
  else:
    if is_downgrade:
      # Non-fatal here to allow generating such a package which may require
      # manual work to adjust the post-timestamp. A legit use case is that we
      # cut a new build C (after having A and B), but want to enforce the
      # update path of A -> C -> B. Specifying --downgrade may not help since
      # that would enforce a data wipe for the C -> B update.
      print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
            "The package may not be deployed properly. "
" 818 "Try --downgrade?\n" % (pre_timestamp, post_timestamp)) 819 metadata["post-timestamp"] = post_timestamp 820 821 device_specific = common.DeviceSpecificParams( 822 source_zip=source_zip, 823 source_version=source_version, 824 target_zip=target_zip, 825 target_version=target_version, 826 output_zip=output_zip, 827 script=script, 828 metadata=metadata, 829 info_dict=OPTIONS.source_info_dict) 830 831 source_fp = CalculateFingerprint(oem_props, oem_dict, 832 OPTIONS.source_info_dict) 833 target_fp = CalculateFingerprint(oem_props, oem_dict, 834 OPTIONS.target_info_dict) 835 metadata["pre-build"] = source_fp 836 metadata["post-build"] = target_fp 837 metadata["pre-build-incremental"] = GetBuildProp( 838 "ro.build.version.incremental", OPTIONS.source_info_dict) 839 metadata["post-build-incremental"] = GetBuildProp( 840 "ro.build.version.incremental", OPTIONS.target_info_dict) 841 842 source_boot = common.GetBootableImage( 843 "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", 844 OPTIONS.source_info_dict) 845 target_boot = common.GetBootableImage( 846 "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT") 847 updating_boot = (not OPTIONS.two_step and 848 (source_boot.data != target_boot.data)) 849 850 target_recovery = common.GetBootableImage( 851 "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY") 852 853 system_src = GetImage("system", OPTIONS.source_tmp, OPTIONS.source_info_dict) 854 system_tgt = GetImage("system", OPTIONS.target_tmp, OPTIONS.target_info_dict) 855 856 blockimgdiff_version = 1 857 if OPTIONS.info_dict: 858 blockimgdiff_version = max( 859 int(i) for i in 860 OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(",")) 861 862 # Check first block of system partition for remount R/W only if 863 # disk type is ext4 864 system_partition = OPTIONS.source_info_dict["fstab"]["/system"] 865 check_first_block = system_partition.fs_type == "ext4" 866 # Disable using imgdiff for squashfs. 'imgdiff -z' expects input files to be 867 # in zip formats. However with squashfs, a) all files are compressed in LZ4; 868 # b) the blocks listed in block map may not contain all the bytes for a given 869 # file (because they're rounded to be 4K-aligned). 
  disable_imgdiff = system_partition.fs_type == "squashfs"
  system_diff = common.BlockDifference("system", system_tgt, system_src,
                                       check_first_block,
                                       version=blockimgdiff_version,
                                       disable_imgdiff=disable_imgdiff)

  if HasVendorPartition(target_zip):
    if not HasVendorPartition(source_zip):
      raise RuntimeError("can't generate incremental that adds /vendor")
    vendor_src = GetImage("vendor", OPTIONS.source_tmp,
                          OPTIONS.source_info_dict)
    vendor_tgt = GetImage("vendor", OPTIONS.target_tmp,
                          OPTIONS.target_info_dict)

    # Check first block of vendor partition for remount R/W only if
    # disk type is ext4.
    vendor_partition = OPTIONS.source_info_dict["fstab"]["/vendor"]
    check_first_block = vendor_partition.fs_type == "ext4"
    disable_imgdiff = vendor_partition.fs_type == "squashfs"
    vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
                                         check_first_block,
                                         version=blockimgdiff_version,
                                         disable_imgdiff=disable_imgdiff)
  else:
    vendor_diff = None

  AppendAssertions(script, OPTIONS.target_info_dict, oem_dict)
  device_specific.IncrementalOTA_Assertions()

  # Two-step incremental package strategy (in chronological order,
  # which is *not* the order in which the generated script has
  # things):
  #
  # if stage is not "2/3" or "3/3":
  #    do verification on current system
  #    write recovery image to boot partition
  #    set stage to "2/3"
  #    reboot to boot partition and restart recovery
  # else if stage is "2/3":
  #    write recovery image to recovery partition
  #    set stage to "3/3"
  #    reboot to recovery partition and restart recovery
  # else:
  #    (stage must be "3/3")
  #    perform update:
  #       patch system files, etc.
  #       force full install of new boot image
  #       set up system to update recovery partition on first boot
  #       complete script normally
  #       (allow recovery to mark itself finished and reboot)

  if OPTIONS.two_step:
    if not OPTIONS.source_info_dict.get("multistage_support", None):
      assert False, "two-step packages not supported by this build"
    fs = OPTIONS.source_info_dict["fstab"]["/misc"]
    assert fs.fs_type.upper() == "EMMC", \
        "two-step packages only supported on devices with EMMC /misc partitions"
    bcb_dev = {"bcb_dev": fs.device}
    common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
    script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)
    script.AppendExtra("sleep(20);\n")
    script.WriteRawImage("/recovery", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
reboot_now("%(bcb_dev)s", "recovery");
else if get_stage("%(bcb_dev)s") != "3/3" then
""" % bcb_dev)

  # Dump fingerprints
  script.Print("Source: %s" % CalculateFingerprint(
      oem_props, oem_dict, OPTIONS.source_info_dict))
  script.Print("Target: %s" % CalculateFingerprint(
      oem_props, oem_dict, OPTIONS.target_info_dict))

  script.Print("Verifying current system...")

  device_specific.IncrementalOTA_VerifyBegin()

  if oem_props is None:
    # When blockimgdiff version is less than 3 (non-resumable block-based
    # OTA), patching on a device that's already on the target build will
    # damage the system. Because operations like move don't check the block
    # state, they always apply the changes unconditionally.
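    # (From version 3 on, the transfer is resumable and verifies per-block
    # state, so a device already on the target build is safe; that is why
    # the target fingerprint is also accepted below.)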
    if blockimgdiff_version <= 2:
      script.AssertSomeFingerprint(source_fp)
    else:
      script.AssertSomeFingerprint(source_fp, target_fp)
  else:
    if blockimgdiff_version <= 2:
      script.AssertSomeThumbprint(
          GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
    else:
      script.AssertSomeThumbprint(
          GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
          GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))

  # Check the required cache size (i.e. stashed blocks).
  size = []
  if system_diff:
    size.append(system_diff.required_cache)
  if vendor_diff:
    size.append(vendor_diff.required_cache)

  if updating_boot:
    boot_type, boot_device = common.GetTypeAndDevice(
        "/boot", OPTIONS.source_info_dict)
    d = common.Difference(target_boot, source_boot)
    _, _, d = d.ComputePatch()
    if d is None:
      include_full_boot = True
      common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
    else:
      include_full_boot = False

      print "boot target: %d source: %d diff: %d" % (
          target_boot.size, source_boot.size, len(d))

      common.ZipWriteStr(output_zip, "patch/boot.img.p", d)

      script.PatchCheck("%s:%s:%d:%s:%d:%s" %
                        (boot_type, boot_device,
                         source_boot.size, source_boot.sha1,
                         target_boot.size, target_boot.sha1))
      size.append(target_boot.size)

  if size:
    script.CacheFreeSpaceCheck(max(size))

  device_specific.IncrementalOTA_VerifyEnd()

  if OPTIONS.two_step:
    script.WriteRawImage("/boot", "recovery.img")
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
else
""" % bcb_dev)

  # Verify the existing partitions.
  system_diff.WriteVerifyScript(script, touched_blocks_only=True)
  if vendor_diff:
    vendor_diff.WriteVerifyScript(script, touched_blocks_only=True)

  script.Comment("---- start making changes here ----")

  device_specific.IncrementalOTA_InstallBegin()

  system_diff.WriteScript(script, output_zip,
                          progress=0.8 if vendor_diff else 0.9)

  if vendor_diff:
    vendor_diff.WriteScript(script, output_zip, progress=0.1)

  if OPTIONS.two_step:
    common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
    script.WriteRawImage("/boot", "boot.img")
    print "writing full boot image (forced by two-step mode)"

  if not OPTIONS.two_step:
    if updating_boot:
      if include_full_boot:
        print "boot image changed; including full."
        script.Print("Installing boot image...")
        script.WriteRawImage("/boot", "boot.img")
      else:
        # Produce the boot image by applying a patch to the current
        # contents of the boot partition, and write it back to the
        # partition.
        print "boot image changed; including patch."
        script.Print("Patching boot image...")
        script.ShowProgress(0.1, 10)
        script.ApplyPatch("%s:%s:%d:%s:%d:%s"
                          % (boot_type, boot_device,
                             source_boot.size, source_boot.sha1,
                             target_boot.size, target_boot.sha1),
                          "-",
                          target_boot.size, target_boot.sha1,
                          source_boot.sha1, "patch/boot.img.p")
    else:
      print "boot image unchanged; skipping."

  # Do device-specific installation (eg, write radio image).
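  # (This dispatches to the device-specific releasetools extension, if the
  # build provides one; radio and bootloader images are typically handled
  # there rather than in this script.)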
  device_specific.IncrementalOTA_InstallEnd()

  if OPTIONS.extra_script is not None:
    script.AppendExtra(OPTIONS.extra_script)

  if OPTIONS.wipe_user_data:
    script.Print("Erasing user data...")
    script.FormatPartition("/data")
    metadata["ota-wipe"] = "yes"

  if OPTIONS.two_step:
    script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
endif;
endif;
""" % bcb_dev)

  script.SetProgress(1)
  # For downgrade OTAs, we prefer to use the update-binary in the source
  # build, which is actually newer than the one in the target build.
  if OPTIONS.downgrade:
    script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary)
  else:
    script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
  metadata["ota-required-cache"] = str(script.required_cache)
  WriteMetadata(metadata, output_zip)


def WriteVerifyPackage(input_zip, output_zip):
  script = edify_generator.EdifyGenerator(3, OPTIONS.info_dict)

  oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties")
  recovery_mount_options = OPTIONS.info_dict.get(
      "recovery_mount_options")
  oem_dict = None
  if oem_props is not None and len(oem_props) > 0:
    if OPTIONS.oem_source is None:
      raise common.ExternalError("OEM source required for this build")
    script.Mount("/oem", recovery_mount_options)
    oem_dict = common.LoadDictionaryFromLines(
        open(OPTIONS.oem_source).readlines())

  target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.info_dict)
  metadata = {
      "post-build": target_fp,
      "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
                                   OPTIONS.info_dict),
      "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
  }

  device_specific = common.DeviceSpecificParams(
      input_zip=input_zip,
      input_version=OPTIONS.info_dict["recovery_api_version"],
      output_zip=output_zip,
      script=script,
      input_tmp=OPTIONS.input_tmp,
      metadata=metadata,
      info_dict=OPTIONS.info_dict)

  AppendAssertions(script, OPTIONS.info_dict, oem_dict)

  script.Print("Verifying device images against %s..." % target_fp)
  script.AppendExtra("")

  script.Print("Verifying boot...")
  boot_img = common.GetBootableImage(
      "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
  boot_type, boot_device = common.GetTypeAndDevice(
      "/boot", OPTIONS.info_dict)
  script.Verify("%s:%s:%d:%s" % (
      boot_type, boot_device, boot_img.size, boot_img.sha1))
  script.AppendExtra("")

  script.Print("Verifying recovery...")
  recovery_img = common.GetBootableImage(
      "recovery.img", "recovery.img", OPTIONS.input_tmp, "RECOVERY")
  recovery_type, recovery_device = common.GetTypeAndDevice(
      "/recovery", OPTIONS.info_dict)
  script.Verify("%s:%s:%d:%s" % (
      recovery_type, recovery_device, recovery_img.size, recovery_img.sha1))
  script.AppendExtra("")

  system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict)
  system_tgt.ResetFileMap()
  system_diff = common.BlockDifference("system", system_tgt, src=None)
  system_diff.WriteStrictVerifyScript(script)

  if HasVendorPartition(input_zip):
    vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict)
    vendor_tgt.ResetFileMap()
    vendor_diff = common.BlockDifference("vendor", vendor_tgt, src=None)
    vendor_diff.WriteStrictVerifyScript(script)

  # Device-specific partitions, such as radio, bootloader, etc.
  device_specific.VerifyOTA_Assertions()

  script.SetProgress(1.0)
  script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
  metadata["ota-required-cache"] = str(script.required_cache)
  WriteMetadata(metadata, output_zip)


def WriteABOTAPackageWithBrilloScript(target_file, output_file,
                                      source_file=None):
  """Generate an Android OTA package that has an A/B update payload."""

  # Setup signing keys.
  if OPTIONS.package_key is None:
    OPTIONS.package_key = OPTIONS.info_dict.get(
        "default_system_dev_certificate",
        "build/target/product/security/testkey")

  # A/B updater expects a key in the RSA format.
  cmd = ["openssl", "pkcs8",
         "-in", OPTIONS.package_key + OPTIONS.private_key_suffix,
         "-inform", "DER", "-nocrypt"]
  rsa_key = common.MakeTempFile(prefix="key-", suffix=".key")
  cmd.extend(["-out", rsa_key])
  p1 = common.Run(cmd, stdout=subprocess.PIPE)
  p1.wait()
  assert p1.returncode == 0, "openssl pkcs8 failed"

  # Stage the output zip package for signing.
  temp_zip_file = tempfile.NamedTemporaryFile()
  output_zip = zipfile.ZipFile(temp_zip_file, "w",
                               compression=zipfile.ZIP_DEFLATED)

  # Metadata to comply with the Android OTA package format.
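  # WriteMetadata() below serializes this dict into
  # META-INF/com/android/metadata as sorted key=value lines, e.g.
  # (illustrative values only):
  #   ota-type=AB
  #   post-timestamp=1451606400
  #   pre-device=<device name from ro.product.device>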
  oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties", None)
  oem_dict = None
  if oem_props:
    if OPTIONS.oem_source is None:
      raise common.ExternalError("OEM source required for this build")
    oem_dict = common.LoadDictionaryFromLines(
        open(OPTIONS.oem_source).readlines())

  metadata = {
      "post-build": CalculateFingerprint(oem_props, oem_dict,
                                         OPTIONS.info_dict),
      "post-build-incremental": GetBuildProp("ro.build.version.incremental",
                                             OPTIONS.info_dict),
      "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
                                   OPTIONS.info_dict),
      "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
      "ota-required-cache": "0",
      "ota-type": "AB",
  }

  if source_file is not None:
    metadata["pre-build"] = CalculateFingerprint(oem_props, oem_dict,
                                                 OPTIONS.source_info_dict)
    metadata["pre-build-incremental"] = GetBuildProp(
        "ro.build.version.incremental", OPTIONS.source_info_dict)

  # 1. Generate payload.
  payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin")
  cmd = ["brillo_update_payload", "generate",
         "--payload", payload_file,
         "--target_image", target_file]
  if source_file is not None:
    cmd.extend(["--source_image", source_file])
  p1 = common.Run(cmd, stdout=subprocess.PIPE)
  p1.wait()
  assert p1.returncode == 0, "brillo_update_payload generate failed"

  # 2. Generate hashes of the payload and metadata files.
  payload_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
  metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
  cmd = ["brillo_update_payload", "hash",
         "--unsigned_payload", payload_file,
         "--signature_size", "256",
         "--metadata_hash_file", metadata_sig_file,
         "--payload_hash_file", payload_sig_file]
  p1 = common.Run(cmd, stdout=subprocess.PIPE)
  p1.wait()
  assert p1.returncode == 0, "brillo_update_payload hash failed"

  # 3. Sign the hashes and insert them back into the payload file.
  signed_payload_sig_file = common.MakeTempFile(prefix="signed-sig-",
                                                suffix=".bin")
  signed_metadata_sig_file = common.MakeTempFile(prefix="signed-sig-",
                                                 suffix=".bin")
  # 3a. Sign the payload hash.
  cmd = ["openssl", "pkeyutl", "-sign",
         "-inkey", rsa_key,
         "-pkeyopt", "digest:sha256",
         "-in", payload_sig_file,
         "-out", signed_payload_sig_file]
  p1 = common.Run(cmd, stdout=subprocess.PIPE)
  p1.wait()
  assert p1.returncode == 0, "openssl sign payload failed"

  # 3b. Sign the metadata hash.
  cmd = ["openssl", "pkeyutl", "-sign",
         "-inkey", rsa_key,
         "-pkeyopt", "digest:sha256",
         "-in", metadata_sig_file,
         "-out", signed_metadata_sig_file]
  p1 = common.Run(cmd, stdout=subprocess.PIPE)
  p1.wait()
  assert p1.returncode == 0, "openssl sign metadata failed"

  # 3c. Insert the signatures back into the payload file.
  signed_payload_file = common.MakeTempFile(prefix="signed-payload-",
                                            suffix=".bin")
  cmd = ["brillo_update_payload", "sign",
         "--unsigned_payload", payload_file,
         "--payload", signed_payload_file,
         "--signature_size", "256",
         "--metadata_signature_file", signed_metadata_sig_file,
         "--payload_signature_file", signed_payload_sig_file]
  p1 = common.Run(cmd, stdout=subprocess.PIPE)
  p1.wait()
  assert p1.returncode == 0, "brillo_update_payload sign failed"

  # 4. Dump the signed payload properties.
  properties_file = common.MakeTempFile(prefix="payload-properties-",
                                        suffix=".txt")
  cmd = ["brillo_update_payload", "properties",
         "--payload", signed_payload_file,
         "--properties_file", properties_file]
  p1 = common.Run(cmd, stdout=subprocess.PIPE)
  p1.wait()
  assert p1.returncode == 0, "brillo_update_payload properties failed"

  # Add the signed payload file and properties into the zip.
  common.ZipWrite(output_zip, properties_file,
                  arcname="payload_properties.txt")
  common.ZipWrite(output_zip, signed_payload_file, arcname="payload.bin",
                  compress_type=zipfile.ZIP_STORED)
  WriteMetadata(metadata, output_zip)

  # Sign the whole package to comply with the Android OTA package format.
  common.ZipClose(output_zip)
  SignOutput(temp_zip_file.name, output_file)
  temp_zip_file.close()


class FileDifference(object):
  def __init__(self, partition, source_zip, target_zip, output_zip):
    self.deferred_patch_list = None
    print "Loading target..."
    self.target_data = target_data = LoadPartitionFiles(target_zip, partition)
    print "Loading source..."
    self.source_data = source_data = LoadPartitionFiles(source_zip, partition)

    self.verbatim_targets = verbatim_targets = []
    self.patch_list = patch_list = []
    diffs = []
    self.renames = renames = {}
    known_paths = set()
    largest_source_size = 0

    matching_file_cache = {}
    for fn, sf in source_data.items():
      assert fn == sf.name
      matching_file_cache["path:" + fn] = sf
      if fn in target_data.keys():
        AddToKnownPaths(fn, known_paths)
      # Only allow eligibility for filename/sha matching
      # if there isn't a perfect path match.
      if target_data.get(sf.name) is None:
        matching_file_cache["file:" + fn.split("/")[-1]] = sf
        matching_file_cache["sha1:" + sf.sha1] = sf

    for fn in sorted(target_data.keys()):
      tf = target_data[fn]
      assert fn == tf.name
      sf = ClosestFileMatch(tf, matching_file_cache, renames)
      if sf is not None and sf.name != tf.name:
        print "File has moved from " + sf.name + " to " + tf.name
        renames[sf.name] = tf

      if sf is None or fn in OPTIONS.require_verbatim:
        # This file should be included verbatim
        if fn in OPTIONS.prohibit_verbatim:
          raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
        print "send", fn, "verbatim"
        tf.AddToZip(output_zip)
        verbatim_targets.append((fn, tf.size, tf.sha1))
        if fn in target_data.keys():
          AddToKnownPaths(fn, known_paths)
      elif tf.sha1 != sf.sha1:
        # File is different; consider sending as a patch
        diffs.append(common.Difference(tf, sf))
      else:
        # Target file data identical to source (may still be renamed)
        pass

    common.ComputeDifferences(diffs)

    for diff in diffs:
      tf, sf, d = diff.GetPatch()
      path = "/".join(tf.name.split("/")[:-1])
      if d is None or len(d) > tf.size * OPTIONS.patch_threshold or \
          path not in known_paths:
        # patch is almost as big as the file; don't bother patching
        # or a patch + rename cannot take place due to the target
        # directory not existing
        tf.AddToZip(output_zip)
        verbatim_targets.append((tf.name, tf.size, tf.sha1))
        if sf.name in renames:
          del renames[sf.name]
        AddToKnownPaths(tf.name, known_paths)
      else:
        common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d)
        patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest()))
        largest_source_size = max(largest_source_size, sf.size)

    self.largest_source_size = largest_source_size

  def EmitVerification(self, script):
    so_far = 0
    for tf, sf, _, _ in self.patch_list:
      if tf.name != sf.name:
        script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
      script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1)
      so_far += sf.size
    return so_far

  def EmitExplicitTargetVerification(self, script):
    for fn, _, sha1 in self.verbatim_targets:
      if fn[-1] != "/":
        script.FileCheck("/"+fn, sha1)
    for tf, _, _, _ in self.patch_list:
      script.FileCheck(tf.name, tf.sha1)

  def RemoveUnneededFiles(self, script, extras=()):
    file_list = ["/" + i[0] for i in self.verbatim_targets]
    file_list += ["/" + i for i in self.source_data
                  if i not in self.target_data and i not in self.renames]
    file_list += list(extras)
    # Sort the list in descending order, which removes all the files first
    # before attempting to remove the folder. (Bug: 22960996)
    script.DeleteFiles(sorted(file_list, reverse=True))

  def TotalPatchSize(self):
    return sum(i[1].size for i in self.patch_list)

  def EmitPatches(self, script, total_patch_size, so_far):
    self.deferred_patch_list = deferred_patch_list = []
    for item in self.patch_list:
      tf, sf, _, _ = item
      if tf.name == "system/build.prop":
        deferred_patch_list.append(item)
        continue
      if sf.name != tf.name:
        script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
      script.ApplyPatch("/" + sf.name, "-", tf.size, tf.sha1, sf.sha1,
                        "patch/" + sf.name + ".p")
      so_far += tf.size
      script.SetProgress(so_far / total_patch_size)
    return so_far

  def EmitDeferredPatches(self, script):
    for item in self.deferred_patch_list:
      tf, sf, _, _ = item
      script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1,
                        "patch/" + sf.name + ".p")
    script.SetPermissions("/system/build.prop", 0, 0, 0o644, None, None)

  def EmitRenames(self, script):
    if len(self.renames) > 0:
      script.Print("Renaming files...")
      for src, tgt in self.renames.iteritems():
        print "Renaming " + src + " to " + tgt.name
        script.RenameFile(src, tgt.name)


def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
  target_has_recovery_patch = HasRecoveryPatch(target_zip)
  source_has_recovery_patch = HasRecoveryPatch(source_zip)

  if (OPTIONS.block_based and
      target_has_recovery_patch and
      source_has_recovery_patch):
    return WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip)

  source_version = OPTIONS.source_info_dict["recovery_api_version"]
  target_version = OPTIONS.target_info_dict["recovery_api_version"]

  if source_version == 0:
    print ("WARNING: generating edify script for a source that "
           "can't install it.")
  script = edify_generator.EdifyGenerator(
      source_version, OPTIONS.target_info_dict,
      fstab=OPTIONS.source_info_dict["fstab"])

  oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties")
  recovery_mount_options = OPTIONS.source_info_dict.get(
      "recovery_mount_options")
  oem_dict = None
  if oem_props is not None and len(oem_props) > 0:
    if OPTIONS.oem_source is None:
      raise common.ExternalError("OEM source required for this build")
    if not OPTIONS.oem_no_mount:
      script.Mount("/oem", recovery_mount_options)
    oem_dict = common.LoadDictionaryFromLines(
        open(OPTIONS.oem_source).readlines())

  metadata = {
      "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
                                   OPTIONS.source_info_dict),
      "ota-type": "FILE",
  }

  post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
  pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
  is_downgrade = long(post_timestamp) < long(pre_timestamp)

  if OPTIONS.downgrade:
    metadata["ota-downgrade"] = "yes"
    if not is_downgrade:
      raise RuntimeError("--downgrade specified but no downgrade detected: "
                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
  else:
    if is_downgrade:
      # Non-fatal here to allow generating such a package which may require
      # manual work to adjust the post-timestamp. A legit use case is that we
      # cut a new build C (after having A and B), but want to enforce the
      # update path of A -> C -> B.
      # Specifying --downgrade may not help since that would enforce a data
      # wipe for the C -> B update.
      print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
            "The package may not be deployed properly. "
            "Try --downgrade?\n" % (pre_timestamp, post_timestamp))
    metadata["post-timestamp"] = post_timestamp

  device_specific = common.DeviceSpecificParams(
      source_zip=source_zip,
      source_version=source_version,
      target_zip=target_zip,
      target_version=target_version,
      output_zip=output_zip,
      script=script,
      metadata=metadata,
      info_dict=OPTIONS.source_info_dict)

  system_diff = FileDifference("system", source_zip, target_zip, output_zip)
  script.Mount("/system", recovery_mount_options)
  if HasVendorPartition(target_zip):
    vendor_diff = FileDifference("vendor", source_zip, target_zip, output_zip)
    script.Mount("/vendor", recovery_mount_options)
  else:
    vendor_diff = None

  target_fp = CalculateFingerprint(oem_props, oem_dict,
                                   OPTIONS.target_info_dict)
  source_fp = CalculateFingerprint(oem_props, oem_dict,
                                   OPTIONS.source_info_dict)

  if oem_props is None:
    script.AssertSomeFingerprint(source_fp, target_fp)
  else:
    script.AssertSomeThumbprint(
        GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
        GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))

  metadata["pre-build"] = source_fp
  metadata["post-build"] = target_fp
  metadata["pre-build-incremental"] = GetBuildProp(
      "ro.build.version.incremental", OPTIONS.source_info_dict)
  metadata["post-build-incremental"] = GetBuildProp(
      "ro.build.version.incremental", OPTIONS.target_info_dict)

  source_boot = common.GetBootableImage(
      "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT",
      OPTIONS.source_info_dict)
  target_boot = common.GetBootableImage(
      "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT")
  updating_boot = (not OPTIONS.two_step and
                   (source_boot.data != target_boot.data))

  source_recovery = common.GetBootableImage(
      "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY",
      OPTIONS.source_info_dict)
  target_recovery = common.GetBootableImage(
      "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
  updating_recovery = (source_recovery.data != target_recovery.data)

  # Here's how we divide up the progress bar:
  #  0.1 for verifying the start state (PatchCheck calls)
  #  0.8 for applying patches (ApplyPatch calls)
  #  0.1 for unpacking verbatim files, symlinking, and doing the
  #      device-specific commands.

  AppendAssertions(script, OPTIONS.target_info_dict, oem_dict)
  device_specific.IncrementalOTA_Assertions()

  # Two-step incremental package strategy (in chronological order,
  # which is *not* the order in which the generated script has
  # things):
  #
  # if stage is not "2/3" or "3/3":
  #    do verification on current system
  #    write recovery image to boot partition
  #    set stage to "2/3"
  #    reboot to boot partition and restart recovery
  # else if stage is "2/3":
  #    write recovery image to recovery partition
  #    set stage to "3/3"
  #    reboot to recovery partition and restart recovery
  # else:
  #    (stage must be "3/3")
  #    perform update:
  #       patch system files, etc.
1558 # force full install of new boot image 1559 # set up system to update recovery partition on first boot 1560 # complete script normally 1561 # (allow recovery to mark itself finished and reboot) 1562 1563 if OPTIONS.two_step: 1564 if not OPTIONS.source_info_dict.get("multistage_support", None): 1565 assert False, "two-step packages not supported by this build" 1566 fs = OPTIONS.source_info_dict["fstab"]["/misc"] 1567 assert fs.fs_type.upper() == "EMMC", \ 1568 "two-step packages only supported on devices with EMMC /misc partitions" 1569 bcb_dev = {"bcb_dev": fs.device} 1570 common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data) 1571 script.AppendExtra(""" 1572 if get_stage("%(bcb_dev)s") == "2/3" then 1573 """ % bcb_dev) 1574 script.AppendExtra("sleep(20);\n") 1575 script.WriteRawImage("/recovery", "recovery.img") 1576 script.AppendExtra(""" 1577 set_stage("%(bcb_dev)s", "3/3"); 1578 reboot_now("%(bcb_dev)s", "recovery"); 1579 else if get_stage("%(bcb_dev)s") != "3/3" then 1580 """ % bcb_dev) 1581 1582 # Dump fingerprints 1583 script.Print("Source: %s" % (source_fp,)) 1584 script.Print("Target: %s" % (target_fp,)) 1585 1586 script.Print("Verifying current system...") 1587 1588 device_specific.IncrementalOTA_VerifyBegin() 1589 1590 script.ShowProgress(0.1, 0) 1591 so_far = system_diff.EmitVerification(script) 1592 if vendor_diff: 1593 so_far += vendor_diff.EmitVerification(script) 1594 1595 size = [] 1596 if system_diff.patch_list: 1597 size.append(system_diff.largest_source_size) 1598 if vendor_diff: 1599 if vendor_diff.patch_list: 1600 size.append(vendor_diff.largest_source_size) 1601 1602 if updating_boot: 1603 d = common.Difference(target_boot, source_boot) 1604 _, _, d = d.ComputePatch() 1605 print "boot target: %d source: %d diff: %d" % ( 1606 target_boot.size, source_boot.size, len(d)) 1607 1608 common.ZipWriteStr(output_zip, "patch/boot.img.p", d) 1609 1610 boot_type, boot_device = common.GetTypeAndDevice( 1611 "/boot", OPTIONS.source_info_dict) 1612 1613 script.PatchCheck("%s:%s:%d:%s:%d:%s" % 1614 (boot_type, boot_device, 1615 source_boot.size, source_boot.sha1, 1616 target_boot.size, target_boot.sha1)) 1617 so_far += source_boot.size 1618 size.append(target_boot.size) 1619 1620 if size: 1621 script.CacheFreeSpaceCheck(max(size)) 1622 1623 device_specific.IncrementalOTA_VerifyEnd() 1624 1625 if OPTIONS.two_step: 1626 script.WriteRawImage("/boot", "recovery.img") 1627 script.AppendExtra(""" 1628 set_stage("%(bcb_dev)s", "2/3"); 1629 reboot_now("%(bcb_dev)s", ""); 1630 else 1631 """ % bcb_dev) 1632 1633 script.Comment("---- start making changes here ----") 1634 1635 device_specific.IncrementalOTA_InstallBegin() 1636 1637 if OPTIONS.two_step: 1638 common.ZipWriteStr(output_zip, "boot.img", target_boot.data) 1639 script.WriteRawImage("/boot", "boot.img") 1640 print "writing full boot image (forced by two-step mode)" 1641 1642 script.Print("Removing unneeded files...") 1643 system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",)) 1644 if vendor_diff: 1645 vendor_diff.RemoveUnneededFiles(script) 1646 1647 script.ShowProgress(0.8, 0) 1648 total_patch_size = 1.0 + system_diff.TotalPatchSize() 1649 if vendor_diff: 1650 total_patch_size += vendor_diff.TotalPatchSize() 1651 if updating_boot: 1652 total_patch_size += target_boot.size 1653 1654 script.Print("Patching system files...") 1655 so_far = system_diff.EmitPatches(script, total_patch_size, 0) 1656 if vendor_diff: 1657 script.Print("Patching vendor files...") 1658 so_far = vendor_diff.EmitPatches(script, 
total_patch_size, so_far) 1659 1660 if not OPTIONS.two_step: 1661 if updating_boot: 1662 # Produce the boot image by applying a patch to the current 1663 # contents of the boot partition, and write it back to the 1664 # partition. 1665 script.Print("Patching boot image...") 1666 script.ApplyPatch("%s:%s:%d:%s:%d:%s" 1667 % (boot_type, boot_device, 1668 source_boot.size, source_boot.sha1, 1669 target_boot.size, target_boot.sha1), 1670 "-", 1671 target_boot.size, target_boot.sha1, 1672 source_boot.sha1, "patch/boot.img.p") 1673 so_far += target_boot.size 1674 script.SetProgress(so_far / total_patch_size) 1675 print "boot image changed; including." 1676 else: 1677 print "boot image unchanged; skipping." 1678 1679 system_items = ItemSet("system", "META/filesystem_config.txt") 1680 if vendor_diff: 1681 vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt") 1682 1683 if updating_recovery: 1684 # Recovery is generated as a patch using both the boot image 1685 # (which contains the same linux kernel as recovery) and the file 1686 # /system/etc/recovery-resource.dat (which contains all the images 1687 # used in the recovery UI) as sources. This lets us minimize the 1688 # size of the patch, which must be included in every OTA package. 1689 # 1690 # For older builds where recovery-resource.dat is not present, we 1691 # use only the boot image as the source. 1692 1693 if not target_has_recovery_patch: 1694 def output_sink(fn, data): 1695 common.ZipWriteStr(output_zip, "recovery/" + fn, data) 1696 system_items.Get("system/" + fn) 1697 1698 common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink, 1699 target_recovery, target_boot) 1700 script.DeleteFiles(["/system/recovery-from-boot.p", 1701 "/system/etc/recovery.img", 1702 "/system/etc/install-recovery.sh"]) 1703 print "recovery image changed; including as patch from boot." 1704 else: 1705 print "recovery image unchanged; skipping." 1706 1707 script.ShowProgress(0.1, 10) 1708 1709 target_symlinks = CopyPartitionFiles(system_items, target_zip, None) 1710 if vendor_diff: 1711 target_symlinks.extend(CopyPartitionFiles(vendor_items, target_zip, None)) 1712 1713 temp_script = script.MakeTemporary() 1714 system_items.GetMetadata(target_zip) 1715 system_items.Get("system").SetPermissions(temp_script) 1716 if vendor_diff: 1717 vendor_items.GetMetadata(target_zip) 1718 vendor_items.Get("vendor").SetPermissions(temp_script) 1719 1720 # Note that this call will mess up the trees of Items, so make sure 1721 # we're done with them. 1722 source_symlinks = CopyPartitionFiles(system_items, source_zip, None) 1723 if vendor_diff: 1724 source_symlinks.extend(CopyPartitionFiles(vendor_items, source_zip, None)) 1725 1726 target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks]) 1727 source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks]) 1728 1729 # Delete all the symlinks in source that aren't in target. This 1730 # needs to happen before verbatim files are unpacked, in case a 1731 # symlink in the source is replaced by a real file in the target. 1732 1733 # If a symlink in the source will be replaced by a regular file, we cannot 1734 # delete the symlink/file in case the package gets applied again. For such 1735 # a symlink, we prepend a sha1_check() to detect if it has been updated. 
1736 # (Bug: 23646151)
1737 replaced_symlinks = dict()
1738 if system_diff:
1739 for i in system_diff.verbatim_targets:
1740 replaced_symlinks["/%s" % (i[0],)] = i[2]
1741 if vendor_diff:
1742 for i in vendor_diff.verbatim_targets:
1743 replaced_symlinks["/%s" % (i[0],)] = i[2]
1744
1745 if system_diff:
1746 for tf in system_diff.renames.values():
1747 replaced_symlinks["/%s" % (tf.name,)] = tf.sha1
1748 if vendor_diff:
1749 for tf in vendor_diff.renames.values():
1750 replaced_symlinks["/%s" % (tf.name,)] = tf.sha1
1751
1752 always_delete = []
1753 may_delete = []
1754 for dest, link in source_symlinks:
1755 if link not in target_symlinks_d:
1756 if link in replaced_symlinks:
1757 may_delete.append((link, replaced_symlinks[link]))
1758 else:
1759 always_delete.append(link)
1760 script.DeleteFiles(always_delete)
1761 script.DeleteFilesIfNotMatching(may_delete)
1762
1763 if system_diff.verbatim_targets:
1764 script.Print("Unpacking new system files...")
1765 script.UnpackPackageDir("system", "/system")
1766 if vendor_diff and vendor_diff.verbatim_targets:
1767 script.Print("Unpacking new vendor files...")
1768 script.UnpackPackageDir("vendor", "/vendor")
1769
1770 if updating_recovery and not target_has_recovery_patch:
1771 script.Print("Unpacking new recovery...")
1772 script.UnpackPackageDir("recovery", "/system")
1773
1774 system_diff.EmitRenames(script)
1775 if vendor_diff:
1776 vendor_diff.EmitRenames(script)
1777
1778 script.Print("Symlinks and permissions...")
1779
1780 # Create all the symlinks that don't already exist, or point to
1781 # somewhere different from what we want. Delete each symlink before
1782 # creating it, since the 'symlink' command won't overwrite.
1783 to_create = []
1784 for dest, link in target_symlinks:
1785 if link in source_symlinks_d:
1786 if dest != source_symlinks_d[link]:
1787 to_create.append((dest, link))
1788 else:
1789 to_create.append((dest, link))
1790 script.DeleteFiles([i[1] for i in to_create])
1791 script.MakeSymlinks(to_create)
1792
1793 # Now that the symlinks are created, we can set all the
1794 # permissions.
1795 script.AppendScript(temp_script)
1796
1797 # Do device-specific installation (e.g., write the radio image).
1798 device_specific.IncrementalOTA_InstallEnd()
1799
1800 if OPTIONS.extra_script is not None:
1801 script.AppendExtra(OPTIONS.extra_script)
1802
1803 # Patch the build.prop file last, so if something fails but the
1804 # device can still come up, it appears to be the old build and will
1805 # get sent the OTA package again to retry.
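# Illustration (comment only, not emitted into the package): the deferred
# system/build.prop entry held back by EmitPatches() is applied just below
# via EmitDeferredPatches(). Assuming the stock edify_generator behavior,
# the resulting updater-script lines look roughly like the following
# (sha1/size values are placeholders; set_metadata() may be emitted instead
# of set_perm() on SELinux-enabled builds):
#
#   apply_patch("/system/build.prop", "-", <target_sha1>, <target_size>,
#               <source_sha1>, "patch/system/build.prop.p");
#   set_perm(0, 0, 0644, "/system/build.prop");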
1806 script.Print("Patching remaining system files...") 1807 system_diff.EmitDeferredPatches(script) 1808 1809 if OPTIONS.wipe_user_data: 1810 script.Print("Erasing user data...") 1811 script.FormatPartition("/data") 1812 metadata["ota-wipe"] = "yes" 1813 1814 if OPTIONS.two_step: 1815 script.AppendExtra(""" 1816 set_stage("%(bcb_dev)s", ""); 1817 endif; 1818 endif; 1819 """ % bcb_dev) 1820 1821 if OPTIONS.verify and system_diff: 1822 script.Print("Remounting and verifying system partition files...") 1823 script.Unmount("/system") 1824 script.Mount("/system", recovery_mount_options) 1825 system_diff.EmitExplicitTargetVerification(script) 1826 1827 if OPTIONS.verify and vendor_diff: 1828 script.Print("Remounting and verifying vendor partition files...") 1829 script.Unmount("/vendor") 1830 script.Mount("/vendor", recovery_mount_options) 1831 vendor_diff.EmitExplicitTargetVerification(script) 1832 1833 # For downgrade OTAs, we prefer to use the update-binary in the source 1834 # build that is actually newer than the one in the target build. 1835 if OPTIONS.downgrade: 1836 script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary) 1837 else: 1838 script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary) 1839 1840 metadata["ota-required-cache"] = str(script.required_cache) 1841 WriteMetadata(metadata, output_zip) 1842 1843 1844 def main(argv): 1845 1846 def option_handler(o, a): 1847 if o == "--board_config": 1848 pass # deprecated 1849 elif o in ("-k", "--package_key"): 1850 OPTIONS.package_key = a 1851 elif o in ("-i", "--incremental_from"): 1852 OPTIONS.incremental_source = a 1853 elif o == "--full_radio": 1854 OPTIONS.full_radio = True 1855 elif o == "--full_bootloader": 1856 OPTIONS.full_bootloader = True 1857 elif o in ("-w", "--wipe_user_data"): 1858 OPTIONS.wipe_user_data = True 1859 elif o in ("-n", "--no_prereq"): 1860 OPTIONS.omit_prereq = True 1861 elif o == "--downgrade": 1862 OPTIONS.downgrade = True 1863 OPTIONS.wipe_user_data = True 1864 elif o in ("-o", "--oem_settings"): 1865 OPTIONS.oem_source = a 1866 elif o == "--oem_no_mount": 1867 OPTIONS.oem_no_mount = True 1868 elif o in ("-e", "--extra_script"): 1869 OPTIONS.extra_script = a 1870 elif o in ("-a", "--aslr_mode"): 1871 if a in ("on", "On", "true", "True", "yes", "Yes"): 1872 OPTIONS.aslr_mode = True 1873 else: 1874 OPTIONS.aslr_mode = False 1875 elif o in ("-t", "--worker_threads"): 1876 if a.isdigit(): 1877 OPTIONS.worker_threads = int(a) 1878 else: 1879 raise ValueError("Cannot parse value %r for option %r - only " 1880 "integers are allowed." 
% (a, o))
1881 elif o in ("-2", "--two_step"):
1882 OPTIONS.two_step = True
1883 elif o == "--no_signing":
1884 OPTIONS.no_signing = True
1885 elif o == "--verify":
1886 OPTIONS.verify = True
1887 elif o == "--block":
1888 OPTIONS.block_based = True
1889 elif o in ("-b", "--binary"):
1890 OPTIONS.updater_binary = a
1891 elif o in ("--no_fallback_to_full",):
1892 OPTIONS.fallback_to_full = False
1893 elif o == "--stash_threshold":
1894 try:
1895 OPTIONS.stash_threshold = float(a)
1896 except ValueError:
1897 raise ValueError("Cannot parse value %r for option %r - expecting "
1898 "a float" % (a, o))
1899 elif o == "--gen_verify":
1900 OPTIONS.gen_verify = True
1901 elif o == "--log_diff":
1902 OPTIONS.log_diff = a
1903 else:
1904 return False
1905 return True
1906
1907 args = common.ParseOptions(argv, __doc__,
1908 extra_opts="b:k:i:d:wne:t:a:2o:",
1909 extra_long_opts=[
1910 "board_config=",
1911 "package_key=",
1912 "incremental_from=",
1913 "full_radio",
1914 "full_bootloader",
1915 "wipe_user_data",
1916 "no_prereq",
1917 "downgrade",
1918 "extra_script=",
1919 "worker_threads=",
1920 "aslr_mode=",
1921 "two_step",
1922 "no_signing",
1923 "block",
1924 "binary=",
1925 "oem_settings=",
1926 "oem_no_mount",
1927 "verify",
1928 "no_fallback_to_full",
1929 "stash_threshold=",
1930 "gen_verify",
1931 "log_diff=",
1932 ], extra_option_handler=option_handler)
1933
1934 if len(args) != 2:
1935 common.Usage(__doc__)
1936 sys.exit(1)
1937
1938 if OPTIONS.downgrade:
1939 # Sanity check to enforce a data wipe.
1940 if not OPTIONS.wipe_user_data:
1941 raise ValueError("Cannot downgrade without a data wipe")
1942
1943 # We should only allow downgrading incrementals (as opposed to full).
1944 # Otherwise the device may be downgraded from an arbitrary build with
1945 # this full OTA package.
1946 if OPTIONS.incremental_source is None:
1947 raise ValueError("Cannot generate downgradable full OTAs - consider "
1948 "using --no_prereq?")
1949
1950 # Load the dict file from the zip directly to have a peek at the OTA type.
1951 # For packages using A/B update, unzipping is not needed.
1952 input_zip = zipfile.ZipFile(args[0], "r")
1953 OPTIONS.info_dict = common.LoadInfoDict(input_zip)
1954 common.ZipClose(input_zip)
1955
1956 ab_update = OPTIONS.info_dict.get("ab_update") == "true"
1957
1958 if ab_update:
1959 if OPTIONS.incremental_source is not None:
1960 OPTIONS.target_info_dict = OPTIONS.info_dict
1961 source_zip = zipfile.ZipFile(OPTIONS.incremental_source, "r")
1962 OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
1963 common.ZipClose(source_zip)
1964
1965 if OPTIONS.verbose:
1966 print "--- target info ---"
1967 common.DumpInfoDict(OPTIONS.info_dict)
1968
1969 if OPTIONS.incremental_source is not None:
1970 print "--- source info ---"
1971 common.DumpInfoDict(OPTIONS.source_info_dict)
1972
1973 WriteABOTAPackageWithBrilloScript(
1974 target_file=args[0],
1975 output_file=args[1],
1976 source_file=OPTIONS.incremental_source)
1977
1978 print "done."
1979 return
1980
1981 if OPTIONS.extra_script is not None:
1982 OPTIONS.extra_script = open(OPTIONS.extra_script).read()
1983
1984 print "unzipping target target-files..."
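# Illustration (comment only): with --downgrade (which also forces a data
# wipe), the checks above guarantee an incremental source, and the metadata
# that WriteMetadata() eventually records for the package looks roughly like
# the following. Values are placeholders, and "post-timestamp" is
# deliberately absent on this path (see WriteIncrementalOTAPackage):
#
#   ota-type=FILE
#   ota-downgrade=yes
#   ota-wipe=yes
#   pre-device=<ro.product.device of the source build>
#   pre-build=<source build fingerprint>
#   post-build=<target build fingerprint>
#   pre-build-incremental=<source ro.build.version.incremental>
#   post-build-incremental=<target ro.build.version.incremental>
#   ota-required-cache=<cache space needed during install>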
1985 OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0]) 1986 1987 OPTIONS.target_tmp = OPTIONS.input_tmp 1988 OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.target_tmp) 1989 1990 if OPTIONS.verbose: 1991 print "--- target info ---" 1992 common.DumpInfoDict(OPTIONS.info_dict) 1993 1994 # If the caller explicitly specified the device-specific extensions 1995 # path via -s/--device_specific, use that. Otherwise, use 1996 # META/releasetools.py if it is present in the target target_files. 1997 # Otherwise, take the path of the file from 'tool_extensions' in the 1998 # info dict and look for that in the local filesystem, relative to 1999 # the current directory. 2000 2001 if OPTIONS.device_specific is None: 2002 from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py") 2003 if os.path.exists(from_input): 2004 print "(using device-specific extensions from target_files)" 2005 OPTIONS.device_specific = from_input 2006 else: 2007 OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions", None) 2008 2009 if OPTIONS.device_specific is not None: 2010 OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific) 2011 2012 if OPTIONS.info_dict.get("no_recovery") == "true": 2013 raise common.ExternalError( 2014 "--- target build has specified no recovery ---") 2015 2016 # Use the default key to sign the package if not specified with package_key. 2017 if not OPTIONS.no_signing: 2018 if OPTIONS.package_key is None: 2019 OPTIONS.package_key = OPTIONS.info_dict.get( 2020 "default_system_dev_certificate", 2021 "build/target/product/security/testkey") 2022 2023 # Set up the output zip. Create a temporary zip file if signing is needed. 2024 if OPTIONS.no_signing: 2025 if os.path.exists(args[1]): 2026 os.unlink(args[1]) 2027 output_zip = zipfile.ZipFile(args[1], "w", 2028 compression=zipfile.ZIP_DEFLATED) 2029 else: 2030 temp_zip_file = tempfile.NamedTemporaryFile() 2031 output_zip = zipfile.ZipFile(temp_zip_file, "w", 2032 compression=zipfile.ZIP_DEFLATED) 2033 2034 # Non A/B OTAs rely on /cache partition to store temporary files. 2035 cache_size = OPTIONS.info_dict.get("cache_size", None) 2036 if cache_size is None: 2037 print "--- can't determine the cache partition size ---" 2038 OPTIONS.cache_size = cache_size 2039 2040 # Generate a verify package. 2041 if OPTIONS.gen_verify: 2042 WriteVerifyPackage(input_zip, output_zip) 2043 2044 # Generate a full OTA. 2045 elif OPTIONS.incremental_source is None: 2046 WriteFullOTAPackage(input_zip, output_zip) 2047 2048 # Generate an incremental OTA. It will fall back to generate a full OTA on 2049 # failure unless no_fallback_to_full is specified. 2050 else: 2051 print "unzipping source target-files..." 
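# Illustration (comment only): recap of the output path chosen above.
#
#   --no_signing : args[1] is opened directly and the package is written
#                  out unsigned.
#   default      : the package is assembled in a NamedTemporaryFile, and
#                  SignOutput(temp_zip_file.name, args[1]) signs it into
#                  args[1] at the end of main(), using OPTIONS.package_key
#                  (default_system_dev_certificate from the target-files,
#                  falling back to the AOSP testkey when that property is
#                  missing).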
2052 OPTIONS.source_tmp, source_zip = common.UnzipTemp( 2053 OPTIONS.incremental_source) 2054 OPTIONS.target_info_dict = OPTIONS.info_dict 2055 OPTIONS.source_info_dict = common.LoadInfoDict(source_zip, 2056 OPTIONS.source_tmp) 2057 if OPTIONS.verbose: 2058 print "--- source info ---" 2059 common.DumpInfoDict(OPTIONS.source_info_dict) 2060 try: 2061 WriteIncrementalOTAPackage(input_zip, source_zip, output_zip) 2062 if OPTIONS.log_diff: 2063 out_file = open(OPTIONS.log_diff, 'w') 2064 import target_files_diff 2065 target_files_diff.recursiveDiff('', 2066 OPTIONS.source_tmp, 2067 OPTIONS.input_tmp, 2068 out_file) 2069 out_file.close() 2070 except ValueError: 2071 if not OPTIONS.fallback_to_full: 2072 raise 2073 print "--- failed to build incremental; falling back to full ---" 2074 OPTIONS.incremental_source = None 2075 WriteFullOTAPackage(input_zip, output_zip) 2076 2077 common.ZipClose(output_zip) 2078 2079 # Sign the generated zip package unless no_signing is specified. 2080 if not OPTIONS.no_signing: 2081 SignOutput(temp_zip_file.name, args[1]) 2082 temp_zip_file.close() 2083 2084 print "done." 2085 2086 2087 if __name__ == '__main__': 2088 try: 2089 common.CloseInheritedPipes() 2090 main(sys.argv[1:]) 2091 except common.ExternalError as e: 2092 print 2093 print " ERROR: %s" % (e,) 2094 print 2095 sys.exit(1) 2096 finally: 2097 common.Cleanup() 2098
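# ---------------------------------------------------------------------------
# Illustrative sketch (not used by this tool): the incremental-vs-full logic
# in main() boils down to "try the incremental build and, unless
# --no_fallback_to_full was given, retry as a full OTA when a ValueError is
# raised". The helper below restates that pattern with placeholder callables;
# build_incremental and build_full are hypothetical, not functions defined in
# this file.

def _fallback_to_full_sketch(build_incremental, build_full,
                             fallback_to_full=True):
  """Run build_incremental(); on ValueError, optionally fall back to full.

  Returns whatever the successful builder returns.
  """
  try:
    return build_incremental()
  except ValueError:
    if not fallback_to_full:
      raise
    print "--- failed to build incremental; falling back to full ---"
    return build_full()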