1 #!/usr/bin/env python 2 3 # Copyright 2016, The Android Open Source Project 4 # 5 # Permission is hereby granted, free of charge, to any person 6 # obtaining a copy of this software and associated documentation 7 # files (the "Software"), to deal in the Software without 8 # restriction, including without limitation the rights to use, copy, 9 # modify, merge, publish, distribute, sublicense, and/or sell copies 10 # of the Software, and to permit persons to whom the Software is 11 # furnished to do so, subject to the following conditions: 12 # 13 # The above copyright notice and this permission notice shall be 14 # included in all copies or substantial portions of the Software. 15 # 16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 20 # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 21 # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 22 # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 # SOFTWARE. 24 # 25 """Command-line tool for working with Android Verified Boot images.""" 26 27 import argparse 28 import binascii 29 import bisect 30 import hashlib 31 import math 32 import os 33 import struct 34 import subprocess 35 import sys 36 import tempfile 37 import time 38 39 # Keep in sync with libavb/avb_version.h. 40 AVB_VERSION_MAJOR = 1 41 AVB_VERSION_MINOR = 0 42 AVB_VERSION_SUB = 0 43 44 AVB_VBMETA_IMAGE_FLAGS_HASHTREE_DISABLED = 1 45 46 47 class AvbError(Exception): 48 """Application-specific errors. 49 50 These errors represent issues for which a stack-trace should not be 51 presented. 52 53 Attributes: 54 message: Error message. 55 """ 56 57 def __init__(self, message): 58 Exception.__init__(self, message) 59 60 61 class Algorithm(object): 62 """Contains details about an algorithm. 63 64 See the avb_vbmeta_header.h file for more details about 65 algorithms. 66 67 The constant |ALGORITHMS| is a dictionary from human-readable 68 names (e.g 'SHA256_RSA2048') to instances of this class. 69 70 Attributes: 71 algorithm_type: Integer code corresponding to |AvbAlgorithmType|. 72 hash_name: Empty or a name from |hashlib.algorithms|. 73 hash_num_bytes: Number of bytes used to store the hash. 74 signature_num_bytes: Number of bytes used to store the signature. 75 public_key_num_bytes: Number of bytes used to store the public key. 76 padding: Padding used for signature, if any. 77 """ 78 79 def __init__(self, algorithm_type, hash_name, hash_num_bytes, 80 signature_num_bytes, public_key_num_bytes, padding): 81 self.algorithm_type = algorithm_type 82 self.hash_name = hash_name 83 self.hash_num_bytes = hash_num_bytes 84 self.signature_num_bytes = signature_num_bytes 85 self.public_key_num_bytes = public_key_num_bytes 86 self.padding = padding 87 88 89 # This must be kept in sync with the avb_crypto.h file. 90 # 91 # The PKC1-v1.5 padding is a blob of binary DER of ASN.1 and is 92 # obtained from section 5.2.2 of RFC 4880. 93 ALGORITHMS = { 94 'NONE': Algorithm( 95 algorithm_type=0, # AVB_ALGORITHM_TYPE_NONE 96 hash_name='', 97 hash_num_bytes=0, 98 signature_num_bytes=0, 99 public_key_num_bytes=0, 100 padding=[]), 101 'SHA256_RSA2048': Algorithm( 102 algorithm_type=1, # AVB_ALGORITHM_TYPE_SHA256_RSA2048 103 hash_name='sha256', 104 hash_num_bytes=32, 105 signature_num_bytes=256, 106 public_key_num_bytes=8 + 2*2048/8, 107 padding=[ 108 # PKCS1-v1_5 padding 109 0x00, 0x01] + [0xff]*202 + [0x00] + [ 110 # ASN.1 header 111 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 112 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 113 0x00, 0x04, 0x20, 114 ]), 115 'SHA256_RSA4096': Algorithm( 116 algorithm_type=2, # AVB_ALGORITHM_TYPE_SHA256_RSA4096 117 hash_name='sha256', 118 hash_num_bytes=32, 119 signature_num_bytes=512, 120 public_key_num_bytes=8 + 2*4096/8, 121 padding=[ 122 # PKCS1-v1_5 padding 123 0x00, 0x01] + [0xff]*458 + [0x00] + [ 124 # ASN.1 header 125 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 126 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 127 0x00, 0x04, 0x20, 128 ]), 129 'SHA256_RSA8192': Algorithm( 130 algorithm_type=3, # AVB_ALGORITHM_TYPE_SHA256_RSA8192 131 hash_name='sha256', 132 hash_num_bytes=32, 133 signature_num_bytes=1024, 134 public_key_num_bytes=8 + 2*8192/8, 135 padding=[ 136 # PKCS1-v1_5 padding 137 0x00, 0x01] + [0xff]*970 + [0x00] + [ 138 # ASN.1 header 139 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 140 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 141 0x00, 0x04, 0x20, 142 ]), 143 'SHA512_RSA2048': Algorithm( 144 algorithm_type=4, # AVB_ALGORITHM_TYPE_SHA512_RSA2048 145 hash_name='sha512', 146 hash_num_bytes=64, 147 signature_num_bytes=256, 148 public_key_num_bytes=8 + 2*2048/8, 149 padding=[ 150 # PKCS1-v1_5 padding 151 0x00, 0x01] + [0xff]*170 + [0x00] + [ 152 # ASN.1 header 153 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 154 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 155 0x00, 0x04, 0x40 156 ]), 157 'SHA512_RSA4096': Algorithm( 158 algorithm_type=5, # AVB_ALGORITHM_TYPE_SHA512_RSA4096 159 hash_name='sha512', 160 hash_num_bytes=64, 161 signature_num_bytes=512, 162 public_key_num_bytes=8 + 2*4096/8, 163 padding=[ 164 # PKCS1-v1_5 padding 165 0x00, 0x01] + [0xff]*426 + [0x00] + [ 166 # ASN.1 header 167 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 168 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 169 0x00, 0x04, 0x40 170 ]), 171 'SHA512_RSA8192': Algorithm( 172 algorithm_type=6, # AVB_ALGORITHM_TYPE_SHA512_RSA8192 173 hash_name='sha512', 174 hash_num_bytes=64, 175 signature_num_bytes=1024, 176 public_key_num_bytes=8 + 2*8192/8, 177 padding=[ 178 # PKCS1-v1_5 padding 179 0x00, 0x01] + [0xff]*938 + [0x00] + [ 180 # ASN.1 header 181 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 182 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 183 0x00, 0x04, 0x40 184 ]), 185 } 186 187 188 def get_release_string(): 189 """Calculates the release string to use in the VBMeta struct.""" 190 # Keep in sync with libavb/avb_version.c:avb_version_string(). 191 return 'avbtool {}.{}.{}'.format(AVB_VERSION_MAJOR, 192 AVB_VERSION_MINOR, 193 AVB_VERSION_SUB) 194 195 196 def round_to_multiple(number, size): 197 """Rounds a number up to nearest multiple of another number. 198 199 Args: 200 number: The number to round up. 201 size: The multiple to round up to. 202 203 Returns: 204 If |number| is a multiple of |size|, returns |number|, otherwise 205 returns |number| + |size|. 206 """ 207 remainder = number % size 208 if remainder == 0: 209 return number 210 return number + size - remainder 211 212 213 def round_to_pow2(number): 214 """Rounds a number up to the next power of 2. 215 216 Args: 217 number: The number to round up. 218 219 Returns: 220 If |number| is already a power of 2 then |number| is 221 returned. Otherwise the smallest power of 2 greater than |number| 222 is returned. 223 """ 224 return 2**((number - 1).bit_length()) 225 226 227 def encode_long(num_bits, value): 228 """Encodes a long to a bytearray() using a given amount of bits. 229 230 This number is written big-endian, e.g. with the most significant 231 bit first. 232 233 This is the reverse of decode_long(). 234 235 Arguments: 236 num_bits: The number of bits to write, e.g. 2048. 237 value: The value to write. 238 239 Returns: 240 A bytearray() with the encoded long. 241 """ 242 ret = bytearray() 243 for bit_pos in range(num_bits, 0, -8): 244 octet = (value >> (bit_pos - 8)) & 0xff 245 ret.extend(struct.pack('!B', octet)) 246 return ret 247 248 249 def decode_long(blob): 250 """Decodes a long from a bytearray() using a given amount of bits. 251 252 This number is expected to be in big-endian, e.g. with the most 253 significant bit first. 254 255 This is the reverse of encode_long(). 256 257 Arguments: 258 value: A bytearray() with the encoded long. 259 260 Returns: 261 The decoded value. 262 """ 263 ret = 0 264 for b in bytearray(blob): 265 ret *= 256 266 ret += b 267 return ret 268 269 270 def egcd(a, b): 271 """Calculate greatest common divisor of two numbers. 272 273 This implementation uses a recursive version of the extended 274 Euclidian algorithm. 275 276 Arguments: 277 a: First number. 278 b: Second number. 279 280 Returns: 281 A tuple (gcd, x, y) that where |gcd| is the greatest common 282 divisor of |a| and |b| and |a|*|x| + |b|*|y| = |gcd|. 283 """ 284 if a == 0: 285 return (b, 0, 1) 286 else: 287 g, y, x = egcd(b % a, a) 288 return (g, x - (b // a) * y, y) 289 290 291 def modinv(a, m): 292 """Calculate modular multiplicative inverse of |a| modulo |m|. 293 294 This calculates the number |x| such that |a| * |x| == 1 (modulo 295 |m|). This number only exists if |a| and |m| are co-prime - |None| 296 is returned if this isn't true. 297 298 Arguments: 299 a: The number to calculate a modular inverse of. 300 m: The modulo to use. 301 302 Returns: 303 The modular multiplicative inverse of |a| and |m| or |None| if 304 these numbers are not co-prime. 305 """ 306 gcd, x, _ = egcd(a, m) 307 if gcd != 1: 308 return None # modular inverse does not exist 309 else: 310 return x % m 311 312 313 def parse_number(string): 314 """Parse a string as a number. 315 316 This is just a short-hand for int(string, 0) suitable for use in the 317 |type| parameter of |ArgumentParser|'s add_argument() function. An 318 improvement to just using type=int is that this function supports 319 numbers in other bases, e.g. "0x1234". 320 321 Arguments: 322 string: The string to parse. 323 324 Returns: 325 The parsed integer. 326 327 Raises: 328 ValueError: If the number could not be parsed. 329 """ 330 return int(string, 0) 331 332 333 class RSAPublicKey(object): 334 """Data structure used for a RSA public key. 335 336 Attributes: 337 exponent: The key exponent. 338 modulus: The key modulus. 339 num_bits: The key size. 340 """ 341 342 MODULUS_PREFIX = 'modulus=' 343 344 def __init__(self, key_path): 345 """Loads and parses an RSA key from either a private or public key file. 346 347 Arguments: 348 key_path: The path to a key file. 349 """ 350 # We used to have something as simple as this: 351 # 352 # key = Crypto.PublicKey.RSA.importKey(open(key_path).read()) 353 # self.exponent = key.e 354 # self.modulus = key.n 355 # self.num_bits = key.size() + 1 356 # 357 # but unfortunately PyCrypto is not available in the builder. So 358 # instead just parse openssl(1) output to get this 359 # information. It's ugly but... 360 args = ['openssl', 'rsa', '-in', key_path, '-modulus', '-noout'] 361 p = subprocess.Popen(args, 362 stdin=subprocess.PIPE, 363 stdout=subprocess.PIPE, 364 stderr=subprocess.PIPE) 365 (pout, perr) = p.communicate() 366 if p.wait() != 0: 367 # Could be just a public key is passed, try that. 368 args.append('-pubin') 369 p = subprocess.Popen(args, 370 stdin=subprocess.PIPE, 371 stdout=subprocess.PIPE, 372 stderr=subprocess.PIPE) 373 (pout, perr) = p.communicate() 374 if p.wait() != 0: 375 raise AvbError('Error getting public key: {}'.format(perr)) 376 377 if not pout.lower().startswith(self.MODULUS_PREFIX): 378 raise AvbError('Unexpected modulus output') 379 380 modulus_hexstr = pout[len(self.MODULUS_PREFIX):] 381 382 # The exponent is assumed to always be 65537 and the number of 383 # bits can be derived from the modulus by rounding up to the 384 # nearest power of 2. 385 self.modulus = int(modulus_hexstr, 16) 386 self.num_bits = round_to_pow2(int(math.ceil(math.log(self.modulus, 2)))) 387 self.exponent = 65537 388 389 390 def encode_rsa_key(key_path): 391 """Encodes a public RSA key in |AvbRSAPublicKeyHeader| format. 392 393 This creates a |AvbRSAPublicKeyHeader| as well as the two large 394 numbers (|key_num_bits| bits long) following it. 395 396 Arguments: 397 key_path: The path to a key file. 398 399 Returns: 400 A bytearray() with the |AvbRSAPublicKeyHeader|. 401 """ 402 key = RSAPublicKey(key_path) 403 if key.exponent != 65537: 404 raise AvbError('Only RSA keys with exponent 65537 are supported.') 405 ret = bytearray() 406 # Calculate n0inv = -1/n[0] (mod 2^32) 407 b = 2L**32 408 n0inv = b - modinv(key.modulus, b) 409 # Calculate rr = r^2 (mod N), where r = 2^(# of key bits) 410 r = 2L**key.modulus.bit_length() 411 rrmodn = r * r % key.modulus 412 ret.extend(struct.pack('!II', key.num_bits, n0inv)) 413 ret.extend(encode_long(key.num_bits, key.modulus)) 414 ret.extend(encode_long(key.num_bits, rrmodn)) 415 return ret 416 417 418 def lookup_algorithm_by_type(alg_type): 419 """Looks up algorithm by type. 420 421 Arguments: 422 alg_type: The integer representing the type. 423 424 Returns: 425 A tuple with the algorithm name and an |Algorithm| instance. 426 427 Raises: 428 Exception: If the algorithm cannot be found 429 """ 430 for alg_name in ALGORITHMS: 431 alg_data = ALGORITHMS[alg_name] 432 if alg_data.algorithm_type == alg_type: 433 return (alg_name, alg_data) 434 raise AvbError('Unknown algorithm type {}'.format(alg_type)) 435 436 437 def raw_sign(signing_helper, signing_helper_with_files, 438 algorithm_name, signature_num_bytes, key_path, 439 raw_data_to_sign): 440 """Computes a raw RSA signature using |signing_helper| or openssl. 441 442 Arguments: 443 signing_helper: Program which signs a hash and returns the signature. 444 signing_helper_with_files: Same as signing_helper but uses files instead. 445 algorithm_name: The algorithm name as per the ALGORITHMS dict. 446 signature_num_bytes: Number of bytes used to store the signature. 447 key_path: Path to the private key file. Must be PEM format. 448 raw_data_to_sign: Data to sign (bytearray or str expected). 449 450 Returns: 451 A bytearray containing the signature. 452 453 Raises: 454 Exception: If an error occurs. 455 """ 456 p = None 457 if signing_helper_with_files is not None: 458 signing_file = tempfile.NamedTemporaryFile() 459 signing_file.write(str(raw_data_to_sign)) 460 signing_file.flush() 461 p = subprocess.Popen( 462 [signing_helper_with_files, algorithm_name, key_path, signing_file.name]) 463 retcode = p.wait() 464 if retcode != 0: 465 raise AvbError('Error signing') 466 signing_file.seek(0) 467 signature = bytearray(signing_file.read()) 468 else: 469 if signing_helper is not None: 470 p = subprocess.Popen( 471 [signing_helper, algorithm_name, key_path], 472 stdin=subprocess.PIPE, 473 stdout=subprocess.PIPE, 474 stderr=subprocess.PIPE) 475 else: 476 p = subprocess.Popen( 477 ['openssl', 'rsautl', '-sign', '-inkey', key_path, '-raw'], 478 stdin=subprocess.PIPE, 479 stdout=subprocess.PIPE, 480 stderr=subprocess.PIPE) 481 (pout, perr) = p.communicate(str(raw_data_to_sign)) 482 retcode = p.wait() 483 if retcode != 0: 484 raise AvbError('Error signing: {}'.format(perr)) 485 signature = bytearray(pout) 486 if len(signature) != signature_num_bytes: 487 raise AvbError('Error signing: Invalid length of signature') 488 return signature 489 490 491 def verify_vbmeta_signature(vbmeta_header, vbmeta_blob): 492 """Checks that the signature in a vbmeta blob was made by 493 the embedded public key. 494 495 Arguments: 496 vbmeta_header: A AvbVBMetaHeader. 497 vbmeta_blob: The whole vbmeta blob, including the header. 498 499 Returns: 500 True if the signature is valid and corresponds to the embedded 501 public key. Also returns True if the vbmeta blob is not signed. 502 """ 503 (_, alg) = lookup_algorithm_by_type(vbmeta_header.algorithm_type) 504 if alg.hash_name == '': 505 return True 506 header_blob = vbmeta_blob[0:256] 507 auth_offset = 256 508 aux_offset = auth_offset + vbmeta_header.authentication_data_block_size 509 aux_size = vbmeta_header.auxiliary_data_block_size 510 aux_blob = vbmeta_blob[aux_offset:aux_offset + aux_size] 511 pubkey_offset = aux_offset + vbmeta_header.public_key_offset 512 pubkey_size = vbmeta_header.public_key_size 513 pubkey_blob = vbmeta_blob[pubkey_offset:pubkey_offset + pubkey_size] 514 515 digest_offset = auth_offset + vbmeta_header.hash_offset 516 digest_size = vbmeta_header.hash_size 517 digest_blob = vbmeta_blob[digest_offset:digest_offset + digest_size] 518 519 sig_offset = auth_offset + vbmeta_header.signature_offset 520 sig_size = vbmeta_header.signature_size 521 sig_blob = vbmeta_blob[sig_offset:sig_offset + sig_size] 522 523 # Now that we've got the stored digest, public key, and signature 524 # all we need to do is to verify. This is the exactly the same 525 # steps as performed in the avb_vbmeta_image_verify() function in 526 # libavb/avb_vbmeta_image.c. 527 528 ha = hashlib.new(alg.hash_name) 529 ha.update(header_blob) 530 ha.update(aux_blob) 531 computed_digest = ha.digest() 532 533 if computed_digest != digest_blob: 534 return False 535 536 padding_and_digest = bytearray(alg.padding) 537 padding_and_digest.extend(computed_digest) 538 539 (num_bits,) = struct.unpack('!I', pubkey_blob[0:4]) 540 modulus_blob = pubkey_blob[8:8 + num_bits/8] 541 modulus = decode_long(modulus_blob) 542 exponent = 65537 543 544 # For now, just use Crypto.PublicKey.RSA to verify the signature. This 545 # is OK since 'avbtool verify_image' is not expected to run on the 546 # Android builders (see bug #36809096). 547 import Crypto.PublicKey.RSA 548 key = Crypto.PublicKey.RSA.construct((modulus, long(exponent))) 549 if not key.verify(decode_long(padding_and_digest), 550 (decode_long(sig_blob), None)): 551 return False 552 return True 553 554 555 class ImageChunk(object): 556 """Data structure used for representing chunks in Android sparse files. 557 558 Attributes: 559 chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE. 560 chunk_offset: Offset in the sparse file where this chunk begins. 561 output_offset: Offset in de-sparsified file where output begins. 562 output_size: Number of bytes in output. 563 input_offset: Offset in sparse file for data if TYPE_RAW otherwise None. 564 fill_data: Blob with data to fill if TYPE_FILL otherwise None. 565 """ 566 567 FORMAT = '<2H2I' 568 TYPE_RAW = 0xcac1 569 TYPE_FILL = 0xcac2 570 TYPE_DONT_CARE = 0xcac3 571 TYPE_CRC32 = 0xcac4 572 573 def __init__(self, chunk_type, chunk_offset, output_offset, output_size, 574 input_offset, fill_data): 575 """Initializes an ImageChunk object. 576 577 Arguments: 578 chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE. 579 chunk_offset: Offset in the sparse file where this chunk begins. 580 output_offset: Offset in de-sparsified file. 581 output_size: Number of bytes in output. 582 input_offset: Offset in sparse file if TYPE_RAW otherwise None. 583 fill_data: Blob with data to fill if TYPE_FILL otherwise None. 584 585 Raises: 586 ValueError: If data is not well-formed. 587 """ 588 self.chunk_type = chunk_type 589 self.chunk_offset = chunk_offset 590 self.output_offset = output_offset 591 self.output_size = output_size 592 self.input_offset = input_offset 593 self.fill_data = fill_data 594 # Check invariants. 595 if self.chunk_type == self.TYPE_RAW: 596 if self.fill_data is not None: 597 raise ValueError('RAW chunk cannot have fill_data set.') 598 if not self.input_offset: 599 raise ValueError('RAW chunk must have input_offset set.') 600 elif self.chunk_type == self.TYPE_FILL: 601 if self.fill_data is None: 602 raise ValueError('FILL chunk must have fill_data set.') 603 if self.input_offset: 604 raise ValueError('FILL chunk cannot have input_offset set.') 605 elif self.chunk_type == self.TYPE_DONT_CARE: 606 if self.fill_data is not None: 607 raise ValueError('DONT_CARE chunk cannot have fill_data set.') 608 if self.input_offset: 609 raise ValueError('DONT_CARE chunk cannot have input_offset set.') 610 else: 611 raise ValueError('Invalid chunk type') 612 613 614 class ImageHandler(object): 615 """Abstraction for image I/O with support for Android sparse images. 616 617 This class provides an interface for working with image files that 618 may be using the Android Sparse Image format. When an instance is 619 constructed, we test whether it's an Android sparse file. If so, 620 operations will be on the sparse file by interpreting the sparse 621 format, otherwise they will be directly on the file. Either way the 622 operations do the same. 623 624 For reading, this interface mimics a file object - it has seek(), 625 tell(), and read() methods. For writing, only truncation 626 (truncate()) and appending is supported (append_raw() and 627 append_dont_care()). Additionally, data can only be written in units 628 of the block size. 629 630 Attributes: 631 is_sparse: Whether the file being operated on is sparse. 632 block_size: The block size, typically 4096. 633 image_size: The size of the unsparsified file. 634 """ 635 # See system/core/libsparse/sparse_format.h for details. 636 MAGIC = 0xed26ff3a 637 HEADER_FORMAT = '<I4H4I' 638 639 # These are formats and offset of just the |total_chunks| and 640 # |total_blocks| fields. 641 NUM_CHUNKS_AND_BLOCKS_FORMAT = '<II' 642 NUM_CHUNKS_AND_BLOCKS_OFFSET = 16 643 644 def __init__(self, image_filename): 645 """Initializes an image handler. 646 647 Arguments: 648 image_filename: The name of the file to operate on. 649 650 Raises: 651 ValueError: If data in the file is invalid. 652 """ 653 self._image_filename = image_filename 654 self._read_header() 655 656 def _read_header(self): 657 """Initializes internal data structures used for reading file. 658 659 This may be called multiple times and is typically called after 660 modifying the file (e.g. appending, truncation). 661 662 Raises: 663 ValueError: If data in the file is invalid. 664 """ 665 self.is_sparse = False 666 self.block_size = 4096 667 self._file_pos = 0 668 self._image = open(self._image_filename, 'r+b') 669 self._image.seek(0, os.SEEK_END) 670 self.image_size = self._image.tell() 671 672 self._image.seek(0, os.SEEK_SET) 673 header_bin = self._image.read(struct.calcsize(self.HEADER_FORMAT)) 674 (magic, major_version, minor_version, file_hdr_sz, chunk_hdr_sz, 675 block_size, self._num_total_blocks, self._num_total_chunks, 676 _) = struct.unpack(self.HEADER_FORMAT, header_bin) 677 if magic != self.MAGIC: 678 # Not a sparse image, our job here is done. 679 return 680 if not (major_version == 1 and minor_version == 0): 681 raise ValueError('Encountered sparse image format version {}.{} but ' 682 'only 1.0 is supported'.format(major_version, 683 minor_version)) 684 if file_hdr_sz != struct.calcsize(self.HEADER_FORMAT): 685 raise ValueError('Unexpected file_hdr_sz value {}.'. 686 format(file_hdr_sz)) 687 if chunk_hdr_sz != struct.calcsize(ImageChunk.FORMAT): 688 raise ValueError('Unexpected chunk_hdr_sz value {}.'. 689 format(chunk_hdr_sz)) 690 691 self.block_size = block_size 692 693 # Build an list of chunks by parsing the file. 694 self._chunks = [] 695 696 # Find the smallest offset where only "Don't care" chunks 697 # follow. This will be the size of the content in the sparse 698 # image. 699 offset = 0 700 output_offset = 0 701 for _ in xrange(1, self._num_total_chunks + 1): 702 chunk_offset = self._image.tell() 703 704 header_bin = self._image.read(struct.calcsize(ImageChunk.FORMAT)) 705 (chunk_type, _, chunk_sz, total_sz) = struct.unpack(ImageChunk.FORMAT, 706 header_bin) 707 data_sz = total_sz - struct.calcsize(ImageChunk.FORMAT) 708 709 if chunk_type == ImageChunk.TYPE_RAW: 710 if data_sz != (chunk_sz * self.block_size): 711 raise ValueError('Raw chunk input size ({}) does not match output ' 712 'size ({})'. 713 format(data_sz, chunk_sz*self.block_size)) 714 self._chunks.append(ImageChunk(ImageChunk.TYPE_RAW, 715 chunk_offset, 716 output_offset, 717 chunk_sz*self.block_size, 718 self._image.tell(), 719 None)) 720 self._image.read(data_sz) 721 722 elif chunk_type == ImageChunk.TYPE_FILL: 723 if data_sz != 4: 724 raise ValueError('Fill chunk should have 4 bytes of fill, but this ' 725 'has {}'.format(data_sz)) 726 fill_data = self._image.read(4) 727 self._chunks.append(ImageChunk(ImageChunk.TYPE_FILL, 728 chunk_offset, 729 output_offset, 730 chunk_sz*self.block_size, 731 None, 732 fill_data)) 733 elif chunk_type == ImageChunk.TYPE_DONT_CARE: 734 if data_sz != 0: 735 raise ValueError('Don\'t care chunk input size is non-zero ({})'. 736 format(data_sz)) 737 self._chunks.append(ImageChunk(ImageChunk.TYPE_DONT_CARE, 738 chunk_offset, 739 output_offset, 740 chunk_sz*self.block_size, 741 None, 742 None)) 743 elif chunk_type == ImageChunk.TYPE_CRC32: 744 if data_sz != 4: 745 raise ValueError('CRC32 chunk should have 4 bytes of CRC, but ' 746 'this has {}'.format(data_sz)) 747 self._image.read(4) 748 else: 749 raise ValueError('Unknown chunk type {}'.format(chunk_type)) 750 751 offset += chunk_sz 752 output_offset += chunk_sz*self.block_size 753 754 # Record where sparse data end. 755 self._sparse_end = self._image.tell() 756 757 # Now that we've traversed all chunks, sanity check. 758 if self._num_total_blocks != offset: 759 raise ValueError('The header said we should have {} output blocks, ' 760 'but we saw {}'.format(self._num_total_blocks, offset)) 761 junk_len = len(self._image.read()) 762 if junk_len > 0: 763 raise ValueError('There were {} bytes of extra data at the end of the ' 764 'file.'.format(junk_len)) 765 766 # Assign |image_size|. 767 self.image_size = output_offset 768 769 # This is used when bisecting in read() to find the initial slice. 770 self._chunk_output_offsets = [i.output_offset for i in self._chunks] 771 772 self.is_sparse = True 773 774 def _update_chunks_and_blocks(self): 775 """Helper function to update the image header. 776 777 The the |total_chunks| and |total_blocks| fields in the header 778 will be set to value of the |_num_total_blocks| and 779 |_num_total_chunks| attributes. 780 781 """ 782 self._image.seek(self.NUM_CHUNKS_AND_BLOCKS_OFFSET, os.SEEK_SET) 783 self._image.write(struct.pack(self.NUM_CHUNKS_AND_BLOCKS_FORMAT, 784 self._num_total_blocks, 785 self._num_total_chunks)) 786 787 def append_dont_care(self, num_bytes): 788 """Appends a DONT_CARE chunk to the sparse file. 789 790 The given number of bytes must be a multiple of the block size. 791 792 Arguments: 793 num_bytes: Size in number of bytes of the DONT_CARE chunk. 794 """ 795 assert num_bytes % self.block_size == 0 796 797 if not self.is_sparse: 798 self._image.seek(0, os.SEEK_END) 799 # This is more efficient that writing NUL bytes since it'll add 800 # a hole on file systems that support sparse files (native 801 # sparse, not Android sparse). 802 self._image.truncate(self._image.tell() + num_bytes) 803 self._read_header() 804 return 805 806 self._num_total_chunks += 1 807 self._num_total_blocks += num_bytes / self.block_size 808 self._update_chunks_and_blocks() 809 810 self._image.seek(self._sparse_end, os.SEEK_SET) 811 self._image.write(struct.pack(ImageChunk.FORMAT, 812 ImageChunk.TYPE_DONT_CARE, 813 0, # Reserved 814 num_bytes / self.block_size, 815 struct.calcsize(ImageChunk.FORMAT))) 816 self._read_header() 817 818 def append_raw(self, data): 819 """Appends a RAW chunk to the sparse file. 820 821 The length of the given data must be a multiple of the block size. 822 823 Arguments: 824 data: Data to append. 825 """ 826 assert len(data) % self.block_size == 0 827 828 if not self.is_sparse: 829 self._image.seek(0, os.SEEK_END) 830 self._image.write(data) 831 self._read_header() 832 return 833 834 self._num_total_chunks += 1 835 self._num_total_blocks += len(data) / self.block_size 836 self._update_chunks_and_blocks() 837 838 self._image.seek(self._sparse_end, os.SEEK_SET) 839 self._image.write(struct.pack(ImageChunk.FORMAT, 840 ImageChunk.TYPE_RAW, 841 0, # Reserved 842 len(data) / self.block_size, 843 len(data) + 844 struct.calcsize(ImageChunk.FORMAT))) 845 self._image.write(data) 846 self._read_header() 847 848 def append_fill(self, fill_data, size): 849 """Appends a fill chunk to the sparse file. 850 851 The total length of the fill data must be a multiple of the block size. 852 853 Arguments: 854 fill_data: Fill data to append - must be four bytes. 855 size: Number of chunk - must be a multiple of four and the block size. 856 """ 857 assert len(fill_data) == 4 858 assert size % 4 == 0 859 assert size % self.block_size == 0 860 861 if not self.is_sparse: 862 self._image.seek(0, os.SEEK_END) 863 self._image.write(fill_data * (size/4)) 864 self._read_header() 865 return 866 867 self._num_total_chunks += 1 868 self._num_total_blocks += size / self.block_size 869 self._update_chunks_and_blocks() 870 871 self._image.seek(self._sparse_end, os.SEEK_SET) 872 self._image.write(struct.pack(ImageChunk.FORMAT, 873 ImageChunk.TYPE_FILL, 874 0, # Reserved 875 size / self.block_size, 876 4 + struct.calcsize(ImageChunk.FORMAT))) 877 self._image.write(fill_data) 878 self._read_header() 879 880 def seek(self, offset): 881 """Sets the cursor position for reading from unsparsified file. 882 883 Arguments: 884 offset: Offset to seek to from the beginning of the file. 885 """ 886 self._file_pos = offset 887 888 def read(self, size): 889 """Reads data from the unsparsified file. 890 891 This method may return fewer than |size| bytes of data if the end 892 of the file was encountered. 893 894 The file cursor for reading is advanced by the number of bytes 895 read. 896 897 Arguments: 898 size: Number of bytes to read. 899 900 Returns: 901 The data. 902 903 """ 904 if not self.is_sparse: 905 self._image.seek(self._file_pos) 906 data = self._image.read(size) 907 self._file_pos += len(data) 908 return data 909 910 # Iterate over all chunks. 911 chunk_idx = bisect.bisect_right(self._chunk_output_offsets, 912 self._file_pos) - 1 913 data = bytearray() 914 to_go = size 915 while to_go > 0: 916 chunk = self._chunks[chunk_idx] 917 chunk_pos_offset = self._file_pos - chunk.output_offset 918 chunk_pos_to_go = min(chunk.output_size - chunk_pos_offset, to_go) 919 920 if chunk.chunk_type == ImageChunk.TYPE_RAW: 921 self._image.seek(chunk.input_offset + chunk_pos_offset) 922 data.extend(self._image.read(chunk_pos_to_go)) 923 elif chunk.chunk_type == ImageChunk.TYPE_FILL: 924 all_data = chunk.fill_data*(chunk_pos_to_go/len(chunk.fill_data) + 2) 925 offset_mod = chunk_pos_offset % len(chunk.fill_data) 926 data.extend(all_data[offset_mod:(offset_mod + chunk_pos_to_go)]) 927 else: 928 assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE 929 data.extend('\0' * chunk_pos_to_go) 930 931 to_go -= chunk_pos_to_go 932 self._file_pos += chunk_pos_to_go 933 chunk_idx += 1 934 # Generate partial read in case of EOF. 935 if chunk_idx >= len(self._chunks): 936 break 937 938 return data 939 940 def tell(self): 941 """Returns the file cursor position for reading from unsparsified file. 942 943 Returns: 944 The file cursor position for reading. 945 """ 946 return self._file_pos 947 948 def truncate(self, size): 949 """Truncates the unsparsified file. 950 951 Arguments: 952 size: Desired size of unsparsified file. 953 954 Raises: 955 ValueError: If desired size isn't a multiple of the block size. 956 """ 957 if not self.is_sparse: 958 self._image.truncate(size) 959 self._read_header() 960 return 961 962 if size % self.block_size != 0: 963 raise ValueError('Cannot truncate to a size which is not a multiple ' 964 'of the block size') 965 966 if size == self.image_size: 967 # Trivial where there's nothing to do. 968 return 969 elif size < self.image_size: 970 chunk_idx = bisect.bisect_right(self._chunk_output_offsets, size) - 1 971 chunk = self._chunks[chunk_idx] 972 if chunk.output_offset != size: 973 # Truncation in the middle of a trunk - need to keep the chunk 974 # and modify it. 975 chunk_idx_for_update = chunk_idx + 1 976 num_to_keep = size - chunk.output_offset 977 assert num_to_keep % self.block_size == 0 978 if chunk.chunk_type == ImageChunk.TYPE_RAW: 979 truncate_at = (chunk.chunk_offset + 980 struct.calcsize(ImageChunk.FORMAT) + num_to_keep) 981 data_sz = num_to_keep 982 elif chunk.chunk_type == ImageChunk.TYPE_FILL: 983 truncate_at = (chunk.chunk_offset + 984 struct.calcsize(ImageChunk.FORMAT) + 4) 985 data_sz = 4 986 else: 987 assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE 988 truncate_at = chunk.chunk_offset + struct.calcsize(ImageChunk.FORMAT) 989 data_sz = 0 990 chunk_sz = num_to_keep/self.block_size 991 total_sz = data_sz + struct.calcsize(ImageChunk.FORMAT) 992 self._image.seek(chunk.chunk_offset) 993 self._image.write(struct.pack(ImageChunk.FORMAT, 994 chunk.chunk_type, 995 0, # Reserved 996 chunk_sz, 997 total_sz)) 998 chunk.output_size = num_to_keep 999 else: 1000 # Truncation at trunk boundary. 1001 truncate_at = chunk.chunk_offset 1002 chunk_idx_for_update = chunk_idx 1003 1004 self._num_total_chunks = chunk_idx_for_update 1005 self._num_total_blocks = 0 1006 for i in range(0, chunk_idx_for_update): 1007 self._num_total_blocks += self._chunks[i].output_size / self.block_size 1008 self._update_chunks_and_blocks() 1009 self._image.truncate(truncate_at) 1010 1011 # We've modified the file so re-read all data. 1012 self._read_header() 1013 else: 1014 # Truncating to grow - just add a DONT_CARE section. 1015 self.append_dont_care(size - self.image_size) 1016 1017 1018 class AvbDescriptor(object): 1019 """Class for AVB descriptor. 1020 1021 See the |AvbDescriptor| C struct for more information. 1022 1023 Attributes: 1024 tag: The tag identifying what kind of descriptor this is. 1025 data: The data in the descriptor. 1026 """ 1027 1028 SIZE = 16 1029 FORMAT_STRING = ('!QQ') # tag, num_bytes_following (descriptor header) 1030 1031 def __init__(self, data): 1032 """Initializes a new property descriptor. 1033 1034 Arguments: 1035 data: If not None, must be a bytearray(). 1036 1037 Raises: 1038 LookupError: If the given descriptor is malformed. 1039 """ 1040 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1041 1042 if data: 1043 (self.tag, num_bytes_following) = ( 1044 struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])) 1045 self.data = data[self.SIZE:self.SIZE + num_bytes_following] 1046 else: 1047 self.tag = None 1048 self.data = None 1049 1050 def print_desc(self, o): 1051 """Print the descriptor. 1052 1053 Arguments: 1054 o: The object to write the output to. 1055 """ 1056 o.write(' Unknown descriptor:\n') 1057 o.write(' Tag: {}\n'.format(self.tag)) 1058 if len(self.data) < 256: 1059 o.write(' Data: {} ({} bytes)\n'.format( 1060 repr(str(self.data)), len(self.data))) 1061 else: 1062 o.write(' Data: {} bytes\n'.format(len(self.data))) 1063 1064 def encode(self): 1065 """Serializes the descriptor. 1066 1067 Returns: 1068 A bytearray() with the descriptor data. 1069 """ 1070 num_bytes_following = len(self.data) 1071 nbf_with_padding = round_to_multiple(num_bytes_following, 8) 1072 padding_size = nbf_with_padding - num_bytes_following 1073 desc = struct.pack(self.FORMAT_STRING, self.tag, nbf_with_padding) 1074 padding = struct.pack(str(padding_size) + 'x') 1075 ret = desc + self.data + padding 1076 return bytearray(ret) 1077 1078 def verify(self, image_dir, image_ext, expected_chain_partitions_map): 1079 """Verifies contents of the descriptor - used in verify_image sub-command. 1080 1081 Arguments: 1082 image_dir: The directory of the file being verified. 1083 image_ext: The extension of the file being verified (e.g. '.img'). 1084 expected_chain_partitions_map: A map from partition name to the 1085 tuple (rollback_index_location, key_blob). 1086 1087 Returns: 1088 True if the descriptor verifies, False otherwise. 1089 """ 1090 # Nothing to do. 1091 return True 1092 1093 class AvbPropertyDescriptor(AvbDescriptor): 1094 """A class for property descriptors. 1095 1096 See the |AvbPropertyDescriptor| C struct for more information. 1097 1098 Attributes: 1099 key: The key. 1100 value: The key. 1101 """ 1102 1103 TAG = 0 1104 SIZE = 32 1105 FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header) 1106 'Q' # key size (bytes) 1107 'Q') # value size (bytes) 1108 1109 def __init__(self, data=None): 1110 """Initializes a new property descriptor. 1111 1112 Arguments: 1113 data: If not None, must be a bytearray of size |SIZE|. 1114 1115 Raises: 1116 LookupError: If the given descriptor is malformed. 1117 """ 1118 AvbDescriptor.__init__(self, None) 1119 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1120 1121 if data: 1122 (tag, num_bytes_following, key_size, 1123 value_size) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE]) 1124 expected_size = round_to_multiple( 1125 self.SIZE - 16 + key_size + 1 + value_size + 1, 8) 1126 if tag != self.TAG or num_bytes_following != expected_size: 1127 raise LookupError('Given data does not look like a property ' 1128 'descriptor.') 1129 self.key = data[self.SIZE:(self.SIZE + key_size)] 1130 self.value = data[(self.SIZE + key_size + 1):(self.SIZE + key_size + 1 + 1131 value_size)] 1132 else: 1133 self.key = '' 1134 self.value = '' 1135 1136 def print_desc(self, o): 1137 """Print the descriptor. 1138 1139 Arguments: 1140 o: The object to write the output to. 1141 """ 1142 if len(self.value) < 256: 1143 o.write(' Prop: {} -> {}\n'.format(self.key, repr(str(self.value)))) 1144 else: 1145 o.write(' Prop: {} -> ({} bytes)\n'.format(self.key, len(self.value))) 1146 1147 def encode(self): 1148 """Serializes the descriptor. 1149 1150 Returns: 1151 A bytearray() with the descriptor data. 1152 """ 1153 num_bytes_following = self.SIZE + len(self.key) + len(self.value) + 2 - 16 1154 nbf_with_padding = round_to_multiple(num_bytes_following, 8) 1155 padding_size = nbf_with_padding - num_bytes_following 1156 desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding, 1157 len(self.key), len(self.value)) 1158 padding = struct.pack(str(padding_size) + 'x') 1159 ret = desc + self.key + '\0' + self.value + '\0' + padding 1160 return bytearray(ret) 1161 1162 def verify(self, image_dir, image_ext, expected_chain_partitions_map): 1163 """Verifies contents of the descriptor - used in verify_image sub-command. 1164 1165 Arguments: 1166 image_dir: The directory of the file being verified. 1167 image_ext: The extension of the file being verified (e.g. '.img'). 1168 expected_chain_partitions_map: A map from partition name to the 1169 tuple (rollback_index_location, key_blob). 1170 1171 Returns: 1172 True if the descriptor verifies, False otherwise. 1173 """ 1174 # Nothing to do. 1175 return True 1176 1177 class AvbHashtreeDescriptor(AvbDescriptor): 1178 """A class for hashtree descriptors. 1179 1180 See the |AvbHashtreeDescriptor| C struct for more information. 1181 1182 Attributes: 1183 dm_verity_version: dm-verity version used. 1184 image_size: Size of the image, after rounding up to |block_size|. 1185 tree_offset: Offset of the hash tree in the file. 1186 tree_size: Size of the tree. 1187 data_block_size: Data block size 1188 hash_block_size: Hash block size 1189 fec_num_roots: Number of roots used for FEC (0 if FEC is not used). 1190 fec_offset: Offset of FEC data (0 if FEC is not used). 1191 fec_size: Size of FEC data (0 if FEC is not used). 1192 hash_algorithm: Hash algorithm used. 1193 partition_name: Partition name. 1194 salt: Salt used. 1195 root_digest: Root digest. 1196 """ 1197 1198 TAG = 1 1199 RESERVED = 64 1200 SIZE = 116 + RESERVED 1201 FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header) 1202 'L' # dm-verity version used 1203 'Q' # image size (bytes) 1204 'Q' # tree offset (bytes) 1205 'Q' # tree size (bytes) 1206 'L' # data block size (bytes) 1207 'L' # hash block size (bytes) 1208 'L' # FEC number of roots 1209 'Q' # FEC offset (bytes) 1210 'Q' # FEC size (bytes) 1211 '32s' # hash algorithm used 1212 'L' # partition name (bytes) 1213 'L' # salt length (bytes) 1214 'L' + # root digest length (bytes) 1215 str(RESERVED) + 's') # reserved 1216 1217 def __init__(self, data=None): 1218 """Initializes a new hashtree descriptor. 1219 1220 Arguments: 1221 data: If not None, must be a bytearray of size |SIZE|. 1222 1223 Raises: 1224 LookupError: If the given descriptor is malformed. 1225 """ 1226 AvbDescriptor.__init__(self, None) 1227 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1228 1229 if data: 1230 (tag, num_bytes_following, self.dm_verity_version, self.image_size, 1231 self.tree_offset, self.tree_size, self.data_block_size, 1232 self.hash_block_size, self.fec_num_roots, self.fec_offset, self.fec_size, 1233 self.hash_algorithm, partition_name_len, salt_len, 1234 root_digest_len, _) = struct.unpack(self.FORMAT_STRING, 1235 data[0:self.SIZE]) 1236 expected_size = round_to_multiple( 1237 self.SIZE - 16 + partition_name_len + salt_len + root_digest_len, 8) 1238 if tag != self.TAG or num_bytes_following != expected_size: 1239 raise LookupError('Given data does not look like a hashtree ' 1240 'descriptor.') 1241 # Nuke NUL-bytes at the end. 1242 self.hash_algorithm = self.hash_algorithm.split('\0', 1)[0] 1243 o = 0 1244 self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o + 1245 partition_name_len)]) 1246 # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8. 1247 self.partition_name.decode('utf-8') 1248 o += partition_name_len 1249 self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)] 1250 o += salt_len 1251 self.root_digest = data[(self.SIZE + o):(self.SIZE + o + root_digest_len)] 1252 if root_digest_len != len(hashlib.new(name=self.hash_algorithm).digest()): 1253 raise LookupError('root_digest_len doesn\'t match hash algorithm') 1254 1255 else: 1256 self.dm_verity_version = 0 1257 self.image_size = 0 1258 self.tree_offset = 0 1259 self.tree_size = 0 1260 self.data_block_size = 0 1261 self.hash_block_size = 0 1262 self.fec_num_roots = 0 1263 self.fec_offset = 0 1264 self.fec_size = 0 1265 self.hash_algorithm = '' 1266 self.partition_name = '' 1267 self.salt = bytearray() 1268 self.root_digest = bytearray() 1269 1270 def print_desc(self, o): 1271 """Print the descriptor. 1272 1273 Arguments: 1274 o: The object to write the output to. 1275 """ 1276 o.write(' Hashtree descriptor:\n') 1277 o.write(' Version of dm-verity: {}\n'.format(self.dm_verity_version)) 1278 o.write(' Image Size: {} bytes\n'.format(self.image_size)) 1279 o.write(' Tree Offset: {}\n'.format(self.tree_offset)) 1280 o.write(' Tree Size: {} bytes\n'.format(self.tree_size)) 1281 o.write(' Data Block Size: {} bytes\n'.format( 1282 self.data_block_size)) 1283 o.write(' Hash Block Size: {} bytes\n'.format( 1284 self.hash_block_size)) 1285 o.write(' FEC num roots: {}\n'.format(self.fec_num_roots)) 1286 o.write(' FEC offset: {}\n'.format(self.fec_offset)) 1287 o.write(' FEC size: {} bytes\n'.format(self.fec_size)) 1288 o.write(' Hash Algorithm: {}\n'.format(self.hash_algorithm)) 1289 o.write(' Partition Name: {}\n'.format(self.partition_name)) 1290 o.write(' Salt: {}\n'.format(str(self.salt).encode( 1291 'hex'))) 1292 o.write(' Root Digest: {}\n'.format(str( 1293 self.root_digest).encode('hex'))) 1294 1295 def encode(self): 1296 """Serializes the descriptor. 1297 1298 Returns: 1299 A bytearray() with the descriptor data. 1300 """ 1301 encoded_name = self.partition_name.encode('utf-8') 1302 num_bytes_following = (self.SIZE + len(encoded_name) + len(self.salt) + 1303 len(self.root_digest) - 16) 1304 nbf_with_padding = round_to_multiple(num_bytes_following, 8) 1305 padding_size = nbf_with_padding - num_bytes_following 1306 desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding, 1307 self.dm_verity_version, self.image_size, 1308 self.tree_offset, self.tree_size, self.data_block_size, 1309 self.hash_block_size, self.fec_num_roots, 1310 self.fec_offset, self.fec_size, self.hash_algorithm, 1311 len(encoded_name), len(self.salt), len(self.root_digest), 1312 self.RESERVED*'\0') 1313 padding = struct.pack(str(padding_size) + 'x') 1314 ret = desc + encoded_name + self.salt + self.root_digest + padding 1315 return bytearray(ret) 1316 1317 def verify(self, image_dir, image_ext, expected_chain_partitions_map): 1318 """Verifies contents of the descriptor - used in verify_image sub-command. 1319 1320 Arguments: 1321 image_dir: The directory of the file being verified. 1322 image_ext: The extension of the file being verified (e.g. '.img'). 1323 expected_chain_partitions_map: A map from partition name to the 1324 tuple (rollback_index_location, key_blob). 1325 1326 Returns: 1327 True if the descriptor verifies, False otherwise. 1328 """ 1329 image_filename = os.path.join(image_dir, self.partition_name + image_ext) 1330 image = ImageHandler(image_filename) 1331 # Generate the hashtree and checks that it matches what's in the file. 1332 digest_size = len(hashlib.new(name=self.hash_algorithm).digest()) 1333 digest_padding = round_to_pow2(digest_size) - digest_size 1334 (hash_level_offsets, tree_size) = calc_hash_level_offsets( 1335 self.image_size, self.data_block_size, digest_size + digest_padding) 1336 root_digest, hash_tree = generate_hash_tree(image, self.image_size, 1337 self.data_block_size, 1338 self.hash_algorithm, self.salt, 1339 digest_padding, 1340 hash_level_offsets, 1341 tree_size) 1342 # The root digest must match... 1343 if root_digest != self.root_digest: 1344 sys.stderr.write('hashtree of {} does not match descriptor\n'. 1345 format(image_filename)) 1346 return False 1347 # ... also check that the on-disk hashtree matches 1348 image.seek(self.tree_offset) 1349 hash_tree_ondisk = image.read(self.tree_size) 1350 if hash_tree != hash_tree_ondisk: 1351 sys.stderr.write('hashtree of {} contains invalid data\n'. 1352 format(image_filename)) 1353 return False 1354 # TODO: we could also verify that the FEC stored in the image is 1355 # correct but this a) currently requires the 'fec' binary; and b) 1356 # takes a long time; and c) is not strictly needed for 1357 # verification purposes as we've already verified the root hash. 1358 print ('{}: Successfully verified {} hashtree of {} for image of {} bytes' 1359 .format(self.partition_name, self.hash_algorithm, image_filename, 1360 self.image_size)) 1361 return True 1362 1363 1364 class AvbHashDescriptor(AvbDescriptor): 1365 """A class for hash descriptors. 1366 1367 See the |AvbHashDescriptor| C struct for more information. 1368 1369 Attributes: 1370 image_size: Image size, in bytes. 1371 hash_algorithm: Hash algorithm used. 1372 partition_name: Partition name. 1373 salt: Salt used. 1374 digest: The hash value of salt and data combined. 1375 """ 1376 1377 TAG = 2 1378 RESERVED = 64 1379 SIZE = 68 + RESERVED 1380 FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header) 1381 'Q' # image size (bytes) 1382 '32s' # hash algorithm used 1383 'L' # partition name (bytes) 1384 'L' # salt length (bytes) 1385 'L' + # digest length (bytes) 1386 str(RESERVED) + 's') # reserved 1387 1388 def __init__(self, data=None): 1389 """Initializes a new hash descriptor. 1390 1391 Arguments: 1392 data: If not None, must be a bytearray of size |SIZE|. 1393 1394 Raises: 1395 LookupError: If the given descriptor is malformed. 1396 """ 1397 AvbDescriptor.__init__(self, None) 1398 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1399 1400 if data: 1401 (tag, num_bytes_following, self.image_size, self.hash_algorithm, 1402 partition_name_len, salt_len, 1403 digest_len, _) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE]) 1404 expected_size = round_to_multiple( 1405 self.SIZE - 16 + partition_name_len + salt_len + digest_len, 8) 1406 if tag != self.TAG or num_bytes_following != expected_size: 1407 raise LookupError('Given data does not look like a hash ' 'descriptor.') 1408 # Nuke NUL-bytes at the end. 1409 self.hash_algorithm = self.hash_algorithm.split('\0', 1)[0] 1410 o = 0 1411 self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o + 1412 partition_name_len)]) 1413 # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8. 1414 self.partition_name.decode('utf-8') 1415 o += partition_name_len 1416 self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)] 1417 o += salt_len 1418 self.digest = data[(self.SIZE + o):(self.SIZE + o + digest_len)] 1419 if digest_len != len(hashlib.new(name=self.hash_algorithm).digest()): 1420 raise LookupError('digest_len doesn\'t match hash algorithm') 1421 1422 else: 1423 self.image_size = 0 1424 self.hash_algorithm = '' 1425 self.partition_name = '' 1426 self.salt = bytearray() 1427 self.digest = bytearray() 1428 1429 def print_desc(self, o): 1430 """Print the descriptor. 1431 1432 Arguments: 1433 o: The object to write the output to. 1434 """ 1435 o.write(' Hash descriptor:\n') 1436 o.write(' Image Size: {} bytes\n'.format(self.image_size)) 1437 o.write(' Hash Algorithm: {}\n'.format(self.hash_algorithm)) 1438 o.write(' Partition Name: {}\n'.format(self.partition_name)) 1439 o.write(' Salt: {}\n'.format(str(self.salt).encode( 1440 'hex'))) 1441 o.write(' Digest: {}\n'.format(str(self.digest).encode( 1442 'hex'))) 1443 1444 def encode(self): 1445 """Serializes the descriptor. 1446 1447 Returns: 1448 A bytearray() with the descriptor data. 1449 """ 1450 encoded_name = self.partition_name.encode('utf-8') 1451 num_bytes_following = ( 1452 self.SIZE + len(encoded_name) + len(self.salt) + len(self.digest) - 16) 1453 nbf_with_padding = round_to_multiple(num_bytes_following, 8) 1454 padding_size = nbf_with_padding - num_bytes_following 1455 desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding, 1456 self.image_size, self.hash_algorithm, len(encoded_name), 1457 len(self.salt), len(self.digest), self.RESERVED*'\0') 1458 padding = struct.pack(str(padding_size) + 'x') 1459 ret = desc + encoded_name + self.salt + self.digest + padding 1460 return bytearray(ret) 1461 1462 def verify(self, image_dir, image_ext, expected_chain_partitions_map): 1463 """Verifies contents of the descriptor - used in verify_image sub-command. 1464 1465 Arguments: 1466 image_dir: The directory of the file being verified. 1467 image_ext: The extension of the file being verified (e.g. '.img'). 1468 expected_chain_partitions_map: A map from partition name to the 1469 tuple (rollback_index_location, key_blob). 1470 1471 Returns: 1472 True if the descriptor verifies, False otherwise. 1473 """ 1474 image_filename = os.path.join(image_dir, self.partition_name + image_ext) 1475 image = ImageHandler(image_filename) 1476 data = image.read(self.image_size) 1477 ha = hashlib.new(self.hash_algorithm) 1478 ha.update(self.salt) 1479 ha.update(data) 1480 digest = ha.digest() 1481 if digest != self.digest: 1482 sys.stderr.write('{} digest of {} does not match digest in descriptor\n'. 1483 format(self.hash_algorithm, image_filename)) 1484 return False 1485 print ('{}: Successfully verified {} hash of {} for image of {} bytes' 1486 .format(self.partition_name, self.hash_algorithm, image_filename, 1487 self.image_size)) 1488 return True 1489 1490 1491 class AvbKernelCmdlineDescriptor(AvbDescriptor): 1492 """A class for kernel command-line descriptors. 1493 1494 See the |AvbKernelCmdlineDescriptor| C struct for more information. 1495 1496 Attributes: 1497 flags: Flags. 1498 kernel_cmdline: The kernel command-line. 1499 """ 1500 1501 TAG = 3 1502 SIZE = 24 1503 FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header) 1504 'L' # flags 1505 'L') # cmdline length (bytes) 1506 1507 FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED = (1 << 0) 1508 FLAGS_USE_ONLY_IF_HASHTREE_DISABLED = (1 << 1) 1509 1510 def __init__(self, data=None): 1511 """Initializes a new kernel cmdline descriptor. 1512 1513 Arguments: 1514 data: If not None, must be a bytearray of size |SIZE|. 1515 1516 Raises: 1517 LookupError: If the given descriptor is malformed. 1518 """ 1519 AvbDescriptor.__init__(self, None) 1520 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1521 1522 if data: 1523 (tag, num_bytes_following, self.flags, kernel_cmdline_length) = ( 1524 struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])) 1525 expected_size = round_to_multiple(self.SIZE - 16 + kernel_cmdline_length, 1526 8) 1527 if tag != self.TAG or num_bytes_following != expected_size: 1528 raise LookupError('Given data does not look like a kernel cmdline ' 1529 'descriptor.') 1530 # Nuke NUL-bytes at the end. 1531 self.kernel_cmdline = str(data[self.SIZE:(self.SIZE + 1532 kernel_cmdline_length)]) 1533 # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8. 1534 self.kernel_cmdline.decode('utf-8') 1535 else: 1536 self.flags = 0 1537 self.kernel_cmdline = '' 1538 1539 def print_desc(self, o): 1540 """Print the descriptor. 1541 1542 Arguments: 1543 o: The object to write the output to. 1544 """ 1545 o.write(' Kernel Cmdline descriptor:\n') 1546 o.write(' Flags: {}\n'.format(self.flags)) 1547 o.write(' Kernel Cmdline: {}\n'.format(repr( 1548 self.kernel_cmdline))) 1549 1550 def encode(self): 1551 """Serializes the descriptor. 1552 1553 Returns: 1554 A bytearray() with the descriptor data. 1555 """ 1556 encoded_str = self.kernel_cmdline.encode('utf-8') 1557 num_bytes_following = (self.SIZE + len(encoded_str) - 16) 1558 nbf_with_padding = round_to_multiple(num_bytes_following, 8) 1559 padding_size = nbf_with_padding - num_bytes_following 1560 desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding, 1561 self.flags, len(encoded_str)) 1562 padding = struct.pack(str(padding_size) + 'x') 1563 ret = desc + encoded_str + padding 1564 return bytearray(ret) 1565 1566 def verify(self, image_dir, image_ext, expected_chain_partitions_map): 1567 """Verifies contents of the descriptor - used in verify_image sub-command. 1568 1569 Arguments: 1570 image_dir: The directory of the file being verified. 1571 image_ext: The extension of the file being verified (e.g. '.img'). 1572 expected_chain_partitions_map: A map from partition name to the 1573 tuple (rollback_index_location, key_blob). 1574 1575 Returns: 1576 True if the descriptor verifies, False otherwise. 1577 """ 1578 # Nothing to verify. 1579 return True 1580 1581 class AvbChainPartitionDescriptor(AvbDescriptor): 1582 """A class for chained partition descriptors. 1583 1584 See the |AvbChainPartitionDescriptor| C struct for more information. 1585 1586 Attributes: 1587 rollback_index_location: The rollback index location to use. 1588 partition_name: Partition name. 1589 public_key: Bytes for the public key. 1590 """ 1591 1592 TAG = 4 1593 RESERVED = 64 1594 SIZE = 28 + RESERVED 1595 FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header) 1596 'L' # rollback_index_location 1597 'L' # partition_name_size (bytes) 1598 'L' + # public_key_size (bytes) 1599 str(RESERVED) + 's') # reserved 1600 1601 def __init__(self, data=None): 1602 """Initializes a new chain partition descriptor. 1603 1604 Arguments: 1605 data: If not None, must be a bytearray of size |SIZE|. 1606 1607 Raises: 1608 LookupError: If the given descriptor is malformed. 1609 """ 1610 AvbDescriptor.__init__(self, None) 1611 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1612 1613 if data: 1614 (tag, num_bytes_following, self.rollback_index_location, 1615 partition_name_len, 1616 public_key_len, _) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE]) 1617 expected_size = round_to_multiple( 1618 self.SIZE - 16 + partition_name_len + public_key_len, 8) 1619 if tag != self.TAG or num_bytes_following != expected_size: 1620 raise LookupError('Given data does not look like a chain partition ' 1621 'descriptor.') 1622 o = 0 1623 self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o + 1624 partition_name_len)]) 1625 # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8. 1626 self.partition_name.decode('utf-8') 1627 o += partition_name_len 1628 self.public_key = data[(self.SIZE + o):(self.SIZE + o + public_key_len)] 1629 1630 else: 1631 self.rollback_index_location = 0 1632 self.partition_name = '' 1633 self.public_key = bytearray() 1634 1635 def print_desc(self, o): 1636 """Print the descriptor. 1637 1638 Arguments: 1639 o: The object to write the output to. 1640 """ 1641 o.write(' Chain Partition descriptor:\n') 1642 o.write(' Partition Name: {}\n'.format(self.partition_name)) 1643 o.write(' Rollback Index Location: {}\n'.format( 1644 self.rollback_index_location)) 1645 # Just show the SHA1 of the key, for size reasons. 1646 hexdig = hashlib.sha1(self.public_key).hexdigest() 1647 o.write(' Public key (sha1): {}\n'.format(hexdig)) 1648 1649 def encode(self): 1650 """Serializes the descriptor. 1651 1652 Returns: 1653 A bytearray() with the descriptor data. 1654 """ 1655 encoded_name = self.partition_name.encode('utf-8') 1656 num_bytes_following = ( 1657 self.SIZE + len(encoded_name) + len(self.public_key) - 16) 1658 nbf_with_padding = round_to_multiple(num_bytes_following, 8) 1659 padding_size = nbf_with_padding - num_bytes_following 1660 desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding, 1661 self.rollback_index_location, len(encoded_name), 1662 len(self.public_key), self.RESERVED*'\0') 1663 padding = struct.pack(str(padding_size) + 'x') 1664 ret = desc + encoded_name + self.public_key + padding 1665 return bytearray(ret) 1666 1667 def verify(self, image_dir, image_ext, expected_chain_partitions_map): 1668 """Verifies contents of the descriptor - used in verify_image sub-command. 1669 1670 Arguments: 1671 image_dir: The directory of the file being verified. 1672 image_ext: The extension of the file being verified (e.g. '.img'). 1673 expected_chain_partitions_map: A map from partition name to the 1674 tuple (rollback_index_location, key_blob). 1675 1676 Returns: 1677 True if the descriptor verifies, False otherwise. 1678 """ 1679 value = expected_chain_partitions_map.get(self.partition_name) 1680 if not value: 1681 sys.stderr.write('No expected chain partition for partition {}. Use ' 1682 '--expected_chain_partition to specify expected ' 1683 'contents.\n'. 1684 format(self.partition_name)) 1685 return False 1686 rollback_index_location, pk_blob = value 1687 1688 if self.rollback_index_location != rollback_index_location: 1689 sys.stderr.write('Expected rollback_index_location {} does not ' 1690 'match {} in descriptor for partition {}\n'. 1691 format(rollback_index_location, 1692 self.rollback_index_location, 1693 self.partition_name)) 1694 return False 1695 1696 if self.public_key != pk_blob: 1697 sys.stderr.write('Expected public key blob does not match public ' 1698 'key blob in descriptor for partition {}\n'. 1699 format(self.partition_name)) 1700 return False 1701 1702 print ('{}: Successfully verified chain partition descriptor matches ' 1703 'expected data'.format(self.partition_name)) 1704 1705 return True 1706 1707 DESCRIPTOR_CLASSES = [ 1708 AvbPropertyDescriptor, AvbHashtreeDescriptor, AvbHashDescriptor, 1709 AvbKernelCmdlineDescriptor, AvbChainPartitionDescriptor 1710 ] 1711 1712 1713 def parse_descriptors(data): 1714 """Parses a blob of data into descriptors. 1715 1716 Arguments: 1717 data: A bytearray() with encoded descriptors. 1718 1719 Returns: 1720 A list of instances of objects derived from AvbDescriptor. For 1721 unknown descriptors, the class AvbDescriptor is used. 1722 """ 1723 o = 0 1724 ret = [] 1725 while o < len(data): 1726 tag, nb_following = struct.unpack('!2Q', data[o:o + 16]) 1727 if tag < len(DESCRIPTOR_CLASSES): 1728 c = DESCRIPTOR_CLASSES[tag] 1729 else: 1730 c = AvbDescriptor 1731 ret.append(c(bytearray(data[o:o + 16 + nb_following]))) 1732 o += 16 + nb_following 1733 return ret 1734 1735 1736 class AvbFooter(object): 1737 """A class for parsing and writing footers. 1738 1739 Footers are stored at the end of partitions and point to where the 1740 AvbVBMeta blob is located. They also contain the original size of 1741 the image before AVB information was added. 1742 1743 Attributes: 1744 magic: Magic for identifying the footer, see |MAGIC|. 1745 version_major: The major version of avbtool that wrote the footer. 1746 version_minor: The minor version of avbtool that wrote the footer. 1747 original_image_size: Original image size. 1748 vbmeta_offset: Offset of where the AvbVBMeta blob is stored. 1749 vbmeta_size: Size of the AvbVBMeta blob. 1750 """ 1751 1752 MAGIC = 'AVBf' 1753 SIZE = 64 1754 RESERVED = 28 1755 FOOTER_VERSION_MAJOR = 1 1756 FOOTER_VERSION_MINOR = 0 1757 FORMAT_STRING = ('!4s2L' # magic, 2 x version. 1758 'Q' # Original image size. 1759 'Q' # Offset of VBMeta blob. 1760 'Q' + # Size of VBMeta blob. 1761 str(RESERVED) + 'x') # padding for reserved bytes 1762 1763 def __init__(self, data=None): 1764 """Initializes a new footer object. 1765 1766 Arguments: 1767 data: If not None, must be a bytearray of size 4096. 1768 1769 Raises: 1770 LookupError: If the given footer is malformed. 1771 struct.error: If the given data has no footer. 1772 """ 1773 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1774 1775 if data: 1776 (self.magic, self.version_major, self.version_minor, 1777 self.original_image_size, self.vbmeta_offset, 1778 self.vbmeta_size) = struct.unpack(self.FORMAT_STRING, data) 1779 if self.magic != self.MAGIC: 1780 raise LookupError('Given data does not look like a AVB footer.') 1781 else: 1782 self.magic = self.MAGIC 1783 self.version_major = self.FOOTER_VERSION_MAJOR 1784 self.version_minor = self.FOOTER_VERSION_MINOR 1785 self.original_image_size = 0 1786 self.vbmeta_offset = 0 1787 self.vbmeta_size = 0 1788 1789 def encode(self): 1790 """Gets a string representing the binary encoding of the footer. 1791 1792 Returns: 1793 A bytearray() with a binary representation of the footer. 1794 """ 1795 return struct.pack(self.FORMAT_STRING, self.magic, self.version_major, 1796 self.version_minor, self.original_image_size, 1797 self.vbmeta_offset, self.vbmeta_size) 1798 1799 1800 class AvbVBMetaHeader(object): 1801 """A class for parsing and writing AVB vbmeta images. 1802 1803 Attributes: 1804 The attributes correspond to the |AvbVBMetaHeader| struct 1805 defined in avb_vbmeta_header.h. 1806 """ 1807 1808 SIZE = 256 1809 1810 # Keep in sync with |reserved0| and |reserved| field of 1811 # |AvbVBMetaImageHeader|. 1812 RESERVED0 = 4 1813 RESERVED = 80 1814 1815 # Keep in sync with |AvbVBMetaImageHeader|. 1816 FORMAT_STRING = ('!4s2L' # magic, 2 x version 1817 '2Q' # 2 x block size 1818 'L' # algorithm type 1819 '2Q' # offset, size (hash) 1820 '2Q' # offset, size (signature) 1821 '2Q' # offset, size (public key) 1822 '2Q' # offset, size (public key metadata) 1823 '2Q' # offset, size (descriptors) 1824 'Q' # rollback_index 1825 'L' + # flags 1826 str(RESERVED0) + 'x' + # padding for reserved bytes 1827 '47sx' + # NUL-terminated release string 1828 str(RESERVED) + 'x') # padding for reserved bytes 1829 1830 def __init__(self, data=None): 1831 """Initializes a new header object. 1832 1833 Arguments: 1834 data: If not None, must be a bytearray of size 8192. 1835 1836 Raises: 1837 Exception: If the given data is malformed. 1838 """ 1839 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1840 1841 if data: 1842 (self.magic, self.required_libavb_version_major, 1843 self.required_libavb_version_minor, 1844 self.authentication_data_block_size, self.auxiliary_data_block_size, 1845 self.algorithm_type, self.hash_offset, self.hash_size, 1846 self.signature_offset, self.signature_size, self.public_key_offset, 1847 self.public_key_size, self.public_key_metadata_offset, 1848 self.public_key_metadata_size, self.descriptors_offset, 1849 self.descriptors_size, 1850 self.rollback_index, 1851 self.flags, 1852 self.release_string) = struct.unpack(self.FORMAT_STRING, data) 1853 # Nuke NUL-bytes at the end of the string. 1854 if self.magic != 'AVB0': 1855 raise AvbError('Given image does not look like a vbmeta image.') 1856 else: 1857 self.magic = 'AVB0' 1858 # Start by just requiring version 1.0. Code that adds features 1859 # in a future version can use bump_required_libavb_version_minor() to 1860 # bump the minor. 1861 self.required_libavb_version_major = AVB_VERSION_MAJOR 1862 self.required_libavb_version_minor = 0 1863 self.authentication_data_block_size = 0 1864 self.auxiliary_data_block_size = 0 1865 self.algorithm_type = 0 1866 self.hash_offset = 0 1867 self.hash_size = 0 1868 self.signature_offset = 0 1869 self.signature_size = 0 1870 self.public_key_offset = 0 1871 self.public_key_size = 0 1872 self.public_key_metadata_offset = 0 1873 self.public_key_metadata_size = 0 1874 self.descriptors_offset = 0 1875 self.descriptors_size = 0 1876 self.rollback_index = 0 1877 self.flags = 0 1878 self.release_string = get_release_string() 1879 1880 def bump_required_libavb_version_minor(self, minor): 1881 """Function to bump required_libavb_version_minor. 1882 1883 Call this when writing data that requires a specific libavb 1884 version to parse it. 1885 1886 Arguments: 1887 minor: The minor version of libavb that has support for the feature. 1888 """ 1889 self.required_libavb_version_minor = ( 1890 min(self.required_libavb_version_minor, minor)) 1891 1892 def save(self, output): 1893 """Serializes the header (256 bytes) to disk. 1894 1895 Arguments: 1896 output: The object to write the output to. 1897 """ 1898 output.write(struct.pack( 1899 self.FORMAT_STRING, self.magic, self.required_libavb_version_major, 1900 self.required_libavb_version_minor, self.authentication_data_block_size, 1901 self.auxiliary_data_block_size, self.algorithm_type, self.hash_offset, 1902 self.hash_size, self.signature_offset, self.signature_size, 1903 self.public_key_offset, self.public_key_size, 1904 self.public_key_metadata_offset, self.public_key_metadata_size, 1905 self.descriptors_offset, self.descriptors_size, self.rollback_index, 1906 self.flags, self.release_string)) 1907 1908 def encode(self): 1909 """Serializes the header (256) to a bytearray(). 1910 1911 Returns: 1912 A bytearray() with the encoded header. 1913 """ 1914 return struct.pack(self.FORMAT_STRING, self.magic, 1915 self.required_libavb_version_major, 1916 self.required_libavb_version_minor, 1917 self.authentication_data_block_size, 1918 self.auxiliary_data_block_size, self.algorithm_type, 1919 self.hash_offset, self.hash_size, self.signature_offset, 1920 self.signature_size, self.public_key_offset, 1921 self.public_key_size, self.public_key_metadata_offset, 1922 self.public_key_metadata_size, self.descriptors_offset, 1923 self.descriptors_size, self.rollback_index, self.flags, 1924 self.release_string) 1925 1926 1927 class Avb(object): 1928 """Business logic for avbtool command-line tool.""" 1929 1930 # Keep in sync with avb_ab_flow.h. 1931 AB_FORMAT_NO_CRC = '!4sBB2xBBBxBBBx12x' 1932 AB_MAGIC = '\0AB0' 1933 AB_MAJOR_VERSION = 1 1934 AB_MINOR_VERSION = 0 1935 AB_MISC_METADATA_OFFSET = 2048 1936 1937 # Constants for maximum metadata size. These are used to give 1938 # meaningful errors if the value passed in via --partition_size is 1939 # too small and when --calc_max_image_size is used. We use 1940 # conservative figures. 1941 MAX_VBMETA_SIZE = 64 * 1024 1942 MAX_FOOTER_SIZE = 4096 1943 1944 def erase_footer(self, image_filename, keep_hashtree): 1945 """Implements the 'erase_footer' command. 1946 1947 Arguments: 1948 image_filename: File to erase a footer from. 1949 keep_hashtree: If True, keep the hashtree and FEC around. 1950 1951 Raises: 1952 AvbError: If there's no footer in the image. 1953 """ 1954 1955 image = ImageHandler(image_filename) 1956 1957 (footer, _, descriptors, _) = self._parse_image(image) 1958 1959 if not footer: 1960 raise AvbError('Given image does not have a footer.') 1961 1962 new_image_size = None 1963 if not keep_hashtree: 1964 new_image_size = footer.original_image_size 1965 else: 1966 # If requested to keep the hashtree, search for a hashtree 1967 # descriptor to figure out the location and size of the hashtree 1968 # and FEC. 1969 for desc in descriptors: 1970 if isinstance(desc, AvbHashtreeDescriptor): 1971 # The hashtree is always just following the main data so the 1972 # new size is easily derived. 1973 new_image_size = desc.tree_offset + desc.tree_size 1974 # If the image has FEC codes, also keep those. 1975 if desc.fec_offset > 0: 1976 fec_end = desc.fec_offset + desc.fec_size 1977 new_image_size = max(new_image_size, fec_end) 1978 break 1979 if not new_image_size: 1980 raise AvbError('Requested to keep hashtree but no hashtree ' 1981 'descriptor was found.') 1982 1983 # And cut... 1984 image.truncate(new_image_size) 1985 1986 def resize_image(self, image_filename, partition_size): 1987 """Implements the 'resize_image' command. 1988 1989 Arguments: 1990 image_filename: File with footer to resize. 1991 partition_size: The new size of the image. 1992 1993 Raises: 1994 AvbError: If there's no footer in the image. 1995 """ 1996 1997 image = ImageHandler(image_filename) 1998 1999 if partition_size % image.block_size != 0: 2000 raise AvbError('Partition size of {} is not a multiple of the image ' 2001 'block size {}.'.format(partition_size, 2002 image.block_size)) 2003 2004 (footer, vbmeta_header, descriptors, _) = self._parse_image(image) 2005 2006 if not footer: 2007 raise AvbError('Given image does not have a footer.') 2008 2009 # The vbmeta blob is always at the end of the data so resizing an 2010 # image amounts to just moving the footer around. 2011 2012 vbmeta_end_offset = footer.vbmeta_offset + footer.vbmeta_size 2013 if vbmeta_end_offset % image.block_size != 0: 2014 vbmeta_end_offset += image.block_size - (vbmeta_end_offset % image.block_size) 2015 2016 if partition_size < vbmeta_end_offset + 1*image.block_size: 2017 raise AvbError('Requested size of {} is too small for an image ' 2018 'of size {}.' 2019 .format(partition_size, 2020 vbmeta_end_offset + 1*image.block_size)) 2021 2022 # Cut at the end of the vbmeta blob and insert a DONT_CARE chunk 2023 # with enough bytes such that the final Footer block is at the end 2024 # of partition_size. 2025 image.truncate(vbmeta_end_offset) 2026 image.append_dont_care(partition_size - vbmeta_end_offset - 2027 1*image.block_size) 2028 2029 # Just reuse the same footer - only difference is that we're 2030 # writing it in a different place. 2031 footer_blob = footer.encode() 2032 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) + 2033 footer_blob) 2034 image.append_raw(footer_blob_with_padding) 2035 2036 def set_ab_metadata(self, misc_image, slot_data): 2037 """Implements the 'set_ab_metadata' command. 2038 2039 The |slot_data| argument must be of the form 'A_priority:A_tries_remaining: 2040 A_successful_boot:B_priority:B_tries_remaining:B_successful_boot'. 2041 2042 Arguments: 2043 misc_image: The misc image to write to. 2044 slot_data: Slot data as a string 2045 2046 Raises: 2047 AvbError: If slot data is malformed. 2048 """ 2049 tokens = slot_data.split(':') 2050 if len(tokens) != 6: 2051 raise AvbError('Malformed slot data "{}".'.format(slot_data)) 2052 a_priority = int(tokens[0]) 2053 a_tries_remaining = int(tokens[1]) 2054 a_success = True if int(tokens[2]) != 0 else False 2055 b_priority = int(tokens[3]) 2056 b_tries_remaining = int(tokens[4]) 2057 b_success = True if int(tokens[5]) != 0 else False 2058 2059 ab_data_no_crc = struct.pack(self.AB_FORMAT_NO_CRC, 2060 self.AB_MAGIC, 2061 self.AB_MAJOR_VERSION, self.AB_MINOR_VERSION, 2062 a_priority, a_tries_remaining, a_success, 2063 b_priority, b_tries_remaining, b_success) 2064 # Force CRC to be unsigned, see https://bugs.python.org/issue4903 for why. 2065 crc_value = binascii.crc32(ab_data_no_crc) & 0xffffffff 2066 ab_data = ab_data_no_crc + struct.pack('!I', crc_value) 2067 misc_image.seek(self.AB_MISC_METADATA_OFFSET) 2068 misc_image.write(ab_data) 2069 2070 def info_image(self, image_filename, output): 2071 """Implements the 'info_image' command. 2072 2073 Arguments: 2074 image_filename: Image file to get information from (file object). 2075 output: Output file to write human-readable information to (file object). 2076 """ 2077 2078 image = ImageHandler(image_filename) 2079 2080 o = output 2081 2082 (footer, header, descriptors, image_size) = self._parse_image(image) 2083 2084 if footer: 2085 o.write('Footer version: {}.{}\n'.format(footer.version_major, 2086 footer.version_minor)) 2087 o.write('Image size: {} bytes\n'.format(image_size)) 2088 o.write('Original image size: {} bytes\n'.format( 2089 footer.original_image_size)) 2090 o.write('VBMeta offset: {}\n'.format(footer.vbmeta_offset)) 2091 o.write('VBMeta size: {} bytes\n'.format(footer.vbmeta_size)) 2092 o.write('--\n') 2093 2094 (alg_name, _) = lookup_algorithm_by_type(header.algorithm_type) 2095 2096 o.write('Minimum libavb version: {}.{}{}\n'.format( 2097 header.required_libavb_version_major, 2098 header.required_libavb_version_minor, 2099 ' (Sparse)' if image.is_sparse else '')) 2100 o.write('Header Block: {} bytes\n'.format(AvbVBMetaHeader.SIZE)) 2101 o.write('Authentication Block: {} bytes\n'.format( 2102 header.authentication_data_block_size)) 2103 o.write('Auxiliary Block: {} bytes\n'.format( 2104 header.auxiliary_data_block_size)) 2105 o.write('Algorithm: {}\n'.format(alg_name)) 2106 o.write('Rollback Index: {}\n'.format(header.rollback_index)) 2107 o.write('Flags: {}\n'.format(header.flags)) 2108 o.write('Release String: \'{}\'\n'.format( 2109 header.release_string.rstrip('\0'))) 2110 2111 # Print descriptors. 2112 num_printed = 0 2113 o.write('Descriptors:\n') 2114 for desc in descriptors: 2115 desc.print_desc(o) 2116 num_printed += 1 2117 if num_printed == 0: 2118 o.write(' (none)\n') 2119 2120 def verify_image(self, image_filename, key_path, expected_chain_partitions): 2121 """Implements the 'verify_image' command. 2122 2123 Arguments: 2124 image_filename: Image file to get information from (file object). 2125 key_path: None or check that embedded public key matches key at given path. 2126 expected_chain_partitions: List of chain partitions to check or None. 2127 """ 2128 2129 expected_chain_partitions_map = {} 2130 if expected_chain_partitions: 2131 used_locations = {} 2132 for cp in expected_chain_partitions: 2133 cp_tokens = cp.split(':') 2134 if len(cp_tokens) != 3: 2135 raise AvbError('Malformed chained partition "{}".'.format(cp)) 2136 partition_name = cp_tokens[0] 2137 rollback_index_location = int(cp_tokens[1]) 2138 file_path = cp_tokens[2] 2139 pk_blob = open(file_path).read() 2140 expected_chain_partitions_map[partition_name] = (rollback_index_location, pk_blob) 2141 2142 image_dir = os.path.dirname(image_filename) 2143 image_ext = os.path.splitext(image_filename)[1] 2144 2145 key_blob = None 2146 if key_path: 2147 print 'Verifying image {} using key at {}'.format(image_filename, key_path) 2148 key_blob = encode_rsa_key(key_path) 2149 else: 2150 print 'Verifying image {} using embedded public key'.format(image_filename) 2151 2152 image = ImageHandler(image_filename) 2153 (footer, header, descriptors, image_size) = self._parse_image(image) 2154 offset = 0 2155 if footer: 2156 offset = footer.vbmeta_offset 2157 size = (header.SIZE + header.authentication_data_block_size + 2158 header.auxiliary_data_block_size) 2159 image.seek(offset) 2160 vbmeta_blob = image.read(size) 2161 h = AvbVBMetaHeader(vbmeta_blob[0:AvbVBMetaHeader.SIZE]) 2162 alg_name, _ = lookup_algorithm_by_type(header.algorithm_type) 2163 if not verify_vbmeta_signature(header, vbmeta_blob): 2164 raise AvbError('Signature check failed for {} vbmeta struct {}' 2165 .format(alg_name, image_filename)) 2166 2167 if key_blob: 2168 # The embedded public key is in the auxiliary block at an offset. 2169 key_offset = AvbVBMetaHeader.SIZE 2170 key_offset += h.authentication_data_block_size 2171 key_offset += h.public_key_offset 2172 key_blob_in_vbmeta = vbmeta_blob[key_offset:key_offset + h.public_key_size] 2173 if key_blob != key_blob_in_vbmeta: 2174 raise AvbError('Embedded public key does not match given key.') 2175 2176 if footer: 2177 print ('vbmeta: Successfully verified footer and {} vbmeta struct in {}' 2178 .format(alg_name, image_filename)) 2179 else: 2180 print ('vbmeta: Successfully verified {} vbmeta struct in {}' 2181 .format(alg_name, image_filename)) 2182 2183 for desc in descriptors: 2184 if not desc.verify(image_dir, image_ext, expected_chain_partitions_map): 2185 raise AvbError('Error verifying descriptor.') 2186 2187 2188 def _parse_image(self, image): 2189 """Gets information about an image. 2190 2191 The image can either be a vbmeta or an image with a footer. 2192 2193 Arguments: 2194 image: An ImageHandler (vbmeta or footer) with a hashtree descriptor. 2195 2196 Returns: 2197 A tuple where the first argument is a AvbFooter (None if there 2198 is no footer on the image), the second argument is a 2199 AvbVBMetaHeader, the third argument is a list of 2200 AvbDescriptor-derived instances, and the fourth argument is the 2201 size of |image|. 2202 """ 2203 assert isinstance(image, ImageHandler) 2204 footer = None 2205 image.seek(image.image_size - AvbFooter.SIZE) 2206 try: 2207 footer = AvbFooter(image.read(AvbFooter.SIZE)) 2208 except (LookupError, struct.error): 2209 # Nope, just seek back to the start. 2210 image.seek(0) 2211 2212 vbmeta_offset = 0 2213 if footer: 2214 vbmeta_offset = footer.vbmeta_offset 2215 2216 image.seek(vbmeta_offset) 2217 h = AvbVBMetaHeader(image.read(AvbVBMetaHeader.SIZE)) 2218 2219 auth_block_offset = vbmeta_offset + AvbVBMetaHeader.SIZE 2220 aux_block_offset = auth_block_offset + h.authentication_data_block_size 2221 desc_start_offset = aux_block_offset + h.descriptors_offset 2222 image.seek(desc_start_offset) 2223 descriptors = parse_descriptors(image.read(h.descriptors_size)) 2224 2225 return footer, h, descriptors, image.image_size 2226 2227 def _load_vbmeta_blob(self, image): 2228 """Gets the vbmeta struct and associated sections. 2229 2230 The image can either be a vbmeta.img or an image with a footer. 2231 2232 Arguments: 2233 image: An ImageHandler (vbmeta or footer). 2234 2235 Returns: 2236 A blob with the vbmeta struct and other sections. 2237 """ 2238 assert isinstance(image, ImageHandler) 2239 footer = None 2240 image.seek(image.image_size - AvbFooter.SIZE) 2241 try: 2242 footer = AvbFooter(image.read(AvbFooter.SIZE)) 2243 except (LookupError, struct.error): 2244 # Nope, just seek back to the start. 2245 image.seek(0) 2246 2247 vbmeta_offset = 0 2248 if footer: 2249 vbmeta_offset = footer.vbmeta_offset 2250 2251 image.seek(vbmeta_offset) 2252 h = AvbVBMetaHeader(image.read(AvbVBMetaHeader.SIZE)) 2253 2254 image.seek(vbmeta_offset) 2255 data_size = AvbVBMetaHeader.SIZE 2256 data_size += h.authentication_data_block_size 2257 data_size += h.auxiliary_data_block_size 2258 return image.read(data_size) 2259 2260 def _get_cmdline_descriptors_for_hashtree_descriptor(self, ht): 2261 """Generate kernel cmdline descriptors for dm-verity. 2262 2263 Arguments: 2264 ht: A AvbHashtreeDescriptor 2265 2266 Returns: 2267 A list with two AvbKernelCmdlineDescriptor with dm-verity kernel cmdline 2268 instructions. There is one for when hashtree is not disabled and one for 2269 when it is. 2270 2271 """ 2272 2273 c = 'dm="1 vroot none ro 1,' 2274 c += '0' # start 2275 c += ' {}'.format((ht.image_size / 512)) # size (# sectors) 2276 c += ' verity {}'.format(ht.dm_verity_version) # type and version 2277 c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' # data_dev 2278 c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' # hash_dev 2279 c += ' {}'.format(ht.data_block_size) # data_block 2280 c += ' {}'.format(ht.hash_block_size) # hash_block 2281 c += ' {}'.format(ht.image_size / ht.data_block_size) # #blocks 2282 c += ' {}'.format(ht.image_size / ht.data_block_size) # hash_offset 2283 c += ' {}'.format(ht.hash_algorithm) # hash_alg 2284 c += ' {}'.format(str(ht.root_digest).encode('hex')) # root_digest 2285 c += ' {}'.format(str(ht.salt).encode('hex')) # salt 2286 if ht.fec_num_roots > 0: 2287 c += ' 10' # number of optional args 2288 c += ' $(ANDROID_VERITY_MODE)' 2289 c += ' ignore_zero_blocks' 2290 c += ' use_fec_from_device PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' 2291 c += ' fec_roots {}'.format(ht.fec_num_roots) 2292 # Note that fec_blocks is the size that FEC covers, *not* the 2293 # size of the FEC data. Since we use FEC for everything up until 2294 # the FEC data, it's the same as the offset. 2295 c += ' fec_blocks {}'.format(ht.fec_offset/ht.data_block_size) 2296 c += ' fec_start {}'.format(ht.fec_offset/ht.data_block_size) 2297 else: 2298 c += ' 2' # number of optional args 2299 c += ' $(ANDROID_VERITY_MODE)' 2300 c += ' ignore_zero_blocks' 2301 c += '" root=/dev/dm-0' 2302 2303 # Now that we have the command-line, generate the descriptor. 2304 desc = AvbKernelCmdlineDescriptor() 2305 desc.kernel_cmdline = c 2306 desc.flags = ( 2307 AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED) 2308 2309 # The descriptor for when hashtree verification is disabled is a lot 2310 # simpler - we just set the root to the partition. 2311 desc_no_ht = AvbKernelCmdlineDescriptor() 2312 desc_no_ht.kernel_cmdline = 'root=PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' 2313 desc_no_ht.flags = ( 2314 AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_DISABLED) 2315 2316 return [desc, desc_no_ht] 2317 2318 def _get_cmdline_descriptors_for_dm_verity(self, image): 2319 """Generate kernel cmdline descriptors for dm-verity. 2320 2321 Arguments: 2322 image: An ImageHandler (vbmeta or footer) with a hashtree descriptor. 2323 2324 Returns: 2325 A list with two AvbKernelCmdlineDescriptor with dm-verity kernel cmdline 2326 instructions. There is one for when hashtree is not disabled and one for 2327 when it is. 2328 2329 Raises: 2330 AvbError: If |image| doesn't have a hashtree descriptor. 2331 2332 """ 2333 2334 (_, _, descriptors, _) = self._parse_image(image) 2335 2336 ht = None 2337 for desc in descriptors: 2338 if isinstance(desc, AvbHashtreeDescriptor): 2339 ht = desc 2340 break 2341 2342 if not ht: 2343 raise AvbError('No hashtree descriptor in given image') 2344 2345 return self._get_cmdline_descriptors_for_hashtree_descriptor(ht) 2346 2347 def make_vbmeta_image(self, output, chain_partitions, algorithm_name, 2348 key_path, public_key_metadata_path, rollback_index, 2349 flags, props, props_from_file, kernel_cmdlines, 2350 setup_rootfs_from_kernel, 2351 include_descriptors_from_image, 2352 signing_helper, 2353 signing_helper_with_files, 2354 release_string, 2355 append_to_release_string, 2356 print_required_libavb_version, 2357 padding_size): 2358 """Implements the 'make_vbmeta_image' command. 2359 2360 Arguments: 2361 output: File to write the image to. 2362 chain_partitions: List of partitions to chain or None. 2363 algorithm_name: Name of algorithm to use. 2364 key_path: Path to key to use or None. 2365 public_key_metadata_path: Path to public key metadata or None. 2366 rollback_index: The rollback index to use. 2367 flags: Flags value to use in the image. 2368 props: Properties to insert (list of strings of the form 'key:value'). 2369 props_from_file: Properties to insert (list of strings 'key:<path>'). 2370 kernel_cmdlines: Kernel cmdlines to insert (list of strings). 2371 setup_rootfs_from_kernel: None or file to generate from. 2372 include_descriptors_from_image: List of file objects with descriptors. 2373 signing_helper: Program which signs a hash and return signature. 2374 signing_helper_with_files: Same as signing_helper but uses files instead. 2375 release_string: None or avbtool release string to use instead of default. 2376 append_to_release_string: None or string to append. 2377 print_required_libavb_version: True to only print required libavb version. 2378 padding_size: If not 0, pads output so size is a multiple of the number. 2379 2380 Raises: 2381 AvbError: If a chained partition is malformed. 2382 """ 2383 2384 # If we're asked to calculate minimum required libavb version, we're done. 2385 # 2386 # NOTE: When we get to 1.1 and later this will get more complicated. 2387 if print_required_libavb_version: 2388 print '1.0' 2389 return 2390 2391 if not output: 2392 raise AvbError('No output file given') 2393 2394 descriptors = [] 2395 ht_desc_to_setup = None 2396 vbmeta_blob = self._generate_vbmeta_blob( 2397 algorithm_name, key_path, public_key_metadata_path, descriptors, 2398 chain_partitions, rollback_index, flags, props, props_from_file, 2399 kernel_cmdlines, setup_rootfs_from_kernel, ht_desc_to_setup, 2400 include_descriptors_from_image, signing_helper, 2401 signing_helper_with_files, release_string, 2402 append_to_release_string) 2403 2404 # Write entire vbmeta blob (header, authentication, auxiliary). 2405 output.seek(0) 2406 output.write(vbmeta_blob) 2407 2408 if padding_size > 0: 2409 padded_size = round_to_multiple(len(vbmeta_blob), padding_size) 2410 padding_needed = padded_size - len(vbmeta_blob) 2411 output.write('\0' * padding_needed) 2412 2413 def _generate_vbmeta_blob(self, algorithm_name, key_path, 2414 public_key_metadata_path, descriptors, 2415 chain_partitions, 2416 rollback_index, flags, props, props_from_file, 2417 kernel_cmdlines, 2418 setup_rootfs_from_kernel, 2419 ht_desc_to_setup, 2420 include_descriptors_from_image, signing_helper, 2421 signing_helper_with_files, 2422 release_string, append_to_release_string): 2423 """Generates a VBMeta blob. 2424 2425 This blob contains the header (struct AvbVBMetaHeader), the 2426 authentication data block (which contains the hash and signature 2427 for the header and auxiliary block), and the auxiliary block 2428 (which contains descriptors, the public key used, and other data). 2429 2430 The |key| parameter can |None| only if the |algorithm_name| is 2431 'NONE'. 2432 2433 Arguments: 2434 algorithm_name: The algorithm name as per the ALGORITHMS dict. 2435 key_path: The path to the .pem file used to sign the blob. 2436 public_key_metadata_path: Path to public key metadata or None. 2437 descriptors: A list of descriptors to insert or None. 2438 chain_partitions: List of partitions to chain or None. 2439 rollback_index: The rollback index to use. 2440 flags: Flags to use in the image. 2441 props: Properties to insert (List of strings of the form 'key:value'). 2442 props_from_file: Properties to insert (List of strings 'key:<path>'). 2443 kernel_cmdlines: Kernel cmdlines to insert (list of strings). 2444 setup_rootfs_from_kernel: None or file to generate 2445 dm-verity kernel cmdline from. 2446 ht_desc_to_setup: If not None, an AvbHashtreeDescriptor to 2447 generate dm-verity kernel cmdline descriptors from. 2448 include_descriptors_from_image: List of file objects for which 2449 to insert descriptors from. 2450 signing_helper: Program which signs a hash and return signature. 2451 signing_helper_with_files: Same as signing_helper but uses files instead. 2452 release_string: None or avbtool release string. 2453 append_to_release_string: None or string to append. 2454 2455 Returns: 2456 A bytearray() with the VBMeta blob. 2457 2458 Raises: 2459 Exception: If the |algorithm_name| is not found, if no key has 2460 been given and the given algorithm requires one, or the key is 2461 of the wrong size. 2462 2463 """ 2464 try: 2465 alg = ALGORITHMS[algorithm_name] 2466 except KeyError: 2467 raise AvbError('Unknown algorithm with name {}'.format(algorithm_name)) 2468 2469 if not descriptors: 2470 descriptors = [] 2471 2472 # Insert chained partition descriptors, if any 2473 if chain_partitions: 2474 used_locations = {} 2475 for cp in chain_partitions: 2476 cp_tokens = cp.split(':') 2477 if len(cp_tokens) != 3: 2478 raise AvbError('Malformed chained partition "{}".'.format(cp)) 2479 partition_name = cp_tokens[0] 2480 rollback_index_location = int(cp_tokens[1]) 2481 file_path = cp_tokens[2] 2482 # Check that the same rollback location isn't being used by 2483 # multiple chained partitions. 2484 if used_locations.get(rollback_index_location): 2485 raise AvbError('Rollback Index Location {} is already in use.'.format( 2486 rollback_index_location)) 2487 used_locations[rollback_index_location] = True 2488 desc = AvbChainPartitionDescriptor() 2489 desc.partition_name = partition_name 2490 desc.rollback_index_location = rollback_index_location 2491 if desc.rollback_index_location < 1: 2492 raise AvbError('Rollback index location must be 1 or larger.') 2493 desc.public_key = open(file_path, 'rb').read() 2494 descriptors.append(desc) 2495 2496 # Descriptors. 2497 encoded_descriptors = bytearray() 2498 for desc in descriptors: 2499 encoded_descriptors.extend(desc.encode()) 2500 2501 # Add properties. 2502 if props: 2503 for prop in props: 2504 idx = prop.find(':') 2505 if idx == -1: 2506 raise AvbError('Malformed property "{}".'.format(prop)) 2507 desc = AvbPropertyDescriptor() 2508 desc.key = prop[0:idx] 2509 desc.value = prop[(idx + 1):] 2510 encoded_descriptors.extend(desc.encode()) 2511 if props_from_file: 2512 for prop in props_from_file: 2513 idx = prop.find(':') 2514 if idx == -1: 2515 raise AvbError('Malformed property "{}".'.format(prop)) 2516 desc = AvbPropertyDescriptor() 2517 desc.key = prop[0:idx] 2518 desc.value = prop[(idx + 1):] 2519 file_path = prop[(idx + 1):] 2520 desc.value = open(file_path, 'rb').read() 2521 encoded_descriptors.extend(desc.encode()) 2522 2523 # Add AvbKernelCmdline descriptor for dm-verity from an image, if requested. 2524 if setup_rootfs_from_kernel: 2525 image_handler = ImageHandler( 2526 setup_rootfs_from_kernel.name) 2527 cmdline_desc = self._get_cmdline_descriptors_for_dm_verity(image_handler) 2528 encoded_descriptors.extend(cmdline_desc[0].encode()) 2529 encoded_descriptors.extend(cmdline_desc[1].encode()) 2530 2531 # Add AvbKernelCmdline descriptor for dm-verity from desc, if requested. 2532 if ht_desc_to_setup: 2533 cmdline_desc = self._get_cmdline_descriptors_for_hashtree_descriptor( 2534 ht_desc_to_setup) 2535 encoded_descriptors.extend(cmdline_desc[0].encode()) 2536 encoded_descriptors.extend(cmdline_desc[1].encode()) 2537 2538 # Add kernel command-lines. 2539 if kernel_cmdlines: 2540 for i in kernel_cmdlines: 2541 desc = AvbKernelCmdlineDescriptor() 2542 desc.kernel_cmdline = i 2543 encoded_descriptors.extend(desc.encode()) 2544 2545 # Add descriptors from other images. 2546 if include_descriptors_from_image: 2547 for image in include_descriptors_from_image: 2548 image_handler = ImageHandler(image.name) 2549 (_, _, image_descriptors, _) = self._parse_image(image_handler) 2550 for desc in image_descriptors: 2551 encoded_descriptors.extend(desc.encode()) 2552 2553 # Load public key metadata blob, if requested. 2554 pkmd_blob = [] 2555 if public_key_metadata_path: 2556 with open(public_key_metadata_path) as f: 2557 pkmd_blob = f.read() 2558 2559 key = None 2560 encoded_key = bytearray() 2561 if alg.public_key_num_bytes > 0: 2562 if not key_path: 2563 raise AvbError('Key is required for algorithm {}'.format( 2564 algorithm_name)) 2565 encoded_key = encode_rsa_key(key_path) 2566 if len(encoded_key) != alg.public_key_num_bytes: 2567 raise AvbError('Key is wrong size for algorithm {}'.format( 2568 algorithm_name)) 2569 2570 h = AvbVBMetaHeader() 2571 2572 # Override release string, if requested. 2573 if isinstance(release_string, (str, unicode)): 2574 h.release_string = release_string 2575 2576 # Append to release string, if requested. Also insert a space before. 2577 if isinstance(append_to_release_string, (str, unicode)): 2578 h.release_string += ' ' + append_to_release_string 2579 2580 # For the Auxiliary data block, descriptors are stored at offset 0, 2581 # followed by the public key, followed by the public key metadata blob. 2582 h.auxiliary_data_block_size = round_to_multiple( 2583 len(encoded_descriptors) + len(encoded_key) + len(pkmd_blob), 64) 2584 h.descriptors_offset = 0 2585 h.descriptors_size = len(encoded_descriptors) 2586 h.public_key_offset = h.descriptors_size 2587 h.public_key_size = len(encoded_key) 2588 h.public_key_metadata_offset = h.public_key_offset + h.public_key_size 2589 h.public_key_metadata_size = len(pkmd_blob) 2590 2591 # For the Authentication data block, the hash is first and then 2592 # the signature. 2593 h.authentication_data_block_size = round_to_multiple( 2594 alg.hash_num_bytes + alg.signature_num_bytes, 64) 2595 h.algorithm_type = alg.algorithm_type 2596 h.hash_offset = 0 2597 h.hash_size = alg.hash_num_bytes 2598 # Signature offset and size - it's stored right after the hash 2599 # (in Authentication data block). 2600 h.signature_offset = alg.hash_num_bytes 2601 h.signature_size = alg.signature_num_bytes 2602 2603 h.rollback_index = rollback_index 2604 h.flags = flags 2605 2606 # Generate Header data block. 2607 header_data_blob = h.encode() 2608 2609 # Generate Auxiliary data block. 2610 aux_data_blob = bytearray() 2611 aux_data_blob.extend(encoded_descriptors) 2612 aux_data_blob.extend(encoded_key) 2613 aux_data_blob.extend(pkmd_blob) 2614 padding_bytes = h.auxiliary_data_block_size - len(aux_data_blob) 2615 aux_data_blob.extend('\0' * padding_bytes) 2616 2617 # Calculate the hash. 2618 binary_hash = bytearray() 2619 binary_signature = bytearray() 2620 if algorithm_name != 'NONE': 2621 ha = hashlib.new(alg.hash_name) 2622 ha.update(header_data_blob) 2623 ha.update(aux_data_blob) 2624 binary_hash.extend(ha.digest()) 2625 2626 # Calculate the signature. 2627 padding_and_hash = str(bytearray(alg.padding)) + binary_hash 2628 binary_signature.extend(raw_sign(signing_helper, 2629 signing_helper_with_files, 2630 algorithm_name, 2631 alg.signature_num_bytes, key_path, 2632 padding_and_hash)) 2633 2634 # Generate Authentication data block. 2635 auth_data_blob = bytearray() 2636 auth_data_blob.extend(binary_hash) 2637 auth_data_blob.extend(binary_signature) 2638 padding_bytes = h.authentication_data_block_size - len(auth_data_blob) 2639 auth_data_blob.extend('\0' * padding_bytes) 2640 2641 return header_data_blob + auth_data_blob + aux_data_blob 2642 2643 def extract_public_key(self, key_path, output): 2644 """Implements the 'extract_public_key' command. 2645 2646 Arguments: 2647 key_path: The path to a RSA private key file. 2648 output: The file to write to. 2649 """ 2650 output.write(encode_rsa_key(key_path)) 2651 2652 def append_vbmeta_image(self, image_filename, vbmeta_image_filename, 2653 partition_size): 2654 """Implementation of the append_vbmeta_image command. 2655 2656 Arguments: 2657 image_filename: File to add the footer to. 2658 vbmeta_image_filename: File to get vbmeta struct from. 2659 partition_size: Size of partition. 2660 2661 Raises: 2662 AvbError: If an argument is incorrect. 2663 """ 2664 image = ImageHandler(image_filename) 2665 2666 if partition_size % image.block_size != 0: 2667 raise AvbError('Partition size of {} is not a multiple of the image ' 2668 'block size {}.'.format(partition_size, 2669 image.block_size)) 2670 2671 # If there's already a footer, truncate the image to its original 2672 # size. This way 'avbtool append_vbmeta_image' is idempotent. 2673 image.seek(image.image_size - AvbFooter.SIZE) 2674 try: 2675 footer = AvbFooter(image.read(AvbFooter.SIZE)) 2676 # Existing footer found. Just truncate. 2677 original_image_size = footer.original_image_size 2678 image.truncate(footer.original_image_size) 2679 except (LookupError, struct.error): 2680 original_image_size = image.image_size 2681 2682 # If anything goes wrong from here-on, restore the image back to 2683 # its original size. 2684 try: 2685 vbmeta_image_handler = ImageHandler(vbmeta_image_filename) 2686 vbmeta_blob = self._load_vbmeta_blob(vbmeta_image_handler) 2687 2688 # If the image isn't sparse, its size might not be a multiple of 2689 # the block size. This will screw up padding later so just grow it. 2690 if image.image_size % image.block_size != 0: 2691 assert not image.is_sparse 2692 padding_needed = image.block_size - (image.image_size%image.block_size) 2693 image.truncate(image.image_size + padding_needed) 2694 2695 # The append_raw() method requires content with size being a 2696 # multiple of |block_size| so add padding as needed. Also record 2697 # where this is written to since we'll need to put that in the 2698 # footer. 2699 vbmeta_offset = image.image_size 2700 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) - 2701 len(vbmeta_blob)) 2702 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed 2703 2704 # Append vbmeta blob and footer 2705 image.append_raw(vbmeta_blob_with_padding) 2706 vbmeta_end_offset = vbmeta_offset + len(vbmeta_blob_with_padding) 2707 2708 # Now insert a DONT_CARE chunk with enough bytes such that the 2709 # final Footer block is at the end of partition_size.. 2710 image.append_dont_care(partition_size - vbmeta_end_offset - 2711 1*image.block_size) 2712 2713 # Generate the Footer that tells where the VBMeta footer 2714 # is. Also put enough padding in the front of the footer since 2715 # we'll write out an entire block. 2716 footer = AvbFooter() 2717 footer.original_image_size = original_image_size 2718 footer.vbmeta_offset = vbmeta_offset 2719 footer.vbmeta_size = len(vbmeta_blob) 2720 footer_blob = footer.encode() 2721 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) + 2722 footer_blob) 2723 image.append_raw(footer_blob_with_padding) 2724 2725 except: 2726 # Truncate back to original size, then re-raise 2727 image.truncate(original_image_size) 2728 raise 2729 2730 def add_hash_footer(self, image_filename, partition_size, partition_name, 2731 hash_algorithm, salt, chain_partitions, algorithm_name, 2732 key_path, 2733 public_key_metadata_path, rollback_index, flags, props, 2734 props_from_file, kernel_cmdlines, 2735 setup_rootfs_from_kernel, 2736 include_descriptors_from_image, calc_max_image_size, 2737 signing_helper, signing_helper_with_files, 2738 release_string, append_to_release_string, 2739 output_vbmeta_image, do_not_append_vbmeta_image, 2740 print_required_libavb_version): 2741 """Implementation of the add_hash_footer on unsparse images. 2742 2743 Arguments: 2744 image_filename: File to add the footer to. 2745 partition_size: Size of partition. 2746 partition_name: Name of partition (without A/B suffix). 2747 hash_algorithm: Hash algorithm to use. 2748 salt: Salt to use as a hexadecimal string or None to use /dev/urandom. 2749 chain_partitions: List of partitions to chain. 2750 algorithm_name: Name of algorithm to use. 2751 key_path: Path to key to use or None. 2752 public_key_metadata_path: Path to public key metadata or None. 2753 rollback_index: Rollback index. 2754 flags: Flags value to use in the image. 2755 props: Properties to insert (List of strings of the form 'key:value'). 2756 props_from_file: Properties to insert (List of strings 'key:<path>'). 2757 kernel_cmdlines: Kernel cmdlines to insert (list of strings). 2758 setup_rootfs_from_kernel: None or file to generate 2759 dm-verity kernel cmdline from. 2760 include_descriptors_from_image: List of file objects for which 2761 to insert descriptors from. 2762 calc_max_image_size: Don't store the footer - instead calculate the 2763 maximum image size leaving enough room for metadata with the 2764 given |partition_size|. 2765 signing_helper: Program which signs a hash and return signature. 2766 signing_helper_with_files: Same as signing_helper but uses files instead. 2767 release_string: None or avbtool release string. 2768 append_to_release_string: None or string to append. 2769 output_vbmeta_image: If not None, also write vbmeta struct to this file. 2770 do_not_append_vbmeta_image: If True, don't append vbmeta struct. 2771 print_required_libavb_version: True to only print required libavb version. 2772 2773 Raises: 2774 AvbError: If an argument is incorrect. 2775 """ 2776 2777 # If we're asked to calculate minimum required libavb version, we're done. 2778 # 2779 # NOTE: When we get to 1.1 and later this will get more complicated. 2780 if print_required_libavb_version: 2781 print '1.0' 2782 return 2783 2784 # First, calculate the maximum image size such that an image 2785 # this size + metadata (footer + vbmeta struct) fits in 2786 # |partition_size|. 2787 max_metadata_size = self.MAX_VBMETA_SIZE + self.MAX_FOOTER_SIZE 2788 max_image_size = partition_size - max_metadata_size 2789 2790 # If we're asked to only calculate the maximum image size, we're done. 2791 if calc_max_image_size: 2792 print '{}'.format(max_image_size) 2793 return 2794 2795 image = ImageHandler(image_filename) 2796 2797 if partition_size % image.block_size != 0: 2798 raise AvbError('Partition size of {} is not a multiple of the image ' 2799 'block size {}.'.format(partition_size, 2800 image.block_size)) 2801 2802 # If there's already a footer, truncate the image to its original 2803 # size. This way 'avbtool add_hash_footer' is idempotent (modulo 2804 # salts). 2805 image.seek(image.image_size - AvbFooter.SIZE) 2806 try: 2807 footer = AvbFooter(image.read(AvbFooter.SIZE)) 2808 # Existing footer found. Just truncate. 2809 original_image_size = footer.original_image_size 2810 image.truncate(footer.original_image_size) 2811 except (LookupError, struct.error): 2812 original_image_size = image.image_size 2813 2814 # If anything goes wrong from here-on, restore the image back to 2815 # its original size. 2816 try: 2817 # If image size exceeds the maximum image size, fail. 2818 if image.image_size > max_image_size: 2819 raise AvbError('Image size of {} exceeds maximum image ' 2820 'size of {} in order to fit in a partition ' 2821 'size of {}.'.format(image.image_size, max_image_size, 2822 partition_size)) 2823 2824 digest_size = len(hashlib.new(name=hash_algorithm).digest()) 2825 if salt: 2826 salt = salt.decode('hex') 2827 else: 2828 if salt is None: 2829 # If salt is not explicitly specified, choose a hash 2830 # that's the same size as the hash size. 2831 hash_size = digest_size 2832 salt = open('/dev/urandom').read(hash_size) 2833 else: 2834 salt = '' 2835 2836 hasher = hashlib.new(name=hash_algorithm, string=salt) 2837 # TODO(zeuthen): might want to read this in chunks to avoid 2838 # memory pressure, then again, this is only supposed to be used 2839 # on kernel/initramfs partitions. Possible optimization. 2840 image.seek(0) 2841 hasher.update(image.read(image.image_size)) 2842 digest = hasher.digest() 2843 2844 h_desc = AvbHashDescriptor() 2845 h_desc.image_size = image.image_size 2846 h_desc.hash_algorithm = hash_algorithm 2847 h_desc.partition_name = partition_name 2848 h_desc.salt = salt 2849 h_desc.digest = digest 2850 2851 # Generate the VBMeta footer. 2852 ht_desc_to_setup = None 2853 vbmeta_blob = self._generate_vbmeta_blob( 2854 algorithm_name, key_path, public_key_metadata_path, [h_desc], 2855 chain_partitions, rollback_index, flags, props, props_from_file, 2856 kernel_cmdlines, setup_rootfs_from_kernel, ht_desc_to_setup, 2857 include_descriptors_from_image, signing_helper, 2858 signing_helper_with_files, release_string, 2859 append_to_release_string) 2860 2861 # If the image isn't sparse, its size might not be a multiple of 2862 # the block size. This will screw up padding later so just grow it. 2863 if image.image_size % image.block_size != 0: 2864 assert not image.is_sparse 2865 padding_needed = image.block_size - (image.image_size%image.block_size) 2866 image.truncate(image.image_size + padding_needed) 2867 2868 # The append_raw() method requires content with size being a 2869 # multiple of |block_size| so add padding as needed. Also record 2870 # where this is written to since we'll need to put that in the 2871 # footer. 2872 vbmeta_offset = image.image_size 2873 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) - 2874 len(vbmeta_blob)) 2875 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed 2876 2877 # Write vbmeta blob, if requested. 2878 if output_vbmeta_image: 2879 output_vbmeta_image.write(vbmeta_blob) 2880 2881 # Append vbmeta blob and footer, unless requested not to. 2882 if not do_not_append_vbmeta_image: 2883 image.append_raw(vbmeta_blob_with_padding) 2884 vbmeta_end_offset = vbmeta_offset + len(vbmeta_blob_with_padding) 2885 2886 # Now insert a DONT_CARE chunk with enough bytes such that the 2887 # final Footer block is at the end of partition_size.. 2888 image.append_dont_care(partition_size - vbmeta_end_offset - 2889 1*image.block_size) 2890 2891 # Generate the Footer that tells where the VBMeta footer 2892 # is. Also put enough padding in the front of the footer since 2893 # we'll write out an entire block. 2894 footer = AvbFooter() 2895 footer.original_image_size = original_image_size 2896 footer.vbmeta_offset = vbmeta_offset 2897 footer.vbmeta_size = len(vbmeta_blob) 2898 footer_blob = footer.encode() 2899 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) + 2900 footer_blob) 2901 image.append_raw(footer_blob_with_padding) 2902 2903 except: 2904 # Truncate back to original size, then re-raise 2905 image.truncate(original_image_size) 2906 raise 2907 2908 def add_hashtree_footer(self, image_filename, partition_size, partition_name, 2909 generate_fec, fec_num_roots, hash_algorithm, 2910 block_size, salt, chain_partitions, algorithm_name, 2911 key_path, 2912 public_key_metadata_path, rollback_index, flags, 2913 props, props_from_file, kernel_cmdlines, 2914 setup_rootfs_from_kernel, 2915 setup_as_rootfs_from_kernel, 2916 include_descriptors_from_image, 2917 calc_max_image_size, signing_helper, 2918 signing_helper_with_files, 2919 release_string, append_to_release_string, 2920 output_vbmeta_image, do_not_append_vbmeta_image, 2921 print_required_libavb_version): 2922 """Implements the 'add_hashtree_footer' command. 2923 2924 See https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity for 2925 more information about dm-verity and these hashes. 2926 2927 Arguments: 2928 image_filename: File to add the footer to. 2929 partition_size: Size of partition. 2930 partition_name: Name of partition (without A/B suffix). 2931 generate_fec: If True, generate FEC codes. 2932 fec_num_roots: Number of roots for FEC. 2933 hash_algorithm: Hash algorithm to use. 2934 block_size: Block size to use. 2935 salt: Salt to use as a hexadecimal string or None to use /dev/urandom. 2936 chain_partitions: List of partitions to chain. 2937 algorithm_name: Name of algorithm to use. 2938 key_path: Path to key to use or None. 2939 public_key_metadata_path: Path to public key metadata or None. 2940 rollback_index: Rollback index. 2941 flags: Flags value to use in the image. 2942 props: Properties to insert (List of strings of the form 'key:value'). 2943 props_from_file: Properties to insert (List of strings 'key:<path>'). 2944 kernel_cmdlines: Kernel cmdlines to insert (list of strings). 2945 setup_rootfs_from_kernel: None or file to generate 2946 dm-verity kernel cmdline from. 2947 setup_as_rootfs_from_kernel: If True, generate dm-verity kernel 2948 cmdline to set up rootfs. 2949 include_descriptors_from_image: List of file objects for which 2950 to insert descriptors from. 2951 calc_max_image_size: Don't store the hashtree or footer - instead 2952 calculate the maximum image size leaving enough room for hashtree 2953 and metadata with the given |partition_size|. 2954 signing_helper: Program which signs a hash and return signature. 2955 signing_helper_with_files: Same as signing_helper but uses files instead. 2956 release_string: None or avbtool release string. 2957 append_to_release_string: None or string to append. 2958 output_vbmeta_image: If not None, also write vbmeta struct to this file. 2959 do_not_append_vbmeta_image: If True, don't append vbmeta struct. 2960 print_required_libavb_version: True to only print required libavb version. 2961 2962 Raises: 2963 AvbError: If an argument is incorrect. 2964 """ 2965 2966 # If we're asked to calculate minimum required libavb version, we're done. 2967 # 2968 # NOTE: When we get to 1.1 and later this will get more complicated. 2969 if print_required_libavb_version: 2970 print '1.0' 2971 return 2972 2973 digest_size = len(hashlib.new(name=hash_algorithm).digest()) 2974 digest_padding = round_to_pow2(digest_size) - digest_size 2975 2976 # First, calculate the maximum image size such that an image 2977 # this size + the hashtree + metadata (footer + vbmeta struct) 2978 # fits in |partition_size|. We use very conservative figures for 2979 # metadata. 2980 (_, max_tree_size) = calc_hash_level_offsets( 2981 partition_size, block_size, digest_size + digest_padding) 2982 max_fec_size = 0 2983 if generate_fec: 2984 max_fec_size = calc_fec_data_size(partition_size, fec_num_roots) 2985 max_metadata_size = (max_fec_size + max_tree_size + 2986 self.MAX_VBMETA_SIZE + 2987 self.MAX_FOOTER_SIZE) 2988 max_image_size = partition_size - max_metadata_size 2989 2990 # If we're asked to only calculate the maximum image size, we're done. 2991 if calc_max_image_size: 2992 print '{}'.format(max_image_size) 2993 return 2994 2995 image = ImageHandler(image_filename) 2996 2997 if partition_size % image.block_size != 0: 2998 raise AvbError('Partition size of {} is not a multiple of the image ' 2999 'block size {}.'.format(partition_size, 3000 image.block_size)) 3001 3002 # If there's already a footer, truncate the image to its original 3003 # size. This way 'avbtool add_hashtree_footer' is idempotent 3004 # (modulo salts). 3005 image.seek(image.image_size - AvbFooter.SIZE) 3006 try: 3007 footer = AvbFooter(image.read(AvbFooter.SIZE)) 3008 # Existing footer found. Just truncate. 3009 original_image_size = footer.original_image_size 3010 image.truncate(footer.original_image_size) 3011 except (LookupError, struct.error): 3012 original_image_size = image.image_size 3013 3014 # If anything goes wrong from here-on, restore the image back to 3015 # its original size. 3016 try: 3017 # Ensure image is multiple of block_size. 3018 rounded_image_size = round_to_multiple(image.image_size, block_size) 3019 if rounded_image_size > image.image_size: 3020 image.append_raw('\0' * (rounded_image_size - image.image_size)) 3021 3022 # If image size exceeds the maximum image size, fail. 3023 if image.image_size > max_image_size: 3024 raise AvbError('Image size of {} exceeds maximum image ' 3025 'size of {} in order to fit in a partition ' 3026 'size of {}.'.format(image.image_size, max_image_size, 3027 partition_size)) 3028 3029 if salt: 3030 salt = salt.decode('hex') 3031 else: 3032 if salt is None: 3033 # If salt is not explicitly specified, choose a hash 3034 # that's the same size as the hash size. 3035 hash_size = digest_size 3036 salt = open('/dev/urandom').read(hash_size) 3037 else: 3038 salt = '' 3039 3040 # Hashes are stored upside down so we need to calculate hash 3041 # offsets in advance. 3042 (hash_level_offsets, tree_size) = calc_hash_level_offsets( 3043 image.image_size, block_size, digest_size + digest_padding) 3044 3045 # If the image isn't sparse, its size might not be a multiple of 3046 # the block size. This will screw up padding later so just grow it. 3047 if image.image_size % image.block_size != 0: 3048 assert not image.is_sparse 3049 padding_needed = image.block_size - (image.image_size%image.block_size) 3050 image.truncate(image.image_size + padding_needed) 3051 3052 # Generate the tree and add padding as needed. 3053 tree_offset = image.image_size 3054 root_digest, hash_tree = generate_hash_tree(image, image.image_size, 3055 block_size, 3056 hash_algorithm, salt, 3057 digest_padding, 3058 hash_level_offsets, 3059 tree_size) 3060 3061 # Generate HashtreeDescriptor with details about the tree we 3062 # just generated. 3063 ht_desc = AvbHashtreeDescriptor() 3064 ht_desc.dm_verity_version = 1 3065 ht_desc.image_size = image.image_size 3066 ht_desc.tree_offset = tree_offset 3067 ht_desc.tree_size = tree_size 3068 ht_desc.data_block_size = block_size 3069 ht_desc.hash_block_size = block_size 3070 ht_desc.hash_algorithm = hash_algorithm 3071 ht_desc.partition_name = partition_name 3072 ht_desc.salt = salt 3073 ht_desc.root_digest = root_digest 3074 3075 # Write the hash tree 3076 padding_needed = (round_to_multiple(len(hash_tree), image.block_size) - 3077 len(hash_tree)) 3078 hash_tree_with_padding = hash_tree + '\0'*padding_needed 3079 image.append_raw(hash_tree_with_padding) 3080 len_hashtree_and_fec = len(hash_tree_with_padding) 3081 3082 # Generate FEC codes, if requested. 3083 if generate_fec: 3084 fec_data = generate_fec_data(image_filename, fec_num_roots) 3085 padding_needed = (round_to_multiple(len(fec_data), image.block_size) - 3086 len(fec_data)) 3087 fec_data_with_padding = fec_data + '\0'*padding_needed 3088 fec_offset = image.image_size 3089 image.append_raw(fec_data_with_padding) 3090 len_hashtree_and_fec += len(fec_data_with_padding) 3091 # Update the hashtree descriptor. 3092 ht_desc.fec_num_roots = fec_num_roots 3093 ht_desc.fec_offset = fec_offset 3094 ht_desc.fec_size = len(fec_data) 3095 3096 ht_desc_to_setup = None 3097 if setup_as_rootfs_from_kernel: 3098 ht_desc_to_setup = ht_desc 3099 3100 # Generate the VBMeta footer and add padding as needed. 3101 vbmeta_offset = tree_offset + len_hashtree_and_fec 3102 vbmeta_blob = self._generate_vbmeta_blob( 3103 algorithm_name, key_path, public_key_metadata_path, [ht_desc], 3104 chain_partitions, rollback_index, flags, props, props_from_file, 3105 kernel_cmdlines, setup_rootfs_from_kernel, ht_desc_to_setup, 3106 include_descriptors_from_image, signing_helper, 3107 signing_helper_with_files, release_string, 3108 append_to_release_string) 3109 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) - 3110 len(vbmeta_blob)) 3111 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed 3112 3113 # Write vbmeta blob, if requested. 3114 if output_vbmeta_image: 3115 output_vbmeta_image.write(vbmeta_blob) 3116 3117 # Append vbmeta blob and footer, unless requested not to. 3118 if not do_not_append_vbmeta_image: 3119 image.append_raw(vbmeta_blob_with_padding) 3120 3121 # Now insert a DONT_CARE chunk with enough bytes such that the 3122 # final Footer block is at the end of partition_size.. 3123 image.append_dont_care(partition_size - image.image_size - 3124 1*image.block_size) 3125 3126 # Generate the Footer that tells where the VBMeta footer 3127 # is. Also put enough padding in the front of the footer since 3128 # we'll write out an entire block. 3129 footer = AvbFooter() 3130 footer.original_image_size = original_image_size 3131 footer.vbmeta_offset = vbmeta_offset 3132 footer.vbmeta_size = len(vbmeta_blob) 3133 footer_blob = footer.encode() 3134 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) + 3135 footer_blob) 3136 image.append_raw(footer_blob_with_padding) 3137 3138 except: 3139 # Truncate back to original size, then re-raise. 3140 image.truncate(original_image_size) 3141 raise 3142 3143 def make_atx_certificate(self, output, authority_key_path, subject_key_path, 3144 subject_key_version, subject, 3145 is_intermediate_authority, signing_helper, 3146 signing_helper_with_files): 3147 """Implements the 'make_atx_certificate' command. 3148 3149 Android Things certificates are required for Android Things public key 3150 metadata. They chain the vbmeta signing key for a particular product back to 3151 a fused, permanent root key. These certificates are fixed-length and fixed- 3152 format with the explicit goal of not parsing ASN.1 in bootloader code. 3153 3154 Arguments: 3155 output: Certificate will be written to this file on success. 3156 authority_key_path: A PEM file path with the authority private key. 3157 If None, then a certificate will be created without a 3158 signature. The signature can be created out-of-band 3159 and appended. 3160 subject_key_path: Path to a PEM or DER subject public key. 3161 subject_key_version: A 64-bit version value. If this is None, the number 3162 of seconds since the epoch is used. 3163 subject: A subject identifier. For Product Signing Key certificates this 3164 should be the same Product ID found in the permanent attributes. 3165 is_intermediate_authority: True if the certificate is for an intermediate 3166 authority. 3167 signing_helper: Program which signs a hash and returns the signature. 3168 signing_helper_with_files: Same as signing_helper but uses files instead. 3169 """ 3170 signed_data = bytearray() 3171 signed_data.extend(struct.pack('<I', 1)) # Format Version 3172 signed_data.extend(encode_rsa_key(subject_key_path)) 3173 hasher = hashlib.sha256() 3174 hasher.update(subject) 3175 signed_data.extend(hasher.digest()) 3176 usage = 'com.google.android.things.vboot' 3177 if is_intermediate_authority: 3178 usage += '.ca' 3179 hasher = hashlib.sha256() 3180 hasher.update(usage) 3181 signed_data.extend(hasher.digest()) 3182 if not subject_key_version: 3183 subject_key_version = int(time.time()) 3184 signed_data.extend(struct.pack('<Q', subject_key_version)) 3185 signature = bytearray() 3186 if authority_key_path: 3187 padding_and_hash = bytearray() 3188 algorithm_name = 'SHA512_RSA4096' 3189 alg = ALGORITHMS[algorithm_name] 3190 hasher = hashlib.sha512() 3191 padding_and_hash.extend(alg.padding) 3192 hasher.update(signed_data) 3193 padding_and_hash.extend(hasher.digest()) 3194 signature.extend(raw_sign(signing_helper, signing_helper_with_files, 3195 algorithm_name, 3196 alg.signature_num_bytes, authority_key_path, 3197 padding_and_hash)) 3198 output.write(signed_data) 3199 output.write(signature) 3200 3201 def make_atx_permanent_attributes(self, output, root_authority_key_path, 3202 product_id): 3203 """Implements the 'make_atx_permanent_attributes' command. 3204 3205 Android Things permanent attributes are designed to be permanent for a 3206 particular product and a hash of these attributes should be fused into 3207 hardware to enforce this. 3208 3209 Arguments: 3210 output: Attributes will be written to this file on success. 3211 root_authority_key_path: Path to a PEM or DER public key for 3212 the root authority. 3213 product_id: A 16-byte Product ID. 3214 3215 Raises: 3216 AvbError: If an argument is incorrect. 3217 """ 3218 EXPECTED_PRODUCT_ID_SIZE = 16 3219 if len(product_id) != EXPECTED_PRODUCT_ID_SIZE: 3220 raise AvbError('Invalid Product ID length.') 3221 output.write(struct.pack('<I', 1)) # Format Version 3222 output.write(encode_rsa_key(root_authority_key_path)) 3223 output.write(product_id) 3224 3225 def make_atx_metadata(self, output, intermediate_key_certificate, 3226 product_key_certificate): 3227 """Implements the 'make_atx_metadata' command. 3228 3229 Android Things metadata are included in vbmeta images to facilitate 3230 verification. The output of this command can be used as the 3231 public_key_metadata argument to other commands. 3232 3233 Arguments: 3234 output: Metadata will be written to this file on success. 3235 intermediate_key_certificate: A certificate file as output by 3236 make_atx_certificate with 3237 is_intermediate_authority set to true. 3238 product_key_certificate: A certificate file as output by 3239 make_atx_certificate with 3240 is_intermediate_authority set to false. 3241 3242 Raises: 3243 AvbError: If an argument is incorrect. 3244 """ 3245 EXPECTED_CERTIFICATE_SIZE = 1620 3246 if len(intermediate_key_certificate) != EXPECTED_CERTIFICATE_SIZE: 3247 raise AvbError('Invalid intermediate key certificate length.') 3248 if len(product_key_certificate) != EXPECTED_CERTIFICATE_SIZE: 3249 raise AvbError('Invalid product key certificate length.') 3250 output.write(struct.pack('<I', 1)) # Format Version 3251 output.write(intermediate_key_certificate) 3252 output.write(product_key_certificate) 3253 3254 3255 def calc_hash_level_offsets(image_size, block_size, digest_size): 3256 """Calculate the offsets of all the hash-levels in a Merkle-tree. 3257 3258 Arguments: 3259 image_size: The size of the image to calculate a Merkle-tree for. 3260 block_size: The block size, e.g. 4096. 3261 digest_size: The size of each hash, e.g. 32 for SHA-256. 3262 3263 Returns: 3264 A tuple where the first argument is an array of offsets and the 3265 second is size of the tree, in bytes. 3266 """ 3267 level_offsets = [] 3268 level_sizes = [] 3269 tree_size = 0 3270 3271 num_levels = 0 3272 size = image_size 3273 while size > block_size: 3274 num_blocks = (size + block_size - 1) / block_size 3275 level_size = round_to_multiple(num_blocks * digest_size, block_size) 3276 3277 level_sizes.append(level_size) 3278 tree_size += level_size 3279 num_levels += 1 3280 3281 size = level_size 3282 3283 for n in range(0, num_levels): 3284 offset = 0 3285 for m in range(n + 1, num_levels): 3286 offset += level_sizes[m] 3287 level_offsets.append(offset) 3288 3289 return level_offsets, tree_size 3290 3291 3292 # See system/extras/libfec/include/fec/io.h for these definitions. 3293 FEC_FOOTER_FORMAT = '<LLLLLQ32s' 3294 FEC_MAGIC = 0xfecfecfe 3295 3296 3297 def calc_fec_data_size(image_size, num_roots): 3298 """Calculates how much space FEC data will take. 3299 3300 Args: 3301 image_size: The size of the image. 3302 num_roots: Number of roots. 3303 3304 Returns: 3305 The number of bytes needed for FEC for an image of the given size 3306 and with the requested number of FEC roots. 3307 3308 Raises: 3309 ValueError: If output from the 'fec' tool is invalid. 3310 3311 """ 3312 p = subprocess.Popen( 3313 ['fec', '--print-fec-size', str(image_size), '--roots', str(num_roots)], 3314 stdout=subprocess.PIPE, 3315 stderr=subprocess.PIPE) 3316 (pout, perr) = p.communicate() 3317 retcode = p.wait() 3318 if retcode != 0: 3319 raise ValueError('Error invoking fec: {}'.format(perr)) 3320 return int(pout) 3321 3322 3323 def generate_fec_data(image_filename, num_roots): 3324 """Generate FEC codes for an image. 3325 3326 Args: 3327 image_filename: The filename of the image. 3328 num_roots: Number of roots. 3329 3330 Returns: 3331 The FEC data blob. 3332 3333 Raises: 3334 ValueError: If output from the 'fec' tool is invalid. 3335 """ 3336 fec_tmpfile = tempfile.NamedTemporaryFile() 3337 subprocess.check_call( 3338 ['fec', '--encode', '--roots', str(num_roots), image_filename, 3339 fec_tmpfile.name], 3340 stderr=open(os.devnull)) 3341 fec_data = fec_tmpfile.read() 3342 footer_size = struct.calcsize(FEC_FOOTER_FORMAT) 3343 footer_data = fec_data[-footer_size:] 3344 (magic, _, _, num_roots, fec_size, _, _) = struct.unpack(FEC_FOOTER_FORMAT, 3345 footer_data) 3346 if magic != FEC_MAGIC: 3347 raise ValueError('Unexpected magic in FEC footer') 3348 return fec_data[0:fec_size] 3349 3350 3351 def generate_hash_tree(image, image_size, block_size, hash_alg_name, salt, 3352 digest_padding, hash_level_offsets, tree_size): 3353 """Generates a Merkle-tree for a file. 3354 3355 Args: 3356 image: The image, as a file. 3357 image_size: The size of the image. 3358 block_size: The block size, e.g. 4096. 3359 hash_alg_name: The hash algorithm, e.g. 'sha256' or 'sha1'. 3360 salt: The salt to use. 3361 digest_padding: The padding for each digest. 3362 hash_level_offsets: The offsets from calc_hash_level_offsets(). 3363 tree_size: The size of the tree, in number of bytes. 3364 3365 Returns: 3366 A tuple where the first element is the top-level hash and the 3367 second element is the hash-tree. 3368 """ 3369 hash_ret = bytearray(tree_size) 3370 hash_src_offset = 0 3371 hash_src_size = image_size 3372 level_num = 0 3373 while hash_src_size > block_size: 3374 level_output = '' 3375 remaining = hash_src_size 3376 while remaining > 0: 3377 hasher = hashlib.new(name=hash_alg_name, string=salt) 3378 # Only read from the file for the first level - for subsequent 3379 # levels, access the array we're building. 3380 if level_num == 0: 3381 image.seek(hash_src_offset + hash_src_size - remaining) 3382 data = image.read(min(remaining, block_size)) 3383 else: 3384 offset = hash_level_offsets[level_num - 1] + hash_src_size - remaining 3385 data = hash_ret[offset:offset + block_size] 3386 hasher.update(data) 3387 3388 remaining -= len(data) 3389 if len(data) < block_size: 3390 hasher.update('\0' * (block_size - len(data))) 3391 level_output += hasher.digest() 3392 if digest_padding > 0: 3393 level_output += '\0' * digest_padding 3394 3395 padding_needed = (round_to_multiple( 3396 len(level_output), block_size) - len(level_output)) 3397 level_output += '\0' * padding_needed 3398 3399 # Copy level-output into resulting tree. 3400 offset = hash_level_offsets[level_num] 3401 hash_ret[offset:offset + len(level_output)] = level_output 3402 3403 # Continue on to the next level. 3404 hash_src_size = len(level_output) 3405 level_num += 1 3406 3407 hasher = hashlib.new(name=hash_alg_name, string=salt) 3408 hasher.update(level_output) 3409 return hasher.digest(), hash_ret 3410 3411 3412 class AvbTool(object): 3413 """Object for avbtool command-line tool.""" 3414 3415 def __init__(self): 3416 """Initializer method.""" 3417 self.avb = Avb() 3418 3419 def _add_common_args(self, sub_parser): 3420 """Adds arguments used by several sub-commands. 3421 3422 Arguments: 3423 sub_parser: The parser to add arguments to. 3424 """ 3425 sub_parser.add_argument('--algorithm', 3426 help='Algorithm to use (default: NONE)', 3427 metavar='ALGORITHM', 3428 default='NONE') 3429 sub_parser.add_argument('--key', 3430 help='Path to RSA private key file', 3431 metavar='KEY', 3432 required=False) 3433 sub_parser.add_argument('--signing_helper', 3434 help='Path to helper used for signing', 3435 metavar='APP', 3436 default=None, 3437 required=False) 3438 sub_parser.add_argument('--signing_helper_with_files', 3439 help='Path to helper used for signing using files', 3440 metavar='APP', 3441 default=None, 3442 required=False) 3443 sub_parser.add_argument('--public_key_metadata', 3444 help='Path to public key metadata file', 3445 metavar='KEY_METADATA', 3446 required=False) 3447 sub_parser.add_argument('--rollback_index', 3448 help='Rollback Index', 3449 type=parse_number, 3450 default=0) 3451 # This is used internally for unit tests. Do not include in --help output. 3452 sub_parser.add_argument('--internal_release_string', 3453 help=argparse.SUPPRESS) 3454 sub_parser.add_argument('--append_to_release_string', 3455 help='Text to append to release string', 3456 metavar='STR') 3457 sub_parser.add_argument('--prop', 3458 help='Add property', 3459 metavar='KEY:VALUE', 3460 action='append') 3461 sub_parser.add_argument('--prop_from_file', 3462 help='Add property from file', 3463 metavar='KEY:PATH', 3464 action='append') 3465 sub_parser.add_argument('--kernel_cmdline', 3466 help='Add kernel cmdline', 3467 metavar='CMDLINE', 3468 action='append') 3469 # TODO(zeuthen): the --setup_rootfs_from_kernel option used to be called 3470 # --generate_dm_verity_cmdline_from_hashtree. Remove support for the latter 3471 # at some future point. 3472 sub_parser.add_argument('--setup_rootfs_from_kernel', 3473 '--generate_dm_verity_cmdline_from_hashtree', 3474 metavar='IMAGE', 3475 help='Adds kernel cmdline to set up IMAGE', 3476 type=argparse.FileType('rb')) 3477 sub_parser.add_argument('--include_descriptors_from_image', 3478 help='Include descriptors from image', 3479 metavar='IMAGE', 3480 action='append', 3481 type=argparse.FileType('rb')) 3482 sub_parser.add_argument('--print_required_libavb_version', 3483 help=('Don\'t store the footer - ' 3484 'instead calculate the required libavb ' 3485 'version for the given options.'), 3486 action='store_true') 3487 # These are only allowed from top-level vbmeta and boot-in-lieu-of-vbmeta. 3488 sub_parser.add_argument('--chain_partition', 3489 help='Allow signed integrity-data for partition', 3490 metavar='PART_NAME:ROLLBACK_SLOT:KEY_PATH', 3491 action='append') 3492 sub_parser.add_argument('--flags', 3493 help='VBMeta flags', 3494 type=parse_number, 3495 default=0) 3496 sub_parser.add_argument('--set_hashtree_disabled_flag', 3497 help='Set the HASHTREE_DISABLED flag', 3498 action='store_true') 3499 3500 def _fixup_common_args(self, args): 3501 """Common fixups needed by subcommands. 3502 3503 Arguments: 3504 args: Arguments to modify. 3505 3506 Returns: 3507 The modified arguments. 3508 """ 3509 if args.set_hashtree_disabled_flag: 3510 args.flags |= AVB_VBMETA_IMAGE_FLAGS_HASHTREE_DISABLED 3511 return args 3512 3513 def run(self, argv): 3514 """Command-line processor. 3515 3516 Arguments: 3517 argv: Pass sys.argv from main. 3518 """ 3519 parser = argparse.ArgumentParser() 3520 subparsers = parser.add_subparsers(title='subcommands') 3521 3522 sub_parser = subparsers.add_parser('version', 3523 help='Prints version of avbtool.') 3524 sub_parser.set_defaults(func=self.version) 3525 3526 sub_parser = subparsers.add_parser('extract_public_key', 3527 help='Extract public key.') 3528 sub_parser.add_argument('--key', 3529 help='Path to RSA private key file', 3530 required=True) 3531 sub_parser.add_argument('--output', 3532 help='Output file name', 3533 type=argparse.FileType('wb'), 3534 required=True) 3535 sub_parser.set_defaults(func=self.extract_public_key) 3536 3537 sub_parser = subparsers.add_parser('make_vbmeta_image', 3538 help='Makes a vbmeta image.') 3539 sub_parser.add_argument('--output', 3540 help='Output file name', 3541 type=argparse.FileType('wb')) 3542 sub_parser.add_argument('--padding_size', 3543 metavar='NUMBER', 3544 help='If non-zero, pads output with NUL bytes so ' 3545 'its size is a multiple of NUMBER (default: 0)', 3546 type=parse_number, 3547 default=0) 3548 self._add_common_args(sub_parser) 3549 sub_parser.set_defaults(func=self.make_vbmeta_image) 3550 3551 sub_parser = subparsers.add_parser('add_hash_footer', 3552 help='Add hashes and footer to image.') 3553 sub_parser.add_argument('--image', 3554 help='Image to add hashes to', 3555 type=argparse.FileType('rab+')) 3556 sub_parser.add_argument('--partition_size', 3557 help='Partition size', 3558 type=parse_number) 3559 sub_parser.add_argument('--partition_name', 3560 help='Partition name', 3561 default=None) 3562 sub_parser.add_argument('--hash_algorithm', 3563 help='Hash algorithm to use (default: sha256)', 3564 default='sha256') 3565 sub_parser.add_argument('--salt', 3566 help='Salt in hex (default: /dev/urandom)') 3567 sub_parser.add_argument('--calc_max_image_size', 3568 help=('Don\'t store the footer - ' 3569 'instead calculate the maximum image size ' 3570 'leaving enough room for metadata with ' 3571 'the given partition size.'), 3572 action='store_true') 3573 sub_parser.add_argument('--output_vbmeta_image', 3574 help='Also write vbmeta struct to file', 3575 type=argparse.FileType('wb')) 3576 sub_parser.add_argument('--do_not_append_vbmeta_image', 3577 help=('Do not append vbmeta struct or footer ' 3578 'to the image'), 3579 action='store_true') 3580 self._add_common_args(sub_parser) 3581 sub_parser.set_defaults(func=self.add_hash_footer) 3582 3583 sub_parser = subparsers.add_parser('append_vbmeta_image', 3584 help='Append vbmeta image to image.') 3585 sub_parser.add_argument('--image', 3586 help='Image to append vbmeta blob to', 3587 type=argparse.FileType('rab+')) 3588 sub_parser.add_argument('--partition_size', 3589 help='Partition size', 3590 type=parse_number, 3591 required=True) 3592 sub_parser.add_argument('--vbmeta_image', 3593 help='Image with vbmeta blob to append', 3594 type=argparse.FileType('rb')) 3595 sub_parser.set_defaults(func=self.append_vbmeta_image) 3596 3597 sub_parser = subparsers.add_parser('add_hashtree_footer', 3598 help='Add hashtree and footer to image.') 3599 sub_parser.add_argument('--image', 3600 help='Image to add hashtree to', 3601 type=argparse.FileType('rab+')) 3602 sub_parser.add_argument('--partition_size', 3603 help='Partition size', 3604 type=parse_number) 3605 sub_parser.add_argument('--partition_name', 3606 help='Partition name', 3607 default=None) 3608 sub_parser.add_argument('--hash_algorithm', 3609 help='Hash algorithm to use (default: sha1)', 3610 default='sha1') 3611 sub_parser.add_argument('--salt', 3612 help='Salt in hex (default: /dev/urandom)') 3613 sub_parser.add_argument('--block_size', 3614 help='Block size (default: 4096)', 3615 type=parse_number, 3616 default=4096) 3617 # TODO(zeuthen): The --generate_fec option was removed when we 3618 # moved to generating FEC by default. To avoid breaking existing 3619 # users needing to transition we simply just print a warning below 3620 # in add_hashtree_footer(). Remove this option and the warning at 3621 # some point in the future. 3622 sub_parser.add_argument('--generate_fec', 3623 help=argparse.SUPPRESS, 3624 action='store_true') 3625 sub_parser.add_argument('--do_not_generate_fec', 3626 help='Do not generate forward-error-correction codes', 3627 action='store_true') 3628 sub_parser.add_argument('--fec_num_roots', 3629 help='Number of roots for FEC (default: 2)', 3630 type=parse_number, 3631 default=2) 3632 sub_parser.add_argument('--calc_max_image_size', 3633 help=('Don\'t store the hashtree or footer - ' 3634 'instead calculate the maximum image size ' 3635 'leaving enough room for hashtree ' 3636 'and metadata with the given partition ' 3637 'size.'), 3638 action='store_true') 3639 sub_parser.add_argument('--output_vbmeta_image', 3640 help='Also write vbmeta struct to file', 3641 type=argparse.FileType('wb')) 3642 sub_parser.add_argument('--do_not_append_vbmeta_image', 3643 help=('Do not append vbmeta struct or footer ' 3644 'to the image'), 3645 action='store_true') 3646 # This is different from --setup_rootfs_from_kernel insofar that 3647 # it doesn't take an IMAGE, the generated cmdline will be for the 3648 # hashtree we're adding. 3649 sub_parser.add_argument('--setup_as_rootfs_from_kernel', 3650 action='store_true', 3651 help='Adds kernel cmdline for setting up rootfs') 3652 self._add_common_args(sub_parser) 3653 sub_parser.set_defaults(func=self.add_hashtree_footer) 3654 3655 sub_parser = subparsers.add_parser('erase_footer', 3656 help='Erase footer from an image.') 3657 sub_parser.add_argument('--image', 3658 help='Image with a footer', 3659 type=argparse.FileType('rwb+'), 3660 required=True) 3661 sub_parser.add_argument('--keep_hashtree', 3662 help='Keep the hashtree and FEC in the image', 3663 action='store_true') 3664 sub_parser.set_defaults(func=self.erase_footer) 3665 3666 sub_parser = subparsers.add_parser('resize_image', 3667 help='Resize image with a footer.') 3668 sub_parser.add_argument('--image', 3669 help='Image with a footer', 3670 type=argparse.FileType('rwb+'), 3671 required=True) 3672 sub_parser.add_argument('--partition_size', 3673 help='New partition size', 3674 type=parse_number) 3675 sub_parser.set_defaults(func=self.resize_image) 3676 3677 sub_parser = subparsers.add_parser( 3678 'info_image', 3679 help='Show information about vbmeta or footer.') 3680 sub_parser.add_argument('--image', 3681 help='Image to show information about', 3682 type=argparse.FileType('rb'), 3683 required=True) 3684 sub_parser.add_argument('--output', 3685 help='Write info to file', 3686 type=argparse.FileType('wt'), 3687 default=sys.stdout) 3688 sub_parser.set_defaults(func=self.info_image) 3689 3690 sub_parser = subparsers.add_parser( 3691 'verify_image', 3692 help='Verify an image.') 3693 sub_parser.add_argument('--image', 3694 help='Image to verify', 3695 type=argparse.FileType('rb'), 3696 required=True) 3697 sub_parser.add_argument('--key', 3698 help='Check embedded public key matches KEY', 3699 metavar='KEY', 3700 required=False) 3701 sub_parser.add_argument('--expected_chain_partition', 3702 help='Expected chain partition', 3703 metavar='PART_NAME:ROLLBACK_SLOT:KEY_PATH', 3704 action='append') 3705 sub_parser.set_defaults(func=self.verify_image) 3706 3707 sub_parser = subparsers.add_parser('set_ab_metadata', 3708 help='Set A/B metadata.') 3709 sub_parser.add_argument('--misc_image', 3710 help=('The misc image to modify. If the image does ' 3711 'not exist, it will be created.'), 3712 type=argparse.FileType('r+b'), 3713 required=True) 3714 sub_parser.add_argument('--slot_data', 3715 help=('Slot data of the form "priority", ' 3716 '"tries_remaining", "sucessful_boot" for ' 3717 'slot A followed by the same for slot B, ' 3718 'separated by colons. The default value ' 3719 'is 15:7:0:14:7:0.'), 3720 default='15:7:0:14:7:0') 3721 sub_parser.set_defaults(func=self.set_ab_metadata) 3722 3723 sub_parser = subparsers.add_parser( 3724 'make_atx_certificate', 3725 help='Create an Android Things eXtension (ATX) certificate.') 3726 sub_parser.add_argument('--output', 3727 help='Write certificate to file', 3728 type=argparse.FileType('wb'), 3729 default=sys.stdout) 3730 sub_parser.add_argument('--subject', 3731 help=('Path to subject file'), 3732 type=argparse.FileType('rb'), 3733 required=True) 3734 sub_parser.add_argument('--subject_key', 3735 help=('Path to subject RSA public key file'), 3736 type=argparse.FileType('rb'), 3737 required=True) 3738 sub_parser.add_argument('--subject_key_version', 3739 help=('Version of the subject key'), 3740 type=parse_number, 3741 required=False) 3742 sub_parser.add_argument('--subject_is_intermediate_authority', 3743 help=('Generate an intermediate authority ' 3744 'certificate'), 3745 action='store_true') 3746 sub_parser.add_argument('--authority_key', 3747 help='Path to authority RSA private key file', 3748 required=False) 3749 sub_parser.add_argument('--signing_helper', 3750 help='Path to helper used for signing', 3751 metavar='APP', 3752 default=None, 3753 required=False) 3754 sub_parser.add_argument('--signing_helper_with_files', 3755 help='Path to helper used for signing using files', 3756 metavar='APP', 3757 default=None, 3758 required=False) 3759 sub_parser.set_defaults(func=self.make_atx_certificate) 3760 3761 sub_parser = subparsers.add_parser( 3762 'make_atx_permanent_attributes', 3763 help='Create Android Things eXtension (ATX) permanent attributes.') 3764 sub_parser.add_argument('--output', 3765 help='Write attributes to file', 3766 type=argparse.FileType('wb'), 3767 default=sys.stdout) 3768 sub_parser.add_argument('--root_authority_key', 3769 help='Path to authority RSA public key file', 3770 type=argparse.FileType('rb'), 3771 required=True) 3772 sub_parser.add_argument('--product_id', 3773 help=('Path to Product ID file'), 3774 type=argparse.FileType('rb'), 3775 required=True) 3776 sub_parser.set_defaults(func=self.make_atx_permanent_attributes) 3777 3778 sub_parser = subparsers.add_parser( 3779 'make_atx_metadata', 3780 help='Create Android Things eXtension (ATX) metadata.') 3781 sub_parser.add_argument('--output', 3782 help='Write metadata to file', 3783 type=argparse.FileType('wb'), 3784 default=sys.stdout) 3785 sub_parser.add_argument('--intermediate_key_certificate', 3786 help='Path to intermediate key certificate file', 3787 type=argparse.FileType('rb'), 3788 required=True) 3789 sub_parser.add_argument('--product_key_certificate', 3790 help='Path to product key certificate file', 3791 type=argparse.FileType('rb'), 3792 required=True) 3793 sub_parser.set_defaults(func=self.make_atx_metadata) 3794 3795 args = parser.parse_args(argv[1:]) 3796 try: 3797 args.func(args) 3798 except AvbError as e: 3799 sys.stderr.write('{}: {}\n'.format(argv[0], e.message)) 3800 sys.exit(1) 3801 3802 def version(self, _): 3803 """Implements the 'version' sub-command.""" 3804 print get_release_string() 3805 3806 def extract_public_key(self, args): 3807 """Implements the 'extract_public_key' sub-command.""" 3808 self.avb.extract_public_key(args.key, args.output) 3809 3810 def make_vbmeta_image(self, args): 3811 """Implements the 'make_vbmeta_image' sub-command.""" 3812 args = self._fixup_common_args(args) 3813 self.avb.make_vbmeta_image(args.output, args.chain_partition, 3814 args.algorithm, args.key, 3815 args.public_key_metadata, args.rollback_index, 3816 args.flags, args.prop, args.prop_from_file, 3817 args.kernel_cmdline, 3818 args.setup_rootfs_from_kernel, 3819 args.include_descriptors_from_image, 3820 args.signing_helper, 3821 args.signing_helper_with_files, 3822 args.internal_release_string, 3823 args.append_to_release_string, 3824 args.print_required_libavb_version, 3825 args.padding_size) 3826 3827 def append_vbmeta_image(self, args): 3828 """Implements the 'append_vbmeta_image' sub-command.""" 3829 self.avb.append_vbmeta_image(args.image.name, args.vbmeta_image.name, 3830 args.partition_size) 3831 3832 def add_hash_footer(self, args): 3833 """Implements the 'add_hash_footer' sub-command.""" 3834 args = self._fixup_common_args(args) 3835 self.avb.add_hash_footer(args.image.name if args.image else None, 3836 args.partition_size, 3837 args.partition_name, args.hash_algorithm, 3838 args.salt, args.chain_partition, args.algorithm, 3839 args.key, 3840 args.public_key_metadata, args.rollback_index, 3841 args.flags, args.prop, args.prop_from_file, 3842 args.kernel_cmdline, 3843 args.setup_rootfs_from_kernel, 3844 args.include_descriptors_from_image, 3845 args.calc_max_image_size, 3846 args.signing_helper, 3847 args.signing_helper_with_files, 3848 args.internal_release_string, 3849 args.append_to_release_string, 3850 args.output_vbmeta_image, 3851 args.do_not_append_vbmeta_image, 3852 args.print_required_libavb_version) 3853 3854 def add_hashtree_footer(self, args): 3855 """Implements the 'add_hashtree_footer' sub-command.""" 3856 args = self._fixup_common_args(args) 3857 # TODO(zeuthen): Remove when removing support for the 3858 # '--generate_fec' option above. 3859 if args.generate_fec: 3860 sys.stderr.write('The --generate_fec option is deprecated since FEC ' 3861 'is now generated by default. Use the option ' 3862 '--do_not_generate_fec to not generate FEC.\n') 3863 self.avb.add_hashtree_footer(args.image.name if args.image else None, 3864 args.partition_size, 3865 args.partition_name, 3866 not args.do_not_generate_fec, args.fec_num_roots, 3867 args.hash_algorithm, args.block_size, 3868 args.salt, args.chain_partition, args.algorithm, 3869 args.key, args.public_key_metadata, 3870 args.rollback_index, args.flags, args.prop, 3871 args.prop_from_file, 3872 args.kernel_cmdline, 3873 args.setup_rootfs_from_kernel, 3874 args.setup_as_rootfs_from_kernel, 3875 args.include_descriptors_from_image, 3876 args.calc_max_image_size, 3877 args.signing_helper, 3878 args.signing_helper_with_files, 3879 args.internal_release_string, 3880 args.append_to_release_string, 3881 args.output_vbmeta_image, 3882 args.do_not_append_vbmeta_image, 3883 args.print_required_libavb_version) 3884 3885 def erase_footer(self, args): 3886 """Implements the 'erase_footer' sub-command.""" 3887 self.avb.erase_footer(args.image.name, args.keep_hashtree) 3888 3889 def resize_image(self, args): 3890 """Implements the 'resize_image' sub-command.""" 3891 self.avb.resize_image(args.image.name, args.partition_size) 3892 3893 def set_ab_metadata(self, args): 3894 """Implements the 'set_ab_metadata' sub-command.""" 3895 self.avb.set_ab_metadata(args.misc_image, args.slot_data) 3896 3897 def info_image(self, args): 3898 """Implements the 'info_image' sub-command.""" 3899 self.avb.info_image(args.image.name, args.output) 3900 3901 def verify_image(self, args): 3902 """Implements the 'verify_image' sub-command.""" 3903 self.avb.verify_image(args.image.name, args.key, 3904 args.expected_chain_partition) 3905 3906 def make_atx_certificate(self, args): 3907 """Implements the 'make_atx_certificate' sub-command.""" 3908 self.avb.make_atx_certificate(args.output, args.authority_key, 3909 args.subject_key.name, 3910 args.subject_key_version, 3911 args.subject.read(), 3912 args.subject_is_intermediate_authority, 3913 args.signing_helper, 3914 args.signing_helper_with_files) 3915 3916 def make_atx_permanent_attributes(self, args): 3917 """Implements the 'make_atx_permanent_attributes' sub-command.""" 3918 self.avb.make_atx_permanent_attributes(args.output, 3919 args.root_authority_key.name, 3920 args.product_id.read()) 3921 3922 def make_atx_metadata(self, args): 3923 """Implements the 'make_atx_metadata' sub-command.""" 3924 self.avb.make_atx_metadata(args.output, 3925 args.intermediate_key_certificate.read(), 3926 args.product_key_certificate.read()) 3927 3928 3929 if __name__ == '__main__': 3930 tool = AvbTool() 3931 tool.run(sys.argv) 3932