1 #!/usr/bin/env python 2 3 # Copyright 2016, The Android Open Source Project 4 # 5 # Permission is hereby granted, free of charge, to any person 6 # obtaining a copy of this software and associated documentation 7 # files (the "Software"), to deal in the Software without 8 # restriction, including without limitation the rights to use, copy, 9 # modify, merge, publish, distribute, sublicense, and/or sell copies 10 # of the Software, and to permit persons to whom the Software is 11 # furnished to do so, subject to the following conditions: 12 # 13 # The above copyright notice and this permission notice shall be 14 # included in all copies or substantial portions of the Software. 15 # 16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 20 # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 21 # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 22 # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 # SOFTWARE. 24 # 25 """Command-line tool for working with Android Verified Boot images.""" 26 27 import argparse 28 import binascii 29 import bisect 30 import hashlib 31 import math 32 import os 33 import struct 34 import subprocess 35 import sys 36 import tempfile 37 import time 38 39 # Keep in sync with libavb/avb_version.h. 40 AVB_VERSION_MAJOR = 1 41 AVB_VERSION_MINOR = 1 42 AVB_VERSION_SUB = 0 43 44 # Keep in sync with libavb/avb_footer.h. 45 AVB_FOOTER_VERSION_MAJOR = 1 46 AVB_FOOTER_VERSION_MINOR = 0 47 48 AVB_VBMETA_IMAGE_FLAGS_HASHTREE_DISABLED = 1 49 50 51 class AvbError(Exception): 52 """Application-specific errors. 53 54 These errors represent issues for which a stack-trace should not be 55 presented. 56 57 Attributes: 58 message: Error message. 59 """ 60 61 def __init__(self, message): 62 Exception.__init__(self, message) 63 64 65 class Algorithm(object): 66 """Contains details about an algorithm. 67 68 See the avb_vbmeta_header.h file for more details about 69 algorithms. 70 71 The constant |ALGORITHMS| is a dictionary from human-readable 72 names (e.g 'SHA256_RSA2048') to instances of this class. 73 74 Attributes: 75 algorithm_type: Integer code corresponding to |AvbAlgorithmType|. 76 hash_name: Empty or a name from |hashlib.algorithms|. 77 hash_num_bytes: Number of bytes used to store the hash. 78 signature_num_bytes: Number of bytes used to store the signature. 79 public_key_num_bytes: Number of bytes used to store the public key. 80 padding: Padding used for signature, if any. 81 """ 82 83 def __init__(self, algorithm_type, hash_name, hash_num_bytes, 84 signature_num_bytes, public_key_num_bytes, padding): 85 self.algorithm_type = algorithm_type 86 self.hash_name = hash_name 87 self.hash_num_bytes = hash_num_bytes 88 self.signature_num_bytes = signature_num_bytes 89 self.public_key_num_bytes = public_key_num_bytes 90 self.padding = padding 91 92 93 # This must be kept in sync with the avb_crypto.h file. 94 # 95 # The PKC1-v1.5 padding is a blob of binary DER of ASN.1 and is 96 # obtained from section 5.2.2 of RFC 4880. 97 ALGORITHMS = { 98 'NONE': Algorithm( 99 algorithm_type=0, # AVB_ALGORITHM_TYPE_NONE 100 hash_name='', 101 hash_num_bytes=0, 102 signature_num_bytes=0, 103 public_key_num_bytes=0, 104 padding=[]), 105 'SHA256_RSA2048': Algorithm( 106 algorithm_type=1, # AVB_ALGORITHM_TYPE_SHA256_RSA2048 107 hash_name='sha256', 108 hash_num_bytes=32, 109 signature_num_bytes=256, 110 public_key_num_bytes=8 + 2*2048/8, 111 padding=[ 112 # PKCS1-v1_5 padding 113 0x00, 0x01] + [0xff]*202 + [0x00] + [ 114 # ASN.1 header 115 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 116 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 117 0x00, 0x04, 0x20, 118 ]), 119 'SHA256_RSA4096': Algorithm( 120 algorithm_type=2, # AVB_ALGORITHM_TYPE_SHA256_RSA4096 121 hash_name='sha256', 122 hash_num_bytes=32, 123 signature_num_bytes=512, 124 public_key_num_bytes=8 + 2*4096/8, 125 padding=[ 126 # PKCS1-v1_5 padding 127 0x00, 0x01] + [0xff]*458 + [0x00] + [ 128 # ASN.1 header 129 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 130 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 131 0x00, 0x04, 0x20, 132 ]), 133 'SHA256_RSA8192': Algorithm( 134 algorithm_type=3, # AVB_ALGORITHM_TYPE_SHA256_RSA8192 135 hash_name='sha256', 136 hash_num_bytes=32, 137 signature_num_bytes=1024, 138 public_key_num_bytes=8 + 2*8192/8, 139 padding=[ 140 # PKCS1-v1_5 padding 141 0x00, 0x01] + [0xff]*970 + [0x00] + [ 142 # ASN.1 header 143 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 144 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 145 0x00, 0x04, 0x20, 146 ]), 147 'SHA512_RSA2048': Algorithm( 148 algorithm_type=4, # AVB_ALGORITHM_TYPE_SHA512_RSA2048 149 hash_name='sha512', 150 hash_num_bytes=64, 151 signature_num_bytes=256, 152 public_key_num_bytes=8 + 2*2048/8, 153 padding=[ 154 # PKCS1-v1_5 padding 155 0x00, 0x01] + [0xff]*170 + [0x00] + [ 156 # ASN.1 header 157 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 158 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 159 0x00, 0x04, 0x40 160 ]), 161 'SHA512_RSA4096': Algorithm( 162 algorithm_type=5, # AVB_ALGORITHM_TYPE_SHA512_RSA4096 163 hash_name='sha512', 164 hash_num_bytes=64, 165 signature_num_bytes=512, 166 public_key_num_bytes=8 + 2*4096/8, 167 padding=[ 168 # PKCS1-v1_5 padding 169 0x00, 0x01] + [0xff]*426 + [0x00] + [ 170 # ASN.1 header 171 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 172 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 173 0x00, 0x04, 0x40 174 ]), 175 'SHA512_RSA8192': Algorithm( 176 algorithm_type=6, # AVB_ALGORITHM_TYPE_SHA512_RSA8192 177 hash_name='sha512', 178 hash_num_bytes=64, 179 signature_num_bytes=1024, 180 public_key_num_bytes=8 + 2*8192/8, 181 padding=[ 182 # PKCS1-v1_5 padding 183 0x00, 0x01] + [0xff]*938 + [0x00] + [ 184 # ASN.1 header 185 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 186 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 187 0x00, 0x04, 0x40 188 ]), 189 } 190 191 192 def get_release_string(): 193 """Calculates the release string to use in the VBMeta struct.""" 194 # Keep in sync with libavb/avb_version.c:avb_version_string(). 195 return 'avbtool {}.{}.{}'.format(AVB_VERSION_MAJOR, 196 AVB_VERSION_MINOR, 197 AVB_VERSION_SUB) 198 199 200 def round_to_multiple(number, size): 201 """Rounds a number up to nearest multiple of another number. 202 203 Args: 204 number: The number to round up. 205 size: The multiple to round up to. 206 207 Returns: 208 If |number| is a multiple of |size|, returns |number|, otherwise 209 returns |number| + |size|. 210 """ 211 remainder = number % size 212 if remainder == 0: 213 return number 214 return number + size - remainder 215 216 217 def round_to_pow2(number): 218 """Rounds a number up to the next power of 2. 219 220 Args: 221 number: The number to round up. 222 223 Returns: 224 If |number| is already a power of 2 then |number| is 225 returned. Otherwise the smallest power of 2 greater than |number| 226 is returned. 227 """ 228 return 2**((number - 1).bit_length()) 229 230 231 def encode_long(num_bits, value): 232 """Encodes a long to a bytearray() using a given amount of bits. 233 234 This number is written big-endian, e.g. with the most significant 235 bit first. 236 237 This is the reverse of decode_long(). 238 239 Arguments: 240 num_bits: The number of bits to write, e.g. 2048. 241 value: The value to write. 242 243 Returns: 244 A bytearray() with the encoded long. 245 """ 246 ret = bytearray() 247 for bit_pos in range(num_bits, 0, -8): 248 octet = (value >> (bit_pos - 8)) & 0xff 249 ret.extend(struct.pack('!B', octet)) 250 return ret 251 252 253 def decode_long(blob): 254 """Decodes a long from a bytearray() using a given amount of bits. 255 256 This number is expected to be in big-endian, e.g. with the most 257 significant bit first. 258 259 This is the reverse of encode_long(). 260 261 Arguments: 262 value: A bytearray() with the encoded long. 263 264 Returns: 265 The decoded value. 266 """ 267 ret = 0 268 for b in bytearray(blob): 269 ret *= 256 270 ret += b 271 return ret 272 273 274 def egcd(a, b): 275 """Calculate greatest common divisor of two numbers. 276 277 This implementation uses a recursive version of the extended 278 Euclidian algorithm. 279 280 Arguments: 281 a: First number. 282 b: Second number. 283 284 Returns: 285 A tuple (gcd, x, y) that where |gcd| is the greatest common 286 divisor of |a| and |b| and |a|*|x| + |b|*|y| = |gcd|. 287 """ 288 if a == 0: 289 return (b, 0, 1) 290 else: 291 g, y, x = egcd(b % a, a) 292 return (g, x - (b // a) * y, y) 293 294 295 def modinv(a, m): 296 """Calculate modular multiplicative inverse of |a| modulo |m|. 297 298 This calculates the number |x| such that |a| * |x| == 1 (modulo 299 |m|). This number only exists if |a| and |m| are co-prime - |None| 300 is returned if this isn't true. 301 302 Arguments: 303 a: The number to calculate a modular inverse of. 304 m: The modulo to use. 305 306 Returns: 307 The modular multiplicative inverse of |a| and |m| or |None| if 308 these numbers are not co-prime. 309 """ 310 gcd, x, _ = egcd(a, m) 311 if gcd != 1: 312 return None # modular inverse does not exist 313 else: 314 return x % m 315 316 317 def parse_number(string): 318 """Parse a string as a number. 319 320 This is just a short-hand for int(string, 0) suitable for use in the 321 |type| parameter of |ArgumentParser|'s add_argument() function. An 322 improvement to just using type=int is that this function supports 323 numbers in other bases, e.g. "0x1234". 324 325 Arguments: 326 string: The string to parse. 327 328 Returns: 329 The parsed integer. 330 331 Raises: 332 ValueError: If the number could not be parsed. 333 """ 334 return int(string, 0) 335 336 337 class RSAPublicKey(object): 338 """Data structure used for a RSA public key. 339 340 Attributes: 341 exponent: The key exponent. 342 modulus: The key modulus. 343 num_bits: The key size. 344 """ 345 346 MODULUS_PREFIX = 'modulus=' 347 348 def __init__(self, key_path): 349 """Loads and parses an RSA key from either a private or public key file. 350 351 Arguments: 352 key_path: The path to a key file. 353 """ 354 # We used to have something as simple as this: 355 # 356 # key = Crypto.PublicKey.RSA.importKey(open(key_path).read()) 357 # self.exponent = key.e 358 # self.modulus = key.n 359 # self.num_bits = key.size() + 1 360 # 361 # but unfortunately PyCrypto is not available in the builder. So 362 # instead just parse openssl(1) output to get this 363 # information. It's ugly but... 364 args = ['openssl', 'rsa', '-in', key_path, '-modulus', '-noout'] 365 p = subprocess.Popen(args, 366 stdin=subprocess.PIPE, 367 stdout=subprocess.PIPE, 368 stderr=subprocess.PIPE) 369 (pout, perr) = p.communicate() 370 if p.wait() != 0: 371 # Could be just a public key is passed, try that. 372 args.append('-pubin') 373 p = subprocess.Popen(args, 374 stdin=subprocess.PIPE, 375 stdout=subprocess.PIPE, 376 stderr=subprocess.PIPE) 377 (pout, perr) = p.communicate() 378 if p.wait() != 0: 379 raise AvbError('Error getting public key: {}'.format(perr)) 380 381 if not pout.lower().startswith(self.MODULUS_PREFIX): 382 raise AvbError('Unexpected modulus output') 383 384 modulus_hexstr = pout[len(self.MODULUS_PREFIX):] 385 386 # The exponent is assumed to always be 65537 and the number of 387 # bits can be derived from the modulus by rounding up to the 388 # nearest power of 2. 389 self.modulus = int(modulus_hexstr, 16) 390 self.num_bits = round_to_pow2(int(math.ceil(math.log(self.modulus, 2)))) 391 self.exponent = 65537 392 393 394 def encode_rsa_key(key_path): 395 """Encodes a public RSA key in |AvbRSAPublicKeyHeader| format. 396 397 This creates a |AvbRSAPublicKeyHeader| as well as the two large 398 numbers (|key_num_bits| bits long) following it. 399 400 Arguments: 401 key_path: The path to a key file. 402 403 Returns: 404 A bytearray() with the |AvbRSAPublicKeyHeader|. 405 """ 406 key = RSAPublicKey(key_path) 407 if key.exponent != 65537: 408 raise AvbError('Only RSA keys with exponent 65537 are supported.') 409 ret = bytearray() 410 # Calculate n0inv = -1/n[0] (mod 2^32) 411 b = 2L**32 412 n0inv = b - modinv(key.modulus, b) 413 # Calculate rr = r^2 (mod N), where r = 2^(# of key bits) 414 r = 2L**key.modulus.bit_length() 415 rrmodn = r * r % key.modulus 416 ret.extend(struct.pack('!II', key.num_bits, n0inv)) 417 ret.extend(encode_long(key.num_bits, key.modulus)) 418 ret.extend(encode_long(key.num_bits, rrmodn)) 419 return ret 420 421 422 def lookup_algorithm_by_type(alg_type): 423 """Looks up algorithm by type. 424 425 Arguments: 426 alg_type: The integer representing the type. 427 428 Returns: 429 A tuple with the algorithm name and an |Algorithm| instance. 430 431 Raises: 432 Exception: If the algorithm cannot be found 433 """ 434 for alg_name in ALGORITHMS: 435 alg_data = ALGORITHMS[alg_name] 436 if alg_data.algorithm_type == alg_type: 437 return (alg_name, alg_data) 438 raise AvbError('Unknown algorithm type {}'.format(alg_type)) 439 440 441 def raw_sign(signing_helper, signing_helper_with_files, 442 algorithm_name, signature_num_bytes, key_path, 443 raw_data_to_sign): 444 """Computes a raw RSA signature using |signing_helper| or openssl. 445 446 Arguments: 447 signing_helper: Program which signs a hash and returns the signature. 448 signing_helper_with_files: Same as signing_helper but uses files instead. 449 algorithm_name: The algorithm name as per the ALGORITHMS dict. 450 signature_num_bytes: Number of bytes used to store the signature. 451 key_path: Path to the private key file. Must be PEM format. 452 raw_data_to_sign: Data to sign (bytearray or str expected). 453 454 Returns: 455 A bytearray containing the signature. 456 457 Raises: 458 Exception: If an error occurs. 459 """ 460 p = None 461 if signing_helper_with_files is not None: 462 signing_file = tempfile.NamedTemporaryFile() 463 signing_file.write(str(raw_data_to_sign)) 464 signing_file.flush() 465 p = subprocess.Popen( 466 [signing_helper_with_files, algorithm_name, key_path, signing_file.name]) 467 retcode = p.wait() 468 if retcode != 0: 469 raise AvbError('Error signing') 470 signing_file.seek(0) 471 signature = bytearray(signing_file.read()) 472 else: 473 if signing_helper is not None: 474 p = subprocess.Popen( 475 [signing_helper, algorithm_name, key_path], 476 stdin=subprocess.PIPE, 477 stdout=subprocess.PIPE, 478 stderr=subprocess.PIPE) 479 else: 480 p = subprocess.Popen( 481 ['openssl', 'rsautl', '-sign', '-inkey', key_path, '-raw'], 482 stdin=subprocess.PIPE, 483 stdout=subprocess.PIPE, 484 stderr=subprocess.PIPE) 485 (pout, perr) = p.communicate(str(raw_data_to_sign)) 486 retcode = p.wait() 487 if retcode != 0: 488 raise AvbError('Error signing: {}'.format(perr)) 489 signature = bytearray(pout) 490 if len(signature) != signature_num_bytes: 491 raise AvbError('Error signing: Invalid length of signature') 492 return signature 493 494 495 def verify_vbmeta_signature(vbmeta_header, vbmeta_blob): 496 """Checks that the signature in a vbmeta blob was made by 497 the embedded public key. 498 499 Arguments: 500 vbmeta_header: A AvbVBMetaHeader. 501 vbmeta_blob: The whole vbmeta blob, including the header. 502 503 Returns: 504 True if the signature is valid and corresponds to the embedded 505 public key. Also returns True if the vbmeta blob is not signed. 506 """ 507 (_, alg) = lookup_algorithm_by_type(vbmeta_header.algorithm_type) 508 if alg.hash_name == '': 509 return True 510 header_blob = vbmeta_blob[0:256] 511 auth_offset = 256 512 aux_offset = auth_offset + vbmeta_header.authentication_data_block_size 513 aux_size = vbmeta_header.auxiliary_data_block_size 514 aux_blob = vbmeta_blob[aux_offset:aux_offset + aux_size] 515 pubkey_offset = aux_offset + vbmeta_header.public_key_offset 516 pubkey_size = vbmeta_header.public_key_size 517 pubkey_blob = vbmeta_blob[pubkey_offset:pubkey_offset + pubkey_size] 518 519 digest_offset = auth_offset + vbmeta_header.hash_offset 520 digest_size = vbmeta_header.hash_size 521 digest_blob = vbmeta_blob[digest_offset:digest_offset + digest_size] 522 523 sig_offset = auth_offset + vbmeta_header.signature_offset 524 sig_size = vbmeta_header.signature_size 525 sig_blob = vbmeta_blob[sig_offset:sig_offset + sig_size] 526 527 # Now that we've got the stored digest, public key, and signature 528 # all we need to do is to verify. This is the exactly the same 529 # steps as performed in the avb_vbmeta_image_verify() function in 530 # libavb/avb_vbmeta_image.c. 531 532 ha = hashlib.new(alg.hash_name) 533 ha.update(header_blob) 534 ha.update(aux_blob) 535 computed_digest = ha.digest() 536 537 if computed_digest != digest_blob: 538 return False 539 540 padding_and_digest = bytearray(alg.padding) 541 padding_and_digest.extend(computed_digest) 542 543 (num_bits,) = struct.unpack('!I', pubkey_blob[0:4]) 544 modulus_blob = pubkey_blob[8:8 + num_bits/8] 545 modulus = decode_long(modulus_blob) 546 exponent = 65537 547 548 # For now, just use Crypto.PublicKey.RSA to verify the signature. This 549 # is OK since 'avbtool verify_image' is not expected to run on the 550 # Android builders (see bug #36809096). 551 import Crypto.PublicKey.RSA 552 key = Crypto.PublicKey.RSA.construct((modulus, long(exponent))) 553 if not key.verify(decode_long(padding_and_digest), 554 (decode_long(sig_blob), None)): 555 return False 556 return True 557 558 559 class ImageChunk(object): 560 """Data structure used for representing chunks in Android sparse files. 561 562 Attributes: 563 chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE. 564 chunk_offset: Offset in the sparse file where this chunk begins. 565 output_offset: Offset in de-sparsified file where output begins. 566 output_size: Number of bytes in output. 567 input_offset: Offset in sparse file for data if TYPE_RAW otherwise None. 568 fill_data: Blob with data to fill if TYPE_FILL otherwise None. 569 """ 570 571 FORMAT = '<2H2I' 572 TYPE_RAW = 0xcac1 573 TYPE_FILL = 0xcac2 574 TYPE_DONT_CARE = 0xcac3 575 TYPE_CRC32 = 0xcac4 576 577 def __init__(self, chunk_type, chunk_offset, output_offset, output_size, 578 input_offset, fill_data): 579 """Initializes an ImageChunk object. 580 581 Arguments: 582 chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE. 583 chunk_offset: Offset in the sparse file where this chunk begins. 584 output_offset: Offset in de-sparsified file. 585 output_size: Number of bytes in output. 586 input_offset: Offset in sparse file if TYPE_RAW otherwise None. 587 fill_data: Blob with data to fill if TYPE_FILL otherwise None. 588 589 Raises: 590 ValueError: If data is not well-formed. 591 """ 592 self.chunk_type = chunk_type 593 self.chunk_offset = chunk_offset 594 self.output_offset = output_offset 595 self.output_size = output_size 596 self.input_offset = input_offset 597 self.fill_data = fill_data 598 # Check invariants. 599 if self.chunk_type == self.TYPE_RAW: 600 if self.fill_data is not None: 601 raise ValueError('RAW chunk cannot have fill_data set.') 602 if not self.input_offset: 603 raise ValueError('RAW chunk must have input_offset set.') 604 elif self.chunk_type == self.TYPE_FILL: 605 if self.fill_data is None: 606 raise ValueError('FILL chunk must have fill_data set.') 607 if self.input_offset: 608 raise ValueError('FILL chunk cannot have input_offset set.') 609 elif self.chunk_type == self.TYPE_DONT_CARE: 610 if self.fill_data is not None: 611 raise ValueError('DONT_CARE chunk cannot have fill_data set.') 612 if self.input_offset: 613 raise ValueError('DONT_CARE chunk cannot have input_offset set.') 614 else: 615 raise ValueError('Invalid chunk type') 616 617 618 class ImageHandler(object): 619 """Abstraction for image I/O with support for Android sparse images. 620 621 This class provides an interface for working with image files that 622 may be using the Android Sparse Image format. When an instance is 623 constructed, we test whether it's an Android sparse file. If so, 624 operations will be on the sparse file by interpreting the sparse 625 format, otherwise they will be directly on the file. Either way the 626 operations do the same. 627 628 For reading, this interface mimics a file object - it has seek(), 629 tell(), and read() methods. For writing, only truncation 630 (truncate()) and appending is supported (append_raw() and 631 append_dont_care()). Additionally, data can only be written in units 632 of the block size. 633 634 Attributes: 635 is_sparse: Whether the file being operated on is sparse. 636 block_size: The block size, typically 4096. 637 image_size: The size of the unsparsified file. 638 """ 639 # See system/core/libsparse/sparse_format.h for details. 640 MAGIC = 0xed26ff3a 641 HEADER_FORMAT = '<I4H4I' 642 643 # These are formats and offset of just the |total_chunks| and 644 # |total_blocks| fields. 645 NUM_CHUNKS_AND_BLOCKS_FORMAT = '<II' 646 NUM_CHUNKS_AND_BLOCKS_OFFSET = 16 647 648 def __init__(self, image_filename): 649 """Initializes an image handler. 650 651 Arguments: 652 image_filename: The name of the file to operate on. 653 654 Raises: 655 ValueError: If data in the file is invalid. 656 """ 657 self._image_filename = image_filename 658 self._read_header() 659 660 def _read_header(self): 661 """Initializes internal data structures used for reading file. 662 663 This may be called multiple times and is typically called after 664 modifying the file (e.g. appending, truncation). 665 666 Raises: 667 ValueError: If data in the file is invalid. 668 """ 669 self.is_sparse = False 670 self.block_size = 4096 671 self._file_pos = 0 672 self._image = open(self._image_filename, 'r+b') 673 self._image.seek(0, os.SEEK_END) 674 self.image_size = self._image.tell() 675 676 self._image.seek(0, os.SEEK_SET) 677 header_bin = self._image.read(struct.calcsize(self.HEADER_FORMAT)) 678 (magic, major_version, minor_version, file_hdr_sz, chunk_hdr_sz, 679 block_size, self._num_total_blocks, self._num_total_chunks, 680 _) = struct.unpack(self.HEADER_FORMAT, header_bin) 681 if magic != self.MAGIC: 682 # Not a sparse image, our job here is done. 683 return 684 if not (major_version == 1 and minor_version == 0): 685 raise ValueError('Encountered sparse image format version {}.{} but ' 686 'only 1.0 is supported'.format(major_version, 687 minor_version)) 688 if file_hdr_sz != struct.calcsize(self.HEADER_FORMAT): 689 raise ValueError('Unexpected file_hdr_sz value {}.'. 690 format(file_hdr_sz)) 691 if chunk_hdr_sz != struct.calcsize(ImageChunk.FORMAT): 692 raise ValueError('Unexpected chunk_hdr_sz value {}.'. 693 format(chunk_hdr_sz)) 694 695 self.block_size = block_size 696 697 # Build an list of chunks by parsing the file. 698 self._chunks = [] 699 700 # Find the smallest offset where only "Don't care" chunks 701 # follow. This will be the size of the content in the sparse 702 # image. 703 offset = 0 704 output_offset = 0 705 for _ in xrange(1, self._num_total_chunks + 1): 706 chunk_offset = self._image.tell() 707 708 header_bin = self._image.read(struct.calcsize(ImageChunk.FORMAT)) 709 (chunk_type, _, chunk_sz, total_sz) = struct.unpack(ImageChunk.FORMAT, 710 header_bin) 711 data_sz = total_sz - struct.calcsize(ImageChunk.FORMAT) 712 713 if chunk_type == ImageChunk.TYPE_RAW: 714 if data_sz != (chunk_sz * self.block_size): 715 raise ValueError('Raw chunk input size ({}) does not match output ' 716 'size ({})'. 717 format(data_sz, chunk_sz*self.block_size)) 718 self._chunks.append(ImageChunk(ImageChunk.TYPE_RAW, 719 chunk_offset, 720 output_offset, 721 chunk_sz*self.block_size, 722 self._image.tell(), 723 None)) 724 self._image.read(data_sz) 725 726 elif chunk_type == ImageChunk.TYPE_FILL: 727 if data_sz != 4: 728 raise ValueError('Fill chunk should have 4 bytes of fill, but this ' 729 'has {}'.format(data_sz)) 730 fill_data = self._image.read(4) 731 self._chunks.append(ImageChunk(ImageChunk.TYPE_FILL, 732 chunk_offset, 733 output_offset, 734 chunk_sz*self.block_size, 735 None, 736 fill_data)) 737 elif chunk_type == ImageChunk.TYPE_DONT_CARE: 738 if data_sz != 0: 739 raise ValueError('Don\'t care chunk input size is non-zero ({})'. 740 format(data_sz)) 741 self._chunks.append(ImageChunk(ImageChunk.TYPE_DONT_CARE, 742 chunk_offset, 743 output_offset, 744 chunk_sz*self.block_size, 745 None, 746 None)) 747 elif chunk_type == ImageChunk.TYPE_CRC32: 748 if data_sz != 4: 749 raise ValueError('CRC32 chunk should have 4 bytes of CRC, but ' 750 'this has {}'.format(data_sz)) 751 self._image.read(4) 752 else: 753 raise ValueError('Unknown chunk type {}'.format(chunk_type)) 754 755 offset += chunk_sz 756 output_offset += chunk_sz*self.block_size 757 758 # Record where sparse data end. 759 self._sparse_end = self._image.tell() 760 761 # Now that we've traversed all chunks, sanity check. 762 if self._num_total_blocks != offset: 763 raise ValueError('The header said we should have {} output blocks, ' 764 'but we saw {}'.format(self._num_total_blocks, offset)) 765 junk_len = len(self._image.read()) 766 if junk_len > 0: 767 raise ValueError('There were {} bytes of extra data at the end of the ' 768 'file.'.format(junk_len)) 769 770 # Assign |image_size|. 771 self.image_size = output_offset 772 773 # This is used when bisecting in read() to find the initial slice. 774 self._chunk_output_offsets = [i.output_offset for i in self._chunks] 775 776 self.is_sparse = True 777 778 def _update_chunks_and_blocks(self): 779 """Helper function to update the image header. 780 781 The the |total_chunks| and |total_blocks| fields in the header 782 will be set to value of the |_num_total_blocks| and 783 |_num_total_chunks| attributes. 784 785 """ 786 self._image.seek(self.NUM_CHUNKS_AND_BLOCKS_OFFSET, os.SEEK_SET) 787 self._image.write(struct.pack(self.NUM_CHUNKS_AND_BLOCKS_FORMAT, 788 self._num_total_blocks, 789 self._num_total_chunks)) 790 791 def append_dont_care(self, num_bytes): 792 """Appends a DONT_CARE chunk to the sparse file. 793 794 The given number of bytes must be a multiple of the block size. 795 796 Arguments: 797 num_bytes: Size in number of bytes of the DONT_CARE chunk. 798 """ 799 assert num_bytes % self.block_size == 0 800 801 if not self.is_sparse: 802 self._image.seek(0, os.SEEK_END) 803 # This is more efficient that writing NUL bytes since it'll add 804 # a hole on file systems that support sparse files (native 805 # sparse, not Android sparse). 806 self._image.truncate(self._image.tell() + num_bytes) 807 self._read_header() 808 return 809 810 self._num_total_chunks += 1 811 self._num_total_blocks += num_bytes / self.block_size 812 self._update_chunks_and_blocks() 813 814 self._image.seek(self._sparse_end, os.SEEK_SET) 815 self._image.write(struct.pack(ImageChunk.FORMAT, 816 ImageChunk.TYPE_DONT_CARE, 817 0, # Reserved 818 num_bytes / self.block_size, 819 struct.calcsize(ImageChunk.FORMAT))) 820 self._read_header() 821 822 def append_raw(self, data): 823 """Appends a RAW chunk to the sparse file. 824 825 The length of the given data must be a multiple of the block size. 826 827 Arguments: 828 data: Data to append. 829 """ 830 assert len(data) % self.block_size == 0 831 832 if not self.is_sparse: 833 self._image.seek(0, os.SEEK_END) 834 self._image.write(data) 835 self._read_header() 836 return 837 838 self._num_total_chunks += 1 839 self._num_total_blocks += len(data) / self.block_size 840 self._update_chunks_and_blocks() 841 842 self._image.seek(self._sparse_end, os.SEEK_SET) 843 self._image.write(struct.pack(ImageChunk.FORMAT, 844 ImageChunk.TYPE_RAW, 845 0, # Reserved 846 len(data) / self.block_size, 847 len(data) + 848 struct.calcsize(ImageChunk.FORMAT))) 849 self._image.write(data) 850 self._read_header() 851 852 def append_fill(self, fill_data, size): 853 """Appends a fill chunk to the sparse file. 854 855 The total length of the fill data must be a multiple of the block size. 856 857 Arguments: 858 fill_data: Fill data to append - must be four bytes. 859 size: Number of chunk - must be a multiple of four and the block size. 860 """ 861 assert len(fill_data) == 4 862 assert size % 4 == 0 863 assert size % self.block_size == 0 864 865 if not self.is_sparse: 866 self._image.seek(0, os.SEEK_END) 867 self._image.write(fill_data * (size/4)) 868 self._read_header() 869 return 870 871 self._num_total_chunks += 1 872 self._num_total_blocks += size / self.block_size 873 self._update_chunks_and_blocks() 874 875 self._image.seek(self._sparse_end, os.SEEK_SET) 876 self._image.write(struct.pack(ImageChunk.FORMAT, 877 ImageChunk.TYPE_FILL, 878 0, # Reserved 879 size / self.block_size, 880 4 + struct.calcsize(ImageChunk.FORMAT))) 881 self._image.write(fill_data) 882 self._read_header() 883 884 def seek(self, offset): 885 """Sets the cursor position for reading from unsparsified file. 886 887 Arguments: 888 offset: Offset to seek to from the beginning of the file. 889 """ 890 if offset < 0: 891 raise RuntimeError("Seeking with negative offset: %d" % offset) 892 self._file_pos = offset 893 894 def read(self, size): 895 """Reads data from the unsparsified file. 896 897 This method may return fewer than |size| bytes of data if the end 898 of the file was encountered. 899 900 The file cursor for reading is advanced by the number of bytes 901 read. 902 903 Arguments: 904 size: Number of bytes to read. 905 906 Returns: 907 The data. 908 909 """ 910 if not self.is_sparse: 911 self._image.seek(self._file_pos) 912 data = self._image.read(size) 913 self._file_pos += len(data) 914 return data 915 916 # Iterate over all chunks. 917 chunk_idx = bisect.bisect_right(self._chunk_output_offsets, 918 self._file_pos) - 1 919 data = bytearray() 920 to_go = size 921 while to_go > 0: 922 chunk = self._chunks[chunk_idx] 923 chunk_pos_offset = self._file_pos - chunk.output_offset 924 chunk_pos_to_go = min(chunk.output_size - chunk_pos_offset, to_go) 925 926 if chunk.chunk_type == ImageChunk.TYPE_RAW: 927 self._image.seek(chunk.input_offset + chunk_pos_offset) 928 data.extend(self._image.read(chunk_pos_to_go)) 929 elif chunk.chunk_type == ImageChunk.TYPE_FILL: 930 all_data = chunk.fill_data*(chunk_pos_to_go/len(chunk.fill_data) + 2) 931 offset_mod = chunk_pos_offset % len(chunk.fill_data) 932 data.extend(all_data[offset_mod:(offset_mod + chunk_pos_to_go)]) 933 else: 934 assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE 935 data.extend('\0' * chunk_pos_to_go) 936 937 to_go -= chunk_pos_to_go 938 self._file_pos += chunk_pos_to_go 939 chunk_idx += 1 940 # Generate partial read in case of EOF. 941 if chunk_idx >= len(self._chunks): 942 break 943 944 return data 945 946 def tell(self): 947 """Returns the file cursor position for reading from unsparsified file. 948 949 Returns: 950 The file cursor position for reading. 951 """ 952 return self._file_pos 953 954 def truncate(self, size): 955 """Truncates the unsparsified file. 956 957 Arguments: 958 size: Desired size of unsparsified file. 959 960 Raises: 961 ValueError: If desired size isn't a multiple of the block size. 962 """ 963 if not self.is_sparse: 964 self._image.truncate(size) 965 self._read_header() 966 return 967 968 if size % self.block_size != 0: 969 raise ValueError('Cannot truncate to a size which is not a multiple ' 970 'of the block size') 971 972 if size == self.image_size: 973 # Trivial where there's nothing to do. 974 return 975 elif size < self.image_size: 976 chunk_idx = bisect.bisect_right(self._chunk_output_offsets, size) - 1 977 chunk = self._chunks[chunk_idx] 978 if chunk.output_offset != size: 979 # Truncation in the middle of a trunk - need to keep the chunk 980 # and modify it. 981 chunk_idx_for_update = chunk_idx + 1 982 num_to_keep = size - chunk.output_offset 983 assert num_to_keep % self.block_size == 0 984 if chunk.chunk_type == ImageChunk.TYPE_RAW: 985 truncate_at = (chunk.chunk_offset + 986 struct.calcsize(ImageChunk.FORMAT) + num_to_keep) 987 data_sz = num_to_keep 988 elif chunk.chunk_type == ImageChunk.TYPE_FILL: 989 truncate_at = (chunk.chunk_offset + 990 struct.calcsize(ImageChunk.FORMAT) + 4) 991 data_sz = 4 992 else: 993 assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE 994 truncate_at = chunk.chunk_offset + struct.calcsize(ImageChunk.FORMAT) 995 data_sz = 0 996 chunk_sz = num_to_keep/self.block_size 997 total_sz = data_sz + struct.calcsize(ImageChunk.FORMAT) 998 self._image.seek(chunk.chunk_offset) 999 self._image.write(struct.pack(ImageChunk.FORMAT, 1000 chunk.chunk_type, 1001 0, # Reserved 1002 chunk_sz, 1003 total_sz)) 1004 chunk.output_size = num_to_keep 1005 else: 1006 # Truncation at trunk boundary. 1007 truncate_at = chunk.chunk_offset 1008 chunk_idx_for_update = chunk_idx 1009 1010 self._num_total_chunks = chunk_idx_for_update 1011 self._num_total_blocks = 0 1012 for i in range(0, chunk_idx_for_update): 1013 self._num_total_blocks += self._chunks[i].output_size / self.block_size 1014 self._update_chunks_and_blocks() 1015 self._image.truncate(truncate_at) 1016 1017 # We've modified the file so re-read all data. 1018 self._read_header() 1019 else: 1020 # Truncating to grow - just add a DONT_CARE section. 1021 self.append_dont_care(size - self.image_size) 1022 1023 1024 class AvbDescriptor(object): 1025 """Class for AVB descriptor. 1026 1027 See the |AvbDescriptor| C struct for more information. 1028 1029 Attributes: 1030 tag: The tag identifying what kind of descriptor this is. 1031 data: The data in the descriptor. 1032 """ 1033 1034 SIZE = 16 1035 FORMAT_STRING = ('!QQ') # tag, num_bytes_following (descriptor header) 1036 1037 def __init__(self, data): 1038 """Initializes a new property descriptor. 1039 1040 Arguments: 1041 data: If not None, must be a bytearray(). 1042 1043 Raises: 1044 LookupError: If the given descriptor is malformed. 1045 """ 1046 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1047 1048 if data: 1049 (self.tag, num_bytes_following) = ( 1050 struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])) 1051 self.data = data[self.SIZE:self.SIZE + num_bytes_following] 1052 else: 1053 self.tag = None 1054 self.data = None 1055 1056 def print_desc(self, o): 1057 """Print the descriptor. 1058 1059 Arguments: 1060 o: The object to write the output to. 1061 """ 1062 o.write(' Unknown descriptor:\n') 1063 o.write(' Tag: {}\n'.format(self.tag)) 1064 if len(self.data) < 256: 1065 o.write(' Data: {} ({} bytes)\n'.format( 1066 repr(str(self.data)), len(self.data))) 1067 else: 1068 o.write(' Data: {} bytes\n'.format(len(self.data))) 1069 1070 def encode(self): 1071 """Serializes the descriptor. 1072 1073 Returns: 1074 A bytearray() with the descriptor data. 1075 """ 1076 num_bytes_following = len(self.data) 1077 nbf_with_padding = round_to_multiple(num_bytes_following, 8) 1078 padding_size = nbf_with_padding - num_bytes_following 1079 desc = struct.pack(self.FORMAT_STRING, self.tag, nbf_with_padding) 1080 padding = struct.pack(str(padding_size) + 'x') 1081 ret = desc + self.data + padding 1082 return bytearray(ret) 1083 1084 def verify(self, image_dir, image_ext, expected_chain_partitions_map): 1085 """Verifies contents of the descriptor - used in verify_image sub-command. 1086 1087 Arguments: 1088 image_dir: The directory of the file being verified. 1089 image_ext: The extension of the file being verified (e.g. '.img'). 1090 expected_chain_partitions_map: A map from partition name to the 1091 tuple (rollback_index_location, key_blob). 1092 1093 Returns: 1094 True if the descriptor verifies, False otherwise. 1095 """ 1096 # Nothing to do. 1097 return True 1098 1099 class AvbPropertyDescriptor(AvbDescriptor): 1100 """A class for property descriptors. 1101 1102 See the |AvbPropertyDescriptor| C struct for more information. 1103 1104 Attributes: 1105 key: The key. 1106 value: The key. 1107 """ 1108 1109 TAG = 0 1110 SIZE = 32 1111 FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header) 1112 'Q' # key size (bytes) 1113 'Q') # value size (bytes) 1114 1115 def __init__(self, data=None): 1116 """Initializes a new property descriptor. 1117 1118 Arguments: 1119 data: If not None, must be a bytearray of size |SIZE|. 1120 1121 Raises: 1122 LookupError: If the given descriptor is malformed. 1123 """ 1124 AvbDescriptor.__init__(self, None) 1125 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1126 1127 if data: 1128 (tag, num_bytes_following, key_size, 1129 value_size) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE]) 1130 expected_size = round_to_multiple( 1131 self.SIZE - 16 + key_size + 1 + value_size + 1, 8) 1132 if tag != self.TAG or num_bytes_following != expected_size: 1133 raise LookupError('Given data does not look like a property ' 1134 'descriptor.') 1135 self.key = data[self.SIZE:(self.SIZE + key_size)] 1136 self.value = data[(self.SIZE + key_size + 1):(self.SIZE + key_size + 1 + 1137 value_size)] 1138 else: 1139 self.key = '' 1140 self.value = '' 1141 1142 def print_desc(self, o): 1143 """Print the descriptor. 1144 1145 Arguments: 1146 o: The object to write the output to. 1147 """ 1148 if len(self.value) < 256: 1149 o.write(' Prop: {} -> {}\n'.format(self.key, repr(str(self.value)))) 1150 else: 1151 o.write(' Prop: {} -> ({} bytes)\n'.format(self.key, len(self.value))) 1152 1153 def encode(self): 1154 """Serializes the descriptor. 1155 1156 Returns: 1157 A bytearray() with the descriptor data. 1158 """ 1159 num_bytes_following = self.SIZE + len(self.key) + len(self.value) + 2 - 16 1160 nbf_with_padding = round_to_multiple(num_bytes_following, 8) 1161 padding_size = nbf_with_padding - num_bytes_following 1162 desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding, 1163 len(self.key), len(self.value)) 1164 padding = struct.pack(str(padding_size) + 'x') 1165 ret = desc + self.key + '\0' + self.value + '\0' + padding 1166 return bytearray(ret) 1167 1168 def verify(self, image_dir, image_ext, expected_chain_partitions_map): 1169 """Verifies contents of the descriptor - used in verify_image sub-command. 1170 1171 Arguments: 1172 image_dir: The directory of the file being verified. 1173 image_ext: The extension of the file being verified (e.g. '.img'). 1174 expected_chain_partitions_map: A map from partition name to the 1175 tuple (rollback_index_location, key_blob). 1176 1177 Returns: 1178 True if the descriptor verifies, False otherwise. 1179 """ 1180 # Nothing to do. 1181 return True 1182 1183 class AvbHashtreeDescriptor(AvbDescriptor): 1184 """A class for hashtree descriptors. 1185 1186 See the |AvbHashtreeDescriptor| C struct for more information. 1187 1188 Attributes: 1189 dm_verity_version: dm-verity version used. 1190 image_size: Size of the image, after rounding up to |block_size|. 1191 tree_offset: Offset of the hash tree in the file. 1192 tree_size: Size of the tree. 1193 data_block_size: Data block size 1194 hash_block_size: Hash block size 1195 fec_num_roots: Number of roots used for FEC (0 if FEC is not used). 1196 fec_offset: Offset of FEC data (0 if FEC is not used). 1197 fec_size: Size of FEC data (0 if FEC is not used). 1198 hash_algorithm: Hash algorithm used. 1199 partition_name: Partition name. 1200 salt: Salt used. 1201 root_digest: Root digest. 1202 flags: Descriptor flags (see avb_hashtree_descriptor.h). 1203 """ 1204 1205 TAG = 1 1206 RESERVED = 60 1207 SIZE = 120 + RESERVED 1208 FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header) 1209 'L' # dm-verity version used 1210 'Q' # image size (bytes) 1211 'Q' # tree offset (bytes) 1212 'Q' # tree size (bytes) 1213 'L' # data block size (bytes) 1214 'L' # hash block size (bytes) 1215 'L' # FEC number of roots 1216 'Q' # FEC offset (bytes) 1217 'Q' # FEC size (bytes) 1218 '32s' # hash algorithm used 1219 'L' # partition name (bytes) 1220 'L' # salt length (bytes) 1221 'L' # root digest length (bytes) 1222 'L' + # flags 1223 str(RESERVED) + 's') # reserved 1224 1225 def __init__(self, data=None): 1226 """Initializes a new hashtree descriptor. 1227 1228 Arguments: 1229 data: If not None, must be a bytearray of size |SIZE|. 1230 1231 Raises: 1232 LookupError: If the given descriptor is malformed. 1233 """ 1234 AvbDescriptor.__init__(self, None) 1235 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1236 1237 if data: 1238 (tag, num_bytes_following, self.dm_verity_version, self.image_size, 1239 self.tree_offset, self.tree_size, self.data_block_size, 1240 self.hash_block_size, self.fec_num_roots, self.fec_offset, self.fec_size, 1241 self.hash_algorithm, partition_name_len, salt_len, 1242 root_digest_len, self.flags, _) = struct.unpack(self.FORMAT_STRING, 1243 data[0:self.SIZE]) 1244 expected_size = round_to_multiple( 1245 self.SIZE - 16 + partition_name_len + salt_len + root_digest_len, 8) 1246 if tag != self.TAG or num_bytes_following != expected_size: 1247 raise LookupError('Given data does not look like a hashtree ' 1248 'descriptor.') 1249 # Nuke NUL-bytes at the end. 1250 self.hash_algorithm = self.hash_algorithm.split('\0', 1)[0] 1251 o = 0 1252 self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o + 1253 partition_name_len)]) 1254 # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8. 1255 self.partition_name.decode('utf-8') 1256 o += partition_name_len 1257 self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)] 1258 o += salt_len 1259 self.root_digest = data[(self.SIZE + o):(self.SIZE + o + root_digest_len)] 1260 if root_digest_len != len(hashlib.new(name=self.hash_algorithm).digest()): 1261 if root_digest_len != 0: 1262 raise LookupError('root_digest_len doesn\'t match hash algorithm') 1263 1264 else: 1265 self.dm_verity_version = 0 1266 self.image_size = 0 1267 self.tree_offset = 0 1268 self.tree_size = 0 1269 self.data_block_size = 0 1270 self.hash_block_size = 0 1271 self.fec_num_roots = 0 1272 self.fec_offset = 0 1273 self.fec_size = 0 1274 self.hash_algorithm = '' 1275 self.partition_name = '' 1276 self.salt = bytearray() 1277 self.root_digest = bytearray() 1278 self.flags = 0 1279 1280 def print_desc(self, o): 1281 """Print the descriptor. 1282 1283 Arguments: 1284 o: The object to write the output to. 1285 """ 1286 o.write(' Hashtree descriptor:\n') 1287 o.write(' Version of dm-verity: {}\n'.format(self.dm_verity_version)) 1288 o.write(' Image Size: {} bytes\n'.format(self.image_size)) 1289 o.write(' Tree Offset: {}\n'.format(self.tree_offset)) 1290 o.write(' Tree Size: {} bytes\n'.format(self.tree_size)) 1291 o.write(' Data Block Size: {} bytes\n'.format( 1292 self.data_block_size)) 1293 o.write(' Hash Block Size: {} bytes\n'.format( 1294 self.hash_block_size)) 1295 o.write(' FEC num roots: {}\n'.format(self.fec_num_roots)) 1296 o.write(' FEC offset: {}\n'.format(self.fec_offset)) 1297 o.write(' FEC size: {} bytes\n'.format(self.fec_size)) 1298 o.write(' Hash Algorithm: {}\n'.format(self.hash_algorithm)) 1299 o.write(' Partition Name: {}\n'.format(self.partition_name)) 1300 o.write(' Salt: {}\n'.format(str(self.salt).encode( 1301 'hex'))) 1302 o.write(' Root Digest: {}\n'.format(str( 1303 self.root_digest).encode('hex'))) 1304 o.write(' Flags: {}\n'.format(self.flags)) 1305 1306 def encode(self): 1307 """Serializes the descriptor. 1308 1309 Returns: 1310 A bytearray() with the descriptor data. 1311 """ 1312 encoded_name = self.partition_name.encode('utf-8') 1313 num_bytes_following = (self.SIZE + len(encoded_name) + len(self.salt) + 1314 len(self.root_digest) - 16) 1315 nbf_with_padding = round_to_multiple(num_bytes_following, 8) 1316 padding_size = nbf_with_padding - num_bytes_following 1317 desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding, 1318 self.dm_verity_version, self.image_size, 1319 self.tree_offset, self.tree_size, self.data_block_size, 1320 self.hash_block_size, self.fec_num_roots, 1321 self.fec_offset, self.fec_size, self.hash_algorithm, 1322 len(encoded_name), len(self.salt), len(self.root_digest), 1323 self.flags, self.RESERVED*'\0') 1324 padding = struct.pack(str(padding_size) + 'x') 1325 ret = desc + encoded_name + self.salt + self.root_digest + padding 1326 return bytearray(ret) 1327 1328 def verify(self, image_dir, image_ext, expected_chain_partitions_map): 1329 """Verifies contents of the descriptor - used in verify_image sub-command. 1330 1331 Arguments: 1332 image_dir: The directory of the file being verified. 1333 image_ext: The extension of the file being verified (e.g. '.img'). 1334 expected_chain_partitions_map: A map from partition name to the 1335 tuple (rollback_index_location, key_blob). 1336 1337 Returns: 1338 True if the descriptor verifies, False otherwise. 1339 """ 1340 image_filename = os.path.join(image_dir, self.partition_name + image_ext) 1341 image = ImageHandler(image_filename) 1342 # Generate the hashtree and checks that it matches what's in the file. 1343 digest_size = len(hashlib.new(name=self.hash_algorithm).digest()) 1344 digest_padding = round_to_pow2(digest_size) - digest_size 1345 (hash_level_offsets, tree_size) = calc_hash_level_offsets( 1346 self.image_size, self.data_block_size, digest_size + digest_padding) 1347 root_digest, hash_tree = generate_hash_tree(image, self.image_size, 1348 self.data_block_size, 1349 self.hash_algorithm, self.salt, 1350 digest_padding, 1351 hash_level_offsets, 1352 tree_size) 1353 # The root digest must match unless it is not embedded in the descriptor. 1354 if len(self.root_digest) != 0 and root_digest != self.root_digest: 1355 sys.stderr.write('hashtree of {} does not match descriptor\n'. 1356 format(image_filename)) 1357 return False 1358 # ... also check that the on-disk hashtree matches 1359 image.seek(self.tree_offset) 1360 hash_tree_ondisk = image.read(self.tree_size) 1361 if hash_tree != hash_tree_ondisk: 1362 sys.stderr.write('hashtree of {} contains invalid data\n'. 1363 format(image_filename)) 1364 return False 1365 # TODO: we could also verify that the FEC stored in the image is 1366 # correct but this a) currently requires the 'fec' binary; and b) 1367 # takes a long time; and c) is not strictly needed for 1368 # verification purposes as we've already verified the root hash. 1369 print ('{}: Successfully verified {} hashtree of {} for image of {} bytes' 1370 .format(self.partition_name, self.hash_algorithm, image_filename, 1371 self.image_size)) 1372 return True 1373 1374 1375 class AvbHashDescriptor(AvbDescriptor): 1376 """A class for hash descriptors. 1377 1378 See the |AvbHashDescriptor| C struct for more information. 1379 1380 Attributes: 1381 image_size: Image size, in bytes. 1382 hash_algorithm: Hash algorithm used. 1383 partition_name: Partition name. 1384 salt: Salt used. 1385 digest: The hash value of salt and data combined. 1386 flags: The descriptor flags (see avb_hash_descriptor.h). 1387 """ 1388 1389 TAG = 2 1390 RESERVED = 60 1391 SIZE = 72 + RESERVED 1392 FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header) 1393 'Q' # image size (bytes) 1394 '32s' # hash algorithm used 1395 'L' # partition name (bytes) 1396 'L' # salt length (bytes) 1397 'L' # digest length (bytes) 1398 'L' + # flags 1399 str(RESERVED) + 's') # reserved 1400 1401 def __init__(self, data=None): 1402 """Initializes a new hash descriptor. 1403 1404 Arguments: 1405 data: If not None, must be a bytearray of size |SIZE|. 1406 1407 Raises: 1408 LookupError: If the given descriptor is malformed. 1409 """ 1410 AvbDescriptor.__init__(self, None) 1411 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1412 1413 if data: 1414 (tag, num_bytes_following, self.image_size, self.hash_algorithm, 1415 partition_name_len, salt_len, 1416 digest_len, self.flags, _) = struct.unpack(self.FORMAT_STRING, 1417 data[0:self.SIZE]) 1418 expected_size = round_to_multiple( 1419 self.SIZE - 16 + partition_name_len + salt_len + digest_len, 8) 1420 if tag != self.TAG or num_bytes_following != expected_size: 1421 raise LookupError('Given data does not look like a hash ' 'descriptor.') 1422 # Nuke NUL-bytes at the end. 1423 self.hash_algorithm = self.hash_algorithm.split('\0', 1)[0] 1424 o = 0 1425 self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o + 1426 partition_name_len)]) 1427 # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8. 1428 self.partition_name.decode('utf-8') 1429 o += partition_name_len 1430 self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)] 1431 o += salt_len 1432 self.digest = data[(self.SIZE + o):(self.SIZE + o + digest_len)] 1433 if digest_len != len(hashlib.new(name=self.hash_algorithm).digest()): 1434 if digest_len != 0: 1435 raise LookupError('digest_len doesn\'t match hash algorithm') 1436 1437 else: 1438 self.image_size = 0 1439 self.hash_algorithm = '' 1440 self.partition_name = '' 1441 self.salt = bytearray() 1442 self.digest = bytearray() 1443 self.flags = 0 1444 1445 def print_desc(self, o): 1446 """Print the descriptor. 1447 1448 Arguments: 1449 o: The object to write the output to. 1450 """ 1451 o.write(' Hash descriptor:\n') 1452 o.write(' Image Size: {} bytes\n'.format(self.image_size)) 1453 o.write(' Hash Algorithm: {}\n'.format(self.hash_algorithm)) 1454 o.write(' Partition Name: {}\n'.format(self.partition_name)) 1455 o.write(' Salt: {}\n'.format(str(self.salt).encode( 1456 'hex'))) 1457 o.write(' Digest: {}\n'.format(str(self.digest).encode( 1458 'hex'))) 1459 o.write(' Flags: {}\n'.format(self.flags)) 1460 1461 def encode(self): 1462 """Serializes the descriptor. 1463 1464 Returns: 1465 A bytearray() with the descriptor data. 1466 """ 1467 encoded_name = self.partition_name.encode('utf-8') 1468 num_bytes_following = ( 1469 self.SIZE + len(encoded_name) + len(self.salt) + len(self.digest) - 16) 1470 nbf_with_padding = round_to_multiple(num_bytes_following, 8) 1471 padding_size = nbf_with_padding - num_bytes_following 1472 desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding, 1473 self.image_size, self.hash_algorithm, len(encoded_name), 1474 len(self.salt), len(self.digest), self.flags, 1475 self.RESERVED*'\0') 1476 padding = struct.pack(str(padding_size) + 'x') 1477 ret = desc + encoded_name + self.salt + self.digest + padding 1478 return bytearray(ret) 1479 1480 def verify(self, image_dir, image_ext, expected_chain_partitions_map): 1481 """Verifies contents of the descriptor - used in verify_image sub-command. 1482 1483 Arguments: 1484 image_dir: The directory of the file being verified. 1485 image_ext: The extension of the file being verified (e.g. '.img'). 1486 expected_chain_partitions_map: A map from partition name to the 1487 tuple (rollback_index_location, key_blob). 1488 1489 Returns: 1490 True if the descriptor verifies, False otherwise. 1491 """ 1492 image_filename = os.path.join(image_dir, self.partition_name + image_ext) 1493 image = ImageHandler(image_filename) 1494 data = image.read(self.image_size) 1495 ha = hashlib.new(self.hash_algorithm) 1496 ha.update(self.salt) 1497 ha.update(data) 1498 digest = ha.digest() 1499 # The digest must match unless there is no digest in the descriptor. 1500 if len(self.digest) != 0 and digest != self.digest: 1501 sys.stderr.write('{} digest of {} does not match digest in descriptor\n'. 1502 format(self.hash_algorithm, image_filename)) 1503 return False 1504 print ('{}: Successfully verified {} hash of {} for image of {} bytes' 1505 .format(self.partition_name, self.hash_algorithm, image_filename, 1506 self.image_size)) 1507 return True 1508 1509 1510 class AvbKernelCmdlineDescriptor(AvbDescriptor): 1511 """A class for kernel command-line descriptors. 1512 1513 See the |AvbKernelCmdlineDescriptor| C struct for more information. 1514 1515 Attributes: 1516 flags: Flags. 1517 kernel_cmdline: The kernel command-line. 1518 """ 1519 1520 TAG = 3 1521 SIZE = 24 1522 FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header) 1523 'L' # flags 1524 'L') # cmdline length (bytes) 1525 1526 FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED = (1 << 0) 1527 FLAGS_USE_ONLY_IF_HASHTREE_DISABLED = (1 << 1) 1528 1529 def __init__(self, data=None): 1530 """Initializes a new kernel cmdline descriptor. 1531 1532 Arguments: 1533 data: If not None, must be a bytearray of size |SIZE|. 1534 1535 Raises: 1536 LookupError: If the given descriptor is malformed. 1537 """ 1538 AvbDescriptor.__init__(self, None) 1539 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1540 1541 if data: 1542 (tag, num_bytes_following, self.flags, kernel_cmdline_length) = ( 1543 struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])) 1544 expected_size = round_to_multiple(self.SIZE - 16 + kernel_cmdline_length, 1545 8) 1546 if tag != self.TAG or num_bytes_following != expected_size: 1547 raise LookupError('Given data does not look like a kernel cmdline ' 1548 'descriptor.') 1549 # Nuke NUL-bytes at the end. 1550 self.kernel_cmdline = str(data[self.SIZE:(self.SIZE + 1551 kernel_cmdline_length)]) 1552 # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8. 1553 self.kernel_cmdline.decode('utf-8') 1554 else: 1555 self.flags = 0 1556 self.kernel_cmdline = '' 1557 1558 def print_desc(self, o): 1559 """Print the descriptor. 1560 1561 Arguments: 1562 o: The object to write the output to. 1563 """ 1564 o.write(' Kernel Cmdline descriptor:\n') 1565 o.write(' Flags: {}\n'.format(self.flags)) 1566 o.write(' Kernel Cmdline: {}\n'.format(repr( 1567 self.kernel_cmdline))) 1568 1569 def encode(self): 1570 """Serializes the descriptor. 1571 1572 Returns: 1573 A bytearray() with the descriptor data. 1574 """ 1575 encoded_str = self.kernel_cmdline.encode('utf-8') 1576 num_bytes_following = (self.SIZE + len(encoded_str) - 16) 1577 nbf_with_padding = round_to_multiple(num_bytes_following, 8) 1578 padding_size = nbf_with_padding - num_bytes_following 1579 desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding, 1580 self.flags, len(encoded_str)) 1581 padding = struct.pack(str(padding_size) + 'x') 1582 ret = desc + encoded_str + padding 1583 return bytearray(ret) 1584 1585 def verify(self, image_dir, image_ext, expected_chain_partitions_map): 1586 """Verifies contents of the descriptor - used in verify_image sub-command. 1587 1588 Arguments: 1589 image_dir: The directory of the file being verified. 1590 image_ext: The extension of the file being verified (e.g. '.img'). 1591 expected_chain_partitions_map: A map from partition name to the 1592 tuple (rollback_index_location, key_blob). 1593 1594 Returns: 1595 True if the descriptor verifies, False otherwise. 1596 """ 1597 # Nothing to verify. 1598 return True 1599 1600 class AvbChainPartitionDescriptor(AvbDescriptor): 1601 """A class for chained partition descriptors. 1602 1603 See the |AvbChainPartitionDescriptor| C struct for more information. 1604 1605 Attributes: 1606 rollback_index_location: The rollback index location to use. 1607 partition_name: Partition name. 1608 public_key: Bytes for the public key. 1609 """ 1610 1611 TAG = 4 1612 RESERVED = 64 1613 SIZE = 28 + RESERVED 1614 FORMAT_STRING = ('!QQ' # tag, num_bytes_following (descriptor header) 1615 'L' # rollback_index_location 1616 'L' # partition_name_size (bytes) 1617 'L' + # public_key_size (bytes) 1618 str(RESERVED) + 's') # reserved 1619 1620 def __init__(self, data=None): 1621 """Initializes a new chain partition descriptor. 1622 1623 Arguments: 1624 data: If not None, must be a bytearray of size |SIZE|. 1625 1626 Raises: 1627 LookupError: If the given descriptor is malformed. 1628 """ 1629 AvbDescriptor.__init__(self, None) 1630 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1631 1632 if data: 1633 (tag, num_bytes_following, self.rollback_index_location, 1634 partition_name_len, 1635 public_key_len, _) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE]) 1636 expected_size = round_to_multiple( 1637 self.SIZE - 16 + partition_name_len + public_key_len, 8) 1638 if tag != self.TAG or num_bytes_following != expected_size: 1639 raise LookupError('Given data does not look like a chain partition ' 1640 'descriptor.') 1641 o = 0 1642 self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o + 1643 partition_name_len)]) 1644 # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8. 1645 self.partition_name.decode('utf-8') 1646 o += partition_name_len 1647 self.public_key = data[(self.SIZE + o):(self.SIZE + o + public_key_len)] 1648 1649 else: 1650 self.rollback_index_location = 0 1651 self.partition_name = '' 1652 self.public_key = bytearray() 1653 1654 def print_desc(self, o): 1655 """Print the descriptor. 1656 1657 Arguments: 1658 o: The object to write the output to. 1659 """ 1660 o.write(' Chain Partition descriptor:\n') 1661 o.write(' Partition Name: {}\n'.format(self.partition_name)) 1662 o.write(' Rollback Index Location: {}\n'.format( 1663 self.rollback_index_location)) 1664 # Just show the SHA1 of the key, for size reasons. 1665 hexdig = hashlib.sha1(self.public_key).hexdigest() 1666 o.write(' Public key (sha1): {}\n'.format(hexdig)) 1667 1668 def encode(self): 1669 """Serializes the descriptor. 1670 1671 Returns: 1672 A bytearray() with the descriptor data. 1673 """ 1674 encoded_name = self.partition_name.encode('utf-8') 1675 num_bytes_following = ( 1676 self.SIZE + len(encoded_name) + len(self.public_key) - 16) 1677 nbf_with_padding = round_to_multiple(num_bytes_following, 8) 1678 padding_size = nbf_with_padding - num_bytes_following 1679 desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding, 1680 self.rollback_index_location, len(encoded_name), 1681 len(self.public_key), self.RESERVED*'\0') 1682 padding = struct.pack(str(padding_size) + 'x') 1683 ret = desc + encoded_name + self.public_key + padding 1684 return bytearray(ret) 1685 1686 def verify(self, image_dir, image_ext, expected_chain_partitions_map): 1687 """Verifies contents of the descriptor - used in verify_image sub-command. 1688 1689 Arguments: 1690 image_dir: The directory of the file being verified. 1691 image_ext: The extension of the file being verified (e.g. '.img'). 1692 expected_chain_partitions_map: A map from partition name to the 1693 tuple (rollback_index_location, key_blob). 1694 1695 Returns: 1696 True if the descriptor verifies, False otherwise. 1697 """ 1698 value = expected_chain_partitions_map.get(self.partition_name) 1699 if not value: 1700 sys.stderr.write('No expected chain partition for partition {}. Use ' 1701 '--expected_chain_partition to specify expected ' 1702 'contents.\n'. 1703 format(self.partition_name)) 1704 return False 1705 rollback_index_location, pk_blob = value 1706 1707 if self.rollback_index_location != rollback_index_location: 1708 sys.stderr.write('Expected rollback_index_location {} does not ' 1709 'match {} in descriptor for partition {}\n'. 1710 format(rollback_index_location, 1711 self.rollback_index_location, 1712 self.partition_name)) 1713 return False 1714 1715 if self.public_key != pk_blob: 1716 sys.stderr.write('Expected public key blob does not match public ' 1717 'key blob in descriptor for partition {}\n'. 1718 format(self.partition_name)) 1719 return False 1720 1721 print ('{}: Successfully verified chain partition descriptor matches ' 1722 'expected data'.format(self.partition_name)) 1723 1724 return True 1725 1726 DESCRIPTOR_CLASSES = [ 1727 AvbPropertyDescriptor, AvbHashtreeDescriptor, AvbHashDescriptor, 1728 AvbKernelCmdlineDescriptor, AvbChainPartitionDescriptor 1729 ] 1730 1731 1732 def parse_descriptors(data): 1733 """Parses a blob of data into descriptors. 1734 1735 Arguments: 1736 data: A bytearray() with encoded descriptors. 1737 1738 Returns: 1739 A list of instances of objects derived from AvbDescriptor. For 1740 unknown descriptors, the class AvbDescriptor is used. 1741 """ 1742 o = 0 1743 ret = [] 1744 while o < len(data): 1745 tag, nb_following = struct.unpack('!2Q', data[o:o + 16]) 1746 if tag < len(DESCRIPTOR_CLASSES): 1747 c = DESCRIPTOR_CLASSES[tag] 1748 else: 1749 c = AvbDescriptor 1750 ret.append(c(bytearray(data[o:o + 16 + nb_following]))) 1751 o += 16 + nb_following 1752 return ret 1753 1754 1755 class AvbFooter(object): 1756 """A class for parsing and writing footers. 1757 1758 Footers are stored at the end of partitions and point to where the 1759 AvbVBMeta blob is located. They also contain the original size of 1760 the image before AVB information was added. 1761 1762 Attributes: 1763 magic: Magic for identifying the footer, see |MAGIC|. 1764 version_major: The major version of avbtool that wrote the footer. 1765 version_minor: The minor version of avbtool that wrote the footer. 1766 original_image_size: Original image size. 1767 vbmeta_offset: Offset of where the AvbVBMeta blob is stored. 1768 vbmeta_size: Size of the AvbVBMeta blob. 1769 """ 1770 1771 MAGIC = 'AVBf' 1772 SIZE = 64 1773 RESERVED = 28 1774 FOOTER_VERSION_MAJOR = AVB_FOOTER_VERSION_MAJOR 1775 FOOTER_VERSION_MINOR = AVB_FOOTER_VERSION_MINOR 1776 FORMAT_STRING = ('!4s2L' # magic, 2 x version. 1777 'Q' # Original image size. 1778 'Q' # Offset of VBMeta blob. 1779 'Q' + # Size of VBMeta blob. 1780 str(RESERVED) + 'x') # padding for reserved bytes 1781 1782 def __init__(self, data=None): 1783 """Initializes a new footer object. 1784 1785 Arguments: 1786 data: If not None, must be a bytearray of size 4096. 1787 1788 Raises: 1789 LookupError: If the given footer is malformed. 1790 struct.error: If the given data has no footer. 1791 """ 1792 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1793 1794 if data: 1795 (self.magic, self.version_major, self.version_minor, 1796 self.original_image_size, self.vbmeta_offset, 1797 self.vbmeta_size) = struct.unpack(self.FORMAT_STRING, data) 1798 if self.magic != self.MAGIC: 1799 raise LookupError('Given data does not look like a AVB footer.') 1800 else: 1801 self.magic = self.MAGIC 1802 self.version_major = self.FOOTER_VERSION_MAJOR 1803 self.version_minor = self.FOOTER_VERSION_MINOR 1804 self.original_image_size = 0 1805 self.vbmeta_offset = 0 1806 self.vbmeta_size = 0 1807 1808 def encode(self): 1809 """Gets a string representing the binary encoding of the footer. 1810 1811 Returns: 1812 A bytearray() with a binary representation of the footer. 1813 """ 1814 return struct.pack(self.FORMAT_STRING, self.magic, self.version_major, 1815 self.version_minor, self.original_image_size, 1816 self.vbmeta_offset, self.vbmeta_size) 1817 1818 1819 class AvbVBMetaHeader(object): 1820 """A class for parsing and writing AVB vbmeta images. 1821 1822 Attributes: 1823 The attributes correspond to the |AvbVBMetaHeader| struct 1824 defined in avb_vbmeta_header.h. 1825 """ 1826 1827 SIZE = 256 1828 1829 # Keep in sync with |reserved0| and |reserved| field of 1830 # |AvbVBMetaImageHeader|. 1831 RESERVED0 = 4 1832 RESERVED = 80 1833 1834 # Keep in sync with |AvbVBMetaImageHeader|. 1835 FORMAT_STRING = ('!4s2L' # magic, 2 x version 1836 '2Q' # 2 x block size 1837 'L' # algorithm type 1838 '2Q' # offset, size (hash) 1839 '2Q' # offset, size (signature) 1840 '2Q' # offset, size (public key) 1841 '2Q' # offset, size (public key metadata) 1842 '2Q' # offset, size (descriptors) 1843 'Q' # rollback_index 1844 'L' + # flags 1845 str(RESERVED0) + 'x' + # padding for reserved bytes 1846 '47sx' + # NUL-terminated release string 1847 str(RESERVED) + 'x') # padding for reserved bytes 1848 1849 def __init__(self, data=None): 1850 """Initializes a new header object. 1851 1852 Arguments: 1853 data: If not None, must be a bytearray of size 8192. 1854 1855 Raises: 1856 Exception: If the given data is malformed. 1857 """ 1858 assert struct.calcsize(self.FORMAT_STRING) == self.SIZE 1859 1860 if data: 1861 (self.magic, self.required_libavb_version_major, 1862 self.required_libavb_version_minor, 1863 self.authentication_data_block_size, self.auxiliary_data_block_size, 1864 self.algorithm_type, self.hash_offset, self.hash_size, 1865 self.signature_offset, self.signature_size, self.public_key_offset, 1866 self.public_key_size, self.public_key_metadata_offset, 1867 self.public_key_metadata_size, self.descriptors_offset, 1868 self.descriptors_size, 1869 self.rollback_index, 1870 self.flags, 1871 self.release_string) = struct.unpack(self.FORMAT_STRING, data) 1872 # Nuke NUL-bytes at the end of the string. 1873 if self.magic != 'AVB0': 1874 raise AvbError('Given image does not look like a vbmeta image.') 1875 else: 1876 self.magic = 'AVB0' 1877 # Start by just requiring version 1.0. Code that adds features 1878 # in a future version can use bump_required_libavb_version_minor() to 1879 # bump the minor. 1880 self.required_libavb_version_major = AVB_VERSION_MAJOR 1881 self.required_libavb_version_minor = 0 1882 self.authentication_data_block_size = 0 1883 self.auxiliary_data_block_size = 0 1884 self.algorithm_type = 0 1885 self.hash_offset = 0 1886 self.hash_size = 0 1887 self.signature_offset = 0 1888 self.signature_size = 0 1889 self.public_key_offset = 0 1890 self.public_key_size = 0 1891 self.public_key_metadata_offset = 0 1892 self.public_key_metadata_size = 0 1893 self.descriptors_offset = 0 1894 self.descriptors_size = 0 1895 self.rollback_index = 0 1896 self.flags = 0 1897 self.release_string = get_release_string() 1898 1899 def bump_required_libavb_version_minor(self, minor): 1900 """Function to bump required_libavb_version_minor. 1901 1902 Call this when writing data that requires a specific libavb 1903 version to parse it. 1904 1905 Arguments: 1906 minor: The minor version of libavb that has support for the feature. 1907 """ 1908 self.required_libavb_version_minor = ( 1909 max(self.required_libavb_version_minor, minor)) 1910 1911 def save(self, output): 1912 """Serializes the header (256 bytes) to disk. 1913 1914 Arguments: 1915 output: The object to write the output to. 1916 """ 1917 output.write(struct.pack( 1918 self.FORMAT_STRING, self.magic, self.required_libavb_version_major, 1919 self.required_libavb_version_minor, self.authentication_data_block_size, 1920 self.auxiliary_data_block_size, self.algorithm_type, self.hash_offset, 1921 self.hash_size, self.signature_offset, self.signature_size, 1922 self.public_key_offset, self.public_key_size, 1923 self.public_key_metadata_offset, self.public_key_metadata_size, 1924 self.descriptors_offset, self.descriptors_size, self.rollback_index, 1925 self.flags, self.release_string)) 1926 1927 def encode(self): 1928 """Serializes the header (256) to a bytearray(). 1929 1930 Returns: 1931 A bytearray() with the encoded header. 1932 """ 1933 return struct.pack(self.FORMAT_STRING, self.magic, 1934 self.required_libavb_version_major, 1935 self.required_libavb_version_minor, 1936 self.authentication_data_block_size, 1937 self.auxiliary_data_block_size, self.algorithm_type, 1938 self.hash_offset, self.hash_size, self.signature_offset, 1939 self.signature_size, self.public_key_offset, 1940 self.public_key_size, self.public_key_metadata_offset, 1941 self.public_key_metadata_size, self.descriptors_offset, 1942 self.descriptors_size, self.rollback_index, self.flags, 1943 self.release_string) 1944 1945 1946 class Avb(object): 1947 """Business logic for avbtool command-line tool.""" 1948 1949 # Keep in sync with avb_ab_flow.h. 1950 AB_FORMAT_NO_CRC = '!4sBB2xBBBxBBBx12x' 1951 AB_MAGIC = '\0AB0' 1952 AB_MAJOR_VERSION = 1 1953 AB_MINOR_VERSION = 0 1954 AB_MISC_METADATA_OFFSET = 2048 1955 1956 # Constants for maximum metadata size. These are used to give 1957 # meaningful errors if the value passed in via --partition_size is 1958 # too small and when --calc_max_image_size is used. We use 1959 # conservative figures. 1960 MAX_VBMETA_SIZE = 64 * 1024 1961 MAX_FOOTER_SIZE = 4096 1962 1963 def erase_footer(self, image_filename, keep_hashtree): 1964 """Implements the 'erase_footer' command. 1965 1966 Arguments: 1967 image_filename: File to erase a footer from. 1968 keep_hashtree: If True, keep the hashtree and FEC around. 1969 1970 Raises: 1971 AvbError: If there's no footer in the image. 1972 """ 1973 1974 image = ImageHandler(image_filename) 1975 1976 (footer, _, descriptors, _) = self._parse_image(image) 1977 1978 if not footer: 1979 raise AvbError('Given image does not have a footer.') 1980 1981 new_image_size = None 1982 if not keep_hashtree: 1983 new_image_size = footer.original_image_size 1984 else: 1985 # If requested to keep the hashtree, search for a hashtree 1986 # descriptor to figure out the location and size of the hashtree 1987 # and FEC. 1988 for desc in descriptors: 1989 if isinstance(desc, AvbHashtreeDescriptor): 1990 # The hashtree is always just following the main data so the 1991 # new size is easily derived. 1992 new_image_size = desc.tree_offset + desc.tree_size 1993 # If the image has FEC codes, also keep those. 1994 if desc.fec_offset > 0: 1995 fec_end = desc.fec_offset + desc.fec_size 1996 new_image_size = max(new_image_size, fec_end) 1997 break 1998 if not new_image_size: 1999 raise AvbError('Requested to keep hashtree but no hashtree ' 2000 'descriptor was found.') 2001 2002 # And cut... 2003 image.truncate(new_image_size) 2004 2005 def resize_image(self, image_filename, partition_size): 2006 """Implements the 'resize_image' command. 2007 2008 Arguments: 2009 image_filename: File with footer to resize. 2010 partition_size: The new size of the image. 2011 2012 Raises: 2013 AvbError: If there's no footer in the image. 2014 """ 2015 2016 image = ImageHandler(image_filename) 2017 2018 if partition_size % image.block_size != 0: 2019 raise AvbError('Partition size of {} is not a multiple of the image ' 2020 'block size {}.'.format(partition_size, 2021 image.block_size)) 2022 2023 (footer, vbmeta_header, descriptors, _) = self._parse_image(image) 2024 2025 if not footer: 2026 raise AvbError('Given image does not have a footer.') 2027 2028 # The vbmeta blob is always at the end of the data so resizing an 2029 # image amounts to just moving the footer around. 2030 2031 vbmeta_end_offset = footer.vbmeta_offset + footer.vbmeta_size 2032 if vbmeta_end_offset % image.block_size != 0: 2033 vbmeta_end_offset += image.block_size - (vbmeta_end_offset % image.block_size) 2034 2035 if partition_size < vbmeta_end_offset + 1*image.block_size: 2036 raise AvbError('Requested size of {} is too small for an image ' 2037 'of size {}.' 2038 .format(partition_size, 2039 vbmeta_end_offset + 1*image.block_size)) 2040 2041 # Cut at the end of the vbmeta blob and insert a DONT_CARE chunk 2042 # with enough bytes such that the final Footer block is at the end 2043 # of partition_size. 2044 image.truncate(vbmeta_end_offset) 2045 image.append_dont_care(partition_size - vbmeta_end_offset - 2046 1*image.block_size) 2047 2048 # Just reuse the same footer - only difference is that we're 2049 # writing it in a different place. 2050 footer_blob = footer.encode() 2051 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) + 2052 footer_blob) 2053 image.append_raw(footer_blob_with_padding) 2054 2055 def set_ab_metadata(self, misc_image, slot_data): 2056 """Implements the 'set_ab_metadata' command. 2057 2058 The |slot_data| argument must be of the form 'A_priority:A_tries_remaining: 2059 A_successful_boot:B_priority:B_tries_remaining:B_successful_boot'. 2060 2061 Arguments: 2062 misc_image: The misc image to write to. 2063 slot_data: Slot data as a string 2064 2065 Raises: 2066 AvbError: If slot data is malformed. 2067 """ 2068 tokens = slot_data.split(':') 2069 if len(tokens) != 6: 2070 raise AvbError('Malformed slot data "{}".'.format(slot_data)) 2071 a_priority = int(tokens[0]) 2072 a_tries_remaining = int(tokens[1]) 2073 a_success = True if int(tokens[2]) != 0 else False 2074 b_priority = int(tokens[3]) 2075 b_tries_remaining = int(tokens[4]) 2076 b_success = True if int(tokens[5]) != 0 else False 2077 2078 ab_data_no_crc = struct.pack(self.AB_FORMAT_NO_CRC, 2079 self.AB_MAGIC, 2080 self.AB_MAJOR_VERSION, self.AB_MINOR_VERSION, 2081 a_priority, a_tries_remaining, a_success, 2082 b_priority, b_tries_remaining, b_success) 2083 # Force CRC to be unsigned, see https://bugs.python.org/issue4903 for why. 2084 crc_value = binascii.crc32(ab_data_no_crc) & 0xffffffff 2085 ab_data = ab_data_no_crc + struct.pack('!I', crc_value) 2086 misc_image.seek(self.AB_MISC_METADATA_OFFSET) 2087 misc_image.write(ab_data) 2088 2089 def info_image(self, image_filename, output): 2090 """Implements the 'info_image' command. 2091 2092 Arguments: 2093 image_filename: Image file to get information from (file object). 2094 output: Output file to write human-readable information to (file object). 2095 """ 2096 2097 image = ImageHandler(image_filename) 2098 2099 o = output 2100 2101 (footer, header, descriptors, image_size) = self._parse_image(image) 2102 2103 if footer: 2104 o.write('Footer version: {}.{}\n'.format(footer.version_major, 2105 footer.version_minor)) 2106 o.write('Image size: {} bytes\n'.format(image_size)) 2107 o.write('Original image size: {} bytes\n'.format( 2108 footer.original_image_size)) 2109 o.write('VBMeta offset: {}\n'.format(footer.vbmeta_offset)) 2110 o.write('VBMeta size: {} bytes\n'.format(footer.vbmeta_size)) 2111 o.write('--\n') 2112 2113 (alg_name, _) = lookup_algorithm_by_type(header.algorithm_type) 2114 2115 o.write('Minimum libavb version: {}.{}{}\n'.format( 2116 header.required_libavb_version_major, 2117 header.required_libavb_version_minor, 2118 ' (Sparse)' if image.is_sparse else '')) 2119 o.write('Header Block: {} bytes\n'.format(AvbVBMetaHeader.SIZE)) 2120 o.write('Authentication Block: {} bytes\n'.format( 2121 header.authentication_data_block_size)) 2122 o.write('Auxiliary Block: {} bytes\n'.format( 2123 header.auxiliary_data_block_size)) 2124 o.write('Algorithm: {}\n'.format(alg_name)) 2125 o.write('Rollback Index: {}\n'.format(header.rollback_index)) 2126 o.write('Flags: {}\n'.format(header.flags)) 2127 o.write('Release String: \'{}\'\n'.format( 2128 header.release_string.rstrip('\0'))) 2129 2130 # Print descriptors. 2131 num_printed = 0 2132 o.write('Descriptors:\n') 2133 for desc in descriptors: 2134 desc.print_desc(o) 2135 num_printed += 1 2136 if num_printed == 0: 2137 o.write(' (none)\n') 2138 2139 def verify_image(self, image_filename, key_path, expected_chain_partitions): 2140 """Implements the 'verify_image' command. 2141 2142 Arguments: 2143 image_filename: Image file to get information from (file object). 2144 key_path: None or check that embedded public key matches key at given path. 2145 expected_chain_partitions: List of chain partitions to check or None. 2146 """ 2147 2148 expected_chain_partitions_map = {} 2149 if expected_chain_partitions: 2150 used_locations = {} 2151 for cp in expected_chain_partitions: 2152 cp_tokens = cp.split(':') 2153 if len(cp_tokens) != 3: 2154 raise AvbError('Malformed chained partition "{}".'.format(cp)) 2155 partition_name = cp_tokens[0] 2156 rollback_index_location = int(cp_tokens[1]) 2157 file_path = cp_tokens[2] 2158 pk_blob = open(file_path).read() 2159 expected_chain_partitions_map[partition_name] = (rollback_index_location, pk_blob) 2160 2161 image_dir = os.path.dirname(image_filename) 2162 image_ext = os.path.splitext(image_filename)[1] 2163 2164 key_blob = None 2165 if key_path: 2166 print 'Verifying image {} using key at {}'.format(image_filename, key_path) 2167 key_blob = encode_rsa_key(key_path) 2168 else: 2169 print 'Verifying image {} using embedded public key'.format(image_filename) 2170 2171 image = ImageHandler(image_filename) 2172 (footer, header, descriptors, image_size) = self._parse_image(image) 2173 offset = 0 2174 if footer: 2175 offset = footer.vbmeta_offset 2176 size = (header.SIZE + header.authentication_data_block_size + 2177 header.auxiliary_data_block_size) 2178 image.seek(offset) 2179 vbmeta_blob = image.read(size) 2180 h = AvbVBMetaHeader(vbmeta_blob[0:AvbVBMetaHeader.SIZE]) 2181 alg_name, _ = lookup_algorithm_by_type(header.algorithm_type) 2182 if not verify_vbmeta_signature(header, vbmeta_blob): 2183 raise AvbError('Signature check failed for {} vbmeta struct {}' 2184 .format(alg_name, image_filename)) 2185 2186 if key_blob: 2187 # The embedded public key is in the auxiliary block at an offset. 2188 key_offset = AvbVBMetaHeader.SIZE 2189 key_offset += h.authentication_data_block_size 2190 key_offset += h.public_key_offset 2191 key_blob_in_vbmeta = vbmeta_blob[key_offset:key_offset + h.public_key_size] 2192 if key_blob != key_blob_in_vbmeta: 2193 raise AvbError('Embedded public key does not match given key.') 2194 2195 if footer: 2196 print ('vbmeta: Successfully verified footer and {} vbmeta struct in {}' 2197 .format(alg_name, image_filename)) 2198 else: 2199 print ('vbmeta: Successfully verified {} vbmeta struct in {}' 2200 .format(alg_name, image_filename)) 2201 2202 for desc in descriptors: 2203 if not desc.verify(image_dir, image_ext, expected_chain_partitions_map): 2204 raise AvbError('Error verifying descriptor.') 2205 2206 2207 def _parse_image(self, image): 2208 """Gets information about an image. 2209 2210 The image can either be a vbmeta or an image with a footer. 2211 2212 Arguments: 2213 image: An ImageHandler (vbmeta or footer) with a hashtree descriptor. 2214 2215 Returns: 2216 A tuple where the first argument is a AvbFooter (None if there 2217 is no footer on the image), the second argument is a 2218 AvbVBMetaHeader, the third argument is a list of 2219 AvbDescriptor-derived instances, and the fourth argument is the 2220 size of |image|. 2221 """ 2222 assert isinstance(image, ImageHandler) 2223 footer = None 2224 image.seek(image.image_size - AvbFooter.SIZE) 2225 try: 2226 footer = AvbFooter(image.read(AvbFooter.SIZE)) 2227 except (LookupError, struct.error): 2228 # Nope, just seek back to the start. 2229 image.seek(0) 2230 2231 vbmeta_offset = 0 2232 if footer: 2233 vbmeta_offset = footer.vbmeta_offset 2234 2235 image.seek(vbmeta_offset) 2236 h = AvbVBMetaHeader(image.read(AvbVBMetaHeader.SIZE)) 2237 2238 auth_block_offset = vbmeta_offset + AvbVBMetaHeader.SIZE 2239 aux_block_offset = auth_block_offset + h.authentication_data_block_size 2240 desc_start_offset = aux_block_offset + h.descriptors_offset 2241 image.seek(desc_start_offset) 2242 descriptors = parse_descriptors(image.read(h.descriptors_size)) 2243 2244 return footer, h, descriptors, image.image_size 2245 2246 def _load_vbmeta_blob(self, image): 2247 """Gets the vbmeta struct and associated sections. 2248 2249 The image can either be a vbmeta.img or an image with a footer. 2250 2251 Arguments: 2252 image: An ImageHandler (vbmeta or footer). 2253 2254 Returns: 2255 A blob with the vbmeta struct and other sections. 2256 """ 2257 assert isinstance(image, ImageHandler) 2258 footer = None 2259 image.seek(image.image_size - AvbFooter.SIZE) 2260 try: 2261 footer = AvbFooter(image.read(AvbFooter.SIZE)) 2262 except (LookupError, struct.error): 2263 # Nope, just seek back to the start. 2264 image.seek(0) 2265 2266 vbmeta_offset = 0 2267 if footer: 2268 vbmeta_offset = footer.vbmeta_offset 2269 2270 image.seek(vbmeta_offset) 2271 h = AvbVBMetaHeader(image.read(AvbVBMetaHeader.SIZE)) 2272 2273 image.seek(vbmeta_offset) 2274 data_size = AvbVBMetaHeader.SIZE 2275 data_size += h.authentication_data_block_size 2276 data_size += h.auxiliary_data_block_size 2277 return image.read(data_size) 2278 2279 def _get_cmdline_descriptors_for_hashtree_descriptor(self, ht): 2280 """Generate kernel cmdline descriptors for dm-verity. 2281 2282 Arguments: 2283 ht: A AvbHashtreeDescriptor 2284 2285 Returns: 2286 A list with two AvbKernelCmdlineDescriptor with dm-verity kernel cmdline 2287 instructions. There is one for when hashtree is not disabled and one for 2288 when it is. 2289 2290 """ 2291 2292 c = 'dm="1 vroot none ro 1,' 2293 c += '0' # start 2294 c += ' {}'.format((ht.image_size / 512)) # size (# sectors) 2295 c += ' verity {}'.format(ht.dm_verity_version) # type and version 2296 c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' # data_dev 2297 c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' # hash_dev 2298 c += ' {}'.format(ht.data_block_size) # data_block 2299 c += ' {}'.format(ht.hash_block_size) # hash_block 2300 c += ' {}'.format(ht.image_size / ht.data_block_size) # #blocks 2301 c += ' {}'.format(ht.image_size / ht.data_block_size) # hash_offset 2302 c += ' {}'.format(ht.hash_algorithm) # hash_alg 2303 c += ' {}'.format(str(ht.root_digest).encode('hex')) # root_digest 2304 c += ' {}'.format(str(ht.salt).encode('hex')) # salt 2305 if ht.fec_num_roots > 0: 2306 c += ' 10' # number of optional args 2307 c += ' $(ANDROID_VERITY_MODE)' 2308 c += ' ignore_zero_blocks' 2309 c += ' use_fec_from_device PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' 2310 c += ' fec_roots {}'.format(ht.fec_num_roots) 2311 # Note that fec_blocks is the size that FEC covers, *not* the 2312 # size of the FEC data. Since we use FEC for everything up until 2313 # the FEC data, it's the same as the offset. 2314 c += ' fec_blocks {}'.format(ht.fec_offset/ht.data_block_size) 2315 c += ' fec_start {}'.format(ht.fec_offset/ht.data_block_size) 2316 else: 2317 c += ' 2' # number of optional args 2318 c += ' $(ANDROID_VERITY_MODE)' 2319 c += ' ignore_zero_blocks' 2320 c += '" root=/dev/dm-0' 2321 2322 # Now that we have the command-line, generate the descriptor. 2323 desc = AvbKernelCmdlineDescriptor() 2324 desc.kernel_cmdline = c 2325 desc.flags = ( 2326 AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED) 2327 2328 # The descriptor for when hashtree verification is disabled is a lot 2329 # simpler - we just set the root to the partition. 2330 desc_no_ht = AvbKernelCmdlineDescriptor() 2331 desc_no_ht.kernel_cmdline = 'root=PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' 2332 desc_no_ht.flags = ( 2333 AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_DISABLED) 2334 2335 return [desc, desc_no_ht] 2336 2337 def _get_cmdline_descriptors_for_dm_verity(self, image): 2338 """Generate kernel cmdline descriptors for dm-verity. 2339 2340 Arguments: 2341 image: An ImageHandler (vbmeta or footer) with a hashtree descriptor. 2342 2343 Returns: 2344 A list with two AvbKernelCmdlineDescriptor with dm-verity kernel cmdline 2345 instructions. There is one for when hashtree is not disabled and one for 2346 when it is. 2347 2348 Raises: 2349 AvbError: If |image| doesn't have a hashtree descriptor. 2350 2351 """ 2352 2353 (_, _, descriptors, _) = self._parse_image(image) 2354 2355 ht = None 2356 for desc in descriptors: 2357 if isinstance(desc, AvbHashtreeDescriptor): 2358 ht = desc 2359 break 2360 2361 if not ht: 2362 raise AvbError('No hashtree descriptor in given image') 2363 2364 return self._get_cmdline_descriptors_for_hashtree_descriptor(ht) 2365 2366 def make_vbmeta_image(self, output, chain_partitions, algorithm_name, 2367 key_path, public_key_metadata_path, rollback_index, 2368 flags, props, props_from_file, kernel_cmdlines, 2369 setup_rootfs_from_kernel, 2370 include_descriptors_from_image, 2371 signing_helper, 2372 signing_helper_with_files, 2373 release_string, 2374 append_to_release_string, 2375 print_required_libavb_version, 2376 padding_size): 2377 """Implements the 'make_vbmeta_image' command. 2378 2379 Arguments: 2380 output: File to write the image to. 2381 chain_partitions: List of partitions to chain or None. 2382 algorithm_name: Name of algorithm to use. 2383 key_path: Path to key to use or None. 2384 public_key_metadata_path: Path to public key metadata or None. 2385 rollback_index: The rollback index to use. 2386 flags: Flags value to use in the image. 2387 props: Properties to insert (list of strings of the form 'key:value'). 2388 props_from_file: Properties to insert (list of strings 'key:<path>'). 2389 kernel_cmdlines: Kernel cmdlines to insert (list of strings). 2390 setup_rootfs_from_kernel: None or file to generate from. 2391 include_descriptors_from_image: List of file objects with descriptors. 2392 signing_helper: Program which signs a hash and return signature. 2393 signing_helper_with_files: Same as signing_helper but uses files instead. 2394 release_string: None or avbtool release string to use instead of default. 2395 append_to_release_string: None or string to append. 2396 print_required_libavb_version: True to only print required libavb version. 2397 padding_size: If not 0, pads output so size is a multiple of the number. 2398 2399 Raises: 2400 AvbError: If a chained partition is malformed. 2401 """ 2402 2403 # If we're asked to calculate minimum required libavb version, we're done. 2404 if print_required_libavb_version: 2405 if include_descriptors_from_image: 2406 # Use the bump logic in AvbVBMetaHeader to calculate the max required 2407 # version of all included descriptors. 2408 tmp_header = AvbVBMetaHeader() 2409 for image in include_descriptors_from_image: 2410 (_, image_header, _, _) = self._parse_image(ImageHandler(image.name)) 2411 tmp_header.bump_required_libavb_version_minor( 2412 image_header.required_libavb_version_minor) 2413 print '1.{}'.format(tmp_header.required_libavb_version_minor) 2414 else: 2415 # Descriptors aside, all vbmeta features are supported in 1.0. 2416 print '1.0' 2417 return 2418 2419 if not output: 2420 raise AvbError('No output file given') 2421 2422 descriptors = [] 2423 ht_desc_to_setup = None 2424 vbmeta_blob = self._generate_vbmeta_blob( 2425 algorithm_name, key_path, public_key_metadata_path, descriptors, 2426 chain_partitions, rollback_index, flags, props, props_from_file, 2427 kernel_cmdlines, setup_rootfs_from_kernel, ht_desc_to_setup, 2428 include_descriptors_from_image, signing_helper, 2429 signing_helper_with_files, release_string, 2430 append_to_release_string, 0) 2431 2432 # Write entire vbmeta blob (header, authentication, auxiliary). 2433 output.seek(0) 2434 output.write(vbmeta_blob) 2435 2436 if padding_size > 0: 2437 padded_size = round_to_multiple(len(vbmeta_blob), padding_size) 2438 padding_needed = padded_size - len(vbmeta_blob) 2439 output.write('\0' * padding_needed) 2440 2441 def _generate_vbmeta_blob(self, algorithm_name, key_path, 2442 public_key_metadata_path, descriptors, 2443 chain_partitions, 2444 rollback_index, flags, props, props_from_file, 2445 kernel_cmdlines, 2446 setup_rootfs_from_kernel, 2447 ht_desc_to_setup, 2448 include_descriptors_from_image, signing_helper, 2449 signing_helper_with_files, 2450 release_string, append_to_release_string, 2451 required_libavb_version_minor): 2452 """Generates a VBMeta blob. 2453 2454 This blob contains the header (struct AvbVBMetaHeader), the 2455 authentication data block (which contains the hash and signature 2456 for the header and auxiliary block), and the auxiliary block 2457 (which contains descriptors, the public key used, and other data). 2458 2459 The |key| parameter can |None| only if the |algorithm_name| is 2460 'NONE'. 2461 2462 Arguments: 2463 algorithm_name: The algorithm name as per the ALGORITHMS dict. 2464 key_path: The path to the .pem file used to sign the blob. 2465 public_key_metadata_path: Path to public key metadata or None. 2466 descriptors: A list of descriptors to insert or None. 2467 chain_partitions: List of partitions to chain or None. 2468 rollback_index: The rollback index to use. 2469 flags: Flags to use in the image. 2470 props: Properties to insert (List of strings of the form 'key:value'). 2471 props_from_file: Properties to insert (List of strings 'key:<path>'). 2472 kernel_cmdlines: Kernel cmdlines to insert (list of strings). 2473 setup_rootfs_from_kernel: None or file to generate 2474 dm-verity kernel cmdline from. 2475 ht_desc_to_setup: If not None, an AvbHashtreeDescriptor to 2476 generate dm-verity kernel cmdline descriptors from. 2477 include_descriptors_from_image: List of file objects for which 2478 to insert descriptors from. 2479 signing_helper: Program which signs a hash and return signature. 2480 signing_helper_with_files: Same as signing_helper but uses files instead. 2481 release_string: None or avbtool release string. 2482 append_to_release_string: None or string to append. 2483 required_libavb_version_minor: Use at least this required minor version. 2484 2485 Returns: 2486 A bytearray() with the VBMeta blob. 2487 2488 Raises: 2489 Exception: If the |algorithm_name| is not found, if no key has 2490 been given and the given algorithm requires one, or the key is 2491 of the wrong size. 2492 2493 """ 2494 try: 2495 alg = ALGORITHMS[algorithm_name] 2496 except KeyError: 2497 raise AvbError('Unknown algorithm with name {}'.format(algorithm_name)) 2498 2499 if not descriptors: 2500 descriptors = [] 2501 2502 h = AvbVBMetaHeader() 2503 h.bump_required_libavb_version_minor(required_libavb_version_minor) 2504 2505 # Insert chained partition descriptors, if any 2506 if chain_partitions: 2507 used_locations = {} 2508 for cp in chain_partitions: 2509 cp_tokens = cp.split(':') 2510 if len(cp_tokens) != 3: 2511 raise AvbError('Malformed chained partition "{}".'.format(cp)) 2512 partition_name = cp_tokens[0] 2513 rollback_index_location = int(cp_tokens[1]) 2514 file_path = cp_tokens[2] 2515 # Check that the same rollback location isn't being used by 2516 # multiple chained partitions. 2517 if used_locations.get(rollback_index_location): 2518 raise AvbError('Rollback Index Location {} is already in use.'.format( 2519 rollback_index_location)) 2520 used_locations[rollback_index_location] = True 2521 desc = AvbChainPartitionDescriptor() 2522 desc.partition_name = partition_name 2523 desc.rollback_index_location = rollback_index_location 2524 if desc.rollback_index_location < 1: 2525 raise AvbError('Rollback index location must be 1 or larger.') 2526 desc.public_key = open(file_path, 'rb').read() 2527 descriptors.append(desc) 2528 2529 # Descriptors. 2530 encoded_descriptors = bytearray() 2531 for desc in descriptors: 2532 encoded_descriptors.extend(desc.encode()) 2533 2534 # Add properties. 2535 if props: 2536 for prop in props: 2537 idx = prop.find(':') 2538 if idx == -1: 2539 raise AvbError('Malformed property "{}".'.format(prop)) 2540 desc = AvbPropertyDescriptor() 2541 desc.key = prop[0:idx] 2542 desc.value = prop[(idx + 1):] 2543 encoded_descriptors.extend(desc.encode()) 2544 if props_from_file: 2545 for prop in props_from_file: 2546 idx = prop.find(':') 2547 if idx == -1: 2548 raise AvbError('Malformed property "{}".'.format(prop)) 2549 desc = AvbPropertyDescriptor() 2550 desc.key = prop[0:idx] 2551 desc.value = prop[(idx + 1):] 2552 file_path = prop[(idx + 1):] 2553 desc.value = open(file_path, 'rb').read() 2554 encoded_descriptors.extend(desc.encode()) 2555 2556 # Add AvbKernelCmdline descriptor for dm-verity from an image, if requested. 2557 if setup_rootfs_from_kernel: 2558 image_handler = ImageHandler( 2559 setup_rootfs_from_kernel.name) 2560 cmdline_desc = self._get_cmdline_descriptors_for_dm_verity(image_handler) 2561 encoded_descriptors.extend(cmdline_desc[0].encode()) 2562 encoded_descriptors.extend(cmdline_desc[1].encode()) 2563 2564 # Add AvbKernelCmdline descriptor for dm-verity from desc, if requested. 2565 if ht_desc_to_setup: 2566 cmdline_desc = self._get_cmdline_descriptors_for_hashtree_descriptor( 2567 ht_desc_to_setup) 2568 encoded_descriptors.extend(cmdline_desc[0].encode()) 2569 encoded_descriptors.extend(cmdline_desc[1].encode()) 2570 2571 # Add kernel command-lines. 2572 if kernel_cmdlines: 2573 for i in kernel_cmdlines: 2574 desc = AvbKernelCmdlineDescriptor() 2575 desc.kernel_cmdline = i 2576 encoded_descriptors.extend(desc.encode()) 2577 2578 # Add descriptors from other images. 2579 if include_descriptors_from_image: 2580 for image in include_descriptors_from_image: 2581 image_handler = ImageHandler(image.name) 2582 (_, image_vbmeta_header, image_descriptors, _) = self._parse_image( 2583 image_handler) 2584 # Bump the required libavb version to support all included descriptors. 2585 h.bump_required_libavb_version_minor( 2586 image_vbmeta_header.required_libavb_version_minor) 2587 for desc in image_descriptors: 2588 encoded_descriptors.extend(desc.encode()) 2589 2590 # Load public key metadata blob, if requested. 2591 pkmd_blob = [] 2592 if public_key_metadata_path: 2593 with open(public_key_metadata_path) as f: 2594 pkmd_blob = f.read() 2595 2596 key = None 2597 encoded_key = bytearray() 2598 if alg.public_key_num_bytes > 0: 2599 if not key_path: 2600 raise AvbError('Key is required for algorithm {}'.format( 2601 algorithm_name)) 2602 encoded_key = encode_rsa_key(key_path) 2603 if len(encoded_key) != alg.public_key_num_bytes: 2604 raise AvbError('Key is wrong size for algorithm {}'.format( 2605 algorithm_name)) 2606 2607 # Override release string, if requested. 2608 if isinstance(release_string, (str, unicode)): 2609 h.release_string = release_string 2610 2611 # Append to release string, if requested. Also insert a space before. 2612 if isinstance(append_to_release_string, (str, unicode)): 2613 h.release_string += ' ' + append_to_release_string 2614 2615 # For the Auxiliary data block, descriptors are stored at offset 0, 2616 # followed by the public key, followed by the public key metadata blob. 2617 h.auxiliary_data_block_size = round_to_multiple( 2618 len(encoded_descriptors) + len(encoded_key) + len(pkmd_blob), 64) 2619 h.descriptors_offset = 0 2620 h.descriptors_size = len(encoded_descriptors) 2621 h.public_key_offset = h.descriptors_size 2622 h.public_key_size = len(encoded_key) 2623 h.public_key_metadata_offset = h.public_key_offset + h.public_key_size 2624 h.public_key_metadata_size = len(pkmd_blob) 2625 2626 # For the Authentication data block, the hash is first and then 2627 # the signature. 2628 h.authentication_data_block_size = round_to_multiple( 2629 alg.hash_num_bytes + alg.signature_num_bytes, 64) 2630 h.algorithm_type = alg.algorithm_type 2631 h.hash_offset = 0 2632 h.hash_size = alg.hash_num_bytes 2633 # Signature offset and size - it's stored right after the hash 2634 # (in Authentication data block). 2635 h.signature_offset = alg.hash_num_bytes 2636 h.signature_size = alg.signature_num_bytes 2637 2638 h.rollback_index = rollback_index 2639 h.flags = flags 2640 2641 # Generate Header data block. 2642 header_data_blob = h.encode() 2643 2644 # Generate Auxiliary data block. 2645 aux_data_blob = bytearray() 2646 aux_data_blob.extend(encoded_descriptors) 2647 aux_data_blob.extend(encoded_key) 2648 aux_data_blob.extend(pkmd_blob) 2649 padding_bytes = h.auxiliary_data_block_size - len(aux_data_blob) 2650 aux_data_blob.extend('\0' * padding_bytes) 2651 2652 # Calculate the hash. 2653 binary_hash = bytearray() 2654 binary_signature = bytearray() 2655 if algorithm_name != 'NONE': 2656 ha = hashlib.new(alg.hash_name) 2657 ha.update(header_data_blob) 2658 ha.update(aux_data_blob) 2659 binary_hash.extend(ha.digest()) 2660 2661 # Calculate the signature. 2662 padding_and_hash = str(bytearray(alg.padding)) + binary_hash 2663 binary_signature.extend(raw_sign(signing_helper, 2664 signing_helper_with_files, 2665 algorithm_name, 2666 alg.signature_num_bytes, key_path, 2667 padding_and_hash)) 2668 2669 # Generate Authentication data block. 2670 auth_data_blob = bytearray() 2671 auth_data_blob.extend(binary_hash) 2672 auth_data_blob.extend(binary_signature) 2673 padding_bytes = h.authentication_data_block_size - len(auth_data_blob) 2674 auth_data_blob.extend('\0' * padding_bytes) 2675 2676 return header_data_blob + auth_data_blob + aux_data_blob 2677 2678 def extract_public_key(self, key_path, output): 2679 """Implements the 'extract_public_key' command. 2680 2681 Arguments: 2682 key_path: The path to a RSA private key file. 2683 output: The file to write to. 2684 """ 2685 output.write(encode_rsa_key(key_path)) 2686 2687 def append_vbmeta_image(self, image_filename, vbmeta_image_filename, 2688 partition_size): 2689 """Implementation of the append_vbmeta_image command. 2690 2691 Arguments: 2692 image_filename: File to add the footer to. 2693 vbmeta_image_filename: File to get vbmeta struct from. 2694 partition_size: Size of partition. 2695 2696 Raises: 2697 AvbError: If an argument is incorrect. 2698 """ 2699 image = ImageHandler(image_filename) 2700 2701 if partition_size % image.block_size != 0: 2702 raise AvbError('Partition size of {} is not a multiple of the image ' 2703 'block size {}.'.format(partition_size, 2704 image.block_size)) 2705 2706 # If there's already a footer, truncate the image to its original 2707 # size. This way 'avbtool append_vbmeta_image' is idempotent. 2708 if image.image_size >= AvbFooter.SIZE: 2709 image.seek(image.image_size - AvbFooter.SIZE) 2710 try: 2711 footer = AvbFooter(image.read(AvbFooter.SIZE)) 2712 # Existing footer found. Just truncate. 2713 original_image_size = footer.original_image_size 2714 image.truncate(footer.original_image_size) 2715 except (LookupError, struct.error): 2716 original_image_size = image.image_size 2717 else: 2718 # Image size is too small to possibly contain a footer. 2719 original_image_size = image.image_size 2720 2721 # If anything goes wrong from here-on, restore the image back to 2722 # its original size. 2723 try: 2724 vbmeta_image_handler = ImageHandler(vbmeta_image_filename) 2725 vbmeta_blob = self._load_vbmeta_blob(vbmeta_image_handler) 2726 2727 # If the image isn't sparse, its size might not be a multiple of 2728 # the block size. This will screw up padding later so just grow it. 2729 if image.image_size % image.block_size != 0: 2730 assert not image.is_sparse 2731 padding_needed = image.block_size - (image.image_size%image.block_size) 2732 image.truncate(image.image_size + padding_needed) 2733 2734 # The append_raw() method requires content with size being a 2735 # multiple of |block_size| so add padding as needed. Also record 2736 # where this is written to since we'll need to put that in the 2737 # footer. 2738 vbmeta_offset = image.image_size 2739 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) - 2740 len(vbmeta_blob)) 2741 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed 2742 2743 # Append vbmeta blob and footer 2744 image.append_raw(vbmeta_blob_with_padding) 2745 vbmeta_end_offset = vbmeta_offset + len(vbmeta_blob_with_padding) 2746 2747 # Now insert a DONT_CARE chunk with enough bytes such that the 2748 # final Footer block is at the end of partition_size.. 2749 image.append_dont_care(partition_size - vbmeta_end_offset - 2750 1*image.block_size) 2751 2752 # Generate the Footer that tells where the VBMeta footer 2753 # is. Also put enough padding in the front of the footer since 2754 # we'll write out an entire block. 2755 footer = AvbFooter() 2756 footer.original_image_size = original_image_size 2757 footer.vbmeta_offset = vbmeta_offset 2758 footer.vbmeta_size = len(vbmeta_blob) 2759 footer_blob = footer.encode() 2760 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) + 2761 footer_blob) 2762 image.append_raw(footer_blob_with_padding) 2763 2764 except: 2765 # Truncate back to original size, then re-raise 2766 image.truncate(original_image_size) 2767 raise 2768 2769 def add_hash_footer(self, image_filename, partition_size, partition_name, 2770 hash_algorithm, salt, chain_partitions, algorithm_name, 2771 key_path, 2772 public_key_metadata_path, rollback_index, flags, props, 2773 props_from_file, kernel_cmdlines, 2774 setup_rootfs_from_kernel, 2775 include_descriptors_from_image, calc_max_image_size, 2776 signing_helper, signing_helper_with_files, 2777 release_string, append_to_release_string, 2778 output_vbmeta_image, do_not_append_vbmeta_image, 2779 print_required_libavb_version, use_persistent_digest, 2780 do_not_use_ab): 2781 """Implementation of the add_hash_footer on unsparse images. 2782 2783 Arguments: 2784 image_filename: File to add the footer to. 2785 partition_size: Size of partition. 2786 partition_name: Name of partition (without A/B suffix). 2787 hash_algorithm: Hash algorithm to use. 2788 salt: Salt to use as a hexadecimal string or None to use /dev/urandom. 2789 chain_partitions: List of partitions to chain. 2790 algorithm_name: Name of algorithm to use. 2791 key_path: Path to key to use or None. 2792 public_key_metadata_path: Path to public key metadata or None. 2793 rollback_index: Rollback index. 2794 flags: Flags value to use in the image. 2795 props: Properties to insert (List of strings of the form 'key:value'). 2796 props_from_file: Properties to insert (List of strings 'key:<path>'). 2797 kernel_cmdlines: Kernel cmdlines to insert (list of strings). 2798 setup_rootfs_from_kernel: None or file to generate 2799 dm-verity kernel cmdline from. 2800 include_descriptors_from_image: List of file objects for which 2801 to insert descriptors from. 2802 calc_max_image_size: Don't store the footer - instead calculate the 2803 maximum image size leaving enough room for metadata with the 2804 given |partition_size|. 2805 signing_helper: Program which signs a hash and return signature. 2806 signing_helper_with_files: Same as signing_helper but uses files instead. 2807 release_string: None or avbtool release string. 2808 append_to_release_string: None or string to append. 2809 output_vbmeta_image: If not None, also write vbmeta struct to this file. 2810 do_not_append_vbmeta_image: If True, don't append vbmeta struct. 2811 print_required_libavb_version: True to only print required libavb version. 2812 use_persistent_digest: Use a persistent digest on device. 2813 do_not_use_ab: This partition does not use A/B. 2814 2815 Raises: 2816 AvbError: If an argument is incorrect. 2817 """ 2818 2819 required_libavb_version_minor = 0 2820 if use_persistent_digest or do_not_use_ab: 2821 required_libavb_version_minor = 1 2822 2823 # If we're asked to calculate minimum required libavb version, we're done. 2824 if print_required_libavb_version: 2825 print '1.{}'.format(required_libavb_version_minor) 2826 return 2827 2828 # First, calculate the maximum image size such that an image 2829 # this size + metadata (footer + vbmeta struct) fits in 2830 # |partition_size|. 2831 max_metadata_size = self.MAX_VBMETA_SIZE + self.MAX_FOOTER_SIZE 2832 if partition_size < max_metadata_size: 2833 raise AvbError('Parition size of {} is too small. ' 2834 'Needs to be at least {}'.format( 2835 partition_size, max_metadata_size)) 2836 max_image_size = partition_size - max_metadata_size 2837 2838 # If we're asked to only calculate the maximum image size, we're done. 2839 if calc_max_image_size: 2840 print '{}'.format(max_image_size) 2841 return 2842 2843 image = ImageHandler(image_filename) 2844 2845 if partition_size % image.block_size != 0: 2846 raise AvbError('Partition size of {} is not a multiple of the image ' 2847 'block size {}.'.format(partition_size, 2848 image.block_size)) 2849 2850 # If there's already a footer, truncate the image to its original 2851 # size. This way 'avbtool add_hash_footer' is idempotent (modulo 2852 # salts). 2853 if image.image_size >= AvbFooter.SIZE: 2854 image.seek(image.image_size - AvbFooter.SIZE) 2855 try: 2856 footer = AvbFooter(image.read(AvbFooter.SIZE)) 2857 # Existing footer found. Just truncate. 2858 original_image_size = footer.original_image_size 2859 image.truncate(footer.original_image_size) 2860 except (LookupError, struct.error): 2861 original_image_size = image.image_size 2862 else: 2863 # Image size is too small to possibly contain a footer. 2864 original_image_size = image.image_size 2865 2866 # If anything goes wrong from here-on, restore the image back to 2867 # its original size. 2868 try: 2869 # If image size exceeds the maximum image size, fail. 2870 if image.image_size > max_image_size: 2871 raise AvbError('Image size of {} exceeds maximum image ' 2872 'size of {} in order to fit in a partition ' 2873 'size of {}.'.format(image.image_size, max_image_size, 2874 partition_size)) 2875 2876 digest_size = len(hashlib.new(name=hash_algorithm).digest()) 2877 if salt: 2878 salt = salt.decode('hex') 2879 else: 2880 if salt is None: 2881 # If salt is not explicitly specified, choose a hash 2882 # that's the same size as the hash size. 2883 hash_size = digest_size 2884 salt = open('/dev/urandom').read(hash_size) 2885 else: 2886 salt = '' 2887 2888 hasher = hashlib.new(name=hash_algorithm, string=salt) 2889 # TODO(zeuthen): might want to read this in chunks to avoid 2890 # memory pressure, then again, this is only supposed to be used 2891 # on kernel/initramfs partitions. Possible optimization. 2892 image.seek(0) 2893 hasher.update(image.read(image.image_size)) 2894 digest = hasher.digest() 2895 2896 h_desc = AvbHashDescriptor() 2897 h_desc.image_size = image.image_size 2898 h_desc.hash_algorithm = hash_algorithm 2899 h_desc.partition_name = partition_name 2900 h_desc.salt = salt 2901 h_desc.flags = 0 2902 if do_not_use_ab: 2903 h_desc.flags |= 1 # AVB_HASH_DESCRIPTOR_FLAGS_DO_NOT_USE_AB 2904 if not use_persistent_digest: 2905 h_desc.digest = digest 2906 2907 # Generate the VBMeta footer. 2908 ht_desc_to_setup = None 2909 vbmeta_blob = self._generate_vbmeta_blob( 2910 algorithm_name, key_path, public_key_metadata_path, [h_desc], 2911 chain_partitions, rollback_index, flags, props, props_from_file, 2912 kernel_cmdlines, setup_rootfs_from_kernel, ht_desc_to_setup, 2913 include_descriptors_from_image, signing_helper, 2914 signing_helper_with_files, release_string, 2915 append_to_release_string, required_libavb_version_minor) 2916 2917 # Write vbmeta blob, if requested. 2918 if output_vbmeta_image: 2919 output_vbmeta_image.write(vbmeta_blob) 2920 2921 # Append vbmeta blob and footer, unless requested not to. 2922 if not do_not_append_vbmeta_image: 2923 # If the image isn't sparse, its size might not be a multiple of 2924 # the block size. This will screw up padding later so just grow it. 2925 if image.image_size % image.block_size != 0: 2926 assert not image.is_sparse 2927 padding_needed = image.block_size - ( 2928 image.image_size % image.block_size) 2929 image.truncate(image.image_size + padding_needed) 2930 2931 # The append_raw() method requires content with size being a 2932 # multiple of |block_size| so add padding as needed. Also record 2933 # where this is written to since we'll need to put that in the 2934 # footer. 2935 vbmeta_offset = image.image_size 2936 padding_needed = ( 2937 round_to_multiple(len(vbmeta_blob), image.block_size) - 2938 len(vbmeta_blob)) 2939 vbmeta_blob_with_padding = vbmeta_blob + '\0' * padding_needed 2940 2941 image.append_raw(vbmeta_blob_with_padding) 2942 vbmeta_end_offset = vbmeta_offset + len(vbmeta_blob_with_padding) 2943 2944 # Now insert a DONT_CARE chunk with enough bytes such that the 2945 # final Footer block is at the end of partition_size.. 2946 image.append_dont_care(partition_size - vbmeta_end_offset - 2947 1*image.block_size) 2948 2949 # Generate the Footer that tells where the VBMeta footer 2950 # is. Also put enough padding in the front of the footer since 2951 # we'll write out an entire block. 2952 footer = AvbFooter() 2953 footer.original_image_size = original_image_size 2954 footer.vbmeta_offset = vbmeta_offset 2955 footer.vbmeta_size = len(vbmeta_blob) 2956 footer_blob = footer.encode() 2957 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) + 2958 footer_blob) 2959 image.append_raw(footer_blob_with_padding) 2960 2961 except: 2962 # Truncate back to original size, then re-raise 2963 image.truncate(original_image_size) 2964 raise 2965 2966 def add_hashtree_footer(self, image_filename, partition_size, partition_name, 2967 generate_fec, fec_num_roots, hash_algorithm, 2968 block_size, salt, chain_partitions, algorithm_name, 2969 key_path, 2970 public_key_metadata_path, rollback_index, flags, 2971 props, props_from_file, kernel_cmdlines, 2972 setup_rootfs_from_kernel, 2973 setup_as_rootfs_from_kernel, 2974 include_descriptors_from_image, 2975 calc_max_image_size, signing_helper, 2976 signing_helper_with_files, 2977 release_string, append_to_release_string, 2978 output_vbmeta_image, do_not_append_vbmeta_image, 2979 print_required_libavb_version, 2980 use_persistent_root_digest, do_not_use_ab): 2981 """Implements the 'add_hashtree_footer' command. 2982 2983 See https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity for 2984 more information about dm-verity and these hashes. 2985 2986 Arguments: 2987 image_filename: File to add the footer to. 2988 partition_size: Size of partition. 2989 partition_name: Name of partition (without A/B suffix). 2990 generate_fec: If True, generate FEC codes. 2991 fec_num_roots: Number of roots for FEC. 2992 hash_algorithm: Hash algorithm to use. 2993 block_size: Block size to use. 2994 salt: Salt to use as a hexadecimal string or None to use /dev/urandom. 2995 chain_partitions: List of partitions to chain. 2996 algorithm_name: Name of algorithm to use. 2997 key_path: Path to key to use or None. 2998 public_key_metadata_path: Path to public key metadata or None. 2999 rollback_index: Rollback index. 3000 flags: Flags value to use in the image. 3001 props: Properties to insert (List of strings of the form 'key:value'). 3002 props_from_file: Properties to insert (List of strings 'key:<path>'). 3003 kernel_cmdlines: Kernel cmdlines to insert (list of strings). 3004 setup_rootfs_from_kernel: None or file to generate 3005 dm-verity kernel cmdline from. 3006 setup_as_rootfs_from_kernel: If True, generate dm-verity kernel 3007 cmdline to set up rootfs. 3008 include_descriptors_from_image: List of file objects for which 3009 to insert descriptors from. 3010 calc_max_image_size: Don't store the hashtree or footer - instead 3011 calculate the maximum image size leaving enough room for hashtree 3012 and metadata with the given |partition_size|. 3013 signing_helper: Program which signs a hash and return signature. 3014 signing_helper_with_files: Same as signing_helper but uses files instead. 3015 release_string: None or avbtool release string. 3016 append_to_release_string: None or string to append. 3017 output_vbmeta_image: If not None, also write vbmeta struct to this file. 3018 do_not_append_vbmeta_image: If True, don't append vbmeta struct. 3019 print_required_libavb_version: True to only print required libavb version. 3020 use_persistent_root_digest: Use a persistent root digest on device. 3021 do_not_use_ab: The partition does not use A/B. 3022 3023 Raises: 3024 AvbError: If an argument is incorrect. 3025 """ 3026 3027 required_libavb_version_minor = 0 3028 if use_persistent_root_digest or do_not_use_ab: 3029 required_libavb_version_minor = 1 3030 3031 # If we're asked to calculate minimum required libavb version, we're done. 3032 if print_required_libavb_version: 3033 print '1.{}'.format(required_libavb_version_minor) 3034 return 3035 3036 digest_size = len(hashlib.new(name=hash_algorithm).digest()) 3037 digest_padding = round_to_pow2(digest_size) - digest_size 3038 3039 # First, calculate the maximum image size such that an image 3040 # this size + the hashtree + metadata (footer + vbmeta struct) 3041 # fits in |partition_size|. We use very conservative figures for 3042 # metadata. 3043 (_, max_tree_size) = calc_hash_level_offsets( 3044 partition_size, block_size, digest_size + digest_padding) 3045 max_fec_size = 0 3046 if generate_fec: 3047 max_fec_size = calc_fec_data_size(partition_size, fec_num_roots) 3048 max_metadata_size = (max_fec_size + max_tree_size + 3049 self.MAX_VBMETA_SIZE + 3050 self.MAX_FOOTER_SIZE) 3051 max_image_size = partition_size - max_metadata_size 3052 3053 # If we're asked to only calculate the maximum image size, we're done. 3054 if calc_max_image_size: 3055 print '{}'.format(max_image_size) 3056 return 3057 3058 image = ImageHandler(image_filename) 3059 3060 if partition_size % image.block_size != 0: 3061 raise AvbError('Partition size of {} is not a multiple of the image ' 3062 'block size {}.'.format(partition_size, 3063 image.block_size)) 3064 3065 # If there's already a footer, truncate the image to its original 3066 # size. This way 'avbtool add_hashtree_footer' is idempotent 3067 # (modulo salts). 3068 if image.image_size >= AvbFooter.SIZE: 3069 image.seek(image.image_size - AvbFooter.SIZE) 3070 try: 3071 footer = AvbFooter(image.read(AvbFooter.SIZE)) 3072 # Existing footer found. Just truncate. 3073 original_image_size = footer.original_image_size 3074 image.truncate(footer.original_image_size) 3075 except (LookupError, struct.error): 3076 original_image_size = image.image_size 3077 else: 3078 # Image size is too small to possibly contain a footer. 3079 original_image_size = image.image_size 3080 3081 # If anything goes wrong from here-on, restore the image back to 3082 # its original size. 3083 try: 3084 # Ensure image is multiple of block_size. 3085 rounded_image_size = round_to_multiple(image.image_size, block_size) 3086 if rounded_image_size > image.image_size: 3087 image.append_raw('\0' * (rounded_image_size - image.image_size)) 3088 3089 # If image size exceeds the maximum image size, fail. 3090 if image.image_size > max_image_size: 3091 raise AvbError('Image size of {} exceeds maximum image ' 3092 'size of {} in order to fit in a partition ' 3093 'size of {}.'.format(image.image_size, max_image_size, 3094 partition_size)) 3095 3096 if salt: 3097 salt = salt.decode('hex') 3098 else: 3099 if salt is None: 3100 # If salt is not explicitly specified, choose a hash 3101 # that's the same size as the hash size. 3102 hash_size = digest_size 3103 salt = open('/dev/urandom').read(hash_size) 3104 else: 3105 salt = '' 3106 3107 # Hashes are stored upside down so we need to calculate hash 3108 # offsets in advance. 3109 (hash_level_offsets, tree_size) = calc_hash_level_offsets( 3110 image.image_size, block_size, digest_size + digest_padding) 3111 3112 # If the image isn't sparse, its size might not be a multiple of 3113 # the block size. This will screw up padding later so just grow it. 3114 if image.image_size % image.block_size != 0: 3115 assert not image.is_sparse 3116 padding_needed = image.block_size - (image.image_size%image.block_size) 3117 image.truncate(image.image_size + padding_needed) 3118 3119 # Generate the tree and add padding as needed. 3120 tree_offset = image.image_size 3121 root_digest, hash_tree = generate_hash_tree(image, image.image_size, 3122 block_size, 3123 hash_algorithm, salt, 3124 digest_padding, 3125 hash_level_offsets, 3126 tree_size) 3127 3128 # Generate HashtreeDescriptor with details about the tree we 3129 # just generated. 3130 ht_desc = AvbHashtreeDescriptor() 3131 ht_desc.dm_verity_version = 1 3132 ht_desc.image_size = image.image_size 3133 ht_desc.tree_offset = tree_offset 3134 ht_desc.tree_size = tree_size 3135 ht_desc.data_block_size = block_size 3136 ht_desc.hash_block_size = block_size 3137 ht_desc.hash_algorithm = hash_algorithm 3138 ht_desc.partition_name = partition_name 3139 ht_desc.salt = salt 3140 if do_not_use_ab: 3141 ht_desc.flags |= 1 # AVB_HASHTREE_DESCRIPTOR_FLAGS_DO_NOT_USE_AB 3142 if not use_persistent_root_digest: 3143 ht_desc.root_digest = root_digest 3144 3145 # Write the hash tree 3146 padding_needed = (round_to_multiple(len(hash_tree), image.block_size) - 3147 len(hash_tree)) 3148 hash_tree_with_padding = hash_tree + '\0'*padding_needed 3149 image.append_raw(hash_tree_with_padding) 3150 len_hashtree_and_fec = len(hash_tree_with_padding) 3151 3152 # Generate FEC codes, if requested. 3153 if generate_fec: 3154 fec_data = generate_fec_data(image_filename, fec_num_roots) 3155 padding_needed = (round_to_multiple(len(fec_data), image.block_size) - 3156 len(fec_data)) 3157 fec_data_with_padding = fec_data + '\0'*padding_needed 3158 fec_offset = image.image_size 3159 image.append_raw(fec_data_with_padding) 3160 len_hashtree_and_fec += len(fec_data_with_padding) 3161 # Update the hashtree descriptor. 3162 ht_desc.fec_num_roots = fec_num_roots 3163 ht_desc.fec_offset = fec_offset 3164 ht_desc.fec_size = len(fec_data) 3165 3166 ht_desc_to_setup = None 3167 if setup_as_rootfs_from_kernel: 3168 ht_desc_to_setup = ht_desc 3169 3170 # Generate the VBMeta footer and add padding as needed. 3171 vbmeta_offset = tree_offset + len_hashtree_and_fec 3172 vbmeta_blob = self._generate_vbmeta_blob( 3173 algorithm_name, key_path, public_key_metadata_path, [ht_desc], 3174 chain_partitions, rollback_index, flags, props, props_from_file, 3175 kernel_cmdlines, setup_rootfs_from_kernel, ht_desc_to_setup, 3176 include_descriptors_from_image, signing_helper, 3177 signing_helper_with_files, release_string, 3178 append_to_release_string, required_libavb_version_minor) 3179 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) - 3180 len(vbmeta_blob)) 3181 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed 3182 3183 # Write vbmeta blob, if requested. 3184 if output_vbmeta_image: 3185 output_vbmeta_image.write(vbmeta_blob) 3186 3187 # Append vbmeta blob and footer, unless requested not to. 3188 if not do_not_append_vbmeta_image: 3189 image.append_raw(vbmeta_blob_with_padding) 3190 3191 # Now insert a DONT_CARE chunk with enough bytes such that the 3192 # final Footer block is at the end of partition_size.. 3193 image.append_dont_care(partition_size - image.image_size - 3194 1*image.block_size) 3195 3196 # Generate the Footer that tells where the VBMeta footer 3197 # is. Also put enough padding in the front of the footer since 3198 # we'll write out an entire block. 3199 footer = AvbFooter() 3200 footer.original_image_size = original_image_size 3201 footer.vbmeta_offset = vbmeta_offset 3202 footer.vbmeta_size = len(vbmeta_blob) 3203 footer_blob = footer.encode() 3204 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) + 3205 footer_blob) 3206 image.append_raw(footer_blob_with_padding) 3207 3208 except: 3209 # Truncate back to original size, then re-raise. 3210 image.truncate(original_image_size) 3211 raise 3212 3213 def make_atx_certificate(self, output, authority_key_path, subject_key_path, 3214 subject_key_version, subject, 3215 is_intermediate_authority, signing_helper, 3216 signing_helper_with_files): 3217 """Implements the 'make_atx_certificate' command. 3218 3219 Android Things certificates are required for Android Things public key 3220 metadata. They chain the vbmeta signing key for a particular product back to 3221 a fused, permanent root key. These certificates are fixed-length and fixed- 3222 format with the explicit goal of not parsing ASN.1 in bootloader code. 3223 3224 Arguments: 3225 output: Certificate will be written to this file on success. 3226 authority_key_path: A PEM file path with the authority private key. 3227 If None, then a certificate will be created without a 3228 signature. The signature can be created out-of-band 3229 and appended. 3230 subject_key_path: Path to a PEM or DER subject public key. 3231 subject_key_version: A 64-bit version value. If this is None, the number 3232 of seconds since the epoch is used. 3233 subject: A subject identifier. For Product Signing Key certificates this 3234 should be the same Product ID found in the permanent attributes. 3235 is_intermediate_authority: True if the certificate is for an intermediate 3236 authority. 3237 signing_helper: Program which signs a hash and returns the signature. 3238 signing_helper_with_files: Same as signing_helper but uses files instead. 3239 """ 3240 signed_data = bytearray() 3241 signed_data.extend(struct.pack('<I', 1)) # Format Version 3242 signed_data.extend(encode_rsa_key(subject_key_path)) 3243 hasher = hashlib.sha256() 3244 hasher.update(subject) 3245 signed_data.extend(hasher.digest()) 3246 usage = 'com.google.android.things.vboot' 3247 if is_intermediate_authority: 3248 usage += '.ca' 3249 hasher = hashlib.sha256() 3250 hasher.update(usage) 3251 signed_data.extend(hasher.digest()) 3252 if not subject_key_version: 3253 subject_key_version = int(time.time()) 3254 signed_data.extend(struct.pack('<Q', subject_key_version)) 3255 signature = bytearray() 3256 if authority_key_path: 3257 padding_and_hash = bytearray() 3258 algorithm_name = 'SHA512_RSA4096' 3259 alg = ALGORITHMS[algorithm_name] 3260 hasher = hashlib.sha512() 3261 padding_and_hash.extend(alg.padding) 3262 hasher.update(signed_data) 3263 padding_and_hash.extend(hasher.digest()) 3264 signature.extend(raw_sign(signing_helper, signing_helper_with_files, 3265 algorithm_name, 3266 alg.signature_num_bytes, authority_key_path, 3267 padding_and_hash)) 3268 output.write(signed_data) 3269 output.write(signature) 3270 3271 def make_atx_permanent_attributes(self, output, root_authority_key_path, 3272 product_id): 3273 """Implements the 'make_atx_permanent_attributes' command. 3274 3275 Android Things permanent attributes are designed to be permanent for a 3276 particular product and a hash of these attributes should be fused into 3277 hardware to enforce this. 3278 3279 Arguments: 3280 output: Attributes will be written to this file on success. 3281 root_authority_key_path: Path to a PEM or DER public key for 3282 the root authority. 3283 product_id: A 16-byte Product ID. 3284 3285 Raises: 3286 AvbError: If an argument is incorrect. 3287 """ 3288 EXPECTED_PRODUCT_ID_SIZE = 16 3289 if len(product_id) != EXPECTED_PRODUCT_ID_SIZE: 3290 raise AvbError('Invalid Product ID length.') 3291 output.write(struct.pack('<I', 1)) # Format Version 3292 output.write(encode_rsa_key(root_authority_key_path)) 3293 output.write(product_id) 3294 3295 def make_atx_metadata(self, output, intermediate_key_certificate, 3296 product_key_certificate): 3297 """Implements the 'make_atx_metadata' command. 3298 3299 Android Things metadata are included in vbmeta images to facilitate 3300 verification. The output of this command can be used as the 3301 public_key_metadata argument to other commands. 3302 3303 Arguments: 3304 output: Metadata will be written to this file on success. 3305 intermediate_key_certificate: A certificate file as output by 3306 make_atx_certificate with 3307 is_intermediate_authority set to true. 3308 product_key_certificate: A certificate file as output by 3309 make_atx_certificate with 3310 is_intermediate_authority set to false. 3311 3312 Raises: 3313 AvbError: If an argument is incorrect. 3314 """ 3315 EXPECTED_CERTIFICATE_SIZE = 1620 3316 if len(intermediate_key_certificate) != EXPECTED_CERTIFICATE_SIZE: 3317 raise AvbError('Invalid intermediate key certificate length.') 3318 if len(product_key_certificate) != EXPECTED_CERTIFICATE_SIZE: 3319 raise AvbError('Invalid product key certificate length.') 3320 output.write(struct.pack('<I', 1)) # Format Version 3321 output.write(intermediate_key_certificate) 3322 output.write(product_key_certificate) 3323 3324 3325 def calc_hash_level_offsets(image_size, block_size, digest_size): 3326 """Calculate the offsets of all the hash-levels in a Merkle-tree. 3327 3328 Arguments: 3329 image_size: The size of the image to calculate a Merkle-tree for. 3330 block_size: The block size, e.g. 4096. 3331 digest_size: The size of each hash, e.g. 32 for SHA-256. 3332 3333 Returns: 3334 A tuple where the first argument is an array of offsets and the 3335 second is size of the tree, in bytes. 3336 """ 3337 level_offsets = [] 3338 level_sizes = [] 3339 tree_size = 0 3340 3341 num_levels = 0 3342 size = image_size 3343 while size > block_size: 3344 num_blocks = (size + block_size - 1) / block_size 3345 level_size = round_to_multiple(num_blocks * digest_size, block_size) 3346 3347 level_sizes.append(level_size) 3348 tree_size += level_size 3349 num_levels += 1 3350 3351 size = level_size 3352 3353 for n in range(0, num_levels): 3354 offset = 0 3355 for m in range(n + 1, num_levels): 3356 offset += level_sizes[m] 3357 level_offsets.append(offset) 3358 3359 return level_offsets, tree_size 3360 3361 3362 # See system/extras/libfec/include/fec/io.h for these definitions. 3363 FEC_FOOTER_FORMAT = '<LLLLLQ32s' 3364 FEC_MAGIC = 0xfecfecfe 3365 3366 3367 def calc_fec_data_size(image_size, num_roots): 3368 """Calculates how much space FEC data will take. 3369 3370 Args: 3371 image_size: The size of the image. 3372 num_roots: Number of roots. 3373 3374 Returns: 3375 The number of bytes needed for FEC for an image of the given size 3376 and with the requested number of FEC roots. 3377 3378 Raises: 3379 ValueError: If output from the 'fec' tool is invalid. 3380 3381 """ 3382 p = subprocess.Popen( 3383 ['fec', '--print-fec-size', str(image_size), '--roots', str(num_roots)], 3384 stdout=subprocess.PIPE, 3385 stderr=subprocess.PIPE) 3386 (pout, perr) = p.communicate() 3387 retcode = p.wait() 3388 if retcode != 0: 3389 raise ValueError('Error invoking fec: {}'.format(perr)) 3390 return int(pout) 3391 3392 3393 def generate_fec_data(image_filename, num_roots): 3394 """Generate FEC codes for an image. 3395 3396 Args: 3397 image_filename: The filename of the image. 3398 num_roots: Number of roots. 3399 3400 Returns: 3401 The FEC data blob. 3402 3403 Raises: 3404 ValueError: If output from the 'fec' tool is invalid. 3405 """ 3406 fec_tmpfile = tempfile.NamedTemporaryFile() 3407 subprocess.check_call( 3408 ['fec', '--encode', '--roots', str(num_roots), image_filename, 3409 fec_tmpfile.name], 3410 stderr=open(os.devnull)) 3411 fec_data = fec_tmpfile.read() 3412 footer_size = struct.calcsize(FEC_FOOTER_FORMAT) 3413 footer_data = fec_data[-footer_size:] 3414 (magic, _, _, num_roots, fec_size, _, _) = struct.unpack(FEC_FOOTER_FORMAT, 3415 footer_data) 3416 if magic != FEC_MAGIC: 3417 raise ValueError('Unexpected magic in FEC footer') 3418 return fec_data[0:fec_size] 3419 3420 3421 def generate_hash_tree(image, image_size, block_size, hash_alg_name, salt, 3422 digest_padding, hash_level_offsets, tree_size): 3423 """Generates a Merkle-tree for a file. 3424 3425 Args: 3426 image: The image, as a file. 3427 image_size: The size of the image. 3428 block_size: The block size, e.g. 4096. 3429 hash_alg_name: The hash algorithm, e.g. 'sha256' or 'sha1'. 3430 salt: The salt to use. 3431 digest_padding: The padding for each digest. 3432 hash_level_offsets: The offsets from calc_hash_level_offsets(). 3433 tree_size: The size of the tree, in number of bytes. 3434 3435 Returns: 3436 A tuple where the first element is the top-level hash and the 3437 second element is the hash-tree. 3438 """ 3439 hash_ret = bytearray(tree_size) 3440 hash_src_offset = 0 3441 hash_src_size = image_size 3442 level_num = 0 3443 while hash_src_size > block_size: 3444 level_output = '' 3445 remaining = hash_src_size 3446 while remaining > 0: 3447 hasher = hashlib.new(name=hash_alg_name, string=salt) 3448 # Only read from the file for the first level - for subsequent 3449 # levels, access the array we're building. 3450 if level_num == 0: 3451 image.seek(hash_src_offset + hash_src_size - remaining) 3452 data = image.read(min(remaining, block_size)) 3453 else: 3454 offset = hash_level_offsets[level_num - 1] + hash_src_size - remaining 3455 data = hash_ret[offset:offset + block_size] 3456 hasher.update(data) 3457 3458 remaining -= len(data) 3459 if len(data) < block_size: 3460 hasher.update('\0' * (block_size - len(data))) 3461 level_output += hasher.digest() 3462 if digest_padding > 0: 3463 level_output += '\0' * digest_padding 3464 3465 padding_needed = (round_to_multiple( 3466 len(level_output), block_size) - len(level_output)) 3467 level_output += '\0' * padding_needed 3468 3469 # Copy level-output into resulting tree. 3470 offset = hash_level_offsets[level_num] 3471 hash_ret[offset:offset + len(level_output)] = level_output 3472 3473 # Continue on to the next level. 3474 hash_src_size = len(level_output) 3475 level_num += 1 3476 3477 hasher = hashlib.new(name=hash_alg_name, string=salt) 3478 hasher.update(level_output) 3479 return hasher.digest(), hash_ret 3480 3481 3482 class AvbTool(object): 3483 """Object for avbtool command-line tool.""" 3484 3485 def __init__(self): 3486 """Initializer method.""" 3487 self.avb = Avb() 3488 3489 def _add_common_args(self, sub_parser): 3490 """Adds arguments used by several sub-commands. 3491 3492 Arguments: 3493 sub_parser: The parser to add arguments to. 3494 """ 3495 sub_parser.add_argument('--algorithm', 3496 help='Algorithm to use (default: NONE)', 3497 metavar='ALGORITHM', 3498 default='NONE') 3499 sub_parser.add_argument('--key', 3500 help='Path to RSA private key file', 3501 metavar='KEY', 3502 required=False) 3503 sub_parser.add_argument('--signing_helper', 3504 help='Path to helper used for signing', 3505 metavar='APP', 3506 default=None, 3507 required=False) 3508 sub_parser.add_argument('--signing_helper_with_files', 3509 help='Path to helper used for signing using files', 3510 metavar='APP', 3511 default=None, 3512 required=False) 3513 sub_parser.add_argument('--public_key_metadata', 3514 help='Path to public key metadata file', 3515 metavar='KEY_METADATA', 3516 required=False) 3517 sub_parser.add_argument('--rollback_index', 3518 help='Rollback Index', 3519 type=parse_number, 3520 default=0) 3521 # This is used internally for unit tests. Do not include in --help output. 3522 sub_parser.add_argument('--internal_release_string', 3523 help=argparse.SUPPRESS) 3524 sub_parser.add_argument('--append_to_release_string', 3525 help='Text to append to release string', 3526 metavar='STR') 3527 sub_parser.add_argument('--prop', 3528 help='Add property', 3529 metavar='KEY:VALUE', 3530 action='append') 3531 sub_parser.add_argument('--prop_from_file', 3532 help='Add property from file', 3533 metavar='KEY:PATH', 3534 action='append') 3535 sub_parser.add_argument('--kernel_cmdline', 3536 help='Add kernel cmdline', 3537 metavar='CMDLINE', 3538 action='append') 3539 # TODO(zeuthen): the --setup_rootfs_from_kernel option used to be called 3540 # --generate_dm_verity_cmdline_from_hashtree. Remove support for the latter 3541 # at some future point. 3542 sub_parser.add_argument('--setup_rootfs_from_kernel', 3543 '--generate_dm_verity_cmdline_from_hashtree', 3544 metavar='IMAGE', 3545 help='Adds kernel cmdline to set up IMAGE', 3546 type=argparse.FileType('rb')) 3547 sub_parser.add_argument('--include_descriptors_from_image', 3548 help='Include descriptors from image', 3549 metavar='IMAGE', 3550 action='append', 3551 type=argparse.FileType('rb')) 3552 sub_parser.add_argument('--print_required_libavb_version', 3553 help=('Don\'t store the footer - ' 3554 'instead calculate the required libavb ' 3555 'version for the given options.'), 3556 action='store_true') 3557 # These are only allowed from top-level vbmeta and boot-in-lieu-of-vbmeta. 3558 sub_parser.add_argument('--chain_partition', 3559 help='Allow signed integrity-data for partition', 3560 metavar='PART_NAME:ROLLBACK_SLOT:KEY_PATH', 3561 action='append') 3562 sub_parser.add_argument('--flags', 3563 help='VBMeta flags', 3564 type=parse_number, 3565 default=0) 3566 sub_parser.add_argument('--set_hashtree_disabled_flag', 3567 help='Set the HASHTREE_DISABLED flag', 3568 action='store_true') 3569 3570 def _add_common_footer_args(self, sub_parser): 3571 """Adds arguments used by add_*_footer sub-commands. 3572 3573 Arguments: 3574 sub_parser: The parser to add arguments to. 3575 """ 3576 sub_parser.add_argument('--use_persistent_digest', 3577 help='Use a persistent digest on device instead of ' 3578 'storing the digest in the descriptor. This ' 3579 'cannot be used with A/B so must be combined ' 3580 'with --do_not_use_ab when an A/B suffix is ' 3581 'expected at runtime.', 3582 action='store_true') 3583 sub_parser.add_argument('--do_not_use_ab', 3584 help='The partition does not use A/B even when an ' 3585 'A/B suffix is present. This must not be used ' 3586 'for vbmeta or chained partitions.', 3587 action='store_true') 3588 3589 def _fixup_common_args(self, args): 3590 """Common fixups needed by subcommands. 3591 3592 Arguments: 3593 args: Arguments to modify. 3594 3595 Returns: 3596 The modified arguments. 3597 """ 3598 if args.set_hashtree_disabled_flag: 3599 args.flags |= AVB_VBMETA_IMAGE_FLAGS_HASHTREE_DISABLED 3600 return args 3601 3602 def run(self, argv): 3603 """Command-line processor. 3604 3605 Arguments: 3606 argv: Pass sys.argv from main. 3607 """ 3608 parser = argparse.ArgumentParser() 3609 subparsers = parser.add_subparsers(title='subcommands') 3610 3611 sub_parser = subparsers.add_parser('version', 3612 help='Prints version of avbtool.') 3613 sub_parser.set_defaults(func=self.version) 3614 3615 sub_parser = subparsers.add_parser('extract_public_key', 3616 help='Extract public key.') 3617 sub_parser.add_argument('--key', 3618 help='Path to RSA private key file', 3619 required=True) 3620 sub_parser.add_argument('--output', 3621 help='Output file name', 3622 type=argparse.FileType('wb'), 3623 required=True) 3624 sub_parser.set_defaults(func=self.extract_public_key) 3625 3626 sub_parser = subparsers.add_parser('make_vbmeta_image', 3627 help='Makes a vbmeta image.') 3628 sub_parser.add_argument('--output', 3629 help='Output file name', 3630 type=argparse.FileType('wb')) 3631 sub_parser.add_argument('--padding_size', 3632 metavar='NUMBER', 3633 help='If non-zero, pads output with NUL bytes so ' 3634 'its size is a multiple of NUMBER (default: 0)', 3635 type=parse_number, 3636 default=0) 3637 self._add_common_args(sub_parser) 3638 sub_parser.set_defaults(func=self.make_vbmeta_image) 3639 3640 sub_parser = subparsers.add_parser('add_hash_footer', 3641 help='Add hashes and footer to image.') 3642 sub_parser.add_argument('--image', 3643 help='Image to add hashes to', 3644 type=argparse.FileType('rab+')) 3645 sub_parser.add_argument('--partition_size', 3646 help='Partition size', 3647 type=parse_number) 3648 sub_parser.add_argument('--partition_name', 3649 help='Partition name', 3650 default=None) 3651 sub_parser.add_argument('--hash_algorithm', 3652 help='Hash algorithm to use (default: sha256)', 3653 default='sha256') 3654 sub_parser.add_argument('--salt', 3655 help='Salt in hex (default: /dev/urandom)') 3656 sub_parser.add_argument('--calc_max_image_size', 3657 help=('Don\'t store the footer - ' 3658 'instead calculate the maximum image size ' 3659 'leaving enough room for metadata with ' 3660 'the given partition size.'), 3661 action='store_true') 3662 sub_parser.add_argument('--output_vbmeta_image', 3663 help='Also write vbmeta struct to file', 3664 type=argparse.FileType('wb')) 3665 sub_parser.add_argument('--do_not_append_vbmeta_image', 3666 help=('Do not append vbmeta struct or footer ' 3667 'to the image'), 3668 action='store_true') 3669 self._add_common_args(sub_parser) 3670 self._add_common_footer_args(sub_parser) 3671 sub_parser.set_defaults(func=self.add_hash_footer) 3672 3673 sub_parser = subparsers.add_parser('append_vbmeta_image', 3674 help='Append vbmeta image to image.') 3675 sub_parser.add_argument('--image', 3676 help='Image to append vbmeta blob to', 3677 type=argparse.FileType('rab+')) 3678 sub_parser.add_argument('--partition_size', 3679 help='Partition size', 3680 type=parse_number, 3681 required=True) 3682 sub_parser.add_argument('--vbmeta_image', 3683 help='Image with vbmeta blob to append', 3684 type=argparse.FileType('rb')) 3685 sub_parser.set_defaults(func=self.append_vbmeta_image) 3686 3687 sub_parser = subparsers.add_parser('add_hashtree_footer', 3688 help='Add hashtree and footer to image.') 3689 sub_parser.add_argument('--image', 3690 help='Image to add hashtree to', 3691 type=argparse.FileType('rab+')) 3692 sub_parser.add_argument('--partition_size', 3693 help='Partition size', 3694 type=parse_number) 3695 sub_parser.add_argument('--partition_name', 3696 help='Partition name', 3697 default=None) 3698 sub_parser.add_argument('--hash_algorithm', 3699 help='Hash algorithm to use (default: sha1)', 3700 default='sha1') 3701 sub_parser.add_argument('--salt', 3702 help='Salt in hex (default: /dev/urandom)') 3703 sub_parser.add_argument('--block_size', 3704 help='Block size (default: 4096)', 3705 type=parse_number, 3706 default=4096) 3707 # TODO(zeuthen): The --generate_fec option was removed when we 3708 # moved to generating FEC by default. To avoid breaking existing 3709 # users needing to transition we simply just print a warning below 3710 # in add_hashtree_footer(). Remove this option and the warning at 3711 # some point in the future. 3712 sub_parser.add_argument('--generate_fec', 3713 help=argparse.SUPPRESS, 3714 action='store_true') 3715 sub_parser.add_argument('--do_not_generate_fec', 3716 help='Do not generate forward-error-correction codes', 3717 action='store_true') 3718 sub_parser.add_argument('--fec_num_roots', 3719 help='Number of roots for FEC (default: 2)', 3720 type=parse_number, 3721 default=2) 3722 sub_parser.add_argument('--calc_max_image_size', 3723 help=('Don\'t store the hashtree or footer - ' 3724 'instead calculate the maximum image size ' 3725 'leaving enough room for hashtree ' 3726 'and metadata with the given partition ' 3727 'size.'), 3728 action='store_true') 3729 sub_parser.add_argument('--output_vbmeta_image', 3730 help='Also write vbmeta struct to file', 3731 type=argparse.FileType('wb')) 3732 sub_parser.add_argument('--do_not_append_vbmeta_image', 3733 help=('Do not append vbmeta struct or footer ' 3734 'to the image'), 3735 action='store_true') 3736 # This is different from --setup_rootfs_from_kernel insofar that 3737 # it doesn't take an IMAGE, the generated cmdline will be for the 3738 # hashtree we're adding. 3739 sub_parser.add_argument('--setup_as_rootfs_from_kernel', 3740 action='store_true', 3741 help='Adds kernel cmdline for setting up rootfs') 3742 self._add_common_args(sub_parser) 3743 self._add_common_footer_args(sub_parser) 3744 sub_parser.set_defaults(func=self.add_hashtree_footer) 3745 3746 sub_parser = subparsers.add_parser('erase_footer', 3747 help='Erase footer from an image.') 3748 sub_parser.add_argument('--image', 3749 help='Image with a footer', 3750 type=argparse.FileType('rwb+'), 3751 required=True) 3752 sub_parser.add_argument('--keep_hashtree', 3753 help='Keep the hashtree and FEC in the image', 3754 action='store_true') 3755 sub_parser.set_defaults(func=self.erase_footer) 3756 3757 sub_parser = subparsers.add_parser('resize_image', 3758 help='Resize image with a footer.') 3759 sub_parser.add_argument('--image', 3760 help='Image with a footer', 3761 type=argparse.FileType('rwb+'), 3762 required=True) 3763 sub_parser.add_argument('--partition_size', 3764 help='New partition size', 3765 type=parse_number) 3766 sub_parser.set_defaults(func=self.resize_image) 3767 3768 sub_parser = subparsers.add_parser( 3769 'info_image', 3770 help='Show information about vbmeta or footer.') 3771 sub_parser.add_argument('--image', 3772 help='Image to show information about', 3773 type=argparse.FileType('rb'), 3774 required=True) 3775 sub_parser.add_argument('--output', 3776 help='Write info to file', 3777 type=argparse.FileType('wt'), 3778 default=sys.stdout) 3779 sub_parser.set_defaults(func=self.info_image) 3780 3781 sub_parser = subparsers.add_parser( 3782 'verify_image', 3783 help='Verify an image.') 3784 sub_parser.add_argument('--image', 3785 help='Image to verify', 3786 type=argparse.FileType('rb'), 3787 required=True) 3788 sub_parser.add_argument('--key', 3789 help='Check embedded public key matches KEY', 3790 metavar='KEY', 3791 required=False) 3792 sub_parser.add_argument('--expected_chain_partition', 3793 help='Expected chain partition', 3794 metavar='PART_NAME:ROLLBACK_SLOT:KEY_PATH', 3795 action='append') 3796 sub_parser.set_defaults(func=self.verify_image) 3797 3798 sub_parser = subparsers.add_parser('set_ab_metadata', 3799 help='Set A/B metadata.') 3800 sub_parser.add_argument('--misc_image', 3801 help=('The misc image to modify. If the image does ' 3802 'not exist, it will be created.'), 3803 type=argparse.FileType('r+b'), 3804 required=True) 3805 sub_parser.add_argument('--slot_data', 3806 help=('Slot data of the form "priority", ' 3807 '"tries_remaining", "sucessful_boot" for ' 3808 'slot A followed by the same for slot B, ' 3809 'separated by colons. The default value ' 3810 'is 15:7:0:14:7:0.'), 3811 default='15:7:0:14:7:0') 3812 sub_parser.set_defaults(func=self.set_ab_metadata) 3813 3814 sub_parser = subparsers.add_parser( 3815 'make_atx_certificate', 3816 help='Create an Android Things eXtension (ATX) certificate.') 3817 sub_parser.add_argument('--output', 3818 help='Write certificate to file', 3819 type=argparse.FileType('wb'), 3820 default=sys.stdout) 3821 sub_parser.add_argument('--subject', 3822 help=('Path to subject file'), 3823 type=argparse.FileType('rb'), 3824 required=True) 3825 sub_parser.add_argument('--subject_key', 3826 help=('Path to subject RSA public key file'), 3827 type=argparse.FileType('rb'), 3828 required=True) 3829 sub_parser.add_argument('--subject_key_version', 3830 help=('Version of the subject key'), 3831 type=parse_number, 3832 required=False) 3833 sub_parser.add_argument('--subject_is_intermediate_authority', 3834 help=('Generate an intermediate authority ' 3835 'certificate'), 3836 action='store_true') 3837 sub_parser.add_argument('--authority_key', 3838 help='Path to authority RSA private key file', 3839 required=False) 3840 sub_parser.add_argument('--signing_helper', 3841 help='Path to helper used for signing', 3842 metavar='APP', 3843 default=None, 3844 required=False) 3845 sub_parser.add_argument('--signing_helper_with_files', 3846 help='Path to helper used for signing using files', 3847 metavar='APP', 3848 default=None, 3849 required=False) 3850 sub_parser.set_defaults(func=self.make_atx_certificate) 3851 3852 sub_parser = subparsers.add_parser( 3853 'make_atx_permanent_attributes', 3854 help='Create Android Things eXtension (ATX) permanent attributes.') 3855 sub_parser.add_argument('--output', 3856 help='Write attributes to file', 3857 type=argparse.FileType('wb'), 3858 default=sys.stdout) 3859 sub_parser.add_argument('--root_authority_key', 3860 help='Path to authority RSA public key file', 3861 type=argparse.FileType('rb'), 3862 required=True) 3863 sub_parser.add_argument('--product_id', 3864 help=('Path to Product ID file'), 3865 type=argparse.FileType('rb'), 3866 required=True) 3867 sub_parser.set_defaults(func=self.make_atx_permanent_attributes) 3868 3869 sub_parser = subparsers.add_parser( 3870 'make_atx_metadata', 3871 help='Create Android Things eXtension (ATX) metadata.') 3872 sub_parser.add_argument('--output', 3873 help='Write metadata to file', 3874 type=argparse.FileType('wb'), 3875 default=sys.stdout) 3876 sub_parser.add_argument('--intermediate_key_certificate', 3877 help='Path to intermediate key certificate file', 3878 type=argparse.FileType('rb'), 3879 required=True) 3880 sub_parser.add_argument('--product_key_certificate', 3881 help='Path to product key certificate file', 3882 type=argparse.FileType('rb'), 3883 required=True) 3884 sub_parser.set_defaults(func=self.make_atx_metadata) 3885 3886 args = parser.parse_args(argv[1:]) 3887 try: 3888 args.func(args) 3889 except AvbError as e: 3890 sys.stderr.write('{}: {}\n'.format(argv[0], e.message)) 3891 sys.exit(1) 3892 3893 def version(self, _): 3894 """Implements the 'version' sub-command.""" 3895 print get_release_string() 3896 3897 def extract_public_key(self, args): 3898 """Implements the 'extract_public_key' sub-command.""" 3899 self.avb.extract_public_key(args.key, args.output) 3900 3901 def make_vbmeta_image(self, args): 3902 """Implements the 'make_vbmeta_image' sub-command.""" 3903 args = self._fixup_common_args(args) 3904 self.avb.make_vbmeta_image(args.output, args.chain_partition, 3905 args.algorithm, args.key, 3906 args.public_key_metadata, args.rollback_index, 3907 args.flags, args.prop, args.prop_from_file, 3908 args.kernel_cmdline, 3909 args.setup_rootfs_from_kernel, 3910 args.include_descriptors_from_image, 3911 args.signing_helper, 3912 args.signing_helper_with_files, 3913 args.internal_release_string, 3914 args.append_to_release_string, 3915 args.print_required_libavb_version, 3916 args.padding_size) 3917 3918 def append_vbmeta_image(self, args): 3919 """Implements the 'append_vbmeta_image' sub-command.""" 3920 self.avb.append_vbmeta_image(args.image.name, args.vbmeta_image.name, 3921 args.partition_size) 3922 3923 def add_hash_footer(self, args): 3924 """Implements the 'add_hash_footer' sub-command.""" 3925 args = self._fixup_common_args(args) 3926 self.avb.add_hash_footer(args.image.name if args.image else None, 3927 args.partition_size, 3928 args.partition_name, args.hash_algorithm, 3929 args.salt, args.chain_partition, args.algorithm, 3930 args.key, 3931 args.public_key_metadata, args.rollback_index, 3932 args.flags, args.prop, args.prop_from_file, 3933 args.kernel_cmdline, 3934 args.setup_rootfs_from_kernel, 3935 args.include_descriptors_from_image, 3936 args.calc_max_image_size, 3937 args.signing_helper, 3938 args.signing_helper_with_files, 3939 args.internal_release_string, 3940 args.append_to_release_string, 3941 args.output_vbmeta_image, 3942 args.do_not_append_vbmeta_image, 3943 args.print_required_libavb_version, 3944 args.use_persistent_digest, 3945 args.do_not_use_ab) 3946 3947 def add_hashtree_footer(self, args): 3948 """Implements the 'add_hashtree_footer' sub-command.""" 3949 args = self._fixup_common_args(args) 3950 # TODO(zeuthen): Remove when removing support for the 3951 # '--generate_fec' option above. 3952 if args.generate_fec: 3953 sys.stderr.write('The --generate_fec option is deprecated since FEC ' 3954 'is now generated by default. Use the option ' 3955 '--do_not_generate_fec to not generate FEC.\n') 3956 self.avb.add_hashtree_footer(args.image.name if args.image else None, 3957 args.partition_size, 3958 args.partition_name, 3959 not args.do_not_generate_fec, args.fec_num_roots, 3960 args.hash_algorithm, args.block_size, 3961 args.salt, args.chain_partition, args.algorithm, 3962 args.key, args.public_key_metadata, 3963 args.rollback_index, args.flags, args.prop, 3964 args.prop_from_file, 3965 args.kernel_cmdline, 3966 args.setup_rootfs_from_kernel, 3967 args.setup_as_rootfs_from_kernel, 3968 args.include_descriptors_from_image, 3969 args.calc_max_image_size, 3970 args.signing_helper, 3971 args.signing_helper_with_files, 3972 args.internal_release_string, 3973 args.append_to_release_string, 3974 args.output_vbmeta_image, 3975 args.do_not_append_vbmeta_image, 3976 args.print_required_libavb_version, 3977 args.use_persistent_digest, 3978 args.do_not_use_ab) 3979 3980 def erase_footer(self, args): 3981 """Implements the 'erase_footer' sub-command.""" 3982 self.avb.erase_footer(args.image.name, args.keep_hashtree) 3983 3984 def resize_image(self, args): 3985 """Implements the 'resize_image' sub-command.""" 3986 self.avb.resize_image(args.image.name, args.partition_size) 3987 3988 def set_ab_metadata(self, args): 3989 """Implements the 'set_ab_metadata' sub-command.""" 3990 self.avb.set_ab_metadata(args.misc_image, args.slot_data) 3991 3992 def info_image(self, args): 3993 """Implements the 'info_image' sub-command.""" 3994 self.avb.info_image(args.image.name, args.output) 3995 3996 def verify_image(self, args): 3997 """Implements the 'verify_image' sub-command.""" 3998 self.avb.verify_image(args.image.name, args.key, 3999 args.expected_chain_partition) 4000 4001 def make_atx_certificate(self, args): 4002 """Implements the 'make_atx_certificate' sub-command.""" 4003 self.avb.make_atx_certificate(args.output, args.authority_key, 4004 args.subject_key.name, 4005 args.subject_key_version, 4006 args.subject.read(), 4007 args.subject_is_intermediate_authority, 4008 args.signing_helper, 4009 args.signing_helper_with_files) 4010 4011 def make_atx_permanent_attributes(self, args): 4012 """Implements the 'make_atx_permanent_attributes' sub-command.""" 4013 self.avb.make_atx_permanent_attributes(args.output, 4014 args.root_authority_key.name, 4015 args.product_id.read()) 4016 4017 def make_atx_metadata(self, args): 4018 """Implements the 'make_atx_metadata' sub-command.""" 4019 self.avb.make_atx_metadata(args.output, 4020 args.intermediate_key_certificate.read(), 4021 args.product_key_certificate.read()) 4022 4023 4024 if __name__ == '__main__': 4025 tool = AvbTool() 4026 tool.run(sys.argv) 4027