#!/usr/bin/env python

# Copyright 2016, The Android Open Source Project
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Command-line tool for working with Android Verified Boot images."""

import argparse
import binascii
import bisect
import hashlib
import os
import struct
import subprocess
import sys
import tempfile
import time

import Crypto.PublicKey.RSA

# Keep in sync with libavb/avb_version.h.
AVB_VERSION_MAJOR = 1
AVB_VERSION_MINOR = 0
AVB_VERSION_SUB = 0

AVB_VBMETA_IMAGE_FLAGS_HASHTREE_DISABLED = 1


class AvbError(Exception):
  """Application-specific errors.

  These errors represent issues for which a stack-trace should not be
  presented.

  Attributes:
    message: Error message.
  """

  def __init__(self, message):
    Exception.__init__(self, message)


class Algorithm(object):
  """Contains details about an algorithm.

  See the avb_vbmeta_header.h file for more details about
  algorithms.

  The constant |ALGORITHMS| is a dictionary from human-readable
  names (e.g. 'SHA256_RSA2048') to instances of this class.

  Attributes:
    algorithm_type: Integer code corresponding to |AvbAlgorithmType|.
    hash_num_bytes: Number of bytes used to store the hash.
    signature_num_bytes: Number of bytes used to store the signature.
    public_key_num_bytes: Number of bytes used to store the public key.
    padding: Padding used for signature, if any.
  """

  def __init__(self, algorithm_type, hash_num_bytes, signature_num_bytes,
               public_key_num_bytes, padding):
    self.algorithm_type = algorithm_type
    self.hash_num_bytes = hash_num_bytes
    self.signature_num_bytes = signature_num_bytes
    self.public_key_num_bytes = public_key_num_bytes
    self.padding = padding

# This must be kept in sync with the avb_crypto.h file.
#
# The PKCS#1 v1.5 padding is a blob of binary DER of ASN.1 and is
# obtained from section 5.2.2 of RFC 4880.
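#
# Each padding blob below is the start of an EMSA-PKCS1-v1_5 encoding:
# 0x00 0x01, a run of 0xff bytes, 0x00 and the DER-encoded DigestInfo
# prefix; the digest itself is appended at signing time. The DigestInfo
# prefix is 19 bytes for both SHA-256 and SHA-512, so the run of 0xff
# bytes is signature_num_bytes - 3 - 19 - hash_num_bytes long, e.g.
# 256 - 3 - 19 - 32 = 202 for SHA256_RSA2048 and
# 1024 - 3 - 19 - 64 = 938 for SHA512_RSA8192.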
ALGORITHMS = {
    'NONE': Algorithm(
        algorithm_type=0,        # AVB_ALGORITHM_TYPE_NONE
        hash_num_bytes=0,
        signature_num_bytes=0,
        public_key_num_bytes=0,
        padding=[]),
    'SHA256_RSA2048': Algorithm(
        algorithm_type=1,        # AVB_ALGORITHM_TYPE_SHA256_RSA2048
        hash_num_bytes=32,
        signature_num_bytes=256,
        public_key_num_bytes=8 + 2*2048/8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*202 + [0x00] + [
                # ASN.1 header
                0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                0x00, 0x04, 0x20,
            ]),
    'SHA256_RSA4096': Algorithm(
        algorithm_type=2,        # AVB_ALGORITHM_TYPE_SHA256_RSA4096
        hash_num_bytes=32,
        signature_num_bytes=512,
        public_key_num_bytes=8 + 2*4096/8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*458 + [0x00] + [
                # ASN.1 header
                0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                0x00, 0x04, 0x20,
            ]),
    'SHA256_RSA8192': Algorithm(
        algorithm_type=3,        # AVB_ALGORITHM_TYPE_SHA256_RSA8192
        hash_num_bytes=32,
        signature_num_bytes=1024,
        public_key_num_bytes=8 + 2*8192/8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*970 + [0x00] + [
                # ASN.1 header
                0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
                0x00, 0x04, 0x20,
            ]),
    'SHA512_RSA2048': Algorithm(
        algorithm_type=4,        # AVB_ALGORITHM_TYPE_SHA512_RSA2048
        hash_num_bytes=64,
        signature_num_bytes=256,
        public_key_num_bytes=8 + 2*2048/8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*170 + [0x00] + [
                # ASN.1 header
                0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
                0x00, 0x04, 0x40
            ]),
    'SHA512_RSA4096': Algorithm(
        algorithm_type=5,        # AVB_ALGORITHM_TYPE_SHA512_RSA4096
        hash_num_bytes=64,
        signature_num_bytes=512,
        public_key_num_bytes=8 + 2*4096/8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*426 + [0x00] + [
                # ASN.1 header
                0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
                0x00, 0x04, 0x40
            ]),
    'SHA512_RSA8192': Algorithm(
        algorithm_type=6,        # AVB_ALGORITHM_TYPE_SHA512_RSA8192
        hash_num_bytes=64,
        signature_num_bytes=1024,
        public_key_num_bytes=8 + 2*8192/8,
        padding=[
            # PKCS1-v1_5 padding
            0x00, 0x01] + [0xff]*938 + [0x00] + [
                # ASN.1 header
                0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
                0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
                0x00, 0x04, 0x40
            ]),
}


def get_release_string():
  """Calculates the release string to use in the VBMeta struct."""
  # Keep in sync with libavb/avb_version.c:avb_version_string().
  return 'avbtool {}.{}.{}'.format(AVB_VERSION_MAJOR,
                                   AVB_VERSION_MINOR,
                                   AVB_VERSION_SUB)


def round_to_multiple(number, size):
  """Rounds a number up to the nearest multiple of another number.

  Args:
    number: The number to round up.
    size: The multiple to round up to.

  Returns:
    If |number| is a multiple of |size|, returns |number|, otherwise
    returns |number| rounded up to the nearest multiple of |size|.
  """
  remainder = number % size
  if remainder == 0:
    return number
  return number + size - remainder


def round_to_pow2(number):
  """Rounds a number up to the next power of 2.

  Args:
    number: The number to round up.
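
  For example, round_to_pow2(5) returns 8 and round_to_pow2(4096)
  returns 4096.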

  Returns:
    If |number| is already a power of 2 then |number| is
    returned. Otherwise the smallest power of 2 greater than |number|
    is returned.
  """
  return 2**((number - 1).bit_length())


def write_long(output, num_bits, value):
  """Writes a long to an output stream using a given amount of bits.

  This number is written big-endian, i.e. with the most significant
  byte first.

  Arguments:
    output: The object to write the output to.
    num_bits: The number of bits to write, e.g. 2048.
    value: The value to write.
  """
  for bit_pos in range(num_bits, 0, -8):
    octet = (value >> (bit_pos - 8)) & 0xff
    output.write(struct.pack('!B', octet))


def encode_long(num_bits, value):
  """Encodes a long to a bytearray() using a given amount of bits.

  This number is written big-endian, i.e. with the most significant
  byte first.

  Arguments:
    num_bits: The number of bits to write, e.g. 2048.
    value: The value to write.

  Returns:
    A bytearray() with the encoded long.
  """
  ret = bytearray()
  for bit_pos in range(num_bits, 0, -8):
    octet = (value >> (bit_pos - 8)) & 0xff
    ret.extend(struct.pack('!B', octet))
  return ret


def egcd(a, b):
  """Calculate the greatest common divisor of two numbers.

  This implementation uses a recursive version of the extended
  Euclidean algorithm.

  Arguments:
    a: First number.
    b: Second number.

  Returns:
    A tuple (gcd, x, y) where |gcd| is the greatest common
    divisor of |a| and |b| and |a|*|x| + |b|*|y| = |gcd|.
  """
  if a == 0:
    return (b, 0, 1)
  else:
    g, y, x = egcd(b % a, a)
    return (g, x - (b // a) * y, y)


def modinv(a, m):
  """Calculate the modular multiplicative inverse of |a| modulo |m|.

  This calculates the number |x| such that |a| * |x| == 1 (modulo
  |m|). This number only exists if |a| and |m| are co-prime - |None|
  is returned if this isn't true.

  Arguments:
    a: The number to calculate a modular inverse of.
    m: The modulo to use.

  Returns:
    The modular multiplicative inverse of |a| and |m| or |None| if
    these numbers are not co-prime.
  """
  gcd, x, _ = egcd(a, m)
  if gcd != 1:
    return None  # modular inverse does not exist
  else:
    return x % m


def parse_number(string):
  """Parse a string as a number.

  This is just a short-hand for int(string, 0) suitable for use in the
  |type| parameter of |ArgumentParser|'s add_argument() function. An
  improvement to just using type=int is that this function supports
  numbers in other bases, e.g. "0x1234".

  Arguments:
    string: The string to parse.

  Returns:
    The parsed integer.

  Raises:
    ValueError: If the number could not be parsed.
  """
  return int(string, 0)


def write_rsa_key(output, key):
  """Writes a public RSA key in |AvbRSAPublicKeyHeader| format.

  This writes the |AvbRSAPublicKeyHeader| as well as the two large
  numbers (|key_num_bits| bits long) following it.

  Arguments:
    output: The object to write the output to.
    key: A Crypto.PublicKey.RSA object.
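
  The encoding is two big-endian 32-bit values - the key size in bits
  and n0inv = -1/n[0] (mod 2^32) - followed by the modulus n and
  r^2 mod n, each |key_num_bits| bits long, as laid out in the code
  below.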
  """
  # key.e is exponent
  # key.n is modulus
  key_num_bits = key.size() + 1
  # Calculate n0inv = -1/n[0] (mod 2^32)
  b = 2L**32
  n0inv = b - modinv(key.n, b)
  # Calculate rr = r^2 (mod N), where r = 2^(# of key bits)
  r = 2L**key.n.bit_length()
  rrmodn = r * r % key.n
  output.write(struct.pack('!II', key_num_bits, n0inv))
  write_long(output, key_num_bits, key.n)
  write_long(output, key_num_bits, rrmodn)


def encode_rsa_key(key):
  """Encodes a public RSA key in |AvbRSAPublicKeyHeader| format.

  This creates a |AvbRSAPublicKeyHeader| as well as the two large
  numbers (|key_num_bits| bits long) following it.

  Arguments:
    key: A Crypto.PublicKey.RSA object.

  Returns:
    A bytearray() with the |AvbRSAPublicKeyHeader|.
  """
  ret = bytearray()
  # key.e is exponent
  # key.n is modulus
  key_num_bits = key.size() + 1
  # Calculate n0inv = -1/n[0] (mod 2^32)
  b = 2L**32
  n0inv = b - modinv(key.n, b)
  # Calculate rr = r^2 (mod N), where r = 2^(# of key bits)
  r = 2L**key.n.bit_length()
  rrmodn = r * r % key.n
  ret.extend(struct.pack('!II', key_num_bits, n0inv))
  ret.extend(encode_long(key_num_bits, key.n))
  ret.extend(encode_long(key_num_bits, rrmodn))
  return ret


def lookup_algorithm_by_type(alg_type):
  """Looks up algorithm by type.

  Arguments:
    alg_type: The integer representing the type.

  Returns:
    A tuple with the algorithm name and an |Algorithm| instance.

  Raises:
    AvbError: If the algorithm cannot be found.
  """
  for alg_name in ALGORITHMS:
    alg_data = ALGORITHMS[alg_name]
    if alg_data.algorithm_type == alg_type:
      return (alg_name, alg_data)
  raise AvbError('Unknown algorithm type {}'.format(alg_type))


def raw_sign(signing_helper, algorithm_name, key_path, raw_data_to_sign):
  """Computes a raw RSA signature using |signing_helper| or openssl.

  Arguments:
    signing_helper: Program which signs a hash and returns the signature.
    algorithm_name: The algorithm name as per the ALGORITHMS dict.
    key_path: Path to the private key file. Must be PEM format.
    raw_data_to_sign: Data to sign (bytearray or str expected).

  Returns:
    A bytearray containing the signature.

  Raises:
    AvbError: If an error occurs while signing.
  """
  p = None
  if signing_helper is not None:
    p = subprocess.Popen(
        [signing_helper, algorithm_name, key_path],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
  else:
    p = subprocess.Popen(
        ['openssl', 'rsautl', '-sign', '-inkey', key_path, '-raw'],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
  (pout, perr) = p.communicate(str(raw_data_to_sign))
  retcode = p.wait()
  if retcode != 0:
    raise AvbError('Error signing: {}'.format(perr))
  return bytearray(pout)


class ImageChunk(object):
  """Data structure used for representing chunks in Android sparse files.

  Attributes:
    chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
    chunk_offset: Offset in the sparse file where this chunk begins.
    output_offset: Offset in de-sparsified file where output begins.
    output_size: Number of bytes in output.
    input_offset: Offset in sparse file for data if TYPE_RAW otherwise None.
    fill_data: Blob with data to fill if TYPE_FILL otherwise None.
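
  Each chunk is preceded in the sparse file by a 12-byte header (see
  |FORMAT| below) giving the chunk type, a reserved field, the chunk
  size in blocks and the total size in bytes including the header and
  any payload.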
  """

  FORMAT = '<2H2I'
  TYPE_RAW = 0xcac1
  TYPE_FILL = 0xcac2
  TYPE_DONT_CARE = 0xcac3
  TYPE_CRC32 = 0xcac4

  def __init__(self, chunk_type, chunk_offset, output_offset, output_size,
               input_offset, fill_data):
    """Initializes an ImageChunk object.

    Arguments:
      chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
      chunk_offset: Offset in the sparse file where this chunk begins.
      output_offset: Offset in de-sparsified file.
      output_size: Number of bytes in output.
      input_offset: Offset in sparse file if TYPE_RAW otherwise None.
      fill_data: Blob with data to fill if TYPE_FILL otherwise None.

    Raises:
      ValueError: If data is not well-formed.
    """
    self.chunk_type = chunk_type
    self.chunk_offset = chunk_offset
    self.output_offset = output_offset
    self.output_size = output_size
    self.input_offset = input_offset
    self.fill_data = fill_data
    # Check invariants.
    if self.chunk_type == self.TYPE_RAW:
      if self.fill_data is not None:
        raise ValueError('RAW chunk cannot have fill_data set.')
      if not self.input_offset:
        raise ValueError('RAW chunk must have input_offset set.')
    elif self.chunk_type == self.TYPE_FILL:
      if self.fill_data is None:
        raise ValueError('FILL chunk must have fill_data set.')
      if self.input_offset:
        raise ValueError('FILL chunk cannot have input_offset set.')
    elif self.chunk_type == self.TYPE_DONT_CARE:
      if self.fill_data is not None:
        raise ValueError('DONT_CARE chunk cannot have fill_data set.')
      if self.input_offset:
        raise ValueError('DONT_CARE chunk cannot have input_offset set.')
    else:
      raise ValueError('Invalid chunk type')


class ImageHandler(object):
  """Abstraction for image I/O with support for Android sparse images.

  This class provides an interface for working with image files that
  may be using the Android Sparse Image format. When an instance is
  constructed, we test whether it's an Android sparse file. If so,
  operations are done by interpreting the sparse format; otherwise
  they are done directly on the file. Either way, the operations
  behave the same.

  For reading, this interface mimics a file object - it has seek(),
  tell(), and read() methods. For writing, only truncation
  (truncate()) and appending (append_raw() and append_dont_care())
  are supported. Additionally, data can only be written in units
  of the block size.

  Attributes:
    is_sparse: Whether the file being operated on is sparse.
    block_size: The block size, typically 4096.
    image_size: The size of the unsparsified file.
  """
  # See system/core/libsparse/sparse_format.h for details.
  MAGIC = 0xed26ff3a
  HEADER_FORMAT = '<I4H4I'

  # These are the format and offset of just the |total_chunks| and
  # |total_blocks| fields.
  NUM_CHUNKS_AND_BLOCKS_FORMAT = '<II'
  NUM_CHUNKS_AND_BLOCKS_OFFSET = 16

  def __init__(self, image_filename):
    """Initializes an image handler.

    Arguments:
      image_filename: The name of the file to operate on.

    Raises:
      ValueError: If data in the file is invalid.
    """
    self._image_filename = image_filename
    self._read_header()

  def _read_header(self):
    """Initializes internal data structures used for reading the file.

    This may be called multiple times and is typically called after
    modifying the file (e.g. appending, truncation).

    Raises:
      ValueError: If data in the file is invalid.
    """
    self.is_sparse = False
    self.block_size = 4096
    self._file_pos = 0
    self._image = open(self._image_filename, 'r+b')
    self._image.seek(0, os.SEEK_END)
    self.image_size = self._image.tell()

    self._image.seek(0, os.SEEK_SET)
    header_bin = self._image.read(struct.calcsize(self.HEADER_FORMAT))
    (magic, major_version, minor_version, file_hdr_sz, chunk_hdr_sz,
     block_size, self._num_total_blocks, self._num_total_chunks,
     _) = struct.unpack(self.HEADER_FORMAT, header_bin)
    if magic != self.MAGIC:
      # Not a sparse image, our job here is done.
      return
    if not (major_version == 1 and minor_version == 0):
      raise ValueError('Encountered sparse image format version {}.{} but '
                       'only 1.0 is supported'.format(major_version,
                                                      minor_version))
    if file_hdr_sz != struct.calcsize(self.HEADER_FORMAT):
      raise ValueError('Unexpected file_hdr_sz value {}.'.
                       format(file_hdr_sz))
    if chunk_hdr_sz != struct.calcsize(ImageChunk.FORMAT):
      raise ValueError('Unexpected chunk_hdr_sz value {}.'.
                       format(chunk_hdr_sz))

    self.block_size = block_size

    # Build a list of chunks by parsing the file.
    self._chunks = []

    # Find the smallest offset where only "Don't care" chunks
    # follow. This will be the size of the content in the sparse
    # image.
    offset = 0
    output_offset = 0
    for _ in xrange(1, self._num_total_chunks + 1):
      chunk_offset = self._image.tell()

      header_bin = self._image.read(struct.calcsize(ImageChunk.FORMAT))
      (chunk_type, _, chunk_sz, total_sz) = struct.unpack(ImageChunk.FORMAT,
                                                          header_bin)
      data_sz = total_sz - struct.calcsize(ImageChunk.FORMAT)

      if chunk_type == ImageChunk.TYPE_RAW:
        if data_sz != (chunk_sz * self.block_size):
          raise ValueError('Raw chunk input size ({}) does not match output '
                           'size ({})'.
                           format(data_sz, chunk_sz*self.block_size))
        self._chunks.append(ImageChunk(ImageChunk.TYPE_RAW,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       self._image.tell(),
                                       None))
        self._image.read(data_sz)

      elif chunk_type == ImageChunk.TYPE_FILL:
        if data_sz != 4:
          raise ValueError('Fill chunk should have 4 bytes of fill, but this '
                           'has {}'.format(data_sz))
        fill_data = self._image.read(4)
        self._chunks.append(ImageChunk(ImageChunk.TYPE_FILL,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       None,
                                       fill_data))
      elif chunk_type == ImageChunk.TYPE_DONT_CARE:
        if data_sz != 0:
          raise ValueError('Don\'t care chunk input size is non-zero ({})'.
                           format(data_sz))
        self._chunks.append(ImageChunk(ImageChunk.TYPE_DONT_CARE,
                                       chunk_offset,
                                       output_offset,
                                       chunk_sz*self.block_size,
                                       None,
                                       None))
      elif chunk_type == ImageChunk.TYPE_CRC32:
        if data_sz != 4:
          raise ValueError('CRC32 chunk should have 4 bytes of CRC, but '
                           'this has {}'.format(data_sz))
        self._image.read(4)
      else:
        raise ValueError('Unknown chunk type {}'.format(chunk_type))

      offset += chunk_sz
      output_offset += chunk_sz*self.block_size

    # Record where sparse data ends.
    self._sparse_end = self._image.tell()

    # Now that we've traversed all chunks, sanity check.
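    # The number of blocks accumulated in |offset| must match the
    # |total_blks| field from the header, and nothing may follow the
    # final chunk.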
    if self._num_total_blocks != offset:
      raise ValueError('The header said we should have {} output blocks, '
                       'but we saw {}'.format(self._num_total_blocks, offset))
    junk_len = len(self._image.read())
    if junk_len > 0:
      raise ValueError('There were {} bytes of extra data at the end of the '
                       'file.'.format(junk_len))

    # Assign |image_size|.
    self.image_size = output_offset

    # This is used when bisecting in read() to find the initial slice.
    self._chunk_output_offsets = [i.output_offset for i in self._chunks]

    self.is_sparse = True

  def _update_chunks_and_blocks(self):
    """Helper function to update the image header.

    The |total_chunks| and |total_blocks| fields in the header will be
    set to the values of the |_num_total_chunks| and
    |_num_total_blocks| attributes.
    """
    self._image.seek(self.NUM_CHUNKS_AND_BLOCKS_OFFSET, os.SEEK_SET)
    self._image.write(struct.pack(self.NUM_CHUNKS_AND_BLOCKS_FORMAT,
                                  self._num_total_blocks,
                                  self._num_total_chunks))

  def append_dont_care(self, num_bytes):
    """Appends a DONT_CARE chunk to the sparse file.

    The given number of bytes must be a multiple of the block size.

    Arguments:
      num_bytes: Size in number of bytes of the DONT_CARE chunk.
    """
    assert num_bytes % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      # This is more efficient than writing NUL bytes since it'll add
      # a hole on file systems that support sparse files (native
      # sparse, not Android sparse).
      self._image.truncate(self._image.tell() + num_bytes)
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += num_bytes / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_DONT_CARE,
                                  0,  # Reserved
                                  num_bytes / self.block_size,
                                  struct.calcsize(ImageChunk.FORMAT)))
    self._read_header()

  def append_raw(self, data):
    """Appends a RAW chunk to the sparse file.

    The length of the given data must be a multiple of the block size.

    Arguments:
      data: Data to append.
    """
    assert len(data) % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      self._image.write(data)
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += len(data) / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_RAW,
                                  0,  # Reserved
                                  len(data) / self.block_size,
                                  len(data) +
                                  struct.calcsize(ImageChunk.FORMAT)))
    self._image.write(data)
    self._read_header()

  def append_fill(self, fill_data, size):
    """Appends a fill chunk to the sparse file.

    The total length of the fill data must be a multiple of the block size.

    Arguments:
      fill_data: Fill data to append - must be four bytes.
      size: Size in bytes - must be a multiple of four and the block size.
    """
    assert len(fill_data) == 4
    assert size % 4 == 0
    assert size % self.block_size == 0

    if not self.is_sparse:
      self._image.seek(0, os.SEEK_END)
      self._image.write(fill_data * (size/4))
      self._read_header()
      return

    self._num_total_chunks += 1
    self._num_total_blocks += size / self.block_size
    self._update_chunks_and_blocks()

    self._image.seek(self._sparse_end, os.SEEK_SET)
    self._image.write(struct.pack(ImageChunk.FORMAT,
                                  ImageChunk.TYPE_FILL,
                                  0,  # Reserved
                                  size / self.block_size,
                                  4 + struct.calcsize(ImageChunk.FORMAT)))
    self._image.write(fill_data)
    self._read_header()

  def seek(self, offset):
    """Sets the cursor position for reading from unsparsified file.

    Arguments:
      offset: Offset to seek to from the beginning of the file.
    """
    self._file_pos = offset

  def read(self, size):
    """Reads data from the unsparsified file.

    This method may return fewer than |size| bytes of data if the end
    of the file was encountered.

    The file cursor for reading is advanced by the number of bytes
    read.

    Arguments:
      size: Number of bytes to read.

    Returns:
      The data.
    """
    if not self.is_sparse:
      self._image.seek(self._file_pos)
      data = self._image.read(size)
      self._file_pos += len(data)
      return data

    # Iterate over all chunks.
    chunk_idx = bisect.bisect_right(self._chunk_output_offsets,
                                    self._file_pos) - 1
    data = bytearray()
    to_go = size
    while to_go > 0:
      chunk = self._chunks[chunk_idx]
      chunk_pos_offset = self._file_pos - chunk.output_offset
      chunk_pos_to_go = min(chunk.output_size - chunk_pos_offset, to_go)

      if chunk.chunk_type == ImageChunk.TYPE_RAW:
        self._image.seek(chunk.input_offset + chunk_pos_offset)
        data.extend(self._image.read(chunk_pos_to_go))
      elif chunk.chunk_type == ImageChunk.TYPE_FILL:
        all_data = chunk.fill_data*(chunk_pos_to_go/len(chunk.fill_data) + 2)
        offset_mod = chunk_pos_offset % len(chunk.fill_data)
        data.extend(all_data[offset_mod:(offset_mod + chunk_pos_to_go)])
      else:
        assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
        data.extend('\0' * chunk_pos_to_go)

      to_go -= chunk_pos_to_go
      self._file_pos += chunk_pos_to_go
      chunk_idx += 1
      # Generate partial read in case of EOF.
      if chunk_idx >= len(self._chunks):
        break

    return data

  def tell(self):
    """Returns the file cursor position for reading from unsparsified file.

    Returns:
      The file cursor position for reading.
    """
    return self._file_pos

  def truncate(self, size):
    """Truncates the unsparsified file.

    Arguments:
      size: Desired size of unsparsified file.

    Raises:
      ValueError: If desired size isn't a multiple of the block size.
    """
    if not self.is_sparse:
      self._image.truncate(size)
      self._read_header()
      return

    if size % self.block_size != 0:
      raise ValueError('Cannot truncate to a size which is not a multiple '
                       'of the block size')

    if size == self.image_size:
      # Trivial case where there's nothing to do.
      return
    elif size < self.image_size:
      chunk_idx = bisect.bisect_right(self._chunk_output_offsets, size) - 1
      chunk = self._chunks[chunk_idx]
      if chunk.output_offset != size:
        # Truncation in the middle of a chunk - need to keep the chunk
        # and modify it.
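        # (The chunk header is rewritten in place below; a RAW chunk keeps
        # only its first |num_to_keep| payload bytes, a FILL chunk keeps its
        # 4-byte pattern and a DONT_CARE chunk has no payload at all.)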
        chunk_idx_for_update = chunk_idx + 1
        num_to_keep = size - chunk.output_offset
        assert num_to_keep % self.block_size == 0
        if chunk.chunk_type == ImageChunk.TYPE_RAW:
          truncate_at = (chunk.chunk_offset +
                         struct.calcsize(ImageChunk.FORMAT) + num_to_keep)
          data_sz = num_to_keep
        elif chunk.chunk_type == ImageChunk.TYPE_FILL:
          truncate_at = (chunk.chunk_offset +
                         struct.calcsize(ImageChunk.FORMAT) + 4)
          data_sz = 4
        else:
          assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
          truncate_at = chunk.chunk_offset + struct.calcsize(ImageChunk.FORMAT)
          data_sz = 0
        chunk_sz = num_to_keep/self.block_size
        total_sz = data_sz + struct.calcsize(ImageChunk.FORMAT)
        self._image.seek(chunk.chunk_offset)
        self._image.write(struct.pack(ImageChunk.FORMAT,
                                      chunk.chunk_type,
                                      0,  # Reserved
                                      chunk_sz,
                                      total_sz))
        chunk.output_size = num_to_keep
      else:
        # Truncation at chunk boundary.
        truncate_at = chunk.chunk_offset
        chunk_idx_for_update = chunk_idx

      self._num_total_chunks = chunk_idx_for_update
      self._num_total_blocks = 0
      for i in range(0, chunk_idx_for_update):
        self._num_total_blocks += self._chunks[i].output_size / self.block_size
      self._update_chunks_and_blocks()
      self._image.truncate(truncate_at)

      # We've modified the file so re-read all data.
      self._read_header()
    else:
      # Truncating to grow - just add a DONT_CARE section.
      self.append_dont_care(size - self.image_size)


class AvbDescriptor(object):
  """Class for AVB descriptor.

  See the |AvbDescriptor| C struct for more information.

  Attributes:
    tag: The tag identifying what kind of descriptor this is.
    data: The data in the descriptor.
  """

  SIZE = 16
  FORMAT_STRING = ('!QQ')  # tag, num_bytes_following (descriptor header)

  def __init__(self, data):
    """Initializes a new descriptor.

    Arguments:
      data: If not None, must be a bytearray().

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (self.tag, num_bytes_following) = (
          struct.unpack(self.FORMAT_STRING, data[0:self.SIZE]))
      self.data = data[self.SIZE:self.SIZE + num_bytes_following]
    else:
      self.tag = None
      self.data = None

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write('    Unknown descriptor:\n')
    o.write('      Tag:  {}\n'.format(self.tag))
    if len(self.data) < 256:
      o.write('      Data: {} ({} bytes)\n'.format(
          repr(str(self.data)), len(self.data)))
    else:
      o.write('      Data: {} bytes\n'.format(len(self.data)))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    num_bytes_following = len(self.data)
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.tag, nbf_with_padding)
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + self.data + padding
    return bytearray(ret)


class AvbPropertyDescriptor(AvbDescriptor):
  """A class for property descriptors.

  See the |AvbPropertyDescriptor| C struct for more information.

  Attributes:
    key: The key.
    value: The value.
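
  On disk the descriptor is the 16-byte descriptor header followed by
  the key size and value size (both 64-bit), then the key and the
  value, each NUL-terminated, padded with zeros to a multiple of
  eight bytes.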
  """

  TAG = 0
  SIZE = 32
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'Q'    # key size (bytes)
                   'Q')   # value size (bytes)

  def __init__(self, data=None):
    """Initializes a new property descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, key_size,
       value_size) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
      expected_size = round_to_multiple(
          self.SIZE - 16 + key_size + 1 + value_size + 1, 8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a property '
                          'descriptor.')
      self.key = data[self.SIZE:(self.SIZE + key_size)]
      self.value = data[(self.SIZE + key_size + 1):(self.SIZE + key_size + 1 +
                                                    value_size)]
    else:
      self.key = ''
      self.value = ''

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    if len(self.value) < 256:
      o.write('    Prop: {} -> {}\n'.format(self.key, repr(str(self.value))))
    else:
      o.write('    Prop: {} -> ({} bytes)\n'.format(self.key, len(self.value)))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    num_bytes_following = self.SIZE + len(self.key) + len(self.value) + 2 - 16
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       len(self.key), len(self.value))
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + self.key + '\0' + self.value + '\0' + padding
    return bytearray(ret)


class AvbHashtreeDescriptor(AvbDescriptor):
  """A class for hashtree descriptors.

  See the |AvbHashtreeDescriptor| C struct for more information.

  Attributes:
    dm_verity_version: dm-verity version used.
    image_size: Size of the image, after rounding up to |block_size|.
    tree_offset: Offset of the hash tree in the file.
    tree_size: Size of the tree.
    data_block_size: Data block size
    hash_block_size: Hash block size
    fec_num_roots: Number of roots used for FEC (0 if FEC is not used).
    fec_offset: Offset of FEC data (0 if FEC is not used).
    fec_size: Size of FEC data (0 if FEC is not used).
    hash_algorithm: Hash algorithm used.
    partition_name: Partition name.
    salt: Salt used.
    root_digest: Root digest.
  """

  TAG = 1
  RESERVED = 64
  SIZE = 116 + RESERVED
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'L'    # dm-verity version used
                   'Q'    # image size (bytes)
                   'Q'    # tree offset (bytes)
                   'Q'    # tree size (bytes)
                   'L'    # data block size (bytes)
                   'L'    # hash block size (bytes)
                   'L'    # FEC number of roots
                   'Q'    # FEC offset (bytes)
                   'Q'    # FEC size (bytes)
                   '32s'  # hash algorithm used
                   'L'    # partition name (bytes)
                   'L'    # salt length (bytes)
                   'L' +  # root digest length (bytes)
                   str(RESERVED) + 's')  # reserved

  def __init__(self, data=None):
    """Initializes a new hashtree descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, self.dm_verity_version, self.image_size,
       self.tree_offset, self.tree_size, self.data_block_size,
       self.hash_block_size, self.fec_num_roots, self.fec_offset,
       self.fec_size, self.hash_algorithm, partition_name_len, salt_len,
       root_digest_len, _) = struct.unpack(self.FORMAT_STRING,
                                           data[0:self.SIZE])
      expected_size = round_to_multiple(
          self.SIZE - 16 + partition_name_len + salt_len + root_digest_len, 8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a hashtree '
                          'descriptor.')
      # Nuke NUL-bytes at the end.
      self.hash_algorithm = self.hash_algorithm.split('\0', 1)[0]
      o = 0
      self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o +
                                                      partition_name_len)])
      # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
      self.partition_name.decode('utf-8')
      o += partition_name_len
      self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)]
      o += salt_len
      self.root_digest = data[(self.SIZE + o):(self.SIZE + o + root_digest_len)]
      if root_digest_len != len(hashlib.new(name=self.hash_algorithm).digest()):
        raise LookupError('root_digest_len doesn\'t match hash algorithm')

    else:
      self.dm_verity_version = 0
      self.image_size = 0
      self.tree_offset = 0
      self.tree_size = 0
      self.data_block_size = 0
      self.hash_block_size = 0
      self.fec_num_roots = 0
      self.fec_offset = 0
      self.fec_size = 0
      self.hash_algorithm = ''
      self.partition_name = ''
      self.salt = bytearray()
      self.root_digest = bytearray()

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write('    Hashtree descriptor:\n')
    o.write('      Version of dm-verity:  {}\n'.format(self.dm_verity_version))
    o.write('      Image Size:            {} bytes\n'.format(self.image_size))
    o.write('      Tree Offset:           {}\n'.format(self.tree_offset))
    o.write('      Tree Size:             {} bytes\n'.format(self.tree_size))
    o.write('      Data Block Size:       {} bytes\n'.format(
        self.data_block_size))
    o.write('      Hash Block Size:       {} bytes\n'.format(
        self.hash_block_size))
    o.write('      FEC num roots:         {}\n'.format(self.fec_num_roots))
    o.write('      FEC offset:            {}\n'.format(self.fec_offset))
    o.write('      FEC size:              {} bytes\n'.format(self.fec_size))
    o.write('      Hash Algorithm:        {}\n'.format(self.hash_algorithm))
    o.write('      Partition Name:        {}\n'.format(self.partition_name))
    o.write('      Salt:                  {}\n'.format(str(self.salt).encode(
        'hex')))
    o.write('      Root Digest:           {}\n'.format(str(
        self.root_digest).encode('hex')))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_name = self.partition_name.encode('utf-8')
    num_bytes_following = (self.SIZE + len(encoded_name) + len(self.salt) +
                           len(self.root_digest) - 16)
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       self.dm_verity_version, self.image_size,
                       self.tree_offset, self.tree_size, self.data_block_size,
                       self.hash_block_size, self.fec_num_roots,
                       self.fec_offset, self.fec_size, self.hash_algorithm,
                       len(encoded_name), len(self.salt),
                       len(self.root_digest), self.RESERVED*'\0')
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + encoded_name + self.salt + self.root_digest + padding
    return bytearray(ret)


class AvbHashDescriptor(AvbDescriptor):
  """A class for hash descriptors.

  See the |AvbHashDescriptor| C struct for more information.

  Attributes:
    image_size: Image size, in bytes.
    hash_algorithm: Hash algorithm used.
    partition_name: Partition name.
    salt: Salt used.
    digest: The hash value of salt and data combined.
  """

  TAG = 2
  RESERVED = 64
  SIZE = 68 + RESERVED
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'Q'    # image size (bytes)
                   '32s'  # hash algorithm used
                   'L'    # partition name (bytes)
                   'L'    # salt length (bytes)
                   'L' +  # digest length (bytes)
                   str(RESERVED) + 's')  # reserved

  def __init__(self, data=None):
    """Initializes a new hash descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, self.image_size, self.hash_algorithm,
       partition_name_len, salt_len,
       digest_len, _) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
      expected_size = round_to_multiple(
          self.SIZE - 16 + partition_name_len + salt_len + digest_len, 8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a hash descriptor.')
      # Nuke NUL-bytes at the end.
      self.hash_algorithm = self.hash_algorithm.split('\0', 1)[0]
      o = 0
      self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o +
                                                      partition_name_len)])
      # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
      self.partition_name.decode('utf-8')
      o += partition_name_len
      self.salt = data[(self.SIZE + o):(self.SIZE + o + salt_len)]
      o += salt_len
      self.digest = data[(self.SIZE + o):(self.SIZE + o + digest_len)]
      if digest_len != len(hashlib.new(name=self.hash_algorithm).digest()):
        raise LookupError('digest_len doesn\'t match hash algorithm')

    else:
      self.image_size = 0
      self.hash_algorithm = ''
      self.partition_name = ''
      self.salt = bytearray()
      self.digest = bytearray()

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write('    Hash descriptor:\n')
    o.write('      Image Size:            {} bytes\n'.format(self.image_size))
    o.write('      Hash Algorithm:        {}\n'.format(self.hash_algorithm))
    o.write('      Partition Name:        {}\n'.format(self.partition_name))
    o.write('      Salt:                  {}\n'.format(str(self.salt).encode(
        'hex')))
    o.write('      Digest:                {}\n'.format(str(self.digest).encode(
        'hex')))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_name = self.partition_name.encode('utf-8')
    num_bytes_following = (
        self.SIZE + len(encoded_name) + len(self.salt) + len(self.digest) - 16)
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       self.image_size, self.hash_algorithm, len(encoded_name),
                       len(self.salt), len(self.digest), self.RESERVED*'\0')
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + encoded_name + self.salt + self.digest + padding
    return bytearray(ret)


class AvbKernelCmdlineDescriptor(AvbDescriptor):
  """A class for kernel command-line descriptors.

  See the |AvbKernelCmdlineDescriptor| C struct for more information.

  Attributes:
    flags: Flags.
    kernel_cmdline: The kernel command-line.
  """

  TAG = 3
  SIZE = 24
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'L'    # flags
                   'L')   # cmdline length (bytes)

  FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED = (1 << 0)
  FLAGS_USE_ONLY_IF_HASHTREE_DISABLED = (1 << 1)

  def __init__(self, data=None):
    """Initializes a new kernel cmdline descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, self.flags, kernel_cmdline_length) = (
          struct.unpack(self.FORMAT_STRING, data[0:self.SIZE]))
      expected_size = round_to_multiple(self.SIZE - 16 + kernel_cmdline_length,
                                        8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a kernel cmdline '
                          'descriptor.')
      # Nuke NUL-bytes at the end.
      self.kernel_cmdline = str(data[self.SIZE:(self.SIZE +
                                                kernel_cmdline_length)])
      # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
      self.kernel_cmdline.decode('utf-8')
    else:
      self.flags = 0
      self.kernel_cmdline = ''

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write('    Kernel Cmdline descriptor:\n')
    o.write('      Flags:                 {}\n'.format(self.flags))
    o.write('      Kernel Cmdline:        {}\n'.format(repr(
        self.kernel_cmdline)))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_str = self.kernel_cmdline.encode('utf-8')
    num_bytes_following = (self.SIZE + len(encoded_str) - 16)
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       self.flags, len(encoded_str))
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + encoded_str + padding
    return bytearray(ret)


class AvbChainPartitionDescriptor(AvbDescriptor):
  """A class for chained partition descriptors.

  See the |AvbChainPartitionDescriptor| C struct for more information.

  Attributes:
    rollback_index_location: The rollback index location to use.
    partition_name: Partition name.
    public_key: Bytes for the public key.
  """

  TAG = 4
  RESERVED = 64
  SIZE = 28 + RESERVED
  FORMAT_STRING = ('!QQ'  # tag, num_bytes_following (descriptor header)
                   'L'    # rollback_index_location
                   'L'    # partition_name_size (bytes)
                   'L' +  # public_key_size (bytes)
                   str(RESERVED) + 's')  # reserved

  def __init__(self, data=None):
    """Initializes a new chain partition descriptor.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given descriptor is malformed.
    """
    AvbDescriptor.__init__(self, None)
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (tag, num_bytes_following, self.rollback_index_location,
       partition_name_len,
       public_key_len, _) = struct.unpack(self.FORMAT_STRING, data[0:self.SIZE])
      expected_size = round_to_multiple(
          self.SIZE - 16 + partition_name_len + public_key_len, 8)
      if tag != self.TAG or num_bytes_following != expected_size:
        raise LookupError('Given data does not look like a chain partition '
                          'descriptor.')
      o = 0
      self.partition_name = str(data[(self.SIZE + o):(self.SIZE + o +
                                                      partition_name_len)])
      # Validate UTF-8 - decode() raises UnicodeDecodeError if not valid UTF-8.
      self.partition_name.decode('utf-8')
      o += partition_name_len
      self.public_key = data[(self.SIZE + o):(self.SIZE + o + public_key_len)]

    else:
      self.rollback_index_location = 0
      self.partition_name = ''
      self.public_key = bytearray()

  def print_desc(self, o):
    """Print the descriptor.

    Arguments:
      o: The object to write the output to.
    """
    o.write('    Chain Partition descriptor:\n')
    o.write('      Partition Name:          {}\n'.format(self.partition_name))
    o.write('      Rollback Index Location: {}\n'.format(
        self.rollback_index_location))
    # Just show the SHA1 of the key, for size reasons.
    hexdig = hashlib.sha1(self.public_key).hexdigest()
    o.write('      Public key (sha1):       {}\n'.format(hexdig))

  def encode(self):
    """Serializes the descriptor.

    Returns:
      A bytearray() with the descriptor data.
    """
    encoded_name = self.partition_name.encode('utf-8')
    num_bytes_following = (
        self.SIZE + len(encoded_name) + len(self.public_key) - 16)
    nbf_with_padding = round_to_multiple(num_bytes_following, 8)
    padding_size = nbf_with_padding - num_bytes_following
    desc = struct.pack(self.FORMAT_STRING, self.TAG, nbf_with_padding,
                       self.rollback_index_location, len(encoded_name),
                       len(self.public_key), self.RESERVED*'\0')
    padding = struct.pack(str(padding_size) + 'x')
    ret = desc + encoded_name + self.public_key + padding
    return bytearray(ret)


DESCRIPTOR_CLASSES = [
    AvbPropertyDescriptor, AvbHashtreeDescriptor, AvbHashDescriptor,
    AvbKernelCmdlineDescriptor, AvbChainPartitionDescriptor
]


def parse_descriptors(data):
  """Parses a blob of data into descriptors.

  Arguments:
    data: A bytearray() with encoded descriptors.

  Returns:
    A list of instances of objects derived from AvbDescriptor. For
    unknown descriptors, the class AvbDescriptor is used.
  """
  o = 0
  ret = []
  while o < len(data):
    tag, nb_following = struct.unpack('!2Q', data[o:o + 16])
    if tag < len(DESCRIPTOR_CLASSES):
      c = DESCRIPTOR_CLASSES[tag]
    else:
      c = AvbDescriptor
    ret.append(c(bytearray(data[o:o + 16 + nb_following])))
    o += 16 + nb_following
  return ret


class AvbFooter(object):
  """A class for parsing and writing footers.

  Footers are stored at the end of partitions and point to where the
  AvbVBMeta blob is located. They also contain the original size of
  the image before AVB information was added.

  Attributes:
    magic: Magic for identifying the footer, see |MAGIC|.
    version_major: The major version of avbtool that wrote the footer.
    version_minor: The minor version of avbtool that wrote the footer.
    original_image_size: Original image size.
    vbmeta_offset: Offset of where the AvbVBMeta blob is stored.
    vbmeta_size: Size of the AvbVBMeta blob.
  """

  MAGIC = 'AVBf'
  SIZE = 64
  RESERVED = 28
  FOOTER_VERSION_MAJOR = 1
  FOOTER_VERSION_MINOR = 0
  FORMAT_STRING = ('!4s2L'  # magic, 2 x version.
                   'Q'      # Original image size.
                   'Q'      # Offset of VBMeta blob.
                   'Q' +    # Size of VBMeta blob.
                   str(RESERVED) + 'x')  # padding for reserved bytes

  def __init__(self, data=None):
    """Initializes a new footer object.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      LookupError: If the given footer is malformed.
      struct.error: If the given data has no footer.
    """
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (self.magic, self.version_major, self.version_minor,
       self.original_image_size, self.vbmeta_offset,
       self.vbmeta_size) = struct.unpack(self.FORMAT_STRING, data)
      if self.magic != self.MAGIC:
        raise LookupError('Given data does not look like an AVB footer.')
    else:
      self.magic = self.MAGIC
      self.version_major = self.FOOTER_VERSION_MAJOR
      self.version_minor = self.FOOTER_VERSION_MINOR
      self.original_image_size = 0
      self.vbmeta_offset = 0
      self.vbmeta_size = 0

  def encode(self):
    """Gets a string representing the binary encoding of the footer.

    Returns:
      A bytearray() with a binary representation of the footer.
    """
    return struct.pack(self.FORMAT_STRING, self.magic, self.version_major,
                       self.version_minor, self.original_image_size,
                       self.vbmeta_offset, self.vbmeta_size)


class AvbVBMetaHeader(object):
  """A class for parsing and writing AVB vbmeta images.

  Attributes:
    The attributes correspond to the |AvbVBMetaHeader| struct
    defined in avb_vbmeta_header.h.
  """

  SIZE = 256

  # Keep in sync with |reserved0| and |reserved| field of
  # |AvbVBMetaImageHeader|.
  RESERVED0 = 4
  RESERVED = 80

  # Keep in sync with |AvbVBMetaImageHeader|.
  FORMAT_STRING = ('!4s2L'  # magic, 2 x version
                   '2Q'     # 2 x block size
                   'L'      # algorithm type
                   '2Q'     # offset, size (hash)
                   '2Q'     # offset, size (signature)
                   '2Q'     # offset, size (public key)
                   '2Q'     # offset, size (public key metadata)
                   '2Q'     # offset, size (descriptors)
                   'Q'      # rollback_index
                   'L' +    # flags
                   str(RESERVED0) + 'x' +  # padding for reserved bytes
                   '47sx' +  # NUL-terminated release string
                   str(RESERVED) + 'x')  # padding for reserved bytes

  def __init__(self, data=None):
    """Initializes a new header object.

    Arguments:
      data: If not None, must be a bytearray of size |SIZE|.

    Raises:
      Exception: If the given data is malformed.
    """
    assert struct.calcsize(self.FORMAT_STRING) == self.SIZE

    if data:
      (self.magic, self.required_libavb_version_major,
       self.required_libavb_version_minor,
       self.authentication_data_block_size, self.auxiliary_data_block_size,
       self.algorithm_type, self.hash_offset, self.hash_size,
       self.signature_offset, self.signature_size, self.public_key_offset,
       self.public_key_size, self.public_key_metadata_offset,
       self.public_key_metadata_size, self.descriptors_offset,
       self.descriptors_size,
       self.rollback_index,
       self.flags,
       self.release_string) = struct.unpack(self.FORMAT_STRING, data)
      # Nuke NUL-bytes at the end of the string.
      if self.magic != 'AVB0':
        raise AvbError('Given image does not look like a vbmeta image.')
    else:
      self.magic = 'AVB0'
      # Start by just requiring version 1.0. Code that adds features
      # in a future version can use bump_required_libavb_version_minor() to
      # bump the minor.
      self.required_libavb_version_major = AVB_VERSION_MAJOR
      self.required_libavb_version_minor = 0
      self.authentication_data_block_size = 0
      self.auxiliary_data_block_size = 0
      self.algorithm_type = 0
      self.hash_offset = 0
      self.hash_size = 0
      self.signature_offset = 0
      self.signature_size = 0
      self.public_key_offset = 0
      self.public_key_size = 0
      self.public_key_metadata_offset = 0
      self.public_key_metadata_size = 0
      self.descriptors_offset = 0
      self.descriptors_size = 0
      self.rollback_index = 0
      self.flags = 0
      self.release_string = get_release_string()

  def bump_required_libavb_version_minor(self, minor):
    """Function to bump required_libavb_version_minor.

    Call this when writing data that requires a specific libavb
    version to parse it.

    Arguments:
      minor: The minor version of libavb that has support for the feature.
    """
    self.required_libavb_version_minor = (
        max(self.required_libavb_version_minor, minor))

  def save(self, output):
    """Serializes the header (256 bytes) to disk.

    Arguments:
      output: The object to write the output to.
    """
    output.write(struct.pack(
        self.FORMAT_STRING, self.magic, self.required_libavb_version_major,
        self.required_libavb_version_minor, self.authentication_data_block_size,
        self.auxiliary_data_block_size, self.algorithm_type, self.hash_offset,
        self.hash_size, self.signature_offset, self.signature_size,
        self.public_key_offset, self.public_key_size,
        self.public_key_metadata_offset, self.public_key_metadata_size,
        self.descriptors_offset, self.descriptors_size, self.rollback_index,
        self.flags, self.release_string))

  def encode(self):
    """Serializes the header (256 bytes) to a bytearray().

    Returns:
      A bytearray() with the encoded header.
    """
    return struct.pack(self.FORMAT_STRING, self.magic,
                       self.required_libavb_version_major,
                       self.required_libavb_version_minor,
                       self.authentication_data_block_size,
                       self.auxiliary_data_block_size, self.algorithm_type,
                       self.hash_offset, self.hash_size, self.signature_offset,
                       self.signature_size, self.public_key_offset,
                       self.public_key_size, self.public_key_metadata_offset,
                       self.public_key_metadata_size, self.descriptors_offset,
                       self.descriptors_size, self.rollback_index, self.flags,
                       self.release_string)


class Avb(object):
  """Business logic for avbtool command-line tool."""

  # Keep in sync with avb_ab_flow.h.
  AB_FORMAT_NO_CRC = '!4sBB2xBBBxBBBx12x'
  AB_MAGIC = '\0AB0'
  AB_MAJOR_VERSION = 1
  AB_MINOR_VERSION = 0
  AB_MISC_METADATA_OFFSET = 2048

  # Constants for maximum metadata size. These are used to give
  # meaningful errors if the value passed in via --partition_size is
  # too small and when --calc_max_image_size is used. We use
  # conservative figures.
  MAX_VBMETA_SIZE = 64 * 1024
  MAX_FOOTER_SIZE = 4096

  def erase_footer(self, image_filename, keep_hashtree):
    """Implements the 'erase_footer' command.

    Arguments:
      image_filename: File to erase a footer from.
      keep_hashtree: If True, keep the hashtree and FEC around.

    Raises:
      AvbError: If there's no footer in the image.
    """

    image = ImageHandler(image_filename)

    (footer, _, descriptors, _) = self._parse_image(image)

    if not footer:
      raise AvbError('Given image does not have a footer.')

    new_image_size = None
    if not keep_hashtree:
      new_image_size = footer.original_image_size
    else:
      # If requested to keep the hashtree, search for a hashtree
      # descriptor to figure out the location and size of the hashtree
      # and FEC.
      for desc in descriptors:
        if isinstance(desc, AvbHashtreeDescriptor):
          # The hashtree is always just following the main data so the
          # new size is easily derived.
          new_image_size = desc.tree_offset + desc.tree_size
          # If the image has FEC codes, also keep those.
          if desc.fec_offset > 0:
            fec_end = desc.fec_offset + desc.fec_size
            new_image_size = max(new_image_size, fec_end)
          break
      if not new_image_size:
        raise AvbError('Requested to keep hashtree but no hashtree '
                       'descriptor was found.')

    # And cut...
    image.truncate(new_image_size)

  def set_ab_metadata(self, misc_image, slot_data):
    """Implements the 'set_ab_metadata' command.

    The |slot_data| argument must be of the form 'A_priority:A_tries_remaining:
    A_successful_boot:B_priority:B_tries_remaining:B_successful_boot'.
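
    For example, '15:0:1:14:7:0' is parsed as slot A having priority 15,
    no tries remaining and the successful-boot flag set, and slot B
    having priority 14, 7 tries remaining and the flag cleared. (The
    values here are only illustrative.)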
1704 1705 Arguments: 1706 misc_image: The misc image to write to. 1707 slot_data: Slot data as a string 1708 1709 Raises: 1710 AvbError: If slot data is malformed. 1711 """ 1712 tokens = slot_data.split(':') 1713 if len(tokens) != 6: 1714 raise AvbError('Malformed slot data "{}".'.format(slot_data)) 1715 a_priority = int(tokens[0]) 1716 a_tries_remaining = int(tokens[1]) 1717 a_success = True if int(tokens[2]) != 0 else False 1718 b_priority = int(tokens[3]) 1719 b_tries_remaining = int(tokens[4]) 1720 b_success = True if int(tokens[5]) != 0 else False 1721 1722 ab_data_no_crc = struct.pack(self.AB_FORMAT_NO_CRC, 1723 self.AB_MAGIC, 1724 self.AB_MAJOR_VERSION, self.AB_MINOR_VERSION, 1725 a_priority, a_tries_remaining, a_success, 1726 b_priority, b_tries_remaining, b_success) 1727 # Force CRC to be unsigned, see https://bugs.python.org/issue4903 for why. 1728 crc_value = binascii.crc32(ab_data_no_crc) & 0xffffffff 1729 ab_data = ab_data_no_crc + struct.pack('!I', crc_value) 1730 misc_image.seek(self.AB_MISC_METADATA_OFFSET) 1731 misc_image.write(ab_data) 1732 1733 def info_image(self, image_filename, output): 1734 """Implements the 'info_image' command. 1735 1736 Arguments: 1737 image_filename: Image file to get information from (file object). 1738 output: Output file to write human-readable information to (file object). 1739 """ 1740 1741 image = ImageHandler(image_filename) 1742 1743 o = output 1744 1745 (footer, header, descriptors, image_size) = self._parse_image(image) 1746 1747 if footer: 1748 o.write('Footer version: {}.{}\n'.format(footer.version_major, 1749 footer.version_minor)) 1750 o.write('Image size: {} bytes\n'.format(image_size)) 1751 o.write('Original image size: {} bytes\n'.format( 1752 footer.original_image_size)) 1753 o.write('VBMeta offset: {}\n'.format(footer.vbmeta_offset)) 1754 o.write('VBMeta size: {} bytes\n'.format(footer.vbmeta_size)) 1755 o.write('--\n') 1756 1757 (alg_name, _) = lookup_algorithm_by_type(header.algorithm_type) 1758 1759 o.write('Minimum libavb version: {}.{}{}\n'.format( 1760 header.required_libavb_version_major, 1761 header.required_libavb_version_minor, 1762 ' (Sparse)' if image.is_sparse else '')) 1763 o.write('Header Block: {} bytes\n'.format(AvbVBMetaHeader.SIZE)) 1764 o.write('Authentication Block: {} bytes\n'.format( 1765 header.authentication_data_block_size)) 1766 o.write('Auxiliary Block: {} bytes\n'.format( 1767 header.auxiliary_data_block_size)) 1768 o.write('Algorithm: {}\n'.format(alg_name)) 1769 o.write('Rollback Index: {}\n'.format(header.rollback_index)) 1770 o.write('Flags: {}\n'.format(header.flags)) 1771 o.write('Release String: \'{}\'\n'.format( 1772 header.release_string.rstrip('\0'))) 1773 1774 # Print descriptors. 1775 num_printed = 0 1776 o.write('Descriptors:\n') 1777 for desc in descriptors: 1778 desc.print_desc(o) 1779 num_printed += 1 1780 if num_printed == 0: 1781 o.write(' (none)\n') 1782 1783 def _parse_image(self, image): 1784 """Gets information about an image. 1785 1786 The image can either be a vbmeta or an image with a footer. 1787 1788 Arguments: 1789 image: An ImageHandler (vbmeta or footer) with a hashtree descriptor. 1790 1791 Returns: 1792 A tuple where the first argument is a AvbFooter (None if there 1793 is no footer on the image), the second argument is a 1794 AvbVBMetaHeader, the third argument is a list of 1795 AvbDescriptor-derived instances, and the fourth argument is the 1796 size of |image|. 
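    Example (illustrative sketch; 'boot.img' is a hypothetical path):

      avb = Avb()
      image = ImageHandler('boot.img')
      (footer, header, descriptors, image_size) = avb._parse_image(image)
      if footer:
        print 'vbmeta blob at offset {}'.format(footer.vbmeta_offset)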
1797 """ 1798 assert isinstance(image, ImageHandler) 1799 footer = None 1800 image.seek(image.image_size - AvbFooter.SIZE) 1801 try: 1802 footer = AvbFooter(image.read(AvbFooter.SIZE)) 1803 except (LookupError, struct.error): 1804 # Nope, just seek back to the start. 1805 image.seek(0) 1806 1807 vbmeta_offset = 0 1808 if footer: 1809 vbmeta_offset = footer.vbmeta_offset 1810 1811 image.seek(vbmeta_offset) 1812 h = AvbVBMetaHeader(image.read(AvbVBMetaHeader.SIZE)) 1813 1814 auth_block_offset = vbmeta_offset + AvbVBMetaHeader.SIZE 1815 aux_block_offset = auth_block_offset + h.authentication_data_block_size 1816 desc_start_offset = aux_block_offset + h.descriptors_offset 1817 image.seek(desc_start_offset) 1818 descriptors = parse_descriptors(image.read(h.descriptors_size)) 1819 1820 return footer, h, descriptors, image.image_size 1821 1822 def _load_vbmeta_blob(self, image): 1823 """Gets the vbmeta struct and associated sections. 1824 1825 The image can either be a vbmeta.img or an image with a footer. 1826 1827 Arguments: 1828 image: An ImageHandler (vbmeta or footer). 1829 1830 Returns: 1831 A blob with the vbmeta struct and other sections. 1832 """ 1833 assert isinstance(image, ImageHandler) 1834 footer = None 1835 image.seek(image.image_size - AvbFooter.SIZE) 1836 try: 1837 footer = AvbFooter(image.read(AvbFooter.SIZE)) 1838 except (LookupError, struct.error): 1839 # Nope, just seek back to the start. 1840 image.seek(0) 1841 1842 vbmeta_offset = 0 1843 if footer: 1844 vbmeta_offset = footer.vbmeta_offset 1845 1846 image.seek(vbmeta_offset) 1847 h = AvbVBMetaHeader(image.read(AvbVBMetaHeader.SIZE)) 1848 1849 image.seek(vbmeta_offset) 1850 data_size = AvbVBMetaHeader.SIZE 1851 data_size += h.authentication_data_block_size 1852 data_size += h.auxiliary_data_block_size 1853 return image.read(data_size) 1854 1855 def _get_cmdline_descriptors_for_dm_verity(self, image): 1856 """Generate kernel cmdline descriptors for dm-verity. 1857 1858 Arguments: 1859 image: An ImageHandler (vbmeta or footer) with a hashtree descriptor. 1860 1861 Returns: 1862 A list with two AvbKernelCmdlineDescriptor with dm-verity kernel cmdline 1863 instructions. There is one for when hashtree is not disabled and one for 1864 when it is. 1865 1866 Raises: 1867 AvbError: If |image| doesn't have a hashtree descriptor. 
1868 1869 """ 1870 1871 (_, _, descriptors, _) = self._parse_image(image) 1872 1873 ht = None 1874 for desc in descriptors: 1875 if isinstance(desc, AvbHashtreeDescriptor): 1876 ht = desc 1877 break 1878 1879 if not ht: 1880 raise AvbError('No hashtree descriptor in given image') 1881 1882 c = 'dm="1 vroot none ro 1,' 1883 c += '0' # start 1884 c += ' {}'.format((ht.image_size / 512)) # size (# sectors) 1885 c += ' verity {}'.format(ht.dm_verity_version) # type and version 1886 c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' # data_dev 1887 c += ' PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' # hash_dev 1888 c += ' {}'.format(ht.data_block_size) # data_block 1889 c += ' {}'.format(ht.hash_block_size) # hash_block 1890 c += ' {}'.format(ht.image_size / ht.data_block_size) # #blocks 1891 c += ' {}'.format(ht.image_size / ht.data_block_size) # hash_offset 1892 c += ' {}'.format(ht.hash_algorithm) # hash_alg 1893 c += ' {}'.format(str(ht.root_digest).encode('hex')) # root_digest 1894 c += ' {}'.format(str(ht.salt).encode('hex')) # salt 1895 if ht.fec_num_roots > 0: 1896 c += ' 10' # number of optional args 1897 c += ' restart_on_corruption' 1898 c += ' ignore_zero_blocks' 1899 c += ' use_fec_from_device PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' 1900 c += ' fec_roots {}'.format(ht.fec_num_roots) 1901 # Note that fec_blocks is the size that FEC covers, *not* the 1902 # size of the FEC data. Since we use FEC for everything up until 1903 # the FEC data, it's the same as the offset. 1904 c += ' fec_blocks {}'.format(ht.fec_offset/ht.data_block_size) 1905 c += ' fec_start {}'.format(ht.fec_offset/ht.data_block_size) 1906 else: 1907 c += ' 2' # number of optional args 1908 c += ' restart_on_corruption' 1909 c += ' ignore_zero_blocks' 1910 c += '" root=/dev/dm-0' 1911 1912 # Now that we have the command-line, generate the descriptor. 1913 desc = AvbKernelCmdlineDescriptor() 1914 desc.kernel_cmdline = c 1915 desc.flags = ( 1916 AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_NOT_DISABLED) 1917 1918 # The descriptor for when hashtree verification is disabled is a lot 1919 # simpler - we just set the root to the partition. 1920 desc_no_ht = AvbKernelCmdlineDescriptor() 1921 desc_no_ht.kernel_cmdline = 'root=PARTUUID=$(ANDROID_SYSTEM_PARTUUID)' 1922 desc_no_ht.flags = ( 1923 AvbKernelCmdlineDescriptor.FLAGS_USE_ONLY_IF_HASHTREE_DISABLED) 1924 1925 return [desc, desc_no_ht] 1926 1927 def make_vbmeta_image(self, output, chain_partitions, algorithm_name, 1928 key_path, public_key_metadata_path, rollback_index, 1929 flags, props, props_from_file, kernel_cmdlines, 1930 setup_rootfs_from_kernel, 1931 include_descriptors_from_image, signing_helper, 1932 release_string, 1933 append_to_release_string): 1934 """Implements the 'make_vbmeta_image' command. 1935 1936 Arguments: 1937 output: File to write the image to. 1938 chain_partitions: List of partitions to chain or None. 1939 algorithm_name: Name of algorithm to use. 1940 key_path: Path to key to use or None. 1941 public_key_metadata_path: Path to public key metadata or None. 1942 rollback_index: The rollback index to use. 1943 flags: Flags value to use in the image. 1944 props: Properties to insert (list of strings of the form 'key:value'). 1945 props_from_file: Properties to insert (list of strings 'key:<path>'). 1946 kernel_cmdlines: Kernel cmdlines to insert (list of strings). 1947 setup_rootfs_from_kernel: None or file to generate from. 1948 include_descriptors_from_image: List of file objects with descriptors. 
1949 signing_helper: Program which signs a hash and return signature. 1950 release_string: None or avbtool release string to use instead of default. 1951 append_to_release_string: None or string to append. 1952 1953 Raises: 1954 AvbError: If a chained partition is malformed. 1955 """ 1956 1957 descriptors = [] 1958 vbmeta_blob = self._generate_vbmeta_blob( 1959 algorithm_name, key_path, public_key_metadata_path, descriptors, 1960 chain_partitions, rollback_index, flags, props, props_from_file, 1961 kernel_cmdlines, setup_rootfs_from_kernel, 1962 include_descriptors_from_image, signing_helper, release_string, 1963 append_to_release_string) 1964 1965 # Write entire vbmeta blob (header, authentication, auxiliary). 1966 output.seek(0) 1967 output.write(vbmeta_blob) 1968 1969 def _generate_vbmeta_blob(self, algorithm_name, key_path, 1970 public_key_metadata_path, descriptors, 1971 chain_partitions, 1972 rollback_index, flags, props, props_from_file, 1973 kernel_cmdlines, 1974 setup_rootfs_from_kernel, 1975 include_descriptors_from_image, signing_helper, 1976 release_string, append_to_release_string): 1977 """Generates a VBMeta blob. 1978 1979 This blob contains the header (struct AvbVBMetaHeader), the 1980 authentication data block (which contains the hash and signature 1981 for the header and auxiliary block), and the auxiliary block 1982 (which contains descriptors, the public key used, and other data). 1983 1984 The |key| parameter can |None| only if the |algorithm_name| is 1985 'NONE'. 1986 1987 Arguments: 1988 algorithm_name: The algorithm name as per the ALGORITHMS dict. 1989 key_path: The path to the .pem file used to sign the blob. 1990 public_key_metadata_path: Path to public key metadata or None. 1991 descriptors: A list of descriptors to insert or None. 1992 chain_partitions: List of partitions to chain or None. 1993 rollback_index: The rollback index to use. 1994 flags: Flags to use in the image. 1995 props: Properties to insert (List of strings of the form 'key:value'). 1996 props_from_file: Properties to insert (List of strings 'key:<path>'). 1997 kernel_cmdlines: Kernel cmdlines to insert (list of strings). 1998 setup_rootfs_from_kernel: None or file to generate 1999 dm-verity kernel cmdline from. 2000 include_descriptors_from_image: List of file objects for which 2001 to insert descriptors from. 2002 signing_helper: Program which signs a hash and return signature. 2003 release_string: None or avbtool release string. 2004 append_to_release_string: None or string to append. 2005 2006 Returns: 2007 A bytearray() with the VBMeta blob. 2008 2009 Raises: 2010 Exception: If the |algorithm_name| is not found, if no key has 2011 been given and the given algorithm requires one, or the key is 2012 of the wrong size. 
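    The returned blob is laid out as

      [header (256 bytes)][authentication data block][auxiliary data block]

    so the following size relationship holds for any blob produced here
    (sketch):

      blob = avb._generate_vbmeta_blob(...)  # usual arguments elided
      h = AvbVBMetaHeader(blob[0:AvbVBMetaHeader.SIZE])
      assert len(blob) == (AvbVBMetaHeader.SIZE +
                           h.authentication_data_block_size +
                           h.auxiliary_data_block_size)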
2013 2014 """ 2015 try: 2016 alg = ALGORITHMS[algorithm_name] 2017 except KeyError: 2018 raise AvbError('Unknown algorithm with name {}'.format(algorithm_name)) 2019 2020 if not descriptors: 2021 descriptors = [] 2022 2023 # Insert chained partition descriptors, if any 2024 if chain_partitions: 2025 for cp in chain_partitions: 2026 cp_tokens = cp.split(':') 2027 if len(cp_tokens) != 3: 2028 raise AvbError('Malformed chained partition "{}".'.format(cp)) 2029 desc = AvbChainPartitionDescriptor() 2030 desc.partition_name = cp_tokens[0] 2031 desc.rollback_index_location = int(cp_tokens[1]) 2032 if desc.rollback_index_location < 1: 2033 raise AvbError('Rollback index location must be 1 or larger.') 2034 file_path = cp_tokens[2] 2035 desc.public_key = open(file_path, 'rb').read() 2036 descriptors.append(desc) 2037 2038 # Descriptors. 2039 encoded_descriptors = bytearray() 2040 for desc in descriptors: 2041 encoded_descriptors.extend(desc.encode()) 2042 2043 # Add properties. 2044 if props: 2045 for prop in props: 2046 idx = prop.find(':') 2047 if idx == -1: 2048 raise AvbError('Malformed property "{}".'.format(prop)) 2049 desc = AvbPropertyDescriptor() 2050 desc.key = prop[0:idx] 2051 desc.value = prop[(idx + 1):] 2052 encoded_descriptors.extend(desc.encode()) 2053 if props_from_file: 2054 for prop in props_from_file: 2055 idx = prop.find(':') 2056 if idx == -1: 2057 raise AvbError('Malformed property "{}".'.format(prop)) 2058 desc = AvbPropertyDescriptor() 2059 desc.key = prop[0:idx] 2060 desc.value = prop[(idx + 1):] 2061 file_path = prop[(idx + 1):] 2062 desc.value = open(file_path, 'rb').read() 2063 encoded_descriptors.extend(desc.encode()) 2064 2065 # Add AvbKernelCmdline descriptor for dm-verity, if requested. 2066 if setup_rootfs_from_kernel: 2067 image_handler = ImageHandler( 2068 setup_rootfs_from_kernel.name) 2069 cmdline_desc = self._get_cmdline_descriptors_for_dm_verity(image_handler) 2070 encoded_descriptors.extend(cmdline_desc[0].encode()) 2071 encoded_descriptors.extend(cmdline_desc[1].encode()) 2072 2073 # Add kernel command-lines. 2074 if kernel_cmdlines: 2075 for i in kernel_cmdlines: 2076 desc = AvbKernelCmdlineDescriptor() 2077 desc.kernel_cmdline = i 2078 encoded_descriptors.extend(desc.encode()) 2079 2080 # Add descriptors from other images. 2081 if include_descriptors_from_image: 2082 for image in include_descriptors_from_image: 2083 image_handler = ImageHandler(image.name) 2084 (_, _, image_descriptors, _) = self._parse_image(image_handler) 2085 for desc in image_descriptors: 2086 encoded_descriptors.extend(desc.encode()) 2087 2088 # Load public key metadata blob, if requested. 2089 pkmd_blob = [] 2090 if public_key_metadata_path: 2091 with open(public_key_metadata_path) as f: 2092 pkmd_blob = f.read() 2093 2094 key = None 2095 encoded_key = bytearray() 2096 if alg.public_key_num_bytes > 0: 2097 if not key_path: 2098 raise AvbError('Key is required for algorithm {}'.format( 2099 algorithm_name)) 2100 key = Crypto.PublicKey.RSA.importKey(open(key_path).read()) 2101 encoded_key = encode_rsa_key(key) 2102 if len(encoded_key) != alg.public_key_num_bytes: 2103 raise AvbError('Key is wrong size for algorithm {}'.format( 2104 algorithm_name)) 2105 2106 h = AvbVBMetaHeader() 2107 2108 # Override release string, if requested. 2109 if isinstance(release_string, (str, unicode)): 2110 h.release_string = release_string 2111 2112 # Append to release string, if requested. Also insert a space before. 
2113 if isinstance(append_to_release_string, (str, unicode)): 2114 h.release_string += ' ' + append_to_release_string 2115 2116 # For the Auxiliary data block, descriptors are stored at offset 0, 2117 # followed by the public key, followed by the public key metadata blob. 2118 h.auxiliary_data_block_size = round_to_multiple( 2119 len(encoded_descriptors) + len(encoded_key) + len(pkmd_blob), 64) 2120 h.descriptors_offset = 0 2121 h.descriptors_size = len(encoded_descriptors) 2122 h.public_key_offset = h.descriptors_size 2123 h.public_key_size = len(encoded_key) 2124 h.public_key_metadata_offset = h.public_key_offset + h.public_key_size 2125 h.public_key_metadata_size = len(pkmd_blob) 2126 2127 # For the Authentication data block, the hash is first and then 2128 # the signature. 2129 h.authentication_data_block_size = round_to_multiple( 2130 alg.hash_num_bytes + alg.signature_num_bytes, 64) 2131 h.algorithm_type = alg.algorithm_type 2132 h.hash_offset = 0 2133 h.hash_size = alg.hash_num_bytes 2134 # Signature offset and size - it's stored right after the hash 2135 # (in Authentication data block). 2136 h.signature_offset = alg.hash_num_bytes 2137 h.signature_size = alg.signature_num_bytes 2138 2139 h.rollback_index = rollback_index 2140 h.flags = flags 2141 2142 # Generate Header data block. 2143 header_data_blob = h.encode() 2144 2145 # Generate Auxiliary data block. 2146 aux_data_blob = bytearray() 2147 aux_data_blob.extend(encoded_descriptors) 2148 aux_data_blob.extend(encoded_key) 2149 aux_data_blob.extend(pkmd_blob) 2150 padding_bytes = h.auxiliary_data_block_size - len(aux_data_blob) 2151 aux_data_blob.extend('\0' * padding_bytes) 2152 2153 # Calculate the hash. 2154 binary_hash = bytearray() 2155 binary_signature = bytearray() 2156 if algorithm_name != 'NONE': 2157 if algorithm_name[0:6] == 'SHA256': 2158 ha = hashlib.sha256() 2159 elif algorithm_name[0:6] == 'SHA512': 2160 ha = hashlib.sha512() 2161 else: 2162 raise AvbError('Unsupported algorithm {}.'.format(algorithm_name)) 2163 ha.update(header_data_blob) 2164 ha.update(aux_data_blob) 2165 binary_hash.extend(ha.digest()) 2166 2167 # Calculate the signature. 2168 padding_and_hash = str(bytearray(alg.padding)) + binary_hash 2169 binary_signature.extend(raw_sign(signing_helper, algorithm_name, key_path, 2170 padding_and_hash)) 2171 2172 # Generate Authentication data block. 2173 auth_data_blob = bytearray() 2174 auth_data_blob.extend(binary_hash) 2175 auth_data_blob.extend(binary_signature) 2176 padding_bytes = h.authentication_data_block_size - len(auth_data_blob) 2177 auth_data_blob.extend('\0' * padding_bytes) 2178 2179 return header_data_blob + auth_data_blob + aux_data_blob 2180 2181 def extract_public_key(self, key_path, output): 2182 """Implements the 'extract_public_key' command. 2183 2184 Arguments: 2185 key_path: The path to a RSA private key file. 2186 output: The file to write to. 2187 """ 2188 key = Crypto.PublicKey.RSA.importKey(open(key_path).read()) 2189 write_rsa_key(output, key) 2190 2191 def append_vbmeta_image(self, image_filename, vbmeta_image_filename, 2192 partition_size): 2193 """Implementation of the append_vbmeta_image command. 2194 2195 Arguments: 2196 image_filename: File to add the footer to. 2197 vbmeta_image_filename: File to get vbmeta struct from. 2198 partition_size: Size of partition. 2199 2200 Raises: 2201 AvbError: If an argument is incorrect. 
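    On success the partition ends up laid out as (sketch):

      [original image][padding to block size][vbmeta blob + padding]
      [DONT_CARE chunk][footer in the last block]

    where the footer (an AvbFooter stored in the final |block_size| bytes
    of the partition) records the offset and size of the vbmeta blob.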
2202 """ 2203 image = ImageHandler(image_filename) 2204 2205 if partition_size % image.block_size != 0: 2206 raise AvbError('Partition size of {} is not a multiple of the image ' 2207 'block size {}.'.format(partition_size, 2208 image.block_size)) 2209 2210 # If there's already a footer, truncate the image to its original 2211 # size. This way 'avbtool append_vbmeta_image' is idempotent. 2212 image.seek(image.image_size - AvbFooter.SIZE) 2213 try: 2214 footer = AvbFooter(image.read(AvbFooter.SIZE)) 2215 # Existing footer found. Just truncate. 2216 original_image_size = footer.original_image_size 2217 image.truncate(footer.original_image_size) 2218 except (LookupError, struct.error): 2219 original_image_size = image.image_size 2220 2221 # If anything goes wrong from here-on, restore the image back to 2222 # its original size. 2223 try: 2224 vbmeta_image_handler = ImageHandler(vbmeta_image_filename) 2225 vbmeta_blob = self._load_vbmeta_blob(vbmeta_image_handler) 2226 2227 # If the image isn't sparse, its size might not be a multiple of 2228 # the block size. This will screw up padding later so just grow it. 2229 if image.image_size % image.block_size != 0: 2230 assert not image.is_sparse 2231 padding_needed = image.block_size - (image.image_size%image.block_size) 2232 image.truncate(image.image_size + padding_needed) 2233 2234 # The append_raw() method requires content with size being a 2235 # multiple of |block_size| so add padding as needed. Also record 2236 # where this is written to since we'll need to put that in the 2237 # footer. 2238 vbmeta_offset = image.image_size 2239 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) - 2240 len(vbmeta_blob)) 2241 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed 2242 2243 # Append vbmeta blob and footer 2244 image.append_raw(vbmeta_blob_with_padding) 2245 vbmeta_end_offset = vbmeta_offset + len(vbmeta_blob_with_padding) 2246 2247 # Now insert a DONT_CARE chunk with enough bytes such that the 2248 # final Footer block is at the end of partition_size.. 2249 image.append_dont_care(partition_size - vbmeta_end_offset - 2250 1*image.block_size) 2251 2252 # Generate the Footer that tells where the VBMeta footer 2253 # is. Also put enough padding in the front of the footer since 2254 # we'll write out an entire block. 2255 footer = AvbFooter() 2256 footer.original_image_size = original_image_size 2257 footer.vbmeta_offset = vbmeta_offset 2258 footer.vbmeta_size = len(vbmeta_blob) 2259 footer_blob = footer.encode() 2260 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) + 2261 footer_blob) 2262 image.append_raw(footer_blob_with_padding) 2263 2264 except: 2265 # Truncate back to original size, then re-raise 2266 image.truncate(original_image_size) 2267 raise 2268 2269 def add_hash_footer(self, image_filename, partition_size, partition_name, 2270 hash_algorithm, salt, chain_partitions, algorithm_name, 2271 key_path, 2272 public_key_metadata_path, rollback_index, flags, props, 2273 props_from_file, kernel_cmdlines, 2274 setup_rootfs_from_kernel, 2275 include_descriptors_from_image, signing_helper, 2276 release_string, append_to_release_string, 2277 output_vbmeta_image, do_not_append_vbmeta_image): 2278 """Implementation of the add_hash_footer on unsparse images. 2279 2280 Arguments: 2281 image_filename: File to add the footer to. 2282 partition_size: Size of partition. 2283 partition_name: Name of partition (without A/B suffix). 2284 hash_algorithm: Hash algorithm to use. 
2285 salt: Salt to use as a hexadecimal string or None to use /dev/urandom. 2286 chain_partitions: List of partitions to chain. 2287 algorithm_name: Name of algorithm to use. 2288 key_path: Path to key to use or None. 2289 public_key_metadata_path: Path to public key metadata or None. 2290 rollback_index: Rollback index. 2291 flags: Flags value to use in the image. 2292 props: Properties to insert (List of strings of the form 'key:value'). 2293 props_from_file: Properties to insert (List of strings 'key:<path>'). 2294 kernel_cmdlines: Kernel cmdlines to insert (list of strings). 2295 setup_rootfs_from_kernel: None or file to generate 2296 dm-verity kernel cmdline from. 2297 include_descriptors_from_image: List of file objects for which 2298 to insert descriptors from. 2299 signing_helper: Program which signs a hash and return signature. 2300 release_string: None or avbtool release string. 2301 append_to_release_string: None or string to append. 2302 output_vbmeta_image: If not None, also write vbmeta struct to this file. 2303 do_not_append_vbmeta_image: If True, don't append vbmeta struct. 2304 2305 Raises: 2306 AvbError: If an argument is incorrect. 2307 """ 2308 image = ImageHandler(image_filename) 2309 2310 if partition_size % image.block_size != 0: 2311 raise AvbError('Partition size of {} is not a multiple of the image ' 2312 'block size {}.'.format(partition_size, 2313 image.block_size)) 2314 2315 # If there's already a footer, truncate the image to its original 2316 # size. This way 'avbtool add_hash_footer' is idempotent (modulo 2317 # salts). 2318 image.seek(image.image_size - AvbFooter.SIZE) 2319 try: 2320 footer = AvbFooter(image.read(AvbFooter.SIZE)) 2321 # Existing footer found. Just truncate. 2322 original_image_size = footer.original_image_size 2323 image.truncate(footer.original_image_size) 2324 except (LookupError, struct.error): 2325 original_image_size = image.image_size 2326 2327 # If anything goes wrong from here-on, restore the image back to 2328 # its original size. 2329 try: 2330 # First, calculate the maximum image size such that an image 2331 # this size + metadata (footer + vbmeta struct) fits in 2332 # |partition_size|. 2333 max_metadata_size = self.MAX_VBMETA_SIZE + self.MAX_FOOTER_SIZE 2334 max_image_size = partition_size - max_metadata_size 2335 2336 # If image size exceeds the maximum image size, fail. 2337 if image.image_size > max_image_size: 2338 raise AvbError('Image size of {} exceeds maximum image ' 2339 'size of {} in order to fit in a partition ' 2340 'size of {}.'.format(image.image_size, max_image_size, 2341 partition_size)) 2342 2343 digest_size = len(hashlib.new(name=hash_algorithm).digest()) 2344 if salt: 2345 salt = salt.decode('hex') 2346 else: 2347 if salt is None: 2348 # If salt is not explicitly specified, choose a hash 2349 # that's the same size as the hash size. 2350 hash_size = digest_size 2351 salt = open('/dev/urandom').read(hash_size) 2352 else: 2353 salt = '' 2354 2355 hasher = hashlib.new(name=hash_algorithm, string=salt) 2356 # TODO(zeuthen): might want to read this in chunks to avoid 2357 # memory pressure, then again, this is only supposed to be used 2358 # on kernel/initramfs partitions. Possible optimization. 
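      # A chunked variant of the hashing below could look like this
      # (illustrative sketch, not enabled; the 1 MiB chunk size is an
      # arbitrary choice):
      #
      #   image.seek(0)
      #   remaining = image.image_size
      #   while remaining > 0:
      #     data = image.read(min(remaining, 1024 * 1024))
      #     hasher.update(data)
      #     remaining -= len(data)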
2359 image.seek(0) 2360 hasher.update(image.read(image.image_size)) 2361 digest = hasher.digest() 2362 2363 h_desc = AvbHashDescriptor() 2364 h_desc.image_size = image.image_size 2365 h_desc.hash_algorithm = hash_algorithm 2366 h_desc.partition_name = partition_name 2367 h_desc.salt = salt 2368 h_desc.digest = digest 2369 2370 # Generate the VBMeta footer. 2371 vbmeta_blob = self._generate_vbmeta_blob( 2372 algorithm_name, key_path, public_key_metadata_path, [h_desc], 2373 chain_partitions, rollback_index, flags, props, props_from_file, 2374 kernel_cmdlines, setup_rootfs_from_kernel, 2375 include_descriptors_from_image, signing_helper, release_string, 2376 append_to_release_string) 2377 2378 # If the image isn't sparse, its size might not be a multiple of 2379 # the block size. This will screw up padding later so just grow it. 2380 if image.image_size % image.block_size != 0: 2381 assert not image.is_sparse 2382 padding_needed = image.block_size - (image.image_size%image.block_size) 2383 image.truncate(image.image_size + padding_needed) 2384 2385 # The append_raw() method requires content with size being a 2386 # multiple of |block_size| so add padding as needed. Also record 2387 # where this is written to since we'll need to put that in the 2388 # footer. 2389 vbmeta_offset = image.image_size 2390 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) - 2391 len(vbmeta_blob)) 2392 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed 2393 2394 # Write vbmeta blob, if requested. 2395 if output_vbmeta_image: 2396 output_vbmeta_image.write(vbmeta_blob) 2397 2398 # Append vbmeta blob and footer, unless requested not to. 2399 if not do_not_append_vbmeta_image: 2400 image.append_raw(vbmeta_blob_with_padding) 2401 vbmeta_end_offset = vbmeta_offset + len(vbmeta_blob_with_padding) 2402 2403 # Now insert a DONT_CARE chunk with enough bytes such that the 2404 # final Footer block is at the end of partition_size.. 2405 image.append_dont_care(partition_size - vbmeta_end_offset - 2406 1*image.block_size) 2407 2408 # Generate the Footer that tells where the VBMeta footer 2409 # is. Also put enough padding in the front of the footer since 2410 # we'll write out an entire block. 2411 footer = AvbFooter() 2412 footer.original_image_size = original_image_size 2413 footer.vbmeta_offset = vbmeta_offset 2414 footer.vbmeta_size = len(vbmeta_blob) 2415 footer_blob = footer.encode() 2416 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) + 2417 footer_blob) 2418 image.append_raw(footer_blob_with_padding) 2419 2420 except: 2421 # Truncate back to original size, then re-raise 2422 image.truncate(original_image_size) 2423 raise 2424 2425 def add_hashtree_footer(self, image_filename, partition_size, partition_name, 2426 generate_fec, fec_num_roots, hash_algorithm, 2427 block_size, salt, chain_partitions, algorithm_name, 2428 key_path, 2429 public_key_metadata_path, rollback_index, flags, 2430 props, props_from_file, kernel_cmdlines, 2431 setup_rootfs_from_kernel, 2432 include_descriptors_from_image, 2433 calc_max_image_size, signing_helper, 2434 release_string, append_to_release_string, 2435 output_vbmeta_image, do_not_append_vbmeta_image): 2436 """Implements the 'add_hashtree_footer' command. 2437 2438 See https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity for 2439 more information about dm-verity and these hashes. 2440 2441 Arguments: 2442 image_filename: File to add the footer to. 2443 partition_size: Size of partition. 
2444 partition_name: Name of partition (without A/B suffix). 2445 generate_fec: If True, generate FEC codes. 2446 fec_num_roots: Number of roots for FEC. 2447 hash_algorithm: Hash algorithm to use. 2448 block_size: Block size to use. 2449 salt: Salt to use as a hexadecimal string or None to use /dev/urandom. 2450 chain_partitions: List of partitions to chain. 2451 algorithm_name: Name of algorithm to use. 2452 key_path: Path to key to use or None. 2453 public_key_metadata_path: Path to public key metadata or None. 2454 rollback_index: Rollback index. 2455 flags: Flags value to use in the image. 2456 props: Properties to insert (List of strings of the form 'key:value'). 2457 props_from_file: Properties to insert (List of strings 'key:<path>'). 2458 kernel_cmdlines: Kernel cmdlines to insert (list of strings). 2459 setup_rootfs_from_kernel: None or file to generate 2460 dm-verity kernel cmdline from. 2461 include_descriptors_from_image: List of file objects for which 2462 to insert descriptors from. 2463 calc_max_image_size: Don't store the hashtree or footer - instead 2464 calculate the maximum image size leaving enough room for hashtree 2465 and metadata with the given |partition_size|. 2466 signing_helper: Program which signs a hash and return signature. 2467 release_string: None or avbtool release string. 2468 append_to_release_string: None or string to append. 2469 output_vbmeta_image: If not None, also write vbmeta struct to this file. 2470 do_not_append_vbmeta_image: If True, don't append vbmeta struct. 2471 2472 Raises: 2473 AvbError: If an argument is incorrect. 2474 """ 2475 digest_size = len(hashlib.new(name=hash_algorithm).digest()) 2476 digest_padding = round_to_pow2(digest_size) - digest_size 2477 2478 # First, calculate the maximum image size such that an image 2479 # this size + the hashtree + metadata (footer + vbmeta struct) 2480 # fits in |partition_size|. We use very conservative figures for 2481 # metadata. 2482 (_, max_tree_size) = calc_hash_level_offsets( 2483 partition_size, block_size, digest_size + digest_padding) 2484 max_fec_size = 0 2485 if generate_fec: 2486 max_fec_size = calc_fec_data_size(partition_size, fec_num_roots) 2487 max_metadata_size = (max_fec_size + max_tree_size + 2488 self.MAX_VBMETA_SIZE + 2489 self.MAX_FOOTER_SIZE) 2490 max_image_size = partition_size - max_metadata_size 2491 2492 # If we're asked to only calculate the maximum image size, we're done. 2493 if calc_max_image_size: 2494 print '{}'.format(max_image_size) 2495 return 2496 2497 image = ImageHandler(image_filename) 2498 2499 if partition_size % image.block_size != 0: 2500 raise AvbError('Partition size of {} is not a multiple of the image ' 2501 'block size {}.'.format(partition_size, 2502 image.block_size)) 2503 2504 # If there's already a footer, truncate the image to its original 2505 # size. This way 'avbtool add_hashtree_footer' is idempotent 2506 # (modulo salts). 2507 image.seek(image.image_size - AvbFooter.SIZE) 2508 try: 2509 footer = AvbFooter(image.read(AvbFooter.SIZE)) 2510 # Existing footer found. Just truncate. 2511 original_image_size = footer.original_image_size 2512 image.truncate(footer.original_image_size) 2513 except (LookupError, struct.error): 2514 original_image_size = image.image_size 2515 2516 # If anything goes wrong from here-on, restore the image back to 2517 # its original size. 2518 try: 2519 # Ensure image is multiple of block_size. 
2520 rounded_image_size = round_to_multiple(image.image_size, block_size) 2521 if rounded_image_size > image.image_size: 2522 image.append_raw('\0' * (rounded_image_size - image.image_size)) 2523 2524 # If image size exceeds the maximum image size, fail. 2525 if image.image_size > max_image_size: 2526 raise AvbError('Image size of {} exceeds maximum image ' 2527 'size of {} in order to fit in a partition ' 2528 'size of {}.'.format(image.image_size, max_image_size, 2529 partition_size)) 2530 2531 if salt: 2532 salt = salt.decode('hex') 2533 else: 2534 if salt is None: 2535 # If salt is not explicitly specified, choose a hash 2536 # that's the same size as the hash size. 2537 hash_size = digest_size 2538 salt = open('/dev/urandom').read(hash_size) 2539 else: 2540 salt = '' 2541 2542 # Hashes are stored upside down so we need to calculate hash 2543 # offsets in advance. 2544 (hash_level_offsets, tree_size) = calc_hash_level_offsets( 2545 image.image_size, block_size, digest_size + digest_padding) 2546 2547 # If the image isn't sparse, its size might not be a multiple of 2548 # the block size. This will screw up padding later so just grow it. 2549 if image.image_size % image.block_size != 0: 2550 assert not image.is_sparse 2551 padding_needed = image.block_size - (image.image_size%image.block_size) 2552 image.truncate(image.image_size + padding_needed) 2553 2554 # Generate the tree and add padding as needed. 2555 tree_offset = image.image_size 2556 root_digest, hash_tree = generate_hash_tree(image, image.image_size, 2557 block_size, 2558 hash_algorithm, salt, 2559 digest_padding, 2560 hash_level_offsets, 2561 tree_size) 2562 2563 # Generate HashtreeDescriptor with details about the tree we 2564 # just generated. 2565 ht_desc = AvbHashtreeDescriptor() 2566 ht_desc.dm_verity_version = 1 2567 ht_desc.image_size = image.image_size 2568 ht_desc.tree_offset = tree_offset 2569 ht_desc.tree_size = tree_size 2570 ht_desc.data_block_size = block_size 2571 ht_desc.hash_block_size = block_size 2572 ht_desc.hash_algorithm = hash_algorithm 2573 ht_desc.partition_name = partition_name 2574 ht_desc.salt = salt 2575 ht_desc.root_digest = root_digest 2576 2577 # Write the hash tree 2578 padding_needed = (round_to_multiple(len(hash_tree), image.block_size) - 2579 len(hash_tree)) 2580 hash_tree_with_padding = hash_tree + '\0'*padding_needed 2581 image.append_raw(hash_tree_with_padding) 2582 len_hashtree_and_fec = len(hash_tree_with_padding) 2583 2584 # Generate FEC codes, if requested. 2585 if generate_fec: 2586 fec_data = generate_fec_data(image_filename, fec_num_roots) 2587 padding_needed = (round_to_multiple(len(fec_data), image.block_size) - 2588 len(fec_data)) 2589 fec_data_with_padding = fec_data + '\0'*padding_needed 2590 fec_offset = image.image_size 2591 image.append_raw(fec_data_with_padding) 2592 len_hashtree_and_fec += len(fec_data_with_padding) 2593 # Update the hashtree descriptor. 2594 ht_desc.fec_num_roots = fec_num_roots 2595 ht_desc.fec_offset = fec_offset 2596 ht_desc.fec_size = len(fec_data) 2597 2598 # Generate the VBMeta footer and add padding as needed. 
2599 vbmeta_offset = tree_offset + len_hashtree_and_fec 2600 vbmeta_blob = self._generate_vbmeta_blob( 2601 algorithm_name, key_path, public_key_metadata_path, [ht_desc], 2602 chain_partitions, rollback_index, flags, props, props_from_file, 2603 kernel_cmdlines, setup_rootfs_from_kernel, 2604 include_descriptors_from_image, signing_helper, release_string, 2605 append_to_release_string) 2606 padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) - 2607 len(vbmeta_blob)) 2608 vbmeta_blob_with_padding = vbmeta_blob + '\0'*padding_needed 2609 2610 # Write vbmeta blob, if requested. 2611 if output_vbmeta_image: 2612 output_vbmeta_image.write(vbmeta_blob) 2613 2614 # Append vbmeta blob and footer, unless requested not to. 2615 if not do_not_append_vbmeta_image: 2616 image.append_raw(vbmeta_blob_with_padding) 2617 2618 # Now insert a DONT_CARE chunk with enough bytes such that the 2619 # final Footer block is at the end of partition_size.. 2620 image.append_dont_care(partition_size - image.image_size - 2621 1*image.block_size) 2622 2623 # Generate the Footer that tells where the VBMeta footer 2624 # is. Also put enough padding in the front of the footer since 2625 # we'll write out an entire block. 2626 footer = AvbFooter() 2627 footer.original_image_size = original_image_size 2628 footer.vbmeta_offset = vbmeta_offset 2629 footer.vbmeta_size = len(vbmeta_blob) 2630 footer_blob = footer.encode() 2631 footer_blob_with_padding = ('\0'*(image.block_size - AvbFooter.SIZE) + 2632 footer_blob) 2633 image.append_raw(footer_blob_with_padding) 2634 2635 except: 2636 # Truncate back to original size, then re-raise. 2637 image.truncate(original_image_size) 2638 raise 2639 2640 def make_atx_certificate(self, output, authority_key_path, subject_key, 2641 subject_key_version, subject, 2642 is_intermediate_authority, signing_helper): 2643 """Implements the 'make_atx_certificate' command. 2644 2645 Android Things certificates are required for Android Things public key 2646 metadata. They chain the vbmeta signing key for a particular product back to 2647 a fused, permanent root key. These certificates are fixed-length and fixed- 2648 format with the explicit goal of not parsing ASN.1 in bootloader code. 2649 2650 Arguments: 2651 output: Certificate will be written to this file on success. 2652 authority_key_path: A PEM file path with the authority private key. 2653 If None, then a certificate will be created without a 2654 signature. The signature can be created out-of-band 2655 and appended. 2656 subject_key: A PEM or DER subject public key. 2657 subject_key_version: A 64-bit version value. If this is None, the number 2658 of seconds since the epoch is used. 2659 subject: A subject identifier. For Product Signing Key certificates this 2660 should be the same Product ID found in the permanent attributes. 2661 is_intermediate_authority: True if the certificate is for an intermediate 2662 authority. 2663 signing_helper: Program which signs a hash and returns the signature. 
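    For an RSA-4096 subject key the fixed layout adds up as follows
    (a sketch derived from the code below):

      4     format version
      1032  subject public key (8 + 2*4096/8 bytes, as produced by
            encode_rsa_key)
      32    SHA-256 digest of |subject|
      32    SHA-256 digest of the usage string
      8     subject key version
      512   SHA512_RSA4096 signature
      ----
      1620  bytes total, which matches EXPECTED_CERTIFICATE_SIZE in
            make_atx_metadata.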
2664 """ 2665 signed_data = bytearray() 2666 signed_data.extend(struct.pack('<I', 1)) # Format Version 2667 signed_data.extend( 2668 encode_rsa_key(Crypto.PublicKey.RSA.importKey(subject_key))) 2669 hasher = hashlib.sha256() 2670 hasher.update(subject) 2671 signed_data.extend(hasher.digest()) 2672 usage = 'com.google.android.things.vboot' 2673 if is_intermediate_authority: 2674 usage += '.ca' 2675 hasher = hashlib.sha256() 2676 hasher.update(usage) 2677 signed_data.extend(hasher.digest()) 2678 if not subject_key_version: 2679 subject_key_version = int(time.time()) 2680 signed_data.extend(struct.pack('<Q', subject_key_version)) 2681 signature = bytearray() 2682 if authority_key_path: 2683 padding_and_hash = bytearray() 2684 algorithm_name = 'SHA512_RSA4096' 2685 hasher = hashlib.sha512() 2686 padding_and_hash.extend(ALGORITHMS[algorithm_name].padding) 2687 hasher.update(signed_data) 2688 padding_and_hash.extend(hasher.digest()) 2689 signature.extend(raw_sign(signing_helper, algorithm_name, 2690 authority_key_path, padding_and_hash)) 2691 output.write(signed_data) 2692 output.write(signature) 2693 2694 def make_atx_permanent_attributes(self, output, root_authority_key, 2695 product_id): 2696 """Implements the 'make_atx_permanent_attributes' command. 2697 2698 Android Things permanent attributes are designed to be permanent for a 2699 particular product and a hash of these attributes should be fused into 2700 hardware to enforce this. 2701 2702 Arguments: 2703 output: Attributes will be written to this file on success. 2704 root_authority_key: A PEM or DER public key for the root authority. 2705 product_id: A 16-byte Product ID. 2706 2707 Raises: 2708 AvbError: If an argument is incorrect. 2709 """ 2710 EXPECTED_PRODUCT_ID_SIZE = 16 2711 if len(product_id) != EXPECTED_PRODUCT_ID_SIZE: 2712 raise AvbError('Invalid Product ID length.') 2713 output.write(struct.pack('<I', 1)) # Format Version 2714 write_rsa_key(output, Crypto.PublicKey.RSA.importKey(root_authority_key)) 2715 output.write(product_id) 2716 2717 def make_atx_metadata(self, output, intermediate_key_certificate, 2718 product_key_certificate): 2719 """Implements the 'make_atx_metadata' command. 2720 2721 Android Things metadata are included in vbmeta images to facilitate 2722 verification. The output of this command can be used as the 2723 public_key_metadata argument to other commands. 2724 2725 Arguments: 2726 output: Metadata will be written to this file on success. 2727 intermediate_key_certificate: A certificate file as output by 2728 make_atx_certificate with 2729 is_intermediate_authority set to true. 2730 product_key_certificate: A certificate file as output by 2731 make_atx_certificate with 2732 is_intermediate_authority set to false. 2733 2734 Raises: 2735 AvbError: If an argument is incorrect. 2736 """ 2737 EXPECTED_CERTIFICATE_SIZE = 1620 2738 if len(intermediate_key_certificate) != EXPECTED_CERTIFICATE_SIZE: 2739 raise AvbError('Invalid intermediate key certificate length.') 2740 if len(product_key_certificate) != EXPECTED_CERTIFICATE_SIZE: 2741 raise AvbError('Invalid product key certificate length.') 2742 output.write(struct.pack('<I', 1)) # Format Version 2743 output.write(intermediate_key_certificate) 2744 output.write(product_key_certificate) 2745 2746 2747def calc_hash_level_offsets(image_size, block_size, digest_size): 2748 """Calculate the offsets of all the hash-levels in a Merkle-tree. 2749 2750 Arguments: 2751 image_size: The size of the image to calculate a Merkle-tree for. 
2752 block_size: The block size, e.g. 4096. 2753 digest_size: The size of each hash, e.g. 32 for SHA-256. 2754 2755 Returns: 2756 A tuple where the first argument is an array of offsets and the 2757 second is size of the tree, in bytes. 2758 """ 2759 level_offsets = [] 2760 level_sizes = [] 2761 tree_size = 0 2762 2763 num_levels = 0 2764 size = image_size 2765 while size > block_size: 2766 num_blocks = (size + block_size - 1) / block_size 2767 level_size = round_to_multiple(num_blocks * digest_size, block_size) 2768 2769 level_sizes.append(level_size) 2770 tree_size += level_size 2771 num_levels += 1 2772 2773 size = level_size 2774 2775 for n in range(0, num_levels): 2776 offset = 0 2777 for m in range(n + 1, num_levels): 2778 offset += level_sizes[m] 2779 level_offsets.append(offset) 2780 2781 return level_offsets, tree_size 2782 2783 2784# See system/extras/libfec/include/fec/io.h for these definitions. 2785FEC_FOOTER_FORMAT = '<LLLLLQ32s' 2786FEC_MAGIC = 0xfecfecfe 2787 2788 2789def calc_fec_data_size(image_size, num_roots): 2790 """Calculates how much space FEC data will take. 2791 2792 Args: 2793 image_size: The size of the image. 2794 num_roots: Number of roots. 2795 2796 Returns: 2797 The number of bytes needed for FEC for an image of the given size 2798 and with the requested number of FEC roots. 2799 2800 Raises: 2801 ValueError: If output from the 'fec' tool is invalid. 2802 2803 """ 2804 p = subprocess.Popen( 2805 ['fec', '--print-fec-size', str(image_size), '--roots', str(num_roots)], 2806 stdout=subprocess.PIPE, 2807 stderr=subprocess.PIPE) 2808 (pout, perr) = p.communicate() 2809 retcode = p.wait() 2810 if retcode != 0: 2811 raise ValueError('Error invoking fec: {}'.format(perr)) 2812 return int(pout) 2813 2814 2815def generate_fec_data(image_filename, num_roots): 2816 """Generate FEC codes for an image. 2817 2818 Args: 2819 image_filename: The filename of the image. 2820 num_roots: Number of roots. 2821 2822 Returns: 2823 The FEC data blob. 2824 2825 Raises: 2826 ValueError: If output from the 'fec' tool is invalid. 2827 """ 2828 fec_tmpfile = tempfile.NamedTemporaryFile() 2829 subprocess.check_call( 2830 ['fec', '--encode', '--roots', str(num_roots), image_filename, 2831 fec_tmpfile.name], 2832 stderr=open(os.devnull)) 2833 fec_data = fec_tmpfile.read() 2834 footer_size = struct.calcsize(FEC_FOOTER_FORMAT) 2835 footer_data = fec_data[-footer_size:] 2836 (magic, _, _, num_roots, fec_size, _, _) = struct.unpack(FEC_FOOTER_FORMAT, 2837 footer_data) 2838 if magic != FEC_MAGIC: 2839 raise ValueError('Unexpected magic in FEC footer') 2840 return fec_data[0:fec_size] 2841 2842 2843def generate_hash_tree(image, image_size, block_size, hash_alg_name, salt, 2844 digest_padding, hash_level_offsets, tree_size): 2845 """Generates a Merkle-tree for a file. 2846 2847 Args: 2848 image: The image, as a file. 2849 image_size: The size of the image. 2850 block_size: The block size, e.g. 4096. 2851 hash_alg_name: The hash algorithm, e.g. 'sha256' or 'sha1'. 2852 salt: The salt to use. 2853 digest_padding: The padding for each digest. 2854 hash_level_offsets: The offsets from calc_hash_level_offsets(). 2855 tree_size: The size of the tree, in number of bytes. 2856 2857 Returns: 2858 A tuple where the first element is the top-level hash and the 2859 second element is the hash-tree. 
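  Worked example (sketch): for a 1 MiB image, block_size 4096 and
  hash_alg_name 'sha256' (digest size 32, so digest_padding is 0),
  calc_hash_level_offsets() yields

    hash_level_offsets == [4096, 0]
    tree_size == 12288

  i.e. the 8192 bytes of data-block hashes (256 blocks * 32 bytes) are
  stored at offset 4096 and the single 4096-byte top level at offset 0;
  the returned hash-tree is |tree_size| bytes and the first element of
  the returned tuple is the hash of that top-level block.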
2860 """ 2861 hash_ret = bytearray(tree_size) 2862 hash_src_offset = 0 2863 hash_src_size = image_size 2864 level_num = 0 2865 while hash_src_size > block_size: 2866 level_output = '' 2867 remaining = hash_src_size 2868 while remaining > 0: 2869 hasher = hashlib.new(name=hash_alg_name, string=salt) 2870 # Only read from the file for the first level - for subsequent 2871 # levels, access the array we're building. 2872 if level_num == 0: 2873 image.seek(hash_src_offset + hash_src_size - remaining) 2874 data = image.read(min(remaining, block_size)) 2875 else: 2876 offset = hash_level_offsets[level_num - 1] + hash_src_size - remaining 2877 data = hash_ret[offset:offset + block_size] 2878 hasher.update(data) 2879 2880 remaining -= len(data) 2881 if len(data) < block_size: 2882 hasher.update('\0' * (block_size - len(data))) 2883 level_output += hasher.digest() 2884 if digest_padding > 0: 2885 level_output += '\0' * digest_padding 2886 2887 padding_needed = (round_to_multiple( 2888 len(level_output), block_size) - len(level_output)) 2889 level_output += '\0' * padding_needed 2890 2891 # Copy level-output into resulting tree. 2892 offset = hash_level_offsets[level_num] 2893 hash_ret[offset:offset + len(level_output)] = level_output 2894 2895 # Continue on to the next level. 2896 hash_src_size = len(level_output) 2897 level_num += 1 2898 2899 hasher = hashlib.new(name=hash_alg_name, string=salt) 2900 hasher.update(level_output) 2901 return hasher.digest(), hash_ret 2902 2903 2904class AvbTool(object): 2905 """Object for avbtool command-line tool.""" 2906 2907 def __init__(self): 2908 """Initializer method.""" 2909 self.avb = Avb() 2910 2911 def _add_common_args(self, sub_parser): 2912 """Adds arguments used by several sub-commands. 2913 2914 Arguments: 2915 sub_parser: The parser to add arguments to. 2916 """ 2917 sub_parser.add_argument('--algorithm', 2918 help='Algorithm to use (default: NONE)', 2919 metavar='ALGORITHM', 2920 default='NONE') 2921 sub_parser.add_argument('--key', 2922 help='Path to RSA private key file', 2923 metavar='KEY', 2924 required=False) 2925 sub_parser.add_argument('--signing_helper', 2926 help='Path to helper used for signing', 2927 metavar='APP', 2928 default=None, 2929 required=False) 2930 sub_parser.add_argument('--public_key_metadata', 2931 help='Path to public key metadata file', 2932 metavar='KEY_METADATA', 2933 required=False) 2934 sub_parser.add_argument('--rollback_index', 2935 help='Rollback Index', 2936 type=parse_number, 2937 default=0) 2938 # This is used internally for unit tests. Do not include in --help output. 2939 sub_parser.add_argument('--internal_release_string', 2940 help=argparse.SUPPRESS) 2941 sub_parser.add_argument('--append_to_release_string', 2942 help='Text to append to release string', 2943 metavar='STR') 2944 sub_parser.add_argument('--prop', 2945 help='Add property', 2946 metavar='KEY:VALUE', 2947 action='append') 2948 sub_parser.add_argument('--prop_from_file', 2949 help='Add property from file', 2950 metavar='KEY:PATH', 2951 action='append') 2952 sub_parser.add_argument('--kernel_cmdline', 2953 help='Add kernel cmdline', 2954 metavar='CMDLINE', 2955 action='append') 2956 # TODO(zeuthen): the --setup_rootfs_from_kernel option used to be called 2957 # --generate_dm_verity_cmdline_from_hashtree. Remove support for the latter 2958 # at some future point. 
2959 sub_parser.add_argument('--setup_rootfs_from_kernel', 2960 '--generate_dm_verity_cmdline_from_hashtree', 2961 metavar='IMAGE', 2962 help='Adds kernel cmdline to set up IMAGE', 2963 type=argparse.FileType('rb')) 2964 sub_parser.add_argument('--include_descriptors_from_image', 2965 help='Include descriptors from image', 2966 metavar='IMAGE', 2967 action='append', 2968 type=argparse.FileType('rb')) 2969 # These are only allowed from top-level vbmeta and boot-in-lieu-of-vbmeta. 2970 sub_parser.add_argument('--chain_partition', 2971 help='Allow signed integrity-data for partition', 2972 metavar='PART_NAME:ROLLBACK_SLOT:KEY_PATH', 2973 action='append') 2974 sub_parser.add_argument('--flags', 2975 help='VBMeta flags', 2976 type=parse_number, 2977 default=0) 2978 sub_parser.add_argument('--set_hashtree_disabled_flag', 2979 help='Set the HASHTREE_DISABLED flag', 2980 action='store_true') 2981 2982 def _fixup_common_args(self, args): 2983 """Common fixups needed by subcommands. 2984 2985 Arguments: 2986 args: Arguments to modify. 2987 2988 Returns: 2989 The modified arguments. 2990 """ 2991 if args.set_hashtree_disabled_flag: 2992 args.flags |= AVB_VBMETA_IMAGE_FLAGS_HASHTREE_DISABLED 2993 return args 2994 2995 def run(self, argv): 2996 """Command-line processor. 2997 2998 Arguments: 2999 argv: Pass sys.argv from main. 3000 """ 3001 parser = argparse.ArgumentParser() 3002 subparsers = parser.add_subparsers(title='subcommands') 3003 3004 sub_parser = subparsers.add_parser('version', 3005 help='Prints version of avbtool.') 3006 sub_parser.set_defaults(func=self.version) 3007 3008 sub_parser = subparsers.add_parser('extract_public_key', 3009 help='Extract public key.') 3010 sub_parser.add_argument('--key', 3011 help='Path to RSA private key file', 3012 required=True) 3013 sub_parser.add_argument('--output', 3014 help='Output file name', 3015 type=argparse.FileType('wb'), 3016 required=True) 3017 sub_parser.set_defaults(func=self.extract_public_key) 3018 3019 sub_parser = subparsers.add_parser('make_vbmeta_image', 3020 help='Makes a vbmeta image.') 3021 sub_parser.add_argument('--output', 3022 help='Output file name', 3023 type=argparse.FileType('wb'), 3024 required=True) 3025 self._add_common_args(sub_parser) 3026 sub_parser.set_defaults(func=self.make_vbmeta_image) 3027 3028 sub_parser = subparsers.add_parser('add_hash_footer', 3029 help='Add hashes and footer to image.') 3030 sub_parser.add_argument('--image', 3031 help='Image to add hashes to', 3032 type=argparse.FileType('rab+')) 3033 sub_parser.add_argument('--partition_size', 3034 help='Partition size', 3035 type=parse_number, 3036 required=True) 3037 sub_parser.add_argument('--partition_name', 3038 help='Partition name', 3039 required=True) 3040 sub_parser.add_argument('--hash_algorithm', 3041 help='Hash algorithm to use (default: sha256)', 3042 default='sha256') 3043 sub_parser.add_argument('--salt', 3044 help='Salt in hex (default: /dev/urandom)') 3045 sub_parser.add_argument('--output_vbmeta_image', 3046 help='Also write vbmeta struct to file', 3047 type=argparse.FileType('wb')) 3048 sub_parser.add_argument('--do_not_append_vbmeta_image', 3049 help=('Do not append vbmeta struct or footer ' 3050 'to the image'), 3051 action='store_true') 3052 self._add_common_args(sub_parser) 3053 sub_parser.set_defaults(func=self.add_hash_footer) 3054 3055 sub_parser = subparsers.add_parser('append_vbmeta_image', 3056 help='Append vbmeta image to image.') 3057 sub_parser.add_argument('--image', 3058 help='Image to append vbmeta blob to', 3059 
type=argparse.FileType('rab+')) 3060 sub_parser.add_argument('--partition_size', 3061 help='Partition size', 3062 type=parse_number, 3063 required=True) 3064 sub_parser.add_argument('--vbmeta_image', 3065 help='Image with vbmeta blob to append', 3066 type=argparse.FileType('rb')) 3067 sub_parser.set_defaults(func=self.append_vbmeta_image) 3068 3069 sub_parser = subparsers.add_parser('add_hashtree_footer', 3070 help='Add hashtree and footer to image.') 3071 sub_parser.add_argument('--image', 3072 help='Image to add hashtree to', 3073 type=argparse.FileType('rab+')) 3074 sub_parser.add_argument('--partition_size', 3075 help='Partition size', 3076 type=parse_number, 3077 required=True) 3078 sub_parser.add_argument('--partition_name', 3079 help='Partition name', 3080 default=None) 3081 sub_parser.add_argument('--hash_algorithm', 3082 help='Hash algorithm to use (default: sha1)', 3083 default='sha1') 3084 sub_parser.add_argument('--salt', 3085 help='Salt in hex (default: /dev/urandom)') 3086 sub_parser.add_argument('--block_size', 3087 help='Block size (default: 4096)', 3088 type=parse_number, 3089 default=4096) 3090 sub_parser.add_argument('--generate_fec', 3091 help='Add forward-error-correction codes', 3092 action='store_true') 3093 sub_parser.add_argument('--fec_num_roots', 3094 help='Number of roots for FEC (default: 2)', 3095 type=parse_number, 3096 default=2) 3097 sub_parser.add_argument('--calc_max_image_size', 3098 help=('Don\'t store the hashtree or footer - ' 3099 'instead calculate the maximum image size ' 3100 'leaving enough room for hashtree ' 3101 'and metadata with the given partition ' 3102 'size.'), 3103 action='store_true') 3104 sub_parser.add_argument('--output_vbmeta_image', 3105 help='Also write vbmeta struct to file', 3106 type=argparse.FileType('wb')) 3107 sub_parser.add_argument('--do_not_append_vbmeta_image', 3108 help=('Do not append vbmeta struct or footer ' 3109 'to the image'), 3110 action='store_true') 3111 self._add_common_args(sub_parser) 3112 sub_parser.set_defaults(func=self.add_hashtree_footer) 3113 3114 sub_parser = subparsers.add_parser('erase_footer', 3115 help='Erase footer from an image.') 3116 sub_parser.add_argument('--image', 3117 help='Image with a footer', 3118 type=argparse.FileType('rwb+'), 3119 required=True) 3120 sub_parser.add_argument('--keep_hashtree', 3121 help='Keep the hashtree and FEC in the image', 3122 action='store_true') 3123 sub_parser.set_defaults(func=self.erase_footer) 3124 3125 sub_parser = subparsers.add_parser( 3126 'info_image', 3127 help='Show information about vbmeta or footer.') 3128 sub_parser.add_argument('--image', 3129 help='Image to show information about', 3130 type=argparse.FileType('rb'), 3131 required=True) 3132 sub_parser.add_argument('--output', 3133 help='Write info to file', 3134 type=argparse.FileType('wt'), 3135 default=sys.stdout) 3136 sub_parser.set_defaults(func=self.info_image) 3137 3138 sub_parser = subparsers.add_parser('set_ab_metadata', 3139 help='Set A/B metadata.') 3140 sub_parser.add_argument('--misc_image', 3141 help=('The misc image to modify. If the image does ' 3142 'not exist, it will be created.'), 3143 type=argparse.FileType('r+b'), 3144 required=True) 3145 sub_parser.add_argument('--slot_data', 3146 help=('Slot data of the form "priority", ' 3147 '"tries_remaining", "sucessful_boot" for ' 3148 'slot A followed by the same for slot B, ' 3149 'separated by colons. 
The default value ' 3150 'is 15:7:0:14:7:0.'), 3151 default='15:7:0:14:7:0') 3152 sub_parser.set_defaults(func=self.set_ab_metadata) 3153 3154 sub_parser = subparsers.add_parser( 3155 'make_atx_certificate', 3156 help='Create an Android Things eXtension (ATX) certificate.') 3157 sub_parser.add_argument('--output', 3158 help='Write certificate to file', 3159 type=argparse.FileType('wb'), 3160 default=sys.stdout) 3161 sub_parser.add_argument('--subject', 3162 help=('Path to subject file'), 3163 type=argparse.FileType('rb'), 3164 required=True) 3165 sub_parser.add_argument('--subject_key', 3166 help=('Path to subject RSA public key file'), 3167 type=argparse.FileType('rb'), 3168 required=True) 3169 sub_parser.add_argument('--subject_key_version', 3170 help=('Version of the subject key'), 3171 type=parse_number, 3172 required=False) 3173 sub_parser.add_argument('--subject_is_intermediate_authority', 3174 help=('Generate an intermediate authority ' 3175 'certificate'), 3176 action='store_true') 3177 sub_parser.add_argument('--authority_key', 3178 help='Path to authority RSA private key file', 3179 required=False) 3180 sub_parser.add_argument('--signing_helper', 3181 help='Path to helper used for signing', 3182 metavar='APP', 3183 default=None, 3184 required=False) 3185 sub_parser.set_defaults(func=self.make_atx_certificate) 3186 3187 sub_parser = subparsers.add_parser( 3188 'make_atx_permanent_attributes', 3189 help='Create Android Things eXtension (ATX) permanent attributes.') 3190 sub_parser.add_argument('--output', 3191 help='Write attributes to file', 3192 type=argparse.FileType('wb'), 3193 default=sys.stdout) 3194 sub_parser.add_argument('--root_authority_key', 3195 help='Path to authority RSA public key file', 3196 type=argparse.FileType('rb'), 3197 required=True) 3198 sub_parser.add_argument('--product_id', 3199 help=('Path to Product ID file'), 3200 type=argparse.FileType('rb'), 3201 required=True) 3202 sub_parser.set_defaults(func=self.make_atx_permanent_attributes) 3203 3204 sub_parser = subparsers.add_parser( 3205 'make_atx_metadata', 3206 help='Create Android Things eXtension (ATX) metadata.') 3207 sub_parser.add_argument('--output', 3208 help='Write metadata to file', 3209 type=argparse.FileType('wb'), 3210 default=sys.stdout) 3211 sub_parser.add_argument('--intermediate_key_certificate', 3212 help='Path to intermediate key certificate file', 3213 type=argparse.FileType('rb'), 3214 required=True) 3215 sub_parser.add_argument('--product_key_certificate', 3216 help='Path to product key certificate file', 3217 type=argparse.FileType('rb'), 3218 required=True) 3219 sub_parser.set_defaults(func=self.make_atx_metadata) 3220 3221 args = parser.parse_args(argv[1:]) 3222 try: 3223 args.func(args) 3224 except AvbError as e: 3225 sys.stderr.write('{}: {}\n'.format(argv[0], e.message)) 3226 sys.exit(1) 3227 3228 def version(self, _): 3229 """Implements the 'version' sub-command.""" 3230 print get_release_string() 3231 3232 def extract_public_key(self, args): 3233 """Implements the 'extract_public_key' sub-command.""" 3234 self.avb.extract_public_key(args.key, args.output) 3235 3236 def make_vbmeta_image(self, args): 3237 """Implements the 'make_vbmeta_image' sub-command.""" 3238 args = self._fixup_common_args(args) 3239 self.avb.make_vbmeta_image(args.output, args.chain_partition, 3240 args.algorithm, args.key, 3241 args.public_key_metadata, args.rollback_index, 3242 args.flags, args.prop, args.prop_from_file, 3243 args.kernel_cmdline, 3244 args.setup_rootfs_from_kernel, 3245 
args.include_descriptors_from_image, 3246 args.signing_helper, 3247 args.internal_release_string, 3248 args.append_to_release_string) 3249 3250 def append_vbmeta_image(self, args): 3251 """Implements the 'append_vbmeta_image' sub-command.""" 3252 self.avb.append_vbmeta_image(args.image.name, args.vbmeta_image.name, 3253 args.partition_size) 3254 3255 def add_hash_footer(self, args): 3256 """Implements the 'add_hash_footer' sub-command.""" 3257 args = self._fixup_common_args(args) 3258 self.avb.add_hash_footer(args.image.name, args.partition_size, 3259 args.partition_name, args.hash_algorithm, 3260 args.salt, args.chain_partition, args.algorithm, 3261 args.key, 3262 args.public_key_metadata, args.rollback_index, 3263 args.flags, args.prop, args.prop_from_file, 3264 args.kernel_cmdline, 3265 args.setup_rootfs_from_kernel, 3266 args.include_descriptors_from_image, 3267 args.signing_helper, 3268 args.internal_release_string, 3269 args.append_to_release_string, 3270 args.output_vbmeta_image, 3271 args.do_not_append_vbmeta_image) 3272 3273 def add_hashtree_footer(self, args): 3274 """Implements the 'add_hashtree_footer' sub-command.""" 3275 args = self._fixup_common_args(args) 3276 self.avb.add_hashtree_footer(args.image.name if args.image else None, 3277 args.partition_size, 3278 args.partition_name, 3279 args.generate_fec, args.fec_num_roots, 3280 args.hash_algorithm, args.block_size, 3281 args.salt, args.chain_partition, args.algorithm, 3282 args.key, args.public_key_metadata, 3283 args.rollback_index, args.flags, args.prop, 3284 args.prop_from_file, 3285 args.kernel_cmdline, 3286 args.setup_rootfs_from_kernel, 3287 args.include_descriptors_from_image, 3288 args.calc_max_image_size, args.signing_helper, 3289 args.internal_release_string, 3290 args.append_to_release_string, 3291 args.output_vbmeta_image, 3292 args.do_not_append_vbmeta_image) 3293 3294 def erase_footer(self, args): 3295 """Implements the 'erase_footer' sub-command.""" 3296 self.avb.erase_footer(args.image.name, args.keep_hashtree) 3297 3298 def set_ab_metadata(self, args): 3299 """Implements the 'set_ab_metadata' sub-command.""" 3300 self.avb.set_ab_metadata(args.misc_image, args.slot_data) 3301 3302 def info_image(self, args): 3303 """Implements the 'info_image' sub-command.""" 3304 self.avb.info_image(args.image.name, args.output) 3305 3306 def make_atx_certificate(self, args): 3307 """Implements the 'make_atx_certificate' sub-command.""" 3308 self.avb.make_atx_certificate(args.output, args.authority_key, 3309 args.subject_key.read(), 3310 args.subject_key_version, 3311 args.subject.read(), 3312 args.subject_is_intermediate_authority, 3313 args.signing_helper) 3314 3315 def make_atx_permanent_attributes(self, args): 3316 """Implements the 'make_atx_permanent_attributes' sub-command.""" 3317 self.avb.make_atx_permanent_attributes(args.output, 3318 args.root_authority_key.read(), 3319 args.product_id.read()) 3320 3321 def make_atx_metadata(self, args): 3322 """Implements the 'make_atx_metadata' sub-command.""" 3323 self.avb.make_atx_metadata(args.output, 3324 args.intermediate_key_certificate.read(), 3325 args.product_key_certificate.read()) 3326 3327 3328if __name__ == '__main__': 3329 tool = AvbTool() 3330 tool.run(sys.argv) 3331
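
# Typical invocations (illustrative only; key and image paths are
# hypothetical):
#
#   avbtool make_vbmeta_image --algorithm SHA256_RSA2048 --key key.pem \
#       --rollback_index 1 --output vbmeta.img
#
#   avbtool add_hashtree_footer --image system.img \
#       --partition_size 1073741824 --partition_name system \
#       --algorithm SHA256_RSA2048 --key key.pem --generate_fec
#
#   avbtool info_image --image vbmeta.img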