# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import errno
import getopt
import getpass
import imp
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import zipfile

import blockimgdiff
import rangelib

from hashlib import sha1 as sha1


class Options(object):
  def __init__(self):
    platform_search_path = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }

    self.search_path = platform_search_path.get(sys.platform, None)
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m"  # JVM Args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.worker_threads = None


OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")


class ExternalError(RuntimeError):
  pass


def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified."""
  if OPTIONS.verbose:
    print "  running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)


def CloseInheritedPipes():
  """Gmake in Mac OS has a file descriptor (PIPE) leak. We close those fds
  before doing other work."""
  if platform.system() != "Darwin":
    return
  for d in range(3, 1025):
    try:
      stat = os.fstat(d)
      if stat is not None:
        pipebit = stat[0] & 0x1000
        if pipebit != 0:
          os.close(d)
    except OSError:
      pass


def LoadInfoDict(input_file):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict."""

  def read_helper(fn):
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.
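  #
  # An illustrative (not exhaustive) sketch of that legacy layout and the
  # keys each file feeds, as handled below:
  #   META/mkyaffs2-extra-flags.txt   ->  d["mkyaffs2_extra_flags"]
  #   META/recovery-api-version.txt   ->  d["recovery_api_version"]
  #   META/tool-extensions.txt        ->  d["tool_extensions"]
  #   META/imagesizes.txt             ->  d["blocksize"], d["<name>_size"], ...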

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"])
  d["build.prop"] = LoadBuildProp(read_helper)
  return d


def LoadBuildProp(read_helper):
  try:
    data = read_helper("SYSTEM/build.prop")
  except KeyError:
    print "Warning: could not find SYSTEM/build.prop"
    data = ""
  return LoadDictionaryFromLines(data.split("\n"))


def LoadDictionaryFromLines(lines):
  d = {}
  for line in lines:
    line = line.strip()
    if not line or line.startswith("#"):
      continue
    if "=" in line:
      name, value = line.split("=", 1)
      d[name] = value
  return d


def LoadRecoveryFSTab(read_helper, fstab_version):
  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, device2,
                 context=None):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.device2 = device2
      self.context = context

  try:
    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
  except KeyError:
    print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
    data = ""

  if fstab_version == 1:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      options = None
      if len(pieces) >= 4:
        if pieces[3].startswith("/"):
          device2 = pieces[3]
          if len(pieces) >= 5:
            options = pieces[4]
        else:
          device2 = None
          options = pieces[3]
      else:
        device2 = None

      mount_point = pieces[0]
      length = 0
      if options:
        options = options.split(",")
        for i in options:
          if i.startswith("length="):
            length = int(i[7:])
          else:
            print "%s: unknown option \"%s\"" % (mount_point, i)

      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,
                                 device2=device2)

  elif fstab_version == 2:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
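      # A hypothetical v2 entry with those five columns (device path and
      # flags are illustrative only):
      #   /dev/block/platform/soc.0/by-name/system /system ext4 ro,barrier=1 wait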
      pieces = line.split()
      if len(pieces) != 5:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # Ignore entries that are managed by vold
      options = pieces[4]
      if "voldmanaged=" in options:
        continue

      # It's a good line, parse it
      length = 0
      options = options.split(",")
      for i in options:
        if i.startswith("length="):
          length = int(i[7:])
        else:
          # Ignore all unknown options in the unified fstab
          continue

      mount_flags = pieces[3]
      # Honor the SELinux context if present.
      context = None
      for i in mount_flags.split(","):
        if i.startswith("context="):
          context = i

      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)

  else:
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  return d


def DumpInfoDict(d):
  for k, v in sorted(d.items()):
    print "%-25s = (%s) %s" % (k, type(v).__name__, v)


def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
  """Take a kernel, cmdline, and ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image.  Return the image
  data, or None if sourcedir does not appear to contain files for
  building the requested image."""

  if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or
      not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  ramdisk_img = tempfile.NamedTemporaryFile()
  img = tempfile.NamedTemporaryFile()

  if os.access(fs_config_file, os.F_OK):
    cmd = ["mkbootfs", "-f", fs_config_file, os.path.join(sourcedir, "RAMDISK")]
  else:
    cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
  p1 = Run(cmd, stdout=subprocess.PIPE)
  p2 = Run(["minigzip"],
           stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

  p2.wait()
  p1.wait()
  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
  assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--ramdisk", ramdisk_img.name,
                "--output", img_unsigned.name])
  else:
    cmd.extend(["--ramdisk", ramdisk_img.name,
                "--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)
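
  # For reference, the mkbootimg invocation assembled above typically looks
  # something like the following (all values illustrative):
  #   mkbootimg --kernel BOOT/kernel --cmdline "console=ttyHSL0,115200,n8" \
  #       --base 0x80000000 --pagesize 2048 --ramdisk /tmp/tmpXXXXXX \
  #       --output /tmp/tmpYYYYYY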

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk", img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  img.seek(0, os.SEEK_SET)
  data = img.read()

  ramdisk_img.close()
  img.close()

  return data


def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object (with name 'name') with the desired bootable
  image.  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name
  'prebuilt_name', otherwise look for it under 'unpack_dir'/IMAGES,
  otherwise construct it from the source files in
  'unpack_dir'/'tree_subdir'."""

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  print "building image from target_files %s..." % (tree_subdir,)
  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                            os.path.join(unpack_dir, fs_config),
                            info_dict)
  if data:
    return File(name, data)
  return None


def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
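
  A hypothetical call (file names are illustrative):
    tmp, input_zip = UnzipTemp("target_files.zip", "META/*")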
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(filename, dirname):
    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    if pattern is not None:
      cmd.append(pattern)
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    if p.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (filename,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = m.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")


def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password."""

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords


def SignFile(input_name, output_name, key, password, align=None,
             whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password).

  If align is an integer > 1, zipalign is run to align stored files in
  the output zip on 'align'-byte boundaries.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.
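
  A hypothetical call (the key path is illustrative):
    SignFile("unsigned.zip", "signed.zip",
             "build/target/product/security/testkey", password=None,
             whole_file=True)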
  """

  if align == 0 or align == 1:
    align = None

  if align:
    temp = tempfile.NamedTemporaryFile()
    sign_name = temp.name
  else:
    sign_name = output_name

  cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")
  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, sign_name])

  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))

  if align:
    p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
    p.communicate()
    if p.returncode != 0:
      raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
    temp.close()


def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size."""

  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg


def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict."""
  certmap = {}
  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    line = line.strip()
    if not line:
      continue
    m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
                 r'private_key="(.*)"$', line)
    if m:
      name, cert, privkey = m.groups()
      public_key_suffix_len = len(OPTIONS.public_key_suffix)
      private_key_suffix_len = len(OPTIONS.private_key_suffix)
      if cert in SPECIAL_CERT_STRINGS and not privkey:
        certmap[name] = cert
      elif (cert.endswith(OPTIONS.public_key_suffix) and
            privkey.endswith(OPTIONS.private_key_suffix) and
            cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
        certmap[name] = cert[:-public_key_suffix_len]
      else:
        raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
  return certmap


COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific)  <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""

def Usage(docstring):
  print docstring.rstrip("\n")
  print COMMON_DOCSTRING


def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler."""

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args


def MakeTempFile(prefix=None, suffix=None):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called.  Return the filename."""
  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  os.close(fd)
  OPTIONS.tempfiles.append(fn)
  return fn


def Cleanup():
  for i in OPTIONS.tempfiles:
    if os.path.isdir(i):
      shutil.rmtree(i)
    else:
      os.remove(i)


class PasswordManager(object):
  def __init__(self):
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)
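
    The password file read below stores one entry per line, e.g.
    (password and key name are illustrative):
      [[[  my-password  ]]] build/target/product/security/releasekey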

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result


def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger
  # than 2GiB. The Python interpreter sometimes rejects strings that large
  # (though it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.
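  #
  # A typical call, assuming the caller wants a stored (uncompressed) image
  # entry (names are illustrative):
  #   ZipWrite(output_zip, "/tmp/system.img", arcname="system.img",
  #            compress_type=zipfile.ZIP_STORED)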
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  saved_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, saved_stat.st_mode)
    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit


def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    if perms is None:
      perms = 0o644
  else:
    zinfo = zinfo_or_arcname

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it has a priority.
  if perms is not None:
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  zip_file.writestr(zinfo, data)
  zipfile.ZIP64_LIMIT = saved_zip64_limit


def ZipClose(zip_file):
  # http://b/18015246
  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  # central directory.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  zip_file.close()

  zipfile.ZIP64_LIMIT = saved_zip64_limit


class DeviceSpecificParams(object):
  module = None
  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          info = imp.find_module("releasetools", [path])
        else:
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")


class File(object):
  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    f = open(diskname, "rb")
    data = f.read()
    f.close()
    return File(name, data)

  def WriteToTemp(self):
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def AddToZip(self, z, compression=None):
    ZipWriteStr(z, self.name, self.data, compress_type=compression)


DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }


class Difference(object):
  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        print "WARNING: diff command timed out"
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print "WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch


  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch


def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed! %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()


class BlockDifference(object):
  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block

    # Due to http://b/20939131, check_first_block is disabled temporarily.
    assert not self.check_first_block

    if version is None:
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)

    _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)

  def WriteScript(self, script, output_zip, progress=None):
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    self._WritePostInstallVerifyScript(script)

  def WriteVerifyScript(self, script):
    partition = self.partition
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))
    else:
      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
      ranges_str = ranges.to_string_raw()
      if self.version >= 3:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      # When generating incrementals for the system and vendor partitions,
      # explicitly check the first block (which contains the superblock) of
      # the partition to see if it's what we expect. If this check fails,
      # give an explicit log message about the partition having been
      # remounted R/W (the most likely explanation) and the need to flash to
      # get OTAs working again.
      if self.check_first_block:
        self._CheckFirstBlock(script)

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                          'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      script.AppendExtra(
          'else\n'
          '  abort("%s partition has unexpected non-zero contents after OTA '
          'update");\n'
          'endif;' % (partition,))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    script.AppendExtra(
        'else\n'
        '  abort("%s partition has unexpected contents after OTA update");\n'
        'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()

  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
  # remounting R/W. Will change the checking to a finer-grained way to
  # mask off those bits.
  def _CheckFirstBlock(self, script):
    r = rangelib.RangeSet((0, 1))
    srchash = self._HashBlocks(self.src, r)

    script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
                        'abort("%s has been remounted R/W; '
                        'reflash device to reenable OTA updates");')
                       % (self.device, r.to_string_raw(), srchash,
                          self.device))

DataImage = blockimgdiff.DataImage


# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}

def GetTypeAndDevice(mount_point, info):
  fstab = info["fstab"]
  if fstab:
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  else:
    raise KeyError


def ParseCertificate(data):
  """Parse a PEM-format certificate."""
  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  cert = "".join(cert).decode('base64')
  return cert

def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.
  (Most of the space in these images is just the kernel, which is
  identical for the two, so the resulting patch should be efficient.)
  Add it to the output zip, along with a shell script that is run from
  init.rc on first boot to actually do the patching and install the
  new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info_dict should be the dictionary returned
  by common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  diff_program = ["imgdiff"]
  path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
  if os.path.exists(path):
    diff_program.append("-b")
    diff_program.append(path)
    bonus_args = "-b /system/etc/recovery-resource.dat"
  else:
    bonus_args = ""

  d = Difference(recovery_img, boot_img, diff_program=diff_program)
  _, _, patch = d.ComputePatch()
  output_sink("recovery-from-boot.p", patch)

  try:
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    return

  sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse the init.rc file to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  try:
    with open(os.path.join(input_dir, "BOOT", "RAMDISK", "init.rc")) as f:
      for line in f:
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          print "putting script in", sh_location
          break
  except (OSError, IOError) as e:
    print "failed to read init.rc: %s" % (e,)

  output_sink(sh_location, sh)
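

# A minimal usage sketch of this module, assuming a caller along the lines of
# ota_from_target_files (all paths and names below are illustrative):
#
#   input_tmp, input_zip = UnzipTemp("target_files.zip")
#   OPTIONS.info_dict = LoadInfoDict(input_zip)
#   DumpInfoDict(OPTIONS.info_dict)
#   boot_img = GetBootableImage("boot.img", "boot.img", input_tmp, "BOOT")
#   recovery_img = GetBootableImage(
#       "recovery.img", "recovery.img", input_tmp, "RECOVERY")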