1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15from __future__ import print_function
16
17import copy
18import errno
19import getopt
20import getpass
21import gzip
22import imp
23import os
24import platform
25import re
26import shlex
27import shutil
28import string
29import subprocess
30import sys
31import tempfile
32import threading
33import time
34import zipfile
35from hashlib import sha1, sha256
36
37import blockimgdiff
38import sparse_img
39
class Options(object):
  """Holds the global options shared by the releasetools scripts.

  A single module-level instance (OPTIONS, below) is created and then
  mutated by the various command-line front-ends before calling into the
  helpers in this module.
  """

  def __init__(self):
    # Default host-tools output directory, keyed by sys.platform.
    # NOTE(review): "linux2" is the Python 2 value; Python 3 reports
    # "linux", so the lookup would miss there -- confirm if porting.
    platform_search_path = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }

    self.search_path = platform_search_path.get(sys.platform, None)
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx2048m"]  # The default JVM args.
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []
    self.verbose = False
    # Temporary files registered for later cleanup.
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    # Parsed META/misc_info.txt dicts (target / source builds).
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
71
72
# The shared module-level options instance mutated by the front-end scripts.
OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")


# The partitions allowed to be signed by AVB (Android verified boot 2.0).
AVB_PARTITIONS = ('boot', 'recovery', 'system', 'vendor', 'product', 'dtbo')
82
83
class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h' """

  # 1xxx: system partition failures.
  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  # 2xxx: vendor partition failures (mirror the system codes).
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  # 3xxx: device/build property checks and patching failures.
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008
111
class ExternalError(RuntimeError):
  """Raised when an external command or tool invocation fails."""
  pass
114
115
def Run(args, verbose=None, **kwargs):
  """Spawn a subprocess for the given command line.

  Args:
    args: The command line, as a list of strings.
    verbose: If set, overrides the global OPTIONS.verbose when deciding
        whether to echo the command line before running it.
    **kwargs: Extra keyword arguments forwarded to subprocess.Popen.

  Returns:
    The created subprocess.Popen object.
  """
  echo = OPTIONS.verbose if verbose is None else verbose
  if echo:
    print("  running: ", " ".join(args))
  return subprocess.Popen(args, **kwargs)
127
128
def RoundUpTo4K(value):
  """Round value up to the next multiple of 4096 (4 KiB)."""
  # Equivalent to ceil(value / 4096) * 4096, done with a bit mask.
  return (value + 4095) & ~4095
132
133
def CloseInheritedPipes():
  """Close pipe file descriptors leaked by gmake on Mac OS.

  Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
  before doing other work. No-op on other platforms.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      st = os.fstat(fd)
      # S_IFIFO bit (0x1000) in st_mode marks a pipe/FIFO descriptor.
      if st is not None and (st[0] & 0x1000) != 0:
        os.close(fd)
    except OSError:
      # Descriptor not open (or already closed); ignore.
      pass
148
149
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Args:
    input_file: Either an opened zipfile.ZipFile of a target-files zip, or
        a path to an unzipped target-files directory.
    input_dir: If not None, the path of the unzipped target-files directory;
        properties that point at build-tree files are redirected to the
        corresponding files under this directory (repacking case).

  Returns:
    The info dict, with "fstab", "build.prop" and "vendor.build.prop"
    entries populated in addition to the raw misc_info.txt keys.

  Raises:
    ValueError: If META/misc_info.txt is missing from the input.
  """

  def read_helper(fn):
    # Read entry 'fn' from the zip, or the matching file under the unzipped
    # directory. Raises KeyError when the entry is absent, mirroring
    # ZipFile.read()'s behavior for the directory case.
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
        # NOTE(review): other IOErrors fall through and return None here --
        # confirm whether that is intended.

  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    raise ValueError("can't find META/misc_info.txt in input target-files")

  # These two keys must always be present in misc_info.txt.
  assert "recovery_api_version" in d
  assert "fstab_version" in d

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

    # Redirect {system,vendor}_base_fs_file.
    if "system_base_fs_file" in d:
      basename = os.path.basename(d["system_base_fs_file"])
      system_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(system_base_fs_file):
        d["system_base_fs_file"] = system_base_fs_file
      else:
        # Missing base fs file is non-fatal; drop the key so downstream
        # code falls back to building without it.
        print("Warning: failed to find system base fs file: %s" % (
            system_base_fs_file,))
        del d["system_base_fs_file"]

    if "vendor_base_fs_file" in d:
      basename = os.path.basename(d["vendor_base_fs_file"])
      vendor_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(vendor_base_fs_file):
        d["vendor_base_fs_file"] = vendor_base_fs_file
      else:
        print("Warning: failed to find vendor base fs file: %s" % (
            vendor_base_fs_file,))
        del d["vendor_base_fs_file"]

  def makeint(key):
    # Convert the named entry in-place to int; base 0 honors 0x/0o prefixes.
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  # Pick the fstab carried in the recovery ramdisk; when there is no separate
  # recovery partition, the "recovery_as_boot" branch is only reachable with
  # no_recovery == "true", and uses the fstab in the boot ramdisk instead.
  system_root_image = d.get("system_root_image", None) == "true"
  if d.get("no_recovery", None) != "true":
    recovery_fstab_path = "RECOVERY/RAMDISK/etc/recovery.fstab"
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
        recovery_fstab_path, system_root_image)
  elif d.get("recovery_as_boot", None) == "true":
    recovery_fstab_path = "BOOT/RAMDISK/etc/recovery.fstab"
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
        recovery_fstab_path, system_root_image)
  else:
    d["fstab"] = None

  d["build.prop"] = LoadBuildProp(read_helper, 'SYSTEM/build.prop')
  d["vendor.build.prop"] = LoadBuildProp(read_helper, 'VENDOR/build.prop')

  # Set up the salt (based on fingerprint or thumbprint) that will be used when
  # adding AVB footer.
  if d.get("avb_enable") == "true":
    fp = None
    if "build.prop" in d:
      build_prop = d["build.prop"]
      if "ro.build.fingerprint" in build_prop:
        fp = build_prop["ro.build.fingerprint"]
      elif "ro.build.thumbprint" in build_prop:
        fp = build_prop["ro.build.thumbprint"]
    if fp:
      # NOTE(review): sha256() of a str works on Python 2; Python 3 would
      # require fp.encode() -- confirm if this file is ever ported.
      d["avb_salt"] = sha256(fp).hexdigest()

  return d
267
268
def LoadBuildProp(read_helper, prop_file):
  """Parse the given build.prop entry into a dict via read_helper.

  A missing entry produces a warning and an empty dict rather than an error.
  """
  try:
    contents = read_helper(prop_file)
  except KeyError:
    print("Warning: could not read %s" % (prop_file,))
    contents = ""
  return LoadDictionaryFromLines(contents.split("\n"))
276
277
def LoadDictionaryFromLines(lines):
  """Parse "name=value" lines into a dict.

  Blank lines and lines starting with '#' are skipped; lines without '='
  are ignored. Only the first '=' splits the name from the value.
  """
  result = {}
  for raw_line in lines:
    entry = raw_line.strip()
    if not entry or entry.startswith("#"):
      continue
    if "=" not in entry:
      continue
    name, _, value = entry.partition("=")
    result[name] = value
  return result
288
289
def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
                      system_root_image=False):
  """Parse the recovery fstab read via read_helper.

  Args:
    read_helper: A callable that returns the contents of an entry, raising
        KeyError when the entry is missing.
    fstab_version: The fstab format version; only version 2 is supported.
    recovery_fstab_path: The entry name to read (e.g.
        "RECOVERY/RAMDISK/etc/recovery.fstab").
    system_root_image: If True, the root directory is included in system;
        alias "/system" to the "/" entry.

  Returns:
    A dict mapping mount point to a Partition object (mount_point, fs_type,
    device, length, context). Vold-managed entries are skipped.

  Raises:
    ValueError: If a line doesn't have exactly five fields.
  """
  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, context):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.context = context

  try:
    data = read_helper(recovery_fstab_path)
  except KeyError:
    print("Warning: could not find {}".format(recovery_fstab_path))
    data = ""

  assert fstab_version == 2

  d = {}
  for line in data.split("\n"):
    line = line.strip()
    if not line or line.startswith("#"):
      continue

    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    pieces = line.split()
    if len(pieces) != 5:
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

    # Ignore entries that are managed by vold.
    options = pieces[4]
    if "voldmanaged=" in options:
      continue

    # It's a good line, parse it.
    length = 0
    options = options.split(",")
    for i in options:
      if i.startswith("length="):
        length = int(i[7:])
      else:
        # Ignore all unknown options in the unified fstab.
        continue

    mount_flags = pieces[3]
    # Honor the SELinux context if present.
    context = None
    for i in mount_flags.split(","):
      if i.startswith("context="):
        context = i

    mount_point = pieces[1]
    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                               device=pieces[0], length=length, context=context)

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at /.
  if system_root_image:
    # dict.has_key() was removed in Python 3; the "in" operator is the
    # equivalent (and also works on Python 2).
    assert "/system" not in d and "/" in d
    d["/system"] = d["/"]
  return d
352
353
def DumpInfoDict(d):
  """Print every key of the info dict (sorted) with its value's type name."""
  for key in sorted(d):
    value = d[key]
    print("%-25s = (%s) %s" % (key, type(value).__name__, value))
357
358
def AppendAVBSigningArgs(cmd, partition):
  """Append signing arguments for avbtool to cmd, in place.

  Key/algorithm come from OPTIONS.info_dict entries named after the
  partition; e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096".
  """
  key_path = OPTIONS.info_dict.get("avb_%s_key_path" % (partition,))
  algorithm = OPTIONS.info_dict.get("avb_%s_algorithm" % (partition,))
  if key_path and algorithm:
    cmd += ["--key", key_path, "--algorithm", algorithm]
  avb_salt = OPTIONS.info_dict.get("avb_salt")
  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
  if avb_salt and partition != "vbmeta":
    cmd += ["--salt", avb_salt]
370
371
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False, two_step_image=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
  we are building a two-step special image (i.e. building a recovery image to
  be loaded into /boot in two-step OTAs).

  Depending on info_dict, the resulting image may additionally be signed with
  boot_signer or vboot, or get an AVB hash footer appended.

  Return the image data, or None if sourcedir does not appear to contains files
  for building the requested image.
  """

  def make_ramdisk():
    # Pack sourcedir/RAMDISK with mkbootfs (honoring fs_config_file if it
    # exists) and compress it with minigzip into a temp file.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Optional single-line input files become mkbootimg flags.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot enabled, mkbootimg writes to a separate unsigned temp file,
  # which the vboot signer then turns into the final image.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  # "boot" or "recovery", without extension.
  partition_name = os.path.basename(sourcedir).lower()

  if (partition_name == "recovery" and
      info_dict.get("include_recovery_dtbo") == "true"):
    fn = os.path.join(sourcedir, "recovery_dtbo")
    cmd.extend(["--recovery_dtbo", fn])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (partition_name,)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    # Hard-code the path as "/boot" for two-step special recovery image (which
    # will be loaded into /boot during the two-step OTA).
    if two_step_image:
      path = "/boot"
    else:
      path = "/" + partition_name
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + partition_name
    img_keyblock = tempfile.NamedTemporaryFile()
    # We have switched from the prebuilt futility binary to using the tool
    # (futility-host) built from the source. Override the setting in the old
    # TF.zip.
    futility = info_dict["futility"]
    if futility.startswith("prebuilts/"):
      futility = "futility-host"
    cmd = [info_dict["vboot_signer_cmd"], futility,
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
  if info_dict.get("avb_enable") == "true":
    avbtool = os.getenv('AVBTOOL') or info_dict["avb_avbtool"]
    part_size = info_dict[partition_name + "_size"]
    cmd = [avbtool, "add_hash_footer", "--image", img.name,
           "--partition_size", str(part_size), "--partition_name",
           partition_name]
    AppendAVBSigningArgs(cmd, partition_name)
    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
    if args and args.strip():
      cmd.extend(shlex.split(args))
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "avbtool add_hash_footer of %s failed" % (
        partition_name,)

  # Rewind and read back the finished image. Bug fix: seek() takes
  # (offset, whence) -- the arguments were previously swapped as
  # seek(os.SEEK_SET, 0), which only worked because os.SEEK_SET == 0.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
537
538
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None, two_step_image=False):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'.
  """

  # Prefer prebuilts, checking BOOTABLE_IMAGES first, then IMAGES.
  for subdir in ("BOOTABLE_IMAGES", "IMAGES"):
    prebuilt_path = os.path.join(unpack_dir, subdir, prebuilt_name)
    if os.path.exists(prebuilt_path):
      print("using prebuilt %s from %s..." % (prebuilt_name, subdir))
      return File.FromLocalFile(name, prebuilt_path)

  print("building image from target_files %s..." % (tree_subdir,))

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot
  # image. Unless "recovery_as_boot" is specified, in which case we carry
  # the ramdisk for recovery.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk, two_step_image)
  return File(name, data) if data else None
576
577
def Gunzip(in_filename, out_filename):
  """Decompress the gzip file in_filename, writing the result to out_filename."""
  with gzip.open(in_filename, "rb") as in_file:
    with open(out_filename, "wb") as out_file:
      shutil.copyfileobj(in_file, out_file)
583
584
def UnzipTemp(filename, pattern=None):
  """Unzips the given archive into a temporary directory and returns the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a temp dir,
  then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns:
    The name of the temporary directory.
  """

  def unzip_to_dir(zip_path, target_dir):
    # Shell out to unzip; -o overwrites, -q keeps it quiet. 'pattern'
    # restricts extraction to the matching entries.
    cmd = ["unzip", "-o", "-q", zip_path, "-d", target_dir]
    if pattern is not None:
      cmd.extend(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (zip_path,))

  tmp = MakeTempDir(prefix="targetfiles-")
  combined = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if combined:
    unzip_to_dir(combined.group(1), tmp)
    unzip_to_dir(combined.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
  else:
    unzip_to_dir(filename, tmp)

  return tmp
615
616
def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
  """Returns a SparseImage object suitable for passing to BlockImageDiff.

  This function loads the specified sparse image from the given path, and
  performs additional processing for OTA purpose. For example, it always adds
  block 0 to clobbered blocks list. It also detects files that cannot be
  reconstructed from the block list, for whom we should avoid applying imgdiff.

  Args:
    which: The partition name, which must be "system" or "vendor".
    tmpdir: The directory that contains the prebuilt image and block map file.
    input_zip: The target-files ZIP archive.
    allow_shared_blocks: Whether having shared blocks is allowed.

  Returns:
    A SparseImage object, with file_map info loaded.
  """
  assert which in ("system", "vendor")

  path = os.path.join(tmpdir, "IMAGES", which + ".img")
  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")

  # The image and map files must have been created prior to calling
  # ota_from_target_files.py (since LMP).
  assert os.path.exists(path) and os.path.exists(mappath)

  # In ext4 filesystems, block 0 might be changed even being mounted R/O. We add
  # it to clobbered_blocks so that it will be written to the target
  # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
  clobbered_blocks = "0"

  image = sparse_img.SparseImage(path, mappath, clobbered_blocks,
                                 allow_shared_blocks=allow_shared_blocks)

  # block.map may contain less blocks, because mke2fs may skip allocating blocks
  # if they contain all zeros. We can't reconstruct such a file from its block
  # list. Tag such entries accordingly. (Bug: 65213616)
  for entry in image.file_map:
    # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Use the str
    # method here: string.replace() was removed from the string module in
    # Python 3, and str.replace() behaves identically on Python 2.
    arcname = entry.replace(which, which.upper(), 1)[1:]
    # Skip artificial names, such as "__ZERO", "__NONZERO-1".
    if arcname not in input_zip.namelist():
      continue

    info = input_zip.getinfo(arcname)
    ranges = image.file_map[entry]

    # If a RangeSet has been tagged as using shared blocks while loading the
    # image, its block list must be already incomplete due to that reason. Don't
    # give it 'incomplete' tag to avoid messing up the imgdiff stats.
    if ranges.extra.get('uses_shared_blocks'):
      continue

    if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
      ranges.extra['incomplete'] = True

  return image
674
675
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password.

  Each key is probed with openssl to decide whether it is unencrypted,
  encrypted with an empty password, or encrypted with a real password;
  only the last category is prompted for.
  """

  no_passwords = []      # keys that need no password (maps to None).
  need_passwords = []    # encrypted keys to prompt the user for.
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First attempt: parse the key with no decryption at all. Success means
    # it is an unencrypted PKCS#8 key.
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second attempt: try decrypting with the empty password, capturing
      # stderr to distinguish "wrong password" from "unparseable key".
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        # NOTE(review): on Python 3, stderr would be bytes and this
        # startswith() against a str would always be False -- confirm if
        # this file is ever ported.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
723
724
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK.

  This can be both a decimal number (API Level) or a codename.

  Raises:
    ExternalError: If aapt fails, or no minSdkVersion line is found in its
        output.
  """

  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, _ = p.communicate()
  # Bug fix: stderr is not piped, so the second value from communicate() was
  # always None and the old "if err:" check could never fire. Check the exit
  # status instead to detect aapt failures.
  if p.returncode != 0:
    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
        % (p.returncode,))

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt")
742
743
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level).

  If minSdkVersion is set to a codename, it is translated to a number using
  the provided map.
  """

  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    # Not a decimal number -- treat it as a codename and translate it.
    if version not in codename_to_api_level_map:
      raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                          % (version, codename_to_api_level_map))
    return codename_to_api_level_map[version]
760
761
def SignFile(input_name, output_name, key, password, min_api_level=None,
    codename_to_api_level_map=None,
    whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion. Defaults to an empty map.

  Raises:
    ExternalError: If the invoked signapk.jar exits with a non-zero status.
  """
  # Avoid a mutable default argument; None behaves like an empty map.
  if codename_to_api_level_map is None:
    codename_to_api_level_map = {}

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
         ["-Djava.library.path=" + java_library_path,
          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
         OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  min_sdk_version = min_api_level
  if min_sdk_version is None:
    # Whole-file signatures don't need a min SDK; otherwise read it from
    # the APK itself.
    if not whole_file:
      min_sdk_version = GetMinSdkVersionInt(
          input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  # The password (if any) is fed to signapk via stdin.
  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
809
810
def CheckSize(data, target, info_dict):
  """Checks the data string passed against the max size limit.

  For non-AVB images, raise exception if the data is too big. Print a warning
  if the data is nearing the maximum size.

  For AVB images, the actual image size should be identical to the limit.

  Args:
    data: A string that contains all the data for the partition.
    target: The partition name. The ".img" suffix is optional.
    info_dict: The dict to be looked up for relevant info.
  """
  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  fstab = info_dict["fstab"]
  if fstab:
    if mount_point == "/userdata":
      mount_point = "/data"
    partition = fstab[mount_point]
    fs_type = partition.fs_type
    # Strip any leading path components from the device name, then look up
    # the matching "<device>_size" entry.
    device = partition.device.rsplit("/", 1)[-1]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  size = len(data)
  # target could be 'userdata' or 'cache'. They should follow the non-AVB image
  # path.
  if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
    if size != limit:
      raise ExternalError(
          "Mismatching image size for %s: expected %d actual %d" % (
              target, limit, size))
    return

  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  if pct >= 95.0:
    print("\n  WARNING: %s\n" % (msg,))
  elif OPTIONS.verbose:
    print("  ", msg)
859
860
def ReadApkCerts(tf_zip):
  """Parses the APK certs info from a given target-files zip.

  Given a target-files ZipFile, parses its META/apkcerts.txt entry and returns
  a tuple with the following elements: (1) a dictionary that maps packages to
  certs (based on the "certificate" and "private_key" attributes in the
  file); (2) a string representing the extension of compressed APKs in the
  target files (e.g. ".gz", ".bro").

  Args:
    tf_zip: The input target_files ZipFile (already open).

  Returns:
    (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
        the extension string of compressed APKs (e.g. ".gz"), or None if there's
        no compressed APKs.
  """
  # META/apkcerts.txt contains the info for _all_ the packages known at build
  # time. Filter out the ones that are not installed.
  installed_files = set(
      os.path.basename(entry) for entry in tf_zip.namelist()
      if os.path.basename(entry))

  line_re = re.compile(
      r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
      r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*)")?$')

  pub_suffix = OPTIONS.public_key_suffix
  priv_suffix = OPTIONS.private_key_suffix

  certmap = {}
  compressed_extension = None

  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    line = line.strip()
    if not line:
      continue

    match = line_re.match(line)
    if not match:
      continue

    name = match.group("NAME")
    cert = match.group("CERT")
    privkey = match.group("PRIVKEY")
    this_compressed_extension = match.group("COMPRESSED")

    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(pub_suffix) and privkey.endswith(priv_suffix) and
          cert[:-len(pub_suffix)] == privkey[:-len(priv_suffix)]):
      certmap[name] = cert[:-len(pub_suffix)]
    else:
      raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)

    if not this_compressed_extension:
      continue

    # Only count the installed files.
    if name + '.' + this_compressed_extension not in installed_files:
      continue

    # All compressed entries must share a single extension; we don't support
    # multiple compression methods in the same system image.
    if compressed_extension is None:
      compressed_extension = this_compressed_extension
    elif this_compressed_extension != compressed_extension:
      raise ValueError(
          "Multiple compressed extensions: {} vs {}".format(
              compressed_extension, this_compressed_extension))

  return (certmap,
          ("." + compressed_extension) if compressed_extension else None)
938
939
# Help text for the command-line flags that ParseOptions() handles for every
# releasetools script. Usage() prints it after the calling module's own
# docstring.
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
959
def Usage(docstring):
  """Prints the calling module's usage text, then the common-flags help."""
  module_help = docstring.rstrip("\n")
  print(module_help)
  print(COMMON_DOCSTRING)
963
964
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parses the options in argv and returns any arguments that aren't flags.

  docstring is the calling module's docstring, to be displayed for errors
  and -h.  extra_opts and extra_long_opts are for flags defined by the
  caller, which are processed by passing them to extra_option_handler.
  """

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print("**", str(err), "**")
    sys.exit(2)

  # Flags whose argument is stored verbatim into the named OPTIONS attribute.
  plain_setters = {
      "-p": "search_path",
      "--path": "search_path",
      "--signapk_path": "signapk_path",
      "--signapk_shared_library_path": "signapk_shared_library_path",
      "--java_path": "java_path",
      "--public_key_suffix": "public_key_suffix",
      "--private_key_suffix": "private_key_suffix",
      "--boot_signer_path": "boot_signer_path",
      "--verity_signer_path": "verity_signer_path",
      "-s": "device_specific",
      "--device_specific": "device_specific",
  }
  # Flags whose argument is shell-split into a list before being stored.
  split_setters = {
      "--extra_signapk_args": "extra_signapk_args",
      "--java_args": "java_args",
      "--boot_signer_args": "boot_signer_args",
      "--verity_signer_args": "verity_signer_args",
  }

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in plain_setters:
      setattr(OPTIONS, plain_setters[o], a)
    elif o in split_setters:
      setattr(OPTIONS, split_setters[o], shlex.split(a))
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    elif extra_option_handler is None or not extra_option_handler(o, a):
      # Neither a common flag nor one the caller knows about.
      assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
1034
1035
def MakeTempFile(prefix='tmp', suffix=''):
  """Makes a temp file that will be deleted by a later Cleanup() call.

  Returns:
    The filename of the new temporary file.
  """
  handle, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # Only the name is needed; don't leak the open descriptor.
  os.close(handle)
  OPTIONS.tempfiles.append(path)
  return path
1043
1044
def MakeTempDir(prefix='tmp', suffix=''):
  """Makes a temporary dir that will be cleaned up with a call to Cleanup().

  Returns:
    The absolute pathname of the new directory.
  """
  path = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
  OPTIONS.tempfiles.append(path)
  return path
1054
1055
def Cleanup():
  """Deletes every temp file/dir registered by MakeTempFile()/MakeTempDir()."""
  for path in OPTIONS.tempfiles:
    if os.path.isdir(path):
      shutil.rmtree(path, ignore_errors=True)
    else:
      os.remove(path)
  # Clear in place, in case anyone else holds a reference to the list.
  del OPTIONS.tempfiles[:]
1063
1064
class PasswordManager(object):
  """Collects key passwords, via $ANDROID_PW_FILE or interactive prompts."""

  def __init__(self):
    # External editor used to let the user fill in the password file.
    self.editor = os.getenv("EDITOR", None)
    # Path of the password file, if the user has configured one.
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed the missing entries with empty values so they show up in the
      # file for the user to fill in.
      for i in missing:
        current[i] = ""

      if not first:
        print("key file %s still missing some passwords." % (self.pwfile,))
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        # Keep prompting until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Writes 'current' to the password file, lets the user edit it in
    $EDITOR, then returns the re-parsed file contents."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # Passwords are sensitive; restrict the file to its owner.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sorting on (not v, k, v) puts the entries with no password first.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Parses the password file into a dict mapping key name to password.

    Returns an empty dict if no password file is configured; a missing
    file (ENOENT) is treated the same as an empty one."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print("failed to parse password file: ", line)
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print("error reading password file: ", str(e))
    return result
1165
1166
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Writes an on-disk file into an open ZipFile deterministically.

  The entry is stored with a fixed timestamp (2009-01-01) and the requested
  permissions so repeated builds produce identical archives; the file's
  original mode and times are restored afterwards.

  Args:
    zip_file: An open zipfile.ZipFile.
    filename: Path of the file to add.
    arcname: Name to record in the archive; defaults to filename.
    perms: File mode to apply while zipping.
    compress_type: Compression to use; defaults to the archive's own.
  """
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB; raise the limit to work around that. Note that
  # `zipfile.writestr()` cannot handle strings that large (the interpreter
  # sometimes rejects them), so `zipfile.write()` must be used directly. This
  # mess can be avoided if we port to python3.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  compress_type = (zip_file.compression if compress_type is None
                   else compress_type)
  arcname = filename if arcname is None else arcname

  original_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't accept a ZipInfo, so temporarily modify the
    # file being zipped and restore it when done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch_start = datetime.datetime.fromtimestamp(0)
    fixed_mtime = (datetime.datetime(2009, 1, 1) - epoch_start).total_seconds()
    os.utime(filename, (fixed_mtime, fixed_mtime))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1205
1206
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: An open zipfile.ZipFile.
    zinfo_or_arcname: Either a zipfile.ZipInfo for the entry, or the archive
        name to store the data under.
    data: The entry's contents.
    perms: File mode for the entry; takes priority over any mode already in
        zinfo_or_arcname.
    compress_type: Compression to use; overrides the one in zinfo_or_arcname.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore ZIP64_LIMIT even if writestr() raises (e.g. OverflowError on a
  # too-long string), matching the try/finally discipline in ZipWrite().
  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        # Default to a regular file (0o100000) with 0o644 permissions.
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1247
1248
def ZipDelete(zip_filename, entries):
  """Deletes entries from a ZIP file.

  Since deleting entries from a ZIP file is not supported, it shells out to
  'zip -d'.

  Args:
    zip_filename: The name of the ZIP file.
    entries: The name of the entry, or the list of names to be deleted.

  Raises:
    AssertionError: In case of non-zero return from 'zip'.
  """
  # Accept a single entry name as a convenience.
  if isinstance(entries, basestring):
    entries = [entries]
  proc = Run(["zip", "-d", zip_filename] + entries,
             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  output, _ = proc.communicate()
  assert proc.returncode == 0, "Failed to delete %s:\n%s" % (entries, output)
1269
1270
def ZipClose(zip_file):
  """Closes a ZipFile, working around the Python 2.7 zip64 limit.

  http://b/18015246: zipfile also refers to ZIP64_LIMIT during close() when
  it writes out the central directory, so raise the limit around the call.

  Args:
    zip_file: The zipfile.ZipFile to close.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore the limit even if close() raises, matching ZipWrite()'s
  # try/finally discipline.
  try:
    zip_file.close()
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1281
1282
class DeviceSpecificParams(object):
  """Loads and dispatches to the device-specific releasetools module.

  Each hook method below forwards to the same-named function in the loaded
  module (if any), passing this object as the first argument.
  """
  # Shared across instances: the loaded device-specific module, or None.
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          # A directory: look for a "releasetools" module inside it.
          info = imp.find_module("releasetools", [path])
        else:
          # A file: strip a ".py" extension, then import it from its dir.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print("loaded device-specific extensions from", path)
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        print("unable to load device-specific module; assuming none")

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    """Hook for device-specific assertions; forwards via _DoCall()."""
    return self._DoCall("VerifyOTA_Assertions")
1367
class File(object):
  """An in-memory file: a name plus its contents, sizes and SHA-1."""

  def __init__(self, name, data, compress_size = None):
    self.name = name
    self.data = data
    self.size = len(data)
    # Fall back to the uncompressed size when no compressed size is given.
    self.compress_size = compress_size or self.size
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Builds a File named 'name' from the contents of 'diskname'."""
    with open(diskname, "rb") as f:
      contents = f.read()
    return File(name, contents)

  def WriteToTemp(self):
    """Writes the data to a NamedTemporaryFile and returns it (still open)."""
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def WriteToDir(self, d):
    """Writes the data into directory 'd', under this file's name."""
    with open(os.path.join(d, self.name), "wb") as fp:
      fp.write(self.data)

  def AddToZip(self, z, compression=None):
    """Stores this file into the open ZipFile 'z' via ZipWriteStr()."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1395
# Maps a target file's extension to the external diff program (argv prefix)
# used to compute its patch. Extensions not listed here fall back to "bsdiff"
# (see Difference.ComputePatch).
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1403
class Difference(object):
  """Computes the patch needed to turn a source File into a target File.

  The diff program is picked from DIFF_PROGRAM_BY_EXT based on the target's
  extension, unless one is passed explicitly via 'diff_program'.
  """

  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf                # target File
    self.sf = sf                # source File
    self.patch = None           # patch data; set by ComputePatch()
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      # communicate() on a thread so a hung diff program can be timed out.
      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        print("WARNING: diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if p.returncode != 0:
        print("WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err)))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
1475
1476
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'.

  Spreads the work over OPTIONS.worker_threads threads, and blocks until
  every patch has been computed.
  """
  print(len(diffs), "diffs to compute")

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      # The lock is held while pulling the next item off diff_iter and while
      # printing results; it's released around the slow ComputePatch() call.
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print("patching failed!                                  %s" % (name,))
        else:
          print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
      lock.release()
    except Exception as e:
      print(e)
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1521
1522
1523class BlockDifference(object):
  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    """Computes the block-based diff for a partition.

    Args:
      partition: The partition name (e.g. "system").
      tgt: The target image.
      src: The source image, or None for a full OTA.
      check_first_block: Whether to emit a first-block check on verification
          failure (see WriteVerifyScript).
      version: Transfer list version; defaults to the max version listed in
          the build's "blockimgdiff_versions" (minimum 3).
      disable_imgdiff: Whether to disallow imgdiff in blockimgdiff.
    """
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    if version is None:
      # Pick the highest version the build declares support for.
      version = max(
          int(i) for i in
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    assert version >= 3
    self.version = version

    # Compute the diff up front; the transfer list and data files are written
    # under a temp dir, with names based on the partition.
    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version,
                                    disable_imgdiff=self.disable_imgdiff)
    self.path = os.path.join(MakeTempDir(), partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # Look up the block device in the target's info for full OTAs, or the
    # source build's info for incrementals.
    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)
1553
  @property
  def required_cache(self):
    # The BlockImageDiff's max_stashed_size, captured in __init__.
    return self._required_cache
1557
  def WriteScript(self, script, output_zip, progress=None):
    """Emits the update commands for this partition into the script.

    Args:
      script: The edify script generator to append to.
      output_zip: The output OTA package; receives the data files.
      progress: If set, passed to script.ShowProgress() before updating.
    """
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    # Optionally verify the partition contents after applying the update.
    if OPTIONS.verify:
      self._WritePostInstallVerifyScript(script)
1570
  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    # Compare the SHA-1 of the full target care map (clobbered blocks
    # included) and report the result via ui_print.
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
                       self.device))
    script.AppendExtra("")
1589
  def WriteVerifyScript(self, script, touched_blocks_only=False):
    """Emits the pre-install verification commands for this partition.

    Args:
      script: The edify script generator to append to.
      touched_blocks_only: If True, only verify the source ranges this diff
          actually touches, rather than the whole (unclobbered) care map.
    """
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      # Pass if the source blocks carry the expected checksum, or if
      # block_image_verify() succeeds with the packaged transfer list.
      script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                          'block_image_verify("%s", '
                          'package_extract_file("%s.transfer.list"), '
                          '"%s.new.dat", "%s.patch.dat")) then') % (
                          self.device, ranges_str, expected_sha1,
                          self.device, partition, partition, partition))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))
1659
  def _WritePostInstallVerifyScript(self, script):
    """Emits commands that verify the partition contents after the update.

    Checks the whole target care map (clobbered blocks included), and that
    any extended blocks are zeroed; aborts with a partition-specific error
    code on mismatch.
    """
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))
1700
  def _WriteUpdate(self, script, output_zip):
    """Packages the diff data files into output_zip and emits the
    block_image_update call that applies them.

    For full OTAs, the new data is brotli-compressed first; for incrementals
    it's stored as plain {partition}.new.dat.
    """
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))

    # For full OTA, compress the new.dat with brotli with quality 6 to reduce its size. Quailty 9
    # almost triples the compression time but doesn't further reduce the size too much.
    # For a typical 1.8G system.new.dat
    #                       zip  | brotli(quality 6)  | brotli(quality 9)
    #   compressed_size:    942M | 869M (~8% reduced) | 854M
    #   compression_time:   75s  | 265s               | 719s
    #   decompression_time: 15s  | 25s                | 25s

    if not self.src:
      brotli_cmd = ['brotli', '--quality=6',
                    '--output={}.new.dat.br'.format(self.path),
                    '{}.new.dat'.format(self.path)]
      print("Compressing {}.new.dat with brotli".format(self.partition))
      p = Run(brotli_cmd, stdout=subprocess.PIPE)
      p.communicate()
      assert p.returncode == 0,\
          'compression of {}.new.dat failed'.format(self.partition)

      new_data_name = '{}.new.dat.br'.format(self.partition)
      # Already brotli-compressed; store it without further zip compression.
      ZipWrite(output_zip,
               '{}.new.dat.br'.format(self.path),
               new_data_name,
               compress_type=zipfile.ZIP_STORED)
    else:
      new_data_name = '{}.new.dat'.format(self.partition)
      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)

    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{new_data_name}", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition,
                new_data_name=new_data_name, code=code))
    script.AppendExtra(script.WordWrap(call))
1750
1751  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
1752    data = source.ReadRangeSet(ranges)
1753    ctx = sha1()
1754
1755    for p in data:
1756      ctx.update(p)
1757
1758    return ctx.hexdigest()
1759
1760  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
1761    """Return the hash value for all zero blocks."""
1762    zero_block = '\x00' * 4096
1763    ctx = sha1()
1764    for _ in range(num_blocks):
1765      ctx.update(zero_block)
1766
1767    return ctx.hexdigest()
1768
1769
# Alias so callers can refer to blockimgdiff.DataImage through this module.
DataImage = blockimgdiff.DataImage

# map recovery.fstab's fs_types to mount/format "partition types"
# (all of these filesystems live on EMMC-style block devices)
PARTITION_TYPES = {
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}
1779
def GetTypeAndDevice(mount_point, info):
  """Looks up the partition type and block device for a mount point.

  Args:
    mount_point: The mount point to query, e.g. "/boot".
    info: An info dict carrying a parsed "fstab" entry.

  Returns:
    A (partition_type, device) tuple, where partition_type is one of the
    values in PARTITION_TYPES (e.g. "EMMC").

  Raises:
    KeyError: If the info dict has no fstab, or the mount point (or its
        fs_type) is not found.
  """
  fstab = info["fstab"]
  if not fstab:
    # Include the mount point so the failure is attributable. Callers (e.g.
    # MakeRecoveryPatch) catch KeyError and skip gracefully.
    raise KeyError(mount_point)
  return (PARTITION_TYPES[fstab[mount_point].fs_type],
          fstab[mount_point].device)
1787
1788
def ParseCertificate(data):
  """Parses and converts a PEM-encoded certificate into DER-encoded.

  This gives the same result as `openssl x509 -in <filename> -outform DER`.

  Args:
    data: The PEM-encoded certificate text.

  Returns:
    The decoded certificate (DER-encoded bytes).
  """
  # Function-scope import keeps the module's top-level import list intact.
  import base64

  cert_buffer = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert_buffer.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # base64.b64decode works on both Python 2 and 3; the previous
  # str.decode('base64') codec exists only on Python 2.
  cert = base64.b64decode("".join(cert_buffer))
  return cert
1808
1809
def ExtractPublicKey(cert):
  """Extracts the public key (PEM-encoded) from the given certificate file.

  Args:
    cert: The certificate filename.

  Returns:
    The public key string.

  Raises:
    AssertionError: On non-zero return from 'openssl'.
  """
  # The behavior with '-out' is different between openssl 1.1 and openssl 1.0.
  # While openssl 1.1 writes the key into the given filename followed by
  # '-out', openssl 1.0 (both of 1.0.1 and 1.0.2) doesn't. So we collect the
  # output from stdout instead.
  openssl_cmd = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert]
  openssl = Run(openssl_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  pubkey, stderr_output = openssl.communicate()
  assert openssl.returncode == 0, \
      'Failed to dump public key from certificate: %s\n%s' % (cert,
                                                              stderr_output)
  return pubkey
1832
1833
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generates the recovery-from-boot patch and writes the script to output.

  Most of the space in the boot and recovery images is just the kernel, which is
  identical for the two, so the resulting patch should be efficient. Add it to
  the output zip, along with a shell script that is run from init.rc on first
  boot to actually do the patching and install the new recovery image.

  Args:
    input_dir: The top-level input directory of the target-files.zip.
    output_sink: The callback function that writes the result.
    recovery_img: File object for the recovery image.
    boot_img: File objects for the boot image.
    info_dict: A dict returned by common.LoadInfoDict() on the input
        target_files. Will use OPTIONS.info_dict if None has been given.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image") == "true"

  if full_recovery_image:
    # No patching needed: ship the full recovery image and install it as-is.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    system_root_image = info_dict.get("system_root_image") == "true"
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    # With system-root-image, boot and recovery images will have mismatching
    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
    # to handle such a case.
    if system_root_image:
      diff_program = ["bsdiff"]
      bonus_args = ""
      # recovery-resource.dat is only generated for non-system-root builds.
      assert not os.path.exists(path)
    else:
      diff_program = ["imgdiff"]
      if os.path.exists(path):
        # Pass the shared resources as a "bonus" file so the patch can
        # reference them instead of embedding the data itself.
        diff_program.append("-b")
        diff_program.append(path)
        bonus_args = "-b /system/etc/recovery-resource.dat"
      else:
        bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # Without fstab entries for boot/recovery we cannot build the applypatch
    # command lines; skip emitting the install script entirely.
    return

  if full_recovery_image:
    # applypatch -c checks whether recovery already matches; if not, flash
    # /system/etc/recovery.img onto the recovery partition.
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    # Patch flow: if recovery doesn't match, rebuild it from the boot
    # partition plus the recovery-from-boot.p patch (and bonus file, if any).
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.
  sh_location = "bin/install-recovery.sh"

  print("putting script in", sh_location)

  output_sink(sh_location, sh)
1925