1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33
34from hashlib import sha1 as sha1
35
36
class Options(object):
  """Mutable holder for the global configuration shared by all the
  releasetools scripts; see the module-level OPTIONS instance."""

  def __init__(self):
    # Host tool output directory, keyed by the interpreter's platform tag.
    host_out_dirs = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }
    self.search_path = host_out_dirs.get(sys.platform, None)

    # Signing tool locations and arguments.
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m" # JVM Args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []

    # General script state.
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
68
69
# Shared module-level options instance; the releasetools scripts mutate it
# in place (e.g. ParseOptions writes the parsed flags here).
OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
75
class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h' """

  # 1xxx: system partition update/verification failures.
  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  # 2xxx: vendor partition update/verification failures.
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  # 3xxx: device/build mismatch and other pre-apply check failures.
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008
103
class ExternalError(RuntimeError):
  """Raised when an external tool (unzip, aapt, signapk.jar, ...) fails
  or returns unusable output."""
  pass
106
107
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified.

  Args:
    args: list of program arguments, passed through to subprocess.Popen.
    **kwargs: extra keyword arguments forwarded verbatim to Popen
        (stdin/stdout/stderr, etc.).
  """
  if OPTIONS.verbose:
    print "  running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)
114
115
def CloseInheritedPipes():
  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
  before doing other work.  No-op on every platform other than Darwin."""
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      st = os.fstat(fd)
      # 0x1000 is the S_IFIFO bit of st_mode: close only pipe fds.
      if st is not None and (st[0] & 0x1000) != 0:
        os.close(fd)
    except OSError:
      # fd not open (or already closed) -- nothing to do.
      pass
130
131
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Args:
    input_file: either an open zipfile.ZipFile of a target-files zip, or
        the path of an unzipped target-files directory.
    input_dir: if not None, the path of the unzipped target-files directory;
        used when repacking to redirect properties that were stored as links
        into out/ (selinux_fc, ramdisk_dir, *_base_fs_file) to the actual
        unzipped files.

  Returns:
    A dict of build properties, including parsed "fstab" (or None when
    no_recovery is set) and "build.prop" entries.

  Raises:
    ValueError: if the recovery API version cannot be determined.
  """

  def read_helper(fn):
    # Read 'fn' either from the zip or from the unpacked directory.
    # Raises KeyError when the entry does not exist (ZipFile.read raises
    # it natively; the directory path translates ENOENT into KeyError).
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
        # NOTE(review): any IOError other than ENOENT is swallowed here and
        # the helper implicitly returns None, which callers do not expect --
        # confirm whether it should re-raise instead.
  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

    # Redirect {system,vendor}_base_fs_file.
    if "system_base_fs_file" in d:
      basename = os.path.basename(d["system_base_fs_file"])
      system_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(system_base_fs_file):
        d["system_base_fs_file"] = system_base_fs_file
      else:
        print "Warning: failed to find system base fs file: %s" % (
            system_base_fs_file,)
        del d["system_base_fs_file"]

    if "vendor_base_fs_file" in d:
      basename = os.path.basename(d["vendor_base_fs_file"])
      vendor_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(vendor_base_fs_file):
        d["vendor_base_fs_file"] = vendor_base_fs_file
      else:
        print "Warning: failed to find vendor base fs file: %s" % (
            vendor_base_fs_file,)
        del d["vendor_base_fs_file"]

  # Legacy per-partition size file: "name value" pairs, one per line.
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  # Convert the numeric properties (base 0: accepts "0x..." hex too).
  def makeint(key):
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  # no_recovery is stored as the string "true"; the False default merely
  # makes the comparison fail when the key is absent.
  if d.get("no_recovery", False) == "true":
    d["fstab"] = None
  else:
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   d.get("system_root_image", False))
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
269
270def LoadBuildProp(read_helper):
271  try:
272    data = read_helper("SYSTEM/build.prop")
273  except KeyError:
274    print "Warning: could not find SYSTEM/build.prop in %s" % zip
275    data = ""
276  return LoadDictionaryFromLines(data.split("\n"))
277
def LoadDictionaryFromLines(lines):
  """Parse an iterable of "name=value" lines into a dict.

  Blank lines, '#' comment lines, and lines without any '=' are skipped.
  Everything after the first '=' becomes the value (later duplicates of a
  name overwrite earlier ones).
  """
  result = {}
  for raw_line in lines:
    entry = raw_line.strip()
    if not entry or entry.startswith("#"):
      continue
    name, sep, value = entry.partition("=")
    if sep:
      result[name] = value
  return result
288
def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
  """Parse RECOVERY/RAMDISK/etc/recovery.fstab into {mount_point: Partition}.

  Args:
    read_helper: callable mapping an archive-relative path to its contents;
        raises KeyError when the path is missing.
    fstab_version: 1 or 2, selecting the fstab line format.
    system_root_image: if True, alias "/system" to the "/" entry, since the
        rest of the tools assume system always lives at "/system".

  Returns:
    A dict mapping each mount point to a Partition record.

  Raises:
    ValueError: on a malformed fstab line or an unknown fstab_version.
  """
  class Partition(object):
    # BUG FIX: 'context' (and 'device2') now default to None.  Version-1
    # fstab lines carry no SELinux context column, and the v1 branch below
    # constructs Partition without a context argument -- previously that
    # raised TypeError on every v1 fstab.
    def __init__(self, mount_point, fs_type, device, length, device2=None,
                 context=None):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.device2 = device2
      self.context = context

  try:
    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
  except KeyError:
    print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab")
    data = ""

  if fstab_version == 1:
    # <mount_point> <fs_type> <device> [<device2>|<options> [<options>]]
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      options = None
      if len(pieces) >= 4:
        if pieces[3].startswith("/"):
          device2 = pieces[3]
          # NOTE(review): unreachable given the len <= 4 check above; kept
          # for fidelity with the original logic.
          if len(pieces) >= 5:
            options = pieces[4]
        else:
          device2 = None
          options = pieces[3]
      else:
        device2 = None

      mount_point = pieces[0]
      length = 0
      if options:
        # Only "length=" is understood; anything else is reported.
        options = options.split(",")
        for i in options:
          if i.startswith("length="):
            length = int(i[7:])
          else:
            print("%s: unknown option \"%s\"" % (mount_point, i))

      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,
                                 device2=device2)

  elif fstab_version == 2:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
      if len(pieces) != 5:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # Ignore entries that are managed by vold
      options = pieces[4]
      if "voldmanaged=" in options:
        continue

      # It's a good line, parse it.  Only "length=" is honored among the
      # fs_mgr flags; everything else is ignored in the unified fstab.
      length = 0
      for i in options.split(","):
        if i.startswith("length="):
          length = int(i[7:])

      # Honor the SELinux context if present.
      context = None
      for i in pieces[3].split(","):
        if i.startswith("context="):
          context = i

      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)

  else:
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at /.
  if system_root_image:
    assert "/system" not in d and "/" in d
    d["/system"] = d["/"]
  return d
388
389
def DumpInfoDict(d):
  """Print the info dict as aligned "key = (type) value" lines, sorted by key."""
  for key in sorted(d):
    value = d[key]
    print("%-25s = (%s) %s" % (key, type(value).__name__, value))
393
394
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image.

  Args:
    sourcedir: directory containing "kernel" plus optional "second",
        "cmdline", "base", "pagesize" files and a "RAMDISK" subdirectory.
    fs_config_file: optional fs_config file passed to mkbootfs.
    info_dict: build info dict; defaults to OPTIONS.info_dict.
    has_ramdisk: if True, build a gzipped cpio ramdisk and include it.

  Returns:
    The image contents as a string, or None if sourcedir does not appear
    to contain the files needed for the requested image.
  """

  def make_ramdisk():
    # cpio the RAMDISK tree (applying fs_config_file if it exists) and
    # compress it with minigzip.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Optional single-value argument files living next to the kernel.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot, mkbootimg writes to a scratch file which the signer then
  # turns into the final image; otherwise mkbootimg writes img directly.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    # Sign in place with boot_signer, using the verity key pair.
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # BUG FIX: the seek arguments were swapped (seek(os.SEEK_SET, 0)); that
  # only worked by accident because os.SEEK_SET == 0.  Rewind to the start
  # before reading the finished image back.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
522
523
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'.

  Args:
    name: name given to the returned File object.
    prebuilt_name: image file name to look for, e.g. "boot.img".
    unpack_dir: root of the unzipped target-files tree.
    tree_subdir: subdirectory (e.g. "BOOT", "RECOVERY") with the sources
        used when no prebuilt image is found.
    info_dict: build info dict; defaults to OPTIONS.info_dict.

  Returns:
    A File object, or None if the image could not be built.
  """

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  print "building image from target_files %s..." % (tree_subdir,)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
  # for recovery.  (I.e. the ramdisk is skipped only when building boot.img
  # for a system_root_image build without recovery_as_boot.)
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)
  if data:
    return File(name, data)
  return None
561
562
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.  The temp dir is registered for Cleanup().
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(zip_path, dest_dir):
    # -o: overwrite without prompting; -q: quiet.
    cmd = ["unzip", "-o", "-q", zip_path, "-d", dest_dir]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (zip_path,))

  double_zip = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if double_zip:
    unzip_to_dir(double_zip.group(1), tmp)
    unzip_to_dir(double_zip.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = double_zip.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
595
596
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password.

  Each key is probed with openssl to classify it:
    * parses with -nocrypt            -> unencrypted; no password needed
    * parses with an empty password   -> encrypted; password is ''
    * stderr "Error decrypting key"   -> encrypted; prompt the user
    * anything else                   -> unknown format; defer to signapk
  """

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First attempt: parse as an unencrypted PKCS#8 key.
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second attempt: parse as an encrypted key with the empty password.
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  # Prompt (or read $ANDROID_PW_FILE) for the keys needing passwords;
  # everything else maps to None.
  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
644
645
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK. This can be both a decimal
  number (API Level) or a codename.

  Args:
    apk_name: path to the APK to inspect with "aapt dump badging".

  Raises:
    ExternalError: if aapt fails or its output has no sdkVersion line.
  """

  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, _ = p.communicate()
  # BUG FIX: stderr is not piped, so communicate() always returned None for
  # it and the old "if err:" check could never fire.  Test the exit status
  # instead so a failing aapt is actually detected.
  if p.returncode != 0:
    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
        % (p.returncode,))

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt")
663
664
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level).

  If minSdkVersion is set to a codename, it is translated to a number using
  the provided map.

  Raises:
    ExternalError: if the value is a codename missing from the map.
  """

  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    pass
  # Not a decimal number -- must be a codename known to the map.
  if version in codename_to_api_level_map:
    return codename_to_api_level_map[version]
  raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                      % (version, codename_to_api_level_map))
681
682
def SignFile(input_name, output_name, key, password, min_api_level=None,
    codename_to_api_level_map=dict(),
    whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.
  """

  # Invoke signapk.jar with its shared-library directory on the JVM path.
  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = ([OPTIONS.java_path, OPTIONS.java_args,
          "-Djava.library.path=" + java_library_path,
          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
         + OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  # Whole-file signing never inspects the APK; otherwise fall back to the
  # APK's own minSdkVersion when the caller gave no explicit level.
  min_sdk_version = min_api_level
  if min_sdk_version is None and not whole_file:
    min_sdk_version = GetMinSdkVersionInt(input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd += ["--min-sdk-version", str(min_sdk_version)]

  cmd += [key + OPTIONS.public_key_suffix,
          key + OPTIONS.private_key_suffix,
          input_name, output_name]

  proc = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  # signapk reads the key password (if any) from stdin, newline-terminated.
  proc.communicate(password + "\n" if password is not None else None)
  if proc.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" %
                        (proc.returncode,))
731
732
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size.

  Args:
    data: the image contents as a string.
    target: image file name (e.g. "system.img"); the ".img" suffix is
        dropped and the rest treated as a mount-point name.
    info_dict: build info dict supplying "fstab" and "<device>_size".

  Raises:
    ExternalError: if the data occupies >= 99%% of the size limit.
  """

  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      # The partition is named "userdata" but mounted at /data.
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      # Strip the block device path down to its basename.
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  # Without both a filesystem type and a size limit there is nothing to check.
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg
771
772
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict.

  Raises ValueError when a matching line pairs a cert and private key
  whose base names disagree.
  """
  certmap = {}
  pub_suffix = OPTIONS.public_key_suffix
  priv_suffix = OPTIONS.private_key_suffix
  line_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                       r'private_key="(.*)"$')
  for raw in tf_zip.read("META/apkcerts.txt").split("\n"):
    raw = raw.strip()
    if not raw:
      continue
    m = line_re.match(raw)
    if not m:
      # Lines that don't match the expected shape are silently skipped.
      continue
    name, cert, privkey = m.groups()
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(pub_suffix) and privkey.endswith(priv_suffix) and
          cert[:-len(pub_suffix)] == privkey[:-len(priv_suffix)]):
      certmap[name] = cert[:-len(pub_suffix)]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + raw)
  return certmap
796
797
# Usage text for the options shared by every releasetools script; appended
# to each script's own docstring by Usage().
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
817
def Usage(docstring):
  """Print the calling script's docstring followed by the common options."""
  print(docstring.rstrip("\n"))
  print(COMMON_DOCSTRING)
821
822
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler.

  Side effects: writes the parsed values into the global OPTIONS and,
  when -p/--path is given, prepends <search_path>/bin to $PATH.
  """

  try:
    # Short/long options common to every releasetools script; the caller's
    # own flags are spliced in via extra_opts / extra_long_opts.
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Any flag we don't recognize must be claimed by the caller's handler.
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    # Let helper binaries under <search_path>/bin win over system ones.
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
892
893
def MakeTempFile(prefix=None, suffix=None):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called.  Return the filename."""
  fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # Callers reopen the file by name, so release the descriptor right away.
  os.close(fd)
  OPTIONS.tempfiles.append(path)
  return path
901
902
def Cleanup():
  """Delete every temp file/dir registered in OPTIONS.tempfiles.

  The list is emptied afterwards, so calling Cleanup() a second time (or
  after registering new temp files) does not try to delete paths that are
  already gone -- previously a repeated call raised OSError.
  """
  for i in OPTIONS.tempfiles:
    if os.path.isdir(i):
      shutil.rmtree(i)
    else:
      os.remove(i)
  # Forget the deleted paths (py2-compatible in-place clear).
  del OPTIONS.tempfiles[:]
909
910
class PasswordManager(object):
  """Collects signing-key passwords, either through an editable password
  file ($ANDROID_PW_FILE opened with $EDITOR) or by prompting on the
  terminal."""

  def __init__(self):
    # Both are optional; when either is missing we fall back to
    # interactive prompting (see UpdateAndReadFile).
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.

    Raises RuntimeError if the user gives up before supplying all the
    requested passwords.
    """

    current = self.ReadFile()

    first = True
    while True:
      # Collect the keys that still have no (non-empty) password.
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed empty entries so the missing keys show up in the editor.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        # Re-prompt until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Write 'current' out to the password file, open it in the user's
    editor so missing entries can be filled in, then re-read and return
    the result.  Falls back to terminal prompting when no editor or
    password file is configured."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # The file holds secrets; restrict it to the owner.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sort so entries with missing passwords come first in the file.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Parse the password file into a {key_name: password} dict.

    Returns an empty dict when no password file is configured or the
    file does not exist; other I/O errors are reported but not fatal."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        # Lines look like: [[[  password  ]]] key_name
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      # A missing file simply means no stored passwords yet.
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result
1011
1012
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add an on-disk file to zip_file with deterministic metadata.

  The file's permissions are forced to 'perms' and its timestamp to a
  fixed date (2009-01-01) so the resulting archive is repeatable; both
  are restored on the local file afterwards.
  """
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile wrongly believes zip64 extensions are required for
  # entries over 2GiB, so temporarily raise its limit.  Note that
  # `zipfile.writestr()` will not work for strings larger than 2GiB -- the
  # interpreter itself sometimes rejects strings that large -- which is why
  # `zipfile.write()` is used directly here.  This mess can be avoided if
  # we port to python3.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if arcname is None:
    arcname = filename
  if compress_type is None:
    compress_type = zip_file.compression

  original_stat = os.stat(filename)

  try:
    # `zipfile.write()` offers no way to pass a ZipInfo, so temporarily
    # rewrite the on-disk file's metadata and restore it when done.
    os.chmod(filename, perms)

    # A fixed timestamp keeps the archive bit-for-bit reproducible.
    epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1051
1052
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: an open zipfile.ZipFile.
    zinfo_or_arcname: either an archive name string or a prepared ZipInfo.
    data: the entry contents.
    perms: optional Unix mode bits for the entry (defaults to 0o100644
        when an archive name string is given).
    compress_type: optional compression method overriding zinfo's.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Fix: restore ZIP64_LIMIT even if writestr() raises; previously an
  # exception here left the module-wide limit changed (ZipWrite() already
  # used try/finally for the same reason).
  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      # Unix mode bits live in the top 16 bits of external_attr.
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1093
1094
def ZipClose(zip_file):
  """Close zip_file with the zip64 limit raised.

  http://b/18015246: zipfile also refers to ZIP64_LIMIT during close()
  when it writes out the central directory, so the same workaround as in
  ZipWrite() is needed here.

  Fix: the saved limit is now restored in a finally block, so an
  exception from close() no longer leaves the module-wide limit changed.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    zip_file.close()
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1105
1106
class DeviceSpecificParams(object):
  """Loads the optional device-specific releasetools extension module
  (pointed at by OPTIONS.device_specific) and forwards OTA hook calls to
  it, returning a default when the module or hook is absent."""
  # The loaded extension module, shared across instances; stays None when
  # no device-specific path is configured or loading fails.
  module = None
  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          # A directory: look for a "releasetools" module inside it.
          info = imp.find_module("releasetools", [path])
        else:
          # A file: import it by name, dropping a ".py" extension if any.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        # Best-effort: a missing module just means no extensions.
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    """Called when generating a verification-only OTA package."""
    return self._DoCall("VerifyOTA_Assertions")
1191
class File(object):
  """An in-memory file: a name plus its full contents, with the size and
  SHA-1 digest computed up front."""

  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Read 'diskname' from disk and return it as a File named 'name'.

    Fixes: uses a context manager so the file handle is closed even if
    read() raises, and constructs via 'cls' so subclasses of File get an
    instance of their own type.
    """
    with open(diskname, "rb") as f:
      data = f.read()
    return cls(name, data)

  def WriteToTemp(self):
    """Write the contents to a NamedTemporaryFile and return it (the
    caller is responsible for closing, which deletes it)."""
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def AddToZip(self, z, compression=None):
    """Add this file to the open zip 'z' via ZipWriteStr()."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1214
# Map a target filename extension to the diff program best suited for it.
# A value is either a program name or a full argv prefix (a list).
# Extensions not listed here fall back to "bsdiff" (see
# Difference.ComputePatch below).
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1222
class Difference(object):
  """Computes the binary patch that transforms a source File into a
  target File, by running an external diff program (imgdiff/bsdiff)."""

  def __init__(self, tf, sf, diff_program=None):
    # tf/sf are the target/source File objects.  diff_program, when
    # given, overrides the extension-based choice in ComputePatch().
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      # Choose the diff program by the target's extension; unknown
      # extensions fall back to plain bsdiff.
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        # Copy so appending the filenames doesn't mutate the caller's list.
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      def run():
        # Drain the child's pipes on a helper thread so the main thread
        # can enforce a total timeout on the diff.
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        print "WARNING: diff command timed out"
        # Graceful terminate first; escalate to kill if it's still alive.
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print "WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      # NamedTemporaryFile deletes on close.
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch


  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
1294
1295
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'.

  Runs OPTIONS.worker_threads workers in parallel and blocks until all
  patches have been computed."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      # The lock guards both diff_iter and the printed progress output.
      # It is deliberately dropped around the slow ComputePatch() call so
      # other workers can pull items from the iterator meanwhile.
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1340
1341
class BlockDifference(object):
  """Computes a block-based image diff for one partition (via
  blockimgdiff.BlockImageDiff) and emits the edify script fragments that
  verify, apply, and post-verify the update."""

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    # tgt/src are sparse image objects; src is None for a full OTA.
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    if version is None:
      # Default to the highest transfer-list version advertised by the
      # build ("blockimgdiff_versions"), falling back to 1.
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version,
                                    disable_imgdiff=self.disable_imgdiff)
    tmpdir = tempfile.mkdtemp()
    # Register for removal by Cleanup().
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    # Compute() writes <path>.transfer.list, <path>.new.dat and
    # <path>.patch.dat, which _WriteUpdate() later packs into the zip.
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      # For incremental OTAs, resolve the device node from the source
      # build's info dict.
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  @property
  def required_cache(self):
    # Maximum number of bytes stashed at once while applying the diff;
    # the updater needs at least this much scratch space.
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
    """Pack the diff artifacts into output_zip and emit the update
    commands (plus post-install verification when OPTIONS.verify)."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    # NOTE(review): OPTIONS.verify is set elsewhere in this module's
    # option handling -- confirm it is always defined before this runs.
    if OPTIONS.verify:
      self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
                       self.device))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    """Emit the pre-install verification block.  For incremental OTAs
    this checks the source blocks (or, for version >= 3 with
    touched_blocks_only, just the blocks the transfer touches) and, on
    mismatch, attempts recovery (version >= 4) or aborts."""
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only and self.version >= 3:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      # The source passes verification if it hashes as expected, OR if
      # block_image_verify succeeds (i.e. a previously interrupted update
      # can be resumed).
      if self.version >= 4:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
      elif self.version == 3:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def _WritePostInstallVerifyScript(self, script):
    """Emit the post-install check that the updated partition hashes as
    expected, and (Bug: 20881595) that extended blocks are zeroed."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    # Closes the outer range_sha1 "if" opened above.
    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    """Pack the transfer list and data files into the zip and emit the
    block_image_update call."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # The patch data is already compressed; store it uncompressed in the zip.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the SHA-1 hex digest of the given ranges read from source."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # 4096 is the block size used throughout the block-based OTA tools.
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
1575
1576
# Re-export so callers of this module can use common.DataImage directly.
DataImage = blockimgdiff.DataImage
1578
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}

def GetTypeAndDevice(mount_point, info):
  """Return (partition_type, device) for the given mount point.

  Looks up mount_point in info["fstab"] and maps its fs_type through
  PARTITION_TYPES.

  Raises:
    KeyError: if there is no fstab, or the mount point / fs_type is
        unknown.
  """
  fstab = info["fstab"]
  if fstab:
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  else:
    # Fix: include the mount point so callers see an informative
    # KeyError instead of a bare, message-less one.
    raise KeyError(mount_point)
1596
1597
def ParseCertificate(data):
  """Parse a PEM-format certificate, returning the decoded (DER) payload.

  Keeps only the base64 lines between the BEGIN/END CERTIFICATE markers
  and decodes them.  Returns a byte string (str on Python 2).
  """
  import base64

  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # Fix: use base64.b64decode instead of str.decode('base64'); the
  # 'base64' codec is Python 2 only, while base64.b64decode produces the
  # same result and also works on Python 3.
  return base64.b64decode("".join(cert))
1611
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # Ship the whole recovery image rather than a patch against boot.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    diff_program = ["imgdiff"]
    # If the target files include a recovery-resource.dat, use it as a
    # "bonus" shared-data file to shrink the patch.
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # No fstab entry for boot/recovery; nothing more to emit.
    return

  # Build the first-boot shell script.  It checks whether recovery
  # already has the expected contents before (re)installing it.
  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        # The flash_recovery service line names the script's install path.
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print "putting script in", sh_location

  output_sink(sh_location, sh)
1714