1#!/usr/bin/env python3
2
3# Copyright 2016, The Android Open Source Project
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9#     http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
17
18"""Command-line tool for partitioning Brillo images."""
19
20
21from __future__ import absolute_import
22from __future__ import print_function
23import argparse
24import bisect
25import copy
26import functools
27import json
28import math
29import numbers
30import os
31import struct
32import sys
33import uuid
34import zlib
35from six.moves import range
36
37# Keywords used in JSON files.
38JSON_KEYWORD_SETTINGS = 'settings'
39JSON_KEYWORD_SETTINGS_AB_SUFFIXES = 'ab_suffixes'
40JSON_KEYWORD_SETTINGS_DISK_SIZE = 'disk_size'
41JSON_KEYWORD_SETTINGS_DISK_ALIGNMENT = 'disk_alignment'
42JSON_KEYWORD_SETTINGS_DISK_GUID = 'disk_guid'
43JSON_KEYWORD_SETTINGS_PARTITIONS_OFFSET_BEGIN = 'partitions_offset_begin'
44JSON_KEYWORD_PARTITIONS = 'partitions'
45JSON_KEYWORD_PARTITIONS_LABEL = 'label'
46JSON_KEYWORD_PARTITIONS_OFFSET = 'offset'
47JSON_KEYWORD_PARTITIONS_SIZE = 'size'
48JSON_KEYWORD_PARTITIONS_GROW = 'grow'
49JSON_KEYWORD_PARTITIONS_GUID = 'guid'
50JSON_KEYWORD_PARTITIONS_TYPE_GUID = 'type_guid'
51JSON_KEYWORD_PARTITIONS_FLAGS = 'flags'
52JSON_KEYWORD_PARTITIONS_PERSIST = 'persist'
53JSON_KEYWORD_PARTITIONS_IGNORE = 'ignore'
54JSON_KEYWORD_PARTITIONS_AB = 'ab'
55JSON_KEYWORD_PARTITIONS_AB_EXPANDED = 'ab_expanded'
56JSON_KEYWORD_PARTITIONS_POSITION = 'position'
57JSON_KEYWORD_AUTO = 'auto'
58
59# Possible values for the --type option of the query_partition
60# sub-command.
61QUERY_PARTITION_TYPES = ['size',
62                         'offset',
63                         'guid',
64                         'type_guid',
65                         'flags',
66                         'persist']
67
68BPT_VERSION_MAJOR = 1
69BPT_VERSION_MINOR = 0
70
71DISK_SECTOR_SIZE = 512
72
73GPT_NUM_LBAS = 33
74
75GPT_MIN_PART_NUM = 1
76GPT_MAX_PART_NUM = 128
77
78KNOWN_TYPE_GUIDS = {
79    'brillo_boot': 'bb499290-b57e-49f6-bf41-190386693794',
80    'brillo_bootloader': '4892aeb3-a45f-4c5f-875f-da3303c0795c',
81    'brillo_system': '0f2778c4-5cc1-4300-8670-6c88b7e57ed6',
82    'brillo_odm': 'e99d84d7-2c1b-44cf-8c58-effae2dc2558',
83    'brillo_oem': 'aa3434b2-ddc3-4065-8b1a-18e99ea15cb7',
84    'brillo_userdata': '0bb7e6ed-4424-49c0-9372-7fbab465ab4c',
85    'brillo_misc': '6b2378b0-0fbc-4aa9-a4f6-4d6e17281c47',
86    'brillo_vbmeta': 'b598858a-5fe3-418e-b8c4-824b41f4adfc',
87    'brillo_vendor_specific': '314f99d5-b2bf-4883-8d03-e2f2ce507d6a',
88    'linux_fs': '0fc63daf-8483-4772-8e79-3d69d8477de4',
89    'ms_basic_data': 'ebd0a0a2-b9e5-4433-87c0-68b6b72699c7',
90    'efi_system': 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b'
91}
92
93
94def RoundToMultiple(number, size, round_down=False):
95  """Rounds a number up (or down) to nearest multiple of another number.
96
97  Args:
98    number: The number to round up.
99    size: The multiple to round up to.
100    round_down: If True, the number will be rounded down.
101
102  Returns:
103    If |number| is a multiple of |size|, returns |number|, otherwise
104    returns |number| + |size| - |remainder| (if |round_down| is False) or
105    |number| - |remainder| (if |round_down| is True). Always returns
106    an integer.
107  """
108  remainder = number % size
109  if remainder == 0:
110    return int(number)
111  if round_down:
112    return int(number - remainder)
113  return int(number + size - remainder)
114
115
116def ParseNumber(arg):
117  """Number parser.
118
119  If |arg| is an integer, that value is returned. Otherwise int(arg, 0)
120  is returned.
121
122  This function is suitable for use in the |type| parameter of
123  |ArgumentParser|'s add_argument() function. An improvement to just
124  using type=int is that this function supports numbers in other
125  bases, e.g. "0x1234".
126
127  Arguments:
128    arg: Argument (int or string) to parse.
129
130  Returns:
131    The parsed value, as an integer.
132
133  Raises:
134    ValueError: If the argument could not be parsed.
135  """
136  if isinstance(arg, numbers.Integral):
137    return arg
138  return int(arg, 0)
139
140
141def ParseGuid(arg):
142  """Parser for RFC 4122 GUIDs.
143
144  Arguments:
145    arg: The argument, as a string.
146
147  Returns:
148    UUID in hyphenated format.
149
150  Raises:
151    ValueError: If the given string cannot be parsed.
152  """
153  return str(uuid.UUID(arg))
154
155
156def ParseSize(arg):
157  """Parser for size strings with decimal and binary unit support.
158
159  This supports both integers and strings.
160
161  Arguments:
162    arg: The string to parse.
163
164  Returns:
165    The parsed size in bytes as an integer.
166
167  Raises:
168    ValueError: If the given string cannot be parsed.
169  """
170  if isinstance(arg, numbers.Integral):
171    return arg
172
173  ws_index = arg.find(' ')
174  if ws_index != -1:
175    num = float(arg[0:ws_index])
176    factor = 1
177    if arg.endswith('KiB'):
178      factor = 1024
179    elif arg.endswith('MiB'):
180      factor = 1024*1024
181    elif arg.endswith('GiB'):
182      factor = 1024*1024*1024
183    elif arg.endswith('TiB'):
184      factor = 1024*1024*1024*1024
185    elif arg.endswith('PiB'):
186      factor = 1024*1024*1024*1024*1024
187    elif arg.endswith('kB'):
188      factor = 1000
189    elif arg.endswith('MB'):
190      factor = 1000*1000
191    elif arg.endswith('GB'):
192      factor = 1000*1000*1000
193    elif arg.endswith('TB'):
194      factor = 1000*1000*1000*1000
195    elif arg.endswith('PB'):
196      factor = 1000*1000*1000*1000*1000
197    else:
198      raise ValueError('Cannot parse string "{}"'.format(arg))
199    value = num*factor
200    # If the resulting value isn't an integer, round up.
201    if not value.is_integer():
202      value = int(math.ceil(value))
203  else:
204    value = int(arg, 0)
205  return value
206
207
208class ImageChunk(object):
209  """Data structure used for representing chunks in Android sparse files.
210
211  Attributes:
212    chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
213    chunk_offset: Offset in the sparse file where this chunk begins.
214    output_offset: Offset in de-sparsified file where output begins.
215    output_size: Number of bytes in output.
216    input_offset: Offset in sparse file for data if TYPE_RAW otherwise None.
217    fill_data: Blob with data to fill if TYPE_FILL otherwise None.
218  """
219
220  FORMAT = '<2H2I'
221  TYPE_RAW = 0xcac1
222  TYPE_FILL = 0xcac2
223  TYPE_DONT_CARE = 0xcac3
224  TYPE_CRC32 = 0xcac4
225
226  def __init__(self, chunk_type, chunk_offset, output_offset, output_size,
227               input_offset, fill_data):
228    """Initializes an ImageChunk object.
229
230    Arguments:
231      chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
232      chunk_offset: Offset in the sparse file where this chunk begins.
233      output_offset: Offset in de-sparsified file.
234      output_size: Number of bytes in output.
235      input_offset: Offset in sparse file if TYPE_RAW otherwise None.
236      fill_data: Blob with data to fill if TYPE_FILL otherwise None.
237
238    Raises:
239      ValueError: If data is not well-formed.
240    """
241    self.chunk_type = chunk_type
242    self.chunk_offset = chunk_offset
243    self.output_offset = output_offset
244    self.output_size = output_size
245    self.input_offset = input_offset
246    self.fill_data = fill_data
247    # Check invariants.
248    if self.chunk_type == self.TYPE_RAW:
249      if self.fill_data is not None:
250        raise ValueError('RAW chunk cannot have fill_data set.')
251      if not self.input_offset:
252        raise ValueError('RAW chunk must have input_offset set.')
253    elif self.chunk_type == self.TYPE_FILL:
254      if self.fill_data is None:
255        raise ValueError('FILL chunk must have fill_data set.')
256      if self.input_offset:
257        raise ValueError('FILL chunk cannot have input_offset set.')
258    elif self.chunk_type == self.TYPE_DONT_CARE:
259      if self.fill_data is not None:
260        raise ValueError('DONT_CARE chunk cannot have fill_data set.')
261      if self.input_offset:
262        raise ValueError('DONT_CARE chunk cannot have input_offset set.')
263    else:
264      raise ValueError('Invalid chunk type')
265
266
267class ImageHandler(object):
268  """Abstraction for image I/O with support for Android sparse images.
269
270  This class provides an interface for working with image files that
271  may be using the Android Sparse Image format. When an instance is
272  constructed, we test whether it's an Android sparse file. If so,
273  operations will be on the sparse file by interpreting the sparse
274  format, otherwise they will be directly on the file. Either way the
275  operations do the same.
276
277  For reading, this interface mimics a file object - it has seek(),
278  tell(), and read() methods. For writing, only truncation
279  (truncate()) and appending is supported (append_raw(),
280  append_fill(), and append_dont_care()). Additionally, data can only
281  be written in units of the block size.
282
283  Attributes:
284    is_sparse: Whether the file being operated on is sparse.
285    block_size: The block size, typically 4096.
286    image_size: The size of the unsparsified file.
287
288  """
289  # See system/core/libsparse/sparse_format.h for details.
290  MAGIC = 0xed26ff3a
291  HEADER_FORMAT = '<I4H4I'
292
293  # These are formats and offset of just the |total_chunks| and
294  # |total_blocks| fields.
295  NUM_CHUNKS_AND_BLOCKS_FORMAT = '<II'
296  NUM_CHUNKS_AND_BLOCKS_OFFSET = 16
297
298  def __init__(self, image_filename):
299    """Initializes an image handler.
300
301    Arguments:
302      image_filename: The name of the file to operate on.
303
304    Raises:
305      ValueError: If data in the file is invalid.
306    """
307    self._image_filename = image_filename
308    self._read_header()
309
310  def _read_header(self):
311    """Initializes internal data structures used for reading file.
312
313    This may be called multiple times and is typically called after
314    modifying the file (e.g. appending, truncation).
315
316    Raises:
317      ValueError: If data in the file is invalid.
318    """
319    self.is_sparse = False
320    self.block_size = 4096
321    self._file_pos = 0
322    self._image = open(self._image_filename, 'r+b')
323    self._image.seek(0, os.SEEK_END)
324    self.image_size = self._image.tell()
325
326    self._image.seek(0, os.SEEK_SET)
327    header_bin = self._image.read(struct.calcsize(self.HEADER_FORMAT))
328    if len(header_bin) < struct.calcsize(self.HEADER_FORMAT):
329      # Not a sparse image, our job here is done.
330      return
331    (magic, major_version, minor_version, file_hdr_sz, chunk_hdr_sz,
332     block_size, self._num_total_blocks, self._num_total_chunks,
333     _) = struct.unpack(self.HEADER_FORMAT, header_bin)
334    if magic != self.MAGIC:
335      # Not a sparse image, our job here is done.
336      return
337    if not (major_version == 1 and minor_version == 0):
338      raise ValueError('Encountered sparse image format version {}.{} but '
339                       'only 1.0 is supported'.format(major_version,
340                                                      minor_version))
341    if file_hdr_sz != struct.calcsize(self.HEADER_FORMAT):
342      raise ValueError('Unexpected file_hdr_sz value {}.'.
343                       format(file_hdr_sz))
344    if chunk_hdr_sz != struct.calcsize(ImageChunk.FORMAT):
345      raise ValueError('Unexpected chunk_hdr_sz value {}.'.
346                       format(chunk_hdr_sz))
347
348    self.block_size = block_size
349
350    # Build an list of chunks by parsing the file.
351    self._chunks = []
352
353    # Find the smallest offset where only "Don't care" chunks
354    # follow. This will be the size of the content in the sparse
355    # image.
356    offset = 0
357    output_offset = 0
358    for _ in range(1, self._num_total_chunks + 1):
359      chunk_offset = self._image.tell()
360
361      header_bin = self._image.read(struct.calcsize(ImageChunk.FORMAT))
362      (chunk_type, _, chunk_sz, total_sz) = struct.unpack(ImageChunk.FORMAT,
363                                                          header_bin)
364      data_sz = total_sz - struct.calcsize(ImageChunk.FORMAT)
365
366      if chunk_type == ImageChunk.TYPE_RAW:
367        if data_sz != (chunk_sz * self.block_size):
368          raise ValueError('Raw chunk input size ({}) does not match output '
369                           'size ({})'.
370                           format(data_sz, chunk_sz*self.block_size))
371        self._chunks.append(ImageChunk(ImageChunk.TYPE_RAW,
372                                       chunk_offset,
373                                       output_offset,
374                                       chunk_sz*self.block_size,
375                                       self._image.tell(),
376                                       None))
377        self._image.read(data_sz)
378
379      elif chunk_type == ImageChunk.TYPE_FILL:
380        if data_sz != 4:
381          raise ValueError('Fill chunk should have 4 bytes of fill, but this '
382                           'has {}'.format(data_sz))
383        fill_data = self._image.read(4)
384        self._chunks.append(ImageChunk(ImageChunk.TYPE_FILL,
385                                       chunk_offset,
386                                       output_offset,
387                                       chunk_sz*self.block_size,
388                                       None,
389                                       fill_data))
390      elif chunk_type == ImageChunk.TYPE_DONT_CARE:
391        if data_sz != 0:
392          raise ValueError('Don\'t care chunk input size is non-zero ({})'.
393                           format(data_sz))
394        self._chunks.append(ImageChunk(ImageChunk.TYPE_DONT_CARE,
395                                       chunk_offset,
396                                       output_offset,
397                                       chunk_sz*self.block_size,
398                                       None,
399                                       None))
400      elif chunk_type == ImageChunk.TYPE_CRC32:
401        if data_sz != 4:
402          raise ValueError('CRC32 chunk should have 4 bytes of CRC, but '
403                           'this has {}'.format(data_sz))
404        self._image.read(4)
405      else:
406        raise ValueError('Unknown chunk type {}'.format(chunk_type))
407
408      offset += chunk_sz
409      output_offset += chunk_sz * self.block_size
410
411    # Record where sparse data end.
412    self._sparse_end = self._image.tell()
413
414    # Now that we've traversed all chunks, sanity check.
415    if self._num_total_blocks != offset:
416      raise ValueError('The header said we should have {} output blocks, '
417                       'but we saw {}'.format(self._num_total_blocks, offset))
418    junk_len = len(self._image.read())
419    if junk_len > 0:
420      raise ValueError('There were {} bytes of extra data at the end of the '
421                       'file.'.format(junk_len))
422
423    # Assign |image_size|.
424    self.image_size = output_offset
425
426    # This is used when bisecting in read() to find the initial slice.
427    self._chunk_output_offsets = [i.output_offset for i in self._chunks]
428
429    self.is_sparse = True
430
431  def _update_chunks_and_blocks(self):
432    """Helper function to update the image header.
433
434    The the |total_chunks| and |total_blocks| fields in the header
435    will be set to value of the |_num_total_blocks| and
436    |_num_total_chunks| attributes.
437
438    """
439    self._image.seek(self.NUM_CHUNKS_AND_BLOCKS_OFFSET, os.SEEK_SET)
440    self._image.write(struct.pack(self.NUM_CHUNKS_AND_BLOCKS_FORMAT,
441                                  self._num_total_blocks,
442                                  self._num_total_chunks))
443
444  def append_dont_care(self, num_bytes):
445    """Appends a DONT_CARE chunk to the sparse file.
446
447    The given number of bytes must be a multiple of the block size.
448
449    Arguments:
450      num_bytes: Size in number of bytes of the DONT_CARE chunk.
451    """
452    assert num_bytes % self.block_size == 0
453
454    if not self.is_sparse:
455      self._image.seek(0, os.SEEK_END)
456      # This is more efficient that writing NUL bytes since it'll add
457      # a hole on file systems that support sparse files (native
458      # sparse, not Android sparse).
459      self._image.truncate(self._image.tell() + num_bytes)
460      self._read_header()
461      return
462
463    self._num_total_chunks += 1
464    self._num_total_blocks += num_bytes // self.block_size
465    self._update_chunks_and_blocks()
466
467    self._image.seek(self._sparse_end, os.SEEK_SET)
468    self._image.write(struct.pack(ImageChunk.FORMAT,
469                                  ImageChunk.TYPE_DONT_CARE,
470                                  0,  # Reserved
471                                  num_bytes // self.block_size,
472                                  struct.calcsize(ImageChunk.FORMAT)))
473    self._read_header()
474
475  def append_raw(self, data):
476    """Appends a RAW chunk to the sparse file.
477
478    The length of the given data must be a multiple of the block size.
479
480    Arguments:
481      data: Data to append.
482    """
483    assert len(data) % self.block_size == 0
484
485    if not self.is_sparse:
486      self._image.seek(0, os.SEEK_END)
487      self._image.write(data)
488      self._read_header()
489      return
490
491    self._num_total_chunks += 1
492    self._num_total_blocks += len(data) // self.block_size
493    self._update_chunks_and_blocks()
494
495    self._image.seek(self._sparse_end, os.SEEK_SET)
496    self._image.write(struct.pack(ImageChunk.FORMAT,
497                                  ImageChunk.TYPE_RAW,
498                                  0,  # Reserved
499                                  len(data) // self.block_size,
500                                  len(data) +
501                                  struct.calcsize(ImageChunk.FORMAT)))
502    self._image.write(data)
503    self._read_header()
504
505  def append_fill(self, fill_data, size):
506    """Appends a fill chunk to the sparse file.
507
508    The total length of the fill data must be a multiple of the block size.
509
510    Arguments:
511      fill_data: Fill data to append - must be four bytes.
512      size: Number of chunk - must be a multiple of four and the block size.
513    """
514    assert len(fill_data) == 4
515    assert size % 4 == 0
516    assert size % self.block_size == 0
517
518    if not self.is_sparse:
519      self._image.seek(0, os.SEEK_END)
520      self._image.write(fill_data * (size // 4))
521      self._read_header()
522      return
523
524    self._num_total_chunks += 1
525    self._num_total_blocks += size // self.block_size
526    self._update_chunks_and_blocks()
527
528    self._image.seek(self._sparse_end, os.SEEK_SET)
529    self._image.write(struct.pack(ImageChunk.FORMAT,
530                                  ImageChunk.TYPE_FILL,
531                                  0,  # Reserved
532                                  size // self.block_size,
533                                  4 + struct.calcsize(ImageChunk.FORMAT)))
534    self._image.write(fill_data)
535    self._read_header()
536
537  def seek(self, offset):
538    """Sets the cursor position for reading from unsparsified file.
539
540    Arguments:
541      offset: Offset to seek to from the beginning of the file.
542    """
543    self._file_pos = offset
544
545  def read(self, size):
546    """Reads data from the unsparsified file.
547
548    This method may return fewer than |size| bytes of data if the end
549    of the file was encountered.
550
551    The file cursor for reading is advanced by the number of bytes
552    read.
553
554    Arguments:
555      size: Number of bytes to read.
556
557    Returns:
558      The data.
559
560    """
561    if not self.is_sparse:
562      self._image.seek(self._file_pos)
563      data = self._image.read(size)
564      self._file_pos += len(data)
565      return data
566
567    # Iterate over all chunks.
568    chunk_idx = bisect.bisect_right(self._chunk_output_offsets,
569                                    self._file_pos) - 1
570    data = bytearray()
571    to_go = size
572    while to_go > 0:
573      chunk = self._chunks[chunk_idx]
574      chunk_pos_offset = self._file_pos - chunk.output_offset
575      chunk_pos_to_go = min(chunk.output_size - chunk_pos_offset, to_go)
576
577      if chunk.chunk_type == ImageChunk.TYPE_RAW:
578        self._image.seek(chunk.input_offset + chunk_pos_offset)
579        data.extend(self._image.read(chunk_pos_to_go))
580      elif chunk.chunk_type == ImageChunk.TYPE_FILL:
581        all_data = chunk.fill_data*(chunk_pos_to_go // len(chunk.fill_data) + 2)
582        offset_mod = chunk_pos_offset % len(chunk.fill_data)
583        data.extend(all_data[offset_mod:(offset_mod + chunk_pos_to_go)])
584      else:
585        assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
586        data.extend(b'\0' * chunk_pos_to_go)
587
588      to_go -= chunk_pos_to_go
589      self._file_pos += chunk_pos_to_go
590      chunk_idx += 1
591      # Generate partial read in case of EOF.
592      if chunk_idx >= len(self._chunks):
593        break
594
595    return data
596
597  def tell(self):
598    """Returns the file cursor position for reading from unsparsified file.
599
600    Returns:
601      The file cursor position for reading.
602    """
603    return self._file_pos
604
605  def truncate(self, size):
606    """Truncates the unsparsified file.
607
608    Arguments:
609      size: Desired size of unsparsified file.
610
611    Raises:
612      ValueError: If desired size isn't a multiple of the block size.
613    """
614    if not self.is_sparse:
615      self._image.truncate(size)
616      self._read_header()
617      return
618
619    if size % self.block_size != 0:
620      raise ValueError('Cannot truncate to a size which is not a multiple '
621                       'of the block size')
622
623    if size == self.image_size:
624      # Trivial where there's nothing to do.
625      return
626    elif size < self.image_size:
627      chunk_idx = bisect.bisect_right(self._chunk_output_offsets, size) - 1
628      chunk = self._chunks[chunk_idx]
629      if chunk.output_offset != size:
630        # Truncation in the middle of a trunk - need to keep the chunk
631        # and modify it.
632        chunk_idx_for_update = chunk_idx + 1
633        num_to_keep = size - chunk.output_offset
634        assert num_to_keep % self.block_size == 0
635        if chunk.chunk_type == ImageChunk.TYPE_RAW:
636          truncate_at = (chunk.chunk_offset +
637                         struct.calcsize(ImageChunk.FORMAT) + num_to_keep)
638          data_sz = num_to_keep
639        elif chunk.chunk_type == ImageChunk.TYPE_FILL:
640          truncate_at = (chunk.chunk_offset +
641                         struct.calcsize(ImageChunk.FORMAT) + 4)
642          data_sz = 4
643        else:
644          assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
645          truncate_at = chunk.chunk_offset + struct.calcsize(ImageChunk.FORMAT)
646          data_sz = 0
647        chunk_sz = num_to_keep // self.block_size
648        total_sz = data_sz + struct.calcsize(ImageChunk.FORMAT)
649        self._image.seek(chunk.chunk_offset)
650        self._image.write(struct.pack(ImageChunk.FORMAT,
651                                      chunk.chunk_type,
652                                      0,  # Reserved
653                                      chunk_sz,
654                                      total_sz))
655        chunk.output_size = num_to_keep
656      else:
657        # Truncation at trunk boundary.
658        truncate_at = chunk.chunk_offset
659        chunk_idx_for_update = chunk_idx
660
661      self._num_total_chunks = chunk_idx_for_update
662      self._num_total_blocks = 0
663      for i in range(0, chunk_idx_for_update):
664        self._num_total_blocks += self._chunks[i].output_size // self.block_size
665      self._update_chunks_and_blocks()
666      self._image.truncate(truncate_at)
667
668      # We've modified the file so re-read all data.
669      self._read_header()
670    else:
671      # Truncating to grow - just add a DONT_CARE section.
672      self.append_dont_care(size - self.image_size)
673
674
675class GuidGenerator(object):
676  """An interface for obtaining strings that are GUIDs.
677
678  To facilitate unit testing, this abstraction is used instead of the
679  directly using the uuid module.
680  """
681
682  def dispense_guid(self, partition_number):
683    """Dispenses a GUID.
684
685    Arguments:
686      partition_number: The partition number or 0 if requesting a GUID
687                        for the whole disk.
688
689    Returns:
690      A RFC 4122 compliant GUID, as a string.
691    """
692    return str(uuid.uuid4())
693
694
695class Partition(object):
696  """Object representing a partition.
697
698  Attributes:
699    label: The partition label.
700    offset: Offset of the partition on the disk, or None.
701    size: Size of the partition or None if not specified.
702    grow: True if partition has been requested to use all remaining space.
703    guid: Instance GUID (RFC 4122 compliant) as a string or None or 'auto'
704          if it should be automatically generated.
705    type_guid: Type GUID (RFC 4122 compliant) as a string or a known type
706               from the |KNOWN_TYPE_GUIDS| map.
707    flags: GUID flags.
708    persist: If true, sets bit 0 of flags indicating that this partition should
709             not be deleted by the bootloader.
710    ab: If True, the partition is an A/B partition.
711    ab_expanded: If True, the A/B partitions have been generated.
712    ignore: If True, the partition should not be included in the final output.
713    position: The requested position of the partition or 0 if it doesn't matter.
714  """
715
716  def __init__(self):
717    """Initializer method."""
718    self.label = ''
719    self.offset = None
720    self.size = None
721    self.grow = False
722    self.guid = None
723    self.type_guid = None
724    self.flags = 0
725    self.persist = False
726    self.ab = False
727    self.ab_expanded = False
728    self.ignore = False
729    self.position = 0
730
731  def add_info(self, pobj):
732    """Add information to partition.
733
734    Arguments:
735      pobj: A JSON object with information about the partition.
736    """
737    self.label = pobj[JSON_KEYWORD_PARTITIONS_LABEL]
738    value = pobj.get(JSON_KEYWORD_PARTITIONS_OFFSET)
739    if value is not None:
740      self.offset = ParseSize(value)
741    value = pobj.get(JSON_KEYWORD_PARTITIONS_SIZE)
742    if value is not None:
743      self.size = ParseSize(value)
744    value = pobj.get(JSON_KEYWORD_PARTITIONS_GROW)
745    if value is not None:
746      self.grow = value
747    value = pobj.get(JSON_KEYWORD_PARTITIONS_AB)
748    if value is not None:
749      self.ab = value
750    value = pobj.get(JSON_KEYWORD_PARTITIONS_AB_EXPANDED)
751    if value is not None:
752      self.ab_expanded = value
753    value = pobj.get(JSON_KEYWORD_PARTITIONS_GUID)
754    if value is not None:
755      self.guid = value
756    value = pobj.get(JSON_KEYWORD_PARTITIONS_IGNORE)
757    if value is not None:
758      self.ignore = value
759    value = pobj.get(JSON_KEYWORD_PARTITIONS_TYPE_GUID)
760    if value is not None:
761      self.type_guid = str.lower(str(value))
762      if self.type_guid in KNOWN_TYPE_GUIDS:
763        self.type_guid = KNOWN_TYPE_GUIDS[self.type_guid]
764    value = pobj.get(JSON_KEYWORD_PARTITIONS_FLAGS)
765    if value is not None:
766      self.flags = ParseNumber(value)
767    value = pobj.get(JSON_KEYWORD_PARTITIONS_PERSIST)
768    if value is not None:
769      self.persist = value
770      if value:
771        self.flags = self.flags | 0x1
772    value = pobj.get(JSON_KEYWORD_PARTITIONS_POSITION)
773    if value is not None:
774      self.position = ParseNumber(value)
775
776  def expand_guid(self, guid_generator, partition_number):
777    """Assign instance GUID and type GUID if required.
778
779    Arguments:
780      guid_generator: A GuidGenerator object.
781      partition_number: The partition number, starting from 1.
782    """
783    if not self.guid or self.guid == JSON_KEYWORD_AUTO:
784      self.guid = guid_generator.dispense_guid(partition_number)
785    if not self.type_guid:
786      self.type_guid = KNOWN_TYPE_GUIDS['brillo_vendor_specific']
787
788  def validate(self):
789    """Sanity checks data in object."""
790
791    try:
792      _ = uuid.UUID(str(self.guid))
793    except ValueError:
794      raise ValueError('The string "{}" is not a valid GPT instance GUID on '
795                       'partition with label "{}".'.format(
796                           str(self.guid), self.label))
797
798    try:
799      _ = uuid.UUID(str(self.type_guid))
800    except ValueError:
801      raise ValueError('The string "{}" is not a valid GPT type GUID on '
802                       'partition with label "{}".'.format(
803                           str(self.type_guid), self.label))
804
805    if not self.size:
806      if not self.grow:
807        raise ValueError('Size can only be unset if "grow" is True.')
808
809  def cmp(self, other):
810    """Comparison method."""
811    self_position = self.position
812    if self_position == 0:
813      self_position = GPT_MAX_PART_NUM
814    other_position = other.position
815    if other_position == 0:
816      other_position = GPT_MAX_PART_NUM
817    if self_position < other_position:
818      return -1
819    elif self_position > other_position:
820      return 1
821    else:
822      return 0
823
824
825class Settings(object):
826  """An object for holding settings.
827
828  Attributes:
829    ab_suffixes: A list of A/B suffixes to use.
830    disk_size: An integer with the disk size in bytes.
831    partitions_offset_begin: An integer with the disk partitions
832                             offset begin size in bytes.
833    disk_alignment: The alignment to use for partitions.
834    disk_guid: The GUID to use for the disk or None or 'auto' if
835               automatically generated.
836  """
837
838  def __init__(self):
839    """Initializer with defaults."""
840    self.ab_suffixes = ['_a', '_b']
841    self.disk_size = None
842    self.partitions_offset_begin = 0
843    self.disk_alignment = 4096
844    self.disk_guid = JSON_KEYWORD_AUTO
845
846
847class BptError(Exception):
848  """Application-specific errors.
849
850  These errors represent issues for which a stack-trace should not be
851  presented.
852
853  Attributes:
854    message: Error message.
855  """
856
857  def __init__(self, message):
858    Exception.__init__(self, message)
859
860
861class BptParsingError(BptError):
862  """Represents an error with an input file.
863
864  Attributes:
865    message: Error message.
866    filename: Name of the file that caused an error.
867  """
868
869  def __init__(self, filename, message):
870    self.filename = filename
871    BptError.__init__(self, message)
872
873
874class Bpt(object):
875  """Business logic for bpttool command-line tool."""
876
877  def _read_json(self, input_files, ab_collapse=True):
878    """Parses a stack of JSON files into suitable data structures.
879
880    The order of files matters as later files can modify partitions
881    declared in earlier files.
882
883    Arguments:
884      input_files: An ordered list of open files.
885      ab_collapse: If True, collapse A/B partitions.
886
887    Returns:
888      A tuple where the first element is a list of Partition objects
889      and the second element is a Settings object.
890
891    Raises:
892      BptParsingError: If an input file has an error.
893    """
894    partitions = []
895    settings = Settings()
896
897    # Read all input file and merge partitions and settings.
898    for f in input_files:
899      try:
900        obj = json.loads(f.read())
901      except ValueError as e:
902        # Unfortunately we can't easily get the line number where the
903        # error occurred.
904        raise BptParsingError(f.name, e.message)
905
906      sobj = obj.get(JSON_KEYWORD_SETTINGS)
907      if sobj:
908        ab_suffixes = sobj.get(JSON_KEYWORD_SETTINGS_AB_SUFFIXES)
909        if ab_suffixes:
910          settings.ab_suffixes = ab_suffixes
911        disk_size = sobj.get(JSON_KEYWORD_SETTINGS_DISK_SIZE)
912        if disk_size:
913          settings.disk_size = ParseSize(disk_size)
914        partitions_offset_begin = sobj.get(
915                JSON_KEYWORD_SETTINGS_PARTITIONS_OFFSET_BEGIN)
916        if partitions_offset_begin:
917          settings.partitions_offset_begin = ParseSize(partitions_offset_begin)
918        disk_alignment = sobj.get(JSON_KEYWORD_SETTINGS_DISK_ALIGNMENT)
919        if disk_alignment:
920          settings.disk_alignment = ParseSize(disk_alignment)
921        disk_guid = sobj.get(JSON_KEYWORD_SETTINGS_DISK_GUID)
922        if disk_guid:
923          settings.disk_guid = disk_guid
924
925      pobjs = obj.get(JSON_KEYWORD_PARTITIONS)
926      if pobjs:
927        for pobj in pobjs:
928          if ab_collapse and pobj.get(JSON_KEYWORD_PARTITIONS_AB_EXPANDED):
929            # If we encounter an expanded partition, unexpand it. This
930            # is to make it possible to use output-JSON (from this tool)
931            # and stack it with an input-JSON file that e.g. specifies
932            # size='256 GiB' for the 'system' partition.
933            label = pobj[JSON_KEYWORD_PARTITIONS_LABEL]
934            if label.endswith(settings.ab_suffixes[0]):
935              # Modify first A/B copy so it doesn't have the trailing suffix.
936              new_len = len(label) - len(settings.ab_suffixes[0])
937              pobj[JSON_KEYWORD_PARTITIONS_LABEL] = label[0:new_len]
938              pobj[JSON_KEYWORD_PARTITIONS_AB_EXPANDED] = False
939              pobj[JSON_KEYWORD_PARTITIONS_GUID] = JSON_KEYWORD_AUTO
940            else:
941              # Skip other A/B copies.
942              continue
943          # Find or create a partition.
944          p = None
945          for candidate in partitions:
946            if candidate.label == pobj[JSON_KEYWORD_PARTITIONS_LABEL]:
947              p = candidate
948              break
949          if not p:
950            p = Partition()
951            partitions.append(p)
952          p.add_info(pobj)
953
954    return partitions, settings
955
956  def _generate_json(self, partitions, settings):
957    """Generate a string with JSON representing partitions and settings.
958
959    Arguments:
960      partitions: A list of Partition objects.
961      settings: A Settings object.
962
963    Returns:
964      A JSON string.
965    """
966    suffixes_str = '['
967    for n in range(0, len(settings.ab_suffixes)):
968      if n != 0:
969        suffixes_str += ', '
970      suffixes_str += '"{}"'.format(settings.ab_suffixes[n])
971    suffixes_str += ']'
972
973    ret = ('{{\n'
974           '  "' + JSON_KEYWORD_SETTINGS + '": {{\n'
975           '    "' + JSON_KEYWORD_SETTINGS_AB_SUFFIXES + '": {},\n'
976           '    "' + JSON_KEYWORD_SETTINGS_PARTITIONS_OFFSET_BEGIN + '": {},\n'
977           '    "' + JSON_KEYWORD_SETTINGS_DISK_SIZE + '": {},\n'
978           '    "' + JSON_KEYWORD_SETTINGS_DISK_ALIGNMENT + '": {},\n'
979           '    "' + JSON_KEYWORD_SETTINGS_DISK_GUID + '": "{}"\n'
980           '  }},\n'
981           '  "' + JSON_KEYWORD_PARTITIONS + '": [\n').format(
982               suffixes_str,
983               settings.partitions_offset_begin,
984               settings.disk_size,
985               settings.disk_alignment,
986               settings.disk_guid)
987
988    for n in range(0, len(partitions)):
989      p = partitions[n]
990      ret += ('    {{\n'
991              '      "' + JSON_KEYWORD_PARTITIONS_LABEL + '": "{}",\n'
992              '      "' + JSON_KEYWORD_PARTITIONS_OFFSET + '": {},\n'
993              '      "' + JSON_KEYWORD_PARTITIONS_SIZE + '": {},\n'
994              '      "' + JSON_KEYWORD_PARTITIONS_GROW + '": {},\n'
995              '      "' + JSON_KEYWORD_PARTITIONS_GUID + '": "{}",\n'
996              '      "' + JSON_KEYWORD_PARTITIONS_TYPE_GUID + '": "{}",\n'
997              '      "' + JSON_KEYWORD_PARTITIONS_FLAGS + '": "{:#018x}",\n'
998              '      "' + JSON_KEYWORD_PARTITIONS_PERSIST + '": {},\n'
999              '      "' + JSON_KEYWORD_PARTITIONS_IGNORE + '": {},\n'
1000              '      "' + JSON_KEYWORD_PARTITIONS_AB + '": {},\n'
1001              '      "' + JSON_KEYWORD_PARTITIONS_AB_EXPANDED + '": {},\n'
1002              '      "' + JSON_KEYWORD_PARTITIONS_POSITION + '": {}\n'
1003              '    }}{}\n').format(p.label,
1004                                   p.offset,
1005                                   p.size,
1006                                   'true' if p.grow else 'false',
1007                                   p.guid,
1008                                   p.type_guid,
1009                                   p.flags,
1010                                   'true' if p.persist else 'false',
1011                                   'true' if p.ignore else 'false',
1012                                   'true' if p.ab else 'false',
1013                                   'true' if p.ab_expanded else 'false',
1014                                   p.position,
1015                                   '' if n == len(partitions) - 1 else ',')
1016    ret += ('  ]\n'
1017            '}\n')
1018    return ret
1019
1020  def _lba_to_chs(self, lba):
1021    """Converts LBA to CHS.
1022
1023    Arguments:
1024      lba: The sector number to convert.
1025
1026    Returns:
1027      An array containing the CHS encoded the way it's expected in a
1028      MBR partition table.
1029    """
1030    # See https://en.wikipedia.org/wiki/Cylinder-head-sector
1031    num_heads = 255
1032    num_sectors = 63
1033    # If LBA isn't going to fit in CHS, return maximum CHS values.
1034    max_lba = 255*num_heads*num_sectors
1035    if lba > max_lba:
1036      return [255, 255, 255]
1037    c = lba // (num_heads*num_sectors)
1038    h = (lba // num_sectors) % num_heads
1039    s = lba % num_sectors
1040    return [h, (((c>>8) & 0x03)<<6) | (s & 0x3f), c & 0xff]
1041
1042  def _generate_protective_mbr(self, settings):
1043    """Generate Protective MBR.
1044
1045    Arguments:
1046      settings: A Settings object.
1047
1048    Returns:
1049      A string with the binary protective MBR (512 bytes).
1050    """
1051    # See https://en.wikipedia.org/wiki/Master_boot_record for MBR layout.
1052    #
1053    # The first partition starts at offset 446 (0x1be).
1054    lba_start = 1
1055    lba_end = settings.disk_size // DISK_SECTOR_SIZE - 1
1056    start_chs = self._lba_to_chs(lba_start)
1057    end_chs = self._lba_to_chs(lba_end)
1058    pmbr = struct.pack('<446s'     # Bootloader code
1059                       'B'         # Status.
1060                       'BBB'       # CHS start.
1061                       'B'         # Partition type.
1062                       'BBB'       # CHS end.
1063                       'I'         # LBA of partition start.
1064                       'I'         # Number of sectors in partition.
1065                       '48x'       # Padding to get to offset 510 (0x1fe).
1066                       'BB',       # Boot signature.
1067                       b'\xfa\xeb\xfe', # cli ; jmp $ (x86)
1068                       0x00,
1069                       start_chs[0], start_chs[1], start_chs[2],
1070                       0xee,       # MBR Partition Type: GPT protective MBR.
1071                       end_chs[0], end_chs[1], end_chs[2],
1072                       1,          # LBA start
1073                       lba_end,
1074                       0x55, 0xaa)
1075    return pmbr
1076
1077  def _generate_gpt(self, partitions, settings, primary=True):
1078    """Generate GUID Partition Table.
1079
1080    Arguments:
1081      partitions: A list of Partition objects.
1082      settings: A Settings object.
1083      primary: True to generate primary GPT, False to generate secondary.
1084
1085    Returns:
1086      A string with the binary GUID Partition Table (33*512 bytes).
1087    """
1088    # See https://en.wikipedia.org/wiki/Master_boot_record for MBR layout.
1089    #
1090    # The first partition starts at offset 446 (0x1be).
1091
1092    disk_num_lbas = settings.disk_size // DISK_SECTOR_SIZE
1093    if primary:
1094      current_lba = 1
1095      other_lba = disk_num_lbas - 1
1096      partitions_lba = 2
1097    else:
1098      current_lba = disk_num_lbas - 1
1099      other_lba = 1
1100      partitions_lba = disk_num_lbas - GPT_NUM_LBAS
1101    first_usable_lba = GPT_NUM_LBAS + 1
1102    last_usable_lba = disk_num_lbas - GPT_NUM_LBAS - 1
1103
1104    part_array = []
1105    for p in partitions:
1106      part_array.append(struct.pack(
1107          '<16s'    # Partition type GUID.
1108          '16s'     # Partition instance GUID.
1109          'QQ'      # First and last LBA.
1110          'Q'       # Flags.
1111          '72s',    # Name (36 UTF-16LE code units).
1112          uuid.UUID(p.type_guid).bytes_le,
1113          uuid.UUID(p.guid).bytes_le,
1114          p.offset // DISK_SECTOR_SIZE,
1115          (p.offset + p.size) // DISK_SECTOR_SIZE - 1,
1116          p.flags,
1117          p.label.encode(encoding='utf-16le')))
1118
1119    part_array.append(((128 - len(partitions))*128) * b'\0')
1120    part_array_bytes = functools.reduce(lambda x, y: x + y, part_array)
1121
1122    partitions_crc32 = zlib.crc32(part_array_bytes) % (1<<32)
1123
1124    header_crc32 = 0
1125    while True:
1126      header = struct.pack(
1127          '<8s'    # Signature.
1128          '4B'     # Version.
1129          'I'      # Header size.
1130          'I'      # CRC32 (must be zero during calculation).
1131          'I'      # Reserved (must be zero).
1132          'QQ'     # Current and Other LBA.
1133          'QQ'     # First and last usable LBA.
1134          '16s'    # Disk GUID.
1135          'Q'      # Starting LBA of array of partitions.
1136          'I'      # Number of partitions.
1137          'I'      # Partition entry size, in bytes.
1138          'I'      # CRC32 of partition array
1139          '420x',  # Padding to get to 512 bytes.
1140          b'EFI PART',
1141          0x00, 0x00, 0x01, 0x00,
1142          92,
1143          header_crc32,
1144          0x00000000,
1145          current_lba, other_lba,
1146          first_usable_lba, last_usable_lba,
1147          uuid.UUID(settings.disk_guid).bytes_le,
1148          partitions_lba,
1149          128,
1150          128,
1151          partitions_crc32)
1152      if header_crc32 != 0:
1153        break
1154      header_crc32 = zlib.crc32(header[0:92]) % (1<<32)
1155
1156    if primary:
1157      return header + part_array_bytes
1158    else:
1159      return part_array_bytes + header
1160
1161  def _generate_gpt_bin(self, partitions, settings):
1162    """Generate a bytearray representing partitions and settings.
1163
1164    The blob will have three partition tables, laid out one after
1165    another: 1) Protective MBR (512 bytes); 2) Primary GPT (33*512
1166    bytes); and 3) Secondary GPT (33*512 bytes).
1167
1168    The total size will be 34,304 bytes.
1169
1170    Arguments:
1171      partitions: A list of Partition objects.
1172      settings: A Settings object.
1173
1174    Returns:
1175      A bytearray() object.
1176    """
1177    protective_mbr = self._generate_protective_mbr(settings)
1178    primary_gpt = self._generate_gpt(partitions, settings)
1179    secondary_gpt = self._generate_gpt(partitions, settings, primary=False)
1180    ret = protective_mbr + primary_gpt + secondary_gpt
1181    return ret
1182
1183  def _validate_disk_partitions(self, partitions, disk_size):
1184    """Check that a list of partitions have assigned offsets and fits on a
1185       disk of a given size.
1186
1187    This function checks partition offsets and sizes to see if they may fit on
1188    a disk image.
1189
1190    Arguments:
1191      partitions: A list of Partition objects.
1192      settings: Integer size of disk image.
1193
1194    Raises:
1195      BptError: If checked condition is not satisfied.
1196    """
1197    for p in partitions:
1198      if not p.offset or p.offset < (GPT_NUM_LBAS + 1)*DISK_SECTOR_SIZE:
1199        raise BptError('Partition with label "{}" has no offset.'
1200                       .format(p.label))
1201      if not p.size or p.size < 0:
1202        raise BptError('Partition with label "{}" has no size.'
1203                        .format(p.label))
1204      if (p.offset + p.size) > (disk_size - GPT_NUM_LBAS*DISK_SECTOR_SIZE):
1205        raise BptError('Partition with label "{}" exceeds the disk '
1206                       'image size.'.format(p.label))
1207
1208  def make_table(self,
1209                 inputs,
1210                 ab_suffixes=None,
1211                 partitions_offset_begin=None,
1212                 disk_size=None,
1213                 disk_alignment=None,
1214                 disk_guid=None,
1215                 guid_generator=None):
1216    """Implementation of the 'make_table' command.
1217
1218    This function takes a list of input partition definition files,
1219    flattens them, expands A/B partitions, grows partitions, and lays
1220    out partitions according to alignment constraints.
1221
1222    Arguments:
1223      inputs: List of JSON files to parse.
1224      ab_suffixes: List of the A/B suffixes (as a comma-separated string)
1225                   to use or None to not override.
1226      partitions_offset_begin: Size of disk partitions offset
1227                               begin or None to not override.
1228      disk_size: Size of disk or None to not override.
1229      disk_alignment: Disk alignment or None to not override.
1230      disk_guid: Disk GUID as a string or None to not override.
1231      guid_generator: A GuidGenerator or None to use the default.
1232
1233    Returns:
1234      A tuple where the first argument is a JSON string for the resulting
1235      partitions and the second argument is the binary partition tables.
1236
1237    Raises:
1238      BptParsingError: If an input file has an error.
1239      BptError: If another application-specific error occurs
1240    """
1241    partitions, settings = self._read_json(inputs)
1242
1243    # Command-line arguments override anything specified in input
1244    # files.
1245    if disk_size:
1246      settings.disk_size = int(math.ceil(disk_size))
1247    if disk_alignment:
1248      settings.disk_alignment = int(disk_alignment)
1249    if partitions_offset_begin:
1250      settings.partitions_offset_begin = int(partitions_offset_begin)
1251    if ab_suffixes:
1252      settings.ab_suffixes = ab_suffixes.split(',')
1253    if disk_guid:
1254      settings.disk_guid = disk_guid
1255
1256    if not guid_generator:
1257      guid_generator = GuidGenerator()
1258
1259    # We need to know the disk size. Also round it down to ensure it's
1260    # a multiple of the sector size.
1261    if not settings.disk_size:
1262      raise BptError('Disk size not specified. Use --disk_size option '
1263                     'or specify it in an input file.\n')
1264    settings.disk_size = RoundToMultiple(settings.disk_size,
1265                                         DISK_SECTOR_SIZE,
1266                                         round_down=True)
1267
1268    # Alignment must be divisible by disk sector size.
1269    if settings.disk_alignment % DISK_SECTOR_SIZE != 0:
1270      raise BptError(
1271          'Disk alignment size of {} is not divisible by {}.\n'.format(
1272              settings.disk_alignment, DISK_SECTOR_SIZE))
1273
1274    if settings.partitions_offset_begin != 0:
1275      # Disk partitions offset begin size must be
1276      # divisible by disk sector size.
1277      if settings.partitions_offset_begin % settings.disk_alignment != 0:
1278        raise BptError(
1279            'Disk Partitions offset begin size of {} '
1280            'is not divisible by {}.\n'.format(
1281                settings.partitions_offset_begin, settings.disk_alignment))
1282      settings.partitions_offset_begin = max(settings.partitions_offset_begin,
1283                                           DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS))
1284      settings.partitions_offset_begin = RoundToMultiple(
1285          settings.partitions_offset_begin, settings.disk_alignment)
1286
1287    # Expand A/B partitions and skip ignored partitions.
1288    expanded_partitions = []
1289    for p in partitions:
1290      if p.ignore:
1291        continue
1292      if p.ab and not p.ab_expanded:
1293        p.ab_expanded = True
1294        for suffix in settings.ab_suffixes:
1295          new_p = copy.deepcopy(p)
1296          new_p.label += suffix
1297          expanded_partitions.append(new_p)
1298      else:
1299        expanded_partitions.append(p)
1300    partitions = expanded_partitions
1301
1302    # Expand Disk GUID if needed.
1303    if not settings.disk_guid or settings.disk_guid == JSON_KEYWORD_AUTO:
1304      settings.disk_guid = guid_generator.dispense_guid(0)
1305
1306    # Sort according to 'position' attribute.
1307    partitions = sorted(partitions, key=functools.cmp_to_key(lambda x, y: x.cmp(y)))
1308
1309    # Automatically generate GUIDs if the GUID is unset or set to
1310    # 'auto'. Also validate the rest of the fields.
1311    part_no = 1
1312    for p in partitions:
1313      p.expand_guid(guid_generator, part_no)
1314      p.validate()
1315      part_no += 1
1316
1317    # Idenfify partition to grow and lay out partitions, ignoring the
1318    # one to grow. This way we can figure out how much space is left.
1319    #
1320    # Right now we only support a single 'grow' partition but we could
1321    # support more in the future by splitting up the available bytes
1322    # between them.
1323    grow_part = None
1324    # offset minimal size: DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS)
1325    offset = max(settings.partitions_offset_begin,
1326                 DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS))
1327    for p in partitions:
1328      if p.grow:
1329        if grow_part:
1330          raise BptError('Only a single partition can be automatically '
1331                         'grown.\n')
1332        grow_part = p
1333      else:
1334        # Ensure size is a multiple of DISK_SECTOR_SIZE by rounding up
1335        # (user may specify it as e.g. "1.5 GB" which is not divisible
1336        # by 512).
1337        p.size = RoundToMultiple(p.size, DISK_SECTOR_SIZE)
1338        # Align offset to disk alignment.
1339        offset = RoundToMultiple(offset, settings.disk_alignment)
1340        offset += p.size
1341
1342    # After laying out (respecting alignment) all non-grow
1343    # partitions, check that the given disk size is big enough.
1344    if offset > settings.disk_size - DISK_SECTOR_SIZE*GPT_NUM_LBAS:
1345      raise BptError('Disk size of {} bytes is too small for partitions '
1346                     'totaling {} bytes.\n'.format(
1347                         settings.disk_size, offset))
1348
1349    # If we have a grow partition, it'll starts at the next
1350    # available alignment offset and we can calculate its size as
1351    # follows.
1352    if grow_part:
1353      offset = RoundToMultiple(offset, settings.disk_alignment)
1354      grow_part.size = RoundToMultiple(
1355          settings.disk_size - DISK_SECTOR_SIZE*GPT_NUM_LBAS - offset,
1356          settings.disk_alignment,
1357          round_down=True)
1358      if grow_part.size < DISK_SECTOR_SIZE:
1359        raise BptError('Not enough space for partition "{}" to be '
1360                       'automatically grown.\n'.format(grow_part.label))
1361
1362    # Now we can assign partition start offsets for all partitions,
1363    # including the grow partition.
1364    # offset minimal size: DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS)
1365    offset = max(settings.partitions_offset_begin,
1366                 DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS))
1367    for p in partitions:
1368      # Align offset.
1369      offset = RoundToMultiple(offset, settings.disk_alignment)
1370      p.offset = offset
1371      offset += p.size
1372    assert offset <= settings.disk_size - DISK_SECTOR_SIZE*GPT_NUM_LBAS
1373
1374    json_str = self._generate_json(partitions, settings)
1375
1376    gpt_bin = self._generate_gpt_bin(partitions, settings)
1377
1378    return json_str, gpt_bin
1379
1380  def make_disk_image(self, output, bpt, images, allow_empty_partitions=False):
1381    """Implementation of the 'make_disk_image' command.
1382
1383    This function takes in a list of partitions images and a bpt file
1384    for the purpose of creating a raw disk image with a protective MBR,
1385    primary and secondary GPT, and content for each partition as specified.
1386
1387    Arguments:
1388      output: Output file where disk image is to be written to.
1389      bpt: BPT JSON file to parse.
1390      images: List of partition image paths to be combined (as specified by
1391              bpt).  Each element is of the form.
1392              'PARTITION_NAME:/PATH/TO/PARTITION_IMAGE'
1393      allow_empty_partitions: If True, partitions defined in |bpt| need not to
1394                              be present in |images|. Otherwise an exception is
1395                              thrown if a partition is referenced in |bpt| but
1396                              not in |images|.
1397
1398    Raises:
1399      BptParsingError: If an image file has an error.
1400      BptError: If another application-specific error occurs.
1401    """
1402    # Generate partition list and settings.
1403    partitions, settings = self._read_json([bpt], ab_collapse=False)
1404
1405    # Validated partition sizes and offsets.
1406    self._validate_disk_partitions(partitions, settings.disk_size)
1407
1408    # Sort according to 'offset' attribute.
1409    partitions = sorted(partitions, key=functools.cmp_to_key(lambda x, y: x.cmp(y)))
1410
1411    # Create necessary tables.
1412    protective_mbr = self._generate_protective_mbr(settings)
1413    primary_gpt = self._generate_gpt(partitions, settings)
1414    secondary_gpt = self._generate_gpt(partitions, settings, primary=False)
1415
1416    # Start at 0 offset for mbr and primary gpt.
1417    output.seek(0)
1418    output.write(protective_mbr)
1419    output.write(primary_gpt)
1420
1421    # Create mapping of partition name to partition image file.
1422    image_file_names = {}
1423    try:
1424      for name_path in images:
1425        name, path = name_path.split(":")
1426        image_file_names[name] = path
1427    except ValueError as e:
1428      raise BptParsingError(name_path, 'Bad image argument {}.'.format(
1429                            images[i]))
1430
1431    # Read image and insert in correct offset.
1432    for p in partitions:
1433      if p.label not in image_file_names:
1434        if allow_empty_partitions:
1435          continue
1436        else:
1437          raise BptParsingError(bpt.name, 'No content specified for partition'
1438                                ' with label {}'.format(p.label))
1439
1440      input_image = ImageHandler(image_file_names[p.label])
1441      output.seek(p.offset)
1442      partition_blob = input_image.read(p.size)
1443      output.write(partition_blob)
1444
1445    # Put secondary GPT and end of disk.
1446    output.seek(settings.disk_size - len(secondary_gpt))
1447    output.write(secondary_gpt)
1448
1449  def query_partition(self, input_file, part_label, query_type, ab_collapse):
1450    """Implementation of the 'query_partition' command.
1451
1452    This reads the partition definition file given by |input_file| and
1453    returns information of type |query_type| for the partition with
1454    label |part_label|.
1455
1456    Arguments:
1457      input_file: A JSON file to parse.
1458      part_label: Label of partition to query information about.
1459      query_type: The information to query, see |QUERY_PARTITION_TYPES|.
1460      ab_collapse: If True, collapse A/B partitions.
1461
1462    Returns:
1463      The requested information as a string or None if there is no
1464      partition with the given label.
1465
1466    Raises:
1467      BptParsingError: If an input file has an error.
1468      BptError: If another application-specific error occurs
1469    """
1470
1471    partitions, _ = self._read_json([input_file], ab_collapse)
1472
1473    part = None
1474    for p in partitions:
1475      if p.label == part_label:
1476        part = p
1477        break
1478
1479    if not part:
1480      return None
1481
1482    value = part.__dict__.get(query_type)
1483    # Print out flags as a hex-value.
1484    if query_type == 'flags':
1485      return '{:#018x}'.format(value)
1486    return str(value)
1487
1488
1489class BptTool(object):
1490  """Object for bpttool command-line tool."""
1491
1492  def __init__(self):
1493    """Initializer method."""
1494    self.bpt = Bpt()
1495
1496  def run(self, argv):
1497    """Command-line processor.
1498
1499    Arguments:
1500      argv: Pass sys.argv from main.
1501    """
1502    parser = argparse.ArgumentParser()
1503    subparsers = parser.add_subparsers(title='subcommands')
1504
1505    sub_parser = subparsers.add_parser(
1506        'version',
1507        help='Prints version of bpttool.')
1508    sub_parser.set_defaults(func=self.version)
1509
1510    sub_parser = subparsers.add_parser(
1511        'make_table',
1512        help='Lays out partitions and creates partition table.')
1513    sub_parser.add_argument('--input',
1514                            help='Path to partition definition file.',
1515                            type=argparse.FileType('r'),
1516                            action='append')
1517    sub_parser.add_argument('--ab_suffixes',
1518                            help='Set or override A/B suffixes.')
1519    sub_parser.add_argument('--partitions_offset_begin',
1520                            help='Set or override disk partitions '
1521                                 'offset begin size.',
1522                            type=ParseSize)
1523    sub_parser.add_argument('--disk_size',
1524                            help='Set or override disk size.',
1525                            type=ParseSize)
1526    sub_parser.add_argument('--disk_alignment',
1527                            help='Set or override disk alignment.',
1528                            type=ParseSize)
1529    sub_parser.add_argument('--disk_guid',
1530                            help='Set or override disk GUID.',
1531                            type=ParseGuid)
1532    sub_parser.add_argument('--output_json',
1533                            help='JSON output file name.',
1534                            type=argparse.FileType('w'))
1535    sub_parser.add_argument('--output_gpt',
1536                            help='Output file name for MBR/GPT/GPT file.',
1537                            type=argparse.FileType('wb'))
1538    sub_parser.set_defaults(func=self.make_table)
1539
1540    sub_parser = subparsers.add_parser(
1541        'make_disk_image',
1542        help='Creates disk image for loaded with partitions.')
1543    sub_parser.add_argument('--output',
1544                            help='Path to image output.',
1545                            type=argparse.FileType('wb'),
1546                            required=True)
1547    sub_parser.add_argument('--input',
1548                            help='Path to bpt file input.',
1549                            type=argparse.FileType('r'),
1550                            required=True)
1551    sub_parser.add_argument('--image',
1552                            help='Partition name and path to image file.',
1553                            metavar='PARTITION_NAME:PATH',
1554                            action='append')
1555    sub_parser.add_argument('--allow_empty_partitions',
1556                            help='Allow skipping partitions in bpt file.',
1557                            action='store_true')
1558    sub_parser.set_defaults(func=self.make_disk_image)
1559
1560    sub_parser = subparsers.add_parser(
1561        'query_partition',
1562        help='Looks up informtion about a partition.')
1563    sub_parser.add_argument('--input',
1564                            help='Path to partition definition file.',
1565                            type=argparse.FileType('r'),
1566                            required=True)
1567    sub_parser.add_argument('--label',
1568                            help='Label of partition to look up.',
1569                            required=True)
1570    sub_parser.add_argument('--ab_collapse',
1571                            help='Collapse A/B partitions.',
1572                            action='store_true')
1573    sub_parser.add_argument('--type',
1574                            help='Type of information to look up.',
1575                            choices=QUERY_PARTITION_TYPES,
1576                            required=True)
1577    sub_parser.set_defaults(func=self.query_partition)
1578
1579    args = parser.parse_args(argv[1:])
1580    args.func(args)
1581
1582  def version(self, _):
1583    """Implements the 'version' sub-command."""
1584    print('{}.{}'.format(BPT_VERSION_MAJOR, BPT_VERSION_MINOR))
1585
1586  def query_partition(self, args):
1587    """Implements the 'query_partition' sub-command."""
1588    try:
1589      result = self.bpt.query_partition(args.input,
1590                                        args.label,
1591                                        args.type,
1592                                        args.ab_collapse)
1593    except BptParsingError as e:
1594      sys.stderr.write('{}: Error parsing: {}\n'.format(e.filename, e.message))
1595      sys.exit(1)
1596    except BptError as e:
1597      sys.stderr.write('{}\n'.format(e.message))
1598      sys.exit(1)
1599
1600    if not result:
1601      sys.stderr.write('No partition with label "{}".\n'.format(args.label))
1602      sys.exit(1)
1603
1604    print(result)
1605
1606  def make_table(self, args):
1607    """Implements the 'make_table' sub-command."""
1608    if not args.input:
1609      sys.stderr.write('Option --input is required one or more times.\n')
1610      sys.exit(1)
1611
1612    try:
1613      (json_str, gpt_bin) = self.bpt.make_table(args.input, args.ab_suffixes,
1614                                                args.partitions_offset_begin,
1615                                                args.disk_size,
1616                                                args.disk_alignment,
1617                                                args.disk_guid)
1618    except BptParsingError as e:
1619      sys.stderr.write('{}: Error parsing: {}\n'.format(e.filename, e.message))
1620      sys.exit(1)
1621    except BptError as e:
1622      sys.stderr.write('{}\n'.format(e.message))
1623      sys.exit(1)
1624
1625    if args.output_json:
1626      args.output_json.write(json_str)
1627    if args.output_gpt:
1628      args.output_gpt.write(gpt_bin)
1629
1630  def make_disk_image(self, args):
1631    """Implements the 'make_disk_image' sub-command."""
1632    if not args.input:
1633      sys.stderr.write('Option --input is required.\n')
1634      sys.exit(1)
1635    if not args.output:
1636      sys.stderr.write('Option --ouptut is required.\n')
1637      sys.exit(1)
1638
1639    try:
1640      self.bpt.make_disk_image(args.output,
1641                               args.input,
1642                               args.image,
1643                               args.allow_empty_partitions)
1644    except BptParsingError as e:
1645      sys.stderr.write('{}: Error parsing: {}\n'.format(e.filename, e.message))
1646      sys.exit(1)
1647    except BptError as e:
1648      sys.stderr.write('{}\n'.format(e.message))
1649      sys.exit(1)
1650
1651if __name__ == '__main__':
1652  tool = BptTool()
1653  tool.run(sys.argv)
1654