# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bisect
import os
import struct
import threading
from hashlib import sha1

import rangelib


class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and optional file map and clobbered_blocks) into
  an image object suitable for passing to BlockImageDiff. file_map contains
  the mapping between files and their blocks. clobbered_blocks contains the
  set of blocks that should always be written to the target regardless of
  the old contents (i.e. copying instead of patching). clobbered_blocks
  should be given in the form of a string like "0" or "0 1-5 8".
  """
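
  # A minimal usage sketch (the image/map file names and the output fd below
  # are hypothetical):
  #
  #   simg = SparseImage("system.img", file_map_fn="system.map")
  #   print(simg.TotalSha1())
  #   simg.WriteRangeDataToFd(simg.care_map, out_fd)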

  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
               mode="rb", build_map=True, allow_shared_blocks=False):
    self.simg_f = f = open(simg_fn, mode)

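    # The image starts with a 28-byte sparse header, laid out little-endian
    # as: magic (u32), major_version (u16), minor_version (u16),
    # file_hdr_sz (u16), chunk_hdr_sz (u16), blk_sz (u32), total_blks (u32),
    # total_chunks (u32), plus a trailing u32 (the image checksum in the
    # sparse format) that this code does not use.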
    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]
    self.total_blocks = total_blks = header[6]
    self.total_chunks = total_chunks = header[7]

    if magic != 0xED26FF3A:
      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
    if major_version != 1 or minor_version != 0:
      raise ValueError("I know about version 1.0, but this is version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("File header size was expected to be 28, but is %u." %
                       (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Chunk header size was expected to be 12, but is %u." %
                       (chunk_hdr_sz,))

    print("Total of %u %u-byte output blocks in %u input chunks."
          % (total_blks, blk_sz, total_chunks))

    if not build_map:
      return

    pos = 0   # in blocks
    care_data = []
    self.offset_map = offset_map = []
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

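    # Each chunk starts with a 12-byte header: chunk_type (u16), a reserved
    # u16, chunk_sz (u32, the output size in blocks) and total_sz (u32, the
    # input size in bytes, including this header). Chunk types: 0xCAC1 raw,
    # 0xCAC2 fill, 0xCAC3 don't care, 0xCAC4 CRC32.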
    for i in range(total_chunks):
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

      if chunk_type == 0xCAC1:
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        else:
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, f.tell(), None))
          pos += chunk_sz
          f.seek(data_sz, os.SEEK_CUR)

      elif chunk_type == 0xCAC2:
        fill_data = f.read(4)
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

      elif chunk_type == 0xCAC3:
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz,))
        else:
          pos += chunk_sz

      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    self.generator_lock = threading.Lock()

    self.care_map = rangelib.RangeSet(care_data)
    self.offset_index = [i[0] for i in offset_map]

    # Bug: 20881595
    # Introduce extended blocks as a workaround for the bug. dm-verity may
    # touch blocks that are not in the care_map due to block device
    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
    # are the maximum read-ahead we configure for dm-verity block devices.
    extended = self.care_map.extend(512)
    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
    extended = extended.intersect(all_blocks).subtract(self.care_map)
    self.extended = extended

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
                            allow_shared_blocks)
    else:
      self.file_map = {"__DATA": self.care_map}

  def AppendFillChunk(self, data, blocks):
    f = self.simg_f

    # Append a fill chunk
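    # (a 12-byte chunk header -- type 0xCAC2, a reserved field, chunk_sz and
    # total_sz -- followed by the 4-byte fill value, hence total_sz of 16).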
    f.seek(0, os.SEEK_END)
    f.write(struct.pack("<2H3I", 0xCAC2, 0, blocks, 16, data))

    # Update the sparse header
    self.total_blocks += blocks
    self.total_chunks += 1

    f.seek(16, os.SEEK_SET)
    f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))

  def RangeSha1(self, ranges):
    """Returns the SHA-1 hash of the data in the given ranges."""
    h = sha1()
    for data in self._GetRangeData(ranges):
      h.update(data)
    return h.hexdigest()

  def ReadRangeSet(self, ranges):
    """Returns the data in the given ranges, as a list of pieces."""
    return list(self._GetRangeData(ranges))

  def TotalSha1(self, include_clobbered_blocks=False):
    """Returns the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, returns the hash including the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    return self.RangeSha1(ranges)

  def WriteRangeDataToFd(self, ranges, fd):
    """Writes the data in the given ranges to the given file object."""
    for data in self._GetRangeData(ranges):
      fd.write(data)

  def _GetRangeData(self, ranges):
    """Generator that produces all the image data in 'ranges'. The number of
    individual pieces returned is arbitrary (and in particular is not
    necessarily equal to the number of ranges in 'ranges').

    Use a lock to protect the generator so that we will not run two
    instances of this generator on the same object simultaneously."""

    f = self.simg_f
    with self.generator_lock:
      for s, e in ranges:
        to_read = e - s
        idx = bisect.bisect_right(self.offset_index, s) - 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

        # For the first chunk we may be starting partway through it.
        remain = chunk_len - (s - chunk_start)
        this_read = min(remain, to_read)
        if filepos is not None:
          p = filepos + ((s - chunk_start) * self.blocksize)
          f.seek(p, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
          # fill_data is a 4-byte pattern; repeat it blocksize/4 times to
          # cover each output block.
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

        while to_read > 0:
          # Continue with following chunks if this range spans multiple chunks.
          idx += 1
          chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
          this_read = min(chunk_len, to_read)
          if filepos is not None:
            f.seek(filepos, os.SEEK_SET)
            yield f.read(this_read * self.blocksize)
          else:
            yield fill_data * (this_read * (self.blocksize >> 2))
          to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks, allow_shared_blocks):
    """Loads the given block map file.

    Args:
      fn: The filename of the block map file.
      clobbered_blocks: A RangeSet instance for the clobbered blocks.
      allow_shared_blocks: Whether having shared blocks is allowed.
    """
    remaining = self.care_map
    self.file_map = out = {}

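    # Each line of the map file is expected to be "<filename> <ranges>", with
    # the ranges in the same textual form as clobbered_blocks, e.g.
    # "/system/bin/sh 8450-8462 8480" (the path here is illustrative).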
    with open(fn) as f:
      for line in f:
        fn, ranges = line.split(None, 1)
        ranges = rangelib.RangeSet.parse(ranges)

        if allow_shared_blocks:
          # Find the shared blocks that have been claimed by others.
          shared_blocks = ranges.subtract(remaining)
          if shared_blocks:
            ranges = ranges.subtract(shared_blocks)
            if not ranges:
              continue

            # Tag the entry so that we can skip applying imgdiff on this file.
            ranges.extra['uses_shared_blocks'] = True

        out[fn] = ranges
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)

    # For all the remaining blocks in the care_map (i.e., those that
    # aren't part of the data for any file nor part of the clobbered_blocks),
    # divide them into blocks that are all zero and blocks that aren't.
    # (Zero blocks are handled specially because (1) there are usually
    # a lot of them and (2) bsdiff handles files with long sequences of
    # repeated bytes especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    reference = b'\0' * self.blocksize

    # Workaround for bug 23227672. For squashfs, we don't have a system.map,
    # so the whole system image will be treated as a single file. But due to
    # an unknown bug, the updater will be killed by OOM when writing back the
    # patched image to flash (observed on lenok-userdebug MEA49). Until we
    # have a real fix, we evenly divide the non-zero blocks into smaller
    # groups (currently 1024 blocks or 4MB per group).
    # Bug: 23227672
    MAX_BLOCKS_PER_GROUP = 1024
    nonzero_groups = []

    f = self.simg_f
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b - chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b + 1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b + 1)

          # Each block contributes a (start, end) pair to nonzero_blocks, so
          # compare against twice the group size to get 1024 blocks per group.
          if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP * 2:
            nonzero_groups.append(nonzero_blocks)
            # Clear the list.
            nonzero_blocks = []

    if nonzero_blocks:
      nonzero_groups.append(nonzero_blocks)
      nonzero_blocks = []

    assert zero_blocks or nonzero_groups or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_groups:
      for i, blocks in enumerate(nonzero_groups):
        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks

  def ResetFileMap(self):
    """Throws away the file map and treats the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}