1 /*
2  * Copyright (C) 2009 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 /*
18  * This program constructs binary patches for images -- such as boot.img and recovery.img -- that
19  * consist primarily of large chunks of gzipped data interspersed with uncompressed data.  Doing a
20  * naive bsdiff of these files is not useful because small changes in the data lead to large
21  * changes in the compressed bitstream; bsdiff patches of gzipped data are typically as large as
22  * the data itself.
23  *
24  * To patch these usefully, we break the source and target images up into chunks of two types:
25  * "normal" and "gzip".  Normal chunks are simply patched using a plain bsdiff.  Gzip chunks are
26  * first expanded, then a bsdiff is applied to the uncompressed data, then the patched data is
27  * gzipped using the same encoder parameters.  Patched chunks are concatenated together to create
28  * the output file; the output image should be *exactly* the same series of bytes as the target
29  * image used originally to generate the patch.
30  *
31  * To work well with this tool, the gzipped sections of the target image must have been generated
32  * using the same deflate encoder that is available in applypatch, namely, the one in the zlib
33  * library.  In practice this means that images should be compressed using the "minigzip" tool
34  * included in the zlib distribution, not the GNU gzip program.
35  *
36  * An "imgdiff" patch consists of a header describing the chunk structure of the file and any
37  * encoding parameters needed for the gzipped chunks, followed by N bsdiff patches, one per chunk.
38  *
39  * For a diff to be generated, the source and target must be in well-formed zip archive format;
40  * or they are image files with the same "chunk" structure: that is, the same number of gzipped and
41  * normal chunks in the same order.  Android boot and recovery images currently consist of five
42  * chunks: a small normal header, a gzipped kernel, a small normal section, a gzipped ramdisk, and
43  * finally a small normal footer.
44  *
45  * Caveats:  we locate gzipped sections within the source and target images by searching for the
46  * byte sequence 1f8b0800:  1f8b is the gzip magic number; 08 specifies the "deflate" encoding
47  * [the only encoding supported by the gzip standard]; and 00 is the flags byte.  We do not
48  * currently support any extra header fields (which would be indicated by a nonzero flags byte).
49  * We also don't handle the case when that byte sequence appears spuriously in the file.  (Note
50  * that it would have to occur spuriously within a normal chunk to be a problem.)
51  *
52  *
53  * The imgdiff patch header looks like this:
54  *
55  *    "IMGDIFF2"                  (8)   [magic number and version]
56  *    chunk count                 (4)
57  *    for each chunk:
58  *        chunk type              (4)   [CHUNK_{NORMAL, GZIP, DEFLATE, RAW}]
59  *        if chunk type == CHUNK_NORMAL:
60  *           source start         (8)
61  *           source len           (8)
62  *           bsdiff patch offset  (8)   [from start of patch file]
63  *        if chunk type == CHUNK_GZIP:      (version 1 only)
64  *           source start         (8)
65  *           source len           (8)
66  *           bsdiff patch offset  (8)   [from start of patch file]
67  *           source expanded len  (8)   [size of uncompressed source]
68  *           target expected len  (8)   [size of uncompressed target]
69  *           gzip level           (4)
70  *                method          (4)
71  *                windowBits      (4)
72  *                memLevel        (4)
73  *                strategy        (4)
74  *           gzip header len      (4)
75  *           gzip header          (gzip header len)
76  *           gzip footer          (8)
77  *        if chunk type == CHUNK_DEFLATE:   (version 2 only)
78  *           source start         (8)
79  *           source len           (8)
80  *           bsdiff patch offset  (8)   [from start of patch file]
81  *           source expanded len  (8)   [size of uncompressed source]
82  *           target expected len  (8)   [size of uncompressed target]
83  *           gzip level           (4)
84  *                method          (4)
85  *                windowBits      (4)
86  *                memLevel        (4)
87  *                strategy        (4)
88  *        if chunk type == RAW:             (version 2 only)
89  *           target len           (4)
90  *           data                 (target len)
91  *
92  * All integers are little-endian.  "source start" and "source len" specify the section of the
93  * input image that comprises this chunk, including the gzip header and footer for gzip chunks.
94  * "source expanded len" is the size of the uncompressed source data.  "target expected len" is the
95  * size of the uncompressed data after applying the bsdiff patch.  The next five parameters
96  * specify the zlib parameters to be used when compressing the patched data, and the next three
97  * specify the header and footer to be wrapped around the compressed data to create the output
98  * chunk (so that header contents like the timestamp are recreated exactly).
99  *
100  * After the header there are 'chunk count' bsdiff patches; the offset of each from the beginning
101  * of the file is specified in the header.
102  *
103  * This tool can take an optional file of "bonus data".  This is an extra file of data that is
104  * appended to chunk #1 after it is compressed (it must be a CHUNK_DEFLATE chunk).  The same file
105  * must be available (and passed to applypatch with -b) when applying the patch.  This is used to
106  * reduce the size of recovery-from-boot patches by combining the boot image with recovery ramdisk
107  * information that is stored on the system partition.
108  *
 * When generating the patch between two zip files, this tool has an option "--block-limit" to
 * split the large source/target files into several pairs of pieces, where each piece has at most
 * *limit* blocks.  When this option is used, we also need to output the split info into the file
 * path specified by "--split-info".
113  *
114  * Format of split info file:
115  *   2                                      [version of imgdiff]
116  *   n                                      [count of split pieces]
117  *   <patch_size>, <tgt_size>, <src_range>  [size and ranges for split piece#1]
118  *   ...
119  *   <patch_size>, <tgt_size>, <src_range>  [size and ranges for split piece#n]
120  *
 * To split a pair of large zip files, we walk through the chunks in the target zip and search by
 * entry_name in the source zip.  If the entry_name is non-empty and a matching entry in the source
 * is found, we'll add the source entry to the current split source image; otherwise we'll skip
 * this chunk and later do bsdiff between all the skipped chunks and the whole split source image.
 * We move on to the next pair of pieces once the size of the split source image reaches the block
 * limit.
127  *
128  * After the split, the target pieces are continuous and block aligned, while the source pieces
129  * are mutually exclusive.  Some of the source blocks may not be used if there's no matching
130  * entry_name in the target; as a result, they won't be included in any of these split source
 * images.  Then we will generate patches accordingly between each split image pair; in particular,
 * the unmatched chunks in the split target will diff against the entire split source image.
133  *
134  * For example:
135  * Input: [src_image, tgt_image]
136  * Split: [src-0, tgt-0; src-1, tgt-1, src-2, tgt-2]
137  * Diff:  [  patch-0;      patch-1;      patch-2]
138  *
139  * Patch: [(src-0, patch-0) = tgt-0; (src-1, patch-1) = tgt-1; (src-2, patch-2) = tgt-2]
140  * Concatenate: [tgt-0 + tgt-1 + tgt-2 = tgt_image]
141  */
142 
143 #include "applypatch/imgdiff.h"
144 
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <algorithm>
#include <limits>
#include <string>
#include <vector>

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/memory.h>
#include <android-base/parseint.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <android-base/unique_fd.h>
#include <bsdiff/bsdiff.h>
#include <ziparchive/zip_archive.h>
#include <zlib.h>

#include "applypatch/imgdiff_image.h"
#include "otautil/rangeset.h"
172 
173 using android::base::get_unaligned;
174 
// Imgdiff patch-format version; serialized as the final character of the
// "IMGDIFF#" magic string (see the format description at the top of the file).
static constexpr size_t VERSION = 2;

// We assume the header "IMGDIFF#" is 8 bytes.
static_assert(VERSION <= 9, "VERSION occupies more than one byte");

// Filesystem block size used when aligning/splitting chunks in zip mode.
static constexpr size_t BLOCK_SIZE = 4096;
// Scratch-buffer size (32 KiB) for zlib recompression in TryReconstruction().
static constexpr size_t BUFFER_SIZE = 0x8000;
182 
183 // If we use this function to write the offset and length (type size_t), their values should not
184 // exceed 2^63; because the signed bit will be casted away.
Write8(int fd,int64_t value)185 static inline bool Write8(int fd, int64_t value) {
186   return android::base::WriteFully(fd, &value, sizeof(int64_t));
187 }
188 
189 // Similarly, the value should not exceed 2^31 if we are casting from size_t (e.g. target chunk
190 // size).
Write4(int fd,int32_t value)191 static inline bool Write4(int fd, int32_t value) {
192   return android::base::WriteFully(fd, &value, sizeof(int32_t));
193 }
194 
195 // Trim the head or tail to align with the block size. Return false if the chunk has nothing left
196 // after alignment.
AlignHead(size_t * start,size_t * length)197 static bool AlignHead(size_t* start, size_t* length) {
198   size_t residual = (*start % BLOCK_SIZE == 0) ? 0 : BLOCK_SIZE - *start % BLOCK_SIZE;
199 
200   if (*length <= residual) {
201     *length = 0;
202     return false;
203   }
204 
205   // Trim the data in the beginning.
206   *start += residual;
207   *length -= residual;
208   return true;
209 }
210 
AlignTail(size_t * start,size_t * length)211 static bool AlignTail(size_t* start, size_t* length) {
212   size_t residual = (*start + *length) % BLOCK_SIZE;
213   if (*length <= residual) {
214     *length = 0;
215     return false;
216   }
217 
218   // Trim the data in the end.
219   *length -= residual;
220   return true;
221 }
222 
// Remove the used blocks from the source chunk to make sure the source ranges are mutually
// exclusive after split. Return false if we fail to get the non-overlapped ranges. In such
// a case, we'll skip the entire source chunk.
static bool RemoveUsedBlocks(size_t* start, size_t* length, const SortedRangeSet& used_ranges) {
  // Fast path: the chunk is already disjoint from every used range.
  if (!used_ranges.Overlaps(*start, *length)) {
    return true;
  }

  // TODO find the largest non-overlap chunk.
  LOG(INFO) << "Removing block " << used_ranges.ToString() << " from " << *start << " - "
            << *start + *length - 1;

  // If there's no duplicate entry name, we should only overlap in the head or tail block. Try to
  // trim both blocks. Skip this source chunk in case it still overlaps with the used ranges.
  // Note: AlignHead mutates *start/*length in place, so the AlignTail attempt
  // below operates on the already head-trimmed range.
  if (AlignHead(start, length) && !used_ranges.Overlaps(*start, *length)) {
    return true;
  }
  if (AlignTail(start, length) && !used_ranges.Overlaps(*start, *length)) {
    return true;
  }

  LOG(WARNING) << "Failed to remove the overlapped block ranges; skip the source";
  return false;
}
247 
// Long command-line options. The three long-only options carry val 0 and a
// null flag pointer; presumably main() tells them apart via the option index
// returned by getopt_long — confirm against the (not visible here) main().
static const struct option OPTIONS[] = {
  { "zip-mode", no_argument, nullptr, 'z' },          // treat inputs as zip archives
  { "bonus-file", required_argument, nullptr, 'b' },  // extra data appended to chunk #1
  { "block-limit", required_argument, nullptr, 0 },   // max blocks per split piece
  { "debug-dir", required_argument, nullptr, 0 },     // directory to dump split pieces
  { "split-info", required_argument, nullptr, 0 },    // output path for split metadata
  { "verbose", no_argument, nullptr, 'v' },
  { nullptr, 0, nullptr, 0 },
};
257 
// Constructs a chunk of |type| covering |raw_data_len| bytes of |file_content|
// starting at offset |start|. The chunk only aliases |file_content| (no copy),
// so the container must outlive the chunk. |entry_name| is the associated zip
// entry name; presumably empty outside zip mode — confirm with callers.
ImageChunk::ImageChunk(int type, size_t start, const std::vector<uint8_t>* file_content,
                       size_t raw_data_len, std::string entry_name)
    : type_(type),
      start_(start),
      input_file_ptr_(file_content),
      raw_data_len_(raw_data_len),
      compress_level_(6),  // zlib default; ReconstructDeflateChunk() may raise it to 9.
      entry_name_(std::move(entry_name)) {
  CHECK(file_content != nullptr) << "input file container can't be nullptr";
}
268 
// Returns a pointer to this chunk's raw (still compressed, for deflate chunks)
// bytes inside the backing file buffer.
const uint8_t* ImageChunk::GetRawData() const {
  CHECK_LE(start_ + raw_data_len_, input_file_ptr_->size());
  return input_file_ptr_->data() + start_;
}
273 
DataForPatch() const274 const uint8_t * ImageChunk::DataForPatch() const {
275   if (type_ == CHUNK_DEFLATE) {
276     return uncompressed_data_.data();
277   }
278   return GetRawData();
279 }
280 
DataLengthForPatch() const281 size_t ImageChunk::DataLengthForPatch() const {
282   if (type_ == CHUNK_DEFLATE) {
283     return uncompressed_data_.size();
284   }
285   return raw_data_len_;
286 }
287 
// Logs a one-line summary (type, start offset, patch-data length, entry name)
// of this chunk; |index| is its position within the image's chunk list.
void ImageChunk::Dump(size_t index) const {
  LOG(INFO) << "chunk: " << index << ", type: " << type_ << ", start: " << start_
            << ", len: " << DataLengthForPatch() << ", name: " << entry_name_;
}
292 
operator ==(const ImageChunk & other) const293 bool ImageChunk::operator==(const ImageChunk& other) const {
294   if (type_ != other.type_) {
295     return false;
296   }
297   return (raw_data_len_ == other.raw_data_len_ &&
298           memcmp(GetRawData(), other.GetRawData(), raw_data_len_) == 0);
299 }
300 
SetUncompressedData(std::vector<uint8_t> data)301 void ImageChunk::SetUncompressedData(std::vector<uint8_t> data) {
302   uncompressed_data_ = std::move(data);
303 }
304 
SetBonusData(const std::vector<uint8_t> & bonus_data)305 bool ImageChunk::SetBonusData(const std::vector<uint8_t>& bonus_data) {
306   if (type_ != CHUNK_DEFLATE) {
307     return false;
308   }
309   uncompressed_data_.insert(uncompressed_data_.end(), bonus_data.begin(), bonus_data.end());
310   return true;
311 }
312 
ChangeDeflateChunkToNormal()313 void ImageChunk::ChangeDeflateChunkToNormal() {
314   if (type_ != CHUNK_DEFLATE) return;
315   type_ = CHUNK_NORMAL;
316   // No need to clear the entry name.
317   uncompressed_data_.clear();
318 }
319 
IsAdjacentNormal(const ImageChunk & other) const320 bool ImageChunk::IsAdjacentNormal(const ImageChunk& other) const {
321   if (type_ != CHUNK_NORMAL || other.type_ != CHUNK_NORMAL) {
322     return false;
323   }
324   return (other.start_ == start_ + raw_data_len_);
325 }
326 
MergeAdjacentNormal(const ImageChunk & other)327 void ImageChunk::MergeAdjacentNormal(const ImageChunk& other) {
328   CHECK(IsAdjacentNormal(other));
329   raw_data_len_ = raw_data_len_ + other.raw_data_len_;
330 }
331 
MakePatch(const ImageChunk & tgt,const ImageChunk & src,std::vector<uint8_t> * patch_data,bsdiff::SuffixArrayIndexInterface ** bsdiff_cache)332 bool ImageChunk::MakePatch(const ImageChunk& tgt, const ImageChunk& src,
333                            std::vector<uint8_t>* patch_data,
334                            bsdiff::SuffixArrayIndexInterface** bsdiff_cache) {
335 #if defined(__ANDROID__)
336   char ptemp[] = "/data/local/tmp/imgdiff-patch-XXXXXX";
337 #else
338   char ptemp[] = "/tmp/imgdiff-patch-XXXXXX";
339 #endif
340 
341   int fd = mkstemp(ptemp);
342   if (fd == -1) {
343     PLOG(ERROR) << "MakePatch failed to create a temporary file";
344     return false;
345   }
346   close(fd);
347 
348   int r = bsdiff::bsdiff(src.DataForPatch(), src.DataLengthForPatch(), tgt.DataForPatch(),
349                          tgt.DataLengthForPatch(), ptemp, bsdiff_cache);
350   if (r != 0) {
351     LOG(ERROR) << "bsdiff() failed: " << r;
352     return false;
353   }
354 
355   android::base::unique_fd patch_fd(open(ptemp, O_RDONLY));
356   if (patch_fd == -1) {
357     PLOG(ERROR) << "Failed to open " << ptemp;
358     return false;
359   }
360   struct stat st;
361   if (fstat(patch_fd, &st) != 0) {
362     PLOG(ERROR) << "Failed to stat patch file " << ptemp;
363     return false;
364   }
365 
366   size_t sz = static_cast<size_t>(st.st_size);
367 
368   patch_data->resize(sz);
369   if (!android::base::ReadFully(patch_fd, patch_data->data(), sz)) {
370     PLOG(ERROR) << "Failed to read " << ptemp;
371     unlink(ptemp);
372     return false;
373   }
374 
375   unlink(ptemp);
376 
377   return true;
378 }
379 
ReconstructDeflateChunk()380 bool ImageChunk::ReconstructDeflateChunk() {
381   if (type_ != CHUNK_DEFLATE) {
382     LOG(ERROR) << "Attempted to reconstruct non-deflate chunk";
383     return false;
384   }
385 
386   // We only check two combinations of encoder parameters:  level 6 (the default) and level 9
387   // (the maximum).
388   for (int level = 6; level <= 9; level += 3) {
389     if (TryReconstruction(level)) {
390       compress_level_ = level;
391       return true;
392     }
393   }
394 
395   return false;
396 }
397 
398 /*
399  * Takes the uncompressed data stored in the chunk, compresses it using the zlib parameters stored
400  * in the chunk, and checks that it matches exactly the compressed data we started with (also
401  * stored in the chunk).
402  */
TryReconstruction(int level)403 bool ImageChunk::TryReconstruction(int level) {
404   z_stream strm;
405   strm.zalloc = Z_NULL;
406   strm.zfree = Z_NULL;
407   strm.opaque = Z_NULL;
408   strm.avail_in = uncompressed_data_.size();
409   strm.next_in = uncompressed_data_.data();
410   int ret = deflateInit2(&strm, level, METHOD, WINDOWBITS, MEMLEVEL, STRATEGY);
411   if (ret < 0) {
412     LOG(ERROR) << "Failed to initialize deflate: " << ret;
413     return false;
414   }
415 
416   std::vector<uint8_t> buffer(BUFFER_SIZE);
417   size_t offset = 0;
418   do {
419     strm.avail_out = buffer.size();
420     strm.next_out = buffer.data();
421     ret = deflate(&strm, Z_FINISH);
422     if (ret < 0) {
423       LOG(ERROR) << "Failed to deflate: " << ret;
424       return false;
425     }
426 
427     size_t compressed_size = buffer.size() - strm.avail_out;
428     if (memcmp(buffer.data(), input_file_ptr_->data() + start_ + offset, compressed_size) != 0) {
429       // mismatch; data isn't the same.
430       deflateEnd(&strm);
431       return false;
432     }
433     offset += compressed_size;
434   } while (ret != Z_STREAM_END);
435   deflateEnd(&strm);
436 
437   if (offset != raw_data_len_) {
438     // mismatch; ran out of data before we should have.
439     return false;
440   }
441   return true;
442 }
443 
// Constructs a patch chunk from a target chunk, its matching source chunk, and
// the bsdiff patch |data| computed between their patchable payloads.
PatchChunk::PatchChunk(const ImageChunk& tgt, const ImageChunk& src, std::vector<uint8_t> data)
    : type_(tgt.GetType()),
      source_start_(src.GetStartOffset()),
      source_len_(src.GetRawDataLength()),
      source_uncompressed_len_(src.DataLengthForPatch()),
      target_start_(tgt.GetStartOffset()),
      target_len_(tgt.GetRawDataLength()),
      target_uncompressed_len_(tgt.DataLengthForPatch()),
      target_compress_level_(tgt.GetCompressLevel()),
      data_(std::move(data)) {}
454 
// Construct a CHUNK_RAW patch from the target data directly.
// No source and no bsdiff patch are involved; |data_| holds a verbatim copy of
// the target chunk's raw bytes, inlined into the patch header on write.
PatchChunk::PatchChunk(const ImageChunk& tgt)
    : type_(CHUNK_RAW),
      source_start_(0),
      source_len_(0),
      source_uncompressed_len_(0),
      target_start_(tgt.GetStartOffset()),
      target_len_(tgt.GetRawDataLength()),
      target_uncompressed_len_(tgt.DataLengthForPatch()),
      target_compress_level_(tgt.GetCompressLevel()),
      data_(tgt.GetRawData(), tgt.GetRawData() + tgt.GetRawDataLength()) {}
466 
467 // Return true if raw data is smaller than the patch size.
RawDataIsSmaller(const ImageChunk & tgt,size_t patch_size)468 bool PatchChunk::RawDataIsSmaller(const ImageChunk& tgt, size_t patch_size) {
469   size_t target_len = tgt.GetRawDataLength();
470   return target_len < patch_size || (tgt.GetType() == CHUNK_NORMAL && target_len <= 160);
471 }
472 
UpdateSourceOffset(const SortedRangeSet & src_range)473 void PatchChunk::UpdateSourceOffset(const SortedRangeSet& src_range) {
474   if (type_ == CHUNK_DEFLATE) {
475     source_start_ = src_range.GetOffsetInRangeSet(source_start_);
476   }
477 }
478 
479 // Header size:
480 // header_type    4 bytes
481 // CHUNK_NORMAL   8*3 = 24 bytes
482 // CHUNK_DEFLATE  8*5 + 4*5 = 60 bytes
483 // CHUNK_RAW      4 bytes + patch_size
GetHeaderSize() const484 size_t PatchChunk::GetHeaderSize() const {
485   switch (type_) {
486     case CHUNK_NORMAL:
487       return 4 + 8 * 3;
488     case CHUNK_DEFLATE:
489       return 4 + 8 * 5 + 4 * 5;
490     case CHUNK_RAW:
491       return 4 + 4 + data_.size();
492     default:
493       CHECK(false) << "unexpected chunk type: " << type_;  // Should not reach here.
494       return 0;
495   }
496 }
497 
// Return the offset of the next patch into the patch data.
// Writes this chunk's header fields at the current position of |fd|. |offset|
// is the absolute offset at which this chunk's bsdiff patch will be placed;
// |index| is used only for logging. CHUNK_RAW inlines its data here and does
// not advance the patch offset. Note: Write4/Write8 results are deliberately
// unchecked here, matching the rest of the header-writing path.
size_t PatchChunk::WriteHeaderToFd(int fd, size_t offset, size_t index) const {
  Write4(fd, type_);
  switch (type_) {
    case CHUNK_NORMAL:
      LOG(INFO) << android::base::StringPrintf("chunk %zu: normal   (%10zu, %10zu)  %10zu", index,
                                               target_start_, target_len_, data_.size());
      Write8(fd, static_cast<int64_t>(source_start_));
      Write8(fd, static_cast<int64_t>(source_len_));
      Write8(fd, static_cast<int64_t>(offset));
      return offset + data_.size();
    case CHUNK_DEFLATE:
      LOG(INFO) << android::base::StringPrintf("chunk %zu: deflate  (%10zu, %10zu)  %10zu", index,
                                               target_start_, target_len_, data_.size());
      Write8(fd, static_cast<int64_t>(source_start_));
      Write8(fd, static_cast<int64_t>(source_len_));
      Write8(fd, static_cast<int64_t>(offset));
      Write8(fd, static_cast<int64_t>(source_uncompressed_len_));
      Write8(fd, static_cast<int64_t>(target_uncompressed_len_));
      Write4(fd, target_compress_level_);
      // The fixed zlib parameters applypatch must reuse to reproduce the
      // target bytes exactly.
      Write4(fd, ImageChunk::METHOD);
      Write4(fd, ImageChunk::WINDOWBITS);
      Write4(fd, ImageChunk::MEMLEVEL);
      Write4(fd, ImageChunk::STRATEGY);
      return offset + data_.size();
    case CHUNK_RAW:
      LOG(INFO) << android::base::StringPrintf("chunk %zu: raw      (%10zu, %10zu)", index,
                                               target_start_, target_len_);
      Write4(fd, static_cast<int32_t>(data_.size()));
      if (!android::base::WriteFully(fd, data_.data(), data_.size())) {
        CHECK(false) << "Failed to write " << data_.size() << " bytes patch";
      }
      return offset;  // Raw data is inlined; no separate patch blob follows.
    default:
      CHECK(false) << "unexpected chunk type: " << type_;
      return offset;
  }
}
536 
PatchSize() const537 size_t PatchChunk::PatchSize() const {
538   if (type_ == CHUNK_RAW) {
539     return GetHeaderSize();
540   }
541   return GetHeaderSize() + data_.size();
542 }
543 
// Write the contents of |patch_chunks| to |patch_fd|.
// Layout: 8-byte magic ("IMGDIFF" + VERSION digit), 4-byte chunk count,
// per-chunk headers (each recording the absolute offset of its bsdiff patch),
// then the bsdiff patch blobs in chunk order. CHUNK_RAW chunks inline their
// data in the header and contribute no blob.
bool PatchChunk::WritePatchDataToFd(const std::vector<PatchChunk>& patch_chunks, int patch_fd) {
  // Figure out how big the imgdiff file header is going to be, so that we can correctly compute
  // the offset of each bsdiff patch within the file.
  size_t total_header_size = 12;  // 8-byte magic + 4-byte chunk count.
  for (const auto& patch : patch_chunks) {
    total_header_size += patch.GetHeaderSize();
  }

  // The first patch blob starts right after the headers.
  size_t offset = total_header_size;

  // Write out the headers.
  if (!android::base::WriteStringToFd("IMGDIFF" + std::to_string(VERSION), patch_fd)) {
    PLOG(ERROR) << "Failed to write \"IMGDIFF" << VERSION << "\"";
    return false;
  }

  Write4(patch_fd, static_cast<int32_t>(patch_chunks.size()));
  LOG(INFO) << "Writing " << patch_chunks.size() << " patch headers...";
  for (size_t i = 0; i < patch_chunks.size(); ++i) {
    // Each header write returns the offset at which the NEXT patch will start.
    offset = patch_chunks[i].WriteHeaderToFd(patch_fd, offset, i);
  }

  // Append each chunk's bsdiff patch, in order.
  for (const auto& patch : patch_chunks) {
    if (patch.type_ == CHUNK_RAW) {
      continue;  // Raw data was already inlined in its header.
    }
    if (!android::base::WriteFully(patch_fd, patch.data_.data(), patch.data_.size())) {
      PLOG(ERROR) << "Failed to write " << patch.data_.size() << " bytes patch to patch_fd";
      return false;
    }
  }

  return true;
}
580 
// Bounds-checked mutable access to the i-th chunk (aborts on out-of-range).
ImageChunk& Image::operator[](size_t i) {
  CHECK_LT(i, chunks_.size());
  return chunks_[i];
}
585 
// Bounds-checked const access to the i-th chunk (aborts on out-of-range).
const ImageChunk& Image::operator[](size_t i) const {
  CHECK_LT(i, chunks_.size());
  return chunks_[i];
}
590 
// Merges every run of adjacent CHUNK_NORMAL chunks into a single chunk,
// compacting chunks_ in place. |cur| scans forward over the original entries
// while |merged_last| marks where the next surviving chunk should land; the
// leftover tail slots are erased at the end.
void Image::MergeAdjacentNormalChunks() {
  size_t merged_last = 0, cur = 0;
  while (cur < chunks_.size()) {
    // Look for normal chunks adjacent to the current one. If such chunk exists, extend the
    // length of the current normal chunk.
    size_t to_check = cur + 1;
    while (to_check < chunks_.size() && chunks_[cur].IsAdjacentNormal(chunks_[to_check])) {
      chunks_[cur].MergeAdjacentNormal(chunks_[to_check]);
      to_check++;
    }

    // Shift the (possibly extended) chunk down into its compacted slot.
    if (merged_last != cur) {
      chunks_[merged_last] = std::move(chunks_[cur]);
    }
    merged_last++;
    cur = to_check;
  }
  // Drop the stale entries left behind by the compaction.
  if (merged_last < chunks_.size()) {
    chunks_.erase(chunks_.begin() + merged_last, chunks_.end());
  }
}
612 
DumpChunks() const613 void Image::DumpChunks() const {
614   std::string type = is_source_ ? "source" : "target";
615   LOG(INFO) << "Dumping chunks for " << type;
616   for (size_t i = 0; i < chunks_.size(); ++i) {
617     chunks_[i].Dump(i);
618   }
619 }
620 
ReadFile(const std::string & filename,std::vector<uint8_t> * file_content)621 bool Image::ReadFile(const std::string& filename, std::vector<uint8_t>* file_content) {
622   CHECK(file_content != nullptr);
623 
624   android::base::unique_fd fd(open(filename.c_str(), O_RDONLY));
625   if (fd == -1) {
626     PLOG(ERROR) << "Failed to open " << filename;
627     return false;
628   }
629   struct stat st;
630   if (fstat(fd, &st) != 0) {
631     PLOG(ERROR) << "Failed to stat " << filename;
632     return false;
633   }
634 
635   size_t sz = static_cast<size_t>(st.st_size);
636   file_content->resize(sz);
637   if (!android::base::ReadFully(fd, file_content->data(), sz)) {
638     PLOG(ERROR) << "Failed to read " << filename;
639     return false;
640   }
641   fd.reset();
642 
643   return true;
644 }
645 
// Loads |filename| into memory, opens it as a zip archive, and builds the
// chunk list from its entries. Returns false on any read/parse failure.
bool ZipModeImage::Initialize(const std::string& filename) {
  if (!ReadFile(filename, &file_content_)) {
    return false;
  }

  // Omit the trailing zeros before we pass the file to ziparchive handler.
  size_t zipfile_size;
  if (!GetZipFileSize(&zipfile_size)) {
    LOG(ERROR) << "Failed to parse the actual size of " << filename;
    return false;
  }
  ZipArchiveHandle handle;
  int err = OpenArchiveFromMemory(const_cast<uint8_t*>(file_content_.data()), zipfile_size,
                                  filename.c_str(), &handle);
  if (err != 0) {
    LOG(ERROR) << "Failed to open zip file " << filename << ": " << ErrorCodeString(err);
    // NOTE(review): this assumes OpenArchiveFromMemory allocates |handle| even
    // on failure, so it must still be closed — confirm against the
    // libziparchive API contract.
    CloseArchive(handle);
    return false;
  }

  if (!InitializeChunks(filename, handle)) {
    CloseArchive(handle);
    return false;
  }

  CloseArchive(handle);
  return true;
}
674 
675 // Iterate the zip entries and compose the image chunks accordingly.
InitializeChunks(const std::string & filename,ZipArchiveHandle handle)676 bool ZipModeImage::InitializeChunks(const std::string& filename, ZipArchiveHandle handle) {
677   void* cookie;
678   int ret = StartIteration(handle, &cookie);
679   if (ret != 0) {
680     LOG(ERROR) << "Failed to iterate over entries in " << filename << ": " << ErrorCodeString(ret);
681     return false;
682   }
683 
684   // Create a list of deflated zip entries, sorted by offset.
685   std::vector<std::pair<std::string, ZipEntry64>> temp_entries;
686   std::string name;
687   ZipEntry64 entry;
688   while ((ret = Next(cookie, &entry, &name)) == 0) {
689     if (entry.method == kCompressDeflated || limit_ > 0) {
690       temp_entries.emplace_back(name, entry);
691     }
692   }
693 
694   if (ret != -1) {
695     LOG(ERROR) << "Error while iterating over zip entries: " << ErrorCodeString(ret);
696     return false;
697   }
698   std::sort(temp_entries.begin(), temp_entries.end(),
699             [](auto& entry1, auto& entry2) { return entry1.second.offset < entry2.second.offset; });
700 
701   EndIteration(cookie);
702 
703   // For source chunks, we don't need to compose chunks for the metadata.
704   if (is_source_) {
705     for (auto& entry : temp_entries) {
706       if (!AddZipEntryToChunks(handle, entry.first, &entry.second)) {
707         LOG(ERROR) << "Failed to add " << entry.first << " to source chunks";
708         return false;
709       }
710     }
711 
712     // Add the end of zip file (mainly central directory) as a normal chunk.
713     size_t entries_end = 0;
714     if (!temp_entries.empty()) {
715       CHECK_GE(temp_entries.back().second.offset, 0);
716       if (__builtin_add_overflow(temp_entries.back().second.offset,
717                                  temp_entries.back().second.compressed_length, &entries_end)) {
718         LOG(ERROR) << "`entries_end` overflows on entry with offset "
719                    << temp_entries.back().second.offset << " and compressed_length "
720                    << temp_entries.back().second.compressed_length;
721         return false;
722       }
723     }
724     CHECK_LT(entries_end, file_content_.size());
725     chunks_.emplace_back(CHUNK_NORMAL, entries_end, &file_content_,
726                          file_content_.size() - entries_end);
727 
728     return true;
729   }
730 
731   // For target chunks, add the deflate entries as CHUNK_DEFLATE and the contents between two
732   // deflate entries as CHUNK_NORMAL.
733   size_t pos = 0;
734   size_t nextentry = 0;
735   while (pos < file_content_.size()) {
736     if (nextentry < temp_entries.size() &&
737         static_cast<off64_t>(pos) == temp_entries[nextentry].second.offset) {
738       // Add the next zip entry.
739       std::string entry_name = temp_entries[nextentry].first;
740       if (!AddZipEntryToChunks(handle, entry_name, &temp_entries[nextentry].second)) {
741         LOG(ERROR) << "Failed to add " << entry_name << " to target chunks";
742         return false;
743       }
744       if (temp_entries[nextentry].second.compressed_length > std::numeric_limits<size_t>::max()) {
745         LOG(ERROR) << "Entry " << name << " compressed size exceeds size of address space. "
746                    << entry.compressed_length;
747         return false;
748       }
749       if (__builtin_add_overflow(pos, temp_entries[nextentry].second.compressed_length, &pos)) {
750         LOG(ERROR) << "`pos` overflows after adding "
751                    << temp_entries[nextentry].second.compressed_length;
752         return false;
753       }
754       ++nextentry;
755       continue;
756     }
757 
758     // Use a normal chunk to take all the data up to the start of the next entry.
759     size_t raw_data_len;
760     if (nextentry < temp_entries.size()) {
761       raw_data_len = temp_entries[nextentry].second.offset - pos;
762     } else {
763       raw_data_len = file_content_.size() - pos;
764     }
765     chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, raw_data_len);
766 
767     pos += raw_data_len;
768   }
769 
770   return true;
771 }
772 
AddZipEntryToChunks(ZipArchiveHandle handle,const std::string & entry_name,ZipEntry64 * entry)773 bool ZipModeImage::AddZipEntryToChunks(ZipArchiveHandle handle, const std::string& entry_name,
774                                        ZipEntry64* entry) {
775   if (entry->compressed_length > std::numeric_limits<size_t>::max()) {
776     LOG(ERROR) << "Failed to add " << entry_name
777                << " because's compressed size exceeds size of address space. "
778                << entry->compressed_length;
779     return false;
780   }
781   size_t compressed_len = entry->compressed_length;
782   if (compressed_len == 0) return true;
783 
784   // Split the entry into several normal chunks if it's too large.
785   if (limit_ > 0 && compressed_len > limit_) {
786     int count = 0;
787     while (compressed_len > 0) {
788       size_t length = std::min(limit_, compressed_len);
789       std::string name = entry_name + "-" + std::to_string(count);
790       chunks_.emplace_back(CHUNK_NORMAL, entry->offset + limit_ * count, &file_content_, length,
791                            name);
792 
793       count++;
794       compressed_len -= length;
795     }
796   } else if (entry->method == kCompressDeflated) {
797     size_t uncompressed_len = entry->uncompressed_length;
798     if (uncompressed_len > std::numeric_limits<size_t>::max()) {
799       LOG(ERROR) << "Failed to add " << entry_name
800                  << " because's compressed size exceeds size of address space. "
801                  << uncompressed_len;
802       return false;
803     }
804     std::vector<uint8_t> uncompressed_data(uncompressed_len);
805     int ret = ExtractToMemory(handle, entry, uncompressed_data.data(), uncompressed_len);
806     if (ret != 0) {
807       LOG(ERROR) << "Failed to extract " << entry_name << " with size " << uncompressed_len << ": "
808                  << ErrorCodeString(ret);
809       return false;
810     }
811     ImageChunk curr(CHUNK_DEFLATE, entry->offset, &file_content_, compressed_len, entry_name);
812     curr.SetUncompressedData(std::move(uncompressed_data));
813     chunks_.push_back(std::move(curr));
814   } else {
815     chunks_.emplace_back(CHUNK_NORMAL, entry->offset, &file_content_, compressed_len, entry_name);
816   }
817 
818   return true;
819 }
820 
821 // EOCD record
822 // offset 0: signature 0x06054b50, 4 bytes
823 // offset 4: number of this disk, 2 bytes
824 // ...
825 // offset 20: comment length, 2 bytes
826 // offset 22: comment, n bytes
GetZipFileSize(size_t * input_file_size)827 bool ZipModeImage::GetZipFileSize(size_t* input_file_size) {
828   if (file_content_.size() < 22) {
829     LOG(ERROR) << "File is too small to be a zip file";
830     return false;
831   }
832 
833   // Look for End of central directory record of the zip file, and calculate the actual
834   // zip_file size.
835   for (int i = file_content_.size() - 22; i >= 0; i--) {
836     if (file_content_[i] == 0x50) {
837       if (get_unaligned<uint32_t>(&file_content_[i]) == 0x06054b50) {
838         // double-check: this archive consists of a single "disk".
839         CHECK_EQ(get_unaligned<uint16_t>(&file_content_[i + 4]), 0);
840 
841         uint16_t comment_length = get_unaligned<uint16_t>(&file_content_[i + 20]);
842         size_t file_size = i + 22 + comment_length;
843         CHECK_LE(file_size, file_content_.size());
844         *input_file_size = file_size;
845         return true;
846       }
847     }
848   }
849 
850   // EOCD not found, this file is likely not a valid zip file.
851   return false;
852 }
853 
PseudoSource() const854 ImageChunk ZipModeImage::PseudoSource() const {
855   CHECK(is_source_);
856   return ImageChunk(CHUNK_NORMAL, 0, &file_content_, file_content_.size());
857 }
858 
FindChunkByName(const std::string & name,bool find_normal) const859 const ImageChunk* ZipModeImage::FindChunkByName(const std::string& name, bool find_normal) const {
860   if (name.empty()) {
861     return nullptr;
862   }
863   for (auto& chunk : chunks_) {
864     if (chunk.GetType() != CHUNK_DEFLATE && !find_normal) {
865       continue;
866     }
867 
868     if (chunk.GetEntryName() == name) {
869       return &chunk;
870     }
871 
872     // Edge case when target chunk is split due to size limit but source chunk isn't.
873     if (name == (chunk.GetEntryName() + "-0") || chunk.GetEntryName() == (name + "-0")) {
874       return &chunk;
875     }
876 
877     // TODO handle the .so files with incremental version number.
878     // (e.g. lib/arm64-v8a/libcronet.59.0.3050.4.so)
879   }
880 
881   return nullptr;
882 }
883 
// Non-const overload: delegates to the const version and casts away the constness
// of the result. Safe because *this is genuinely non-const in this context.
ImageChunk* ZipModeImage::FindChunkByName(const std::string& name, bool find_normal) {
  return const_cast<ImageChunk*>(
      static_cast<const ZipModeImage*>(this)->FindChunkByName(name, find_normal));
}
888 
CheckAndProcessChunks(ZipModeImage * tgt_image,ZipModeImage * src_image)889 bool ZipModeImage::CheckAndProcessChunks(ZipModeImage* tgt_image, ZipModeImage* src_image) {
890   for (auto& tgt_chunk : *tgt_image) {
891     if (tgt_chunk.GetType() != CHUNK_DEFLATE) {
892       continue;
893     }
894 
895     ImageChunk* src_chunk = src_image->FindChunkByName(tgt_chunk.GetEntryName());
896     if (src_chunk == nullptr) {
897       tgt_chunk.ChangeDeflateChunkToNormal();
898     } else if (tgt_chunk == *src_chunk) {
899       // If two deflate chunks are identical (eg, the kernel has not changed between two builds),
900       // treat them as normal chunks. This makes applypatch much faster -- it can apply a trivial
901       // patch to the compressed data, rather than uncompressing and recompressing to apply the
902       // trivial patch to the uncompressed data.
903       tgt_chunk.ChangeDeflateChunkToNormal();
904       src_chunk->ChangeDeflateChunkToNormal();
905     } else if (!tgt_chunk.ReconstructDeflateChunk()) {
906       // We cannot recompress the data and get exactly the same bits as are in the input target
907       // image. Treat the chunk as a normal non-deflated chunk.
908       LOG(WARNING) << "Failed to reconstruct target deflate chunk [" << tgt_chunk.GetEntryName()
909                    << "]; treating as normal";
910 
911       tgt_chunk.ChangeDeflateChunkToNormal();
912       src_chunk->ChangeDeflateChunkToNormal();
913     }
914   }
915 
916   // For zips, we only need merge normal chunks for the target:  deflated chunks are matched via
917   // filename, and normal chunks are patched using the entire source file as the source.
918   if (tgt_image->limit_ == 0) {
919     tgt_image->MergeAdjacentNormalChunks();
920     tgt_image->DumpChunks();
921   }
922 
923   return true;
924 }
925 
926 // For each target chunk, look for the corresponding source chunk by the zip_entry name. If
927 // found, add the range of this chunk in the original source file to the block aligned source
928 // ranges. Construct the split src & tgt image once the size of source range reaches limit.
bool ZipModeImage::SplitZipModeImageWithLimit(const ZipModeImage& tgt_image,
                                              const ZipModeImage& src_image,
                                              std::vector<ZipModeImage>* split_tgt_images,
                                              std::vector<ZipModeImage>* split_src_images,
                                              std::vector<SortedRangeSet>* split_src_ranges) {
  CHECK_EQ(tgt_image.limit_, src_image.limit_);
  size_t limit = tgt_image.limit_;

  src_image.DumpChunks();
  LOG(INFO) << "Splitting " << tgt_image.NumOfChunks() << " tgt chunks...";

  SortedRangeSet used_src_ranges;  // ranges used for previous split source images.

  // Reserve the central directory in advance for the last split image.
  // (The last source chunk is the end-of-zip data added during initialization.)
  const auto& central_directory = src_image.cend() - 1;
  CHECK_EQ(CHUNK_NORMAL, central_directory->GetType());
  used_src_ranges.Insert(central_directory->GetStartOffset(),
                         central_directory->DataLengthForPatch());

  // Accumulators for the split currently being built; flushed into a new split
  // image whenever the source budget (`limit`) would be exceeded.
  SortedRangeSet src_ranges;
  std::vector<ImageChunk> split_src_chunks;
  std::vector<ImageChunk> split_tgt_chunks;
  for (auto tgt = tgt_image.cbegin(); tgt != tgt_image.cend(); tgt++) {
    // Match by entry name; normal chunks are eligible too (find_normal=true).
    const ImageChunk* src = src_image.FindChunkByName(tgt->GetEntryName(), true);
    if (src == nullptr) {
      // No source counterpart: carry the target bytes as a normal chunk, patched
      // against whatever source content the split ends up with.
      split_tgt_chunks.emplace_back(CHUNK_NORMAL, tgt->GetStartOffset(), &tgt_image.file_content_,
                                    tgt->GetRawDataLength());
      continue;
    }

    size_t src_offset = src->GetStartOffset();
    size_t src_length = src->GetRawDataLength();

    CHECK(src_length > 0);
    CHECK_LE(src_length, limit);

    // Make sure this source range hasn't been used before so that the src_range pieces don't
    // overlap with each other.
    if (!RemoveUsedBlocks(&src_offset, &src_length, used_src_ranges)) {
      // Entire source range already claimed by an earlier split; fall back to a
      // normal target chunk.
      split_tgt_chunks.emplace_back(CHUNK_NORMAL, tgt->GetStartOffset(), &tgt_image.file_content_,
                                    tgt->GetRawDataLength());
    } else if (src_ranges.blocks() * BLOCK_SIZE + src_length <= limit) {
      // The chunk fits in the current split's source budget; claim its range.
      src_ranges.Insert(src_offset, src_length);

      // Add the deflate source chunk if it hasn't been aligned.
      if (src->GetType() == CHUNK_DEFLATE && src_length == src->GetRawDataLength()) {
        split_src_chunks.push_back(*src);
        split_tgt_chunks.push_back(*tgt);
      } else {
        // TODO split smarter to avoid alignment of large deflate chunks
        split_tgt_chunks.emplace_back(CHUNK_NORMAL, tgt->GetStartOffset(), &tgt_image.file_content_,
                                      tgt->GetRawDataLength());
      }
    } else {
      // Budget exceeded: flush the accumulated chunks into a new split image pair.
      bool added_image = ZipModeImage::AddSplitImageFromChunkList(
          tgt_image, src_image, src_ranges, split_tgt_chunks, split_src_chunks, split_tgt_images,
          split_src_images);

      split_tgt_chunks.clear();
      split_src_chunks.clear();
      // No need to update the split_src_ranges if we don't update the split source images.
      if (added_image) {
        used_src_ranges.Insert(src_ranges);
        split_src_ranges->push_back(std::move(src_ranges));
      }
      src_ranges = {};

      // We don't have enough space for the current chunk; start a new split image and handle
      // this chunk there.
      // (Stepping the iterator back makes the for-loop's tgt++ revisit this chunk.)
      tgt--;
    }
  }

  // Flush the final split. The central directory range reserved above is folded
  // into this last split's source.
  // TODO Trim it in case the CD exceeds limit too much.
  src_ranges.Insert(central_directory->GetStartOffset(), central_directory->DataLengthForPatch());
  bool added_image = ZipModeImage::AddSplitImageFromChunkList(tgt_image, src_image, src_ranges,
                                                              split_tgt_chunks, split_src_chunks,
                                                              split_tgt_images, split_src_images);
  if (added_image) {
    split_src_ranges->push_back(std::move(src_ranges));
  }

  // Aborts (via CHECK) if the splits don't tile the target or the source ranges overlap.
  ValidateSplitImages(*split_tgt_images, *split_src_images, *split_src_ranges,
                      tgt_image.file_content_.size());

  return true;
}
1016 
// Builds one split target/source image pair from the accumulated chunk lists and appends them to
// the output vectors. The target chunks are trimmed to start on a BLOCK_SIZE boundary and padded
// with a trailing normal chunk so the split ends block-aligned (or at EOF); the split source file
// content is the concatenation of the blocks in |split_src_ranges|. Returns false -- and adds
// nothing -- when no target data survives the head alignment.
bool ZipModeImage::AddSplitImageFromChunkList(const ZipModeImage& tgt_image,
                                              const ZipModeImage& src_image,
                                              const SortedRangeSet& split_src_ranges,
                                              const std::vector<ImageChunk>& split_tgt_chunks,
                                              const std::vector<ImageChunk>& split_src_chunks,
                                              std::vector<ZipModeImage>* split_tgt_images,
                                              std::vector<ZipModeImage>* split_src_images) {
  CHECK(!split_tgt_chunks.empty());

  std::vector<ImageChunk> aligned_tgt_chunks;

  // Align the target chunks in the beginning with BLOCK_SIZE.
  // Chunks wholly inside the head padding are dropped; the first chunk long enough
  // to reach a block boundary is truncated to start there.
  size_t i = 0;
  while (i < split_tgt_chunks.size()) {
    size_t tgt_start = split_tgt_chunks[i].GetStartOffset();
    size_t tgt_length = split_tgt_chunks[i].GetRawDataLength();

    // Current ImageChunk is long enough to align.
    if (AlignHead(&tgt_start, &tgt_length)) {
      aligned_tgt_chunks.emplace_back(CHUNK_NORMAL, tgt_start, &tgt_image.file_content_,
                                      tgt_length);
      break;
    }

    i++;
  }

  // Nothing left after alignment in the current split tgt chunks; skip adding the split_tgt_image.
  if (i == split_tgt_chunks.size()) {
    return false;
  }

  // Chunks after the aligned one are carried over unchanged.
  aligned_tgt_chunks.insert(aligned_tgt_chunks.end(), split_tgt_chunks.begin() + i + 1,
                            split_tgt_chunks.end());
  CHECK(!aligned_tgt_chunks.empty());

  // Add a normal chunk to align the contents in the end.
  size_t end_offset =
      aligned_tgt_chunks.back().GetStartOffset() + aligned_tgt_chunks.back().GetRawDataLength();
  if (end_offset % BLOCK_SIZE != 0 && end_offset < tgt_image.file_content_.size()) {
    // Pad up to the next block boundary, but never past EOF.
    size_t tail_block_length = std::min<size_t>(tgt_image.file_content_.size() - end_offset,
                                                BLOCK_SIZE - (end_offset % BLOCK_SIZE));
    aligned_tgt_chunks.emplace_back(CHUNK_NORMAL, end_offset, &tgt_image.file_content_,
                                    tail_block_length);
  }

  ZipModeImage split_tgt_image(false);
  split_tgt_image.Initialize(aligned_tgt_chunks, {});
  split_tgt_image.MergeAdjacentNormalChunks();

  // Construct the split source file based on the split src ranges.
  // Each range is a [start_block, end_block) pair; the last range may be clamped to EOF.
  std::vector<uint8_t> split_src_content;
  for (const auto& r : split_src_ranges) {
    size_t end = std::min(src_image.file_content_.size(), r.second * BLOCK_SIZE);
    split_src_content.insert(split_src_content.end(),
                             src_image.file_content_.begin() + r.first * BLOCK_SIZE,
                             src_image.file_content_.begin() + end);
  }

  // We should not have an empty src in our design; otherwise we will encounter an error in
  // bsdiff since split_src_content.data() == nullptr.
  CHECK(!split_src_content.empty());

  ZipModeImage split_src_image(true);
  split_src_image.Initialize(split_src_chunks, split_src_content);

  split_tgt_images->push_back(std::move(split_tgt_image));
  split_src_images->push_back(std::move(split_src_image));

  return true;
}
1088 
ValidateSplitImages(const std::vector<ZipModeImage> & split_tgt_images,const std::vector<ZipModeImage> & split_src_images,std::vector<SortedRangeSet> & split_src_ranges,size_t total_tgt_size)1089 void ZipModeImage::ValidateSplitImages(const std::vector<ZipModeImage>& split_tgt_images,
1090                                        const std::vector<ZipModeImage>& split_src_images,
1091                                        std::vector<SortedRangeSet>& split_src_ranges,
1092                                        size_t total_tgt_size) {
1093   CHECK_EQ(split_tgt_images.size(), split_src_images.size());
1094 
1095   LOG(INFO) << "Validating " << split_tgt_images.size() << " images";
1096 
1097   // Verify that the target image pieces is continuous and can add up to the total size.
1098   size_t last_offset = 0;
1099   for (const auto& tgt_image : split_tgt_images) {
1100     CHECK(!tgt_image.chunks_.empty());
1101 
1102     CHECK_EQ(last_offset, tgt_image.chunks_.front().GetStartOffset());
1103     CHECK(last_offset % BLOCK_SIZE == 0);
1104 
1105     // Check the target chunks within the split image are continuous.
1106     for (const auto& chunk : tgt_image.chunks_) {
1107       CHECK_EQ(last_offset, chunk.GetStartOffset());
1108       last_offset += chunk.GetRawDataLength();
1109     }
1110   }
1111   CHECK_EQ(total_tgt_size, last_offset);
1112 
1113   // Verify that the source ranges are mutually exclusive.
1114   CHECK_EQ(split_src_images.size(), split_src_ranges.size());
1115   SortedRangeSet used_src_ranges;
1116   for (size_t i = 0; i < split_src_ranges.size(); i++) {
1117     CHECK(!used_src_ranges.Overlaps(split_src_ranges[i]))
1118         << "src range " << split_src_ranges[i].ToString() << " overlaps "
1119         << used_src_ranges.ToString();
1120     used_src_ranges.Insert(split_src_ranges[i]);
1121   }
1122 }
1123 
GeneratePatchesInternal(const ZipModeImage & tgt_image,const ZipModeImage & src_image,std::vector<PatchChunk> * patch_chunks)1124 bool ZipModeImage::GeneratePatchesInternal(const ZipModeImage& tgt_image,
1125                                            const ZipModeImage& src_image,
1126                                            std::vector<PatchChunk>* patch_chunks) {
1127   LOG(INFO) << "Constructing patches for " << tgt_image.NumOfChunks() << " chunks...";
1128   patch_chunks->clear();
1129 
1130   bsdiff::SuffixArrayIndexInterface* bsdiff_cache = nullptr;
1131   for (size_t i = 0; i < tgt_image.NumOfChunks(); i++) {
1132     const auto& tgt_chunk = tgt_image[i];
1133 
1134     if (PatchChunk::RawDataIsSmaller(tgt_chunk, 0)) {
1135       patch_chunks->emplace_back(tgt_chunk);
1136       continue;
1137     }
1138 
1139     const ImageChunk* src_chunk = (tgt_chunk.GetType() != CHUNK_DEFLATE)
1140                                       ? nullptr
1141                                       : src_image.FindChunkByName(tgt_chunk.GetEntryName());
1142 
1143     const auto& src_ref = (src_chunk == nullptr) ? src_image.PseudoSource() : *src_chunk;
1144     bsdiff::SuffixArrayIndexInterface** bsdiff_cache_ptr =
1145         (src_chunk == nullptr) ? &bsdiff_cache : nullptr;
1146 
1147     std::vector<uint8_t> patch_data;
1148     if (!ImageChunk::MakePatch(tgt_chunk, src_ref, &patch_data, bsdiff_cache_ptr)) {
1149       LOG(ERROR) << "Failed to generate patch, name: " << tgt_chunk.GetEntryName();
1150       return false;
1151     }
1152 
1153     LOG(INFO) << "patch " << i << " is " << patch_data.size() << " bytes (of "
1154               << tgt_chunk.GetRawDataLength() << ")";
1155 
1156     if (PatchChunk::RawDataIsSmaller(tgt_chunk, patch_data.size())) {
1157       patch_chunks->emplace_back(tgt_chunk);
1158     } else {
1159       patch_chunks->emplace_back(tgt_chunk, src_ref, std::move(patch_data));
1160     }
1161   }
1162   delete bsdiff_cache;
1163 
1164   CHECK_EQ(patch_chunks->size(), tgt_image.NumOfChunks());
1165   return true;
1166 }
1167 
GeneratePatches(const ZipModeImage & tgt_image,const ZipModeImage & src_image,const std::string & patch_name)1168 bool ZipModeImage::GeneratePatches(const ZipModeImage& tgt_image, const ZipModeImage& src_image,
1169                                    const std::string& patch_name) {
1170   std::vector<PatchChunk> patch_chunks;
1171 
1172   ZipModeImage::GeneratePatchesInternal(tgt_image, src_image, &patch_chunks);
1173 
1174   CHECK_EQ(tgt_image.NumOfChunks(), patch_chunks.size());
1175 
1176   android::base::unique_fd patch_fd(
1177       open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
1178   if (patch_fd == -1) {
1179     PLOG(ERROR) << "Failed to open " << patch_name;
1180     return false;
1181   }
1182 
1183   return PatchChunk::WritePatchDataToFd(patch_chunks, patch_fd);
1184 }
1185 
GeneratePatches(const std::vector<ZipModeImage> & split_tgt_images,const std::vector<ZipModeImage> & split_src_images,const std::vector<SortedRangeSet> & split_src_ranges,const std::string & patch_name,const std::string & split_info_file,const std::string & debug_dir)1186 bool ZipModeImage::GeneratePatches(const std::vector<ZipModeImage>& split_tgt_images,
1187                                    const std::vector<ZipModeImage>& split_src_images,
1188                                    const std::vector<SortedRangeSet>& split_src_ranges,
1189                                    const std::string& patch_name,
1190                                    const std::string& split_info_file,
1191                                    const std::string& debug_dir) {
1192   LOG(INFO) << "Constructing patches for " << split_tgt_images.size() << " split images...";
1193 
1194   android::base::unique_fd patch_fd(
1195       open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
1196   if (patch_fd == -1) {
1197     PLOG(ERROR) << "Failed to open " << patch_name;
1198     return false;
1199   }
1200 
1201   std::vector<std::string> split_info_list;
1202   for (size_t i = 0; i < split_tgt_images.size(); i++) {
1203     std::vector<PatchChunk> patch_chunks;
1204     if (!ZipModeImage::GeneratePatchesInternal(split_tgt_images[i], split_src_images[i],
1205                                                &patch_chunks)) {
1206       LOG(ERROR) << "Failed to generate split patch";
1207       return false;
1208     }
1209 
1210     size_t total_patch_size = 12;
1211     for (auto& p : patch_chunks) {
1212       p.UpdateSourceOffset(split_src_ranges[i]);
1213       total_patch_size += p.PatchSize();
1214     }
1215 
1216     if (!PatchChunk::WritePatchDataToFd(patch_chunks, patch_fd)) {
1217       return false;
1218     }
1219 
1220     size_t split_tgt_size = split_tgt_images[i].chunks_.back().GetStartOffset() +
1221                             split_tgt_images[i].chunks_.back().GetRawDataLength() -
1222                             split_tgt_images[i].chunks_.front().GetStartOffset();
1223     std::string split_info = android::base::StringPrintf(
1224         "%zu %zu %s", total_patch_size, split_tgt_size, split_src_ranges[i].ToString().c_str());
1225     split_info_list.push_back(split_info);
1226 
1227     // Write the split source & patch into the debug directory.
1228     if (!debug_dir.empty()) {
1229       std::string src_name = android::base::StringPrintf("%s/src-%zu", debug_dir.c_str(), i);
1230       android::base::unique_fd fd(
1231           open(src_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
1232 
1233       if (fd == -1) {
1234         PLOG(ERROR) << "Failed to open " << src_name;
1235         return false;
1236       }
1237       if (!android::base::WriteFully(fd, split_src_images[i].PseudoSource().DataForPatch(),
1238                                      split_src_images[i].PseudoSource().DataLengthForPatch())) {
1239         PLOG(ERROR) << "Failed to write split source data into " << src_name;
1240         return false;
1241       }
1242 
1243       std::string patch_name = android::base::StringPrintf("%s/patch-%zu", debug_dir.c_str(), i);
1244       fd.reset(open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
1245 
1246       if (fd == -1) {
1247         PLOG(ERROR) << "Failed to open " << patch_name;
1248         return false;
1249       }
1250       if (!PatchChunk::WritePatchDataToFd(patch_chunks, fd)) {
1251         return false;
1252       }
1253     }
1254   }
1255 
1256   // Store the split in the following format:
1257   // Line 0:   imgdiff version#
1258   // Line 1:   number of pieces
1259   // Line 2:   patch_size_1 tgt_size_1 src_range_1
1260   // ...
1261   // Line n+1: patch_size_n tgt_size_n src_range_n
1262   std::string split_info_string = android::base::StringPrintf(
1263       "%zu\n%zu\n", VERSION, split_info_list.size()) + android::base::Join(split_info_list, '\n');
1264   if (!android::base::WriteStringToFile(split_info_string, split_info_file)) {
1265     PLOG(ERROR) << "Failed to write split info to " << split_info_file;
1266     return false;
1267   }
1268 
1269   return true;
1270 }
1271 
Initialize(const std::string & filename)1272 bool ImageModeImage::Initialize(const std::string& filename) {
1273   if (!ReadFile(filename, &file_content_)) {
1274     return false;
1275   }
1276 
1277   size_t sz = file_content_.size();
1278   size_t pos = 0;
1279   while (pos < sz) {
1280     // 0x00 no header flags, 0x08 deflate compression, 0x1f8b gzip magic number
1281     if (sz - pos >= 4 && get_unaligned<uint32_t>(file_content_.data() + pos) == 0x00088b1f) {
1282       // 'pos' is the offset of the start of a gzip chunk.
1283       size_t chunk_offset = pos;
1284 
1285       // The remaining data is too small to be a gzip chunk; treat them as a normal chunk.
1286       if (sz - pos < GZIP_HEADER_LEN + GZIP_FOOTER_LEN) {
1287         chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, sz - pos);
1288         break;
1289       }
1290 
1291       // We need three chunks for the deflated image in total, one normal chunk for the header,
1292       // one deflated chunk for the body, and another normal chunk for the footer.
1293       chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, GZIP_HEADER_LEN);
1294       pos += GZIP_HEADER_LEN;
1295 
1296       // We must decompress this chunk in order to discover where it ends, and so we can update
1297       // the uncompressed_data of the image body and its length.
1298 
1299       z_stream strm;
1300       strm.zalloc = Z_NULL;
1301       strm.zfree = Z_NULL;
1302       strm.opaque = Z_NULL;
1303       strm.avail_in = sz - pos;
1304       strm.next_in = file_content_.data() + pos;
1305 
1306       // -15 means we are decoding a 'raw' deflate stream; zlib will
1307       // not expect zlib headers.
1308       int ret = inflateInit2(&strm, -15);
1309       if (ret < 0) {
1310         LOG(ERROR) << "Failed to initialize inflate: " << ret;
1311         return false;
1312       }
1313 
1314       size_t allocated = BUFFER_SIZE;
1315       std::vector<uint8_t> uncompressed_data(allocated);
1316       size_t uncompressed_len = 0, raw_data_len = 0;
1317       do {
1318         strm.avail_out = allocated - uncompressed_len;
1319         strm.next_out = uncompressed_data.data() + uncompressed_len;
1320         ret = inflate(&strm, Z_NO_FLUSH);
1321         if (ret < 0) {
1322           LOG(WARNING) << "Inflate failed [" << strm.msg << "] at offset [" << chunk_offset
1323                        << "]; treating as a normal chunk";
1324           break;
1325         }
1326         uncompressed_len = allocated - strm.avail_out;
1327         if (strm.avail_out == 0) {
1328           allocated *= 2;
1329           uncompressed_data.resize(allocated);
1330         }
1331       } while (ret != Z_STREAM_END);
1332 
1333       raw_data_len = sz - strm.avail_in - pos;
1334       inflateEnd(&strm);
1335 
1336       if (ret < 0) {
1337         continue;
1338       }
1339 
1340       // The footer contains the size of the uncompressed data.  Double-check to make sure that it
1341       // matches the size of the data we got when we actually did the decompression.
1342       size_t footer_index = pos + raw_data_len + GZIP_FOOTER_LEN - 4;
1343       if (sz - footer_index < 4) {
1344         LOG(WARNING) << "invalid footer position; treating as a normal chunk";
1345         continue;
1346       }
1347       size_t footer_size = get_unaligned<uint32_t>(file_content_.data() + footer_index);
1348       if (footer_size != uncompressed_len) {
1349         LOG(WARNING) << "footer size " << footer_size << " != " << uncompressed_len
1350                      << "; treating as a normal chunk";
1351         continue;
1352       }
1353 
1354       ImageChunk body(CHUNK_DEFLATE, pos, &file_content_, raw_data_len);
1355       uncompressed_data.resize(uncompressed_len);
1356       body.SetUncompressedData(std::move(uncompressed_data));
1357       chunks_.push_back(std::move(body));
1358 
1359       pos += raw_data_len;
1360 
1361       // create a normal chunk for the footer
1362       chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, GZIP_FOOTER_LEN);
1363 
1364       pos += GZIP_FOOTER_LEN;
1365     } else {
1366       // Use a normal chunk to take all the contents until the next gzip chunk (or EOF); we expect
1367       // the number of chunks to be small (5 for typical boot and recovery images).
1368 
1369       // Scan forward until we find a gzip header.
1370       size_t data_len = 0;
1371       while (data_len + pos < sz) {
1372         if (data_len + pos + 4 <= sz &&
1373             get_unaligned<uint32_t>(file_content_.data() + pos + data_len) == 0x00088b1f) {
1374           break;
1375         }
1376         data_len++;
1377       }
1378       chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, data_len);
1379 
1380       pos += data_len;
1381     }
1382   }
1383 
1384   return true;
1385 }
1386 
SetBonusData(const std::vector<uint8_t> & bonus_data)1387 bool ImageModeImage::SetBonusData(const std::vector<uint8_t>& bonus_data) {
1388   CHECK(is_source_);
1389   if (chunks_.size() < 2 || !chunks_[1].SetBonusData(bonus_data)) {
1390     LOG(ERROR) << "Failed to set bonus data";
1391     DumpChunks();
1392     return false;
1393   }
1394 
1395   LOG(INFO) << "  using " << bonus_data.size() << " bytes of bonus data";
1396   return true;
1397 }
1398 
1399 // In Image Mode, verify that the source and target images have the same chunk structure (ie, the
1400 // same sequence of deflate and normal chunks).
CheckAndProcessChunks(ImageModeImage * tgt_image,ImageModeImage * src_image)1401 bool ImageModeImage::CheckAndProcessChunks(ImageModeImage* tgt_image, ImageModeImage* src_image) {
1402   // In image mode, merge the gzip header and footer in with any adjacent normal chunks.
1403   tgt_image->MergeAdjacentNormalChunks();
1404   src_image->MergeAdjacentNormalChunks();
1405 
1406   if (tgt_image->NumOfChunks() != src_image->NumOfChunks()) {
1407     LOG(ERROR) << "Source and target don't have same number of chunks!";
1408     tgt_image->DumpChunks();
1409     src_image->DumpChunks();
1410     return false;
1411   }
1412   for (size_t i = 0; i < tgt_image->NumOfChunks(); ++i) {
1413     if ((*tgt_image)[i].GetType() != (*src_image)[i].GetType()) {
1414       LOG(ERROR) << "Source and target don't have same chunk structure! (chunk " << i << ")";
1415       tgt_image->DumpChunks();
1416       src_image->DumpChunks();
1417       return false;
1418     }
1419   }
1420 
1421   for (size_t i = 0; i < tgt_image->NumOfChunks(); ++i) {
1422     auto& tgt_chunk = (*tgt_image)[i];
1423     auto& src_chunk = (*src_image)[i];
1424     if (tgt_chunk.GetType() != CHUNK_DEFLATE) {
1425       continue;
1426     }
1427 
1428     // If two deflate chunks are identical treat them as normal chunks.
1429     if (tgt_chunk == src_chunk) {
1430       tgt_chunk.ChangeDeflateChunkToNormal();
1431       src_chunk.ChangeDeflateChunkToNormal();
1432     } else if (!tgt_chunk.ReconstructDeflateChunk()) {
1433       // We cannot recompress the data and get exactly the same bits as are in the input target
1434       // image, fall back to normal
1435       LOG(WARNING) << "Failed to reconstruct target deflate chunk " << i << " ["
1436                    << tgt_chunk.GetEntryName() << "]; treating as normal";
1437       tgt_chunk.ChangeDeflateChunkToNormal();
1438       src_chunk.ChangeDeflateChunkToNormal();
1439     }
1440   }
1441 
1442   // For images, we need to maintain the parallel structure of the chunk lists, so do the merging
1443   // in both the source and target lists.
1444   tgt_image->MergeAdjacentNormalChunks();
1445   src_image->MergeAdjacentNormalChunks();
1446   if (tgt_image->NumOfChunks() != src_image->NumOfChunks()) {
1447     // This shouldn't happen.
1448     LOG(ERROR) << "Merging normal chunks went awry";
1449     return false;
1450   }
1451 
1452   return true;
1453 }
1454 
1455 // In image mode, generate patches against the given source chunks and bonus_data; write the
1456 // result to |patch_name|.
GeneratePatches(const ImageModeImage & tgt_image,const ImageModeImage & src_image,const std::string & patch_name)1457 bool ImageModeImage::GeneratePatches(const ImageModeImage& tgt_image,
1458                                      const ImageModeImage& src_image,
1459                                      const std::string& patch_name) {
1460   LOG(INFO) << "Constructing patches for " << tgt_image.NumOfChunks() << " chunks...";
1461   std::vector<PatchChunk> patch_chunks;
1462   patch_chunks.reserve(tgt_image.NumOfChunks());
1463 
1464   for (size_t i = 0; i < tgt_image.NumOfChunks(); i++) {
1465     const auto& tgt_chunk = tgt_image[i];
1466     const auto& src_chunk = src_image[i];
1467 
1468     if (PatchChunk::RawDataIsSmaller(tgt_chunk, 0)) {
1469       patch_chunks.emplace_back(tgt_chunk);
1470       continue;
1471     }
1472 
1473     std::vector<uint8_t> patch_data;
1474     if (!ImageChunk::MakePatch(tgt_chunk, src_chunk, &patch_data, nullptr)) {
1475       LOG(ERROR) << "Failed to generate patch for target chunk " << i;
1476       return false;
1477     }
1478     LOG(INFO) << "patch " << i << " is " << patch_data.size() << " bytes (of "
1479               << tgt_chunk.GetRawDataLength() << ")";
1480 
1481     if (PatchChunk::RawDataIsSmaller(tgt_chunk, patch_data.size())) {
1482       patch_chunks.emplace_back(tgt_chunk);
1483     } else {
1484       patch_chunks.emplace_back(tgt_chunk, src_chunk, std::move(patch_data));
1485     }
1486   }
1487 
1488   CHECK_EQ(tgt_image.NumOfChunks(), patch_chunks.size());
1489 
1490   android::base::unique_fd patch_fd(
1491       open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
1492   if (patch_fd == -1) {
1493     PLOG(ERROR) << "Failed to open " << patch_name;
1494     return false;
1495   }
1496 
1497   return PatchChunk::WritePatchDataToFd(patch_chunks, patch_fd);
1498 }
1499 
// Command-line entry point: parses options, then dispatches to zip mode or image mode to
// produce a patch file from <src-img> and <tgt-img>. Returns 0 on success, 1 on an
// operational failure, 2 on a usage error.
int imgdiff(int argc, const char** argv) {
  bool verbose = false;
  bool zip_mode = false;
  std::vector<uint8_t> bonus_data;  // contents of the -b file, image mode only
  size_t blocks_limit = 0;          // --block-limit, in blocks; 0 means unlimited
  std::string split_info_file;      // --split-info output path
  std::string debug_dir;            // --debug-dir output directory

  int opt;
  int option_index;
  optind = 0;  // Reset the getopt state so that we can call it multiple times for test.

  while ((opt = getopt_long(argc, const_cast<char**>(argv), "zb:v", OPTIONS, &option_index)) !=
         -1) {
    switch (opt) {
      case 'z':
        zip_mode = true;
        break;
      case 'b': {
        // Read the entire bonus file into bonus_data.
        android::base::unique_fd fd(open(optarg, O_RDONLY));
        if (fd == -1) {
          PLOG(ERROR) << "Failed to open bonus file " << optarg;
          return 1;
        }
        struct stat st;
        if (fstat(fd, &st) != 0) {
          PLOG(ERROR) << "Failed to stat bonus file " << optarg;
          return 1;
        }

        size_t bonus_size = st.st_size;
        bonus_data.resize(bonus_size);
        if (!android::base::ReadFully(fd, bonus_data.data(), bonus_size)) {
          PLOG(ERROR) << "Failed to read bonus file " << optarg;
          return 1;
        }
        break;
      }
      case 'v':
        verbose = true;
        break;
      case 0: {
        // Long option with no short equivalent; identify it by name.
        std::string name = OPTIONS[option_index].name;
        if (name == "block-limit" && !android::base::ParseUint(optarg, &blocks_limit)) {
          LOG(ERROR) << "Failed to parse size blocks_limit: " << optarg;
          return 1;
        } else if (name == "split-info") {
          split_info_file = optarg;
        } else if (name == "debug-dir") {
          debug_dir = optarg;
        }
        break;
      }
      default:
        LOG(ERROR) << "unexpected opt: " << static_cast<char>(opt);
        return 2;
    }
  }

  // Without -v, suppress INFO-level logging.
  if (!verbose) {
    android::base::SetMinimumLogSeverity(android::base::WARNING);
  }

  // Exactly three positional arguments are required: src, tgt, patch output.
  if (argc - optind != 3) {
    LOG(ERROR) << "usage: " << argv[0] << " [options] <src-img> <tgt-img> <patch-file>";
    LOG(ERROR)
        << "  -z <zip-mode>,    Generate patches in zip mode, src and tgt should be zip files.\n"
           "  -b <bonus-file>,  Bonus file in addition to src, image mode only.\n"
           "  --block-limit,    For large zips, split the src and tgt based on the block limit;\n"
           "                    and generate patches between each pair of pieces. Concatenate "
           "these\n"
           "                    patches together and output them into <patch-file>.\n"
           "  --split-info,     Output the split information (patch_size, tgt_size, src_ranges);\n"
           "                    zip mode with block-limit only.\n"
           "  --debug-dir,      Debug directory to put the split srcs and patches, zip mode only.\n"
           "  -v, --verbose,    Enable verbose logging.";
    return 2;
  }

  if (zip_mode) {
    // Zip mode: chunk the images by zip entries; limit is converted from blocks to bytes.
    ZipModeImage src_image(true, blocks_limit * BLOCK_SIZE);
    ZipModeImage tgt_image(false, blocks_limit * BLOCK_SIZE);

    if (!src_image.Initialize(argv[optind])) {
      return 1;
    }
    if (!tgt_image.Initialize(argv[optind + 1])) {
      return 1;
    }

    if (!ZipModeImage::CheckAndProcessChunks(&tgt_image, &src_image)) {
      return 1;
    }

    // Compute bsdiff patches for each chunk's data (the uncompressed data, in the case of
    // deflate chunks).
    if (blocks_limit > 0) {
      // With a block limit, split both images into pieces and patch piece-by-piece.
      if (split_info_file.empty()) {
        LOG(ERROR) << "split-info path cannot be empty when generating patches with a block-limit";
        return 1;
      }

      std::vector<ZipModeImage> split_tgt_images;
      std::vector<ZipModeImage> split_src_images;
      std::vector<SortedRangeSet> split_src_ranges;
      ZipModeImage::SplitZipModeImageWithLimit(tgt_image, src_image, &split_tgt_images,
                                               &split_src_images, &split_src_ranges);

      if (!ZipModeImage::GeneratePatches(split_tgt_images, split_src_images, split_src_ranges,
                                         argv[optind + 2], split_info_file, debug_dir)) {
        return 1;
      }

    } else if (!ZipModeImage::GeneratePatches(tgt_image, src_image, argv[optind + 2])) {
      return 1;
    }
  } else {
    // Image mode: chunk the images by scanning for gzip streams.
    ImageModeImage src_image(true);
    ImageModeImage tgt_image(false);

    if (!src_image.Initialize(argv[optind])) {
      return 1;
    }
    if (!tgt_image.Initialize(argv[optind + 1])) {
      return 1;
    }

    if (!ImageModeImage::CheckAndProcessChunks(&tgt_image, &src_image)) {
      return 1;
    }

    // Attach the bonus data (if any) to the source image before diffing.
    if (!bonus_data.empty() && !src_image.SetBonusData(bonus_data)) {
      return 1;
    }

    if (!ImageModeImage::GeneratePatches(tgt_image, src_image, argv[optind + 2])) {
      return 1;
    }
  }

  return 0;
}
1642