1 //
2 // Copyright (C) 2012 The Android Open Source Project
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 //
16 
17 #include "update_engine/payload_consumer/delta_performer.h"
18 
19 #include <endian.h>
20 #include <errno.h>
21 #include <linux/fs.h>
22 
23 #include <algorithm>
24 #include <cstring>
25 #include <memory>
26 #include <string>
27 #include <vector>
28 
29 #include <applypatch/imgpatch.h>
30 #include <base/files/file_util.h>
31 #include <base/format_macros.h>
32 #include <base/strings/string_number_conversions.h>
33 #include <base/strings/string_util.h>
34 #include <base/strings/stringprintf.h>
35 #include <brillo/data_encoding.h>
36 #include <brillo/make_unique_ptr.h>
37 #include <bspatch.h>
38 #include <google/protobuf/repeated_field.h>
39 
40 #include "update_engine/common/constants.h"
41 #include "update_engine/common/hardware_interface.h"
42 #include "update_engine/common/prefs_interface.h"
43 #include "update_engine/common/subprocess.h"
44 #include "update_engine/common/terminator.h"
45 #include "update_engine/payload_consumer/bzip_extent_writer.h"
46 #include "update_engine/payload_consumer/download_action.h"
47 #include "update_engine/payload_consumer/extent_writer.h"
48 #if USE_MTD
49 #include "update_engine/payload_consumer/mtd_file_descriptor.h"
50 #endif
51 #include "update_engine/payload_consumer/payload_constants.h"
52 #include "update_engine/payload_consumer/payload_verifier.h"
53 #include "update_engine/payload_consumer/xz_extent_writer.h"
54 
55 using google::protobuf::RepeatedPtrField;
56 using std::min;
57 using std::string;
58 using std::vector;
59 
60 namespace chromeos_update_engine {
61 
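// Payload header layout implied by the constants below (see also
// ParsePayloadMetadata()):
//   [delta magic (sizeof(kDeltaMagic) bytes)]
//   [major payload version (8 bytes, big endian)]
//   [manifest size (8 bytes, big endian)]
//   [metadata signature size (4 bytes, big endian; major version 2 only)]
// kMaxPayloadHeaderSize is the combined size of these fields.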
62 const uint64_t DeltaPerformer::kDeltaVersionOffset = sizeof(kDeltaMagic);
63 const uint64_t DeltaPerformer::kDeltaVersionSize = 8;
64 const uint64_t DeltaPerformer::kDeltaManifestSizeOffset =
65     kDeltaVersionOffset + kDeltaVersionSize;
66 const uint64_t DeltaPerformer::kDeltaManifestSizeSize = 8;
67 const uint64_t DeltaPerformer::kDeltaMetadataSignatureSizeSize = 4;
68 const uint64_t DeltaPerformer::kMaxPayloadHeaderSize = 24;
69 const uint64_t DeltaPerformer::kSupportedMajorPayloadVersion = 2;
70 const uint32_t DeltaPerformer::kSupportedMinorPayloadVersion = 3;
71 
72 const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
73 const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
74 const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
75 const unsigned DeltaPerformer::kProgressOperationsWeight = 50;
76 
77 namespace {
78 const int kUpdateStateOperationInvalid = -1;
79 const int kMaxResumedUpdateFailures = 10;
80 #if USE_MTD
81 const int kUbiVolumeAttachTimeout = 5 * 60;
82 #endif
83 
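// Returns a FileDescriptor suitable for |path|: a UBI- or MTD-aware
// implementation when built with USE_MTD and the path refers to such a
// device, and a plain EINTR-safe descriptor otherwise.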
84 FileDescriptorPtr CreateFileDescriptor(const char* path) {
85   FileDescriptorPtr ret;
86 #if USE_MTD
87   if (strstr(path, "/dev/ubi") == path) {
88     if (!UbiFileDescriptor::IsUbi(path)) {
89       // The volume might not have been attached at boot time.
90       int volume_no;
91       if (utils::SplitPartitionName(path, nullptr, &volume_no)) {
92         utils::TryAttachingUbiVolume(volume_no, kUbiVolumeAttachTimeout);
93       }
94     }
95     if (UbiFileDescriptor::IsUbi(path)) {
96       LOG(INFO) << path << " is a UBI device.";
97       ret.reset(new UbiFileDescriptor);
98     }
99   } else if (MtdFileDescriptor::IsMtd(path)) {
100     LOG(INFO) << path << " is an MTD device.";
101     ret.reset(new MtdFileDescriptor);
102   } else {
103     LOG(INFO) << path << " is not an MTD nor a UBI device.";
104 #endif
105     ret.reset(new EintrSafeFileDescriptor);
106 #if USE_MTD
107   }
108 #endif
109   return ret;
110 }
111 
112 // Opens |path| with the given |mode|. On success returns an open FileDescriptor
113 // and sets *err to 0. On failure, sets *err to errno and returns nullptr.
114 FileDescriptorPtr OpenFile(const char* path, int mode, int* err) {
115   // Try to mark the block device read-only based on the mode. Ignore any
116   // failure since this won't work when passing regular files.
117   utils::SetBlockDeviceReadOnly(path, (mode & O_ACCMODE) == O_RDONLY);
118 
119   FileDescriptorPtr fd = CreateFileDescriptor(path);
120 #if USE_MTD
121   // On NAND devices, we can either read or write, but not both. So here we
122   // use O_WRONLY.
123   if (UbiFileDescriptor::IsUbi(path) || MtdFileDescriptor::IsMtd(path)) {
124     mode = O_WRONLY;
125   }
126 #endif
127   if (!fd->Open(path, mode, 000)) {
128     *err = errno;
129     PLOG(ERROR) << "Unable to open file " << path;
130     return nullptr;
131   }
132   *err = 0;
133   return fd;
134 }
135 
136 // Discard the tail of the block device referenced by |fd|, from the offset
137 // |data_size| until the end of the block device. Returns whether the data was
138 // discarded.
139 bool DiscardPartitionTail(const FileDescriptorPtr& fd, uint64_t data_size) {
140   uint64_t part_size = fd->BlockDevSize();
141   if (!part_size || part_size <= data_size)
142     return false;
143 
144   struct blkioctl_request {
145     int number;
146     const char* name;
147   };
148   const vector<blkioctl_request> blkioctl_requests = {
149       {BLKSECDISCARD, "BLKSECDISCARD"},
150       {BLKDISCARD, "BLKDISCARD"},
151 #ifdef BLKZEROOUT
152       {BLKZEROOUT, "BLKZEROOUT"},
153 #endif
154   };
155   for (const auto& req : blkioctl_requests) {
156     int error = 0;
157     if (fd->BlkIoctl(req.number, data_size, part_size - data_size, &error) &&
158         error == 0) {
159       return true;
160     }
161     LOG(WARNING) << "Error discarding the last "
162                  << (part_size - data_size) / 1024 << " KiB using ioctl("
163                  << req.name << ")";
164   }
165   return false;
166 }
167 
168 }  // namespace
169 
170 
171 // Computes the ratio of |part| and |total|, scaled to |norm|, using integer
172 // arithmetic.
173 static uint64_t IntRatio(uint64_t part, uint64_t total, uint64_t norm) {
174   return part * norm / total;
175 }
176 
177 void DeltaPerformer::LogProgress(const char* message_prefix) {
178   // Format operations total count and percentage.
179   string total_operations_str("?");
180   string completed_percentage_str("");
181   if (num_total_operations_) {
182     total_operations_str = std::to_string(num_total_operations_);
183     // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
184     completed_percentage_str =
185         base::StringPrintf(" (%" PRIu64 "%%)",
186                            IntRatio(next_operation_num_, num_total_operations_,
187                                     100));
188   }
189 
190   // Format download total count and percentage.
191   size_t payload_size = install_plan_->payload_size;
192   string payload_size_str("?");
193   string downloaded_percentage_str("");
194   if (payload_size) {
195     payload_size_str = std::to_string(payload_size);
196     // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
197     downloaded_percentage_str =
198         base::StringPrintf(" (%" PRIu64 "%%)",
199                            IntRatio(total_bytes_received_, payload_size, 100));
200   }
201 
202   LOG(INFO) << (message_prefix ? message_prefix : "") << next_operation_num_
203             << "/" << total_operations_str << " operations"
204             << completed_percentage_str << ", " << total_bytes_received_
205             << "/" << payload_size_str << " bytes downloaded"
206             << downloaded_percentage_str << ", overall progress "
207             << overall_progress_ << "%";
208 }
209 
210 void DeltaPerformer::UpdateOverallProgress(bool force_log,
211                                            const char* message_prefix) {
212   // Compute our download and overall progress.
213   unsigned new_overall_progress = 0;
214   static_assert(kProgressDownloadWeight + kProgressOperationsWeight == 100,
215                 "Progress weights don't add up");
216   // Only consider download progress if its total size is known; otherwise
217   // adjust the operations weight to compensate for the absence of download
218   // progress. Also, make sure to cap the download portion at
219   // kProgressDownloadWeight, in case we end up downloading more than we
220   // initially expected (this indicates a problem, but could generally happen).
221   // TODO(garnold) the correction of operations weight when we do not have the
222   // total payload size, as well as the conditional guard below, should both be
223   // eliminated once we ensure that the payload_size in the install plan is
224   // always given and is non-zero. This currently isn't the case during unit
225   // tests (see chromium-os:37969).
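  // For example, when the payload size is known, having received 60% of the
  // bytes and completed 40% of the operations yields an overall progress of
  // 60% * 50/100 + 40% * 50/100 = 50%.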
226   size_t payload_size = install_plan_->payload_size;
227   unsigned actual_operations_weight = kProgressOperationsWeight;
228   if (payload_size)
229     new_overall_progress += min(
230         static_cast<unsigned>(IntRatio(total_bytes_received_, payload_size,
231                                        kProgressDownloadWeight)),
232         kProgressDownloadWeight);
233   else
234     actual_operations_weight += kProgressDownloadWeight;
235 
236   // Only add completed operations if their total number is known; we definitely
237   // expect an update to have at least one operation, so the expectation is that
238   // this will eventually reach |actual_operations_weight|.
239   if (num_total_operations_)
240     new_overall_progress += IntRatio(next_operation_num_, num_total_operations_,
241                                      actual_operations_weight);
242 
243   // Progress ratio cannot recede, unless our assumptions about the total
244   // payload size, total number of operations, or the monotonicity of progress
245   // are breached.
246   if (new_overall_progress < overall_progress_) {
247     LOG(WARNING) << "progress counter receded from " << overall_progress_
248                  << "% down to " << new_overall_progress << "%; this is a bug";
249     force_log = true;
250   }
251   overall_progress_ = new_overall_progress;
252 
253   // Update the chunk index and log as needed: if forced by the caller, or we
254   // completed a progress chunk, or a timeout has expired.
255   base::Time curr_time = base::Time::Now();
256   unsigned curr_progress_chunk =
257       overall_progress_ * kProgressLogMaxChunks / 100;
258   if (force_log || curr_progress_chunk > last_progress_chunk_ ||
259       curr_time > forced_progress_log_time_) {
260     forced_progress_log_time_ = curr_time + forced_progress_log_wait_;
261     LogProgress(message_prefix);
262   }
263   last_progress_chunk_ = curr_progress_chunk;
264 }
265 
266 
267 size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p, size_t* count_p,
268                                         size_t max) {
269   const size_t count = *count_p;
270   if (!count)
271     return 0;  // Special case shortcut.
272   size_t read_len = min(count, max - buffer_.size());
273   const char* bytes_start = *bytes_p;
274   const char* bytes_end = bytes_start + read_len;
275   buffer_.insert(buffer_.end(), bytes_start, bytes_end);
276   *bytes_p = bytes_end;
277   *count_p = count - read_len;
278   return read_len;
279 }
280 
281 
282 bool DeltaPerformer::HandleOpResult(bool op_result, const char* op_type_name,
283                                     ErrorCode* error) {
284   if (op_result)
285     return true;
286 
287   size_t partition_first_op_num =
288       current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0;
289   LOG(ERROR) << "Failed to perform " << op_type_name << " operation "
290              << next_operation_num_ << ", which is the operation "
291              << next_operation_num_ - partition_first_op_num
292              << " in partition \""
293              << partitions_[current_partition_].partition_name() << "\"";
294   if (*error == ErrorCode::kSuccess)
295     *error = ErrorCode::kDownloadOperationExecutionError;
296   return false;
297 }
298 
299 int DeltaPerformer::Close() {
300   int err = -CloseCurrentPartition();
301   LOG_IF(ERROR, !payload_hash_calculator_.Finalize() ||
302                 !signed_hash_calculator_.Finalize())
303       << "Unable to finalize the hash.";
304   if (!buffer_.empty()) {
305     LOG(INFO) << "Discarding " << buffer_.size() << " unused downloaded bytes";
306     if (err >= 0)
307       err = 1;
308   }
309   return -err;
310 }
311 
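// Closes the source and target partition file descriptors, if open. Returns 0
// on success, or a negated errno-style value if a close fails.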
312 int DeltaPerformer::CloseCurrentPartition() {
313   int err = 0;
314   if (source_fd_ && !source_fd_->Close()) {
315     err = errno;
316     PLOG(ERROR) << "Error closing source partition";
317     if (!err)
318       err = 1;
319   }
320   source_fd_.reset();
321   source_path_.clear();
322 
323   if (target_fd_ && !target_fd_->Close()) {
324     err = errno;
325     PLOG(ERROR) << "Error closing target partition";
326     if (!err)
327       err = 1;
328   }
329   target_fd_.reset();
330   target_path_.clear();
331   return -err;
332 }
333 
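// Opens the source (only for delta payloads with minor version >= 2) and
// target file descriptors for the partition at |current_partition_|, and
// discards the tail of the target device beyond the new partition data.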
334 bool DeltaPerformer::OpenCurrentPartition() {
335   if (current_partition_ >= partitions_.size())
336     return false;
337 
338   const PartitionUpdate& partition = partitions_[current_partition_];
339   // Open source fds if we have a delta payload with minor version >= 2.
340   if (install_plan_->payload_type == InstallPayloadType::kDelta &&
341       GetMinorVersion() != kInPlaceMinorPayloadVersion) {
342     source_path_ = install_plan_->partitions[current_partition_].source_path;
343     int err;
344     source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, &err);
345     if (!source_fd_) {
346       LOG(ERROR) << "Unable to open source partition "
347                  << partition.partition_name() << " on slot "
348                  << BootControlInterface::SlotName(install_plan_->source_slot)
349                  << ", file " << source_path_;
350       return false;
351     }
352   }
353 
354   target_path_ = install_plan_->partitions[current_partition_].target_path;
355   int err;
356   target_fd_ = OpenFile(target_path_.c_str(), O_RDWR, &err);
357   if (!target_fd_) {
358     LOG(ERROR) << "Unable to open target partition "
359                << partition.partition_name() << " on slot "
360                << BootControlInterface::SlotName(install_plan_->target_slot)
361                << ", file " << target_path_;
362     return false;
363   }
364 
365   LOG(INFO) << "Applying " << partition.operations().size()
366             << " operations to partition \"" << partition.partition_name()
367             << "\"";
368 
369   // Discard the end of the partition, but ignore failures.
370   DiscardPartitionTail(
371       target_fd_, install_plan_->partitions[current_partition_].target_size);
372 
373   return true;
374 }
375 
376 namespace {
377 
378 void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
379   string sha256 = brillo::data_encoding::Base64Encode(info.hash());
380   LOG(INFO) << "PartitionInfo " << tag << " sha256: " << sha256
381             << " size: " << info.size();
382 }
383 
384 void LogPartitionInfo(const vector<PartitionUpdate>& partitions) {
385   for (const PartitionUpdate& partition : partitions) {
386     LogPartitionInfoHash(partition.old_partition_info(),
387                          "old " + partition.partition_name());
388     LogPartitionInfoHash(partition.new_partition_info(),
389                          "new " + partition.partition_name());
390   }
391 }
392 
393 }  // namespace
394 
395 bool DeltaPerformer::GetMetadataSignatureSizeOffset(
396     uint64_t* out_offset) const {
397   if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
398     *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
399     return true;
400   }
401   return false;
402 }
403 
404 bool DeltaPerformer::GetManifestOffset(uint64_t* out_offset) const {
405   // The actual manifest begins right after the manifest size field, or after
406   // the metadata signature size field if the major version is >= 2.
407   if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
408     *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
409     return true;
410   }
411   if (major_payload_version_ == kBrilloMajorPayloadVersion) {
412     *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize +
413                   kDeltaMetadataSignatureSizeSize;
414     return true;
415   }
416   LOG(ERROR) << "Unknown major payload version: " << major_payload_version_;
417   return false;
418 }
419 
420 uint64_t DeltaPerformer::GetMetadataSize() const {
421   return metadata_size_;
422 }
423 
424 uint64_t DeltaPerformer::GetMajorVersion() const {
425   return major_payload_version_;
426 }
427 
428 uint32_t DeltaPerformer::GetMinorVersion() const {
429   if (manifest_.has_minor_version()) {
430     return manifest_.minor_version();
431   } else {
432     return install_plan_->payload_type == InstallPayloadType::kDelta
433                ? kSupportedMinorPayloadVersion
434                : kFullPayloadMinorVersion;
435   }
436 }
437 
438 bool DeltaPerformer::GetManifest(DeltaArchiveManifest* out_manifest_p) const {
439   if (!manifest_parsed_)
440     return false;
441   *out_manifest_p = manifest_;
442   return true;
443 }
444 
445 bool DeltaPerformer::IsHeaderParsed() const {
446   return metadata_size_ != 0;
447 }
448 
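// Parses the payload header and manifest from the bytes accumulated in
// |payload|. Returns kMetadataParseInsufficientData until the full metadata
// (header, manifest and metadata signature, if present) has been received,
// and kMetadataParseError if any validation fails.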
449 DeltaPerformer::MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
450     const brillo::Blob& payload, ErrorCode* error) {
451   *error = ErrorCode::kSuccess;
452   uint64_t manifest_offset;
453 
454   if (!IsHeaderParsed()) {
455     // Ensure we have data to cover the major payload version.
456     if (payload.size() < kDeltaManifestSizeOffset)
457       return kMetadataParseInsufficientData;
458 
459     // Validate the magic string.
460     if (memcmp(payload.data(), kDeltaMagic, sizeof(kDeltaMagic)) != 0) {
461       LOG(ERROR) << "Bad payload format -- invalid delta magic.";
462       *error = ErrorCode::kDownloadInvalidMetadataMagicString;
463       return kMetadataParseError;
464     }
465 
466     // Extract the payload version from the metadata.
467     static_assert(sizeof(major_payload_version_) == kDeltaVersionSize,
468                   "Major payload version size mismatch");
469     memcpy(&major_payload_version_,
470            &payload[kDeltaVersionOffset],
471            kDeltaVersionSize);
472     // switch big endian to host
473     major_payload_version_ = be64toh(major_payload_version_);
474 
475     if (major_payload_version_ != supported_major_version_ &&
476         major_payload_version_ != kChromeOSMajorPayloadVersion) {
477       LOG(ERROR) << "Bad payload format -- unsupported payload version: "
478           << major_payload_version_;
479       *error = ErrorCode::kUnsupportedMajorPayloadVersion;
480       return kMetadataParseError;
481     }
482 
483     // Get the manifest offset now that we have the payload version.
484     if (!GetManifestOffset(&manifest_offset)) {
485       *error = ErrorCode::kUnsupportedMajorPayloadVersion;
486       return kMetadataParseError;
487     }
488     // Check again with the manifest offset.
489     if (payload.size() < manifest_offset)
490       return kMetadataParseInsufficientData;
491 
492     // Next, parse the manifest size.
493     static_assert(sizeof(manifest_size_) == kDeltaManifestSizeSize,
494                   "manifest_size size mismatch");
495     memcpy(&manifest_size_,
496            &payload[kDeltaManifestSizeOffset],
497            kDeltaManifestSizeSize);
498     manifest_size_ = be64toh(manifest_size_);  // switch big endian to host
499 
500     if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
501       // Parse the metadata signature size.
502       static_assert(sizeof(metadata_signature_size_) ==
503                     kDeltaMetadataSignatureSizeSize,
504                     "metadata_signature_size size mismatch");
505       uint64_t metadata_signature_size_offset;
506       if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) {
507         *error = ErrorCode::kError;
508         return kMetadataParseError;
509       }
510       memcpy(&metadata_signature_size_,
511              &payload[metadata_signature_size_offset],
512              kDeltaMetadataSignatureSizeSize);
513       metadata_signature_size_ = be32toh(metadata_signature_size_);
514     }
515 
516     // If the metadata size is present in the install plan, check for it right
517     // away, even before waiting for that many bytes to be downloaded in the
518     // payload. This will prevent any attack which relies on us downloading data
519     // beyond the expected metadata size.
520     metadata_size_ = manifest_offset + manifest_size_;
521     if (install_plan_->hash_checks_mandatory) {
522       if (install_plan_->metadata_size != metadata_size_) {
523         LOG(ERROR) << "Mandatory metadata size in Omaha response ("
524                    << install_plan_->metadata_size
525                    << ") is missing/incorrect, actual = " << metadata_size_;
526         *error = ErrorCode::kDownloadInvalidMetadataSize;
527         return kMetadataParseError;
528       }
529     }
530   }
531 
532   // Now that we have validated the metadata size, we should wait for the full
533   // metadata and its signature (if it exists) to be read in before parsing it.
534   if (payload.size() < metadata_size_ + metadata_signature_size_)
535     return kMetadataParseInsufficientData;
536 
537   // Log whether we validated the size or are simply trusting what's in the
538   // payload. This is logged here (after we received the full metadata) so
539   // that we just log once (instead of logging n times) if it takes n
540   // DeltaPerformer::Write calls to download the full manifest.
541   if (install_plan_->metadata_size == metadata_size_) {
542     LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
543   } else {
544     // For mandatory cases, we'd have already returned a kMetadataParseError
545     // above. We'll be here only for non-mandatory cases. Just send a UMA stat.
546     LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
547                  << install_plan_->metadata_size
548                  << ") in Omaha response as validation is not mandatory. "
549                  << "Trusting metadata size in payload = " << metadata_size_;
550   }
551 
552   // We have the full metadata in |payload|. Verify its integrity
553   // and authenticity based on the information we have in Omaha response.
554   *error = ValidateMetadataSignature(payload);
555   if (*error != ErrorCode::kSuccess) {
556     if (install_plan_->hash_checks_mandatory) {
557       // The autoupdate_CatchBadSignatures test checks for this string
558       // in log-files. Keep in sync.
559       LOG(ERROR) << "Mandatory metadata signature validation failed";
560       return kMetadataParseError;
561     }
562 
563     // For non-mandatory cases, just send a UMA stat.
564     LOG(WARNING) << "Ignoring metadata signature validation failures";
565     *error = ErrorCode::kSuccess;
566   }
567 
568   if (!GetManifestOffset(&manifest_offset)) {
569     *error = ErrorCode::kUnsupportedMajorPayloadVersion;
570     return kMetadataParseError;
571   }
572   // The payload metadata is deemed valid, so it's safe to parse the protobuf.
573   if (!manifest_.ParseFromArray(&payload[manifest_offset], manifest_size_)) {
574     LOG(ERROR) << "Unable to parse manifest in update file.";
575     *error = ErrorCode::kDownloadManifestParseError;
576     return kMetadataParseError;
577   }
578 
579   manifest_parsed_ = true;
580   return kMetadataParseSuccess;
581 }
582 
583 // Wrapper around write. Returns true if all requested bytes
584 // were written, or false on any error, regardless of progress,
585 // and stores an action exit code in |error|.
586 bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode *error) {
587   *error = ErrorCode::kSuccess;
588 
589   const char* c_bytes = reinterpret_cast<const char*>(bytes);
590 
591   // Update the total byte downloaded count and the progress logs.
592   total_bytes_received_ += count;
593   UpdateOverallProgress(false, "Completed ");
594 
595   while (!manifest_valid_) {
596     // Read data up to the needed limit; this is either the maximum payload header
597     // size, or the full metadata size (once it becomes known).
598     const bool do_read_header = !IsHeaderParsed();
599     CopyDataToBuffer(&c_bytes, &count,
600                      (do_read_header ? kMaxPayloadHeaderSize :
601                       metadata_size_ + metadata_signature_size_));
602 
603     MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
604     if (result == kMetadataParseError)
605       return false;
606     if (result == kMetadataParseInsufficientData) {
607       // If we just processed the header, make an attempt on the manifest.
608       if (do_read_header && IsHeaderParsed())
609         continue;
610 
611       return true;
612     }
613 
614     // Checks the integrity of the payload manifest.
615     if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
616       return false;
617     manifest_valid_ = true;
618 
619     // Clear the download buffer.
620     DiscardBuffer(false, metadata_size_);
621 
622     // This populates |partitions_| and the |install_plan.partitions| with the
623     // list of partitions from the manifest.
624     if (!ParseManifestPartitions(error))
625       return false;
626 
627     num_total_operations_ = 0;
628     for (const auto& partition : partitions_) {
629       num_total_operations_ += partition.operations_size();
630       acc_num_operations_.push_back(num_total_operations_);
631     }
632 
633     LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestMetadataSize,
634                                       metadata_size_))
635         << "Unable to save the manifest metadata size.";
636     LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestSignatureSize,
637                                       metadata_signature_size_))
638         << "Unable to save the manifest signature size.";
639 
640     if (!PrimeUpdateState()) {
641       *error = ErrorCode::kDownloadStateInitializationError;
642       LOG(ERROR) << "Unable to prime the update state.";
643       return false;
644     }
645 
646     if (!OpenCurrentPartition()) {
647       *error = ErrorCode::kInstallDeviceOpenError;
648       return false;
649     }
650 
651     if (next_operation_num_ > 0)
652       UpdateOverallProgress(true, "Resuming after ");
653     LOG(INFO) << "Starting to apply update payload operations";
654   }
655 
656   while (next_operation_num_ < num_total_operations_) {
657     // Check if we should cancel the current attempt for any reason.
658     // In this case, *error will have already been populated with the reason
659     // why we're canceling.
660     if (download_delegate_ && download_delegate_->ShouldCancel(error))
661       return false;
662 
663     // We know there are more operations to perform because we didn't reach the
664     // |num_total_operations_| limit yet.
665     while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
666       CloseCurrentPartition();
667       current_partition_++;
668       if (!OpenCurrentPartition()) {
669         *error = ErrorCode::kInstallDeviceOpenError;
670         return false;
671       }
672     }
673     const size_t partition_operation_num = next_operation_num_ - (
674         current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
675 
676     const InstallOperation& op =
677         partitions_[current_partition_].operations(partition_operation_num);
678 
679     CopyDataToBuffer(&c_bytes, &count, op.data_length());
680 
681     // Check whether we received all of the next operation's data payload.
682     if (!CanPerformInstallOperation(op))
683       return true;
684 
685     // Validate the operation only if the metadata signature is present.
686     // Otherwise, keep the old behavior. This serves as a knob to disable
687     // the validation logic in case we find some regression after rollout.
688     // NOTE: If hash checks are mandatory and if metadata_signature is empty,
689     // we would have already failed in ParsePayloadMetadata method and thus not
690     // even be here. So no need to handle that case again here.
691     if (!install_plan_->metadata_signature.empty()) {
692       // Note: Validate must be called only if CanPerformInstallOperation is
693       // called. Otherwise, we might be failing operations even before there
694       // is sufficient data to compute the proper hash.
695       *error = ValidateOperationHash(op);
696       if (*error != ErrorCode::kSuccess) {
697         if (install_plan_->hash_checks_mandatory) {
698           LOG(ERROR) << "Mandatory operation hash check failed";
699           return false;
700         }
701 
702         // For non-mandatory cases, just send a UMA stat.
703         LOG(WARNING) << "Ignoring operation validation errors";
704         *error = ErrorCode::kSuccess;
705       }
706     }
707 
708     // Makes sure we unblock exit when this operation completes.
709     ScopedTerminatorExitUnblocker exit_unblocker =
710         ScopedTerminatorExitUnblocker();  // Avoids a compiler unused var bug.
711 
712     bool op_result;
713     switch (op.type()) {
714       case InstallOperation::REPLACE:
715       case InstallOperation::REPLACE_BZ:
716       case InstallOperation::REPLACE_XZ:
717         op_result = PerformReplaceOperation(op);
718         break;
719       case InstallOperation::ZERO:
720       case InstallOperation::DISCARD:
721         op_result = PerformZeroOrDiscardOperation(op);
722         break;
723       case InstallOperation::MOVE:
724         op_result = PerformMoveOperation(op);
725         break;
726       case InstallOperation::BSDIFF:
727         op_result = PerformBsdiffOperation(op);
728         break;
729       case InstallOperation::SOURCE_COPY:
730         op_result = PerformSourceCopyOperation(op, error);
731         break;
732       case InstallOperation::SOURCE_BSDIFF:
733         op_result = PerformSourceBsdiffOperation(op, error);
734         break;
735       case InstallOperation::IMGDIFF:
736         op_result = PerformImgdiffOperation(op, error);
737         break;
738       default:
739         op_result = false;
740     }
741     if (!HandleOpResult(op_result, InstallOperationTypeName(op.type()), error))
742       return false;
743 
744     next_operation_num_++;
745     UpdateOverallProgress(false, "Completed ");
746     CheckpointUpdateProgress();
747   }
748 
749   // In major version 2, we don't add a dummy operation to the payload.
750   // If we already extracted the signature, we should skip this step.
751   if (major_payload_version_ == kBrilloMajorPayloadVersion &&
752       manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
753       signatures_message_data_.empty()) {
754     if (manifest_.signatures_offset() != buffer_offset_) {
755       LOG(ERROR) << "Payload signatures offset points to blob offset "
756                  << manifest_.signatures_offset()
757                  << " but signatures are expected at offset "
758                  << buffer_offset_;
759       *error = ErrorCode::kDownloadPayloadVerificationError;
760       return false;
761     }
762     CopyDataToBuffer(&c_bytes, &count, manifest_.signatures_size());
763     // Needs more data to cover entire signature.
764     if (buffer_.size() < manifest_.signatures_size())
765       return true;
766     if (!ExtractSignatureMessage()) {
767       LOG(ERROR) << "Extract payload signature failed.";
768       *error = ErrorCode::kDownloadPayloadVerificationError;
769       return false;
770     }
771     DiscardBuffer(true, 0);
772     // Since we extracted the SignatureMessage we need to advance the
773     // checkpoint, otherwise we would reload the signature and try to extract
774     // it again.
775     CheckpointUpdateProgress();
776   }
777 
778   return true;
779 }
780 
781 bool DeltaPerformer::IsManifestValid() {
782   return manifest_valid_;
783 }
784 
785 bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
786   if (major_payload_version_ == kBrilloMajorPayloadVersion) {
787     partitions_.clear();
788     for (const PartitionUpdate& partition : manifest_.partitions()) {
789       partitions_.push_back(partition);
790     }
791     manifest_.clear_partitions();
792   } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
793     LOG(INFO) << "Converting update information from old format.";
794     PartitionUpdate root_part;
795     root_part.set_partition_name(kLegacyPartitionNameRoot);
796 #ifdef __ANDROID__
797     LOG(WARNING) << "Legacy payload major version provided to an Android "
798                     "build. Assuming no post-install. Please use major version "
799                     "2 or newer.";
800     root_part.set_run_postinstall(false);
801 #else
802     root_part.set_run_postinstall(true);
803 #endif  // __ANDROID__
804     if (manifest_.has_old_rootfs_info()) {
805       *root_part.mutable_old_partition_info() = manifest_.old_rootfs_info();
806       manifest_.clear_old_rootfs_info();
807     }
808     if (manifest_.has_new_rootfs_info()) {
809       *root_part.mutable_new_partition_info() = manifest_.new_rootfs_info();
810       manifest_.clear_new_rootfs_info();
811     }
812     *root_part.mutable_operations() = manifest_.install_operations();
813     manifest_.clear_install_operations();
814     partitions_.push_back(std::move(root_part));
815 
816     PartitionUpdate kern_part;
817     kern_part.set_partition_name(kLegacyPartitionNameKernel);
818     kern_part.set_run_postinstall(false);
819     if (manifest_.has_old_kernel_info()) {
820       *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info();
821       manifest_.clear_old_kernel_info();
822     }
823     if (manifest_.has_new_kernel_info()) {
824       *kern_part.mutable_new_partition_info() = manifest_.new_kernel_info();
825       manifest_.clear_new_kernel_info();
826     }
827     *kern_part.mutable_operations() = manifest_.kernel_install_operations();
828     manifest_.clear_kernel_install_operations();
829     partitions_.push_back(std::move(kern_part));
830   }
831 
832   // Fill in the InstallPlan::partitions based on the partitions from the
833   // payload.
834   install_plan_->partitions.clear();
835   for (const auto& partition : partitions_) {
836     InstallPlan::Partition install_part;
837     install_part.name = partition.partition_name();
838     install_part.run_postinstall =
839         partition.has_run_postinstall() && partition.run_postinstall();
840     if (install_part.run_postinstall) {
841       install_part.postinstall_path =
842           (partition.has_postinstall_path() ? partition.postinstall_path()
843                                             : kPostinstallDefaultScript);
844       install_part.filesystem_type = partition.filesystem_type();
845       install_part.postinstall_optional = partition.postinstall_optional();
846     }
847 
848     if (partition.has_old_partition_info()) {
849       const PartitionInfo& info = partition.old_partition_info();
850       install_part.source_size = info.size();
851       install_part.source_hash.assign(info.hash().begin(), info.hash().end());
852     }
853 
854     if (!partition.has_new_partition_info()) {
855       LOG(ERROR) << "Unable to get new partition hash info on partition "
856                  << install_part.name << ".";
857       *error = ErrorCode::kDownloadNewPartitionInfoError;
858       return false;
859     }
860     const PartitionInfo& info = partition.new_partition_info();
861     install_part.target_size = info.size();
862     install_part.target_hash.assign(info.hash().begin(), info.hash().end());
863 
864     install_plan_->partitions.push_back(install_part);
865   }
866 
867   if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
868     LOG(ERROR) << "Unable to determine all the partition devices.";
869     *error = ErrorCode::kInstallDeviceOpenError;
870     return false;
871   }
872   LogPartitionInfo(partitions_);
873   return true;
874 }
875 
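// Returns whether the download buffer already holds the complete data blob
// needed by |operation|; operations without a data blob can always be
// performed.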
876 bool DeltaPerformer::CanPerformInstallOperation(
877     const chromeos_update_engine::InstallOperation& operation) {
878   // If we don't have a data blob we can apply it right away.
879   if (!operation.has_data_offset() && !operation.has_data_length())
880     return true;
881 
882   // See if we have the entire data blob in the buffer
883   if (operation.data_offset() < buffer_offset_) {
884     LOG(ERROR) << "we threw away data it seems?";
885     return false;
886   }
887 
888   return (operation.data_offset() + operation.data_length() <=
889           buffer_offset_ + buffer_.size());
890 }
891 
892 bool DeltaPerformer::PerformReplaceOperation(
893     const InstallOperation& operation) {
894   CHECK(operation.type() == InstallOperation::REPLACE ||
895         operation.type() == InstallOperation::REPLACE_BZ ||
896         operation.type() == InstallOperation::REPLACE_XZ);
897 
898   // Since we delete data off the beginning of the buffer as we use it,
899   // the data we need should be exactly at the beginning of the buffer.
900   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
901   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
902 
903   // Extract the signature message if it's in this operation.
904   if (ExtractSignatureMessageFromOperation(operation)) {
905     // If this is a dummy replace operation, we ignore it after extracting the
906     // signature.
907     DiscardBuffer(true, 0);
908     return true;
909   }
910 
911   // Setup the ExtentWriter stack based on the operation type.
912   std::unique_ptr<ExtentWriter> writer =
913     brillo::make_unique_ptr(new ZeroPadExtentWriter(
914       brillo::make_unique_ptr(new DirectExtentWriter())));
915 
916   if (operation.type() == InstallOperation::REPLACE_BZ) {
917     writer.reset(new BzipExtentWriter(std::move(writer)));
918   } else if (operation.type() == InstallOperation::REPLACE_XZ) {
919     writer.reset(new XzExtentWriter(std::move(writer)));
920   }
921 
922   // Create a vector of extents to pass to the ExtentWriter.
923   vector<Extent> extents;
924   for (int i = 0; i < operation.dst_extents_size(); i++) {
925     extents.push_back(operation.dst_extents(i));
926   }
927 
928   TEST_AND_RETURN_FALSE(writer->Init(target_fd_, extents, block_size_));
929   TEST_AND_RETURN_FALSE(writer->Write(buffer_.data(), operation.data_length()));
930   TEST_AND_RETURN_FALSE(writer->End());
931 
932   // Update buffer
933   DiscardBuffer(true, buffer_.size());
934   return true;
935 }
936 
937 bool DeltaPerformer::PerformZeroOrDiscardOperation(
938     const InstallOperation& operation) {
939   CHECK(operation.type() == InstallOperation::DISCARD ||
940         operation.type() == InstallOperation::ZERO);
941 
942   // These operations have no blob.
943   TEST_AND_RETURN_FALSE(!operation.has_data_offset());
944   TEST_AND_RETURN_FALSE(!operation.has_data_length());
945 
946 #ifdef BLKZEROOUT
947   bool attempt_ioctl = true;
948   int request =
949       (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD);
950 #else  // !defined(BLKZEROOUT)
951   bool attempt_ioctl = false;
952   int request = 0;
953 #endif  // !defined(BLKZEROOUT)
954 
955   brillo::Blob zeros;
956   for (const Extent& extent : operation.dst_extents()) {
957     const uint64_t start = extent.start_block() * block_size_;
958     const uint64_t length = extent.num_blocks() * block_size_;
959     if (attempt_ioctl) {
960       int result = 0;
961       if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0)
962         continue;
963       attempt_ioctl = false;
964       zeros.resize(16 * block_size_);
965     }
966     // In case of failure, we fall back to writing 0 to the selected region.
967     for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
968       uint64_t chunk_length = min(length - offset,
969                                   static_cast<uint64_t>(zeros.size()));
970       TEST_AND_RETURN_FALSE(
971           utils::PWriteAll(target_fd_, zeros.data(), chunk_length, start + offset));
972     }
973   }
974   return true;
975 }
976 
977 bool DeltaPerformer::PerformMoveOperation(const InstallOperation& operation) {
978   // Calculate buffer size. Note, this function doesn't do a sliding
979   // window to copy in case the source and destination blocks overlap.
980   // If we wanted to do a sliding window, we could program the server
981   // to generate deltas that effectively did a sliding window.
982 
983   uint64_t blocks_to_read = 0;
984   for (int i = 0; i < operation.src_extents_size(); i++)
985     blocks_to_read += operation.src_extents(i).num_blocks();
986 
987   uint64_t blocks_to_write = 0;
988   for (int i = 0; i < operation.dst_extents_size(); i++)
989     blocks_to_write += operation.dst_extents(i).num_blocks();
990 
991   DCHECK_EQ(blocks_to_write, blocks_to_read);
992   brillo::Blob buf(blocks_to_write * block_size_);
993 
994   // Read in bytes.
995   ssize_t bytes_read = 0;
996   for (int i = 0; i < operation.src_extents_size(); i++) {
997     ssize_t bytes_read_this_iteration = 0;
998     const Extent& extent = operation.src_extents(i);
999     const size_t bytes = extent.num_blocks() * block_size_;
1000     TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
1001     TEST_AND_RETURN_FALSE(utils::PReadAll(target_fd_,
1002                                           &buf[bytes_read],
1003                                           bytes,
1004                                           extent.start_block() * block_size_,
1005                                           &bytes_read_this_iteration));
1006     TEST_AND_RETURN_FALSE(
1007         bytes_read_this_iteration == static_cast<ssize_t>(bytes));
1008     bytes_read += bytes_read_this_iteration;
1009   }
1010 
1011   // Write bytes out.
1012   ssize_t bytes_written = 0;
1013   for (int i = 0; i < operation.dst_extents_size(); i++) {
1014     const Extent& extent = operation.dst_extents(i);
1015     const size_t bytes = extent.num_blocks() * block_size_;
1016     TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
1017     TEST_AND_RETURN_FALSE(utils::PWriteAll(target_fd_,
1018                                            &buf[bytes_written],
1019                                            bytes,
1020                                            extent.start_block() * block_size_));
1021     bytes_written += bytes;
1022   }
1023   DCHECK_EQ(bytes_written, bytes_read);
1024   DCHECK_EQ(bytes_written, static_cast<ssize_t>(buf.size()));
1025   return true;
1026 }
1027 
1028 namespace {
1029 
1030 // Takes |extents| and fills an empty vector |blocks| with a block index for
1031 // each block in |extents|. For example, [(3, 2), (8, 1)] would give [3, 4, 8].
1032 void ExtentsToBlocks(const RepeatedPtrField<Extent>& extents,
1033                      vector<uint64_t>* blocks) {
1034   for (const Extent& ext : extents) {
1035     for (uint64_t j = 0; j < ext.num_blocks(); j++)
1036       blocks->push_back(ext.start_block() + j);
1037   }
1038 }
1039 
1040 // Takes |extents| and returns the number of blocks in those extents.
1041 uint64_t GetBlockCount(const RepeatedPtrField<Extent>& extents) {
1042   uint64_t sum = 0;
1043   for (const Extent& ext : extents) {
1044     sum += ext.num_blocks();
1045   }
1046   return sum;
1047 }
1048 
1049 // Compare |calculated_hash| with the source hash in |operation|; return false,
1050 // dump both hashes, and set |error| if they don't match.
1051 bool ValidateSourceHash(const brillo::Blob& calculated_hash,
1052                         const InstallOperation& operation,
1053                         ErrorCode* error) {
1054   brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
1055                                     operation.src_sha256_hash().end());
1056   if (calculated_hash != expected_source_hash) {
1057     LOG(ERROR) << "The hash of the source data on disk for this operation "
1058                << "doesn't match the expected value. This could mean that the "
1059                << "delta update payload was targeted for another version, or "
1060                << "that the source partition was modified after it was "
1061                << "installed, for example, by mounting a filesystem.";
1062     LOG(ERROR) << "Expected:   sha256|hex = "
1063                << base::HexEncode(expected_source_hash.data(),
1064                                   expected_source_hash.size());
1065     LOG(ERROR) << "Calculated: sha256|hex = "
1066                << base::HexEncode(calculated_hash.data(),
1067                                   calculated_hash.size());
1068 
1069     vector<string> source_extents;
1070     for (const Extent& ext : operation.src_extents()) {
1071       source_extents.push_back(
1072           base::StringPrintf("%" PRIu64 ":%" PRIu64,
1073                              static_cast<uint64_t>(ext.start_block()),
1074                              static_cast<uint64_t>(ext.num_blocks())));
1075     }
1076     LOG(ERROR) << "Operation source (offset:size) in blocks: "
1077                << base::JoinString(source_extents, ",");
1078 
1079     *error = ErrorCode::kDownloadStateInitializationError;
1080     return false;
1081   }
1082   return true;
1083 }
1084 
1085 }  // namespace
1086 
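// Copies the blocks listed in the operation's source extents to its
// destination extents, one block at a time, verifying the source hash when
// the operation provides one.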
1087 bool DeltaPerformer::PerformSourceCopyOperation(
1088     const InstallOperation& operation, ErrorCode* error) {
1089   if (operation.has_src_length())
1090     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
1091   if (operation.has_dst_length())
1092     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
1093 
1094   uint64_t blocks_to_read = GetBlockCount(operation.src_extents());
1095   uint64_t blocks_to_write = GetBlockCount(operation.dst_extents());
1096   TEST_AND_RETURN_FALSE(blocks_to_write == blocks_to_read);
1097 
1098   // Create vectors of all the individual src/dst blocks.
1099   vector<uint64_t> src_blocks;
1100   vector<uint64_t> dst_blocks;
1101   ExtentsToBlocks(operation.src_extents(), &src_blocks);
1102   ExtentsToBlocks(operation.dst_extents(), &dst_blocks);
1103   DCHECK_EQ(src_blocks.size(), blocks_to_read);
1104   DCHECK_EQ(src_blocks.size(), dst_blocks.size());
1105 
1106   brillo::Blob buf(block_size_);
1107   ssize_t bytes_read = 0;
1108   HashCalculator source_hasher;
1109   // Read/write one block at a time.
1110   for (uint64_t i = 0; i < blocks_to_read; i++) {
1111     ssize_t bytes_read_this_iteration = 0;
1112     uint64_t src_block = src_blocks[i];
1113     uint64_t dst_block = dst_blocks[i];
1114 
1115     // Read in bytes.
1116     TEST_AND_RETURN_FALSE(
1117         utils::PReadAll(source_fd_,
1118                         buf.data(),
1119                         block_size_,
1120                         src_block * block_size_,
1121                         &bytes_read_this_iteration));
1122 
1123     // Write bytes out.
1124     TEST_AND_RETURN_FALSE(
1125         utils::PWriteAll(target_fd_,
1126                          buf.data(),
1127                          block_size_,
1128                          dst_block * block_size_));
1129 
1130     bytes_read += bytes_read_this_iteration;
1131     TEST_AND_RETURN_FALSE(bytes_read_this_iteration ==
1132                           static_cast<ssize_t>(block_size_));
1133 
1134     if (operation.has_src_sha256_hash())
1135       TEST_AND_RETURN_FALSE(source_hasher.Update(buf.data(), buf.size()));
1136   }
1137 
1138   if (operation.has_src_sha256_hash()) {
1139     TEST_AND_RETURN_FALSE(source_hasher.Finalize());
1140     TEST_AND_RETURN_FALSE(
1141         ValidateSourceHash(source_hasher.raw_hash(), operation, error));
1142   }
1143 
1144   DCHECK_EQ(bytes_read, static_cast<ssize_t>(blocks_to_read * block_size_));
1145   return true;
1146 }
1147 
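// Serializes |extents| into the "offset:length,offset:length,..." byte-range
// form passed to bspatch, truncating the total length to |full_length|. For
// example, extents [(1, 2), (5, 1)] with a 4096-byte block size and a
// full_length of 12288 yield "4096:8192,20480:4096".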
1148 bool DeltaPerformer::ExtentsToBsdiffPositionsString(
1149     const RepeatedPtrField<Extent>& extents,
1150     uint64_t block_size,
1151     uint64_t full_length,
1152     string* positions_string) {
1153   string ret;
1154   uint64_t length = 0;
1155   for (const Extent& extent : extents) {
1156     int64_t start = extent.start_block() * block_size;
1157     uint64_t this_length =
1158         min(full_length - length,
1159             static_cast<uint64_t>(extent.num_blocks()) * block_size);
1160     ret += base::StringPrintf("%" PRIi64 ":%" PRIu64 ",", start, this_length);
1161     length += this_length;
1162   }
1163   TEST_AND_RETURN_FALSE(length == full_length);
1164   if (!ret.empty())
1165     ret.resize(ret.size() - 1);  // Strip trailing comma off
1166   *positions_string = ret;
1167   return true;
1168 }
1169 
1170 bool DeltaPerformer::PerformBsdiffOperation(const InstallOperation& operation) {
1171   // Since we delete data off the beginning of the buffer as we use it,
1172   // the data we need should be exactly at the beginning of the buffer.
1173   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
1174   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
1175 
1176   string input_positions;
1177   TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
1178                                                        block_size_,
1179                                                        operation.src_length(),
1180                                                        &input_positions));
1181   string output_positions;
1182   TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
1183                                                        block_size_,
1184                                                        operation.dst_length(),
1185                                                        &output_positions));
1186 
1187   TEST_AND_RETURN_FALSE(bsdiff::bspatch(target_path_.c_str(),
1188                                         target_path_.c_str(),
1189                                         buffer_.data(),
1190                                         buffer_.size(),
1191                                         input_positions.c_str(),
1192                                         output_positions.c_str()) == 0);
1193   DiscardBuffer(true, buffer_.size());
1194 
1195   if (operation.dst_length() % block_size_) {
1196     // Zero out rest of final block.
1197     // TODO(adlr): build this into bspatch; it's more efficient that way.
1198     const Extent& last_extent =
1199         operation.dst_extents(operation.dst_extents_size() - 1);
1200     const uint64_t end_byte =
1201         (last_extent.start_block() + last_extent.num_blocks()) * block_size_;
1202     const uint64_t begin_byte =
1203         end_byte - (block_size_ - operation.dst_length() % block_size_);
1204     brillo::Blob zeros(end_byte - begin_byte);
1205     TEST_AND_RETURN_FALSE(
1206         utils::PWriteAll(target_fd_, zeros.data(), end_byte - begin_byte, begin_byte));
1207   }
1208   return true;
1209 }
1210 
1211 bool DeltaPerformer::PerformSourceBsdiffOperation(
1212     const InstallOperation& operation, ErrorCode* error) {
1213   // Since we delete data off the beginning of the buffer as we use it,
1214   // the data we need should be exactly at the beginning of the buffer.
1215   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
1216   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
1217   if (operation.has_src_length())
1218     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
1219   if (operation.has_dst_length())
1220     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
1221 
1222   if (operation.has_src_sha256_hash()) {
1223     HashCalculator source_hasher;
1224     const uint64_t kMaxBlocksToRead = 512;  // 2MB if block size is 4KB
1225     brillo::Blob buf(kMaxBlocksToRead * block_size_);
1226     for (const Extent& extent : operation.src_extents()) {
1227       for (uint64_t i = 0; i < extent.num_blocks(); i += kMaxBlocksToRead) {
1228         uint64_t blocks_to_read = min(
1229             kMaxBlocksToRead, static_cast<uint64_t>(extent.num_blocks()) - i);
1230         ssize_t bytes_to_read = blocks_to_read * block_size_;
1231         ssize_t bytes_read_this_iteration = 0;
1232         TEST_AND_RETURN_FALSE(
1233             utils::PReadAll(source_fd_, buf.data(), bytes_to_read,
1234                             (extent.start_block() + i) * block_size_,
1235                             &bytes_read_this_iteration));
1236         TEST_AND_RETURN_FALSE(bytes_read_this_iteration == bytes_to_read);
1237         TEST_AND_RETURN_FALSE(source_hasher.Update(buf.data(), bytes_to_read));
1238       }
1239     }
1240     TEST_AND_RETURN_FALSE(source_hasher.Finalize());
1241     TEST_AND_RETURN_FALSE(
1242         ValidateSourceHash(source_hasher.raw_hash(), operation, error));
1243   }
1244 
1245   string input_positions;
1246   TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
1247                                                        block_size_,
1248                                                        operation.src_length(),
1249                                                        &input_positions));
1250   string output_positions;
1251   TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
1252                                                        block_size_,
1253                                                        operation.dst_length(),
1254                                                        &output_positions));
1255 
1256   TEST_AND_RETURN_FALSE(bsdiff::bspatch(source_path_.c_str(),
1257                                         target_path_.c_str(),
1258                                         buffer_.data(),
1259                                         buffer_.size(),
1260                                         input_positions.c_str(),
1261                                         output_positions.c_str()) == 0);
1262   DiscardBuffer(true, buffer_.size());
1263   return true;
1264 }
1265 
1266 bool DeltaPerformer::PerformImgdiffOperation(const InstallOperation& operation,
1267                                              ErrorCode* error) {
1268   // Since we delete data off the beginning of the buffer as we use it,
1269   // the data we need should be exactly at the beginning of the buffer.
1270   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
1271   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
1272 
1273   uint64_t src_blocks = GetBlockCount(operation.src_extents());
1274   brillo::Blob src_data(src_blocks * block_size_);
1275 
1276   ssize_t bytes_read = 0;
1277   for (const Extent& extent : operation.src_extents()) {
1278     ssize_t bytes_read_this_iteration = 0;
1279     ssize_t bytes_to_read = extent.num_blocks() * block_size_;
1280     TEST_AND_RETURN_FALSE(utils::PReadAll(source_fd_,
1281                                           &src_data[bytes_read],
1282                                           bytes_to_read,
1283                                           extent.start_block() * block_size_,
1284                                           &bytes_read_this_iteration));
1285     TEST_AND_RETURN_FALSE(bytes_read_this_iteration == bytes_to_read);
1286     bytes_read += bytes_read_this_iteration;
1287   }
1288 
1289   if (operation.has_src_sha256_hash()) {
1290     brillo::Blob src_hash;
1291     TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfData(src_data, &src_hash));
1292     TEST_AND_RETURN_FALSE(ValidateSourceHash(src_hash, operation, error));
1293   }
1294 
1295   vector<Extent> target_extents(operation.dst_extents().begin(),
1296                                 operation.dst_extents().end());
1297   DirectExtentWriter writer;
1298   TEST_AND_RETURN_FALSE(writer.Init(target_fd_, target_extents, block_size_));
1299   TEST_AND_RETURN_FALSE(
1300       ApplyImagePatch(src_data.data(),
1301                       src_data.size(),
1302                       buffer_.data(),
1303                       operation.data_length(),
1304                       [](const unsigned char* data, ssize_t len, void* token) {
1305                         return reinterpret_cast<ExtentWriter*>(token)
1306                                        ->Write(data, len)
1307                                    ? len
1308                                    : 0;
1309                       },
1310                       &writer) == 0);
1311   TEST_AND_RETURN_FALSE(writer.End());
1312 
1313   DiscardBuffer(true, buffer_.size());
1314   return true;
1315 }
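
// The lambda handed to ApplyImagePatch above adapts ExtentWriter to imgpatch's
// sink callback: imgpatch emits the patched image in chunks and expects the
// sink to return the number of bytes it accepted. A sketch of the contract
// (the name Sink is illustrative only):
//
//   ssize_t Sink(const unsigned char* data, ssize_t len, void* token) {
//     ExtentWriter* writer = reinterpret_cast<ExtentWriter*>(token);
//     return writer->Write(data, len) ? len : 0;  // 0 aborts the patch.
//   }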
1316 
1317 bool DeltaPerformer::ExtractSignatureMessageFromOperation(
1318     const InstallOperation& operation) {
1319   if (operation.type() != InstallOperation::REPLACE ||
1320       !manifest_.has_signatures_offset() ||
1321       manifest_.signatures_offset() != operation.data_offset()) {
1322     return false;
1323   }
1324   TEST_AND_RETURN_FALSE(manifest_.has_signatures_size() &&
1325                         manifest_.signatures_size() == operation.data_length());
1326   TEST_AND_RETURN_FALSE(ExtractSignatureMessage());
1327   return true;
1328 }
1329 
1330 bool DeltaPerformer::ExtractSignatureMessage() {
1331   TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
1332   TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
1333   TEST_AND_RETURN_FALSE(buffer_.size() >= manifest_.signatures_size());
1334   signatures_message_data_.assign(
1335       buffer_.begin(),
1336       buffer_.begin() + manifest_.signatures_size());
1337 
1338   // Save the signature blob because if the update is interrupted after the
1339   // download phase we don't go through this path anymore. Some alternatives to
1340   // consider:
1341   //
1342   // 1. On resume, re-download the signature blob from the server and re-verify
1343   // it.
1344   //
1345   // 2. Verify the signature as soon as it's received and don't checkpoint the
1346   // blob and the signed sha-256 context.
1347   LOG_IF(WARNING, !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
1348                                      string(signatures_message_data_.begin(),
1349                                             signatures_message_data_.end())))
1350       << "Unable to store the signature blob.";
1351 
1352   LOG(INFO) << "Extracted signature data of size "
1353             << manifest_.signatures_size() << " at "
1354             << manifest_.signatures_offset();
1355   return true;
1356 }
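
// Note on resume: the blob persisted under kPrefsUpdateStateSignatureBlob here
// is read back in PrimeUpdateState() below, so an update interrupted after the
// signature operation can still verify the payload signature without
// re-downloading it (e.g. a ~256-byte signatures message extracted at the end
// of the payload is saved once and restored on every subsequent resume).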
1357 
1358 bool DeltaPerformer::GetPublicKeyFromResponse(base::FilePath *out_tmp_key) {
1359   if (hardware_->IsOfficialBuild() ||
1360       utils::FileExists(public_key_path_.c_str()) ||
1361       install_plan_->public_key_rsa.empty())
1362     return false;
1363 
1364   if (!utils::DecodeAndStoreBase64String(install_plan_->public_key_rsa,
1365                                          out_tmp_key))
1366     return false;
1367 
1368   return true;
1369 }
1370 
1371 ErrorCode DeltaPerformer::ValidateMetadataSignature(
1372     const brillo::Blob& payload) {
1373   if (payload.size() < metadata_size_ + metadata_signature_size_)
1374     return ErrorCode::kDownloadMetadataSignatureError;
1375 
1376   brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
1377   if (!install_plan_->metadata_signature.empty()) {
1378     // Convert base64-encoded signature to raw bytes.
1379     if (!brillo::data_encoding::Base64Decode(
1380         install_plan_->metadata_signature, &metadata_signature_blob)) {
1381       LOG(ERROR) << "Unable to decode base64 metadata signature: "
1382                  << install_plan_->metadata_signature;
1383       return ErrorCode::kDownloadMetadataSignatureError;
1384     }
1385   } else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
1386     metadata_signature_protobuf_blob.assign(payload.begin() + metadata_size_,
1387                                             payload.begin() + metadata_size_ +
1388                                             metadata_signature_size_);
1389   }
1390 
1391   if (metadata_signature_blob.empty() &&
1392       metadata_signature_protobuf_blob.empty()) {
1393     if (install_plan_->hash_checks_mandatory) {
1394       LOG(ERROR) << "Missing mandatory metadata signature in both Omaha "
1395                  << "response and payload.";
1396       return ErrorCode::kDownloadMetadataSignatureMissingError;
1397     }
1398 
1399     LOG(WARNING) << "Cannot validate metadata as the signature is empty";
1400     return ErrorCode::kSuccess;
1401   }
1402 
1403   // See if we should use the public RSA key in the Omaha response.
1404   base::FilePath path_to_public_key(public_key_path_);
1405   base::FilePath tmp_key;
1406   if (GetPublicKeyFromResponse(&tmp_key))
1407     path_to_public_key = tmp_key;
1408   ScopedPathUnlinker tmp_key_remover(tmp_key.value());
1409   if (tmp_key.empty())
1410     tmp_key_remover.set_should_remove(false);
1411 
1412   LOG(INFO) << "Verifying metadata hash signature using public key: "
1413             << path_to_public_key.value();
1414 
1415   HashCalculator metadata_hasher;
1416   metadata_hasher.Update(payload.data(), metadata_size_);
1417   if (!metadata_hasher.Finalize()) {
1418     LOG(ERROR) << "Unable to compute actual hash of manifest";
1419     return ErrorCode::kDownloadMetadataSignatureVerificationError;
1420   }
1421 
1422   brillo::Blob calculated_metadata_hash = metadata_hasher.raw_hash();
1423   PayloadVerifier::PadRSA2048SHA256Hash(&calculated_metadata_hash);
1424   if (calculated_metadata_hash.empty()) {
1425     LOG(ERROR) << "Computed actual hash of metadata is empty.";
1426     return ErrorCode::kDownloadMetadataSignatureVerificationError;
1427   }
1428 
1429   if (!metadata_signature_blob.empty()) {
1430     brillo::Blob expected_metadata_hash;
1431     if (!PayloadVerifier::GetRawHashFromSignature(metadata_signature_blob,
1432                                                   path_to_public_key.value(),
1433                                                   &expected_metadata_hash)) {
1434       LOG(ERROR) << "Unable to compute expected hash from metadata signature";
1435       return ErrorCode::kDownloadMetadataSignatureError;
1436     }
1437     if (calculated_metadata_hash != expected_metadata_hash) {
1438       LOG(ERROR) << "Manifest hash verification failed. Expected hash = ";
1439       utils::HexDumpVector(expected_metadata_hash);
1440       LOG(ERROR) << "Calculated hash = ";
1441       utils::HexDumpVector(calculated_metadata_hash);
1442       return ErrorCode::kDownloadMetadataSignatureMismatch;
1443     }
1444   } else {
1445     if (!PayloadVerifier::VerifySignature(metadata_signature_protobuf_blob,
1446                                           path_to_public_key.value(),
1447                                           calculated_metadata_hash)) {
1448       LOG(ERROR) << "Manifest hash verification failed.";
1449       return ErrorCode::kDownloadMetadataSignatureMismatch;
1450     }
1451   }
1452 
1453   // The autoupdate_CatchBadSignatures test checks for this string in
1454   // log-files. Keep in sync.
1455   LOG(INFO) << "Metadata hash signature matches value in Omaha response.";
1456   return ErrorCode::kSuccess;
1457 }
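
// Summary of the two verification paths above (a reading aid, not new
// behavior): when the Omaha response carries a base64 signature, the raw hash
// recovered from it via GetRawHashFromSignature() is compared against the
// locally computed, padded SHA-256 of the first metadata_size_ bytes;
// otherwise, for Brillo (major version 2) payloads, the signature protobuf
// that immediately follows the manifest is checked with
// PayloadVerifier::VerifySignature() against the same padded hash.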
1458 
1459 ErrorCode DeltaPerformer::ValidateManifest() {
1460   // Perform assorted checks to sanity check the manifest: make sure it
1461   // matches data from other sources and that it is a supported version.
1462 
1463   bool has_old_fields =
1464       (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info());
1465   for (const PartitionUpdate& partition : manifest_.partitions()) {
1466     has_old_fields = has_old_fields || partition.has_old_partition_info();
1467   }
1468 
1469   // The presence of an old partition hash is the sole indicator for a delta
1470   // update.
1471   InstallPayloadType actual_payload_type =
1472       has_old_fields ? InstallPayloadType::kDelta : InstallPayloadType::kFull;
1473 
1474   if (install_plan_->payload_type == InstallPayloadType::kUnknown) {
1475     LOG(INFO) << "Detected a '"
1476               << InstallPayloadTypeToString(actual_payload_type)
1477               << "' payload.";
1478     install_plan_->payload_type = actual_payload_type;
1479   } else if (install_plan_->payload_type != actual_payload_type) {
1480     LOG(ERROR) << "InstallPlan expected a '"
1481                << InstallPayloadTypeToString(install_plan_->payload_type)
1482                << "' payload but the downloaded manifest contains a '"
1483                << InstallPayloadTypeToString(actual_payload_type)
1484                << "' payload.";
1485     return ErrorCode::kPayloadMismatchedType;
1486   }
1487 
1488   // Check that the minor version is compatible.
1489   if (actual_payload_type == InstallPayloadType::kFull) {
1490     if (manifest_.minor_version() != kFullPayloadMinorVersion) {
1491       LOG(ERROR) << "Manifest contains minor version "
1492                  << manifest_.minor_version()
1493                  << ", but all full payloads should have version "
1494                  << kFullPayloadMinorVersion << ".";
1495       return ErrorCode::kUnsupportedMinorPayloadVersion;
1496     }
1497   } else {
1498     if (manifest_.minor_version() != supported_minor_version_) {
1499       LOG(ERROR) << "Manifest contains minor version "
1500                  << manifest_.minor_version()
1501                  << " not the supported "
1502                  << supported_minor_version_;
1503       return ErrorCode::kUnsupportedMinorPayloadVersion;
1504     }
1505   }
1506 
1507   if (major_payload_version_ != kChromeOSMajorPayloadVersion) {
1508     if (manifest_.has_old_rootfs_info() ||
1509         manifest_.has_new_rootfs_info() ||
1510         manifest_.has_old_kernel_info() ||
1511         manifest_.has_new_kernel_info() ||
1512         manifest_.install_operations_size() != 0 ||
1513         manifest_.kernel_install_operations_size() != 0) {
1514       LOG(ERROR) << "Manifest contains deprecated field only supported in "
1515                  << "major payload version 1, but the payload major version is "
1516                  << major_payload_version_;
1517       return ErrorCode::kPayloadMismatchedType;
1518     }
1519   }
1520 
1521   // TODO(garnold) we should be adding more and more manifest checks, such as
1522   // partition boundaries etc (see chromium-os:37661).
1523 
1524   return ErrorCode::kSuccess;
1525 }
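
// Version gate example (values are illustrative): a full payload must report
// minor_version == kFullPayloadMinorVersion, while a delta payload must match
// supported_minor_version_ exactly. For instance, if supported_minor_version_
// is 3, a delta manifest claiming minor_version 2 is rejected with
// kUnsupportedMinorPayloadVersion even if older clients once supported it.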
1526 
1527 ErrorCode DeltaPerformer::ValidateOperationHash(
1528     const InstallOperation& operation) {
1529   if (!operation.data_sha256_hash().size()) {
1530     if (!operation.data_length()) {
1531       // Operations that do not have any data blob won't have any operation hash
1532       // either. So, these operations are always considered validated since the
1533       // metadata that contains all the non-data-blob portions of the operation
1534       // has already been validated. This is true for both HTTP and HTTPS cases.
1535       return ErrorCode::kSuccess;
1536     }
1537 
1538     // No hash is present for an operation that has data blobs. This shouldn't
1539     // happen normally for any client that has this code, because the
1540     // corresponding update should have been produced with the operation
1541     // hashes. So if it happens it means either we've turned operation hash
1542     // generation off in DeltaDiffGenerator or it's a regression of some sort.
1543     // One caveat though: The last operation is a dummy signature operation
1544     // that doesn't have a hash at the time the manifest is created. So we
1545     // should not complain about that operation. This operation can be
1546     // recognized by the fact that its offset is mentioned in the manifest.
1547     if (manifest_.signatures_offset() &&
1548         manifest_.signatures_offset() == operation.data_offset()) {
1549       LOG(INFO) << "Skipping hash verification for signature operation "
1550                 << next_operation_num_ + 1;
1551     } else {
1552       if (install_plan_->hash_checks_mandatory) {
1553         LOG(ERROR) << "Missing mandatory operation hash for operation "
1554                    << next_operation_num_ + 1;
1555         return ErrorCode::kDownloadOperationHashMissingError;
1556       }
1557 
1558       LOG(WARNING) << "Cannot validate operation " << next_operation_num_ + 1
1559                    << " as there's no operation hash in manifest";
1560     }
1561     return ErrorCode::kSuccess;
1562   }
1563 
1564   brillo::Blob expected_op_hash;
1565   expected_op_hash.assign(operation.data_sha256_hash().data(),
1566                           (operation.data_sha256_hash().data() +
1567                            operation.data_sha256_hash().size()));
1568 
1569   HashCalculator operation_hasher;
1570   operation_hasher.Update(buffer_.data(), operation.data_length());
1571   if (!operation_hasher.Finalize()) {
1572     LOG(ERROR) << "Unable to compute actual hash of operation "
1573                << next_operation_num_;
1574     return ErrorCode::kDownloadOperationHashVerificationError;
1575   }
1576 
1577   brillo::Blob calculated_op_hash = operation_hasher.raw_hash();
1578   if (calculated_op_hash != expected_op_hash) {
1579     LOG(ERROR) << "Hash verification failed for operation "
1580                << next_operation_num_ << ". Expected hash = ";
1581     utils::HexDumpVector(expected_op_hash);
1582     LOG(ERROR) << "Calculated hash over " << operation.data_length()
1583                << " bytes at offset: " << operation.data_offset() << " = ";
1584     utils::HexDumpVector(calculated_op_hash);
1585     return ErrorCode::kDownloadOperationHashMismatch;
1586   }
1587 
1588   return ErrorCode::kSuccess;
1589 }
1590 
1591 #define TEST_AND_RETURN_VAL(_retval, _condition)                \
1592   do {                                                          \
1593     if (!(_condition)) {                                        \
1594       LOG(ERROR) << "VerifyPayload failure: " << #_condition;   \
1595       return _retval;                                           \
1596     }                                                           \
1597   } while (0);
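
// Usage sketch: because the macro stringizes its condition, a failing check
// such as
//   TEST_AND_RETURN_VAL(ErrorCode::kPayloadSizeMismatchError, a == b);
// logs "VerifyPayload failure: a == b" and returns
// ErrorCode::kPayloadSizeMismatchError from the enclosing function.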
1598 
1599 ErrorCode DeltaPerformer::VerifyPayload(
1600     const string& update_check_response_hash,
1601     const uint64_t update_check_response_size) {
1602 
1603   // See if we should use the public RSA key in the Omaha response.
1604   base::FilePath path_to_public_key(public_key_path_);
1605   base::FilePath tmp_key;
1606   if (GetPublicKeyFromResponse(&tmp_key))
1607     path_to_public_key = tmp_key;
1608   ScopedPathUnlinker tmp_key_remover(tmp_key.value());
1609   if (tmp_key.empty())
1610     tmp_key_remover.set_should_remove(false);
1611 
1612   LOG(INFO) << "Verifying payload using public key: "
1613             << path_to_public_key.value();
1614 
1615   // Verifies the download size.
1616   TEST_AND_RETURN_VAL(ErrorCode::kPayloadSizeMismatchError,
1617                       update_check_response_size ==
1618                       metadata_size_ + metadata_signature_size_ +
1619                       buffer_offset_);
1620 
1621   // Verifies the payload hash.
1622   const string& payload_hash_data = payload_hash_calculator_.hash();
1623   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
1624                       !payload_hash_data.empty());
1625   TEST_AND_RETURN_VAL(ErrorCode::kPayloadHashMismatchError,
1626                       payload_hash_data == update_check_response_hash);
1627 
1628   // Verifies the signed payload hash.
1629   if (!utils::FileExists(path_to_public_key.value().c_str())) {
1630     LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
1631     return ErrorCode::kSuccess;
1632   }
1633   TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
1634                       !signatures_message_data_.empty());
1635   brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
1636   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
1637                       PayloadVerifier::PadRSA2048SHA256Hash(&hash_data));
1638   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
1639                       !hash_data.empty());
1640 
1641   if (!PayloadVerifier::VerifySignature(
1642       signatures_message_data_, path_to_public_key.value(), hash_data)) {
1643     // The autoupdate_CatchBadSignatures test checks for this string
1644     // in log-files. Keep in sync.
1645     LOG(ERROR) << "Public key verification failed, thus update failed.";
1646     return ErrorCode::kDownloadPayloadPubKeyVerificationError;
1647   }
1648 
1649   LOG(INFO) << "Payload hash matches value in payload.";
1650 
1651   // At this point, we are guaranteed to have downloaded a full payload, i.e.,
1652   // the one whose size matches the size mentioned in Omaha response. If any
1653   // errors happen after this, it's likely a problem with the payload itself or
1654   // the state of the system and not a problem with the URL or network.  So,
1655   // indicate that to the download delegate so that AU can backoff
1656   // appropriately.
1657   if (download_delegate_)
1658     download_delegate_->DownloadComplete();
1659 
1660   return ErrorCode::kSuccess;
1661 }
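
// Size accounting behind the first check above, with hypothetical numbers: for
// metadata_size_ = 60000, metadata_signature_size_ = 267 and buffer_offset_ =
// 500000000 bytes of consumed operation data, Omaha must have advertised
// exactly 60000 + 267 + 500000000 = 500060267 bytes; any other
// update_check_response_size fails with kPayloadSizeMismatchError.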
1662 
1663 void DeltaPerformer::DiscardBuffer(bool do_advance_offset,
1664                                    size_t signed_hash_buffer_size) {
1665   // Update the buffer offset.
1666   if (do_advance_offset)
1667     buffer_offset_ += buffer_.size();
1668 
1669   // Hash the content.
1670   payload_hash_calculator_.Update(buffer_.data(), buffer_.size());
1671   signed_hash_calculator_.Update(buffer_.data(), signed_hash_buffer_size);
1672 
1673   // Swap content with an empty vector to ensure that all memory is released.
1674   brillo::Blob().swap(buffer_);
1675 }
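
// The two hashers above deliberately cover different ranges:
// payload_hash_calculator_ always digests the whole buffer (the hash Omaha
// advertises covers the entire payload), while signed_hash_calculator_ only
// digests signed_hash_buffer_size bytes, letting callers exclude data that the
// payload signature does not cover -- e.g. passing 0 when the buffer holds the
// signature blob itself.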
1676 
1677 bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs,
1678                                      const string& update_check_response_hash) {
1679   int64_t next_operation = kUpdateStateOperationInvalid;
1680   if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
1681         next_operation != kUpdateStateOperationInvalid &&
1682         next_operation > 0))
1683     return false;
1684 
1685   string interrupted_hash;
1686   if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) &&
1687         !interrupted_hash.empty() &&
1688         interrupted_hash == update_check_response_hash))
1689     return false;
1690 
1691   int64_t resumed_update_failures;
1692   // Note that storing this value is optional, but if it is present it must
1693   // not exceed the limit.
1694   if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
1695       resumed_update_failures > kMaxResumedUpdateFailures)
1696     return false;
1697 
1698   // Sanity check the rest.
1699   int64_t next_data_offset = -1;
1700   if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
1701         next_data_offset >= 0))
1702     return false;
1703 
1704   string sha256_context;
1705   if (!(prefs->GetString(kPrefsUpdateStateSHA256Context, &sha256_context) &&
1706         !sha256_context.empty()))
1707     return false;
1708 
1709   int64_t manifest_metadata_size = 0;
1710   if (!(prefs->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
1711         manifest_metadata_size > 0))
1712     return false;
1713 
1714   int64_t manifest_signature_size = 0;
1715   if (!(prefs->GetInt64(kPrefsManifestSignatureSize,
1716                         &manifest_signature_size) &&
1717         manifest_signature_size >= 0))
1718     return false;
1719 
1720   return true;
1721 }
1722 
1723 bool DeltaPerformer::ResetUpdateProgress(PrefsInterface* prefs, bool quick) {
1724   TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
1725                                         kUpdateStateOperationInvalid));
1726   if (!quick) {
1727     prefs->SetString(kPrefsUpdateCheckResponseHash, "");
1728     prefs->SetInt64(kPrefsUpdateStateNextDataOffset, -1);
1729     prefs->SetInt64(kPrefsUpdateStateNextDataLength, 0);
1730     prefs->SetString(kPrefsUpdateStateSHA256Context, "");
1731     prefs->SetString(kPrefsUpdateStateSignedSHA256Context, "");
1732     prefs->SetString(kPrefsUpdateStateSignatureBlob, "");
1733     prefs->SetInt64(kPrefsManifestMetadataSize, -1);
1734     prefs->SetInt64(kPrefsManifestSignatureSize, -1);
1735     prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
1736   }
1737   return true;
1738 }
1739 
1740 bool DeltaPerformer::CheckpointUpdateProgress() {
1741   Terminator::set_exit_blocked(true);
1742   if (last_updated_buffer_offset_ != buffer_offset_) {
1743     // Resets the progress in case we die in the middle of the state update.
1744     ResetUpdateProgress(prefs_, true);
1745     TEST_AND_RETURN_FALSE(
1746         prefs_->SetString(kPrefsUpdateStateSHA256Context,
1747                           payload_hash_calculator_.GetContext()));
1748     TEST_AND_RETURN_FALSE(
1749         prefs_->SetString(kPrefsUpdateStateSignedSHA256Context,
1750                           signed_hash_calculator_.GetContext()));
1751     TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataOffset,
1752                                            buffer_offset_));
1753     last_updated_buffer_offset_ = buffer_offset_;
1754 
1755     if (next_operation_num_ < num_total_operations_) {
1756       size_t partition_index = current_partition_;
1757       while (next_operation_num_ >= acc_num_operations_[partition_index])
1758         partition_index++;
1759       const size_t partition_operation_num = next_operation_num_ - (
1760           partition_index ? acc_num_operations_[partition_index - 1] : 0);
1761       const InstallOperation& op =
1762           partitions_[partition_index].operations(partition_operation_num);
1763       TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataLength,
1764                                              op.data_length()));
1765     } else {
1766       TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataLength,
1767                                              0));
1768     }
1769   }
1770   TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextOperation,
1771                                          next_operation_num_));
1772   return true;
1773 }
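
// Index arithmetic above, with hypothetical values: if acc_num_operations_ =
// {120, 350} (120 operations in partition 0, 230 in partition 1) and
// next_operation_num_ = 200, the loop advances partition_index to 1 and
// partition_operation_num becomes 200 - 120 = 80, i.e. the 81st operation of
// the second partition is the one whose data_length gets checkpointed.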
1774 
1775 bool DeltaPerformer::PrimeUpdateState() {
1776   CHECK(manifest_valid_);
1777   block_size_ = manifest_.block_size();
1778 
1779   int64_t next_operation = kUpdateStateOperationInvalid;
1780   if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
1781       next_operation == kUpdateStateOperationInvalid ||
1782       next_operation <= 0) {
1783     // Initiating a new update; no more state needs to be initialized.
1784     return true;
1785   }
1786   next_operation_num_ = next_operation;
1787 
1788   // Resuming an update -- load the rest of the update state.
1789   int64_t next_data_offset = -1;
1790   TEST_AND_RETURN_FALSE(prefs_->GetInt64(kPrefsUpdateStateNextDataOffset,
1791                                          &next_data_offset) &&
1792                         next_data_offset >= 0);
1793   buffer_offset_ = next_data_offset;
1794 
1795   // The signed hash context and the signature blob may be empty if the
1796   // interrupted update didn't reach the signature.
1797   string signed_hash_context;
1798   if (prefs_->GetString(kPrefsUpdateStateSignedSHA256Context,
1799                         &signed_hash_context)) {
1800     TEST_AND_RETURN_FALSE(
1801         signed_hash_calculator_.SetContext(signed_hash_context));
1802   }
1803 
1804   string signature_blob;
1805   if (prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signature_blob)) {
1806     signatures_message_data_.assign(signature_blob.begin(),
1807                                     signature_blob.end());
1808   }
1809 
1810   string hash_context;
1811   TEST_AND_RETURN_FALSE(prefs_->GetString(kPrefsUpdateStateSHA256Context,
1812                                           &hash_context) &&
1813                         payload_hash_calculator_.SetContext(hash_context));
1814 
1815   int64_t manifest_metadata_size = 0;
1816   TEST_AND_RETURN_FALSE(prefs_->GetInt64(kPrefsManifestMetadataSize,
1817                                          &manifest_metadata_size) &&
1818                         manifest_metadata_size > 0);
1819   metadata_size_ = manifest_metadata_size;
1820 
1821   int64_t manifest_signature_size = 0;
1822   TEST_AND_RETURN_FALSE(
1823       prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size) &&
1824       manifest_signature_size >= 0);
1825   metadata_signature_size_ = manifest_signature_size;
1826 
1827   // Advance the download progress to reflect what doesn't need to be
1828   // re-downloaded.
1829   total_bytes_received_ += buffer_offset_;
1830 
1831   // Speculatively count the resume as a failure.
1832   int64_t resumed_update_failures;
1833   if (prefs_->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)) {
1834     resumed_update_failures++;
1835   } else {
1836     resumed_update_failures = 1;
1837   }
1838   prefs_->SetInt64(kPrefsResumedUpdateFailures, resumed_update_failures);
1839   return true;
1840 }
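
// The failure counter above is bumped speculatively on every resume and only
// matters through CanResumeUpdate(): once the stored count exceeds
// kMaxResumedUpdateFailures, resuming is refused and the caller is expected to
// fall back to restarting the download from scratch rather than resuming the
// same payload yet again.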
1841 
1842 }  // namespace chromeos_update_engine
1843