1 //
2 // Copyright (C) 2012 The Android Open Source Project
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 //
16 
17 #include "update_engine/payload_consumer/delta_performer.h"
18 
19 #include <errno.h>
20 #include <linux/fs.h>
21 
22 #include <algorithm>
23 #include <cstring>
24 #include <map>
25 #include <memory>
26 #include <set>
27 #include <string>
28 #include <utility>
29 #include <vector>
30 
31 #include <base/files/file_util.h>
32 #include <base/format_macros.h>
33 #include <base/metrics/histogram_macros.h>
34 #include <base/strings/string_number_conversions.h>
35 #include <base/strings/string_util.h>
36 #include <base/strings/stringprintf.h>
37 #include <base/time/time.h>
38 #include <brillo/data_encoding.h>
39 #include <bsdiff/bspatch.h>
40 #include <google/protobuf/repeated_field.h>
41 #include <puffin/puffpatch.h>
42 
43 #include "update_engine/common/constants.h"
44 #include "update_engine/common/download_action.h"
45 #include "update_engine/common/error_code.h"
46 #include "update_engine/common/error_code_utils.h"
47 #include "update_engine/common/hardware_interface.h"
48 #include "update_engine/common/prefs_interface.h"
49 #include "update_engine/common/subprocess.h"
50 #include "update_engine/common/terminator.h"
51 #include "update_engine/common/utils.h"
52 #include "update_engine/payload_consumer/bzip_extent_writer.h"
53 #include "update_engine/payload_consumer/cached_file_descriptor.h"
54 #include "update_engine/payload_consumer/certificate_parser_interface.h"
55 #include "update_engine/payload_consumer/extent_reader.h"
56 #include "update_engine/payload_consumer/extent_writer.h"
57 #include "update_engine/payload_consumer/partition_update_generator_interface.h"
58 #include "update_engine/payload_consumer/partition_writer.h"
59 #if USE_FEC
60 #include "update_engine/payload_consumer/fec_file_descriptor.h"
61 #endif  // USE_FEC
62 #include "update_engine/payload_consumer/file_descriptor_utils.h"
63 #include "update_engine/payload_consumer/mount_history.h"
64 #include "update_engine/payload_consumer/payload_constants.h"
65 #include "update_engine/payload_consumer/payload_verifier.h"
66 #include "update_engine/payload_consumer/xz_extent_writer.h"
67 
68 using google::protobuf::RepeatedPtrField;
69 using std::min;
70 using std::string;
71 using std::vector;
72 
73 namespace chromeos_update_engine {
74 const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
75 const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
76 const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
77 const unsigned DeltaPerformer::kProgressOperationsWeight = 50;
78 const uint64_t DeltaPerformer::kCheckpointFrequencySeconds = 1;
79 
80 namespace {
81 const int kUpdateStateOperationInvalid = -1;
82 const int kMaxResumedUpdateFailures = 10;
83 
84 }  // namespace
85 
86 // Computes the ratio of |part| and |total|, scaled to |norm|, using integer
87 // arithmetic.
88 static uint64_t IntRatio(uint64_t part, uint64_t total, uint64_t norm) {
89   return part * norm / total;
90 }
91 
92 void DeltaPerformer::LogProgress(const char* message_prefix) {
93   // Format operations total count and percentage.
94   string total_operations_str("?");
95   string completed_percentage_str("");
96   if (num_total_operations_) {
97     total_operations_str = std::to_string(num_total_operations_);
98     // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
99     completed_percentage_str = base::StringPrintf(
100         " (%" PRIu64 "%%)",
101         IntRatio(next_operation_num_, num_total_operations_, 100));
102   }
103 
104   // Format download total count and percentage.
105   size_t payload_size = payload_->size;
106   string payload_size_str("?");
107   string downloaded_percentage_str("");
108   if (payload_size) {
109     payload_size_str = std::to_string(payload_size);
110     // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
111     downloaded_percentage_str = base::StringPrintf(
112         " (%" PRIu64 "%%)", IntRatio(total_bytes_received_, payload_size, 100));
113   }
114 
115   LOG(INFO) << (message_prefix ? message_prefix : "") << next_operation_num_
116             << "/" << total_operations_str << " operations"
117             << completed_percentage_str << ", " << total_bytes_received_ << "/"
118             << payload_size_str << " bytes downloaded"
119             << downloaded_percentage_str << ", overall progress "
120             << overall_progress_ << "%";
121 }
122 
123 void DeltaPerformer::UpdateOverallProgress(bool force_log,
124                                            const char* message_prefix) {
125   // Compute our download and overall progress.
126   unsigned new_overall_progress = 0;
127   static_assert(kProgressDownloadWeight + kProgressOperationsWeight == 100,
128                 "Progress weights don't add up");
129   // Only consider download progress if its total size is known; otherwise
130   // adjust the operations weight to compensate for the absence of download
131   // progress. Also, make sure to cap the download portion at
132   // kProgressDownloadWeight, in case we end up downloading more than we
133   // initially expected (this indicates a problem, but could generally happen).
134   // TODO(garnold) the correction of operations weight when we do not have the
135   // total payload size, as well as the conditional guard below, should both be
136   // eliminated once we ensure that the payload_size in the install plan is
137   // always given and is non-zero. This currently isn't the case during unit
138   // tests (see chromium-os:37969).
139   size_t payload_size = payload_->size;
140   unsigned actual_operations_weight = kProgressOperationsWeight;
141   if (payload_size)
142     new_overall_progress +=
143         min(static_cast<unsigned>(IntRatio(
144                 total_bytes_received_, payload_size, kProgressDownloadWeight)),
145             kProgressDownloadWeight);
146   else
147     actual_operations_weight += kProgressDownloadWeight;
148 
149   // Only add completed operations if their total number is known; we definitely
150   // expect an update to have at least one operation, so the expectation is that
151   // this will eventually reach |actual_operations_weight|.
152   if (num_total_operations_)
153     new_overall_progress += IntRatio(
154         next_operation_num_, num_total_operations_, actual_operations_weight);
155 
156   // Progress ratio cannot recede, unless our assumptions about the total
157   // payload size, total number of operations, or the monotonicity of progress
158   // is breached.
159   if (new_overall_progress < overall_progress_) {
160     LOG(WARNING) << "progress counter receded from " << overall_progress_
161                  << "% down to " << new_overall_progress << "%; this is a bug";
162     force_log = true;
163   }
164   overall_progress_ = new_overall_progress;
165 
166   // Update chunk index, log as needed: if forced by caller, or we completed a
167   // progress chunk, or a timeout has expired.
168   base::TimeTicks curr_time = base::TimeTicks::Now();
169   unsigned curr_progress_chunk =
170       overall_progress_ * kProgressLogMaxChunks / 100;
171   if (force_log || curr_progress_chunk > last_progress_chunk_ ||
172       curr_time > forced_progress_log_time_) {
173     forced_progress_log_time_ = curr_time + forced_progress_log_wait_;
174     LogProgress(message_prefix);
175   }
176   last_progress_chunk_ = curr_progress_chunk;
177 }
178 
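// Appends up to |max - buffer_.size()| bytes from |*bytes_p| to |buffer_|,
// advancing |*bytes_p| and decrementing |*count_p| by the number of bytes
// consumed. Returns how many bytes were copied.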
179 size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p,
180                                         size_t* count_p,
181                                         size_t max) {
182   const size_t count = *count_p;
183   if (!count)
184     return 0;  // Special case shortcut.
185   size_t read_len = min(count, max - buffer_.size());
186   const char* bytes_start = *bytes_p;
187   const char* bytes_end = bytes_start + read_len;
188   buffer_.reserve(max);
189   buffer_.insert(buffer_.end(), bytes_start, bytes_end);
190   *bytes_p = bytes_end;
191   *count_p = count - read_len;
192   return read_len;
193 }
194 
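// Returns |op_result| unchanged. When the operation failed, also logs which
// operation (and partition) failed and sets |*error| to
// kDownloadOperationExecutionError unless a more specific error is already set.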
195 bool DeltaPerformer::HandleOpResult(bool op_result,
196                                     const char* op_type_name,
197                                     ErrorCode* error) {
198   if (op_result)
199     return true;
200 
201   LOG(ERROR) << "Failed to perform " << op_type_name << " operation "
202              << next_operation_num_ << ", which is the operation "
203              << GetPartitionOperationNum() << " in partition \""
204              << partitions_[current_partition_].partition_name() << "\"";
205   if (*error == ErrorCode::kSuccess)
206     *error = ErrorCode::kDownloadOperationExecutionError;
207   return false;
208 }
209 
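// Closes the current partition writer and finalizes the payload hash
// calculators. Returns 0 on success, or a negative value if closing the
// partition failed or unused downloaded bytes had to be discarded.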
210 int DeltaPerformer::Close() {
211   int err = -CloseCurrentPartition();
212   LOG_IF(ERROR,
213          !payload_hash_calculator_.Finalize() ||
214              !signed_hash_calculator_.Finalize())
215       << "Unable to finalize the hash.";
216   if (!buffer_.empty()) {
217     LOG(INFO) << "Discarding " << buffer_.size() << " unused downloaded bytes";
218     if (err >= 0)
219       err = 1;
220   }
221   return -err;
222 }
223 
224 int DeltaPerformer::CloseCurrentPartition() {
225   if (!partition_writer_) {
226     return 0;
227   }
228   int err = partition_writer_->Close();
229   partition_writer_ = nullptr;
230   return err;
231 }
232 
233 bool DeltaPerformer::OpenCurrentPartition() {
234   if (current_partition_ >= partitions_.size())
235     return false;
236 
237   const PartitionUpdate& partition = partitions_[current_partition_];
238   size_t num_previous_partitions =
239       install_plan_->partitions.size() - partitions_.size();
240   const InstallPlan::Partition& install_part =
241       install_plan_->partitions[num_previous_partitions + current_partition_];
242   auto dynamic_control = boot_control_->GetDynamicPartitionControl();
243   partition_writer_ = CreatePartitionWriter(
244       partition,
245       install_part,
246       dynamic_control,
247       block_size_,
248       interactive_,
249       IsDynamicPartition(install_part.name, install_plan_->target_slot));
250   // Open source fds if we have a delta payload, or for partitions in the
251   // partial update.
252   bool source_may_exist = manifest_.partial_update() ||
253                           payload_->type == InstallPayloadType::kDelta;
254   const size_t partition_operation_num = GetPartitionOperationNum();
255 
256   TEST_AND_RETURN_FALSE(partition_writer_->Init(
257       install_plan_, source_may_exist, partition_operation_num));
258   CheckpointUpdateProgress(true);
259   return true;
260 }
261 
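// Returns the index of the next operation within the current partition, i.e.
// the global operation number minus the operations accumulated by all
// previously processed partitions.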
262 size_t DeltaPerformer::GetPartitionOperationNum() {
263   return next_operation_num_ -
264          (current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
265 }
266 
267 namespace {
268 
269 void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
270   string sha256 = brillo::data_encoding::Base64Encode(info.hash());
271   LOG(INFO) << "PartitionInfo " << tag << " sha256: " << sha256
272             << " size: " << info.size();
273 }
274 
275 void LogPartitionInfo(const vector<PartitionUpdate>& partitions) {
276   for (const PartitionUpdate& partition : partitions) {
277     if (partition.has_old_partition_info()) {
278       LogPartitionInfoHash(partition.old_partition_info(),
279                            "old " + partition.partition_name());
280     }
281     LogPartitionInfoHash(partition.new_partition_info(),
282                          "new " + partition.partition_name());
283   }
284 }
285 
286 }  // namespace
287 
288 bool DeltaPerformer::IsHeaderParsed() const {
289   return metadata_size_ != 0;
290 }
291 
292 MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
293     const brillo::Blob& payload, ErrorCode* error) {
294   *error = ErrorCode::kSuccess;
295 
296   if (!IsHeaderParsed()) {
297     MetadataParseResult result =
298         payload_metadata_.ParsePayloadHeader(payload, error);
299     if (result != MetadataParseResult::kSuccess)
300       return result;
301 
302     metadata_size_ = payload_metadata_.GetMetadataSize();
303     metadata_signature_size_ = payload_metadata_.GetMetadataSignatureSize();
304     major_payload_version_ = payload_metadata_.GetMajorVersion();
305 
306     // If the metadata size is present in the install plan, check it immediately,
307     // even before waiting for that many bytes of the payload to be downloaded.
308     // This prevents any attack that relies on us downloading data beyond the
309     // expected metadata size.
310     if (install_plan_->hash_checks_mandatory) {
311       if (payload_->metadata_size != metadata_size_) {
312         LOG(ERROR) << "Mandatory metadata size in Omaha response ("
313                    << payload_->metadata_size
314                    << ") is missing/incorrect, actual = " << metadata_size_;
315         *error = ErrorCode::kDownloadInvalidMetadataSize;
316         return MetadataParseResult::kError;
317       }
318     }
319 
320     // Check that |metadata_signature_size_| and |metadata_size_| are not very
321     // big numbers. This is necessary since |update_engine| needs to write these
322     // values into the buffer before being able to use them, and if an attacker
323     // sets these values to a very big number, the buffer will overflow and
324     // |update_engine| will crash. A simple way of solving this is to check that
325     // the sum of both values is smaller than the payload itself.
326     if (metadata_size_ + metadata_signature_size_ > payload_->size) {
327       LOG(ERROR) << "The size of the metadata_size(" << metadata_size_ << ")"
328                  << " or metadata signature(" << metadata_signature_size_ << ")"
329                  << " is greater than the size of the payload"
330                  << "(" << payload_->size << ")";
331       *error = ErrorCode::kDownloadInvalidMetadataSize;
332       return MetadataParseResult::kError;
333     }
334   }
335 
336   // Now that we have validated the metadata size, we should wait for the full
337   // metadata and its signature (if one exists) to be read in before parsing.
338   if (payload.size() < metadata_size_ + metadata_signature_size_)
339     return MetadataParseResult::kInsufficientData;
340 
341   // Log whether we validated the size or are simply trusting what's in the
342   // payload. This is logged here (after we received the full metadata) so
343   // that we log just once (instead of n times) if it takes n
344   // DeltaPerformer::Write calls to download the full manifest.
345   if (payload_->metadata_size == metadata_size_) {
346     LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
347   } else {
348     // For mandatory cases, we'd have already returned a metadata parse error
349     // above. We'll be here only for non-mandatory cases. Just send a UMA stat.
350     LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
351                  << payload_->metadata_size
352                  << ") in Omaha response as validation is not mandatory. "
353                  << "Trusting metadata size in payload = " << metadata_size_;
354   }
355 
356   // NOLINTNEXTLINE(whitespace/braces)
357   auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
358   if (!payload_verifier) {
359     LOG(ERROR) << "Failed to create payload verifier.";
360     *error = ErrorCode::kDownloadMetadataSignatureVerificationError;
361     if (perform_verification) {
362       return MetadataParseResult::kError;
363     }
364   } else {
365     // We have the full metadata in |payload|. Verify its integrity
366     // and authenticity based on the information we have in Omaha response.
367     *error = payload_metadata_.ValidateMetadataSignature(
368         payload, payload_->metadata_signature, *payload_verifier);
369   }
370   if (*error != ErrorCode::kSuccess) {
371     if (install_plan_->hash_checks_mandatory) {
372       // The autoupdate_CatchBadSignatures test checks for this string
373       // in log-files. Keep in sync.
374       LOG(ERROR) << "Mandatory metadata signature validation failed";
375       return MetadataParseResult::kError;
376     }
377 
378     // For non-mandatory cases, just send a UMA stat.
379     LOG(WARNING) << "Ignoring metadata signature validation failures";
380     *error = ErrorCode::kSuccess;
381   }
382 
383   // The payload metadata is deemed valid; it's safe to parse the protobuf.
384   if (!payload_metadata_.GetManifest(payload, &manifest_)) {
385     LOG(ERROR) << "Unable to parse manifest in update file.";
386     *error = ErrorCode::kDownloadManifestParseError;
387     return MetadataParseResult::kError;
388   }
389 
390   manifest_parsed_ = true;
391   return MetadataParseResult::kSuccess;
392 }
393 
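// Records a local UMA histogram with the time spent applying a single install
// operation of the given type.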
394 #define OP_DURATION_HISTOGRAM(_op_name, _start_time)                         \
395   LOCAL_HISTOGRAM_CUSTOM_TIMES(                                              \
396       "UpdateEngine.DownloadAction.InstallOperation::" _op_name ".Duration", \
397       (base::TimeTicks::Now() - _start_time),                                \
398       base::TimeDelta::FromMilliseconds(10),                                 \
399       base::TimeDelta::FromMinutes(5),                                       \
400       20);
401 
402 // Wrapper around write. Returns true if all requested bytes
403 // were written, or false on any error, regardless of progress
404 // and stores an action exit code in |error|.
405 bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) {
406   *error = ErrorCode::kSuccess;
407   const char* c_bytes = reinterpret_cast<const char*>(bytes);
408 
409   // Update the total byte downloaded count and the progress logs.
410   total_bytes_received_ += count;
411   UpdateOverallProgress(false, "Completed ");
412 
413   while (!manifest_valid_) {
414     // Read data up to the needed limit; this is either the maximum payload
415     // header size, or the full metadata size (once it becomes known).
416     const bool do_read_header = !IsHeaderParsed();
417     CopyDataToBuffer(
418         &c_bytes,
419         &count,
420         (do_read_header ? kMaxPayloadHeaderSize
421                         : metadata_size_ + metadata_signature_size_));
422 
423     MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
424     if (result == MetadataParseResult::kError)
425       return false;
426     if (result == MetadataParseResult::kInsufficientData) {
427       // If we just processed the header, make an attempt on the manifest.
428       if (do_read_header && IsHeaderParsed())
429         continue;
430 
431       return true;
432     }
433 
434     // Checks the integrity of the payload manifest.
435     if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
436       return false;
437     manifest_valid_ = true;
438     if (!install_plan_->is_resume) {
439       auto begin = reinterpret_cast<const char*>(buffer_.data());
440       prefs_->SetString(kPrefsManifestBytes, {begin, buffer_.size()});
441     }
442 
443     // Clear the download buffer.
444     DiscardBuffer(false, metadata_size_);
445 
446     block_size_ = manifest_.block_size();
447 
448     // This populates |partitions_| and the |install_plan.partitions| with the
449     // list of partitions from the manifest.
450     if (!ParseManifestPartitions(error))
451       return false;
452 
453     // |install_plan.partitions| was filled in; nothing needs to be done here if
454     // the payload was already applied. Return false to terminate the http
455     // fetcher, but keep |error| as ErrorCode::kSuccess.
456     if (payload_->already_applied)
457       return false;
458 
459     num_total_operations_ = 0;
460     for (const auto& partition : partitions_) {
461       num_total_operations_ += partition.operations_size();
462       acc_num_operations_.push_back(num_total_operations_);
463     }
464 
465     LOG_IF(WARNING,
466            !prefs_->SetInt64(kPrefsManifestMetadataSize, metadata_size_))
467         << "Unable to save the manifest metadata size.";
468     LOG_IF(WARNING,
469            !prefs_->SetInt64(kPrefsManifestSignatureSize,
470                              metadata_signature_size_))
471         << "Unable to save the manifest signature size.";
472 
473     if (!PrimeUpdateState()) {
474       *error = ErrorCode::kDownloadStateInitializationError;
475       LOG(ERROR) << "Unable to prime the update state.";
476       return false;
477     }
478 
479     if (next_operation_num_ < acc_num_operations_[current_partition_]) {
480       if (!OpenCurrentPartition()) {
481         *error = ErrorCode::kInstallDeviceOpenError;
482         return false;
483       }
484     }
485 
486     if (next_operation_num_ > 0)
487       UpdateOverallProgress(true, "Resuming after ");
488     LOG(INFO) << "Starting to apply update payload operations";
489   }
490 
491   while (next_operation_num_ < num_total_operations_) {
492     // Check if we should cancel the current attempt for any reason.
493     // In this case, *error will have already been populated with the reason
494     // why we're canceling.
495     if (download_delegate_ && download_delegate_->ShouldCancel(error))
496       return false;
497 
498     // We know there are more operations to perform because we didn't reach the
499     // |num_total_operations_| limit yet.
500     if (next_operation_num_ >= acc_num_operations_[current_partition_]) {
501       if (partition_writer_) {
502         TEST_AND_RETURN_FALSE(partition_writer_->FinishedInstallOps());
503       }
504       CloseCurrentPartition();
505       // Skip until there are operations for current_partition_.
506       while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
507         current_partition_++;
508       }
509       if (!OpenCurrentPartition()) {
510         *error = ErrorCode::kInstallDeviceOpenError;
511         return false;
512       }
513     }
514 
515     const InstallOperation& op =
516         partitions_[current_partition_].operations(GetPartitionOperationNum());
517 
518     CopyDataToBuffer(&c_bytes, &count, op.data_length());
519 
520     // Check whether we received all of the next operation's data payload.
521     if (!CanPerformInstallOperation(op))
522       return true;
523 
524     // Validate the operation unconditionally. This helps prevent the
525     // exploitation of vulnerabilities in the patching libraries, e.g. bspatch.
526     // The hash of the patch data for a given operation is embedded in the
527     // payload metadata; and thus has been verified against the public key on
528     // device.
529     // Note: ValidateOperationHash must be called only after
530     // CanPerformInstallOperation; otherwise we might fail an operation simply
531     // because there isn't yet sufficient data to compute the proper hash.
532     *error = ValidateOperationHash(op);
533     if (*error != ErrorCode::kSuccess) {
534       if (install_plan_->hash_checks_mandatory) {
535         LOG(ERROR) << "Mandatory operation hash check failed";
536         return false;
537       }
538 
539       // For non-mandatory cases, just send a UMA stat.
540       LOG(WARNING) << "Ignoring operation validation errors";
541       *error = ErrorCode::kSuccess;
542     }
543 
544     // Makes sure we unblock exit when this operation completes.
545     ScopedTerminatorExitUnblocker exit_unblocker =
546         ScopedTerminatorExitUnblocker();  // Avoids a compiler unused var bug.
547 
548     base::TimeTicks op_start_time = base::TimeTicks::Now();
549 
550     bool op_result;
551     switch (op.type()) {
552       case InstallOperation::REPLACE:
553       case InstallOperation::REPLACE_BZ:
554       case InstallOperation::REPLACE_XZ:
555         op_result = PerformReplaceOperation(op);
556         OP_DURATION_HISTOGRAM("REPLACE", op_start_time);
557         break;
558       case InstallOperation::ZERO:
559       case InstallOperation::DISCARD:
560         op_result = PerformZeroOrDiscardOperation(op);
561         OP_DURATION_HISTOGRAM("ZERO_OR_DISCARD", op_start_time);
562         break;
563       case InstallOperation::SOURCE_COPY:
564         op_result = PerformSourceCopyOperation(op, error);
565         OP_DURATION_HISTOGRAM("SOURCE_COPY", op_start_time);
566         break;
567       case InstallOperation::SOURCE_BSDIFF:
568       case InstallOperation::BROTLI_BSDIFF:
569         op_result = PerformSourceBsdiffOperation(op, error);
570         OP_DURATION_HISTOGRAM("SOURCE_BSDIFF", op_start_time);
571         break;
572       case InstallOperation::PUFFDIFF:
573         op_result = PerformPuffDiffOperation(op, error);
574         OP_DURATION_HISTOGRAM("PUFFDIFF", op_start_time);
575         break;
576       default:
577         op_result = false;
578     }
579     if (!HandleOpResult(op_result, InstallOperationTypeName(op.type()), error))
580       return false;
581 
582     next_operation_num_++;
583     UpdateOverallProgress(false, "Completed ");
584     CheckpointUpdateProgress(false);
585   }
586 
587   if (partition_writer_) {
588     TEST_AND_RETURN_FALSE(partition_writer_->FinishedInstallOps());
589   }
590   CloseCurrentPartition();
591 
592   // In major version 2, we don't add an unused operation to the payload.
593   // If we already extracted the signature we should skip this step.
594   if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
595       signatures_message_data_.empty()) {
596     if (manifest_.signatures_offset() != buffer_offset_) {
597       LOG(ERROR) << "Payload signatures offset points to blob offset "
598                  << manifest_.signatures_offset()
599                  << " but signatures are expected at offset " << buffer_offset_;
600       *error = ErrorCode::kDownloadPayloadVerificationError;
601       return false;
602     }
603     CopyDataToBuffer(&c_bytes, &count, manifest_.signatures_size());
604     // Needs more data to cover entire signature.
605     if (buffer_.size() < manifest_.signatures_size())
606       return true;
607     if (!ExtractSignatureMessage()) {
608       LOG(ERROR) << "Extract payload signature failed.";
609       *error = ErrorCode::kDownloadPayloadVerificationError;
610       return false;
611     }
612     DiscardBuffer(true, 0);
613     // Since we extracted the SignatureMessage we need to advance the
614     // checkpoint; otherwise we would reload the signature and try to extract
615     // it again.
616     // This is the last checkpoint for an update, so force this checkpoint to
617     // be saved.
618     CheckpointUpdateProgress(true);
619   }
620 
621   return true;
622 }
623 
624 bool DeltaPerformer::IsManifestValid() {
625   return manifest_valid_;
626 }
627 
628 bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
629   partitions_.clear();
630   for (const PartitionUpdate& partition : manifest_.partitions()) {
631     partitions_.push_back(partition);
632   }
633 
634   // For VAB and partial updates, the partition preparation will copy the
635   // dynamic partitions metadata to the target metadata slot, and rename the
636   // slot suffix of the partitions in the metadata.
637   if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
638     uint64_t required_size = 0;
639     if (!PreparePartitionsForUpdate(&required_size)) {
640       if (required_size > 0) {
641         *error = ErrorCode::kNotEnoughSpace;
642       } else {
643         *error = ErrorCode::kInstallDeviceOpenError;
644       }
645       return false;
646     }
647   }
648 
649   // Partitions in manifest are no longer needed after preparing partitions.
650   manifest_.clear_partitions();
651   // TODO(xunchang) TBD: allow partial update only on devices with dynamic
652   // partition.
653   if (manifest_.partial_update()) {
654     std::set<std::string> touched_partitions;
655     for (const auto& partition_update : partitions_) {
656       touched_partitions.insert(partition_update.partition_name());
657     }
658 
659     auto generator = partition_update_generator::Create(boot_control_,
660                                                         manifest_.block_size());
661     std::vector<PartitionUpdate> untouched_static_partitions;
662     TEST_AND_RETURN_FALSE(
663         generator->GenerateOperationsForPartitionsNotInPayload(
664             install_plan_->source_slot,
665             install_plan_->target_slot,
666             touched_partitions,
667             &untouched_static_partitions));
668     partitions_.insert(partitions_.end(),
669                        untouched_static_partitions.begin(),
670                        untouched_static_partitions.end());
671 
672     // Save the untouched dynamic partitions in install plan.
673     std::vector<std::string> dynamic_partitions;
674     if (!boot_control_->GetDynamicPartitionControl()
675              ->ListDynamicPartitionsForSlot(install_plan_->source_slot,
676                                             boot_control_->GetCurrentSlot(),
677                                             &dynamic_partitions)) {
678       LOG(ERROR) << "Failed to load dynamic partitions from slot "
679                  << install_plan_->source_slot;
680       return false;
681     }
682     install_plan_->untouched_dynamic_partitions.clear();
683     for (const auto& name : dynamic_partitions) {
684       if (touched_partitions.find(name) == touched_partitions.end()) {
685         install_plan_->untouched_dynamic_partitions.push_back(name);
686       }
687     }
688   }
689 
690   // Fill in the InstallPlan::partitions based on the partitions from the
691   // payload.
692   for (const auto& partition : partitions_) {
693     InstallPlan::Partition install_part;
694     install_part.name = partition.partition_name();
695     install_part.run_postinstall =
696         partition.has_run_postinstall() && partition.run_postinstall();
697     if (install_part.run_postinstall) {
698       install_part.postinstall_path =
699           (partition.has_postinstall_path() ? partition.postinstall_path()
700                                             : kPostinstallDefaultScript);
701       install_part.filesystem_type = partition.filesystem_type();
702       install_part.postinstall_optional = partition.postinstall_optional();
703     }
704 
705     if (partition.has_old_partition_info()) {
706       const PartitionInfo& info = partition.old_partition_info();
707       install_part.source_size = info.size();
708       install_part.source_hash.assign(info.hash().begin(), info.hash().end());
709     }
710 
711     if (!partition.has_new_partition_info()) {
712       LOG(ERROR) << "Unable to get new partition hash info on partition "
713                  << install_part.name << ".";
714       *error = ErrorCode::kDownloadNewPartitionInfoError;
715       return false;
716     }
717     const PartitionInfo& info = partition.new_partition_info();
718     install_part.target_size = info.size();
719     install_part.target_hash.assign(info.hash().begin(), info.hash().end());
720 
721     install_part.block_size = block_size_;
722     if (partition.has_hash_tree_extent()) {
723       Extent extent = partition.hash_tree_data_extent();
724       install_part.hash_tree_data_offset = extent.start_block() * block_size_;
725       install_part.hash_tree_data_size = extent.num_blocks() * block_size_;
726       extent = partition.hash_tree_extent();
727       install_part.hash_tree_offset = extent.start_block() * block_size_;
728       install_part.hash_tree_size = extent.num_blocks() * block_size_;
729       uint64_t hash_tree_data_end =
730           install_part.hash_tree_data_offset + install_part.hash_tree_data_size;
731       if (install_part.hash_tree_offset < hash_tree_data_end) {
732         LOG(ERROR) << "Invalid hash tree extents, hash tree data ends at "
733                    << hash_tree_data_end << ", but hash tree starts at "
734                    << install_part.hash_tree_offset;
735         *error = ErrorCode::kDownloadNewPartitionInfoError;
736         return false;
737       }
738       install_part.hash_tree_algorithm = partition.hash_tree_algorithm();
739       install_part.hash_tree_salt.assign(partition.hash_tree_salt().begin(),
740                                          partition.hash_tree_salt().end());
741     }
742     if (partition.has_fec_extent()) {
743       Extent extent = partition.fec_data_extent();
744       install_part.fec_data_offset = extent.start_block() * block_size_;
745       install_part.fec_data_size = extent.num_blocks() * block_size_;
746       extent = partition.fec_extent();
747       install_part.fec_offset = extent.start_block() * block_size_;
748       install_part.fec_size = extent.num_blocks() * block_size_;
749       uint64_t fec_data_end =
750           install_part.fec_data_offset + install_part.fec_data_size;
751       if (install_part.fec_offset < fec_data_end) {
752         LOG(ERROR) << "Invalid fec extents, fec data ends at " << fec_data_end
753                    << ", but fec starts at " << install_part.fec_offset;
754         *error = ErrorCode::kDownloadNewPartitionInfoError;
755         return false;
756       }
757       install_part.fec_roots = partition.fec_roots();
758     }
759 
760     install_plan_->partitions.push_back(install_part);
761   }
762 
763   // TODO(xunchang) We only need to load the partitions that are in the payload,
764   // because we have already loaded the others when generating SOURCE_COPY
765   // operations.
766   if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
767     LOG(ERROR) << "Unable to determine all the partition devices.";
768     *error = ErrorCode::kInstallDeviceOpenError;
769     return false;
770   }
771   LogPartitionInfo(partitions_);
772   return true;
773 }
774 
775 bool DeltaPerformer::PreparePartitionsForUpdate(uint64_t* required_size) {
776   // Call the static PreparePartitionsForUpdate with the hash from
777   // kPrefsUpdateCheckResponseHash, to ensure that the hash of the payload for
778   // which space is preallocated matches the hash of the payload being applied.
779   string update_check_response_hash;
780   ignore_result(prefs_->GetString(kPrefsUpdateCheckResponseHash,
781                                   &update_check_response_hash));
782   return PreparePartitionsForUpdate(prefs_,
783                                     boot_control_,
784                                     install_plan_->target_slot,
785                                     manifest_,
786                                     update_check_response_hash,
787                                     required_size);
788 }
789 
790 bool DeltaPerformer::PreparePartitionsForUpdate(
791     PrefsInterface* prefs,
792     BootControlInterface* boot_control,
793     BootControlInterface::Slot target_slot,
794     const DeltaArchiveManifest& manifest,
795     const std::string& update_check_response_hash,
796     uint64_t* required_size) {
797   string last_hash;
798   ignore_result(
799       prefs->GetString(kPrefsDynamicPartitionMetadataUpdated, &last_hash));
800 
801   bool is_resume = !update_check_response_hash.empty() &&
802                    last_hash == update_check_response_hash;
803 
804   if (is_resume) {
805     LOG(INFO) << "Using previously prepared partitions for update. hash = "
806               << last_hash;
807   } else {
808     LOG(INFO) << "Preparing partitions for new update. last hash = "
809               << last_hash << ", new hash = " << update_check_response_hash;
810     ResetUpdateProgress(prefs, false);
811   }
812 
813   if (!boot_control->GetDynamicPartitionControl()->PreparePartitionsForUpdate(
814           boot_control->GetCurrentSlot(),
815           target_slot,
816           manifest,
817           !is_resume /* should update */,
818           required_size)) {
819     LOG(ERROR) << "Unable to initialize partition metadata for slot "
820                << BootControlInterface::SlotName(target_slot);
821     return false;
822   }
823 
824   TEST_AND_RETURN_FALSE(prefs->SetString(kPrefsDynamicPartitionMetadataUpdated,
825                                          update_check_response_hash));
826   LOG(INFO) << "PreparePartitionsForUpdate done.";
827 
828   return true;
829 }
830 
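// Returns true if the data blob referenced by |operation| (if any) is fully
// contained in the currently buffered download data.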
831 bool DeltaPerformer::CanPerformInstallOperation(
832     const chromeos_update_engine::InstallOperation& operation) {
833   // If the operation doesn't have a data blob, we can apply it right away.
834   if (!operation.has_data_offset() && !operation.has_data_length())
835     return true;
836 
837   // See if we have the entire data blob in the buffer
838   if (operation.data_offset() < buffer_offset_) {
839     LOG(ERROR) << "we threw away data it seems?";
840     return false;
841   }
842 
843   return (operation.data_offset() + operation.data_length() <=
844           buffer_offset_ + buffer_.size());
845 }
846 
847 bool DeltaPerformer::PerformReplaceOperation(
848     const InstallOperation& operation) {
849   CHECK(operation.type() == InstallOperation::REPLACE ||
850         operation.type() == InstallOperation::REPLACE_BZ ||
851         operation.type() == InstallOperation::REPLACE_XZ);
852 
853   // Since we delete data off the beginning of the buffer as we use it,
854   // the data we need should be exactly at the beginning of the buffer.
855   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
856 
857   TEST_AND_RETURN_FALSE(partition_writer_->PerformReplaceOperation(
858       operation, buffer_.data(), buffer_.size()));
859   // Update buffer
860   DiscardBuffer(true, buffer_.size());
861   return true;
862 }
863 
864 bool DeltaPerformer::PerformZeroOrDiscardOperation(
865     const InstallOperation& operation) {
866   CHECK(operation.type() == InstallOperation::DISCARD ||
867         operation.type() == InstallOperation::ZERO);
868 
869   // These operations have no blob.
870   TEST_AND_RETURN_FALSE(!operation.has_data_offset());
871   TEST_AND_RETURN_FALSE(!operation.has_data_length());
872 
873   return partition_writer_->PerformZeroOrDiscardOperation(operation);
874 }
875 
876 bool PartitionWriter::ValidateSourceHash(const brillo::Blob& calculated_hash,
877                                          const InstallOperation& operation,
878                                          const FileDescriptorPtr source_fd,
879                                          ErrorCode* error) {
880   brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
881                                     operation.src_sha256_hash().end());
882   if (calculated_hash != expected_source_hash) {
883     LOG(ERROR) << "The hash of the source data on disk for this operation "
884                << "doesn't match the expected value. This could mean that the "
885                << "delta update payload was targeted for another version, or "
886                << "that the source partition was modified after it was "
887                << "installed, for example, by mounting a filesystem.";
888     LOG(ERROR) << "Expected:   sha256|hex = "
889                << base::HexEncode(expected_source_hash.data(),
890                                   expected_source_hash.size());
891     LOG(ERROR) << "Calculated: sha256|hex = "
892                << base::HexEncode(calculated_hash.data(),
893                                   calculated_hash.size());
894 
895     vector<string> source_extents;
896     for (const Extent& ext : operation.src_extents()) {
897       source_extents.push_back(
898           base::StringPrintf("%" PRIu64 ":%" PRIu64,
899                              static_cast<uint64_t>(ext.start_block()),
900                              static_cast<uint64_t>(ext.num_blocks())));
901     }
902     LOG(ERROR) << "Operation source (offset:size) in blocks: "
903                << base::JoinString(source_extents, ",");
904 
905     // Log remount history if this device is an ext4 partition.
906     LogMountHistory(source_fd);
907 
908     *error = ErrorCode::kDownloadStateInitializationError;
909     return false;
910   }
911   return true;
912 }
913 
914 bool DeltaPerformer::PerformSourceCopyOperation(
915     const InstallOperation& operation, ErrorCode* error) {
916   if (operation.has_src_length())
917     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
918   if (operation.has_dst_length())
919     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
920   return partition_writer_->PerformSourceCopyOperation(operation, error);
921 }
922 
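// Converts |extents| into the "offset:length,offset:length" positions string
// used by bsdiff, clamping the total to |full_length| and failing if the
// extents cover fewer than |full_length| bytes.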
923 bool DeltaPerformer::ExtentsToBsdiffPositionsString(
924     const RepeatedPtrField<Extent>& extents,
925     uint64_t block_size,
926     uint64_t full_length,
927     string* positions_string) {
928   string ret;
929   uint64_t length = 0;
930   for (const Extent& extent : extents) {
931     int64_t start = extent.start_block() * block_size;
932     uint64_t this_length =
933         min(full_length - length,
934             static_cast<uint64_t>(extent.num_blocks()) * block_size);
935     ret += base::StringPrintf("%" PRIi64 ":%" PRIu64 ",", start, this_length);
936     length += this_length;
937   }
938   TEST_AND_RETURN_FALSE(length == full_length);
939   if (!ret.empty())
940     ret.resize(ret.size() - 1);  // Strip trailing comma off
941   *positions_string = ret;
942   return true;
943 }
944 
945 bool DeltaPerformer::PerformSourceBsdiffOperation(
946     const InstallOperation& operation, ErrorCode* error) {
947   // Since we delete data off the beginning of the buffer as we use it,
948   // the data we need should be exactly at the beginning of the buffer.
949   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
950   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
951   if (operation.has_src_length())
952     TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
953   if (operation.has_dst_length())
954     TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
955 
956   TEST_AND_RETURN_FALSE(partition_writer_->PerformSourceBsdiffOperation(
957       operation, error, buffer_.data(), buffer_.size()));
958   DiscardBuffer(true, buffer_.size());
959   return true;
960 }
961 
962 bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation,
963                                               ErrorCode* error) {
964   // Since we delete data off the beginning of the buffer as we use it,
965   // the data we need should be exactly at the beginning of the buffer.
966   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
967   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
968   TEST_AND_RETURN_FALSE(partition_writer_->PerformPuffDiffOperation(
969       operation, error, buffer_.data(), buffer_.size()));
970   DiscardBuffer(true, buffer_.size());
971   return true;
972 }
973 
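// Copies the signature blob at the current buffer offset into
// |signatures_message_data_|; requires that the full |signatures_size| is
// already buffered and that no signature was extracted before.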
974 bool DeltaPerformer::ExtractSignatureMessage() {
975   TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
976   TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
977   TEST_AND_RETURN_FALSE(buffer_.size() >= manifest_.signatures_size());
978   signatures_message_data_.assign(
979       buffer_.begin(), buffer_.begin() + manifest_.signatures_size());
980 
981   LOG(INFO) << "Extracted signature data of size "
982             << manifest_.signatures_size() << " at "
983             << manifest_.signatures_offset();
984   return true;
985 }
986 
987 bool DeltaPerformer::GetPublicKey(string* out_public_key) {
988   out_public_key->clear();
989 
990   if (utils::FileExists(public_key_path_.c_str())) {
991     LOG(INFO) << "Verifying using public key: " << public_key_path_;
992     return utils::ReadFile(public_key_path_, out_public_key);
993   }
994 
995   // If this is an official build then we are not allowed to use the public
996   // key from the Omaha response.
997   if (!hardware_->IsOfficialBuild() && !install_plan_->public_key_rsa.empty()) {
998     LOG(INFO) << "Verifying using public key from Omaha response.";
999     return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa,
1000                                                out_public_key);
1001   }
1002   LOG(INFO) << "No public keys found for verification.";
1003   return true;
1004 }
1005 
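// Returns the payload verifier to use plus a flag indicating whether signature
// verification should be performed. The update certificates zip is preferred
// when present; otherwise the public key is used, and verification is skipped
// only when no key is available.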
1006 std::pair<std::unique_ptr<PayloadVerifier>, bool>
1007 DeltaPerformer::CreatePayloadVerifier() {
1008   if (utils::FileExists(update_certificates_path_.c_str())) {
1009     LOG(INFO) << "Verifying using certificates: " << update_certificates_path_;
1010     return {
1011         PayloadVerifier::CreateInstanceFromZipPath(update_certificates_path_),
1012         true};
1013   }
1014 
1015   string public_key;
1016   if (!GetPublicKey(&public_key)) {
1017     LOG(ERROR) << "Failed to read public key";
1018     return {nullptr, true};
1019   }
1020 
1021   // Skips the verification if the public key is empty.
1022   if (public_key.empty()) {
1023     return {nullptr, false};
1024   }
1025   return {PayloadVerifier::CreateInstance(public_key), true};
1026 }
1027 
1028 ErrorCode DeltaPerformer::ValidateManifest() {
1029   // Perform assorted checks to validate the manifest: make sure it matches
1030   // data from other sources, and that it is a supported version.
1031   bool has_old_fields = std::any_of(manifest_.partitions().begin(),
1032                                     manifest_.partitions().end(),
1033                                     [](const PartitionUpdate& partition) {
1034                                       return partition.has_old_partition_info();
1035                                     });
1036 
1037   // The presence of an old partition hash is the sole indicator for a delta
1038   // update. Also, always treat the partial update as delta so that we can
1039   // perform the minor version check correctly.
1040   InstallPayloadType actual_payload_type =
1041       (has_old_fields || manifest_.partial_update())
1042           ? InstallPayloadType::kDelta
1043           : InstallPayloadType::kFull;
1044 
1045   if (payload_->type == InstallPayloadType::kUnknown) {
1046     LOG(INFO) << "Detected a '"
1047               << InstallPayloadTypeToString(actual_payload_type)
1048               << "' payload.";
1049     payload_->type = actual_payload_type;
1050   } else if (payload_->type != actual_payload_type) {
1051     LOG(ERROR) << "InstallPlan expected a '"
1052                << InstallPayloadTypeToString(payload_->type)
1053                << "' payload but the downloaded manifest contains a '"
1054                << InstallPayloadTypeToString(actual_payload_type)
1055                << "' payload.";
1056     return ErrorCode::kPayloadMismatchedType;
1057   }
1058   // Check that the minor version is compatible.
1059   // TODO(xunchang) increment minor version & add check for partial update
1060   if (actual_payload_type == InstallPayloadType::kFull) {
1061     if (manifest_.minor_version() != kFullPayloadMinorVersion) {
1062       LOG(ERROR) << "Manifest contains minor version "
1063                  << manifest_.minor_version()
1064                  << ", but all full payloads should have version "
1065                  << kFullPayloadMinorVersion << ".";
1066       return ErrorCode::kUnsupportedMinorPayloadVersion;
1067     }
1068   } else {
1069     if (manifest_.minor_version() < kMinSupportedMinorPayloadVersion ||
1070         manifest_.minor_version() > kMaxSupportedMinorPayloadVersion) {
1071       LOG(ERROR) << "Manifest contains minor version "
1072                  << manifest_.minor_version()
1073                  << " not in the range of supported minor versions ["
1074                  << kMinSupportedMinorPayloadVersion << ", "
1075                  << kMaxSupportedMinorPayloadVersion << "].";
1076       return ErrorCode::kUnsupportedMinorPayloadVersion;
1077     }
1078   }
1079 
1080   ErrorCode error_code = CheckTimestampError();
1081   if (error_code != ErrorCode::kSuccess) {
1082     if (error_code == ErrorCode::kPayloadTimestampError) {
1083       if (!hardware_->AllowDowngrade()) {
1084         return ErrorCode::kPayloadTimestampError;
1085       }
1086       LOG(INFO) << "The current OS build allows downgrade, continuing to apply"
1087                    " the payload with an older timestamp.";
1088     } else {
1089       LOG(ERROR) << "Timestamp check returned "
1090                  << utils::ErrorCodeToString(error_code);
1091       return error_code;
1092     }
1093   }
1094 
1095   // TODO(crbug.com/37661) we should be adding more and more manifest checks,
1096   // such as partition boundaries, etc.
1097 
1098   return ErrorCode::kSuccess;
1099 }
1100 
1101 ErrorCode DeltaPerformer::CheckTimestampError() const {
1102   bool is_partial_update =
1103       manifest_.has_partial_update() && manifest_.partial_update();
1104   const auto& partitions = manifest_.partitions();
1105 
1106   // Checks the version field of a given PartitionUpdate object and returns an
1107   // ErrorCode describing the result. If a downgrade is detected,
1108   // |*downgrade_detected| is set. The caller uses the returned code to decide
1109   // whether to continue checking the next partition or to exit early due to
1110   // errors.
1111   auto&& timestamp_valid = [this](const PartitionUpdate& partition,
1112                                   bool allow_empty_version,
1113                                   bool* downgrade_detected) -> ErrorCode {
1114     const auto& partition_name = partition.partition_name();
1115     if (!partition.has_version()) {
1116       if (hardware_->GetVersionForLogging(partition_name).empty()) {
1117         LOG(INFO) << partition_name << " doesn't have a version, skipping "
1118                   << "downgrade check.";
1119         return ErrorCode::kSuccess;
1120       }
1121 
1122       if (allow_empty_version) {
1123         return ErrorCode::kSuccess;
1124       }
1125       LOG(ERROR)
1126           << "PartitionUpdate " << partition_name
1127           << " doesn't have a version field. Not allowed in partial updates.";
1128       return ErrorCode::kDownloadManifestParseError;
1129     }
1130 
1131     auto error_code =
1132         hardware_->IsPartitionUpdateValid(partition_name, partition.version());
1133     switch (error_code) {
1134       case ErrorCode::kSuccess:
1135         break;
1136       case ErrorCode::kPayloadTimestampError:
1137         *downgrade_detected = true;
1138         LOG(WARNING) << "PartitionUpdate " << partition_name
1139                      << " has an older version than partition on device.";
1140         break;
1141       default:
1142         LOG(ERROR) << "IsPartitionUpdateValid(" << partition_name
1143                    << ") returned " << utils::ErrorCodeToString(error_code);
1144         break;
1145     }
1146     return error_code;
1147   };
1148 
1149   bool downgrade_detected = false;
1150 
1151   if (is_partial_update) {
1152     // For partial updates, all partitions MUST have valid timestamps, but
1153     // max_timestamp can be empty.
1154     for (const auto& partition : partitions) {
1155       auto error_code = timestamp_valid(
1156           partition, false /* allow_empty_version */, &downgrade_detected);
1157       if (error_code != ErrorCode::kSuccess &&
1158           error_code != ErrorCode::kPayloadTimestampError) {
1159         return error_code;
1160       }
1161     }
1162     if (downgrade_detected) {
1163       return ErrorCode::kPayloadTimestampError;
1164     }
1165     return ErrorCode::kSuccess;
1166   }
1167 
1168   // For non-partial updates, check max_timestamp first.
1169   if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
1170     LOG(ERROR) << "The current OS build timestamp ("
1171                << hardware_->GetBuildTimestamp()
1172                << ") is newer than the maximum timestamp in the manifest ("
1173                << manifest_.max_timestamp() << ")";
1174     return ErrorCode::kPayloadTimestampError;
1175   }
1176   // Otherwise... partitions can have empty timestamps.
1177   for (const auto& partition : partitions) {
1178     auto error_code = timestamp_valid(
1179         partition, true /* allow_empty_version */, &downgrade_detected);
1180     if (error_code != ErrorCode::kSuccess &&
1181         error_code != ErrorCode::kPayloadTimestampError) {
1182       return error_code;
1183     }
1184   }
1185   if (downgrade_detected) {
1186     return ErrorCode::kPayloadTimestampError;
1187   }
1188   return ErrorCode::kSuccess;
1189 }
1190 
1191 ErrorCode DeltaPerformer::ValidateOperationHash(
1192     const InstallOperation& operation) {
1193   if (!operation.data_sha256_hash().size()) {
1194     if (!operation.data_length()) {
1195       // Operations that do not have any data blob won't have any operation
1196       // hash either. So, these operations are always considered validated
1197       // since the metadata that contains all the non-data-blob portions of
1198       // the operation has already been validated. This is true for both HTTP
1199       // and HTTPS cases.
1200       return ErrorCode::kSuccess;
1201     }
1202 
1203     // No hash is present for an operation that has data blobs. This shouldn't
1204     // happen normally for any client that has this code, because the
1205     // corresponding update should have been produced with the operation
1206     // hashes. So if it happens it means either we've turned operation hash
1207     // generation off in DeltaDiffGenerator or it's a regression of some sort.
1208     // One caveat though: the last operation is an unused signature operation
1209     // that doesn't have a hash at the time the manifest is created. So we
1210     // should not complain about that operation. This operation can be
1211     // recognized by the fact that its offset is mentioned in the manifest.
1212     if (manifest_.signatures_offset() &&
1213         manifest_.signatures_offset() == operation.data_offset()) {
1214       LOG(INFO) << "Skipping hash verification for signature operation "
1215                 << next_operation_num_ + 1;
1216     } else {
1217       if (install_plan_->hash_checks_mandatory) {
1218         LOG(ERROR) << "Missing mandatory operation hash for operation "
1219                    << next_operation_num_ + 1;
1220         return ErrorCode::kDownloadOperationHashMissingError;
1221       }
1222 
1223       LOG(WARNING) << "Cannot validate operation " << next_operation_num_ + 1
1224                    << " as there's no operation hash in manifest";
1225     }
1226     return ErrorCode::kSuccess;
1227   }
1228 
1229   brillo::Blob expected_op_hash;
1230   expected_op_hash.assign(operation.data_sha256_hash().data(),
1231                           (operation.data_sha256_hash().data() +
1232                            operation.data_sha256_hash().size()));
1233 
1234   brillo::Blob calculated_op_hash;
1235   if (!HashCalculator::RawHashOfBytes(
1236           buffer_.data(), operation.data_length(), &calculated_op_hash)) {
1237     LOG(ERROR) << "Unable to compute actual hash of operation "
1238                << next_operation_num_;
1239     return ErrorCode::kDownloadOperationHashVerificationError;
1240   }
1241 
1242   if (calculated_op_hash != expected_op_hash) {
1243     LOG(ERROR) << "Hash verification failed for operation "
1244                << next_operation_num_ << ". Expected hash = ";
1245     utils::HexDumpVector(expected_op_hash);
1246     LOG(ERROR) << "Calculated hash over " << operation.data_length()
1247                << " bytes at offset: " << operation.data_offset() << " = ";
1248     utils::HexDumpVector(calculated_op_hash);
1249     return ErrorCode::kDownloadOperationHashMismatch;
1250   }
1251 
1252   return ErrorCode::kSuccess;
1253 }
1254 
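// Convenience macro for the verification steps below: when |_condition|
// evaluates to false, it logs the failed condition and returns |_retval| from
// the enclosing function.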
1255 #define TEST_AND_RETURN_VAL(_retval, _condition)              \
1256   do {                                                        \
1257     if (!(_condition)) {                                      \
1258       LOG(ERROR) << "VerifyPayload failure: " << #_condition; \
1259       return _retval;                                         \
1260     }                                                         \
1261   } while (0);
1262 
1263 ErrorCode DeltaPerformer::VerifyPayload(
1264     const brillo::Blob& update_check_response_hash,
1265     const uint64_t update_check_response_size) {
1266   // Verifies the download size.
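  // The expected total is the metadata, plus the metadata signature, plus all
  // the operation data blobs consumed so far (tracked by buffer_offset_).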
1267   if (update_check_response_size !=
1268       metadata_size_ + metadata_signature_size_ + buffer_offset_) {
1269     LOG(ERROR) << "update_check_response_size (" << update_check_response_size
1270                << ") doesn't match metadata_size (" << metadata_size_
1271                << ") + metadata_signature_size (" << metadata_signature_size_
1272                << ") + buffer_offset (" << buffer_offset_ << ").";
1273     return ErrorCode::kPayloadSizeMismatchError;
1274   }
1275 
1276   // Verifies the payload hash.
1277   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
1278                       !payload_hash_calculator_.raw_hash().empty());
1279   TEST_AND_RETURN_VAL(
1280       ErrorCode::kPayloadHashMismatchError,
1281       payload_hash_calculator_.raw_hash() == update_check_response_hash);
1282 
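  // CreatePayloadVerifier() reports both the verifier and whether verification
  // should be attempted at all: a missing public key skips the signature check
  // below, while failing to construct the verifier is treated as an error.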
1283   // NOLINTNEXTLINE(whitespace/braces)
1284   auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
1285   if (!perform_verification) {
1286     LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
1287     return ErrorCode::kSuccess;
1288   }
1289   if (!payload_verifier) {
1290     LOG(ERROR) << "Failed to create the payload verifier.";
1291     return ErrorCode::kDownloadPayloadPubKeyVerificationError;
1292   }
1293 
1294   TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
1295                       !signatures_message_data_.empty());
1296   brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
1297   TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
1298                       hash_data.size() == kSHA256Size);
1299 
1300   if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) {
1301     // The autoupdate_CatchBadSignatures test checks for this string
1302     // in log-files. Keep in sync.
1303     LOG(ERROR) << "Public key verification failed, thus update failed.";
1304     return ErrorCode::kDownloadPayloadPubKeyVerificationError;
1305   }
1306 
1307   LOG(INFO) << "Payload hash matches value in payload.";
1308   return ErrorCode::kSuccess;
1309 }
1310 
1311 void DeltaPerformer::DiscardBuffer(bool do_advance_offset,
1312                                    size_t signed_hash_buffer_size) {
1313   // Update the buffer offset.
1314   if (do_advance_offset)
1315     buffer_offset_ += buffer_.size();
1316 
1317   // Hash the content.
1318   payload_hash_calculator_.Update(buffer_.data(), buffer_.size());
1319   signed_hash_calculator_.Update(buffer_.data(), signed_hash_buffer_size);
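  // Only the first |signed_hash_buffer_size| bytes feed the signed hash, which
  // lets the caller exclude trailing data (such as the signature blob itself)
  // from the hash that gets signed.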
1320 
1321   // Swap content with an empty vector to ensure that all memory is released.
1322   brillo::Blob().swap(buffer_);
1323 }
1324 
1325 bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs,
1326                                      const string& update_check_response_hash) {
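  // A resumable update needs a valid saved next-operation index, and the saved
  // response hash must match the payload being resumed.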
1327   int64_t next_operation = kUpdateStateOperationInvalid;
1328   if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
1329         next_operation != kUpdateStateOperationInvalid && next_operation > 0))
1330     return false;
1331 
1332   string interrupted_hash;
1333   if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) &&
1334         !interrupted_hash.empty() &&
1335         interrupted_hash == update_check_response_hash))
1336     return false;
1337 
1338   int64_t resumed_update_failures;
1339   // Note that storing this value is optional, but if it is present it must
1340   // not exceed the limit.
1341   if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
1342       resumed_update_failures > kMaxResumedUpdateFailures)
1343     return false;
1344 
1345   // Validity-check the rest of the resume state.
1346   int64_t next_data_offset = -1;
1347   if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
1348         next_data_offset >= 0))
1349     return false;
1350 
1351   string sha256_context;
1352   if (!(prefs->GetString(kPrefsUpdateStateSHA256Context, &sha256_context) &&
1353         !sha256_context.empty()))
1354     return false;
1355 
1356   int64_t manifest_metadata_size = 0;
1357   if (!(prefs->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
1358         manifest_metadata_size > 0))
1359     return false;
1360 
1361   int64_t manifest_signature_size = 0;
1362   if (!(prefs->GetInt64(kPrefsManifestSignatureSize,
1363                         &manifest_signature_size) &&
1364         manifest_signature_size >= 0))
1365     return false;
1366 
1367   return true;
1368 }
1369 
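// Clears the persisted resume state. With |quick| set, only the next-operation
// marker is invalidated; otherwise the checkpointed offsets, hash contexts,
// signature blob, metadata sizes and failure count are wiped as well.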
1370 bool DeltaPerformer::ResetUpdateProgress(
1371     PrefsInterface* prefs,
1372     bool quick,
1373     bool skip_dynamic_partition_metadata_updated) {
1374   TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
1375                                         kUpdateStateOperationInvalid));
1376   if (!quick) {
1377     prefs->SetInt64(kPrefsUpdateStateNextDataOffset, -1);
1378     prefs->SetInt64(kPrefsUpdateStateNextDataLength, 0);
1379     prefs->SetString(kPrefsUpdateStateSHA256Context, "");
1380     prefs->SetString(kPrefsUpdateStateSignedSHA256Context, "");
1381     prefs->SetString(kPrefsUpdateStateSignatureBlob, "");
1382     prefs->SetInt64(kPrefsManifestMetadataSize, -1);
1383     prefs->SetInt64(kPrefsManifestSignatureSize, -1);
1384     prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
1385     prefs->Delete(kPrefsPostInstallSucceeded);
1386     prefs->Delete(kPrefsVerityWritten);
1387 
1388     if (!skip_dynamic_partition_metadata_updated) {
1389       LOG(INFO) << "Resetting recorded hash for prepared partitions.";
1390       prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
1391     }
1392   }
1393   return true;
1394 }
1395 
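// Rate-limits checkpointing: returns true at most once per
// |update_checkpoint_wait_| interval, advancing the next checkpoint time
// whenever it does.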
1396 bool DeltaPerformer::ShouldCheckpoint() {
1397   base::TimeTicks curr_time = base::TimeTicks::Now();
1398   if (curr_time > update_checkpoint_time_) {
1399     update_checkpoint_time_ = curr_time + update_checkpoint_wait_;
1400     return true;
1401   }
1402   return false;
1403 }
1404 
1405 bool DeltaPerformer::CheckpointUpdateProgress(bool force) {
1406   if (!force && !ShouldCheckpoint()) {
1407     return false;
1408   }
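  // Block termination while the resume state is being persisted so that an
  // exit request cannot leave the prefs in a partially written state.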
1409   Terminator::set_exit_blocked(true);
1410   if (last_updated_operation_num_ != next_operation_num_ || force) {
1411     // Resets the progress in case we die in the middle of the state update.
1412     ResetUpdateProgress(prefs_, true);
1413     if (!signatures_message_data_.empty()) {
1414       // Save the signature blob because if the update is interrupted after the
1415       // download phase we don't go through this path anymore. Some alternatives
1416       // to consider:
1417       //
1418       // 1. On resume, re-download the signature blob from the server and
1419       // re-verify it.
1420       //
1421       // 2. Verify the signature as soon as it's received and don't checkpoint
1422       // the blob and the signed sha-256 context.
1423       LOG_IF(WARNING,
1424              !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
1425                                 signatures_message_data_))
1426           << "Unable to store the signature blob.";
1427     }
1428     TEST_AND_RETURN_FALSE(prefs_->SetString(
1429         kPrefsUpdateStateSHA256Context, payload_hash_calculator_.GetContext()));
1430     TEST_AND_RETURN_FALSE(
1431         prefs_->SetString(kPrefsUpdateStateSignedSHA256Context,
1432                           signed_hash_calculator_.GetContext()));
1433     TEST_AND_RETURN_FALSE(
1434         prefs_->SetInt64(kPrefsUpdateStateNextDataOffset, buffer_offset_));
1435     last_updated_operation_num_ = next_operation_num_;
1436 
1437     if (next_operation_num_ < num_total_operations_) {
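      // |acc_num_operations_| holds the cumulative operation count at the end
      // of each partition; this scan locates the partition containing
      // |next_operation_num_| and converts it to a partition-local index so
      // the next operation's data length can be checkpointed.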
1438       size_t partition_index = current_partition_;
1439       while (next_operation_num_ >= acc_num_operations_[partition_index]) {
1440         partition_index++;
1441       }
1442       const size_t partition_operation_num =
1443           next_operation_num_ -
1444           (partition_index ? acc_num_operations_[partition_index - 1] : 0);
1445       const InstallOperation& op =
1446           partitions_[partition_index].operations(partition_operation_num);
1447       TEST_AND_RETURN_FALSE(
1448           prefs_->SetInt64(kPrefsUpdateStateNextDataLength, op.data_length()));
1449     } else {
1450       TEST_AND_RETURN_FALSE(
1451           prefs_->SetInt64(kPrefsUpdateStateNextDataLength, 0));
1452     }
1453     if (partition_writer_) {
1454       partition_writer_->CheckpointUpdateProgress(GetPartitionOperationNum());
1455     } else {
1456       CHECK_EQ(next_operation_num_, num_total_operations_)
1457           << "Partition writer is null, we are expected to finish all "
1458              "operations: "
1459           << next_operation_num_ << "/" << num_total_operations_;
1460     }
1461   }
1462   TEST_AND_RETURN_FALSE(
1463       prefs_->SetInt64(kPrefsUpdateStateNextOperation, next_operation_num_));
1464   return true;
1465 }
1466 
1467 bool DeltaPerformer::PrimeUpdateState() {
1468   CHECK(manifest_valid_);
1469 
1470   int64_t next_operation = kUpdateStateOperationInvalid;
1471   if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
1472       next_operation == kUpdateStateOperationInvalid || next_operation <= 0) {
1473     // Initiating a new update; no further state needs to be initialized.
1474     return true;
1475   }
1476   next_operation_num_ = next_operation;
1477 
1478   // Resuming an update -- load the rest of the update state.
1479   int64_t next_data_offset = -1;
1480   TEST_AND_RETURN_FALSE(
1481       prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
1482       next_data_offset >= 0);
1483   buffer_offset_ = next_data_offset;
1484 
1485   // The signed hash context and the signature blob may be empty if the
1486   // interrupted update didn't reach the signature.
1487   string signed_hash_context;
1488   if (prefs_->GetString(kPrefsUpdateStateSignedSHA256Context,
1489                         &signed_hash_context)) {
1490     TEST_AND_RETURN_FALSE(
1491         signed_hash_calculator_.SetContext(signed_hash_context));
1492   }
1493 
1494   prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signatures_message_data_);
1495 
1496   string hash_context;
1497   TEST_AND_RETURN_FALSE(
1498       prefs_->GetString(kPrefsUpdateStateSHA256Context, &hash_context) &&
1499       payload_hash_calculator_.SetContext(hash_context));
1500 
1501   int64_t manifest_metadata_size = 0;
1502   TEST_AND_RETURN_FALSE(
1503       prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
1504       manifest_metadata_size > 0);
1505   metadata_size_ = manifest_metadata_size;
1506 
1507   int64_t manifest_signature_size = 0;
1508   TEST_AND_RETURN_FALSE(
1509       prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size) &&
1510       manifest_signature_size >= 0);
1511   metadata_signature_size_ = manifest_signature_size;
1512 
1513   // Advance the download progress to reflect what doesn't need to be
1514   // re-downloaded.
1515   total_bytes_received_ += buffer_offset_;
1516 
1517   // Speculatively count the resume as a failure.
1518   int64_t resumed_update_failures;
1519   if (prefs_->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)) {
1520     resumed_update_failures++;
1521   } else {
1522     resumed_update_failures = 1;
1523   }
1524   prefs_->SetInt64(kPrefsResumedUpdateFailures, resumed_update_failures);
1525   return true;
1526 }
1527 
1528 bool DeltaPerformer::IsDynamicPartition(const std::string& part_name,
1529                                         uint32_t slot) {
1530   return boot_control_->GetDynamicPartitionControl()->IsDynamicPartition(
1531       part_name, slot);
1532 }
1533 
1534 std::unique_ptr<PartitionWriter> DeltaPerformer::CreatePartitionWriter(
1535     const PartitionUpdate& partition_update,
1536     const InstallPlan::Partition& install_part,
1537     DynamicPartitionControlInterface* dynamic_control,
1538     size_t block_size,
1539     bool is_interactive,
1540     bool is_dynamic_partition) {
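  // Note: the member state (block_size_, interactive_) and a freshly computed
  // dynamic-partition flag are forwarded here rather than the corresponding
  // parameters.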
1541   return partition_writer::CreatePartitionWriter(
1542       partition_update,
1543       install_part,
1544       dynamic_control,
1545       block_size_,
1546       interactive_,
1547       IsDynamicPartition(install_part.name, install_plan_->target_slot));
1548 }
1549 
1550 }  // namespace chromeos_update_engine
1551