//
// Copyright (C) 2012 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_consumer/delta_performer.h"

#include <linux/fs.h>

#include <algorithm>
#include <chrono>
#include <cstring>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include <android-base/properties.h>
#include <android-base/strings.h>
#include <base/files/file_util.h>
#include <base/format_macros.h>
#include <base/metrics/histogram_macros.h>
#include <base/strings/string_number_conversions.h>
#include <android-base/stringprintf.h>
#include <base/time/time.h>
#include <brillo/data_encoding.h>
#include <bsdiff/bspatch.h>
#include <google/protobuf/repeated_field.h>
#include <puffin/puffpatch.h>

#include "libsnapshot/cow_format.h"
#include "update_engine/common/constants.h"
#include "update_engine/common/download_action.h"
#include "update_engine/common/error_code.h"
#include "update_engine/common/error_code_utils.h"
#include "update_engine/common/hardware_interface.h"
#include "update_engine/common/prefs_interface.h"
#include "update_engine/common/terminator.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/partition_update_generator_interface.h"
#include "update_engine/payload_consumer/partition_writer.h"
#include "update_engine/update_metadata.pb.h"
#if USE_FEC
#include "update_engine/payload_consumer/fec_file_descriptor.h"
#endif  // USE_FEC
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_consumer/payload_verifier.h"

using google::protobuf::RepeatedPtrField;
using std::min;
using std::string;
using std::vector;

namespace chromeos_update_engine {
const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
const unsigned DeltaPerformer::kProgressOperationsWeight = 50;
const uint64_t DeltaPerformer::kCheckpointFrequencySeconds = 1;

namespace {
const int kUpdateStateOperationInvalid = -1;
const int kMaxResumedUpdateFailures = 10;

}  // namespace

// Computes the ratio of |part| and |total|, scaled to |norm|, using integer
// arithmetic.
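// For example, IntRatio(30, 100, 50) == 15, i.e. 30% of a 50-point scale,
// rounded down by the integer division.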
static uint64_t IntRatio(uint64_t part, uint64_t total, uint64_t norm) {
  return part * norm / total;
}

void DeltaPerformer::LogProgress(const char* message_prefix) {
  // Format operations total count and percentage.
  string total_operations_str("?");
  string completed_percentage_str("");
  if (num_total_operations_) {
    total_operations_str = std::to_string(num_total_operations_);
    // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
    completed_percentage_str = android::base::StringPrintf(
        " (%" PRIu64 "%%)",
        IntRatio(next_operation_num_, num_total_operations_, 100));
  }

  // Format download total count and percentage.
  size_t payload_size = payload_->size;
  string payload_size_str("?");
  string downloaded_percentage_str("");
  if (payload_size) {
    payload_size_str = std::to_string(payload_size);
    // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
    downloaded_percentage_str = android::base::StringPrintf(
        " (%" PRIu64 "%%)", IntRatio(total_bytes_received_, payload_size, 100));
  }

  LOG(INFO) << (message_prefix ? message_prefix : "") << next_operation_num_
            << "/" << total_operations_str << " operations"
            << completed_percentage_str << ", " << total_bytes_received_ << "/"
            << payload_size_str << " bytes downloaded"
            << downloaded_percentage_str << ", overall progress "
            << overall_progress_ << "%";
}

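// Recomputes |overall_progress_| as a weighted blend of download progress and
// operation progress (50/50 by default). For example, with half of the
// payload downloaded and a quarter of the operations applied, the overall
// progress is about 50 * 1/2 + 50 * 1/4 = 37%.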
void DeltaPerformer::UpdateOverallProgress(bool force_log,
                                           const char* message_prefix) {
  // Compute our download and overall progress.
  unsigned new_overall_progress = 0;
  static_assert(kProgressDownloadWeight + kProgressOperationsWeight == 100,
                "Progress weights don't add up");
  // Only consider download progress if its total size is known; otherwise
  // adjust the operations weight to compensate for the absence of download
  // progress. Also, make sure to cap the download portion at
  // kProgressDownloadWeight, in case we end up downloading more than we
  // initially expected (this would indicate a problem, but can happen in
  // practice).
  // TODO(garnold) the correction of operations weight when we do not have the
  // total payload size, as well as the conditional guard below, should both be
  // eliminated once we ensure that the payload_size in the install plan is
  // always given and is non-zero. This currently isn't the case during unit
  // tests (see chromium-os:37969).
  size_t payload_size = payload_->size;
  unsigned actual_operations_weight = kProgressOperationsWeight;
  if (payload_size)
    new_overall_progress +=
        min(static_cast<unsigned>(IntRatio(
                total_bytes_received_, payload_size, kProgressDownloadWeight)),
            kProgressDownloadWeight);
  else
    actual_operations_weight += kProgressDownloadWeight;

  // Only add completed operations if their total number is known; we
  // definitely expect an update to have at least one operation, so the
  // expectation is that this will eventually reach |actual_operations_weight|.
  if (num_total_operations_)
    new_overall_progress += IntRatio(
        next_operation_num_, num_total_operations_, actual_operations_weight);

  // Progress ratio cannot recede, unless our assumptions about the total
  // payload size, total number of operations, or the monotonicity of progress
  // are breached.
  if (new_overall_progress < overall_progress_) {
    LOG(WARNING) << "progress counter receded from " << overall_progress_
                 << "% down to " << new_overall_progress << "%; this is a bug";
    force_log = true;
  }
  overall_progress_ = new_overall_progress;

  // Update the chunk index and log as needed: if forced by the caller, if we
  // completed a progress chunk, or if a timeout has expired.
  base::TimeTicks curr_time = base::TimeTicks::Now();
  unsigned curr_progress_chunk =
      overall_progress_ * kProgressLogMaxChunks / 100;
  if (force_log || curr_progress_chunk > last_progress_chunk_ ||
      curr_time > forced_progress_log_time_) {
    forced_progress_log_time_ = curr_time + forced_progress_log_wait_;
    LogProgress(message_prefix);
  }
  last_progress_chunk_ = curr_progress_chunk;
}

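// Appends up to |max - buffer_.size()| bytes from |*bytes_p| to |buffer_| and
// advances |*bytes_p| / decrements |*count_p| by the amount consumed. For
// example, with an empty buffer, max == 16 and *count_p == 100, this buffers
// 16 bytes and leaves 84 bytes for a later call.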
size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p,
                                        size_t* count_p,
                                        size_t max) {
  const size_t count = *count_p;
  if (!count)
    return 0;  // Special case shortcut.
  size_t read_len = min(count, max - buffer_.size());
  const char* bytes_start = *bytes_p;
  const char* bytes_end = bytes_start + read_len;
  buffer_.reserve(max);
  buffer_.insert(buffer_.end(), bytes_start, bytes_end);
  *bytes_p = bytes_end;
  *count_p = count - read_len;
  return read_len;
}

bool DeltaPerformer::HandleOpResult(bool op_result,
                                    const char* op_type_name,
                                    ErrorCode* error) {
  if (op_result)
    return true;

  LOG(ERROR) << "Failed to perform " << op_type_name << " operation "
             << next_operation_num_ << ", which is the operation "
             << GetPartitionOperationNum() << " in partition \""
             << partitions_[current_partition_].partition_name() << "\"";
  if (*error == ErrorCode::kSuccess)
    *error = ErrorCode::kDownloadOperationExecutionError;
  return false;
}

int DeltaPerformer::Close() {
  // Checkpoint update progress before canceling, so that subsequent attempts
  // can resume from exactly where update_engine left off last time.
  CheckpointUpdateProgress(true);
  int err = -CloseCurrentPartition();
  LOG_IF(ERROR,
         !payload_hash_calculator_.Finalize() ||
             !signed_hash_calculator_.Finalize())
      << "Unable to finalize the hash.";
  if (!buffer_.empty()) {
    LOG(INFO) << "Discarding " << buffer_.size() << " unused downloaded bytes";
    if (err >= 0)
      err = 1;
  }
  return -err;
}

int DeltaPerformer::CloseCurrentPartition() {
  if (!partition_writer_) {
    return 0;
  }
  int err = partition_writer_->Close();
  partition_writer_ = nullptr;
  return err;
}

bool DeltaPerformer::OpenCurrentPartition() {
  if (current_partition_ >= partitions_.size())
    return false;

  const PartitionUpdate& partition = partitions_[current_partition_];
  size_t num_previous_partitions =
      install_plan_->partitions.size() - partitions_.size();
  const InstallPlan::Partition& install_part =
      install_plan_->partitions[num_previous_partitions + current_partition_];
  auto dynamic_control = boot_control_->GetDynamicPartitionControl();
  partition_writer_ = CreatePartitionWriter(
      partition,
      install_part,
      dynamic_control,
      block_size_,
      interactive_,
      IsDynamicPartition(install_part.name, install_plan_->target_slot));
  // Open source fds if we have a delta payload, or for partitions in the
  // partial update.
  const bool source_may_exist = manifest_.partial_update() ||
                                payload_->type == InstallPayloadType::kDelta;
  const size_t partition_operation_num = GetPartitionOperationNum();

  TEST_AND_RETURN_FALSE(partition_writer_->Init(
      install_plan_, source_may_exist, partition_operation_num));
  CheckpointUpdateProgress(true);
  return true;
}

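// Returns the index of the next operation within the current partition.
// |acc_num_operations_| holds cumulative operation counts per partition, so,
// for example, with acc_num_operations_ == {10, 25}, current_partition_ == 1
// and next_operation_num_ == 12, this returns 12 - 10 == 2.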
size_t DeltaPerformer::GetPartitionOperationNum() {
  return next_operation_num_ -
         (current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
}

namespace {

void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
  string sha256 = HexEncode(info.hash());
  LOG(INFO) << "PartitionInfo " << tag << " sha256: " << sha256
            << " size: " << info.size();
}

void LogPartitionInfo(const vector<PartitionUpdate>& partitions) {
  for (const PartitionUpdate& partition : partitions) {
    if (partition.has_old_partition_info()) {
      LogPartitionInfoHash(partition.old_partition_info(),
                           "old " + partition.partition_name());
    }
    LogPartitionInfoHash(partition.new_partition_info(),
                         "new " + partition.partition_name());
  }
}

}  // namespace

bool DeltaPerformer::IsHeaderParsed() const {
  return metadata_size_ != 0;
}

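// Note: a major-version-2 payload blob is roughly laid out as
//   [header][manifest][metadata signature][operation data blobs][signature]
// where |metadata_size_| covers the header plus the manifest, and the metadata
// signature of size |metadata_signature_size_| immediately follows it.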
MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
    const brillo::Blob& payload, ErrorCode* error) {
  *error = ErrorCode::kSuccess;

  if (!IsHeaderParsed()) {
    MetadataParseResult result =
        payload_metadata_.ParsePayloadHeader(payload, error);
    if (result != MetadataParseResult::kSuccess)
      return result;

    metadata_size_ = payload_metadata_.GetMetadataSize();
    metadata_signature_size_ = payload_metadata_.GetMetadataSignatureSize();
    major_payload_version_ = payload_metadata_.GetMajorVersion();

    // If the metadata size is present in the install plan, check it
    // immediately, even before that many bytes of the payload have been
    // downloaded. This prevents any attack that relies on us downloading
    // data beyond the expected metadata size.
    if (install_plan_->hash_checks_mandatory) {
      if (payload_->metadata_size != metadata_size_) {
        LOG(ERROR) << "Mandatory metadata size in Omaha response ("
                   << payload_->metadata_size
                   << ") is missing/incorrect, actual = " << metadata_size_;
        *error = ErrorCode::kDownloadInvalidMetadataSize;
        return MetadataParseResult::kError;
      }
    }

    // Check that |metadata_signature_size_| and |metadata_size_| are not
    // very big numbers. This is necessary since |update_engine| needs to write
    // these values into the buffer before being able to use them, and if an
    // attacker sets these values to a very big number, the buffer will
    // overflow and |update_engine| will crash. A simple way of solving this is
    // to check that the size of both values is smaller than the payload
    // itself.
    if (metadata_size_ + metadata_signature_size_ > payload_->size) {
      LOG(ERROR) << "The size of the metadata_size(" << metadata_size_ << ")"
                 << " or metadata signature(" << metadata_signature_size_
                 << ")"
                 << " is greater than the size of the payload" << "("
                 << payload_->size << ")";
      *error = ErrorCode::kDownloadInvalidMetadataSize;
      return MetadataParseResult::kError;
    }
  }

  // Now that we have validated the metadata size, we should wait for the full
  // metadata and its signature (if it exists) to be read in before we can
  // parse it.
  if (payload.size() < metadata_size_ + metadata_signature_size_)
    return MetadataParseResult::kInsufficientData;

  // Log whether we validated the size or are simply trusting what's in the
  // payload here. This is logged here (after we received the full metadata
  // data) so that we just log once (instead of logging n times) if it takes n
  // DeltaPerformer::Write calls to download the full manifest.
  if (payload_->metadata_size == metadata_size_) {
    LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
  } else {
    // For mandatory cases, we'd have already returned a kMetadataParseError
    // above. We'll be here only for non-mandatory cases. Just send a UMA stat.
    LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
                 << payload_->metadata_size
                 << ") in Omaha response as validation is not mandatory. "
                 << "Trusting metadata size in payload = " << metadata_size_;
  }

  // NOLINTNEXTLINE(whitespace/braces)
  auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
  if (!payload_verifier) {
    LOG(ERROR) << "Failed to create payload verifier.";
    *error = ErrorCode::kDownloadMetadataSignatureVerificationError;
    if (perform_verification) {
      return MetadataParseResult::kError;
    }
  } else {
    // We have the full metadata in |payload|. Verify its integrity
    // and authenticity based on the information we have in the Omaha response.
    *error = payload_metadata_.ValidateMetadataSignature(
        payload, payload_->metadata_signature, *payload_verifier);
  }
  if (*error != ErrorCode::kSuccess) {
    if (install_plan_->hash_checks_mandatory) {
      // The autoupdate_CatchBadSignatures test checks for this string
      // in log-files. Keep in sync.
      LOG(ERROR) << "Mandatory metadata signature validation failed";
      return MetadataParseResult::kError;
    }

    // For non-mandatory cases, just send a UMA stat.
    LOG(WARNING) << "Ignoring metadata signature validation failures";
    *error = ErrorCode::kSuccess;
  }

  // The payload metadata is deemed valid, it's safe to parse the protobuf.
  if (!payload_metadata_.GetManifest(payload, &manifest_)) {
    LOG(ERROR) << "Unable to parse manifest in update file.";
    *error = ErrorCode::kDownloadManifestParseError;
    return MetadataParseResult::kError;
  }

  manifest_parsed_ = true;
  return MetadataParseResult::kSuccess;
}

#define OP_DURATION_HISTOGRAM(_op_name, _start_time)                        \
  LOCAL_HISTOGRAM_CUSTOM_TIMES(                                             \
      "UpdateEngine.DownloadAction.InstallOperation::" + string(_op_name) + \
          ".Duration",                                                      \
      (base::TimeTicks::Now() - _start_time),                               \
      base::TimeDelta::FromMilliseconds(10),                                \
      base::TimeDelta::FromMinutes(5),                                      \
      20);

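// Rejects or flags OTAs that would downgrade the security patch level (SPL).
// SPL values are dated strings of the form YYYY-MM-DD (e.g. "2024-03-05"),
// so a plain lexicographic comparison is enough to order them.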
bool DeltaPerformer::CheckSPLDowngrade() {
  if (!manifest_.has_security_patch_level()) {
    return true;
  }
  if (manifest_.security_patch_level().empty()) {
    return true;
  }
  const auto new_spl = manifest_.security_patch_level();
  const auto current_spl =
      android::base::GetProperty("ro.build.version.security_patch", "");
  if (current_spl.empty()) {
    LOG(WARNING) << "Failed to get ro.build.version.security_patch, unable to "
                    "determine if this OTA is an SPL downgrade. Assuming this "
                    "OTA is not an SPL downgrade.";
    return true;
  }
  if (new_spl < current_spl) {
    const auto avb_state =
        android::base::GetProperty("ro.boot.verifiedbootstate", "green");
    if (android::base::EqualsIgnoreCase(avb_state, "green")) {
      LOG(ERROR) << "Target build SPL " << new_spl
                 << " is older than current build's SPL " << current_spl
                 << ", this OTA is an SPL downgrade. Your device's "
                    "ro.boot.verifiedbootstate="
                 << avb_state
                 << ", it probably has a locked bootloader. Since a locked "
                    "bootloader will reject SPL downgrade no matter what, we "
                    "will reject this OTA.";
      return false;
    }
    install_plan_->powerwash_required = true;
    LOG(WARNING)
        << "Target build SPL " << new_spl
        << " is older than current build's SPL " << current_spl
        << ", this OTA is an SPL downgrade. A data wipe will be required";
  }
  return true;
}

// Wrapper around write. Returns true if all requested bytes were written, or
// false on any error, regardless of progress, and stores an action exit code
// in |error|.
bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) {
  if (!error) {
    LOG(INFO) << "Error Code is not initialized";
    return false;
  }
  *error = ErrorCode::kSuccess;
  const char* c_bytes = reinterpret_cast<const char*>(bytes);

  // Update the total byte downloaded count and the progress logs.
  total_bytes_received_ += count;
  UpdateOverallProgress(false, "Completed ");

  while (!manifest_valid_) {
    bool insufficient_bytes = false;
    if (!ParseManifest(&c_bytes, &count, error, &insufficient_bytes)) {
      LOG(ERROR) << "Failed to parse manifest";
      return false;
    }
    if (insufficient_bytes) {
      return true;
    }
  }

  while (next_operation_num_ < num_total_operations_) {
    // Check if we should cancel the current attempt for any reason.
    // In this case, *error will have already been populated with the reason
    // why we're canceling.
    if (download_delegate_ && download_delegate_->ShouldCancel(error))
      return false;

    // We know there are more operations to perform because we didn't reach the
    // |num_total_operations_| limit yet.
    if (next_operation_num_ >= acc_num_operations_[current_partition_]) {
      if (partition_writer_) {
        if (!partition_writer_->FinishedInstallOps()) {
          *error = ErrorCode::kDownloadWriteError;
          return false;
        }
      }
      const auto err = CloseCurrentPartition();
      if (err < 0) {
        LOG(ERROR) << "Failed to close partition "
                   << partitions_[current_partition_].partition_name() << " "
                   << strerror(-err);
        return false;
      }
      // Skip until there are operations for current_partition_.
      while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
        current_partition_++;
      }
      if (!OpenCurrentPartition()) {
        *error = ErrorCode::kInstallDeviceOpenError;
        return false;
      }
    }

    const InstallOperation& op =
        partitions_[current_partition_].operations(GetPartitionOperationNum());

    CopyDataToBuffer(&c_bytes, &count, op.data_length());

    // Check whether we received all of the next operation's data payload.
    if (!CanPerformInstallOperation(op))
      return true;
    if (!ProcessOperation(&op, error)) {
      LOG(ERROR) << "unable to process operation: "
                 << InstallOperationTypeName(op.type())
                 << " Error: " << utils::ErrorCodeToString(*error);
      return false;
    }

    next_operation_num_++;
    UpdateOverallProgress(false, "Completed ");
    CheckpointUpdateProgress(false);
  }

  if (partition_writer_) {
    TEST_AND_RETURN_FALSE(partition_writer_->FinishedInstallOps());
  }
  CloseCurrentPartition();

  // In major version 2, we don't add an unused operation to the payload.
  // If we already extracted the signature we should skip this step.
  if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
      signatures_message_data_.empty()) {
    if (manifest_.signatures_offset() != buffer_offset_) {
      LOG(ERROR) << "Payload signatures offset points to blob offset "
                 << manifest_.signatures_offset()
                 << " but signatures are expected at offset " << buffer_offset_;
      *error = ErrorCode::kDownloadPayloadVerificationError;
      return false;
    }
    CopyDataToBuffer(&c_bytes, &count, manifest_.signatures_size());
    // Needs more data to cover the entire signature.
    if (buffer_.size() < manifest_.signatures_size())
      return true;
    if (!ExtractSignatureMessage()) {
      LOG(ERROR) << "Extract payload signature failed.";
      *error = ErrorCode::kDownloadPayloadVerificationError;
      return false;
    }
    DiscardBuffer(true, 0);
    // Since we extracted the SignatureMessage we need to advance the
    // checkpoint, otherwise we would reload the signature and try to extract
    // it again.
    // This is the last checkpoint for an update, so force this checkpoint to
    // be saved.
    CheckpointUpdateProgress(true);
  }

  return true;
}

bool DeltaPerformer::ParseManifest(const char** c_bytes,
                                   size_t* count,
                                   ErrorCode* error,
                                   bool* should_return) {
  // Read data up to the needed limit; this is either the maximum payload
  // header size, or the full metadata size (once it becomes known).
  const bool do_read_header = !IsHeaderParsed();
  CopyDataToBuffer(
      c_bytes,
      count,
      (do_read_header ? kMaxPayloadHeaderSize
                      : metadata_size_ + metadata_signature_size_));
  MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
  if (result == MetadataParseResult::kError)
    return false;
  if (result == MetadataParseResult::kInsufficientData) {
    // If we just processed the header, make an attempt on the manifest.
    if (do_read_header && IsHeaderParsed()) {
      return true;
    }
    *should_return = true;
    return true;
  }

  // Checks the integrity of the payload manifest.
  if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
    return false;
  manifest_valid_ = true;
  if (!install_plan_->is_resume) {
    auto begin = reinterpret_cast<const char*>(buffer_.data());
    prefs_->SetString(kPrefsManifestBytes, {begin, buffer_.size()});
  }

  // Clear the download buffer.
  DiscardBuffer(false, metadata_size_);

  block_size_ = manifest_.block_size();

  if (!install_plan_->spl_downgrade && !CheckSPLDowngrade()) {
    *error = ErrorCode::kPayloadTimestampError;
    return false;
  }

  // Update estimate_cow_size if VABC is disabled:
  // new_cow_size per partition = partition_size - (#blocks in Copy
  // operations that are part of the partition).
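  // Roughly:
  //   estimate_cow_size = new_cow_size
  //                       + per-block COW operation headers
  //                       + label ops (worst case one per InstallOp)
  //                       + 2 MiB of headroom.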
  if (install_plan_->vabc_none) {
    LOG(INFO) << "Setting Virtual AB Compression algorithm to none. This "
                 "would also disable VABC XOR as XOR only saves space if "
                 "compression is enabled.";
    manifest_.mutable_dynamic_partition_metadata()->set_vabc_compression_param(
        "none");
    for (auto& partition : *manifest_.mutable_partitions()) {
      if (!partition.has_estimate_cow_size()) {
        continue;
      }
      auto new_cow_size = partition.new_partition_info().size();
      for (const auto& operation : partition.merge_operations()) {
        if (operation.type() == CowMergeOperation::COW_COPY) {
          new_cow_size -=
              operation.dst_extent().num_blocks() * manifest_.block_size();
        }
      }
      // Remove all COW_XOR merge ops, as XOR without compression is useless.
      // It increases CPU usage but does not reduce space usage at all.
      auto&& merge_ops = *partition.mutable_merge_operations();
      merge_ops.erase(std::remove_if(merge_ops.begin(),
                                     merge_ops.end(),
                                     [](const auto& op) {
                                       return op.type() ==
                                              CowMergeOperation::COW_XOR;
                                     }),
                      merge_ops.end());

      // Every block written to the COW device comes with a header which
      // stores src/dst block info along with other data.
      const auto cow_metadata_size = partition.new_partition_info().size() /
                                     manifest_.block_size() *
                                     sizeof(android::snapshot::CowOperation);
      // update_engine will emit a label op every op or every two seconds,
      // whichever is longer. In the worst case, we add 1 label per
      // InstallOp. So take the size of label ops into account.
      const auto label_ops_size =
          partition.operations_size() * sizeof(android::snapshot::CowOperation);
      // Add an extra 2MB of headroom for any unexpected space usage. If we
      // overrun the reserved COW size, the entire OTA fails and there is no
      // way for the user to retry the OTA.
      partition.set_estimate_cow_size(new_cow_size + (1024 * 1024 * 2) +
                                      cow_metadata_size + label_ops_size);
      // Setting op count max to 0 will defer to num_blocks as the op buffer
      // size.
      partition.set_estimate_op_count_max(0);
      LOG(INFO) << "New COW size for partition " << partition.partition_name()
                << " is " << partition.estimate_cow_size();
    }
  }
  if (install_plan_->disable_vabc) {
    manifest_.mutable_dynamic_partition_metadata()->set_vabc_enabled(false);
  }
  if (install_plan_->enable_threading) {
    manifest_.mutable_dynamic_partition_metadata()
        ->mutable_vabc_feature_set()
        ->set_threaded(install_plan_->enable_threading.value());
    LOG(INFO) << "Attempting to "
              << (install_plan_->enable_threading.value() ? "enable"
                                                          : "disable")
              << " multi-threaded compression for VABC";
  }
  if (install_plan_->batched_writes) {
    manifest_.mutable_dynamic_partition_metadata()
        ->mutable_vabc_feature_set()
        ->set_batch_writes(true);
    LOG(INFO) << "Attempting to enable batched writes for VABC";
  }

  // This populates |partitions_| and the |install_plan.partitions| with the
  // list of partitions from the manifest.
  if (!ParseManifestPartitions(error))
    return false;

  // |install_plan.partitions| was filled in; nothing needs to be done here if
  // the payload was already applied. Return false to terminate the http
  // fetcher, but keep |error| as ErrorCode::kSuccess.
  if (payload_->already_applied)
    return false;

  num_total_operations_ = 0;
  for (const auto& partition : partitions_) {
    num_total_operations_ += partition.operations_size();
    acc_num_operations_.push_back(num_total_operations_);
  }

  LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestMetadataSize, metadata_size_))
      << "Unable to save the manifest metadata size.";
  LOG_IF(
      WARNING,
      !prefs_->SetInt64(kPrefsManifestSignatureSize, metadata_signature_size_))
      << "Unable to save the manifest signature size.";

  if (!PrimeUpdateState()) {
    *error = ErrorCode::kDownloadStateInitializationError;
    LOG(ERROR) << "Unable to prime the update state.";
    return false;
  }

  if (next_operation_num_ < acc_num_operations_[current_partition_]) {
    if (!OpenCurrentPartition()) {
      *error = ErrorCode::kInstallDeviceOpenError;
      return false;
    }
  }

  if (next_operation_num_ > 0)
    UpdateOverallProgress(true, "Resuming after ");
  LOG(INFO) << "Starting to apply update payload operations";
  return true;
}

bool DeltaPerformer::ProcessOperation(const InstallOperation* op,
                                      ErrorCode* error) {
  // Validate the operation unconditionally. This helps prevent the
  // exploitation of vulnerabilities in the patching libraries, e.g. bspatch.
  // The hash of the patch data for a given operation is embedded in the
  // payload metadata, and thus has been verified against the public key on
  // device.
  // Note: Validate must be called only if CanPerformInstallOperation is
  // called. Otherwise, we might be failing operations even before there is
  // sufficient data to compute the proper hash.
  *error = ValidateOperationHash(*op);
  if (*error != ErrorCode::kSuccess) {
    if (install_plan_->hash_checks_mandatory) {
      LOG(ERROR) << "Mandatory operation hash check failed";
      return false;
    }

    // For non-mandatory cases, just send a UMA stat.
    LOG(WARNING) << "Ignoring operation validation errors";
    *error = ErrorCode::kSuccess;
  }

  // Makes sure we unblock exit when this operation completes.
  ScopedTerminatorExitUnblocker exit_unblocker =
      ScopedTerminatorExitUnblocker();  // Avoids a compiler unused var bug.

  base::TimeTicks op_start_time = base::TimeTicks::Now();

  bool op_result{};
  const string op_name = InstallOperationTypeName(op->type());
  switch (op->type()) {
    case InstallOperation::REPLACE:
    case InstallOperation::REPLACE_BZ:
    case InstallOperation::REPLACE_XZ:
      op_result = PerformReplaceOperation(*op);
      OP_DURATION_HISTOGRAM("REPLACE", op_start_time);
      break;
    case InstallOperation::ZERO:
    case InstallOperation::DISCARD:
      op_result = PerformZeroOrDiscardOperation(*op);
      OP_DURATION_HISTOGRAM("ZERO_OR_DISCARD", op_start_time);
      break;
    case InstallOperation::SOURCE_COPY:
      op_result = PerformSourceCopyOperation(*op, error);
      OP_DURATION_HISTOGRAM("SOURCE_COPY", op_start_time);
      break;
    case InstallOperation::SOURCE_BSDIFF:
    case InstallOperation::BROTLI_BSDIFF:
    case InstallOperation::PUFFDIFF:
    case InstallOperation::ZUCCHINI:
    case InstallOperation::LZ4DIFF_PUFFDIFF:
    case InstallOperation::LZ4DIFF_BSDIFF:
      op_result = PerformDiffOperation(*op, error);
      OP_DURATION_HISTOGRAM(op_name, op_start_time);
      break;
    default:
      op_result = false;
  }
  if (!HandleOpResult(op_result, op_name.c_str(), error))
    return false;

  return true;
}

bool DeltaPerformer::IsManifestValid() {
  return manifest_valid_;
}

bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
  partitions_.assign(manifest_.partitions().begin(),
                     manifest_.partitions().end());

  // For VAB and partial updates, the partition preparation will copy the
  // dynamic partitions metadata to the target metadata slot, and rename the
  // slot suffix of the partitions in the metadata.
  if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
    uint64_t required_size = 0;
    if (!PreparePartitionsForUpdate(&required_size, error)) {
      if (*error == ErrorCode::kOverlayfsenabledError) {
        return false;
      } else if (required_size > 0) {
        *error = ErrorCode::kNotEnoughSpace;
      } else {
        *error = ErrorCode::kInstallDeviceOpenError;
      }
      return false;
    }
  }

  // Partitions in the manifest are no longer needed after preparing
  // partitions.
  manifest_.clear_partitions();
  // TODO(xunchang) TBD: allow partial update only on devices with dynamic
  // partitions.
  if (manifest_.partial_update()) {
    std::set<std::string> touched_partitions;
    for (const auto& partition_update : partitions_) {
      touched_partitions.insert(partition_update.partition_name());
    }

    auto generator = partition_update_generator::Create(boot_control_,
                                                        manifest_.block_size());
    std::vector<PartitionUpdate> untouched_static_partitions;
    if (!generator->GenerateOperationsForPartitionsNotInPayload(
            install_plan_->source_slot,
            install_plan_->target_slot,
            touched_partitions,
            &untouched_static_partitions)) {
      LOG(ERROR)
          << "Failed to generate operations for partitions not in payload "
          << android::base::Join(touched_partitions, ", ");
      *error = ErrorCode::kDownloadStateInitializationError;
      return false;
    }
    partitions_.insert(partitions_.end(),
                       untouched_static_partitions.begin(),
                       untouched_static_partitions.end());

    // Save the untouched dynamic partitions in the install plan.
    std::vector<std::string> dynamic_partitions;
    if (!boot_control_->GetDynamicPartitionControl()
             ->ListDynamicPartitionsForSlot(install_plan_->source_slot,
                                            boot_control_->GetCurrentSlot(),
                                            &dynamic_partitions)) {
      LOG(ERROR) << "Failed to load dynamic partitions from slot "
                 << install_plan_->source_slot;
      return false;
    }
    install_plan_->untouched_dynamic_partitions.clear();
    for (const auto& name : dynamic_partitions) {
      if (touched_partitions.find(name) == touched_partitions.end()) {
        install_plan_->untouched_dynamic_partitions.push_back(name);
      }
    }
  }

  const auto start = std::chrono::system_clock::now();
  if (!install_plan_->ParsePartitions(
          partitions_, boot_control_, block_size_, error)) {
    return false;
  }
  const auto duration = std::chrono::system_clock::now() - start;
  LOG(INFO)
      << "ParsePartitions done. took "
      << std::chrono::duration_cast<std::chrono::milliseconds>(duration).count()
      << " ms";

  auto&& has_verity = [](const auto& part) {
    return part.fec_extent().num_blocks() > 0 ||
           part.hash_tree_extent().num_blocks() > 0;
  };
  if (!std::any_of(partitions_.begin(), partitions_.end(), has_verity)) {
    install_plan_->write_verity = false;
  }

  LogPartitionInfo(partitions_);
  return true;
}

bool DeltaPerformer::PreparePartitionsForUpdate(uint64_t* required_size,
                                                ErrorCode* error) {
  // Call the static PreparePartitionsForUpdate with the hash from
  // kPrefsUpdateCheckResponseHash to ensure that the hash of the payload that
  // space is preallocated for is the same as the hash of the payload being
  // applied.
  string update_check_response_hash;
  ignore_result(prefs_->GetString(kPrefsUpdateCheckResponseHash,
                                  &update_check_response_hash));
  return PreparePartitionsForUpdate(prefs_,
                                    boot_control_,
                                    install_plan_->target_slot,
                                    manifest_,
                                    update_check_response_hash,
                                    required_size,
                                    error);
}

bool DeltaPerformer::PreparePartitionsForUpdate(
    PrefsInterface* prefs,
    BootControlInterface* boot_control,
    BootControlInterface::Slot target_slot,
    const DeltaArchiveManifest& manifest,
    const std::string& update_check_response_hash,
    uint64_t* required_size,
    ErrorCode* error) {
  string last_hash;
  ignore_result(
      prefs->GetString(kPrefsDynamicPartitionMetadataUpdated, &last_hash));

  bool is_resume = !update_check_response_hash.empty() &&
                   last_hash == update_check_response_hash;

  if (is_resume) {
    LOG(INFO) << "Using previously prepared partitions for update. hash = "
              << last_hash;
  } else {
    LOG(INFO) << "Preparing partitions for new update. last hash = "
              << last_hash << ", new hash = " << update_check_response_hash;
    ResetUpdateProgress(prefs, false);
  }

  const auto start = std::chrono::system_clock::now();
  if (!boot_control->GetDynamicPartitionControl()->PreparePartitionsForUpdate(
          boot_control->GetCurrentSlot(),
          target_slot,
          manifest,
          !is_resume /* should update */,
          required_size,
          error)) {
    LOG(ERROR) << "Unable to initialize partition metadata for slot "
               << BootControlInterface::SlotName(target_slot) << " "
               << utils::ErrorCodeToString(*error);
    return false;
  }
  const auto duration = std::chrono::system_clock::now() - start;

  TEST_AND_RETURN_FALSE(prefs->SetString(kPrefsDynamicPartitionMetadataUpdated,
                                         update_check_response_hash));
  LOG(INFO)
      << "PreparePartitionsForUpdate done. took "
      << std::chrono::duration_cast<std::chrono::milliseconds>(duration).count()
      << " ms";

  return true;
}

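// Returns true if the data blob needed by |operation| (if any) is already
// fully buffered. For example, an operation with data_offset == 4096 and
// data_length == 1024 can be performed once buffer_offset_ + buffer_.size()
// reaches 5120.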
bool DeltaPerformer::CanPerformInstallOperation(
    const chromeos_update_engine::InstallOperation& operation) {
  // If we don't have a data blob we can apply it right away.
  if (!operation.has_data_offset() && !operation.has_data_length())
    return true;

  // See if we have the entire data blob in the buffer
  if (operation.data_offset() < buffer_offset_) {
    LOG(ERROR) << "we threw away data it seems?";
    return false;
  }

  return (operation.data_offset() + operation.data_length() <=
          buffer_offset_ + buffer_.size());
}

bool DeltaPerformer::PerformReplaceOperation(
    const InstallOperation& operation) {
  CHECK(operation.type() == InstallOperation::REPLACE ||
        operation.type() == InstallOperation::REPLACE_BZ ||
        operation.type() == InstallOperation::REPLACE_XZ);

  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());

  TEST_AND_RETURN_FALSE(partition_writer_->PerformReplaceOperation(
      operation, buffer_.data(), buffer_.size()));
  // Update buffer
  DiscardBuffer(true, buffer_.size());
  return true;
}

bool DeltaPerformer::PerformZeroOrDiscardOperation(
    const InstallOperation& operation) {
  CHECK(operation.type() == InstallOperation::DISCARD ||
        operation.type() == InstallOperation::ZERO);

  // These operations have no blob.
  TEST_AND_RETURN_FALSE(!operation.has_data_offset());
  TEST_AND_RETURN_FALSE(!operation.has_data_length());

  return partition_writer_->PerformZeroOrDiscardOperation(operation);
}

bool DeltaPerformer::PerformSourceCopyOperation(
    const InstallOperation& operation, ErrorCode* error) {
  if (operation.has_src_length())
    TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
  if (operation.has_dst_length())
    TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
  return partition_writer_->PerformSourceCopyOperation(operation, error);
}

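// Converts |extents| to the "offset:length,offset:length" string format that
// bspatch understands, capping the total at |full_length|. For example, one
// extent {start_block: 1, num_blocks: 2} with a 4096-byte block size and
// full_length == 8192 yields "4096:8192".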
bool DeltaPerformer::ExtentsToBsdiffPositionsString(
    const RepeatedPtrField<Extent>& extents,
    uint64_t block_size,
    uint64_t full_length,
    string* positions_string) {
  string ret;
  uint64_t length = 0;
  for (const Extent& extent : extents) {
    int64_t start = extent.start_block() * block_size;
    uint64_t this_length =
        min(full_length - length,
            static_cast<uint64_t>(extent.num_blocks()) * block_size);
    ret += android::base::StringPrintf(
        "%" PRIi64 ":%" PRIu64 ",", start, this_length);
    length += this_length;
  }
  TEST_AND_RETURN_FALSE(length == full_length);
  if (!ret.empty())
    ret.resize(ret.size() - 1);  // Strip trailing comma off
  *positions_string = ret;
  return true;
}

bool DeltaPerformer::PerformDiffOperation(const InstallOperation& operation,
                                          ErrorCode* error) {
  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
  if (operation.has_src_length())
    TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
  if (operation.has_dst_length())
    TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);

  TEST_AND_RETURN_FALSE(partition_writer_->PerformDiffOperation(
      operation, error, buffer_.data(), buffer_.size()));
  DiscardBuffer(true, buffer_.size());
  return true;
}

bool DeltaPerformer::ExtractSignatureMessage() {
  TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
  TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= manifest_.signatures_size());
  signatures_message_data_.assign(
      buffer_.begin(), buffer_.begin() + manifest_.signatures_size());

  LOG(INFO) << "Extracted signature data of size "
            << manifest_.signatures_size() << " at "
            << manifest_.signatures_offset();
  return true;
}

bool DeltaPerformer::GetPublicKey(string* out_public_key) {
  out_public_key->clear();

  if (utils::FileExists(public_key_path_.c_str())) {
    LOG(INFO) << "Verifying using public key: " << public_key_path_;
    return utils::ReadFile(public_key_path_, out_public_key);
  }

  // If this is an official build then we are not allowed to use a public key
  // from the Omaha response.
  if (!hardware_->IsOfficialBuild() && !install_plan_->public_key_rsa.empty()) {
    LOG(INFO) << "Verifying using public key from Omaha response.";
    return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa,
                                               out_public_key);
  }
  LOG(INFO) << "No public keys found for verification.";
  return true;
}

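// Returns a {verifier, perform_verification} pair: {nullptr, false} means
// verification should be skipped (no key material available), {nullptr, true}
// means a verifier was required but could not be created, and a non-null
// verifier means signature verification should be performed.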
std::pair<std::unique_ptr<PayloadVerifier>, bool>
DeltaPerformer::CreatePayloadVerifier() {
  if (utils::FileExists(update_certificates_path_.c_str())) {
    LOG(INFO) << "Verifying using certificates: " << update_certificates_path_;
    return {
        PayloadVerifier::CreateInstanceFromZipPath(update_certificates_path_),
        true};
  }

  string public_key;
  if (!GetPublicKey(&public_key)) {
    LOG(ERROR) << "Failed to read public key";
    return {nullptr, true};
  }

  // Skip the verification if the public key is empty.
  if (public_key.empty()) {
    return {nullptr, false};
  }
  LOG(INFO) << "Verifying using public key: " << public_key;
  return {PayloadVerifier::CreateInstance(public_key), true};
}

ErrorCode DeltaPerformer::ValidateManifest() {
  // Perform assorted checks to validate the manifest, make sure it matches
  // data from other sources, and that it is a supported version.
  bool has_old_fields = std::any_of(manifest_.partitions().begin(),
                                    manifest_.partitions().end(),
                                    [](const PartitionUpdate& partition) {
                                      return partition.has_old_partition_info();
                                    });

  // The presence of an old partition hash is the sole indicator for a delta
  // update. Also, always treat a partial update as delta so that we can
  // perform the minor version check correctly.
  InstallPayloadType actual_payload_type =
      (has_old_fields || manifest_.partial_update())
          ? InstallPayloadType::kDelta
          : InstallPayloadType::kFull;

  if (payload_->type == InstallPayloadType::kUnknown) {
    LOG(INFO) << "Detected a '"
              << InstallPayloadTypeToString(actual_payload_type)
              << "' payload.";
    payload_->type = actual_payload_type;
  } else if (payload_->type != actual_payload_type) {
    LOG(ERROR) << "InstallPlan expected a '"
               << InstallPayloadTypeToString(payload_->type)
               << "' payload but the downloaded manifest contains a '"
               << InstallPayloadTypeToString(actual_payload_type)
               << "' payload.";
    return ErrorCode::kPayloadMismatchedType;
  }
  // Check that the minor version is compatible.
  // TODO(xunchang) increment minor version & add check for partial update
  if (actual_payload_type == InstallPayloadType::kFull) {
    if (manifest_.minor_version() != kFullPayloadMinorVersion) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << ", but all full payloads should have version "
                 << kFullPayloadMinorVersion << ".";
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  } else {
    if (manifest_.minor_version() < kMinSupportedMinorPayloadVersion ||
        manifest_.minor_version() > kMaxSupportedMinorPayloadVersion) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << " not in the range of supported minor versions ["
                 << kMinSupportedMinorPayloadVersion << ", "
                 << kMaxSupportedMinorPayloadVersion << "].";
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  }

  ErrorCode error_code = CheckTimestampError();
  if (error_code != ErrorCode::kSuccess) {
    if (error_code == ErrorCode::kPayloadTimestampError) {
      if (!hardware_->AllowDowngrade()) {
        return ErrorCode::kPayloadTimestampError;
      }
      LOG(INFO) << "The current OS build allows downgrade, continuing to apply"
                   " the payload with an older timestamp.";
    } else {
      LOG(ERROR) << "Timestamp check returned "
                 << utils::ErrorCodeToString(error_code);
      return error_code;
    }
  }

  // TODO(crbug.com/37661) we should be adding more and more manifest checks,
  // such as partition boundaries, etc.

  return ErrorCode::kSuccess;
}

ErrorCode DeltaPerformer::CheckTimestampError() const {
  bool is_partial_update =
      manifest_.has_partial_update() && manifest_.partial_update();
  const auto& partitions = manifest_.partitions();

  // Checks the version field for a given PartitionUpdate object. Returns
  // kSuccess if the check passes; sets |*downgrade_detected| and returns
  // kPayloadTimestampError if a downgrade is detected; returns any other
  // error code if the caller should exit early due to errors.
  auto&& timestamp_valid = [this](const PartitionUpdate& partition,
                                  bool allow_empty_version,
                                  bool* downgrade_detected) -> ErrorCode {
    const auto& partition_name = partition.partition_name();
    if (!partition.has_version()) {
      if (hardware_->GetVersionForLogging(partition_name).empty()) {
        LOG(INFO) << partition_name << " doesn't have a version, skipping "
                  << "downgrade check.";
        return ErrorCode::kSuccess;
      }

      if (allow_empty_version) {
        return ErrorCode::kSuccess;
      }
      LOG(ERROR)
          << "PartitionUpdate " << partition_name
          << " doesn't have a version field. Not allowed in partial updates.";
      return ErrorCode::kDownloadManifestParseError;
    }

    auto error_code =
        hardware_->IsPartitionUpdateValid(partition_name, partition.version());
    switch (error_code) {
      case ErrorCode::kSuccess:
        break;
      case ErrorCode::kPayloadTimestampError:
        *downgrade_detected = true;
        LOG(WARNING) << "PartitionUpdate " << partition_name
                     << " has an older version than the partition on device.";
        break;
      default:
        LOG(ERROR) << "IsPartitionUpdateValid(" << partition_name
                   << ") returned " << utils::ErrorCodeToString(error_code);
        break;
    }
    return error_code;
  };

  bool downgrade_detected = false;

  if (is_partial_update) {
    // For partial updates, all partitions MUST have valid timestamps,
    // but max_timestamp can be empty.
    for (const auto& partition : partitions) {
      auto error_code = timestamp_valid(
          partition, false /* allow_empty_version */, &downgrade_detected);
      if (error_code != ErrorCode::kSuccess &&
          error_code != ErrorCode::kPayloadTimestampError) {
        return error_code;
      }
    }
    if (downgrade_detected) {
      return ErrorCode::kPayloadTimestampError;
    }
    return ErrorCode::kSuccess;
  }

  // For non-partial updates, check max_timestamp first.
  if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
    LOG(ERROR) << "The current OS build timestamp ("
               << hardware_->GetBuildTimestamp()
               << ") is newer than the maximum timestamp in the manifest ("
               << manifest_.max_timestamp() << ")";
    return ErrorCode::kPayloadTimestampError;
  }
  // Otherwise... partitions can have empty timestamps.
  for (const auto& partition : partitions) {
    auto error_code = timestamp_valid(
        partition, true /* allow_empty_version */, &downgrade_detected);
    if (error_code != ErrorCode::kSuccess &&
        error_code != ErrorCode::kPayloadTimestampError) {
      return error_code;
    }
  }
  if (downgrade_detected) {
    return ErrorCode::kPayloadTimestampError;
  }
  return ErrorCode::kSuccess;
}

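// Checks that the SHA-256 hash of the operation's data blob (currently at the
// front of |buffer_|) matches the hash recorded in the manifest, which was
// itself authenticated by the metadata signature.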
ErrorCode DeltaPerformer::ValidateOperationHash(
    const InstallOperation& operation) {
  if (!operation.data_sha256_hash().size()) {
    if (!operation.data_length()) {
      // Operations that do not have any data blob won't have any operation
      // hash either. So, these operations are always considered validated
      // since the metadata that contains all the non-data-blob portions of
      // the operation has already been validated. This is true for both HTTP
      // and HTTPS cases.
      return ErrorCode::kSuccess;
    }

    // No hash is present for an operation that has data blobs. This shouldn't
    // happen normally for any client that has this code, because the
    // corresponding update should have been produced with the operation
    // hashes. So if it happens it means either we've turned operation hash
    // generation off in DeltaDiffGenerator or it's a regression of some sort.
    // One caveat though: the last operation is an unused signature operation
    // that doesn't have a hash at the time the manifest is created. So we
    // should not complain about that operation. This operation can be
    // recognized by the fact that its offset is mentioned in the manifest.
    if (manifest_.signatures_offset() &&
        manifest_.signatures_offset() == operation.data_offset()) {
      LOG(INFO) << "Skipping hash verification for signature operation "
                << next_operation_num_ + 1;
    } else {
      if (install_plan_->hash_checks_mandatory) {
        LOG(ERROR) << "Missing mandatory operation hash for operation "
                   << next_operation_num_ + 1;
        return ErrorCode::kDownloadOperationHashMissingError;
      }

      LOG(WARNING) << "Cannot validate operation " << next_operation_num_ + 1
                   << " as there's no operation hash in manifest";
    }
    return ErrorCode::kSuccess;
  }

  brillo::Blob expected_op_hash;
  expected_op_hash.assign(operation.data_sha256_hash().data(),
                          (operation.data_sha256_hash().data() +
                           operation.data_sha256_hash().size()));

  brillo::Blob calculated_op_hash;
  if (!HashCalculator::RawHashOfBytes(
          buffer_.data(), operation.data_length(), &calculated_op_hash)) {
    LOG(ERROR) << "Unable to compute actual hash of operation "
               << next_operation_num_;
    return ErrorCode::kDownloadOperationHashVerificationError;
  }

  if (calculated_op_hash != expected_op_hash) {
    LOG(ERROR) << "Hash verification failed for operation "
               << next_operation_num_
               << ". Expected hash = " << HexEncode(expected_op_hash);
    LOG(ERROR) << "Calculated hash over " << operation.data_length()
               << " bytes at offset: " << operation.data_offset() << " = "
               << HexEncode(calculated_op_hash);
    return ErrorCode::kDownloadOperationHashMismatch;
  }

  return ErrorCode::kSuccess;
}

#define TEST_AND_RETURN_VAL(_retval, _condition)                \
  do {                                                          \
    if (!(_condition)) {                                        \
      LOG(ERROR) << "VerifyPayload failure: " << #_condition;   \
      return _retval;                                           \
    }                                                           \
  } while (0);

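// Verifies the fully downloaded payload: the total size, the payload hash
// reported in the update response, and (when key material is available) the
// payload signature blob appended at the end of the payload.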
ErrorCode DeltaPerformer::VerifyPayload(
    const brillo::Blob& update_check_response_hash,
    const uint64_t update_check_response_size) {
  // Verifies the download size.
  if (update_check_response_size !=
      metadata_size_ + metadata_signature_size_ + buffer_offset_) {
    LOG(ERROR) << "update_check_response_size (" << update_check_response_size
               << ") doesn't match metadata_size (" << metadata_size_
               << ") + metadata_signature_size (" << metadata_signature_size_
               << ") + buffer_offset (" << buffer_offset_ << ").";
    return ErrorCode::kPayloadSizeMismatchError;
  }

  // Verifies the payload hash.
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
                      !payload_hash_calculator_.raw_hash().empty());
  if (payload_hash_calculator_.raw_hash() != update_check_response_hash) {
    LOG(ERROR) << "Actual hash: "
               << HexEncode(payload_hash_calculator_.raw_hash())
               << ", expected hash: " << HexEncode(update_check_response_hash);
    return ErrorCode::kPayloadHashMismatchError;
  }

  // NOLINTNEXTLINE(whitespace/braces)
  auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
  if (!perform_verification) {
    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
    return ErrorCode::kSuccess;
  }
  if (!payload_verifier) {
    LOG(ERROR) << "Failed to create the payload verifier.";
    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
  }

  TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
                      !signatures_message_data_.empty());
  brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
                      hash_data.size() == kSHA256Size);

  if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) {
    // The autoupdate_CatchBadSignatures test checks for this string
    // in log-files. Keep in sync.
    LOG(ERROR) << "Public key verification failed, thus update failed.";
    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
  }

  LOG(INFO) << "Payload hash matches value in payload.";
  return ErrorCode::kSuccess;
}

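// Feeds the consumed bytes into the running hash calculators and releases the
// buffer memory. |payload_hash_calculator_| covers everything consumed, while
// |signed_hash_calculator_| only covers the first |signed_hash_buffer_size|
// bytes, so the payload signature itself can be excluded from the signed hash.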
DiscardBuffer(bool do_advance_offset,size_t signed_hash_buffer_size)1370 void DeltaPerformer::DiscardBuffer(bool do_advance_offset,
1371 size_t signed_hash_buffer_size) {
1372 // Update the buffer offset.
1373 if (do_advance_offset)
1374 buffer_offset_ += buffer_.size();
1375
1376 // Hash the content.
1377 payload_hash_calculator_.Update(buffer_.data(), buffer_.size());
1378 signed_hash_calculator_.Update(buffer_.data(), signed_hash_buffer_size);
1379
1380 // Swap content with an empty vector to ensure that all memory is released.
1381 brillo::Blob().swap(buffer_);
1382 }
1383
CanResumeUpdate(PrefsInterface * prefs,const string & update_check_response_hash)1384 bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs,
1385 const string& update_check_response_hash) {
1386 int64_t next_operation = kUpdateStateOperationInvalid;
1387 if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
1388 next_operation != kUpdateStateOperationInvalid && next_operation > 0)) {
1389 LOG(WARNING) << "Failed to resume update " << kPrefsUpdateStateNextOperation
1390 << " invalid: " << next_operation;
1391 return false;
1392 }
1393
1394 string interrupted_hash;
1395 if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) &&
1396 !interrupted_hash.empty() &&
1397 interrupted_hash == update_check_response_hash)) {
1398 LOG(WARNING) << "Failed to resume update " << kPrefsUpdateCheckResponseHash
1399 << " mismatch, last hash: " << interrupted_hash
1400                  << ", current hash: " << update_check_response_hash;
1401 return false;
1402 }
1403
1404 int64_t resumed_update_failures{};
1405   // Storing this value is optional, but if it is present it must not
1406   // exceed the limit.
1407 if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
1408 resumed_update_failures > kMaxResumedUpdateFailures) {
1409 LOG(WARNING) << "Failed to resume update " << kPrefsResumedUpdateFailures
1410 << " has value " << resumed_update_failures
1411                  << ", which is over the limit " << kMaxResumedUpdateFailures;
1412 return false;
1413 }
1414
1415   // Validate the remaining resume state.
1416 int64_t next_data_offset = -1;
1417 if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
1418 next_data_offset >= 0)) {
1419 LOG(WARNING) << "Failed to resume update "
1420 << kPrefsUpdateStateNextDataOffset
1421 << " invalid: " << next_data_offset;
1422 return false;
1423 }
1424
1425 string sha256_context;
1426 if (!(prefs->GetString(kPrefsUpdateStateSHA256Context, &sha256_context) &&
1427 !sha256_context.empty())) {
1428 LOG(WARNING) << "Failed to resume update " << kPrefsUpdateStateSHA256Context
1429 << " is empty.";
1430 return false;
1431 }
1432
1433 int64_t manifest_metadata_size = 0;
1434 if (!(prefs->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
1435 manifest_metadata_size > 0)) {
1436 LOG(WARNING) << "Failed to resume update " << kPrefsManifestMetadataSize
1437 << " invalid: " << manifest_metadata_size;
1438 return false;
1439 }
1440
1441 int64_t manifest_signature_size = 0;
1442 if (!(prefs->GetInt64(kPrefsManifestSignatureSize,
1443 &manifest_signature_size) &&
1444 manifest_signature_size >= 0)) {
1445 LOG(WARNING) << "Failed to resume update " << kPrefsManifestSignatureSize
1446 << " invalid: " << manifest_signature_size;
1447 return false;
1448 }
1449
1450 return true;
1451 }
1452
1453 bool DeltaPerformer::ResetUpdateProgress(
1454 PrefsInterface* prefs,
1455 bool quick,
1456 bool skip_dynamic_partititon_metadata_updated) {
1457 TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
1458 kUpdateStateOperationInvalid));
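  // A quick reset only invalidates the next operation; a full reset also
  // clears the data offset, hash contexts, signature blob, metadata sizes,
  // failure count, and the post-install/verity markers.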
1459 if (!quick) {
1460 prefs->SetInt64(kPrefsUpdateStateNextDataOffset, -1);
1461 prefs->SetInt64(kPrefsUpdateStateNextDataLength, 0);
1462 prefs->SetString(kPrefsUpdateStateSHA256Context, "");
1463 prefs->SetString(kPrefsUpdateStateSignedSHA256Context, "");
1464 prefs->SetString(kPrefsUpdateStateSignatureBlob, "");
1465 prefs->SetInt64(kPrefsManifestMetadataSize, -1);
1466 prefs->SetInt64(kPrefsManifestSignatureSize, -1);
1467 prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
1468 prefs->Delete(kPrefsPostInstallSucceeded);
1469 prefs->Delete(kPrefsVerityWritten);
1470 if (!skip_dynamic_partititon_metadata_updated) {
1471 LOG(INFO) << "Resetting recorded hash for prepared partitions.";
1472 prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
1473 }
1474 }
1475 return true;
1476 }
1477
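// Rate-limits checkpointing: returns true at most once per
// |update_checkpoint_wait_| interval.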
1478 bool DeltaPerformer::ShouldCheckpoint() {
1479 base::TimeTicks curr_time = base::TimeTicks::Now();
1480 if (curr_time > update_checkpoint_time_) {
1481 update_checkpoint_time_ = curr_time + update_checkpoint_wait_;
1482 return true;
1483 }
1484 return false;
1485 }
1486
1487 bool DeltaPerformer::CheckpointUpdateProgress(bool force) {
1488 if (!force && !ShouldCheckpoint()) {
1489 return false;
1490 }
1491 Terminator::set_exit_blocked(true);
1492 LOG_IF(WARNING, !prefs_->StartTransaction())
1493 << "unable to start transaction in checkpointing";
1494 DEFER {
1495 prefs_->CancelTransaction();
1496 };
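  // Only persist the detailed progress state when new operations have
  // completed since the last checkpoint (or when forced).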
1497 if (last_updated_operation_num_ != next_operation_num_ || force) {
1498 if (!signatures_message_data_.empty()) {
1499 // Save the signature blob because if the update is interrupted after the
1500 // download phase we don't go through this path anymore. Some alternatives
1501 // to consider:
1502 //
1503 // 1. On resume, re-download the signature blob from the server and
1504 // re-verify it.
1505 //
1506 // 2. Verify the signature as soon as it's received and don't checkpoint
1507 // the blob and the signed sha-256 context.
1508 LOG_IF(WARNING,
1509 !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
1510 signatures_message_data_))
1511 << "Unable to store the signature blob.";
1512 }
1513 TEST_AND_RETURN_FALSE(prefs_->SetString(
1514 kPrefsUpdateStateSHA256Context, payload_hash_calculator_.GetContext()));
1515 TEST_AND_RETURN_FALSE(
1516 prefs_->SetString(kPrefsUpdateStateSignedSHA256Context,
1517 signed_hash_calculator_.GetContext()));
1518 TEST_AND_RETURN_FALSE(
1519 prefs_->SetInt64(kPrefsUpdateStateNextDataOffset, buffer_offset_));
1520 last_updated_operation_num_ = next_operation_num_;
1521
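    // acc_num_operations_[i] holds the cumulative operation count through
    // partition i, so the search below finds the partition containing
    // next_operation_num_; that operation's data length is then recorded as
    // the next expected data length.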
1522 if (next_operation_num_ < num_total_operations_) {
1523 size_t partition_index = current_partition_;
1524 while (next_operation_num_ >= acc_num_operations_[partition_index]) {
1525 partition_index++;
1526 }
1527 const size_t partition_operation_num =
1528 next_operation_num_ -
1529 (partition_index ? acc_num_operations_[partition_index - 1] : 0);
1530 const InstallOperation& op =
1531 partitions_[partition_index].operations(partition_operation_num);
1532 TEST_AND_RETURN_FALSE(
1533 prefs_->SetInt64(kPrefsUpdateStateNextDataLength, op.data_length()));
1534 } else {
1535 TEST_AND_RETURN_FALSE(
1536 prefs_->SetInt64(kPrefsUpdateStateNextDataLength, 0));
1537 }
1538 if (partition_writer_) {
1539 partition_writer_->CheckpointUpdateProgress(GetPartitionOperationNum());
1540 } else {
1541 CHECK_EQ(next_operation_num_, num_total_operations_)
1542 << "Partition writer is null, we are expected to finish all "
1543 "operations: "
1544 << next_operation_num_ << "/" << num_total_operations_;
1545 }
1546 }
1547 TEST_AND_RETURN_FALSE(
1548 prefs_->SetInt64(kPrefsUpdateStateNextOperation, next_operation_num_));
1549 if (!prefs_->SubmitTransaction()) {
1550 LOG(ERROR) << "Failed to submit transaction in checkpointing";
1551 }
1552 return true;
1553 }
1554
1555 bool DeltaPerformer::PrimeUpdateState() {
1556 CHECK(manifest_valid_);
1557
1558 int64_t next_operation = kUpdateStateOperationInvalid;
1559 if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
1560 next_operation == kUpdateStateOperationInvalid || next_operation <= 0) {
1561 // Initiating a new update, no more state needs to be initialized.
1562 return true;
1563 }
1564 next_operation_num_ = next_operation;
1565
1566 // Resuming an update -- load the rest of the update state.
1567 int64_t next_data_offset = -1;
1568 TEST_AND_RETURN_FALSE(
1569 prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
1570 next_data_offset >= 0);
1571 buffer_offset_ = next_data_offset;
1572
1573 // The signed hash context and the signature blob may be empty if the
1574 // interrupted update didn't reach the signature.
1575 string signed_hash_context;
1576 if (prefs_->GetString(kPrefsUpdateStateSignedSHA256Context,
1577 &signed_hash_context)) {
1578 TEST_AND_RETURN_FALSE(
1579 signed_hash_calculator_.SetContext(signed_hash_context));
1580 }
1581
1582 prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signatures_message_data_);
1583
1584 string hash_context;
1585 TEST_AND_RETURN_FALSE(
1586 prefs_->GetString(kPrefsUpdateStateSHA256Context, &hash_context) &&
1587 payload_hash_calculator_.SetContext(hash_context));
1588
1589 int64_t manifest_metadata_size = 0;
1590 TEST_AND_RETURN_FALSE(
1591 prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
1592 manifest_metadata_size > 0);
1593 metadata_size_ = manifest_metadata_size;
1594
1595 int64_t manifest_signature_size = 0;
1596 TEST_AND_RETURN_FALSE(
1597 prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size) &&
1598 manifest_signature_size >= 0);
1599 metadata_signature_size_ = manifest_signature_size;
1600
1601 // Advance the download progress to reflect what doesn't need to be
1602 // re-downloaded.
1603 total_bytes_received_ += buffer_offset_;
1604
1605 // Speculatively count the resume as a failure.
1606 int64_t resumed_update_failures{};
1607 if (prefs_->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)) {
1608 resumed_update_failures++;
1609 } else {
1610 resumed_update_failures = 1;
1611 }
1612 prefs_->SetInt64(kPrefsResumedUpdateFailures, resumed_update_failures);
1613 return true;
1614 }
1615
1616 bool DeltaPerformer::IsDynamicPartition(const std::string& part_name,
1617 uint32_t slot) {
1618 return boot_control_->GetDynamicPartitionControl()->IsDynamicPartition(
1619 part_name, slot);
1620 }
1621
1622 std::unique_ptr<PartitionWriterInterface> DeltaPerformer::CreatePartitionWriter(
1623 const PartitionUpdate& partition_update,
1624 const InstallPlan::Partition& install_part,
1625 DynamicPartitionControlInterface* dynamic_control,
1626 size_t block_size,
1627 bool is_interactive,
1628 bool is_dynamic_partition) {
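  // The writer is built from the performer's own state (block_size_,
  // interactive_, and the target slot from install_plan_) rather than the
  // block_size/is_interactive/is_dynamic_partition arguments passed in.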
1629 return partition_writer::CreatePartitionWriter(
1630 partition_update,
1631 install_part,
1632 dynamic_control,
1633 block_size_,
1634 interactive_,
1635 IsDynamicPartition(install_part.name, install_plan_->target_slot));
1636 }
1637
1638 } // namespace chromeos_update_engine
1639