//
// Copyright (C) 2012 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_consumer/delta_performer.h"

#include <errno.h>
#include <linux/fs.h>

#include <algorithm>
#include <cstring>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include <base/files/file_util.h>
#include <base/format_macros.h>
#include <base/metrics/histogram_macros.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <base/time/time.h>
#include <brillo/data_encoding.h>
#include <bsdiff/bspatch.h>
#include <google/protobuf/repeated_field.h>
#include <puffin/puffpatch.h>

#include "update_engine/common/constants.h"
#include "update_engine/common/hardware_interface.h"
#include "update_engine/common/prefs_interface.h"
#include "update_engine/common/subprocess.h"
#include "update_engine/common/terminator.h"
#include "update_engine/payload_consumer/bzip_extent_writer.h"
#include "update_engine/payload_consumer/cached_file_descriptor.h"
#include "update_engine/payload_consumer/certificate_parser_interface.h"
#include "update_engine/payload_consumer/download_action.h"
#include "update_engine/payload_consumer/extent_reader.h"
#include "update_engine/payload_consumer/extent_writer.h"
#if USE_FEC
#include "update_engine/payload_consumer/fec_file_descriptor.h"
#endif  // USE_FEC
#include "update_engine/payload_consumer/file_descriptor_utils.h"
#include "update_engine/payload_consumer/mount_history.h"
#if USE_MTD
#include "update_engine/payload_consumer/mtd_file_descriptor.h"
#endif  // USE_MTD
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_consumer/payload_verifier.h"
#include "update_engine/payload_consumer/xz_extent_writer.h"

using google::protobuf::RepeatedPtrField;
using std::min;
using std::string;
using std::vector;

namespace chromeos_update_engine {
const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
const unsigned DeltaPerformer::kProgressOperationsWeight = 50;
const uint64_t DeltaPerformer::kCheckpointFrequencySeconds = 1;

namespace {
const int kUpdateStateOperationInvalid = -1;
const int kMaxResumedUpdateFailures = 10;
#if USE_MTD
const int kUbiVolumeAttachTimeout = 5 * 60;
#endif

const uint64_t kCacheSize = 1024 * 1024;  // 1MB

FileDescriptorPtr CreateFileDescriptor(const char* path) {
  FileDescriptorPtr ret;
#if USE_MTD
  if (strstr(path, "/dev/ubi") == path) {
    if (!UbiFileDescriptor::IsUbi(path)) {
      // The volume might not have been attached at boot time.
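      // Best-effort recovery: extract the UBI volume number from the device
      // path and try attaching it, waiting up to kUbiVolumeAttachTimeout
      // seconds before giving up.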
      int volume_no;
      if (utils::SplitPartitionName(path, nullptr, &volume_no)) {
        utils::TryAttachingUbiVolume(volume_no, kUbiVolumeAttachTimeout);
      }
    }
    if (UbiFileDescriptor::IsUbi(path)) {
      LOG(INFO) << path << " is a UBI device.";
      ret.reset(new UbiFileDescriptor);
    }
  } else if (MtdFileDescriptor::IsMtd(path)) {
    LOG(INFO) << path << " is an MTD device.";
    ret.reset(new MtdFileDescriptor);
  } else {
    LOG(INFO) << path << " is not an MTD nor a UBI device.";
#endif
    ret.reset(new EintrSafeFileDescriptor);
#if USE_MTD
  }
#endif
  return ret;
}

// Opens path for read/write. On success returns an open FileDescriptor
// and sets *err to 0. On failure, sets *err to errno and returns nullptr.
FileDescriptorPtr OpenFile(const char* path,
                           int mode,
                           bool cache_writes,
                           int* err) {
  // Try to mark the block device read-only based on the mode. Ignore any
  // failure since this won't work when passing regular files.
  bool read_only = (mode & O_ACCMODE) == O_RDONLY;
  utils::SetBlockDeviceReadOnly(path, read_only);

  FileDescriptorPtr fd = CreateFileDescriptor(path);
  if (cache_writes && !read_only) {
    fd = FileDescriptorPtr(new CachedFileDescriptor(fd, kCacheSize));
    LOG(INFO) << "Caching writes.";
  }
#if USE_MTD
  // On NAND devices, we can either read, or write, but not both. So here we
  // use O_WRONLY.
  if (UbiFileDescriptor::IsUbi(path) || MtdFileDescriptor::IsMtd(path)) {
    mode = O_WRONLY;
  }
#endif
  if (!fd->Open(path, mode, 000)) {
    *err = errno;
    PLOG(ERROR) << "Unable to open file " << path;
    return nullptr;
  }
  *err = 0;
  return fd;
}

// Discard the tail of the block device referenced by |fd|, from the offset
// |data_size| until the end of the block device. Returns whether the data was
// discarded.
bool DiscardPartitionTail(const FileDescriptorPtr& fd, uint64_t data_size) {
  uint64_t part_size = fd->BlockDevSize();
  if (!part_size || part_size <= data_size)
    return false;

  struct blkioctl_request {
    int number;
    const char* name;
  };
  const vector<blkioctl_request> blkioctl_requests = {
      {BLKDISCARD, "BLKDISCARD"},
      {BLKSECDISCARD, "BLKSECDISCARD"},
#ifdef BLKZEROOUT
      {BLKZEROOUT, "BLKZEROOUT"},
#endif
  };
  for (const auto& req : blkioctl_requests) {
    int error = 0;
    if (fd->BlkIoctl(req.number, data_size, part_size - data_size, &error) &&
        error == 0) {
      return true;
    }
    LOG(WARNING) << "Error discarding the last "
                 << (part_size - data_size) / 1024 << " KiB using ioctl("
                 << req.name << ")";
  }
  return false;
}

}  // namespace

// Computes the ratio of |part| and |total|, scaled to |norm|, using integer
// arithmetic.
static uint64_t IntRatio(uint64_t part, uint64_t total, uint64_t norm) {
  return part * norm / total;
}

void DeltaPerformer::LogProgress(const char* message_prefix) {
  // Format operations total count and percentage.
  string total_operations_str("?");
  string completed_percentage_str("");
  if (num_total_operations_) {
    total_operations_str = std::to_string(num_total_operations_);
    // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
    completed_percentage_str = base::StringPrintf(
        " (%" PRIu64 "%%)",
        IntRatio(next_operation_num_, num_total_operations_, 100));
  }

  // Format download total count and percentage.
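  // As with the operation count above, a '?' is printed while the total is
  // unknown: the payload size is only non-zero when the install plan (Omaha
  // response) provided one.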
  size_t payload_size = payload_->size;
  string payload_size_str("?");
  string downloaded_percentage_str("");
  if (payload_size) {
    payload_size_str = std::to_string(payload_size);
    // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
    downloaded_percentage_str = base::StringPrintf(
        " (%" PRIu64 "%%)", IntRatio(total_bytes_received_, payload_size, 100));
  }

  LOG(INFO) << (message_prefix ? message_prefix : "") << next_operation_num_
            << "/" << total_operations_str << " operations"
            << completed_percentage_str << ", " << total_bytes_received_ << "/"
            << payload_size_str << " bytes downloaded"
            << downloaded_percentage_str << ", overall progress "
            << overall_progress_ << "%";
}

void DeltaPerformer::UpdateOverallProgress(bool force_log,
                                           const char* message_prefix) {
  // Compute our download and overall progress.
  unsigned new_overall_progress = 0;
  static_assert(kProgressDownloadWeight + kProgressOperationsWeight == 100,
                "Progress weights don't add up");
  // Only consider download progress if its total size is known; otherwise
  // adjust the operations weight to compensate for the absence of download
  // progress. Also, make sure to cap the download portion at
  // kProgressDownloadWeight, in case we end up downloading more than we
  // initially expected (this indicates a problem, but can happen in practice).
  // TODO(garnold) the correction of operations weight when we do not have the
  // total payload size, as well as the conditional guard below, should both be
  // eliminated once we ensure that the payload_size in the install plan is
  // always given and is non-zero. This currently isn't the case during unit
  // tests (see chromium-os:37969).
  size_t payload_size = payload_->size;
  unsigned actual_operations_weight = kProgressOperationsWeight;
  if (payload_size)
    new_overall_progress +=
        min(static_cast<unsigned>(IntRatio(
                total_bytes_received_, payload_size, kProgressDownloadWeight)),
            kProgressDownloadWeight);
  else
    actual_operations_weight += kProgressDownloadWeight;

  // Only add completed operations if their total number is known; we
  // definitely expect an update to have at least one operation, so the
  // expectation is that this will eventually reach |actual_operations_weight|.
  if (num_total_operations_)
    new_overall_progress += IntRatio(
        next_operation_num_, num_total_operations_, actual_operations_weight);

  // Progress ratio cannot recede, unless our assumptions about the total
  // payload size, total number of operations, or the monotonicity of progress
  // are breached.
  if (new_overall_progress < overall_progress_) {
    LOG(WARNING) << "progress counter receded from " << overall_progress_
                 << "% down to " << new_overall_progress << "%; this is a bug";
    force_log = true;
  }
  overall_progress_ = new_overall_progress;

  // Update the chunk index and log as needed: if forced by the caller, or we
  // completed a progress chunk, or a timeout has expired.
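  // For example, with kProgressLogMaxChunks = 10, an overall progress of 37%
  // falls into chunk 3 (37 * 10 / 100); a log line is emitted whenever the
  // chunk index advances past |last_progress_chunk_|, i.e. roughly every 10%.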
  base::TimeTicks curr_time = base::TimeTicks::Now();
  unsigned curr_progress_chunk =
      overall_progress_ * kProgressLogMaxChunks / 100;
  if (force_log || curr_progress_chunk > last_progress_chunk_ ||
      curr_time > forced_progress_log_time_) {
    forced_progress_log_time_ = curr_time + forced_progress_log_wait_;
    LogProgress(message_prefix);
  }
  last_progress_chunk_ = curr_progress_chunk;
}

size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p,
                                        size_t* count_p,
                                        size_t max) {
  const size_t count = *count_p;
  if (!count)
    return 0;  // Special case shortcut.
  size_t read_len = min(count, max - buffer_.size());
  const char* bytes_start = *bytes_p;
  const char* bytes_end = bytes_start + read_len;
  buffer_.reserve(max);
  buffer_.insert(buffer_.end(), bytes_start, bytes_end);
  *bytes_p = bytes_end;
  *count_p = count - read_len;
  return read_len;
}

bool DeltaPerformer::HandleOpResult(bool op_result,
                                    const char* op_type_name,
                                    ErrorCode* error) {
  if (op_result)
    return true;

  size_t partition_first_op_num =
      current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0;
  LOG(ERROR) << "Failed to perform " << op_type_name << " operation "
             << next_operation_num_ << ", which is the operation "
             << next_operation_num_ - partition_first_op_num
             << " in partition \""
             << partitions_[current_partition_].partition_name() << "\"";
  if (*error == ErrorCode::kSuccess)
    *error = ErrorCode::kDownloadOperationExecutionError;
  return false;
}

int DeltaPerformer::Close() {
  int err = -CloseCurrentPartition();
  LOG_IF(ERROR,
         !payload_hash_calculator_.Finalize() ||
             !signed_hash_calculator_.Finalize())
      << "Unable to finalize the hash.";
  if (!buffer_.empty()) {
    LOG(INFO) << "Discarding " << buffer_.size() << " unused downloaded bytes";
    if (err >= 0)
      err = 1;
  }
  return -err;
}

int DeltaPerformer::CloseCurrentPartition() {
  int err = 0;
  if (source_fd_ && !source_fd_->Close()) {
    err = errno;
    PLOG(ERROR) << "Error closing source partition";
    if (!err)
      err = 1;
  }
  source_fd_.reset();
  if (source_ecc_fd_ && !source_ecc_fd_->Close()) {
    err = errno;
    PLOG(ERROR) << "Error closing ECC source partition";
    if (!err)
      err = 1;
  }
  source_ecc_fd_.reset();
  source_ecc_open_failure_ = false;
  source_path_.clear();

  if (target_fd_ && !target_fd_->Close()) {
    err = errno;
    PLOG(ERROR) << "Error closing target partition";
    if (!err)
      err = 1;
  }
  target_fd_.reset();
  target_path_.clear();
  return -err;
}

bool DeltaPerformer::OpenCurrentPartition() {
  if (current_partition_ >= partitions_.size())
    return false;

  const PartitionUpdate& partition = partitions_[current_partition_];
  size_t num_previous_partitions =
      install_plan_->partitions.size() - partitions_.size();
  const InstallPlan::Partition& install_part =
      install_plan_->partitions[num_previous_partitions + current_partition_];
  // Open source fds if we have a delta payload with minor version >= 2.
  if (payload_->type == InstallPayloadType::kDelta &&
      GetMinorVersion() != kInPlaceMinorPayloadVersion &&
      // With dynamic partitions we could create a new partition in a
      // delta payload, and we shouldn't open source partition in that case.
      install_part.source_size > 0) {
    source_path_ = install_part.source_path;
    int err;
    source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err);
    if (!source_fd_) {
      LOG(ERROR) << "Unable to open source partition "
                 << partition.partition_name() << " on slot "
                 << BootControlInterface::SlotName(install_plan_->source_slot)
                 << ", file " << source_path_;
      return false;
    }
  }

  target_path_ = install_part.target_path;
  int err;

  int flags = O_RDWR;
  if (!interactive_)
    flags |= O_DSYNC;

  LOG(INFO) << "Opening " << target_path_ << " partition with"
            << (interactive_ ? "out" : "") << " O_DSYNC";

  target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err);
  if (!target_fd_) {
    LOG(ERROR) << "Unable to open target partition "
               << partition.partition_name() << " on slot "
               << BootControlInterface::SlotName(install_plan_->target_slot)
               << ", file " << target_path_;
    return false;
  }

  LOG(INFO) << "Applying " << partition.operations().size()
            << " operations to partition \"" << partition.partition_name()
            << "\"";

  // Discard the end of the partition, but ignore failures.
  DiscardPartitionTail(target_fd_, install_part.target_size);

  return true;
}

bool DeltaPerformer::OpenCurrentECCPartition() {
  if (source_ecc_fd_)
    return true;

  if (source_ecc_open_failure_)
    return false;

  if (current_partition_ >= partitions_.size())
    return false;

  // No support for ECC in minor version 1 or full payloads.
  if (payload_->type == InstallPayloadType::kFull ||
      GetMinorVersion() == kInPlaceMinorPayloadVersion)
    return false;

#if USE_FEC
  const PartitionUpdate& partition = partitions_[current_partition_];
  size_t num_previous_partitions =
      install_plan_->partitions.size() - partitions_.size();
  const InstallPlan::Partition& install_part =
      install_plan_->partitions[num_previous_partitions + current_partition_];
  string path = install_part.source_path;
  FileDescriptorPtr fd(new FecFileDescriptor());
  if (!fd->Open(path.c_str(), O_RDONLY, 0)) {
    PLOG(ERROR) << "Unable to open ECC source partition "
                << partition.partition_name() << " on slot "
                << BootControlInterface::SlotName(install_plan_->source_slot)
                << ", file " << path;
    source_ecc_open_failure_ = true;
    return false;
  }
  source_ecc_fd_ = fd;
#else
  // No support for ECC compiled.
  source_ecc_open_failure_ = true;
#endif  // USE_FEC

  return !source_ecc_open_failure_;
}

namespace {

void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
  string sha256 = brillo::data_encoding::Base64Encode(info.hash());
  LOG(INFO) << "PartitionInfo " << tag << " sha256: " << sha256
            << " size: " << info.size();
}

void LogPartitionInfo(const vector<PartitionUpdate>& partitions) {
  for (const PartitionUpdate& partition : partitions) {
    if (partition.has_old_partition_info()) {
      LogPartitionInfoHash(partition.old_partition_info(),
                           "old " + partition.partition_name());
    }
    LogPartitionInfoHash(partition.new_partition_info(),
                         "new " + partition.partition_name());
  }
}

}  // namespace

uint32_t DeltaPerformer::GetMinorVersion() const {
  if (manifest_.has_minor_version()) {
    return manifest_.minor_version();
  }
  return payload_->type == InstallPayloadType::kDelta
             ? kMaxSupportedMinorPayloadVersion
             : kFullPayloadMinorVersion;
}

bool DeltaPerformer::IsHeaderParsed() const {
  return metadata_size_ != 0;
}

MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
    const brillo::Blob& payload, ErrorCode* error) {
  *error = ErrorCode::kSuccess;

  if (!IsHeaderParsed()) {
    MetadataParseResult result =
        payload_metadata_.ParsePayloadHeader(payload, error);
    if (result != MetadataParseResult::kSuccess)
      return result;

    metadata_size_ = payload_metadata_.GetMetadataSize();
    metadata_signature_size_ = payload_metadata_.GetMetadataSignatureSize();
    major_payload_version_ = payload_metadata_.GetMajorVersion();

    // If the metadata size is present in the install plan, check for it
    // immediately, even before waiting for that many bytes to be downloaded
    // in the payload. This will prevent any attack which relies on us
    // downloading data beyond the expected metadata size.
    if (install_plan_->hash_checks_mandatory) {
      if (payload_->metadata_size != metadata_size_) {
        LOG(ERROR) << "Mandatory metadata size in Omaha response ("
                   << payload_->metadata_size
                   << ") is missing/incorrect, actual = " << metadata_size_;
        *error = ErrorCode::kDownloadInvalidMetadataSize;
        return MetadataParseResult::kError;
      }
    }
  }

  // Now that we have validated the metadata size, we should wait for the full
  // metadata and its signature (if it exists) to be read in before we can
  // parse it.
  if (payload.size() < metadata_size_ + metadata_signature_size_)
    return MetadataParseResult::kInsufficientData;

  // Log whether we validated the size or are simply trusting what's in the
  // payload here. This is logged here (after we received the full metadata
  // data) so that we just log once (instead of logging n times) if it takes n
  // DeltaPerformer::Write calls to download the full manifest.
  if (payload_->metadata_size == metadata_size_) {
    LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
  } else {
    // For mandatory cases, we'd have already returned a kMetadataParseError
    // above. We'll be here only for non-mandatory cases. Just send a UMA stat.
    LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
                 << payload_->metadata_size
                 << ") in Omaha response as validation is not mandatory. "
                 << "Trusting metadata size in payload = " << metadata_size_;
  }

  auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
  if (!payload_verifier) {
    LOG(ERROR) << "Failed to create payload verifier.";
    *error = ErrorCode::kDownloadMetadataSignatureVerificationError;
    if (perform_verification) {
      return MetadataParseResult::kError;
    }
  } else {
    // We have the full metadata in |payload|. Verify its integrity
    // and authenticity based on the information we have in the Omaha response.
    *error = payload_metadata_.ValidateMetadataSignature(
        payload, payload_->metadata_signature, *payload_verifier);
  }
  if (*error != ErrorCode::kSuccess) {
    if (install_plan_->hash_checks_mandatory) {
      // The autoupdate_CatchBadSignatures test checks for this string
      // in log-files. Keep in sync.
      LOG(ERROR) << "Mandatory metadata signature validation failed";
      return MetadataParseResult::kError;
    }

    // For non-mandatory cases, just send a UMA stat.
    LOG(WARNING) << "Ignoring metadata signature validation failures";
    *error = ErrorCode::kSuccess;
  }

  // The payload metadata is deemed valid, it's safe to parse the protobuf.
  if (!payload_metadata_.GetManifest(payload, &manifest_)) {
    LOG(ERROR) << "Unable to parse manifest in update file.";
    *error = ErrorCode::kDownloadManifestParseError;
    return MetadataParseResult::kError;
  }

  manifest_parsed_ = true;
  return MetadataParseResult::kSuccess;
}

#define OP_DURATION_HISTOGRAM(_op_name, _start_time)                         \
  LOCAL_HISTOGRAM_CUSTOM_TIMES(                                              \
      "UpdateEngine.DownloadAction.InstallOperation::" _op_name ".Duration", \
      base::TimeTicks::Now() - _start_time,                                  \
      base::TimeDelta::FromMilliseconds(10),                                 \
      base::TimeDelta::FromMinutes(5),                                       \
      20);

// Wrapper around write. Returns true if all requested bytes were written, or
// false on any error, regardless of progress, and stores an action exit code
// in |error|.
bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) {
  *error = ErrorCode::kSuccess;
  const char* c_bytes = reinterpret_cast<const char*>(bytes);

  // Update the total byte downloaded count and the progress logs.
  total_bytes_received_ += count;
  UpdateOverallProgress(false, "Completed ");

  while (!manifest_valid_) {
    // Read data up to the needed limit; this is either the maximum payload
    // header size, or the full metadata size (once it becomes known).
    const bool do_read_header = !IsHeaderParsed();
    CopyDataToBuffer(
        &c_bytes,
        &count,
        (do_read_header ? kMaxPayloadHeaderSize
                        : metadata_size_ + metadata_signature_size_));

    MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
    if (result == MetadataParseResult::kError)
      return false;
    if (result == MetadataParseResult::kInsufficientData) {
      // If we just processed the header, make an attempt on the manifest.
      if (do_read_header && IsHeaderParsed())
        continue;

      return true;
    }

    // Checks the integrity of the payload manifest.
    if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
      return false;
    manifest_valid_ = true;

    // Clear the download buffer.
    DiscardBuffer(false, metadata_size_);

    block_size_ = manifest_.block_size();

    // This populates |partitions_| and the |install_plan.partitions| with the
    // list of partitions from the manifest.
    if (!ParseManifestPartitions(error))
      return false;

    // |install_plan.partitions| was filled in; nothing needs to be done here
    // if the payload was already applied. Return false to terminate the http
    // fetcher, but keep |error| as ErrorCode::kSuccess.
    if (payload_->already_applied)
      return false;

    num_total_operations_ = 0;
    for (const auto& partition : partitions_) {
      num_total_operations_ += partition.operations_size();
      acc_num_operations_.push_back(num_total_operations_);
    }

    LOG_IF(WARNING,
           !prefs_->SetInt64(kPrefsManifestMetadataSize, metadata_size_))
        << "Unable to save the manifest metadata size.";
    LOG_IF(WARNING,
           !prefs_->SetInt64(kPrefsManifestSignatureSize,
                             metadata_signature_size_))
        << "Unable to save the manifest signature size.";

    if (!PrimeUpdateState()) {
      *error = ErrorCode::kDownloadStateInitializationError;
      LOG(ERROR) << "Unable to prime the update state.";
      return false;
    }

    if (next_operation_num_ < acc_num_operations_[current_partition_]) {
      if (!OpenCurrentPartition()) {
        *error = ErrorCode::kInstallDeviceOpenError;
        return false;
      }
    }

    if (next_operation_num_ > 0)
      UpdateOverallProgress(true, "Resuming after ");
    LOG(INFO) << "Starting to apply update payload operations";
  }

  while (next_operation_num_ < num_total_operations_) {
    // Check if we should cancel the current attempt for any reason.
    // In this case, *error will have already been populated with the reason
    // why we're canceling.
    if (download_delegate_ && download_delegate_->ShouldCancel(error))
      return false;

    // We know there are more operations to perform because we didn't reach
    // the |num_total_operations_| limit yet.
    if (next_operation_num_ >= acc_num_operations_[current_partition_]) {
      CloseCurrentPartition();
      // Skip until there are operations for current_partition_.
      while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
        current_partition_++;
      }
      if (!OpenCurrentPartition()) {
        *error = ErrorCode::kInstallDeviceOpenError;
        return false;
      }
    }
    const size_t partition_operation_num =
        next_operation_num_ -
        (current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);

    const InstallOperation& op =
        partitions_[current_partition_].operations(partition_operation_num);

    CopyDataToBuffer(&c_bytes, &count, op.data_length());

    // Check whether we received all of the next operation's data payload.
    if (!CanPerformInstallOperation(op))
      return true;

    // Validate the operation only if the metadata signature is present.
    // Otherwise, keep the old behavior. This serves as a knob to disable
    // the validation logic in case we find some regression after rollout.
    // NOTE: If hash checks are mandatory and if metadata_signature is empty,
    // we would have already failed in the ParsePayloadMetadata method and
    // thus not even be here. So no need to handle that case again here.
    if (!payload_->metadata_signature.empty()) {
      // Note: Validate must be called only if CanPerformInstallOperation is
      // called. Otherwise, we might fail operations even before there is
      // sufficient data to compute the proper hash.
      *error = ValidateOperationHash(op);
      if (*error != ErrorCode::kSuccess) {
        if (install_plan_->hash_checks_mandatory) {
          LOG(ERROR) << "Mandatory operation hash check failed";
          return false;
        }

        // For non-mandatory cases, just send a UMA stat.
        LOG(WARNING) << "Ignoring operation validation errors";
        *error = ErrorCode::kSuccess;
      }
    }

    // Makes sure we unblock exit when this operation completes.
    ScopedTerminatorExitUnblocker exit_unblocker =
        ScopedTerminatorExitUnblocker();  // Avoids a compiler unused var bug.

    base::TimeTicks op_start_time = base::TimeTicks::Now();

    bool op_result;
    switch (op.type()) {
      case InstallOperation::REPLACE:
      case InstallOperation::REPLACE_BZ:
      case InstallOperation::REPLACE_XZ:
        op_result = PerformReplaceOperation(op);
        OP_DURATION_HISTOGRAM("REPLACE", op_start_time);
        break;
      case InstallOperation::ZERO:
      case InstallOperation::DISCARD:
        op_result = PerformZeroOrDiscardOperation(op);
        OP_DURATION_HISTOGRAM("ZERO_OR_DISCARD", op_start_time);
        break;
      case InstallOperation::MOVE:
        op_result = PerformMoveOperation(op);
        OP_DURATION_HISTOGRAM("MOVE", op_start_time);
        break;
      case InstallOperation::BSDIFF:
        op_result = PerformBsdiffOperation(op);
        OP_DURATION_HISTOGRAM("BSDIFF", op_start_time);
        break;
      case InstallOperation::SOURCE_COPY:
        op_result = PerformSourceCopyOperation(op, error);
        OP_DURATION_HISTOGRAM("SOURCE_COPY", op_start_time);
        break;
      case InstallOperation::SOURCE_BSDIFF:
      case InstallOperation::BROTLI_BSDIFF:
        op_result = PerformSourceBsdiffOperation(op, error);
        OP_DURATION_HISTOGRAM("SOURCE_BSDIFF", op_start_time);
        break;
      case InstallOperation::PUFFDIFF:
        op_result = PerformPuffDiffOperation(op, error);
        OP_DURATION_HISTOGRAM("PUFFDIFF", op_start_time);
        break;
      default:
        op_result = false;
    }
    if (!HandleOpResult(op_result, InstallOperationTypeName(op.type()), error))
      return false;

    if (!target_fd_->Flush()) {
      return false;
    }

    next_operation_num_++;
    UpdateOverallProgress(false, "Completed ");
    CheckpointUpdateProgress(false);
  }

  // In major version 2, we don't add a dummy operation to the payload.
  // If we already extracted the signature we should skip this step.
  if (major_payload_version_ == kBrilloMajorPayloadVersion &&
      manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
      signatures_message_data_.empty()) {
    if (manifest_.signatures_offset() != buffer_offset_) {
      LOG(ERROR) << "Payload signatures offset points to blob offset "
                 << manifest_.signatures_offset()
                 << " but signatures are expected at offset "
                 << buffer_offset_;
      *error = ErrorCode::kDownloadPayloadVerificationError;
      return false;
    }
    CopyDataToBuffer(&c_bytes, &count, manifest_.signatures_size());
    // Needs more data to cover the entire signature.
    if (buffer_.size() < manifest_.signatures_size())
      return true;
    if (!ExtractSignatureMessage()) {
      LOG(ERROR) << "Extract payload signature failed.";
      *error = ErrorCode::kDownloadPayloadVerificationError;
      return false;
    }
    DiscardBuffer(true, 0);
    // Since we extracted the SignatureMessage we need to advance the
    // checkpoint, otherwise we would reload the signature and try to extract
    // it again.
    // This is the last checkpoint for an update; force this checkpoint to be
    // saved.
    CheckpointUpdateProgress(true);
  }

  return true;
}

bool DeltaPerformer::IsManifestValid() {
  return manifest_valid_;
}

bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
    partitions_.clear();
    for (const PartitionUpdate& partition : manifest_.partitions()) {
      partitions_.push_back(partition);
    }
  } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
    LOG(INFO) << "Converting update information from old format.";
    PartitionUpdate root_part;
    root_part.set_partition_name(kPartitionNameRoot);
#ifdef __ANDROID__
    LOG(WARNING) << "Legacy payload major version provided to an Android "
                    "build. Assuming no post-install. Please use major "
                    "version 2 or newer.";
    root_part.set_run_postinstall(false);
#else
    root_part.set_run_postinstall(true);
#endif  // __ANDROID__
    if (manifest_.has_old_rootfs_info()) {
      *root_part.mutable_old_partition_info() = manifest_.old_rootfs_info();
      manifest_.clear_old_rootfs_info();
    }
    if (manifest_.has_new_rootfs_info()) {
      *root_part.mutable_new_partition_info() = manifest_.new_rootfs_info();
      manifest_.clear_new_rootfs_info();
    }
    *root_part.mutable_operations() = manifest_.install_operations();
    manifest_.clear_install_operations();
    partitions_.push_back(std::move(root_part));

    PartitionUpdate kern_part;
    kern_part.set_partition_name(kPartitionNameKernel);
    kern_part.set_run_postinstall(false);
    if (manifest_.has_old_kernel_info()) {
      *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info();
      manifest_.clear_old_kernel_info();
    }
    if (manifest_.has_new_kernel_info()) {
      *kern_part.mutable_new_partition_info() = manifest_.new_kernel_info();
      manifest_.clear_new_kernel_info();
    }
    *kern_part.mutable_operations() = manifest_.kernel_install_operations();
    manifest_.clear_kernel_install_operations();
    partitions_.push_back(std::move(kern_part));
  }

  // Fill in the InstallPlan::partitions based on the partitions from the
  // payload.
  for (const auto& partition : partitions_) {
    InstallPlan::Partition install_part;
    install_part.name = partition.partition_name();
    install_part.run_postinstall =
        partition.has_run_postinstall() && partition.run_postinstall();
    if (install_part.run_postinstall) {
      install_part.postinstall_path =
          (partition.has_postinstall_path() ?
               partition.postinstall_path() : kPostinstallDefaultScript);
      install_part.filesystem_type = partition.filesystem_type();
      install_part.postinstall_optional = partition.postinstall_optional();
    }

    if (partition.has_old_partition_info()) {
      const PartitionInfo& info = partition.old_partition_info();
      install_part.source_size = info.size();
      install_part.source_hash.assign(info.hash().begin(), info.hash().end());
    }

    if (!partition.has_new_partition_info()) {
      LOG(ERROR) << "Unable to get new partition hash info on partition "
                 << install_part.name << ".";
      *error = ErrorCode::kDownloadNewPartitionInfoError;
      return false;
    }
    const PartitionInfo& info = partition.new_partition_info();
    install_part.target_size = info.size();
    install_part.target_hash.assign(info.hash().begin(), info.hash().end());

    install_part.block_size = block_size_;
    if (partition.has_hash_tree_extent()) {
      Extent extent = partition.hash_tree_data_extent();
      install_part.hash_tree_data_offset = extent.start_block() * block_size_;
      install_part.hash_tree_data_size = extent.num_blocks() * block_size_;
      extent = partition.hash_tree_extent();
      install_part.hash_tree_offset = extent.start_block() * block_size_;
      install_part.hash_tree_size = extent.num_blocks() * block_size_;
      uint64_t hash_tree_data_end = install_part.hash_tree_data_offset +
                                    install_part.hash_tree_data_size;
      if (install_part.hash_tree_offset < hash_tree_data_end) {
        LOG(ERROR) << "Invalid hash tree extents, hash tree data ends at "
                   << hash_tree_data_end << ", but hash tree starts at "
                   << install_part.hash_tree_offset;
        *error = ErrorCode::kDownloadNewPartitionInfoError;
        return false;
      }
      install_part.hash_tree_algorithm = partition.hash_tree_algorithm();
      install_part.hash_tree_salt.assign(partition.hash_tree_salt().begin(),
                                         partition.hash_tree_salt().end());
    }
    if (partition.has_fec_extent()) {
      Extent extent = partition.fec_data_extent();
      install_part.fec_data_offset = extent.start_block() * block_size_;
      install_part.fec_data_size = extent.num_blocks() * block_size_;
      extent = partition.fec_extent();
      install_part.fec_offset = extent.start_block() * block_size_;
      install_part.fec_size = extent.num_blocks() * block_size_;
      uint64_t fec_data_end =
          install_part.fec_data_offset + install_part.fec_data_size;
      if (install_part.fec_offset < fec_data_end) {
        LOG(ERROR) << "Invalid fec extents, fec data ends at " << fec_data_end
                   << ", but fec starts at " << install_part.fec_offset;
        *error = ErrorCode::kDownloadNewPartitionInfoError;
        return false;
      }
      install_part.fec_roots = partition.fec_roots();
    }

    install_plan_->partitions.push_back(install_part);
  }

  if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
    uint64_t required_size = 0;
    if (!PreparePartitionsForUpdate(&required_size)) {
      if (required_size > 0) {
        *error = ErrorCode::kNotEnoughSpace;
      } else {
        *error = ErrorCode::kInstallDeviceOpenError;
      }
      return false;
    }
  }

  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
    manifest_.clear_partitions();
  }

  if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
    LOG(ERROR) << "Unable to determine all the partition devices.";
    *error = ErrorCode::kInstallDeviceOpenError;
    return false;
  }
  LogPartitionInfo(partitions_);
  return true;
}

bool
DeltaPerformer::PreparePartitionsForUpdate(uint64_t* required_size) {
  // Call static PreparePartitionsForUpdate with hash from
  // kPrefsUpdateCheckResponseHash to ensure the hash of the payload that space
  // is preallocated for is the same as the hash of the payload being applied.
  string update_check_response_hash;
  ignore_result(prefs_->GetString(kPrefsUpdateCheckResponseHash,
                                  &update_check_response_hash));
  return PreparePartitionsForUpdate(prefs_,
                                    boot_control_,
                                    install_plan_->target_slot,
                                    manifest_,
                                    update_check_response_hash,
                                    required_size);
}

bool DeltaPerformer::PreparePartitionsForUpdate(
    PrefsInterface* prefs,
    BootControlInterface* boot_control,
    BootControlInterface::Slot target_slot,
    const DeltaArchiveManifest& manifest,
    const std::string& update_check_response_hash,
    uint64_t* required_size) {
  string last_hash;
  ignore_result(
      prefs->GetString(kPrefsDynamicPartitionMetadataUpdated, &last_hash));

  bool is_resume = !update_check_response_hash.empty() &&
                   last_hash == update_check_response_hash;

  if (is_resume) {
    LOG(INFO) << "Using previously prepared partitions for update. hash = "
              << last_hash;
  } else {
    LOG(INFO) << "Preparing partitions for new update. last hash = "
              << last_hash << ", new hash = " << update_check_response_hash;
  }

  if (!boot_control->GetDynamicPartitionControl()->PreparePartitionsForUpdate(
          boot_control->GetCurrentSlot(),
          target_slot,
          manifest,
          !is_resume /* should update */,
          required_size)) {
    LOG(ERROR) << "Unable to initialize partition metadata for slot "
               << BootControlInterface::SlotName(target_slot);
    return false;
  }

  TEST_AND_RETURN_FALSE(prefs->SetString(kPrefsDynamicPartitionMetadataUpdated,
                                         update_check_response_hash));
  LOG(INFO) << "PreparePartitionsForUpdate done.";

  return true;
}

bool DeltaPerformer::CanPerformInstallOperation(
    const chromeos_update_engine::InstallOperation& operation) {
  // If we don't have a data blob we can apply it right away.
  if (!operation.has_data_offset() && !operation.has_data_length())
    return true;

  // See if we have the entire data blob in the buffer.
  if (operation.data_offset() < buffer_offset_) {
    LOG(ERROR) << "we threw away data it seems?";
    return false;
  }

  return (operation.data_offset() + operation.data_length() <=
          buffer_offset_ + buffer_.size());
}

bool DeltaPerformer::PerformReplaceOperation(
    const InstallOperation& operation) {
  CHECK(operation.type() == InstallOperation::REPLACE ||
        operation.type() == InstallOperation::REPLACE_BZ ||
        operation.type() == InstallOperation::REPLACE_XZ);

  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());

  // Extract the signature message if it's in this operation.
  if (ExtractSignatureMessageFromOperation(operation)) {
    // If this is a dummy replace operation, we ignore it after extracting the
    // signature.
    DiscardBuffer(true, 0);
    return true;
  }

  // Setup the ExtentWriter stack based on the operation type.
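  // The writers form a small decorator stack: DirectExtentWriter writes raw
  // bytes to the target extents, and for REPLACE_BZ/REPLACE_XZ it is wrapped
  // in a decompressing writer, so the compressed blob from the payload goes
  // in and decompressed blocks come out.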
  std::unique_ptr<ExtentWriter> writer =
      std::make_unique<DirectExtentWriter>();

  if (operation.type() == InstallOperation::REPLACE_BZ) {
    writer.reset(new BzipExtentWriter(std::move(writer)));
  } else if (operation.type() == InstallOperation::REPLACE_XZ) {
    writer.reset(new XzExtentWriter(std::move(writer)));
  }

  TEST_AND_RETURN_FALSE(
      writer->Init(target_fd_, operation.dst_extents(), block_size_));
  TEST_AND_RETURN_FALSE(
      writer->Write(buffer_.data(), operation.data_length()));

  // Update buffer
  DiscardBuffer(true, buffer_.size());
  return true;
}

bool DeltaPerformer::PerformZeroOrDiscardOperation(
    const InstallOperation& operation) {
  CHECK(operation.type() == InstallOperation::DISCARD ||
        operation.type() == InstallOperation::ZERO);

  // These operations have no blob.
  TEST_AND_RETURN_FALSE(!operation.has_data_offset());
  TEST_AND_RETURN_FALSE(!operation.has_data_length());

#ifdef BLKZEROOUT
  bool attempt_ioctl = true;
  int request =
      (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD);
#else   // !defined(BLKZEROOUT)
  bool attempt_ioctl = false;
  int request = 0;
#endif  // !defined(BLKZEROOUT)

  brillo::Blob zeros;
  for (const Extent& extent : operation.dst_extents()) {
    const uint64_t start = extent.start_block() * block_size_;
    const uint64_t length = extent.num_blocks() * block_size_;
    if (attempt_ioctl) {
      int result = 0;
      if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0)
        continue;
      attempt_ioctl = false;
    }
    // In case of failure, we fall back to writing 0 to the selected region.
    zeros.resize(16 * block_size_);
    for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
      uint64_t chunk_length =
          min(length - offset, static_cast<uint64_t>(zeros.size()));
      TEST_AND_RETURN_FALSE(utils::PWriteAll(
          target_fd_, zeros.data(), chunk_length, start + offset));
    }
  }
  return true;
}

bool DeltaPerformer::PerformMoveOperation(const InstallOperation& operation) {
  // Calculate buffer size. Note, this function doesn't do a sliding
  // window to copy in case the source and destination blocks overlap.
  // If we wanted to do a sliding window, we could program the server
  // to generate deltas that effectively did a sliding window.

  uint64_t blocks_to_read = 0;
  for (int i = 0; i < operation.src_extents_size(); i++)
    blocks_to_read += operation.src_extents(i).num_blocks();

  uint64_t blocks_to_write = 0;
  for (int i = 0; i < operation.dst_extents_size(); i++)
    blocks_to_write += operation.dst_extents(i).num_blocks();

  DCHECK_EQ(blocks_to_write, blocks_to_read);
  brillo::Blob buf(blocks_to_write * block_size_);

  // Read in bytes.
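  // Note that both the reads here and the writes below go through
  // |target_fd_|: MOVE is an operation from the in-place (minor version 1)
  // update format, where source and destination are the same partition.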
  ssize_t bytes_read = 0;
  for (int i = 0; i < operation.src_extents_size(); i++) {
    ssize_t bytes_read_this_iteration = 0;
    const Extent& extent = operation.src_extents(i);
    const size_t bytes = extent.num_blocks() * block_size_;
    TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
    TEST_AND_RETURN_FALSE(utils::PReadAll(target_fd_,
                                          &buf[bytes_read],
                                          bytes,
                                          extent.start_block() * block_size_,
                                          &bytes_read_this_iteration));
    TEST_AND_RETURN_FALSE(bytes_read_this_iteration ==
                          static_cast<ssize_t>(bytes));
    bytes_read += bytes_read_this_iteration;
  }

  // Write bytes out.
  ssize_t bytes_written = 0;
  for (int i = 0; i < operation.dst_extents_size(); i++) {
    const Extent& extent = operation.dst_extents(i);
    const size_t bytes = extent.num_blocks() * block_size_;
    TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
    TEST_AND_RETURN_FALSE(utils::PWriteAll(target_fd_,
                                           &buf[bytes_written],
                                           bytes,
                                           extent.start_block() * block_size_));
    bytes_written += bytes;
  }
  DCHECK_EQ(bytes_written, bytes_read);
  DCHECK_EQ(bytes_written, static_cast<ssize_t>(buf.size()));
  return true;
}

bool DeltaPerformer::ValidateSourceHash(const brillo::Blob& calculated_hash,
                                        const InstallOperation& operation,
                                        const FileDescriptorPtr source_fd,
                                        ErrorCode* error) {
  brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
                                    operation.src_sha256_hash().end());
  if (calculated_hash != expected_source_hash) {
    LOG(ERROR) << "The hash of the source data on disk for this operation "
               << "doesn't match the expected value. This could mean that the "
               << "delta update payload was targeted for another version, or "
               << "that the source partition was modified after it was "
               << "installed, for example, by mounting a filesystem.";
    LOG(ERROR) << "Expected: sha256|hex = "
               << base::HexEncode(expected_source_hash.data(),
                                  expected_source_hash.size());
    LOG(ERROR) << "Calculated: sha256|hex = "
               << base::HexEncode(calculated_hash.data(),
                                  calculated_hash.size());

    vector<string> source_extents;
    for (const Extent& ext : operation.src_extents()) {
      source_extents.push_back(
          base::StringPrintf("%" PRIu64 ":%" PRIu64,
                             static_cast<uint64_t>(ext.start_block()),
                             static_cast<uint64_t>(ext.num_blocks())));
    }
    LOG(ERROR) << "Operation source (offset:size) in blocks: "
               << base::JoinString(source_extents, ",");

    // Log remount history if this device is an ext4 partition.
    LogMountHistory(source_fd);

    *error = ErrorCode::kDownloadStateInitializationError;
    return false;
  }
  return true;
}

bool DeltaPerformer::PerformSourceCopyOperation(
    const InstallOperation& operation, ErrorCode* error) {
  if (operation.has_src_length())
    TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
  if (operation.has_dst_length())
    TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);

  TEST_AND_RETURN_FALSE(source_fd_ != nullptr);

  // The device may optimize the SOURCE_COPY operation.
  // Since this is a device-specific optimization, let the
  // DynamicPartitionControl decide if the operation should be skipped.
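  // If OptimizeOperation() rewrites the operation, |optimized| below may
  // carry a reduced extent list; the source hash is still computed over the
  // full, original |operation.src_extents()|, while only the optimized
  // extents are actually copied.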
  const PartitionUpdate& partition = partitions_[current_partition_];
  const auto& partition_control = boot_control_->GetDynamicPartitionControl();

  InstallOperation buf;
  bool should_optimize = partition_control->OptimizeOperation(
      partition.partition_name(), operation, &buf);
  const InstallOperation& optimized = should_optimize ? buf : operation;

  if (operation.has_src_sha256_hash()) {
    bool read_ok;
    brillo::Blob source_hash;
    brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
                                      operation.src_sha256_hash().end());

    // We fall back to use the error corrected device if the hash of the raw
    // device doesn't match or there was an error reading the source partition.
    // Note that this code will also fall back if writing the target partition
    // fails.
    if (should_optimize) {
      // Hash operation.src_extents(), then copy optimized.src_extents to
      // optimized.dst_extents.
      read_ok =
          fd_utils::ReadAndHashExtents(
              source_fd_, operation.src_extents(), block_size_, &source_hash) &&
          fd_utils::CopyAndHashExtents(source_fd_,
                                       optimized.src_extents(),
                                       target_fd_,
                                       optimized.dst_extents(),
                                       block_size_,
                                       nullptr /* skip hashing */);
    } else {
      read_ok = fd_utils::CopyAndHashExtents(source_fd_,
                                             operation.src_extents(),
                                             target_fd_,
                                             operation.dst_extents(),
                                             block_size_,
                                             &source_hash);
    }
    if (read_ok && expected_source_hash == source_hash)
      return true;

    if (!OpenCurrentECCPartition()) {
      // The following function call will return false since the source hash
      // mismatches, but we still want to call it so it prints the appropriate
      // log message.
      return ValidateSourceHash(source_hash, operation, source_fd_, error);
    }

    LOG(WARNING) << "Source hash from RAW device mismatched: found "
                 << base::HexEncode(source_hash.data(), source_hash.size())
                 << ", expected "
                 << base::HexEncode(expected_source_hash.data(),
                                    expected_source_hash.size());

    if (should_optimize) {
      TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash));
      TEST_AND_RETURN_FALSE(
          fd_utils::CopyAndHashExtents(source_ecc_fd_,
                                       optimized.src_extents(),
                                       target_fd_,
                                       optimized.dst_extents(),
                                       block_size_,
                                       nullptr /* skip hashing */));
    } else {
      TEST_AND_RETURN_FALSE(
          fd_utils::CopyAndHashExtents(source_ecc_fd_,
                                       operation.src_extents(),
                                       target_fd_,
                                       operation.dst_extents(),
                                       block_size_,
                                       &source_hash));
    }
    TEST_AND_RETURN_FALSE(
        ValidateSourceHash(source_hash, operation, source_ecc_fd_, error));
    // At this point reading from the error corrected device worked, but
    // reading from the raw device failed, so this is considered a recovered
    // failure.
    source_ecc_recovered_failures_++;
  } else {
    // When the operation doesn't include a source hash, we attempt the error
    // corrected device first since we can't verify the block in the raw device
    // at this point, but we fall back to the raw device since the error
    // corrected device can be shorter or not available.
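    // Neither copy below verifies a hash (there is none to check against), so
    // whichever device copies all the extents successfully is accepted as-is.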

    if (OpenCurrentECCPartition() &&
        fd_utils::CopyAndHashExtents(source_ecc_fd_,
                                     optimized.src_extents(),
                                     target_fd_,
                                     optimized.dst_extents(),
                                     block_size_,
                                     nullptr)) {
      return true;
    }
    TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
                                                       optimized.src_extents(),
                                                       target_fd_,
                                                       optimized.dst_extents(),
                                                       block_size_,
                                                       nullptr));
  }
  return true;
}

FileDescriptorPtr DeltaPerformer::ChooseSourceFD(
    const InstallOperation& operation, ErrorCode* error) {
  if (source_fd_ == nullptr) {
    LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr";
    return nullptr;
  }

  if (!operation.has_src_sha256_hash()) {
    // When the operation doesn't include a source hash, we attempt the error
    // corrected device first since we can't verify the block in the raw device
    // at this point, but we first need to make sure all extents are readable
    // since the error corrected device can be shorter or not available.
    if (OpenCurrentECCPartition() &&
        fd_utils::ReadAndHashExtents(
            source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) {
      return source_ecc_fd_;
    }
    return source_fd_;
  }

  brillo::Blob source_hash;
  brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
                                    operation.src_sha256_hash().end());
  if (fd_utils::ReadAndHashExtents(
          source_fd_, operation.src_extents(), block_size_, &source_hash) &&
      source_hash == expected_source_hash) {
    return source_fd_;
  }
  // We fall back to use the error corrected device if the hash of the raw
  // device doesn't match or there was an error reading the source partition.
  if (!OpenCurrentECCPartition()) {
    // The following function call will return false since the source hash
    // mismatches, but we still want to call it so it prints the appropriate
    // log message.
    ValidateSourceHash(source_hash, operation, source_fd_, error);
    return nullptr;
  }
  LOG(WARNING) << "Source hash from RAW device mismatched: found "
               << base::HexEncode(source_hash.data(), source_hash.size())
               << ", expected "
               << base::HexEncode(expected_source_hash.data(),
                                  expected_source_hash.size());

  if (fd_utils::ReadAndHashExtents(
          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) &&
      ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)) {
    // At this point reading from the error corrected device worked, but
    // reading from the raw device failed, so this is considered a recovered
    // failure.
    source_ecc_recovered_failures_++;
    return source_ecc_fd_;
  }
  return nullptr;
}

bool DeltaPerformer::ExtentsToBsdiffPositionsString(
    const RepeatedPtrField<Extent>& extents,
    uint64_t block_size,
    uint64_t full_length,
    string* positions_string) {
  string ret;
  uint64_t length = 0;
  for (const Extent& extent : extents) {
    int64_t start = extent.start_block() * block_size;
    uint64_t this_length =
        min(full_length - length,
            static_cast<uint64_t>(extent.num_blocks()) * block_size);
    ret += base::StringPrintf("%" PRIi64 ":%" PRIu64 ",", start, this_length);
    length += this_length;
  }
  TEST_AND_RETURN_FALSE(length == full_length);
  if (!ret.empty())
    ret.resize(ret.size() - 1);  // Strip trailing comma off
  *positions_string = ret;
  return true;
}

bool DeltaPerformer::PerformBsdiffOperation(const InstallOperation& operation) {
  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());

  string input_positions;
  TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
                                                       block_size_,
                                                       operation.src_length(),
                                                       &input_positions));
  string output_positions;
  TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
                                                       block_size_,
                                                       operation.dst_length(),
                                                       &output_positions));

  TEST_AND_RETURN_FALSE(bsdiff::bspatch(target_path_.c_str(),
                                        target_path_.c_str(),
                                        buffer_.data(),
                                        buffer_.size(),
                                        input_positions.c_str(),
                                        output_positions.c_str()) == 0);
  DiscardBuffer(true, buffer_.size());

  if (operation.dst_length() % block_size_) {
    // Zero out rest of final block.
    // TODO(adlr): build this into bspatch; it's more efficient that way.
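    // Example: with block_size_ = 4096 and dst_length() = 10000, the patched
    // output ends 1808 bytes into the last block (10000 % 4096), so the
    // remaining 2288 bytes up to the block boundary are explicitly zeroed.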
    const Extent& last_extent =
        operation.dst_extents(operation.dst_extents_size() - 1);
    const uint64_t end_byte =
        (last_extent.start_block() + last_extent.num_blocks()) * block_size_;
    const uint64_t begin_byte =
        end_byte - (block_size_ - operation.dst_length() % block_size_);
    brillo::Blob zeros(end_byte - begin_byte);
    TEST_AND_RETURN_FALSE(utils::PWriteAll(
        target_fd_, zeros.data(), end_byte - begin_byte, begin_byte));
  }
  return true;
}

namespace {

class BsdiffExtentFile : public bsdiff::FileInterface {
 public:
  BsdiffExtentFile(std::unique_ptr<ExtentReader> reader, size_t size)
      : BsdiffExtentFile(std::move(reader), nullptr, size) {}
  BsdiffExtentFile(std::unique_ptr<ExtentWriter> writer, size_t size)
      : BsdiffExtentFile(nullptr, std::move(writer), size) {}

  ~BsdiffExtentFile() override = default;

  bool Read(void* buf, size_t count, size_t* bytes_read) override {
    TEST_AND_RETURN_FALSE(reader_->Read(buf, count));
    *bytes_read = count;
    offset_ += count;
    return true;
  }

  bool Write(const void* buf, size_t count, size_t* bytes_written) override {
    TEST_AND_RETURN_FALSE(writer_->Write(buf, count));
    *bytes_written = count;
    offset_ += count;
    return true;
  }

  bool Seek(off_t pos) override {
    if (reader_ != nullptr) {
      TEST_AND_RETURN_FALSE(reader_->Seek(pos));
      offset_ = pos;
    } else {
      // For writes there should technically be no change of position, or it
      // should be equivalent to the current offset.
      TEST_AND_RETURN_FALSE(offset_ == static_cast<uint64_t>(pos));
    }
    return true;
  }

  bool Close() override { return true; }

  bool GetSize(uint64_t* size) override {
    *size = size_;
    return true;
  }

 private:
  BsdiffExtentFile(std::unique_ptr<ExtentReader> reader,
                   std::unique_ptr<ExtentWriter> writer,
                   size_t size)
      : reader_(std::move(reader)),
        writer_(std::move(writer)),
        size_(size),
        offset_(0) {}

  std::unique_ptr<ExtentReader> reader_;
  std::unique_ptr<ExtentWriter> writer_;
  uint64_t size_;
  uint64_t offset_;

  DISALLOW_COPY_AND_ASSIGN(BsdiffExtentFile);
};

}  // namespace

bool DeltaPerformer::PerformSourceBsdiffOperation(
    const InstallOperation& operation, ErrorCode* error) {
  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
  if (operation.has_src_length())
    TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
  if (operation.has_dst_length())
    TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);

  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
  TEST_AND_RETURN_FALSE(source_fd != nullptr);

  auto reader = std::make_unique<DirectExtentReader>();
  TEST_AND_RETURN_FALSE(
      reader->Init(source_fd, operation.src_extents(), block_size_));
  auto src_file = std::make_unique<BsdiffExtentFile>(
      std::move(reader),
      utils::BlocksInExtents(operation.src_extents()) * block_size_);

  auto writer = std::make_unique<DirectExtentWriter>();
  TEST_AND_RETURN_FALSE(
      writer->Init(target_fd_, operation.dst_extents(), block_size_));
  auto dst_file = std::make_unique<BsdiffExtentFile>(
      std::move(writer),
      utils::BlocksInExtents(operation.dst_extents()) * block_size_);

  TEST_AND_RETURN_FALSE(bsdiff::bspatch(std::move(src_file),
                                        std::move(dst_file),
                                        buffer_.data(),
                                        buffer_.size()) == 0);
  DiscardBuffer(true, buffer_.size());
  return true;
}

namespace {

// A class to be passed to |puffpatch| for reading from |source_fd_| and
// writing into |target_fd_|.
class PuffinExtentStream : public puffin::StreamInterface {
 public:
  // Constructor for creating a stream for reading from an |ExtentReader|.
  PuffinExtentStream(std::unique_ptr<ExtentReader> reader, uint64_t size)
      : PuffinExtentStream(std::move(reader), nullptr, size) {}

  // Constructor for creating a stream for writing to an |ExtentWriter|.
  PuffinExtentStream(std::unique_ptr<ExtentWriter> writer, uint64_t size)
      : PuffinExtentStream(nullptr, std::move(writer), size) {}

  ~PuffinExtentStream() override = default;

  bool GetSize(uint64_t* size) const override {
    *size = size_;
    return true;
  }

  bool GetOffset(uint64_t* offset) const override {
    *offset = offset_;
    return true;
  }

  bool Seek(uint64_t offset) override {
    if (is_read_) {
      TEST_AND_RETURN_FALSE(reader_->Seek(offset));
      offset_ = offset;
    } else {
      // For writes there should technically be no change of position, or it
      // should be equivalent to the current offset.
      TEST_AND_RETURN_FALSE(offset_ == offset);
    }
    return true;
  }

  bool Read(void* buffer, size_t count) override {
    TEST_AND_RETURN_FALSE(is_read_);
    TEST_AND_RETURN_FALSE(reader_->Read(buffer, count));
    offset_ += count;
    return true;
  }

  bool Write(const void* buffer, size_t count) override {
    TEST_AND_RETURN_FALSE(!is_read_);
    TEST_AND_RETURN_FALSE(writer_->Write(buffer, count));
    offset_ += count;
    return true;
  }

  bool Close() override { return true; }

 private:
  PuffinExtentStream(std::unique_ptr<ExtentReader> reader,
                     std::unique_ptr<ExtentWriter> writer,
                     uint64_t size)
      : reader_(std::move(reader)),
        writer_(std::move(writer)),
        size_(size),
        offset_(0),
        is_read_(reader_ ?
  std::unique_ptr<ExtentReader> reader_;
  std::unique_ptr<ExtentWriter> writer_;
  uint64_t size_;
  uint64_t offset_;
  bool is_read_;

  DISALLOW_COPY_AND_ASSIGN(PuffinExtentStream);
};

}  // namespace

bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation,
                                              ErrorCode* error) {
  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());

  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
  TEST_AND_RETURN_FALSE(source_fd != nullptr);

  auto reader = std::make_unique<DirectExtentReader>();
  TEST_AND_RETURN_FALSE(
      reader->Init(source_fd, operation.src_extents(), block_size_));
  puffin::UniqueStreamPtr src_stream(new PuffinExtentStream(
      std::move(reader),
      utils::BlocksInExtents(operation.src_extents()) * block_size_));

  auto writer = std::make_unique<DirectExtentWriter>();
  TEST_AND_RETURN_FALSE(
      writer->Init(target_fd_, operation.dst_extents(), block_size_));
  puffin::UniqueStreamPtr dst_stream(new PuffinExtentStream(
      std::move(writer),
      utils::BlocksInExtents(operation.dst_extents()) * block_size_));

  const size_t kMaxCacheSize = 5 * 1024 * 1024;  // Total 5MB cache.
  TEST_AND_RETURN_FALSE(puffin::PuffPatch(std::move(src_stream),
                                          std::move(dst_stream),
                                          buffer_.data(),
                                          buffer_.size(),
                                          kMaxCacheSize));
  DiscardBuffer(true, buffer_.size());
  return true;
}

bool DeltaPerformer::ExtractSignatureMessageFromOperation(
    const InstallOperation& operation) {
  if (operation.type() != InstallOperation::REPLACE ||
      !manifest_.has_signatures_offset() ||
      manifest_.signatures_offset() != operation.data_offset()) {
    return false;
  }
  TEST_AND_RETURN_FALSE(manifest_.has_signatures_size() &&
                        manifest_.signatures_size() ==
                            operation.data_length());
  TEST_AND_RETURN_FALSE(ExtractSignatureMessage());
  return true;
}

bool DeltaPerformer::ExtractSignatureMessage() {
  TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
  TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= manifest_.signatures_size());
  signatures_message_data_.assign(
      buffer_.begin(), buffer_.begin() + manifest_.signatures_size());

  // Save the signature blob because if the update is interrupted after the
  // download phase we don't go through this path anymore. Some alternatives
  // to consider:
  //
  // 1. On resume, re-download the signature blob from the server and
  //    re-verify it.
  //
  // 2. Verify the signature as soon as it's received and don't checkpoint
  //    the blob and the signed sha-256 context.
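  //
  // With the current approach, PrimeUpdateState() (below) reloads the blob
  // from kPrefsUpdateStateSignatureBlob on resume, so VerifyPayload() can
  // still check the signature even though this extraction path is skipped.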
  LOG_IF(WARNING,
         !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
                            signatures_message_data_))
      << "Unable to store the signature blob.";

  LOG(INFO) << "Extracted signature data of size "
            << manifest_.signatures_size() << " at "
            << manifest_.signatures_offset();
  return true;
}

bool DeltaPerformer::GetPublicKey(string* out_public_key) {
  out_public_key->clear();

  if (utils::FileExists(public_key_path_.c_str())) {
    LOG(INFO) << "Verifying using public key: " << public_key_path_;
    return utils::ReadFile(public_key_path_, out_public_key);
  }

  // If this is an official build, then we are not allowed to use the public
  // key from the Omaha response.
  if (!hardware_->IsOfficialBuild() && !install_plan_->public_key_rsa.empty()) {
    LOG(INFO) << "Verifying using public key from Omaha response.";
    return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa,
                                               out_public_key);
  }
  LOG(INFO) << "No public keys found for verification.";
  return true;
}

std::pair<std::unique_ptr<PayloadVerifier>, bool>
DeltaPerformer::CreatePayloadVerifier() {
  if (utils::FileExists(update_certificates_path_.c_str())) {
    LOG(INFO) << "Verifying using certificates: " << update_certificates_path_;
    return {
        PayloadVerifier::CreateInstanceFromZipPath(update_certificates_path_),
        true};
  }

  string public_key;
  if (!GetPublicKey(&public_key)) {
    LOG(ERROR) << "Failed to read public key";
    return {nullptr, true};
  }

  // Skips the verification if the public key is empty.
  if (public_key.empty()) {
    return {nullptr, false};
  }
  return {PayloadVerifier::CreateInstance(public_key), true};
}

ErrorCode DeltaPerformer::ValidateManifest() {
  // Perform assorted checks to sanity check the manifest, make sure it
  // matches data from other sources, and that it is a supported version.

  bool has_old_fields =
      (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info());
  for (const PartitionUpdate& partition : manifest_.partitions()) {
    has_old_fields = has_old_fields || partition.has_old_partition_info();
  }

  // The presence of an old partition hash is the sole indicator for a delta
  // update.
  InstallPayloadType actual_payload_type =
      has_old_fields ? InstallPayloadType::kDelta : InstallPayloadType::kFull;

  if (payload_->type == InstallPayloadType::kUnknown) {
    LOG(INFO) << "Detected a '"
              << InstallPayloadTypeToString(actual_payload_type)
              << "' payload.";
    payload_->type = actual_payload_type;
  } else if (payload_->type != actual_payload_type) {
    LOG(ERROR) << "InstallPlan expected a '"
               << InstallPayloadTypeToString(payload_->type)
               << "' payload but the downloaded manifest contains a '"
               << InstallPayloadTypeToString(actual_payload_type)
               << "' payload.";
    return ErrorCode::kPayloadMismatchedType;
  }

  // Check that the minor version is compatible.
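  // Rationale: a full payload carries no source-dependent (delta) operations,
  // so its minor version is pinned to the single value
  // kFullPayloadMinorVersion, while a delta payload may use any minor version
  // in the supported range checked below.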
  if (actual_payload_type == InstallPayloadType::kFull) {
    if (manifest_.minor_version() != kFullPayloadMinorVersion) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << ", but all full payloads should have version "
                 << kFullPayloadMinorVersion << ".";
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  } else {
    if (manifest_.minor_version() < kMinSupportedMinorPayloadVersion ||
        manifest_.minor_version() > kMaxSupportedMinorPayloadVersion) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << " not in the range of supported minor versions ["
                 << kMinSupportedMinorPayloadVersion << ", "
                 << kMaxSupportedMinorPayloadVersion << "].";
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  }

  if (major_payload_version_ != kChromeOSMajorPayloadVersion) {
    if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() ||
        manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() ||
        manifest_.install_operations_size() != 0 ||
        manifest_.kernel_install_operations_size() != 0) {
      LOG(ERROR) << "Manifest contains deprecated fields only supported in "
                 << "major payload version 1, but the payload major version "
                 << "is " << major_payload_version_;
      return ErrorCode::kPayloadMismatchedType;
    }
  }

  if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
    LOG(ERROR) << "The current OS build timestamp ("
               << hardware_->GetBuildTimestamp()
               << ") is newer than the maximum timestamp in the manifest ("
               << manifest_.max_timestamp() << ")";
    if (!hardware_->AllowDowngrade()) {
      return ErrorCode::kPayloadTimestampError;
    }
    LOG(INFO) << "The current OS build allows downgrade, continuing to apply"
                 " the payload with an older timestamp.";
  }

  if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
    if (manifest_.has_dynamic_partition_metadata()) {
      LOG(ERROR)
          << "Manifest should not contain dynamic_partition_metadata for "
          << "major version " << kChromeOSMajorPayloadVersion
          << ". Please use major version 2 or above.";
      return ErrorCode::kPayloadMismatchedType;
    }
  }

  // TODO(garnold) we should be adding more and more manifest checks, such as
  // partition boundaries, etc. (see chromium-os:37661).

  return ErrorCode::kSuccess;
}

ErrorCode DeltaPerformer::ValidateOperationHash(
    const InstallOperation& operation) {
  if (!operation.data_sha256_hash().size()) {
    if (!operation.data_length()) {
      // Operations that do not have any data blob won't have any operation
      // hash either. So, these operations are always considered validated
      // since the metadata that contains all the non-data-blob portions of
      // the operation has already been validated. This is true for both HTTP
      // and HTTPS cases.
      return ErrorCode::kSuccess;
    }

    // No hash is present for an operation that has data blobs. This shouldn't
    // happen normally for any client that has this code, because the
    // corresponding update should have been produced with the operation
    // hashes. So if it happens it means either we've turned operation hash
    // generation off in DeltaDiffGenerator or it's a regression of some sort.
    // One caveat though: the last operation is a dummy signature operation
    // that doesn't have a hash at the time the manifest is created.
    // So we should not complain about that operation. This operation can be
    // recognized by the fact that its offset is mentioned in the manifest.
    if (manifest_.signatures_offset() &&
        manifest_.signatures_offset() == operation.data_offset()) {
      LOG(INFO) << "Skipping hash verification for signature operation "
                << next_operation_num_ + 1;
    } else {
      if (install_plan_->hash_checks_mandatory) {
        LOG(ERROR) << "Missing mandatory operation hash for operation "
                   << next_operation_num_ + 1;
        return ErrorCode::kDownloadOperationHashMissingError;
      }

      LOG(WARNING) << "Cannot validate operation " << next_operation_num_ + 1
                   << " as there's no operation hash in manifest";
    }
    return ErrorCode::kSuccess;
  }

  brillo::Blob expected_op_hash;
  expected_op_hash.assign(operation.data_sha256_hash().data(),
                          (operation.data_sha256_hash().data() +
                           operation.data_sha256_hash().size()));

  brillo::Blob calculated_op_hash;
  if (!HashCalculator::RawHashOfBytes(
          buffer_.data(), operation.data_length(), &calculated_op_hash)) {
    LOG(ERROR) << "Unable to compute actual hash of operation "
               << next_operation_num_;
    return ErrorCode::kDownloadOperationHashVerificationError;
  }

  if (calculated_op_hash != expected_op_hash) {
    LOG(ERROR) << "Hash verification failed for operation "
               << next_operation_num_ << ". Expected hash = ";
    utils::HexDumpVector(expected_op_hash);
    LOG(ERROR) << "Calculated hash over " << operation.data_length()
               << " bytes at offset: " << operation.data_offset() << " = ";
    utils::HexDumpVector(calculated_op_hash);
    return ErrorCode::kDownloadOperationHashMismatch;
  }

  return ErrorCode::kSuccess;
}

#define TEST_AND_RETURN_VAL(_retval, _condition)                \
  do {                                                          \
    if (!(_condition)) {                                        \
      LOG(ERROR) << "VerifyPayload failure: " << #_condition;   \
      return _retval;                                           \
    }                                                           \
  } while (0)

ErrorCode DeltaPerformer::VerifyPayload(
    const brillo::Blob& update_check_response_hash,
    const uint64_t update_check_response_size) {
  // Verifies the download size.
  if (update_check_response_size !=
      metadata_size_ + metadata_signature_size_ + buffer_offset_) {
    LOG(ERROR) << "update_check_response_size (" << update_check_response_size
               << ") doesn't match metadata_size (" << metadata_size_
               << ") + metadata_signature_size (" << metadata_signature_size_
               << ") + buffer_offset (" << buffer_offset_ << ").";
    return ErrorCode::kPayloadSizeMismatchError;
  }

  // Verifies the payload hash.
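  // Note: payload_hash_calculator_ has digested every downloaded byte, while
  // signed_hash_calculator_ was updated with a shorter size for the final
  // buffer (see DiscardBuffer()), so it covers the payload up to, but not
  // including, the signature blob. That is why the two digests are compared
  // against different references below.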
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
                      !payload_hash_calculator_.raw_hash().empty());
  TEST_AND_RETURN_VAL(
      ErrorCode::kPayloadHashMismatchError,
      payload_hash_calculator_.raw_hash() == update_check_response_hash);

  TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
                      !signatures_message_data_.empty());
  brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
                      hash_data.size() == kSHA256Size);

  auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
  if (!perform_verification) {
    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
    return ErrorCode::kSuccess;
  }
  if (!payload_verifier) {
    LOG(ERROR) << "Failed to create the payload verifier.";
    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
  }
  if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) {
    // The autoupdate_CatchBadSignatures test checks for this string
    // in log files. Keep in sync.
    LOG(ERROR) << "Public key verification failed, thus update failed.";
    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
  }

  LOG(INFO) << "Payload hash matches value in payload.";
  return ErrorCode::kSuccess;
}

void DeltaPerformer::DiscardBuffer(bool do_advance_offset,
                                   size_t signed_hash_buffer_size) {
  // Update the buffer offset.
  if (do_advance_offset)
    buffer_offset_ += buffer_.size();

  // Hash the content.
  payload_hash_calculator_.Update(buffer_.data(), buffer_.size());
  signed_hash_calculator_.Update(buffer_.data(), signed_hash_buffer_size);

  // Swap content with an empty vector to ensure that all memory is released.
  brillo::Blob().swap(buffer_);
}

bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs,
                                     const string& update_check_response_hash) {
  int64_t next_operation = kUpdateStateOperationInvalid;
  if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
        next_operation != kUpdateStateOperationInvalid && next_operation > 0))
    return false;

  string interrupted_hash;
  if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) &&
        !interrupted_hash.empty() &&
        interrupted_hash == update_check_response_hash))
    return false;

  int64_t resumed_update_failures;
  // Note that storing this value is optional, but if it is there it should
  // not be more than the limit.
  if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
      resumed_update_failures > kMaxResumedUpdateFailures)
    return false;

  // Sanity check the rest.
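  // Illustrative resume scenario (hypothetical values): an interrupted
  // update that stored next_operation = 42, a response hash equal to the
  // current one, and resumed_update_failures = 3 (below
  // kMaxResumedUpdateFailures) passes the checks above; it remains resumable
  // only if the data offset, hash context, and metadata sizes below were
  // also checkpointed.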
  int64_t next_data_offset = -1;
  if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
        next_data_offset >= 0))
    return false;

  string sha256_context;
  if (!(prefs->GetString(kPrefsUpdateStateSHA256Context, &sha256_context) &&
        !sha256_context.empty()))
    return false;

  int64_t manifest_metadata_size = 0;
  if (!(prefs->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
        manifest_metadata_size > 0))
    return false;

  int64_t manifest_signature_size = 0;
  if (!(prefs->GetInt64(kPrefsManifestSignatureSize,
                        &manifest_signature_size) &&
        manifest_signature_size >= 0))
    return false;

  return true;
}

bool DeltaPerformer::ResetUpdateProgress(
    PrefsInterface* prefs,
    bool quick,
    bool skip_dynamic_partititon_metadata_updated) {
  TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
                                        kUpdateStateOperationInvalid));
  if (!quick) {
    prefs->SetInt64(kPrefsUpdateStateNextDataOffset, -1);
    prefs->SetInt64(kPrefsUpdateStateNextDataLength, 0);
    prefs->SetString(kPrefsUpdateStateSHA256Context, "");
    prefs->SetString(kPrefsUpdateStateSignedSHA256Context, "");
    prefs->SetString(kPrefsUpdateStateSignatureBlob, "");
    prefs->SetInt64(kPrefsManifestMetadataSize, -1);
    prefs->SetInt64(kPrefsManifestSignatureSize, -1);
    prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
    prefs->Delete(kPrefsPostInstallSucceeded);
    prefs->Delete(kPrefsVerityWritten);

    if (!skip_dynamic_partititon_metadata_updated) {
      LOG(INFO) << "Resetting recorded hash for prepared partitions.";
      prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
    }
  }
  return true;
}

bool DeltaPerformer::CheckpointUpdateProgress(bool force) {
  base::TimeTicks curr_time = base::TimeTicks::Now();
  if (force || curr_time > update_checkpoint_time_) {
    update_checkpoint_time_ = curr_time + update_checkpoint_wait_;
  } else {
    return false;
  }

  Terminator::set_exit_blocked(true);
  if (last_updated_buffer_offset_ != buffer_offset_) {
    // Resets the progress in case we die in the middle of the state update.
    ResetUpdateProgress(prefs_, true);
    TEST_AND_RETURN_FALSE(prefs_->SetString(
        kPrefsUpdateStateSHA256Context, payload_hash_calculator_.GetContext()));
    TEST_AND_RETURN_FALSE(
        prefs_->SetString(kPrefsUpdateStateSignedSHA256Context,
                          signed_hash_calculator_.GetContext()));
    TEST_AND_RETURN_FALSE(
        prefs_->SetInt64(kPrefsUpdateStateNextDataOffset, buffer_offset_));
    last_updated_buffer_offset_ = buffer_offset_;

    if (next_operation_num_ < num_total_operations_) {
      size_t partition_index = current_partition_;
      while (next_operation_num_ >= acc_num_operations_[partition_index])
        partition_index++;
      const size_t partition_operation_num =
          next_operation_num_ -
          (partition_index ? acc_num_operations_[partition_index - 1] : 0);
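      // Illustrative example (hypothetical counts): with
      // acc_num_operations_ = {10, 25}, next_operation_num_ == 12 falls in
      // the second partition (12 >= 10, 12 < 25), so partition_index == 1
      // and partition_operation_num == 12 - 10 == 2, i.e. the third
      // operation of that partition.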
      const InstallOperation& op =
          partitions_[partition_index].operations(partition_operation_num);
      TEST_AND_RETURN_FALSE(
          prefs_->SetInt64(kPrefsUpdateStateNextDataLength, op.data_length()));
    } else {
      TEST_AND_RETURN_FALSE(
          prefs_->SetInt64(kPrefsUpdateStateNextDataLength, 0));
    }
  }
  TEST_AND_RETURN_FALSE(
      prefs_->SetInt64(kPrefsUpdateStateNextOperation, next_operation_num_));
  return true;
}

bool DeltaPerformer::PrimeUpdateState() {
  CHECK(manifest_valid_);

  int64_t next_operation = kUpdateStateOperationInvalid;
  if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
      next_operation == kUpdateStateOperationInvalid || next_operation <= 0) {
    // Initiating a new update; no more state needs to be initialized.
    return true;
  }
  next_operation_num_ = next_operation;

  // Resuming an update -- load the rest of the update state.
  int64_t next_data_offset = -1;
  TEST_AND_RETURN_FALSE(
      prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
      next_data_offset >= 0);
  buffer_offset_ = next_data_offset;

  // The signed hash context and the signature blob may be empty if the
  // interrupted update didn't reach the signature.
  string signed_hash_context;
  if (prefs_->GetString(kPrefsUpdateStateSignedSHA256Context,
                        &signed_hash_context)) {
    TEST_AND_RETURN_FALSE(
        signed_hash_calculator_.SetContext(signed_hash_context));
  }

  prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signatures_message_data_);

  string hash_context;
  TEST_AND_RETURN_FALSE(
      prefs_->GetString(kPrefsUpdateStateSHA256Context, &hash_context) &&
      payload_hash_calculator_.SetContext(hash_context));

  int64_t manifest_metadata_size = 0;
  TEST_AND_RETURN_FALSE(
      prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
      manifest_metadata_size > 0);
  metadata_size_ = manifest_metadata_size;

  int64_t manifest_signature_size = 0;
  TEST_AND_RETURN_FALSE(
      prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size) &&
      manifest_signature_size >= 0);
  metadata_signature_size_ = manifest_signature_size;

  // Advance the download progress to reflect what doesn't need to be
  // re-downloaded.
  total_bytes_received_ += buffer_offset_;

  // Speculatively count the resume as a failure.
  int64_t resumed_update_failures;
  if (prefs_->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)) {
    resumed_update_failures++;
  } else {
    resumed_update_failures = 1;
  }
  prefs_->SetInt64(kPrefsResumedUpdateFailures, resumed_update_failures);
  return true;
}

}  // namespace chromeos_update_engine