//
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_generator/ab_generator.h"

#include <algorithm>
#include <utility>

#include <base/strings/stringprintf.h>

#include "update_engine/common/hash_calculator.h"
#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/annotated_operation.h"
#include "update_engine/payload_generator/bzip.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/delta_diff_utils.h"

using chromeos_update_engine::diff_utils::IsAReplaceOperation;
using std::string;
using std::vector;

namespace chromeos_update_engine {

bool ABGenerator::GenerateOperations(const PayloadGenerationConfig& config,
                                     const PartitionConfig& old_part,
                                     const PartitionConfig& new_part,
                                     BlobFileWriter* blob_file,
                                     vector<AnnotatedOperation>* aops) {
  TEST_AND_RETURN_FALSE(old_part.name == new_part.name);

  ssize_t hard_chunk_blocks =
      (config.hard_chunk_size == -1
           ? -1
           : config.hard_chunk_size / config.block_size);
  size_t soft_chunk_blocks = config.soft_chunk_size / config.block_size;

  aops->clear();
  TEST_AND_RETURN_FALSE(diff_utils::DeltaReadPartition(aops,
                                                       old_part,
                                                       new_part,
                                                       hard_chunk_blocks,
                                                       soft_chunk_blocks,
                                                       config.version,
                                                       blob_file));
  LOG(INFO) << "done reading " << new_part.name;

  SortOperationsByDestination(aops);

  // Use the soft_chunk_size when merging operations to prevent merging all
  // the operations into a huge one if there's no hard limit.
  size_t merge_chunk_blocks = soft_chunk_blocks;
  if (hard_chunk_blocks != -1 &&
      static_cast<size_t>(hard_chunk_blocks) < soft_chunk_blocks) {
    merge_chunk_blocks = hard_chunk_blocks;
  }

  LOG(INFO) << "Merging " << aops->size() << " operations.";
  TEST_AND_RETURN_FALSE(MergeOperations(
      aops, config.version, merge_chunk_blocks, new_part.path, blob_file));
  LOG(INFO) << aops->size() << " operations after merge.";

  if (config.version.minor >= kOpSrcHashMinorPayloadVersion)
    TEST_AND_RETURN_FALSE(AddSourceHash(aops, old_part.path));

  return true;
}

void ABGenerator::SortOperationsByDestination(
    vector<AnnotatedOperation>* aops) {
  sort(aops->begin(), aops->end(), diff_utils::CompareAopsByDestination);
}

bool ABGenerator::FragmentOperations(const PayloadVersion& version,
                                     vector<AnnotatedOperation>* aops,
                                     const string& target_part_path,
                                     BlobFileWriter* blob_file) {
  vector<AnnotatedOperation> fragmented_aops;
  for (const AnnotatedOperation& aop : *aops) {
    // Only do split if the operation has more than one dst extent.
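    // Splitting SOURCE_COPY and REPLACE* operations so that each piece covers
    // a single dst extent is what lets MergeOperations() coalesce contiguous
    // operations of the same type later on.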
    if (aop.op.dst_extents_size() > 1) {
      if (aop.op.type() == InstallOperation::SOURCE_COPY) {
        TEST_AND_RETURN_FALSE(SplitSourceCopy(aop, &fragmented_aops));
        continue;
      }
      if (IsAReplaceOperation(aop.op.type())) {
        TEST_AND_RETURN_FALSE(SplitAReplaceOp(
            version, aop, target_part_path, &fragmented_aops, blob_file));
        continue;
      }
    }
    fragmented_aops.push_back(aop);
  }
  *aops = std::move(fragmented_aops);
  return true;
}

bool ABGenerator::SplitSourceCopy(const AnnotatedOperation& original_aop,
                                  vector<AnnotatedOperation>* result_aops) {
  InstallOperation original_op = original_aop.op;
  TEST_AND_RETURN_FALSE(original_op.type() == InstallOperation::SOURCE_COPY);
  // Keeps track of the index of curr_src_ext.
  int curr_src_ext_index = 0;
  Extent curr_src_ext = original_op.src_extents(curr_src_ext_index);
  for (int i = 0; i < original_op.dst_extents_size(); i++) {
    const Extent& dst_ext = original_op.dst_extents(i);
    // The new operation which will have only one dst extent.
    InstallOperation new_op;
    uint64_t blocks_left = dst_ext.num_blocks();
    while (blocks_left > 0) {
      if (curr_src_ext.num_blocks() <= blocks_left) {
        // If the curr_src_ext is smaller than dst_ext, add it.
        blocks_left -= curr_src_ext.num_blocks();
        *(new_op.add_src_extents()) = curr_src_ext;
        if (curr_src_ext_index + 1 < original_op.src_extents().size()) {
          curr_src_ext = original_op.src_extents(++curr_src_ext_index);
        } else {
          break;
        }
      } else {
        // Split src_exts that are bigger than the dst_ext we're dealing with.
        Extent first_ext;
        first_ext.set_num_blocks(blocks_left);
        first_ext.set_start_block(curr_src_ext.start_block());
        *(new_op.add_src_extents()) = first_ext;
        // Keep the second half of the split op.
        curr_src_ext.set_num_blocks(curr_src_ext.num_blocks() - blocks_left);
        curr_src_ext.set_start_block(curr_src_ext.start_block() + blocks_left);
        blocks_left -= first_ext.num_blocks();
      }
    }
    // Fix up our new operation and add it to the results.
    new_op.set_type(InstallOperation::SOURCE_COPY);
    *(new_op.add_dst_extents()) = dst_ext;

    AnnotatedOperation new_aop;
    new_aop.op = new_op;
    new_aop.name = base::StringPrintf("%s:%d", original_aop.name.c_str(), i);
    result_aops->push_back(new_aop);
  }
  if (curr_src_ext_index != original_op.src_extents().size() - 1) {
    LOG(FATAL) << "Incorrectly split SOURCE_COPY operation. Did not use all "
               << "source extents.";
  }
  return true;
}

bool ABGenerator::SplitAReplaceOp(const PayloadVersion& version,
                                  const AnnotatedOperation& original_aop,
                                  const string& target_part_path,
                                  vector<AnnotatedOperation>* result_aops,
                                  BlobFileWriter* blob_file) {
  InstallOperation original_op = original_aop.op;
  TEST_AND_RETURN_FALSE(IsAReplaceOperation(original_op.type()));
  const bool is_replace = original_op.type() == InstallOperation::REPLACE;

  uint64_t data_offset = original_op.data_offset();
  for (int i = 0; i < original_op.dst_extents_size(); i++) {
    const Extent& dst_ext = original_op.dst_extents(i);
    // Make a new operation with only one dst extent.
    InstallOperation new_op;
    *(new_op.add_dst_extents()) = dst_ext;
    uint64_t data_size = dst_ext.num_blocks() * kBlockSize;
    // If this is a REPLACE, attempt to reuse portions of the existing blob.
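    // For REPLACE_BZ/REPLACE_XZ the compressed blob can't simply be sliced per
    // destination extent, so the data fields are left unset here and
    // AddDataAndSetType() below generates a fresh blob for each split piece.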
    if (is_replace) {
      new_op.set_type(InstallOperation::REPLACE);
      new_op.set_data_length(data_size);
      new_op.set_data_offset(data_offset);
      data_offset += data_size;
    }

    AnnotatedOperation new_aop;
    new_aop.op = new_op;
    new_aop.name = base::StringPrintf("%s:%d", original_aop.name.c_str(), i);
    TEST_AND_RETURN_FALSE(
        AddDataAndSetType(&new_aop, version, target_part_path, blob_file));

    result_aops->push_back(new_aop);
  }
  return true;
}

bool ABGenerator::MergeOperations(vector<AnnotatedOperation>* aops,
                                  const PayloadVersion& version,
                                  size_t chunk_blocks,
                                  const string& target_part_path,
                                  BlobFileWriter* blob_file) {
  vector<AnnotatedOperation> new_aops;
  for (const AnnotatedOperation& curr_aop : *aops) {
    if (new_aops.empty()) {
      new_aops.push_back(curr_aop);
      continue;
    }
    AnnotatedOperation& last_aop = new_aops.back();
    bool last_is_a_replace = IsAReplaceOperation(last_aop.op.type());

    if (last_aop.op.dst_extents_size() <= 0 ||
        curr_aop.op.dst_extents_size() <= 0) {
      new_aops.push_back(curr_aop);
      continue;
    }
    uint32_t last_dst_idx = last_aop.op.dst_extents_size() - 1;
    uint32_t last_end_block =
        last_aop.op.dst_extents(last_dst_idx).start_block() +
        last_aop.op.dst_extents(last_dst_idx).num_blocks();
    uint32_t curr_start_block = curr_aop.op.dst_extents(0).start_block();
    uint32_t combined_block_count =
        last_aop.op.dst_extents(last_dst_idx).num_blocks() +
        curr_aop.op.dst_extents(0).num_blocks();
    bool is_a_replace = IsAReplaceOperation(curr_aop.op.type());

    bool is_delta_op = curr_aop.op.type() == InstallOperation::SOURCE_COPY;
    if (((is_delta_op && (last_aop.op.type() == curr_aop.op.type())) ||
         (is_a_replace && last_is_a_replace)) &&
        last_end_block == curr_start_block &&
        combined_block_count <= chunk_blocks) {
      // If the operations have the same type (which is a type that we can
      // merge), are contiguous, are fragmented to have one destination extent,
      // and their combined block count does not exceed the chunk size, merge
      // them.
      last_aop.name = base::StringPrintf(
          "%s,%s", last_aop.name.c_str(), curr_aop.name.c_str());

      if (is_delta_op) {
        ExtendExtents(last_aop.op.mutable_src_extents(),
                      curr_aop.op.src_extents());
      }
      ExtendExtents(last_aop.op.mutable_dst_extents(),
                    curr_aop.op.dst_extents());
      // Set the data length to zero so we know to add the blob later.
      if (is_a_replace)
        last_aop.op.set_data_length(0);
    } else {
      // Otherwise just include the operation as is.
      new_aops.push_back(curr_aop);
    }
  }

  // Set the blobs for REPLACE/REPLACE_BZ/REPLACE_XZ operations that have been
  // merged.
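  // Merging reset their data_length to zero above, so the blobs are
  // regenerated from the target partition to cover the combined extents.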
  for (AnnotatedOperation& curr_aop : new_aops) {
    if (curr_aop.op.data_length() == 0 &&
        IsAReplaceOperation(curr_aop.op.type())) {
      TEST_AND_RETURN_FALSE(
          AddDataAndSetType(&curr_aop, version, target_part_path, blob_file));
    }
  }

  *aops = new_aops;
  return true;
}

bool ABGenerator::AddDataAndSetType(AnnotatedOperation* aop,
                                    const PayloadVersion& version,
                                    const string& target_part_path,
                                    BlobFileWriter* blob_file) {
  TEST_AND_RETURN_FALSE(IsAReplaceOperation(aop->op.type()));

  vector<Extent> dst_extents;
  ExtentsToVector(aop->op.dst_extents(), &dst_extents);
  brillo::Blob data(utils::BlocksInExtents(dst_extents) * kBlockSize);
  TEST_AND_RETURN_FALSE(utils::ReadExtents(
      target_part_path, dst_extents, &data, data.size(), kBlockSize));

  brillo::Blob blob;
  InstallOperation::Type op_type;
  TEST_AND_RETURN_FALSE(
      diff_utils::GenerateBestFullOperation(data, version, &blob, &op_type));

  // If the operation doesn't point to a data blob or points to a data blob of
  // a different type then we add it.
  if (aop->op.type() != op_type || aop->op.data_length() != blob.size()) {
    aop->op.set_type(op_type);
    aop->SetOperationBlob(blob, blob_file);
  }

  return true;
}

bool ABGenerator::AddSourceHash(vector<AnnotatedOperation>* aops,
                                const string& source_part_path) {
  for (AnnotatedOperation& aop : *aops) {
    if (aop.op.src_extents_size() == 0)
      continue;

    vector<Extent> src_extents;
    ExtentsToVector(aop.op.src_extents(), &src_extents);
    brillo::Blob src_data, src_hash;
    uint64_t src_length =
        aop.op.has_src_length()
            ? aop.op.src_length()
            : utils::BlocksInExtents(aop.op.src_extents()) * kBlockSize;
    TEST_AND_RETURN_FALSE(utils::ReadExtents(
        source_part_path, src_extents, &src_data, src_length, kBlockSize));
    TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfData(src_data, &src_hash));
    aop.op.set_src_sha256_hash(src_hash.data(), src_hash.size());
  }
  return true;
}

}  // namespace chromeos_update_engine