//
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_generator/inplace_generator.h"

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "update_engine/common/utils.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_generator/cycle_breaker.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/graph_types.h"
#include "update_engine/payload_generator/graph_utils.h"
#include "update_engine/payload_generator/topological_sort.h"
#include "update_engine/update_metadata.pb.h"

using std::make_pair;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;

namespace chromeos_update_engine {

using Block = InplaceGenerator::Block;

namespace {

// The only PayloadVersion supported by this implementation.
const PayloadVersion kInPlacePayloadVersion{kChromeOSMajorPayloadVersion,
                                            kInPlaceMinorPayloadVersion};

// This class allocates non-existent temp blocks, starting from
// kTempBlockStart. Other code is responsible for converting these
// temp blocks into real blocks, as the client can't read or write to
// these blocks.
class DummyExtentAllocator {
 public:
  vector<Extent> Allocate(const uint64_t block_count) {
    vector<Extent> ret(1);
    ret[0].set_start_block(next_block_);
    ret[0].set_num_blocks(block_count);
    next_block_ += block_count;
    return ret;
  }

 private:
  uint64_t next_block_{kTempBlockStart};
};
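
// A minimal usage sketch (illustrative only, not part of the build):
// consecutive Allocate() calls hand out disjoint temp ranges starting at
// kTempBlockStart.
//
//   DummyExtentAllocator allocator;
//   vector<Extent> a = allocator.Allocate(10);  // blocks [kTempBlockStart, +10)
//   vector<Extent> b = allocator.Allocate(5);   // the next 5 temp blocks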

// Takes a vector of blocks and returns an equivalent vector of Extent
// objects.
vector<Extent> CompressExtents(const vector<uint64_t>& blocks) {
  vector<Extent> new_extents;
  for (uint64_t block : blocks) {
    AppendBlockToExtents(&new_extents, block);
  }
  return new_extents;
}
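
// For example (illustrative): blocks {5, 6, 7, 9} compress to two extents,
// {start_block: 5, num_blocks: 3} and {start_block: 9, num_blocks: 1}.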

// Helper class to compare two operations by the start block of the first
// Extent in their destination extents, given the indices of the operations in
// the graph.
class IndexedInstallOperationsDstComparator {
 public:
  explicit IndexedInstallOperationsDstComparator(Graph* graph)
    : graph_(graph) {}

  // Compares the operations in vertices a and b of graph_.
  bool operator()(size_t a, size_t b) const {
    return diff_utils::CompareAopsByDestination((*graph_)[a].aop,
                                                (*graph_)[b].aop);
  }

 private:
  const Graph* const graph_;
};

}  // namespace

void InplaceGenerator::CheckGraph(const Graph& graph) {
  for (const Vertex& v : graph) {
    CHECK(v.aop.op.has_type());
  }
}

void InplaceGenerator::SubstituteBlocks(
    Vertex* vertex,
    const vector<Extent>& remove_extents,
    const vector<Extent>& replace_extents) {
  // First, expand out the blocks that the op reads from.
  vector<uint64_t> read_blocks =
      ExpandExtents(vertex->aop.op.src_extents());
  {
    // Expand remove_extents and replace_extents.
    vector<uint64_t> remove_extents_expanded = ExpandExtents(remove_extents);
    vector<uint64_t> replace_extents_expanded = ExpandExtents(replace_extents);
    CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size());
    map<uint64_t, uint64_t> conversion;
    for (vector<uint64_t>::size_type i = 0;
         i < replace_extents_expanded.size(); i++) {
      conversion[remove_extents_expanded[i]] = replace_extents_expanded[i];
    }
    ApplyMap(&read_blocks, conversion);
    for (auto& edge_prop_pair : vertex->out_edges) {
      vector<uint64_t> write_before_deps_expanded = ExpandExtents(
          edge_prop_pair.second.write_extents);
      ApplyMap(&write_before_deps_expanded, conversion);
      edge_prop_pair.second.write_extents =
          CompressExtents(write_before_deps_expanded);
    }
  }
  // Convert read_blocks back to extents.
  vertex->aop.op.clear_src_extents();
  vector<Extent> new_extents = CompressExtents(read_blocks);
  StoreExtents(new_extents, vertex->aop.op.mutable_src_extents());
}
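
// A worked example (illustrative): if the op reads blocks 10-19,
// remove_extents covers blocks 15-19, and replace_extents covers five temp
// blocks starting at T, then after SubstituteBlocks() the op reads blocks
// 10-14 followed by T..T+4; write-before extents on out-edges are remapped
// with the same conversion.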

bool InplaceGenerator::CutEdges(Graph* graph,
                                const set<Edge>& edges,
                                vector<CutEdgeVertexes>* out_cuts) {
  DummyExtentAllocator scratch_allocator;
  vector<CutEdgeVertexes> cuts;
  cuts.reserve(edges.size());

  uint64_t scratch_blocks_used = 0;
  for (const Edge& edge : edges) {
    cuts.resize(cuts.size() + 1);
    vector<Extent> old_extents =
        (*graph)[edge.first].out_edges[edge.second].extents;
    // Choose some scratch space.
    scratch_blocks_used += graph_utils::EdgeWeight(*graph, edge);
    cuts.back().tmp_extents =
        scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge));
    // Create a vertex to copy original->scratch.
    cuts.back().new_vertex = graph->size();
    graph->emplace_back();
    cuts.back().old_src = edge.first;
    cuts.back().old_dst = edge.second;

    EdgeProperties& cut_edge_properties =
        (*graph)[edge.first].out_edges.find(edge.second)->second;

    // This should never happen, as we should only be cutting edges between
    // real file nodes, and write-before relationships are created from
    // a real file node to a temp copy node:
    CHECK(cut_edge_properties.write_extents.empty())
        << "Can't cut edge that has write-before relationship.";

    // Make the source node depend on the copy operation.
    (*graph)[edge.first].out_edges.insert(make_pair(graph->size() - 1,
                                                    cut_edge_properties));

    // Set src/dst extents and other proto variables for the copy operation.
    graph->back().aop.op.set_type(InstallOperation::MOVE);
    StoreExtents(cut_edge_properties.extents,
                 graph->back().aop.op.mutable_src_extents());
    StoreExtents(cuts.back().tmp_extents,
                 graph->back().aop.op.mutable_dst_extents());
    graph->back().aop.op.set_src_length(
        graph_utils::EdgeWeight(*graph, edge) * kBlockSize);
    graph->back().aop.op.set_dst_length(graph->back().aop.op.src_length());

    // Make the dest node read from the scratch space.
    SubstituteBlocks(
        &((*graph)[edge.second]),
        (*graph)[edge.first].out_edges[edge.second].extents,
        cuts.back().tmp_extents);

    // Delete the old edge.
    CHECK_EQ(static_cast<Graph::size_type>(1),
             (*graph)[edge.first].out_edges.erase(edge.second));

    // Add an edge from dst to the copy operation.
    EdgeProperties write_before_edge_properties;
    write_before_edge_properties.write_extents = cuts.back().tmp_extents;
    (*graph)[edge.second].out_edges.insert(
        make_pair(graph->size() - 1, write_before_edge_properties));
  }
  out_cuts->swap(cuts);
  return true;
}
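
// How a cut rewrites the graph (illustrative): for a cut edge W -> R, reader R
// must read blocks X before writer W overwrites them. CutEdges() inserts a
// MOVE node N that saves X to temp blocks:
//
//   before:  W --(extents X)--> R
//   after:   W --> N (MOVE X -> temp) <--(write-before temp)-- R
//
// R now reads the temp copy instead of X, so the W -> R edge can be deleted,
// breaking the cycle that ran through it.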

// Creates all the edges for the graph. Writers of a block point to
// readers of the same block. This is because for an edge A->B, B
// must complete before A executes.
void InplaceGenerator::CreateEdges(
    Graph* graph,
    const vector<Block>& blocks) {
  for (vector<Block>::size_type i = 0;
       i < blocks.size(); i++) {
    // Blocks with both a reader and writer get an edge.
    if (blocks[i].reader == Vertex::kInvalidIndex ||
        blocks[i].writer == Vertex::kInvalidIndex)
      continue;
    // Don't have a node depend on itself.
    if (blocks[i].reader == blocks[i].writer)
      continue;
    // See if there's already an edge we can add onto.
    Vertex::EdgeMap::iterator edge_it =
        (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
    if (edge_it == (*graph)[blocks[i].writer].out_edges.end()) {
      // No existing edge. Create one.
      (*graph)[blocks[i].writer].out_edges.insert(
          make_pair(blocks[i].reader, EdgeProperties()));
      edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
      CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end());
    }
    AppendBlockToExtents(&edge_it->second.extents, i);
  }
}
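
// For example (illustrative): if vertex 3 writes block 100 and vertex 7 reads
// block 100, CreateEdges() adds the edge 3 -> 7 and appends block 100 to that
// edge's extents, recording that vertex 7 must run before vertex 3.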

namespace {

class SortCutsByTopoOrderLess {
 public:
  explicit SortCutsByTopoOrderLess(
      const vector<vector<Vertex::Index>::size_type>& table)
      : table_(table) {}
  bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) {
    return table_[a.old_dst] < table_[b.old_dst];
  }

 private:
  const vector<vector<Vertex::Index>::size_type>& table_;
};

}  // namespace

void InplaceGenerator::GenerateReverseTopoOrderMap(
    const vector<Vertex::Index>& op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) {
  vector<vector<Vertex::Index>::size_type> table(op_indexes.size());
  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size();
       i != e; ++i) {
    Vertex::Index node = op_indexes[i];
    if (table.size() < (node + 1)) {
      table.resize(node + 1);
    }
    table[node] = i;
  }
  reverse_op_indexes->swap(table);
}
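
// For example (illustrative): op_indexes {4, 2, 9} (a topological order)
// yields table[4] = 0, table[2] = 1, table[9] = 2, so table[n] answers
// "where does vertex n sit in the current order?" in O(1).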

void InplaceGenerator::SortCutsByTopoOrder(
    const vector<Vertex::Index>& op_indexes,
    vector<CutEdgeVertexes>* cuts) {
  // First, build a reverse lookup table.
  vector<vector<Vertex::Index>::size_type> table;
  GenerateReverseTopoOrderMap(op_indexes, &table);
  SortCutsByTopoOrderLess less(table);
  sort(cuts->begin(), cuts->end(), less);
}

void InplaceGenerator::MoveAndSortFullOpsToBack(
    Graph* graph,
    vector<Vertex::Index>* op_indexes) {
  vector<Vertex::Index> ret;
  vector<Vertex::Index> full_ops;
  ret.reserve(op_indexes->size());
  for (auto op_index : *op_indexes) {
    InstallOperation_Type type = (*graph)[op_index].aop.op.type();
    if (type == InstallOperation::REPLACE ||
        type == InstallOperation::REPLACE_BZ) {
      full_ops.push_back(op_index);
    } else {
      ret.push_back(op_index);
    }
  }
  LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of "
            << (full_ops.size() + ret.size()) << " total ops.";
  // Sort full ops according to their dst_extents.
  sort(full_ops.begin(), full_ops.end(),
       IndexedInstallOperationsDstComparator(graph));
  ret.insert(ret.end(), full_ops.begin(), full_ops.end());
  op_indexes->swap(ret);
}

namespace {

template<typename T>
bool TempBlocksExistInExtents(const T& extents) {
  for (int i = 0, e = extents.size(); i < e; ++i) {
    Extent extent = GetElement(extents, i);
    uint64_t start = extent.start_block();
    uint64_t num = extent.num_blocks();
    if (start >= kTempBlockStart || (start + num) >= kTempBlockStart) {
      LOG(ERROR) << "Found temp block: start: " << start << ", num: " << num
                 << ", kTempBlockStart: " << kTempBlockStart;
      return true;
    }
    // Check for wrap-around, which would be a bug:
    CHECK(start <= (start + num));
  }
  return false;
}

// Converts the cuts, which must all have the same |old_dst| member,
// to full. It does this by converting the |old_dst| to REPLACE or
// REPLACE_BZ, dropping all incoming edges to |old_dst|, and marking
// all temp nodes invalid.
bool ConvertCutsToFull(
    Graph* graph,
    const string& new_part,
    BlobFileWriter* blob_file,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  set<Vertex::Index> deleted_nodes;
  for (const CutEdgeVertexes& cut : cuts) {
    TEST_AND_RETURN_FALSE(InplaceGenerator::ConvertCutToFullOp(
        graph,
        cut,
        new_part,
        blob_file));
    deleted_nodes.insert(cut.new_vertex);
  }
  deleted_nodes.insert(cuts[0].old_dst);

  vector<Vertex::Index> new_op_indexes;
  new_op_indexes.reserve(op_indexes->size());
  for (Vertex::Index vertex_index : *op_indexes) {
    if (utils::SetContainsKey(deleted_nodes, vertex_index))
      continue;
    new_op_indexes.push_back(vertex_index);
  }
  new_op_indexes.push_back(cuts[0].old_dst);
  op_indexes->swap(new_op_indexes);
  InplaceGenerator::GenerateReverseTopoOrderMap(*op_indexes,
                                                reverse_op_indexes);
  return true;
}

// Tries to assign temp blocks for a collection of cuts, all of which share
// the same old_dst member. If temp blocks can't be found, old_dst will be
// converted to a REPLACE or REPLACE_BZ operation. Returns true on success,
// which can happen even if blocks are converted to full. Returns false
// on exceptional error cases.
bool AssignBlockForAdjoiningCuts(
    Graph* graph,
    const string& new_part,
    BlobFileWriter* blob_file,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  const Vertex::Index old_dst = cuts[0].old_dst;
  // Calculate the number of blocks needed.
  uint64_t blocks_needed = 0;
  vector<uint64_t> cuts_blocks_needed(cuts.size());
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    uint64_t cut_blocks_needed = 0;
    for (const Extent& extent : cuts[i].tmp_extents) {
      cut_blocks_needed += extent.num_blocks();
    }
    blocks_needed += cut_blocks_needed;
    cuts_blocks_needed[i] = cut_blocks_needed;
  }

  // Find enough blocks.
  ExtentRanges scratch_ranges;
  // Each node that's supplying temp blocks, and the blocks it supplies:
  typedef vector<pair<Vertex::Index, ExtentRanges>> SupplierVector;
  SupplierVector block_suppliers;
  uint64_t scratch_blocks_found = 0;
  for (vector<Vertex::Index>::size_type i = (*reverse_op_indexes)[old_dst] + 1,
           e = op_indexes->size(); i < e; ++i) {
    Vertex::Index test_node = (*op_indexes)[i];
    if (!(*graph)[test_node].valid)
      continue;
    // See if this node has sufficient blocks.
    ExtentRanges ranges;
    ranges.AddRepeatedExtents((*graph)[test_node].aop.op.dst_extents());
    ranges.SubtractExtent(ExtentForRange(
        kTempBlockStart, kSparseHole - kTempBlockStart));
    ranges.SubtractRepeatedExtents((*graph)[test_node].aop.op.src_extents());
    // For now, for simplicity, subtract out all blocks in read-before
    // dependencies.
    for (Vertex::EdgeMap::const_iterator edge_i =
             (*graph)[test_node].out_edges.begin(),
             edge_e = (*graph)[test_node].out_edges.end();
         edge_i != edge_e; ++edge_i) {
      ranges.SubtractExtents(edge_i->second.extents);
    }

    // Prevent using block 0 as scratch space due to crbug.com/480751.
    if (ranges.ContainsBlock(0)) {
      LOG(INFO) << "Removing block 0 from the selected scratch range in vertex "
                << i;
      ranges.SubtractBlock(0);
    }

    if (ranges.blocks() == 0)
      continue;

    if (ranges.blocks() + scratch_blocks_found > blocks_needed) {
      // Trim down the ranges.
      vector<Extent> new_ranges = ranges.GetExtentsForBlockCount(
          blocks_needed - scratch_blocks_found);
      ranges = ExtentRanges();
      ranges.AddExtents(new_ranges);
    }
    scratch_ranges.AddRanges(ranges);
    block_suppliers.push_back(make_pair(test_node, ranges));
    scratch_blocks_found += ranges.blocks();
    if (scratch_ranges.blocks() >= blocks_needed)
      break;
  }
  if (scratch_ranges.blocks() < blocks_needed) {
    LOG(INFO) << "Unable to find sufficient scratch";
    TEST_AND_RETURN_FALSE(ConvertCutsToFull(graph,
                                            new_part,
                                            blob_file,
                                            op_indexes,
                                            reverse_op_indexes,
                                            cuts));
    return true;
  }
  // Use the scratch we found.
  TEST_AND_RETURN_FALSE(scratch_ranges.blocks() == scratch_blocks_found);

  // Make all the suppliers depend on this node.
  for (const auto& index_range_pair : block_suppliers) {
    graph_utils::AddReadBeforeDepExtents(
        &(*graph)[index_range_pair.first],
        old_dst,
        index_range_pair.second.GetExtentsForBlockCount(
            index_range_pair.second.blocks()));
  }

  // Replace temp blocks in each cut.
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    const CutEdgeVertexes& cut = cuts[i];
    vector<Extent> real_extents =
        scratch_ranges.GetExtentsForBlockCount(cuts_blocks_needed[i]);
    scratch_ranges.SubtractExtents(real_extents);

    // Fix the old dest node with the real blocks.
    InplaceGenerator::SubstituteBlocks(&(*graph)[old_dst],
                                       cut.tmp_extents,
                                       real_extents);

    // Fix the new node with the real blocks. Since the new node is just a
    // copy operation, we can replace all the dest extents with the real
    // blocks.
    InstallOperation* op = &(*graph)[cut.new_vertex].aop.op;
    op->clear_dst_extents();
    StoreExtents(real_extents, op->mutable_dst_extents());
  }
  return true;
}
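
// A small example of the scratch search above (illustrative): suppose the cuts
// need 8 temp blocks. Nodes after old_dst in the topological order are
// scanned; if node P's dst_extents yield 5 usable blocks (not among its own
// reads or read-before deps) and node Q's yield 3 more, both become suppliers,
// each gains a read-before dependency on old_dst, and the cuts' temp extents
// are rewritten to those 8 real blocks.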

}  // namespace

bool InplaceGenerator::AssignTempBlocks(
    Graph* graph,
    const string& new_part,
    BlobFileWriter* blob_file,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());

  // Group of cuts with the same old_dst:
  vector<CutEdgeVertexes> cuts_group;

  for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0;
       true; --i) {
    LOG(INFO) << "Fixing temp blocks in cut " << i
              << ": old dst: " << cuts[i].old_dst << " new vertex: "
              << cuts[i].new_vertex << " path: "
              << (*graph)[cuts[i].old_dst].aop.name;

    if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) {
      cuts_group.push_back(cuts[i]);
    } else {
      CHECK(!cuts_group.empty());
      TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                        new_part,
                                                        blob_file,
                                                        op_indexes,
                                                        reverse_op_indexes,
                                                        cuts_group));
      cuts_group.clear();
      cuts_group.push_back(cuts[i]);
    }

    if (i == e) {
      // Break out of the for() loop.
      break;
    }
  }
  CHECK(!cuts_group.empty());
  TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                    new_part,
                                                    blob_file,
                                                    op_indexes,
                                                    reverse_op_indexes,
                                                    cuts_group));
  return true;
}

bool InplaceGenerator::NoTempBlocksRemain(const Graph& graph) {
  size_t idx = 0;
  for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e;
       ++it, ++idx) {
    if (!it->valid)
      continue;
    const InstallOperation& op = it->aop.op;
    if (TempBlocksExistInExtents(op.dst_extents()) ||
        TempBlocksExistInExtents(op.src_extents())) {
      LOG(INFO) << "Found temp blocks in extents of node " << idx;
      return false;
    }

    // Check out-edges:
    for (const auto& edge_prop_pair : it->out_edges) {
      if (TempBlocksExistInExtents(edge_prop_pair.second.extents) ||
          TempBlocksExistInExtents(edge_prop_pair.second.write_extents)) {
        LOG(INFO) << "Found temp blocks in out-edge of node " << idx;
        return false;
      }
    }
  }
  return true;
}

bool InplaceGenerator::ConvertCutToFullOp(Graph* graph,
                                          const CutEdgeVertexes& cut,
                                          const string& new_part,
                                          BlobFileWriter* blob_file) {
  // Drop all incoming edges to |old_dst|, but keep all of its outgoing edges.
  if ((*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE_BZ &&
      (*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE) {
    Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges;
    graph_utils::DropWriteBeforeDeps(&out_edges);

    // Replace the operation with a REPLACE or REPLACE_BZ that generates the
    // same |new_extents| list of blocks, and update the graph.
    vector<AnnotatedOperation> new_aop;
    vector<Extent> new_extents;
    ExtentsToVector((*graph)[cut.old_dst].aop.op.dst_extents(),
                    &new_extents);
    TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
        &new_aop,
        "",  // old_part
        new_part,
        vector<Extent>(),  // old_extents
        new_extents,
        (*graph)[cut.old_dst].aop.name,
        -1,  // chunk_blocks, forces a single operation.
        kInPlacePayloadVersion,
        blob_file));
    TEST_AND_RETURN_FALSE(new_aop.size() == 1);
    TEST_AND_RETURN_FALSE(AddInstallOpToGraph(
        graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name));

    (*graph)[cut.old_dst].out_edges = out_edges;

    // Right now we don't have doubly-linked edges, so we have to scan
    // the whole graph.
    graph_utils::DropIncomingEdgesTo(graph, cut.old_dst);
  }

  // Delete the temp node.
  (*graph)[cut.old_src].out_edges.erase(cut.new_vertex);
  CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) ==
        (*graph)[cut.old_dst].out_edges.end());
  (*graph)[cut.new_vertex].valid = false;
  LOG(INFO) << "Marked node invalid: " << cut.new_vertex;
  return true;
}

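// High-level flow of ConvertGraphToDag() below (a summary of the steps the
// function logs): break cycles by choosing edges to cut, cut those edges with
// temp-block MOVE nodes, topologically sort, move full (REPLACE/REPLACE_BZ)
// ops to the back, assign real scratch blocks to the temp blocks (falling
// back to full ops when scratch runs out), then drop the scratch node and
// verify that no temp blocks remain.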
bool InplaceGenerator::ConvertGraphToDag(Graph* graph,
                                         const string& new_part,
                                         BlobFileWriter* blob_file,
                                         vector<Vertex::Index>* final_order,
                                         Vertex::Index scratch_vertex) {
  CycleBreaker cycle_breaker;
  LOG(INFO) << "Finding cycles...";
  set<Edge> cut_edges;
  cycle_breaker.BreakCycles(*graph, &cut_edges);
  LOG(INFO) << "Done finding cycles";
  CheckGraph(*graph);

  LOG(INFO) << "Cutting cycles...";
  vector<CutEdgeVertexes> cuts;
  TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts));
  LOG(INFO) << "Done cutting cycles";
  LOG(INFO) << "There are " << cuts.size() << " cuts.";
  CheckGraph(*graph);

  LOG(INFO) << "Creating initial topological order...";
  TopologicalSort(*graph, final_order);
  LOG(INFO) << "Done with initial topo order";
  CheckGraph(*graph);

  LOG(INFO) << "Moving full ops to the back";
  MoveAndSortFullOpsToBack(graph, final_order);
  LOG(INFO) << "Done moving full ops to back";

  vector<vector<Vertex::Index>::size_type> inverse_final_order;
  GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);

  SortCutsByTopoOrder(*final_order, &cuts);

  if (!cuts.empty())
    TEST_AND_RETURN_FALSE(AssignTempBlocks(graph,
                                           new_part,
                                           blob_file,
                                           final_order,
                                           &inverse_final_order,
                                           cuts));
  LOG(INFO) << "Making sure all temp blocks have been allocated";

  // Remove the scratch node, if any.
  if (scratch_vertex != Vertex::kInvalidIndex) {
    final_order->erase(final_order->begin() +
                       inverse_final_order[scratch_vertex]);
    (*graph)[scratch_vertex].valid = false;
    GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
  }

  graph_utils::DumpGraph(*graph);
  CHECK(NoTempBlocksRemain(*graph));
  LOG(INFO) << "Done making sure all temp blocks are allocated";
  return true;
}

void InplaceGenerator::CreateScratchNode(uint64_t start_block,
                                         uint64_t num_blocks,
                                         Vertex* vertex) {
  vertex->aop.name = "<scratch>";
  vertex->aop.op.set_type(InstallOperation::REPLACE_BZ);
  vertex->aop.op.set_data_offset(0);
  vertex->aop.op.set_data_length(0);
  Extent* extent = vertex->aop.op.add_dst_extents();
  extent->set_start_block(start_block);
  extent->set_num_blocks(num_blocks);
}
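
// For example (illustrative): CreateScratchNode(1000, 24, &vertex) builds a
// zero-length REPLACE_BZ placeholder named "<scratch>" whose dst extent
// covers blocks [1000, 1024). Those blocks then look like writable space to
// the temp-block search in AssignBlockForAdjoiningCuts(); the placeholder
// itself is dropped from the final order in ConvertGraphToDag().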

bool InplaceGenerator::AddInstallOpToBlocksVector(
    const InstallOperation& operation,
    const Graph& graph,
    Vertex::Index vertex,
    vector<Block>* blocks) {
  // Every operation must write to at least one block.
  TEST_AND_RETURN_FALSE(operation.dst_extents_size() > 0);

  enum BlockField { READER = 0, WRITER, BLOCK_FIELD_COUNT };
  for (int field = READER; field < BLOCK_FIELD_COUNT; field++) {
    const char* past_participle = (field == READER) ? "read" : "written";
    const google::protobuf::RepeatedPtrField<Extent>& extents =
        (field == READER) ? operation.src_extents() : operation.dst_extents();
    Vertex::Index Block::*access_type = (field == READER) ?
        &Block::reader : &Block::writer;

    for (const Extent& extent : extents) {
      for (uint64_t block = extent.start_block();
           block < (extent.start_block() + extent.num_blocks()); block++) {
        if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) {
          LOG(FATAL) << "Block " << block << " is already "
                     << past_participle << " by "
                     << (*blocks)[block].*access_type << " ("
                     << graph[(*blocks)[block].*access_type].aop.name
                     << ") and also " << vertex << " ("
                     << graph[vertex].aop.name << ")";
        }
        (*blocks)[block].*access_type = vertex;
      }
    }
  }
  return true;
}

bool InplaceGenerator::AddInstallOpToGraph(Graph* graph,
                                           Vertex::Index existing_vertex,
                                           vector<Block>* blocks,
                                           const InstallOperation& operation,
                                           const string& op_name) {
  Vertex::Index vertex = existing_vertex;
  if (vertex == Vertex::kInvalidIndex) {
    graph->emplace_back();
    vertex = graph->size() - 1;
  }
  (*graph)[vertex].aop.op = operation;
  CHECK((*graph)[vertex].aop.op.has_type());
  (*graph)[vertex].aop.name = op_name;

  if (blocks)
    TEST_AND_RETURN_FALSE(InplaceGenerator::AddInstallOpToBlocksVector(
        (*graph)[vertex].aop.op,
        *graph,
        vertex,
        blocks));
  return true;
}

void InplaceGenerator::ApplyMap(vector<uint64_t>* collection,
                                const map<uint64_t, uint64_t>& the_map) {
  for (uint64_t& elem : *collection) {
    const auto map_it = the_map.find(elem);
    if (map_it != the_map.end())
      elem = map_it->second;
  }
}
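
// For example (illustrative): with collection {1, 2, 3} and the_map
// {{2, 20}, {3, 30}}, ApplyMap() leaves 1 unchanged and rewrites the
// collection in place to {1, 20, 30}.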

bool InplaceGenerator::ResolveReadAfterWriteDependencies(
    const PartitionConfig& old_part,
    const PartitionConfig& new_part,
    uint64_t partition_size,
    size_t block_size,
    BlobFileWriter* blob_file,
    vector<AnnotatedOperation>* aops) {
  // Convert the operations to the graph.
  Graph graph;
  CheckGraph(graph);
  vector<Block> blocks(std::max(old_part.size, new_part.size) / block_size);
  for (const auto& aop : *aops) {
    AddInstallOpToGraph(
        &graph, Vertex::kInvalidIndex, &blocks, aop.op, aop.name);
  }
  CheckGraph(graph);

  // Add a final scratch node, if there's space for one.
  Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
  if (blocks.size() < (partition_size / block_size)) {
    scratch_vertex = graph.size();
    graph.emplace_back();
    size_t scratch_blocks = (partition_size / block_size) - blocks.size();
    LOG(INFO) << "Added " << scratch_blocks << " scratch space blocks.";
    CreateScratchNode(blocks.size(), scratch_blocks, &graph.back());
  }
  CheckGraph(graph);

  LOG(INFO) << "Creating edges...";
  CreateEdges(&graph, blocks);
  LOG(INFO) << "Done creating edges";
  CheckGraph(graph);

  vector<Vertex::Index> final_order;
  TEST_AND_RETURN_FALSE(ConvertGraphToDag(
      &graph,
      new_part.path,
      blob_file,
      &final_order,
      scratch_vertex));

  // Copy the operations over to the |aops| vector in the final_order generated
  // by the topological sort.
  aops->clear();
  aops->reserve(final_order.size());
  for (const Vertex::Index vertex_index : final_order) {
    const Vertex& vertex = graph[vertex_index];
    aops->push_back(vertex.aop);
  }
  return true;
}

bool InplaceGenerator::GenerateOperations(
    const PayloadGenerationConfig& config,
    const PartitionConfig& old_part,
    const PartitionConfig& new_part,
    BlobFileWriter* blob_file,
    vector<AnnotatedOperation>* aops) {
  TEST_AND_RETURN_FALSE(old_part.name == new_part.name);
  TEST_AND_RETURN_FALSE(config.version.major == kInPlacePayloadVersion.major);
  TEST_AND_RETURN_FALSE(config.version.minor == kInPlacePayloadVersion.minor);

  ssize_t hard_chunk_blocks = (config.hard_chunk_size == -1 ? -1 :
                               config.hard_chunk_size / config.block_size);
  size_t soft_chunk_blocks = config.soft_chunk_size / config.block_size;
  uint64_t partition_size = new_part.size;
  if (new_part.name == kLegacyPartitionNameRoot)
    partition_size = config.rootfs_partition_size;

  LOG(INFO) << "Delta compressing " << new_part.name << " partition...";
  TEST_AND_RETURN_FALSE(diff_utils::DeltaReadPartition(aops,
                                                       old_part,
                                                       new_part,
                                                       hard_chunk_blocks,
                                                       soft_chunk_blocks,
                                                       config.version,
                                                       blob_file));
  LOG(INFO) << "Done reading " << new_part.name;

  TEST_AND_RETURN_FALSE(ResolveReadAfterWriteDependencies(
      old_part, new_part, partition_size, config.block_size, blob_file, aops));
  LOG(INFO) << "Done reordering " << new_part.name;
  return true;
}

}  // namespace chromeos_update_engine