/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mir_graph.h"

#include <inttypes.h>
#include <queue>
#include <unistd.h>

#include "base/bit_vector-inl.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "base/scoped_arena_containers.h"
#include "compiler_ir.h"
#include "dex_file-inl.h"
#include "dex_flags.h"
#include "dex_instruction-inl.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "dex/quick/quick_compiler.h"
#include "leb128.h"
#include "pass_driver_me_post_opt.h"
#include "stack.h"
#include "utils.h"

namespace art {

#define MAX_PATTERN_LEN 5

const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
  "Phi",
  "Copy",
  "FusedCmplFloat",
  "FusedCmpgFloat",
  "FusedCmplDouble",
  "FusedCmpgDouble",
  "FusedCmpLong",
  "Nop",
  "OpNullCheck",
  "OpRangeCheck",
  "OpDivZeroCheck",
  "Check",
  "Select",
  "ConstVector",
  "MoveVector",
  "PackedMultiply",
  "PackedAddition",
  "PackedSubtract",
  "PackedShiftLeft",
  "PackedSignedShiftRight",
  "PackedUnsignedShiftRight",
  "PackedAnd",
  "PackedOr",
  "PackedXor",
  "PackedAddReduce",
  "PackedReduce",
  "PackedSet",
  "ReserveVectorRegisters",
  "ReturnVectorRegisters",
  "MemBarrier",
  "PackedArrayGet",
  "PackedArrayPut",
  "MaddInt",
  "MsubInt",
  "MaddLong",
  "MsubLong",
};

MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
    : reg_location_(nullptr),
      block_id_map_(std::less<unsigned int>(), arena->Adapter()),
      cu_(cu),
      ssa_base_vregs_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
      ssa_subscripts_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
      vreg_to_ssa_map_(nullptr),
      ssa_last_defs_(nullptr),
      is_constant_v_(nullptr),
      constant_values_(nullptr),
      use_counts_(arena->Adapter()),
      raw_use_counts_(arena->Adapter()),
      num_reachable_blocks_(0),
      max_num_reachable_blocks_(0),
      dfs_orders_up_to_date_(false),
      domination_up_to_date_(false),
      mir_ssa_rep_up_to_date_(false),
      topological_order_up_to_date_(false),
      dfs_order_(arena->Adapter(kArenaAllocDfsPreOrder)),
      dfs_post_order_(arena->Adapter(kArenaAllocDfsPostOrder)),
      dom_post_order_traversal_(arena->Adapter(kArenaAllocDomPostOrder)),
      topological_order_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
      topological_order_loop_ends_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
      topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
      topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
      max_nested_loops_(0u),
      i_dom_list_(nullptr),
      temp_scoped_alloc_(),
      block_list_(arena->Adapter(kArenaAllocBBList)),
      try_block_addr_(nullptr),
      entry_block_(nullptr),
      exit_block_(nullptr),
      current_code_item_(nullptr),
      m_units_(arena->Adapter()),
      method_stack_(arena->Adapter()),
      current_method_(kInvalidEntry),
      current_offset_(kInvalidEntry),
      def_count_(0),
      opcode_count_(nullptr),
      num_ssa_regs_(0),
      extended_basic_blocks_(arena->Adapter()),
      method_sreg_(0),
      attributes_(METHOD_IS_LEAF),  // Start with leaf assumption, change on encountering invoke.
      checkstats_(nullptr),
      arena_(arena),
      backward_branches_(0),
      forward_branches_(0),
      num_non_special_compiler_temps_(0),
      max_available_special_compiler_temps_(1),  // We only need the method ptr as a special temp for now.
      requested_backend_temp_(false),
      compiler_temps_committed_(false),
      punt_to_interpreter_(false),
      merged_df_flags_(0u),
      ifield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
      sfield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
      method_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
      suspend_checks_in_loops_(nullptr) {
  memset(&temp_, 0, sizeof(temp_));
  use_counts_.reserve(256);
  raw_use_counts_.reserve(256);
  block_list_.reserve(100);
  try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);

  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
    // X86 requires a temp to keep track of the method address.
    // TODO: For x86_64, addressing can be done with RIP. When that is implemented,
    // this needs to be updated to reserve 0 temps for BE.
    max_available_non_special_compiler_temps_ = cu_->target64 ? 2 : 1;
    reserved_temps_for_backend_ = max_available_non_special_compiler_temps_;
  } else {
    // Other architectures do not have a known lower bound for non-special temps.
    // We allow the update of the max to happen at BE initialization stage and simply set 0 for now.
    max_available_non_special_compiler_temps_ = 0;
    reserved_temps_for_backend_ = 0;
  }
}
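
// A minimal usage sketch (illustrative; assumes a fully initialized
// CompilationUnit "cu" whose arena outlives the graph):
//   MIRGraph mir_graph(&cu, &cu.arena);
//   mir_graph.InlineMethod(code_item, access_flags, invoke_type,
//                          class_def_idx, method_idx, class_loader, dex_file);
// After InlineMethod() returns, the basic block list and CFG edges are built.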

MIRGraph::~MIRGraph() {
  STLDeleteElements(&block_list_);
  STLDeleteElements(&m_units_);
}

/*
 * Parse an instruction and return its length in 16-bit code units.
 */
int MIRGraph::ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction) {
  const Instruction* inst = Instruction::At(code_ptr);
  decoded_instruction->opcode = inst->Opcode();
  decoded_instruction->vA = inst->HasVRegA() ? inst->VRegA() : 0;
  decoded_instruction->vB = inst->HasVRegB() ? inst->VRegB() : 0;
  decoded_instruction->vB_wide = inst->HasWideVRegB() ? inst->WideVRegB() : 0;
  decoded_instruction->vC = inst->HasVRegC() ? inst->VRegC() : 0;
  if (inst->HasVarArgs()) {
    inst->GetVarArgs(decoded_instruction->arg);
  }
  return inst->SizeInCodeUnits();
}
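
// Usage sketch (mirrors the parse loop in InlineMethod() below): callers decode
// one instruction at a time and advance by the returned width:
//   int width = ParseInsn(code_ptr, &insn->dalvikInsn);
//   code_ptr += width;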

/* Split an existing block from the specified code offset into two */
BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
                                 BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
  DCHECK_GT(code_offset, orig_block->start_offset);
  MIR* insn = orig_block->first_mir_insn;
  MIR* prev = nullptr;  // Will be set to instruction before split.
  while (insn) {
    if (insn->offset == code_offset) break;
    prev = insn;
    insn = insn->next;
  }
  if (insn == nullptr) {
    LOG(FATAL) << "Break split failed";
  }
  // Now insn is at the instruction where we want to split; that is,
  // insn will be the first instruction of the "bottom" block, and
  // prev will be the last instruction of the "top" block.

  BasicBlock* bottom_block = CreateNewBB(kDalvikByteCode);

  bottom_block->start_offset = code_offset;
  bottom_block->first_mir_insn = insn;
  bottom_block->last_mir_insn = orig_block->last_mir_insn;

  /* If this block was terminated by a return, conditional branch or throw,
   * the flag needs to go with the bottom block.
   */
  bottom_block->terminated_by_return = orig_block->terminated_by_return;
  orig_block->terminated_by_return = false;

  bottom_block->conditional_branch = orig_block->conditional_branch;
  orig_block->conditional_branch = false;

  bottom_block->explicit_throw = orig_block->explicit_throw;
  orig_block->explicit_throw = false;

  /* Handle the taken path */
  bottom_block->taken = orig_block->taken;
  if (bottom_block->taken != NullBasicBlockId) {
    orig_block->taken = NullBasicBlockId;
    BasicBlock* bb_taken = GetBasicBlock(bottom_block->taken);
    bb_taken->ErasePredecessor(orig_block->id);
    bb_taken->predecessors.push_back(bottom_block->id);
  }

  /* Handle the fallthrough path */
  bottom_block->fall_through = orig_block->fall_through;
  orig_block->fall_through = bottom_block->id;
  bottom_block->predecessors.push_back(orig_block->id);
  if (bottom_block->fall_through != NullBasicBlockId) {
    BasicBlock* bb_fall_through = GetBasicBlock(bottom_block->fall_through);
    bb_fall_through->ErasePredecessor(orig_block->id);
    bb_fall_through->predecessors.push_back(bottom_block->id);
  }

  /* Handle the successor list */
  if (orig_block->successor_block_list_type != kNotUsed) {
    bottom_block->successor_block_list_type = orig_block->successor_block_list_type;
    bottom_block->successor_blocks.swap(orig_block->successor_blocks);
    orig_block->successor_block_list_type = kNotUsed;
    DCHECK(orig_block->successor_blocks.empty());  // Empty after the swap() above.
    for (SuccessorBlockInfo* successor_block_info : bottom_block->successor_blocks) {
      BasicBlock* bb = GetBasicBlock(successor_block_info->block);
      if (bb != nullptr) {
        bb->ErasePredecessor(orig_block->id);
        bb->predecessors.push_back(bottom_block->id);
      }
    }
  }

  orig_block->last_mir_insn = prev;
  prev->next = nullptr;

  /*
   * Update the immediate predecessor block pointer so that outgoing edges
   * can be applied to the proper block.
   */
  if (immed_pred_block_p) {
    DCHECK_EQ(*immed_pred_block_p, orig_block);
    *immed_pred_block_p = bottom_block;
  }

  // Associate dex instructions in the bottom block with the new container.
  DCHECK(insn != nullptr);
  DCHECK(insn != orig_block->first_mir_insn);
  DCHECK(insn == bottom_block->first_mir_insn);
  DCHECK_EQ(insn->offset, bottom_block->start_offset);
  // Scan the "bottom" instructions, remapping them to the
  // newly created "bottom" block.
  MIR* p = insn;
  p->bb = bottom_block->id;
  while (p != bottom_block->last_mir_insn) {
    p = p->next;
    DCHECK(p != nullptr);
    p->bb = bottom_block->id;
  }

  return bottom_block;
}
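
// Illustrative picture of the split (offsets hypothetical): splitting
//   orig_block [0x00 .. 0x20] at code_offset 0x10
// yields
//   orig_block [0x00 .. 0x0e] --fall_through--> bottom_block [0x10 .. 0x20]
// with the taken/fall-through/successor edges and the terminating flags moved
// to bottom_block, and the old targets' predecessor lists updated accordingly.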

/*
 * Given a code offset, find out the block that starts with it. If the offset
 * is in the middle of an existing block, split it into two. If immed_pred_block_p
 * is non-null and *immed_pred_block_p is the block being split, update
 * *immed_pred_block_p to point to the bottom block so that outgoing edges can
 * be set up properly (by the caller).
 * Utilizes a map for fast lookup of the typical cases.
 */
BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
                                BasicBlock** immed_pred_block_p,
                                ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
  if (UNLIKELY(code_offset >= current_code_item_->insns_size_in_code_units_)) {
    // There can be a fall-through out of the method code. We shall record such a block
    // here (assuming create==true) and check that it's dead at the end of InlineMethod().
    // Though we're only aware of the cases where code_offset is exactly the same as
    // insns_size_in_code_units_, treat greater code_offset the same just in case.
    code_offset = current_code_item_->insns_size_in_code_units_;
  }

  int block_id = (*dex_pc_to_block_map)[code_offset];
  BasicBlock* bb = GetBasicBlock(block_id);

  if ((bb != nullptr) && (bb->start_offset == code_offset)) {
    // Does this containing block start with the desired instruction?
    return bb;
  }

  // No direct hit.
  if (!create) {
    return nullptr;
  }

  if (bb != nullptr) {
    // The target exists somewhere in an existing block.
    BasicBlock* bottom_block = SplitBlock(code_offset, bb,
                                          bb == *immed_pred_block_p ? immed_pred_block_p : nullptr);
    DCHECK(bottom_block != nullptr);
    MIR* p = bottom_block->first_mir_insn;
    BasicBlock* orig_block = bb;
    DCHECK_EQ((*dex_pc_to_block_map)[p->offset], orig_block->id);
    // Scan the "bottom" instructions, remapping them to the
    // newly created "bottom" block.
    (*dex_pc_to_block_map)[p->offset] = bottom_block->id;
    while (p != bottom_block->last_mir_insn) {
      p = p->next;
      DCHECK(p != nullptr);
      int opcode = p->dalvikInsn.opcode;
      /*
       * Some messiness here to ensure that we only enter real opcodes and only the
       * first half of a potentially throwing instruction that has been split into
       * CHECK and work portions. Since the 2nd half of a split operation is always
       * the first in a BasicBlock, we can't hit it here.
       */
      if ((opcode == kMirOpCheck) || !MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
        BasicBlockId mapped_id = (*dex_pc_to_block_map)[p->offset];
        // At first glance the instructions should all be mapped to orig_block.
        // However, multiple instructions may correspond to the same dex pc, so an earlier
        // instruction may have already moved the mapping for that dex pc to bottom_block.
        DCHECK((mapped_id == orig_block->id) || (mapped_id == bottom_block->id));
        (*dex_pc_to_block_map)[p->offset] = bottom_block->id;
      }
    }
    return bottom_block;
  }

  // Create a new block.
  bb = CreateNewBB(kDalvikByteCode);
  bb->start_offset = code_offset;
  (*dex_pc_to_block_map)[bb->start_offset] = bb->id;
  return bb;
}

/* Identify code range in try blocks and set up the empty catch blocks */
void MIRGraph::ProcessTryCatchBlocks(ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
  int tries_size = current_code_item_->tries_size_;
  DexOffset offset;

  if (tries_size == 0) {
    return;
  }

  for (int i = 0; i < tries_size; i++) {
    const DexFile::TryItem* pTry =
        DexFile::GetTryItems(*current_code_item_, i);
    DexOffset start_offset = pTry->start_addr_;
    DexOffset end_offset = start_offset + pTry->insn_count_;
    for (offset = start_offset; offset < end_offset; offset++) {
      try_block_addr_->SetBit(offset);
    }
  }

  // Iterate over each of the handlers to enqueue the empty Catch blocks.
  const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
  uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
  for (uint32_t idx = 0; idx < handlers_size; idx++) {
    CatchHandlerIterator iterator(handlers_ptr);
    for (; iterator.HasNext(); iterator.Next()) {
      uint32_t address = iterator.GetHandlerAddress();
      FindBlock(address, /* create */ true, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map);
    }
    handlers_ptr = iterator.EndDataPointer();
  }
}

bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
                                     NarrowDexOffset catch_offset) {
  // Catches for monitor-exit during stack unwinding have the pattern
  //   move-exception (move)* (goto)? monitor-exit throw
  // In the currently generated dex bytecode we see these catching a bytecode range including
  // either its own or an identical monitor-exit, http://b/15745363 . This function checks if
  // it's the case for a given monitor-exit and catch block so that we can ignore it.
  // (We don't want to ignore all monitor-exit catches since one could enclose a synchronized
  // block in a try-block and catch the NPE, Error or Throwable and we should let it through;
  // even though a throwing monitor-exit certainly indicates a bytecode error.)
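  //
  // A hypothetical handler matching the pattern (registers illustrative):
  //   move-exception v2
  //   move v0, v5
  //   monitor-exit v1    <-- same lock register as the guarded monitor-exit
  //   throw v2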
  const Instruction* monitor_exit =
      Instruction::At(current_code_item_->insns_ + monitor_exit_offset);
  DCHECK(monitor_exit->Opcode() == Instruction::MONITOR_EXIT);
  int monitor_reg = monitor_exit->VRegA_11x();
  const Instruction* check_insn = Instruction::At(current_code_item_->insns_ + catch_offset);
  if (check_insn->Opcode() == Instruction::MOVE_EXCEPTION) {
    if (check_insn->VRegA_11x() == monitor_reg) {
      // Unexpected move-exception to the same register. Probably not the pattern we're looking for.
      return false;
    }
    check_insn = check_insn->Next();
  }
  while (true) {
    int dest = -1;
    bool wide = false;
    switch (check_insn->Opcode()) {
      case Instruction::MOVE_WIDE:
        wide = true;
        FALLTHROUGH_INTENDED;
      case Instruction::MOVE_OBJECT:
      case Instruction::MOVE:
        dest = check_insn->VRegA_12x();
        break;

      case Instruction::MOVE_WIDE_FROM16:
        wide = true;
        FALLTHROUGH_INTENDED;
      case Instruction::MOVE_OBJECT_FROM16:
      case Instruction::MOVE_FROM16:
        dest = check_insn->VRegA_22x();
        break;

      case Instruction::MOVE_WIDE_16:
        wide = true;
        FALLTHROUGH_INTENDED;
      case Instruction::MOVE_OBJECT_16:
      case Instruction::MOVE_16:
        dest = check_insn->VRegA_32x();
        break;

      case Instruction::GOTO:
      case Instruction::GOTO_16:
      case Instruction::GOTO_32:
        check_insn = check_insn->RelativeAt(check_insn->GetTargetOffset());
        FALLTHROUGH_INTENDED;
      default:
        return check_insn->Opcode() == Instruction::MONITOR_EXIT &&
            check_insn->VRegA_11x() == monitor_reg;
    }

    if (dest == monitor_reg || (wide && dest + 1 == monitor_reg)) {
      return false;
    }

    check_insn = check_insn->Next();
  }
}

/* Process instructions with the kBranch flag */
BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                       int width, int flags, const uint16_t* code_ptr,
                                       const uint16_t* code_end,
                                       ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
  DexOffset target = cur_offset;
  switch (insn->dalvikInsn.opcode) {
    case Instruction::GOTO:
    case Instruction::GOTO_16:
    case Instruction::GOTO_32:
      target += insn->dalvikInsn.vA;
      break;
    case Instruction::IF_EQ:
    case Instruction::IF_NE:
    case Instruction::IF_LT:
    case Instruction::IF_GE:
    case Instruction::IF_GT:
    case Instruction::IF_LE:
      cur_block->conditional_branch = true;
      target += insn->dalvikInsn.vC;
      break;
    case Instruction::IF_EQZ:
    case Instruction::IF_NEZ:
    case Instruction::IF_LTZ:
    case Instruction::IF_GEZ:
    case Instruction::IF_GTZ:
    case Instruction::IF_LEZ:
      cur_block->conditional_branch = true;
      target += insn->dalvikInsn.vB;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
  }
  CountBranch(target);
  BasicBlock* taken_block = FindBlock(target, /* create */ true,
                                      /* immed_pred_block_p */ &cur_block,
                                      dex_pc_to_block_map);
  DCHECK(taken_block != nullptr);
  cur_block->taken = taken_block->id;
  taken_block->predecessors.push_back(cur_block->id);

  /* Always terminate the current block for conditional branches */
  if (flags & Instruction::kContinue) {
    BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
                                              /* immed_pred_block_p */ &cur_block,
                                              dex_pc_to_block_map);
    DCHECK(fallthrough_block != nullptr);
    cur_block->fall_through = fallthrough_block->id;
    fallthrough_block->predecessors.push_back(cur_block->id);
  } else if (code_ptr < code_end) {
    FindBlock(cur_offset + width, /* create */ true,
              /* immed_pred_block_p */ nullptr, dex_pc_to_block_map);
  }
  return cur_block;
}

/* Process instructions with the kSwitch flag */
BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                       int width, int flags,
                                       ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
  UNUSED(flags);
  const uint16_t* switch_data =
      reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset +
          static_cast<int32_t>(insn->dalvikInsn.vB));
  int size;
  const int* keyTable;
  const int* target_table;
  int i;
  int first_key;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
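  /*
   * Worked example (illustrative): a packed switch over cases 10, 11, 12 is
   * laid out as the code units
   *   0x0100, 0x0003, 0x000a, 0x0000, t0_lo, t0_hi, t1_lo, t1_hi, t2_lo, t2_hi
   * i.e. ident, size = 3, first_key = 10 (two 16-bit halves, low first),
   * followed by three 32-bit branch targets relative to the switch opcode.
   */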
  if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
    DCHECK_EQ(static_cast<int>(switch_data[0]),
              static_cast<int>(Instruction::kPackedSwitchSignature));
    size = switch_data[1];
    first_key = switch_data[2] | (switch_data[3] << 16);
    target_table = reinterpret_cast<const int*>(&switch_data[4]);
    keyTable = nullptr;        // Make the compiler happy.
  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
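  /*
   * Worked example (illustrative): a sparse switch on keys {-1, 100} has
   * size = 2, keyTable pointing at the two sorted 32-bit keys (4 code units
   * starting at index 2), and target_table starting 2 + size * 2 = 6 code
   * units into the payload.
   */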
  } else {
    DCHECK_EQ(static_cast<int>(switch_data[0]),
              static_cast<int>(Instruction::kSparseSwitchSignature));
    size = switch_data[1];
    keyTable = reinterpret_cast<const int*>(&switch_data[2]);
    target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]);
    first_key = 0;   // To make the compiler happy.
  }

  if (cur_block->successor_block_list_type != kNotUsed) {
    LOG(FATAL) << "Successor block list already in use: "
               << static_cast<int>(cur_block->successor_block_list_type);
  }
  cur_block->successor_block_list_type =
      (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ? kPackedSwitch : kSparseSwitch;
  cur_block->successor_blocks.reserve(size);

  for (i = 0; i < size; i++) {
    BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* create */ true,
                                       /* immed_pred_block_p */ &cur_block,
                                       dex_pc_to_block_map);
    DCHECK(case_block != nullptr);
    SuccessorBlockInfo* successor_block_info =
        static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
                                                       kArenaAllocSuccessor));
    successor_block_info->block = case_block->id;
    successor_block_info->key =
        (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
        first_key + i : keyTable[i];
    cur_block->successor_blocks.push_back(successor_block_info);
    case_block->predecessors.push_back(cur_block->id);
  }

  /* Fall-through case */
  BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
                                            /* immed_pred_block_p */ nullptr,
                                            dex_pc_to_block_map);
  DCHECK(fallthrough_block != nullptr);
  cur_block->fall_through = fallthrough_block->id;
  fallthrough_block->predecessors.push_back(cur_block->id);
  return cur_block;
}

/* Process instructions with the kThrow flag */
BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
                                      int width, int flags, ArenaBitVector* try_block_addr,
                                      const uint16_t* code_ptr, const uint16_t* code_end,
                                      ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
  UNUSED(flags);
  bool in_try_block = try_block_addr->IsBitSet(cur_offset);
  bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);

  /* In try block */
  if (in_try_block) {
    CatchHandlerIterator iterator(*current_code_item_, cur_offset);

    if (cur_block->successor_block_list_type != kNotUsed) {
      LOG(INFO) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      LOG(FATAL) << "Successor block list already in use: "
                 << static_cast<int>(cur_block->successor_block_list_type);
    }

    for (; iterator.HasNext(); iterator.Next()) {
      BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* create */,
                                          nullptr /* immed_pred_block_p */,
                                          dex_pc_to_block_map);
      if (insn->dalvikInsn.opcode == Instruction::MONITOR_EXIT &&
          IsBadMonitorExitCatch(insn->offset, catch_block->start_offset)) {
        // Don't allow monitor-exit to catch its own exception, http://b/15745363 .
        continue;
      }
      if (cur_block->successor_block_list_type == kNotUsed) {
        cur_block->successor_block_list_type = kCatch;
      }
      catch_block->catch_entry = true;
      if (kIsDebugBuild) {
        catches_.insert(catch_block->start_offset);
      }
      SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
          (arena_->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
      successor_block_info->block = catch_block->id;
      successor_block_info->key = iterator.GetHandlerTypeIndex();
      cur_block->successor_blocks.push_back(successor_block_info);
      catch_block->predecessors.push_back(cur_block->id);
    }
    in_try_block = (cur_block->successor_block_list_type != kNotUsed);
  }
  bool build_all_edges =
      (cu_->disable_opt & (1 << kSuppressExceptionEdges)) || is_throw || in_try_block;
  if (!in_try_block && build_all_edges) {
    BasicBlock* eh_block = CreateNewBB(kExceptionHandling);
    cur_block->taken = eh_block->id;
    eh_block->start_offset = cur_offset;
    eh_block->predecessors.push_back(cur_block->id);
  }

  if (is_throw) {
    cur_block->explicit_throw = true;
    if (code_ptr < code_end) {
      // Force creation of new block following THROW via side-effect.
      FindBlock(cur_offset + width, /* create */ true,
                /* immed_pred_block_p */ nullptr, dex_pc_to_block_map);
    }
    if (!in_try_block) {
      // Don't split a THROW that can't rethrow - we're done.
      return cur_block;
    }
  }

  if (!build_all_edges) {
    /*
     * Even though there is an exception edge here, control cannot return to this
     * method.  Thus, for the purposes of dataflow analysis and optimization, we can
     * ignore the edge.  Doing this reduces compile time, and increases the scope
     * of the basic-block level optimization pass.
     */
    return cur_block;
  }

  /*
   * Split the potentially-throwing instruction into two parts.
   * The first half will be a pseudo-op that captures the exception
   * edges and terminates the basic block.  It always falls through.
   * Then, create a new basic block that begins with the throwing instruction
   * (minus exceptions).  Note: this new basic block must NOT be entered into
   * the block_map.  If the potentially-throwing instruction is the target of a
   * future branch, we need to find the check pseudo half.  The new
   * basic block containing the work portion of the instruction should
   * only be entered via fallthrough from the block containing the
   * pseudo exception edge MIR.  Note also that this new block is
   * not automatically terminated after the work portion, and may
   * contain following instructions.
   *
   * Note also that the dex_pc_to_block_map entry for the potentially
   * throwing instruction will refer to the original basic block.
   */
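  /*
   * Illustrative result (hypothetical dex pc and instruction): for a throwing
   *   0x0010: iget v0, v1, Foo.bar
   * the current block ends with
   *   0x0010: kMirOpCheck        (owns the exception edges, always falls through)
   * and the new block begins with
   *   0x0010: iget v0, v1, ...   (the work portion, at the same dex pc)
   */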
  BasicBlock* new_block = CreateNewBB(kDalvikByteCode);
  new_block->start_offset = insn->offset;
  cur_block->fall_through = new_block->id;
  new_block->predecessors.push_back(cur_block->id);
  MIR* new_insn = NewMIR();
  *new_insn = *insn;
  insn->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheck);
  // Associate the two halves.
  insn->meta.throw_insn = new_insn;
  new_block->AppendMIR(new_insn);
  return new_block;
}

/* Parse a Dex method and insert it into the MIRGraph at the current insert point. */
void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
                            InvokeType invoke_type ATTRIBUTE_UNUSED, uint16_t class_def_idx,
                            uint32_t method_idx, jobject class_loader, const DexFile& dex_file) {
  current_code_item_ = code_item;
  method_stack_.push_back(std::make_pair(current_method_, current_offset_));
  current_method_ = m_units_.size();
  current_offset_ = 0;
  // TODO: will need to snapshot stack image and use that as the mir context identification.
  m_units_.push_back(new (arena_) DexCompilationUnit(
      cu_, class_loader, Runtime::Current()->GetClassLinker(), dex_file,
      current_code_item_, class_def_idx, method_idx, access_flags,
      cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
  const uint16_t* code_ptr = current_code_item_->insns_;
  const uint16_t* code_end =
      current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;

  // TODO: need to rework expansion of block list & try_block_addr when inlining activated.
  // TUNING: use better estimate of basic blocks for following resize.
  block_list_.reserve(block_list_.size() + current_code_item_->insns_size_in_code_units_);
  // FindBlock lookup cache.
  ScopedArenaAllocator allocator(&cu_->arena_stack);
  ScopedArenaVector<uint16_t> dex_pc_to_block_map(allocator.Adapter());
  dex_pc_to_block_map.resize(current_code_item_->insns_size_in_code_units_ +
                             1 /* Fall-through on last insn; dead or punt to interpreter. */);

  // TODO: replace with explicit resize routine.  Using automatic extension side effect for now.
  try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
  try_block_addr_->ClearBit(current_code_item_->insns_size_in_code_units_);

  // If this is the first method, set up default entry and exit blocks.
  if (current_method_ == 0) {
    DCHECK(entry_block_ == nullptr);
    DCHECK(exit_block_ == nullptr);
    DCHECK_EQ(GetNumBlocks(), 0U);
    // Use id 0 to represent a null block.
    BasicBlock* null_block = CreateNewBB(kNullBlock);
    DCHECK_EQ(null_block->id, NullBasicBlockId);
    null_block->hidden = true;
    entry_block_ = CreateNewBB(kEntryBlock);
    exit_block_ = CreateNewBB(kExitBlock);
  } else {
    UNIMPLEMENTED(FATAL) << "Nested inlining not implemented.";
    /*
     * Will need to manage storage for ins & outs, push previous state and update
     * insert point.
     */
  }

  /* Current block to record parsed instructions */
  BasicBlock* cur_block = CreateNewBB(kDalvikByteCode);
  DCHECK_EQ(current_offset_, 0U);
  cur_block->start_offset = current_offset_;
  // TODO: for inlining support, insert at the insert point rather than entry block.
  entry_block_->fall_through = cur_block->id;
  cur_block->predecessors.push_back(entry_block_->id);

  /* Identify code range in try blocks and set up the empty catch blocks */
  ProcessTryCatchBlocks(&dex_pc_to_block_map);

  uint64_t merged_df_flags = 0u;

  /* Parse all instructions and put them into containing basic blocks */
  while (code_ptr < code_end) {
    MIR* insn = NewMIR();
    insn->offset = current_offset_;
    insn->m_unit_index = current_method_;
    int width = ParseInsn(code_ptr, &insn->dalvikInsn);
    Instruction::Code opcode = insn->dalvikInsn.opcode;
    if (opcode_count_ != nullptr) {
      opcode_count_[static_cast<int>(opcode)]++;
    }

    int flags = insn->dalvikInsn.FlagsOf();
    int verify_flags = Instruction::VerifyFlagsOf(insn->dalvikInsn.opcode);

    uint64_t df_flags = GetDataFlowAttributes(insn);
    merged_df_flags |= df_flags;

    if (df_flags & DF_HAS_DEFS) {
      def_count_ += (df_flags & DF_A_WIDE) ? 2 : 1;
    }

    if (df_flags & DF_LVN) {
      cur_block->use_lvn = true;  // Run local value numbering on this basic block.
    }

    // Check for inline data block signatures.
    if (opcode == Instruction::NOP) {
      // A simple NOP will have a width of 1 at this point, embedded data NOP > 1.
      if ((width == 1) && ((current_offset_ & 0x1) == 0x1) && ((code_end - code_ptr) > 1)) {
        // Could be an aligning nop.  If an embedded data NOP follows, treat pair as single unit.
        uint16_t following_raw_instruction = code_ptr[1];
        if ((following_raw_instruction == Instruction::kSparseSwitchSignature) ||
            (following_raw_instruction == Instruction::kPackedSwitchSignature) ||
            (following_raw_instruction == Instruction::kArrayDataSignature)) {
          width += Instruction::At(code_ptr + 1)->SizeInCodeUnits();
        }
      }
      if (width == 1) {
        // It is a simple nop - treat normally.
        cur_block->AppendMIR(insn);
      } else {
        DCHECK(cur_block->fall_through == NullBasicBlockId);
        DCHECK(cur_block->taken == NullBasicBlockId);
        // Unreachable instruction, mark for no continuation and end basic block.
        flags &= ~Instruction::kContinue;
        FindBlock(current_offset_ + width, /* create */ true,
                  /* immed_pred_block_p */ nullptr, &dex_pc_to_block_map);
      }
    } else {
      cur_block->AppendMIR(insn);
    }

    // Associate the starting dex_pc for this opcode with its containing basic block.
    dex_pc_to_block_map[insn->offset] = cur_block->id;

    code_ptr += width;

    if (flags & Instruction::kBranch) {
      cur_block = ProcessCanBranch(cur_block, insn, current_offset_,
                                   width, flags, code_ptr, code_end, &dex_pc_to_block_map);
    } else if (flags & Instruction::kReturn) {
      cur_block->terminated_by_return = true;
      cur_block->fall_through = exit_block_->id;
      exit_block_->predecessors.push_back(cur_block->id);
      /*
       * Terminate the current block if there are instructions
       * afterwards.
       */
      if (code_ptr < code_end) {
        /*
         * Create a fallthrough block for real instructions
         * (incl. NOP).
         */
        FindBlock(current_offset_ + width, /* create */ true,
                  /* immed_pred_block_p */ nullptr, &dex_pc_to_block_map);
      }
    } else if (flags & Instruction::kThrow) {
      cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
                                  code_ptr, code_end, &dex_pc_to_block_map);
    } else if (flags & Instruction::kSwitch) {
      cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width,
                                   flags, &dex_pc_to_block_map);
    }
    if (verify_flags & Instruction::kVerifyVarArgRange ||
        verify_flags & Instruction::kVerifyVarArgRangeNonZero) {
      /*
       * The Quick backend's runtime model includes a gap between a method's
       * argument ("in") vregs and the rest of its vregs.  Handling a range instruction
       * which spans the gap is somewhat complicated, and should not happen
       * in normal usage of dx.  Punt to the interpreter.
       */
      int first_reg_in_range = insn->dalvikInsn.vC;
      int last_reg_in_range = first_reg_in_range + insn->dalvikInsn.vA - 1;
      if (IsInVReg(first_reg_in_range) != IsInVReg(last_reg_in_range)) {
        punt_to_interpreter_ = true;
      }
    }
    current_offset_ += width;
    BasicBlock* next_block = FindBlock(current_offset_, /* create */ false,
                                       /* immed_pred_block_p */ nullptr,
                                       &dex_pc_to_block_map);
    if (next_block) {
      /*
       * The next instruction could be the target of a previously parsed
       * forward branch so a block is already created. If the current
       * instruction is not an unconditional branch, connect them through
       * the fall-through link.
       */
      DCHECK(cur_block->fall_through == NullBasicBlockId ||
             GetBasicBlock(cur_block->fall_through) == next_block ||
             GetBasicBlock(cur_block->fall_through) == exit_block_);

      if ((cur_block->fall_through == NullBasicBlockId) && (flags & Instruction::kContinue)) {
        cur_block->fall_through = next_block->id;
        next_block->predecessors.push_back(cur_block->id);
      }
      cur_block = next_block;
    }
  }
  merged_df_flags_ = merged_df_flags;

  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
    DumpCFG("/sdcard/1_post_parse_cfg/", true);
  }

  if (cu_->verbose) {
    DumpMIRGraph();
  }

  // Check if there's been a fall-through out of the method code.
  BasicBlockId out_bb_id = dex_pc_to_block_map[current_code_item_->insns_size_in_code_units_];
  if (UNLIKELY(out_bb_id != NullBasicBlockId)) {
    // Eagerly calculate DFS order to determine if the block is dead.
    DCHECK(!DfsOrdersUpToDate());
    ComputeDFSOrders();
    BasicBlock* out_bb = GetBasicBlock(out_bb_id);
    DCHECK(out_bb != nullptr);
    if (out_bb->block_type != kDead) {
      LOG(WARNING) << "Live fall-through out of method in " << PrettyMethod(method_idx, dex_file);
      SetPuntToInterpreter(true);
    }
  }
}

void MIRGraph::ShowOpcodeStats() {
  DCHECK(opcode_count_ != nullptr);
  LOG(INFO) << "Opcode Count";
  for (int i = 0; i < kNumPackedOpcodes; i++) {
    if (opcode_count_[i] != 0) {
      LOG(INFO) << "-C- " << Instruction::Name(static_cast<Instruction::Code>(i))
                << " " << opcode_count_[i];
    }
  }
}

uint64_t MIRGraph::GetDataFlowAttributes(Instruction::Code opcode) {
  DCHECK_LT((size_t) opcode,
            (sizeof(oat_data_flow_attributes_) / sizeof(oat_data_flow_attributes_[0])));
  return oat_data_flow_attributes_[opcode];
}

uint64_t MIRGraph::GetDataFlowAttributes(MIR* mir) {
  DCHECK(mir != nullptr);
  Instruction::Code opcode = mir->dalvikInsn.opcode;
  return GetDataFlowAttributes(opcode);
}

// The path can easily surpass FS limits because of parameters etc. Use pathconf to get FS
// restrictions here. Note that a successful invocation will return an actual value. If the path
// is too long for some reason, the return will be ENAMETOOLONG. Then cut off part of the name.
//
// It's possible the path is not valid, or that some other error occurs. In that case return false.
static bool CreateDumpFile(std::string& fname, const char* dir_prefix, NarrowDexOffset start_offset,
                           const char* suffix, int nr, std::string* output) {
  std::string dir = StringPrintf("./%s", dir_prefix);
  int64_t max_name_length = pathconf(dir.c_str(), _PC_NAME_MAX);
  if (max_name_length <= 0) {
    PLOG(ERROR) << "Could not get file name restrictions for " << dir;
    return false;
  }

  std::string name = StringPrintf("%s%x%s_%d.dot", fname.c_str(), start_offset,
                                  suffix == nullptr ? "" : suffix, nr);
  if (static_cast<int64_t>(name.size()) > max_name_length) {
    // Truncate the name, keeping the "_<nr>.dot" tail so the file stays unique.
    std::string suffix_str = StringPrintf("_%d.dot", nr);
    name = name.substr(0, static_cast<size_t>(max_name_length) - suffix_str.size()) + suffix_str;
  }
  // Sanity check.
  DCHECK_LE(name.size(), static_cast<size_t>(max_name_length));

  *output = StringPrintf("%s%s", dir_prefix, name.c_str());
  return true;
}
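
// E.g. (illustrative): for a method whose sanitized pretty name is
// "void_Foo.bar__", entry offset 0, no suffix, and counter value 1, the
// resulting path would be "<dir_prefix>void_Foo.bar__0_1.dot", assuming the
// name fits within the filesystem's _PC_NAME_MAX limit.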

// TODO: use a configurable base prefix, and adjust callers to supply pass name.
/* Dump the CFG into a DOT graph */
void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix) {
  FILE* file;
  static AtomicInteger cnt(0);

  // Increment counter to get a unique file number.
  cnt++;
  int nr = cnt.LoadRelaxed();

  std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
  ReplaceSpecialChars(fname);
  std::string fpath;
  if (!CreateDumpFile(fname, dir_prefix, GetBasicBlock(GetEntryBlock()->fall_through)->start_offset,
                      suffix, nr, &fpath)) {
    LOG(ERROR) << "Could not create dump file name for " << fname;
    return;
  }
  file = fopen(fpath.c_str(), "w");
  if (file == nullptr) {
    PLOG(ERROR) << "Could not open " << fpath << " for DumpCFG.";
    return;
  }
  fprintf(file, "digraph G {\n");

  fprintf(file, "  rankdir=TB\n");

  int num_blocks = all_blocks ? GetNumBlocks() : num_reachable_blocks_;
  int idx;

  for (idx = 0; idx < num_blocks; idx++) {
    int block_idx = all_blocks ? idx : dfs_order_[idx];
    BasicBlock* bb = GetBasicBlock(block_idx);
    if (bb == nullptr) continue;
    if (bb->block_type == kDead) continue;
    if (bb->hidden) continue;
    if (bb->block_type == kEntryBlock) {
      fprintf(file, "  entry_%d [shape=Mdiamond];\n", bb->id);
    } else if (bb->block_type == kExitBlock) {
      fprintf(file, "  exit_%d [shape=Mdiamond];\n", bb->id);
    } else if (bb->block_type == kDalvikByteCode) {
      fprintf(file, "  block%04x_%d [shape=record,label = \"{ \\\n",
              bb->start_offset, bb->id);
      const MIR* mir;
      fprintf(file, "    {block id %d\\l}%s\\\n", bb->id,
              bb->first_mir_insn ? " | " : " ");
      for (mir = bb->first_mir_insn; mir; mir = mir->next) {
        int opcode = mir->dalvikInsn.opcode;
        fprintf(file, "    {%04x %s %s %s %s %s %s %s %s %s\\l}%s\\\n", mir->offset,
                mir->ssa_rep ? GetDalvikDisassembly(mir) :
                !MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
                    Instruction::Name(mir->dalvikInsn.opcode) :
                    extended_mir_op_names_[opcode - kMirOpFirst],
                (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
                (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
                (mir->optimization_flags & MIR_IGNORE_SUSPEND_CHECK) != 0 ? " no_suspendcheck" : " ",
                (mir->optimization_flags & MIR_STORE_NON_TEMPORAL) != 0 ? " non_temporal" : " ",
                (mir->optimization_flags & MIR_CALLEE) != 0 ? " inlined" : " ",
                (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0 ? " cl_inited" : " ",
                (mir->optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0 ? " cl_in_cache" : " ",
                (mir->optimization_flags & MIR_IGNORE_DIV_ZERO_CHECK) != 0 ? " no_div_check" : " ",
                mir->next ? " | " : " ");
      }
      fprintf(file, "  }\"];\n\n");
    } else if (bb->block_type == kExceptionHandling) {
      char block_name[BLOCK_NAME_LEN];

      GetBlockName(bb, block_name);
      fprintf(file, "  %s [shape=invhouse];\n", block_name);
    }

    char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];

    if (bb->taken != NullBasicBlockId) {
      GetBlockName(bb, block_name1);
      GetBlockName(GetBasicBlock(bb->taken), block_name2);
      fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
              block_name1, block_name2);
    }
    if (bb->fall_through != NullBasicBlockId) {
      GetBlockName(bb, block_name1);
      GetBlockName(GetBasicBlock(bb->fall_through), block_name2);
      fprintf(file, "  %s:s -> %s:n\n", block_name1, block_name2);
    }

    if (bb->successor_block_list_type != kNotUsed) {
      fprintf(file, "  succ%04x_%d [shape=%s,label = \"{ \\\n",
              bb->start_offset, bb->id,
              (bb->successor_block_list_type == kCatch) ? "Mrecord" : "record");

      int last_succ_id = static_cast<int>(bb->successor_blocks.size() - 1u);
      int succ_id = 0;
      for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
        BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);
        fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
                succ_id,
                successor_block_info->key,
                dest_block->start_offset,
                (succ_id != last_succ_id) ? " | " : " ");
        ++succ_id;
      }
      fprintf(file, "  }\"];\n\n");

      GetBlockName(bb, block_name1);
      fprintf(file, "  %s:s -> succ%04x_%d:n [style=dashed]\n",
              block_name1, bb->start_offset, bb->id);

      // Link the successor pseudo-block with all of its potential targets.
      succ_id = 0;
      for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
        BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);

        GetBlockName(dest_block, block_name2);
        fprintf(file, "  succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
                bb->id, succ_id++, block_name2);
      }
    }
    fprintf(file, "\n");

    if (cu_->verbose) {
      /* Display the dominator tree */
      GetBlockName(bb, block_name1);
      fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
              block_name1, block_name1);
      if (bb->i_dom) {
        GetBlockName(GetBasicBlock(bb->i_dom), block_name2);
        fprintf(file, "  cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
      }
    }
  }
  fprintf(file, "}\n");
  fclose(file);
}

/* Append an MIR instruction at the end of a basic block. */
void BasicBlock::AppendMIR(MIR* mir) {
  // Insert it after the last MIR.
  InsertMIRListAfter(last_mir_insn, mir, mir);
}

void BasicBlock::AppendMIRList(MIR* first_list_mir, MIR* last_list_mir) {
  // Insert it after the last MIR.
  InsertMIRListAfter(last_mir_insn, first_list_mir, last_list_mir);
}

void BasicBlock::AppendMIRList(const std::vector<MIR*>& insns) {
  for (std::vector<MIR*>::const_iterator it = insns.begin(); it != insns.end(); it++) {
    MIR* new_mir = *it;

    // Append each MIR in order; no copy is made, the MIR itself is linked in.
    InsertMIRListAfter(last_mir_insn, new_mir, new_mir);
  }
}

/* Insert a MIR instruction after the specified MIR. */
void BasicBlock::InsertMIRAfter(MIR* current_mir, MIR* new_mir) {
  InsertMIRListAfter(current_mir, new_mir, new_mir);
}

void BasicBlock::InsertMIRListAfter(MIR* insert_after, MIR* first_list_mir, MIR* last_list_mir) {
  // If no MIR, we are done.
  if (first_list_mir == nullptr || last_list_mir == nullptr) {
    return;
  }

  // If insert_after is null, assume BB is empty.
  if (insert_after == nullptr) {
    first_mir_insn = first_list_mir;
    last_mir_insn = last_list_mir;
    last_list_mir->next = nullptr;
  } else {
    MIR* after_list = insert_after->next;
    insert_after->next = first_list_mir;
    last_list_mir->next = after_list;
    if (after_list == nullptr) {
      last_mir_insn = last_list_mir;
    }
  }

  // Set this BB to be the basic block of the MIRs.
  MIR* last = last_list_mir->next;
  for (MIR* mir = first_list_mir; mir != last; mir = mir->next) {
    mir->bb = id;
  }
}
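
// Splice sketch (hypothetical MIRs): inserting a list x .. y after MIR a in a
// chain a -> b yields a -> x -> ... -> y -> b; last_mir_insn is updated only
// when a was the tail (i.e. b was null).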

/* Insert an MIR instruction at the head of a basic block. */
void BasicBlock::PrependMIR(MIR* mir) {
  InsertMIRListBefore(first_mir_insn, mir, mir);
}
   1145 
   1146 void BasicBlock::PrependMIRList(MIR* first_list_mir, MIR* last_list_mir) {
   1147   // Insert it before the first MIR.
   1148   InsertMIRListBefore(first_mir_insn, first_list_mir, last_list_mir);
   1149 }
   1150 
   1151 void BasicBlock::PrependMIRList(const std::vector<MIR*>& to_add) {
   1152   for (std::vector<MIR*>::const_iterator it = to_add.begin(); it != to_add.end(); it++) {
   1153     MIR* mir = *it;
   1154 
   1155     InsertMIRListBefore(first_mir_insn, mir, mir);
   1156   }
   1157 }
   1158 
   1159 /* Insert a MIR instruction before the specified MIR. */
   1160 void BasicBlock::InsertMIRBefore(MIR* current_mir, MIR* new_mir) {
   1161   // Insert as a single element list.
   1162   return InsertMIRListBefore(current_mir, new_mir, new_mir);
   1163 }
   1164 
   1165 MIR* BasicBlock::FindPreviousMIR(MIR* mir) {
   1166   MIR* current = first_mir_insn;
   1167 
   1168   while (current != nullptr) {
   1169     MIR* next = current->next;
   1170 
   1171     if (next == mir) {
   1172       return current;
   1173     }
   1174 
   1175     current = next;
   1176   }
   1177 
   1178   return nullptr;
   1179 }
   1180 
   1181 void BasicBlock::InsertMIRListBefore(MIR* insert_before, MIR* first_list_mir, MIR* last_list_mir) {
   1182   // If no MIR, we are done.
   1183   if (first_list_mir == nullptr || last_list_mir == nullptr) {
   1184     return;
   1185   }
   1186 
   1187   // If insert_before is null, assume BB is empty.
   1188   if (insert_before == nullptr) {
   1189     first_mir_insn = first_list_mir;
   1190     last_mir_insn = last_list_mir;
   1191     last_list_mir->next = nullptr;
   1192   } else {
   1193     if (first_mir_insn == insert_before) {
   1194       last_list_mir->next = first_mir_insn;
   1195       first_mir_insn = first_list_mir;
   1196     } else {
   1197       // Find the preceding MIR.
   1198       MIR* before_list = FindPreviousMIR(insert_before);
   1199       DCHECK(before_list != nullptr);
   1200       before_list->next = first_list_mir;
   1201       last_list_mir->next = insert_before;
   1202     }
   1203   }
   1204 
   1205   // Set this BB to be the basic block of the MIRs.
   1206   for (MIR* mir = first_list_mir; mir != last_list_mir->next; mir = mir->next) {
   1207     mir->bb = id;
   1208   }
   1209 }
   1210 
   1211 bool BasicBlock::RemoveMIR(MIR* mir) {
   1212   // Remove as a single element list.
   1213   return RemoveMIRList(mir, mir);
   1214 }
   1215 
   1216 bool BasicBlock::RemoveMIRList(MIR* first_list_mir, MIR* last_list_mir) {
   1217   if (first_list_mir == nullptr) {
   1218     return false;
   1219   }
   1220 
   1221   // Try to find the MIR.
   1222   MIR* before_list = nullptr;
   1223   MIR* after_list = nullptr;
   1224 
   1225   // If we are removing from the beginning of the MIR list.
   1226   if (first_mir_insn == first_list_mir) {
   1227     before_list = nullptr;
   1228   } else {
   1229     before_list = FindPreviousMIR(first_list_mir);
   1230     if (before_list == nullptr) {
   1231       // We did not find the mir.
   1232       return false;
   1233     }
   1234   }
   1235 
   1236   // Remove the BB information and also find the after_list.
   1237   for (MIR* mir = first_list_mir; mir != last_list_mir->next; mir = mir->next) {
   1238     mir->bb = NullBasicBlockId;
   1239   }
   1240 
   1241   after_list = last_list_mir->next;
   1242 
    1243   // If there is nothing before the list, after_list becomes the new first_mir_insn.
   1244   if (before_list == nullptr) {
   1245     first_mir_insn = after_list;
   1246   } else {
   1247     before_list->next = after_list;
   1248   }
   1249 
    1250   // If there is nothing after the list, before_list becomes the new last_mir_insn.
   1251   if (after_list == nullptr) {
   1252     last_mir_insn = before_list;
   1253   }
   1254 
   1255   return true;
   1256 }
   1257 
   1258 MIR* BasicBlock::GetFirstNonPhiInsn() {
   1259   MIR* mir = first_mir_insn;
   1260   while (mir != nullptr && static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
   1261     mir = mir->next;
   1262   }
   1263   return mir;
   1264 }
   1265 
   1266 MIR* BasicBlock::GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current) {
   1267   MIR* next_mir = nullptr;
   1268 
   1269   if (current != nullptr) {
   1270     next_mir = current->next;
   1271   }
   1272 
   1273   if (next_mir == nullptr) {
   1274     // Only look for next MIR that follows unconditionally.
   1275     if ((taken == NullBasicBlockId) && (fall_through != NullBasicBlockId)) {
   1276       next_mir = mir_graph->GetBasicBlock(fall_through)->first_mir_insn;
   1277     }
   1278   }
   1279 
   1280   return next_mir;
   1281 }
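
         // Hedged usage sketch (bb, mir_graph and current are assumed names):
         //
         //   MIR* next = bb->GetNextUnconditionalMir(mir_graph, current);
         //
         // next is current->next, or the first MIR of the fall-through block when current is the
         // last MIR and bb has a fall-through but no taken branch; otherwise it is nullptr.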
   1282 
   1283 static void FillTypeSizeString(uint32_t type_size, std::string* decoded_mir) {
   1284   DCHECK(decoded_mir != nullptr);
   1285   OpSize type = static_cast<OpSize>(type_size >> 16);
   1286   uint16_t vect_size = (type_size & 0xFFFF);
   1287 
   1288   // Now print the type and vector size.
   1289   std::stringstream ss;
   1290   ss << " (type:";
   1291   ss << type;
   1292   ss << " vectsize:";
   1293   ss << vect_size;
   1294   ss << ")";
   1295 
   1296   decoded_mir->append(ss.str());
   1297 }
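
         // Illustrative sketch, assuming the inverse of the decode above (PackTypeSize is a
         // hypothetical helper, not an existing API): vector opcodes carry the operand type in
         // the high 16 bits of vC and the vector size in the low 16 bits.
         //
         //   uint32_t PackTypeSize(OpSize type, uint16_t vect_size) {
         //     return (static_cast<uint32_t>(type) << 16) | vect_size;
         //   }
         //
         // FillTypeSizeString(PackTypeSize(type, 4u), &str) then appends " (type:<type> vectsize:4)".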
   1298 
   1299 void MIRGraph::DisassembleExtendedInstr(const MIR* mir, std::string* decoded_mir) {
   1300   DCHECK(decoded_mir != nullptr);
   1301   int opcode = mir->dalvikInsn.opcode;
   1302   SSARepresentation* ssa_rep = mir->ssa_rep;
   1303   int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
   1304   int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
   1305 
   1306   if (opcode < kMirOpFirst) {
   1307     return;  // It is not an extended instruction.
   1308   }
   1309 
   1310   decoded_mir->append(extended_mir_op_names_[opcode - kMirOpFirst]);
   1311 
   1312   switch (opcode) {
   1313     case kMirOpPhi: {
   1314       if (defs > 0 && uses > 0) {
   1315         BasicBlockId* incoming = mir->meta.phi_incoming;
   1316         decoded_mir->append(StringPrintf(" %s = (%s",
   1317                            GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
   1318                            GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
   1319         decoded_mir->append(StringPrintf(":%d", incoming[0]));
   1320         for (int i = 1; i < uses; i++) {
   1321           decoded_mir->append(StringPrintf(", %s:%d", GetSSANameWithConst(ssa_rep->uses[i], true).c_str(), incoming[i]));
   1322         }
   1323         decoded_mir->append(")");
   1324       }
   1325       break;
   1326     }
   1327     case kMirOpCopy:
   1328       if (ssa_rep != nullptr) {
   1329         decoded_mir->append(" ");
   1330         decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false));
   1331         if (defs > 1) {
   1332           decoded_mir->append(", ");
   1333           decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
   1334         }
   1335         decoded_mir->append(" = ");
   1336         decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[0], false));
   1337         if (uses > 1) {
   1338           decoded_mir->append(", ");
   1339           decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[1], false));
   1340         }
   1341       } else {
   1342         decoded_mir->append(StringPrintf(" v%d = v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1343       }
   1344       break;
   1345     case kMirOpFusedCmplFloat:
   1346     case kMirOpFusedCmpgFloat:
   1347     case kMirOpFusedCmplDouble:
   1348     case kMirOpFusedCmpgDouble:
   1349     case kMirOpFusedCmpLong:
   1350       if (ssa_rep != nullptr) {
   1351         decoded_mir->append(" ");
   1352         decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[0], false));
   1353         for (int i = 1; i < uses; i++) {
   1354           decoded_mir->append(", ");
   1355           decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[i], false));
   1356         }
   1357       } else {
   1358         decoded_mir->append(StringPrintf(" v%d, v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1359       }
   1360       break;
   1361     case kMirOpMoveVector:
   1362       decoded_mir->append(StringPrintf(" vect%d = vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1363       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1364       break;
   1365     case kMirOpPackedAddition:
   1366       decoded_mir->append(StringPrintf(" vect%d = vect%d + vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1367       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1368       break;
   1369     case kMirOpPackedMultiply:
   1370       decoded_mir->append(StringPrintf(" vect%d = vect%d * vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1371       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1372       break;
   1373     case kMirOpPackedSubtract:
   1374       decoded_mir->append(StringPrintf(" vect%d = vect%d - vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1375       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1376       break;
   1377     case kMirOpPackedAnd:
   1378       decoded_mir->append(StringPrintf(" vect%d = vect%d & vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1379       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1380       break;
   1381     case kMirOpPackedOr:
   1382       decoded_mir->append(StringPrintf(" vect%d = vect%d \\| vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1383       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1384       break;
   1385     case kMirOpPackedXor:
   1386       decoded_mir->append(StringPrintf(" vect%d = vect%d ^ vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1387       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1388       break;
   1389     case kMirOpPackedShiftLeft:
   1390       decoded_mir->append(StringPrintf(" vect%d = vect%d \\<\\< %d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1391       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1392       break;
   1393     case kMirOpPackedUnsignedShiftRight:
   1394       decoded_mir->append(StringPrintf(" vect%d = vect%d \\>\\>\\> %d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1395       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1396       break;
   1397     case kMirOpPackedSignedShiftRight:
   1398       decoded_mir->append(StringPrintf(" vect%d = vect%d \\>\\> %d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1399       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1400       break;
   1401     case kMirOpConstVector:
   1402       decoded_mir->append(StringPrintf(" vect%d = %x, %x, %x, %x", mir->dalvikInsn.vA, mir->dalvikInsn.arg[0],
   1403                                       mir->dalvikInsn.arg[1], mir->dalvikInsn.arg[2], mir->dalvikInsn.arg[3]));
   1404       break;
   1405     case kMirOpPackedSet:
   1406       if (ssa_rep != nullptr) {
   1407         decoded_mir->append(StringPrintf(" vect%d = %s", mir->dalvikInsn.vA,
   1408               GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
   1409         if (uses > 1) {
   1410           decoded_mir->append(", ");
   1411           decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[1], false));
   1412         }
   1413       } else {
   1414         decoded_mir->append(StringPrintf(" vect%d = v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1415       }
   1416       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1417       break;
   1418     case kMirOpPackedAddReduce:
   1419       if (ssa_rep != nullptr) {
   1420         decoded_mir->append(" ");
   1421         decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false));
   1422         if (defs > 1) {
   1423           decoded_mir->append(", ");
   1424           decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
   1425         }
   1426         decoded_mir->append(StringPrintf(" = vect%d + %s", mir->dalvikInsn.vB,
   1427             GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
   1428         if (uses > 1) {
   1429           decoded_mir->append(", ");
   1430           decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[1], false));
   1431         }
   1432       } else {
    1433         decoded_mir->append(StringPrintf(" v%d = vect%d + v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB, mir->dalvikInsn.vA));
   1434       }
   1435       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1436       break;
   1437     case kMirOpPackedReduce:
   1438       if (ssa_rep != nullptr) {
   1439         decoded_mir->append(" ");
   1440         decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false));
   1441         if (defs > 1) {
   1442           decoded_mir->append(", ");
   1443           decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
   1444         }
   1445         decoded_mir->append(StringPrintf(" = vect%d (extr_idx:%d)", mir->dalvikInsn.vB, mir->dalvikInsn.arg[0]));
   1446       } else {
   1447         decoded_mir->append(StringPrintf(" v%d = vect%d (extr_idx:%d)", mir->dalvikInsn.vA,
   1448                                          mir->dalvikInsn.vB, mir->dalvikInsn.arg[0]));
   1449       }
   1450       FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
   1451       break;
   1452     case kMirOpReserveVectorRegisters:
   1453     case kMirOpReturnVectorRegisters:
   1454       decoded_mir->append(StringPrintf(" vect%d - vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
   1455       break;
   1456     case kMirOpMemBarrier: {
   1457       decoded_mir->append(" type:");
   1458       std::stringstream ss;
   1459       ss << static_cast<MemBarrierKind>(mir->dalvikInsn.vA);
   1460       decoded_mir->append(ss.str());
   1461       break;
   1462     }
   1463     case kMirOpPackedArrayGet:
   1464     case kMirOpPackedArrayPut:
   1465       decoded_mir->append(StringPrintf(" vect%d", mir->dalvikInsn.vA));
   1466       if (ssa_rep != nullptr) {
   1467         decoded_mir->append(StringPrintf(", %s[%s]",
   1468                                         GetSSANameWithConst(ssa_rep->uses[0], false).c_str(),
   1469                                         GetSSANameWithConst(ssa_rep->uses[1], false).c_str()));
   1470       } else {
   1471         decoded_mir->append(StringPrintf(", v%d[v%d]", mir->dalvikInsn.vB, mir->dalvikInsn.vC));
   1472       }
   1473       FillTypeSizeString(mir->dalvikInsn.arg[0], decoded_mir);
   1474       break;
   1475     case kMirOpMaddInt:
   1476     case kMirOpMsubInt:
   1477     case kMirOpMaddLong:
   1478     case kMirOpMsubLong:
   1479       if (ssa_rep != nullptr) {
   1480         decoded_mir->append(" ");
   1481         decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false));
   1482         if (defs > 1) {
   1483           decoded_mir->append(", ");
   1484           decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
   1485         }
   1486         for (int i = 0; i < uses; i++) {
   1487           decoded_mir->append(", ");
   1488           decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[i], false));
   1489         }
   1490       } else {
   1491         decoded_mir->append(StringPrintf(" v%d, v%d, v%d, v%d",
   1492                                          mir->dalvikInsn.vA, mir->dalvikInsn.vB,
   1493                                          mir->dalvikInsn.vC, mir->dalvikInsn.arg[0]));
   1494       }
   1495       break;
   1496     default:
   1497       break;
   1498   }
   1499 }
   1500 
   1501 char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
   1502   MIR::DecodedInstruction insn = mir->dalvikInsn;
   1503   std::string str;
   1504   int flags = 0;
   1505   int opcode = insn.opcode;
   1506   char* ret;
   1507   bool nop = false;
   1508   SSARepresentation* ssa_rep = mir->ssa_rep;
   1509   Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format.
   1510 
   1511   // Handle special cases that recover the original dalvik instruction.
   1512   if (opcode == kMirOpCheck) {
   1513     str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
   1514     str.append(": ");
   1515     // Recover the original Dex instruction.
   1516     insn = mir->meta.throw_insn->dalvikInsn;
   1517     ssa_rep = mir->meta.throw_insn->ssa_rep;
   1518     opcode = insn.opcode;
   1519   } else if (opcode == kMirOpNop) {
   1520     str.append("[");
   1521     if (mir->offset < current_code_item_->insns_size_in_code_units_) {
   1522       // Recover original opcode.
   1523       insn.opcode = Instruction::At(current_code_item_->insns_ + mir->offset)->Opcode();
   1524       opcode = insn.opcode;
   1525     }
   1526     nop = true;
   1527   }
   1528   int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
   1529   int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
   1530 
   1531   if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
    1532     // Note that this does not always use the MIR's own opcode: when the original dalvik
    1533     // instruction was recovered above, its opcode is disassembled instead of the extended one.
   1534     DisassembleExtendedInstr(mir, &str);
   1535   } else {
   1536     dalvik_format = Instruction::FormatOf(insn.opcode);
   1537     flags = insn.FlagsOf();
   1538     str.append(Instruction::Name(insn.opcode));
   1539 
    1540     // For invoke-style formats, treat wide regs as a pair of singles.
   1541     bool show_singles = ((dalvik_format == Instruction::k35c) ||
   1542                          (dalvik_format == Instruction::k3rc));
   1543     if (defs != 0) {
   1544       str.append(" ");
   1545       str.append(GetSSANameWithConst(ssa_rep->defs[0], false));
   1546       if (defs > 1) {
   1547         str.append(", ");
   1548         str.append(GetSSANameWithConst(ssa_rep->defs[1], false));
   1549       }
   1550       if (uses != 0) {
   1551         str.append(", ");
   1552       }
   1553     }
   1554     for (int i = 0; i < uses; i++) {
   1555       str.append(" ");
   1556       str.append(GetSSANameWithConst(ssa_rep->uses[i], show_singles));
    1557       if (!show_singles && (reg_location_ != nullptr) && reg_location_[ssa_rep->uses[i]].wide) {
   1558         // For the listing, skip the high sreg.
   1559         i++;
   1560       }
   1561       if (i != (uses - 1)) {
   1562         str.append(",");
   1563       }
   1564     }
   1565 
   1566     switch (dalvik_format) {
   1567       case Instruction::k11n:  // Add one immediate from vB.
   1568       case Instruction::k21s:
   1569       case Instruction::k31i:
   1570       case Instruction::k21h:
   1571         str.append(StringPrintf(", #0x%x", insn.vB));
   1572         break;
   1573       case Instruction::k51l:  // Add one wide immediate.
   1574         str.append(StringPrintf(", #%" PRId64, insn.vB_wide));
   1575         break;
   1576       case Instruction::k21c:  // One register, one string/type/method index.
   1577       case Instruction::k31c:
   1578         str.append(StringPrintf(", index #0x%x", insn.vB));
   1579         break;
   1580       case Instruction::k22c:  // Two registers, one string/type/method index.
   1581         str.append(StringPrintf(", index #0x%x", insn.vC));
   1582         break;
   1583       case Instruction::k22s:  // Add one immediate from vC.
   1584       case Instruction::k22b:
   1585         str.append(StringPrintf(", #0x%x", insn.vC));
   1586         break;
   1587       default:
   1588         // Nothing left to print.
   1589         break;
   1590     }
   1591 
   1592     if ((flags & Instruction::kBranch) != 0) {
   1593       // For branches, decode the instructions to print out the branch targets.
   1594       int offset = 0;
   1595       switch (dalvik_format) {
   1596         case Instruction::k21t:
   1597           offset = insn.vB;
   1598           break;
   1599         case Instruction::k22t:
   1600           offset = insn.vC;
   1601           break;
   1602         case Instruction::k10t:
   1603         case Instruction::k20t:
   1604         case Instruction::k30t:
   1605           offset = insn.vA;
   1606           break;
   1607         default:
   1608           LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
   1609           break;
   1610       }
   1611       str.append(StringPrintf(", 0x%x (%c%x)", mir->offset + offset,
   1612                               offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
   1613     }
   1614 
   1615     if (nop) {
   1616       str.append("]--optimized away");
   1617     }
   1618   }
   1619   int length = str.length() + 1;
   1620   ret = arena_->AllocArray<char>(length, kArenaAllocDFInfo);
   1621   strncpy(ret, str.c_str(), length);
   1622   return ret;
   1623 }
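
         // Hedged example of the output (exact names depend on GetSSANameWithConst below): a
         // CONST_4 defining v0_0 with value 2 disassembles roughly as "const/4 v0_0, #0x2", and
         // an instruction rewritten to kMirOpNop is wrapped as "[...]--optimized away".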
   1624 
   1625 /* Turn method name into a legal Linux file name */
   1626 void MIRGraph::ReplaceSpecialChars(std::string& str) {
   1627   static const struct { const char before; const char after; } match[] = {
   1628     {'/', '-'}, {';', '#'}, {' ', '#'}, {'$', '+'},
   1629     {'(', '@'}, {')', '@'}, {'<', '='}, {'>', '='}
   1630   };
   1631   for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) {
   1632     std::replace(str.begin(), str.end(), match[i].before, match[i].after);
   1633   }
   1634 }
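
         // Worked example of the mapping above (a sketch, not from the original source):
         //
         //   std::string s("java.lang.String java.lang.Object.toString()");
         //   ReplaceSpecialChars(s);
         //   // s == "java.lang.String#java.lang.Object.toString@@"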
   1635 
   1636 std::string MIRGraph::GetSSAName(int ssa_reg) {
   1637   // TODO: This value is needed for debugging. Currently, we compute this and then copy to the
   1638   //       arena. We should be smarter and just place straight into the arena, or compute the
   1639   //       value more lazily.
   1640   int vreg = SRegToVReg(ssa_reg);
   1641   if (vreg >= static_cast<int>(GetFirstTempVR())) {
    1642     return StringPrintf("t%d_%d", vreg, GetSSASubscript(ssa_reg));
    1643   } else {
    1644     return StringPrintf("v%d_%d", vreg, GetSSASubscript(ssa_reg));
   1645   }
   1646 }
   1647 
    1648 // Similar to GetSSAName, but if the SSA name holds a known constant, show its value as well.
   1649 std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
   1650   if (reg_location_ == nullptr) {
   1651     // Pre-SSA - just use the standard name.
   1652     return GetSSAName(ssa_reg);
   1653   }
   1654   if (IsConst(reg_location_[ssa_reg])) {
   1655     if (!singles_only && reg_location_[ssa_reg].wide &&
   1656         !reg_location_[ssa_reg].high_word) {
   1657       return StringPrintf("v%d_%d#0x%" PRIx64, SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
   1658                           ConstantValueWide(reg_location_[ssa_reg]));
   1659     } else {
   1660       return StringPrintf("v%d_%d#0x%x", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
   1661                           ConstantValue(reg_location_[ssa_reg]));
   1662     }
   1663   } else {
   1664     int vreg = SRegToVReg(ssa_reg);
   1665     if (vreg >= static_cast<int>(GetFirstTempVR())) {
    1666       return StringPrintf("t%d_%d", vreg, GetSSASubscript(ssa_reg));
    1667     } else {
    1668       return StringPrintf("v%d_%d", vreg, GetSSASubscript(ssa_reg));
   1669     }
   1670   }
   1671 }
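
         // Hedged examples of the names produced above:
         //   "v1_0"      - dalvik register v1, SSA subscript 0, no known constant.
         //   "t12_1"     - compiler temp VR number 12, subscript 1.
         //   "v2_3#0x7f" - v2, subscript 3, known to hold the constant 0x7f.
         // Wide constants use the 64-bit form (e.g. "v4_0#0x100000000") unless singles_only is
         // set or the register is the high word of the pair.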
   1672 
   1673 void MIRGraph::GetBlockName(BasicBlock* bb, char* name) {
   1674   switch (bb->block_type) {
   1675     case kEntryBlock:
   1676       snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id);
   1677       break;
   1678     case kExitBlock:
   1679       snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id);
   1680       break;
   1681     case kDalvikByteCode:
   1682       snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->start_offset, bb->id);
   1683       break;
   1684     case kExceptionHandling:
   1685       snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->start_offset,
   1686                bb->id);
   1687       break;
   1688     default:
   1689       snprintf(name, BLOCK_NAME_LEN, "_%d", bb->id);
   1690       break;
   1691   }
   1692 }
   1693 
   1694 const char* MIRGraph::GetShortyFromMethodReference(const MethodReference& target_method) {
   1695   const DexFile::MethodId& method_id =
   1696       target_method.dex_file->GetMethodId(target_method.dex_method_index);
   1697   return target_method.dex_file->GetShorty(method_id.proto_idx_);
   1698 }
   1699 
   1700 /* Debug Utility - dump a compilation unit */
   1701 void MIRGraph::DumpMIRGraph() {
   1702   const char* block_type_names[] = {
   1703     "Null Block",
   1704     "Entry Block",
   1705     "Code Block",
   1706     "Exit Block",
   1707     "Exception Handling",
   1708     "Catch Block"
   1709   };
   1710 
   1711   LOG(INFO) << "Compiling " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    1712   LOG(INFO) << GetNumDalvikInsns() << " insns";
   1713   LOG(INFO) << GetNumBlocks() << " blocks in total";
   1714 
   1715   for (BasicBlock* bb : block_list_) {
   1716     LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
   1717         bb->id,
   1718         block_type_names[bb->block_type],
   1719         bb->start_offset,
   1720         bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
   1721         bb->last_mir_insn ? "" : " empty");
   1722     if (bb->taken != NullBasicBlockId) {
   1723       LOG(INFO) << "  Taken branch: block " << bb->taken
   1724                 << "(0x" << std::hex << GetBasicBlock(bb->taken)->start_offset << ")";
   1725     }
   1726     if (bb->fall_through != NullBasicBlockId) {
   1727       LOG(INFO) << "  Fallthrough : block " << bb->fall_through
   1728                 << " (0x" << std::hex << GetBasicBlock(bb->fall_through)->start_offset << ")";
   1729     }
   1730   }
   1731 }
   1732 
   1733 /*
   1734  * Build an array of location records for the incoming arguments.
   1735  * Note: one location record per word of arguments, with dummy
   1736  * high-word loc for wide arguments.  Also pull up any following
   1737  * MOVE_RESULT and incorporate it into the invoke.
   1738  */
   1739 CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range) {
   1740   CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
   1741                                                         kArenaAllocMisc));
   1742   MIR* move_result_mir = FindMoveResult(bb, mir);
   1743   if (move_result_mir == nullptr) {
   1744     info->result.location = kLocInvalid;
   1745   } else {
   1746     info->result = GetRawDest(move_result_mir);
   1747     move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
   1748   }
   1749   info->num_arg_words = mir->ssa_rep->num_uses;
   1750   info->args = (info->num_arg_words == 0) ? nullptr :
   1751       arena_->AllocArray<RegLocation>(info->num_arg_words, kArenaAllocMisc);
   1752   for (size_t i = 0; i < info->num_arg_words; i++) {
   1753     info->args[i] = GetRawSrc(mir, i);
   1754   }
   1755   info->opt_flags = mir->optimization_flags;
   1756   info->type = type;
   1757   info->is_range = is_range;
   1758   if (IsInstructionQuickInvoke(mir->dalvikInsn.opcode)) {
   1759     const auto& method_info = GetMethodLoweringInfo(mir);
   1760     info->method_ref = method_info.GetTargetMethod();
   1761   } else {
   1762     info->method_ref = MethodReference(GetCurrentDexCompilationUnit()->GetDexFile(),
   1763                                        mir->dalvikInsn.vB);
   1764   }
   1765   info->index = mir->dalvikInsn.vB;
   1766   info->offset = mir->offset;
   1767   info->mir = mir;
   1768   return info;
   1769 }
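
         // Worked sketch of the "one location record per word" rule above: for a static invoke
         // taking (long, int), ssa_rep->num_uses is 3 -- two words for the wide argument (the
         // high word gets a dummy location) plus one for the int -- so info->args holds three
         // RegLocations. Any following MOVE_RESULT has already been folded into info->result
         // and turned into a kMirOpNop.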
   1770 
   1771 // Allocate a new MIR.
   1772 MIR* MIRGraph::NewMIR() {
   1773   MIR* mir = new (arena_) MIR();
   1774   return mir;
   1775 }
   1776 
   1777 // Allocate a new basic block.
   1778 BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
   1779   BasicBlock* bb = new (arena_) BasicBlock(block_id, block_type, arena_);
   1780 
   1781   // TUNING: better estimate of the exit block predecessors?
   1782   bb->predecessors.reserve((block_type == kExitBlock) ? 2048 : 2);
   1783   block_id_map_.Put(block_id, block_id);
   1784   return bb;
   1785 }
   1786 
   1787 void MIRGraph::InitializeConstantPropagation() {
   1788   is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
   1789   constant_values_ = arena_->AllocArray<int>(GetNumSSARegs(), kArenaAllocDFInfo);
   1790 }
   1791 
   1792 void MIRGraph::InitializeMethodUses() {
   1793   // The gate starts by initializing the use counts.
   1794   int num_ssa_regs = GetNumSSARegs();
   1795   use_counts_.clear();
   1796   use_counts_.reserve(num_ssa_regs + 32);
   1797   use_counts_.resize(num_ssa_regs, 0u);
   1798   raw_use_counts_.clear();
   1799   raw_use_counts_.reserve(num_ssa_regs + 32);
   1800   raw_use_counts_.resize(num_ssa_regs, 0u);
   1801 }
   1802 
   1803 void MIRGraph::SSATransformationStart() {
   1804   DCHECK(temp_scoped_alloc_.get() == nullptr);
   1805   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
   1806   temp_.ssa.num_vregs = GetNumOfCodeAndTempVRs();
   1807   temp_.ssa.work_live_vregs = new (temp_scoped_alloc_.get()) ArenaBitVector(
   1808       temp_scoped_alloc_.get(), temp_.ssa.num_vregs, false, kBitMapRegisterV);
   1809 }
   1810 
   1811 void MIRGraph::SSATransformationEnd() {
   1812   // Verify the dataflow information after the pass.
   1813   if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
   1814     VerifyDataflow();
   1815   }
   1816 
   1817   temp_.ssa.num_vregs = 0u;
   1818   temp_.ssa.work_live_vregs = nullptr;
   1819   DCHECK(temp_.ssa.def_block_matrix == nullptr);
   1820   temp_.ssa.phi_node_blocks = nullptr;
   1821   DCHECK(temp_scoped_alloc_.get() != nullptr);
   1822   temp_scoped_alloc_.reset();
   1823 
   1824   // Update the maximum number of reachable blocks.
   1825   max_num_reachable_blocks_ = num_reachable_blocks_;
   1826 
   1827   // Mark MIR SSA representations as up to date.
   1828   mir_ssa_rep_up_to_date_ = true;
   1829 }
   1830 
   1831 size_t MIRGraph::GetNumDalvikInsns() const {
   1832   size_t cumulative_size = 0u;
   1833   bool counted_current_item = false;
   1834   const uint8_t size_for_null_code_item = 2u;
   1835 
   1836   for (auto it : m_units_) {
   1837     const DexFile::CodeItem* code_item = it->GetCodeItem();
    1838     // Even if the code item is null, we still count a non-zero size so that
    1839     // each m_unit contributes to the total.
   1840     cumulative_size += (code_item == nullptr ?
   1841         size_for_null_code_item : code_item->insns_size_in_code_units_);
   1842     if (code_item == current_code_item_) {
   1843       counted_current_item = true;
   1844     }
   1845   }
   1846 
   1847   // If the current code item was not counted yet, count it now.
   1848   // This can happen for example in unit tests where some fields like m_units_
   1849   // are not initialized.
    1850   if (!counted_current_item) {
   1851     cumulative_size += (current_code_item_ == nullptr ?
   1852         size_for_null_code_item : current_code_item_->insns_size_in_code_units_);
   1853   }
   1854 
   1855   return cumulative_size;
   1856 }
   1857 
   1858 static BasicBlock* SelectTopologicalSortOrderFallBack(
   1859     MIRGraph* mir_graph, const ArenaBitVector* current_loop,
   1860     const ScopedArenaVector<size_t>* visited_cnt_values, ScopedArenaAllocator* allocator,
   1861     ScopedArenaVector<BasicBlockId>* tmp_stack) {
    1862   // No true loop head has been found, but there may be true loop heads after the mess we need
   1863   // to resolve. To avoid taking one of those, pick the candidate with the highest number of
   1864   // reachable unvisited nodes. That candidate will surely be a part of a loop.
   1865   BasicBlock* fall_back = nullptr;
   1866   size_t fall_back_num_reachable = 0u;
   1867   // Reuse the same bit vector for each candidate to mark reachable unvisited blocks.
   1868   ArenaBitVector candidate_reachable(allocator, mir_graph->GetNumBlocks(), false, kBitMapMisc);
   1869   AllNodesIterator iter(mir_graph);
   1870   for (BasicBlock* candidate = iter.Next(); candidate != nullptr; candidate = iter.Next()) {
   1871     if (candidate->hidden ||                            // Hidden, or
   1872         candidate->visited ||                           // already processed, or
   1873         (*visited_cnt_values)[candidate->id] == 0u ||   // no processed predecessors, or
   1874         (current_loop != nullptr &&                     // outside current loop.
   1875          !current_loop->IsBitSet(candidate->id))) {
   1876       continue;
   1877     }
   1878     DCHECK(tmp_stack->empty());
   1879     tmp_stack->push_back(candidate->id);
   1880     candidate_reachable.ClearAllBits();
   1881     size_t num_reachable = 0u;
   1882     while (!tmp_stack->empty()) {
   1883       BasicBlockId current_id = tmp_stack->back();
   1884       tmp_stack->pop_back();
   1885       BasicBlock* current_bb = mir_graph->GetBasicBlock(current_id);
   1886       DCHECK(current_bb != nullptr);
   1887       ChildBlockIterator child_iter(current_bb, mir_graph);
   1888       BasicBlock* child_bb = child_iter.Next();
   1889       for ( ; child_bb != nullptr; child_bb = child_iter.Next()) {
   1890         DCHECK(!child_bb->hidden);
   1891         if (child_bb->visited ||                            // Already processed, or
   1892             (current_loop != nullptr &&                     // outside current loop.
   1893              !current_loop->IsBitSet(child_bb->id))) {
   1894           continue;
   1895         }
   1896         if (!candidate_reachable.IsBitSet(child_bb->id)) {
   1897           candidate_reachable.SetBit(child_bb->id);
   1898           tmp_stack->push_back(child_bb->id);
   1899           num_reachable += 1u;
   1900         }
   1901       }
   1902     }
   1903     if (fall_back_num_reachable < num_reachable) {
   1904       fall_back_num_reachable = num_reachable;
   1905       fall_back = candidate;
   1906     }
   1907   }
   1908   return fall_back;
   1909 }
   1910 
    1911 // Compute from which unvisited blocks bb_id is reachable through unvisited blocks.
   1912 static void ComputeUnvisitedReachableFrom(MIRGraph* mir_graph, BasicBlockId bb_id,
   1913                                           ArenaBitVector* reachable,
   1914                                           ScopedArenaVector<BasicBlockId>* tmp_stack) {
   1915   // NOTE: Loop heads indicated by the "visited" flag.
   1916   DCHECK(tmp_stack->empty());
   1917   reachable->ClearAllBits();
   1918   tmp_stack->push_back(bb_id);
   1919   while (!tmp_stack->empty()) {
   1920     BasicBlockId current_id = tmp_stack->back();
   1921     tmp_stack->pop_back();
   1922     BasicBlock* current_bb = mir_graph->GetBasicBlock(current_id);
   1923     DCHECK(current_bb != nullptr);
   1924     for (BasicBlockId pred_id : current_bb->predecessors) {
   1925       BasicBlock* pred_bb = mir_graph->GetBasicBlock(pred_id);
   1926       DCHECK(pred_bb != nullptr);
   1927       if (!pred_bb->visited && !reachable->IsBitSet(pred_bb->id)) {
   1928         reachable->SetBit(pred_bb->id);
   1929         tmp_stack->push_back(pred_bb->id);
   1930       }
   1931     }
   1932   }
   1933 }
   1934 
   1935 void MIRGraph::ComputeTopologicalSortOrder() {
   1936   ScopedArenaAllocator allocator(&cu_->arena_stack);
   1937   unsigned int num_blocks = GetNumBlocks();
   1938 
   1939   ScopedArenaQueue<BasicBlock*> q(allocator.Adapter());
   1940   ScopedArenaVector<size_t> visited_cnt_values(num_blocks, 0u, allocator.Adapter());
   1941   ScopedArenaVector<BasicBlockId> loop_head_stack(allocator.Adapter());
   1942   size_t max_nested_loops = 0u;
   1943   ArenaBitVector loop_exit_blocks(&allocator, num_blocks, false, kBitMapMisc);
   1944   loop_exit_blocks.ClearAllBits();
   1945 
   1946   // Count the number of blocks to process and add the entry block(s).
   1947   unsigned int num_blocks_to_process = 0u;
   1948   for (BasicBlock* bb : block_list_) {
    1949     if (bb->hidden) {
   1950       continue;
   1951     }
   1952 
   1953     num_blocks_to_process += 1u;
   1954 
   1955     if (bb->predecessors.size() == 0u) {
   1956       // Add entry block to the queue.
   1957       q.push(bb);
   1958     }
   1959   }
   1960 
   1961   // Clear the topological order arrays.
   1962   topological_order_.clear();
   1963   topological_order_.reserve(num_blocks);
   1964   topological_order_loop_ends_.clear();
   1965   topological_order_loop_ends_.resize(num_blocks, 0u);
   1966   topological_order_indexes_.clear();
   1967   topological_order_indexes_.resize(num_blocks, static_cast<uint16_t>(-1));
   1968 
   1969   // Mark all blocks as unvisited.
   1970   ClearAllVisitedFlags();
   1971 
    1972   // For loop heads, keep track of which blocks they are reachable from without going through
    1973   // other loop heads. Other loop heads are excluded to detect the heads of nested loops. The
    1974   // children in this set go into the loop body; the other children jump over the loop.
   1975   ScopedArenaVector<ArenaBitVector*> loop_head_reachable_from(allocator.Adapter());
   1976   loop_head_reachable_from.resize(num_blocks, nullptr);
   1977   // Reuse the same temp stack whenever calculating a loop_head_reachable_from[loop_head_id].
   1978   ScopedArenaVector<BasicBlockId> tmp_stack(allocator.Adapter());
   1979 
   1980   while (num_blocks_to_process != 0u) {
   1981     BasicBlock* bb = nullptr;
   1982     if (!q.empty()) {
   1983       num_blocks_to_process -= 1u;
   1984       // Get top.
   1985       bb = q.front();
   1986       q.pop();
   1987       if (bb->visited) {
   1988         // Loop head: it was already processed, mark end and copy exit blocks to the queue.
   1989         DCHECK(q.empty()) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   1990         uint16_t idx = static_cast<uint16_t>(topological_order_.size());
   1991         topological_order_loop_ends_[topological_order_indexes_[bb->id]] = idx;
   1992         DCHECK_EQ(loop_head_stack.back(), bb->id);
   1993         loop_head_stack.pop_back();
   1994         ArenaBitVector* reachable =
   1995             loop_head_stack.empty() ? nullptr : loop_head_reachable_from[loop_head_stack.back()];
   1996         for (BasicBlockId candidate_id : loop_exit_blocks.Indexes()) {
   1997           if (reachable == nullptr || reachable->IsBitSet(candidate_id)) {
   1998             q.push(GetBasicBlock(candidate_id));
   1999             // NOTE: The BitVectorSet::IndexIterator will not check the pointed-to bit again,
   2000             // so clearing the bit has no effect on the iterator.
   2001             loop_exit_blocks.ClearBit(candidate_id);
   2002           }
   2003         }
   2004         continue;
   2005       }
   2006     } else {
   2007       // Find the new loop head.
   2008       AllNodesIterator iter(this);
   2009       while (true) {
   2010         BasicBlock* candidate = iter.Next();
   2011         if (candidate == nullptr) {
   2012           // We did not find a true loop head, fall back to a reachable block in any loop.
   2013           ArenaBitVector* current_loop =
   2014               loop_head_stack.empty() ? nullptr : loop_head_reachable_from[loop_head_stack.back()];
   2015           bb = SelectTopologicalSortOrderFallBack(this, current_loop, &visited_cnt_values,
   2016                                                   &allocator, &tmp_stack);
   2017           DCHECK(bb != nullptr) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   2018           if (kIsDebugBuild && cu_->dex_file != nullptr) {
   2019             LOG(INFO) << "Topological sort order: Using fall-back in "
   2020                 << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " BB #" << bb->id
   2021                 << " @0x" << std::hex << bb->start_offset
   2022                 << ", num_blocks = " << std::dec << num_blocks;
   2023           }
   2024           break;
   2025         }
   2026         if (candidate->hidden ||                            // Hidden, or
   2027             candidate->visited ||                           // already processed, or
   2028             visited_cnt_values[candidate->id] == 0u ||      // no processed predecessors, or
   2029             (!loop_head_stack.empty() &&                    // outside current loop.
   2030              !loop_head_reachable_from[loop_head_stack.back()]->IsBitSet(candidate->id))) {
   2031           continue;
   2032         }
   2033 
   2034         for (BasicBlockId pred_id : candidate->predecessors) {
   2035           BasicBlock* pred_bb = GetBasicBlock(pred_id);
   2036           DCHECK(pred_bb != nullptr);
   2037           if (pred_bb != candidate && !pred_bb->visited &&
   2038               !pred_bb->dominators->IsBitSet(candidate->id)) {
   2039             candidate = nullptr;  // Set candidate to null to indicate failure.
   2040             break;
   2041           }
   2042         }
   2043         if (candidate != nullptr) {
   2044           bb = candidate;
   2045           break;
   2046         }
   2047       }
   2048       // Compute blocks from which the loop head is reachable and process those blocks first.
   2049       ArenaBitVector* reachable =
   2050           new (&allocator) ArenaBitVector(&allocator, num_blocks, false, kBitMapMisc);
   2051       loop_head_reachable_from[bb->id] = reachable;
   2052       ComputeUnvisitedReachableFrom(this, bb->id, reachable, &tmp_stack);
   2053       // Now mark as loop head. (Even if it's only a fall back when we don't find a true loop.)
   2054       loop_head_stack.push_back(bb->id);
   2055       max_nested_loops = std::max(max_nested_loops, loop_head_stack.size());
   2056     }
   2057 
   2058     DCHECK_EQ(bb->hidden, false);
   2059     DCHECK_EQ(bb->visited, false);
   2060     bb->visited = true;
   2061     bb->nesting_depth = loop_head_stack.size();
   2062 
   2063     // Now add the basic block.
   2064     uint16_t idx = static_cast<uint16_t>(topological_order_.size());
   2065     topological_order_indexes_[bb->id] = idx;
   2066     topological_order_.push_back(bb->id);
   2067 
   2068     // Update visited_cnt_values for children.
   2069     ChildBlockIterator succIter(bb, this);
   2070     BasicBlock* successor = succIter.Next();
   2071     for ( ; successor != nullptr; successor = succIter.Next()) {
   2072       if (successor->hidden) {
   2073         continue;
   2074       }
   2075 
   2076       // One more predecessor was visited.
   2077       visited_cnt_values[successor->id] += 1u;
   2078       if (visited_cnt_values[successor->id] == successor->predecessors.size()) {
   2079         if (loop_head_stack.empty() ||
   2080             loop_head_reachable_from[loop_head_stack.back()]->IsBitSet(successor->id)) {
   2081           q.push(successor);
   2082         } else {
   2083           DCHECK(!loop_exit_blocks.IsBitSet(successor->id));
   2084           loop_exit_blocks.SetBit(successor->id);
   2085         }
   2086       }
   2087     }
   2088   }
   2089 
   2090   // Prepare the loop head stack for iteration.
   2091   topological_order_loop_head_stack_.clear();
   2092   topological_order_loop_head_stack_.reserve(max_nested_loops);
   2093   max_nested_loops_ = max_nested_loops;
   2094   topological_order_up_to_date_ = true;
   2095 }
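
         // Hedged worked example: for a CFG  entry -> A -> B, B -> A (loop edge), B -> exit,
         // the computed order is roughly [entry, A, B, exit] with A recorded as a loop head and
         // topological_order_loop_ends_[index(A)] pointing one past B, so iterators can repeat
         // the range [A, B] until a fixed point before moving on to exit.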
   2096 
    2097 bool BasicBlock::IsExceptionBlock() const {
    2098   return block_type == kExceptionHandling;
    2102 }
   2103 
   2104 ChildBlockIterator::ChildBlockIterator(BasicBlock* bb, MIRGraph* mir_graph)
   2105     : basic_block_(bb), mir_graph_(mir_graph), visited_fallthrough_(false),
   2106       visited_taken_(false), have_successors_(false) {
   2107   // Check if we actually do have successors.
    2108   if (basic_block_ != nullptr && basic_block_->successor_block_list_type != kNotUsed) {
   2109     have_successors_ = true;
   2110     successor_iter_ = basic_block_->successor_blocks.cbegin();
   2111   }
   2112 }
   2113 
   2114 BasicBlock* ChildBlockIterator::Next() {
    2115   // If we don't have a basic block, we cannot get the next child.
   2116   if (basic_block_ == nullptr) {
   2117     return nullptr;
   2118   }
   2119 
   2120   // If we haven't visited fallthrough, return that.
    2121   if (!visited_fallthrough_) {
   2122     visited_fallthrough_ = true;
   2123 
   2124     BasicBlock* result = mir_graph_->GetBasicBlock(basic_block_->fall_through);
   2125     if (result != nullptr) {
   2126       return result;
   2127     }
   2128   }
   2129 
   2130   // If we haven't visited taken, return that.
    2131   if (!visited_taken_) {
   2132     visited_taken_ = true;
   2133 
   2134     BasicBlock* result = mir_graph_->GetBasicBlock(basic_block_->taken);
   2135     if (result != nullptr) {
   2136       return result;
   2137     }
   2138   }
   2139 
   2140   // We visited both taken and fallthrough. Now check if we have successors we need to visit.
    2141   if (have_successors_) {
   2142     // Get information about next successor block.
   2143     auto end = basic_block_->successor_blocks.cend();
   2144     while (successor_iter_ != end) {
   2145       SuccessorBlockInfo* successor_block_info = *successor_iter_;
   2146       ++successor_iter_;
    2147       // If the block was replaced by the null block, take the next one.
   2148       if (successor_block_info->block != NullBasicBlockId) {
   2149         return mir_graph_->GetBasicBlock(successor_block_info->block);
   2150       }
   2151     }
   2152   }
   2153 
   2154   // We do not have anything.
   2155   return nullptr;
   2156 }
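
         // Usage sketch, mirroring how this iterator is used elsewhere in this file:
         //
         //   ChildBlockIterator iter(bb, mir_graph);
         //   for (BasicBlock* child = iter.Next(); child != nullptr; child = iter.Next()) {
         //     // child enumerates the fall-through, then taken, then successor_blocks entries.
         //   }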
   2157 
   2158 BasicBlock* BasicBlock::Copy(CompilationUnit* c_unit) {
   2159   MIRGraph* mir_graph = c_unit->mir_graph.get();
   2160   return Copy(mir_graph);
   2161 }
   2162 
   2163 BasicBlock* BasicBlock::Copy(MIRGraph* mir_graph) {
   2164   BasicBlock* result_bb = mir_graph->CreateNewBB(block_type);
   2165 
    2166   // We don't do a memcpy-style copy here because it would leave a lot of state to
    2167   // clean up. Let us do it by hand instead.
   2168   // Copy in taken and fallthrough.
   2169   result_bb->fall_through = fall_through;
   2170   result_bb->taken = taken;
   2171 
   2172   // Copy successor links if needed.
   2173   ArenaAllocator* arena = mir_graph->GetArena();
   2174 
   2175   result_bb->successor_block_list_type = successor_block_list_type;
   2176   if (result_bb->successor_block_list_type != kNotUsed) {
   2177     result_bb->successor_blocks.reserve(successor_blocks.size());
   2178     for (SuccessorBlockInfo* sbi_old : successor_blocks) {
   2179       SuccessorBlockInfo* sbi_new = static_cast<SuccessorBlockInfo*>(
   2180           arena->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
   2181       memcpy(sbi_new, sbi_old, sizeof(SuccessorBlockInfo));
   2182       result_bb->successor_blocks.push_back(sbi_new);
   2183     }
   2184   }
   2185 
   2186   // Copy offset, method.
   2187   result_bb->start_offset = start_offset;
   2188 
   2189   // Now copy instructions.
    2190   for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
   2191     // Get a copy first.
   2192     MIR* copy = mir->Copy(mir_graph);
   2193 
   2194     // Append it.
   2195     result_bb->AppendMIR(copy);
   2196   }
   2197 
   2198   return result_bb;
   2199 }
   2200 
   2201 MIR* MIR::Copy(MIRGraph* mir_graph) {
   2202   MIR* res = mir_graph->NewMIR();
   2203   *res = *this;
   2204 
   2205   // Remove links
   2206   res->next = nullptr;
   2207   res->bb = NullBasicBlockId;
   2208   res->ssa_rep = nullptr;
   2209 
   2210   return res;
   2211 }
   2212 
   2213 MIR* MIR::Copy(CompilationUnit* c_unit) {
   2214   return Copy(c_unit->mir_graph.get());
   2215 }
   2216 
   2217 uint32_t SSARepresentation::GetStartUseIndex(Instruction::Code opcode) {
   2218   // Default result.
   2219   int res = 0;
   2220 
    2221   // For *put instructions, skip the stored value so the uses mirror their *get counterparts.
   2222   switch (opcode) {
   2223     case Instruction::IPUT:
   2224     case Instruction::IPUT_OBJECT:
   2225     case Instruction::IPUT_BOOLEAN:
   2226     case Instruction::IPUT_BYTE:
   2227     case Instruction::IPUT_CHAR:
   2228     case Instruction::IPUT_SHORT:
   2229     case Instruction::IPUT_QUICK:
   2230     case Instruction::IPUT_OBJECT_QUICK:
   2231     case Instruction::IPUT_BOOLEAN_QUICK:
   2232     case Instruction::IPUT_BYTE_QUICK:
   2233     case Instruction::IPUT_CHAR_QUICK:
   2234     case Instruction::IPUT_SHORT_QUICK:
   2235     case Instruction::APUT:
   2236     case Instruction::APUT_OBJECT:
   2237     case Instruction::APUT_BOOLEAN:
   2238     case Instruction::APUT_BYTE:
   2239     case Instruction::APUT_CHAR:
   2240     case Instruction::APUT_SHORT:
   2241     case Instruction::SPUT:
   2242     case Instruction::SPUT_OBJECT:
   2243     case Instruction::SPUT_BOOLEAN:
   2244     case Instruction::SPUT_BYTE:
   2245     case Instruction::SPUT_CHAR:
   2246     case Instruction::SPUT_SHORT:
   2247       // Skip the VR containing what to store.
   2248       res = 1;
   2249       break;
   2250     case Instruction::IPUT_WIDE:
   2251     case Instruction::IPUT_WIDE_QUICK:
   2252     case Instruction::APUT_WIDE:
   2253     case Instruction::SPUT_WIDE:
   2254       // Skip the two VRs containing what to store.
   2255       res = 2;
   2256       break;
   2257     default:
   2258       // Do nothing in the general case.
   2259       break;
   2260   }
   2261 
   2262   return res;
   2263 }
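
         // Worked example (a sketch): for IPUT v0, v1, field the SSA uses are
         // { value(v0), object(v1) } and GetStartUseIndex returns 1, so callers skip the stored
         // value and start at the object register, mirroring IGET whose uses start at index 0.
         // For IPUT_WIDE the stored value occupies two use slots, hence the result 2.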
   2264 
   2265 /**
   2266  * @brief Given a decoded instruction, it checks whether the instruction
   2267  * sets a constant and if it does, more information is provided about the
   2268  * constant being set.
   2269  * @param ptr_value pointer to a 64-bit holder for the constant.
   2270  * @param wide Updated by function whether a wide constant is being set by bytecode.
   2271  * @return Returns false if the decoded instruction does not represent a constant bytecode.
   2272  */
   2273 bool MIR::DecodedInstruction::GetConstant(int64_t* ptr_value, bool* wide) const {
   2274   bool sets_const = true;
   2275   int64_t value = vB;
   2276 
   2277   DCHECK(ptr_value != nullptr);
   2278   DCHECK(wide != nullptr);
   2279 
   2280   switch (opcode) {
   2281     case Instruction::CONST_4:
   2282     case Instruction::CONST_16:
   2283     case Instruction::CONST:
   2284       *wide = false;
    2285       value <<= 32;      // Shift up and back down to sign extend.
   2286       value >>= 32;
   2287       break;
   2288     case Instruction::CONST_HIGH16:
   2289       *wide = false;
    2290       value <<= 48;      // Shift the 16-bit payload to the top, then back down to sign extend.
   2291       value >>= 32;
   2292       break;
   2293     case Instruction::CONST_WIDE_16:
   2294     case Instruction::CONST_WIDE_32:
   2295       *wide = true;
    2296       value <<= 32;      // Shift up and back down to sign extend.
   2297       value >>= 32;
   2298       break;
   2299     case Instruction::CONST_WIDE:
   2300       *wide = true;
   2301       value = vB_wide;
   2302       break;
   2303     case Instruction::CONST_WIDE_HIGH16:
   2304       *wide = true;
    2305       value <<= 48;      // vB holds the high 16 bits of the wide constant.
   2306       break;
   2307     default:
   2308       sets_const = false;
   2309       break;
   2310   }
   2311 
   2312   if (sets_const) {
   2313     *ptr_value = value;
   2314   }
   2315 
   2316   return sets_const;
   2317 }
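
         // Worked example of the sign extension above: CONST_HIGH16 with vB == 0xFFFF gives
         //   value = (0xFFFF << 48) >> 32 = 0xFFFFFFFFFFFF0000,
         // i.e. the 32-bit constant 0xFFFF0000 sign-extended to 64 bits, with *wide == false.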
   2318 
   2319 void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
   2320   // Reset flags for all MIRs in bb.
   2321   for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
   2322     mir->optimization_flags &= (~reset_flags);
   2323   }
   2324 }
   2325 
   2326 void BasicBlock::Kill(MIRGraph* mir_graph) {
   2327   for (BasicBlockId pred_id : predecessors) {
   2328     BasicBlock* pred_bb = mir_graph->GetBasicBlock(pred_id);
   2329     DCHECK(pred_bb != nullptr);
   2330 
   2331     // Sadly we have to go through the children by hand here.
   2332     pred_bb->ReplaceChild(id, NullBasicBlockId);
   2333   }
   2334   predecessors.clear();
   2335 
   2336   // Mark as dead and hidden.
   2337   block_type = kDead;
   2338   hidden = true;
   2339 
   2340   // Detach it from its MIRs so we don't generate code for them. Also detached MIRs
   2341   // are updated to know that they no longer have a parent.
   2342   for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
   2343     mir->bb = NullBasicBlockId;
   2344   }
   2345   first_mir_insn = nullptr;
   2346   last_mir_insn = nullptr;
   2347 
   2348   data_flow_info = nullptr;
   2349 
   2350   // Erase this bb from all children's predecessors and kill unreachable children.
   2351   ChildBlockIterator iter(this, mir_graph);
   2352   for (BasicBlock* succ_bb = iter.Next(); succ_bb != nullptr; succ_bb = iter.Next()) {
   2353     succ_bb->ErasePredecessor(id);
   2354   }
   2355 
   2356   // Remove links to children.
   2357   fall_through = NullBasicBlockId;
   2358   taken = NullBasicBlockId;
   2359   successor_block_list_type = kNotUsed;
   2360 
   2361   if (kIsDebugBuild) {
   2362     if (catch_entry) {
   2363       DCHECK_EQ(mir_graph->catches_.count(start_offset), 1u);
   2364       mir_graph->catches_.erase(start_offset);
   2365     }
   2366   }
   2367 }
   2368 
   2369 bool BasicBlock::IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg) {
   2370   // In order to determine if the ssa reg is live out, we scan all the MIRs. We remember
    2371   // the last SSA number of the same dalvik register. At the end, if it is different from ssa_reg,
   2372   // then it is not live out of this BB.
   2373   int dalvik_reg = c_unit->mir_graph->SRegToVReg(ssa_reg);
   2374 
   2375   int last_ssa_reg = -1;
   2376 
    2377   // Walk through the MIRs in order, remembering the last def of the dalvik register.
   2378   for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
   2379     // Get ssa rep.
   2380     SSARepresentation *ssa_rep = mir->ssa_rep;
   2381 
   2382     // Go through the defines for this MIR.
   2383     for (int i = 0; i < ssa_rep->num_defs; i++) {
   2384       DCHECK(ssa_rep->defs != nullptr);
   2385 
   2386       // Get the ssa reg.
   2387       int def_ssa_reg = ssa_rep->defs[i];
   2388 
   2389       // Get dalvik reg.
   2390       int def_dalvik_reg = c_unit->mir_graph->SRegToVReg(def_ssa_reg);
   2391 
   2392       // Compare dalvik regs.
   2393       if (dalvik_reg == def_dalvik_reg) {
   2394         // We found a def of the register that we are being asked about.
   2395         // Remember it.
   2396         last_ssa_reg = def_ssa_reg;
   2397       }
   2398     }
   2399   }
   2400 
   2401   if (last_ssa_reg == -1) {
    2402     // If we get to this point, we couldn't find a def of the register the caller asked about.
    2403     // Assume the caller knows what they are doing and be safe: if we couldn't find a def,
    2404     // treat the register as live out.
   2405     return true;
   2406   }
   2407 
   2408   // If it is not -1, we found a match, is it ssa_reg?
   2409   return (ssa_reg == last_ssa_reg);
   2410 }
   2411 
   2412 bool BasicBlock::ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb) {
   2413   // We need to check taken, fall_through, and successor_blocks to replace.
   2414   bool found = false;
   2415   if (taken == old_bb) {
   2416     taken = new_bb;
   2417     found = true;
   2418   }
   2419 
   2420   if (fall_through == old_bb) {
   2421     fall_through = new_bb;
   2422     found = true;
   2423   }
   2424 
   2425   if (successor_block_list_type != kNotUsed) {
   2426     for (SuccessorBlockInfo* successor_block_info : successor_blocks) {
   2427       if (successor_block_info->block == old_bb) {
   2428         successor_block_info->block = new_bb;
   2429         found = true;
   2430       }
   2431     }
   2432   }
   2433 
   2434   return found;
   2435 }
   2436 
   2437 void BasicBlock::ErasePredecessor(BasicBlockId old_pred) {
   2438   auto pos = std::find(predecessors.begin(), predecessors.end(), old_pred);
   2439   DCHECK(pos != predecessors.end());
   2440   // It's faster to move the back() to *pos than erase(pos).
   2441   *pos = predecessors.back();
   2442   predecessors.pop_back();
   2443   size_t idx = std::distance(predecessors.begin(), pos);
   2444   for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
   2445     if (static_cast<int>(mir->dalvikInsn.opcode) != kMirOpPhi) {
   2446       break;
   2447     }
   2448     DCHECK_EQ(mir->ssa_rep->num_uses - 1u, predecessors.size());
   2449     DCHECK_EQ(mir->meta.phi_incoming[idx], old_pred);
   2450     mir->meta.phi_incoming[idx] = mir->meta.phi_incoming[predecessors.size()];
   2451     mir->ssa_rep->uses[idx] = mir->ssa_rep->uses[predecessors.size()];
   2452     mir->ssa_rep->num_uses = predecessors.size();
   2453   }
   2454 }
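
         // Illustrative sketch: with predecessors {5, 7, 9}, ErasePredecessor(7) moves block 9
         // into 7's slot, giving {5, 9}; each leading Phi then has its phi_incoming entry and
         // use at that slot overwritten by the old last entry, keeping phi_incoming[i] paired
         // with ssa_rep->uses[i] and shrinking num_uses to match.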
   2455 
void BasicBlock::UpdatePredecessor(BasicBlockId old_pred, BasicBlockId new_pred) {
  DCHECK_NE(new_pred, NullBasicBlockId);
  auto pos = std::find(predecessors.begin(), predecessors.end(), old_pred);
  DCHECK(pos != predecessors.end());
  *pos = new_pred;
  size_t idx = std::distance(predecessors.begin(), pos);
  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
    if (static_cast<int>(mir->dalvikInsn.opcode) != kMirOpPhi) {
      break;
    }
    DCHECK_EQ(mir->meta.phi_incoming[idx], old_pred);
    mir->meta.phi_incoming[idx] = new_pred;
  }
}

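// Illustrative sketch (not code from this file): replacing a block B with a
// new block B2 in the CFG typically uses both helpers above:
//   for each predecessor P of B:  P->ReplaceChild(B_id, B2_id);
//   for each successor S of B:    S->UpdatePredecessor(B_id, B2_id);
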
// Create a new basic block. Its id is the current number of blocks in the
// list (block_list_.size()), so ids are assigned sequentially as blocks are
// appended.
BasicBlock* MIRGraph::CreateNewBB(BBType block_type) {
  BasicBlockId id = static_cast<BasicBlockId>(block_list_.size());
  BasicBlock* res = NewMemBB(block_type, id);
  block_list_.push_back(res);
  return res;
}

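// Illustrative sketch (hypothetical caller, not code from this file): a pass
// needing a fresh block appends one and wires it into the CFG itself, e.g.
//   BasicBlock* bb = mir_graph->CreateNewBB(kDalvikByteCode);
// where kDalvikByteCode is one of the BBType values.
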
void MIRGraph::CalculateBasicBlockInformation(const PassManager* const post_opt_pass_manager) {
  /* Create the pass driver and launch it */
  PassDriverMEPostOpt driver(post_opt_pass_manager, cu_);
  driver.Launch();
}

int MIR::DecodedInstruction::FlagsOf() const {
  // Compute the index into the extended opcode range.
  int idx = static_cast<int>(opcode) - kNumPackedOpcodes;

  // A negative index means this is a regular Dalvik opcode, not an extended one.
  if (idx < 0) {
    return Instruction::FlagsOf(opcode);
  }

  // For extended opcodes, we use a switch; cases with identical flags share a label.
  switch (static_cast<int>(opcode)) {
    case kMirOpPhi:
    case kMirOpCopy:
    case kMirOpNop:
    case kMirOpSelect:
    case kMirOpConstVector:
    case kMirOpMoveVector:
    case kMirOpPackedMultiply:
    case kMirOpPackedAddition:
    case kMirOpPackedSubtract:
    case kMirOpPackedShiftLeft:
    case kMirOpPackedSignedShiftRight:
    case kMirOpPackedUnsignedShiftRight:
    case kMirOpPackedAnd:
    case kMirOpPackedOr:
    case kMirOpPackedXor:
    case kMirOpPackedAddReduce:
    case kMirOpPackedReduce:
    case kMirOpPackedSet:
    case kMirOpReserveVectorRegisters:
    case kMirOpReturnVectorRegisters:
    case kMirOpMemBarrier:
    case kMirOpMaddInt:
    case kMirOpMsubInt:
    case kMirOpMaddLong:
    case kMirOpMsubLong:
      return Instruction::kContinue;
    case kMirOpFusedCmplFloat:
    case kMirOpFusedCmpgFloat:
    case kMirOpFusedCmplDouble:
    case kMirOpFusedCmpgDouble:
    case kMirOpFusedCmpLong:
      return Instruction::kContinue | Instruction::kBranch;
    case kMirOpNullCheck:
    case kMirOpRangeCheck:
    case kMirOpDivZeroCheck:
    case kMirOpCheck:
    case kMirOpPackedArrayGet:
    case kMirOpPackedArrayPut:
      return Instruction::kContinue | Instruction::kThrow;
    default:
      LOG(WARNING) << "ExtendedFlagsOf: Unhandled case: " << static_cast<int>(opcode);
      return 0;
  }
}
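
// Illustrative sketch (hypothetical caller): the returned value is a bit mask,
// so callers test individual flag bits, e.g.
//   bool can_throw = (mir->dalvikInsn.FlagsOf() & Instruction::kThrow) != 0;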

const uint16_t* MIRGraph::GetInsns(int m_unit_index) const {
  return m_units_[m_unit_index]->GetCodeItem()->insns_;
}

void MIRGraph::SetPuntToInterpreter(bool val) {
  punt_to_interpreter_ = val;
  if (val) {
    // Disable all subsequent optimizations. They may not be safe to run. (For example,
    // LVN/GVN assumes there are no conflicts found by the type inference pass.)
    cu_->disable_opt = ~static_cast<decltype(cu_->disable_opt)>(0);  // Set every bit.
  }
}
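
// Illustrative sketch (hypothetical caller): a pass that encounters a
// construct it cannot compile safely can bail out via
//   cu_->mir_graph->SetPuntToInterpreter(true);
// leaving the method to run in the interpreter.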

}  // namespace art