/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_MIR_GRAPH_H_
#define ART_COMPILER_DEX_MIR_GRAPH_H_

#include <stdint.h>

#include "dex_file.h"
#include "dex_instruction.h"
#include "compiler_ir.h"
#include "invoke_type.h"
#include "mir_field_info.h"
#include "mir_method_info.h"
#include "utils/arena_bit_vector.h"
#include "utils/growable_array.h"
#include "utils/arena_containers.h"
#include "utils/scoped_arena_containers.h"
#include "reg_location.h"
#include "reg_storage.h"

namespace art {

class GlobalValueNumbering;

enum InstructionAnalysisAttributePos {
  kUninterestingOp = 0,
  kArithmeticOp,
  kFPOp,
  kSingleOp,
  kDoubleOp,
  kIntOp,
  kLongOp,
  kBranchOp,
  kInvokeOp,
  kArrayOp,
  kHeavyweightOp,
  kSimpleConstOp,
  kMoveOp,
  kSwitch
};

#define AN_NONE (1 << kUninterestingOp)
#define AN_MATH (1 << kArithmeticOp)
#define AN_FP (1 << kFPOp)
#define AN_LONG (1 << kLongOp)
#define AN_INT (1 << kIntOp)
#define AN_SINGLE (1 << kSingleOp)
#define AN_DOUBLE (1 << kDoubleOp)
#define AN_FLOATMATH (1 << kFPOp)
#define AN_BRANCH (1 << kBranchOp)
#define AN_INVOKE (1 << kInvokeOp)
#define AN_ARRAYOP (1 << kArrayOp)
#define AN_HEAVYWEIGHT (1 << kHeavyweightOp)
#define AN_SIMPLECONST (1 << kSimpleConstOp)
#define AN_MOVE (1 << kMoveOp)
#define AN_SWITCH (1 << kSwitch)
#define AN_COMPUTATIONAL (AN_MATH | AN_ARRAYOP | AN_MOVE | AN_SIMPLECONST)
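
/*
 * Illustrative (non-normative) example of how the analysis attributes above
 * combine for a single opcode: a float add such as ADD_FLOAT would typically
 * be described as AN_MATH | AN_FP | AN_SINGLE, while an INVOKE_VIRTUAL would
 * carry AN_INVOKE | AN_HEAVYWEIGHT.  The authoritative per-opcode table is
 * MIRGraph::analysis_attributes_, declared below.
 */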

enum DataFlowAttributePos {
  kUA = 0,
  kUB,
  kUC,
  kAWide,
  kBWide,
  kCWide,
  kDA,
  kIsMove,
  kSetsConst,
  kFormat35c,
  kFormat3rc,
  kFormatExtended,       // Extended format for extended MIRs.
  kNullCheckSrc0,        // Null check of uses[0].
  kNullCheckSrc1,        // Null check of uses[1].
  kNullCheckSrc2,        // Null check of uses[2].
  kNullCheckOut0,        // Null check of outgoing arg0.
  kDstNonNull,           // May assume dst is non-null.
  kRetNonNull,           // May assume retval is non-null.
  kNullTransferSrc0,     // Object copy src[0] -> dst.
  kNullTransferSrcN,     // Phi null check state transfer.
  kRangeCheckSrc1,       // Range check of uses[1].
  kRangeCheckSrc2,       // Range check of uses[2].
  kRangeCheckSrc3,       // Range check of uses[3].
  kFPA,
  kFPB,
  kFPC,
  kCoreA,
  kCoreB,
  kCoreC,
  kRefA,
  kRefB,
  kRefC,
  kUsesMethodStar,       // Implicit use of Method*.
  kUsesIField,           // Accesses an instance field (IGET/IPUT).
  kUsesSField,           // Accesses a static field (SGET/SPUT).
  kDoLVN,                // Worth computing local value numbers.
};

#define DF_NOP                  UINT64_C(0)
#define DF_UA                   (UINT64_C(1) << kUA)
#define DF_UB                   (UINT64_C(1) << kUB)
#define DF_UC                   (UINT64_C(1) << kUC)
#define DF_A_WIDE               (UINT64_C(1) << kAWide)
#define DF_B_WIDE               (UINT64_C(1) << kBWide)
#define DF_C_WIDE               (UINT64_C(1) << kCWide)
#define DF_DA                   (UINT64_C(1) << kDA)
#define DF_IS_MOVE              (UINT64_C(1) << kIsMove)
#define DF_SETS_CONST           (UINT64_C(1) << kSetsConst)
#define DF_FORMAT_35C           (UINT64_C(1) << kFormat35c)
#define DF_FORMAT_3RC           (UINT64_C(1) << kFormat3rc)
#define DF_FORMAT_EXTENDED      (UINT64_C(1) << kFormatExtended)
#define DF_NULL_CHK_0           (UINT64_C(1) << kNullCheckSrc0)
#define DF_NULL_CHK_1           (UINT64_C(1) << kNullCheckSrc1)
#define DF_NULL_CHK_2           (UINT64_C(1) << kNullCheckSrc2)
#define DF_NULL_CHK_OUT0        (UINT64_C(1) << kNullCheckOut0)
#define DF_NON_NULL_DST         (UINT64_C(1) << kDstNonNull)
#define DF_NON_NULL_RET         (UINT64_C(1) << kRetNonNull)
#define DF_NULL_TRANSFER_0      (UINT64_C(1) << kNullTransferSrc0)
#define DF_NULL_TRANSFER_N      (UINT64_C(1) << kNullTransferSrcN)
#define DF_RANGE_CHK_1          (UINT64_C(1) << kRangeCheckSrc1)
#define DF_RANGE_CHK_2          (UINT64_C(1) << kRangeCheckSrc2)
#define DF_RANGE_CHK_3          (UINT64_C(1) << kRangeCheckSrc3)
#define DF_FP_A                 (UINT64_C(1) << kFPA)
#define DF_FP_B                 (UINT64_C(1) << kFPB)
#define DF_FP_C                 (UINT64_C(1) << kFPC)
#define DF_CORE_A               (UINT64_C(1) << kCoreA)
#define DF_CORE_B               (UINT64_C(1) << kCoreB)
#define DF_CORE_C               (UINT64_C(1) << kCoreC)
#define DF_REF_A                (UINT64_C(1) << kRefA)
#define DF_REF_B                (UINT64_C(1) << kRefB)
#define DF_REF_C                (UINT64_C(1) << kRefC)
#define DF_UMS                  (UINT64_C(1) << kUsesMethodStar)
#define DF_IFIELD               (UINT64_C(1) << kUsesIField)
#define DF_SFIELD               (UINT64_C(1) << kUsesSField)
#define DF_LVN                  (UINT64_C(1) << kDoLVN)

#define DF_HAS_USES             (DF_UA | DF_UB | DF_UC)

#define DF_HAS_DEFS             (DF_DA)

#define DF_HAS_NULL_CHKS        (DF_NULL_CHK_0 | \
                                 DF_NULL_CHK_1 | \
                                 DF_NULL_CHK_2 | \
                                 DF_NULL_CHK_OUT0)

#define DF_HAS_RANGE_CHKS       (DF_RANGE_CHK_1 | \
                                 DF_RANGE_CHK_2 | \
                                 DF_RANGE_CHK_3)

#define DF_HAS_NR_CHKS          (DF_HAS_NULL_CHKS | \
                                 DF_HAS_RANGE_CHKS)

#define DF_A_IS_REG             (DF_UA | DF_DA)
#define DF_B_IS_REG             (DF_UB)
#define DF_C_IS_REG             (DF_UC)
#define DF_IS_GETTER_OR_SETTER  (DF_IS_GETTER | DF_IS_SETTER)
#define DF_USES_FP              (DF_FP_A | DF_FP_B | DF_FP_C)
#define DF_NULL_TRANSFER        (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)
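
/*
 * Illustrative (non-normative) example of a per-opcode data-flow description
 * built from the bits above: a three-operand integer add such as
 * ADD_INT vA, vB, vC defines A and uses B and C, all in core registers:
 *   DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C
 * The authoritative per-opcode table is MIRGraph::oat_data_flow_attributes_,
 * declared below.
 */
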
enum OatMethodAttributes {
  kIsLeaf,            // Method is leaf.
  kHasLoop,           // Method contains simple loop.
};

#define METHOD_IS_LEAF          (1 << kIsLeaf)
#define METHOD_HAS_LOOP         (1 << kHasLoop)

// Minimum field size to contain Dalvik v_reg number.
#define VREG_NUM_WIDTH 16

#define INVALID_SREG (-1)
#define INVALID_VREG (0xFFFFU)
#define INVALID_OFFSET (0xDEADF00FU)

#define MIR_IGNORE_NULL_CHECK           (1 << kMIRIgnoreNullCheck)
#define MIR_NULL_CHECK_ONLY             (1 << kMIRNullCheckOnly)
#define MIR_IGNORE_RANGE_CHECK          (1 << kMIRIgnoreRangeCheck)
#define MIR_RANGE_CHECK_ONLY            (1 << kMIRRangeCheckOnly)
#define MIR_IGNORE_CLINIT_CHECK         (1 << kMIRIgnoreClInitCheck)
#define MIR_INLINED                     (1 << kMIRInlined)
#define MIR_INLINED_PRED                (1 << kMIRInlinedPred)
#define MIR_CALLEE                      (1 << kMIRCallee)
#define MIR_IGNORE_SUSPEND_CHECK        (1 << kMIRIgnoreSuspendCheck)
#define MIR_DUP                         (1 << kMIRDup)

#define BLOCK_NAME_LEN 80

typedef uint16_t BasicBlockId;
static const BasicBlockId NullBasicBlockId = 0;
static constexpr bool kLeafOptimization = false;

/*
 * In general, vreg/sreg describe Dalvik registers that originated with dx.  However,
 * it is useful to have compiler-generated temporary registers and have them treated
 * in the same manner as dx-generated virtual registers.  This struct records the SSA
 * name of compiler-introduced temporaries.
 */
struct CompilerTemp {
  int32_t v_reg;      // Virtual register number for temporary.
  int32_t s_reg_low;  // SSA name for low Dalvik word.
};

enum CompilerTempType {
  kCompilerTempVR,                // A virtual register temporary.
  kCompilerTempSpecialMethodPtr,  // Temporary that keeps track of current method pointer.
};

// When the debug option is enabled, records the effectiveness of null and range check elimination.
struct Checkstats {
  int32_t null_checks;
  int32_t null_checks_eliminated;
  int32_t range_checks;
  int32_t range_checks_eliminated;
};

// Dataflow attributes of a basic block.
struct BasicBlockDataFlow {
  ArenaBitVector* use_v;
  ArenaBitVector* def_v;
  ArenaBitVector* live_in_v;
  ArenaBitVector* phi_v;
  int32_t* vreg_to_ssa_map_exit;
  ArenaBitVector* ending_check_v;  // For null check and class init check elimination.
};

/*
 * Normalized use/def for a MIR operation using SSA names rather than vregs.  Note that
 * uses/defs retain the Dalvik convention that long operations operate on a pair of 32-bit
 * vregs.  For example, "ADD_LONG v0, v2, v4" would have 2 defs (v0/v1) and 4 uses (v2/v3,
 * v4/v5).  Following SSA renaming, this is the primary struct used by code generators to
 * locate operand and result registers.  This is a somewhat confusing and unhelpful
 * convention that we may want to revisit in the future.
 *
 * TODO:
 *  1. Add accessors for uses/defs and make data private
 *  2. Change fp_use/fp_def to a bit array (could help memory usage)
 *  3. Combine array storage into an internal array, handled via the accessors from 1.
 */
struct SSARepresentation {
  int32_t* uses;
  bool* fp_use;
  int32_t* defs;
  bool* fp_def;
  int16_t num_uses_allocated;
  int16_t num_defs_allocated;
  int16_t num_uses;
  int16_t num_defs;

  static uint32_t GetStartUseIndex(Instruction::Code opcode);
};
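
/*
 * Worked example of the convention described above (illustrative): for
 * "ADD_LONG v0, v2, v4", after SSA renaming we would expect
 *   num_defs == 2, defs == {ssa(v0), ssa(v1)}
 *   num_uses == 4, uses == {ssa(v2), ssa(v3), ssa(v4), ssa(v5)}
 * where ssa(vN) denotes the current SSA name of virtual register vN.
 */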

/*
 * The mid-level Intermediate Representation node, which may be largely considered a
 * wrapper around a Dalvik bytecode instruction.
 */
struct MIR {
  /*
   * TODO: remove embedded DecodedInstruction to save space, keeping only opcode.  Recover
   * additional fields on as-needed basis.  Question: how to support MIR Pseudo-ops; probably
   * need to carry aux data pointer.
   */
  struct DecodedInstruction {
    uint32_t vA;
    uint32_t vB;
    uint64_t vB_wide;        /* for k51l */
    uint32_t vC;
    uint32_t arg[5];         /* vC/D/E/F/G in invoke or filled-new-array */
    Instruction::Code opcode;

    explicit DecodedInstruction():vA(0), vB(0), vB_wide(0), vC(0), opcode(Instruction::NOP) {
    }

    /*
     * Given a decoded instruction representing a const bytecode, updates the out
     * arguments with the values dictated by that const bytecode.
     */
    bool GetConstant(int64_t* ptr_value, bool* wide) const;

    static bool IsPseudoMirOp(Instruction::Code opcode) {
      return static_cast<int>(opcode) >= static_cast<int>(kMirOpFirst);
    }

    static bool IsPseudoMirOp(int opcode) {
      return opcode >= static_cast<int>(kMirOpFirst);
    }

    bool IsInvoke() const {
      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kInvoke) == Instruction::kInvoke);
    }

    bool IsStore() const {
      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kStore) == Instruction::kStore);
    }

    bool IsLoad() const {
      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kLoad) == Instruction::kLoad);
    }

    bool IsConditionalBranch() const {
      return !IsPseudoMirOp(opcode) && (Instruction::FlagsOf(opcode) == (Instruction::kContinue | Instruction::kBranch));
    }

    /**
     * @brief Is the register C component of the decoded instruction a constant?
     */
    bool IsCFieldOrConstant() const {
      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kRegCFieldOrConstant) == Instruction::kRegCFieldOrConstant);
    }

    /**
     * @brief Is the register B component of the decoded instruction a constant?
     */
    bool IsBFieldOrConstant() const {
      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kRegBFieldOrConstant) == Instruction::kRegBFieldOrConstant);
    }

    bool IsCast() const {
      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kCast) == Instruction::kCast);
    }

    /**
     * @brief Does the instruction clobber memory?
     * @details Clobber means that the instruction changes memory in a way that is not
     *          precisely tracked; any assumption about memory aliasing or memory contents
     *          must be discarded when crossing such an instruction.
     */
    bool Clobbers() const {
      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kClobber) == Instruction::kClobber);
    }

    bool IsLinear() const {
      return !IsPseudoMirOp(opcode) && (Instruction::FlagsOf(opcode) & (Instruction::kAdd | Instruction::kSubtract)) != 0;
    }
  } dalvikInsn;

  NarrowDexOffset offset;         // Offset of the instruction in code units.
  uint16_t optimization_flags;
  int16_t m_unit_index;           // Index of the method from which this MIR was included.
  BasicBlockId bb;
  MIR* next;
  SSARepresentation* ssa_rep;
  union {
    // Incoming edges for phi node.
    BasicBlockId* phi_incoming;
    // Establish link from check instruction (kMirOpCheck) to the actual throwing instruction.
    MIR* throw_insn;
    // Branch condition for fused cmp or select.
    ConditionCode ccode;
    // IGET/IPUT lowering info index, points to MIRGraph::ifield_lowering_infos_. Due to limit on
    // the number of code points (64K) and size of IGET/IPUT insn (2), this will never exceed 32K.
    uint32_t ifield_lowering_info;
    // SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
    // the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
    uint32_t sfield_lowering_info;
    // INVOKE data index, points to MIRGraph::method_lowering_infos_.
    uint32_t method_lowering_info;
  } meta;

  explicit MIR():offset(0), optimization_flags(0), m_unit_index(0), bb(NullBasicBlockId),
                 next(nullptr), ssa_rep(nullptr) {
    memset(&meta, 0, sizeof(meta));
  }

  uint32_t GetStartUseIndex() const {
    return SSARepresentation::GetStartUseIndex(dalvikInsn.opcode);
  }

  MIR* Copy(CompilationUnit *c_unit);
  MIR* Copy(MIRGraph* mir_graph);

  static void* operator new(size_t size, ArenaAllocator* arena) {
    return arena->Alloc(sizeof(MIR), kArenaAllocMIR);
  }
  static void operator delete(void* p) {}  // Nop.
};
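
/*
 * MIRs are arena-allocated through the placement operator new above; a
 * typical (illustrative) allocation looks like:
 *   MIR* mir = new (arena) MIR();
 * The no-op operator delete reflects that the arena releases all of its
 * memory at once rather than per object.
 */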

struct SuccessorBlockInfo;

struct BasicBlock {
  BasicBlockId id;
  BasicBlockId dfs_id;
  NarrowDexOffset start_offset;     // Offset in code units.
  BasicBlockId fall_through;
  BasicBlockId taken;
  BasicBlockId i_dom;               // Immediate dominator.
  uint16_t nesting_depth;
  BBType block_type:4;
  BlockListType successor_block_list_type:4;
  bool visited:1;
  bool hidden:1;
  bool catch_entry:1;
  bool explicit_throw:1;
  bool conditional_branch:1;
  bool terminated_by_return:1;  // Block ends with a Dalvik return opcode.
  bool dominates_return:1;      // Is a member of return extended basic block.
  bool use_lvn:1;               // Run local value numbering on this block.
  MIR* first_mir_insn;
  MIR* last_mir_insn;
  BasicBlockDataFlow* data_flow_info;
  ArenaBitVector* dominators;
  ArenaBitVector* i_dominated;      // Set of nodes immediately dominated.
  ArenaBitVector* dom_frontier;     // Dominance frontier.
  GrowableArray<BasicBlockId>* predecessors;
  GrowableArray<SuccessorBlockInfo*>* successor_blocks;

  void AppendMIR(MIR* mir);
  void AppendMIRList(MIR* first_list_mir, MIR* last_list_mir);
  void AppendMIRList(const std::vector<MIR*>& insns);
  void PrependMIR(MIR* mir);
  void PrependMIRList(MIR* first_list_mir, MIR* last_list_mir);
  void PrependMIRList(const std::vector<MIR*>& to_add);
  void InsertMIRAfter(MIR* current_mir, MIR* new_mir);
  void InsertMIRListAfter(MIR* insert_after, MIR* first_list_mir, MIR* last_list_mir);
  MIR* FindPreviousMIR(MIR* mir);
  void InsertMIRBefore(MIR* insert_before, MIR* list);
  void InsertMIRListBefore(MIR* insert_before, MIR* first_list_mir, MIR* last_list_mir);
  bool RemoveMIR(MIR* mir);
  bool RemoveMIRList(MIR* first_list_mir, MIR* last_list_mir);

  BasicBlock* Copy(CompilationUnit* c_unit);
  BasicBlock* Copy(MIRGraph* mir_graph);

  /**
   * @brief Reset the optimization_flags field of each MIR.
   */
  void ResetOptimizationFlags(uint16_t reset_flags);

  /**
   * @brief Hide the BasicBlock.
   * @details Set it to kDalvikByteCode, set hidden to true, remove all MIRs,
   *          remove itself from any predecessor edges, remove itself from any
   *          child's predecessor growable array.
   */
  void Hide(CompilationUnit* c_unit);

  /**
   * @brief Is ssa_reg the last SSA definition of that VR in the block?
   */
  bool IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg);

  /**
   * @brief Replace the edge going to old_bb to now go towards new_bb.
   */
  bool ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb);

  /**
   * @brief Update the predecessor growable array from old_pred to new_pred.
   */
  void UpdatePredecessor(BasicBlockId old_pred, BasicBlockId new_pred);

  /**
   * @brief Used to obtain the next MIR that follows unconditionally.
   * @details A nullptr return does not guarantee that no MIR follows; it only
   * means an unconditional follower could not be determined.
   * @param mir_graph the MIRGraph.
   * @param current The MIR for which to find an unconditional follower.
   * @return Returns the following MIR if one can be found.
   */
  MIR* GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current);
  bool IsExceptionBlock() const;

  static void* operator new(size_t size, ArenaAllocator* arena) {
    return arena->Alloc(sizeof(BasicBlock), kArenaAllocBB);
  }
  static void operator delete(void* p) {}  // Nop.
};

/*
 * The "blocks" field in "successor_block_list" points to an array of elements with the type
 * "SuccessorBlockInfo".  For catch blocks, key is type index for the exception.  For switch
 * blocks, key is the case value.
 */
struct SuccessorBlockInfo {
  BasicBlockId block;
  int key;
};
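
/*
 * For example (illustrative), a packed-switch with cases {0, 1, 2} would yield
 * three SuccessorBlockInfo entries with keys 0, 1 and 2, each naming the basic
 * block that handles the corresponding case.
 */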

/**
 * @class ChildBlockIterator
 * @brief Enable an easy iteration of the children.
 */
class ChildBlockIterator {
 public:
  /**
   * @brief Constructs a child iterator.
   * @param bb The basic block whose children we need to iterate through.
   * @param mir_graph The MIRGraph used to get the basic block during iteration.
   */
  ChildBlockIterator(BasicBlock* bb, MIRGraph* mir_graph);
  BasicBlock* Next();

 private:
  BasicBlock* basic_block_;
  MIRGraph* mir_graph_;
  bool visited_fallthrough_;
  bool visited_taken_;
  bool have_successors_;
  GrowableArray<SuccessorBlockInfo*>::Iterator successor_iter_;
};
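
/*
 * Typical (illustrative) usage, assuming Next() returns nullptr once all
 * children have been visited:
 *   ChildBlockIterator iter(bb, mir_graph);
 *   for (BasicBlock* child = iter.Next(); child != nullptr; child = iter.Next()) {
 *     // Process child (fall-through, taken, then any successor blocks).
 *   }
 */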

/*
 * Collection of information describing an invoke, and the destination of
 * the subsequent MOVE_RESULT (if applicable).  Collected as a unit to enable
 * more efficient invoke code generation.
 */
struct CallInfo {
  int num_arg_words;    // Note: word count, not arg count.
  RegLocation* args;    // One for each word of arguments.
  RegLocation result;   // Eventual target of MOVE_RESULT.
  int opt_flags;
  InvokeType type;
  uint32_t dex_idx;
  uint32_t index;       // Method idx for invokes, type idx for FilledNewArray.
  uintptr_t direct_code;
  uintptr_t direct_method;
  RegLocation target;    // Target of following move_result.
  bool skip_this;
  bool is_range;
  DexOffset offset;      // Offset in code units.
  MIR* mir;
};


const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, RegStorage(), INVALID_SREG,
                             INVALID_SREG};

class MIRGraph {
 public:
  MIRGraph(CompilationUnit* cu, ArenaAllocator* arena);
  ~MIRGraph();

  /*
   * Examine the graph to determine whether it's worthwhile to spend the time compiling
   * this method.
   */
  bool SkipCompilation(std::string* skip_message);

  /*
   * Should we skip the compilation of this method based on its name?
   */
  bool SkipCompilationByName(const std::string& methodname);

  /*
   * Parse dex method and add MIR at current insert point.  Returns id (which is
   * actually the index of the method in the m_units_ array).
   */
  void InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
                    InvokeType invoke_type, uint16_t class_def_idx,
                    uint32_t method_idx, jobject class_loader, const DexFile& dex_file);

  /* Find existing block */
  BasicBlock* FindBlock(DexOffset code_offset) {
    return FindBlock(code_offset, false, false, NULL);
  }

  const uint16_t* GetCurrentInsns() const {
    return current_code_item_->insns_;
  }

  const uint16_t* GetInsns(int m_unit_index) const {
    return m_units_[m_unit_index]->GetCodeItem()->insns_;
  }

  unsigned int GetNumBlocks() const {
    return num_blocks_;
  }

  size_t GetNumDalvikInsns() const {
    return cu_->code_item->insns_size_in_code_units_;
  }

  ArenaBitVector* GetTryBlockAddr() const {
    return try_block_addr_;
  }

  BasicBlock* GetEntryBlock() const {
    return entry_block_;
  }

  BasicBlock* GetExitBlock() const {
    return exit_block_;
  }

  BasicBlock* GetBasicBlock(unsigned int block_id) const {
    return (block_id == NullBasicBlockId) ? NULL : block_list_.Get(block_id);
  }

  size_t GetBasicBlockListCount() const {
    return block_list_.Size();
  }

  GrowableArray<BasicBlock*>* GetBlockList() {
    return &block_list_;
  }

  GrowableArray<BasicBlockId>* GetDfsOrder() {
    return dfs_order_;
  }

  GrowableArray<BasicBlockId>* GetDfsPostOrder() {
    return dfs_post_order_;
  }

  GrowableArray<BasicBlockId>* GetDomPostOrder() {
    return dom_post_order_traversal_;
  }

  int GetDefCount() const {
    return def_count_;
  }

  ArenaAllocator* GetArena() {
    return arena_;
  }

  void EnableOpcodeCounting() {
    opcode_count_ = static_cast<int*>(arena_->Alloc(kNumPackedOpcodes * sizeof(int),
                                                    kArenaAllocMisc));
  }

  void ShowOpcodeStats();

  DexCompilationUnit* GetCurrentDexCompilationUnit() const {
    return m_units_[current_method_];
  }

  /**
   * @brief Dump a CFG into a dot file format.
   * @param dir_prefix the directory the file will be created in.
   * @param all_blocks whether to dump all basic blocks or only the reachable ones.
   * @param suffix optional suffix for the filename (default = nullptr).
   */
  void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);

  bool HasFieldAccess() const {
    return (merged_df_flags_ & (DF_IFIELD | DF_SFIELD)) != 0u;
  }

  bool HasStaticFieldAccess() const {
    return (merged_df_flags_ & DF_SFIELD) != 0u;
  }

  bool HasInvokes() const {
    // NOTE: These formats include the rare filled-new-array/range.
    return (merged_df_flags_ & (DF_FORMAT_35C | DF_FORMAT_3RC)) != 0u;
  }

  void DoCacheFieldLoweringInfo();

  const MirIFieldLoweringInfo& GetIFieldLoweringInfo(MIR* mir) const {
    DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.Size());
    return ifield_lowering_infos_.GetRawStorage()[mir->meta.ifield_lowering_info];
  }

  const MirSFieldLoweringInfo& GetSFieldLoweringInfo(MIR* mir) const {
    DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.Size());
    return sfield_lowering_infos_.GetRawStorage()[mir->meta.sfield_lowering_info];
  }

  void DoCacheMethodLoweringInfo();

  const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) {
    DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.Size());
    return method_lowering_infos_.GetRawStorage()[mir->meta.method_lowering_info];
  }

  void ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput);

  void InitRegLocations();

  void RemapRegLocations();

  void DumpRegLocTable(RegLocation* table, int count);

  void BasicBlockOptimization();

  GrowableArray<BasicBlockId>* GetTopologicalSortOrder() {
    DCHECK(topological_order_ != nullptr);
    return topological_order_;
  }

  GrowableArray<BasicBlockId>* GetTopologicalSortOrderLoopEnds() {
    DCHECK(topological_order_loop_ends_ != nullptr);
    return topological_order_loop_ends_;
  }

  GrowableArray<BasicBlockId>* GetTopologicalSortOrderIndexes() {
    DCHECK(topological_order_indexes_ != nullptr);
    return topological_order_indexes_;
  }

  GrowableArray<std::pair<uint16_t, bool>>* GetTopologicalSortOrderLoopHeadStack() {
    DCHECK(topological_order_loop_head_stack_ != nullptr);
    return topological_order_loop_head_stack_;
  }

  bool IsConst(int32_t s_reg) const {
    return is_constant_v_->IsBitSet(s_reg);
  }

  bool IsConst(RegLocation loc) const {
    return loc.orig_sreg < 0 ? false : IsConst(loc.orig_sreg);
  }

  int32_t ConstantValue(RegLocation loc) const {
    DCHECK(IsConst(loc));
    return constant_values_[loc.orig_sreg];
  }

  int32_t ConstantValue(int32_t s_reg) const {
    DCHECK(IsConst(s_reg));
    return constant_values_[s_reg];
  }

  int64_t ConstantValueWide(RegLocation loc) const {
    DCHECK(IsConst(loc));
    DCHECK(!loc.high_word);  // Do not allow asking for the high partner.
    DCHECK_LT(loc.orig_sreg + 1, GetNumSSARegs());
    return (static_cast<int64_t>(constant_values_[loc.orig_sreg + 1]) << 32) |
        Low32Bits(static_cast<int64_t>(constant_values_[loc.orig_sreg]));
  }
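
  /*
   * Worked example for ConstantValueWide() (illustrative): with
   * constant_values_[s] == 0x90ABCDEF (low word) and
   * constant_values_[s + 1] == 0x12345678 (high word), the result is
   * (0x12345678LL << 32) | 0x90ABCDEF == 0x1234567890ABCDEF.
   */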

  bool IsConstantNullRef(RegLocation loc) const {
    return loc.ref && loc.is_const && (ConstantValue(loc) == 0);
  }

  int GetNumSSARegs() const {
    return num_ssa_regs_;
  }

  void SetNumSSARegs(int new_num) {
     /*
      * TODO: It's theoretically possible to exceed 32767, though any cases which did
      * would be filtered out with current settings.  When orig_sreg field is removed
      * from RegLocation, expand s_reg_low to handle all possible cases and remove DCHECK().
      */
    CHECK_EQ(new_num, static_cast<int16_t>(new_num));
    num_ssa_regs_ = new_num;
  }

  unsigned int GetNumReachableBlocks() const {
    return num_reachable_blocks_;
  }

  int GetUseCount(int vreg) const {
    return use_counts_.Get(vreg);
  }

  int GetRawUseCount(int vreg) const {
    return raw_use_counts_.Get(vreg);
  }

  int GetSSASubscript(int ssa_reg) const {
    return ssa_subscripts_->Get(ssa_reg);
  }

  RegLocation GetRawSrc(MIR* mir, int num) {
    DCHECK(num < mir->ssa_rep->num_uses);
    RegLocation res = reg_location_[mir->ssa_rep->uses[num]];
    return res;
  }

  RegLocation GetRawDest(MIR* mir) {
    DCHECK_GT(mir->ssa_rep->num_defs, 0);
    RegLocation res = reg_location_[mir->ssa_rep->defs[0]];
    return res;
  }

  RegLocation GetDest(MIR* mir) {
    RegLocation res = GetRawDest(mir);
    DCHECK(!res.wide);
    return res;
  }

  RegLocation GetSrc(MIR* mir, int num) {
    RegLocation res = GetRawSrc(mir, num);
    DCHECK(!res.wide);
    return res;
  }

  RegLocation GetDestWide(MIR* mir) {
    RegLocation res = GetRawDest(mir);
    DCHECK(res.wide);
    return res;
  }

  RegLocation GetSrcWide(MIR* mir, int low) {
    RegLocation res = GetRawSrc(mir, low);
    DCHECK(res.wide);
    return res;
  }

  RegLocation GetBadLoc() {
    return bad_loc;
  }

  int GetMethodSReg() const {
    return method_sreg_;
  }

  /**
   * @brief Used to obtain the number of compiler temporaries being used.
   * @return Returns the number of compiler temporaries.
   */
  size_t GetNumUsedCompilerTemps() const {
    size_t total_num_temps = compiler_temps_.Size();
    DCHECK_LE(num_non_special_compiler_temps_, total_num_temps);
    return total_num_temps;
  }

  /**
   * @brief Used to obtain the number of non-special compiler temporaries being used.
   * @return Returns the number of non-special compiler temporaries.
   */
  size_t GetNumNonSpecialCompilerTemps() const {
    return num_non_special_compiler_temps_;
  }

  /**
   * @brief Used to set the total number of available non-special compiler temporaries.
   * @details Fails to set the new maximum if more temps are already in use than new_max.
   * @param new_max The new maximum number of non-special compiler temporaries.
   * @return Returns true if the max was set and false if failed to set.
   */
  bool SetMaxAvailableNonSpecialCompilerTemps(size_t new_max) {
    if (new_max < GetNumNonSpecialCompilerTemps()) {
      return false;
    } else {
      max_available_non_special_compiler_temps_ = new_max;
      return true;
    }
  }

  /**
   * @brief Provides the number of non-special compiler temps available.
   * @details Even if this returns zero, special compiler temps are guaranteed to be available.
   * @return Returns the number of available temps.
   */
  size_t GetNumAvailableNonSpecialCompilerTemps();

  /**
   * @brief Used to obtain an existing compiler temporary.
   * @param index The index of the temporary which must be strictly less than the
   * number of temporaries.
   * @return Returns the temporary that was asked for.
   */
  CompilerTemp* GetCompilerTemp(size_t index) const {
    return compiler_temps_.Get(index);
  }

  /**
   * @brief Used to obtain the maximum number of compiler temporaries that can be requested.
   * @return Returns the maximum number of compiler temporaries, whether used or not.
   */
  size_t GetMaxPossibleCompilerTemps() const {
    return max_available_special_compiler_temps_ + max_available_non_special_compiler_temps_;
  }

  /**
   * @brief Used to obtain a new unique compiler temporary.
   * @param ct_type Type of compiler temporary requested.
   * @param wide Whether we should allocate a wide temporary.
   * @return Returns the newly created compiler temporary.
   */
  CompilerTemp* GetNewCompilerTemp(CompilerTempType ct_type, bool wide);

  bool MethodIsLeaf() {
    return attributes_ & METHOD_IS_LEAF;
  }

  RegLocation GetRegLocation(int index) {
    DCHECK((index >= 0) && (index < num_ssa_regs_));
    return reg_location_[index];
  }

  RegLocation GetMethodLoc() {
    return reg_location_[method_sreg_];
  }

  bool IsBackedge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
    return ((target_bb_id != NullBasicBlockId) &&
            (GetBasicBlock(target_bb_id)->start_offset <= branch_bb->start_offset));
  }

  bool IsBackwardsBranch(BasicBlock* branch_bb) {
    return IsBackedge(branch_bb, branch_bb->taken) || IsBackedge(branch_bb, branch_bb->fall_through);
  }

  void CountBranch(DexOffset target_offset) {
    if (target_offset <= current_offset_) {
      backward_branches_++;
    } else {
      forward_branches_++;
    }
  }

  int GetBranchCount() {
    return backward_branches_ + forward_branches_;
  }

  // Is this vreg in the in set?
  bool IsInVReg(int vreg) {
    return (vreg >= cu_->num_regs);
  }
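
  /*
   * The check above relies on the compiler's virtual register layout (assumed
   * here): locals occupy v_regs [0, cu_->num_regs) and the incoming arguments
   * sit at and above cu_->num_regs.
   */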

  void DumpCheckStats();
  MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
  int SRegToVReg(int ssa_reg) const;
  void VerifyDataflow();
  void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
  void EliminateNullChecksAndInferTypesStart();
  bool EliminateNullChecksAndInferTypes(BasicBlock* bb);
  void EliminateNullChecksAndInferTypesEnd();
  bool EliminateClassInitChecksGate();
  bool EliminateClassInitChecks(BasicBlock* bb);
  void EliminateClassInitChecksEnd();
  bool ApplyGlobalValueNumberingGate();
  bool ApplyGlobalValueNumbering(BasicBlock* bb);
  void ApplyGlobalValueNumberingEnd();
  /*
   * Type inference handling helpers.  Because Dalvik's bytecode is not fully typed,
   * we have to do some work to figure out the sreg type.  For some operations it is
   * clear based on the opcode (i.e. ADD_FLOAT v0, v1, v2), but for others (MOVE), we
   * may never know the "real" type.
   *
   * We perform the type inference operation by using an iterative walk over
   * the graph, propagating types "defined" by typed opcodes to uses and defs in
   * non-typed opcodes (such as MOVE).  The Setxx(index) helpers are used to set defined
   * types on typed opcodes (such as ADD_INT).  The Setxx(index, is_xx) form is used to
   * propagate types through non-typed opcodes such as PHI and MOVE.  The is_xx flag
   * tells whether our guess of the type is based on a previously typed definition.
   * If so, the defined type takes precedence.  Note that it's possible to have the same sreg
   * show multiple defined types because dx treats constants as untyped bit patterns.
   * The return value of the Setxx() helpers says whether or not the Setxx() action changed
   * the current guess, and is used to know when to terminate the iterative walk.
   */
  bool SetFp(int index, bool is_fp);
  bool SetFp(int index);
  bool SetCore(int index, bool is_core);
  bool SetCore(int index);
  bool SetRef(int index, bool is_ref);
  bool SetRef(int index);
  bool SetWide(int index, bool is_wide);
  bool SetWide(int index);
  bool SetHigh(int index, bool is_high);
  bool SetHigh(int index);
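
  /*
   * Illustrative sketch of the iterative walk described above (not actual
   * compiler code; is_fp_guess stands for whatever current guess the pass has
   * recorded for the source operand): a typed opcode such as ADD_FLOAT asserts
   * a definite type,
   *   changed |= SetFp(ssa_rep->defs[0]);
   * while an untyped MOVE merely propagates the existing guess,
   *   changed |= SetFp(ssa_rep->defs[0], is_fp_guess);
   * and the walk repeats until no Setxx() call reports a change.
   */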

  bool PuntToInterpreter() {
    return punt_to_interpreter_;
  }

  void SetPuntToInterpreter(bool val) {
    punt_to_interpreter_ = val;
  }

  char* GetDalvikDisassembly(const MIR* mir);
  void ReplaceSpecialChars(std::string& str);
  std::string GetSSAName(int ssa_reg);
  std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
  void GetBlockName(BasicBlock* bb, char* name);
  const char* GetShortyFromTargetIdx(int);
  void DumpMIRGraph();
  CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
  BasicBlock* NewMemBB(BBType block_type, int block_id);
  MIR* NewMIR();
  MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir);
  BasicBlock* NextDominatedBlock(BasicBlock* bb);
  bool LayoutBlocks(BasicBlock* bb);
  void ComputeTopologicalSortOrder();
  BasicBlock* CreateNewBB(BBType block_type);

  bool InlineSpecialMethodsGate();
  void InlineSpecialMethodsStart();
  void InlineSpecialMethods(BasicBlock* bb);
  void InlineSpecialMethodsEnd();

  /**
   * @brief Perform the initial preparation for the Method Uses.
   */
  void InitializeMethodUses();

  /**
   * @brief Perform the initial preparation for the Constant Propagation.
   */
  void InitializeConstantPropagation();

  /**
   * @brief Perform the initial preparation for the SSA Transformation.
   */
  void SSATransformationStart();

  /**
   * @brief Insert the operands for the Phi nodes.
   * @param bb the considered BasicBlock.
   * @return true
   */
  bool InsertPhiNodeOperands(BasicBlock* bb);

  /**
   * @brief Perform the cleanup after the SSA Transformation.
   */
  void SSATransformationEnd();

  /**
   * @brief Perform constant propagation on a BasicBlock.
   * @param bb the considered BasicBlock.
   */
  void DoConstantPropagation(BasicBlock* bb);

  /**
   * @brief Count the uses in the BasicBlock.
   * @param bb the BasicBlock.
   */
  void CountUses(struct BasicBlock* bb);

  static uint64_t GetDataFlowAttributes(Instruction::Code opcode);
  static uint64_t GetDataFlowAttributes(MIR* mir);

  /**
   * @brief Combine BasicBlocks.
   * @param bb the BasicBlock we are considering.
   */
  void CombineBlocks(BasicBlock* bb);

  void ClearAllVisitedFlags();

  void AllocateSSAUseData(MIR *mir, int num_uses);
  void AllocateSSADefData(MIR *mir, int num_defs);
  void CalculateBasicBlockInformation();
  void InitializeBasicBlockData();
  void ComputeDFSOrders();
  void ComputeDefBlockMatrix();
  void ComputeDominators();
  void CompilerInitializeSSAConversion();
  void InsertPhiNodes();
  void DoDFSPreOrderSSARename(BasicBlock* block);

  /*
   * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
   * we can verify that all catch entries have native PC entries.
   */
  std::set<uint32_t> catches_;

  // TODO: make these private.
  RegLocation* reg_location_;                               // Map SSA names to location.
  ArenaSafeMap<unsigned int, unsigned int> block_id_map_;   // Block collapse lookup cache.

  static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
  static const uint32_t analysis_attributes_[kMirOpLast];

  void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
  bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed);

  // Used for removing redundant suspend tests.
  void AppendGenSuspendTestList(BasicBlock* bb) {
    if (gen_suspend_test_list_.Size() == 0 ||
        gen_suspend_test_list_.Get(gen_suspend_test_list_.Size() - 1) != bb) {
      gen_suspend_test_list_.Insert(bb);
    }
  }

  /* This is used to check if there is already a method call dominating the
   * source basic block of a backedge and being dominated by the target basic
   * block of the backedge.
   */
  bool HasSuspendTestBetween(BasicBlock* source, BasicBlockId target_id);

 protected:
  int FindCommonParent(int block1, int block2);
  void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
                         const ArenaBitVector* src2);
  void HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
                       ArenaBitVector* live_in_v, int dalvik_reg_id);
  void HandleDef(ArenaBitVector* def_v, int dalvik_reg_id);
  void HandleExtended(ArenaBitVector* use_v, ArenaBitVector* def_v,
                      ArenaBitVector* live_in_v,
                      const MIR::DecodedInstruction& d_insn);
  bool DoSSAConversion(BasicBlock* bb);
  bool InvokeUsesMethodStar(MIR* mir);
  int ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction);
  bool ContentIsInsn(const uint16_t* code_ptr);
  BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
                         BasicBlock** immed_pred_block_p);
  BasicBlock* FindBlock(DexOffset code_offset, bool split, bool create,
                        BasicBlock** immed_pred_block_p);
  void ProcessTryCatchBlocks();
  bool IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset, NarrowDexOffset catch_offset);
  BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
                               int flags, const uint16_t* code_ptr, const uint16_t* code_end);
  BasicBlock* ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
                               int flags);
  BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
                              int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
                              const uint16_t* code_end);
  int AddNewSReg(int v_reg);
  void HandleSSAUse(int* uses, int dalvik_reg, int reg_index);
  void DataFlowSSAFormat35C(MIR* mir);
  void DataFlowSSAFormat3RC(MIR* mir);
  void DataFlowSSAFormatExtended(MIR* mir);
  bool FindLocalLiveIn(BasicBlock* bb);
  bool VerifyPredInfo(BasicBlock* bb);
  BasicBlock* NeedsVisit(BasicBlock* bb);
  BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb);
  void MarkPreOrder(BasicBlock* bb);
  void RecordDFSOrders(BasicBlock* bb);
  void ComputeDomPostOrderTraversal(BasicBlock* bb);
  void SetConstant(int32_t ssa_reg, int value);
  void SetConstantWide(int ssa_reg, int64_t value);
  int GetSSAUseCount(int s_reg);
  bool BasicBlockOpt(BasicBlock* bb);
  bool BuildExtendedBBList(struct BasicBlock* bb);
  bool FillDefBlockMatrix(BasicBlock* bb);
  void InitializeDominationInfo(BasicBlock* bb);
  bool ComputeblockIDom(BasicBlock* bb);
  bool ComputeBlockDominators(BasicBlock* bb);
  bool SetDominators(BasicBlock* bb);
  bool ComputeBlockLiveIns(BasicBlock* bb);
  bool ComputeDominanceFrontier(BasicBlock* bb);

  void CountChecks(BasicBlock* bb);
  void AnalyzeBlock(BasicBlock* bb, struct MethodStats* stats);
  bool ComputeSkipCompilation(struct MethodStats* stats, bool skip_default,
                              std::string* skip_message);

  CompilationUnit* const cu_;
  GrowableArray<int>* ssa_base_vregs_;
  GrowableArray<int>* ssa_subscripts_;
  // Map original Dalvik virtual reg i to the current SSA name.
  int* vreg_to_ssa_map_;            // length == method->registers_size
  int* ssa_last_defs_;              // length == method->registers_size
  ArenaBitVector* is_constant_v_;   // length == num_ssa_reg
  int* constant_values_;            // length == num_ssa_reg
  // Use counts of ssa names.
  GrowableArray<uint32_t> use_counts_;      // Weighted by nesting depth
  GrowableArray<uint32_t> raw_use_counts_;  // Not weighted
  unsigned int num_reachable_blocks_;
  unsigned int max_num_reachable_blocks_;
  GrowableArray<BasicBlockId>* dfs_order_;
  GrowableArray<BasicBlockId>* dfs_post_order_;
  GrowableArray<BasicBlockId>* dom_post_order_traversal_;
  GrowableArray<BasicBlockId>* topological_order_;
  // Indexes in topological_order_ need to be only as big as the BasicBlockId.
  COMPILE_ASSERT(sizeof(BasicBlockId) == sizeof(uint16_t), assuming_16_bit_BasicBlockId);
  // For each loop head, remember the past-the-end index of the end of the loop. 0 if not loop head.
  GrowableArray<uint16_t>* topological_order_loop_ends_;
  // Map BB ids to topological_order_ indexes. 0xffff if not included (hidden or null block).
  GrowableArray<uint16_t>* topological_order_indexes_;
  // Stack of the loop head indexes and recalculation flags for RepeatingTopologicalSortIterator.
  GrowableArray<std::pair<uint16_t, bool>>* topological_order_loop_head_stack_;
  int* i_dom_list_;
  ArenaBitVector** def_block_matrix_;    // num_dalvik_register x num_blocks.
  std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
  uint16_t* temp_insn_data_;
  uint32_t temp_bit_vector_size_;
  ArenaBitVector* temp_bit_vector_;
  std::unique_ptr<GlobalValueNumbering> temp_gvn_;
  static const int kInvalidEntry = -1;
  GrowableArray<BasicBlock*> block_list_;
  ArenaBitVector* try_block_addr_;
  BasicBlock* entry_block_;
  BasicBlock* exit_block_;
  unsigned int num_blocks_;
  const DexFile::CodeItem* current_code_item_;
  GrowableArray<uint16_t> dex_pc_to_block_map_;  // FindBlock lookup cache.
  ArenaVector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
  typedef std::pair<int, int> MIRLocation;       // Insert point, (m_unit_ index, offset)
  ArenaVector<MIRLocation> method_stack_;        // Include stack
  int current_method_;
  DexOffset current_offset_;                     // Offset in code units
  int def_count_;                                // Used to estimate size of ssa name storage.
  int* opcode_count_;                            // Dex opcode coverage stats.
  int num_ssa_regs_;                             // Number of names following SSA transformation.
  ArenaVector<BasicBlockId> extended_basic_blocks_;  // Heads of block "traces".
  int method_sreg_;
  unsigned int attributes_;
  Checkstats* checkstats_;
  ArenaAllocator* arena_;
  int backward_branches_;
  int forward_branches_;
  GrowableArray<CompilerTemp*> compiler_temps_;
  size_t num_non_special_compiler_temps_;
  size_t max_available_non_special_compiler_temps_;
  size_t max_available_special_compiler_temps_;
  bool punt_to_interpreter_;                    // Difficult or not worthwhile - just interpret.
  uint64_t merged_df_flags_;
  GrowableArray<MirIFieldLoweringInfo> ifield_lowering_infos_;
  GrowableArray<MirSFieldLoweringInfo> sfield_lowering_infos_;
  GrowableArray<MirMethodLoweringInfo> method_lowering_infos_;
  static const uint64_t oat_data_flow_attributes_[kMirOpLast];
  GrowableArray<BasicBlock*> gen_suspend_test_list_;  // List of blocks containing suspend tests

  friend class ClassInitCheckEliminationTest;
  friend class GlobalValueNumberingTest;
  friend class LocalValueNumberingTest;
  friend class TopologicalSortOrderTest;
};

}  // namespace art

#endif  // ART_COMPILER_DEX_MIR_GRAPH_H_