Home | History | Annotate | Download | only in dex
      1 /*
      2  * Copyright (C) 2013 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #ifndef ART_COMPILER_DEX_MIR_GRAPH_H_
     18 #define ART_COMPILER_DEX_MIR_GRAPH_H_
     19 
     20 #include "dex_file.h"
     21 #include "dex_instruction.h"
     22 #include "compiler_ir.h"
     23 #include "arena_bit_vector.h"
     24 #include "growable_array.h"
     25 
     26 namespace art {
     27 
/*
 * Bit positions used to build the per-opcode instruction analysis attribute
 * masks (the AN_* defines below).  An opcode's attributes are formed by
 * OR-ing together the corresponding single-bit masks; see
 * analysis_attributes_[] in MIRGraph.
 */
enum InstructionAnalysisAttributePos {
  kUninterestingOp = 0,   // Opcode of no particular interest to analysis.
  kArithmeticOp,          // Arithmetic operation.
  kFPOp,                  // Floating-point operation.
  kSingleOp,              // Single-precision operand(s).
  kDoubleOp,              // Double-precision operand(s).
  kIntOp,                 // Integer operand(s).
  kLongOp,                // Long (64-bit) operand(s).
  kBranchOp,              // Branch.
  kInvokeOp,              // Invoke.
  kArrayOp,               // Array access.
  kHeavyweightOp,         // Heavyweight operation.
  kSimpleConstOp,         // Simple constant load.
  kMoveOp,                // Register move.
  kSwitch                 // Switch statement.
};
     44 
// Single-bit analysis attribute masks, one per InstructionAnalysisAttributePos
// value above.  Note AN_FLOATMATH aliases AN_FP (both use kFPOp).
#define AN_NONE (1 << kUninterestingOp)
#define AN_MATH (1 << kArithmeticOp)
#define AN_FP (1 << kFPOp)
#define AN_LONG (1 << kLongOp)
#define AN_INT (1 << kIntOp)
#define AN_SINGLE (1 << kSingleOp)
#define AN_DOUBLE (1 << kDoubleOp)
#define AN_FLOATMATH (1 << kFPOp)
#define AN_BRANCH (1 << kBranchOp)
#define AN_INVOKE (1 << kInvokeOp)
#define AN_ARRAYOP (1 << kArrayOp)
#define AN_HEAVYWEIGHT (1 << kHeavyweightOp)
#define AN_SIMPLECONST (1 << kSimpleConstOp)
#define AN_MOVE (1 << kMoveOp)
#define AN_SWITCH (1 << kSwitch)
// Composite mask: operations considered "computational" work.
#define AN_COMPUTATIONAL (AN_MATH | AN_ARRAYOP | AN_MOVE | AN_SIMPLECONST)
     61 
/*
 * Bit positions for the per-opcode dataflow attributes (DF_* masks below).
 * NOTE: all 32 bit positions of an int are in use (kUsesMethodStar is bit 31).
 */
enum DataFlowAttributePos {
  kUA = 0,               // Instruction uses vA.
  kUB,                   // Instruction uses vB.
  kUC,                   // Instruction uses vC.
  kAWide,                // Operand A is a wide (64-bit) register pair.
  kBWide,                // Operand B is a wide (64-bit) register pair.
  kCWide,                // Operand C is a wide (64-bit) register pair.
  kDA,                   // Instruction defines vA.
  kIsMove,               // Instruction is a register move.
  kSetsConst,            // Instruction sets a constant value.
  kFormat35c,            // Uses Dalvik format 35c (variable arg list).
  kFormat3rc,            // Uses Dalvik format 3rc (range arg list).
  kNullCheckSrc0,        // Null check of uses[0].
  kNullCheckSrc1,        // Null check of uses[1].
  kNullCheckSrc2,        // Null check of uses[2].
  kNullCheckOut0,        // Null check out outgoing arg0.
  kDstNonNull,           // May assume dst is non-null.
  kRetNonNull,           // May assume retval is non-null.
  kNullTransferSrc0,     // Object copy src[0] -> dst.
  kNullTransferSrcN,     // Phi null check state transfer.
  kRangeCheckSrc1,       // Range check of uses[1].
  kRangeCheckSrc2,       // Range check of uses[2].
  kRangeCheckSrc3,       // Range check of uses[3].
  kFPA,                  // Operand A is floating point.
  kFPB,                  // Operand B is floating point.
  kFPC,                  // Operand C is floating point.
  kCoreA,                // Operand A is a core (non-FP) value.
  kCoreB,                // Operand B is a core (non-FP) value.
  kCoreC,                // Operand C is a core (non-FP) value.
  kRefA,                 // Operand A is an object reference.
  kRefB,                 // Operand B is an object reference.
  kRefC,                 // Operand C is an object reference.
  kUsesMethodStar,       // Implicit use of Method*.
};
     96 
// Per-opcode dataflow attribute masks, built from the DataFlowAttributePos
// bit positions above; see oat_data_flow_attributes_[] in MIRGraph.
// NOTE(review): DF_UMS is (1 << 31) on a signed int — relies on
// implementation-defined shift behavior; consider unsigned masks.
#define DF_NOP                  0
#define DF_UA                   (1 << kUA)
#define DF_UB                   (1 << kUB)
#define DF_UC                   (1 << kUC)
#define DF_A_WIDE               (1 << kAWide)
#define DF_B_WIDE               (1 << kBWide)
#define DF_C_WIDE               (1 << kCWide)
#define DF_DA                   (1 << kDA)
#define DF_IS_MOVE              (1 << kIsMove)
#define DF_SETS_CONST           (1 << kSetsConst)
#define DF_FORMAT_35C           (1 << kFormat35c)
#define DF_FORMAT_3RC           (1 << kFormat3rc)
#define DF_NULL_CHK_0           (1 << kNullCheckSrc0)
#define DF_NULL_CHK_1           (1 << kNullCheckSrc1)
#define DF_NULL_CHK_2           (1 << kNullCheckSrc2)
#define DF_NULL_CHK_OUT0        (1 << kNullCheckOut0)
#define DF_NON_NULL_DST         (1 << kDstNonNull)
#define DF_NON_NULL_RET         (1 << kRetNonNull)
#define DF_NULL_TRANSFER_0      (1 << kNullTransferSrc0)
#define DF_NULL_TRANSFER_N      (1 << kNullTransferSrcN)
#define DF_RANGE_CHK_1          (1 << kRangeCheckSrc1)
#define DF_RANGE_CHK_2          (1 << kRangeCheckSrc2)
#define DF_RANGE_CHK_3          (1 << kRangeCheckSrc3)
#define DF_FP_A                 (1 << kFPA)
#define DF_FP_B                 (1 << kFPB)
#define DF_FP_C                 (1 << kFPC)
#define DF_CORE_A               (1 << kCoreA)
#define DF_CORE_B               (1 << kCoreB)
#define DF_CORE_C               (1 << kCoreC)
#define DF_REF_A                (1 << kRefA)
#define DF_REF_B                (1 << kRefB)
#define DF_REF_C                (1 << kRefC)
#define DF_UMS                  (1 << kUsesMethodStar)

// Composite masks used by the dataflow and check-elimination passes.
#define DF_HAS_USES             (DF_UA | DF_UB | DF_UC)

#define DF_HAS_DEFS             (DF_DA)

#define DF_HAS_NULL_CHKS        (DF_NULL_CHK_0 | \
                                 DF_NULL_CHK_1 | \
                                 DF_NULL_CHK_2 | \
                                 DF_NULL_CHK_OUT0)

#define DF_HAS_RANGE_CHKS       (DF_RANGE_CHK_1 | \
                                 DF_RANGE_CHK_2 | \
                                 DF_RANGE_CHK_3)

#define DF_HAS_NR_CHKS          (DF_HAS_NULL_CHKS | \
                                 DF_HAS_RANGE_CHKS)

#define DF_A_IS_REG             (DF_UA | DF_DA)
#define DF_B_IS_REG             (DF_UB)
#define DF_C_IS_REG             (DF_UC)
// NOTE(review): DF_IS_GETTER and DF_IS_SETTER are not defined in this file;
// any expansion of DF_IS_GETTER_OR_SETTER will fail to compile unless they
// are defined elsewhere (e.g. compiler_ir.h) — confirm, or retire this macro.
#define DF_IS_GETTER_OR_SETTER  (DF_IS_GETTER | DF_IS_SETTER)
#define DF_USES_FP              (DF_FP_A | DF_FP_B | DF_FP_C)
    152 
// Bit positions for whole-method attributes stored in MIRGraph::attributes_
// (see METHOD_IS_LEAF / METHOD_HAS_LOOP masks below).
enum OatMethodAttributes {
  kIsLeaf,            // Method is leaf.
  kHasLoop,           // Method contains simple loop.
};
    157 
// Masks for OatMethodAttributes bits, tested against MIRGraph::attributes_.
#define METHOD_IS_LEAF          (1 << kIsLeaf)
#define METHOD_HAS_LOOP         (1 << kHasLoop)

// Minimum field size to contain Dalvik v_reg number.
#define VREG_NUM_WIDTH 16

// Sentinel values for not-yet-assigned (or unassignable) registers/offsets.
#define INVALID_SREG (-1)
#define INVALID_VREG (0xFFFFU)
#define INVALID_REG (0xFF)
#define INVALID_OFFSET (0xDEADF00FU)

/* SSA encodings for special registers */
#define SSA_METHOD_BASEREG (-2)
/* First compiler temp basereg, grows smaller */
#define SSA_CTEMP_BASEREG (SSA_METHOD_BASEREG - 1)

// Per-MIR optimization flags stored in MIR::optimization_flags.  The kMIR*
// bit positions are assumed to be declared in an included header
// (presumably compiler_ir.h) — they are not defined in this file.
#define MIR_IGNORE_NULL_CHECK           (1 << kMIRIgnoreNullCheck)
#define MIR_NULL_CHECK_ONLY             (1 << kMIRNullCheckOnly)
#define MIR_IGNORE_RANGE_CHECK          (1 << kMIRIgnoreRangeCheck)
#define MIR_RANGE_CHECK_ONLY            (1 << kMIRRangeCheckOnly)
#define MIR_INLINED                     (1 << kMIRInlined)
#define MIR_INLINED_PRED                (1 << kMIRInlinedPred)
#define MIR_CALLEE                      (1 << kMIRCallee)
#define MIR_IGNORE_SUSPEND_CHECK        (1 << kMIRIgnoreSuspendCheck)
#define MIR_DUP                         (1 << kMIRDup)

// Buffer size callers must provide to MIRGraph::GetBlockName().
#define BLOCK_NAME_LEN 80
    185 
    186 /*
    187  * In general, vreg/sreg describe Dalvik registers that originated with dx.  However,
    188  * it is useful to have compiler-generated temporary registers and have them treated
    189  * in the same manner as dx-generated virtual registers.  This struct records the SSA
    190  * name of compiler-introduced temporaries.
    191  */
struct CompilerTemp {
  int s_reg;  // SSA name assigned to this compiler-generated temporary.
};
    195 
    196 // When debug option enabled, records effectiveness of null and range check elimination.
// When debug option enabled, records effectiveness of null and range check elimination.
struct Checkstats {
  int null_checks;              // Total null checks encountered.
  int null_checks_eliminated;   // Null checks proven unnecessary.
  int range_checks;             // Total range checks encountered.
  int range_checks_eliminated;  // Range checks proven unnecessary.
};
    203 
    204 // Dataflow attributes of a basic block.
// Dataflow attributes of a basic block.
struct BasicBlockDataFlow {
  ArenaBitVector* use_v;                // Dalvik registers used before definition.
  ArenaBitVector* def_v;                // Dalvik registers defined in this block.
  ArenaBitVector* live_in_v;            // Registers live on entry to the block.
  ArenaBitVector* phi_v;                // Registers needing a phi node in this block.
  int* vreg_to_ssa_map;                 // Per-vreg SSA name at block exit.
  ArenaBitVector* ending_null_check_v;  // Null-check state at block exit
                                        // (used by null check elimination).
};
    213 
/*
 * Normalized use/def for a MIR operation using SSA names rather than vregs.  Note that
 * uses/defs retain the Dalvik convention that long operations operate on a pair of 32-bit
 * vregs.  For example, "ADD_LONG v0, v2, v4" would have 2 defs (v0/v1) and 4 uses (v2/v3, v4/v5).
 * Following SSA renaming, this is the primary struct used by code generators to locate
 * operand and result registers.  This is a somewhat confusing and unhelpful convention that
 * we may want to revisit in the future.
 */
struct SSARepresentation {
  int num_uses;    // Number of entries in uses[]/fp_use[].
  int* uses;       // SSA names consumed by the instruction.
  bool* fp_use;    // Parallel to uses[]: true if the use is floating point.
  int num_defs;    // Number of entries in defs[]/fp_def[].
  int* defs;       // SSA names produced by the instruction.
  bool* fp_def;    // Parallel to defs[]: true if the def is floating point.
};
    230 
    231 /*
    232  * The Midlevel Intermediate Representation node, which may be largely considered a
    233  * wrapper around a Dalvik byte code.
    234  */
/*
 * The Midlevel Intermediate Representation node, which may be largely considered a
 * wrapper around a Dalvik byte code.
 */
struct MIR {
  DecodedInstruction dalvikInsn;  // The decoded Dalvik instruction.
  uint32_t width;                 // NOTE: only need 16 bits for width.
  unsigned int offset;            // Code offset of the instruction (code units).
  int m_unit_index;               // From which method was this MIR included
  MIR* prev;                      // Previous MIR within the owning basic block.
  MIR* next;                      // Next MIR within the owning basic block.
  SSARepresentation* ssa_rep;     // Normalized use/def info, filled in by SSA conversion.
  int optimization_flags;         // MIR_* flags (see defines above).
  union {
    // Establish link between two halves of throwing instructions.
    MIR* throw_insn;
    // Saved opcode for NOP'd MIRs
    Instruction::Code original_opcode;
  } meta;
};
    251 
    252 struct SuccessorBlockInfo;
    253 
// A node of the control-flow graph.  MIRs within the block form a
// doubly-linked list bounded by first_mir_insn/last_mir_insn.
struct BasicBlock {
  int id;                           // Index into MIRGraph's block list.
  int dfs_id;                       // Position in depth-first traversal order.
  bool visited;                     // Scratch flag for graph traversals.
  bool hidden;                      // Block removed from the visible CFG.
  bool catch_entry;                 // Block is the entry of a catch handler.
  bool explicit_throw;              // Block ends in an explicit throw.
  bool conditional_branch;          // Block ends in a conditional branch.
  bool terminated_by_return;        // Block ends with a Dalvik return opcode.
  bool dominates_return;            // Is a member of return extended basic block.
  uint16_t start_offset;            // Dalvik code offset of first instruction.
  uint16_t nesting_depth;           // Loop nesting depth (weights use counts).
  BBType block_type;                // Entry/exit/code/exception-handling kind.
  MIR* first_mir_insn;              // Head of the block's MIR list.
  MIR* last_mir_insn;               // Tail of the block's MIR list.
  BasicBlock* fall_through;         // Successor on the not-taken path.
  BasicBlock* taken;                // Successor on the taken branch path.
  BasicBlock* i_dom;                // Immediate dominator.
  BasicBlockDataFlow* data_flow_info;
  GrowableArray<BasicBlock*>* predecessors;
  ArenaBitVector* dominators;       // Set of blocks dominating this one.
  ArenaBitVector* i_dominated;      // Set nodes being immediately dominated.
  ArenaBitVector* dom_frontier;     // Dominance frontier.
  struct {                          // For one-to-many successors like.
    BlockListType block_list_type;  // switch and exception handling.
    GrowableArray<SuccessorBlockInfo*>* blocks;
  } successor_block_list;
};
    282 
/*
 * The "blocks" field in "successor_block_list" points to an array of elements with the type
 * "SuccessorBlockInfo".  For catch blocks, key is type index for the exception.  For switch
 * blocks, key is the case value.
 */
    288 // TODO: make class with placement new.
// TODO: make class with placement new.
struct SuccessorBlockInfo {
  BasicBlock* block;  // The successor block.
  int key;            // Case value (switch) or exception type index (catch).
};
    293 
    294 /*
    295  * Whereas a SSA name describes a definition of a Dalvik vreg, the RegLocation describes
    296  * the type of an SSA name (and, can also be used by code generators to record where the
    297  * value is located (i.e. - physical register, frame, spill, etc.).  For each SSA name (SReg)
    298  * there is a RegLocation.
    299  * FIXME: The orig_sreg field was added as a workaround for llvm bitcode generation.  With
    300  * the latest restructuring, we should be able to remove it and rely on s_reg_low throughout.
    301  */
// Type and physical location of one SSA name; see the block comment above.
// NOTE: field order matters — aggregate initializers (e.g. bad_loc below)
// depend on it.
struct RegLocation {
  RegLocationType location:3;  // Where the value lives (frame, phys reg, ...).
  unsigned wide:1;      // Occupies a 64-bit register pair?
  unsigned defined:1;   // Do we know the type?
  unsigned is_const:1;  // Constant, value in mir_graph->constant_values[].
  unsigned fp:1;        // Floating point?
  unsigned core:1;      // Non-floating point?
  unsigned ref:1;       // Something GC cares about.
  unsigned high_word:1;  // High word of pair?
  unsigned home:1;      // Does this represent the home location?
  uint8_t low_reg;      // First physical register.
  uint8_t high_reg;     // 2nd physical register (if wide).
  int32_t s_reg_low;    // SSA name for low Dalvik word.
  int32_t orig_sreg;    // TODO: remove after Bitcode gen complete
                        // and consolidate usage w/ s_reg_low.
};
    318 
    319 /*
    320  * Collection of information describing an invoke, and the destination of
    321  * the subsequent MOVE_RESULT (if applicable).  Collected as a unit to enable
    322  * more efficient invoke code generation.
    323  */
/*
 * Collection of information describing an invoke, and the destination of
 * the subsequent MOVE_RESULT (if applicable).  Collected as a unit to enable
 * more efficient invoke code generation.
 */
struct CallInfo {
  int num_arg_words;    // Note: word count, not arg count.
  RegLocation* args;    // One for each word of arguments.
  RegLocation result;   // Eventual target of MOVE_RESULT.
  int opt_flags;        // MIR optimization flags of the invoke.
  InvokeType type;      // Static/direct/virtual/super/interface.
  uint32_t dex_idx;     // Dex index of the invoked method.
  uint32_t index;       // Method idx for invokes, type idx for FilledNewArray.
  uintptr_t direct_code;    // Known code address, if resolvable at compile time.
  uintptr_t direct_method;  // Known Method* address, if resolvable.
  RegLocation target;    // Target of following move_result.
  bool skip_this;        // Implicit "this" arg needs no null check/copy.
  bool is_range;         // Range (3rc) form rather than arg-list (35c) form.
  int offset;            // Dalvik offset.
};
    339 
    340 
// Canonical "invalid" location: an undefined, non-wide Dalvik-frame slot
// with invalid physical registers and SSA names.  Initializer order must
// match the field order of RegLocation.
const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
                             INVALID_REG, INVALID_REG, INVALID_SREG, INVALID_SREG};
    343 
    344 class MIRGraph {
    345  public:
    346   MIRGraph(CompilationUnit* cu, ArenaAllocator* arena);
    347   ~MIRGraph();
    348 
    349   /*
    350    * Examine the graph to determine whether it's worthwile to spend the time compiling
    351    * this method.
    352    */
    353   bool SkipCompilation(Runtime::CompilerFilter compiler_filter);
    354 
    355   /*
    356    * Parse dex method and add MIR at current insert point.  Returns id (which is
    357    * actually the index of the method in the m_units_ array).
    358    */
    359   void InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
    360                     InvokeType invoke_type, uint16_t class_def_idx,
    361                     uint32_t method_idx, jobject class_loader, const DexFile& dex_file);
    362 
    363   /* Find existing block */
    364   BasicBlock* FindBlock(unsigned int code_offset) {
    365     return FindBlock(code_offset, false, false, NULL);
    366   }
    367 
    368   const uint16_t* GetCurrentInsns() const {
    369     return current_code_item_->insns_;
    370   }
    371 
    372   const uint16_t* GetInsns(int m_unit_index) const {
    373     return m_units_[m_unit_index]->GetCodeItem()->insns_;
    374   }
    375 
    376   int GetNumBlocks() const {
    377     return num_blocks_;
    378   }
    379 
    380   size_t GetNumDalvikInsns() const {
    381     return cu_->code_item->insns_size_in_code_units_;
    382   }
    383 
    384   ArenaBitVector* GetTryBlockAddr() const {
    385     return try_block_addr_;
    386   }
    387 
    388   BasicBlock* GetEntryBlock() const {
    389     return entry_block_;
    390   }
    391 
    392   BasicBlock* GetExitBlock() const {
    393     return exit_block_;
    394   }
    395 
    396   BasicBlock* GetBasicBlock(int block_id) const {
    397     return block_list_.Get(block_id);
    398   }
    399 
    400   size_t GetBasicBlockListCount() const {
    401     return block_list_.Size();
    402   }
    403 
    404   GrowableArray<BasicBlock*>* GetBlockList() {
    405     return &block_list_;
    406   }
    407 
    408   GrowableArray<int>* GetDfsOrder() {
    409     return dfs_order_;
    410   }
    411 
    412   GrowableArray<int>* GetDfsPostOrder() {
    413     return dfs_post_order_;
    414   }
    415 
    416   GrowableArray<int>* GetDomPostOrder() {
    417     return dom_post_order_traversal_;
    418   }
    419 
    420   int GetDefCount() const {
    421     return def_count_;
    422   }
    423 
    424   ArenaAllocator* GetArena() {
    425     return arena_;
    426   }
    427 
    428   void EnableOpcodeCounting() {
    429     opcode_count_ = static_cast<int*>(arena_->Alloc(kNumPackedOpcodes * sizeof(int),
    430                                                     ArenaAllocator::kAllocMisc));
    431   }
    432 
    433   void ShowOpcodeStats();
    434 
    435   DexCompilationUnit* GetCurrentDexCompilationUnit() const {
    436     return m_units_[current_method_];
    437   }
    438 
    439   void DumpCFG(const char* dir_prefix, bool all_blocks);
    440 
    441   void BuildRegLocations();
    442 
    443   void DumpRegLocTable(RegLocation* table, int count);
    444 
    445   void BasicBlockOptimization();
    446 
    447   bool IsConst(int32_t s_reg) const {
    448     return is_constant_v_->IsBitSet(s_reg);
    449   }
    450 
    451   bool IsConst(RegLocation loc) const {
    452     return (IsConst(loc.orig_sreg));
    453   }
    454 
    455   int32_t ConstantValue(RegLocation loc) const {
    456     DCHECK(IsConst(loc));
    457     return constant_values_[loc.orig_sreg];
    458   }
    459 
    460   int32_t ConstantValue(int32_t s_reg) const {
    461     DCHECK(IsConst(s_reg));
    462     return constant_values_[s_reg];
    463   }
    464 
    465   int64_t ConstantValueWide(RegLocation loc) const {
    466     DCHECK(IsConst(loc));
    467     return (static_cast<int64_t>(constant_values_[loc.orig_sreg + 1]) << 32) |
    468         Low32Bits(static_cast<int64_t>(constant_values_[loc.orig_sreg]));
    469   }
    470 
    471   bool IsConstantNullRef(RegLocation loc) const {
    472     return loc.ref && loc.is_const && (ConstantValue(loc) == 0);
    473   }
    474 
    475   int GetNumSSARegs() const {
    476     return num_ssa_regs_;
    477   }
    478 
    479   void SetNumSSARegs(int new_num) {
    480     num_ssa_regs_ = new_num;
    481   }
    482 
    483   unsigned int GetNumReachableBlocks() const {
    484     return num_reachable_blocks_;
    485   }
    486 
    487   int GetUseCount(int vreg) const {
    488     return use_counts_.Get(vreg);
    489   }
    490 
    491   int GetRawUseCount(int vreg) const {
    492     return raw_use_counts_.Get(vreg);
    493   }
    494 
    495   int GetSSASubscript(int ssa_reg) const {
    496     return ssa_subscripts_->Get(ssa_reg);
    497   }
    498 
    499   RegLocation GetRawSrc(MIR* mir, int num) {
    500     DCHECK(num < mir->ssa_rep->num_uses);
    501     RegLocation res = reg_location_[mir->ssa_rep->uses[num]];
    502     return res;
    503   }
    504 
    505   RegLocation GetRawDest(MIR* mir) {
    506     DCHECK_GT(mir->ssa_rep->num_defs, 0);
    507     RegLocation res = reg_location_[mir->ssa_rep->defs[0]];
    508     return res;
    509   }
    510 
    511   RegLocation GetDest(MIR* mir) {
    512     RegLocation res = GetRawDest(mir);
    513     DCHECK(!res.wide);
    514     return res;
    515   }
    516 
    517   RegLocation GetSrc(MIR* mir, int num) {
    518     RegLocation res = GetRawSrc(mir, num);
    519     DCHECK(!res.wide);
    520     return res;
    521   }
    522 
    523   RegLocation GetDestWide(MIR* mir) {
    524     RegLocation res = GetRawDest(mir);
    525     DCHECK(res.wide);
    526     return res;
    527   }
    528 
    529   RegLocation GetSrcWide(MIR* mir, int low) {
    530     RegLocation res = GetRawSrc(mir, low);
    531     DCHECK(res.wide);
    532     return res;
    533   }
    534 
    535   RegLocation GetBadLoc() {
    536     return bad_loc;
    537   }
    538 
    539   int GetMethodSReg() {
    540     return method_sreg_;
    541   }
    542 
    543   bool MethodIsLeaf() {
    544     return attributes_ & METHOD_IS_LEAF;
    545   }
    546 
    547   RegLocation GetRegLocation(int index) {
    548     DCHECK((index >= 0) && (index > num_ssa_regs_));
    549     return reg_location_[index];
    550   }
    551 
    552   RegLocation GetMethodLoc() {
    553     return reg_location_[method_sreg_];
    554   }
    555 
    556   bool IsSpecialCase() {
    557     return special_case_ != kNoHandler;
    558   }
    559 
    560   SpecialCaseHandler GetSpecialCase() {
    561     return special_case_;
    562   }
    563 
    564   bool IsBackedge(BasicBlock* branch_bb, BasicBlock* target_bb) {
    565     return ((target_bb != NULL) && (target_bb->start_offset <= branch_bb->start_offset));
    566   }
    567 
    568   bool IsBackwardsBranch(BasicBlock* branch_bb) {
    569     return IsBackedge(branch_bb, branch_bb->taken) || IsBackedge(branch_bb, branch_bb->fall_through);
    570   }
    571 
    572   void BasicBlockCombine();
    573   void CodeLayout();
    574   void DumpCheckStats();
    575   void PropagateConstants();
    576   MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
    577   int SRegToVReg(int ssa_reg) const;
    578   void VerifyDataflow();
    579   void MethodUseCount();
    580   void SSATransformation();
    581   void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
    582   void NullCheckElimination();
    583   bool SetFp(int index, bool is_fp);
    584   bool SetCore(int index, bool is_core);
    585   bool SetRef(int index, bool is_ref);
    586   bool SetWide(int index, bool is_wide);
    587   bool SetHigh(int index, bool is_high);
    588   void AppendMIR(BasicBlock* bb, MIR* mir);
    589   void PrependMIR(BasicBlock* bb, MIR* mir);
    590   void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir);
    591   char* GetDalvikDisassembly(const MIR* mir);
    592   void ReplaceSpecialChars(std::string& str);
    593   std::string GetSSAName(int ssa_reg);
    594   std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
    595   void GetBlockName(BasicBlock* bb, char* name);
    596   const char* GetShortyFromTargetIdx(int);
    597   void DumpMIRGraph();
    598   CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
    599   BasicBlock* NewMemBB(BBType block_type, int block_id);
    600 
    601   /*
    602    * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
    603    * we can verify that all catch entries have native PC entries.
    604    */
    605   std::set<uint32_t> catches_;
    606 
    607   // TODO: make these private.
    608   RegLocation* reg_location_;                         // Map SSA names to location.
    609   GrowableArray<CompilerTemp*> compiler_temps_;
    610   SafeMap<unsigned int, unsigned int> block_id_map_;  // Block collapse lookup cache.
    611 
    612   static const int oat_data_flow_attributes_[kMirOpLast];
    613   static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
    614   static const uint32_t analysis_attributes_[kMirOpLast];
    615 
    616  private:
    617   int FindCommonParent(int block1, int block2);
    618   void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
    619                          const ArenaBitVector* src2);
    620   void HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
    621                        ArenaBitVector* live_in_v, int dalvik_reg_id);
    622   void HandleDef(ArenaBitVector* def_v, int dalvik_reg_id);
    623   void CompilerInitializeSSAConversion();
    624   bool DoSSAConversion(BasicBlock* bb);
    625   bool InvokeUsesMethodStar(MIR* mir);
    626   int ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction);
    627   bool ContentIsInsn(const uint16_t* code_ptr);
    628   BasicBlock* SplitBlock(unsigned int code_offset, BasicBlock* orig_block,
    629                          BasicBlock** immed_pred_block_p);
    630   BasicBlock* FindBlock(unsigned int code_offset, bool split, bool create,
    631                         BasicBlock** immed_pred_block_p);
    632   void ProcessTryCatchBlocks();
    633   BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
    634                                int flags, const uint16_t* code_ptr, const uint16_t* code_end);
    635   void ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, int flags);
    636   BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
    637                               int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
    638                               const uint16_t* code_end);
    639   int AddNewSReg(int v_reg);
    640   void HandleSSAUse(int* uses, int dalvik_reg, int reg_index);
    641   void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
    642   void DataFlowSSAFormat35C(MIR* mir);
    643   void DataFlowSSAFormat3RC(MIR* mir);
    644   bool FindLocalLiveIn(BasicBlock* bb);
    645   void ClearAllVisitedFlags();
    646   bool CountUses(struct BasicBlock* bb);
    647   bool InferTypeAndSize(BasicBlock* bb);
    648   bool VerifyPredInfo(BasicBlock* bb);
    649   BasicBlock* NeedsVisit(BasicBlock* bb);
    650   BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb);
    651   void MarkPreOrder(BasicBlock* bb);
    652   void RecordDFSOrders(BasicBlock* bb);
    653   void ComputeDFSOrders();
    654   void ComputeDefBlockMatrix();
    655   void ComputeDomPostOrderTraversal(BasicBlock* bb);
    656   void ComputeDominators();
    657   void InsertPhiNodes();
    658   void DoDFSPreOrderSSARename(BasicBlock* block);
    659   void SetConstant(int32_t ssa_reg, int value);
    660   void SetConstantWide(int ssa_reg, int64_t value);
    661   int GetSSAUseCount(int s_reg);
    662   bool BasicBlockOpt(BasicBlock* bb);
    663   bool EliminateNullChecks(BasicBlock* bb);
    664   void NullCheckEliminationInit(BasicBlock* bb);
    665   bool BuildExtendedBBList(struct BasicBlock* bb);
    666   bool FillDefBlockMatrix(BasicBlock* bb);
    667   void InitializeDominationInfo(BasicBlock* bb);
    668   bool ComputeblockIDom(BasicBlock* bb);
    669   bool ComputeBlockDominators(BasicBlock* bb);
    670   bool SetDominators(BasicBlock* bb);
    671   bool ComputeBlockLiveIns(BasicBlock* bb);
    672   bool InsertPhiNodeOperands(BasicBlock* bb);
    673   bool ComputeDominanceFrontier(BasicBlock* bb);
    674   void DoConstantPropogation(BasicBlock* bb);
    675   void CountChecks(BasicBlock* bb);
    676   bool CombineBlocks(BasicBlock* bb);
    677   void AnalyzeBlock(BasicBlock* bb, struct MethodStats* stats);
    678   bool ComputeSkipCompilation(struct MethodStats* stats, bool skip_default);
    679 
    680   CompilationUnit* const cu_;
    681   GrowableArray<int>* ssa_base_vregs_;
    682   GrowableArray<int>* ssa_subscripts_;
    683   // Map original Dalvik virtual reg i to the current SSA name.
    684   int* vreg_to_ssa_map_;            // length == method->registers_size
    685   int* ssa_last_defs_;              // length == method->registers_size
    686   ArenaBitVector* is_constant_v_;   // length == num_ssa_reg
    687   int* constant_values_;            // length == num_ssa_reg
    688   // Use counts of ssa names.
    689   GrowableArray<uint32_t> use_counts_;      // Weighted by nesting depth
    690   GrowableArray<uint32_t> raw_use_counts_;  // Not weighted
    691   unsigned int num_reachable_blocks_;
    692   GrowableArray<int>* dfs_order_;
    693   GrowableArray<int>* dfs_post_order_;
    694   GrowableArray<int>* dom_post_order_traversal_;
    695   int* i_dom_list_;
    696   ArenaBitVector** def_block_matrix_;    // num_dalvik_register x num_blocks.
    697   ArenaBitVector* temp_block_v_;
    698   ArenaBitVector* temp_dalvik_register_v_;
    699   ArenaBitVector* temp_ssa_register_v_;  // num_ssa_regs.
    700   static const int kInvalidEntry = -1;
    701   GrowableArray<BasicBlock*> block_list_;
    702   ArenaBitVector* try_block_addr_;
    703   BasicBlock* entry_block_;
    704   BasicBlock* exit_block_;
    705   BasicBlock* cur_block_;
    706   int num_blocks_;
    707   const DexFile::CodeItem* current_code_item_;
    708   SafeMap<unsigned int, BasicBlock*> block_map_;  // FindBlock lookup cache.
    709   std::vector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
    710   typedef std::pair<int, int> MIRLocation;       // Insert point, (m_unit_ index, offset)
    711   std::vector<MIRLocation> method_stack_;        // Include stack
    712   int current_method_;
    713   int current_offset_;
    714   int def_count_;                                // Used to estimate size of ssa name storage.
    715   int* opcode_count_;                            // Dex opcode coverage stats.
    716   int num_ssa_regs_;                             // Number of names following SSA transformation.
    717   std::vector<BasicBlock*> extended_basic_blocks_;  // Heads of block "traces".
    718   int method_sreg_;
    719   unsigned int attributes_;
    720   Checkstats* checkstats_;
    721   SpecialCaseHandler special_case_;
    722   ArenaAllocator* arena_;
    723 };
    724 
    725 }  // namespace art
    726 
    727 #endif  // ART_COMPILER_DEX_MIR_GRAPH_H_
    728