/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "compiler_internals.h"
#include "global_value_numbering.h"
#include "local_value_numbering.h"
#include "dataflow_iterator-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "utils/scoped_arena_containers.h"

namespace art {

static unsigned int Predecessors(BasicBlock* bb) {
  return bb->predecessors->Size();
}

/* Set up a constant value for opcodes that have the DF_SETS_CONST attribute */
void MIRGraph::SetConstant(int32_t ssa_reg, int value) {
  is_constant_v_->SetBit(ssa_reg);
  constant_values_[ssa_reg] = value;
}

void MIRGraph::SetConstantWide(int ssa_reg, int64_t value) {
  is_constant_v_->SetBit(ssa_reg);
  is_constant_v_->SetBit(ssa_reg + 1);
  constant_values_[ssa_reg] = Low32Bits(value);
  constant_values_[ssa_reg + 1] = High32Bits(value);
}

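// Editorial sketch (assumed bytecode, illustration only) of the propagation below:
//   const/16 v0, #42   -> SetConstant(s0, 42) via DF_SETS_CONST
//   move v1, v0        -> s1 inherits 42 via DF_IS_MOVE
// SetConstantWide above behaves likewise but records Low32Bits/High32Bits of
// the 64-bit value in two consecutive SSA slots.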
void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
  MIR* mir;

  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    // Skip pass if BB has MIR without SSA representation.
    if (mir->ssa_rep == nullptr) {
      return;
    }

    uint64_t df_attributes = GetDataFlowAttributes(mir);

    MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;

    if (!(df_attributes & DF_HAS_DEFS)) continue;

    /* Handle instructions that set up constants directly */
    if (df_attributes & DF_SETS_CONST) {
      if (df_attributes & DF_DA) {
        int32_t vB = static_cast<int32_t>(d_insn->vB);
        switch (d_insn->opcode) {
          case Instruction::CONST_4:
          case Instruction::CONST_16:
          case Instruction::CONST:
            SetConstant(mir->ssa_rep->defs[0], vB);
            break;
          case Instruction::CONST_HIGH16:
            SetConstant(mir->ssa_rep->defs[0], vB << 16);
            break;
          case Instruction::CONST_WIDE_16:
          case Instruction::CONST_WIDE_32:
            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB));
            break;
          case Instruction::CONST_WIDE:
            SetConstantWide(mir->ssa_rep->defs[0], d_insn->vB_wide);
            break;
          case Instruction::CONST_WIDE_HIGH16:
            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB) << 48);
            break;
          default:
            break;
        }
      }
    } else if (df_attributes & DF_IS_MOVE) {
      int i;

      for (i = 0; i < mir->ssa_rep->num_uses; i++) {
        if (!is_constant_v_->IsBitSet(mir->ssa_rep->uses[i])) break;
      }
      /* Move a register holding a constant to another register */
      if (i == mir->ssa_rep->num_uses) {
        SetConstant(mir->ssa_rep->defs[0], constant_values_[mir->ssa_rep->uses[0]]);
        if (df_attributes & DF_A_WIDE) {
          SetConstant(mir->ssa_rep->defs[1], constant_values_[mir->ssa_rep->uses[1]]);
        }
      }
    }
  }
  /* TODO: implement code to handle arithmetic operations */
}

/* Advance to next strictly dominated MIR node in an extended basic block */
MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
  BasicBlock* bb = *p_bb;
  if (mir != NULL) {
    mir = mir->next;
    if (mir == NULL) {
      bb = GetBasicBlock(bb->fall_through);
      if ((bb == NULL) || Predecessors(bb) != 1) {
        mir = NULL;
      } else {
        *p_bb = bb;
        mir = bb->first_mir_insn;
      }
    }
  }
  return mir;
}

/*
 * To be used at an invoke mir.  If the logically next mir node represents
 * a move-result, return it.  Else, return NULL.  If a move-result exists,
 * it is required to immediately follow the invoke with no intervening
 * opcodes or incoming arcs.  However, if the result of the invoke is not
 * used, a move-result may not be present.
 */
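// Editorial example (assumed bytecode): for
//   invoke-virtual {v1}, LFoo;->bar()I
//   move-result v0
// FindMoveResult returns the move-result MIR; if the invoke's result is
// unused there may be no such MIR and NULL is returned.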
MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
  BasicBlock* tbb = bb;
  mir = AdvanceMIR(&tbb, mir);
  while (mir != NULL) {
    if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
        (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
        (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
      break;
    }
    // Keep going if pseudo op, otherwise terminate
    if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
      mir = AdvanceMIR(&tbb, mir);
    } else {
      mir = NULL;
    }
  }
  return mir;
}

BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
  if (bb->block_type == kDead) {
    return NULL;
  }
  DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
      || (bb->block_type == kExitBlock));
  BasicBlock* bb_taken = GetBasicBlock(bb->taken);
  BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
  if (((bb_fall_through == NULL) && (bb_taken != NULL)) &&
      ((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
    // Follow simple unconditional branches.
    bb = bb_taken;
  } else {
    // Follow simple fallthrough
    bb = (bb_taken != NULL) ? NULL : bb_fall_through;
  }
  if (bb == NULL || (Predecessors(bb) != 1)) {
    return NULL;
  }
  DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
  return bb;
}

static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
      for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
        if (mir->ssa_rep->uses[i] == ssa_name) {
          return mir;
        }
      }
    }
  }
  return NULL;
}

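// Editorial note: classifies instructions for the select-pattern matching in
// BasicBlockOpt below, e.g. "move v0, v2" -> kSelectMove,
// "const/4 v0, #1" -> kSelectConst, "goto :L" -> kSelectGoto.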
static SelectInstructionKind SelectKind(MIR* mir) {
  switch (mir->dalvikInsn.opcode) {
    case Instruction::MOVE:
    case Instruction::MOVE_OBJECT:
    case Instruction::MOVE_16:
    case Instruction::MOVE_OBJECT_16:
    case Instruction::MOVE_FROM16:
    case Instruction::MOVE_OBJECT_FROM16:
      return kSelectMove;
    case Instruction::CONST:
    case Instruction::CONST_4:
    case Instruction::CONST_16:
      return kSelectConst;
    case Instruction::GOTO:
    case Instruction::GOTO_16:
    case Instruction::GOTO_32:
      return kSelectGoto;
    default:
      return kSelectNone;
  }
}

static constexpr ConditionCode kIfCcZConditionCodes[] = {
    kCondEq, kCondNe, kCondLt, kCondGe, kCondGt, kCondLe
};

COMPILE_ASSERT(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
               if_ccz_ccodes_size1);

static constexpr bool IsInstructionIfCcZ(Instruction::Code opcode) {
  return Instruction::IF_EQZ <= opcode && opcode <= Instruction::IF_LEZ;
}

static constexpr ConditionCode ConditionCodeForIfCcZ(Instruction::Code opcode) {
  return kIfCcZConditionCodes[opcode - Instruction::IF_EQZ];
}

COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_EQZ) == kCondEq, check_if_eqz_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_NEZ) == kCondNe, check_if_nez_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LTZ) == kCondLt, check_if_ltz_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GEZ) == kCondGe, check_if_gez_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, check_if_gtz_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, check_if_lez_ccode);

int MIRGraph::GetSSAUseCount(int s_reg) {
  return raw_use_counts_.Get(s_reg);
}

size_t MIRGraph::GetNumAvailableNonSpecialCompilerTemps() {
  if (num_non_special_compiler_temps_ >= max_available_non_special_compiler_temps_) {
    return 0;
  } else {
    return max_available_non_special_compiler_temps_ - num_non_special_compiler_temps_;
  }
}


// FIXME - will probably need to revisit all uses of this, as type not defined.
static const RegLocation temp_loc = {kLocCompilerTemp,
                                     0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/,
                                     RegStorage(), INVALID_SREG, INVALID_SREG};

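// Editorial note on the wide case handled below: temp v_regs grow downward
// from kVRegNonSpecialTempBaseReg, so for a wide temp the high half keeps the
// first v_reg and the low half is moved to v_reg - 1, keeping the pair
// consecutive with the smaller v_reg as the low part.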
CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide) {
  // There is a limit to the number of non-special temps so check to make sure it wasn't exceeded.
  if (ct_type == kCompilerTempVR) {
    size_t available_temps = GetNumAvailableNonSpecialCompilerTemps();
    if (available_temps <= 0 || (available_temps <= 1 && wide)) {
      return nullptr;
    }
  }

  CompilerTemp *compiler_temp = static_cast<CompilerTemp *>(arena_->Alloc(sizeof(CompilerTemp),
                                                            kArenaAllocRegAlloc));

  // Create the type of temp requested. Special temps need special handling because
  // they have a specific virtual register assignment.
  if (ct_type == kCompilerTempSpecialMethodPtr) {
    DCHECK_EQ(wide, false);
    compiler_temp->v_reg = static_cast<int>(kVRegMethodPtrBaseReg);
    compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);

    // The MIR graph keeps track of the sreg for method pointer specially, so record that now.
    method_sreg_ = compiler_temp->s_reg_low;
  } else {
    DCHECK_EQ(ct_type, kCompilerTempVR);

    // The new non-special compiler temp must receive a unique v_reg with a negative value.
    compiler_temp->v_reg = static_cast<int>(kVRegNonSpecialTempBaseReg) -
        num_non_special_compiler_temps_;
    compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
    num_non_special_compiler_temps_++;

    if (wide) {
      // Create a new CompilerTemp for the high part.
      CompilerTemp *compiler_temp_high =
          static_cast<CompilerTemp *>(arena_->Alloc(sizeof(CompilerTemp), kArenaAllocRegAlloc));
      compiler_temp_high->v_reg = compiler_temp->v_reg;
      compiler_temp_high->s_reg_low = compiler_temp->s_reg_low;
      compiler_temps_.Insert(compiler_temp_high);

      // Ensure that the two registers are consecutive. Since the virtual registers used for temps
      // grow in a negative fashion, we need the smaller to refer to the low part. Thus, we
      // redefine the v_reg and s_reg_low.
      compiler_temp->v_reg--;
      int ssa_reg_high = compiler_temp->s_reg_low;
      compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
      int ssa_reg_low = compiler_temp->s_reg_low;

      // If needed initialize the register location for the high part.
      // The low part is handled later in this method on a common path.
      if (reg_location_ != nullptr) {
        reg_location_[ssa_reg_high] = temp_loc;
        reg_location_[ssa_reg_high].high_word = 1;
        reg_location_[ssa_reg_high].s_reg_low = ssa_reg_low;
        reg_location_[ssa_reg_high].wide = true;
      }

      num_non_special_compiler_temps_++;
    }
  }

  // Have we already allocated the register locations?
  if (reg_location_ != nullptr) {
    int ssa_reg_low = compiler_temp->s_reg_low;
    reg_location_[ssa_reg_low] = temp_loc;
    reg_location_[ssa_reg_low].s_reg_low = ssa_reg_low;
    reg_location_[ssa_reg_low].wide = wide;
  }

  compiler_temps_.Insert(compiler_temp);
  return compiler_temp;
}

/* Do some MIR-level extended basic block optimizations */
bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
  if (bb->block_type == kDead) {
    return true;
  }
  // Don't do a separate LVN if we did the GVN.
  bool use_lvn = bb->use_lvn && (cu_->disable_opt & (1u << kGlobalValueNumbering)) != 0u;
  std::unique_ptr<ScopedArenaAllocator> allocator;
  std::unique_ptr<GlobalValueNumbering> global_valnum;
  std::unique_ptr<LocalValueNumbering> local_valnum;
  if (use_lvn) {
    allocator.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
    global_valnum.reset(new (allocator.get()) GlobalValueNumbering(cu_, allocator.get()));
    local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
                                                                 allocator.get()));
  }
  while (bb != NULL) {
    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
      // TUNING: use the returned value number for CSE.
      if (use_lvn) {
        local_valnum->GetValueNumber(mir);
      }
      // Look for interesting opcodes, skip otherwise
      Instruction::Code opcode = mir->dalvikInsn.opcode;
      switch (opcode) {
        case Instruction::CMPL_FLOAT:
        case Instruction::CMPL_DOUBLE:
        case Instruction::CMPG_FLOAT:
        case Instruction::CMPG_DOUBLE:
        case Instruction::CMP_LONG:
          if ((cu_->disable_opt & (1 << kBranchFusing)) != 0) {
            // Bitcode doesn't allow this optimization.
            break;
          }
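          // Editorial illustration (not in the original source): a pair like
          //   cmp-long v0, v2, v4
          //   if-ltz v0, :target
          // where v0's cmp result has no other use is fused below into
          // kMirOpFusedCmpLong with ccode kCondLt; the cmp becomes a kMirOpNop.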
          if (mir->next != NULL) {
            MIR* mir_next = mir->next;
            // Make sure result of cmp is used by next insn and nowhere else
            if (IsInstructionIfCcZ(mir_next->dalvikInsn.opcode) &&
                (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
                (GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) {
              mir_next->meta.ccode = ConditionCodeForIfCcZ(mir_next->dalvikInsn.opcode);
              switch (opcode) {
                case Instruction::CMPL_FLOAT:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
                  break;
                case Instruction::CMPL_DOUBLE:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
                  break;
                case Instruction::CMPG_FLOAT:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
                  break;
                case Instruction::CMPG_DOUBLE:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
                  break;
                case Instruction::CMP_LONG:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmpLong);
                  break;
                default: LOG(ERROR) << "Unexpected opcode: " << opcode;
              }
              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
              // Copy the SSA information that is relevant.
              mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
              mir_next->ssa_rep->uses = mir->ssa_rep->uses;
              mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
              mir_next->ssa_rep->num_defs = 0;
              mir->ssa_rep->num_uses = 0;
              mir->ssa_rep->num_defs = 0;
              // Copy in the decoded instruction information for potential SSA re-creation.
              mir_next->dalvikInsn.vA = mir->dalvikInsn.vB;
              mir_next->dalvikInsn.vB = mir->dalvikInsn.vC;
            }
          }
          break;
        case Instruction::GOTO:
        case Instruction::GOTO_16:
        case Instruction::GOTO_32:
        case Instruction::IF_EQ:
        case Instruction::IF_NE:
        case Instruction::IF_LT:
        case Instruction::IF_GE:
        case Instruction::IF_GT:
        case Instruction::IF_LE:
        case Instruction::IF_EQZ:
        case Instruction::IF_NEZ:
        case Instruction::IF_LTZ:
        case Instruction::IF_GEZ:
        case Instruction::IF_GTZ:
        case Instruction::IF_LEZ:
          // If we've got a backwards branch to return, no need to suspend check.
          if ((IsBackedge(bb, bb->taken) && GetBasicBlock(bb->taken)->dominates_return) ||
              (IsBackedge(bb, bb->fall_through) &&
               GetBasicBlock(bb->fall_through)->dominates_return)) {
            mir->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
            if (cu_->verbose) {
              LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex
                        << mir->offset;
            }
          }
          break;
        default:
          break;
      }
      // Is this the select pattern?
      // TODO: flesh out support for Mips.  NOTE: llvm's select op doesn't quite work here.
      // TUNING: expand to support IF_xx compare & branches
      if (!cu_->compiler->IsPortable() &&
          (cu_->instruction_set == kArm64 || cu_->instruction_set == kThumb2 ||
           cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
          IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
        BasicBlock* ft = GetBasicBlock(bb->fall_through);
        DCHECK(ft != NULL);
        BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
        BasicBlock* ft_tk = GetBasicBlock(ft->taken);

        BasicBlock* tk = GetBasicBlock(bb->taken);
        DCHECK(tk != NULL);
        BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
        BasicBlock* tk_tk = GetBasicBlock(tk->taken);

        /*
         * In the select pattern, the taken edge goes to a block that unconditionally
         * transfers to the rejoin block and the fall_through edge goes to a block that
         * unconditionally falls through to the rejoin block.
         */
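        // Editorial sketch (assumed CFG, for illustration): bb ends in an
        // if-eqz/if-nez; taken block tk is {move/const, goto}, fall-through
        // block ft is a single move/const, and tk_tk == ft_ft is the rejoin
        // block.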
        if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
            (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
          /*
           * Okay - we have the basic diamond shape.  At the very least, we can eliminate the
           * suspend check on the taken-taken branch back to the join point.
           */
          if (SelectKind(tk->last_mir_insn) == kSelectGoto) {
            tk->last_mir_insn->optimization_flags |= (MIR_IGNORE_SUSPEND_CHECK);
          }

          // TODO: Add logic for LONG.
          // Are the block bodies something we can handle?
          if ((ft->first_mir_insn == ft->last_mir_insn) &&
              (tk->first_mir_insn != tk->last_mir_insn) &&
              (tk->first_mir_insn->next == tk->last_mir_insn) &&
              ((SelectKind(ft->first_mir_insn) == kSelectMove) ||
              (SelectKind(ft->first_mir_insn) == kSelectConst)) &&
              (SelectKind(ft->first_mir_insn) == SelectKind(tk->first_mir_insn)) &&
              (SelectKind(tk->last_mir_insn) == kSelectGoto)) {
            // Almost there.  Are the instructions targeting the same vreg?
            MIR* if_true = tk->first_mir_insn;
            MIR* if_false = ft->first_mir_insn;
            // It's possible that the target of the select isn't used - skip those (rare) cases.
            MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
            if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
              /*
               * We'll convert the IF_EQZ/IF_NEZ to a SELECT.  We need to find the
               * Phi node in the merge block and delete it (while using the SSA name
               * of the merge as the target of the SELECT).  Delete both taken and
               * fallthrough blocks, and set fallthrough to merge block.
               * NOTE: not updating other dataflow info (no longer used at this point).
               * If this changes, need to update i_dom, etc. here (and in CombineBlocks).
               */
              mir->meta.ccode = ConditionCodeForIfCcZ(mir->dalvikInsn.opcode);
              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpSelect);
              bool const_form = (SelectKind(if_true) == kSelectConst);
              if ((SelectKind(if_true) == kSelectMove)) {
                if (IsConst(if_true->ssa_rep->uses[0]) &&
                    IsConst(if_false->ssa_rep->uses[0])) {
                  const_form = true;
                  if_true->dalvikInsn.vB = ConstantValue(if_true->ssa_rep->uses[0]);
                  if_false->dalvikInsn.vB = ConstantValue(if_false->ssa_rep->uses[0]);
                }
              }
              if (const_form) {
                /*
                 * TODO: If both constants are the same value, then instead of generating
                 * a select, we should simply generate a const bytecode. This should be
                 * considered after inlining which can lead to CFG of this form.
                 */
                // "true" set val in vB
                mir->dalvikInsn.vB = if_true->dalvikInsn.vB;
                // "false" set val in vC
                mir->dalvikInsn.vC = if_false->dalvikInsn.vB;
              } else {
                DCHECK_EQ(SelectKind(if_true), kSelectMove);
                DCHECK_EQ(SelectKind(if_false), kSelectMove);
                int* src_ssa =
                    static_cast<int*>(arena_->Alloc(sizeof(int) * 3, kArenaAllocDFInfo));
                src_ssa[0] = mir->ssa_rep->uses[0];
                src_ssa[1] = if_true->ssa_rep->uses[0];
                src_ssa[2] = if_false->ssa_rep->uses[0];
                mir->ssa_rep->uses = src_ssa;
                mir->ssa_rep->num_uses = 3;
              }
              mir->ssa_rep->num_defs = 1;
              mir->ssa_rep->defs =
                  static_cast<int*>(arena_->Alloc(sizeof(int) * 1, kArenaAllocDFInfo));
              mir->ssa_rep->fp_def =
                  static_cast<bool*>(arena_->Alloc(sizeof(bool) * 1, kArenaAllocDFInfo));
              mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
              // Match type of uses to def.
              mir->ssa_rep->fp_use =
                  static_cast<bool*>(arena_->Alloc(sizeof(bool) * mir->ssa_rep->num_uses,
                                                   kArenaAllocDFInfo));
              for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
                mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
              }
              /*
               * There is usually a Phi node in the join block for our two cases.  If the
               * Phi node only contains our two cases as input, we will use the result
               * SSA name of the Phi node as our select result and delete the Phi.  If
               * the Phi node has more than two operands, we will arbitrarily use the SSA
               * name of the "true" path, delete the SSA name of the "false" path from the
               * Phi node (and fix up the incoming arc list).
               */
              if (phi->ssa_rep->num_uses == 2) {
                mir->ssa_rep->defs[0] = phi->ssa_rep->defs[0];
                phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
              } else {
                int dead_def = if_false->ssa_rep->defs[0];
                int live_def = if_true->ssa_rep->defs[0];
                mir->ssa_rep->defs[0] = live_def;
                BasicBlockId* incoming = phi->meta.phi_incoming;
                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
                  if (phi->ssa_rep->uses[i] == live_def) {
                    incoming[i] = bb->id;
                  }
                }
                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
                  if (phi->ssa_rep->uses[i] == dead_def) {
                    int last_slot = phi->ssa_rep->num_uses - 1;
                    phi->ssa_rep->uses[i] = phi->ssa_rep->uses[last_slot];
                    incoming[i] = incoming[last_slot];
                  }
                }
              }
              phi->ssa_rep->num_uses--;
              bb->taken = NullBasicBlockId;
              tk->block_type = kDead;
              for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
                tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
              }
            }
          }
        }
      }
    }
    bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
  }
  if (use_lvn && UNLIKELY(!global_valnum->Good())) {
    LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  }

  return true;
}

/* Collect stats on number of checks removed */
void MIRGraph::CountChecks(struct BasicBlock* bb) {
  if (bb->data_flow_info != NULL) {
    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
      if (mir->ssa_rep == NULL) {
        continue;
      }
      uint64_t df_attributes = GetDataFlowAttributes(mir);
      if (df_attributes & DF_HAS_NULL_CHKS) {
        checkstats_->null_checks++;
        if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
          checkstats_->null_checks_eliminated++;
        }
      }
      if (df_attributes & DF_HAS_RANGE_CHKS) {
        checkstats_->range_checks++;
        if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
          checkstats_->range_checks_eliminated++;
        }
      }
    }
  }
}

/* Try to make common case the fallthrough path. */
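// Editorial example: when a conditional branch's taken edge leads (through
// single-predecessor blocks) to an explicit throw, the condition is inverted
// below, e.g. IF_EQZ becomes IF_NEZ, and taken/fall_through are swapped so
// the common, non-throwing path falls through.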
bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
  // TODO: For now, just looking for direct throws.  Consider generalizing for profile feedback.
  if (!bb->explicit_throw) {
    return false;
  }

  // If we visited it, we are done.
  if (bb->visited) {
    return false;
  }
  bb->visited = true;

  BasicBlock* walker = bb;
  while (true) {
    // Check termination conditions.
    if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
      break;
    }
    BasicBlock* prev = GetBasicBlock(walker->predecessors->Get(0));

    // If we visited the predecessor, we are done.
    if (prev->visited) {
      return false;
    }
    prev->visited = true;

    if (prev->conditional_branch) {
      if (GetBasicBlock(prev->fall_through) == walker) {
        // Already done - return.
        break;
      }
      DCHECK_EQ(walker, GetBasicBlock(prev->taken));
      // Got one.  Flip it and exit.
      Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
      switch (opcode) {
        case Instruction::IF_EQ: opcode = Instruction::IF_NE; break;
        case Instruction::IF_NE: opcode = Instruction::IF_EQ; break;
        case Instruction::IF_LT: opcode = Instruction::IF_GE; break;
        case Instruction::IF_GE: opcode = Instruction::IF_LT; break;
        case Instruction::IF_GT: opcode = Instruction::IF_LE; break;
        case Instruction::IF_LE: opcode = Instruction::IF_GT; break;
        case Instruction::IF_EQZ: opcode = Instruction::IF_NEZ; break;
        case Instruction::IF_NEZ: opcode = Instruction::IF_EQZ; break;
        case Instruction::IF_LTZ: opcode = Instruction::IF_GEZ; break;
        case Instruction::IF_GEZ: opcode = Instruction::IF_LTZ; break;
        case Instruction::IF_GTZ: opcode = Instruction::IF_LEZ; break;
        case Instruction::IF_LEZ: opcode = Instruction::IF_GTZ; break;
        default: LOG(FATAL) << "Unexpected opcode " << opcode;
      }
      prev->last_mir_insn->dalvikInsn.opcode = opcode;
      BasicBlockId t_bb = prev->taken;
      prev->taken = prev->fall_through;
      prev->fall_through = t_bb;
      break;
    }
    walker = prev;
  }
  return false;
}

/* Combine any basic blocks terminated by instructions that we now know can't throw */
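// Editorial sketch: a block ending in kMirOpCheck is merged with its
// fall-through successor when the paired throwing instruction has its null
// and range checks marked ignorable; the exception-edge target becomes kDead
// and the dead block's id is remapped to the parent.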
void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
  // Loop here to allow combining a sequence of blocks
  while (true) {
    // Check termination conditions
    if ((bb->first_mir_insn == NULL)
        || (bb->data_flow_info == NULL)
        || (bb->block_type == kExceptionHandling)
        || (bb->block_type == kExitBlock)
        || (bb->block_type == kDead)
        || (bb->taken == NullBasicBlockId)
        || (GetBasicBlock(bb->taken)->block_type != kExceptionHandling)
        || (bb->successor_block_list_type != kNotUsed)
        || (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
      break;
    }

    // Test the kMirOpCheck instruction
    MIR* mir = bb->last_mir_insn;
    // Grab the attributes from the paired opcode
    MIR* throw_insn = mir->meta.throw_insn;
    uint64_t df_attributes = GetDataFlowAttributes(throw_insn);
    bool can_combine = true;
    if (df_attributes & DF_HAS_NULL_CHKS) {
      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0);
    }
    if (df_attributes & DF_HAS_RANGE_CHKS) {
      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0);
    }
    if (!can_combine) {
      break;
    }
    // OK - got one.  Combine
    BasicBlock* bb_next = GetBasicBlock(bb->fall_through);
    DCHECK(!bb_next->catch_entry);
    DCHECK_EQ(Predecessors(bb_next), 1U);
    // Overwrite the kMirOpCheck insn with the paired opcode
    DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
    *bb->last_mir_insn = *throw_insn;
    // Use the successor info from the next block
    bb->successor_block_list_type = bb_next->successor_block_list_type;
    bb->successor_blocks = bb_next->successor_blocks;
    // Use the ending block linkage from the next block
    bb->fall_through = bb_next->fall_through;
    GetBasicBlock(bb->taken)->block_type = kDead;  // Kill the unused exception block
    bb->taken = bb_next->taken;
    // Include the rest of the instructions
    bb->last_mir_insn = bb_next->last_mir_insn;
    /*
     * If lower-half of pair of blocks to combine contained a return, move the flag
     * to the newly combined block.
     */
    bb->terminated_by_return = bb_next->terminated_by_return;

    /*
     * NOTE: we aren't updating all dataflow info here.  Should either make sure this pass
     * happens after uses of i_dominated, dom_frontier or update the dataflow info here.
     */

    // Kill bb_next and remap now-dead id to parent
    bb_next->block_type = kDead;
    block_id_map_.Overwrite(bb_next->id, bb->id);

    // Now, loop back and see if we can keep going
  }
}

void MIRGraph::EliminateNullChecksAndInferTypesStart() {
  if ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0) {
    if (kIsDebugBuild) {
      AllNodesIterator iter(this);
      for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
        CHECK(bb->data_flow_info == nullptr || bb->data_flow_info->ending_check_v == nullptr);
      }
    }

    DCHECK(temp_scoped_alloc_.get() == nullptr);
    temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
    temp_bit_vector_size_ = GetNumSSARegs();
    temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
        temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapTempSSARegisterV);
  }
}

/*
 * Eliminate unnecessary null checks for a basic block. Also, while we're doing
 * an iterative walk, go ahead and perform type and size inference.
 */
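// Editorial example: on the taken edge of "if-nez v0" (or the fall-through of
// "if-eqz v0") the tested register cannot be null, so a subsequent
// "iget v1, v0, ..." in that block gets MIR_IGNORE_NULL_CHECK below.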
bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
  if (bb->data_flow_info == NULL) return false;
  bool infer_changed = false;
  bool do_nce = ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0);

  ArenaBitVector* ssa_regs_to_check = temp_bit_vector_;
  if (do_nce) {
    /*
     * Set initial state. Catch blocks don't need any special treatment.
     */
    if (bb->block_type == kEntryBlock) {
      ssa_regs_to_check->ClearAllBits();
      // Assume all ins are objects.
      for (uint16_t in_reg = cu_->num_dalvik_registers - cu_->num_ins;
           in_reg < cu_->num_dalvik_registers; in_reg++) {
        ssa_regs_to_check->SetBit(in_reg);
      }
      if ((cu_->access_flags & kAccStatic) == 0) {
        // If non-static method, mark "this" as non-null
        int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
        ssa_regs_to_check->ClearBit(this_reg);
      }
    } else if (bb->predecessors->Size() == 1) {
      BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
      // pred_bb must have already been processed at least once.
      DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
      ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
      if (pred_bb->block_type == kDalvikByteCode) {
        // Check to see if predecessor had an explicit null-check.
        MIR* last_insn = pred_bb->last_mir_insn;
        if (last_insn != nullptr) {
          Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
          if (last_opcode == Instruction::IF_EQZ) {
            if (pred_bb->fall_through == bb->id) {
              // This block is the fall-through of an IF_EQZ, so the register
              // the IF_EQZ tested (vA) can't be null here.
              ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
            }
          } else if (last_opcode == Instruction::IF_NEZ) {
            if (pred_bb->taken == bb->id) {
              // This block is the taken target of an IF_NEZ, so the register
              // the IF_NEZ tested (vA) can't be null here.
              ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
            }
          }
        }
      }
    } else {
      // Starting state is union of all incoming arcs
      GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
      BasicBlock* pred_bb = GetBasicBlock(iter.Next());
      CHECK(pred_bb != NULL);
      while (pred_bb->data_flow_info->ending_check_v == nullptr) {
        pred_bb = GetBasicBlock(iter.Next());
        // At least one predecessor must have been processed before this bb.
        DCHECK(pred_bb != nullptr);
        DCHECK(pred_bb->data_flow_info != nullptr);
      }
      ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
      while (true) {
        pred_bb = GetBasicBlock(iter.Next());
        if (!pred_bb) break;
        DCHECK(pred_bb->data_flow_info != nullptr);
        if (pred_bb->data_flow_info->ending_check_v == nullptr) {
          continue;
        }
        ssa_regs_to_check->Union(pred_bb->data_flow_info->ending_check_v);
      }
    }
    // At this point, ssa_regs_to_check shows which sregs have an object definition with
    // no intervening uses.
  }

  // Walk through the instructions in the block, updating as necessary
  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    if (mir->ssa_rep == NULL) {
      continue;
    }

    // Propagate type info.
    infer_changed = InferTypeAndSize(bb, mir, infer_changed);
    if (!do_nce) {
      continue;
    }

    uint64_t df_attributes = GetDataFlowAttributes(mir);

    // Might need a null check?
    if (df_attributes & DF_HAS_NULL_CHKS) {
      int src_idx;
      if (df_attributes & DF_NULL_CHK_1) {
        src_idx = 1;
      } else if (df_attributes & DF_NULL_CHK_2) {
        src_idx = 2;
      } else {
        src_idx = 0;
      }
      int src_sreg = mir->ssa_rep->uses[src_idx];
      if (!ssa_regs_to_check->IsBitSet(src_sreg)) {
        // Eliminate the null check.
        mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
      } else {
        // Do the null check.
        mir->optimization_flags &= ~MIR_IGNORE_NULL_CHECK;
        // Mark s_reg as null-checked
        ssa_regs_to_check->ClearBit(src_sreg);
      }
    }

    if ((df_attributes & DF_A_WIDE) ||
        (df_attributes & (DF_REF_A | DF_SETS_CONST | DF_NULL_TRANSFER)) == 0) {
      continue;
    }

    /*
     * First, mark all object definitions as requiring null check.
     * Note: we can't tell if a CONST definition might be used as an object, so treat
     * them all as object definitions.
     */
    if (((df_attributes & (DF_DA | DF_REF_A)) == (DF_DA | DF_REF_A)) ||
        (df_attributes & DF_SETS_CONST)) {
      ssa_regs_to_check->SetBit(mir->ssa_rep->defs[0]);
    }

    // Now, remove mark from all object definitions we know are non-null.
    if (df_attributes & DF_NON_NULL_DST) {
      // Mark target of NEW* as non-null
      ssa_regs_to_check->ClearBit(mir->ssa_rep->defs[0]);
    }

    // Mark non-null returns from invoke-style NEW*
    if (df_attributes & DF_NON_NULL_RET) {
      MIR* next_mir = mir->next;
      // Next should be a MOVE_RESULT_OBJECT
      if (next_mir &&
          next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
        // Mark as null checked
        ssa_regs_to_check->ClearBit(next_mir->ssa_rep->defs[0]);
      } else {
        if (next_mir) {
          LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
        } else if (bb->fall_through != NullBasicBlockId) {
          // Look in next basic block
          struct BasicBlock* next_bb = GetBasicBlock(bb->fall_through);
          for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
               tmir = tmir->next) {
            if (MIR::DecodedInstruction::IsPseudoMirOp(tmir->dalvikInsn.opcode)) {
              continue;
            }
            // First non-pseudo should be MOVE_RESULT_OBJECT
            if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
              // Mark as null checked
              ssa_regs_to_check->ClearBit(tmir->ssa_rep->defs[0]);
            } else {
              LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode;
            }
            break;
          }
        }
      }
    }

    /*
     * Propagate nullcheck state on register copies (including
     * Phi pseudo copies).  For the latter, nullcheck state is
     * the "or" of all the Phi's operands.
     */
    if (df_attributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
      int tgt_sreg = mir->ssa_rep->defs[0];
      int operands = (df_attributes & DF_NULL_TRANSFER_0) ? 1 :
          mir->ssa_rep->num_uses;
      bool needs_null_check = false;
      for (int i = 0; i < operands; i++) {
        needs_null_check |= ssa_regs_to_check->IsBitSet(mir->ssa_rep->uses[i]);
      }
      if (needs_null_check) {
        ssa_regs_to_check->SetBit(tgt_sreg);
      } else {
        ssa_regs_to_check->ClearBit(tgt_sreg);
      }
    }
  }

  // Did anything change?
  bool nce_changed = false;
  if (do_nce) {
    if (bb->data_flow_info->ending_check_v == nullptr) {
      DCHECK(temp_scoped_alloc_.get() != nullptr);
      bb->data_flow_info->ending_check_v = new (temp_scoped_alloc_.get()) ArenaBitVector(
          temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
      nce_changed = ssa_regs_to_check->GetHighestBitSet() != -1;
      bb->data_flow_info->ending_check_v->Copy(ssa_regs_to_check);
    } else if (!ssa_regs_to_check->SameBitsSet(bb->data_flow_info->ending_check_v)) {
      nce_changed = true;
      bb->data_flow_info->ending_check_v->Copy(ssa_regs_to_check);
    }
  }
  return infer_changed | nce_changed;
}

void MIRGraph::EliminateNullChecksAndInferTypesEnd() {
  if ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0) {
    // Clean up temporaries.
    temp_bit_vector_size_ = 0u;
    temp_bit_vector_ = nullptr;
    AllNodesIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
      if (bb->data_flow_info != nullptr) {
        bb->data_flow_info->ending_check_v = nullptr;
      }
    }
    DCHECK(temp_scoped_alloc_.get() != nullptr);
    temp_scoped_alloc_.reset();
  }
}

bool MIRGraph::EliminateClassInitChecksGate() {
  if ((cu_->disable_opt & (1 << kClassInitCheckElimination)) != 0 ||
      !cu_->mir_graph->HasStaticFieldAccess()) {
    return false;
  }

  if (kIsDebugBuild) {
    AllNodesIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
      CHECK(bb->data_flow_info == nullptr || bb->data_flow_info->ending_check_v == nullptr);
    }
  }

  DCHECK(temp_scoped_alloc_.get() == nullptr);
  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));

  // Each insn we use here has at least 2 code units, so offset/2 will be a unique index.
  const size_t end = (cu_->code_item->insns_size_in_code_units_ + 1u) / 2u;
  temp_insn_data_ = static_cast<uint16_t*>(
      temp_scoped_alloc_->Alloc(end * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));

  uint32_t unique_class_count = 0u;
  {
    // Get unique_class_count and store indexes in temp_insn_data_ using a map on a nested
    // ScopedArenaAllocator.

    // Embed the map value in the entry to save space.
    struct MapEntry {
      // Map key: the class identified by the declaring dex file and type index.
      const DexFile* declaring_dex_file;
      uint16_t declaring_class_idx;
      // Map value: index into bit vectors of classes requiring initialization checks.
      uint16_t index;
    };
    struct MapEntryComparator {
      bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
        if (lhs.declaring_class_idx != rhs.declaring_class_idx) {
          return lhs.declaring_class_idx < rhs.declaring_class_idx;
        }
        return lhs.declaring_dex_file < rhs.declaring_dex_file;
      }
    };

    ScopedArenaAllocator allocator(&cu_->arena_stack);
    ScopedArenaSet<MapEntry, MapEntryComparator> class_to_index_map(MapEntryComparator(),
                                                                    allocator.Adapter());

    // First, find all SGET/SPUTs that may need class initialization checks, record INVOKE_STATICs.
    AllNodesIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
      for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
        DCHECK(bb->data_flow_info != nullptr);
        if (mir->dalvikInsn.opcode >= Instruction::SGET &&
            mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
          const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
          uint16_t index = 0xffffu;
          if (!field_info.IsInitialized()) {
            DCHECK_LT(class_to_index_map.size(), 0xffffu);
            MapEntry entry = {
                // Treat unresolved fields as if each had its own class.
                field_info.IsResolved() ? field_info.DeclaringDexFile()
                                        : nullptr,
                field_info.IsResolved() ? field_info.DeclaringClassIndex()
                                        : field_info.FieldIndex(),
                static_cast<uint16_t>(class_to_index_map.size())
            };
            index = class_to_index_map.insert(entry).first->index;
          }
          // Using offset/2 for index into temp_insn_data_.
          temp_insn_data_[mir->offset / 2u] = index;
        }
      }
    }
    unique_class_count = static_cast<uint32_t>(class_to_index_map.size());
  }

  if (unique_class_count == 0u) {
    // All SGET/SPUTs refer to initialized classes. Nothing to do.
    temp_insn_data_ = nullptr;
    temp_scoped_alloc_.reset();
    return false;
  }

  temp_bit_vector_size_ = unique_class_count;
  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
  DCHECK_GT(temp_bit_vector_size_, 0u);
  return true;
}

/*
 * Eliminate unnecessary class initialization checks for a basic block.
 */
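// Editorial example: for two SGETs on the same class in straight-line code,
// the first access clears the class's bit in classes_to_check, so the second
// is marked MIR_IGNORE_CLINIT_CHECK below.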
bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
  DCHECK_EQ((cu_->disable_opt & (1 << kClassInitCheckElimination)), 0u);
  if (bb->data_flow_info == NULL) {
    return false;
  }

  /*
   * Set initial state.  Catch blocks don't need any special treatment.
   */
  ArenaBitVector* classes_to_check = temp_bit_vector_;
  DCHECK(classes_to_check != nullptr);
  if (bb->block_type == kEntryBlock) {
    classes_to_check->SetInitialBits(temp_bit_vector_size_);
  } else if (bb->predecessors->Size() == 1) {
    BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
    // pred_bb must have already been processed at least once.
    DCHECK(pred_bb != nullptr);
    DCHECK(pred_bb->data_flow_info != nullptr);
    DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
    classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
  } else {
    // Starting state is union of all incoming arcs
    GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
    BasicBlock* pred_bb = GetBasicBlock(iter.Next());
    DCHECK(pred_bb != NULL);
    DCHECK(pred_bb->data_flow_info != NULL);
    while (pred_bb->data_flow_info->ending_check_v == nullptr) {
      pred_bb = GetBasicBlock(iter.Next());
      // At least one predecessor must have been processed before this bb.
      DCHECK(pred_bb != nullptr);
      DCHECK(pred_bb->data_flow_info != nullptr);
    }
    classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
    while (true) {
      pred_bb = GetBasicBlock(iter.Next());
      if (!pred_bb) break;
      DCHECK(pred_bb->data_flow_info != nullptr);
      if (pred_bb->data_flow_info->ending_check_v == nullptr) {
        continue;
      }
      classes_to_check->Union(pred_bb->data_flow_info->ending_check_v);
    }
  }
  // At this point, classes_to_check shows which classes need clinit checks.

  // Walk through the instructions in the block, updating as necessary
  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
    if (mir->dalvikInsn.opcode >= Instruction::SGET &&
        mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
      uint16_t index = temp_insn_data_[mir->offset / 2u];
      if (index != 0xffffu) {
        if (mir->dalvikInsn.opcode >= Instruction::SGET &&
            mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
          if (!classes_to_check->IsBitSet(index)) {
            // Eliminate the class init check.
            mir->optimization_flags |= MIR_IGNORE_CLINIT_CHECK;
          } else {
            // Do the class init check.
            mir->optimization_flags &= ~MIR_IGNORE_CLINIT_CHECK;
          }
        }
        // Mark the class as initialized.
        classes_to_check->ClearBit(index);
      }
    }
  }

  // Did anything change?
  bool changed = false;
  if (bb->data_flow_info->ending_check_v == nullptr) {
    DCHECK(temp_scoped_alloc_.get() != nullptr);
    DCHECK(bb->data_flow_info != nullptr);
    bb->data_flow_info->ending_check_v = new (temp_scoped_alloc_.get()) ArenaBitVector(
        temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
    changed = classes_to_check->GetHighestBitSet() != -1;
    bb->data_flow_info->ending_check_v->Copy(classes_to_check);
  } else if (!classes_to_check->Equal(bb->data_flow_info->ending_check_v)) {
    changed = true;
    bb->data_flow_info->ending_check_v->Copy(classes_to_check);
  }
  return changed;
}

void MIRGraph::EliminateClassInitChecksEnd() {
  // Clean up temporaries.
  temp_bit_vector_size_ = 0u;
  temp_bit_vector_ = nullptr;
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    if (bb->data_flow_info != nullptr) {
      bb->data_flow_info->ending_check_v = nullptr;
    }
  }

  DCHECK(temp_insn_data_ != nullptr);
  temp_insn_data_ = nullptr;
  DCHECK(temp_scoped_alloc_.get() != nullptr);
  temp_scoped_alloc_.reset();
}

bool MIRGraph::ApplyGlobalValueNumberingGate() {
  if ((cu_->disable_opt & (1u << kGlobalValueNumbering)) != 0u) {
    return false;
  }

  DCHECK(temp_scoped_alloc_ == nullptr);
  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
  DCHECK(temp_gvn_ == nullptr);
  temp_gvn_.reset(
      new (temp_scoped_alloc_.get()) GlobalValueNumbering(cu_, temp_scoped_alloc_.get()));
  return true;
}

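// Editorial note: this iterative pass only computes value numbers;
// modifications are deferred to ApplyGlobalValueNumberingEnd, which allows
// them once the numbering has converged and temp_gvn_->Good() holds.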
bool MIRGraph::ApplyGlobalValueNumbering(BasicBlock* bb) {
  DCHECK(temp_gvn_ != nullptr);
  LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb);
  if (lvn != nullptr) {
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      lvn->GetValueNumber(mir);
    }
  }
  return (lvn != nullptr) && temp_gvn_->FinishBasicBlock(bb);
}

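// Modification step of GVN: if the analysis converged (Good()), re-run value
// numbering with modifications enabled so redundancies are actually
// eliminated; otherwise just log the failure. Then release the GVN state.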
void MIRGraph::ApplyGlobalValueNumberingEnd() {
  // Perform modifications.
  if (temp_gvn_->Good()) {
    temp_gvn_->AllowModifications();
    PreOrderDfsIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
      ScopedArenaAllocator allocator(&cu_->arena_stack);  // Reclaim memory after each LVN.
      LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb, &allocator);
      if (lvn != nullptr) {
        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
          lvn->GetValueNumber(mir);
        }
        bool change = temp_gvn_->FinishBasicBlock(bb);
        DCHECK(!change) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      }
    }
  } else {
    LOG(WARNING) << "GVN failed for " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  }

  DCHECK(temp_gvn_ != nullptr);
  temp_gvn_.reset();
  DCHECK(temp_scoped_alloc_ != nullptr);
  temp_scoped_alloc_.reset();
}

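// Resolve and cache the field lowering info for an IGET/IPUT inlined from a
// getter/setter invoke. Entries are keyed by the invoke's
// method_lowering_info index (each invoke inlines at most one field access):
// temp_bit_vector_ marks valid cache entries and temp_insn_data_ holds the
// cached indexes into ifield_lowering_infos_.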
void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput) {
  uint32_t method_index = invoke->meta.method_lowering_info;
  if (temp_bit_vector_->IsBitSet(method_index)) {
    iget_or_iput->meta.ifield_lowering_info = temp_insn_data_[method_index];
    DCHECK_EQ(field_idx, GetIFieldLoweringInfo(iget_or_iput).FieldIndex());
    return;
  }

  const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(invoke);
  MethodReference target = method_info.GetTargetMethod();
  DexCompilationUnit inlined_unit(
      cu_, cu_->class_loader, cu_->class_linker, *target.dex_file,
      nullptr /* code_item not used */, 0u /* class_def_idx not used */, target.dex_method_index,
      0u /* access_flags not used */, nullptr /* verified_method not used */);
  MirIFieldLoweringInfo inlined_field_info(field_idx);
  MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
  DCHECK(inlined_field_info.IsResolved());

  uint32_t field_info_index = ifield_lowering_infos_.Size();
  ifield_lowering_infos_.Insert(inlined_field_info);
  temp_bit_vector_->SetBit(method_index);
  temp_insn_data_[method_index] = field_info_index;
  iget_or_iput->meta.ifield_lowering_info = field_info_index;
}

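// Gate for special method inlining: only run when inlining is not
// suppressed, there are invokes to consider, and the Quick compiler's
// method inliner map is available.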
bool MIRGraph::InlineSpecialMethodsGate() {
  if ((cu_->disable_opt & (1 << kSuppressMethodInlining)) != 0 ||
      method_lowering_infos_.Size() == 0u) {
    return false;
  }
  if (cu_->compiler_driver->GetMethodInlinerMap() == nullptr) {
    // This isn't the Quick compiler.
    return false;
  }
  return true;
}

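// Scratch state for the inlining pass: temp_bit_vector_ marks which cache
// entries in temp_insn_data_ are valid (see ComputeInlineIFieldLoweringInfo).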
void MIRGraph::InlineSpecialMethodsStart() {
  // Prepare for inlining getters/setters. Since we're inlining at most 1 IGET/IPUT from
  // each INVOKE, we can index the data by the MIR::meta::method_lowering_info index.

  DCHECK(temp_scoped_alloc_.get() == nullptr);
  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
  temp_bit_vector_size_ = method_lowering_infos_.Size();
  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapMisc);
  temp_bit_vector_->ClearAllBits();
  temp_insn_data_ = static_cast<uint16_t*>(temp_scoped_alloc_->Alloc(
      temp_bit_vector_size_ * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
}

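// Replace fast-path getter/setter invokes in the block with the equivalent
// field access. As a rough, hypothetical sketch of the transformation:
//   invoke-direct {v0}, LFoo;->getX()I   followed by   move-result v1
// becomes
//   iget v1, v0, LFoo;->x:I
// The actual rewriting is performed by the DexFileMethodInliner.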
void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
  if (bb->block_type != kDalvikByteCode) {
    return;
  }
  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
    if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
      continue;
    }
    if (!(Instruction::FlagsOf(mir->dalvikInsn.opcode) & Instruction::kInvoke)) {
      continue;
    }
    const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(mir);
    if (!method_info.FastPath()) {
      continue;
    }
    InvokeType sharp_type = method_info.GetSharpType();
    if ((sharp_type != kDirect) &&
        (sharp_type != kStatic || method_info.NeedsClassInitialization())) {
      continue;
    }
    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
    MethodReference target = method_info.GetTargetMethod();
    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(target.dex_file)
            ->GenInline(this, bb, mir, target.dex_method_index)) {
      if (cu_->verbose || cu_->print_pass) {
        LOG(INFO) << "SpecialMethodInliner: Inlined " << method_info.GetInvokeType() << " ("
            << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index, *target.dex_file)
            << "\" from \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
            << "\" @0x" << std::hex << mir->offset;
      }
    }
  }
}

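// Tear-down for the inlining pass: release the scratch cache and allocator.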
void MIRGraph::InlineSpecialMethodsEnd() {
  DCHECK(temp_insn_data_ != nullptr);
  temp_insn_data_ = nullptr;
  DCHECK(temp_bit_vector_ != nullptr);
  temp_bit_vector_ = nullptr;
  DCHECK(temp_scoped_alloc_.get() != nullptr);
  temp_scoped_alloc_.reset();
}

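// Collect and log per-method statistics on how many null checks and range
// checks were eliminated, as a percentage of all such checks seen.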
void MIRGraph::DumpCheckStats() {
  Checkstats* stats =
      static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
  checkstats_ = stats;
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    CountChecks(bb);
  }
  if (stats->null_checks > 0) {
    float eliminated = static_cast<float>(stats->null_checks_eliminated);
    float checks = static_cast<float>(stats->null_checks);
    LOG(INFO) << "Null Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
              << stats->null_checks_eliminated << " of " << stats->null_checks << " -> "
              << (eliminated / checks) * 100.0 << "%";
  }
  if (stats->range_checks > 0) {
    float eliminated = static_cast<float>(stats->range_checks_eliminated);
    float checks = static_cast<float>(stats->range_checks);
    LOG(INFO) << "Range Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
              << stats->range_checks_eliminated << " of " << stats->range_checks << " -> "
              << (eliminated / checks) * 100.0 << "%";
  }
}

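// Mark bb as the head of an extended basic block, walk the blocks it
// strictly dominates, and propagate the use_lvn and dominates_return flags
// across the whole chain.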
bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) {
  if (bb->visited) return false;
  if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
      || (bb->block_type == kExitBlock))) {
    // Ignore special blocks
    bb->visited = true;
    return false;
  }
  // Must be head of extended basic block.
  BasicBlock* start_bb = bb;
  extended_basic_blocks_.push_back(bb->id);
  bool terminated_by_return = false;
  bool do_local_value_numbering = false;
  // Visit blocks strictly dominated by this head.
  while (bb != nullptr) {
    bb->visited = true;
    terminated_by_return |= bb->terminated_by_return;
    do_local_value_numbering |= bb->use_lvn;
    bb = NextDominatedBlock(bb);
  }
  if (terminated_by_return || do_local_value_numbering) {
    // Do lvn for all blocks in this extended set.
    bb = start_bb;
    while (bb != nullptr) {
      bb->use_lvn = do_local_value_numbering;
      bb->dominates_return = terminated_by_return;
      bb = NextDominatedBlock(bb);
    }
  }
  return false;  // Not iterative - return value will be ignored
}

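// Driver for the basic block optimization pass. When exception edges are
// suppressed we first group blocks into extended basic blocks, giving the
// optimizer larger regions to work on; otherwise each block is optimized
// individually in DFS pre-order.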
void MIRGraph::BasicBlockOptimization() {
  if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
    ClearAllVisitedFlags();
    PreOrderDfsIterator iter2(this);
    for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
      BuildExtendedBBList(bb);
    }
    // Perform extended basic block optimizations.
    for (unsigned int i = 0; i < extended_basic_blocks_.size(); i++) {
      BasicBlockOpt(GetBasicBlock(extended_basic_blocks_[i]));
    }
  } else {
    PreOrderDfsIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
      BasicBlockOpt(bb);
    }
  }
}

}  // namespace art