/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "compiler_internals.h"
#include "local_value_numbering.h"
#include "dataflow_iterator-inl.h"

namespace art {

static unsigned int Predecessors(BasicBlock* bb) {
  return bb->predecessors->Size();
}

/* Set up a constant value for opcodes that have the DF_SETS_CONST attribute */
void MIRGraph::SetConstant(int32_t ssa_reg, int value) {
  is_constant_v_->SetBit(ssa_reg);
  constant_values_[ssa_reg] = value;
}

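/*
 * A wide constant occupies two consecutive SSA slots; for example,
 * SetConstantWide(s, 0x123456789ABCDEF0LL) records 0x9ABCDEF0 in
 * constant_values_[s] and 0x12345678 in constant_values_[s + 1].
 */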
void MIRGraph::SetConstantWide(int ssa_reg, int64_t value) {
  is_constant_v_->SetBit(ssa_reg);
  constant_values_[ssa_reg] = Low32Bits(value);
  constant_values_[ssa_reg + 1] = High32Bits(value);
}

void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
  MIR* mir;

  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];

    DecodedInstruction *d_insn = &mir->dalvikInsn;

    if (!(df_attributes & DF_HAS_DEFS)) continue;

    /* Handle instructions that set up constants directly */
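    // e.g. CONST_HIGH16 with vB == 0x1234 defines the value 0x12340000, and
    // CONST_WIDE_HIGH16 shifts vB into bits [63:48] of a 64-bit value.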
    if (df_attributes & DF_SETS_CONST) {
      if (df_attributes & DF_DA) {
        int32_t vB = static_cast<int32_t>(d_insn->vB);
        switch (d_insn->opcode) {
          case Instruction::CONST_4:
          case Instruction::CONST_16:
          case Instruction::CONST:
            SetConstant(mir->ssa_rep->defs[0], vB);
            break;
          case Instruction::CONST_HIGH16:
            SetConstant(mir->ssa_rep->defs[0], vB << 16);
            break;
          case Instruction::CONST_WIDE_16:
          case Instruction::CONST_WIDE_32:
            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB));
            break;
          case Instruction::CONST_WIDE:
            SetConstantWide(mir->ssa_rep->defs[0], d_insn->vB_wide);
            break;
          case Instruction::CONST_WIDE_HIGH16:
            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB) << 48);
            break;
          default:
            break;
        }
      }
    } else if (df_attributes & DF_IS_MOVE) {
      int i;

      for (i = 0; i < mir->ssa_rep->num_uses; i++) {
        if (!is_constant_v_->IsBitSet(mir->ssa_rep->uses[i])) break;
      }
      /* Move a register holding a constant to another register */
      if (i == mir->ssa_rep->num_uses) {
        SetConstant(mir->ssa_rep->defs[0], constant_values_[mir->ssa_rep->uses[0]]);
        if (df_attributes & DF_A_WIDE) {
          SetConstant(mir->ssa_rep->defs[1], constant_values_[mir->ssa_rep->uses[1]]);
        }
      }
    }
  }
  /* TODO: implement code to handle arithmetic operations */
}

void MIRGraph::PropagateConstants() {
  is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
  constant_values_ = static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumSSARegs(),
                                                     ArenaAllocator::kAllocDFInfo));
  AllNodesIterator iter(this, false /* not iterative */);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    DoConstantPropagation(bb);
  }
}

/* Advance to next strictly dominated MIR node in an extended basic block */
static MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
  BasicBlock* bb = *p_bb;
  if (mir != NULL) {
    mir = mir->next;
    if (mir == NULL) {
      bb = bb->fall_through;
      if ((bb == NULL) || Predecessors(bb) != 1) {
        mir = NULL;
      } else {
        *p_bb = bb;
        mir = bb->first_mir_insn;
      }
    }
  }
  return mir;
}

/*
 * To be used at an invoke mir.  If the logically next mir node represents
 * a move-result, return it.  Else, return NULL.  If a move-result exists,
 * it is required to immediately follow the invoke with no intervening
 * opcodes or incoming arcs.  However, if the result of the invoke is not
 * used, a move-result may not be present.
 */
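/*
 * e.g. (illustrative Dalvik sequence):
 *   invoke-virtual {v0}, Ljava/lang/Object;->hashCode()I
 *   move-result v1          <-- returned by FindMoveResult
 */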
MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
  BasicBlock* tbb = bb;
  mir = AdvanceMIR(&tbb, mir);
  while (mir != NULL) {
    int opcode = mir->dalvikInsn.opcode;
    if ((opcode == Instruction::MOVE_RESULT) ||
        (opcode == Instruction::MOVE_RESULT_OBJECT) ||
        (opcode == Instruction::MOVE_RESULT_WIDE)) {
      break;
    }
    // Keep going if pseudo op, otherwise terminate
    if (opcode < kNumPackedOpcodes) {
      mir = NULL;
    } else {
      mir = AdvanceMIR(&tbb, mir);
    }
  }
  return mir;
}

    149 
    150 static BasicBlock* NextDominatedBlock(BasicBlock* bb) {
    151   if (bb->block_type == kDead) {
    152     return NULL;
    153   }
    154   DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
    155       || (bb->block_type == kExitBlock));
    156   if (((bb->taken != NULL) && (bb->fall_through == NULL)) &&
    157       ((bb->taken->block_type == kDalvikByteCode) || (bb->taken->block_type == kExitBlock))) {
    158     // Follow simple unconditional branches.
    159     bb = bb->taken;
    160   } else {
    161     // Follow simple fallthrough
    162     bb = (bb->taken != NULL) ? NULL : bb->fall_through;
    163   }
    164   if (bb == NULL || (Predecessors(bb) != 1)) {
    165     return NULL;
    166   }
    167   DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
    168   return bb;
    169 }
    170 
    171 static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
    172   for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    173     if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
    174       for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
    175         if (mir->ssa_rep->uses[i] == ssa_name) {
    176           return mir;
    177         }
    178       }
    179     }
    180   }
    181   return NULL;
    182 }
    183 
    184 static SelectInstructionKind SelectKind(MIR* mir) {
    185   switch (mir->dalvikInsn.opcode) {
    186     case Instruction::MOVE:
    187     case Instruction::MOVE_OBJECT:
    188     case Instruction::MOVE_16:
    189     case Instruction::MOVE_OBJECT_16:
    190     case Instruction::MOVE_FROM16:
    191     case Instruction::MOVE_OBJECT_FROM16:
    192       return kSelectMove;
    193     case Instruction::CONST:
    194     case Instruction::CONST_4:
    195     case Instruction::CONST_16:
    196       return kSelectConst;
    197     case Instruction::GOTO:
    198     case Instruction::GOTO_16:
    199     case Instruction::GOTO_32:
    200       return kSelectGoto;
    201     default:
    202       return kSelectNone;
    203   }
    204 }
    205 
    206 int MIRGraph::GetSSAUseCount(int s_reg) {
    207   return raw_use_counts_.Get(s_reg);
    208 }
    209 
    210 
/* Do some MIR-level extended basic block optimizations */
bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
  if (bb->block_type == kDead) {
    return true;
  }
  int num_temps = 0;
  LocalValueNumbering local_valnum(cu_);
  while (bb != NULL) {
    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
      // TUNING: use the returned value number for CSE.
      local_valnum.GetValueNumber(mir);
      // Look for interesting opcodes, skip otherwise
      Instruction::Code opcode = mir->dalvikInsn.opcode;
      switch (opcode) {
        case Instruction::CMPL_FLOAT:
        case Instruction::CMPL_DOUBLE:
        case Instruction::CMPG_FLOAT:
        case Instruction::CMPG_DOUBLE:
        case Instruction::CMP_LONG:
          if ((cu_->disable_opt & (1 << kBranchFusing)) != 0) {
            // Bitcode doesn't allow this optimization.
            break;
          }
          if (mir->next != NULL) {
            MIR* mir_next = mir->next;
            Instruction::Code br_opcode = mir_next->dalvikInsn.opcode;
            ConditionCode ccode = kCondNv;
            switch (br_opcode) {
              case Instruction::IF_EQZ:
                ccode = kCondEq;
                break;
              case Instruction::IF_NEZ:
                ccode = kCondNe;
                break;
              case Instruction::IF_LTZ:
                ccode = kCondLt;
                break;
              case Instruction::IF_GEZ:
                ccode = kCondGe;
                break;
              case Instruction::IF_GTZ:
                ccode = kCondGt;
                break;
              case Instruction::IF_LEZ:
                ccode = kCondLe;
                break;
              default:
                break;
            }
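            // e.g. "cmp-long v0, v2, v4" followed by "if-gez v0, :L" fuses
            // into a single kMirOpFusedCmpLong (illustrative), provided v0 is
            // used nowhere else.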
            // Make sure result of cmp is used by next insn and nowhere else
            if ((ccode != kCondNv) &&
                (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
                (GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) {
              mir_next->dalvikInsn.arg[0] = ccode;
              switch (opcode) {
                case Instruction::CMPL_FLOAT:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
                  break;
                case Instruction::CMPL_DOUBLE:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
                  break;
                case Instruction::CMPG_FLOAT:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
                  break;
                case Instruction::CMPG_DOUBLE:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
                  break;
                case Instruction::CMP_LONG:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmpLong);
                  break;
                default: LOG(ERROR) << "Unexpected opcode: " << opcode;
              }
              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
              mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
              mir_next->ssa_rep->uses = mir->ssa_rep->uses;
              mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
              mir_next->ssa_rep->num_defs = 0;
              mir->ssa_rep->num_uses = 0;
              mir->ssa_rep->num_defs = 0;
            }
          }
          break;
        case Instruction::GOTO:
        case Instruction::GOTO_16:
        case Instruction::GOTO_32:
        case Instruction::IF_EQ:
        case Instruction::IF_NE:
        case Instruction::IF_LT:
        case Instruction::IF_GE:
        case Instruction::IF_GT:
        case Instruction::IF_LE:
        case Instruction::IF_EQZ:
        case Instruction::IF_NEZ:
        case Instruction::IF_LTZ:
        case Instruction::IF_GEZ:
        case Instruction::IF_GTZ:
        case Instruction::IF_LEZ:
          // If we've got a backwards branch to return, no need to suspend check.
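          // (A backedge whose target dominates a return will leave the method
          // shortly anyway, so the loop suspend check is redundant.)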
          if ((IsBackedge(bb, bb->taken) && bb->taken->dominates_return) ||
              (IsBackedge(bb, bb->fall_through) && bb->fall_through->dominates_return)) {
            mir->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
            if (cu_->verbose) {
              LOG(INFO) << "Suppressed suspend check on branch to return at 0x"
                        << std::hex << mir->offset;
            }
          }
          break;
        default:
          break;
      }
      // Is this the select pattern?
      // TODO: flesh out support for Mips and X86.  NOTE: llvm's select op doesn't quite work here.
      // TUNING: expand to support IF_xx compare & branches
      if (!(cu_->compiler_backend == kPortable) && (cu_->instruction_set == kThumb2) &&
          ((mir->dalvikInsn.opcode == Instruction::IF_EQZ) ||
          (mir->dalvikInsn.opcode == Instruction::IF_NEZ))) {
        BasicBlock* ft = bb->fall_through;
        DCHECK(ft != NULL);
        BasicBlock* ft_ft = ft->fall_through;
        BasicBlock* ft_tk = ft->taken;

        BasicBlock* tk = bb->taken;
        DCHECK(tk != NULL);
        BasicBlock* tk_ft = tk->fall_through;
        BasicBlock* tk_tk = tk->taken;

        /*
         * In the select pattern, the taken edge goes to a block that unconditionally
         * transfers to the rejoin block and the fall_through edge goes to a block that
         * unconditionally falls through to the rejoin block.
         */
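        /*
         * e.g. (illustrative):        if-eqz vX
         *                            /         \
         *            [const vA, #1; goto]   [const vA, #0]
         *                            \         /
         *                          join: Phi(...)
         * The diamond collapses into a single kMirOpSelect defining the
         * Phi result.
         */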
        if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
            (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
          /*
           * Okay - we have the basic diamond shape.  At the very least, we can eliminate the
           * suspend check on the taken-taken branch back to the join point.
           */
          if (SelectKind(tk->last_mir_insn) == kSelectGoto) {
            tk->last_mir_insn->optimization_flags |= (MIR_IGNORE_SUSPEND_CHECK);
          }
          // Are the block bodies something we can handle?
          if ((ft->first_mir_insn == ft->last_mir_insn) &&
              (tk->first_mir_insn != tk->last_mir_insn) &&
              (tk->first_mir_insn->next == tk->last_mir_insn) &&
              ((SelectKind(ft->first_mir_insn) == kSelectMove) ||
              (SelectKind(ft->first_mir_insn) == kSelectConst)) &&
              (SelectKind(ft->first_mir_insn) == SelectKind(tk->first_mir_insn)) &&
              (SelectKind(tk->last_mir_insn) == kSelectGoto)) {
            // Almost there.  Are the instructions targeting the same vreg?
            MIR* if_true = tk->first_mir_insn;
            MIR* if_false = ft->first_mir_insn;
            // It's possible that the target of the select isn't used - skip those (rare) cases.
            MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
            if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
              /*
               * We'll convert the IF_EQZ/IF_NEZ to a SELECT.  We need to find the
               * Phi node in the merge block and delete it (while using the SSA name
               * of the merge as the target of the SELECT).  Delete both taken and
               * fallthrough blocks, and set fallthrough to merge block.
               * NOTE: not updating other dataflow info (no longer used at this point).
               * If this changes, need to update i_dom, etc. here (and in CombineBlocks).
               */
              if (opcode == Instruction::IF_NEZ) {
                // Normalize.
                MIR* tmp_mir = if_true;
                if_true = if_false;
                if_false = tmp_mir;
              }
              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpSelect);
              bool const_form = (SelectKind(if_true) == kSelectConst);
              if (SelectKind(if_true) == kSelectMove) {
                if (IsConst(if_true->ssa_rep->uses[0]) &&
                    IsConst(if_false->ssa_rep->uses[0])) {
                  const_form = true;
                  if_true->dalvikInsn.vB = ConstantValue(if_true->ssa_rep->uses[0]);
                  if_false->dalvikInsn.vB = ConstantValue(if_false->ssa_rep->uses[0]);
                }
              }
              if (const_form) {
                // "true" set val in vB
                mir->dalvikInsn.vB = if_true->dalvikInsn.vB;
                // "false" set val in vC
                mir->dalvikInsn.vC = if_false->dalvikInsn.vB;
              } else {
                DCHECK_EQ(SelectKind(if_true), kSelectMove);
                DCHECK_EQ(SelectKind(if_false), kSelectMove);
                int* src_ssa =
                    static_cast<int*>(arena_->Alloc(sizeof(int) * 3, ArenaAllocator::kAllocDFInfo));
                src_ssa[0] = mir->ssa_rep->uses[0];
                src_ssa[1] = if_true->ssa_rep->uses[0];
                src_ssa[2] = if_false->ssa_rep->uses[0];
                mir->ssa_rep->uses = src_ssa;
                mir->ssa_rep->num_uses = 3;
              }
              mir->ssa_rep->num_defs = 1;
              mir->ssa_rep->defs =
                  static_cast<int*>(arena_->Alloc(sizeof(int) * 1, ArenaAllocator::kAllocDFInfo));
              mir->ssa_rep->fp_def =
                  static_cast<bool*>(arena_->Alloc(sizeof(bool) * 1, ArenaAllocator::kAllocDFInfo));
              mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
              // Match type of uses to def.
              mir->ssa_rep->fp_use =
                  static_cast<bool*>(arena_->Alloc(sizeof(bool) * mir->ssa_rep->num_uses,
                                                   ArenaAllocator::kAllocDFInfo));
              for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
                mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
              }
              /*
               * There is usually a Phi node in the join block for our two cases.  If the
               * Phi node only contains our two cases as input, we will use the result
               * SSA name of the Phi node as our select result and delete the Phi.  If
               * the Phi node has more than two operands, we will arbitrarily use the SSA
               * name of the "true" path, delete the SSA name of the "false" path from the
               * Phi node (and fix up the incoming arc list).
               */
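              // e.g. with Phi(vA_true, vA_false, vA_other): the "false" name
              // is removed by moving the last operand into its slot, leaving
              // a two-operand Phi (illustrative).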
              if (phi->ssa_rep->num_uses == 2) {
                mir->ssa_rep->defs[0] = phi->ssa_rep->defs[0];
                phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
              } else {
                int dead_def = if_false->ssa_rep->defs[0];
                int live_def = if_true->ssa_rep->defs[0];
                mir->ssa_rep->defs[0] = live_def;
                int* incoming = reinterpret_cast<int*>(phi->dalvikInsn.vB);
                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
                  if (phi->ssa_rep->uses[i] == live_def) {
                    incoming[i] = bb->id;
                  }
                }
                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
                  if (phi->ssa_rep->uses[i] == dead_def) {
                    int last_slot = phi->ssa_rep->num_uses - 1;
                    phi->ssa_rep->uses[i] = phi->ssa_rep->uses[last_slot];
                    incoming[i] = incoming[last_slot];
                  }
                }
              }
              phi->ssa_rep->num_uses--;
              bb->taken = NULL;
              tk->block_type = kDead;
              for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
                tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
              }
            }
          }
        }
      }
    }
    bb = NextDominatedBlock(bb);
  }

  if (num_temps > cu_->num_compiler_temps) {
    cu_->num_compiler_temps = num_temps;
  }
  return true;
}

void MIRGraph::NullCheckEliminationInit(struct BasicBlock* bb) {
  if (bb->data_flow_info != NULL) {
    bb->data_flow_info->ending_null_check_v =
        new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapNullCheck);
  }
}

/* Collect stats on number of checks removed */
void MIRGraph::CountChecks(struct BasicBlock* bb) {
  if (bb->data_flow_info != NULL) {
    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
      if (mir->ssa_rep == NULL) {
        continue;
      }
      int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
      if (df_attributes & DF_HAS_NULL_CHKS) {
        checkstats_->null_checks++;
        if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
          checkstats_->null_checks_eliminated++;
        }
      }
      if (df_attributes & DF_HAS_RANGE_CHKS) {
        checkstats_->range_checks++;
        if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
          checkstats_->range_checks_eliminated++;
        }
      }
    }
  }
}

/* Try to make common case the fallthrough path */
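// Mechanically, e.g. an IF_EQZ whose taken edge leads to an explicitly
// throwing block is rewritten as IF_NEZ with its taken and fall-through
// successors swapped (illustrative).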
static bool LayoutBlocks(struct BasicBlock* bb) {
  // TODO: For now, just looking for direct throws.  Consider generalizing for profile feedback
  if (!bb->explicit_throw) {
    return false;
  }
  BasicBlock* walker = bb;
  while (true) {
    // Check termination conditions
    if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
      break;
    }
    BasicBlock* prev = walker->predecessors->Get(0);
    if (prev->conditional_branch) {
      if (prev->fall_through == walker) {
        // Already done - return
        break;
      }
      DCHECK_EQ(walker, prev->taken);
      // Got one.  Flip it and exit
      Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
      switch (opcode) {
        case Instruction::IF_EQ: opcode = Instruction::IF_NE; break;
        case Instruction::IF_NE: opcode = Instruction::IF_EQ; break;
        case Instruction::IF_LT: opcode = Instruction::IF_GE; break;
        case Instruction::IF_GE: opcode = Instruction::IF_LT; break;
        case Instruction::IF_GT: opcode = Instruction::IF_LE; break;
        case Instruction::IF_LE: opcode = Instruction::IF_GT; break;
        case Instruction::IF_EQZ: opcode = Instruction::IF_NEZ; break;
        case Instruction::IF_NEZ: opcode = Instruction::IF_EQZ; break;
        case Instruction::IF_LTZ: opcode = Instruction::IF_GEZ; break;
        case Instruction::IF_GEZ: opcode = Instruction::IF_LTZ; break;
        case Instruction::IF_GTZ: opcode = Instruction::IF_LEZ; break;
        case Instruction::IF_LEZ: opcode = Instruction::IF_GTZ; break;
        default: LOG(FATAL) << "Unexpected opcode " << opcode;
      }
      prev->last_mir_insn->dalvikInsn.opcode = opcode;
      BasicBlock* t_bb = prev->taken;
      prev->taken = prev->fall_through;
      prev->fall_through = t_bb;
      break;
    }
    walker = prev;
  }
  return false;
}

/* Combine any basic blocks terminated by instructions that we now know can't throw */
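// e.g. a block ending in kMirOpCheck paired with an AGET whose null and range
// checks were both proven redundant no longer needs its edge to the exception
// handler, so it can be merged with its fall-through successor (illustrative).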
bool MIRGraph::CombineBlocks(struct BasicBlock* bb) {
  // Loop here to allow combining a sequence of blocks
  while (true) {
    // Check termination conditions
    if ((bb->first_mir_insn == NULL)
        || (bb->data_flow_info == NULL)
        || (bb->block_type == kExceptionHandling)
        || (bb->block_type == kExitBlock)
        || (bb->block_type == kDead)
        || ((bb->taken == NULL) || (bb->taken->block_type != kExceptionHandling))
        || (bb->successor_block_list.block_list_type != kNotUsed)
        || (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
      break;
    }

    // Test the kMirOpCheck instruction
    MIR* mir = bb->last_mir_insn;
    // Grab the attributes from the paired opcode
    MIR* throw_insn = mir->meta.throw_insn;
    int df_attributes = oat_data_flow_attributes_[throw_insn->dalvikInsn.opcode];
    bool can_combine = true;
    if (df_attributes & DF_HAS_NULL_CHKS) {
      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0);
    }
    if (df_attributes & DF_HAS_RANGE_CHKS) {
      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0);
    }
    if (!can_combine) {
      break;
    }
    // OK - got one.  Combine
    BasicBlock* bb_next = bb->fall_through;
    DCHECK(!bb_next->catch_entry);
    DCHECK_EQ(Predecessors(bb_next), 1U);
    MIR* t_mir = bb->last_mir_insn->prev;
    // Overwrite the kOpCheck insn with the paired opcode
    DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
    *bb->last_mir_insn = *throw_insn;
    bb->last_mir_insn->prev = t_mir;
    // Use the successor info from the next block
    bb->successor_block_list = bb_next->successor_block_list;
    // Use the ending block linkage from the next block
    bb->fall_through = bb_next->fall_through;
    bb->taken->block_type = kDead;  // Kill the unused exception block
    bb->taken = bb_next->taken;
    // Include the rest of the instructions
    bb->last_mir_insn = bb_next->last_mir_insn;
    /*
     * If lower-half of pair of blocks to combine contained a return, move the flag
     * to the newly combined block.
     */
    bb->terminated_by_return = bb_next->terminated_by_return;

    /*
     * NOTE: we aren't updating all dataflow info here.  Should either make sure this pass
     * happens after uses of i_dominated, dom_frontier or update the dataflow info here.
     */

    // Kill bb_next and remap now-dead id to parent
    bb_next->block_type = kDead;
    block_id_map_.Overwrite(bb_next->id, bb->id);

    // Now, loop back and see if we can keep going
  }
  return false;
}

/* Eliminate unnecessary null checks for a basic block. */
bool MIRGraph::EliminateNullChecks(struct BasicBlock* bb) {
  if (bb->data_flow_info == NULL) return false;

  /*
   * Set initial state.  Be conservative with catch
   * blocks and start with no assumptions about null check
   * status (except for "this").
   */
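  // e.g. with 5 Dalvik registers and 1 "in", "this" is v4 in a non-static
  // method and starts out marked null-checked (illustrative numbering).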
  if ((bb->block_type == kEntryBlock) || bb->catch_entry) {
    temp_ssa_register_v_->ClearAllBits();
    if ((cu_->access_flags & kAccStatic) == 0) {
      // If non-static method, mark "this" as non-null
      int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
      temp_ssa_register_v_->SetBit(this_reg);
    }
  } else if (bb->predecessors->Size() == 1) {
    BasicBlock* pred_bb = bb->predecessors->Get(0);
    temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
    if (pred_bb->block_type == kDalvikByteCode) {
      // Check to see if predecessor had an explicit null-check.
      MIR* last_insn = pred_bb->last_mir_insn;
      Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
      if (last_opcode == Instruction::IF_EQZ) {
        if (pred_bb->fall_through == bb) {
          // On the fall-through edge of an IF_EQZ, the tested register is
          // known to be non-null.
          temp_ssa_register_v_->SetBit(last_insn->ssa_rep->uses[0]);
        }
      } else if (last_opcode == Instruction::IF_NEZ) {
        if (pred_bb->taken == bb) {
          // On the taken edge of an IF_NEZ, the tested register is known to
          // be non-null.
          temp_ssa_register_v_->SetBit(last_insn->ssa_rep->uses[0]);
        }
      }
    }
  } else {
    // Starting state is intersection of all incoming arcs
    GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
    BasicBlock* pred_bb = iter.Next();
    DCHECK(pred_bb != NULL);
    temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
    while (true) {
      pred_bb = iter.Next();
      if (!pred_bb) break;
      if ((pred_bb->data_flow_info == NULL) ||
          (pred_bb->data_flow_info->ending_null_check_v == NULL)) {
        continue;
      }
      temp_ssa_register_v_->Intersect(pred_bb->data_flow_info->ending_null_check_v);
    }
  }

  // Walk through the instructions in the block, updating as necessary
  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    if (mir->ssa_rep == NULL) {
      continue;
    }
    int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];

    // Mark target of NEW* as non-null
    if (df_attributes & DF_NON_NULL_DST) {
      temp_ssa_register_v_->SetBit(mir->ssa_rep->defs[0]);
    }

    // Mark non-null returns from invoke-style NEW*
    if (df_attributes & DF_NON_NULL_RET) {
      MIR* next_mir = mir->next;
      // Next should be a MOVE_RESULT_OBJECT
      if (next_mir &&
          next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
        // Mark as null checked
        temp_ssa_register_v_->SetBit(next_mir->ssa_rep->defs[0]);
      } else {
        if (next_mir) {
          LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
        } else if (bb->fall_through) {
          // Look in next basic block
          struct BasicBlock* next_bb = bb->fall_through;
          for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL; tmir = tmir->next) {
            if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
              continue;
            }
            // First non-pseudo should be MOVE_RESULT_OBJECT
            if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
              // Mark as null checked
              temp_ssa_register_v_->SetBit(tmir->ssa_rep->defs[0]);
            } else {
              LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode;
            }
            break;
          }
        }
      }
    }

    /*
     * Propagate nullcheck state on register copies (including
     * Phi pseudo copies).  For the latter, nullcheck state is
     * the "and" of all the Phi's operands.
     */
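    // e.g. if vD = Phi(vA, vB) and both vA and vB carry the null-checked bit,
    // vD inherits it and a later null check on vD can be elided (illustrative).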
    if (df_attributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
      int tgt_sreg = mir->ssa_rep->defs[0];
      int operands = (df_attributes & DF_NULL_TRANSFER_0) ? 1 :
          mir->ssa_rep->num_uses;
      bool null_checked = true;
      for (int i = 0; i < operands; i++) {
        null_checked &= temp_ssa_register_v_->IsBitSet(mir->ssa_rep->uses[i]);
      }
      if (null_checked) {
        temp_ssa_register_v_->SetBit(tgt_sreg);
      }
    }

    // Already nullchecked?
    if ((df_attributes & DF_HAS_NULL_CHKS) && !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
      int src_idx;
      if (df_attributes & DF_NULL_CHK_1) {
        src_idx = 1;
      } else if (df_attributes & DF_NULL_CHK_2) {
        src_idx = 2;
      } else {
        src_idx = 0;
      }
      int src_sreg = mir->ssa_rep->uses[src_idx];
      if (temp_ssa_register_v_->IsBitSet(src_sreg)) {
        // Eliminate the null check
        mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
      } else {
        // Mark s_reg as null-checked
        temp_ssa_register_v_->SetBit(src_sreg);
      }
    }
  }

  // Did anything change?
  bool changed = !temp_ssa_register_v_->Equal(bb->data_flow_info->ending_null_check_v);
  if (changed) {
    bb->data_flow_info->ending_null_check_v->Copy(temp_ssa_register_v_);
  }
  return changed;
}

void MIRGraph::NullCheckElimination() {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination))) {
    DCHECK(temp_ssa_register_v_ != NULL);
    AllNodesIterator iter(this, false /* not iterative */);
    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
      NullCheckEliminationInit(bb);
    }
    PreOrderDfsIterator iter2(this, true /* iterative */);
    bool change = false;
    for (BasicBlock* bb = iter2.Next(change); bb != NULL; bb = iter2.Next(change)) {
      change = EliminateNullChecks(bb);
    }
  }
  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
    DumpCFG("/sdcard/4_post_nce_cfg/", false);
  }
}

void MIRGraph::BasicBlockCombine() {
  PreOrderDfsIterator iter(this, false /* not iterative */);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    CombineBlocks(bb);
  }
  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
    DumpCFG("/sdcard/5_post_bbcombine_cfg/", false);
  }
}

void MIRGraph::CodeLayout() {
  if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
    VerifyDataflow();
  }
  AllNodesIterator iter(this, false /* not iterative */);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    LayoutBlocks(bb);
  }
  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
    DumpCFG("/sdcard/2_post_layout_cfg/", true);
  }
}

void MIRGraph::DumpCheckStats() {
  Checkstats* stats =
      static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), ArenaAllocator::kAllocDFInfo));
  checkstats_ = stats;
  AllNodesIterator iter(this, false /* not iterative */);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    CountChecks(bb);
  }
  if (stats->null_checks > 0) {
    float eliminated = static_cast<float>(stats->null_checks_eliminated);
    float checks = static_cast<float>(stats->null_checks);
    LOG(INFO) << "Null Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
              << stats->null_checks_eliminated << " of " << stats->null_checks << " -> "
              << (eliminated/checks) * 100.0 << "%";
  }
  if (stats->range_checks > 0) {
    float eliminated = static_cast<float>(stats->range_checks_eliminated);
    float checks = static_cast<float>(stats->range_checks);
    LOG(INFO) << "Range Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
              << stats->range_checks_eliminated << " of " << stats->range_checks << " -> "
              << (eliminated/checks) * 100.0 << "%";
  }
}

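/*
 * Gather heads of extended basic blocks: maximal chains of blocks linked by
 * single-predecessor edges (see NextDominatedBlock). e.g. a straight-line
 * chain A -> B -> C, where B and C each have exactly one predecessor, forms
 * one extended basic block headed by A (illustrative).
 */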
bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) {
  if (bb->visited) return false;
  if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
      || (bb->block_type == kExitBlock))) {
    // Ignore special blocks
    bb->visited = true;
    return false;
  }
  // Must be head of extended basic block.
  BasicBlock* start_bb = bb;
  extended_basic_blocks_.push_back(bb);
  bool terminated_by_return = false;
  // Visit blocks strictly dominated by this head.
  while (bb != NULL) {
    bb->visited = true;
    terminated_by_return |= bb->terminated_by_return;
    bb = NextDominatedBlock(bb);
  }
  if (terminated_by_return) {
    // This extended basic block contains a return, so mark all members.
    bb = start_bb;
    while (bb != NULL) {
      bb->dominates_return = true;
      bb = NextDominatedBlock(bb);
    }
  }
  return false;  // Not iterative - return value will be ignored
}

void MIRGraph::BasicBlockOptimization() {
  if (!(cu_->disable_opt & (1 << kBBOpt))) {
    DCHECK_EQ(cu_->num_compiler_temps, 0);
    ClearAllVisitedFlags();
    PreOrderDfsIterator iter2(this, false /* not iterative */);
    for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
      BuildExtendedBBList(bb);
    }
    // Perform extended basic block optimizations.
    for (unsigned int i = 0; i < extended_basic_blocks_.size(); i++) {
      BasicBlockOpt(extended_basic_blocks_[i]);
    }
  }
  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
    DumpCFG("/sdcard/6_post_bbo_cfg/", false);
  }
}

}  // namespace art