/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "loop_optimization.h"

#include "arch/arm/instruction_set_features_arm.h"
#include "arch/arm64/instruction_set_features_arm64.h"
#include "arch/instruction_set.h"
#include "arch/mips/instruction_set_features_mips.h"
#include "arch/mips64/instruction_set_features_mips64.h"
#include "arch/x86/instruction_set_features_x86.h"
#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "driver/compiler_driver.h"
#include "linear_order.h"
#include "mirror/array-inl.h"
#include "mirror/string.h"

namespace art {

// Enables vectorization (SIMDization) in the loop optimizer.
static constexpr bool kEnableVectorization = true;

// No loop unrolling factor (just one copy of the loop-body).
static constexpr uint32_t kNoUnrollingFactor = 1;

//
// Static helpers.
//

// Base alignment for arrays/strings guaranteed by the Android runtime.
static uint32_t BaseAlignment() {
  return kObjectAlignment;
}

// Hidden offset for arrays/strings guaranteed by the Android runtime.
static uint32_t HiddenOffset(DataType::Type type, bool is_string_char_at) {
  return is_string_char_at
      ? mirror::String::ValueOffset().Uint32Value()
      : mirror::Array::DataOffset(DataType::Size(type)).Uint32Value();
}
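// For example, HiddenOffset(DataType::Type::kInt32, false) returns
// mirror::Array::DataOffset(4).Uint32Value(), the byte offset of element 0 in
// an int[] object, while HiddenOffset(type, true) returns
// mirror::String::ValueOffset().Uint32Value(), the start of the character
// payload of a java.lang.String.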

// Remove the instruction from the graph. A bit more elaborate than the usual
// instruction removal, since there may be a cycle in the use structure.
static void RemoveFromCycle(HInstruction* instruction) {
  instruction->RemoveAsUserOfAllInputs();
  instruction->RemoveEnvironmentUsers();
  instruction->GetBlock()->RemoveInstructionOrPhi(instruction, /*ensure_safety=*/ false);
  RemoveEnvironmentUses(instruction);
  ResetEnvironmentInputRecords(instruction);
}
// Detects a goto block and sets succ to the single successor.
static bool IsGotoBlock(HBasicBlock* block, /*out*/ HBasicBlock** succ) {
  if (block->GetPredecessors().size() == 1 &&
      block->GetSuccessors().size() == 1 &&
      block->IsSingleGoto()) {
    *succ = block->GetSingleSuccessor();
    return true;
  }
  return false;
}

// Detect an early exit loop.
static bool IsEarlyExit(HLoopInformation* loop_info) {
  HBlocksInLoopReversePostOrderIterator it_loop(*loop_info);
  for (it_loop.Advance(); !it_loop.Done(); it_loop.Advance()) {
    for (HBasicBlock* successor : it_loop.Current()->GetSuccessors()) {
      if (!loop_info->Contains(*successor)) {
        return true;
      }
    }
  }
  return false;
}
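// For example, a loop whose body contains "if (c) break;" has a non-header
// block with a successor outside the loop and is flagged as an early exit loop
// (the header's own exit edge is skipped by the initial Advance() above).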

// Forward declaration.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
                                  DataType::Type type,
                                  /*out*/ HInstruction** operand);

// Detect a sign extension in instruction from the given type.
// Returns the promoted operand on success.
static bool IsSignExtensionAndGet(HInstruction* instruction,
                                  DataType::Type type,
                                  /*out*/ HInstruction** operand) {
  // Accept any already wider constant that would be handled properly by sign
  // extension when represented in the *width* of the given narrower data type
  // (the fact that Uint8/Uint16 normally zero extend does not matter here).
  int64_t value = 0;
  if (IsInt64AndGet(instruction, /*out*/ &value)) {
    switch (type) {
      case DataType::Type::kUint8:
      case DataType::Type::kInt8:
        if (IsInt<8>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      case DataType::Type::kUint16:
      case DataType::Type::kInt16:
        if (IsInt<16>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      default:
        return false;
    }
  }
  // An implicit widening conversion of any signed expression sign-extends.
  if (instruction->GetType() == type) {
    switch (type) {
      case DataType::Type::kInt8:
      case DataType::Type::kInt16:
        *operand = instruction;
        return true;
      default:
        return false;
    }
  }
  // An explicit widening conversion of a signed expression sign-extends.
  if (instruction->IsTypeConversion()) {
    HInstruction* conv = instruction->InputAt(0);
    DataType::Type from = conv->GetType();
    switch (instruction->GetType()) {
      case DataType::Type::kInt32:
      case DataType::Type::kInt64:
        if (type == from && (from == DataType::Type::kInt8 ||
                             from == DataType::Type::kInt16 ||
                             from == DataType::Type::kInt32)) {
          *operand = conv;
          return true;
        }
        return false;
      case DataType::Type::kInt16:
        return type == DataType::Type::kUint16 &&
               from == DataType::Type::kUint16 &&
               IsZeroExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand);
      default:
        return false;
    }
  }
  return false;
}
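// For example, with type == kInt8 the constant 127 is accepted (it behaves the
// same under sign extension whether viewed in 8 bits or wider), while 128 is
// rejected. An explicit Int16 -> Int32 conversion queried with type == kInt16
// returns the original Int16 expression as the promoted operand.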

// Detect a zero extension in instruction from the given type.
// Returns the promoted operand on success.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
                                  DataType::Type type,
                                  /*out*/ HInstruction** operand) {
  // Accept any already wider constant that would be handled properly by zero
  // extension when represented in the *width* of the given narrower data type
  // (the fact that Int8/Int16 normally sign extend does not matter here).
  int64_t value = 0;
  if (IsInt64AndGet(instruction, /*out*/ &value)) {
    switch (type) {
      case DataType::Type::kUint8:
      case DataType::Type::kInt8:
        if (IsUint<8>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      case DataType::Type::kUint16:
      case DataType::Type::kInt16:
        if (IsUint<16>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      default:
        return false;
    }
  }
  // An implicit widening conversion of any unsigned expression zero-extends.
  if (instruction->GetType() == type) {
    switch (type) {
      case DataType::Type::kUint8:
      case DataType::Type::kUint16:
        *operand = instruction;
        return true;
      default:
        return false;
    }
  }
  // An explicit widening conversion of an unsigned expression zero-extends.
  if (instruction->IsTypeConversion()) {
    HInstruction* conv = instruction->InputAt(0);
    DataType::Type from = conv->GetType();
    switch (instruction->GetType()) {
      case DataType::Type::kInt32:
      case DataType::Type::kInt64:
        if (type == from && from == DataType::Type::kUint16) {
          *operand = conv;
          return true;
        }
        return false;
      case DataType::Type::kUint16:
        return type == DataType::Type::kInt16 &&
               from == DataType::Type::kInt16 &&
               IsSignExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand);
      default:
        return false;
    }
  }
  return false;
}
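// For example, with type == kUint8 the constants 0 through 255 are accepted
// (IsUint<8>), whereas -1 is rejected even though it sign-extends fine. An
// explicit Uint16 -> Int32 conversion (e.g. a char load widened to int)
// queried with type == kUint16 returns the original Uint16 expression.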

// Detect situations with same-extension narrower operands.
// Returns true on success and sets is_unsigned accordingly.
static bool IsNarrowerOperands(HInstruction* a,
                               HInstruction* b,
                               DataType::Type type,
                               /*out*/ HInstruction** r,
                               /*out*/ HInstruction** s,
                               /*out*/ bool* is_unsigned) {
  // Look for a matching sign extension.
  DataType::Type stype = HVecOperation::ToSignedType(type);
  if (IsSignExtensionAndGet(a, stype, r) && IsSignExtensionAndGet(b, stype, s)) {
    *is_unsigned = false;
    return true;
  }
  // Look for a matching zero extension.
  DataType::Type utype = HVecOperation::ToUnsignedType(type);
  if (IsZeroExtensionAndGet(a, utype, r) && IsZeroExtensionAndGet(b, utype, s)) {
    *is_unsigned = true;
    return true;
  }
  return false;
}
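// For example, for type == kInt16 and operands that are both explicit
// Int16 -> Int32 sign conversions, r and s are set to the narrower Int16
// expressions with is_unsigned == false; if both are Uint16 -> Int32 (char)
// conversions instead, the zero-extension case matches and is_unsigned == true.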

// As above, single operand.
static bool IsNarrowerOperand(HInstruction* a,
                              DataType::Type type,
                              /*out*/ HInstruction** r,
                              /*out*/ bool* is_unsigned) {
  // Look for a matching sign extension.
  DataType::Type stype = HVecOperation::ToSignedType(type);
  if (IsSignExtensionAndGet(a, stype, r)) {
    *is_unsigned = false;
    return true;
  }
  // Look for a matching zero extension.
  DataType::Type utype = HVecOperation::ToUnsignedType(type);
  if (IsZeroExtensionAndGet(a, utype, r)) {
    *is_unsigned = true;
    return true;
  }
  return false;
}

// Compute relative vector length based on type difference.
static uint32_t GetOtherVL(DataType::Type other_type, DataType::Type vector_type, uint32_t vl) {
  DCHECK(DataType::IsIntegralType(other_type));
  DCHECK(DataType::IsIntegralType(vector_type));
  DCHECK_GE(DataType::SizeShift(other_type), DataType::SizeShift(vector_type));
  return vl >> (DataType::SizeShift(other_type) - DataType::SizeShift(vector_type));
}
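// For example, GetOtherVL(DataType::Type::kInt32, DataType::Type::kInt8, 16)
// yields 16 >> (2 - 0) = 4: sixteen byte lanes occupy the same vector width
// as four int lanes.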

// Detect up to two instructions a and b, and an accumulated constant c.
static bool IsAddConstHelper(HInstruction* instruction,
                             /*out*/ HInstruction** a,
                             /*out*/ HInstruction** b,
                             /*out*/ int64_t* c,
                             int32_t depth) {
  static constexpr int32_t kMaxDepth = 8;  // don't search too deep
  int64_t value = 0;
  if (IsInt64AndGet(instruction, &value)) {
    *c += value;
    return true;
  } else if (instruction->IsAdd() && depth <= kMaxDepth) {
    return IsAddConstHelper(instruction->InputAt(0), a, b, c, depth + 1) &&
           IsAddConstHelper(instruction->InputAt(1), a, b, c, depth + 1);
  } else if (*a == nullptr) {
    *a = instruction;
    return true;
  } else if (*b == nullptr) {
    *b = instruction;
    return true;
  }
  return false;  // too many non-const operands
}

// Detect a + b + c for an optional constant c.
static bool IsAddConst(HInstruction* instruction,
                       /*out*/ HInstruction** a,
                       /*out*/ HInstruction** b,
                       /*out*/ int64_t* c) {
  if (instruction->IsAdd()) {
    // Try to find a + b and accumulated c.
    if (IsAddConstHelper(instruction->InputAt(0), a, b, c, /*depth*/ 0) &&
        IsAddConstHelper(instruction->InputAt(1), a, b, c, /*depth*/ 0) &&
        *b != nullptr) {
      return true;
    }
    // Found a + b.
    *a = instruction->InputAt(0);
    *b = instruction->InputAt(1);
    *c = 0;
    return true;
  }
  return false;
}
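// For example, for the expression (x + 1) + (y + 2) this sets a = x, b = y and
// accumulates c = 3; for a plain x + y it returns with a = x, b = y and leaves
// the accumulated constant untouched.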

// Detect a + c for constant c.
static bool IsAddConst(HInstruction* instruction,
                       /*out*/ HInstruction** a,
                       /*out*/ int64_t* c) {
  if (instruction->IsAdd()) {
    if (IsInt64AndGet(instruction->InputAt(0), c)) {
      *a = instruction->InputAt(1);
      return true;
    } else if (IsInt64AndGet(instruction->InputAt(1), c)) {
      *a = instruction->InputAt(0);
      return true;
    }
  }
  return false;
}
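// For example, for i + 4 (or 4 + i) this sets a = i and c = 4; a bare i
// without the addition is rejected by this overload.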

// Detect reductions of the following forms,
//   x = x_phi + ..
//   x = x_phi - ..
//   x = max(x_phi, ..)
//   x = min(x_phi, ..)
static bool HasReductionFormat(HInstruction* reduction, HInstruction* phi) {
  if (reduction->IsAdd()) {
    return (reduction->InputAt(0) == phi && reduction->InputAt(1) != phi) ||
           (reduction->InputAt(0) != phi && reduction->InputAt(1) == phi);
  } else if (reduction->IsSub()) {
    return (reduction->InputAt(0) == phi && reduction->InputAt(1) != phi);
  } else if (reduction->IsInvokeStaticOrDirect()) {
    switch (reduction->AsInvokeStaticOrDirect()->GetIntrinsic()) {
      case Intrinsics::kMathMinIntInt:
      case Intrinsics::kMathMinLongLong:
      case Intrinsics::kMathMinFloatFloat:
      case Intrinsics::kMathMinDoubleDouble:
      case Intrinsics::kMathMaxIntInt:
      case Intrinsics::kMathMaxLongLong:
      case Intrinsics::kMathMaxFloatFloat:
      case Intrinsics::kMathMaxDoubleDouble:
        return (reduction->InputAt(0) == phi && reduction->InputAt(1) != phi) ||
               (reduction->InputAt(0) != phi && reduction->InputAt(1) == phi);
      default:
        return false;
    }
  }
  return false;
}
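// For example, x = x_phi + a[i] and x = Math.max(x_phi, a[i]) match, while
// x = a[i] - x_phi does not: subtraction only reduces when the phi is the
// left-hand operand.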

// Translates vector operation to reduction kind.
static HVecReduce::ReductionKind GetReductionKind(HVecOperation* reduction) {
  if (reduction->IsVecAdd() || reduction->IsVecSub() || reduction->IsVecSADAccumulate()) {
    return HVecReduce::kSum;
  } else if (reduction->IsVecMin()) {
    return HVecReduce::kMin;
  } else if (reduction->IsVecMax()) {
    return HVecReduce::kMax;
  }
  LOG(FATAL) << "Unsupported SIMD reduction " << reduction->GetId();
  UNREACHABLE();
}

// Test vector restrictions.
static bool HasVectorRestrictions(uint64_t restrictions, uint64_t tested) {
  return (restrictions & tested) != 0;
}

// Insert an instruction.
static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
  DCHECK(block != nullptr);
  DCHECK(instruction != nullptr);
  block->InsertInstructionBefore(instruction, block->GetLastInstruction());
  return instruction;
}

// Check that instructions from the induction sets are fully removed: they have
// been detached from the graph and no other instructions or environments use them.
static bool CheckInductionSetFullyRemoved(ScopedArenaSet<HInstruction*>* iset) {
  for (HInstruction* instr : *iset) {
    if (instr->GetBlock() != nullptr ||
        !instr->GetUses().empty() ||
        !instr->GetEnvUses().empty() ||
        HasEnvironmentUsedByOthers(instr)) {
      return false;
    }
  }
  return true;
}

//
// Public methods.
//

HLoopOptimization::HLoopOptimization(HGraph* graph,
                                     CompilerDriver* compiler_driver,
                                     HInductionVarAnalysis* induction_analysis,
                                     OptimizingCompilerStats* stats,
                                     const char* name)
    : HOptimization(graph, name, stats),
      compiler_driver_(compiler_driver),
      induction_range_(induction_analysis),
      loop_allocator_(nullptr),
      global_allocator_(graph_->GetAllocator()),
      top_loop_(nullptr),
      last_loop_(nullptr),
      iset_(nullptr),
      reductions_(nullptr),
      simplified_(false),
      vector_length_(0),
      vector_refs_(nullptr),
      vector_static_peeling_factor_(0),
      vector_dynamic_peeling_candidate_(nullptr),
      vector_runtime_test_a_(nullptr),
      vector_runtime_test_b_(nullptr),
      vector_map_(nullptr),
      vector_permanent_map_(nullptr),
      vector_mode_(kSequential),
      vector_preheader_(nullptr),
      vector_header_(nullptr),
      vector_body_(nullptr),
      vector_index_(nullptr) {
}

void HLoopOptimization::Run() {
  // Skip if there is no loop or the graph has try-catch/irreducible loops.
  // TODO: make this less of a sledgehammer.
  if (!graph_->HasLoops() || graph_->HasTryCatch() || graph_->HasIrreducibleLoops()) {
    return;
  }

  // Phase-local allocator.
  ScopedArenaAllocator allocator(graph_->GetArenaStack());
  loop_allocator_ = &allocator;

  // Perform loop optimizations.
  LocalRun();
  if (top_loop_ == nullptr) {
    graph_->SetHasLoops(false);  // no more loops
  }

  // Detach.
  loop_allocator_ = nullptr;
  last_loop_ = top_loop_ = nullptr;
}

//
// Loop setup and traversal.
//

void HLoopOptimization::LocalRun() {
  // Build the linear order using the phase-local allocator. This step enables building
  // a loop hierarchy that properly reflects the outer-inner and previous-next relation.
  ScopedArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
  LinearizeGraph(graph_, &linear_order);

  // Build the loop hierarchy.
  for (HBasicBlock* block : linear_order) {
    if (block->IsLoopHeader()) {
      AddLoop(block->GetLoopInformation());
    }
  }

  // Traverse the loop hierarchy inner-to-outer and optimize. The traversal may
  // use temporary data structures allocated with the phase-local allocator, but
  // all new HIR should use the global allocator.
  if (top_loop_ != nullptr) {
    ScopedArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSafeMap<HInstruction*, HInstruction*> reds(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSafeMap<HInstruction*, HInstruction*> map(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSafeMap<HInstruction*, HInstruction*> perm(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    // Attach.
    iset_ = &iset;
    reductions_ = &reds;
    vector_refs_ = &refs;
    vector_map_ = &map;
    vector_permanent_map_ = &perm;
    // Traverse.
    TraverseLoopsInnerToOuter(top_loop_);
    // Detach.
    iset_ = nullptr;
    reductions_ = nullptr;
    vector_refs_ = nullptr;
    vector_map_ = nullptr;
    vector_permanent_map_ = nullptr;
  }
}

void HLoopOptimization::AddLoop(HLoopInformation* loop_info) {
  DCHECK(loop_info != nullptr);
  LoopNode* node = new (loop_allocator_) LoopNode(loop_info);
  if (last_loop_ == nullptr) {
    // First loop.
    DCHECK(top_loop_ == nullptr);
    last_loop_ = top_loop_ = node;
  } else if (loop_info->IsIn(*last_loop_->loop_info)) {
    // Inner loop.
    node->outer = last_loop_;
    DCHECK(last_loop_->inner == nullptr);
    last_loop_ = last_loop_->inner = node;
  } else {
    // Subsequent loop.
    while (last_loop_->outer != nullptr && !loop_info->IsIn(*last_loop_->outer->loop_info)) {
      last_loop_ = last_loop_->outer;
    }
    node->outer = last_loop_->outer;
    node->previous = last_loop_;
    DCHECK(last_loop_->next == nullptr);
    last_loop_ = last_loop_->next = node;
  }
}
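// For example, for "for (i) { for (j) { .. } for (k) { .. } }" visited in
// linear order, i becomes top_loop_, j is linked as the inner node of i, and
// k becomes the next sibling of j (with i as its outer node).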

void HLoopOptimization::RemoveLoop(LoopNode* node) {
  DCHECK(node != nullptr);
  DCHECK(node->inner == nullptr);
  if (node->previous != nullptr) {
    // Within sequence.
    node->previous->next = node->next;
    if (node->next != nullptr) {
      node->next->previous = node->previous;
    }
  } else {
    // First of sequence.
    if (node->outer != nullptr) {
      node->outer->inner = node->next;
    } else {
      top_loop_ = node->next;
    }
    if (node->next != nullptr) {
      node->next->outer = node->outer;
      node->next->previous = nullptr;
    }
  }
}

bool HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
  bool changed = false;
  for ( ; node != nullptr; node = node->next) {
    // Visit inner loops first. Recompute induction information for this
    // loop if the induction of any inner loop has changed.
    if (TraverseLoopsInnerToOuter(node->inner)) {
      induction_range_.ReVisit(node->loop_info);
    }
    // Repeat simplifications in the loop-body until no more changes occur.
    // Note that since each simplification consists of eliminating code (without
    // introducing new code), this process is always finite.
    do {
      simplified_ = false;
      SimplifyInduction(node);
      SimplifyBlocks(node);
      changed = simplified_ || changed;
    } while (simplified_);
    // Optimize inner loop.
    if (node->inner == nullptr) {
      changed = OptimizeInnerLoop(node) || changed;
    }
  }
  return changed;
}

//
// Optimization.
//

void HLoopOptimization::SimplifyInduction(LoopNode* node) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();
  // Scan the phis in the header to find opportunities to simplify an induction
  // cycle that is only used outside the loop. Replace these uses, if any, with
  // the last value and remove the induction cycle.
  // Examples: for (int i = 0; x != null;   i++) { .... no i .... }
  //           for (int i = 0; i < 10; i++, k++) { .... no k .... } return k;
  for (HInstructionIterator it(header->GetPhis()); !it.Done(); it.Advance()) {
    HPhi* phi = it.Current()->AsPhi();
    if (TrySetPhiInduction(phi, /*restrict_uses*/ true) &&
        TryAssignLastValue(node->loop_info, phi, preheader, /*collect_loop_uses*/ false)) {
      // Note that it's ok to have replaced uses after the loop with the last value, without
      // being able to remove the cycle. Environment uses (which are the reason we may not be
      // able to remove the cycle) within the loop will still hold the right value. We must
      // have tried first, however, to replace outside uses.
      if (CanRemoveCycle()) {
        simplified_ = true;
        for (HInstruction* i : *iset_) {
          RemoveFromCycle(i);
        }
        DCHECK(CheckInductionSetFullyRemoved(iset_));
      }
    }
  }
}

void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
  // Iterate over all basic blocks in the loop-body.
  for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
    HBasicBlock* block = it.Current();
    // Remove dead instructions from the loop-body.
    RemoveDeadInstructions(block->GetPhis());
    RemoveDeadInstructions(block->GetInstructions());
    // Remove trivial control flow blocks from the loop-body.
    if (block->GetPredecessors().size() == 1 &&
        block->GetSuccessors().size() == 1 &&
        block->GetSingleSuccessor()->GetPredecessors().size() == 1) {
      simplified_ = true;
      block->MergeWith(block->GetSingleSuccessor());
    } else if (block->GetSuccessors().size() == 2) {
      // Trivial if block can be bypassed to either branch.
      HBasicBlock* succ0 = block->GetSuccessors()[0];
      HBasicBlock* succ1 = block->GetSuccessors()[1];
      HBasicBlock* meet0 = nullptr;
      HBasicBlock* meet1 = nullptr;
      if (succ0 != succ1 &&
          IsGotoBlock(succ0, &meet0) &&
          IsGotoBlock(succ1, &meet1) &&
          meet0 == meet1 &&  // meets again
          meet0 != block &&  // no self-loop
          meet0->GetPhis().IsEmpty()) {  // not used for merging
        simplified_ = true;
        succ0->DisconnectAndDelete();
        if (block->Dominates(meet0)) {
          block->RemoveDominatedBlock(meet0);
          succ1->AddDominatedBlock(meet0);
          meet0->SetDominator(succ1);
        }
      }
    }
  }
}
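// For example, after dead instruction removal an "if (c) { } else { }" diamond
// in the loop-body leaves two empty goto blocks that meet again without phis;
// the test above then deletes one arm so control flows through the other.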

bool HLoopOptimization::OptimizeInnerLoop(LoopNode* node) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();
  // Ensure loop header logic is finite.
  int64_t trip_count = 0;
  if (!induction_range_.IsFinite(node->loop_info, &trip_count)) {
    return false;
  }
  // Ensure there is only a single loop-body (besides the header).
  HBasicBlock* body = nullptr;
  for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
    if (it.Current() != header) {
      if (body != nullptr) {
        return false;
      }
      body = it.Current();
    }
  }
  CHECK(body != nullptr);
  // Ensure there is only a single exit point.
  if (header->GetSuccessors().size() != 2) {
    return false;
  }
  HBasicBlock* exit = (header->GetSuccessors()[0] == body)
      ? header->GetSuccessors()[1]
      : header->GetSuccessors()[0];
  // Ensure exit can only be reached by exiting loop.
  if (exit->GetPredecessors().size() != 1) {
    return false;
  }
  // Detect either an empty loop (no side effects other than plain iteration) or
  // a trivial loop (just iterating once). Replace subsequent index uses, if any,
  // with the last value and remove the loop, possibly after unrolling its body.
  HPhi* main_phi = nullptr;
  if (TrySetSimpleLoopHeader(header, &main_phi)) {
    bool is_empty = IsEmptyBody(body);
    if (reductions_->empty() &&  // TODO: possible with some effort
        (is_empty || trip_count == 1) &&
        TryAssignLastValue(node->loop_info, main_phi, preheader, /*collect_loop_uses*/ true)) {
      if (!is_empty) {
        // Unroll the loop-body, which sees initial value of the index.
        main_phi->ReplaceWith(main_phi->InputAt(0));
        preheader->MergeInstructionsWith(body);
      }
      body->DisconnectAndDelete();
      exit->RemovePredecessor(header);
      header->RemoveSuccessor(exit);
      header->RemoveDominatedBlock(exit);
      header->DisconnectAndDelete();
      preheader->AddSuccessor(exit);
      preheader->AddInstruction(new (global_allocator_) HGoto());
      preheader->AddDominatedBlock(exit);
      exit->SetDominator(preheader);
      RemoveLoop(node);  // update hierarchy
      return true;
    }
  }
  // Vectorize loop, if possible and valid.
  if (kEnableVectorization &&
      TrySetSimpleLoopHeader(header, &main_phi) &&
      ShouldVectorize(node, body, trip_count) &&
      TryAssignLastValue(node->loop_info, main_phi, preheader, /*collect_loop_uses*/ true)) {
    Vectorize(node, body, exit, trip_count);
    graph_->SetHasSIMD(true);  // flag SIMD usage
    MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorized);
    return true;
  }
  return false;
}
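// For example, "for (int i = 0; i < n; i++) { }" can be deleted outright as an
// empty loop, "for (int i = 0; i < 1; i++) { a[0] = 1; }" has its body unrolled
// into the preheader before deletion, and a simple countable array loop may
// instead take the vectorization path.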

//
// Loop vectorization. The implementation is based on the book by Aart J.C. Bik:
// "The Software Vectorization Handbook. Applying Multimedia Extensions for Maximum Performance."
// Intel Press, June, 2004 (http://www.aartbik.com/).
//

bool HLoopOptimization::ShouldVectorize(LoopNode* node, HBasicBlock* block, int64_t trip_count) {
  // Reset vector bookkeeping.
  vector_length_ = 0;
  vector_refs_->clear();
  vector_static_peeling_factor_ = 0;
  vector_dynamic_peeling_candidate_ = nullptr;
  vector_runtime_test_a_ =
  vector_runtime_test_b_ = nullptr;

  // Phis in the loop-body prevent vectorization.
  if (!block->GetPhis().IsEmpty()) {
    return false;
  }

  // Scan the loop-body, starting a right-hand-side tree traversal at each left-hand-side
  // occurrence, which allows passing attributes down the use tree.
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    if (!VectorizeDef(node, it.Current(), /*generate_code*/ false)) {
      return false;  // failure to vectorize a left-hand-side
    }
  }

  // Prepare alignment analysis:
  // (1) find desired alignment (SIMD vector size in bytes).
  // (2) initialize static loop peeling votes (peeling factor that will
  //     make one particular reference aligned), never to exceed (1).
  // (3) variable to record how many references share same alignment.
  // (4) variable to record suitable candidate for dynamic loop peeling.
  uint32_t desired_alignment = GetVectorSizeInBytes();
  DCHECK_LE(desired_alignment, 16u);
  uint32_t peeling_votes[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  uint32_t max_num_same_alignment = 0;
  const ArrayReference* peeling_candidate = nullptr;

  // Data dependence analysis. Find each pair of references with same type, where
  // at least one is a write. Each such pair denotes a possible data dependence.
  // This analysis exploits the property that differently typed arrays cannot be
  // aliased, as well as the property that references either point to the same
  // array or to two completely disjoint arrays, i.e., no partial aliasing.
  // Other than a few simple heuristics, no detailed subscript analysis is done.
  // The scan over references also prepares finding a suitable alignment strategy.
  for (auto i = vector_refs_->begin(); i != vector_refs_->end(); ++i) {
    uint32_t num_same_alignment = 0;
    // Scan over all next references.
    for (auto j = i; ++j != vector_refs_->end(); ) {
      if (i->type == j->type && (i->lhs || j->lhs)) {
        // Found same-typed a[i+x] vs. b[i+y], where at least one is a write.
        HInstruction* a = i->base;
        HInstruction* b = j->base;
        HInstruction* x = i->offset;
        HInstruction* y = j->offset;
        if (a == b) {
          // Found a[i+x] vs. a[i+y]. Accept if x == y (loop-independent data dependence).
          // Conservatively assume a loop-carried data dependence otherwise, and reject.
          if (x != y) {
            return false;
          }
          // Count the number of references that have the same alignment (since
          // base and offset are the same) and where at least one is a write
          // (e.g. a[i] = a[i] + b[i] counts a[i] but not b[i]).
          num_same_alignment++;
        } else {
          // Found a[i+x] vs. b[i+y]. Accept if x == y (at worst loop-independent data dependence).
          // Conservatively assume a potential loop-carried data dependence otherwise, avoided by
          // generating an explicit a != b disambiguation runtime test on the two references.
          if (x != y) {
            // To avoid excessive overhead, we only accept one a != b test.
            if (vector_runtime_test_a_ == nullptr) {
              // First test found.
              vector_runtime_test_a_ = a;
              vector_runtime_test_b_ = b;
            } else if ((vector_runtime_test_a_ != a || vector_runtime_test_b_ != b) &&
                       (vector_runtime_test_a_ != b || vector_runtime_test_b_ != a)) {
              return false;  // second test would be needed
            }
          }
        }
      }
    }
    // Update information for finding suitable alignment strategy:
    // (1) update votes for static loop peeling,
    // (2) update suitable candidate for dynamic loop peeling.
    Alignment alignment = ComputeAlignment(i->offset, i->type, i->is_string_char_at);
    if (alignment.Base() >= desired_alignment) {
      // If the array/string object has a known, sufficient alignment, use the
      // initial offset to compute the static loop peeling vote (this always
      // works, since elements have natural alignment).
      uint32_t offset = alignment.Offset() & (desired_alignment - 1u);
      uint32_t vote = (offset == 0)
          ? 0
          : ((desired_alignment - offset) >> DataType::SizeShift(i->type));
      DCHECK_LT(vote, 16u);
      ++peeling_votes[vote];
    } else if (BaseAlignment() >= desired_alignment &&
               num_same_alignment > max_num_same_alignment) {
      // Otherwise, if the array/string object has a known, sufficient alignment
      // for just the base but with an unknown offset, record the candidate with
      // the most occurrences for dynamic loop peeling (again, the peeling always
      // works, since elements have natural alignment).
      max_num_same_alignment = num_same_alignment;
      peeling_candidate = &(*i);
    }
  }  // for i

  // Find a suitable alignment strategy.
  SetAlignmentStrategy(peeling_votes, peeling_candidate);

  // Does vectorization seem profitable?
  if (!IsVectorizationProfitable(trip_count)) {
    return false;
  }

  // Success!
  return true;
}
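// Worked alignment example: with a 16-byte SIMD width, a char (2-byte)
// reference whose known alignment has Offset() == 12 contributes the vote
// (16 - 12) >> 1 = 2, i.e. peeling two scalar iterations would align it.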

void HLoopOptimization::Vectorize(LoopNode* node,
                                  HBasicBlock* block,
                                  HBasicBlock* exit,
                                  int64_t trip_count) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();

  // Pick a loop unrolling factor for the vector loop.
  uint32_t unroll = GetUnrollingFactor(block, trip_count);
  uint32_t chunk = vector_length_ * unroll;

  DCHECK(trip_count == 0 || (trip_count >= MaxNumberPeeled() + chunk));

  // A cleanup loop is needed, at least, for any unknown trip count or
  // for a known trip count with remainder iterations after vectorization.
  bool needs_cleanup = trip_count == 0 ||
      ((trip_count - vector_static_peeling_factor_) % chunk) != 0;

  // Adjust vector bookkeeping.
  HPhi* main_phi = nullptr;
  bool is_simple_loop_header = TrySetSimpleLoopHeader(header, &main_phi);  // refills sets
  DCHECK(is_simple_loop_header);
  vector_header_ = header;
  vector_body_ = block;

  // Loop induction type.
  DataType::Type induc_type = main_phi->GetType();
  DCHECK(induc_type == DataType::Type::kInt32 || induc_type == DataType::Type::kInt64)
      << induc_type;

  // Generate the trip count for static or dynamic loop peeling, if needed:
  // ptc = <peeling factor>;
  HInstruction* ptc = nullptr;
  if (vector_static_peeling_factor_ != 0) {
    // Static loop peeling for SIMD alignment (using the most suitable
    // fixed peeling factor found during prior alignment analysis).
    DCHECK(vector_dynamic_peeling_candidate_ == nullptr);
    ptc = graph_->GetConstant(induc_type, vector_static_peeling_factor_);
  } else if (vector_dynamic_peeling_candidate_ != nullptr) {
    // Dynamic loop peeling for SIMD alignment (using the most suitable
    // candidate found during prior alignment analysis):
    // rem = offset % ALIGN;    // adjusted as #elements
    // ptc = rem == 0 ? 0 : (ALIGN - rem);
    uint32_t shift = DataType::SizeShift(vector_dynamic_peeling_candidate_->type);
    uint32_t align = GetVectorSizeInBytes() >> shift;
    uint32_t hidden_offset = HiddenOffset(vector_dynamic_peeling_candidate_->type,
                                          vector_dynamic_peeling_candidate_->is_string_char_at);
    HInstruction* adjusted_offset = graph_->GetConstant(induc_type, hidden_offset >> shift);
    HInstruction* offset = Insert(preheader, new (global_allocator_) HAdd(
        induc_type, vector_dynamic_peeling_candidate_->offset, adjusted_offset));
    HInstruction* rem = Insert(preheader, new (global_allocator_) HAnd(
        induc_type, offset, graph_->GetConstant(induc_type, align - 1u)));
    HInstruction* sub = Insert(preheader, new (global_allocator_) HSub(
        induc_type, graph_->GetConstant(induc_type, align), rem));
    HInstruction* cond = Insert(preheader, new (global_allocator_) HEqual(
        rem, graph_->GetConstant(induc_type, 0)));
    ptc = Insert(preheader, new (global_allocator_) HSelect(
        cond, graph_->GetConstant(induc_type, 0), sub, kNoDexPc));
    needs_cleanup = true;  // don't know the exact amount
  }

  // Generate loop control:
  // stc = <trip-count>;
  // ptc = min(stc, ptc);
  // vtc = stc - (stc - ptc) % chunk;
  // i = 0;
  HInstruction* stc = induction_range_.GenerateTripCount(node->loop_info, graph_, preheader);
  HInstruction* vtc = stc;
  if (needs_cleanup) {
    DCHECK(IsPowerOfTwo(chunk));
    HInstruction* diff = stc;
    if (ptc != nullptr) {
      if (trip_count == 0) {
        HInstruction* cond = Insert(preheader, new (global_allocator_) HAboveOrEqual(stc, ptc));
        ptc = Insert(preheader, new (global_allocator_) HSelect(cond, ptc, stc, kNoDexPc));
      }
      diff = Insert(preheader, new (global_allocator_) HSub(induc_type, stc, ptc));
    }
    HInstruction* rem = Insert(
        preheader, new (global_allocator_) HAnd(induc_type,
                                                diff,
                                                graph_->GetConstant(induc_type, chunk - 1)));
    vtc = Insert(preheader, new (global_allocator_) HSub(induc_type, stc, rem));
  }
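  // For example, with stc = 100, ptc = 3 and chunk = 8: diff = 97,
  // rem = 97 & 7 = 1 and vtc = 99, so iterations [0,3) run in the peeling
  // loop, [3,99) in the vector loop, and [99,100) in the cleanup loop.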
  vector_index_ = graph_->GetConstant(induc_type, 0);

  // Generate runtime disambiguation test:
  // vtc = a != b ? vtc : 0;
  if (vector_runtime_test_a_ != nullptr) {
    HInstruction* rt = Insert(
        preheader,
        new (global_allocator_) HNotEqual(vector_runtime_test_a_, vector_runtime_test_b_));
    vtc = Insert(preheader,
                 new (global_allocator_)
                 HSelect(rt, vtc, graph_->GetConstant(induc_type, 0), kNoDexPc));
    needs_cleanup = true;
  }

  // Generate alignment peeling loop, if needed:
  // for ( ; i < ptc; i += 1)
  //    <loop-body>
  //
  // NOTE: The alignment forced by the peeling loop is preserved even if data is
  //       moved around during suspend checks, since all analysis was based on
  //       nothing more than the Android runtime alignment conventions.
  if (ptc != nullptr) {
    vector_mode_ = kSequential;
    GenerateNewLoop(node,
                    block,
                    graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                    vector_index_,
                    ptc,
                    graph_->GetConstant(induc_type, 1),
                    kNoUnrollingFactor);
  }

  // Generate vector loop, possibly further unrolled:
  // for ( ; i < vtc; i += chunk)
  //    <vectorized-loop-body>
  vector_mode_ = kVector;
  GenerateNewLoop(node,
                  block,
                  graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                  vector_index_,
                  vtc,
                  graph_->GetConstant(induc_type, vector_length_),  // increment per unroll
                  unroll);
  HLoopInformation* vloop = vector_header_->GetLoopInformation();

  // Generate cleanup loop, if needed:
  // for ( ; i < stc; i += 1)
  //    <loop-body>
  if (needs_cleanup) {
    vector_mode_ = kSequential;
    GenerateNewLoop(node,
                    block,
                    graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                    vector_index_,
                    stc,
                    graph_->GetConstant(induc_type, 1),
                    kNoUnrollingFactor);
  }

  // Link reductions to their final uses.
  for (auto i = reductions_->begin(); i != reductions_->end(); ++i) {
    if (i->first->IsPhi()) {
      HInstruction* phi = i->first;
      HInstruction* repl = ReduceAndExtractIfNeeded(i->second);
      // Deal with regular uses.
      for (const HUseListNode<HInstruction*>& use : phi->GetUses()) {
        induction_range_.Replace(use.GetUser(), phi, repl);  // update induction use
      }
      phi->ReplaceWith(repl);
    }
  }

  // Remove the original loop by disconnecting the body block
  // and removing all instructions from the header.
  block->DisconnectAndDelete();
  while (!header->GetFirstInstruction()->IsGoto()) {
    header->RemoveInstruction(header->GetFirstInstruction());
  }

  // Update loop hierarchy: the old header now resides in the same outer loop
  // as the old preheader. Note that we don't bother putting sequential
  // loops back in the hierarchy at this point.
  header->SetLoopInformation(preheader->GetLoopInformation());  // outward
  node->loop_info = vloop;
}

void HLoopOptimization::GenerateNewLoop(LoopNode* node,
                                        HBasicBlock* block,
                                        HBasicBlock* new_preheader,
                                        HInstruction* lo,
                                        HInstruction* hi,
                                        HInstruction* step,
                                        uint32_t unroll) {
  DCHECK(unroll == 1 || vector_mode_ == kVector);
  DataType::Type induc_type = lo->GetType();
  // Prepare new loop.
  vector_preheader_ = new_preheader;
  vector_header_ = vector_preheader_->GetSingleSuccessor();
  vector_body_ = vector_header_->GetSuccessors()[1];
  HPhi* phi = new (global_allocator_) HPhi(global_allocator_,
                                           kNoRegNumber,
                                           0,
                                           HPhi::ToPhiType(induc_type));
  // Generate header and prepare body.
  // for (i = lo; i < hi; i += step)
  //    <loop-body>
  HInstruction* cond = new (global_allocator_) HAboveOrEqual(phi, hi);
  vector_header_->AddPhi(phi);
  vector_header_->AddInstruction(cond);
  vector_header_->AddInstruction(new (global_allocator_) HIf(cond));
  vector_index_ = phi;
  vector_permanent_map_->clear();  // preserved over unrolling
  for (uint32_t u = 0; u < unroll; u++) {
    // Generate instruction map.
    vector_map_->clear();
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      bool vectorized_def = VectorizeDef(node, it.Current(), /*generate_code*/ true);
      DCHECK(vectorized_def);
    }
    // Generate body from the instruction map, but in original program order.
    HEnvironment* env = vector_header_->GetFirstInstruction()->GetEnvironment();
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      auto i = vector_map_->find(it.Current());
      if (i != vector_map_->end() && !i->second->IsInBlock()) {
        Insert(vector_body_, i->second);
        // Deal with instructions that need an environment, such as the scalar intrinsics.
        if (i->second->NeedsEnvironment()) {
          i->second->CopyEnvironmentFromWithLoopPhiAdjustment(env, vector_header_);
        }
      }
    }
    // Generate the induction.
    vector_index_ = new (global_allocator_) HAdd(induc_type, vector_index_, step);
    Insert(vector_body_, vector_index_);
  }
  // Finalize phi inputs for the reductions (if any).
  for (auto i = reductions_->begin(); i != reductions_->end(); ++i) {
    if (!i->first->IsPhi()) {
      DCHECK(i->second->IsPhi());
      GenerateVecReductionPhiInputs(i->second->AsPhi(), i->first);
    }
  }
  // Finalize phi inputs for the loop index.
  phi->AddInput(lo);
  phi->AddInput(vector_index_);
  vector_index_ = phi;
}

bool HLoopOptimization::VectorizeDef(LoopNode* node,
                                     HInstruction* instruction,
                                     bool generate_code) {
  // Accept a left-hand-side array base[index] for
  // (1) supported vector type,
  // (2) loop-invariant base,
  // (3) unit stride index,
  // (4) vectorizable right-hand-side value.
  uint64_t restrictions = kNone;
  if (instruction->IsArraySet()) {
    DataType::Type type = instruction->AsArraySet()->GetComponentType();
    HInstruction* base = instruction->InputAt(0);
    HInstruction* index = instruction->InputAt(1);
    HInstruction* value = instruction->InputAt(2);
    HInstruction* offset = nullptr;
    if (TrySetVectorType(type, &restrictions) &&
        node->loop_info->IsDefinedOutOfTheLoop(base) &&
        induction_range_.IsUnitStride(instruction, index, graph_, &offset) &&
        VectorizeUse(node, value, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecSub(index, offset);
        GenerateVecMem(instruction, vector_map_->Get(index), vector_map_->Get(value), offset, type);
      } else {
        vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ true));
      }
      return true;
    }
    return false;
  }
  // Accept a left-hand-side reduction for
  // (1) supported vector type,
  // (2) vectorizable right-hand-side value.
  auto redit = reductions_->find(instruction);
  if (redit != reductions_->end()) {
    DataType::Type type = instruction->GetType();
    // Recognize SAD idiom or direct reduction.
    if (VectorizeSADIdiom(node, instruction, generate_code, type, restrictions) ||
        (TrySetVectorType(type, &restrictions) &&
         VectorizeUse(node, instruction, generate_code, type, restrictions))) {
      if (generate_code) {
        HInstruction* new_red = vector_map_->Get(instruction);
        vector_permanent_map_->Put(new_red, vector_map_->Get(redit->second));
        vector_permanent_map_->Overwrite(redit->second, new_red);
      }
      return true;
    }
    return false;
  }
  // Branch back okay.
  if (instruction->IsGoto()) {
    return true;
  }
  // Otherwise accept only expressions with no effects outside the immediate loop-body.
  // Note that actual uses are inspected during right-hand-side tree traversal.
  return !IsUsedOutsideLoop(node->loop_info, instruction) && !instruction->DoesAnyWrite();
}
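// For example, the statement a[i] = b[i] + 1 enters through the ArraySet case:
// its component type must be supported, a must be loop-invariant, i must be a
// unit-stride induction, and the right-hand side b[i] + 1 is then checked
// recursively by VectorizeUse.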

// TODO: saturation arithmetic.
bool HLoopOptimization::VectorizeUse(LoopNode* node,
                                     HInstruction* instruction,
                                     bool generate_code,
                                     DataType::Type type,
                                     uint64_t restrictions) {
  // Accept anything for which code has already been generated.
  if (generate_code) {
    if (vector_map_->find(instruction) != vector_map_->end()) {
      return true;
    }
  }
  // Continue the right-hand-side tree traversal, passing in proper
  // types and vector restrictions along the way. During code generation,
  // all new nodes are drawn from the global allocator.
  if (node->loop_info->IsDefinedOutOfTheLoop(instruction)) {
    // Accept invariant use, using scalar expansion.
    if (generate_code) {
      GenerateVecInv(instruction, type);
    }
    return true;
  } else if (instruction->IsArrayGet()) {
    // Deal with vector restrictions.
    bool is_string_char_at = instruction->AsArrayGet()->IsStringCharAt();
    if (is_string_char_at && HasVectorRestrictions(restrictions, kNoStringCharAt)) {
      return false;
    }
    // Accept a right-hand-side array base[index] for
    // (1) matching vector type (exact match or signed/unsigned integral type of the same size),
    // (2) loop-invariant base,
    // (3) unit stride index,
    // (4) vectorizable right-hand-side value.
    HInstruction* base = instruction->InputAt(0);
    HInstruction* index = instruction->InputAt(1);
    HInstruction* offset = nullptr;
    if (HVecOperation::ToSignedType(type) == HVecOperation::ToSignedType(instruction->GetType()) &&
        node->loop_info->IsDefinedOutOfTheLoop(base) &&
        induction_range_.IsUnitStride(instruction, index, graph_, &offset)) {
      if (generate_code) {
        GenerateVecSub(index, offset);
        GenerateVecMem(instruction, vector_map_->Get(index), nullptr, offset, type);
      } else {
        vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ false, is_string_char_at));
      }
      return true;
    }
  } else if (instruction->IsPhi()) {
    // Accept particular phi operations.
    if (reductions_->find(instruction) != reductions_->end()) {
      // Deal with vector restrictions.
      if (HasVectorRestrictions(restrictions, kNoReduction)) {
        return false;
      }
      // Accept a reduction.
      if (generate_code) {
        GenerateVecReductionPhi(instruction->AsPhi());
      }
      return true;
    }
    // TODO: accept right-hand-side induction?
    return false;
  } else if (instruction->IsTypeConversion()) {
    // Accept particular type conversions.
    HTypeConversion* conversion = instruction->AsTypeConversion();
    HInstruction* opa = conversion->InputAt(0);
    DataType::Type from = conversion->GetInputType();
    DataType::Type to = conversion->GetResultType();
    if (DataType::IsIntegralType(from) && DataType::IsIntegralType(to)) {
      uint32_t size_vec = DataType::Size(type);
      uint32_t size_from = DataType::Size(from);
      uint32_t size_to = DataType::Size(to);
      // Accept an integral conversion
      // (1a) narrowing into vector type, "wider" operations cannot bring in higher order bits, or
      // (1b) widening from at least vector type, and
      // (2) vectorizable operand.
      if ((size_to < size_from &&
           size_to == size_vec &&
           VectorizeUse(node, opa, generate_code, type, restrictions | kNoHiBits)) ||
          (size_to >= size_from &&
           size_from >= size_vec &&
           VectorizeUse(node, opa, generate_code, type, restrictions))) {
        if (generate_code) {
          if (vector_mode_ == kVector) {
            vector_map_->Put(instruction, vector_map_->Get(opa));  // operand pass-through
          } else {
            GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
          }
        }
        return true;
      }
    } else if (to == DataType::Type::kFloat32 && from == DataType::Type::kInt32) {
      DCHECK_EQ(to, type);
      // Accept int to float conversion for
      // (1) supported int,
      // (2) vectorizable operand.
      if (TrySetVectorType(from, &restrictions) &&
          VectorizeUse(node, opa, generate_code, from, restrictions)) {
        if (generate_code) {
          GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
        }
        return true;
      }
    }
    return false;
  } else if (instruction->IsNeg() || instruction->IsNot() || instruction->IsBooleanNot()) {
    // Accept unary operator for vectorizable operand.
    HInstruction* opa = instruction->InputAt(0);
    if (VectorizeUse(node, opa, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
      }
      return true;
    }
  } else if (instruction->IsAdd() || instruction->IsSub() ||
             instruction->IsMul() || instruction->IsDiv() ||
             instruction->IsAnd() || instruction->IsOr()  || instruction->IsXor()) {
    // Deal with vector restrictions.
    if ((instruction->IsMul() && HasVectorRestrictions(restrictions, kNoMul)) ||
        (instruction->IsDiv() && HasVectorRestrictions(restrictions, kNoDiv))) {
      return false;
    }
    // Accept binary operator for vectorizable operands.
    HInstruction* opa = instruction->InputAt(0);
    HInstruction* opb = instruction->InputAt(1);
    if (VectorizeUse(node, opa, generate_code, type, restrictions) &&
        VectorizeUse(node, opb, generate_code, type, restrictions)) {
      if (generate_code) {
        GenerateVecOp(instruction, vector_map_->Get(opa), vector_map_->Get(opb), type);
      }
      return true;
    }
  } else if (instruction->IsShl() || instruction->IsShr() || instruction->IsUShr()) {
    // Recognize halving add idiom.
    if (VectorizeHalvingAddIdiom(node, instruction, generate_code, type, restrictions)) {
      return true;
    }
    // Deal with vector restrictions.
    HInstruction* opa = instruction->InputAt(0);
    HInstruction* opb = instruction->InputAt(1);
    HInstruction* r = opa;
    bool is_unsigned = false;
    if ((HasVectorRestrictions(restrictions, kNoShift)) ||
        (instruction->IsShr() && HasVectorRestrictions(restrictions, kNoShr))) {
      return false;  // unsupported instruction
    } else if (HasVectorRestrictions(restrictions, kNoHiBits)) {
      // Shifts right need extra care to account for higher order bits.
    1273       // TODO: less likely, shr/unsigned and ushr/signed could be handled by flipping signedness.
   1274       if (instruction->IsShr() &&
   1275           (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || is_unsigned)) {
   1276         return false;  // reject, unless all operands are sign-extension narrower
   1277       } else if (instruction->IsUShr() &&
   1278                  (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || !is_unsigned)) {
   1279         return false;  // reject, unless all operands are zero-extension narrower
   1280       }
   1281     }
   1282     // Accept shift operator for vectorizable/invariant operands.
    1283     // TODO: accept symbolic, albeit loop-invariant, shift factors.
   1284     DCHECK(r != nullptr);
   1285     if (generate_code && vector_mode_ != kVector) {  // de-idiom
   1286       r = opa;
   1287     }
   1288     int64_t distance = 0;
   1289     if (VectorizeUse(node, r, generate_code, type, restrictions) &&
   1290         IsInt64AndGet(opb, /*out*/ &distance)) {
   1291       // Restrict shift distance to packed data type width.
   1292       int64_t max_distance = DataType::Size(type) * 8;
   1293       if (0 <= distance && distance < max_distance) {
   1294         if (generate_code) {
   1295           GenerateVecOp(instruction, vector_map_->Get(r), opb, type);
   1296         }
   1297         return true;
   1298       }
   1299     }
   1300   } else if (instruction->IsInvokeStaticOrDirect()) {
   1301     // Accept particular intrinsics.
   1302     HInvokeStaticOrDirect* invoke = instruction->AsInvokeStaticOrDirect();
   1303     switch (invoke->GetIntrinsic()) {
   1304       case Intrinsics::kMathAbsInt:
   1305       case Intrinsics::kMathAbsLong:
   1306       case Intrinsics::kMathAbsFloat:
   1307       case Intrinsics::kMathAbsDouble: {
   1308         // Deal with vector restrictions.
   1309         HInstruction* opa = instruction->InputAt(0);
   1310         HInstruction* r = opa;
   1311         bool is_unsigned = false;
   1312         if (HasVectorRestrictions(restrictions, kNoAbs)) {
   1313           return false;
   1314         } else if (HasVectorRestrictions(restrictions, kNoHiBits) &&
   1315                    (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || is_unsigned)) {
   1316           return false;  // reject, unless operand is sign-extension narrower
   1317         }
   1318         // Accept ABS(x) for vectorizable operand.
   1319         DCHECK(r != nullptr);
   1320         if (generate_code && vector_mode_ != kVector) {  // de-idiom
   1321           r = opa;
   1322         }
   1323         if (VectorizeUse(node, r, generate_code, type, restrictions)) {
   1324           if (generate_code) {
   1325             GenerateVecOp(instruction,
   1326                           vector_map_->Get(r),
   1327                           nullptr,
   1328                           HVecOperation::ToProperType(type, is_unsigned));
   1329           }
   1330           return true;
   1331         }
   1332         return false;
   1333       }
   1334       case Intrinsics::kMathMinIntInt:
   1335       case Intrinsics::kMathMinLongLong:
   1336       case Intrinsics::kMathMinFloatFloat:
   1337       case Intrinsics::kMathMinDoubleDouble:
   1338       case Intrinsics::kMathMaxIntInt:
   1339       case Intrinsics::kMathMaxLongLong:
   1340       case Intrinsics::kMathMaxFloatFloat:
   1341       case Intrinsics::kMathMaxDoubleDouble: {
   1342         // Deal with vector restrictions.
   1343         HInstruction* opa = instruction->InputAt(0);
   1344         HInstruction* opb = instruction->InputAt(1);
   1345         HInstruction* r = opa;
   1346         HInstruction* s = opb;
   1347         bool is_unsigned = false;
   1348         if (HasVectorRestrictions(restrictions, kNoMinMax)) {
   1349           return false;
   1350         } else if (HasVectorRestrictions(restrictions, kNoHiBits) &&
   1351                    !IsNarrowerOperands(opa, opb, type, &r, &s, &is_unsigned)) {
   1352           return false;  // reject, unless all operands are same-extension narrower
   1353         }
   1354         // Accept MIN/MAX(x, y) for vectorizable operands.
   1355         DCHECK(r != nullptr);
   1356         DCHECK(s != nullptr);
   1357         if (generate_code && vector_mode_ != kVector) {  // de-idiom
   1358           r = opa;
   1359           s = opb;
   1360         }
   1361         if (VectorizeUse(node, r, generate_code, type, restrictions) &&
   1362             VectorizeUse(node, s, generate_code, type, restrictions)) {
   1363           if (generate_code) {
   1364             GenerateVecOp(
   1365                 instruction, vector_map_->Get(r), vector_map_->Get(s), type, is_unsigned);
   1366           }
   1367           return true;
   1368         }
   1369         return false;
   1370       }
   1371       default:
   1372         return false;
   1373     }  // switch
   1374   }
   1375   return false;
   1376 }
   1377 
   1378 uint32_t HLoopOptimization::GetVectorSizeInBytes() {
   1379   switch (compiler_driver_->GetInstructionSet()) {
   1380     case InstructionSet::kArm:
   1381     case InstructionSet::kThumb2:
   1382       return 8;  // 64-bit SIMD
   1383     default:
   1384       return 16;  // 128-bit SIMD
   1385   }
   1386 }
   1387 
   1388 bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) {
   1389   const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures();
   1390   switch (compiler_driver_->GetInstructionSet()) {
   1391     case InstructionSet::kArm:
   1392     case InstructionSet::kThumb2:
   1393       // Allow vectorization for all ARM devices, because Android assumes that
   1394       // ARM 32-bit always supports advanced SIMD (64-bit SIMD).
   1395       switch (type) {
   1396         case DataType::Type::kBool:
   1397         case DataType::Type::kUint8:
   1398         case DataType::Type::kInt8:
   1399           *restrictions |= kNoDiv | kNoReduction;
   1400           return TrySetVectorLength(8);
   1401         case DataType::Type::kUint16:
   1402         case DataType::Type::kInt16:
   1403           *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction;
   1404           return TrySetVectorLength(4);
   1405         case DataType::Type::kInt32:
   1406           *restrictions |= kNoDiv | kNoWideSAD;
   1407           return TrySetVectorLength(2);
   1408         default:
   1409           break;
   1410       }
   1411       return false;
   1412     case InstructionSet::kArm64:
    1413       // Allow vectorization for all ARM64 devices, because Android assumes that
    1414       // ARMv8 AArch64 always supports advanced SIMD (128-bit SIMD).
   1415       switch (type) {
   1416         case DataType::Type::kBool:
   1417         case DataType::Type::kUint8:
   1418         case DataType::Type::kInt8:
   1419           *restrictions |= kNoDiv;
   1420           return TrySetVectorLength(16);
   1421         case DataType::Type::kUint16:
   1422         case DataType::Type::kInt16:
   1423           *restrictions |= kNoDiv;
   1424           return TrySetVectorLength(8);
   1425         case DataType::Type::kInt32:
   1426           *restrictions |= kNoDiv;
   1427           return TrySetVectorLength(4);
   1428         case DataType::Type::kInt64:
   1429           *restrictions |= kNoDiv | kNoMul | kNoMinMax;
   1430           return TrySetVectorLength(2);
   1431         case DataType::Type::kFloat32:
   1432           *restrictions |= kNoReduction;
   1433           return TrySetVectorLength(4);
   1434         case DataType::Type::kFloat64:
   1435           *restrictions |= kNoReduction;
   1436           return TrySetVectorLength(2);
   1437         default:
   1438           return false;
   1439       }
   1440     case InstructionSet::kX86:
   1441     case InstructionSet::kX86_64:
   1442       // Allow vectorization for SSE4.1-enabled X86 devices only (128-bit SIMD).
   1443       if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) {
   1444         switch (type) {
   1445           case DataType::Type::kBool:
   1446           case DataType::Type::kUint8:
   1447           case DataType::Type::kInt8:
   1448             *restrictions |=
   1449                 kNoMul | kNoDiv | kNoShift | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
   1450             return TrySetVectorLength(16);
   1451           case DataType::Type::kUint16:
   1452           case DataType::Type::kInt16:
   1453             *restrictions |= kNoDiv | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
   1454             return TrySetVectorLength(8);
   1455           case DataType::Type::kInt32:
   1456             *restrictions |= kNoDiv | kNoSAD;
   1457             return TrySetVectorLength(4);
   1458           case DataType::Type::kInt64:
   1459             *restrictions |= kNoMul | kNoDiv | kNoShr | kNoAbs | kNoMinMax | kNoSAD;
   1460             return TrySetVectorLength(2);
   1461           case DataType::Type::kFloat32:
   1462             *restrictions |= kNoMinMax | kNoReduction;  // minmax: -0.0 vs +0.0
   1463             return TrySetVectorLength(4);
   1464           case DataType::Type::kFloat64:
   1465             *restrictions |= kNoMinMax | kNoReduction;  // minmax: -0.0 vs +0.0
   1466             return TrySetVectorLength(2);
   1467           default:
   1468             break;
   1469         }  // switch type
   1470       }
   1471       return false;
   1472     case InstructionSet::kMips:
   1473       if (features->AsMipsInstructionSetFeatures()->HasMsa()) {
   1474         switch (type) {
   1475           case DataType::Type::kBool:
   1476           case DataType::Type::kUint8:
   1477           case DataType::Type::kInt8:
   1478             *restrictions |= kNoDiv;
   1479             return TrySetVectorLength(16);
   1480           case DataType::Type::kUint16:
   1481           case DataType::Type::kInt16:
   1482             *restrictions |= kNoDiv | kNoStringCharAt;
   1483             return TrySetVectorLength(8);
   1484           case DataType::Type::kInt32:
   1485             *restrictions |= kNoDiv;
   1486             return TrySetVectorLength(4);
   1487           case DataType::Type::kInt64:
   1488             *restrictions |= kNoDiv;
   1489             return TrySetVectorLength(2);
   1490           case DataType::Type::kFloat32:
   1491             *restrictions |= kNoMinMax | kNoReduction;  // min/max(x, NaN)
   1492             return TrySetVectorLength(4);
   1493           case DataType::Type::kFloat64:
   1494             *restrictions |= kNoMinMax | kNoReduction;  // min/max(x, NaN)
   1495             return TrySetVectorLength(2);
   1496           default:
   1497             break;
   1498         }  // switch type
   1499       }
   1500       return false;
   1501     case InstructionSet::kMips64:
   1502       if (features->AsMips64InstructionSetFeatures()->HasMsa()) {
   1503         switch (type) {
   1504           case DataType::Type::kBool:
   1505           case DataType::Type::kUint8:
   1506           case DataType::Type::kInt8:
   1507             *restrictions |= kNoDiv;
   1508             return TrySetVectorLength(16);
   1509           case DataType::Type::kUint16:
   1510           case DataType::Type::kInt16:
   1511             *restrictions |= kNoDiv | kNoStringCharAt;
   1512             return TrySetVectorLength(8);
   1513           case DataType::Type::kInt32:
   1514             *restrictions |= kNoDiv;
   1515             return TrySetVectorLength(4);
   1516           case DataType::Type::kInt64:
   1517             *restrictions |= kNoDiv;
   1518             return TrySetVectorLength(2);
   1519           case DataType::Type::kFloat32:
   1520             *restrictions |= kNoMinMax | kNoReduction;  // min/max(x, NaN)
   1521             return TrySetVectorLength(4);
   1522           case DataType::Type::kFloat64:
   1523             *restrictions |= kNoMinMax | kNoReduction;  // min/max(x, NaN)
   1524             return TrySetVectorLength(2);
   1525           default:
   1526             break;
   1527         }  // switch type
   1528       }
   1529       return false;
   1530     default:
   1531       return false;
   1532   }  // switch instruction set
   1533 }
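
         // For illustration (not part of the original source): on ARM64, for example,
         // TrySetVectorType(DataType::Type::kInt16, &r) adds kNoDiv to the restrictions
         // and attempts a vector length of 8 (eight 16-bit lanes in a 128-bit register),
         // whereas the same query on ARM/Thumb2 attempts a length of 4 (64-bit SIMD) and
         // additionally sets kNoStringCharAt and kNoReduction.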
   1534 
   1535 bool HLoopOptimization::TrySetVectorLength(uint32_t length) {
   1536   DCHECK(IsPowerOfTwo(length) && length >= 2u);
   1537   // First time set?
   1538   if (vector_length_ == 0) {
   1539     vector_length_ = length;
   1540   }
   1541   // Different types are acceptable within a loop-body, as long as all the corresponding vector
   1542   // lengths match exactly to obtain a uniform traversal through the vector iteration space
   1543   // (idiomatic exceptions to this rule can be handled by further unrolling sub-expressions).
   1544   return vector_length_ == length;
   1545 }
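
         // For illustration (not part of the original source): if a loop body mixes
         // packed types, e.g. 16-bit and 32-bit data on ARM64, the first call
         // TrySetVectorLength(8) pins vector_length_ to 8, so a later
         // TrySetVectorLength(4) for the 32-bit data fails and the loop is not
         // vectorized (the traversal through the iteration space would not be uniform).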
   1546 
   1547 void HLoopOptimization::GenerateVecInv(HInstruction* org, DataType::Type type) {
   1548   if (vector_map_->find(org) == vector_map_->end()) {
   1549     // In scalar code, just use a self pass-through for scalar invariants
   1550     // (viz. expression remains itself).
   1551     if (vector_mode_ == kSequential) {
   1552       vector_map_->Put(org, org);
   1553       return;
   1554     }
   1555     // In vector code, explicit scalar expansion is needed.
   1556     HInstruction* vector = nullptr;
   1557     auto it = vector_permanent_map_->find(org);
   1558     if (it != vector_permanent_map_->end()) {
   1559       vector = it->second;  // reuse during unrolling
   1560     } else {
   1561       // Generates ReplicateScalar( (optional_type_conv) org ).
   1562       HInstruction* input = org;
   1563       DataType::Type input_type = input->GetType();
   1564       if (type != input_type && (type == DataType::Type::kInt64 ||
   1565                                  input_type == DataType::Type::kInt64)) {
   1566         input = Insert(vector_preheader_,
   1567                        new (global_allocator_) HTypeConversion(type, input, kNoDexPc));
   1568       }
   1569       vector = new (global_allocator_)
   1570           HVecReplicateScalar(global_allocator_, input, type, vector_length_, kNoDexPc);
   1571       vector_permanent_map_->Put(org, Insert(vector_preheader_, vector));
   1572     }
   1573     vector_map_->Put(org, vector);
   1574   }
   1575 }
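
         // For illustration (not part of the original source): for a loop invariant x in
         //   for (int i = 0; i < n; i++) { a[i] += x; }
         // vector code needs x in every lane, so a single HVecReplicateScalar(x) is
         // inserted in the loop preheader and cached in vector_permanent_map_, letting
         // unrolled copies of the loop body reuse the same broadcast.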
   1576 
   1577 void HLoopOptimization::GenerateVecSub(HInstruction* org, HInstruction* offset) {
   1578   if (vector_map_->find(org) == vector_map_->end()) {
   1579     HInstruction* subscript = vector_index_;
   1580     int64_t value = 0;
   1581     if (!IsInt64AndGet(offset, &value) || value != 0) {
   1582       subscript = new (global_allocator_) HAdd(DataType::Type::kInt32, subscript, offset);
   1583       if (org->IsPhi()) {
   1584         Insert(vector_body_, subscript);  // lacks layout placeholder
   1585       }
   1586     }
   1587     vector_map_->Put(org, subscript);
   1588   }
   1589 }
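
         // For illustration (not part of the original source): for a unit-stride access
         // a[i + k] the induction analysis yields offset k, so the subscript generated
         // here is an HAdd "i + k"; for a plain a[i] (offset zero), vector_index_ itself
         // is used as the subscript.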
   1590 
   1591 void HLoopOptimization::GenerateVecMem(HInstruction* org,
   1592                                        HInstruction* opa,
   1593                                        HInstruction* opb,
   1594                                        HInstruction* offset,
   1595                                        DataType::Type type) {
   1596   uint32_t dex_pc = org->GetDexPc();
   1597   HInstruction* vector = nullptr;
   1598   if (vector_mode_ == kVector) {
   1599     // Vector store or load.
   1600     bool is_string_char_at = false;
   1601     HInstruction* base = org->InputAt(0);
   1602     if (opb != nullptr) {
   1603       vector = new (global_allocator_) HVecStore(
   1604           global_allocator_, base, opa, opb, type, org->GetSideEffects(), vector_length_, dex_pc);
   1605     } else  {
   1606       is_string_char_at = org->AsArrayGet()->IsStringCharAt();
   1607       vector = new (global_allocator_) HVecLoad(global_allocator_,
   1608                                                 base,
   1609                                                 opa,
   1610                                                 type,
   1611                                                 org->GetSideEffects(),
   1612                                                 vector_length_,
   1613                                                 is_string_char_at,
   1614                                                 dex_pc);
   1615     }
   1616     // Known (forced/adjusted/original) alignment?
   1617     if (vector_dynamic_peeling_candidate_ != nullptr) {
   1618       if (vector_dynamic_peeling_candidate_->offset == offset &&  // TODO: diffs too?
   1619           DataType::Size(vector_dynamic_peeling_candidate_->type) == DataType::Size(type) &&
   1620           vector_dynamic_peeling_candidate_->is_string_char_at == is_string_char_at) {
   1621         vector->AsVecMemoryOperation()->SetAlignment(  // forced
   1622             Alignment(GetVectorSizeInBytes(), 0));
   1623       }
   1624     } else {
   1625       vector->AsVecMemoryOperation()->SetAlignment(  // adjusted/original
   1626           ComputeAlignment(offset, type, is_string_char_at, vector_static_peeling_factor_));
   1627     }
   1628   } else {
   1629     // Scalar store or load.
   1630     DCHECK(vector_mode_ == kSequential);
   1631     if (opb != nullptr) {
   1632       DataType::Type component_type = org->AsArraySet()->GetComponentType();
   1633       vector = new (global_allocator_) HArraySet(
   1634           org->InputAt(0), opa, opb, component_type, org->GetSideEffects(), dex_pc);
   1635     } else  {
   1636       bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
   1637       vector = new (global_allocator_) HArrayGet(
   1638           org->InputAt(0), opa, org->GetType(), org->GetSideEffects(), dex_pc, is_string_char_at);
   1639     }
   1640   }
   1641   vector_map_->Put(org, vector);
   1642 }
   1643 
   1644 void HLoopOptimization::GenerateVecReductionPhi(HPhi* phi) {
   1645   DCHECK(reductions_->find(phi) != reductions_->end());
   1646   DCHECK(reductions_->Get(phi->InputAt(1)) == phi);
   1647   HInstruction* vector = nullptr;
   1648   if (vector_mode_ == kSequential) {
   1649     HPhi* new_phi = new (global_allocator_) HPhi(
   1650         global_allocator_, kNoRegNumber, 0, phi->GetType());
   1651     vector_header_->AddPhi(new_phi);
   1652     vector = new_phi;
   1653   } else {
   1654     // Link vector reduction back to prior unrolled update, or a first phi.
   1655     auto it = vector_permanent_map_->find(phi);
   1656     if (it != vector_permanent_map_->end()) {
   1657       vector = it->second;
   1658     } else {
   1659       HPhi* new_phi = new (global_allocator_) HPhi(
   1660           global_allocator_, kNoRegNumber, 0, HVecOperation::kSIMDType);
   1661       vector_header_->AddPhi(new_phi);
   1662       vector = new_phi;
   1663     }
   1664   }
   1665   vector_map_->Put(phi, vector);
   1666 }
   1667 
   1668 void HLoopOptimization::GenerateVecReductionPhiInputs(HPhi* phi, HInstruction* reduction) {
   1669   HInstruction* new_phi = vector_map_->Get(phi);
   1670   HInstruction* new_init = reductions_->Get(phi);
   1671   HInstruction* new_red = vector_map_->Get(reduction);
   1672   // Link unrolled vector loop back to new phi.
   1673   for (; !new_phi->IsPhi(); new_phi = vector_permanent_map_->Get(new_phi)) {
   1674     DCHECK(new_phi->IsVecOperation());
   1675   }
   1676   // Prepare the new initialization.
   1677   if (vector_mode_ == kVector) {
   1678     // Generate a [initial, 0, .., 0] vector for add or
   1679     // a [initial, initial, .., initial] vector for min/max.
   1680     HVecOperation* red_vector = new_red->AsVecOperation();
   1681     HVecReduce::ReductionKind kind = GetReductionKind(red_vector);
   1682     uint32_t vector_length = red_vector->GetVectorLength();
   1683     DataType::Type type = red_vector->GetPackedType();
   1684     if (kind == HVecReduce::ReductionKind::kSum) {
   1685       new_init = Insert(vector_preheader_,
   1686                         new (global_allocator_) HVecSetScalars(global_allocator_,
   1687                                                                &new_init,
   1688                                                                type,
   1689                                                                vector_length,
   1690                                                                1,
   1691                                                                kNoDexPc));
   1692     } else {
   1693       new_init = Insert(vector_preheader_,
   1694                         new (global_allocator_) HVecReplicateScalar(global_allocator_,
   1695                                                                     new_init,
   1696                                                                     type,
   1697                                                                     vector_length,
   1698                                                                     kNoDexPc));
   1699     }
   1700   } else {
   1701     new_init = ReduceAndExtractIfNeeded(new_init);
   1702   }
   1703   // Set the phi inputs.
   1704   DCHECK(new_phi->IsPhi());
   1705   new_phi->AsPhi()->AddInput(new_init);
   1706   new_phi->AsPhi()->AddInput(new_red);
   1707   // New feed value for next phi (safe mutation in iteration).
   1708   reductions_->find(phi)->second = new_phi;
   1709 }
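
         // For illustration (not part of the original source): for a sum reduction
         // "s += a[i]" with initial value s0 and four lanes, the vector phi is fed
         // [s0, 0, 0, 0] from the preheader, so lane-wise additions followed by one
         // final horizontal reduction count s0 exactly once; a min/max reduction is
         // fed [s0, s0, s0, s0] instead.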
   1710 
   1711 HInstruction* HLoopOptimization::ReduceAndExtractIfNeeded(HInstruction* instruction) {
   1712   if (instruction->IsPhi()) {
   1713     HInstruction* input = instruction->InputAt(1);
   1714     if (HVecOperation::ReturnsSIMDValue(input)) {
   1715       DCHECK(!input->IsPhi());
   1716       HVecOperation* input_vector = input->AsVecOperation();
   1717       uint32_t vector_length = input_vector->GetVectorLength();
   1718       DataType::Type type = input_vector->GetPackedType();
   1719       HVecReduce::ReductionKind kind = GetReductionKind(input_vector);
   1720       HBasicBlock* exit = instruction->GetBlock()->GetSuccessors()[0];
   1721       // Generate a vector reduction and scalar extract
   1722       //    x = REDUCE( [x_1, .., x_n] )
   1723       //    y = x_1
   1724       // along the exit of the defining loop.
   1725       HInstruction* reduce = new (global_allocator_) HVecReduce(
   1726           global_allocator_, instruction, type, vector_length, kind, kNoDexPc);
   1727       exit->InsertInstructionBefore(reduce, exit->GetFirstInstruction());
   1728       instruction = new (global_allocator_) HVecExtractScalar(
   1729           global_allocator_, reduce, type, vector_length, 0, kNoDexPc);
   1730       exit->InsertInstructionAfter(instruction, reduce);
   1731     }
   1732   }
   1733   return instruction;
   1734 }
   1735 
   1736 #define GENERATE_VEC(x, y) \
   1737   if (vector_mode_ == kVector) { \
   1738     vector = (x); \
   1739   } else { \
   1740     DCHECK(vector_mode_ == kSequential); \
   1741     vector = (y); \
   1742   } \
   1743   break;
   1744 
   1745 void HLoopOptimization::GenerateVecOp(HInstruction* org,
   1746                                       HInstruction* opa,
   1747                                       HInstruction* opb,
   1748                                       DataType::Type type,
   1749                                       bool is_unsigned) {
   1750   uint32_t dex_pc = org->GetDexPc();
   1751   HInstruction* vector = nullptr;
   1752   DataType::Type org_type = org->GetType();
   1753   switch (org->GetKind()) {
   1754     case HInstruction::kNeg:
   1755       DCHECK(opb == nullptr);
   1756       GENERATE_VEC(
   1757         new (global_allocator_) HVecNeg(global_allocator_, opa, type, vector_length_, dex_pc),
   1758         new (global_allocator_) HNeg(org_type, opa, dex_pc));
   1759     case HInstruction::kNot:
   1760       DCHECK(opb == nullptr);
   1761       GENERATE_VEC(
   1762         new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_, dex_pc),
   1763         new (global_allocator_) HNot(org_type, opa, dex_pc));
   1764     case HInstruction::kBooleanNot:
   1765       DCHECK(opb == nullptr);
   1766       GENERATE_VEC(
   1767         new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_, dex_pc),
   1768         new (global_allocator_) HBooleanNot(opa, dex_pc));
   1769     case HInstruction::kTypeConversion:
   1770       DCHECK(opb == nullptr);
   1771       GENERATE_VEC(
   1772         new (global_allocator_) HVecCnv(global_allocator_, opa, type, vector_length_, dex_pc),
   1773         new (global_allocator_) HTypeConversion(org_type, opa, dex_pc));
   1774     case HInstruction::kAdd:
   1775       GENERATE_VEC(
   1776         new (global_allocator_) HVecAdd(global_allocator_, opa, opb, type, vector_length_, dex_pc),
   1777         new (global_allocator_) HAdd(org_type, opa, opb, dex_pc));
   1778     case HInstruction::kSub:
   1779       GENERATE_VEC(
   1780         new (global_allocator_) HVecSub(global_allocator_, opa, opb, type, vector_length_, dex_pc),
   1781         new (global_allocator_) HSub(org_type, opa, opb, dex_pc));
   1782     case HInstruction::kMul:
   1783       GENERATE_VEC(
   1784         new (global_allocator_) HVecMul(global_allocator_, opa, opb, type, vector_length_, dex_pc),
   1785         new (global_allocator_) HMul(org_type, opa, opb, dex_pc));
   1786     case HInstruction::kDiv:
   1787       GENERATE_VEC(
   1788         new (global_allocator_) HVecDiv(global_allocator_, opa, opb, type, vector_length_, dex_pc),
   1789         new (global_allocator_) HDiv(org_type, opa, opb, dex_pc));
   1790     case HInstruction::kAnd:
   1791       GENERATE_VEC(
   1792         new (global_allocator_) HVecAnd(global_allocator_, opa, opb, type, vector_length_, dex_pc),
   1793         new (global_allocator_) HAnd(org_type, opa, opb, dex_pc));
   1794     case HInstruction::kOr:
   1795       GENERATE_VEC(
   1796         new (global_allocator_) HVecOr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
   1797         new (global_allocator_) HOr(org_type, opa, opb, dex_pc));
   1798     case HInstruction::kXor:
   1799       GENERATE_VEC(
   1800         new (global_allocator_) HVecXor(global_allocator_, opa, opb, type, vector_length_, dex_pc),
   1801         new (global_allocator_) HXor(org_type, opa, opb, dex_pc));
   1802     case HInstruction::kShl:
   1803       GENERATE_VEC(
   1804         new (global_allocator_) HVecShl(global_allocator_, opa, opb, type, vector_length_, dex_pc),
   1805         new (global_allocator_) HShl(org_type, opa, opb, dex_pc));
   1806     case HInstruction::kShr:
   1807       GENERATE_VEC(
   1808         new (global_allocator_) HVecShr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
   1809         new (global_allocator_) HShr(org_type, opa, opb, dex_pc));
   1810     case HInstruction::kUShr:
   1811       GENERATE_VEC(
   1812         new (global_allocator_) HVecUShr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
   1813         new (global_allocator_) HUShr(org_type, opa, opb, dex_pc));
   1814     case HInstruction::kInvokeStaticOrDirect: {
   1815       HInvokeStaticOrDirect* invoke = org->AsInvokeStaticOrDirect();
   1816       if (vector_mode_ == kVector) {
   1817         switch (invoke->GetIntrinsic()) {
   1818           case Intrinsics::kMathAbsInt:
   1819           case Intrinsics::kMathAbsLong:
   1820           case Intrinsics::kMathAbsFloat:
   1821           case Intrinsics::kMathAbsDouble:
   1822             DCHECK(opb == nullptr);
   1823             vector = new (global_allocator_)
   1824                 HVecAbs(global_allocator_, opa, type, vector_length_, dex_pc);
   1825             break;
   1826           case Intrinsics::kMathMinIntInt:
   1827           case Intrinsics::kMathMinLongLong:
   1828           case Intrinsics::kMathMinFloatFloat:
   1829           case Intrinsics::kMathMinDoubleDouble: {
   1830             vector = new (global_allocator_)
   1831                 HVecMin(global_allocator_,
   1832                         opa,
   1833                         opb,
   1834                         HVecOperation::ToProperType(type, is_unsigned),
   1835                         vector_length_,
   1836                         dex_pc);
   1837             break;
   1838           }
   1839           case Intrinsics::kMathMaxIntInt:
   1840           case Intrinsics::kMathMaxLongLong:
   1841           case Intrinsics::kMathMaxFloatFloat:
   1842           case Intrinsics::kMathMaxDoubleDouble: {
   1843             vector = new (global_allocator_)
   1844                 HVecMax(global_allocator_,
   1845                         opa,
   1846                         opb,
   1847                         HVecOperation::ToProperType(type, is_unsigned),
   1848                         vector_length_,
   1849                         dex_pc);
   1850             break;
   1851           }
   1852           default:
   1853             LOG(FATAL) << "Unsupported SIMD intrinsic " << org->GetId();
   1854             UNREACHABLE();
   1855         }  // switch invoke
   1856       } else {
   1857         // In scalar code, simply clone the method invoke, and replace its operands with the
   1858         // corresponding new scalar instructions in the loop. The instruction will get an
   1859         // environment while being inserted from the instruction map in original program order.
   1860         DCHECK(vector_mode_ == kSequential);
   1861         size_t num_args = invoke->GetNumberOfArguments();
   1862         HInvokeStaticOrDirect* new_invoke = new (global_allocator_) HInvokeStaticOrDirect(
   1863             global_allocator_,
   1864             num_args,
   1865             invoke->GetType(),
   1866             invoke->GetDexPc(),
   1867             invoke->GetDexMethodIndex(),
   1868             invoke->GetResolvedMethod(),
   1869             invoke->GetDispatchInfo(),
   1870             invoke->GetInvokeType(),
   1871             invoke->GetTargetMethod(),
   1872             invoke->GetClinitCheckRequirement());
   1873         HInputsRef inputs = invoke->GetInputs();
   1874         size_t num_inputs = inputs.size();
   1875         DCHECK_LE(num_args, num_inputs);
   1876         DCHECK_EQ(num_inputs, new_invoke->GetInputs().size());  // both invokes agree
   1877         for (size_t index = 0; index < num_inputs; ++index) {
   1878           HInstruction* new_input = index < num_args
   1879               ? vector_map_->Get(inputs[index])
   1880               : inputs[index];  // beyond arguments: just pass through
   1881           new_invoke->SetArgumentAt(index, new_input);
   1882         }
   1883         new_invoke->SetIntrinsic(invoke->GetIntrinsic(),
   1884                                  kNeedsEnvironmentOrCache,
   1885                                  kNoSideEffects,
   1886                                  kNoThrow);
   1887         vector = new_invoke;
   1888       }
   1889       break;
   1890     }
   1891     default:
   1892       break;
   1893   }  // switch
   1894   CHECK(vector != nullptr) << "Unsupported SIMD operator";
   1895   vector_map_->Put(org, vector);
   1896 }
   1897 
   1898 #undef GENERATE_VEC
   1899 
   1900 //
   1901 // Vectorization idioms.
   1902 //
   1903 
   1904 // Method recognizes the following idioms:
   1905 //   rounding  halving add (a + b + 1) >> 1 for unsigned/signed operands a, b
   1906 //   truncated halving add (a + b)     >> 1 for unsigned/signed operands a, b
    1907 // Provided that the operands are promoted to a wider form to do the arithmetic and
    1908 // then cast back to the narrower form, the idioms can be mapped into an efficient SIMD
    1909 // implementation that operates directly in the narrower form (plus one extra bit).
   1910 // TODO: current version recognizes implicit byte/short/char widening only;
   1911 //       explicit widening from int to long could be added later.
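         //
         // For illustration (not part of the original comment): a Java-like loop
         // exhibiting the rounding variant over bytes is
         //   c[i] = (byte) ((a[i] + b[i] + 1) >> 1);   // a, b, c are byte arrays
         // where the operands are implicitly widened to int for the arithmetic and the
         // result is cast back to byte; this maps to a single HVecHalvingAdd.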
   1912 bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
   1913                                                  HInstruction* instruction,
   1914                                                  bool generate_code,
   1915                                                  DataType::Type type,
   1916                                                  uint64_t restrictions) {
    1917   // Test for top level arithmetic shift right x >> 1 or logical shift right x >>> 1
    1918   // (note that whether the sign bit of the wider precision is shifted in has no
    1919   // effect on the narrower precision computed by the idiom).
   1920   if ((instruction->IsShr() ||
   1921        instruction->IsUShr()) &&
   1922       IsInt64Value(instruction->InputAt(1), 1)) {
   1923     // Test for (a + b + c) >> 1 for optional constant c.
   1924     HInstruction* a = nullptr;
   1925     HInstruction* b = nullptr;
   1926     int64_t       c = 0;
   1927     if (IsAddConst(instruction->InputAt(0), /*out*/ &a, /*out*/ &b, /*out*/ &c)) {
   1928       DCHECK(a != nullptr && b != nullptr);
   1929       // Accept c == 1 (rounded) or c == 0 (not rounded).
   1930       bool is_rounded = false;
   1931       if (c == 1) {
   1932         is_rounded = true;
   1933       } else if (c != 0) {
   1934         return false;
   1935       }
   1936       // Accept consistent zero or sign extension on operands a and b.
   1937       HInstruction* r = nullptr;
   1938       HInstruction* s = nullptr;
   1939       bool is_unsigned = false;
   1940       if (!IsNarrowerOperands(a, b, type, &r, &s, &is_unsigned)) {
   1941         return false;
   1942       }
   1943       // Deal with vector restrictions.
   1944       if ((!is_unsigned && HasVectorRestrictions(restrictions, kNoSignedHAdd)) ||
   1945           (!is_rounded && HasVectorRestrictions(restrictions, kNoUnroundedHAdd))) {
   1946         return false;
   1947       }
   1948       // Accept recognized halving add for vectorizable operands. Vectorized code uses the
   1949       // shorthand idiomatic operation. Sequential code uses the original scalar expressions.
   1950       DCHECK(r != nullptr);
   1951       DCHECK(s != nullptr);
   1952       if (generate_code && vector_mode_ != kVector) {  // de-idiom
   1953         r = instruction->InputAt(0);
   1954         s = instruction->InputAt(1);
   1955       }
   1956       if (VectorizeUse(node, r, generate_code, type, restrictions) &&
   1957           VectorizeUse(node, s, generate_code, type, restrictions)) {
   1958         if (generate_code) {
   1959           if (vector_mode_ == kVector) {
   1960             vector_map_->Put(instruction, new (global_allocator_) HVecHalvingAdd(
   1961                 global_allocator_,
   1962                 vector_map_->Get(r),
   1963                 vector_map_->Get(s),
   1964                 HVecOperation::ToProperType(type, is_unsigned),
   1965                 vector_length_,
   1966                 is_rounded,
   1967                 kNoDexPc));
   1968             MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
   1969           } else {
   1970             GenerateVecOp(instruction, vector_map_->Get(r), vector_map_->Get(s), type);
   1971           }
   1972         }
   1973         return true;
   1974       }
   1975     }
   1976   }
   1977   return false;
   1978 }
   1979 
   1980 // Method recognizes the following idiom:
   1981 //   q += ABS(a - b) for signed operands a, b
    1982 // provided that the operands either have the same type or are promoted to a wider form.
    1983 // Since this may involve a vector length change, the idiom is handled by going directly
    1984 // to a sad-accumulate node (rather than relying on combining finer-grained nodes later).
   1985 // TODO: unsigned SAD too?
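         //
         // For illustration (not part of the original comment): the canonical Java-like
         // form recognized here is
         //   int q = 0;
         //   for (int i = 0; i < n; i++) { q += Math.abs(a[i] - b[i]); }
         // which, for byte or short arrays, becomes a single HVecSADAccumulate rather
         // than separate widening, subtract, abs, and add nodes.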
   1986 bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
   1987                                           HInstruction* instruction,
   1988                                           bool generate_code,
   1989                                           DataType::Type reduction_type,
   1990                                           uint64_t restrictions) {
   1991   // Filter integral "q += ABS(a - b);" reduction, where ABS and SUB
   1992   // are done in the same precision (either int or long).
   1993   if (!instruction->IsAdd() ||
   1994       (reduction_type != DataType::Type::kInt32 && reduction_type != DataType::Type::kInt64)) {
   1995     return false;
   1996   }
   1997   HInstruction* q = instruction->InputAt(0);
   1998   HInstruction* v = instruction->InputAt(1);
   1999   HInstruction* a = nullptr;
   2000   HInstruction* b = nullptr;
   2001   if (v->IsInvokeStaticOrDirect() &&
   2002        (v->AsInvokeStaticOrDirect()->GetIntrinsic() == Intrinsics::kMathAbsInt ||
   2003         v->AsInvokeStaticOrDirect()->GetIntrinsic() == Intrinsics::kMathAbsLong)) {
   2004     HInstruction* x = v->InputAt(0);
   2005     if (x->GetType() == reduction_type) {
   2006       int64_t c = 0;
   2007       if (x->IsSub()) {
   2008         a = x->InputAt(0);
   2009         b = x->InputAt(1);
   2010       } else if (IsAddConst(x, /*out*/ &a, /*out*/ &c)) {
   2011         b = graph_->GetConstant(reduction_type, -c);  // hidden SUB!
   2012       }
   2013     }
   2014   }
   2015   if (a == nullptr || b == nullptr) {
   2016     return false;
   2017   }
    2018   // Accept operands a and b of the same type, or consistently sign-extended from a narrower type.
    2019   // The same-type or narrower operands are called r (a or lower) and s (b or lower).
    2020   // We inspect the operands carefully to pick the most suited type.
   2021   HInstruction* r = a;
   2022   HInstruction* s = b;
   2023   bool is_unsigned = false;
   2024   DataType::Type sub_type = a->GetType();
   2025   if (DataType::Size(b->GetType()) < DataType::Size(sub_type)) {
   2026     sub_type = b->GetType();
   2027   }
   2028   if (a->IsTypeConversion() &&
   2029       DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
   2030     sub_type = a->InputAt(0)->GetType();
   2031   }
   2032   if (b->IsTypeConversion() &&
   2033       DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
   2034     sub_type = b->InputAt(0)->GetType();
   2035   }
   2036   if (reduction_type != sub_type &&
   2037       (!IsNarrowerOperands(a, b, sub_type, &r, &s, &is_unsigned) || is_unsigned)) {
   2038     return false;
   2039   }
   2040   // Try same/narrower type and deal with vector restrictions.
   2041   if (!TrySetVectorType(sub_type, &restrictions) ||
   2042       HasVectorRestrictions(restrictions, kNoSAD) ||
   2043       (reduction_type != sub_type && HasVectorRestrictions(restrictions, kNoWideSAD))) {
   2044     return false;
   2045   }
   2046   // Accept SAD idiom for vectorizable operands. Vectorized code uses the shorthand
   2047   // idiomatic operation. Sequential code uses the original scalar expressions.
   2048   DCHECK(r != nullptr);
   2049   DCHECK(s != nullptr);
   2050   if (generate_code && vector_mode_ != kVector) {  // de-idiom
   2051     r = s = v->InputAt(0);
   2052   }
   2053   if (VectorizeUse(node, q, generate_code, sub_type, restrictions) &&
   2054       VectorizeUse(node, r, generate_code, sub_type, restrictions) &&
   2055       VectorizeUse(node, s, generate_code, sub_type, restrictions)) {
   2056     if (generate_code) {
   2057       reduction_type = HVecOperation::ToProperType(reduction_type, is_unsigned);
   2058       if (vector_mode_ == kVector) {
   2059         vector_map_->Put(instruction, new (global_allocator_) HVecSADAccumulate(
   2060             global_allocator_,
   2061             vector_map_->Get(q),
   2062             vector_map_->Get(r),
   2063             vector_map_->Get(s),
   2064             reduction_type,
   2065             GetOtherVL(reduction_type, sub_type, vector_length_),
   2066             kNoDexPc));
   2067         MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
   2068       } else {
   2069         GenerateVecOp(v, vector_map_->Get(r), nullptr, reduction_type);
   2070         GenerateVecOp(instruction, vector_map_->Get(q), vector_map_->Get(v), reduction_type);
   2071       }
   2072     }
   2073     return true;
   2074   }
   2075   return false;
   2076 }
   2077 
   2078 //
   2079 // Vectorization heuristics.
   2080 //
   2081 
   2082 Alignment HLoopOptimization::ComputeAlignment(HInstruction* offset,
   2083                                               DataType::Type type,
   2084                                               bool is_string_char_at,
   2085                                               uint32_t peeling) {
    2086   // Combine the alignment and hidden offset that are guaranteed by
    2087   // the Android runtime with a known starting index, expressed in bytes.
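           // Worked example (illustrative numbers, not from the original comment): with
           // an 8-byte base alignment and a 12-byte hidden data offset for an int array,
           // offset value 1 and peeling 1 give a start of 12 + (1 + 1) * 4 = 20 bytes,
           // i.e. Alignment(8, 20 & 7) = Alignment(8, 4).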
   2088   int64_t value = 0;
   2089   if (IsInt64AndGet(offset, /*out*/ &value)) {
   2090     uint32_t start_offset =
   2091         HiddenOffset(type, is_string_char_at) + (value + peeling) * DataType::Size(type);
   2092     return Alignment(BaseAlignment(), start_offset & (BaseAlignment() - 1u));
   2093   }
   2094   // Otherwise, the Android runtime guarantees at least natural alignment.
   2095   return Alignment(DataType::Size(type), 0);
   2096 }
   2097 
   2098 void HLoopOptimization::SetAlignmentStrategy(uint32_t peeling_votes[],
   2099                                              const ArrayReference* peeling_candidate) {
    2100   // Current heuristic: pick the best static loop peeling factor, if any,
    2101   // or otherwise use dynamic loop peeling on the suggested peeling candidate.
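           // For illustration (not part of the original comment): if peeling two
           // iterations would align three references and peeling zero only one, factor
           // two wins the vote and static peeling is used; with no votes at all, the
           // candidate reference (if any) is aligned by dynamic peeling instead.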
   2102   uint32_t max_vote = 0;
   2103   for (int32_t i = 0; i < 16; i++) {
   2104     if (peeling_votes[i] > max_vote) {
   2105       max_vote = peeling_votes[i];
   2106       vector_static_peeling_factor_ = i;
   2107     }
   2108   }
   2109   if (max_vote == 0) {
   2110     vector_dynamic_peeling_candidate_ = peeling_candidate;
   2111   }
   2112 }
   2113 
   2114 uint32_t HLoopOptimization::MaxNumberPeeled() {
   2115   if (vector_dynamic_peeling_candidate_ != nullptr) {
   2116     return vector_length_ - 1u;  // worst-case
   2117   }
   2118   return vector_static_peeling_factor_;  // known exactly
   2119 }
   2120 
   2121 bool HLoopOptimization::IsVectorizationProfitable(int64_t trip_count) {
    2122   // Current heuristic: non-empty body with a sufficient number of iterations (if known).
    2123   // TODO: refine by looking at e.g. operation count, alignment, etc.
    2124   // TODO: the trip count is really an unsigned entity, provided the guarding test
    2125   //       is satisfied; deal with this more carefully later.
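           // For illustration (not part of the original comment): with vector_length_ 4
           // and a dynamic peeling candidate (max_peel 3), a known trip count below 7 is
           // rejected, since after peeling up to three iterations fewer than one full
           // vector iteration could remain.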
   2126   uint32_t max_peel = MaxNumberPeeled();
   2127   if (vector_length_ == 0) {
   2128     return false;  // nothing found
   2129   } else if (trip_count < 0) {
   2130     return false;  // guard against non-taken/large
   2131   } else if ((0 < trip_count) && (trip_count < (vector_length_ + max_peel))) {
   2132     return false;  // insufficient iterations
   2133   }
   2134   return true;
   2135 }
   2136 
   2137 static constexpr uint32_t ARM64_SIMD_MAXIMUM_UNROLL_FACTOR = 8;
   2138 static constexpr uint32_t ARM64_SIMD_HEURISTIC_MAX_BODY_SIZE = 50;
   2139 
   2140 uint32_t HLoopOptimization::GetUnrollingFactor(HBasicBlock* block, int64_t trip_count) {
   2141   uint32_t max_peel = MaxNumberPeeled();
   2142   switch (compiler_driver_->GetInstructionSet()) {
   2143     case InstructionSet::kArm64: {
   2144       // Don't unroll with insufficient iterations.
   2145       // TODO: Unroll loops with unknown trip count.
   2146       DCHECK_NE(vector_length_, 0u);
   2147       if (trip_count < (2 * vector_length_ + max_peel)) {
   2148         return kNoUnrollingFactor;
   2149       }
   2150       // Don't unroll for large loop body size.
   2151       uint32_t instruction_count = block->GetInstructions().CountSize();
   2152       if (instruction_count >= ARM64_SIMD_HEURISTIC_MAX_BODY_SIZE) {
   2153         return kNoUnrollingFactor;
   2154       }
   2155       // Find a beneficial unroll factor with the following restrictions:
   2156       //  - At least one iteration of the transformed loop should be executed.
   2157       //  - The loop body shouldn't be "too big" (heuristic).
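               // Worked example (illustrative numbers, not from the original comment):
               // a body of 10 instructions gives uf1 = 50 / 10 = 5; a trip count of 100
               // with no peeling and vector_length_ of 4 gives uf2 = 25; the result is
               // then TruncToPowerOfTwo(std::min({5, 25, 8})) = 4.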
   2158       uint32_t uf1 = ARM64_SIMD_HEURISTIC_MAX_BODY_SIZE / instruction_count;
   2159       uint32_t uf2 = (trip_count - max_peel) / vector_length_;
   2160       uint32_t unroll_factor =
   2161           TruncToPowerOfTwo(std::min({uf1, uf2, ARM64_SIMD_MAXIMUM_UNROLL_FACTOR}));
   2162       DCHECK_GE(unroll_factor, 1u);
   2163       return unroll_factor;
   2164     }
   2165     case InstructionSet::kX86:
   2166     case InstructionSet::kX86_64:
   2167     default:
   2168       return kNoUnrollingFactor;
   2169   }
   2170 }
   2171 
   2172 //
   2173 // Helpers.
   2174 //
   2175 
   2176 bool HLoopOptimization::TrySetPhiInduction(HPhi* phi, bool restrict_uses) {
   2177   // Start with empty phi induction.
   2178   iset_->clear();
   2179 
    2180   // Special case Phis that have an equivalent in a debuggable setup. Our graph checker isn't
   2181   // smart enough to follow strongly connected components (and it's probably not worth
   2182   // it to make it so). See b/33775412.
   2183   if (graph_->IsDebuggable() && phi->HasEquivalentPhi()) {
   2184     return false;
   2185   }
   2186 
   2187   // Lookup phi induction cycle.
   2188   ArenaSet<HInstruction*>* set = induction_range_.LookupCycle(phi);
   2189   if (set != nullptr) {
   2190     for (HInstruction* i : *set) {
    2191       // Check that, other than instructions that are no longer in the graph (removed earlier),
    2192       // each instruction is removable and, when restricted uses are requested, that for every
    2193       // instruction other than the phi all uses are contained within the cycle.
   2194       if (!i->IsInBlock()) {
   2195         continue;
   2196       } else if (!i->IsRemovable()) {
   2197         return false;
   2198       } else if (i != phi && restrict_uses) {
   2199         // Deal with regular uses.
   2200         for (const HUseListNode<HInstruction*>& use : i->GetUses()) {
   2201           if (set->find(use.GetUser()) == set->end()) {
   2202             return false;
   2203           }
   2204         }
   2205       }
   2206       iset_->insert(i);  // copy
   2207     }
   2208     return true;
   2209   }
   2210   return false;
   2211 }
   2212 
   2213 bool HLoopOptimization::TrySetPhiReduction(HPhi* phi) {
   2214   DCHECK(iset_->empty());
   2215   // Only unclassified phi cycles are candidates for reductions.
   2216   if (induction_range_.IsClassified(phi)) {
   2217     return false;
   2218   }
   2219   // Accept operations like x = x + .., provided that the phi and the reduction are
   2220   // used exactly once inside the loop, and by each other.
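           // For illustration (not part of the original comment): the accepted shape is
           //   s = Phi(s0, t)          // loop-phi
           //   t = s + a[i]            // reduction update
           // where t is the sole user of s inside the loop and s the sole user of t, so
           // the running value never escapes the phi/update pair within the loop.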
   2221   HInputsRef inputs = phi->GetInputs();
   2222   if (inputs.size() == 2) {
   2223     HInstruction* reduction = inputs[1];
   2224     if (HasReductionFormat(reduction, phi)) {
   2225       HLoopInformation* loop_info = phi->GetBlock()->GetLoopInformation();
   2226       uint32_t use_count = 0;
   2227       bool single_use_inside_loop =
   2228           // Reduction update only used by phi.
   2229           reduction->GetUses().HasExactlyOneElement() &&
   2230           !reduction->HasEnvironmentUses() &&
    2231           // Reduction update is the only use of the phi inside the loop.
   2232           IsOnlyUsedAfterLoop(loop_info, phi, /*collect_loop_uses*/ true, &use_count) &&
   2233           iset_->size() == 1;
   2234       iset_->clear();  // leave the way you found it
   2235       if (single_use_inside_loop) {
   2236         // Link reduction back, and start recording feed value.
   2237         reductions_->Put(reduction, phi);
   2238         reductions_->Put(phi, phi->InputAt(0));
   2239         return true;
   2240       }
   2241     }
   2242   }
   2243   return false;
   2244 }
   2245 
   2246 bool HLoopOptimization::TrySetSimpleLoopHeader(HBasicBlock* block, /*out*/ HPhi** main_phi) {
   2247   // Start with empty phi induction and reductions.
   2248   iset_->clear();
   2249   reductions_->clear();
   2250 
   2251   // Scan the phis to find the following (the induction structure has already
   2252   // been optimized, so we don't need to worry about trivial cases):
   2253   // (1) optional reductions in loop,
   2254   // (2) the main induction, used in loop control.
   2255   HPhi* phi = nullptr;
   2256   for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
   2257     if (TrySetPhiReduction(it.Current()->AsPhi())) {
   2258       continue;
   2259     } else if (phi == nullptr) {
   2260       // Found the first candidate for main induction.
   2261       phi = it.Current()->AsPhi();
   2262     } else {
   2263       return false;
   2264     }
   2265   }
   2266 
   2267   // Then test for a typical loopheader:
   2268   //   s:  SuspendCheck
   2269   //   c:  Condition(phi, bound)
   2270   //   i:  If(c)
   2271   if (phi != nullptr && TrySetPhiInduction(phi, /*restrict_uses*/ false)) {
   2272     HInstruction* s = block->GetFirstInstruction();
   2273     if (s != nullptr && s->IsSuspendCheck()) {
   2274       HInstruction* c = s->GetNext();
   2275       if (c != nullptr &&
   2276           c->IsCondition() &&
   2277           c->GetUses().HasExactlyOneElement() &&  // only used for termination
   2278           !c->HasEnvironmentUses()) {  // unlikely, but not impossible
   2279         HInstruction* i = c->GetNext();
   2280         if (i != nullptr && i->IsIf() && i->InputAt(0) == c) {
   2281           iset_->insert(c);
   2282           iset_->insert(s);
   2283           *main_phi = phi;
   2284           return true;
   2285         }
   2286       }
   2287     }
   2288   }
   2289   return false;
   2290 }
   2291 
   2292 bool HLoopOptimization::IsEmptyBody(HBasicBlock* block) {
   2293   if (!block->GetPhis().IsEmpty()) {
   2294     return false;
   2295   }
   2296   for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
   2297     HInstruction* instruction = it.Current();
   2298     if (!instruction->IsGoto() && iset_->find(instruction) == iset_->end()) {
   2299       return false;
   2300     }
   2301   }
   2302   return true;
   2303 }
   2304 
   2305 bool HLoopOptimization::IsUsedOutsideLoop(HLoopInformation* loop_info,
   2306                                           HInstruction* instruction) {
   2307   // Deal with regular uses.
   2308   for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
   2309     if (use.GetUser()->GetBlock()->GetLoopInformation() != loop_info) {
   2310       return true;
   2311     }
   2312   }
   2313   return false;
   2314 }
   2315 
   2316 bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
   2317                                             HInstruction* instruction,
   2318                                             bool collect_loop_uses,
   2319                                             /*out*/ uint32_t* use_count) {
   2320   // Deal with regular uses.
   2321   for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
   2322     HInstruction* user = use.GetUser();
   2323     if (iset_->find(user) == iset_->end()) {  // not excluded?
   2324       HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
   2325       if (other_loop_info != nullptr && other_loop_info->IsIn(*loop_info)) {
   2326         // If collect_loop_uses is set, simply keep adding those uses to the set.
   2327         // Otherwise, reject uses inside the loop that were not already in the set.
   2328         if (collect_loop_uses) {
   2329           iset_->insert(user);
   2330           continue;
   2331         }
   2332         return false;
   2333       }
   2334       ++*use_count;
   2335     }
   2336   }
   2337   return true;
   2338 }
   2339 
   2340 bool HLoopOptimization::TryReplaceWithLastValue(HLoopInformation* loop_info,
   2341                                                 HInstruction* instruction,
   2342                                                 HBasicBlock* block) {
   2343   // Try to replace outside uses with the last value.
   2344   if (induction_range_.CanGenerateLastValue(instruction)) {
   2345     HInstruction* replacement = induction_range_.GenerateLastValue(instruction, graph_, block);
   2346     // Deal with regular uses.
   2347     const HUseList<HInstruction*>& uses = instruction->GetUses();
   2348     for (auto it = uses.begin(), end = uses.end(); it != end;) {
   2349       HInstruction* user = it->GetUser();
   2350       size_t index = it->GetIndex();
   2351       ++it;  // increment before replacing
   2352       if (iset_->find(user) == iset_->end()) {  // not excluded?
   2353         if (kIsDebugBuild) {
   2354           // We have checked earlier in 'IsOnlyUsedAfterLoop' that the use is after the loop.
   2355           HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
   2356           CHECK(other_loop_info == nullptr || !other_loop_info->IsIn(*loop_info));
   2357         }
   2358         user->ReplaceInput(replacement, index);
   2359         induction_range_.Replace(user, instruction, replacement);  // update induction
   2360       }
   2361     }
   2362     // Deal with environment uses.
   2363     const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
   2364     for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
   2365       HEnvironment* user = it->GetUser();
   2366       size_t index = it->GetIndex();
   2367       ++it;  // increment before replacing
   2368       if (iset_->find(user->GetHolder()) == iset_->end()) {  // not excluded?
   2369         // Only update environment uses after the loop.
   2370         HLoopInformation* other_loop_info = user->GetHolder()->GetBlock()->GetLoopInformation();
   2371         if (other_loop_info == nullptr || !other_loop_info->IsIn(*loop_info)) {
   2372           user->RemoveAsUserOfInput(index);
   2373           user->SetRawEnvAt(index, replacement);
   2374           replacement->AddEnvUseAt(user, index);
   2375         }
   2376       }
   2377     }
   2378     return true;
   2379   }
   2380   return false;
   2381 }
   2382 
   2383 bool HLoopOptimization::TryAssignLastValue(HLoopInformation* loop_info,
   2384                                            HInstruction* instruction,
   2385                                            HBasicBlock* block,
   2386                                            bool collect_loop_uses) {
    2387   // Assigning the last value is always successful if there are no uses.
    2388   // Otherwise, it succeeds in a loop without early exits by generating the
    2389   // proper last-value assignment.
   2390   uint32_t use_count = 0;
   2391   return IsOnlyUsedAfterLoop(loop_info, instruction, collect_loop_uses, &use_count) &&
   2392       (use_count == 0 ||
   2393        (!IsEarlyExit(loop_info) && TryReplaceWithLastValue(loop_info, instruction, block)));
   2394 }
   2395 
   2396 void HLoopOptimization::RemoveDeadInstructions(const HInstructionList& list) {
   2397   for (HBackwardInstructionIterator i(list); !i.Done(); i.Advance()) {
   2398     HInstruction* instruction = i.Current();
   2399     if (instruction->IsDeadAndRemovable()) {
   2400       simplified_ = true;
   2401       instruction->GetBlock()->RemoveInstructionOrPhi(instruction);
   2402     }
   2403   }
   2404 }
   2405 
   2406 bool HLoopOptimization::CanRemoveCycle() {
   2407   for (HInstruction* i : *iset_) {
   2408     // We can never remove instructions that have environment
   2409     // uses when we compile 'debuggable'.
   2410     if (i->HasEnvironmentUses() && graph_->IsDebuggable()) {
   2411       return false;
   2412     }
   2413     // A deoptimization should never have an environment input removed.
   2414     for (const HUseListNode<HEnvironment*>& use : i->GetEnvUses()) {
   2415       if (use.GetUser()->GetHolder()->IsDeoptimize()) {
   2416         return false;
   2417       }
   2418     }
   2419   }
   2420   return true;
   2421 }
   2422 
   2423 }  // namespace art
   2424