/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "loop_analysis.h"

#include "base/bit_vector-inl.h"
#include "induction_var_range.h"

namespace art {

void LoopAnalysis::CalculateLoopBasicProperties(HLoopInformation* loop_info,
                                                LoopAnalysisInfo* analysis_results,
                                                int64_t trip_count) {
  analysis_results->trip_count_ = trip_count;

  for (HBlocksInLoopIterator block_it(*loop_info);
       !block_it.Done();
       block_it.Advance()) {
    HBasicBlock* block = block_it.Current();

    // Check whether one of the successors is a loop exit.
    for (HBasicBlock* successor : block->GetSuccessors()) {
      if (!loop_info->Contains(*successor)) {
        analysis_results->exits_num_++;

        // We track the number of invariant loop exits which correspond to an HIf instruction and
        // can be eliminated by loop peeling; other control flow instructions are ignored and will
        // not cause loop peeling to happen as they either cannot be inside a loop, or by
        // definition cannot be loop exits (unconditional instructions), or are not beneficial for
        // the optimization.
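        // For example, an exit whose HIf condition is defined entirely outside the loop (a
        // loop-invariant guard) is counted here as a candidate for elimination by peeling.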
        HIf* hif = block->GetLastInstruction()->AsIf();
        if (hif != nullptr && !loop_info->Contains(*hif->InputAt(0)->GetBlock())) {
          analysis_results->invariant_exits_num_++;
        }
      }
    }

    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* instruction = it.Current();
      if (instruction->GetType() == DataType::Type::kInt64) {
        analysis_results->has_long_type_instructions_ = true;
      }
      if (MakesScalarPeelingUnrollingNonBeneficial(instruction)) {
        analysis_results->has_instructions_preventing_scalar_peeling_ = true;
        analysis_results->has_instructions_preventing_scalar_unrolling_ = true;
      }
      analysis_results->instr_num_++;
    }
    analysis_results->bb_num_++;
  }
}

int64_t LoopAnalysis::GetLoopTripCount(HLoopInformation* loop_info,
                                       const InductionVarRange* induction_range) {
  int64_t trip_count;
  if (!induction_range->HasKnownTripCount(loop_info, &trip_count)) {
    trip_count = LoopAnalysisInfo::kUnknownTripCount;
  }
  return trip_count;
}

// Default implementation of loop helper; used for all targets unless a custom implementation
// is provided. Enables scalar loop peeling and unrolling with the most conservative heuristics.
class ArchDefaultLoopHelper : public ArchNoOptsLoopHelper {
 public:
  // Scalar loop unrolling parameters and heuristics.
  //
  // Maximum possible unrolling factor.
  static constexpr uint32_t kScalarMaxUnrollFactor = 2;
  // Loop's maximum instruction count. Loops with higher count will not be peeled/unrolled.
  static constexpr uint32_t kScalarHeuristicMaxBodySizeInstr = 17;
  // Loop's maximum basic block count. Loops with higher count will not be peeled/unrolled.
  static constexpr uint32_t kScalarHeuristicMaxBodySizeBlocks = 6;
  // Maximum number of instructions to be created as a result of full unrolling.
  static constexpr uint32_t kScalarHeuristicFullyUnrolledMaxInstrThreshold = 35;

  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const override {
    return analysis_info->HasLongTypeInstructions() ||
           IsLoopTooBig(analysis_info,
                        kScalarHeuristicMaxBodySizeInstr,
                        kScalarHeuristicMaxBodySizeBlocks);
  }

  uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const override {
    int64_t trip_count = analysis_info->GetTripCount();
    // Unroll only loops with known trip count.
    if (trip_count == LoopAnalysisInfo::kUnknownTripCount) {
      return LoopAnalysisInfo::kNoUnrollingFactor;
    }
    uint32_t desired_unrolling_factor = kScalarMaxUnrollFactor;
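    // Require the trip count to be at least the factor and evenly divisible by it; e.g. with
    // factor 2, a trip count of 7 is rejected while a trip count of 8 is unrolled by a factor of 2.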
    if (trip_count < desired_unrolling_factor || trip_count % desired_unrolling_factor != 0) {
      return LoopAnalysisInfo::kNoUnrollingFactor;
    }

    return desired_unrolling_factor;
  }

  bool IsLoopPeelingEnabled() const override { return true; }

  bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const override {
    int64_t trip_count = analysis_info->GetTripCount();
    // We assume that trip count is known.
    DCHECK_NE(trip_count, LoopAnalysisInfo::kUnknownTripCount);
    size_t instr_num = analysis_info->GetNumberOfInstructions();
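    // The fully unrolled loop has roughly trip_count * instr_num instructions; e.g. four
    // iterations of an eight-instruction body (32 instructions) fall under the threshold of 35.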
    return (trip_count * instr_num < kScalarHeuristicFullyUnrolledMaxInstrThreshold);
  }

 protected:
  bool IsLoopTooBig(LoopAnalysisInfo* loop_analysis_info,
                    size_t instr_threshold,
                    size_t bb_threshold) const {
    size_t instr_num = loop_analysis_info->GetNumberOfInstructions();
    size_t bb_num = loop_analysis_info->GetNumberOfBasicBlocks();
    return (instr_num >= instr_threshold || bb_num >= bb_threshold);
  }
};

// Custom implementation of loop helper for the arm64 target. Enables heuristics for scalar loop
// peeling and unrolling and supports SIMD loop unrolling.
class Arm64LoopHelper : public ArchDefaultLoopHelper {
 public:
  // SIMD loop unrolling parameters and heuristics.
  //
  // Maximum possible unrolling factor.
  static constexpr uint32_t kArm64SimdMaxUnrollFactor = 8;
  // Loop's maximum instruction count. Loops with higher count will not be unrolled.
  static constexpr uint32_t kArm64SimdHeuristicMaxBodySizeInstr = 50;

  // Loop's maximum instruction count. Loops with higher count will not be peeled/unrolled.
  static constexpr uint32_t kArm64ScalarHeuristicMaxBodySizeInstr = 40;
  // Loop's maximum basic block count. Loops with higher count will not be peeled/unrolled.
  static constexpr uint32_t kArm64ScalarHeuristicMaxBodySizeBlocks = 8;

  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const override {
    return IsLoopTooBig(loop_analysis_info,
                        kArm64ScalarHeuristicMaxBodySizeInstr,
                        kArm64ScalarHeuristicMaxBodySizeBlocks);
  }

  uint32_t GetSIMDUnrollingFactor(HBasicBlock* block,
                                  int64_t trip_count,
                                  uint32_t max_peel,
                                  uint32_t vector_length) const override {
    // Don't unroll with insufficient iterations.
    // TODO: Unroll loops with unknown trip count.
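    // E.g. with max_peel == 4 and vector_length == 4, loops with fewer than 12 iterations
    // are not unrolled.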
    DCHECK_NE(vector_length, 0u);
    if (trip_count < (2 * vector_length + max_peel)) {
      return LoopAnalysisInfo::kNoUnrollingFactor;
    }
    // Don't unroll for large loop body size.
    uint32_t instruction_count = block->GetInstructions().CountSize();
    if (instruction_count >= kArm64SimdHeuristicMaxBodySizeInstr) {
      return LoopAnalysisInfo::kNoUnrollingFactor;
    }
    // Find a beneficial unroll factor with the following restrictions:
    //  - At least one iteration of the transformed loop should be executed.
    //  - The loop body shouldn't be "too big" (heuristic).
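    //  For example, a 10-instruction body with trip_count == 100, max_peel == 4 and
    //  vector_length == 4 gives uf1 == 5 and uf2 == 24, so the factor is
    //  TruncToPowerOfTwo(min(5, 24, 8)) == 4.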

    uint32_t uf1 = kArm64SimdHeuristicMaxBodySizeInstr / instruction_count;
    uint32_t uf2 = (trip_count - max_peel) / vector_length;
    uint32_t unroll_factor =
        TruncToPowerOfTwo(std::min({uf1, uf2, kArm64SimdMaxUnrollFactor}));
    DCHECK_GE(unroll_factor, 1u);
    return unroll_factor;
  }
};

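// Selects the loop helper for the given ISA: arm64 gets its own heuristics, all other
// targets fall back to the conservative default helper.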
ArchNoOptsLoopHelper* ArchNoOptsLoopHelper::Create(InstructionSet isa,
                                                   ArenaAllocator* allocator) {
  switch (isa) {
    case InstructionSet::kArm64: {
      return new (allocator) Arm64LoopHelper;
    }
    default: {
      return new (allocator) ArchDefaultLoopHelper;
    }
  }
}

}  // namespace art