/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <memory>
#include <vector>

#include "arch/instruction_set.h"
#include "cfi_test.h"
#include "driver/compiler_options.h"
#include "gtest/gtest.h"
#include "optimizing/code_generator.h"
#include "optimizing/optimizing_unit_test.h"
#include "utils/assembler.h"
#ifdef ART_USE_OLD_ARM_BACKEND
#include "utils/arm/assembler_thumb2.h"
#else
#include "utils/arm/assembler_arm_vixl.h"
#endif
#include "utils/mips/assembler_mips.h"
#include "utils/mips64/assembler_mips64.h"

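// Golden (expected) assembly and CFI byte arrays for each ISA; they can be
// regenerated by running the tests with kGenerateExpected (below) enabled.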
#include "optimizing/optimizing_cfi_test_expected.inc"

#ifndef ART_USE_OLD_ARM_BACKEND
namespace vixl32 = vixl::aarch32;

using vixl32::r0;
#endif

namespace art {

// Run the tests only on host.
#ifndef ART_TARGET_ANDROID

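// Builds a minimal frame (a prologue with a few callee-save spills, then the
// matching epilogue) with the Optimizing code generator, and checks both the
// emitted machine code and the generated DWARF CFI against the golden data.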
class OptimizingCFITest : public CFITest {
 public:
  // Enable this flag to print the expected outputs to stdout
  // instead of checking them.
  static constexpr bool kGenerateExpected = false;

  OptimizingCFITest()
      : pool_(),
        allocator_(&pool_),
        opts_(),
        isa_features_(),
        graph_(nullptr),
        code_gen_(),
        blocks_(allocator_.Adapter()) {}

  void SetUpFrame(InstructionSet isa) {
    // Set up a simple context.
    std::string error;
    isa_features_ = InstructionSetFeatures::FromVariant(isa, "default", &error);
    graph_ = CreateGraph(&allocator_);
    // Generate a simple frame with some spills.
    code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
    code_gen_->GetAssembler()->cfi().SetEnabled(true);
    const int frame_size = 64;
    int core_reg = 0;
    int fp_reg = 0;
    for (int i = 0; i < 2; i++) {  // Two callee-save registers of each kind.
      for (; core_reg < 32; core_reg++) {
        if (code_gen_->IsCoreCalleeSaveRegister(core_reg)) {
          auto location = Location::RegisterLocation(core_reg);
          code_gen_->AddAllocatedRegister(location);
          core_reg++;
          break;
        }
      }
      for (; fp_reg < 32; fp_reg++) {
        if (code_gen_->IsFloatingPointCalleeSaveRegister(fp_reg)) {
          auto location = Location::FpuRegisterLocation(fp_reg);
          code_gen_->AddAllocatedRegister(location);
          fp_reg++;
          break;
        }
      }
    }
    code_gen_->block_order_ = &blocks_;
    code_gen_->ComputeSpillMask();
    code_gen_->SetFrameSize(frame_size);
    code_gen_->GenerateFrameEntry();
  }

  void Finish() {
    code_gen_->GenerateFrameExit();
    code_gen_->Finalize(&code_allocator_);
  }

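  // Compare the emitted code and CFI against the golden data, or, when
  // kGenerateExpected is set, print new golden data to stdout instead.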
  void Check(InstructionSet isa,
             const char* isa_str,
             const std::vector<uint8_t>& expected_asm,
             const std::vector<uint8_t>& expected_cfi) {
    // Get the outputs.
    const std::vector<uint8_t>& actual_asm = code_allocator_.GetMemory();
    Assembler* opt_asm = code_gen_->GetAssembler();
    const std::vector<uint8_t>& actual_cfi = *(opt_asm->cfi().data());

    if (kGenerateExpected) {
      GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi);
    } else {
      EXPECT_EQ(expected_asm, actual_asm);
      EXPECT_EQ(expected_cfi, actual_cfi);
    }
  }

  void TestImpl(InstructionSet isa,
                const char* isa_str,
                const std::vector<uint8_t>& expected_asm,
                const std::vector<uint8_t>& expected_cfi) {
    SetUpFrame(isa);
    Finish();
    Check(isa, isa_str, expected_asm, expected_cfi);
  }

  CodeGenerator* GetCodeGenerator() {
    return code_gen_.get();
  }

 private:
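  // Minimal CodeAllocator that backs Finalize() with a plain byte vector,
  // so the test can read back the emitted code.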
  class InternalCodeAllocator : public CodeAllocator {
   public:
    InternalCodeAllocator() {}

    virtual uint8_t* Allocate(size_t size) {
      memory_.resize(size);
      return memory_.data();
    }

    const std::vector<uint8_t>& GetMemory() { return memory_; }

   private:
    std::vector<uint8_t> memory_;

    DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
  };

  ArenaPool pool_;
  ArenaAllocator allocator_;
  CompilerOptions opts_;
  std::unique_ptr<const InstructionSetFeatures> isa_features_;
  HGraph* graph_;
  std::unique_ptr<CodeGenerator> code_gen_;
  ArenaVector<HBasicBlock*> blocks_;
  InternalCodeAllocator code_allocator_;
};

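// Instantiates one test per ISA: wraps the golden arrays from the .inc file
// into vectors and runs the standard frame entry/exit sequence through
// TestImpl().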
#define TEST_ISA(isa)                                         \
  TEST_F(OptimizingCFITest, isa) {                            \
    std::vector<uint8_t> expected_asm(                        \
        expected_asm_##isa,                                   \
        expected_asm_##isa + arraysize(expected_asm_##isa));  \
    std::vector<uint8_t> expected_cfi(                        \
        expected_cfi_##isa,                                   \
        expected_cfi_##isa + arraysize(expected_cfi_##isa));  \
    TestImpl(isa, #isa, expected_asm, expected_cfi);          \
  }

#ifdef ART_ENABLE_CODEGEN_arm
TEST_ISA(kThumb2)
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
TEST_ISA(kArm64)
#endif
#ifdef ART_ENABLE_CODEGEN_x86
TEST_ISA(kX86)
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
TEST_ISA(kX86_64)
#endif
#ifdef ART_ENABLE_CODEGEN_mips
TEST_ISA(kMips)
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
TEST_ISA(kMips64)
#endif

#ifdef ART_ENABLE_CODEGEN_arm
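// Checks that the CFI stream stays in sync with the code when the branch to
// `target` cannot use the short CBZ encoding and the assembler has to emit a
// longer sequence instead.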
TEST_F(OptimizingCFITest, kThumb2Adjust) {
  std::vector<uint8_t> expected_asm(
      expected_asm_kThumb2_adjust,
      expected_asm_kThumb2_adjust + arraysize(expected_asm_kThumb2_adjust));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kThumb2_adjust,
      expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust));
  SetUpFrame(kThumb2);
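// Shorthand for emitting instructions through the code generator's assembler.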
#ifdef ART_USE_OLD_ARM_BACKEND
#define __ down_cast<arm::Thumb2Assembler*>(GetCodeGenerator()->GetAssembler())->
  Label target;
  __ CompareAndBranchIfZero(arm::R0, &target);
  // Push the target out of range of CBZ: 65 loads take at least 130 bytes,
  // exceeding CBZ's maximum forward offset of 126 bytes.
  for (size_t i = 0; i != 65; ++i) {
    __ ldr(arm::R0, arm::Address(arm::R0));
  }
#else
#define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \
    ->GetAssembler())->GetVIXLAssembler()->
  vixl32::Label target;
  __ CompareAndBranchIfZero(r0, &target);
  // Push the target out of range of CBZ: 65 loads take at least 130 bytes,
  // exceeding CBZ's maximum forward offset of 126 bytes.
  for (size_t i = 0; i != 65; ++i) {
    __ Ldr(r0, vixl32::MemOperand(r0));
  }
#endif
  __ Bind(&target);
#undef __
  Finish();
  Check(kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
}
#endif

#ifdef ART_ENABLE_CODEGEN_mips
TEST_F(OptimizingCFITest, kMipsAdjust) {
  // One NOP in the delay slot, plus (1 << 15) NOPs whose combined size of
  // 1 << 17 bytes exceeds the maximum 18-bit signed branch offset of BEQZ.
  static constexpr size_t kNumNops = 1u + (1u << 15);
  std::vector<uint8_t> expected_asm(
      expected_asm_kMips_adjust_head,
      expected_asm_kMips_adjust_head + arraysize(expected_asm_kMips_adjust_head));
  expected_asm.resize(expected_asm.size() + kNumNops * 4u, 0u);
  expected_asm.insert(
      expected_asm.end(),
      expected_asm_kMips_adjust_tail,
      expected_asm_kMips_adjust_tail + arraysize(expected_asm_kMips_adjust_tail));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kMips_adjust,
      expected_cfi_kMips_adjust + arraysize(expected_cfi_kMips_adjust));
  SetUpFrame(kMips);
#define __ down_cast<mips::MipsAssembler*>(GetCodeGenerator()->GetAssembler())->
  mips::MipsLabel target;
  __ Beqz(mips::A0, &target);
  // Push the target out of range of BEQZ.
  for (size_t i = 0; i != kNumNops; ++i) {
    __ Nop();
  }
  __ Bind(&target);
#undef __
  Finish();
  Check(kMips, "kMips_adjust", expected_asm, expected_cfi);
}
#endif

#ifdef ART_ENABLE_CODEGEN_mips64
TEST_F(OptimizingCFITest, kMips64Adjust) {
  // One NOP in the forbidden slot, plus (1 << 15) NOPs whose combined size of
  // 1 << 17 bytes exceeds the maximum 18-bit signed branch offset of BEQC.
  static constexpr size_t kNumNops = 1u + (1u << 15);
  std::vector<uint8_t> expected_asm(
      expected_asm_kMips64_adjust_head,
      expected_asm_kMips64_adjust_head + arraysize(expected_asm_kMips64_adjust_head));
  expected_asm.resize(expected_asm.size() + kNumNops * 4u, 0u);
  expected_asm.insert(
      expected_asm.end(),
      expected_asm_kMips64_adjust_tail,
      expected_asm_kMips64_adjust_tail + arraysize(expected_asm_kMips64_adjust_tail));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kMips64_adjust,
      expected_cfi_kMips64_adjust + arraysize(expected_cfi_kMips64_adjust));
  SetUpFrame(kMips64);
#define __ down_cast<mips64::Mips64Assembler*>(GetCodeGenerator()->GetAssembler())->
  mips64::Mips64Label target;
  __ Beqc(mips64::A1, mips64::A2, &target);
  // Push the target out of range of BEQC.
  for (size_t i = 0; i != kNumNops; ++i) {
    __ Nop();
  }
  __ Bind(&target);
#undef __
  Finish();
  Check(kMips64, "kMips64_adjust", expected_asm, expected_cfi);
}
#endif

#endif  // ART_TARGET_ANDROID

}  // namespace art