/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <memory>
#include <vector>

#include "arch/instruction_set.h"
#include "base/runtime_debug.h"
#include "cfi_test.h"
#include "driver/compiler_options.h"
#include "gtest/gtest.h"
#include "optimizing/code_generator.h"
#include "optimizing/optimizing_unit_test.h"
#include "read_barrier_config.h"
#include "utils/arm/assembler_arm_vixl.h"
#include "utils/assembler.h"
#include "utils/mips/assembler_mips.h"
#include "utils/mips64/assembler_mips64.h"

#include "optimizing/optimizing_cfi_test_expected.inc"

namespace vixl32 = vixl::aarch32;

using vixl32::r0;

namespace art {

// Run the tests only on host.
#ifndef ART_TARGET_ANDROID

class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
 public:
  // Enable this flag to generate the expected outputs.
  static constexpr bool kGenerateExpected = false;
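  // A typical regeneration workflow (a sketch; details may differ in your
  // setup): flip the flag to true, run this host test, and copy the tables
  // that GenerateExpected() prints to stdout into
  // optimizing_cfi_test_expected.inc.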

  OptimizingCFITest()
      : pool_and_allocator_(),
        opts_(),
        isa_features_(),
        graph_(nullptr),
        code_gen_(),
        blocks_(GetAllocator()->Adapter()) {}

  ArenaAllocator* GetAllocator() { return pool_and_allocator_.GetAllocator(); }

  void SetUpFrame(InstructionSet isa) {
    // Ensure that slow-debug is off, so that there is no unexpected read-barrier check emitted.
    SetRuntimeDebugFlagsEnabled(false);

    // Set up a simple context.
    std::string error;
    isa_features_ = InstructionSetFeatures::FromVariant(isa, "default", &error);
    graph_ = CreateGraph();
    // Generate a simple frame with some spills: mark the first two core and
    // the first two floating-point callee-save registers as allocated, so
    // that GenerateFrameEntry() spills them.
    code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
    code_gen_->GetAssembler()->cfi().SetEnabled(true);
    code_gen_->InitializeCodeGenerationData();
    const int frame_size = 64;
    int core_reg = 0;
    int fp_reg = 0;
    for (int i = 0; i < 2; i++) {  // Two registers of each kind.
      for (; core_reg < 32; core_reg++) {
        if (code_gen_->IsCoreCalleeSaveRegister(core_reg)) {
          auto location = Location::RegisterLocation(core_reg);
          code_gen_->AddAllocatedRegister(location);
          core_reg++;
          break;
        }
      }
      for (; fp_reg < 32; fp_reg++) {
        if (code_gen_->IsFloatingPointCalleeSaveRegister(fp_reg)) {
          auto location = Location::FpuRegisterLocation(fp_reg);
          code_gen_->AddAllocatedRegister(location);
          fp_reg++;
          break;
        }
      }
    }
    code_gen_->block_order_ = &blocks_;
    code_gen_->ComputeSpillMask();
    code_gen_->SetFrameSize(frame_size);
    code_gen_->GenerateFrameEntry();
  }

  void Finish() {
    code_gen_->GenerateFrameExit();
    code_gen_->Finalize(&code_allocator_);
  }

  void Check(InstructionSet isa,
             const char* isa_str,
             const std::vector<uint8_t>& expected_asm,
             const std::vector<uint8_t>& expected_cfi) {
    // Get the outputs.
    const std::vector<uint8_t>& actual_asm = code_allocator_.GetMemory();
    Assembler* opt_asm = code_gen_->GetAssembler();
    const std::vector<uint8_t>& actual_cfi = *(opt_asm->cfi().data());

    if (kGenerateExpected) {
      GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi);
    } else {
      EXPECT_EQ(expected_asm, actual_asm);
      EXPECT_EQ(expected_cfi, actual_cfi);
    }
  }

  void TestImpl(InstructionSet isa,
                const char* isa_str,
                const std::vector<uint8_t>& expected_asm,
                const std::vector<uint8_t>& expected_cfi) {
    SetUpFrame(isa);
    Finish();
    Check(isa, isa_str, expected_asm, expected_cfi);
  }

  CodeGenerator* GetCodeGenerator() {
    return code_gen_.get();
  }

 private:
  class InternalCodeAllocator : public CodeAllocator {
   public:
    InternalCodeAllocator() {}

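    // Note: Finalize() is expected to request the final code buffer in a
    // single Allocate() call; resize() would discard any earlier buffer, so
    // this allocator only keeps the most recent request.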
    uint8_t* Allocate(size_t size) override {
      memory_.resize(size);
      return memory_.data();
    }

    const std::vector<uint8_t>& GetMemory() { return memory_; }

   private:
    std::vector<uint8_t> memory_;

    DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
  };

  ArenaPoolAndAllocator pool_and_allocator_;
  CompilerOptions opts_;
  std::unique_ptr<const InstructionSetFeatures> isa_features_;
  HGraph* graph_;
  std::unique_ptr<CodeGenerator> code_gen_;
  ArenaVector<HBasicBlock*> blocks_;
  InternalCodeAllocator code_allocator_;
};

#define TEST_ISA(isa)                                                 \
  TEST_F(OptimizingCFITest, isa) {                                    \
    std::vector<uint8_t> expected_asm(                                \
        expected_asm_##isa,                                           \
        expected_asm_##isa + arraysize(expected_asm_##isa));          \
    std::vector<uint8_t> expected_cfi(                                \
        expected_cfi_##isa,                                           \
        expected_cfi_##isa + arraysize(expected_cfi_##isa));          \
    TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi);  \
  }
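
// For reference, TEST_ISA(kX86) expands to roughly:
//
//   TEST_F(OptimizingCFITest, kX86) {
//     std::vector<uint8_t> expected_asm(
//         expected_asm_kX86, expected_asm_kX86 + arraysize(expected_asm_kX86));
//     std::vector<uint8_t> expected_cfi(
//         expected_cfi_kX86, expected_cfi_kX86 + arraysize(expected_cfi_kX86));
//     TestImpl(InstructionSet::kX86, "kX86", expected_asm, expected_cfi);
//   }
//
// where the expected_* arrays come from optimizing_cfi_test_expected.inc.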

#ifdef ART_ENABLE_CODEGEN_arm
TEST_ISA(kThumb2)
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
// Run the tests for ARM64 only with Baker read barriers: the expected
// generated code saves and restores X21 and X22 (instead of X20 and X21),
// because X20 is used as the Marking Register in the Baker read barrier
// configuration and is therefore removed from the set of callee-save
// registers in the ARM64 code generator of the Optimizing compiler.
#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
TEST_ISA(kArm64)
#endif
#endif

#ifdef ART_ENABLE_CODEGEN_x86
TEST_ISA(kX86)
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
TEST_ISA(kX86_64)
#endif

#ifdef ART_ENABLE_CODEGEN_mips
TEST_ISA(kMips)
#endif

#ifdef ART_ENABLE_CODEGEN_mips64
TEST_ISA(kMips64)
#endif

#ifdef ART_ENABLE_CODEGEN_arm
TEST_F(OptimizingCFITest, kThumb2Adjust) {
  std::vector<uint8_t> expected_asm(
      expected_asm_kThumb2_adjust,
      expected_asm_kThumb2_adjust + arraysize(expected_asm_kThumb2_adjust));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kThumb2_adjust,
      expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust));
  SetUpFrame(InstructionSet::kThumb2);
#define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \
    ->GetAssembler())->GetVIXLAssembler()->
  vixl32::Label target;
  __ CompareAndBranchIfZero(r0, &target);
  // Push the target out of range of CBZ.
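  // (CBZ can only branch forward 0-126 bytes; assuming the narrow 16-bit LDR
  // encoding, 65 LDRs take 130 bytes, so the assembler has to relax the
  // branch, which in turn adjusts the emitted CFI.)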
  for (size_t i = 0; i != 65; ++i) {
    __ Ldr(r0, vixl32::MemOperand(r0));
  }
  __ Bind(&target);
#undef __
  Finish();
  Check(InstructionSet::kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
}
#endif

#ifdef ART_ENABLE_CODEGEN_mips
TEST_F(OptimizingCFITest, kMipsAdjust) {
  // One NOP in the delay slot, plus 1 << 15 NOPs whose 1 << 17 bytes exceed
  // the 18-bit signed maximum branch offset.
  static constexpr size_t kNumNops = 1u + (1u << 15);
  std::vector<uint8_t> expected_asm(
      expected_asm_kMips_adjust_head,
      expected_asm_kMips_adjust_head + arraysize(expected_asm_kMips_adjust_head));
  expected_asm.resize(expected_asm.size() + kNumNops * 4u, 0u);
  expected_asm.insert(
      expected_asm.end(),
      expected_asm_kMips_adjust_tail,
      expected_asm_kMips_adjust_tail + arraysize(expected_asm_kMips_adjust_tail));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kMips_adjust,
      expected_cfi_kMips_adjust + arraysize(expected_cfi_kMips_adjust));
  SetUpFrame(InstructionSet::kMips);
#define __ down_cast<mips::MipsAssembler*>(GetCodeGenerator()->GetAssembler())->
  mips::MipsLabel target;
  __ Beqz(mips::A0, &target);
  // Push the target out of range of BEQZ.
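  // (BEQZ's 16-bit word offset reaches at most 32767 words = 131068 bytes
  // forward; kNumNops * 4 = 131076 bytes, so the assembler must expand the
  // branch into a long-range sequence, changing the CFI advance.)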
  for (size_t i = 0; i != kNumNops; ++i) {
    __ Nop();
  }
  __ Bind(&target);
#undef __
  Finish();
  Check(InstructionSet::kMips, "kMips_adjust", expected_asm, expected_cfi);
}
#endif

#ifdef ART_ENABLE_CODEGEN_mips64
TEST_F(OptimizingCFITest, kMips64Adjust) {
  // One NOP in the forbidden slot, plus 1 << 15 NOPs whose 1 << 17 bytes
  // exceed the 18-bit signed maximum branch offset.
  static constexpr size_t kNumNops = 1u + (1u << 15);
  std::vector<uint8_t> expected_asm(
      expected_asm_kMips64_adjust_head,
      expected_asm_kMips64_adjust_head + arraysize(expected_asm_kMips64_adjust_head));
  expected_asm.resize(expected_asm.size() + kNumNops * 4u, 0u);
  expected_asm.insert(
      expected_asm.end(),
      expected_asm_kMips64_adjust_tail,
      expected_asm_kMips64_adjust_tail + arraysize(expected_asm_kMips64_adjust_tail));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kMips64_adjust,
      expected_cfi_kMips64_adjust + arraysize(expected_cfi_kMips64_adjust));
  SetUpFrame(InstructionSet::kMips64);
#define __ down_cast<mips64::Mips64Assembler*>(GetCodeGenerator()->GetAssembler())->
  mips64::Mips64Label target;
  __ Beqc(mips64::A1, mips64::A2, &target);
  // Push the target out of range of BEQC.
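  // (BEQC's 16-bit word offset likewise reaches at most 32767 words = 131068
  // bytes forward, while the NOPs span kNumNops * 4 = 131076 bytes, forcing a
  // long-branch expansion.)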
  for (size_t i = 0; i != kNumNops; ++i) {
    __ Nop();
  }
  __ Bind(&target);
#undef __
  Finish();
  Check(InstructionSet::kMips64, "kMips64_adjust", expected_asm, expected_cfi);
}
#endif

#endif  // ART_TARGET_ANDROID

}  // namespace art