/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <memory>
#include <vector>

#include "arch/instruction_set.h"
#include "cfi_test.h"
#include "gtest/gtest.h"
#include "optimizing/code_generator.h"
#include "optimizing/optimizing_unit_test.h"
#include "utils/assembler.h"
#include "utils/arm/assembler_thumb2.h"
#include "utils/mips/assembler_mips.h"
#include "utils/mips64/assembler_mips64.h"

// Golden data: per-ISA `expected_asm_<isa>` / `expected_cfi_<isa>` byte
// arrays that the tests below compare against (and can regenerate).
#include "optimizing/optimizing_cfi_test_expected.inc"

namespace art {

// Run the tests only on host.
#ifndef __ANDROID__

// Verifies that the CFI (DWARF call frame information) emitted alongside the
// Optimizing compiler's method frame entry/exit code matches golden byte
// sequences, for every supported instruction set.
class OptimizingCFITest : public CFITest {
 public:
  // Enable this flag to generate the expected outputs.
  static constexpr bool kGenerateExpected = false;

  OptimizingCFITest()
      : pool_(),
        allocator_(&pool_),
        opts_(),
        isa_features_(),
        graph_(nullptr),
        code_gen_(),
        blocks_(allocator_.Adapter()) {}

  // Creates a code generator for `isa` and emits a frame entry that spills
  // two core and two floating-point callee-save registers, so the generated
  // CFI contains register-save directives worth checking.
  void SetUpFrame(InstructionSet isa) {
    // Setup simple context.
    std::string error;
    isa_features_.reset(InstructionSetFeatures::FromVariant(isa, "default", &error));
    graph_ = CreateGraph(&allocator_);
    // Generate simple frame with some spills.
    code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
    code_gen_->GetAssembler()->cfi().SetEnabled(true);
    const int frame_size = 64;
    int core_reg = 0;
    int fp_reg = 0;
    for (int i = 0; i < 2; i++) {  // Two registers of each kind.
      // Mark the next callee-save core register as allocated so that
      // GenerateFrameEntry() below will spill it.
      for (; core_reg < 32; core_reg++) {
        if (code_gen_->IsCoreCalleeSaveRegister(core_reg)) {
          auto location = Location::RegisterLocation(core_reg);
          code_gen_->AddAllocatedRegister(location);
          core_reg++;
          break;
        }
      }
      // Likewise for the next callee-save floating-point register.
      for (; fp_reg < 32; fp_reg++) {
        if (code_gen_->IsFloatingPointCalleeSaveRegister(fp_reg)) {
          auto location = Location::FpuRegisterLocation(fp_reg);
          code_gen_->AddAllocatedRegister(location);
          fp_reg++;
          break;
        }
      }
    }
    code_gen_->block_order_ = &blocks_;
    code_gen_->ComputeSpillMask();
    code_gen_->SetFrameSize(frame_size);
    code_gen_->GenerateFrameEntry();
  }

  // Emits the matching frame exit and finalizes the code into
  // code_allocator_ so its bytes can be inspected.
  void Finish() {
    code_gen_->GenerateFrameExit();
    code_gen_->Finalize(&code_allocator_);
  }

  // Compares the finalized machine code and CFI against the golden data —
  // or, when kGenerateExpected is set, prints fresh golden data to stdout
  // for pasting into optimizing_cfi_test_expected.inc.
  void Check(InstructionSet isa,
             const char* isa_str,
             const std::vector<uint8_t>& expected_asm,
             const std::vector<uint8_t>& expected_cfi) {
    // Get the outputs.
    const std::vector<uint8_t>& actual_asm = code_allocator_.GetMemory();
    Assembler* opt_asm = code_gen_->GetAssembler();
    const std::vector<uint8_t>& actual_cfi = *(opt_asm->cfi().data());

    if (kGenerateExpected) {
      GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi);
    } else {
      EXPECT_EQ(expected_asm, actual_asm);
      EXPECT_EQ(expected_cfi, actual_cfi);
    }
  }

  // Full round trip for the simple case: frame entry, frame exit, check.
  void TestImpl(InstructionSet isa,
                const char* isa_str,
                const std::vector<uint8_t>& expected_asm,
                const std::vector<uint8_t>& expected_cfi) {
    SetUpFrame(isa);
    Finish();
    Check(isa, isa_str, expected_asm, expected_cfi);
  }

  // Exposed so the *Adjust tests below can emit extra instructions between
  // frame entry and exit through the ISA-specific assembler.
  CodeGenerator* GetCodeGenerator() {
    return code_gen_.get();
  }

 private:
  // Minimal CodeAllocator that keeps the finalized code in a plain vector so
  // the test can compare it byte-for-byte.
  class InternalCodeAllocator : public CodeAllocator {
   public:
    InternalCodeAllocator() {}

    virtual uint8_t* Allocate(size_t size) {
      memory_.resize(size);
      return memory_.data();
    }

    const std::vector<uint8_t>& GetMemory() { return memory_; }

   private:
    std::vector<uint8_t> memory_;

    DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
  };

  ArenaPool pool_;
  ArenaAllocator allocator_;
  CompilerOptions opts_;
  std::unique_ptr<const InstructionSetFeatures> isa_features_;
  HGraph* graph_;
  std::unique_ptr<CodeGenerator> code_gen_;
  ArenaVector<HBasicBlock*> blocks_;            // Empty block order fed to the code generator.
  InternalCodeAllocator code_allocator_;        // Receives the finalized code bytes.
};

// Instantiates the standard frame entry/exit test for one ISA, wiring up the
// matching expected_asm_<isa>/expected_cfi_<isa> golden arrays.
#define TEST_ISA(isa) \
  TEST_F(OptimizingCFITest, isa) { \
    std::vector<uint8_t> expected_asm( \
        expected_asm_##isa, \
        expected_asm_##isa + arraysize(expected_asm_##isa)); \
    std::vector<uint8_t> expected_cfi( \
        expected_cfi_##isa, \
        expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
    TestImpl(isa, #isa, expected_asm, expected_cfi); \
  }

TEST_ISA(kThumb2)
TEST_ISA(kArm64)
TEST_ISA(kX86)
TEST_ISA(kX86_64)
TEST_ISA(kMips)
TEST_ISA(kMips64)

// Checks that the CFI stays consistent when the Thumb2 assembler has to
// rewrite a short-range CBZ branch into a longer sequence at finalization.
TEST_F(OptimizingCFITest, kThumb2Adjust) {
  std::vector<uint8_t> expected_asm(
      expected_asm_kThumb2_adjust,
      expected_asm_kThumb2_adjust + arraysize(expected_asm_kThumb2_adjust));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kThumb2_adjust,
      expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust));
  SetUpFrame(kThumb2);
#define __ down_cast<arm::Thumb2Assembler*>(GetCodeGenerator()->GetAssembler())->
  Label target;
  __ CompareAndBranchIfZero(arm::R0, &target);
  // Push the target out of range of CBZ.
  for (size_t i = 0; i != 65; ++i) {
    __ ldr(arm::R0, arm::Address(arm::R0));
  }
  __ Bind(&target);
#undef __
  Finish();
  Check(kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
}

// Same branch-adjustment scenario for MIPS: force BEQZ out of its branch
// range so the assembler must expand it, then verify the CFI.
TEST_F(OptimizingCFITest, kMipsAdjust) {
  // One NOP in delay slot, 1 << 15 NOPS have size 1 << 17 which exceeds 18-bit signed maximum.
  static constexpr size_t kNumNops = 1u + (1u << 15);
  std::vector<uint8_t> expected_asm(
      expected_asm_kMips_adjust_head,
      expected_asm_kMips_adjust_head + arraysize(expected_asm_kMips_adjust_head));
  // The middle of the expected code is kNumNops NOP instructions (all-zero
  // 4-byte words), so the golden data stores only the head and tail.
  expected_asm.resize(expected_asm.size() + kNumNops * 4u, 0u);
  expected_asm.insert(
      expected_asm.end(),
      expected_asm_kMips_adjust_tail,
      expected_asm_kMips_adjust_tail + arraysize(expected_asm_kMips_adjust_tail));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kMips_adjust,
      expected_cfi_kMips_adjust + arraysize(expected_cfi_kMips_adjust));
  SetUpFrame(kMips);
#define __ down_cast<mips::MipsAssembler*>(GetCodeGenerator()->GetAssembler())->
  mips::MipsLabel target;
  __ Beqz(mips::A0, &target);
  // Push the target out of range of BEQZ.
  for (size_t i = 0; i != kNumNops; ++i) {
    __ Nop();
  }
  __ Bind(&target);
#undef __
  Finish();
  Check(kMips, "kMips_adjust", expected_asm, expected_cfi);
}

// Same branch-adjustment scenario for MIPS64 with the compact BEQC branch.
TEST_F(OptimizingCFITest, kMips64Adjust) {
  // One NOP in forbidden slot, 1 << 15 NOPS have size 1 << 17 which exceeds 18-bit signed maximum.
  static constexpr size_t kNumNops = 1u + (1u << 15);
  std::vector<uint8_t> expected_asm(
      expected_asm_kMips64_adjust_head,
      expected_asm_kMips64_adjust_head + arraysize(expected_asm_kMips64_adjust_head));
  // As above: the NOP filler is reconstructed rather than stored verbatim.
  expected_asm.resize(expected_asm.size() + kNumNops * 4u, 0u);
  expected_asm.insert(
      expected_asm.end(),
      expected_asm_kMips64_adjust_tail,
      expected_asm_kMips64_adjust_tail + arraysize(expected_asm_kMips64_adjust_tail));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kMips64_adjust,
      expected_cfi_kMips64_adjust + arraysize(expected_cfi_kMips64_adjust));
  SetUpFrame(kMips64);
#define __ down_cast<mips64::Mips64Assembler*>(GetCodeGenerator()->GetAssembler())->
  mips64::Mips64Label target;
  __ Beqc(mips64::A1, mips64::A2, &target);
  // Push the target out of range of BEQC.
  for (size_t i = 0; i != kNumNops; ++i) {
    __ Nop();
  }
  __ Bind(&target);
#undef __
  Finish();
  Check(kMips64, "kMips64_adjust", expected_asm, expected_cfi);
}

#endif  // __ANDROID__

}  // namespace art