/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_

#include "base/enums.h"
#include "code_generator.h"
#include "common_arm.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "string_reference.h"
#include "type_reference.h"
#include "utils/arm/assembler_arm_vixl.h"

// TODO(VIXL): make vixl clean wrt -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
#pragma GCC diagnostic pop

namespace art {
namespace arm {

// This constant is used as an approximate margin when emission of veneer and literal pools
// must be blocked.
static constexpr int kMaxMacroInstructionSizeInBytes =
    15 * vixl::aarch32::kMaxInstructionSizeInBytes;
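
// A sketch of the intended use, assuming VIXL's EmissionCheckScope API: the
// scope reserves space in the code buffer and blocks pool emission, so that
// no veneer or literal pool can be dumped in the middle of a sequence whose
// exact layout matters.
//
//   {
//     vixl::EmissionCheckScope guard(GetVIXLAssembler(),
//                                    kMaxMacroInstructionSizeInBytes);
//     // Emit the macro instruction; pools are blocked within this scope.
//   }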

static const vixl::aarch32::Register kParameterCoreRegistersVIXL[] = {
    vixl::aarch32::r1,
    vixl::aarch32::r2,
    vixl::aarch32::r3
};
static const size_t kParameterCoreRegistersLengthVIXL = arraysize(kParameterCoreRegistersVIXL);
static const vixl::aarch32::SRegister kParameterFpuRegistersVIXL[] = {
    vixl::aarch32::s0,
    vixl::aarch32::s1,
    vixl::aarch32::s2,
    vixl::aarch32::s3,
    vixl::aarch32::s4,
    vixl::aarch32::s5,
    vixl::aarch32::s6,
    vixl::aarch32::s7,
    vixl::aarch32::s8,
    vixl::aarch32::s9,
    vixl::aarch32::s10,
    vixl::aarch32::s11,
    vixl::aarch32::s12,
    vixl::aarch32::s13,
    vixl::aarch32::s14,
    vixl::aarch32::s15
};
static const size_t kParameterFpuRegistersLengthVIXL = arraysize(kParameterFpuRegistersVIXL);

static const vixl::aarch32::Register kMethodRegister = vixl::aarch32::r0;

static const vixl::aarch32::Register kCoreAlwaysSpillRegister = vixl::aarch32::r5;

// Callee-save core registers: r5, r6, r7, r8 (except when emitting Baker read
// barriers, where r8 is used as the Marking Register), r10, r11, and lr.
static const vixl::aarch32::RegisterList kCoreCalleeSaves = vixl::aarch32::RegisterList::Union(
    vixl::aarch32::RegisterList(vixl::aarch32::r5,
                                vixl::aarch32::r6,
                                vixl::aarch32::r7),
    // Do not consider r8 as a callee-save register with Baker read barriers.
    ((kEmitCompilerReadBarrier && kUseBakerReadBarrier)
         ? vixl::aarch32::RegisterList()
         : vixl::aarch32::RegisterList(vixl::aarch32::r8)),
    vixl::aarch32::RegisterList(vixl::aarch32::r10,
                                vixl::aarch32::r11,
                                vixl::aarch32::lr));

// Callee-save FP registers: s16 to s31 inclusive.
static const vixl::aarch32::SRegisterList kFpuCalleeSaves =
    vixl::aarch32::SRegisterList(vixl::aarch32::s16, 16);

static const vixl::aarch32::Register kRuntimeParameterCoreRegistersVIXL[] = {
    vixl::aarch32::r0,
    vixl::aarch32::r1,
    vixl::aarch32::r2,
    vixl::aarch32::r3
};
static const size_t kRuntimeParameterCoreRegistersLengthVIXL =
    arraysize(kRuntimeParameterCoreRegistersVIXL);
static const vixl::aarch32::SRegister kRuntimeParameterFpuRegistersVIXL[] = {
    vixl::aarch32::s0,
    vixl::aarch32::s1,
    vixl::aarch32::s2,
    vixl::aarch32::s3
};
static const size_t kRuntimeParameterFpuRegistersLengthVIXL =
    arraysize(kRuntimeParameterFpuRegistersVIXL);

class LoadClassSlowPathARMVIXL;
class CodeGeneratorARMVIXL;

using VIXLInt32Literal = vixl::aarch32::Literal<int32_t>;
using VIXLUInt32Literal = vixl::aarch32::Literal<uint32_t>;

class JumpTableARMVIXL : public DeletableArenaObject<kArenaAllocSwitchTable> {
 public:
  explicit JumpTableARMVIXL(HPackedSwitch* switch_instr)
      : switch_instr_(switch_instr),
        table_start_(),
        bb_addresses_(switch_instr->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
    uint32_t num_entries = switch_instr_->GetNumEntries();
    for (uint32_t i = 0; i < num_entries; i++) {
      VIXLInt32Literal* lit = new VIXLInt32Literal(0, vixl32::RawLiteral::kManuallyPlaced);
      bb_addresses_.emplace_back(lit);
    }
  }

  vixl::aarch32::Label* GetTableStartLabel() { return &table_start_; }

  void EmitTable(CodeGeneratorARMVIXL* codegen);
  void FixTable(CodeGeneratorARMVIXL* codegen);

 private:
  HPackedSwitch* const switch_instr_;
  vixl::aarch32::Label table_start_;
  ArenaVector<std::unique_ptr<VIXLInt32Literal>> bb_addresses_;

  DISALLOW_COPY_AND_ASSIGN(JumpTableARMVIXL);
};
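
// A sketch of the dispatch sequence a JumpTableARMVIXL supports (hypothetical
// register names; the actual instructions are emitted when visiting the
// HPackedSwitch). Each table entry is an int32_t offset from the table start
// to a basic block:
//
//   adr  table_base, table_start_             @ address of the first entry
//   ldr  offset, [table_base, index, lsl #2]  @ load the selected entry
//   add  pc, table_base, offset               @ jump to the target block
//
// EmitTable() places the manually placed literals; FixTable() later patches
// each entry once the basic block addresses are known.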

class InvokeRuntimeCallingConventionARMVIXL
    : public CallingConvention<vixl::aarch32::Register, vixl::aarch32::SRegister> {
 public:
  InvokeRuntimeCallingConventionARMVIXL()
      : CallingConvention(kRuntimeParameterCoreRegistersVIXL,
                          kRuntimeParameterCoreRegistersLengthVIXL,
                          kRuntimeParameterFpuRegistersVIXL,
                          kRuntimeParameterFpuRegistersLengthVIXL,
                          kArmPointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConventionARMVIXL);
};

class InvokeDexCallingConventionARMVIXL
    : public CallingConvention<vixl::aarch32::Register, vixl::aarch32::SRegister> {
 public:
  InvokeDexCallingConventionARMVIXL()
      : CallingConvention(kParameterCoreRegistersVIXL,
                          kParameterCoreRegistersLengthVIXL,
                          kParameterFpuRegistersVIXL,
                          kParameterFpuRegistersLengthVIXL,
                          kArmPointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionARMVIXL);
};

class InvokeDexCallingConventionVisitorARMVIXL : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorARMVIXL() {}
  virtual ~InvokeDexCallingConventionVisitorARMVIXL() {}

  Location GetNextLocation(Primitive::Type type) OVERRIDE;
  Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
  Location GetMethodLocation() const OVERRIDE;

 private:
  InvokeDexCallingConventionARMVIXL calling_convention;
  uint32_t double_index_ = 0;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARMVIXL);
};

class FieldAccessCallingConventionARMVIXL : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionARMVIXL() {}

  Location GetObjectLocation() const OVERRIDE {
    return helpers::LocationFrom(vixl::aarch32::r1);
  }
  Location GetFieldIndexLocation() const OVERRIDE {
    return helpers::LocationFrom(vixl::aarch32::r0);
  }
  Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
    return Primitive::Is64BitType(type)
        ? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1)
        : helpers::LocationFrom(vixl::aarch32::r0);
  }
  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
    return Primitive::Is64BitType(type)
        ? helpers::LocationFrom(vixl::aarch32::r2, vixl::aarch32::r3)
        : (is_instance
            ? helpers::LocationFrom(vixl::aarch32::r2)
            : helpers::LocationFrom(vixl::aarch32::r1));
  }
  Location GetFpuLocation(Primitive::Type type) const OVERRIDE {
    return Primitive::Is64BitType(type)
        ? helpers::LocationFrom(vixl::aarch32::s0, vixl::aarch32::s1)
        : helpers::LocationFrom(vixl::aarch32::s0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARMVIXL);
};

class SlowPathCodeARMVIXL : public SlowPathCode {
 public:
  explicit SlowPathCodeARMVIXL(HInstruction* instruction)
      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

  vixl::aarch32::Label* GetEntryLabel() { return &entry_label_; }
  vixl::aarch32::Label* GetExitLabel() { return &exit_label_; }

  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;

 private:
  vixl::aarch32::Label entry_label_;
  vixl::aarch32::Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARMVIXL);
};
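
// A sketch of the usual emission pattern for a SlowPathCodeARMVIXL
// (`MySlowPathARMVIXL` is a hypothetical subclass; AddSlowPath() is
// provided by CodeGenerator):
//
//   SlowPathCodeARMVIXL* slow_path =
//       new (GetGraph()->GetArena()) MySlowPathARMVIXL(instruction);
//   codegen_->AddSlowPath(slow_path);
//   __ B(eq, slow_path->GetEntryLabel());  // Branch to the slow path...
//   __ Bind(slow_path->GetExitLabel());    // ...which jumps back here when done.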

class ParallelMoveResolverARMVIXL : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverARMVIXL(ArenaAllocator* allocator, CodeGeneratorARMVIXL* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) OVERRIDE;
  void EmitSwap(size_t index) OVERRIDE;
  void SpillScratch(int reg) OVERRIDE;
  void RestoreScratch(int reg) OVERRIDE;

  ArmVIXLAssembler* GetAssembler() const;

 private:
  void Exchange(vixl32::Register reg, int mem);
  void Exchange(int mem1, int mem2);

  CodeGeneratorARMVIXL* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARMVIXL);
};

class LocationsBuilderARMVIXL : public HGraphVisitor {
 public:
  LocationsBuilderARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  void HandleInvoke(HInvoke* invoke);
  void HandleBitwiseOperation(HBinaryOperation* operation, Opcode opcode);
  void HandleCondition(HCondition* condition);
  void HandleIntegerRotate(LocationSummary* locations);
  void HandleLongRotate(LocationSummary* locations);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  Location ArithmeticZeroOrFpuRegister(HInstruction* input);
  Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode);
  bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode);
  bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode, SetCc set_cc = kCcDontCare);

  CodeGeneratorARMVIXL* const codegen_;
  InvokeDexCallingConventionVisitorARMVIXL parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARMVIXL);
};

class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  ArmVIXLAssembler* GetAssembler() const { return assembler_; }
  ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }

 private:
  // Generate code for the given suspend check. If not null, `successor`
  // is the block to branch to if the suspend check is not needed, and after
  // the suspend call.
  void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
  void GenerateClassInitializationCheck(LoadClassSlowPathARMVIXL* slow_path,
                                        vixl32::Register class_reg);
  void GenerateAndConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
  void GenerateOrrConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
  void GenerateEorConst(vixl::aarch32::Register out, vixl::aarch32::Register first, uint32_t value);
  void GenerateAddLongConst(Location out, Location first, uint64_t value);
  void HandleBitwiseOperation(HBinaryOperation* operation);
  void HandleCondition(HCondition* condition);
  void HandleIntegerRotate(HRor* ror);
  void HandleLongRotate(HRor* ror);
  void HandleShift(HBinaryOperation* operation);

  void GenerateWideAtomicStore(vixl::aarch32::Register addr,
                               uint32_t offset,
                               vixl::aarch32::Register value_lo,
                               vixl::aarch32::Register value_hi,
                               vixl::aarch32::Register temp1,
                               vixl::aarch32::Register temp2,
                               HInstruction* instruction);
  void GenerateWideAtomicLoad(vixl::aarch32::Register addr,
                              uint32_t offset,
                              vixl::aarch32::Register out_lo,
                              vixl::aarch32::Register out_hi);
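
  // A sketch of the wide atomic store, assuming the standard ARM
  // exclusive-access sequence (exclusive loads/stores take a plain [Rn]
  // address, so the offset is folded in first; `temp1` receives the
  // STREXD status):
  //
  //     add    ip, addr, #offset
  //   retry:
  //     ldrexd temp1, temp2, [ip]               @ take the exclusive monitor
  //     strexd temp1, value_lo, value_hi, [ip]  @ attempt the 64-bit store
  //     cmp    temp1, #0                        @ 0 means the store succeeded
  //     bne    retry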

  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp,
                                         ReadBarrierOption read_barrier_option);
  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers based on read_barrier_option.
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               vixl::aarch32::Register obj,
                               uint32_t offset,
                               ReadBarrierOption read_barrier_option);
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             vixl::aarch32::Label* true_target,
                             vixl::aarch32::Label* false_target,
                             bool far_target = true);
  void GenerateCompareTestAndBranch(HCondition* condition,
                                    vixl::aarch32::Label* true_target,
                                    vixl::aarch32::Label* false_target,
                                    bool is_far_target = true);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemConstantIntegral(HBinaryOperation* instruction);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);

  vixl::aarch32::MemOperand VecAddress(
      HVecMemoryOperation* instruction,
      // This function may acquire a scratch register.
      vixl::aarch32::UseScratchRegisterScope* temps_scope,
      /*out*/ vixl32::Register* scratch);
  vixl::aarch32::AlignedMemOperand VecAddressUnaligned(
      HVecMemoryOperation* instruction,
      // This function may acquire a scratch register.
      vixl::aarch32::UseScratchRegisterScope* temps_scope,
      /*out*/ vixl32::Register* scratch);

  ArmVIXLAssembler* const assembler_;
  CodeGeneratorARMVIXL* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARMVIXL);
};

class CodeGeneratorARMVIXL : public CodeGenerator {
 public:
  CodeGeneratorARMVIXL(HGraph* graph,
                       const ArmInstructionSetFeatures& isa_features,
                       const CompilerOptions& compiler_options,
                       OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorARMVIXL() {}

  void GenerateFrameEntry() OVERRIDE;
  void GenerateFrameExit() OVERRIDE;
  void Bind(HBasicBlock* block) OVERRIDE;
  void MoveConstant(Location destination, int32_t value) OVERRIDE;
  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;

  size_t GetWordSize() const OVERRIDE {
    return static_cast<size_t>(kArmPointerSize);
  }

  size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return vixl::aarch32::kRegSizeInBytes; }

  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }

  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }

  ArmVIXLAssembler* GetAssembler() OVERRIDE { return &assembler_; }

  const ArmVIXLAssembler& GetAssembler() const OVERRIDE { return assembler_; }

  ArmVIXLMacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }

  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
    vixl::aarch32::Label* block_entry_label = GetLabelOf(block);
    DCHECK(block_entry_label->IsBound());
    return block_entry_label->GetLocation();
  }

  void FixJumpTables();
  void SetupBlockedRegisters() const OVERRIDE;

  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;

  ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
  InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kThumb2; }

  // Helper method to move a 32-bit value between two locations.
  void Move32(Location destination, Location source);

  void LoadFromShiftedRegOffset(Primitive::Type type,
                                Location out_loc,
                                vixl::aarch32::Register base,
                                vixl::aarch32::Register reg_index,
                                vixl::aarch32::Condition cond = vixl::aarch32::al);
  void StoreToShiftedRegOffset(Primitive::Type type,
                               Location out_loc,
                               vixl::aarch32::Register base,
                               vixl::aarch32::Register reg_index,
                               vixl::aarch32::Condition cond = vixl::aarch32::al);

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr) OVERRIDE;
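
  // For example, a slow path might call the null-pointer-exception entry
  // point roughly like this (a sketch; kQuickThrowNullPointer is one of the
  // QuickEntrypointEnum values, and `arm_codegen` a hypothetical local):
  //
  //   arm_codegen->InvokeRuntime(kQuickThrowNullPointer,
  //                              instruction_,
  //                              instruction_->GetDexPc(),
  //                              this);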

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path);

  // Emit a write barrier: mark the card table entry covering `object`, so the
  // GC knows `object` may now hold a reference to `value`. When `can_be_null`
  // is set, the marking is skipped if `value` is null.
  void MarkGCCard(vixl::aarch32::Register temp,
                  vixl::aarch32::Register card,
                  vixl::aarch32::Register object,
                  vixl::aarch32::Register value,
                  bool can_be_null);

  void GenerateMemoryBarrier(MemBarrierKind kind);

  vixl::aarch32::Label* GetLabelOf(HBasicBlock* block) {
    block = FirstNonEmptyBlock(block);
    return &(block_labels_[block->GetBlockId()]);
  }

  vixl32::Label* GetFinalLabel(HInstruction* instruction, vixl32::Label* final_label);

  void Initialize() OVERRIDE {
    block_labels_.resize(GetGraph()->GetBlocks().size());
  }

  void Finalize(CodeAllocator* allocator) OVERRIDE;

  const ArmInstructionSetFeatures& GetInstructionSetFeatures() const { return isa_features_; }

  bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE {
    return type == Primitive::kPrimDouble || type == Primitive::kPrimLong;
  }

  void ComputeSpillMask() OVERRIDE;

  vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }

  // Check if the desired_string_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;

  // Check if the desired_class_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;

  // Check if the desired_dispatch_info is supported. If it is, return it,
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      HInvokeStaticOrDirect* invoke) OVERRIDE;

  void GenerateStaticOrDirectCall(
      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
  void GenerateVirtualCall(
      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;

  void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;

  // The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays
  // and boot image strings/types. The only difference is the interpretation of the
  // offset_or_index. The PC-relative address is loaded with three instructions,
  // MOVW+MOVT to load the offset to base_reg and then ADD base_reg, PC. The offset
  // is calculated from the ADD's effective PC, i.e. PC+4 on Thumb2. Though we
  // currently emit these three instructions together, instruction scheduling could
  // split this sequence apart, so we keep separate labels for each of them.
  struct PcRelativePatchInfo {
    PcRelativePatchInfo(const DexFile& dex_file, uint32_t off_or_idx)
        : target_dex_file(dex_file), offset_or_index(off_or_idx) { }
    PcRelativePatchInfo(PcRelativePatchInfo&& other) = default;

    const DexFile& target_dex_file;
    // Either the dex cache array element offset or the string/type index.
    uint32_t offset_or_index;
    vixl::aarch32::Label movw_label;
    vixl::aarch32::Label movt_label;
    vixl::aarch32::Label add_pc_label;
  };
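
  // The emitted placeholder sequence looks roughly like this (the immediates
  // are dummies that are later patched via the three labels):
  //
  //   movw_label:   movw base_reg, #0  @ lower 16 bits of the offset
  //   movt_label:   movt base_reg, #0  @ upper 16 bits of the offset
  //   add_pc_label: add  base_reg, pc  @ offset is relative to PC+4 here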

  PcRelativePatchInfo* NewPcRelativeMethodPatch(MethodReference target_method);
  PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method);
  PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
  PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
  PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
                                                dex::StringIndex string_index);

  // Add a new Baker read barrier patch and return the label to be bound
  // before the BNE instruction.
  vixl::aarch32::Label* NewBakerReadBarrierPatch(uint32_t custom_data);

  VIXLUInt32Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
  VIXLUInt32Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
                                                 dex::StringIndex string_index,
                                                 Handle<mirror::String> handle);
  VIXLUInt32Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                                dex::TypeIndex type_index,
                                                Handle<mirror::Class> handle);

  void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;

  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;

  // Maybe add the reserved entrypoint register as a temporary for field load. This temp
  // is added only for AOT compilation if link-time generated thunks for fields are enabled.
  void MaybeAddBakerCcEntrypointTempForFields(LocationSummary* locations);

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             vixl::aarch32::Register obj,
                                             uint32_t offset,
                                             Location temp,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             vixl::aarch32::Register obj,
                                             uint32_t data_offset,
                                             Location index,
                                             Location temp,
                                             bool needs_null_check);
  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
  //
  // Load the object reference located at the address
  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
  // `ref`, and mark it if needed.
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 vixl::aarch32::Register obj,
                                                 uint32_t offset,
                                                 Location index,
                                                 ScaleFactor scale_factor,
                                                 Location temp,
                                                 bool needs_null_check);
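
  // In pseudo-code, the Baker fast path behaves roughly as follows (a sketch;
  // the call to the mark entry point is emitted out of line on a slow path):
  //
  //   uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
  //   ref = *(obj + offset + (index << scale_factor));  // The reference load.
  //   if (rb_state == ReadBarrier::GrayState()) {
  //     ref = ReadBarrier::Mark(ref);  // Runtime entry point call.
  //   }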

  // Generate code checking whether the reference field at the
  // address `obj + field_offset`, held by object `obj`, needs to be
  // marked, and if so, marking it and updating the field within `obj`
  // with the marked value.
  //
  // This routine is used for the implementation of the
  // UnsafeCASObject intrinsic with Baker read barriers.
  //
  // This method has a structure similar to
  // GenerateReferenceLoadWithBakerReadBarrier, but note that argument
  // `ref` is only used as a temporary here, and thus its value should
  // not be used afterwards.
  void UpdateReferenceFieldWithBakerReadBarrier(HInstruction* instruction,
                                                Location ref,
                                                vixl::aarch32::Register obj,
                                                Location field_offset,
                                                Location temp,
                                                bool needs_null_check,
                                                vixl::aarch32::Register temp2);

  // Generate a heap reference load (with no read barrier).
  void GenerateRawReferenceLoad(HInstruction* instruction,
                                Location ref,
                                vixl::aarch32::Register obj,
                                uint32_t offset,
                                Location index,
                                ScaleFactor scale_factor,
                                bool needs_null_check);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  void GenerateNop() OVERRIDE;

  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;

  JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
    jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARMVIXL(switch_instr));
    return jump_tables_.back().get();
  }
  void EmitJumpTables();

  void EmitMovwMovtPlaceholder(CodeGeneratorARMVIXL::PcRelativePatchInfo* labels,
                               vixl::aarch32::Register out);
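
  // Typical use of the placeholder API (a sketch, mirroring how a PC-relative
  // type load might be emitted):
  //
  //   CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
  //       codegen_->NewPcRelativeTypePatch(dex_file, type_index);
  //   codegen_->EmitMovwMovtPlaceholder(labels, out);
  //   // `out` holds the PC-relative address once the sequence is patched.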

  // `temp` is an extra temporary register that is used for some conditions;
  // callers may omit it, in which case the method will use a scratch
  // register instead.
  void GenerateConditionWithZero(IfCondition condition,
                                 vixl::aarch32::Register out,
                                 vixl::aarch32::Register in,
                                 vixl::aarch32::Register temp = vixl32::Register());

 private:
  vixl::aarch32::Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
                                                                vixl::aarch32::Register temp);

  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, VIXLUInt32Literal*>;
  using StringToLiteralMap = ArenaSafeMap<StringReference,
                                          VIXLUInt32Literal*,
                                          StringReferenceValueComparator>;
  using TypeToLiteralMap = ArenaSafeMap<TypeReference,
                                        VIXLUInt32Literal*,
                                        TypeReferenceValueComparator>;

  struct BakerReadBarrierPatchInfo {
    explicit BakerReadBarrierPatchInfo(uint32_t data) : label(), custom_data(data) { }

    vixl::aarch32::Label label;
    uint32_t custom_data;
  };

  VIXLUInt32Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
  PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
                                          uint32_t offset_or_index,
                                          ArenaDeque<PcRelativePatchInfo>* patches);
  template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  static void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
                                          ArenaVector<LinkerPatch>* linker_patches);

  // Labels for each block that will be compiled.
  // We use a deque so that the `vixl::aarch32::Label` objects do not move in memory.
  ArenaDeque<vixl::aarch32::Label> block_labels_;  // Indexed by block id.
  vixl::aarch32::Label frame_entry_label_;

  ArenaVector<std::unique_ptr<JumpTableARMVIXL>> jump_tables_;
  LocationsBuilderARMVIXL location_builder_;
  InstructionCodeGeneratorARMVIXL instruction_visitor_;
  ParallelMoveResolverARMVIXL move_resolver_;

  ArmVIXLAssembler assembler_;
  const ArmInstructionSetFeatures& isa_features_;

  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
  Uint32ToLiteralMap uint32_literals_;
  // PC-relative method patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> pc_relative_method_patches_;
  // PC-relative method patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
  // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
  ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
  // Baker read barrier patch info.
  ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;

  // Patches for string literals in JIT compiled code.
  StringToLiteralMap jit_string_patches_;
  // Patches for class literals in JIT compiled code.
  TypeToLiteralMap jit_class_patches_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARMVIXL);
};

}  // namespace arm
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_