/art/compiler/utils/x86/ |
assembler_x86_test.cc |
    110  GetAssembler()->movl(x86::EAX, x86::EBX);
    116  GetAssembler()->movntl(x86::Address(x86::EDI, x86::EBX, x86::TIMES_4, 12), x86::EAX);
    117  GetAssembler()->movntl(x86::Address(x86::EDI, 0), x86::EAX);
    126  GetAssembler()->psrlq(x86::XMM0, CreateImmediate(32));
    132  GetAssembler()->punpckldq(x86::XMM0, x86::XMM1);
    138  GetAssembler()->LoadLongConstant(x86::XMM0, 51);
    148  GetAssembler()->LockCmpxchgl(x86::Address(
    151  GetAssembler()->LockCmpxchgl(x86::Address(
    154  GetAssembler()->LockCmpxchgl(x86::Address(
    157  GetAssembler()->LockCmpxchgl(x86::Address
    [all...]
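These matches show the shape of ART's assembler tests: each test mutates a per-fixture assembler through GetAssembler() and the harness then checks the emitted bytes. A minimal self-contained sketch of that fixture pattern, with a toy assembler standing in for x86::X86Assembler (ToyAssembler, movl_eax_ebx, and ToyAssemblerTest are illustrative names, not ART's API):

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>
    #include <vector>

    // Toy stand-in for x86::X86Assembler; it only records emitted bytes.
    class ToyAssembler {
     public:
      void movl_eax_ebx() { Emit({0x89, 0xD8}); }  // encodes "mov eax, ebx"
      const std::vector<uint8_t>& code() const { return code_; }
     private:
      void Emit(std::initializer_list<uint8_t> bytes) {
        code_.insert(code_.end(), bytes.begin(), bytes.end());
      }
      std::vector<uint8_t> code_;
    };

    // Fixture shape mirrored from the tests above: one assembler per test,
    // always reached through GetAssembler().
    class ToyAssemblerTest {
     public:
      ToyAssembler* GetAssembler() { return &assembler_; }
     private:
      ToyAssembler assembler_;
    };

    int main() {
      ToyAssemblerTest test;
      test.GetAssembler()->movl_eax_ebx();
      assert((test.GetAssembler()->code() == std::vector<uint8_t>{0x89, 0xD8}));
      return 0;
    }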
/art/compiler/utils/x86_64/ |
assembler_x86_64_test.cc |
    687  GetAssembler()->LockCmpxchgl(x86_64::Address(
    690  GetAssembler()->LockCmpxchgl(x86_64::Address(
    693  GetAssembler()->LockCmpxchgl(x86_64::Address(
    696  GetAssembler()->LockCmpxchgl(x86_64::Address(
    698  GetAssembler()->LockCmpxchgl(x86_64::Address(
    712  GetAssembler()->LockCmpxchgq(x86_64::Address(
    715  GetAssembler()->LockCmpxchgq(x86_64::Address(
    718  GetAssembler()->LockCmpxchgq(x86_64::Address(
    721  GetAssembler()->LockCmpxchgq(x86_64::Address(
    723  GetAssembler()->LockCmpxchgq(x86_64::Address
    [all...]
/art/compiler/optimizing/ |
intrinsics_mips64.cc |
    38  Mips64Assembler* IntrinsicCodeGeneratorMIPS64::GetAssembler() {
    39  return reinterpret_cast<Mips64Assembler*>(codegen_->GetAssembler());
    46  #define __ codegen->GetAssembler()->
    166  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
    175  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
    203  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
    212  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
    254  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
    263  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
    272  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
    [all...]
intrinsics_arm.cc |
    34  ArmAssembler* IntrinsicCodeGeneratorARM::GetAssembler() {
    35  return codegen_->GetAssembler();
    116  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
    119  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
    130  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
    133  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
    179  GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
    191  GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
    228  GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
    240  GenNumberOfTrailingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
    [all...]
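The `#define __ codegen->GetAssembler()->` matches above are the convention the intrinsics files use so that each emitted instruction reads as `__ add(...)`. A compilable toy sketch of the same macro trick (ToyAssembler and ToyCodeGenerator are illustrative stand-ins, not ART types):

    #include <cstdio>

    struct ToyAssembler {
      void add(int rd, int rn, int rm) { std::printf("add r%d, r%d, r%d\n", rd, rn, rm); }
    };

    struct ToyCodeGenerator {
      ToyAssembler* GetAssembler() { return &assembler_; }
      ToyAssembler assembler_;
    };

    // Same convention as the matches above: "__" hides the accessor chain.
    #define __ codegen->GetAssembler()->

    void EmitSequence(ToyCodeGenerator* codegen) {
      __ add(0, 1, 2);  // expands to codegen->GetAssembler()->add(0, 1, 2);
      __ add(3, 4, 5);
    }

    #undef __

    int main() {
      ToyCodeGenerator codegen;
      EmitSequence(&codegen);
      return 0;
    }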
intrinsics_mips.h | 75 MipsAssembler* GetAssembler();
|
intrinsics_mips64.h | 71 Mips64Assembler* GetAssembler();
|
intrinsics_x86.h | 72 X86Assembler* GetAssembler();
|
intrinsics_x86_64.h | 72 X86_64Assembler* GetAssembler();
|
optimizing_cfi_test.cc |
    58  code_gen_->GetAssembler()->cfi().SetEnabled(true);
    97  Assembler* opt_asm = code_gen_->GetAssembler();
    175  #define __ down_cast<arm::Thumb2Assembler*>(GetCodeGenerator()->GetAssembler())->
    203  #define __ down_cast<mips::MipsAssembler*>(GetCodeGenerator()->GetAssembler())->
    231  #define __ down_cast<mips64::Mips64Assembler*>(GetCodeGenerator()->GetAssembler())->
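The down_cast<...>(GetCodeGenerator()->GetAssembler()) matches recover the per-architecture assembler from the base Assembler* before defining the local `__` macro. A self-contained sketch of a checked down_cast in that spirit (this approximates what art::down_cast does, but is not its exact definition):

    #include <cassert>
    #include <type_traits>

    struct Assembler { virtual ~Assembler() = default; };
    struct Thumb2Assembler : Assembler { void bkpt() {} };

    // Approximation of art::down_cast: a static_cast whose correctness is
    // asserted against dynamic_cast in debug builds.
    template <typename To, typename From>
    To down_cast(From* f) {
      static_assert(std::is_base_of<From, typename std::remove_pointer<To>::type>::value,
                    "down_cast must move down the inheritance hierarchy");
      assert(f == nullptr || dynamic_cast<To>(f) != nullptr);
      return static_cast<To>(f);
    }

    int main() {
      Thumb2Assembler concrete;
      Assembler* base = &concrete;  // what a generic GetAssembler() would return
      down_cast<Thumb2Assembler*>(base)->bkpt();
      return 0;
    }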
|
intrinsics_arm.h | 78 ArmAssembler* GetAssembler();
|
intrinsics_utils.h | 51 Assembler* assembler = codegen->GetAssembler();
|
intrinsics_x86_64.cc |
    43  X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() {
    44  return down_cast<X86_64Assembler*>(codegen_->GetAssembler());
    118  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
    121  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
    132  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
    135  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
    174  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
    182  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
    190  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
    233  MathAbsFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler(), codegen_)
    [all...]
intrinsics_mips.cc |
    38  MipsAssembler* IntrinsicCodeGeneratorMIPS::GetAssembler() {
    39  return reinterpret_cast<MipsAssembler*>(codegen_->GetAssembler());
    58  #define __ codegen->GetAssembler()->
    182  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
    191  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
    224  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
    233  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
    411  GetAssembler());
    425  GetAssembler());
    439  GetAssembler());
    [all...]
intrinsics_x86.cc |
    49  X86Assembler* IntrinsicCodeGeneratorX86::GetAssembler() {
    50  return down_cast<X86Assembler*>(codegen_->GetAssembler());
    150  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
    153  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
    164  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
    167  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
    219  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
    235  X86Assembler* assembler = GetAssembler();
    248  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
    313  MathAbsFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler(), codegen_)
    [all...]
code_generator_arm64.h |
    209  Arm64Assembler* GetAssembler() const { return assembler_; }
    210  vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
    328  Arm64Assembler* GetAssembler() const;
    330  return GetAssembler()->vixl_masm_;
    377  Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
    378  const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
    379  vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
    655  inline Arm64Assembler* ParallelMoveResolverARM64::GetAssembler() const {
    656  return codegen_->GetAssembler();
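code_generator_arm64.h shows a two-level accessor: GetAssembler() returns the backend's Arm64Assembler, and GetVIXLAssembler() reaches through it to the wrapped VIXL macro assembler. A minimal sketch of that delegation (MacroAssembler below is a stand-in, not the real vixl::MacroAssembler):

    struct MacroAssembler {};  // stand-in for vixl::MacroAssembler

    struct Arm64Assembler {
      MacroAssembler masm;
      MacroAssembler* vixl_masm_ = &masm;  // the wrapped VIXL assembler
    };

    struct CodeGeneratorARM64 {
      Arm64Assembler* GetAssembler() { return &assembler_; }
      // Convenience accessor mirroring the header above.
      MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
      Arm64Assembler assembler_;
    };

    int main() {
      CodeGeneratorARM64 codegen;
      return codegen.GetVIXLAssembler() == &codegen.GetAssembler()->masm ? 0 : 1;
    }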
|
code_generator_mips.h |
    145  MipsAssembler* GetAssembler() const;
    218  MipsAssembler* GetAssembler() const { return assembler_; }
    284  MipsAssembler* GetAssembler() OVERRIDE { return &assembler_; }
    285  const MipsAssembler& GetAssembler() const OVERRIDE { return assembler_; }
|
code_generator_mips64.h |
    145  Mips64Assembler* GetAssembler() const;
    218  Mips64Assembler* GetAssembler() const { return assembler_; }
    280  Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
    281  const Mips64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
|
code_generator.cc |
    169  start_offset_ = codegen_.GetAssembler().CodeSize();
    177  instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
    194  code_start = GetAssembler()->CodeSize();
    200  disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
    215  size_t frame_start = GetAssembler()->CodeSize();
    217  DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
    219  disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
    260  size_t code_size = GetAssembler()->CodeSize();
    264  GetAssembler()->FinalizeInstructions(code);
    741  uint32_t native_pc = GetAssembler()->CodeSize()
    [all...]
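The code_generator.cc matches sample GetAssembler()->CodeSize() before and after emitting a region (frame entry, slow paths) and record the resulting [start, end) byte interval for the disassembly listing. A runnable sketch of that bookkeeping (ToyAssembler and DisassemblyInfo are simplified stand-ins for the real classes):

    #include <cstddef>
    #include <cstdio>
    #include <utility>
    #include <vector>

    struct ToyAssembler {
      size_t CodeSize() const { return size_; }
      void EmitBytes(size_t n) { size_ += n; }  // stands in for instruction emission
      size_t size_ = 0;
    };

    struct DisassemblyInfo {
      std::vector<std::pair<size_t, size_t>> slow_path_intervals;
    };

    void EmitSlowPath(ToyAssembler* assembler, DisassemblyInfo* info) {
      size_t code_start = assembler->CodeSize();
      assembler->EmitBytes(12);  // the slow path's instructions would go here
      info->slow_path_intervals.emplace_back(code_start, assembler->CodeSize());
    }

    int main() {
      ToyAssembler assembler;
      DisassemblyInfo info;
      assembler.EmitBytes(32);  // pretend the main code is already emitted
      EmitSlowPath(&assembler, &info);
      std::printf("slow path at [%zu, %zu)\n",
                  info.slow_path_intervals[0].first, info.slow_path_intervals[0].second);
      return 0;
    }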
intrinsics_arm64.cc |
    60  return codegen_->GetAssembler()->vixl_masm_;
    67  #define __ codegen->GetAssembler()->vixl_masm_->
    781  vixl::MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
    [all...]
code_generator_arm.h |
    142  ArmAssembler* GetAssembler() const;
    210  ArmAssembler* GetAssembler() const { return assembler_; }
    334  ArmAssembler* GetAssembler() OVERRIDE {
    338  const ArmAssembler& GetAssembler() const OVERRIDE {
|
code_generator_x86.h |
    135  X86Assembler* GetAssembler() const;
    198  X86Assembler* GetAssembler() const { return assembler_; }
    356  X86Assembler* GetAssembler() OVERRIDE {
    360  const X86Assembler& GetAssembler() const OVERRIDE {
|
code_generator_x86_64.h |
    139  X86_64Assembler* GetAssembler() const;
    204  X86_64Assembler* GetAssembler() const { return assembler_; }
    337  X86_64Assembler* GetAssembler() OVERRIDE {
    341  const X86_64Assembler& GetAssembler() const OVERRIDE {
|
code_generator.h |
    204  virtual Assembler* GetAssembler() = 0;
    205  virtual const Assembler& GetAssembler() const = 0;
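code_generator.h declares the accessor as a pure virtual pair, and each backend header above overrides it with a covariant return type (X86Assembler*, MipsAssembler&, ...), so code holding the concrete generator gets the concrete assembler without a cast. A compilable sketch of that pairing (class bodies are simplified):

    struct Assembler { virtual ~Assembler() = default; };
    struct X86Assembler : Assembler {};

    struct CodeGenerator {
      virtual ~CodeGenerator() = default;
      virtual Assembler* GetAssembler() = 0;
      virtual const Assembler& GetAssembler() const = 0;
    };

    struct CodeGeneratorX86 : CodeGenerator {
      // Covariant return types: callers with a CodeGeneratorX86* skip the down_cast.
      X86Assembler* GetAssembler() override { return &assembler_; }
      const X86Assembler& GetAssembler() const override { return assembler_; }
      X86Assembler assembler_;
    };

    int main() {
      CodeGeneratorX86 codegen;
      X86Assembler* concrete = codegen.GetAssembler();  // no cast needed
      Assembler* base = static_cast<CodeGenerator&>(codegen).GetAssembler();
      return concrete == base ? 0 : 1;
    }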
|
/art/compiler/utils/arm/ |
assembler_arm32_test.cc |
    535  return std::bind(f, GetAssembler(), _1, _2);
    540  return std::bind(f, GetAssembler(), _1, _2, _3);
    546  return std::bind(f, GetAssembler(), _1, _2, _3, _4);
    552  return std::bind(f, GetAssembler(), _1, _2, _3, _4, _5);
    855  GetAssembler()->vmstat();
    863  GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R0);
    864  GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R1);
    865  GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R2);
    875  GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R0);
    876  GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R1)
    [all...]
assembler_arm_test.h |
    153  (Base::GetAssembler()->*f)(*reg1, *reg2, i, j, c);
    245  (Base::GetAssembler()->*f)(*reg1, *reg2, i, j, c);
    308  (Base::GetAssembler()->*f)(*reg1, *reg2, c);
    383  (Base::GetAssembler()->*f)(*reg1, *reg2, *reg3, c);
    438  (Base::GetAssembler()->*f)(*reg, shift, c);
    503  (Base::GetAssembler()->*f)(*reg1, *reg2, shift, c);
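The two ARM test files use complementary invocation styles: assembler_arm32_test.cc binds an assembler member function with std::bind plus placeholders, while assembler_arm_test.h calls through a raw pointer-to-member with (Base::GetAssembler()->*f)(...). A self-contained sketch of both styles (ToyArmAssembler is illustrative, not ART's ArmAssembler):

    #include <cstdio>
    #include <functional>

    struct ToyArmAssembler {
      void add(int rd, int rn) { std::printf("add r%d, r%d\n", rd, rn); }
    };

    int main() {
      using namespace std::placeholders;
      ToyArmAssembler assembler;
      ToyArmAssembler* asm_ptr = &assembler;  // stands in for GetAssembler()

      // std::bind style: fix the assembler object, leave operands as placeholders.
      auto bound = std::bind(&ToyArmAssembler::add, asm_ptr, _1, _2);
      bound(0, 1);

      // Pointer-to-member style, as in (Base::GetAssembler()->*f)(*reg1, *reg2, c).
      void (ToyArmAssembler::*f)(int, int) = &ToyArmAssembler::add;
      (asm_ptr->*f)(2, 3);
      return 0;
    }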
|