/art/compiler/jni/quick/x86_64/ |
calling_convention_x86_64.cc |
  105 // We spill the argument registers on X86 to free them up for scratch use, we then assume
  114 ManagedRegisterSpill spill(in_reg, size, spill_offset);
  115 entry_spills_.push_back(spill);
  155 // Plus return value spill area size
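The calling-convention hits above record, on method entry, one spill per incoming argument register. A minimal sketch of that bookkeeping pattern, using an illustrative RegisterSpill record rather than ART's real ManagedRegisterSpill type:

```cpp
#include <cstdint>
#include <vector>

// Illustrative stand-in for ART's managed-register spill record; the real
// types live under art/compiler/jni/quick/.
struct RegisterSpill {
  int reg;          // argument register to save
  uint32_t size;    // spill size in bytes (4 or 8)
  int32_t offset;   // stack offset the register is written to
};

// Collects one spill record per argument register, mirroring the
// entry_spills_.push_back(spill) pattern seen in calling_convention_x86_64.cc.
std::vector<RegisterSpill> CollectEntrySpills(const std::vector<int>& arg_regs,
                                              uint32_t slot_size) {
  std::vector<RegisterSpill> entry_spills;
  int32_t spill_offset = 0;
  for (int reg : arg_regs) {
    entry_spills.push_back(RegisterSpill{reg, slot_size, spill_offset});
    spill_offset += static_cast<int32_t>(slot_size);
  }
  return entry_spills;
}
```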
|
/art/runtime/arch/arm/ |
jni_entrypoints_arm.S | 24 push {r0, r1, r2, r3, lr} @ spill regs
|
/art/runtime/interpreter/mterp/x86/ |
op_shl_long.S | 11 /* Need to spill rINST */
|
op_shr_long.S | 11 /* Need to spill rIBASE */
|
op_ushr_long.S | 11 /* Need to spill rIBASE */
|
/art/test/528-long-hint/src/ |
Main.java | 30 getUnsafe(); // spill offset
|
/external/llvm/lib/Target/AMDGPU/ |
SIMachineFunctionInfo.cpp |
  155 struct SpilledReg Spill;
  169 Spill.VGPR = LaneVGPRs[LaneVGPRIdx];
  170 Spill.Lane = Lane;
  171 return Spill;
|
SIMachineFunctionInfo.h |
  264 void setHasSpilledSGPRs(bool Spill = true) {
  265 HasSpilledSGPRs = Spill;
  272 void setHasSpilledVGPRs(bool Spill = true) {
  273 HasSpilledVGPRs = Spill;
|
SIRegisterInfo.cpp |
  109 // Reserve 1 SGPR for scratch wave offset in case we need to spill.
  116 // to spill.
  197 default: llvm_unreachable("Invalid spill opcode");
  271 // SGPR register spill
  282 struct SIMachineFunctionInfo::SpilledReg Spill =
  285 if (Spill.VGPR == AMDGPU::NoRegister) {
  292 Spill.VGPR)
  294 .addImm(Spill.Lane);
  315 struct SIMachineFunctionInfo::SpilledReg Spill =
  318 if (Spill.VGPR == AMDGPU::NoRegister) [all...]
|
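The AMDGPU hits describe SGPR spilling: a spilled SGPR is parked in one lane of a reserved VGPR instead of going to memory, which is why SpilledReg carries a VGPR plus a Lane index. A minimal sketch of that pairing, with simplified illustrative types rather than the backend's real classes in SIMachineFunctionInfo.{h,cpp}:

```cpp
#include <vector>

// Illustrative model of the VGPR/Lane pair used for SGPR spills; the field
// names follow the SpilledReg hits above, but this is not the real class.
struct SpilledReg {
  unsigned VGPR = 0;   // lane-holding VGPR (0 stands in for AMDGPU::NoRegister)
  unsigned Lane = 0;   // which of the 64 lanes holds the spilled SGPR value
  bool hasReg() const { return VGPR != 0; }
};

class SpillSlots {
 public:
  // Hands out the next free lane, taking a fresh lane-VGPR every 64 spills.
  SpilledReg assign(unsigned next_vgpr) {
    unsigned lane = num_spills_ % 64;
    if (lane == 0) lane_vgprs_.push_back(next_vgpr);
    ++num_spills_;
    return SpilledReg{lane_vgprs_.back(), lane};
  }

 private:
  std::vector<unsigned> lane_vgprs_;  // VGPRs reserved to hold spilled SGPR lanes
  unsigned num_spills_ = 0;
};
```

The checks against AMDGPU::NoRegister in the SIRegisterInfo.cpp hits correspond roughly to the hasReg() guard here: when no lane VGPR was reserved, the spill cannot be rewritten this way.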
/external/llvm/test/CodeGen/AArch64/ |
arm64-platform-reg.ll | 22 ; CHECK-RESERVE-X18: Spill
|
/external/llvm/test/CodeGen/MIR/Mips/ |
memory-operands.mir |
  40 - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4,
  72 - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4,
  74 - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4,
  76 - { id: 2, type: spill-slot, offset: -12, size: 4, alignment: 4,
|
/external/llvm/test/CodeGen/MIR/X86/ |
spill-slot-fixed-stack-object-aliased.mir | 22 - { id: 0, type: spill-slot, offset: 0, size: 4, isAliased: true }
|
spill-slot-fixed-stack-object-immutable.mir | 22 - { id: 0, type: spill-slot, offset: 0, size: 4, isImmutable: true }
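The MIR tests above serialize stack objects whose type is spill-slot. On the C++ side such objects are the ones created through MachineFrameInfo as spill slots rather than ordinary allocas; a minimal sketch, assuming the 3.8-era signature carried in external/llvm where the alignment argument is an unsigned (newer LLVM takes an llvm::Align):

```cpp
#include <cassert>
#include "llvm/CodeGen/MachineFrameInfo.h"

using namespace llvm;

// Creates a 4-byte, 4-aligned spill slot like the "type: spill-slot" entries
// in the MIR tests above. CreateSpillStackObject marks the frame object as a
// spill slot, which is what isSpillSlotObjectIndex() later reports.
static int createWordSpillSlot(MachineFrameInfo &MFI) {
  int FI = MFI.CreateSpillStackObject(/*Size=*/4, /*Alignment=*/4);
  assert(MFI.isSpillSlotObjectIndex(FI) && "expected a spill slot index");
  return FI;
}
```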
|
/external/llvm/test/CodeGen/SPARC/ |
spillsize.ll | 7 ; Both must use 8-byte spill and fill instructions.
|
/external/llvm/test/CodeGen/X86/ |
2011-10-11-SpillDead.ll | 7 ; The call to @g forces a spill of that register.
|
pr23103.ll | 12 ; CHECK-NEXT: vmovsd %xmm0, {{.*}}(%rsp) {{.*#+}} 8-byte Spill
|
reghinting.ll | 4 ;; The registers %x and %y must both spill across the finit call.
|
win32-seh-catchpad-realign.ll |
  50 ; Spill EBP
  52 ; Spill ESP
|
/external/v8/src/compiler/ |
live-range-separator.h | 52 // because they would "spill in deferred blocks" anyway.
|
/toolchain/binutils/binutils-2.25/gas/testsuite/gas/ia64/ |
unwind-err.s | 29 .spill 0
|
/external/llvm/lib/CodeGen/ |
InlineSpiller.cpp |
  54 static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
  55 cl::desc("Disable inline spill hoisting"));
  72 // Variables that are valid during spill(), but used by multiple methods.
  78 // All registers to spill to StackSlot, including the main register.
  92 // True when all reaching defs were reloads: No spill is necessary.
  101 // The preferred register to spill.
  152 void spill(LiveRangeEdit &) override;
  203 // When spilling a virtual register, we also spill any snippets it is connected
  209 // spill slots which can be important in tight loops.
  234 // %Reg = COPY %snip / SPILL %snip, fi
  1371 void InlineSpiller::spill(LiveRangeEdit &edit) { function in class:InlineSpiller [all...]
|
RegisterScavenging.cpp |
  13 // spill slots.
  382 // have to spill.
  392 // If we found an unused register there is no reason to spill it.
  405 // We need to scavenge a register but have no spill slot, the target
  414 // otherwise, use the emergency stack spill slot.
  416 // Spill the scavenged register before I.
  418 "Cannot scavenge register without an emergency spill slot!");
  440 DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg) <<
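The RegisterScavenging hits spell out a fallback chain: prefer a register that is already unused, otherwise spill a live one into an emergency spill slot the target reserved in advance, and assert if no such slot exists. A minimal sketch of that decision, using illustrative types rather than the real scavenger API in RegisterScavenging.cpp:

```cpp
#include <cassert>
#include <optional>

// Illustrative frame model: the target may have reserved one emergency slot.
struct Frame {
  int emergency_slot = -1;  // frame index reserved for scavenging, or -1 if none
  bool hasEmergencySlot() const { return emergency_slot >= 0; }
};

// Returns a register for temporary use. If one is already unused there is
// nothing to spill; otherwise a live register is saved to the emergency slot
// before the use and reloaded after it, mirroring the comments above.
unsigned scavengeRegister(const Frame& frame,
                          std::optional<unsigned> unused_reg,
                          unsigned victim_reg,
                          bool* needs_reload) {
  if (unused_reg) {  // "If we found an unused register there is no reason to spill it."
    *needs_reload = false;
    return *unused_reg;
  }
  assert(frame.hasEmergencySlot() &&
         "Cannot scavenge register without an emergency spill slot!");
  *needs_reload = true;  // caller emits the store before and the reload after the use
  return victim_reg;
}
```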
|
/art/compiler/optimizing/ |
register_allocator.h |
  106 // Allocate a spill slot for the given interval. Should be called in linear
  110 // Allocate a spill slot for the given catch phi. Will allocate the same slot
  198 // The spill slots allocated for live intervals. We ensure spill slots
  207 // Spill slots allocated to catch phis. This category is special-cased because
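The register_allocator.h hits note two responsibilities: spill slots for ordinary live intervals are handed out in linear-scan order, and all inputs of a catch phi share one slot. A generic sketch of the first part, with a simplified LiveInterval that is not ART's real class:

```cpp
#include <cstddef>
#include <vector>

// Simplified live interval: just its range and the slot it was assigned.
struct LiveInterval {
  size_t start;
  size_t end;
  int spill_slot = -1;
};

// Assigns spill slots to intervals visited in increasing start order,
// reusing a slot once the interval that held it has ended. Illustrative only;
// it mirrors the "allocate a spill slot for the given interval" comment above.
class SpillSlotAllocator {
 public:
  void Allocate(LiveInterval* interval) {
    for (size_t slot = 0; slot < slot_end_.size(); ++slot) {
      if (slot_end_[slot] <= interval->start) {  // previous occupant has ended
        interval->spill_slot = static_cast<int>(slot);
        slot_end_[slot] = interval->end;
        return;
      }
    }
    interval->spill_slot = static_cast<int>(slot_end_.size());
    slot_end_.push_back(interval->end);
  }

 private:
  std::vector<size_t> slot_end_;  // end position of the interval currently in each slot
};
```

Catch phis are the special case the later hits describe: their inputs must all land in the same slot so the catch-block entry state is well defined, which is why they get a separate, shared allocation path.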
|
/external/llvm/test/CodeGen/MIR/ARM/ |
cfi-same-value.mir |
  25 - { id: 1, type: spill-slot, offset: -4, size: 4, alignment: 4,
  27 - { id: 2, type: spill-slot, offset: -8, size: 4, alignment: 4,
|
/external/llvm/test/CodeGen/Mips/ |
stldst.ll |
  36 ; 16: sw ${{[0-9]+}}, {{[0-9]+}} ( $sp ); # 4-byte Folded Spill
  38 ; 16: sw ${{[0-9]+}}, {{[0-9]+}} ( $sp ); # 4-byte Folded Spill
|