/external/libunwind/tests/ |
ia64-test-rbs-asm.S |
  40 #define SPILL(n) \
  123 SPILL(2); SPILL(3)
  124 SPILL(4); SPILL(5); SPILL(6); SPILL(7)
  125 SPILL(8); SPILL(9); SPILL(10); SPILL(11 [all...] |
/external/libunwind/src/hppa/ |
getcontext.S |
  26 #define SPILL(n) stw %r##n, (LINUX_UC_MCONTEXT_OFF+LINUX_SC_GR_OFF+4*(n))(%r26)
  36 SPILL (2) /* return-pointer */
  37 SPILL (3) /* frame pointer */
  38 SPILL (4) /* 2nd-ary frame pointer */
  39 SPILL (5) /* preserved register */
  40 SPILL (6) /* preserved register */
  41 SPILL (7) /* preserved register */
  42 SPILL (8) /* preserved register */
  43 SPILL (9) /* preserved register */
  44 SPILL (10) /* preserved register * [all...] |
/external/llvm/lib/CodeGen/ |
Spiller.h |
  22 /// Implementations are utility classes which insert spill or remat code on
  29 /// spill - Spill the LRE.getParent() live interval.
  30 virtual void spill(LiveRangeEdit &LRE) = 0;
  34 /// Create and return a spiller that will insert spill code directly instead
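A note on these Spiller.h hits: they outline an abstract utility class with a single spill() entry point, plus a factory for an "inline" spiller that inserts spill code directly. A minimal self-contained sketch of that shape; LiveRangeEdit here is a toy stand-in, not LLVM's class, and the factory name beyond the snippet is a guess:

    #include <cstdio>
    #include <memory>

    // Toy stand-in for LLVM's LiveRangeEdit: it names the live interval
    // being rewritten. The real class carries far more state.
    struct LiveRangeEdit { int VReg; };

    // The abstract interface from the line 29-30 hits: implementations
    // insert spill (or rematerialization) code for the edited live range.
    struct Spiller {
      virtual ~Spiller() = default;
      virtual void spill(LiveRangeEdit &LRE) = 0;
    };

    // The "inline" flavor from the line 34 hit inserts spill code directly.
    struct InlineSpiller : Spiller {
      void spill(LiveRangeEdit &LRE) override {
        std::printf("spill: vreg %d -> stack slot\n", LRE.VReg);
      }
    };

    std::unique_ptr<Spiller> createInlineSpiller() {
      return std::make_unique<InlineSpiller>();
    }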
|
RegAllocBasic.cpp |
  57 /// algorithm. It prioritizes live virtual registers by spill weight and spills
  163 // Spill or split all live virtual registers currently unified under PhysReg
  189 // Spill each interfering vreg allocated to PhysReg or an alias.
  191 LiveInterval &Spill = *Intfs[i];
  194 if (!VRM->hasPhys(Spill.reg))
  199 Matrix->unassign(Spill);
  201 // Spill the extracted interval.
  202 LiveRangeEdit LRE(&Spill, SplitVRegs, *MF, *LIS, VRM);
  203 spiller().spill(LRE);
  222 // Populate a list of physical register spill candidates [all...] |
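These hits trace the basic allocator's eviction path: every live interval still assigned to the contested physical register (or an alias) is unassigned and handed to the spiller. A hedged, self-contained paraphrase of that loop with toy stand-ins for the LLVM types:

    #include <unordered_map>
    #include <vector>

    struct LiveInterval { int reg; };

    // Toy stand-ins for LLVM's VirtRegMap and LiveRegMatrix.
    struct VirtRegMap {
      std::unordered_map<int, int> phys;  // vreg -> assigned physreg
      bool hasPhys(int vreg) const { return phys.count(vreg) != 0; }
    };
    struct LiveRegMatrix {
      VirtRegMap *vrm;
      void unassign(LiveInterval &li) { vrm->phys.erase(li.reg); }
    };

    // Paraphrase of the line 189-203 hits: spill every interfering vreg
    // still allocated to the contested physreg or one of its aliases.
    void spillInterferences(std::vector<LiveInterval *> &intfs,
                            VirtRegMap &vrm, LiveRegMatrix &matrix,
                            std::vector<LiveInterval *> &toSpill) {
      for (LiveInterval *li : intfs) {
        if (!vrm.hasPhys(li->reg))  // may already be evicted via an alias
          continue;
        matrix.unassign(*li);   // free the physical register
        toSpill.push_back(li);  // the real loop builds a LiveRangeEdit
                                // here and calls spiller().spill(LRE)
      }
    }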
SpillPlacement.h |
  1 //===-- SpillPlacement.h - Optimal Spill Code Placement --------*- C++ -*--===//
  10 // This analysis computes the optimal spill code placement between basic blocks.
  21 // The returned bit vector can be used to place optimal spill code at basic
  22 // block entries and exits. Spill code placement inside a basic block is not
  93 /// the stack can be live-out on the stack without inserting a spill.
  97 /// prepare - Reset state and prepare for a new spill placement computation.
  116 /// @param Blocks Array of block numbers that prefer to spill in and out.
  138 /// finish - Compute the optimal spill code placement given the
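The interface these hits describe takes per-block preferences between prepare() and finish() and returns a bit vector saying where the value should live in a register. A shape-only toy of that flow; the real pass minimizes a global cost (including reg/stack transitions on CFG edges) with an iterative Hopfield-network solver, which this greedy stand-in does not attempt:

    #include <vector>

    struct SpillPlacementToy {
      std::vector<int> bias;  // >0: block prefers register; <0: stack

      void prepare(size_t numBlocks) { bias.assign(numBlocks, 0); }

      // Blocks that prefer to spill in and out (cf. the line 116 hit).
      void addPrefSpill(const std::vector<unsigned> &blocks) {
        for (unsigned b : blocks) bias[b] -= 1;
      }

      // Bit vector of blocks where the value should stay in a register.
      std::vector<bool> finish() const {
        std::vector<bool> inReg(bias.size());
        for (size_t i = 0; i < bias.size(); ++i) inReg[i] = bias[i] > 0;
        return inReg;
      }
    };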
|
StackSlotColoring.cpp |
  54 // SSIntervals - Spill slot intervals.
  57 // SSRefs - Keep a list of MachineMemOperands for each spill slot.
  69 // AllColors - If index is set, it's a spill slot, i.e. color.
  70 // FIXME: This assumes PEI locate spill slot with smaller indices
  137 /// ScanForSpillSlotRefs - Scan all the machine instructions for spill slot
  138 /// references and update spill slot weights.
  177 /// InitializeSlots - Process all spill stack slot liveintervals and add them
  195 // Gather all spill slots into a list.
  196 DEBUG(dbgs() << "Spill slot intervals:\n");
  251 assert(NextColor != -1 && "No more spill slots?") [all...] |
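Stack slot coloring merges spill slots whose live intervals never overlap, so distinct "colors" become distinct stack slots. A self-contained toy of the core idea (greedy interval coloring; types and the scan order are illustrative, not the pass's actual data structures):

    #include <vector>

    // A spill slot's live range and the "color" (merged slot) assigned.
    struct SlotInterval {
      int begin;
      int end;
      int color = -1;
    };

    static bool overlaps(const SlotInterval &a, const SlotInterval &b) {
      return a.begin < b.end && b.begin < a.end;
    }

    // Two spill slots may share a color only if their live ranges are
    // disjoint: each interval takes the lowest color not already used by
    // an overlapping, earlier-colored interval.
    void colorSlots(std::vector<SlotInterval> &slots) {
      for (size_t i = 0; i < slots.size(); ++i) {
        std::vector<bool> used(i + 1, false);
        for (size_t j = 0; j < i; ++j)
          if (slots[j].color >= 0 && overlaps(slots[i], slots[j]))
            used[slots[j].color] = true;
        int c = 0;
        while (used[c]) ++c;  // some c <= i is always free
        slots[i].color = c;
      }
    }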
/external/libunwind/src/ia64/ |
getcontext.S |
  51 st8.spill [r2] = r1, (SC_FLAGS - GR(1)) // M3
  66 st8.spill [r2] = r12, (GR(4) - GR(12)) // M3
  70 stf.spill [r3] = f2 // M2
  71 stf.spill [r8] = f16 // M3
  76 stf.spill [r9] = f24, (FR(31) - FR(24)) // M2
  80 stf.spill [r9] = f31 // M2
  81 st8.spill [r2] = r4, (GR(5) - GR(4)) // M3, bank 1
  85 .mem.offset 0,0; st8.spill [r2] = r5, (GR(6) - GR(5)) // M4, bank 0
  86 .mem.offset 8,0; st8.spill [r3] = r7, (BR(0) - GR(7)) // M3, bank 0
  90 st8.spill [r2] = r6, (BR(1) - GR(6)) // M2, bank [all...] |
/external/llvm/include/llvm/CodeGen/ |
CalcSpillWeights.h |
  24 /// \brief Normalize the spill weight of a live interval
  26 /// The spill weight of a live interval is computed as:
  39 // intervals have a spill weight that is mostly proportional to the number
  40 // of uses, while large intervals get a spill weight that is closer to a use
  46 /// spill weight and allocation hint.
  66 /// \brief (re)compute li's spill weight and allocation hint.
  70 /// \brief Compute spill weights and allocation hints for all virtual register
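The hits describe the normalization only in words (the search output truncates the formula after "computed as:"): small intervals get a weight roughly proportional to their use count, while long intervals are damped toward a per-use figure. A sketch of that described behavior, not LLVM's exact code; the slack constant is made up:

    // Hypothetical normalization in the spirit the comments describe:
    // divide the summed use/def frequency by the interval's size plus a
    // slack constant, so short hot intervals come out expensive to spill
    // while long sparse intervals tend toward a per-use figure.
    constexpr float kSizeSlack = 25.0f;  // assumed constant, not LLVM's

    float normalizeSpillWeight(float useDefFreq, unsigned size) {
      return useDefFreq / (static_cast<float>(size) + kSizeSlack);
    }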
|
/external/llvm/lib/Target/R600/ |
SIMachineFunctionInfo.cpp |
  51 struct SpilledReg Spill;
  66 Spill.VGPR = LaneVGPRs[LaneVGPRIdx];
  67 Spill.Lane = Lane;
  68 return Spill;
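The R600/SI hits show how an SGPR spill is recorded: the value goes into one lane of a dedicated VGPR, so the bookkeeping is just a (VGPR, lane) pair. A hedged reconstruction of that record and the allocation step at lines 66-68; 64 lanes per VGPR is an assumption (the SI wavefront width), as is the caller keeping laneVGPRs populated:

    #include <vector>

    // A spilled scalar register lives in one lane of a vector register.
    struct SpilledReg {
      unsigned VGPR = 0;  // vector register that holds the value
      int Lane = -1;      // lane within that VGPR; -1 means invalid
      bool hasLane() const { return Lane != -1; }
    };

    // Toy allocator in the shape of the hits: hand out consecutive
    // lanes, moving to the next spill VGPR once one fills up.
    SpilledReg allocateSGPRSpill(const std::vector<unsigned> &laneVGPRs,
                                 unsigned &nextLane) {
      const unsigned kLanesPerVGPR = 64;
      SpilledReg spill;
      spill.VGPR = laneVGPRs[nextLane / kLanesPerVGPR];
      spill.Lane = static_cast<int>(nextLane % kLanesPerVGPR);
      ++nextLane;
      return spill;
    }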
|
/external/llvm/test/CodeGen/ARM/ |
varargs-spill-stack-align-nacl.ll |
  9 ; stack. A varargs function must therefore spill rN-r3 just below the
  12 ; This test checks for a bug in which a gap was left between the spill
|
/external/llvm/test/CodeGen/X86/ |
2013-10-14-FastISel-incorrect-vreg.ll |
  9 ; In this example, this is illustrated by a spill/reload of the
  12 ; Before this patch, the compiler was accessing two different spill
  19 ; Spill %arg2.
  21 ; Spill %loaded_ptr.
  58 ; Spill %arg2.
  60 ; Spill %loaded_ptr.
  97 ; Spill %arg2.
  99 ; Spill %loaded_ptr.
|
avx512-intel-ocl.ll |
  64 ; WIN64: vmovups %zmm21, {{.*(%rbp).*}} # 64-byte Spill
  65 ; WIN64: vmovups %zmm6, {{.*(%rbp).*}} # 64-byte Spill
  71 ; X64: kmovw %k7, {{.*}}(%rsp) ## 8-byte Folded Spill
  72 ; X64: kmovw %k6, {{.*}}(%rsp) ## 8-byte Folded Spill
  73 ; X64: kmovw %k5, {{.*}}(%rsp) ## 8-byte Folded Spill
  74 ; X64: kmovw %k4, {{.*}}(%rsp) ## 8-byte Folded Spill
  75 ; X64: vmovups %zmm31, {{.*}}(%rsp) ## 64-byte Spill
  76 ; X64: vmovups %zmm16, {{.*}}(%rsp) ## 64-byte Spill
|
sse-intel-ocl.ll |
  73 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  74 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  75 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  76 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  77 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  78 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  79 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
  80 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
|
avx-intel-ocl.ll |
  69 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  70 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  71 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  72 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  73 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  74 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  75 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  76 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  77 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill
  78 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill [all...] |
unaligned-spill-folding.ll |
  33 ; We can't fold the spill into a padd unless the stack is aligned. Just spilling
  37 ; UNALIGNED: movdqu {{.*}} # 16-byte Folded Spill
  42 ; ALIGNED: movdqa {{.*}} # 16-byte Spill
  47 ; FORCEALIGNED: movdqa {{.*}} # 16-byte Spill
|
/external/llvm/test/CodeGen/AArch64/ |
arm64-neon-vector-list-spill.ll |
  3 ; FIXME: We should not generate ld/st for such register spill/fill, because the
  5 ; spill/fill algorithm is optimized, this test case may not be triggered. And
  7 define i32 @spill.DPairReg(i32* %arg1, i32 %arg2) {
  8 ; CHECK-LABEL: spill.DPairReg:
  27 define i16 @spill.DTripleReg(i16* %arg1, i32 %arg2) {
  28 ; CHECK-LABEL: spill.DTripleReg:
  47 define i16 @spill.DQuadReg(i16* %arg1, i32 %arg2) {
  48 ; CHECK-LABEL: spill.DQuadReg:
  67 define i32 @spill.QPairReg(i32* %arg1, i32 %arg2) {
  68 ; CHECK-LABEL: spill.QPairReg [all...] |
arm64-tls-dynamic-together.ll | 4 ; glue) then LLVM will separate them quite happily (with a spill at O0, hence
|
/art/compiler/utils/ |
managed_register.h |
  119 ManagedRegisterSpill spill(__x);
  120 std::vector<ManagedRegisterSpill>::push_back(spill);
  124 ManagedRegisterSpill spill(__x, __size);
  125 std::vector<ManagedRegisterSpill>::push_back(spill);
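These ART hits show the spill list's shape: a ManagedRegisterSpill wraps a register plus an optional size, and the container forwards to vector::push_back after wrapping. A toy equivalent to make that concrete; the field names and the container name are guesses from context:

    #include <vector>

    struct ManagedRegister { int id = -1; };

    // A register queued for spilling, optionally with an explicit size.
    struct ManagedRegisterSpill : ManagedRegister {
      int size;
      explicit ManagedRegisterSpill(const ManagedRegister &r)
          : ManagedRegister(r), size(static_cast<int>(sizeof(void *))) {}
      ManagedRegisterSpill(const ManagedRegister &r, int sz)
          : ManagedRegister(r), size(sz) {}
    };

    // Mirrors the line 119-125 hits: wrap, then push onto the base vector.
    struct ManagedRegisterEntrySpills : std::vector<ManagedRegisterSpill> {
      void push_back(const ManagedRegister &r) {
        std::vector<ManagedRegisterSpill>::push_back(ManagedRegisterSpill(r));
      }
      void push_back(const ManagedRegister &r, int size) {
        std::vector<ManagedRegisterSpill>::push_back(
            ManagedRegisterSpill(r, size));
      }
    };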
|
/external/llvm/test/CodeGen/SystemZ/Large/ |
spill-02.py |
  1 # Test cases where we spill from one frame index to another, both of which
  2 # are out of range of MVC, and both of which need emergency spill slots.
  17 # Arrange for %foo's spill slot to be at 8184(%r15) and the alloca area to be at
  18 # 8192(%r15). The two emergency spill slots live below that, so this requires
|
spill-01.py |
  1 # Test cases where MVC is used for spill slots that end up being out of range.
  6 # call frame, and a further 8 bytes are needed for the emergency spill slot.
|
/external/llvm/test/MC/AArch64/ |
elf-extern.s | 16 str x30, [sp, #8] // 8-byte Folded Spill
|
/art/runtime/arch/arm64/ |
jni_entrypoints_arm64.S |
  26 // spill regs.
  41 // load spill regs.
|
/external/valgrind/VEX/priv/ |
host_generic_reg_alloc2.c |
  51 providing we can arrange for the dst to have the same spill slot.
  66 /* The "home" spill slot, if needed. Never changes. */
  105 spill. */
  108 rreg has the same value as the spill slot for the associated
  110 spill store or reload for this rreg. */
  148 sequence. Point is to select a virtual register to spill, by
  154 caller to arbitrarily restrict the set of spill candidates to be
  162 spill, or -1 if none was found. */
  193 /* Check that this vreg has been assigned a sane spill offset. */
  375 /* Return one, or, if we're unlucky, two insn(s) to spill/restore [all...] |
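The hits around lines 148-162 describe Valgrind's spill-candidate chooser's contract: pick a vreg to evict, let the caller restrict the candidate set, and return -1 if nothing qualifies. The classic heuristic for such a chooser, shown here purely as an illustration (no claim about Valgrind's actual policy), is furthest-next-use:

    #include <vector>

    // Per candidate vreg: whether the caller allows spilling it, and the
    // instruction index of its next use (large if never used again).
    struct SpillCandidate {
      bool allowed;
      int nextUseAt;
    };

    // Return the index of the candidate to spill -- the allowed vreg
    // whose next use is furthest away -- or -1 if none was found.
    int chooseSpillCandidate(const std::vector<SpillCandidate> &cands) {
      int best = -1;
      for (int i = 0; i < static_cast<int>(cands.size()); ++i) {
        if (!cands[i].allowed)
          continue;
        if (best == -1 || cands[i].nextUseAt > cands[best].nextUseAt)
          best = i;
      }
      return best;
    }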
/art/compiler/jni/quick/x86/ |
calling_convention_x86.cc |
  126 // We spill the argument registers on X86 to free them up for scratch use, we then assume
  136 ManagedRegisterSpill spill(in_reg, size, spill_offset);
  137 entry_spills_.push_back(spill);
  142 // We have to spill the second half of the long.
  187 // Plus return value spill area size
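Here the x86 JNI stub spills incoming argument registers to known stack offsets so they can be reused as scratch; a 64-bit long split across two 32-bit registers needs its second half spilled too. A toy sketch of that bookkeeping (the struct, register-pair parameters, and 4-byte widths are illustrative assumptions):

    #include <vector>

    // One incoming argument register stored to a known stack offset so
    // the register is free for scratch use inside the stub.
    struct ArgSpill {
      int reg;     // register holding (part of) the argument
      int size;    // bytes stored
      int offset;  // stack offset of the store
    };

    // Spill a 4-byte argument, or both 4-byte halves of a long held in a
    // register pair -- "we have to spill the second half of the long".
    void spillArgument(std::vector<ArgSpill> &entrySpills, int &spillOffset,
                       int loReg, int hiReg, bool isLong) {
      entrySpills.push_back({loReg, 4, spillOffset});
      spillOffset += 4;
      if (isLong) {
        entrySpills.push_back({hiReg, 4, spillOffset});
        spillOffset += 4;
      }
    }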
|
/external/llvm/test/CodeGen/Thumb2/ |
aligned-spill.ll |
  7 ; This function is forced to spill a double.
  8 ; Verify that the spill slot is properly aligned.
  33 ; Since the spill slot is only 8 bytes, technically it would be fine to only
  47 ; Spill 7 d-registers.
  71 ; Spill 7 d-registers, leave a hole.
|