/external/libunwind/tests/ |
ia64-test-rbs-asm.S | 40 #define SPILL(n) \ 123 SPILL(2); SPILL(3) 124 SPILL(4); SPILL(5); SPILL(6); SPILL(7) 125 SPILL(8); SPILL(9); SPILL(10); SPILL(11 [all...] |
/external/libunwind/src/hppa/ |
getcontext.S | 26 #define SPILL(n) stw %r##n, (LINUX_UC_MCONTEXT_OFF+LINUX_SC_GR_OFF+4*(n))(%r26) 36 SPILL (2) /* return-pointer */ 37 SPILL (3) /* frame pointer */ 38 SPILL (4) /* 2nd-ary frame pointer */ 39 SPILL (5) /* preserved register */ 40 SPILL (6) /* preserved register */ 41 SPILL (7) /* preserved register */ 42 SPILL (8) /* preserved register */ 43 SPILL (9) /* preserved register */ 44 SPILL (10) /* preserved register * [all...] |
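
The hppa SPILL macro above shows the whole pattern in one line: preprocessor token pasting (%r##n) synthesizes the register name, and 4*(n) computes that register's slot inside the saved context. A minimal C++ sketch of the same paste-and-offset idea, against a purely hypothetical context layout (nothing here is the real ucontext):

    #include <cstdio>

    struct Context { unsigned long gr[32]; };  // hypothetical GR save area

    // r##n pastes the macro argument into an identifier (r2, r3, ...), and
    // the array index plays the role of the 4*(n) byte offset above.
    #define SPILL(ctx, n) ((ctx).gr[n] = r##n)

    int main() {
        unsigned long r2 = 0x2222, r3 = 0x3333;  // stand-ins for registers
        Context uc{};
        SPILL(uc, 2);  // expands to uc.gr[2] = r2
        SPILL(uc, 3);  // expands to uc.gr[3] = r3
        std::printf("gr[2]=%#lx gr[3]=%#lx\n", uc.gr[2], uc.gr[3]);
    }
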
/external/libunwind/src/ia64/ |
getcontext.S | 51 st8.spill [r2] = r1, (SC_FLAGS - GR(1)) // M3 66 st8.spill [r2] = r12, (GR(4) - GR(12)) // M3 70 stf.spill [r3] = f2 // M2 71 stf.spill [r8] = f16 // M3 76 stf.spill [r9] = f24, (FR(31) - FR(24)) // M2 80 stf.spill [r9] = f31 // M2 81 st8.spill [r2] = r4, (GR(5) - GR(4)) // M3, bank 1 85 .mem.offset 0,0; st8.spill [r2] = r5, (GR(6) - GR(5)) // M4, bank 0 86 .mem.offset 8,0; st8.spill [r3] = r7, (BR(0) - GR(7)) // M3, bank 0 90 st8.spill [r2] = r6, (BR(1) - GR(6)) // M2, bank [all...] |
/external/oprofile/module/ia64/ |
IA64minstate.h | 192 st8.spill [r17]=rR1, 16; /* save original r1 */ \ 194 .mem.offset 0, 0; st8.spill [r16]=r2, 16; \ 195 .mem.offset 8, 0; st8.spill [r17]=r3, 16; \ 198 .mem.offset 0, 0; st8.spill [r16]=r12, 16; \ 199 .mem.offset 8, 0; st8.spill [r17]=r13, 16; \ 203 .mem.offset 0, 0; st8.spill [r16]=r14, 16; \ 204 .mem.offset 8, 0; st8.spill [r17]=r15, 16; \ 207 .mem.offset 0, 0; st8.spill [r16]=r8, 16; \ 208 .mem.offset 8, 0; st8.spill [r17]=r9, 16; \ 212 .mem.offset 0, 0; st8.spill [r16]=r10, 16; [all...] |
/external/llvm/include/llvm/CodeGen/ |
CalcSpillWeights.h | 24 /// \brief Normalize the spill weight of a live interval 26 /// The spill weight of a live interval is computed as: 37 // intervals have a spill weight that is mostly proportional to the number 38 // of uses, while large intervals get a spill weight that is closer to a use 44 /// spill weight and allocation hint. 64 /// \brief (re)compute li's spill weight and allocation hint. 68 /// \brief Compute spill weights and allocation hints for all virtual register
|
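
The CalcSpillWeights comments describe the intent: short intervals keep weights roughly proportional to their use count, while long intervals are damped so they become the preferred spill victims. A toy function with that shape (the name and divisor are illustrative, not LLVM's actual normalization):

    // Toy spill weight: grows with weighted use/def frequency, shrinks as
    // the interval covers more instructions, so a huge, rarely-used
    // interval ends up the cheapest candidate.
    static float normalizeWeightSketch(float useDefFreq, unsigned numInstrs) {
        return useDefFreq / (1.0f + static_cast<float>(numInstrs));
    }

Under this shape, ten uses packed into five instructions outweigh ten uses spread across five hundred, which is the bias the header describes.
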
/external/llvm/lib/CodeGen/ |
Spiller.h | 22 /// Implementations are utility classes which insert spill or remat code on 29 /// spill - Spill the LRE.getParent() live interval. 30 virtual void spill(LiveRangeEdit &LRE) = 0; 39 /// Create and return a spiller that will insert spill code directly instead
|
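
Spiller.h keeps spilling behind an abstract interface so an allocator can hold one pointer and not care whether the implementation inserts spill code directly or rematerializes. A stripped-down sketch of that shape (stand-in types, not the real LLVM classes):

    // LiveRangeEditStub approximates LiveRangeEdit just enough to show the
    // interface; the real class carries the interval, new vregs, and more.
    struct LiveRangeEditStub { unsigned vreg; };

    class SpillerSketch {
    public:
        virtual ~SpillerSketch() = default;
        // Insert spill (or rematerialization) code for the edited interval.
        virtual void spill(LiveRangeEditStub &lre) = 0;
    };
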
RegAllocBasic.cpp | 58 /// algorithm. It prioritizes live virtual registers by spill weight and spills 164 // Spill or split all live virtual registers currently unified under PhysReg 190 // Spill each interfering vreg allocated to PhysReg or an alias. 192 LiveInterval &Spill = *Intfs[i]; 195 if (!VRM->hasPhys(Spill.reg)) 200 Matrix->unassign(Spill); 202 // Spill the extracted interval. 203 LiveRangeEdit LRE(&Spill, SplitVRegs, *MF, *LIS, VRM); 204 spiller().spill(LRE); 223 // Populate a list of physical register spill candidates [all...] |
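
The RegAllocBasic excerpt above (lines 190-204 of the file) is its eviction loop: every interfering virtual register still holding the physical register is unassigned and handed to the spiller. A condensed sketch of that control flow over stand-in types (the real pass goes through VirtRegMap and LiveRegMatrix):

    #include <vector>

    struct IntervalStub { unsigned vreg; bool hasPhys; };  // stand-in type

    void unassign(IntervalStub &iv) { iv.hasPhys = false; }  // free the physreg
    void spillIt(IntervalStub &iv) { /* insert spill code, create new vregs */ }

    // Skip intervals that already lost their register through an alias,
    // unassign the rest, then spill each one.
    void spillInterferences(std::vector<IntervalStub *> &intfs) {
        for (IntervalStub *iv : intfs) {
            if (!iv->hasPhys)
                continue;
            unassign(*iv);
            spillIt(*iv);
        }
    }
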
SpillPlacement.h | 1 //===-- SpillPlacement.h - Optimal Spill Code Placement --------*- C++ -*--===// 10 // This analysis computes the optimal spill code placement between basic blocks. 21 // The returned bit vector can be used to place optimal spill code at basic 22 // block entries and exits. Spill code placement inside a basic block is not 89 /// the stack can be live-out on the stack without inserting a spill. 93 /// prepare - Reset state and prepare for a new spill placement computation. 112 /// @param Blocks Array of block numbers that prefer to spill in and out. 134 /// finish - Compute the optimal spill code placement given the
|
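
SpillPlacement's contract is a per-block answer, register or stack, at block boundaries; spill code then belongs only on the edges where that answer flips. A toy consumer of such a result (the bit vector and block numbering are made up):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        // One bit per basic block: true = the value prefers the stack there.
        std::vector<bool> onStack = {false, false, true, true, false};
        for (std::size_t b = 1; b < onStack.size(); ++b) {
            if (!onStack[b - 1] && onStack[b])
                std::printf("store (spill) on edge %zu -> %zu\n", b - 1, b);
            else if (onStack[b - 1] && !onStack[b])
                std::printf("load (reload) on edge %zu -> %zu\n", b - 1, b);
        }
    }
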
Spiller.cpp | 72 /// Add spill ranges for every use/def of the live interval, inserting loads 81 "Attempting to spill already spilled value."); 84 "Trying to spill a stack slot."); 86 DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n"); 157 /// Spills any live range using the spill-everywhere method with no attempt at 166 void spill(LiveRangeEdit &LRE) override {
|
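
The trivial spiller quoted above is "spill everywhere": a reload before each instruction that reads the interval, a store after each one that writes it, no attempt at folding. Its skeleton over stand-in instruction types:

    #include <vector>

    struct OperandStub { unsigned vreg; bool isDef; };   // stand-in operand
    struct InstrStub { std::vector<OperandStub> ops; };  // stand-in instruction

    void spillEverywhere(std::vector<InstrStub> &code, unsigned vreg) {
        for (InstrStub &mi : code) {
            bool reads = false, writes = false;
            for (const OperandStub &op : mi.ops) {
                if (op.vreg != vreg)
                    continue;
                (op.isDef ? writes : reads) = true;
            }
            if (reads) { /* insert load from the stack slot before mi */ }
            if (writes) { /* insert store to the stack slot after mi */ }
        }
    }
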
StackSlotColoring.cpp | 54 // SSIntervals - Spill slot intervals. 57 // SSRefs - Keep a list of MachineMemOperands for each spill slot. 69 // AllColors - If index is set, it's a spill slot, i.e. color. 70 // FIXME: This assumes PEI locates spill slots with smaller indices 137 /// ScanForSpillSlotRefs - Scan all the machine instructions for spill slot 138 /// references and update spill slot weights. 177 /// InitializeSlots - Process all spill stack slot liveintervals and add them 187 // Gather all spill slots into a list. 188 DEBUG(dbgs() << "Spill slot intervals:\n"); 243 assert(NextColor != -1 && "No more spill slots?") [all...] |
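
StackSlotColoring treats each spill slot's live interval as a node to color, so slots whose intervals never overlap can share one stack location. A greedy version of that assignment over plain [start, end) ranges (illustrative only; the pass works on real liveness and weight data):

    #include <cstddef>
    #include <utility>
    #include <vector>

    using Range = std::pair<int, int>;  // [start, end) of a slot's liveness

    static bool overlaps(const Range &a, const Range &b) {
        return a.first < b.second && b.first < a.second;
    }

    // Give the interval the first color (shared slot) it doesn't clash
    // with, opening a new color only when every existing one overlaps it.
    int colorSlot(const Range &iv, std::vector<std::vector<Range>> &colors) {
        for (std::size_t c = 0; c < colors.size(); ++c) {
            bool fits = true;
            for (const Range &r : colors[c])
                if (overlaps(iv, r)) { fits = false; break; }
            if (fits) { colors[c].push_back(iv); return static_cast<int>(c); }
        }
        colors.push_back({iv});
        return static_cast<int>(colors.size()) - 1;
    }
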
/external/llvm/test/CodeGen/ARM/ |
varargs-spill-stack-align-nacl.ll | 9 ; stack. A varargs function must therefore spill rN-r3 just below the 12 ; This test checks for a bug in which a gap was left between the spill
|
/external/llvm/test/CodeGen/X86/ |
2013-10-14-FastISel-incorrect-vreg.ll | 9 ; In this example, this is illustrated by the spill/reload of the 12 ; Before this patch, the compiler was accessing two different spill 19 ; Spill %arg2. 21 ; Spill %loaded_ptr. 58 ; Spill %arg2. 60 ; Spill %loaded_ptr. 97 ; Spill %arg2. 99 ; Spill %loaded_ptr.
|
sse-intel-ocl.ll | 73 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill 74 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill 75 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill 76 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill 77 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill 78 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill 79 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill 80 ; NOT_WIN: movaps {{%xmm([8-9]|1[0-5])}}, {{.*(%rsp).*}} ## 16-byte Spill
|
avx-intel-ocl.ll | 69 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill 70 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill 71 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill 72 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill 73 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill 74 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill 75 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill 76 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill 77 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill 78 ; WIN64: vmovaps {{%ymm([6-9]|1[0-5])}}, {{.*(%rbp).*}} # 32-byte Spill [all...] |
unaligned-spill-folding.ll | 33 ; We can't fold the spill into a padd unless the stack is aligned. Just spilling 37 ; UNALIGNED: movdqu {{.*}} # 16-byte Folded Spill 42 ; ALIGNED: movdqa {{.*}} # 16-byte Spill 47 ; FORCEALIGNED: movdqa {{.*}} # 16-byte Spill
|
/external/llvm/test/CodeGen/AArch64/ |
arm64-neon-vector-list-spill.ll | 3 ; FIXME: We should not generate ld/st for such register spill/fill, because the 5 ; spill/fill algorithm is optimized, this test case may not be triggered. And 7 define i32 @spill.DPairReg(i32* %arg1, i32 %arg2) { 8 ; CHECK-LABEL: spill.DPairReg: 27 define i16 @spill.DTripleReg(i16* %arg1, i32 %arg2) { 28 ; CHECK-LABEL: spill.DTripleReg: 47 define i16 @spill.DQuadReg(i16* %arg1, i32 %arg2) { 48 ; CHECK-LABEL: spill.DQuadReg: 67 define i32 @spill.QPairReg(i32* %arg1, i32 %arg2) { 68 ; CHECK-LABEL: spill.QPairReg [all...] |
arm64-tls-dynamic-together.ll | 4 ; glue) then LLVM will separate them quite happily (with a spill at O0, hence
|
/art/compiler/utils/ |
managed_register.h | 115 ManagedRegisterSpill spill(__x); 116 std::vector<ManagedRegisterSpill>::push_back(spill); 120 ManagedRegisterSpill spill(__x, __size); 121 std::vector<ManagedRegisterSpill>::push_back(spill);
|
/external/llvm/test/CodeGen/PowerPC/ |
vrspill.ll | 4 ; This verifies that we generate correct spill/reload code for vector regs.
|
/external/llvm/test/CodeGen/SystemZ/Large/ |
spill-02.py | 1 # Test cases where we spill from one frame index to another, both of which 2 # are out of range of MVC, and both of which need emergency spill slots. 17 # Arrange for %foo's spill slot to be at 8184(%r15) and the alloca area to be at 18 # 8192(%r15). The two emergency spill slots live below that, so this requires
|
spill-01.py | 1 # Test cases where MVC is used for spill slots that end up being out of range. 6 # call frame, and a further 8 bytes are needed for the emergency spill slot.
|
/external/llvm/test/MC/AArch64/ |
elf-extern.s | 16 str x30, [sp, #8] // 8-byte Folded Spill
|
/art/runtime/arch/arm64/ |
jni_entrypoints_arm64.S | 26 // spill regs. 41 // load spill regs.
|
/external/valgrind/main/VEX/priv/ |
host_generic_reg_alloc2.c | 51 providing we can arrange for the dst to have the same spill slot. 66 /* The "home" spill slot, if needed. Never changes. */ 103 spill. */ 106 rreg has the same value as the spill slot for the associated 108 spill store or reload for this rreg. */ 164 sequence. Point is to select a virtual register to spill, by 170 caller to arbitrarily restrict the set of spill candidates to be 174 spill, or -1 if none was found. */ 207 /* Check that this vreg has been assigned a sane spill offset. */ 327 /* Return one, or, if we're unlucky, two insn(s) to spill/restore [all...] |
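
The host_generic_reg_alloc2.c comments describe the classic selection problem: among the candidates the caller permits, pick the vreg mentioned as far ahead in the instruction sequence as possible, or return -1 if nothing is spillable. A compact sketch of that scan over a hypothetical data layout:

    #include <vector>

    struct VRegStub { int nextMention; bool allowed; };  // hypothetical layout

    // Belady-style choice: the allowed vreg whose next mention lies
    // furthest in the future is the cheapest to send to its spill slot.
    int chooseSpillCandidate(const std::vector<VRegStub> &vregs) {
        int best = -1, bestMention = -1;
        for (int i = 0; i < static_cast<int>(vregs.size()); ++i) {
            if (!vregs[i].allowed)
                continue;             // caller restricted the candidate set
            if (vregs[i].nextMention > bestMention) {
                bestMention = vregs[i].nextMention;
                best = i;
            }
        }
        return best;                  // -1 if none was found, as above
    }
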
/external/llvm/test/CodeGen/Thumb2/ |
aligned-spill.ll | 7 ; This function is forced to spill a double. 8 ; Verify that the spill slot is properly aligned. 33 ; Since the spill slot is only 8 bytes, technically it would be fine to only 47 ; Spill 7 d-registers. 71 ; Spill 7 d-registers, leave a hole.
|