; Test musttail forwarding of the full variadic argument pack on X86 targets
; (SysV x86-64, x32, Win64, and i686). Originally llvm/test/CodeGen/X86/musttail-varargs.ll.
      1 ; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-linux | FileCheck %s --check-prefix=LINUX
      2 ; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefix=LINUX-X32
      3 ; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-windows | FileCheck %s --check-prefix=WINDOWS
      4 ; RUN: llc < %s -enable-tail-merge=0 -mtriple=i686-windows | FileCheck %s --check-prefix=X86
      5 
      6 ; Test that we actually spill and reload all arguments in the variadic argument
      7 ; pack. Doing a normal call will clobber all argument registers, and we will
      8 ; spill around it. A simple adjustment should not require any XMM spills.
      9 
     10 declare void @llvm.va_start(i8*) nounwind
     11 
     12 declare void(i8*, ...)* @get_f(i8* %this)
     13 
; Forwarding thunk that must preserve the entire variadic register pack.
; The call to @get_f clobbers all argument registers (6 GPRs, 8 XMMs, and
; AL on SysV), so the compiler has to spill them before the call and reload
; them before the musttail forward — the CHECK lines below pin that down.
define void @f_thunk(i8* %this, ...) {
  ; Use va_start so that we exercise the combination.
  %ap = alloca [4 x i8*], align 16
  %ap_i8 = bitcast [4 x i8*]* %ap to i8*
  call void @llvm.va_start(i8* %ap_i8)

  ; This call forces the spill/reload of every live argument register.
  %fptr = call void(i8*, ...)*(i8*) @get_f(i8* %this)
  ; Forward %this and the unnamed variadic arguments unchanged.
  musttail call void (i8*, ...) %fptr(i8* %this, ...)
  ret void
}
     24 
     25 ; Save and restore 6 GPRs, 8 XMMs, and AL around the call.
     26 
     27 ; LINUX-LABEL: f_thunk:
     28 ; LINUX-DAG: movq %rdi, {{.*}}
     29 ; LINUX-DAG: movq %rsi, {{.*}}
     30 ; LINUX-DAG: movq %rdx, {{.*}}
     31 ; LINUX-DAG: movq %rcx, {{.*}}
     32 ; LINUX-DAG: movq %r8, {{.*}}
     33 ; LINUX-DAG: movq %r9, {{.*}}
     34 ; LINUX-DAG: movb %al, {{.*}}
     35 ; LINUX-DAG: movaps %xmm0, {{[0-9]*}}(%rsp)
     36 ; LINUX-DAG: movaps %xmm1, {{[0-9]*}}(%rsp)
     37 ; LINUX-DAG: movaps %xmm2, {{[0-9]*}}(%rsp)
     38 ; LINUX-DAG: movaps %xmm3, {{[0-9]*}}(%rsp)
     39 ; LINUX-DAG: movaps %xmm4, {{[0-9]*}}(%rsp)
     40 ; LINUX-DAG: movaps %xmm5, {{[0-9]*}}(%rsp)
     41 ; LINUX-DAG: movaps %xmm6, {{[0-9]*}}(%rsp)
     42 ; LINUX-DAG: movaps %xmm7, {{[0-9]*}}(%rsp)
     43 ; LINUX: callq get_f
     44 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm0
     45 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm1
     46 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm2
     47 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm3
     48 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm4
     49 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm5
     50 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm6
     51 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm7
     52 ; LINUX-DAG: movq {{.*}}, %rdi
     53 ; LINUX-DAG: movq {{.*}}, %rsi
     54 ; LINUX-DAG: movq {{.*}}, %rdx
     55 ; LINUX-DAG: movq {{.*}}, %rcx
     56 ; LINUX-DAG: movq {{.*}}, %r8
     57 ; LINUX-DAG: movq {{.*}}, %r9
     58 ; LINUX-DAG: movb {{.*}}, %al
     59 ; LINUX: jmpq *{{.*}}  # TAILCALL
     60 
     61 ; LINUX-X32-LABEL: f_thunk:
     62 ; LINUX-X32-DAG: movl %edi, {{.*}}
     63 ; LINUX-X32-DAG: movq %rsi, {{.*}}
     64 ; LINUX-X32-DAG: movq %rdx, {{.*}}
     65 ; LINUX-X32-DAG: movq %rcx, {{.*}}
     66 ; LINUX-X32-DAG: movq %r8, {{.*}}
     67 ; LINUX-X32-DAG: movq %r9, {{.*}}
     68 ; LINUX-X32-DAG: movb %al, {{.*}}
     69 ; LINUX-X32-DAG: movaps %xmm0, {{[0-9]*}}(%esp)
     70 ; LINUX-X32-DAG: movaps %xmm1, {{[0-9]*}}(%esp)
     71 ; LINUX-X32-DAG: movaps %xmm2, {{[0-9]*}}(%esp)
     72 ; LINUX-X32-DAG: movaps %xmm3, {{[0-9]*}}(%esp)
     73 ; LINUX-X32-DAG: movaps %xmm4, {{[0-9]*}}(%esp)
     74 ; LINUX-X32-DAG: movaps %xmm5, {{[0-9]*}}(%esp)
     75 ; LINUX-X32-DAG: movaps %xmm6, {{[0-9]*}}(%esp)
     76 ; LINUX-X32-DAG: movaps %xmm7, {{[0-9]*}}(%esp)
     77 ; LINUX-X32: callq get_f
     78 ; LINUX-X32-DAG: movaps {{[0-9]*}}(%esp), %xmm0
     79 ; LINUX-X32-DAG: movaps {{[0-9]*}}(%esp), %xmm1
     80 ; LINUX-X32-DAG: movaps {{[0-9]*}}(%esp), %xmm2
     81 ; LINUX-X32-DAG: movaps {{[0-9]*}}(%esp), %xmm3
     82 ; LINUX-X32-DAG: movaps {{[0-9]*}}(%esp), %xmm4
     83 ; LINUX-X32-DAG: movaps {{[0-9]*}}(%esp), %xmm5
     84 ; LINUX-X32-DAG: movaps {{[0-9]*}}(%esp), %xmm6
     85 ; LINUX-X32-DAG: movaps {{[0-9]*}}(%esp), %xmm7
     86 ; LINUX-X32-DAG: movl {{.*}}, %edi
     87 ; LINUX-X32-DAG: movq {{.*}}, %rsi
     88 ; LINUX-X32-DAG: movq {{.*}}, %rdx
     89 ; LINUX-X32-DAG: movq {{.*}}, %rcx
     90 ; LINUX-X32-DAG: movq {{.*}}, %r8
     91 ; LINUX-X32-DAG: movq {{.*}}, %r9
     92 ; LINUX-X32-DAG: movb {{.*}}, %al
     93 ; LINUX-X32: jmpq *{{.*}}  # TAILCALL
     94 
     95 ; WINDOWS-LABEL: f_thunk:
     96 ; WINDOWS-NOT: mov{{.}}ps
     97 ; WINDOWS-DAG: movq %rdx, {{.*}}
     98 ; WINDOWS-DAG: movq %rcx, {{.*}}
     99 ; WINDOWS-DAG: movq %r8, {{.*}}
    100 ; WINDOWS-DAG: movq %r9, {{.*}}
    101 ; WINDOWS-NOT: mov{{.}}ps
    102 ; WINDOWS: callq get_f
    103 ; WINDOWS-NOT: mov{{.}}ps
    104 ; WINDOWS-DAG: movq {{.*}}, %rdx
    105 ; WINDOWS-DAG: movq {{.*}}, %rcx
    106 ; WINDOWS-DAG: movq {{.*}}, %r8
    107 ; WINDOWS-DAG: movq {{.*}}, %r9
    108 ; WINDOWS-NOT: mov{{.}}ps
    109 ; WINDOWS: jmpq *{{.*}} # TAILCALL
    110 
    111 ; No regparms on normal x86 conventions.
    112 
    113 ; X86-LABEL: _f_thunk:
    114 ; X86: calll _get_f
    115 ; X86: jmpl *{{.*}} # TAILCALL
    116 
    117 ; This thunk shouldn't require any spills and reloads, assuming the register
    118 ; allocator knows what it's doing.
    119 
; Identity thunk: the callee pointer is the first argument itself, so no
; intervening call clobbers anything — codegen should be a single indirect
; jump with no spills or reloads (the checks assert the absence of movq).
define void @g_thunk(i8* %fptr_i8, ...) {
  %fptr = bitcast i8* %fptr_i8 to void (i8*, ...)*
  ; Tail-jump through the pointer, forwarding the variadic pack as-is.
  musttail call void (i8*, ...) %fptr(i8* %fptr_i8, ...)
  ret void
}
    125 
    126 ; LINUX-LABEL: g_thunk:
    127 ; LINUX-NOT: movq
    128 ; LINUX: jmpq *%rdi  # TAILCALL
    129 
    130 ; LINUX-X32-LABEL: g_thunk:
    131 ; LINUX-X32-DAG: movl %edi, %[[REG:e[abcd]x|ebp|esi|edi|r8|r9|r1[0-5]]]
    132 ; LINUX-X32-DAG: jmpq *%[[REG]]  # TAILCALL
    133 
    134 ; WINDOWS-LABEL: g_thunk:
    135 ; WINDOWS-NOT: movq
    136 ; WINDOWS: jmpq *%rcx # TAILCALL
    137 
    138 ; X86-LABEL: _g_thunk:
    139 ; X86: jmpl *%eax # TAILCALL
    140 
    141 ; Do a simple multi-exit multi-bb test.
    142 
    143 %struct.Foo = type { i1, i8*, i8* }
    144 
    145 @g = external global i32
    146 
; Multi-exit, multi-basic-block thunk: branches on a flag in %struct.Foo and
; musttail-dispatches through one of two function pointers stored in it.
; Tail merging is disabled on the RUN lines, so both indirect jumps survive.
define void @h_thunk(%struct.Foo* %this, ...) {
  ; Load the i1 selector from field 0 of the Foo.
  %cond_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 0
  %cond = load i1, i1* %cond_p
  br i1 %cond, label %then, label %else

then:
  ; Dispatch through the first function pointer (field 1).
  %a_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 1
  %a_i8 = load i8*, i8** %a_p
  %a = bitcast i8* %a_i8 to void (%struct.Foo*, ...)*
  musttail call void (%struct.Foo*, ...) %a(%struct.Foo* %this, ...)
  ret void

else:
  ; Dispatch through the second function pointer (field 2). The store to @g
  ; gives this path a distinct side effect before the tail call.
  %b_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 2
  %b_i8 = load i8*, i8** %b_p
  %b = bitcast i8* %b_i8 to void (%struct.Foo*, ...)*
  store i32 42, i32* @g
  musttail call void (%struct.Foo*, ...) %b(%struct.Foo* %this, ...)
  ret void
}
    167 
    168 ; LINUX-LABEL: h_thunk:
    169 ; LINUX: jne
    170 ; LINUX: jmpq *{{.*}} # TAILCALL
    171 ; LINUX: jmpq *{{.*}} # TAILCALL
    172 ; LINUX-X32-LABEL: h_thunk:
    173 ; LINUX-X32: jne
    174 ; LINUX-X32: jmpq *{{.*}} # TAILCALL
    175 ; LINUX-X32: jmpq *{{.*}} # TAILCALL
    176 ; WINDOWS-LABEL: h_thunk:
    177 ; WINDOWS: jne
    178 ; WINDOWS: jmpq *{{.*}} # TAILCALL
    179 ; WINDOWS: jmpq *{{.*}} # TAILCALL
    180 ; X86-LABEL: _h_thunk:
    181 ; X86: jne
    182 ; X86: jmpl *{{.*}} # TAILCALL
    183 ; X86: jmpl *{{.*}} # TAILCALL
    184