; Test lowering of thread-local globals to the emulated-TLS scheme
; (__emutls_v.* control variables + __emutls_get_address calls) under
; -relocation-model=pic, for 32- and 64-bit Linux and Android targets.
; Android triples enable emulated TLS by default; explicit -emulated-tls
; covers the gnu triples, and the NoEMU prefix checks that plain gnu
; targets emit no __emutls references at all.

; RUN: llc < %s -emulated-tls -mtriple=i386-linux-gnu -relocation-model=pic | FileCheck -check-prefix=X32 %s
; RUN: llc < %s -emulated-tls -mtriple=x86_64-linux-gnu -relocation-model=pic | FileCheck -check-prefix=X64 %s
; RUN: llc < %s -emulated-tls -mtriple=i386-linux-android -relocation-model=pic | FileCheck -check-prefix=X32 %s
; RUN: llc < %s -emulated-tls -mtriple=x86_64-linux-android -relocation-model=pic | FileCheck -check-prefix=X64 %s

; RUN: llc < %s -mtriple=i386-linux-gnu -relocation-model=pic | FileCheck -check-prefix=NoEMU %s
; RUN: llc < %s -mtriple=x86_64-linux-gnu -relocation-model=pic | FileCheck -check-prefix=NoEMU %s
; RUN: llc < %s -mtriple=i386-linux-android -relocation-model=pic | FileCheck -check-prefix=X32 %s
; RUN: llc < %s -mtriple=x86_64-linux-android -relocation-model=pic | FileCheck -check-prefix=X64 %s

; NoEMU-NOT: __emutls

; Use my_emutls_get_address like __emutls_get_address.
@my_emutls_v_xyz = external global i8*, align 4
declare i8* @my_emutls_get_address(i8*)

define i32 @my_get_xyz() {
; X32-LABEL: my_get_xyz:
; X32: movl my_emutls_v_xyz@GOT(%ebx), %eax
; X32-NEXT: movl %eax, (%esp)
; X32-NEXT: calll my_emutls_get_address@PLT
; X64-LABEL: my_get_xyz:
; X64: movq my_emutls_v_xyz@GOTPCREL(%rip), %rdi
; X64-NEXT: callq my_emutls_get_address@PLT
; X64-NEXT: movl (%rax), %eax

entry:
  %call = call i8* @my_emutls_get_address(i8* bitcast (i8** @my_emutls_v_xyz to i8*))
  %0 = bitcast i8* %call to i32*
  %1 = load i32, i32* %0, align 4
  ret i32 %1
}

@i = thread_local global i32 15
@j = internal thread_local global i32 42
@k = internal thread_local global i32 0, align 8

define i32 @f1() {
entry:
  %tmp1 = load i32, i32* @i
  ret i32 %tmp1
}

; X32-LABEL: f1:
; X32: movl __emutls_v.i@GOT(%ebx), %eax
; X32-NEXT: movl %eax, (%esp)
; X32-NEXT: calll __emutls_get_address@PLT
; X64-LABEL: f1:
; X64: movq __emutls_v.i@GOTPCREL(%rip), %rdi
; X64-NEXT: callq __emutls_get_address@PLT
; X64-NEXT: movl (%rax), %eax

@i2 = external thread_local global i32

define i32* @f2() {
entry:
  ret i32* @i
}

; X32-LABEL: f2:
; X64-LABEL: f2:


define i32 @f3() {
entry:
  %tmp1 = load i32, i32* @i ; <i32> [#uses=1]
  ret i32 %tmp1
}

; X32-LABEL: f3:
; X64-LABEL: f3:


define i32* @f4() nounwind {
entry:
  ret i32* @i
}

; X32-LABEL: f4:
; X64-LABEL: f4:


define i32 @f5() nounwind {
entry:
  %0 = load i32, i32* @j, align 4
  %1 = load i32, i32* @k, align 4
  %add = add nsw i32 %0, %1
  ret i32 %add
}

; X32-LABEL: f5:
; X32: leal __emutls_v.j@GOTOFF(%ebx), %eax
; X32-NEXT: movl %eax, (%esp)
; X32-NEXT: calll __emutls_get_address@PLT
; X32-NEXT: movl (%eax), %esi
; X32-NEXT: leal __emutls_v.k@GOTOFF(%ebx), %eax
; X32-NEXT: movl %eax, (%esp)
; X32-NEXT: calll __emutls_get_address@PLT
; X32-NEXT: addl (%eax), %esi
; X32-NEXT: movl %esi, %eax

; X64-LABEL: f5:
; X64: leaq __emutls_v.j(%rip), %rdi
; X64-NEXT: callq __emutls_get_address@PLT
; X64-NEXT: movl (%rax), %ebx
; X64-NEXT: leaq __emutls_v.k(%rip), %rdi
; X64-NEXT: callq __emutls_get_address@PLT
; X64-NEXT: addl (%rax), %ebx
; X64-NEXT: movl %ebx, %eax

;;;;; 32-bit targets

; X32: .data{{$}}
; X32: .globl __emutls_v.i
; X32-LABEL: __emutls_v.i:
; X32-NEXT: .long 4
; X32-NEXT: .long 4
; X32-NEXT: .long 0
; X32-NEXT: .long __emutls_t.i

; X32: .section .rodata,
; X32-LABEL: __emutls_t.i:
; X32-NEXT: .long 15

; X32: .data{{$}}
; X32-NOT: .globl
; X32-LABEL: __emutls_v.j:
; X32-NEXT: .long 4
; X32-NEXT: .long 4
; X32-NEXT: .long 0
; X32-NEXT: .long __emutls_t.j

; X32: .section .rodata,
; X32-LABEL: __emutls_t.j:
; X32-NEXT: .long 42

; X32: .data{{$}}
; X32-NOT: .globl
; X32-LABEL: __emutls_v.k:
; X32-NEXT: .long 4
; X32-NEXT: .long 8
; X32-NEXT: .long 0
; X32-NEXT: .long 0

; X32-NOT: __emutls_t.k:

;;;;; 64-bit targets

; X64: .data{{$}}
; X64: .globl __emutls_v.i
; X64-LABEL: __emutls_v.i:
; X64-NEXT: .quad 4
; X64-NEXT: .quad 4
; X64-NEXT: .quad 0
; X64-NEXT: .quad __emutls_t.i

; X64: .section .rodata,
; X64-LABEL: __emutls_t.i:
; X64-NEXT: .long 15

; X64: .data{{$}}
; X64-NOT: .globl
; X64-LABEL: __emutls_v.j:
; X64-NEXT: .quad 4
; X64-NEXT: .quad 4
; X64-NEXT: .quad 0
; X64-NEXT: .quad __emutls_t.j

; X64: .section .rodata,
; X64-LABEL: __emutls_t.j:
; X64-NEXT: .long 42

; X64: .data{{$}}
; X64-NOT: .globl
; X64-LABEL: __emutls_v.k:
; X64-NEXT: .quad 4
; X64-NEXT: .quad 8
; X64-NEXT: .quad 0
; X64-NEXT: .quad 0

; X64-NOT: __emutls_t.k: