; RUN: llc < %s -mattr=-avx -fast-isel -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort | FileCheck %s
; RUN: llc < %s -mattr=+avx -fast-isel -O0 -regalloc=fast -asm-verbose=0 -fast-isel-abort | FileCheck %s --check-prefix=AVX

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"

; Make sure that fast-isel folds the immediate into the binop even though it
; is non-canonical.
define i32 @test1(i32 %i) nounwind ssp {
  %and = and i32 8, %i
  ret i32 %and
}

; CHECK: test1:
; CHECK: andl	$8, 
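; (For reference, the canonical form would put the constant on the right-hand
; side, e.g. "%and = and i32 %i, 8"; the swapped operand order above is
; intentional.)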


; rdar://9289512 - The load should fold into the compare.
define void @test2(i64 %x) nounwind ssp {
entry:
  %x.addr = alloca i64, align 8
  store i64 %x, i64* %x.addr, align 8
  %tmp = load i64* %x.addr, align 8
  %cmp = icmp sgt i64 %tmp, 42
  br i1 %cmp, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  br label %if.end

if.end:                                           ; preds = %if.then, %entry
  ret void
; CHECK: test2:
; CHECK: movq	%rdi, -8(%rsp)
; CHECK: cmpq	$42, -8(%rsp)
}
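; The folded load shows up as the memory operand of the cmpq above rather than
; as a separate load into a register.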


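; Taking the address of an external global should be selected as a single
; GOTPCREL load, with nothing else before the return.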
@G = external global i32
define i64 @test3() nounwind {
  %A = ptrtoint i32* @G to i64
  ret i64 %A
; CHECK: test3:
; CHECK: movq _G@GOTPCREL(%rip), %rax
; CHECK-NEXT: ret
}


; rdar://9289558
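; Fast-isel should emit the GOT load of the array base and fold the index into
; a zero-extending load, as checked below.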
@rtx_length = external global [153 x i8]

define i32 @test4(i64 %idxprom9) nounwind {
  %arrayidx10 = getelementptr inbounds [153 x i8]* @rtx_length, i32 0, i64 %idxprom9
  %tmp11 = load i8* %arrayidx10, align 1
  %conv = zext i8 %tmp11 to i32
  ret i32 %conv

; CHECK: test4:
; CHECK: movq	_rtx_length@GOTPCREL(%rip), %rax
; CHECK-NEXT: movzbl	(%rax,%rdi), %eax
; CHECK-NEXT: ret
}


; PR3242 - Out-of-range shifts should not be folded by fast-isel.
define void @test5(i32 %x, i32* %p) nounwind {
  %y = ashr i32 %x, 50000
  store i32 %y, i32* %p
  ret void

; CHECK: test5:
; CHECK: movl	$50000, %ecx
; CHECK: sarl	%cl, %edi
; CHECK: ret
}
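; Note: the 50000 is kept in %cl rather than being folded, presumably because
; truncating it into the shift's 8-bit immediate field would silently change
; the shift amount.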

; rdar://9289501 - fast-isel should fold trivial multiplies to shifts.
define i64 @test6(i64 %x) nounwind ssp {
entry:
  %mul = mul nsw i64 %x, 8
  ret i64 %mul

; CHECK: test6:
; CHECK: shlq	$3, %rdi
}

define i32 @test7(i32 %x) nounwind ssp {
entry:
  %mul = mul nsw i32 %x, 8
  ret i32 %mul
; CHECK: test7:
; CHECK: shll	$3, %edi
}
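; Multiplying by 8 is equivalent to shifting left by 3, so no imul is needed
; in either width.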


; rdar://9289507 - folding of immediates into 64-bit operations.
define i64 @test8(i64 %x) nounwind ssp {
entry:
  %add = add nsw i64 %x, 7
  ret i64 %add

; CHECK: test8:
; CHECK: addq	$7, %rdi
}

define i64 @test9(i64 %x) nounwind ssp {
entry:
  %mul = mul nsw i64 %x, 7
  ret i64 %mul
; CHECK: test9:
; CHECK: imulq	$7, %rdi, %rax
}
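; 7 is not a power of two, so the multiply stays a multiply, but the constant
; should still be folded into the three-operand imulq form.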

; rdar://9297011 - Don't reject udiv by a power of 2.
define i32 @test10(i32 %X) nounwind {
  %Y = udiv i32 %X, 8
  ret i32 %Y
; CHECK: test10:
; CHECK: shrl	$3, 
}

define i32 @test11(i32 %X) nounwind {
  %Y = sdiv exact i32 %X, 8
  ret i32 %Y
; CHECK: test11:
; CHECK: sarl	$3, 
}
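; Dividing by 8 becomes a shift right by 3: logical for the udiv, arithmetic
; for the exact sdiv.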


; rdar://9297006 - Trunc to bool.
define void @test12(i8 %tmp) nounwind ssp noredzone {
entry:
  %tobool = trunc i8 %tmp to i1
  br i1 %tobool, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  call void @test12(i8 0) noredzone
  br label %if.end

if.end:                                           ; preds = %if.then, %entry
  ret void
; CHECK: test12:
; CHECK: testb	$1,
; CHECK-NEXT: je L
; CHECK-NEXT: movl $0, %edi
; CHECK-NEXT: callq
}
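; Only bit 0 of the i8 argument is meaningful after the trunc to i1, which is
; why the branch is selected as "testb $1" / "je".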

declare void @test13f(i1 %X)

define void @test13() nounwind {
  call void @test13f(i1 0)
  ret void
; CHECK: test13:
; CHECK: movl $0, %edi
; CHECK-NEXT: callq
}


; rdar://9297003 - fast-isel should not bail out on functions taking bools.
define void @test14(i8 %tmp) nounwind ssp noredzone {
entry:
  %tobool = trunc i8 %tmp to i1
  call void @test13f(i1 zeroext %tobool) noredzone
  ret void
; CHECK: test14:
; CHECK: andb	$1, 
; CHECK: callq
}
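; The zeroext i1 argument requires the remaining bits of the argument byte to
; be cleared, hence the "andb $1" before the call.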

declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)

; rdar://9289488 - fast-isel shouldn't bail out on llvm.memcpy
define void @test15(i8* %a, i8* %b) nounwind {
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 4, i32 4, i1 false)
  ret void
; CHECK: test15:
; CHECK-NEXT: movl	(%rsi), %eax
; CHECK-NEXT: movl	%eax, (%rdi)
; CHECK-NEXT: ret
}
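; The fixed-size 4-byte memcpy is expanded inline into a single 32-bit load
; and store rather than a call to memcpy.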

; Check handling of varargs calls.
declare void @test16callee(...) nounwind
define void @test16() nounwind {
; CHECK: test16:
; CHECK: movl $1, %edi
; CHECK: movb $0, %al
; CHECK: callq _test16callee
  call void (...)* @test16callee(i32 1)
  br label %block2

block2:
; CHECK: movabsq $1
; CHECK: cvtsi2sdq {{.*}} %xmm0
; CHECK: movb $1, %al
; CHECK: callq _test16callee

; AVX: movabsq $1
; AVX: vmovsd LCP{{.*}}_{{.*}}(%rip), %xmm0
; AVX: movb $1, %al
; AVX: callq _test16callee
  call void (...)* @test16callee(double 1.000000e+00)
  ret void
}
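; For a varargs call the x86-64 SysV ABI passes the number of vector registers
; used in %al, which is why the checks expect "movb $0, %al" for the i32 call
; and "movb $1, %al" for the double call.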


declare void @foo() unnamed_addr ssp align 2

; Verify that we don't fold the load into the compare here; doing so would
; move the load across the call.
define i32 @test17(i32 *%P) ssp nounwind {
entry:
  %tmp = load i32* %P
  %cmp = icmp ne i32 %tmp, 5
  call void @foo()
  br i1 %cmp, label %if.then, label %if.else

if.then:                                          ; preds = %entry
  ret i32 1

if.else:                                          ; preds = %entry
  ret i32 2
; CHECK: test17:
; CHECK: movl	(%rdi), %eax
; CHECK: callq _foo
; CHECK: cmpl	$5, %eax
; CHECK-NEXT: je 
}
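; The load stays ahead of the call and the compare is issued afterwards on the
; loaded register, so the memory access is not reordered across @foo.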

; Check that 0.0 is materialized using xorps
define void @test18(float* %p1) {
  store float 0.0, float* %p1
  ret void
; CHECK: test18:
; CHECK: xorps
}

; Without any type hints, doubles use the smaller xorps instead of xorpd.
define void @test19(double* %p1) {
  store double 0.0, double* %p1
  ret void
; CHECK: test19:
; CHECK: xorps
}
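; xorps and xorpd both produce an all-zeros register; xorps is preferred here
; presumably because it has the shorter encoding.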

; Check that fast-isel handles sret calls.
%struct.a = type { i64, i64, i64 }
define void @test20() nounwind ssp {
entry:
  %tmp = alloca %struct.a, align 8
  call void @test20sret(%struct.a* sret %tmp)
  ret void
; CHECK: test20:
; CHECK: leaq (%rsp), %rdi
; CHECK: callq _test20sret
}
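; The sret pointer is passed in %rdi on x86-64, which is what the leaq above
; sets up before the call.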
declare void @test20sret(%struct.a* sret)

; Check that -0.0 is not materialized using xor
define void @test21(double* %p1) {
  store double -0.0, double* %p1
  ret void
; CHECK: test21:
; CHECK-NOT: xor
; CHECK: movsd	LCPI
}
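; Unlike +0.0, -0.0 has its sign bit set, so it cannot be produced by an
; xor-zeroing idiom and is instead loaded from the constant pool.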

; Check that immediate arguments to a function
; do not cause massive spilling and are used
; as immediates just before the call.
define void @test22() nounwind {
entry:
  call void @foo22(i32 0)
  call void @foo22(i32 1)
  call void @foo22(i32 2)
  call void @foo22(i32 3)
  ret void
; CHECK: test22:
; CHECK: movl	$0, %edi
; CHECK: callq	_foo22
; CHECK: movl	$1, %edi
; CHECK: callq	_foo22
; CHECK: movl	$2, %edi
; CHECK: callq	_foo22
; CHECK: movl	$3, %edi
; CHECK: callq	_foo22
}
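; Each constant is rematerialized into %edi right before its call instead of
; being kept alive in registers or spilled across the calls.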

declare void @foo22(i32)
    294