; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=+sse2 | FileCheck %s
; PR2656

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin9.4.0"

%struct.anon = type <{ float, float }>
@.str = internal constant [17 x i8] c"pt: %.0f, %.0f\0A\00\00"		; <[17 x i8]*> [#uses=1]

; We cannot fold either stack load into an 'xor' instruction because that
; would change what should be a 4-byte load into a 16-byte load.
; We could fold the 16-byte constant load into either 'xor' instruction,
; but we do not: it has more than one use, so it is loaded into a register
; once and reused.

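; For reference, a C-level sketch of the pattern this test exercises (a hedged
; reconstruction of the PR2656 reproducer; the exact source may differ):
;
;   #include <stdio.h>
;   struct anon { float x, y; };
;   void foo(struct anon p) {                   /* passed byval on the stack */
;     printf("pt: %.0f, %.0f\n", -p.x, -p.y);   /* each negation -> xorps    */
;   }
;
; Negating a float flips only its sign bit, so both fsubs lower to 'xorps'
; against the same 16-byte splat of -0.0 (the movaps constant in the CHECK
; lines below).
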
define void @foo(%struct.anon* byval %p) nounwind {
; CHECK-LABEL: foo:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    subl $28, %esp
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    movaps {{.*#+}} xmm2 = [-0.000000e+00,-0.000000e+00,-0.000000e+00,-0.000000e+00]
; CHECK-NEXT:    xorps %xmm2, %xmm0
; CHECK-NEXT:    cvtss2sd %xmm0, %xmm0
; CHECK-NEXT:    xorps %xmm2, %xmm1
; CHECK-NEXT:    cvtss2sd %xmm1, %xmm1
; CHECK-NEXT:    movsd %xmm1, {{[0-9]+}}(%esp)
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%esp)
; CHECK-NEXT:    movl $_.str, (%esp)
; CHECK-NEXT:    calll _printf
; CHECK-NEXT:    addl $28, %esp
; CHECK-NEXT:    retl
entry:
	%tmp = getelementptr %struct.anon, %struct.anon* %p, i32 0, i32 0		; <float*> [#uses=1]
	%tmp1 = load float, float* %tmp		; <float> [#uses=1]
	%tmp2 = getelementptr %struct.anon, %struct.anon* %p, i32 0, i32 1		; <float*> [#uses=1]
	%tmp3 = load float, float* %tmp2		; <float> [#uses=1]
	%neg = fsub float -0.000000e+00, %tmp1		; <float> [#uses=1]
	%conv = fpext float %neg to double		; <double> [#uses=1]
	%neg4 = fsub float -0.000000e+00, %tmp3		; <float> [#uses=1]
	%conv5 = fpext float %neg4 to double		; <double> [#uses=1]
	%call = call i32 (...) @printf( i8* getelementptr ([17 x i8], [17 x i8]* @.str, i32 0, i32 0), double %conv, double %conv5 )		; <i32> [#uses=0]
	ret void
}

declare i32 @printf(...)

; We cannot fold the load from the stack into the 'and' instruction because
; that would change an 8-byte load into a 16-byte load (an illegal memory access).
; We can fold the load of the constant because it is a 16-byte vector constant.

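; A corresponding C sketch for PR22371 (hedged; illustrative only):
;
;   #include <math.h>
;   double PR22371(double x) { return fabs(x); }   /* fabs -> andps LCPI1_0 */
;
; The sign-clearing mask is a 16-byte constant-pool entry (LCPI1_0), which is
; why folding its load is legal while folding the 8-byte stack load is not.
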
define double @PR22371(double %x) {
; CHECK-LABEL: PR22371:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    subl $12, %esp
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    andps LCPI1_0, %xmm0
; CHECK-NEXT:    movlps %xmm0, (%esp)
; CHECK-NEXT:    fldl (%esp)
; CHECK-NEXT:    addl $12, %esp
; CHECK-NEXT:    retl
  %call = tail call double @fabs(double %x) #0
  ret double %call
}

declare double @fabs(double) #0
attributes #0 = { readnone }