; RUN: llc < %s -march=ppc32 -mattr=+altivec --enable-unsafe-fp-math | FileCheck %s

define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
        %tmp = load <4 x float>* %P3            ; <<4 x float>> [#uses=1]
        %tmp3 = load <4 x float>* %P1           ; <<4 x float>> [#uses=1]
        %tmp4 = fmul <4 x float> %tmp, %tmp3             ; <<4 x float>> [#uses=1]
        store <4 x float> %tmp4, <4 x float>* %P3
        store <4 x float> zeroinitializer, <4 x float>* %P1
        store <4 x i32> zeroinitializer, <4 x i32>* %P2
        ret void
}
; The fmul will spill a vspltisw to create a -0.0 vector used as the addend
; to vmaddfp (so it would be IEEE compliant with zero sign propagation).
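; The zeroinitializer stores, in turn, should be materialized as a vxor of a
; register with itself rather than a constant-pool load, which is what the
; vxor check below verifies.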
; CHECK: @VXOR
; CHECK: vsplti
; CHECK: vxor

define void @VSPLTI(<4 x i32>* %P2, <8 x i16>* %P3) {
        store <4 x i32> bitcast (<16 x i8> < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > to <4 x i32>), <4 x i32>* %P2
        store <8 x i16> < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >, <8 x i16>* %P3
        ret void
}
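; The all-ones stores should be built with a single vsplti immediate
; (splatting -1) rather than loaded from the constant pool.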
; CHECK: @VSPLTI
; CHECK: vsplti