; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
; RUN: llc < %s -march=arm -mcpu=cortex-a8 -regalloc=basic | FileCheck %s -check-prefix=A8

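; Folding of a floating-point negate-multiply-subtract into VNMLA:
; -(a*b) - acc is exactly what VNMLA computes, -(acc + a*b).
; Plain VFP2 and NEON targets should form vnmla directly.  Cortex-A8 is
; expected not to (its VFP multiply-accumulate family is slow), so the A8
; checks below look for a separate vnmul and vsub instead, with the f32
; subtract done on NEON d-registers.

; t1: the negation of the product is written as a subtraction from -0.0.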
define float @t1(float %acc, float %a, float %b) nounwind {
entry:
; VFP2: t1:
; VFP2: vnmla.f32

; NEON: t1:
; NEON: vnmla.f32

; A8: t1:
; A8: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
; A8: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}
  %0 = fmul float %a, %b
  %1 = fsub float -0.0, %0
  %2 = fsub float %1, %acc
  ret float %2
}

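; t2: same as t1, but the negation is written as a multiply by -1.0.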
define float @t2(float %acc, float %a, float %b) nounwind {
entry:
; VFP2: t2:
; VFP2: vnmla.f32

; NEON: t2:
; NEON: vnmla.f32

; A8: t2:
; A8: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}}
; A8: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}
  %0 = fmul float %a, %b
  %1 = fmul float -1.0, %0
  %2 = fsub float %1, %acc
  ret float %2
}

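; t3: double-precision version of t1 (negation via fsub from -0.0).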
define double @t3(double %acc, double %a, double %b) nounwind {
entry:
; VFP2: t3:
; VFP2: vnmla.f64

; NEON: t3:
; NEON: vnmla.f64

; A8: t3:
; A8: vnmul.f64 d1{{[67]}}, d1{{[67]}}, d1{{[67]}}
; A8: vsub.f64 d1{{[67]}}, d1{{[67]}}, d1{{[67]}}
  %0 = fmul double %a, %b
  %1 = fsub double -0.0, %0
  %2 = fsub double %1, %acc
  ret double %2
}

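; t4: double-precision version of t2 (negation via multiply by -1.0).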
define double @t4(double %acc, double %a, double %b) nounwind {
entry:
; VFP2: t4:
; VFP2: vnmla.f64

; NEON: t4:
; NEON: vnmla.f64

; A8: t4:
; A8: vnmul.f64 d1{{[67]}}, d1{{[67]}}, d1{{[67]}}
; A8: vsub.f64 d1{{[67]}}, d1{{[67]}}, d1{{[67]}}
  %0 = fmul double %a, %b
  %1 = fmul double -1.0, %0
  %2 = fsub double %1, %acc
  ret double %2
}