; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s

declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32)
declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32)

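; The operands to these intrinsics are the three scalar sources (held in the
; low element of each <4 x float>), an i8 write mask of which only bit 0
; matters for a scalar op, and an i32 rounding-mode operand; the value 4 used
; throughout this file selects the current rounding mode (no embedded
; rounding). The mask form merges from the first source when bit 0 is clear,
; while the maskz form zeroes the low element instead.
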
define <4 x float> @test_var_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
; CHECK-LABEL: test_var_mask:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT:    retq
  %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask, i32 4)
  ret <4 x float> %res
}

define <4 x float> @test_var_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
; CHECK-LABEL: test_var_maskz:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask, i32 4)
  ret <4 x float> %res
}

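; Constant mask of 0: bit 0 is clear, so the merging form should reduce to
; returning %v0 (i.e. %xmm0 unchanged) and the zeroing form to %v0 with its
; low element cleared; no FMA needs to be emitted (see the FIXMEs below).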
; FIXME: we should just return %xmm0 here.
define <4 x float> @test_const0_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const0_mask:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    retq
  %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 0, i32 4)
  ret <4 x float> %res
}

; FIXME: we should zero the lower element of xmm0 and return it.
define <4 x float> @test_const0_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const0_maskz:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; CHECK-NEXT:    retq
  %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 0, i32 4)
  ret <4 x float> %res
}

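; Constant mask of 2: bit 0 is still clear, so these should fold exactly like
; the mask-of-0 cases above.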
; FIXME: we should just return %xmm0 here.
define <4 x float> @test_const2_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const2_mask:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    retq
  %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 2, i32 4)
  ret <4 x float> %res
}

; FIXME: we should zero the lower element of xmm0 and return it.
define <4 x float> @test_const2_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const2_maskz:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; CHECK-NEXT:    retq
  %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 2, i32 4)
  ret <4 x float> %res
}

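; Constant masks with bit 0 set (-1 and 3) make the masking a no-op, so a
; plain unmasked vfmadd213ss is expected.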
define <4 x float> @test_const_allone_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const_allone_mask:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 -1, i32 4)
  ret <4 x float> %res
}

define <4 x float> @test_const_allone_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const_allone_maskz:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 -1, i32 4)
  ret <4 x float> %res
}

define <4 x float> @test_const_3_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const_3_mask:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 3, i32 4)
  ret <4 x float> %res
}

define <4 x float> @test_const_3_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const_3_maskz:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vfmadd213ss %xmm2, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 3, i32 4)
  ret <4 x float> %res
}