Home | History | Annotate | Download | only in CodeGen

Lines Matching refs:vf

13 vector float vf = { -1.5, 2.5, -3.5, 4.5 };
56 vf = vec_abs(vf); // CHECK: and <4 x i32>
87 res_vf = vec_add(vf, vf); // CHECK: fadd <4 x float>
106 res_vf = vec_vaddfp(vf, vf); // CHECK: fadd <4 x float>
255 res_vf = vec_andc(vf, vf); // CHECK: xor <4 x i32>
258 res_vf = vec_andc(vbi, vf); // CHECK: xor <4 x i32>
261 res_vf = vec_andc(vf, vbi); // CHECK: xor <4 x i32>
324 res_vf = vec_vandc(vf, vf); // CHECK: xor <4 x i32>
327 res_vf = vec_vandc(vbi, vf); // CHECK: xor <4 x i32>
330 res_vf = vec_vandc(vf, vbi); // CHECK: xor <4 x i32>
352 res_vf = vec_ceil(vf); // CHECK: @llvm.ppc.altivec.vrfip
353 res_vf = vec_vrfip(vf); // CHECK: @llvm.ppc.altivec.vrfip
356 res_vi = vec_cmpb(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp
357 res_vi = vec_vcmpbfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp
366 res_vbi = vec_cmpeq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp
369 res_vbi = vec_cmpge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
370 res_vbi = vec_vcmpgefp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
383 res_vbi = vec_cmpgt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
390 res_vbi = vec_vcmpgtfp(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
393 res_vbi = vec_cmple(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp
405 res_vbi = vec_cmplt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp
414 res_vi = vec_cts(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs
415 res_vi = vec_vctsxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctsxs
418 res_vui = vec_ctu(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs
419 res_vui = vec_vctuxs(vf, 0); // CHECK: @llvm.ppc.altivec.vctuxs
437 vec_dstt(&vf, 0, 0); // CHECK: @llvm.ppc.altivec.dstt
440 res_vf = vec_expte(vf); // CHECK: @llvm.ppc.altivec.vexptefp
441 res_vf = vec_vexptefp(vf); // CHECK: @llvm.ppc.altivec.vexptefp
444 res_vf = vec_floor(vf); // CHECK: @llvm.ppc.altivec.vrfim
445 res_vf = vec_vrfim(vf); // CHECK: @llvm.ppc.altivec.vrfim
464 res_vf = vec_ld(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
482 res_vf = vec_lvx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
492 res_vf = vec_lde(0, &vf); // CHECK: @llvm.ppc.altivec.lvewx
499 res_vf = vec_lvewx(0, &vf); // CHECK: @llvm.ppc.altivec.lvewx
518 res_vf = vec_ldl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
536 res_vf = vec_lvxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
540 res_vf = vec_loge(vf); // CHECK: @llvm.ppc.altivec.vlogefp
541 res_vf = vec_vlogefp(vf); // CHECK: @llvm.ppc.altivec.vlogefp
550 res_vf =vec_madd(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp
551 res_vf = vec_vmaddfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vmaddfp
576 res_vf = vec_max(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp
595 res_vf = vec_vmaxfp(vf, vf); // CHECK: @llvm.ppc.altivec.vmaxfp
608 res_vf = vec_mergeh(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
619 res_vf = vec_vmrghw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
632 res_vf = vec_mergel(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
643 res_vf = vec_vmrglw(vf, vf); // CHECK: @llvm.ppc.altivec.vperm
667 res_vf = vec_min(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp
686 res_vf = vec_vminfp(vf, vf); // CHECK: @llvm.ppc.altivec.vminfp
754 res_vf = vec_nmsub(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp
755 res_vf = vec_vnmsubfp(vf, vf, vf); // CHECK: @llvm.ppc.altivec.vnmsubfp
785 res_vf = vec_nor(vf, vf); // CHECK: or <4 x i32>
815 res_vf = vec_vnor(vf, vf); // CHECK: or <4 x i32>
840 res_vf = vec_or(vf, vf); // CHECK: or <4 x i32>
841 res_vf = vec_or(vbi, vf); // CHECK: or <4 x i32>
842 res_vf = vec_or(vf, vbi); // CHECK: or <4 x i32>
864 res_vf = vec_vor(vf, vf); // CHECK: or <4 x i32>
865 res_vf = vec_vor(vbi, vf); // CHECK: or <4 x i32>
866 res_vf = vec_vor(vf, vbi); // CHECK: or <4 x i32>
917 res_vf = vec_perm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm
928 res_vf = vec_vperm(vf, vf, vuc); // CHECK: @llvm.ppc.altivec.vperm
931 res_vf = vec_re(vf); // CHECK: @llvm.ppc.altivec.vrefp
932 res_vf = vec_vrefp(vf); // CHECK: @llvm.ppc.altivec.vrefp
949 res_vf = vec_round(vf); // CHECK: @llvm.ppc.altivec.vrfin
950 res_vf = vec_vrfin(vf); // CHECK: @llvm.ppc.altivec.vrfin
953 res_vf = vec_rsqrte(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp
954 res_vf = vec_vrsqrtefp(vf); // CHECK: @llvm.ppc.altivec.vrsqrtefp
1047 res_vf = vec_sel(vf, vf, vui); // CHECK: xor <4 x i32>
1052 res_vf = vec_sel(vf, vf, vbi); // CHECK: xor <4 x i32>
1147 res_vf = vec_vsel(vf, vf, vui); // CHECK: xor <4 x i32>
1152 res_vf = vec_vsel(vf, vf, vbi); // CHECK: xor <4 x i32>
1179 res_vf = vec_sld(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm
1187 res_vf = vec_vsldoi(vf, vf, 0); // CHECK: @llvm.ppc.altivec.vperm
1266 res_vf = vec_slo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo
1267 res_vf = vec_slo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo
1282 res_vf = vec_vslo(vf, vsc); // CHECK: @llvm.ppc.altivec.vslo
1283 res_vf = vec_vslo(vf, vuc); // CHECK: @llvm.ppc.altivec.vslo
1296 res_vf = vec_splat(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
1307 res_vf = vec_vspltw(vf, 0); // CHECK: @llvm.ppc.altivec.vperm
1435 res_vf = vec_sro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
1436 res_vf = vec_sro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro
1451 res_vf = vec_vsro(vf, vsc); // CHECK: @llvm.ppc.altivec.vsro
1452 res_vf = vec_vsro(vf, vuc); // CHECK: @llvm.ppc.altivec.vsro
1479 vec_st(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
1480 vec_st(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
1505 vec_stvx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvx
1506 vec_stvx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvx
1523 vec_ste(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
1538 vec_stvewx(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvewx
1565 vec_stl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
1566 vec_stl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
1591 vec_stvxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.stvxl
1592 vec_stvxl(vf, 0, &param_f); // CHECK: @llvm.ppc.altivec.stvxl
1613 res_vf = vec_sub(vf, vf); // CHECK: fsub <4 x float>
1632 res_vf = vec_vsubfp(vf, vf); // CHECK: fsub <4 x float>
1693 res_vf = vec_trunc(vf); // CHECK: @llvm.ppc.altivec.vrfiz
1694 res_vf = vec_vrfiz(vf); // CHECK: @llvm.ppc.altivec.vrfiz
1742 res_vf = vec_xor(vf, vf); // CHECK: xor <4 x i32>
1743 res_vf = vec_xor(vbi, vf); // CHECK: xor <4 x i32>
1744 res_vf = vec_xor(vf, vbi); // CHECK: xor <4 x i32>
1766 res_vf = vec_vxor(vf, vf); // CHECK: xor <4 x i32>
1767 res_vf = vec_vxor(vbi, vf); // CHECK: xor <4 x i32>
1768 res_vf = vec_vxor(vf, vbi); // CHECK: xor <4 x i32>
1779 res_f = vec_extract(vf, param_i); // CHECK: extractelement <4 x float>
1788 res_vf = vec_insert(param_f, vf, param_i); // CHECK: insertelement <4 x float>
1871 res_vf = vec_lvlx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
1957 res_vf = vec_lvlxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
2043 res_vf = vec_lvrx(0, &vf); // CHECK: @llvm.ppc.altivec.lvx
2129 res_vf = vec_lvrxl(0, &vf); // CHECK: @llvm.ppc.altivec.lvxl
2263 vec_stvlx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.lvx
2400 vec_stvlxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.lvx
2537 vec_stvrx(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.lvx
2674 vec_stvrxl(vf, 0, &vf); // CHECK: @llvm.ppc.altivec.lvx
2744 res_i = vec_all_eq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
2768 res_i = vec_all_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
2792 res_i = vec_all_gt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
2795 res_i = vec_all_in(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp.p
2819 res_i = vec_all_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
2843 res_i = vec_all_lt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
2846 res_i = vec_all_nan(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
2871 res_i = vec_all_ne(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
2874 res_i = vec_all_nge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
2877 res_i = vec_all_ngt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
2880 res_i = vec_all_nle(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
2883 res_i = vec_all_nlt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
2886 res_i = vec_all_numeric(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
2911 res_i = vec_any_eq(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
2935 res_i = vec_any_ge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
2959 res_i = vec_any_gt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
2983 res_i = vec_any_le(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
3007 res_i = vec_any_lt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
3010 res_i = vec_any_nan(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
3035 res_i = vec_any_ne(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
3038 res_i = vec_any_nge(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
3041 res_i = vec_any_ngt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
3044 res_i = vec_any_nle(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgefp.p
3047 res_i = vec_any_nlt(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpgtfp.p
3050 res_i = vec_any_numeric(vf); // CHECK: @llvm.ppc.altivec.vcmpeqfp.p
3053 res_i = vec_any_out(vf, vf); // CHECK: @llvm.ppc.altivec.vcmpbfp.p