
Lines Matching refs:vbs

6 vector bool short vbs = { 1, 0, 1, 0, 1, 0, 1, 0 };
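
These matches appear to come from Clang's AltiVec CodeGen test exercising the vector bool short type; line 6 above is the declaration of the bool operand itself. A minimal sketch of the other declarations the matched lines assume (names are taken from the listing; types are inferred from usage, and the real test declares many more):

    #include <altivec.h>

    vector bool short     vbs = { 1, 0, 1, 0, 1, 0, 1, 0 };  /* line 6 above */
    vector signed short   vs;       /* signed halfword operand          */
    vector unsigned short vus;      /* unsigned halfword operand        */
    vector unsigned char  vuc;      /* vec_perm control / shift counts  */
    vector unsigned int   vui;      /* alternate shift-count operand    */
    vector signed short   res_vs;   /* signed results                   */
    vector unsigned short res_vus;  /* unsigned results                 */
    vector bool short     res_vbs;  /* bool results                     */
    vector bool char      res_vbc;  /* vec_pack result                  */
    vector bool int       res_vbi;  /* vec_unpack results               */
    int res_i;                      /* predicate results                */
    short param_s;                  /* scalar store targets             */
    unsigned short param_us;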
76 res_vs = vec_add(vbs, vs); // CHECK: add <8 x i16>
77 res_vs = vec_add(vs, vbs); // CHECK: add <8 x i16>
79 res_vus = vec_add(vbs, vus); // CHECK: add <8 x i16>
80 res_vus = vec_add(vus, vbs); // CHECK: add <8 x i16>
95 res_vs = vec_vadduhm(vbs, vs); // CHECK: add <8 x i16>
96 res_vs = vec_vadduhm(vs, vbs); // CHECK: add <8 x i16>
98 res_vus = vec_vadduhm(vbs, vus); // CHECK: add <8 x i16>
99 res_vus = vec_vadduhm(vus, vbs); // CHECK: add <8 x i16>
120 res_vs = vec_adds(vbs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
121 res_vs = vec_adds(vs, vbs); // CHECK: @llvm.ppc.altivec.vaddshs
123 res_vus = vec_adds(vbs, vus); // CHECK: @llvm.ppc.altivec.vadduhs
124 res_vus = vec_adds(vus, vbs); // CHECK: @llvm.ppc.altivec.vadduhs
138 res_vs = vec_vaddshs(vbs, vs); // CHECK: @llvm.ppc.altivec.vaddshs
139 res_vs = vec_vaddshs(vs, vbs); // CHECK: @llvm.ppc.altivec.vaddshs
141 res_vus = vec_vadduhs(vbs, vus); // CHECK: @llvm.ppc.altivec.vadduhs
142 res_vus = vec_vadduhs(vus, vbs); // CHECK: @llvm.ppc.altivec.vadduhs
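
vec_add performs element-wise modular addition, while vec_adds saturates at the type's bounds; the vadduhm/vaddshs/vadduhs names matched above are the halfword-specific forms the generic intrinsics dispatch to. A standalone sketch of the difference (not from the test file):

    #include <altivec.h>

    void add_demo(vector signed short *wrapped, vector signed short *clamped) {
        vector signed short max16 = vec_splats((short)32767);
        vector signed short one   = vec_splats((short)1);
        *wrapped = vec_add(max16, one);   /* 32767 + 1 wraps to -32768 */
        *clamped = vec_adds(max16, one);  /* vaddshs clamps at 32767   */
    }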
159 res_vs = vec_and(vbs, vs); // CHECK: and <8 x i16>
160 res_vs = vec_and(vs, vbs); // CHECK: and <8 x i16>
162 res_vus = vec_and(vbs, vus); // CHECK: and <8 x i16>
163 res_vus = vec_and(vus, vbs); // CHECK: and <8 x i16>
164 res_vbs = vec_and(vbs, vbs); // CHECK: and <8 x i16>
180 res_vs = vec_vand(vbs, vs); // CHECK: and <8 x i16>
181 res_vs = vec_vand(vs, vbs); // CHECK: and <8 x i16>
183 res_vus = vec_vand(vbs, vus); // CHECK: and <8 x i16>
184 res_vus = vec_vand(vus, vbs); // CHECK: and <8 x i16>
185 res_vbs = vec_vand(vbs, vbs); // CHECK: and <8 x i16>
219 res_vs = vec_andc(vbs, vs); // CHECK: xor <8 x i16>
222 res_vs = vec_andc(vs, vbs); // CHECK: xor <8 x i16>
228 res_vus = vec_andc(vbs, vus); // CHECK: xor <8 x i16>
231 res_vus = vec_andc(vus, vbs); // CHECK: xor <8 x i16>
234 res_vbs = vec_andc(vbs, vbs); // CHECK: xor <8 x i16>
288 res_vs = vec_vandc(vbs, vs); // CHECK: xor <8 x i16>
291 res_vs = vec_vandc(vs, vbs); // CHECK: xor <8 x i16>
297 res_vus = vec_vandc(vbs, vus); // CHECK: xor <8 x i16>
300 res_vus = vec_vandc(vus, vbs); // CHECK: xor <8 x i16>
303 res_vbs = vec_vandc(vbs, vbs); // CHECK: xor <8 x i16>
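
vec_andc(a, b) computes a & ~b (note the operand order). Clang materializes the complement as an xor with all-ones followed by an and, which is why the CHECK lines above match the xor. A common use is clearing the lanes selected by a bool mask (helper name is illustrative):

    #include <altivec.h>

    /* Zero the lanes where the mask is all-ones. */
    vector signed short clear_selected(vector signed short v, vector bool short mask) {
        return vec_andc(v, mask);   /* v & ~mask */
    }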
457 res_vbs = vec_ld(0, &vbs); // CHECK: @llvm.ppc.altivec.lvx
475 res_vbs = vec_lvx(0, &vbs); // CHECK: @llvm.ppc.altivec.lvx
511 res_vbs = vec_ldl(0, &vbs); // CHECK: @llvm.ppc.altivec.lvxl
529 res_vbs = vec_lvxl(0, &vbs); // CHECK: @llvm.ppc.altivec.lvxl
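
vec_ld/vec_lvx load 16 bytes from an effective address truncated to a 16-byte boundary (lvx ignores the low four address bits); vec_ldl/vec_lvxl are the same load with the cache line marked least-recently-used. Sketch:

    #include <altivec.h>

    vector bool short load_mask(const vector bool short *p) {
        return vec_ld(0, p);   /* byte offset 0 from an aligned vector */
    }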
565 res_vs = vec_max(vbs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
566 res_vs = vec_max(vs, vbs); // CHECK: @llvm.ppc.altivec.vmaxsh
568 res_vus = vec_max(vbs, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
569 res_vus = vec_max(vus, vbs); // CHECK: @llvm.ppc.altivec.vmaxuh
584 res_vs = vec_vmaxsh(vbs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh
585 res_vs = vec_vmaxsh(vs, vbs); // CHECK: @llvm.ppc.altivec.vmaxsh
587 res_vus = vec_vmaxuh(vbs, vus); // CHECK: @llvm.ppc.altivec.vmaxuh
588 res_vus = vec_vmaxuh(vus, vbs); // CHECK: @llvm.ppc.altivec.vmaxuh
604 res_vbs = vec_mergeh(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
615 res_vbs = vec_vmrghh(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
628 res_vbs = vec_mergel(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
639 res_vbs = vec_vmrglh(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
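
vec_mergeh/vec_mergel interleave the high or low four lanes of their two operands; like several other shuffles in this file they lower to a vperm with a constant control vector, hence the shared CHECK. Sketch:

    #include <altivec.h>

    /* { a0, b0, a1, b1, a2, b2, a3, b3 } from the high halves
       (element order as in the big-endian AltiVec model). */
    vector bool short interleave_high(vector bool short a, vector bool short b) {
        return vec_mergeh(a, b);
    }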
656 res_vs = vec_min(vbs, vs); // CHECK: @llvm.ppc.altivec.vminsh
657 res_vs = vec_min(vs, vbs); // CHECK: @llvm.ppc.altivec.vminsh
659 res_vus = vec_min(vbs, vus); // CHECK: @llvm.ppc.altivec.vminuh
660 res_vus = vec_min(vus, vbs); // CHECK: @llvm.ppc.altivec.vminuh
675 res_vs = vec_vminsh(vbs, vs); // CHECK: @llvm.ppc.altivec.vminsh
676 res_vs = vec_vminsh(vs, vbs); // CHECK: @llvm.ppc.altivec.vminsh
678 res_vus = vec_vminuh(vbs, vus); // CHECK: @llvm.ppc.altivec.vminuh
679 res_vus = vec_vminuh(vus, vbs); // CHECK: @llvm.ppc.altivec.vminuh
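
vec_max/vec_min pick the per-lane extremum; a bool operand mixed with a signed or unsigned one resolves to the signed (vmaxsh/vminsh) or unsigned (vmaxuh/vminuh) form, as the matched CHECKs show. Composing the two gives a branch-free clamp:

    #include <altivec.h>

    /* Clamp every lane of v into [lo, hi]. */
    vector signed short clamp_s16(vector signed short v,
                                  vector signed short lo,
                                  vector signed short hi) {
        return vec_min(vec_max(v, lo), hi);   /* vmaxsh then vminsh */
    }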
727 vec_mtvscr(vbs); // CHECK: @llvm.ppc.altivec.mtvscr
773 res_vus = vec_nor(vbs, vbs); // CHECK: or <8 x i16>
803 res_vus = vec_vnor(vbs, vbs); // CHECK: or <8 x i16>
827 res_vs = vec_or(vbs, vs); // CHECK: or <8 x i16>
828 res_vs = vec_or(vs, vbs); // CHECK: or <8 x i16>
830 res_vus = vec_or(vbs, vus); // CHECK: or <8 x i16>
831 res_vus = vec_or(vus, vbs); // CHECK: or <8 x i16>
832 res_vbs = vec_or(vbs, vbs); // CHECK: or <8 x i16>
851 res_vs = vec_vor(vbs, vs); // CHECK: or <8 x i16>
852 res_vs = vec_vor(vs, vbs); // CHECK: or <8 x i16>
854 res_vus = vec_vor(vbs, vus); // CHECK: or <8 x i16>
855 res_vus = vec_vor(vus, vbs); // CHECK: or <8 x i16>
856 res_vbs = vec_vor(vbs, vbs); // CHECK: or <8 x i16>
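
vec_nor computes ~(a | b), and the CHECK lines match the or that feeds the complement. vec_nor(x, x) is the one-instruction bitwise NOT idiom:

    #include <altivec.h>

    vector bool short invert_mask(vector bool short m) {
        return vec_nor(m, m);   /* ~(m | m) == ~m */
    }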
871 res_vbc = vec_pack(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
877 res_vbc = vec_vpkuhum(vbs, vbs); // CHECK: @llvm.ppc.altivec.vperm
912 res_vbs = vec_perm(vbs, vbs, vuc); // CHECK: @llvm.ppc.altivec.vperm
923 res_vbs = vec_vperm(vbs, vbs, vuc); // CHECK: @llvm.ppc.altivec.vperm
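
vec_pack narrows two halfword vectors into one byte vector by truncation (vpkuhum), and vec_perm shuffles arbitrary bytes of the concatenated pair under a vuc control vector; the pack, like the merges above, is lowered through vperm, hence the shared CHECK. Sketch:

    #include <altivec.h>

    /* Narrow a pair of 8-lane halfword masks to one 16-lane byte mask;
       truncation maps 0x0000/0xFFFF lanes to 0x00/0xFF. */
    vector bool char narrow_masks(vector bool short hi, vector bool short lo) {
        return vec_pack(hi, lo);
    }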
992 res_vs = vec_sel(vs, vs, vbs); // CHECK: xor <8 x i16>
1002 res_vus = vec_sel(vus, vus, vbs); // CHECK: xor <8 x i16>
1007 res_vbs = vec_sel(vbs, vbs, vus); // CHECK: xor <8 x i16>
1012 res_vbs = vec_sel(vbs, vbs, vbs); // CHECK: xor <8 x i16>
1092 res_vs = vec_vsel(vs, vs, vbs); // CHECK: xor <8 x i16>
1102 res_vus = vec_vsel(vus, vus, vbs); // CHECK: xor <8 x i16>
1107 res_vbs = vec_vsel(vbs, vbs, vus); // CHECK: xor <8 x i16>
1112 res_vbs = vec_vsel(vbs, vbs, vbs); // CHECK: xor <8 x i16>
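
vec_sel(a, b, m) takes each bit from b where m is set and from a where it is clear, i.e. (a & ~m) | (b & m); clang expands it with xor/and/or, which is why the CHECKs match an xor. Paired with a compare it gives a branch-free per-lane select:

    #include <altivec.h>

    /* out[i] = (a[i] > b[i]) ? a[i] : b[i], with no branches. */
    vector signed short max_by_select(vector signed short a, vector signed short b) {
        vector bool short gt = vec_cmpgt(a, b);   /* all-ones where a > b */
        return vec_sel(b, a, gt);                 /* take a where gt set  */
    }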
1205 res_vbs = vec_sll(vbs, vuc); // CHECK: @llvm.ppc.altivec.vsl
1206 res_vbs = vec_sll(vbs, vus); // CHECK: @llvm.ppc.altivec.vsl
1207 res_vbs = vec_sll(vbs, vui); // CHECK: @llvm.ppc.altivec.vsl
1235 res_vbs = vec_vsl(vbs, vuc); // CHECK: @llvm.ppc.altivec.vsl
1236 res_vbs = vec_vsl(vbs, vus); // CHECK: @llvm.ppc.altivec.vsl
1237 res_vbs = vec_vsl(vbs, vui); // CHECK: @llvm.ppc.altivec.vsl
1291 res_vbs = vec_splat(vbs, 0); // CHECK: @llvm.ppc.altivec.vperm
1302 res_vbs = vec_vsplth(vbs, 0); // CHECK: @llvm.ppc.altivec.vperm
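
vec_splat replicates one lane across the whole vector; the lane index must be a compile-time constant (0-7 for halfwords), and the broadcast is again lowered through vperm. A thin illustrative sketch:

    #include <altivec.h>

    /* Broadcast lane 0 of the mask to all eight lanes. */
    vector bool short broadcast_lane0(vector bool short m) {
        return vec_splat(m, 0);
    }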
1374 res_vbs = vec_srl(vbs, vuc); // CHECK: @llvm.ppc.altivec.vsr
1375 res_vbs = vec_srl(vbs, vus); // CHECK: @llvm.ppc.altivec.vsr
1376 res_vbs = vec_srl(vbs, vui); // CHECK: @llvm.ppc.altivec.vsr
1404 res_vbs = vec_vsr(vbs, vuc); // CHECK: @llvm.ppc.altivec.vsr
1405 res_vbs = vec_vsr(vbs, vus); // CHECK: @llvm.ppc.altivec.vsr
1406 res_vbs = vec_vsr(vbs, vui); // CHECK: @llvm.ppc.altivec.vsr
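
vec_sll/vec_srl shift the entire 128-bit register left or right by a bit count of 0-7 taken from the second operand (vec_slo/vec_sro handle whole-byte shifts); all three unsigned element types are accepted for the count, which is why each row repeats with vuc, vus, and vui. Sketch:

    #include <altivec.h>

    /* Shift the whole 128-bit value left by `bits` (0-7); the count
       is replicated so every byte of n agrees, as vsl requires. */
    vector bool short shl_bits(vector bool short v, unsigned char bits) {
        vector unsigned char n = vec_splats(bits);
        return vec_sll(v, n);
    }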
1466 vec_st(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
1467 vec_st(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
1468 vec_st(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.stvx
1492 vec_stvx(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx
1493 vec_stvx(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvx
1494 vec_stvx(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.stvx
1515 vec_ste(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
1516 vec_ste(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
1530 vec_stvehx(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx
1531 vec_stvehx(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvehx
1552 vec_stl(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
1553 vec_stl(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
1554 vec_stl(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.stvxl
1578 vec_stvxl(vbs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl
1579 vec_stvxl(vbs, 0, &param_us); // CHECK: @llvm.ppc.altivec.stvxl
1580 vec_stvxl(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.stvxl
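
vec_st/vec_stvx store all 16 bytes to a 16-byte-aligned effective address; vec_ste (stvehx) stores a single halfword whose lane is chosen by the effective address; vec_stl/vec_stvxl additionally mark the cache line least-recently-used. A sketch of the single-element form (helper name is illustrative):

    #include <altivec.h>

    /* Store one 16-bit lane of v at dst; stvehx picks the lane
       from the effective address: index = (ea % 16) / 2. */
    void store_one_lane(vector unsigned short v, unsigned short *dst) {
        vec_ste(v, 0, dst);
    }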
1602 res_vs = vec_sub(vbs, vs); // CHECK: sub <8 x i16>
1603 res_vs = vec_sub(vs, vbs); // CHECK: sub <8 x i16>
1605 res_vus = vec_sub(vbs, vus); // CHECK: sub <8 x i16>
1606 res_vus = vec_sub(vus, vbs); // CHECK: sub <8 x i16>
1621 res_vs = vec_vsubuhm(vbs, vus); // CHECK: sub <8 x i16>
1622 res_vs = vec_vsubuhm(vus, vbs); // CHECK: sub <8 x i16>
1624 res_vus = vec_vsubuhm(vbs, vus); // CHECK: sub <8 x i16>
1625 res_vus = vec_vsubuhm(vus, vbs); // CHECK: sub <8 x i16>
1646 res_vs = vec_subs(vbs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
1647 res_vs = vec_subs(vs, vbs); // CHECK: @llvm.ppc.altivec.vsubshs
1649 res_vus = vec_subs(vbs, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
1650 res_vus = vec_subs(vus, vbs); // CHECK: @llvm.ppc.altivec.vsubuhs
1664 res_vs = vec_vsubshs(vbs, vs); // CHECK: @llvm.ppc.altivec.vsubshs
1665 res_vs = vec_vsubshs(vs, vbs); // CHECK: @llvm.ppc.altivec.vsubshs
1667 res_vus = vec_vsubuhs(vbs, vus); // CHECK: @llvm.ppc.altivec.vsubuhs
1668 res_vus = vec_vsubuhs(vus, vbs); // CHECK: @llvm.ppc.altivec.vsubuhs
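
vec_sub wraps while vec_subs saturates; for unsigned elements saturation clamps at zero, which gives the classic branch-free absolute-difference idiom:

    #include <altivec.h>

    /* |a - b| per unsigned lane: one of the two saturating
       subtractions is zero, the other is the difference. */
    vector unsigned short absdiff_u16(vector unsigned short a,
                                      vector unsigned short b) {
        return vec_or(vec_subs(a, b), vec_subs(b, a));
    }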
1700 res_vbi = vec_unpackh(vbs); // CHECK: @llvm.ppc.altivec.vupkhsh
1705 res_vbi = vec_vupkhsh(vbs); // CHECK: @llvm.ppc.altivec.vupkhsh
1712 res_vbi = vec_unpackl(vbs); // CHECK: @llvm.ppc.altivec.vupklsh
1717 res_vbi = vec_vupklsh(vbs); // CHECK: @llvm.ppc.altivec.vupklsh
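
vec_unpackh/vec_unpackl widen the high or low four halfword lanes to words with sign extension (vupkhsh/vupklsh); on a bool vector the all-ones/all-zeros pattern survives extension, so the result is a vector bool int, as the res_vbi assignments above show. Sketch:

    #include <altivec.h>

    /* Widen an 8-lane halfword mask to two 4-lane word masks. */
    void widen_mask(vector bool short m, vector bool int *hi, vector bool int *lo) {
        *hi = vec_unpackh(m);   /* vupkhsh: lanes 0-3 */
        *lo = vec_unpackl(m);   /* vupklsh: lanes 4-7 */
    }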
1729 res_vs = vec_xor(vbs, vs); // CHECK: xor <8 x i16>
1730 res_vs = vec_xor(vs, vbs); // CHECK: xor <8 x i16>
1732 res_vus = vec_xor(vbs, vus); // CHECK: xor <8 x i16>
1733 res_vus = vec_xor(vus, vbs); // CHECK: xor <8 x i16>
1734 res_vbs = vec_xor(vbs, vbs); // CHECK: xor <8 x i16>
1753 res_vs = vec_vxor(vbs, vs); // CHECK: xor <8 x i16>
1754 res_vs = vec_vxor(vs, vbs); // CHECK: xor <8 x i16>
1756 res_vus = vec_vxor(vbs, vus); // CHECK: xor <8 x i16>
1757 res_vus = vec_vxor(vus, vbs); // CHECK: xor <8 x i16>
1758 res_vbs = vec_vxor(vbs, vbs); // CHECK: xor <8 x i16>
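
As with the other logical ops, a bool operand mixed with a signed or unsigned one resolves to that element type. vec_xor of a value with itself is the standard load-free zeroing idiom:

    #include <altivec.h>

    /* x ^ x is all-zero in every lane; no constant load needed. */
    vector signed short zeroed(vector signed short x) {
        return vec_xor(x, x);
    }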
1836 res_vbs = vec_lvlx(0, &vbs); // CHECK: @llvm.ppc.altivec.lvx
1922 res_vbs = vec_lvlxl(0, &vbs); // CHECK: @llvm.ppc.altivec.lvxl
2008 res_vbs = vec_lvrx(0, &vbs); // CHECK: store <8 x i16> zeroinitializer
2094 res_vbs = vec_lvrxl(0, &vbs); // CHECK: store <8 x i16> zeroinitializer
2207 vec_stvlx(vbs, 0, &vbs); // CHECK: store <8 x i16> zeroinitializer
2344 vec_stvlxl(vbs, 0, &vbs); // CHECK: store <8 x i16> zeroinitializer
2481 vec_stvrx(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.lvx
2618 vec_stvrxl(vbs, 0, &vbs); // CHECK: @llvm.ppc.altivec.lvx
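
vec_lvlx/vec_lvrx (Cell/PPU-style extensions carried in Clang's altivec.h) load the bytes of an unaligned 16-byte span that fall to the left and right of a block boundary, and vec_stvlx/vec_stvrx are the store-side counterparts; the CHECKs above match pieces of their software expansion (an lvx, or a zeroinitializer store) rather than dedicated opcodes. A hedged sketch of the usual load idiom, assuming the standard 0/16 offset pairing:

    #include <altivec.h>

    /* Unaligned 16-byte load: left part from the block containing p,
       right part from the following block, OR-combined. */
    vector unsigned short load_u16x8_unaligned(const unsigned short *p) {
        return vec_or(vec_lvlx(0, p), vec_lvrx(16, p));
    }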
2730 res_i = vec_all_eq(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2732 res_i = vec_all_eq(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2733 res_i = vec_all_eq(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2734 res_i = vec_all_eq(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2735 res_i = vec_all_eq(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2755 res_i = vec_all_ge(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
2757 res_i = vec_all_ge(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2758 res_i = vec_all_ge(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2759 res_i = vec_all_ge(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2760 res_i = vec_all_ge(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2779 res_i = vec_all_gt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
2781 res_i = vec_all_gt(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2782 res_i = vec_all_gt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2783 res_i = vec_all_gt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2784 res_i = vec_all_gt(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2806 res_i = vec_all_le(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
2808 res_i = vec_all_le(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2809 res_i = vec_all_le(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2810 res_i = vec_all_le(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2811 res_i = vec_all_le(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2830 res_i = vec_all_lt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
2832 res_i = vec_all_lt(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2833 res_i = vec_all_lt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2834 res_i = vec_all_lt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2835 res_i = vec_all_lt(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2857 res_i = vec_all_ne(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2859 res_i = vec_all_ne(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2860 res_i = vec_all_ne(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2861 res_i = vec_all_ne(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2862 res_i = vec_all_ne(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2897 res_i = vec_any_eq(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2899 res_i = vec_any_eq(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2900 res_i = vec_any_eq(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2901 res_i = vec_any_eq(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2902 res_i = vec_any_eq(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
2922 res_i = vec_any_ge(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
2924 res_i = vec_any_ge(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2925 res_i = vec_any_ge(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2926 res_i = vec_any_ge(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2927 res_i = vec_any_ge(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2946 res_i = vec_any_gt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
2948 res_i = vec_any_gt(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2949 res_i = vec_any_gt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2950 res_i = vec_any_gt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2951 res_i = vec_any_gt(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2970 res_i = vec_any_le(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
2972 res_i = vec_any_le(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2973 res_i = vec_any_le(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2974 res_i = vec_any_le(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2975 res_i = vec_any_le(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2994 res_i = vec_any_lt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p
2996 res_i = vec_any_lt(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2997 res_i = vec_any_lt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2998 res_i = vec_any_lt(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
2999 res_i = vec_any_lt(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p
3021 res_i = vec_any_ne(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
3023 res_i = vec_any_ne(vus, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
3024 res_i = vec_any_ne(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
3025 res_i = vec_any_ne(vbs, vus); // CHECK: @llvm.ppc.altivec.vcmpequh.p
3026 res_i = vec_any_ne(vbs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p
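
Each vec_all_*/vec_any_* predicate returns a scalar int computed from a single dot-form compare (the .p intrinsics set a condition-register bit). There is no hardware ge/le compare, so vec_all_ge(a, b) is emitted as "no lane has a < b", which is why the ge, le, and lt rows all check vcmpgtsh.p or vcmpgtuh.p. Typical use is branching on a whole vector at once:

    #include <altivec.h>

    /* One compare plus one CR read instead of eight scalar tests. */
    int all_lanes_positive(vector signed short v) {
        return vec_all_gt(v, vec_splats((short)0));   /* vcmpgtsh. */
    }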