Lines Matching refs:reg1

481   v16u8 reg0, reg1, reg2, reg3;
509 reg1 = (v16u8)__msa_sldi_b((v16i8)reg2, (v16i8)reg0, 11);
511 dst1 = (v16u8)__msa_vshf_b(shuffler1, (v16i8)reg3, (v16i8)reg1);
570 v8u16 reg0, reg1, reg2;
586 reg1 = (v8u16)__msa_srai_h(vec1, 4);
588 reg1 = (v8u16)__msa_slli_h((v8i16)reg1, 4);
590 reg1 |= const_0xF000;
592 dst0 = (v16u8)(reg1 | reg0);
610 v8u16 reg0, reg1, reg2;
626 reg1 = (v8u16)__msa_srai_h(vec1, 3);
628 reg1 = (v8u16)__msa_slli_h((v8i16)reg1, 5);
630 reg1 |= const_0x8000;
632 dst0 = (v16u8)(reg1 | reg0);
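
The matches at 570-592 and 610-632 come from rows that pack 8-bit ARGB channels into 16-bit pixel formats (the 0xF000 mask points at an ARGB4444-style layout, the 0x8000 bit at ARGB1555): each field is shifted into place and OR-ed together. A minimal scalar sketch of the ARGB1555 case, with an illustrative helper name:

    #include <stdint.h>

    /* Scalar picture of the shift-and-OR packing above; PackARGB1555 is an
       illustrative name, not necessarily the routine these lines belong to. */
    static inline uint16_t PackARGB1555(uint8_t b, uint8_t g, uint8_t r, uint8_t a) {
      return (uint16_t)((b >> 3) |
                        ((uint16_t)(g >> 3) << 5) |
                        ((uint16_t)(r >> 3) << 10) |
                        ((uint16_t)(a >> 7) << 15));
    }
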
774 v8u16 reg0, reg1, reg2, reg3, reg4, reg5;
791 reg1 = (v8u16)__msa_ilvev_b(zero, (v16i8)vec1);
797 reg1 *= const_0x19;
803 reg1 += reg3;
805 reg1 += reg5;
807 reg1 += const_0x1080;
809 reg1 = (v8u16)__msa_srai_h((v8i16)reg1, 8);
810 dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
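
The 774-810 block accumulates a per-pixel weighted sum: one widened channel is multiplied by 0x19 (25), two more products are added in, a 0x1080 offset is applied, and the total is shifted right by 8 before packing. Those constants match the usual fixed-point BT.601 luma form, so a hedged scalar sketch looks like this (the 66/129 weights are assumed; only 0x19 and 0x1080 are visible above):

    #include <stdint.h>

    /* Hedged scalar sketch of the luma sum seen above; helper name is illustrative. */
    static inline uint8_t RGBToYApprox(uint8_t r, uint8_t g, uint8_t b) {
      return (uint8_t)((66 * r + 129 * g + 25 * b + 0x1080) >> 8);
    }
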
826 v8u16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9;
859 reg1 = __msa_hadd_u_h(vec9, vec9);
887 reg1 += __msa_hadd_u_h(vec9, vec9);
893 reg1 = (v8u16)__msa_srai_h((v8i16)reg1, 2);
899 reg7 = reg1 * const_0x70;
907 reg1 *= const_0x12;
913 reg3 += reg1;
1085 v16u8 src0, src1, src2, src3, reg0, reg1, reg2, reg3, dst0, dst1;
1102 reg1 = (v16u8)__msa_pckev_b((v16i8)src3, (v16i8)src2);
1105 src0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
1107 src2 = (v16u8)__msa_pckod_b((v16i8)reg1, (v16i8)reg0);
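
In 1085-1107 the __msa_pckev_b / __msa_pckod_b pair splits interleaved bytes into even-indexed and odd-indexed streams, the usual way to deinterleave two planes. A scalar view of that split, with an illustrative helper name:

    #include <stdint.h>

    /* Scalar view of the pckev_b / pckod_b pair above (SplitEvenOdd is illustrative). */
    static void SplitEvenOdd(const uint8_t* src, uint8_t* even, uint8_t* odd, int pairs) {
      int i;
      for (i = 0; i < pairs; ++i) {
        even[i] = src[2 * i];      /* pckev_b keeps even-indexed bytes */
        odd[i]  = src[2 * i + 1];  /* pckod_b keeps odd-indexed bytes  */
      }
    }
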
1159 v4u32 reg0, reg1, reg2, reg3;
1170 reg1 = (v4u32)__msa_ilvl_h(zero, (v8i16)vec0);
1174 reg1 *= (v4u32)__msa_ilvl_h(zero, (v8i16)vec2);
1178 reg1 = (v4u32)__msa_srai_w((v4i32)reg1, 16);
1181 vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
1237 v4u32 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
1261 reg1 = (v4u32)__msa_ilvl_h(zero, (v8i16)vec4);
1269 reg1 *= (v4u32)__msa_ilvl_h(zero, (v8i16)vec0);
1277 reg1 = (v4u32)__msa_srai_w((v4i32)reg1, 24);
1284 vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
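
In 1237-1284 each widened channel is multiplied by a second widened lane from the same pixels, shifted right (srai by 24 here), and repacked; that is the shape of an alpha-premultiply (attenuate) row. A rough scalar sketch; the exact rounding used by the vector code is not visible in these matches:

    #include <stdint.h>

    /* Hedged scalar sketch of channel-times-alpha attenuation; rounding is an assumption. */
    static inline uint8_t AttenuateApprox(uint8_t c, uint8_t a) {
      return (uint8_t)((c * a + 255) >> 8);
    }
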
1305 v8i16 reg0, reg1, reg2;
1318 reg1 = (v8i16)__msa_ilvev_b(zero, (v16i8)vec1);
1321 reg1 += vec_d0;
1324 reg1 = __msa_maxi_s_h((v8i16)reg1, 0);
1327 reg1 = __msa_min_s_h((v8i16)max, (v8i16)reg1);
1331 reg1 = __msa_srai_h(reg1, 2);
1333 reg1 = __msa_slli_h(reg1, 5);
1334 reg0 |= reg1;
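
The 1305-1334 block adds a dither term (vec_d0), clamps the result to a non-negative range bounded by max, and then packs with the same shift-and-OR scheme as the 16-bit formats above: srai by 2 followed by slli by 5 drops a value to 6 bits and places it at bits 5..10, an RGB565-style green slot. A scalar sketch of the clamp-then-pack step; the actual clamp ceiling is not shown in these matches:

    #include <stdint.h>

    /* Clamp a signed intermediate to [0, 255] (ceiling assumed), then place the
       6-bit field at bits 5..10, as in an RGB565 green slot. */
    static inline uint16_t GreenTo565(int16_t g) {
      int v = g < 0 ? 0 : (g > 255 ? 255 : g);
      return (uint16_t)((v >> 2) << 5);
    }
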
1373 v4u32 reg0, reg1, reg2, reg3, rgba_scale;
1385 reg1 = (v4u32)__msa_ilvl_h(zero, (v8i16)vec0);
1389 reg1 *= rgba_scale;
1393 reg1 = (v4u32)__msa_srai_w((v4i32)reg1, 24);
1396 vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
1433 v8u16 reg0, reg1, reg2;
1449 reg1 = (v8u16)__msa_dotp_u_h(vec0, const_0x5816);
1452 reg1 = (v8u16)__msa_dpadd_u_h(reg1, vec1, const_0x2D);
1455 reg1 = (v8u16)__msa_srai_h((v8i16)reg1, 7);
1457 reg1 = (v8u16)__msa_min_u_h((v8u16)reg1, const_0xFF);
1460 vec1 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg1);
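
Lines 1433-1460 form a weighted sum as a byte dot product: __msa_dotp_u_h with the 0x58/0x16 (88/22) pair, a dpadd with 0x2D (45), a shift right by 7, and a saturation at 0xFF. A scalar sketch of that pattern; the channel ordering below is an assumption read off the constants:

    #include <stdint.h>

    /* Hedged scalar sketch of the >>7 weighted sum with saturation. */
    static inline uint8_t WeightedSum7(uint8_t b, uint8_t g, uint8_t r) {
      int v = (22 * b + 88 * g + 45 * r) >> 7;
      return (uint8_t)(v > 255 ? 255 : v);
    }
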
1506 v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6;
1526 reg1 = (v16u8)__msa_pckev_b((v16i8)vec3, (v16i8)vec2);
1530 reg5 = (v16u8)__msa_slli_b((v16i8)reg1, 3);
1533 reg5 |= (v16u8)__msa_srai_b((v16i8)reg1, 2);
1537 reg1 = (v16u8)__msa_ilvl_b((v16i8)reg6, (v16i8)reg4);
1542 dst2 = (v16u8)__msa_ilvr_b((v16i8)reg3, (v16i8)reg1);
1543 dst3 = (v16u8)__msa_ilvl_b((v16i8)reg3, (v16i8)reg1);
1553 v8u16 reg0, reg1, reg2, reg3, reg4, reg5;
1570 reg1 = (v8u16)__msa_srli_h((v8i16)vec1, 3);
1576 reg1 |= (v8u16)__msa_srli_h((v8i16)vec1, 9);
1582 res1 = (v16u8)__msa_ilvev_b((v16i8)alpha, (v16i8)reg1);
1648 v8u16 reg0, reg1, reg2, reg3, reg4, reg5;
1670 reg1 = (v8u16)__msa_slli_h((v8i16)vec1, 3);
1672 reg1 |= (v8u16)__msa_srai_h((v8i16)vec1, 2);
1682 reg1 *= const_0x19;
1688 reg1 += reg3;
1690 reg1 += reg5;
1692 reg1 += const_0x1080;
1694 reg1 = (v8u16)__msa_srai_h((v8i16)reg1, 8);
1695 dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
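
The 1648-1695 block first widens packed 5-bit fields back to 8 bits, the (x << 3) | (x >> 2) pair at 1670-1672 being the classic bit-replication expansion, and then reuses the same weighted luma sum as above. Scalar sketches of the expansion, with illustrative helper names:

    #include <stdint.h>

    /* Replicate the top bits of a 5- or 6-bit field into its low bits. */
    static inline uint8_t Expand5To8(uint8_t c5) {
      return (uint8_t)((c5 << 3) | (c5 >> 2));
    }
    static inline uint8_t Expand6To8(uint8_t c6) {
      return (uint8_t)((c6 << 2) | (c6 >> 4));
    }
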
1705 v8u16 reg0, reg1, reg2, reg3, reg4, reg5;
1725 reg1 = (v8u16)__msa_srli_h((v8i16)vec1, 3);
1731 reg1 |= (v8u16)__msa_srli_h((v8i16)vec1, 9);
1736 vec0 = (v8u16)__msa_ilvr_h((v8i16)reg1, (v8i16)reg0);
1737 vec1 = (v8u16)__msa_ilvl_h((v8i16)reg1, (v8i16)reg0);
1767 v16u8 src0, src1, src2, reg0, reg1, reg2, reg3, dst0;
1784 reg1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src1, (v16i8)src0);
1787 vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
1789 vec2 = (v8u16)__msa_pckod_h((v8i16)reg1, (v8i16)reg0);
1808 v16u8 src0, src1, src2, reg0, reg1, reg2, reg3, dst0;
1825 reg1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src1, (v16i8)src0);
1828 vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
1830 vec2 = (v8u16)__msa_pckod_h((v8i16)reg1, (v8i16)reg0);
1856 v8u16 src0, src1, src2, src3, reg0, reg1, reg2, reg3;
1905 reg1 = vec0 * const_0x4A;
1909 reg1 += vec2 * const_0x26;
1912 reg0 -= reg1;
1937 v8u16 src0, src1, src2, src3, reg0, reg1, reg2, reg3;
1985 reg1 = vec1 * const_0x4A;
1989 reg1 += vec4 * const_0x26;
1992 reg0 -= reg1;
2020 v8i16 reg0, reg1, reg2, reg3;
2069 reg1 = (v8i16)__msa_pckev_d((v2i64)vec3, (v2i64)vec2);
2073 reg1 += (v8i16)__msa_pckod_d((v2i64)vec3, (v2i64)vec2);
2077 reg1 = __msa_srai_h((v8i16)reg1, 2);
2080 vec4 = (v8u16)__msa_pckev_h(reg1, reg0);
2082 vec6 = (v8u16)__msa_pckod_h(reg1, reg0);
2094 reg1 = __msa_subv_h((v8i16)const_0x8080, (v8i16)vec5);
2097 reg0 += reg1;
2125 v8i16 reg0, reg1, reg2, reg3;
2174 reg1 = (v8i16)__msa_pckev_d((v2i64)vec3, (v2i64)vec2);
2178 reg1 += (v8i16)__msa_pckod_d((v2i64)vec3, (v2i64)vec2);
2182 reg1 = __msa_srai_h(reg1, 2);
2185 vec4 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
2187 vec6 = (v8u16)__msa_pckod_h((v8i16)reg1, (v8i16)reg0);
2199 reg1 = __msa_subv_h((v8i16)const_0x8080, (v8i16)vec5);
2202 reg0 += reg1;
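
The chroma blocks at 1856-1992 and 2020-2202 fold 0x4A (74) and 0x26 (38) products into reg1 and subtract them from reg0, average 2x2 neighbourhoods with the shift by 2 at 2077/2182, and re-centre with the 0x8080 bias. Those constants fit the familiar fixed-point chroma form; in the sketch below, the 112 weight and the V coefficients are assumptions, since only 74, 38 and the bias are visible here:

    #include <stdint.h>

    /* Hedged scalar sketch of the chroma sums suggested by the constants above. */
    static inline uint8_t RGBToUApprox(uint8_t r, uint8_t g, uint8_t b) {
      return (uint8_t)((112 * b - 74 * g - 38 * r + 0x8080) >> 8);
    }
    static inline uint8_t RGBToVApprox(uint8_t r, uint8_t g, uint8_t b) {
      return (uint8_t)((112 * r - 94 * g - 18 * b + 0x8080) >> 8);  /* V weights assumed */
    }
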
2385 v16u8 reg0, reg1, dst0, dst1, dst2, dst3;
2395 reg1 = (v16u8)__msa_ilvl_b((v16i8)alpha, (v16i8)vec0);
2398 dst2 = (v16u8)__msa_ilvr_b((v16i8)reg1, (v16i8)vec2);
2399 dst3 = (v16u8)__msa_ilvl_b((v16i8)reg1, (v16i8)vec2);
2666 v4i32 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9;
2678 reg1 = (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)vec0);
2680 reg1 *= vec_yg;
2682 reg1 = __msa_srai_w(reg1, 16);
2684 reg5 = reg1 + vec_br;
2686 reg3 = reg1 + vec_bg;
2688 reg1 += vec_bb;
2696 reg1 -= reg7 * vec_ub;
2704 reg1 = __msa_srai_w(reg1, 6);
2709 CLIP_0TO255(reg0, reg1, reg2, reg3, reg4, reg5);
2710 vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
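
Finally, 2666-2710 is the fixed-point YUV-to-RGB core: the widened luma is scaled by vec_yg and shifted down by 16, per-channel bias terms (vec_bb, vec_bg, vec_br) are added, chroma products such as reg7 * vec_ub are subtracted, and the results are shifted by 6 and clamped with CLIP_0TO255. A hedged scalar sketch of one channel; the parameter names mirror the vectors above, but their values live elsewhere in the conversion-constant tables:

    #include <stdint.h>

    static inline uint8_t Clamp255(int32_t v) {
      return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    /* One blue-channel step; yg, ub, bb stand in for the vec_yg / vec_ub / vec_bb lanes. */
    static inline uint8_t YuvToBApprox(uint8_t y, uint8_t u, int32_t yg, int32_t ub, int32_t bb) {
      int32_t y1 = ((int32_t)(y * 0x0101) * yg) >> 16;  /* luma scaled in 16.16 fixed point */
      return Clamp255((y1 - (int32_t)u * ub + bb) >> 6);
    }
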
2729 v4i32 reg0, reg1, reg2, reg3;
2741 reg1 = (v4i32)__msa_ilvl_h(zero, vec0);
2745 reg1 *= vec_yg;
2749 reg1 = __msa_srai_w(reg1, 16);
2752 vec0 = (v8i16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);