    Searched refs:v16i8 (Results 1 - 25 of 60)

  /external/libvpx/libvpx/vpx_dsp/mips/
loopfilter_msa.h 18 v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \
19 v16i8 filt, filt1, filt2, cnst4b, cnst3b; \
22 p1_m = (v16i8)__msa_xori_b(p1_in, 0x80); \
23 p0_m = (v16i8)__msa_xori_b(p0_in, 0x80); \
24 q0_m = (v16i8)__msa_xori_b(q0_in, 0x80); \
25 q1_m = (v16i8)__msa_xori_b(q1_in, 0x80); \
28 filt = filt & (v16i8)hev_in; \
34 q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h); \
40 filt = __msa_pckev_b((v16i8)filt_r, (v16i8)filt_r);
    [all...]
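
Note: in these libvpx MSA files, v16i8 is the 16-lane signed-byte vector type that <msa.h> provides on MIPS. A minimal sketch of the usage pattern seen in the hits above, with hypothetical inputs and assuming a toolchain built with -mmsa (not code taken from the files themselves):

    #include <msa.h>

    /* Bias two unsigned-byte vectors into the signed domain and pack the
       even-indexed bytes, mirroring the xori/pckev pattern in the hits. */
    v16i8 bias_and_pack(v16u8 p0, v16u8 q0) {
      v16i8 p0_m = (v16i8)__msa_xori_b(p0, 0x80); /* flip the sign bit */
      v16i8 q0_m = (v16i8)__msa_xori_b(q0, 0x80);
      return __msa_pckev_b(q0_m, p0_m);           /* even bytes of both inputs */
    }
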
loopfilter_16_msa.c 84 v16i8 zero = { 0 };
114 q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0);
133 q0_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q0);
148 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
154 q1_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q1);
161 q1_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q1);
168 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out);
174 q2_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q2)
    [all...]
vpx_convolve_msa.h 23 tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \
24 tmp0 = __msa_dpadd_s_h(tmp0, (v16i8)vec1, (v16i8)filt1); \
25 tmp1 = __msa_dotp_s_h((v16i8)vec2, (v16i8)filt2); \
26 tmp1 = __msa_dpadd_s_h(tmp1, (v16i8)vec3, (v16i8)filt3); \
34 v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \
52 v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m;
    [all...]
vpx_convolve8_msa.c 29 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
30 v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3;
65 out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4);
74 hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8);
75 out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6);
81 hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8)
    [all...]
macros_msa.h 21 #define LD_SB(...) LD_B(v16i8, __VA_ARGS__)
32 #define ST_SB(...) ST_B(v16i8, __VA_ARGS__)
301 #define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)
314 #define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)
321 #define LD_SB5(...) LD_B5(v16i8, __VA_ARGS__)
328 #define LD_SB7(...) LD_B7(v16i8, __VA_ARGS__)
336 #define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__)
608 v16i8 zero_m = { 0 }; \
609 out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val);
    [all...]
vpx_convolve8_avg_msa.c 23 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
25 v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3;
59 vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4);
69 hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8);
70 vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6);
76 hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8)
    [all...]
subtract_msa.c 19 v16i8 src = { 0 };
20 v16i8 pred = { 0 };
38 v16i8 src = { 0 };
39 v16i8 pred = { 0 };
62 v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
63 v16i8 pred0, pred1, pred2, pred3, pred4, pred5, pred6, pred7;
121 v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
122 v16i8 pred0, pred1, pred2, pred3, pred4, pred5, pred6, pred7;
183 v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
184 v16i8 pred0, pred1, pred2, pred3, pred4, pred5, pred6, pred7
    [all...]
intrapred_msa.c 156 v16i8 store, src = { 0 };
168 store = __msa_splati_b((v16i8)sum_w, 0);
177 v16i8 store, data = { 0 };
182 data = (v16i8)__msa_insert_w((v4i32)data, 0, val0);
186 store = __msa_splati_b((v16i8)sum_w, 0);
194 const v16i8 store = __msa_ldi_b(128);
205 v16i8 store;
220 store = __msa_splati_b((v16i8)sum_w, 0);
231 v16i8 store;
243 store = __msa_splati_b((v16i8)sum_w, 0)
    [all...]
vpx_convolve8_vert_msa.c 19 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
20 v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
21 v16i8 src65_r, src87_r, src109_r, src2110, src4332, src6554, src8776;
22 v16i8 src10998, filt0, filt1, filt2, filt3;
70 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
71 v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
72 v16i8 src65_r, src87_r, src109_r, filt0, filt1, filt2, filt3;
124 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
125 v16i8 filt0, filt1, filt2, filt3;
126 v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r
    [all...]
sub_pixel_variance_msa.c 406 v16i8 src0, src1, src2, src3;
407 v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
429 src0 = (v16i8)__msa_ilvev_d((v2i64)src2, (v2i64)src0);
449 v16i8 src0, src1, src2, src3;
450 v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
493 v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
494 v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
616 out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
708 out0 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0)
    [all...]
sad_msa.c 255 ref = (v16u8)__msa_sldi_b((v16i8)ref1, (v16i8)ref0, 1);
259 ref = (v16u8)__msa_sldi_b((v16i8)ref1, (v16i8)ref0, 2);
271 ref = (v16u8)__msa_sldi_b((v16i8)ref1, (v16i8)ref0, 1);
275 ref = (v16u8)__msa_sldi_b((v16i8)ref1, (v16i8)ref0, 2);
550 ref = (v16u8)__msa_sldi_b((v16i8)ref1, (v16i8)ref0, 1)
    [all...]
vpx_convolve8_avg_vert_msa.c 22 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
24 v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
25 v16i8 src65_r, src87_r, src109_r, src2110, src4332, src6554, src8776;
26 v16i8 src10998, filt0, filt1, filt2, filt3;
82 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
84 v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
85 v16i8 src65_r, src87_r, src109_r, filt0, filt1, filt2, filt3;
143 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
144 v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
145 v16i8 src65_r, src87_r, src109_r, src10_l, src32_l, src54_l, src76_l
    [all...]
  /external/libvpx/libvpx/vp8/common/mips/msa/
sixtap_filter_msa.c 40 v16i8 vec0_m, vec1_m, vec2_m; \
59 v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m; \
74 v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
94 tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \
95 tmp0 = __msa_dpadd_s_h(tmp0, (v16i8)vec1, (v16i8)filt1); \
102 v16i8 vec0_m, vec1_m; \
118 v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \
130 v16i8 vec0_m, vec1_m, vec2_m, vec3_m;
    [all...]
mfqe_msa.c 22 v16i8 src0 = { 0 };
23 v16i8 src1 = { 0 };
24 v16i8 dst0 = { 0 };
25 v16i8 dst1 = { 0 };
52 dst0 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r);
63 dst1 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r)
    [all...]
vp8_macros_msa.h 21 #define LD_SB(...) LD_B(v16i8, __VA_ARGS__)
33 #define ST_SB(...) ST_B(v16i8, __VA_ARGS__)
292 #define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)
300 #define LD_SB3(...) LD_B3(v16i8, __VA_ARGS__)
308 #define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)
316 #define LD_SB5(...) LD_B5(v16i8, __VA_ARGS__)
325 #define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__)
376 #define ST_SB4(...) ST_B4(v16i8, __VA_ARGS__)
533 v16i8 zero_m = { 0 }; \
535 out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val);
    [all...]
loopfilter_filters_msa.c 21 p1_a_sub_q1 = (v16u8)__msa_srli_b((v16i8)p1_a_sub_q1, 1); \
30 v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \
31 v16i8 filt, filt1, filt2, cnst4b, cnst3b; \
34 p1_m = (v16i8)__msa_xori_b(p1_in_out, 0x80); \
35 p0_m = (v16i8)__msa_xori_b(p0_in_out, 0x80); \
36 q0_m = (v16i8)__msa_xori_b(q0_in_out, 0x80); \
37 q1_m = (v16i8)__msa_xori_b(q1_in_out, 0x80); \
41 filt = filt & (v16i8)hev_in; \
48 q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h);
    [all...]
bilinear_filter_msa.c 41 v16i8 src0, src1, src2, src3, mask;
63 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
64 v16i8 res0, res1, res2, res3;
104 v16i8 src0, src1, src2, src3, mask;
127 v16i8 src0, src1, src2, src3, mask, out0, out1;
205 v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
270 v16i8 src0, src1, src2, src3, src4;
271 v16i8 src10_r, src32_r, src21_r, src43_r, src2110, src4332;
287 src2110 = __msa_pckev_b((v16i8)tmp1, (v16i8)tmp0)
    [all...]
postproc_msa.c 294 ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0);
298 ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1);
302 ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2);
306 ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3);
310 ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4);
314 ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5);
325 ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6);
336 ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7);
450 ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0);
454 ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1)
    [all...]
  /external/libvpx/libvpx/vp8/encoder/mips/msa/
denoising_msa.c 123 temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h);
124 running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h);
173 temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h);
174 running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h);
256 running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3,
257 (v16i8)adjust2)
    [all...]
  /external/libvpx/libvpx/vp9/common/mips/msa/
vp9_mfqe_msa.c 21 v16i8 src0 = { 0 };
22 v16i8 src1 = { 0 };
23 v16i8 dst0 = { 0 };
24 v16i8 dst1 = { 0 };
50 dst0 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r);
61 dst1 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r)
    [all...]
  /external/clang/test/CodeGen/
systemz-abi-vector.c 27 typedef __attribute__((vector_size(16))) char v16i8; typedef
38 unsigned int align = __alignof__ (v16i8);
58 v16i8 pass_v16i8(v16i8 arg) { return arg; }
153 struct agg_v16i8 { v16i8 a; };
317 v16i8 va_v16i8(__builtin_va_list l) { return __builtin_va_arg(l, v16i8); }
    [all...]
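
The Clang ABI test above declares v16i8 with the GCC/Clang vector_size attribute. A minimal standalone sketch (not part of the test) of what that typedef behaves like, assuming any compiler with the vector extension:

    #include <stdio.h>

    typedef __attribute__((vector_size(16))) char v16i8; /* 16 x i8, 16 bytes */

    int main(void) {
      v16i8 a = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
      v16i8 b = a + a;                 /* element-wise add, no intrinsics */
      printf("%d %d\n", b[1], b[15]);  /* prints: 2 30 */
      return 0;
    }
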
  /external/llvm/lib/Target/AMDGPU/
SITypeRewriter.cpp 14 /// v16i8 => i128
15 /// - v16i8 is used for constant memory resource descriptors. This type is
17 /// in the backend, because we want the legalizer to expand all v16i8
38 Type *v16i8; member in class:__anon17592::SITypeRewriter
59 v16i8 = VectorType::get(Type::getInt8Ty(M.getContext()), 16);
79 if (ElemTy == v16i8) {
104 if (Arg->getType() == v16i8) {
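
The comments in SITypeRewriter.cpp above explain why the AMDGPU backend rewrites v16i8 resource descriptors to i128. A minimal sketch of that substitution (a hypothetical helper, not the pass itself), using only calls visible in the hits:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    // Swap the <16 x i8> descriptor type for the equally wide scalar i128 so
    // the legalizer never has to expand v16i8 loads.
    static Type *rewriteDescriptorType(Type *Ty, Module &M) {
      Type *V16I8 = VectorType::get(Type::getInt8Ty(M.getContext()), 16);
      return (Ty == V16I8) ? Type::getInt128Ty(M.getContext()) : Ty;
    }
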
  /external/llvm/lib/Target/X86/
X86TargetTransformInfo.cpp 178 { ISD::SHL, MVT::v16i8, 1 },
179 { ISD::SRL, MVT::v16i8, 2 },
180 { ISD::SRA, MVT::v16i8, 2 },
246 { ISD::SHL, MVT::v16i8, 1 }, // psllw.
255 { ISD::SRL, MVT::v16i8, 1 }, // psrlw.
264 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb.
321 { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence.
330 { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence.
339 { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence.
354 { ISD::SDIV, MVT::v16i8, 16*20 }
    [all...]
  /external/llvm/include/llvm/CodeGen/
MachineValueType.h 72 v16i8 = 25, // 16 x i8 enumerator in enum:llvm::MVT::SimpleValueType
242 return (SimpleTy == MVT::v16i8 || SimpleTy == MVT::v8i16 ||
322 case v16i8:
383 case v16i8:
475 case v16i8:
600 if (NumElements == 16) return MVT::v16i8;
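
MachineValueType.h above is where the v16i8 enumerator itself lives. A small sketch of how generic code typically reaches it (hypothetical helper; the assert just restates the header's 16 x i8 mapping):

    #include "llvm/CodeGen/MachineValueType.h"
    #include <cassert>
    using namespace llvm;

    static MVT sixteenSignedBytes() {
      MVT VT = MVT::getVectorVT(MVT::i8, 16); // selects MVT::v16i8
      assert(VT == MVT::v16i8 && VT.getSizeInBits() == 128);
      return VT;
    }
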
  /external/libvpx/libvpx/vp9/encoder/mips/msa/
vp9_fdct4x4_msa.c 57 v16i8 zero = { 0 };
58 v16i8 one = __msa_ldi_b(1);
