Searched refs: cospi_16_64 (Results 1 - 25 of 46)


  /external/libvpx/libvpx/vpx_dsp/arm/
idct32x32_1_add_neon.c
42 const int16_t out0 = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
43 const int16_t out1 = WRAPLOW(dct_const_round_shift(out0 * cospi_16_64));
idct4x4_1_add_neon.c
35 const int16_t out0 = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
36 const int16_t out1 = WRAPLOW(dct_const_round_shift(out0 * cospi_16_64));
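Note: the *_1_add_neon.c entries in this list (32x32 above; 4x4 here; 8x8 and 16x16 below) all take the DC-only shortcut: when only coefficient 0 is non-zero, the whole inverse transform collapses to scaling input[0] by cospi_16_64 twice, with a rounding shift after each multiply. A minimal standalone sketch of that arithmetic, assuming ROUND_POWER_OF_TWO stands in for dct_const_round_shift/WRAPLOW and that the final per-block shift is 4, 5 or 6 depending on block size:

    /* Hedged sketch of the DC-only path, not the libvpx functions themselves:
     * input[0] is scaled twice by the Q14 constant cospi_16_64, each time
     * followed by a 14-bit rounding shift, so the DC coefficient ends up
     * multiplied by roughly cos(pi/4)^2 = 1/2 before the final shift. */
    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    static int16_t dc_only_value(int32_t dc, int shift /* assumed 4/5/6 by size */) {
      const int32_t cospi_16_64 = 11585; /* round(cos(pi/4) * 2^14) */
      int32_t out = ROUND_POWER_OF_TWO(dc * cospi_16_64, DCT_CONST_BITS);
      out = ROUND_POWER_OF_TWO(out * cospi_16_64, DCT_CONST_BITS);
      return (int16_t)ROUND_POWER_OF_TWO(out, shift); /* added to every pixel */
    }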
highbd_idct32x32_34_add_neon.c
103 s1[0] = multiply_shift_and_narrow_s32_dual(in[0], cospi_16_64);
120 s1[5] = sub_multiply_shift_and_narrow_s32_dual(s1[7], s1[4], cospi_16_64);
121 s1[6] = add_multiply_shift_and_narrow_s32_dual(s1[4], s1[7], cospi_16_64);
153 s2[10] = sub_multiply_shift_and_narrow_s32_dual(s2[14], s2[9], cospi_16_64);
154 s2[13] = add_multiply_shift_and_narrow_s32_dual(s2[9], s2[14], cospi_16_64);
156 s2[11] = sub_multiply_shift_and_narrow_s32_dual(s2[15], s2[8], cospi_16_64);
157 s2[12] = add_multiply_shift_and_narrow_s32_dual(s2[8], s2[15], cospi_16_64);
195 s1[20] = sub_multiply_shift_and_narrow_s32_dual(s3[27], s2[20], cospi_16_64);
196 s1[27] = add_multiply_shift_and_narrow_s32_dual(s2[20], s3[27], cospi_16_64);
198 s1[21] = sub_multiply_shift_and_narrow_s32_dual(s3[26], s2[21], cospi_16_64);
    [all...]
idct32x32_135_add_neon.c
173 s4[0] = multiply_shift_and_narrow_s16(in[0], cospi_16_64);
210 s5[5] = sub_multiply_shift_and_narrow_s16(s3[7], s3[4], cospi_16_64);
211 s5[6] = add_multiply_shift_and_narrow_s16(s3[4], s3[7], cospi_16_64);
252 s6[10] = sub_multiply_shift_and_narrow_s16(s5[13], s5[10], cospi_16_64);
253 s6[13] = add_multiply_shift_and_narrow_s16(s5[10], s5[13], cospi_16_64);
255 s6[11] = sub_multiply_shift_and_narrow_s16(s5[12], s5[11], cospi_16_64);
256 s6[12] = add_multiply_shift_and_narrow_s16(s5[11], s5[12], cospi_16_64);
294 s7[20] = sub_multiply_shift_and_narrow_s16(s6[27], s6[20], cospi_16_64);
295 s7[27] = add_multiply_shift_and_narrow_s16(s6[20], s6[27], cospi_16_64);
297 s7[21] = sub_multiply_shift_and_narrow_s16(s6[26], s6[21], cospi_16_64);
    [all...]
idct32x32_34_add_neon.c
96 s1[0] = multiply_shift_and_narrow_s16(in[0], cospi_16_64);
113 s1[5] = sub_multiply_shift_and_narrow_s16(s1[7], s1[4], cospi_16_64);
114 s1[6] = add_multiply_shift_and_narrow_s16(s1[4], s1[7], cospi_16_64);
146 s2[10] = sub_multiply_shift_and_narrow_s16(s2[14], s2[9], cospi_16_64);
147 s2[13] = add_multiply_shift_and_narrow_s16(s2[9], s2[14], cospi_16_64);
149 s2[11] = sub_multiply_shift_and_narrow_s16(s2[15], s2[8], cospi_16_64);
150 s2[12] = add_multiply_shift_and_narrow_s16(s2[8], s2[15], cospi_16_64);
188 s1[20] = sub_multiply_shift_and_narrow_s16(s3[27], s2[20], cospi_16_64);
189 s1[27] = add_multiply_shift_and_narrow_s16(s2[20], s3[27], cospi_16_64);
191 s1[21] = sub_multiply_shift_and_narrow_s16(s3[26], s2[21], cospi_16_64);
    [all...]
idct8x8_1_add_neon.c
39 const int16_t out0 = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
40 const int16_t out1 = WRAPLOW(dct_const_round_shift(out0 * cospi_16_64));
highbd_idct32x32_135_add_neon.c
183 s4[0] = multiply_shift_and_narrow_s32_dual(in[0], cospi_16_64);
220 s5[5] = sub_multiply_shift_and_narrow_s32_dual(s3[7], s3[4], cospi_16_64);
221 s5[6] = add_multiply_shift_and_narrow_s32_dual(s3[4], s3[7], cospi_16_64);
262 s6[10] = sub_multiply_shift_and_narrow_s32_dual(s5[13], s5[10], cospi_16_64);
263 s6[13] = add_multiply_shift_and_narrow_s32_dual(s5[10], s5[13], cospi_16_64);
265 s6[11] = sub_multiply_shift_and_narrow_s32_dual(s5[12], s5[11], cospi_16_64);
266 s6[12] = add_multiply_shift_and_narrow_s32_dual(s5[11], s5[12], cospi_16_64);
304 s7[20] = sub_multiply_shift_and_narrow_s32_dual(s6[27], s6[20], cospi_16_64);
305 s7[27] = add_multiply_shift_and_narrow_s32_dual(s6[20], s6[27], cospi_16_64);
307 s7[21] = sub_multiply_shift_and_narrow_s32_dual(s6[26], s6[21], cospi_16_64);
    [all...]
fdct_neon.c
49 // (s_0 +/- s_1) * cospi_16_64
53 const int32x4_t temp1 = vmulq_n_s32(s_0_p_s_1, (int16_t)cospi_16_64);
54 const int32x4_t temp2 = vmulq_n_s32(s_0_m_s_1, (int16_t)cospi_16_64);
fwd_txfm_neon.c
59 v_t0_lo = vmulq_n_s32(v_t0_lo, (int32_t)cospi_16_64);
60 v_t0_hi = vmulq_n_s32(v_t0_hi, (int32_t)cospi_16_64);
61 v_t1_lo = vmulq_n_s32(v_t1_lo, (int32_t)cospi_16_64);
62 v_t1_hi = vmulq_n_s32(v_t1_hi, (int32_t)cospi_16_64);
80 v_t0_lo = vmull_n_s16(vget_low_s16(v_x0), (int16_t)cospi_16_64);
81 v_t0_hi = vmull_n_s16(vget_high_s16(v_x0), (int16_t)cospi_16_64);
82 v_t1_lo = vmull_n_s16(vget_low_s16(v_x1), (int16_t)cospi_16_64);
83 v_t1_hi = vmull_n_s16(vget_high_s16(v_x1), (int16_t)cospi_16_64);
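Note: fdct_neon.c and fwd_txfm_neon.c both widen to 32 bits while multiplying by the Q14 constant, then shift and narrow back to 16 bits. A hedged sketch of that widen-multiply-round-narrow step (the helper name is illustrative, not the libvpx multiply_shift_and_narrow functions):

    /* Sketch only: multiply eight int16 lanes by a Q14 constant with widening,
     * then apply the 14-bit rounding shift and narrow back to int16. */
    #include <arm_neon.h>
    #include <stdint.h>

    #define DCT_CONST_BITS 14

    static int16x8_t mul_round_narrow_s16(const int16x8_t a, const int16_t c) {
      const int32x4_t lo = vmull_n_s16(vget_low_s16(a), c);  /* 16x16 -> 32 bit */
      const int32x4_t hi = vmull_n_s16(vget_high_s16(a), c);
      return vcombine_s16(vrshrn_n_s32(lo, DCT_CONST_BITS),  /* round then >> 14 */
                          vrshrn_n_s32(hi, DCT_CONST_BITS));
    }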
idct16x16_1_add_neon.c
35 const int16_t out0 = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
36 const int16_t out1 = WRAPLOW(dct_const_round_shift(out0 * cospi_16_64));
  /external/libvpx/libvpx/vpx_dsp/mips/
idct8x8_msa.c
66 k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
67 k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
79 k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
108 out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
109 out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
inv_txfm_dspr2.h
33 __asm__ __volatile__(/* out = dct_const_round_shift(dc * cospi_16_64); */ \
39 "%[cospi_16_64] \n\t" \
43 /* out = dct_const_round_shift(out * cospi_16_64); */ \
49 "%[cospi_16_64] \n\t" \
56 [cospi_16_64] "r"(cospi_16_64)); \
idct32x32_msa.c
53 DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
58 DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
95 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
96 DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);
218 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1)
    [all...]
idct16x16_msa.c
30 DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
31 DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
79 DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
85 DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
122 DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3)
    [all...]
itrans8_dspr2.c
26 temp_1 = (input[0] + input[4]) * cospi_16_64;
29 temp_2 = (input[0] - input[4]) * cospi_16_64;
39 "madd $ac0, %[Temp2], %[cospi_16_64] \n\t"
43 "madd $ac1, %[Temp3], %[cospi_16_64] \n\t"
131 temp_1 = (step1_7 - step1_6 - step1_4 + step1_5) * cospi_16_64;
132 temp_2 = (step1_4 - step1_5 - step1_6 + step1_7) * cospi_16_64;
146 "madd $ac0, %[Temp0], %[cospi_16_64] \n\t"
147 "madd $ac1, %[Temp1], %[cospi_16_64] \n\t"
184 [cospi_16_64] "r"(cospi_16_64), [cospi_28_64] "r"(cospi_28_64)
    [all...]
fwd_dct32x32_msa.c
77 DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);
88 DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);
101 DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);
102 DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);
141 DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27)
    [all...]
itrans16_dspr2.c
45 "madd $ac1, %[result1], %[cospi_16_64] \n\t"
46 "madd $ac2, %[result2], %[cospi_16_64] \n\t"
76 [cospi_16_64] "r"(cospi_16_64));
245 "madd $ac1, %[load5], %[cospi_16_64] \n\t"
246 "madd $ac3, %[load6], %[cospi_16_64] \n\t"
262 [cospi_16_64] "r"(cospi_16_64));
274 "madd $ac0, %[load5], %[cospi_16_64] \n\t"
280 "madd $ac1, %[load6], %[cospi_16_64] \n\t
    [all...]
inv_txfm_msa.h
25 v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
117 c0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
118 c1_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
221 cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 }; \
258 -cospi_24_64, cospi_8_64, cospi_16_64, -cospi_16_64, 0, 0, 0, 0
    [all...]
  /external/libvpx/libvpx/vpx_dsp/x86/
fwd_txfm_impl_sse2.h
47 octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,
48 cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);
50 octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64
    [all...]
highbd_inv_txfm_sse2.h
42 out = HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
43 out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
inv_txfm_ssse3.c
26 const __m128i stk2_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
27 const __m128i stk2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
228 const __m128i stg2_0 = pair_set_epi16(2 * cospi_16_64, 2 * cospi_16_64);
229 const __m128i stk2_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
230 const __m128i stk2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
    [all...]
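Note: on x86 the same rotation is expressed with 16-bit constant pairs and _mm_madd_epi16: a register of interleaved (x, y) samples multiplied against (cospi_16_64, cospi_16_64) and (cospi_16_64, -cospi_16_64) yields the sum and difference terms in one instruction each. A hedged sketch using a local stand-in for the pair_set_epi16 idiom:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Replicate the 16-bit pair (a, b) across all four 32-bit lanes. */
    static __m128i pair_set_epi16_sketch(int16_t a, int16_t b) {
      const int32_t pair = (int32_t)(uint16_t)a | ((int32_t)(uint16_t)b << 16);
      return _mm_set1_epi32(pair);
    }

    /* For interleaved input [x0 y0 x1 y1 x2 y2 x3 y3], produce the rounded
     * ((x + y) * cospi_16_64) >> 14 and ((x - y) * cospi_16_64) >> 14 lanes. */
    static void half_turn_butterfly(const __m128i xy, __m128i *sum, __m128i *diff) {
      const int16_t c = 11585; /* cospi_16_64 */
      const __m128i round = _mm_set1_epi32(1 << 13);
      const __m128i k0 = pair_set_epi16_sketch(c, c);             /* x*c + y*c */
      const __m128i k1 = pair_set_epi16_sketch(c, (int16_t)-c);   /* x*c - y*c */
      *sum = _mm_srai_epi32(_mm_add_epi32(_mm_madd_epi16(xy, k0), round), 14);
      *diff = _mm_srai_epi32(_mm_add_epi32(_mm_madd_epi16(xy, k1), round), 14);
    }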
  /external/libvpx/libvpx/vpx_dsp/
txfm_common.h
43 static const tran_high_t cospi_16_64 = 11585;
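Note: this is the definition site. cospi_16_64 is the Q14 fixed-point encoding of cos(16*pi/64) = cos(pi/4): round(0.7071068 * 2^14) = round(11585.24) = 11585, with DCT_CONST_BITS = 14 supplying the matching rounding shift used throughout the results above.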
inv_txfm.c
137 temp1 = (input[0] + input[2]) * cospi_16_64;
138 temp2 = (input[0] - input[2]) * cospi_16_64;
180 tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64));
182 out = WRAPLOW(dct_const_round_shift(out * cospi_16_64));
249 s2 = (int)(cospi_16_64 * (x2 + x3));
250 s3 = (int)(cospi_16_64 * (x2 - x3));
251 s6 = (int)(cospi_16_64 * (x6 + x7));
252 s7 = (int)(cospi_16_64 * (x6 - x7));
288 temp1 = (step1[0] + step1[2]) * cospi_16_64;
289 temp2 = (step1[0] - step1[2]) * cospi_16_64;
    [all...]
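Note: the scalar reference code in inv_txfm.c above and in fwd_txfm.c / vp9_dct.c below uses cospi_16_64 for the even-stage butterfly: the sum and the difference of a pair of inputs are both scaled by cos(pi/4) and rounded back down. A minimal sketch with illustrative names (not the libvpx API):

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    /* out0 = round((in0 + in1) * cos(pi/4)), out1 = round((in0 - in1) * cos(pi/4)),
     * both computed in Q14 and shifted back, as in the lines quoted above. */
    static void butterfly_cospi_16_64(int32_t in0, int32_t in1,
                                      int16_t *out0, int16_t *out1) {
      const int32_t cospi_16_64 = 11585;
      *out0 = (int16_t)ROUND_POWER_OF_TWO((in0 + in1) * cospi_16_64, DCT_CONST_BITS);
      *out1 = (int16_t)ROUND_POWER_OF_TWO((in0 - in1) * cospi_16_64, DCT_CONST_BITS);
    }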
fwd_txfm.c
56 temp1 = (step[0] + step[1]) * cospi_16_64;
57 temp2 = (step[0] - step[1]) * cospi_16_64;
132 t0 = (x0 + x1) * cospi_16_64;
133 t1 = (x0 - x1) * cospi_16_64;
142 t0 = (s6 - s5) * cospi_16_64;
143 t1 = (s6 + s5) * cospi_16_64;
266 t0 = (x0 + x1) * cospi_16_64;
267 t1 = (x0 - x1) * cospi_16_64;
276 t0 = (s6 - s5) * cospi_16_64;
277 t1 = (s6 + s5) * cospi_16_64;
    [all...]
  /external/libvpx/libvpx/vp9/encoder/
vp9_dct.c
32 temp1 = (step[0] + step[1]) * cospi_16_64;
33 temp2 = (step[0] - step[1]) * cospi_16_64;
62 t0 = (x0 + x1) * cospi_16_64;
63 t1 = (x0 - x1) * cospi_16_64;
72 t0 = (s6 - s5) * cospi_16_64;
73 t1 = (s6 + s5) * cospi_16_64;
141 t0 = (x0 + x1) * cospi_16_64;
142 t1 = (x0 - x1) * cospi_16_64;
151 t0 = (s6 - s5) * cospi_16_64;
152 t1 = (s6 + s5) * cospi_16_64;
    [all...]
