/external/libvpx/libvpx/vpx_dsp/arm/

  idct32x32_135_add_neon.c
    175  s4[3] = multiply_shift_and_narrow_s16(in[8], cospi_8_64);
    177  s4[9] = multiply_accumulate_shift_and_narrow_s16(s2[8], -cospi_8_64, s2[15],
    180  cospi_8_64);
    183  s3[13], -cospi_8_64);
    184  s4[13] = multiply_accumulate_shift_and_narrow_s16(s3[10], -cospi_8_64, s3[13],
    222  s5[18] = multiply_accumulate_shift_and_narrow_s16(s4[18], -cospi_8_64, s4[29],
    225  cospi_8_64);
    227  s5[19] = multiply_accumulate_shift_and_narrow_s16(s4[19], -cospi_8_64, s4[28],
    230  cospi_8_64);
    233  s4[27], -cospi_8_64);
    [all...]

  highbd_idct32x32_34_add_neon.c
    105  s2[9] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], -cospi_8_64,
    108  s2[15], cospi_8_64);
    123  s1[18] = multiply_accumulate_shift_and_narrow_s32_dual(s1[17], -cospi_8_64,
    126  s1[30], cospi_8_64);
    128  s1[19] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_8_64,
    131  s1[31], cospi_8_64);
    134  s2[27], -cospi_8_64);
    135  s1[27] = multiply_accumulate_shift_and_narrow_s32_dual(s2[20], -cospi_8_64,
    139  s2[26], -cospi_8_64);
    140  s1[26] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], -cospi_8_64,
    [all...]

  idct32x32_34_add_neon.c
    98   s2[9] = multiply_accumulate_shift_and_narrow_s16(s2[8], -cospi_8_64, s2[15],
    101  cospi_8_64);
    116  s1[18] = multiply_accumulate_shift_and_narrow_s16(s1[17], -cospi_8_64, s1[30],
    119  cospi_8_64);
    121  s1[19] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_8_64, s1[31],
    124  cospi_8_64);
    127  s2[27], -cospi_8_64);
    128  s1[27] = multiply_accumulate_shift_and_narrow_s16(s2[20], -cospi_8_64, s2[27],
    132  s2[26], -cospi_8_64);
    133  s1[26] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_8_64, s2[26]
    [all...]

  highbd_idct32x32_135_add_neon.c
    185  s4[3] = multiply_shift_and_narrow_s32_dual(in[8], cospi_8_64);
    187  s4[9] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], -cospi_8_64,
    190  s2[15], cospi_8_64);
    193  s3[13], -cospi_8_64);
    194  s4[13] = multiply_accumulate_shift_and_narrow_s32_dual(s3[10], -cospi_8_64,
    232  s5[18] = multiply_accumulate_shift_and_narrow_s32_dual(s4[18], -cospi_8_64,
    235  s4[29], cospi_8_64);
    237  s5[19] = multiply_accumulate_shift_and_narrow_s32_dual(s4[19], -cospi_8_64,
    240  s4[28], cospi_8_64);
    243  s4[27], -cospi_8_64);
    [all...]

  fdct_neon.c
    60   // s_3 * cospi_8_64 + s_2 * cospi_24_64
    61   // s_3 * cospi_24_64 - s_2 * cospi_8_64
    62   const int32x4_t s_3_cospi_8_64 = vmull_n_s16(s_3, (int16_t)cospi_8_64);
    68   vmlsl_n_s16(s_3_cospi_24_64, s_2, (int16_t)cospi_8_64);

  fwd_txfm_neon.c
    55   v_t2_lo = vmlal_n_s16(v_t2_lo, vget_low_s16(v_x3), (int16_t)cospi_8_64);
    56   v_t2_hi = vmlal_n_s16(v_t2_hi, vget_high_s16(v_x3), (int16_t)cospi_8_64);
    57   v_t3_lo = vmlsl_n_s16(v_t3_lo, vget_low_s16(v_x2), (int16_t)cospi_8_64);
    58   v_t3_hi = vmlsl_n_s16(v_t3_hi, vget_high_s16(v_x2), (int16_t)cospi_8_64);

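The NEON entries above (fdct_neon.c, fwd_txfm_neon.c) form both halves of each cospi_8_64/cospi_24_64 butterfly with widening multiply-accumulate intrinsics (vmull_n_s16, vmlal_n_s16, vmlsl_n_s16) before narrowing the 32-bit sums back to 16 bits. A minimal sketch of that pattern, assuming the Q14 constants from txfm_common.h and a 14-bit rounding narrow at the end; butterfly_p8_p24 is an illustrative name, not a libvpx function:

  #include <arm_neon.h>

  #define COSPI_8_64 15137   /* round(cos(8*pi/64) * 2^14) */
  #define COSPI_24_64 6270   /* round(cos(24*pi/64) * 2^14) */
  #define DCT_CONST_BITS 14

  /* Four lanes of t2 = x2*cospi_24 + x3*cospi_8 and
     t3 = -x2*cospi_8 + x3*cospi_24, rounded back to 16 bits. */
  static void butterfly_p8_p24(int16x4_t x2, int16x4_t x3,
                               int16x4_t *t2, int16x4_t *t3) {
    int32x4_t a = vmull_n_s16(x2, (int16_t)COSPI_24_64);  /* x2 * cospi_24 */
    int32x4_t b = vmull_n_s16(x3, (int16_t)COSPI_24_64);  /* x3 * cospi_24 */
    a = vmlal_n_s16(a, x3, (int16_t)COSPI_8_64);           /* + x3 * cospi_8 */
    b = vmlsl_n_s16(b, x2, (int16_t)COSPI_8_64);           /* - x2 * cospi_8 */
    *t2 = vrshrn_n_s32(a, DCT_CONST_BITS);                 /* rounding narrow */
    *t3 = vrshrn_n_s32(b, DCT_CONST_BITS);
  }
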
/external/libvpx/libvpx/vpx_dsp/

  txfm_common.h
    35   static const tran_high_t cospi_8_64 = 15137;   (variable)

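The definition above shows that cospi_8_64 is a Q14 fixed-point constant: round(cos(8 * pi / 64) * 2^14) = round(0.9238795 * 16384) = 15137. A stand-alone sanity check of that value (illustrative only, not part of the library):

  #include <math.h>
  #include <stdio.h>

  int main(void) {
    const double pi = 3.14159265358979323846;
    const double c = cos(8.0 * pi / 64.0);        /* cos(pi/8) ~= 0.9238795 */
    printf("%d\n", (int)(c * (1 << 14) + 0.5));   /* prints 15137 */
    return 0;
  }
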
  fwd_txfm.c
    60   temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
    61   temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
    134  t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
    135  t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
    268  t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
    269  t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
    318  temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
    319  temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;
    322  temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64;
    323  temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
    [all...]

  inv_txfm.c
    141  temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
    142  temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
    234  s4 = (int)(cospi_8_64 * x4 + cospi_24_64 * x5);
    235  s5 = (int)(cospi_24_64 * x4 - cospi_8_64 * x5);
    236  s6 = (int)(-cospi_24_64 * x6 + cospi_8_64 * x7);
    237  s7 = (int)(cospi_8_64 * x6 + cospi_24_64 * x7);
    292  temp1 = step1[1] * cospi_24_64 - step1[3] * cospi_8_64;
    293  temp2 = step1[1] * cospi_8_64 + step1[3] * cospi_24_64;
    487  s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
    488  s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
    [all...]

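Each cospi_24_64/cospi_8_64 pair in these scalar paths is one 2x2 fixed-point rotation whose 32-bit products are rounded back down by 14 bits. A minimal scalar sketch of the rotation at inv_txfm.c:141-142, with a local rounding macro standing in for the library's rounding helper:

  #include <stdint.h>

  #define COSPI_8_64 15137
  #define COSPI_24_64 6270
  /* Stand-in for the library's 14-bit rounding shift. */
  #define ROUND14(x) (((x) + (1 << 13)) >> 14)

  /* The pair of rotated outputs used by the 4-point inverse transform. */
  static void idct4_rotate(const int16_t *input, int32_t *out2, int32_t *out3) {
    int32_t temp1 = input[1] * COSPI_24_64 - input[3] * COSPI_8_64;
    int32_t temp2 = input[1] * COSPI_8_64 + input[3] * COSPI_24_64;
    *out2 = ROUND14(temp1);
    *out3 = ROUND14(temp2);
  }
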
/external/libvpx/libvpx/vpx_dsp/mips/

  itrans8_dspr2.c
    49   temp_1 = input[2] * cospi_24_64 - input[6] * cospi_8_64;
    55   "msub $ac0, %[Temp1], %[cospi_8_64] \n\t"
    68   temp_2 = input[2] * cospi_8_64 + input[6] * cospi_24_64;
    71   "madd $ac1, %[Temp0], %[cospi_8_64] \n\t"
    186  [cospi_20_64] "r"(cospi_20_64), [cospi_8_64] "r"(cospi_8_64),
    231  temp_1 = input[2] * cospi_24_64 - input[6] * cospi_8_64;
    237  "msub $ac0, %[Temp1], %[cospi_8_64] \n\t"
    250  temp_2 = input[2] * cospi_8_64 + input[6] * cospi_24_64;
    253  "madd $ac1, %[Temp0], %[cospi_8_64] \n\t
    [all...]

  itrans32_cols_dspr2.c
    326  "msub $ac1, %[load1], %[cospi_8_64] \n\t"
    329  "madd $ac3, %[load2], %[cospi_8_64] \n\t"
    343  [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64));
    383  "msub $ac1, %[load2], %[cospi_8_64] \n\t"
    385  "msub $ac3, %[load1], %[cospi_8_64] \n\t"
    399  [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64));
    457  "msub $ac0, %[temp0], %[cospi_8_64] \n\t"
    461  "madd $ac1, %[temp1], %[cospi_8_64] \n\t
    [all...]

  itrans32_dspr2.c
    370  "msub $ac1, %[load1], %[cospi_8_64] \n\t"
    373  "madd $ac3, %[load2], %[cospi_8_64] \n\t"
    387  [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64));
    427  "msub $ac1, %[load2], %[cospi_8_64] \n\t"
    429  "msub $ac3, %[load1], %[cospi_8_64] \n\t"
    443  [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64));
    501  "msub $ac0, %[temp0], %[cospi_8_64] \n\t"
    505  "madd $ac1, %[temp1], %[cospi_8_64] \n\t
    [all...]

  itrans16_dspr2.c
    53   "msub $ac3, %[load4], %[cospi_8_64] \n\t"
    58   "madd $ac1, %[load3], %[cospi_8_64] \n\t"
    75   [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64),
    119  "msub $ac1, %[load5], %[cospi_8_64] \n\t"
    121  "madd $ac3, %[load6], %[cospi_8_64] \n\t"
    137  [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64));
    180  "msub $ac1, %[load2], %[cospi_8_64] \n\t"
    182  "msub $ac3, %[load1], %[cospi_8_64] \n\t
    [all...]

  itrans4_dspr2.c
    53   temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
    57   "msub $ac0, %[Temp1], %[cospi_8_64] \n\t"
    61   temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
    64   "madd $ac1, %[Temp0], %[cospi_8_64] \n\t"
    90   [cospi_8_64] "r"(cospi_8_64), [cospi_16_64] "r"(cospi_16_64),
    139  temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
    143  "msub $ac0, %[Temp1], %[cospi_8_64] \n\t"
    147  temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
    150  "madd $ac1, %[Temp0], %[cospi_8_64] \n\t
    [all...]

  inv_txfm_msa.h
    25   v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
    122  c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
    123  c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
    239  k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
    240  k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
    256  -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 }; \
    258  -cospi_24_64, cospi_8_64, cospi_16_64, -cospi_16_64, 0, 0, 0, 0 \
    386  k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
    [all...]

  idct32x32_msa.c
    59   DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
    87   DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
    88   DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);
    160  DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
    169  DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
    196  DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
    205  DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
    370  DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
    400  DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
    401  DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1)
    [all...]

  fwd_dct32x32_msa.c
    82   DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
    104  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
    118  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
    182  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
    183  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
    216  DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
    217  DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
    335  DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64, cospi_8_64,
    363  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
    377  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1)
    [all...]

  fwd_txfm_msa.h
    23   cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, -cospi_8_64, 0, 0, 0 \
    63   v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
    123  v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
    187  v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
    188  -cospi_8_64, -cospi_24_64, cospi_12_64, cospi_20_64 }; \

  idct16x16_msa.c
    32   DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
    65   DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
    66   DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, reg11);
    124  DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
    162  DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
    163  DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, reg11);
    411  k0 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
    412  k1 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
    413  k2 = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);

  idct8x8_msa.c
    68   k2 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
    69   k3 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);

/external/libvpx/libvpx/vp9/encoder/

  vp9_dct.c
    36   temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
    37   temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
    64   t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
    65   t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
    143  t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
    144  t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
    194  temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
    195  temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;
    198  temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64;
    199  temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
    [all...]

/external/libvpx/libvpx/vpx_dsp/x86/

  fwd_txfm_impl_sse2.h
    53   octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,
    54   cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64);
    56   octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,
    57   cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64);
    65   octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64
    [all...]

  inv_txfm_ssse3.c
    28   const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
    29   const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
    232  const __m128i stg2_3 = pair_set_epi16(2 * cospi_8_64, 2 * cospi_8_64);
    447  const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64);
    448  const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64);
    449  const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
    539  const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64);
    540  const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64);
    541  const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
    [all...]

  inv_txfm_sse2.c
    75   const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
    76   const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
    259  const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
    260  const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
    348  const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
    349  const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
    374  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
    375  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
    376  const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
    604  const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
    [all...]

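The x86 entries pack each coefficient pair into one vector (e.g. pair_set_epi16(cospi_24_64, -cospi_8_64)) so that a single _mm_madd_epi16 over interleaved inputs produces x * cospi_24_64 - y * cospi_8_64 for four pairs at once. A sketch of that idea under those assumptions; pair_epi16 and butterfly_p24_m08 are local illustrative names, and the lane ordering here need not match the library's unpack order:

  #include <emmintrin.h>
  #include <stdint.h>

  #define COSPI_8_64 15137
  #define COSPI_24_64 6270
  #define DCT_CONST_BITS 14

  /* Low-to-high lanes: a, b, a, b, a, b, a, b. */
  static __m128i pair_epi16(int16_t a, int16_t b) {
    return _mm_set_epi16(b, a, b, a, b, a, b, a);
  }

  /* 'interleaved' holds four (x, y) int16 pairs; returns four int32 values
     of x*cospi_24 - y*cospi_8, rounded and shifted right by 14 bits. */
  static __m128i butterfly_p24_m08(__m128i interleaved) {
    const __m128i k = pair_epi16(COSPI_24_64, -COSPI_8_64);
    const __m128i rounding = _mm_set1_epi32(1 << (DCT_CONST_BITS - 1));
    __m128i prod = _mm_madd_epi16(interleaved, k);  /* pmaddwd */
    prod = _mm_add_epi32(prod, rounding);
    return _mm_srai_epi32(prod, DCT_CONST_BITS);
  }
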
/external/libvpx/libvpx/vp9/encoder/mips/msa/

  vp9_fdct_msa.h
    25   v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \