/external/libvpx/libvpx/vpx_dsp/

txfm_common.h
    47   static const tran_high_t cospi_20_64 = 9102;   (variable)
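
The value 9102 follows from libvpx's fixed-point convention: the trig constants
in txfm_common.h are cosines scaled by 2^14 (DCT_CONST_BITS), so cospi_20_64 is
round(16384 * cos(20*pi/64)). A standalone sketch to sanity-check the value
(illustrative only, not libvpx code):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      const double pi = 3.14159265358979323846;
      const double c = cos(20.0 * pi / 64.0);   /* ~0.5555702 */
      const long q14 = lround(16384.0 * c);     /* scale to Q14 (DCT_CONST_BITS) */
      printf("cospi_20_64 = %ld\n", q14);       /* prints 9102 */
      return 0;
    }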

fwd_txfm.c
    155  t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
    156  t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
    289  t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
    290  t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
    608  output[5] = dct_32_round(step[5] * cospi_12_64 + step[6] * cospi_20_64);
    609  output[6] = dct_32_round(step[6] * cospi_12_64 + step[5] * -cospi_20_64);
    625  output[21] = dct_32_round(step[21] * -cospi_20_64 + step[26] * cospi_12_64);
    626  output[22] = dct_32_round(step[22] * -cospi_12_64 + step[25] * -cospi_20_64);
    629  output[25] = dct_32_round(step[25] * cospi_12_64 + step[22] * -cospi_20_64);
    630  output[26] = dct_32_round(step[26] * cospi_20_64 + step[21] * cospi_12_64)
    [all...]

inv_txfm.c
    282  temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;
    283  temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;
    458  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
    459  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
    462  s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
    463  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
    616  temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
    617  temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
    928  temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
    929  temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64
    [all...]
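
Both the forward (fwd_txfm.c, vp9_dct.c) and inverse (inv_txfm.c) references
above are the same 2-point rotation: cospi_12_64 is the matching sine in the
same Q14 scale, since cos(12*pi/64) = sin(20*pi/64), so each pair of lines
computes x*cos +/- y*sin and rounds the product back down by DCT_CONST_BITS.
A minimal self-contained sketch of that pattern (the types and the rounding
helper are stand-ins for libvpx's tran_high_t and dct_const_round_shift, not
the real ones):

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    static const int64_t cospi_12_64 = 13623; /* round(2^14 * cos(12*pi/64)) */
    static const int64_t cospi_20_64 = 9102;  /* round(2^14 * cos(20*pi/64)) */

    /* Round a Q14 product back to an integer, as dct_const_round_shift does. */
    static int32_t round_q14(int64_t v) {
      return (int32_t)((v + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
    }

    /* Rotate the pair (x1, x2) by 20*pi/64: the butterfly seen above. */
    static void rotate_20(int64_t x1, int64_t x2, int32_t *t1, int32_t *t2) {
      *t1 = round_q14(x1 * cospi_12_64 + x2 * cospi_20_64);
      *t2 = round_q14(x2 * cospi_12_64 - x1 * cospi_20_64);
    }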

/external/libvpx/libvpx/vpx_dsp/mips/

itrans8_dspr2.c
    107  temp_1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;
    115  "msub $ac0, %[Temp1], %[cospi_20_64] \n\t"
    119  temp_2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;
    125  "madd $ac1, %[Temp0], %[cospi_20_64] \n\t"
    186  [cospi_20_64] "r"(cospi_20_64), [cospi_8_64] "r"(cospi_8_64),
    289  temp_1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;
    297  "msub $ac0, %[Temp1], %[cospi_20_64] \n\t"
    301  temp_2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;
    307  "madd $ac1, %[Temp0], %[cospi_20_64] \n\t
    [all...]

itrans32_cols_dspr2.c
    214  "msub $ac1, %[load1], %[cospi_20_64] \n\t"
    216  "madd $ac3, %[load2], %[cospi_20_64] \n\t"
    231  [cospi_12_64] "r"(cospi_12_64), [cospi_20_64] "r"(cospi_20_64));
    271  "msub $ac1, %[load2], %[cospi_20_64] \n\t"
    272  "msub $ac3, %[load1], %[cospi_20_64] \n\t"
    287  [cospi_12_64] "r"(cospi_12_64), [cospi_20_64] "r"(cospi_20_64));
    [all...]

itrans32_dspr2.c
    258  "msub $ac1, %[load1], %[cospi_20_64] \n\t"
    260  "madd $ac3, %[load2], %[cospi_20_64] \n\t"
    275  [cospi_12_64] "r"(cospi_12_64), [cospi_20_64] "r"(cospi_20_64));
    315  "msub $ac1, %[load2], %[cospi_20_64] \n\t"
    316  "msub $ac3, %[load1], %[cospi_20_64] \n\t"
    331  [cospi_12_64] "r"(cospi_12_64), [cospi_20_64] "r"(cospi_20_64));
    [all...]

idct8x8_msa.c
    56   k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
    57   k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);

itrans16_dspr2.c
    216  "msub $ac3, %[load8], %[cospi_20_64] \n\t"
    224  "madd $ac1, %[load7], %[cospi_20_64] \n\t"
    260  [cospi_20_64] "r"(cospi_20_64), [cospi_12_64] "r"(cospi_12_64),
    [all...]

idct16x16_msa.c
    28   DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
    120  DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
    394  k0 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
    395  k1 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
    396  k2 = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);

inv_txfm_msa.h
    220  v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64, \
    221  cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 }; \
    375  k0_m = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64); \
    376  k1_m = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64); \
    377  k2_m = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64); \

fwd_txfm_msa.h
    64   cospi_4_64, cospi_28_64, cospi_12_64, cospi_20_64 }; \
    124  cospi_4_64, cospi_28_64, cospi_12_64, cospi_20_64 }; \
    188  -cospi_8_64, -cospi_24_64, cospi_12_64, cospi_20_64 }; \

idct32x32_msa.c
    51   DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
    190  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
    191  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
    362  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
    494  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
    495  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);

fwd_dct32x32_msa.c
    96   DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
    219  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
    232  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
    354  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, in5, in4);
    427  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
    565  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);
    579  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);
    740  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);
    [all...]

/external/libvpx/libvpx/vpx_dsp/arm/

fwd_txfm_neon.c
    104  v_t1_lo = vmlal_n_s16(v_t1_lo, vget_low_s16(v_x2), (int16_t)cospi_20_64);
    105  v_t1_hi = vmlal_n_s16(v_t1_hi, vget_high_s16(v_x2), (int16_t)cospi_20_64);
    108  v_t2_lo = vmlsl_n_s16(v_t2_lo, vget_low_s16(v_x1), (int16_t)cospi_20_64);
    109  v_t2_hi = vmlsl_n_s16(v_t2_hi, vget_high_s16(v_x1), (int16_t)cospi_20_64);
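
fwd_txfm_neon.c applies the same rotation eight lanes at a time: widening
multiply-accumulates (vmull/vmlal/vmlsl_n_s16) build the 32-bit products, and
a rounding narrowing shift by DCT_CONST_BITS brings them back to 16 bits. A
simplified, self-contained sketch of that pattern (not the exact libvpx
routine; the function name and written-out constants are my own):

    #include <arm_neon.h>

    #define DCT_CONST_BITS 14

    static void rotate_20_neon(int16x8_t x1, int16x8_t x2,
                               int16x8_t *t1, int16x8_t *t2) {
      const int16_t c12 = 13623; /* cospi_12_64 */
      const int16_t c20 = 9102;  /* cospi_20_64 */

      /* t1 = x1 * cos + x2 * sin, accumulated in 32-bit lanes. */
      int32x4_t lo1 = vmull_n_s16(vget_low_s16(x1), c12);
      int32x4_t hi1 = vmull_n_s16(vget_high_s16(x1), c12);
      lo1 = vmlal_n_s16(lo1, vget_low_s16(x2), c20);
      hi1 = vmlal_n_s16(hi1, vget_high_s16(x2), c20);

      /* t2 = x2 * cos - x1 * sin. */
      int32x4_t lo2 = vmull_n_s16(vget_low_s16(x2), c12);
      int32x4_t hi2 = vmull_n_s16(vget_high_s16(x2), c12);
      lo2 = vmlsl_n_s16(lo2, vget_low_s16(x1), c20);
      hi2 = vmlsl_n_s16(hi2, vget_high_s16(x1), c20);

      /* Rounding narrow shift back from Q14 to 16-bit results. */
      *t1 = vcombine_s16(vrshrn_n_s32(lo1, DCT_CONST_BITS),
                         vrshrn_n_s32(hi1, DCT_CONST_BITS));
      *t2 = vcombine_s16(vrshrn_n_s32(lo2, DCT_CONST_BITS),
                         vrshrn_n_s32(hi2, DCT_CONST_BITS));
    }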

idct32x32_135_add_neon.c
    162  s3[21] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_20_64,
    165  cospi_20_64);
    168  s1[24], -cospi_20_64);
    169  s3[25] = multiply_accumulate_shift_and_narrow_s16(s1[23], -cospi_20_64,
    445  s3[5] = multiply_shift_and_narrow_s16(in[12], -cospi_20_64);
    467  s3[21] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_20_64,
    470  cospi_20_64);
    473  s2[25], -cospi_20_64);
    474  s3[25] = multiply_accumulate_shift_and_narrow_s16(s2[22], -cospi_20_64,

highbd_idct32x32_34_add_neon.c
    92   s1[21] = multiply_accumulate_shift_and_narrow_s32_dual(s1[20], -cospi_20_64,
    95   s1[27], cospi_20_64);
    98   s1[24], -cospi_20_64);
    99   s1[25] = multiply_accumulate_shift_and_narrow_s32_dual(s1[23], -cospi_20_64,
    416  s1[21] = multiply_accumulate_shift_and_narrow_s32_dual(s1[20], -cospi_20_64,
    419  s1[27], cospi_20_64);
    422  s1[24], -cospi_20_64);
    423  s1[25] = multiply_accumulate_shift_and_narrow_s32_dual(s1[23], -cospi_20_64,

idct32x32_34_add_neon.c
    85   s1[21] = multiply_accumulate_shift_and_narrow_s16(s1[20], -cospi_20_64,
    88   cospi_20_64);
    91   s1[24], -cospi_20_64);
    92   s1[25] = multiply_accumulate_shift_and_narrow_s16(s1[23], -cospi_20_64,
    312  s1[21] = multiply_accumulate_shift_and_narrow_s16(s1[20], -cospi_20_64,
    315  cospi_20_64);
    318  s1[24], -cospi_20_64);
    319  s1[25] = multiply_accumulate_shift_and_narrow_s16(s1[23], -cospi_20_64,

highbd_idct32x32_135_add_neon.c
    172  s3[21] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], -cospi_20_64,
    175  s2[26], cospi_20_64);
    178  s1[24], -cospi_20_64);
    179  s3[25] = multiply_accumulate_shift_and_narrow_s32_dual(s1[23], -cospi_20_64,
    521  s3[5] = multiply_shift_and_narrow_s32_dual(in[12], -cospi_20_64);
    543  s3[21] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], -cospi_20_64,
    546  s2[26], cospi_20_64);
    549  s2[25], -cospi_20_64);
    550  s3[25] = multiply_accumulate_shift_and_narrow_s32_dual(s2[22], -cospi_20_64,
    [all...]

highbd_idct32x32_1024_add_neon.c
    468  do_butterfly(q[14], q[13], cospi_12_64, cospi_20_64, &q[1], &q[3]);
    482  do_butterfly(q[14], q[13], -cospi_20_64, -cospi_12_64, &q[4], &q[7]);
    587  do_butterfly(q[14], q[13], cospi_12_64, cospi_20_64, &q[1], &q[3]);

idct32x32_add_neon.c
    598  do_butterfly(q[14], q[13], cospi_12_64, cospi_20_64, &q[1], &q[3]);
    612  do_butterfly(q[14], q[13], -cospi_20_64, -cospi_12_64, &q[4], &q[7]);
    717  do_butterfly(q[14], q[13], cospi_12_64, cospi_20_64, &q[1], &q[3]);

/external/libvpx/libvpx/vp9/encoder/

vp9_dct.c
    85   t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
    86   t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
    164  t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
    165  t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
    413  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
    414  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
    417  s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
    418  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
    618  t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
    619  t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
    [all...]

/external/libvpx/libvpx/vpx_dsp/x86/

inv_txfm_ssse3.c
    24   const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
    25   const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);
    226  const __m128i stg1_2 = pair_set_epi16(-2 * cospi_20_64, -2 * cospi_20_64);
    534  const __m128i stg3_8 = pair_set_epi16(-cospi_20_64, cospi_12_64);
    535  const __m128i stg3_9 = pair_set_epi16(cospi_12_64, cospi_20_64);
    536  const __m128i stg3_10 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
    711  const __m128i stk3_2 = pair_set_epi16(-2 * cospi_20_64, -2 * cospi_20_64);
    881  const __m128i stg3_8 = pair_set_epi16(-cospi_20_64, cospi_12_64)
    [all...]
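
The doubled constants (-2 * cospi_20_64) in inv_txfm_ssse3.c suggest the SSSE3
pmulhrsw shortcut: _mm_mulhrs_epi16 computes round((a * b) >> 15), so a Q14
cosine doubled into Q15 yields round(a * cospi >> 14) in a single instruction,
replacing the unpack/madd/shift sequence when only one constant is involved.
An illustrative sketch under that assumption (the helper name is mine, not the
libvpx code):

    #include <tmmintrin.h> /* SSSE3 */

    /* Multiply eight 16-bit lanes by -cospi_20_64 with rounding, via Q15. */
    static __m128i mul_neg_cospi_20(__m128i a) {
      const __m128i k = _mm_set1_epi16(-2 * 9102); /* -2 * cospi_20_64 */
      return _mm_mulhrs_epi16(a, k);               /* round(a * -9102 >> 14) */
    }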

inv_txfm_sse2.c
    255  const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
    256  const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);
    344  const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
    345  const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);
    600  const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
    601  const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);
    [all...]

fwd_txfm_impl_sse2.h
    270  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
    271  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
    592  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
    593  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
    [all...]
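
On the SSE2 side, the two cosines are packed as interleaved 16-bit pairs
(pair_set_epi16) and applied with _mm_madd_epi16, which multiplies adjacent
lanes and sums them, giving x1*cos12 + x2*cos20 per 32-bit lane; a rounding
add and arithmetic shift by DCT_CONST_BITS then returns the result to the
original scale. A self-contained sketch of that pattern using raw intrinsics
in place of the libvpx helpers (illustrative only, not the library code):

    #include <emmintrin.h> /* SSE2 */

    #define DCT_CONST_BITS 14

    static void rotate_20_sse2(__m128i x1, __m128i x2,
                               __m128i *t1, __m128i *t2) {
      /* Interleaved (cos12, cos20) and (-cos20, cos12) constant pairs, Q14. */
      const __m128i k_p12_p20 = _mm_set1_epi32((9102 << 16) | 13623);
      const __m128i k_m20_p12 = _mm_set1_epi32((13623 << 16) | ((-9102) & 0xffff));
      const __m128i k_round = _mm_set1_epi32(1 << (DCT_CONST_BITS - 1));

      /* Pair up x1/x2 so each 32-bit lane holds (x1[i], x2[i]). */
      const __m128i lo = _mm_unpacklo_epi16(x1, x2);
      const __m128i hi = _mm_unpackhi_epi16(x1, x2);

      /* madd: x1*cos12 + x2*cos20 and -x1*cos20 + x2*cos12 per lane. */
      __m128i u0 = _mm_madd_epi16(lo, k_p12_p20);
      __m128i u1 = _mm_madd_epi16(hi, k_p12_p20);
      __m128i v0 = _mm_madd_epi16(lo, k_m20_p12);
      __m128i v1 = _mm_madd_epi16(hi, k_m20_p12);

      /* Round, shift back from Q14, and narrow to 16 bits with saturation. */
      u0 = _mm_srai_epi32(_mm_add_epi32(u0, k_round), DCT_CONST_BITS);
      u1 = _mm_srai_epi32(_mm_add_epi32(u1, k_round), DCT_CONST_BITS);
      v0 = _mm_srai_epi32(_mm_add_epi32(v0, k_round), DCT_CONST_BITS);
      v1 = _mm_srai_epi32(_mm_add_epi32(v1, k_round), DCT_CONST_BITS);
      *t1 = _mm_packs_epi32(u0, u1);
      *t2 = _mm_packs_epi32(v0, v1);
    }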

/external/libvpx/libvpx/vp9/encoder/x86/

vp9_dct_ssse3.c
    40   const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
    41   const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);