/external/libvpx/libvpx/vpx_dsp/mips/

itrans32_cols_dspr2.c
  332: "madd $ac1, %[load2], %[cospi_24_64] \n\t"
  333: "madd $ac3, %[load1], %[cospi_24_64] \n\t"
  348: [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
  388: "msub $ac1, %[load1], %[cospi_24_64] \n\t"
  390: "madd $ac3, %[load2], %[cospi_24_64] \n\t"
  405: [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
  462: "madd $ac0, %[step2_29], %[cospi_24_64] \n\t"
  468: [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64 [all...]

itrans32_dspr2.c
  388: "madd $ac1, %[load2], %[cospi_24_64] \n\t"
  389: "madd $ac3, %[load1], %[cospi_24_64] \n\t"
  406: [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
  449: "msub $ac1, %[load1], %[cospi_24_64] \n\t"
  451: "madd $ac3, %[load2], %[cospi_24_64] \n\t"
  468: [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
  530: "madd $ac0, %[step2_29], %[cospi_24_64] \n\t"
  536: [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64 [all...]

itrans4_dspr2.c
   53: temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
   56: "madd $ac0, %[Temp0], %[cospi_24_64] \n\t"
   61: temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
   65: "madd $ac1, %[Temp1], %[cospi_24_64] \n\t"
   93: [cospi_24_64] "r" (cospi_24_64),
  153: temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
  156: "madd $ac0, %[Temp0], %[cospi_24_64] \n\t"
  161: temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
  165: "madd $ac1, %[Temp1], %[cospi_24_64] \n\t [all...]

itrans8_dspr2.c
   49: temp_1 = input[2] * cospi_24_64 - input[6] * cospi_8_64;
   54: "madd $ac0, %[Temp0], %[cospi_24_64] \n\t"
   68: temp_2 = input[2] * cospi_8_64 + input[6] * cospi_24_64;
   72: "madd $ac1, %[Temp1], %[cospi_24_64] \n\t"
  188: [cospi_24_64] "r" (cospi_24_64),
  244: temp_1 = input[2] * cospi_24_64 - input[6] * cospi_8_64;
  249: "madd $ac0, %[Temp0], %[cospi_24_64] \n\t"
  263: temp_2 = input[2] * cospi_8_64 + input[6] * cospi_24_64;
  267: "madd $ac1, %[Temp1], %[cospi_24_64] \n\t [all...]

itrans16_dspr2.c
   52: "madd $ac3, %[load3], %[cospi_24_64] \n\t"
   59: "madd $ac1, %[load4], %[cospi_24_64] \n\t"
   75: [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64),
  119: "madd $ac1, %[load6], %[cospi_24_64] \n\t"
  121: "madd $ac3, %[load5], %[cospi_24_64] \n\t"
  138: [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
  181: "msub $ac1, %[load1], %[cospi_24_64] \n\t"
  183: "madd $ac3, %[load2], %[cospi_24_64] \n\t [all...]
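
These DSPr2 inline-assembly paths compute the same fixed-point rotations as the generic C transforms: each madd/msub pair accumulates the two products into one of the DSP accumulators ($ac0..$ac3) and the rounded result is then extracted. A minimal scalar model of one such sequence is sketched below; the 14-bit precision (DCT_CONST_BITS) and the 8192 rounding bias come from libvpx's generic C code, while the function and variable names are illustrative only.

    #include <stdint.h>

    /* Scalar model (not the DSPr2 asm itself) of one accumulator sequence such as
     *   temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64
     * followed by the rounded extraction, i.e. dct_const_round_shift(temp1). */
    static int32_t rotate_round(int32_t a, int32_t b, int32_t c_a, int32_t c_b) {
      int64_t acc = 1 << 13;        /* rounding bias: half of 1 << DCT_CONST_BITS */
      acc += (int64_t)a * c_a;      /* modeled by "madd $acN, a, c_a" */
      acc -= (int64_t)b * c_b;      /* modeled by "msub $acN, b, c_b" */
      return (int32_t)(acc >> 14);  /* DCT_CONST_BITS == 14 */
    }
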
inv_txfm_msa.h
   25: -cospi_16_64, cospi_24_64, -cospi_24_64, 0, 0 }; \
  122: c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
  123: c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
  237: k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
  238: k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
  254: cospi_6_64, -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 }; \
  255: v8i16 mask3_m = { -cospi_24_64, cospi_8_64, cospi_16_64, \
  385: k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
  386: k1_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); [all...]

idct32x32_msa.c
   59: DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
   87: DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
   88: DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);
  160: DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
  169: DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
  197: DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
  207: DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
  375: DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
  405: DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
  406: DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1) [all...]

fwd_dct32x32_msa.c
   82: DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);
  104: DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
  118: DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);
  182: DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);
  183: DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);
  216: DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);
  217: DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);
  338: DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64,
  366: DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);
  380: DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1) [all...]

idct16x16_msa.c
   32: DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
   65: DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
   66: DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, reg11);
  124: DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
  162: DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
  163: DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, reg11);
  413: k0 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
  414: k1 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
  415: k2 = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);
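
In the MSA paths, each DOTP_CONST_PAIR call site appears to take the place of one pair of rotation expressions from the generic C transforms, with the constants prepacked by VP9_SET_COSPI_PAIR. The macro's exact argument convention is not visible in these hits; the sketch below only restates, per lane, the scalar arithmetic of the step it replaces, in the same form as the generic C idct (e.g. inv_txfm.c lines 469-470 later in this listing).

    #include <stdint.h>

    /* Per-lane scalar counterpart of a call such as
     *   DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
     * The vector macro evaluates the same two rounded dot products across eight
     * int16 lanes; this model documents the arithmetic, not the macro body. */
    static void rotation_pair(int32_t a, int32_t b, int32_t c0, int32_t c1,
                              int32_t *out0, int32_t *out1) {
      int64_t t0 = (int64_t)a * c0 - (int64_t)b * c1;
      int64_t t1 = (int64_t)a * c1 + (int64_t)b * c0;
      *out0 = (int32_t)((t0 + (1 << 13)) >> 14);  /* DCT_CONST_BITS == 14 */
      *out1 = (int32_t)((t1 + (1 << 13)) >> 14);
    }
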
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/mips/dspr2/

vp9_itrans32_cols_dspr2.c
  337: "madd $ac1, %[load2], %[cospi_24_64] \n\t"
  338: "madd $ac3, %[load1], %[cospi_24_64] \n\t"
  353: [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
  393: "msub $ac1, %[load1], %[cospi_24_64] \n\t"
  395: "madd $ac3, %[load2], %[cospi_24_64] \n\t"
  410: [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
  467: "madd $ac0, %[step2_29], %[cospi_24_64] \n\t"
  473: [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64 [all...]

vp9_itrans32_dspr2.c
  391: "madd $ac1, %[load2], %[cospi_24_64] \n\t"
  392: "madd $ac3, %[load1], %[cospi_24_64] \n\t"
  409: [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
  452: "msub $ac1, %[load1], %[cospi_24_64] \n\t"
  454: "madd $ac3, %[load2], %[cospi_24_64] \n\t"
  471: [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64)
  533: "madd $ac0, %[step2_29], %[cospi_24_64] \n\t"
  539: [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64 [all...]

vp9_itrans16_dspr2.c
   57: "madd $ac3, %[load3], %[cospi_24_64] \n\t"
   64: "madd $ac1, %[load4], %[cospi_24_64] \n\t"
   80: [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64),
  124: "madd $ac1, %[load6], %[cospi_24_64] \n\t"
  126: "madd $ac3, %[load5], %[cospi_24_64] \n\t"
  143: [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64)
  186: "msub $ac1, %[load1], %[cospi_24_64] \n\t"
  188: "madd $ac3, %[load2], %[cospi_24_64] \n\t [all...]

vp9_itrans8_dspr2.c
   55: temp_1 = input[2] * cospi_24_64 - input[6] * cospi_8_64;
   60: "madd $ac0, %[Temp0], %[cospi_24_64] \n\t"
   74: temp_2 = input[2] * cospi_8_64 + input[6] * cospi_24_64;
   78: "madd $ac1, %[Temp1], %[cospi_24_64] \n\t"
  194: [cospi_24_64] "r" (cospi_24_64),
  250: temp_1 = input[2] * cospi_24_64 - input[6] * cospi_8_64;
  255: "madd $ac0, %[Temp0], %[cospi_24_64] \n\t"
  269: temp_2 = input[2] * cospi_8_64 + input[6] * cospi_24_64;
  273: "madd $ac1, %[Temp1], %[cospi_24_64] \n\t [all...]

vp9_itrans4_dspr2.c
   58: temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
   61: "madd $ac0, %[Temp0], %[cospi_24_64] \n\t"
   66: temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
   70: "madd $ac1, %[Temp1], %[cospi_24_64] \n\t"
   98: [cospi_24_64] "r" (cospi_24_64),
  158: temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
  161: "madd $ac0, %[Temp0], %[cospi_24_64] \n\t"
  166: temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
  170: "madd $ac1, %[Temp1], %[cospi_24_64] \n\t [all...]
/external/libvpx/libvpx/vpx_dsp/

txfm_common.h
   51: static const tran_high_t cospi_24_64 = 6270;  (variable)
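
The value 6270 is cos(24*pi/64) in 14-bit fixed point: the cospi_i_64 table holds round(16384 * cos(i*pi/64)), so cospi_24_64 = round(16384 * 0.38268...) = 6270 and its partner cospi_8_64 = round(16384 * cos(8*pi/64)) = 15137. The snippet below regenerates the table to show the relationship; it is an illustrative check, not libvpx code.

    #include <math.h>
    #include <stdio.h>

    /* Illustrative check: print the cospi_i_64 constants in the form libvpx
     * uses (14 fractional bits). For i == 24 this prints 6270, matching the
     * definition at line 51 above. */
    int main(void) {
      const double kPi = acos(-1.0);
      for (int i = 1; i < 32; ++i)
        printf("static const tran_high_t cospi_%d_64 = %.0f;\n", i,
               round(16384 * cos(i * kPi / 64)));
      return 0;
    }
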
fwd_txfm.c
   57: temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
   58: temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
  136: t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
  137: t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
  272: t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
  273: t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
  322: temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
  323: temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;
  326: temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64;
  327: temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64 [all...]

inv_txfm.c
  102: temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
  103: temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
  178: temp1 = step1[1] * cospi_24_64 - step1[3] * cospi_8_64;
  179: temp2 = step1[1] * cospi_8_64 + step1[3] * cospi_24_64;
  327: s4 = (int)(cospi_8_64 * x4 + cospi_24_64 * x5);
  328: s5 = (int)(cospi_24_64 * x4 - cospi_8_64 * x5);
  329: s6 = (int)(-cospi_24_64 * x6 + cospi_8_64 * x7);
  330: s7 = (int)(cospi_8_64 * x6 + cospi_24_64 * x7);
  469: temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
  470: temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
  [all...]
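
In the generic C code the (cospi_24_64, cospi_8_64) pair always forms a 2x2 fixed-point rotation applied to one pair of butterfly values; the inverse transform applies the transposed coefficient matrix, so the forward and inverse stages are orthogonal and cancel up to rounding once each result is shifted back down by 14 bits. A self-contained sketch of the pair, with the rounding helper restated under the assumption that DCT_CONST_BITS == 14 (the wrapper names fwd_rotate/inv_rotate are illustrative):

    #include <stdint.h>

    typedef int64_t tran_high_t;

    static const tran_high_t cospi_8_64 = 15137;  /* 16384 * cos( 8*pi/64) */
    static const tran_high_t cospi_24_64 = 6270;  /* 16384 * cos(24*pi/64) */

    /* Mirrors libvpx's dct_const_round_shift(), assuming DCT_CONST_BITS == 14. */
    static tran_high_t dct_const_round_shift(tran_high_t x) {
      return (x + (1 << 13)) >> 14;
    }

    /* Forward stage, as at fwd_txfm.c:136-137. */
    static void fwd_rotate(tran_high_t x2, tran_high_t x3,
                           tran_high_t *t2, tran_high_t *t3) {
      *t2 = dct_const_round_shift(x2 * cospi_24_64 + x3 * cospi_8_64);
      *t3 = dct_const_round_shift(-x2 * cospi_8_64 + x3 * cospi_24_64);
    }

    /* Inverse stage, as at inv_txfm.c:102-103: the transposed matrix. */
    static void inv_rotate(tran_high_t in1, tran_high_t in3,
                           tran_high_t *y2, tran_high_t *y3) {
      *y2 = dct_const_round_shift(in1 * cospi_24_64 - in3 * cospi_8_64);
      *y3 = dct_const_round_shift(in1 * cospi_8_64 + in3 * cospi_24_64);
    }
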
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/

vp9_idct.c
  107: temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
  108: temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
  356: s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
  357: s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
  358: s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
  359: s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
  530: temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
  531: temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
  541: temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
  542: temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64 [all...]

vp9_idct.h
   67: static const int cospi_24_64 = 6270;  (variable)
/external/libvpx/libvpx/vp9/encoder/

vp9_dct.c
   36: temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
   37: temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
   64: t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
   65: t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
  143: t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
  144: t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
  194: temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
  195: temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;
  198: temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64;
  199: temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64 [all...]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/

vp9_dct.c
   40: temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
   41: temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
   89: temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
   90: temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
  213: t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
  214: t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
  273: t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
  274: t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
  397: t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
  398: t3 = x3 * cospi_24_64 - x2 * cospi_8_64 [all...]
/external/libvpx/libvpx/vpx_dsp/x86/

fwd_txfm_impl_sse2.h
   54: const __m128i k__cospi_C = octa_set_epi16(cospi_8_64, cospi_24_64,
   55:                                           cospi_8_64, cospi_24_64,
   56:                                           cospi_24_64, -cospi_8_64,
   57:                                           cospi_24_64, -cospi_8_64);
   58: const __m128i k__cospi_D = octa_set_epi16(cospi_24_64, -cospi_8_64,
   59:                                           cospi_24_64, -cospi_8_64,
   60:                                           cospi_8_64, cospi_24_64,
   61:                                           cospi_8_64, cospi_24_64);
   70: const __m128i k__cospi_G = octa_set_epi16(cospi_8_64, cospi_24_64,
   71:                                           cospi_8_64, cospi_24_64, [all...]
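
The x86 forward transform packs the two rotation constants into interleaved 16-bit pairs (the octa_set_epi16 constants above) so that a single pmaddwd per register half evaluates x*c0 + y*c1 in every 32-bit lane. A stand-alone sketch of that pattern, assuming the same 14-bit rounding as the generic C code (the helper name and packing are illustrative, not copied from fwd_txfm_impl_sse2.h):

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>

    #define DCT_CONST_BITS 14

    /* Computes out[i] = (x[i] * c0 + y[i] * c1 + 8192) >> 14 for eight int16
     * lanes. Interleaving x/y with unpacklo/unpackhi lets _mm_madd_epi16 form
     * both products of the rotation in one instruction per half. */
    static __m128i rotate_one_output(__m128i x, __m128i y,
                                     int16_t c0, int16_t c1) {
      const __m128i k_pair =
          _mm_set1_epi32((int32_t)(((uint32_t)(uint16_t)c1 << 16) | (uint16_t)c0));
      const __m128i k_round = _mm_set1_epi32(1 << (DCT_CONST_BITS - 1));
      const __m128i lo = _mm_unpacklo_epi16(x, y);  /* x0,y0,x1,y1,... */
      const __m128i hi = _mm_unpackhi_epi16(x, y);
      __m128i sum_lo = _mm_madd_epi16(lo, k_pair);  /* x*c0 + y*c1 per lane */
      __m128i sum_hi = _mm_madd_epi16(hi, k_pair);
      sum_lo = _mm_srai_epi32(_mm_add_epi32(sum_lo, k_round), DCT_CONST_BITS);
      sum_hi = _mm_srai_epi32(_mm_add_epi32(sum_hi, k_round), DCT_CONST_BITS);
      return _mm_packs_epi32(sum_lo, sum_hi);       /* saturate back to int16 */
    }
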
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/

vp9_iht4x4_add_neon.asm
   20: ; cospi_16_64. d2 must contain cospi_24_64. The output will be stored back
   29: vmull.s16 q15, d17, d2 ; input[1] * cospi_24_64
   33: vmlsl.s16 q15, d19, d0 ; input[1] * cospi_24_64 - input[3] * cospi_8_64
   34: vmlal.s16 q10, d19, d2 ; input[1] * cospi_8_64 + input[3] * cospi_24_64
   98: ; cospi_24_64 = 6270 = 0x187e
  105: vdup.16 d2, r12 ; duplicate cospi_24_64

vp9_iht8x8_add_neon.asm
   57: ; generate cospi_24_64 = 6270
   92: ; generate cospi_24_64 = 6270
  197: vdup.16 d0, r8 ; duplicate cospi_24_64
  208: ; input[1] * cospi_24_64
  216: ; input[1] * cospi_24_64 - input[3] * cospi_8_64
  220: ; input[1] * cospi_8_64 + input[3] * cospi_24_64
  420: vdup.16 d31, r9 ; duplicate cospi_24_64
  434: ; cospi_24_64 * x4
  438: ; s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
  442: ; s5 = cospi_24_64 * x4 - cospi_8_64 * x5 [all...]
/external/libvpx/libvpx/vpx_dsp/arm/

fwd_txfm_neon.c
   47: int32x4_t v_t2_lo = vmull_n_s16(vget_low_s16(v_x2), (int16_t)cospi_24_64);
   48: int32x4_t v_t2_hi = vmull_n_s16(vget_high_s16(v_x2), (int16_t)cospi_24_64);
   49: int32x4_t v_t3_lo = vmull_n_s16(vget_low_s16(v_x3), (int16_t)cospi_24_64);
   50: int32x4_t v_t3_hi = vmull_n_s16(vget_high_s16(v_x3), (int16_t)cospi_24_64);
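
The NEON C path starts each rotation the same way the assembly does: widen to 32 bits with vmull_n_s16, accumulate the second product, then narrow back with a rounding shift by 14. A self-contained sketch of that pattern (the helper name is illustrative; the exact follow-up calls in fwd_txfm_neon.c are elided above):

    #include <arm_neon.h>
    #include <stdint.h>

    #define DCT_CONST_BITS 14

    /* out[i] = (x[i] * c0 + y[i] * c1 + 8192) >> 14 across eight int16 lanes,
     * mirroring the vmull/vmlal + rounding-narrow pattern started at lines 47-50.
     * A subtracting term (e.g. -x2 * cospi_8_64) would use vmlsl_n_s16 instead. */
    static int16x8_t rotate_madd_round(int16x8_t x, int16x8_t y,
                                       int16_t c0, int16_t c1) {
      int32x4_t lo = vmull_n_s16(vget_low_s16(x), c0);   /* widen: x * c0 */
      int32x4_t hi = vmull_n_s16(vget_high_s16(x), c0);
      lo = vmlal_n_s16(lo, vget_low_s16(y), c1);         /* += y * c1 */
      hi = vmlal_n_s16(hi, vget_high_s16(y), c1);
      /* Rounding narrowing shift by DCT_CONST_BITS == dct_const_round_shift(). */
      return vcombine_s16(vrshrn_n_s32(lo, DCT_CONST_BITS),
                          vrshrn_n_s32(hi, DCT_CONST_BITS));
    }
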