/external/libvpx/libvpx/vp8/encoder/x86/
encodeopt.asm
    36: pmaddwd xmm0, xmm0
    37: pmaddwd xmm2, xmm2
    89: pmaddwd mm5, mm5
    95: pmaddwd mm1, mm1
    106: pmaddwd mm5, mm5
    109: pmaddwd mm3, mm3
    160: pmaddwd mm5, mm5
    165: pmaddwd mm3, mm3
    177: pmaddwd mm5, mm5
    180: pmaddwd mm3, mm [all...]
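Note: in encodeopt.asm (and in the vp9_error and variance files further down) pmaddwd is applied to a register against itself, so each pair of signed 16-bit values is squared and the two squares are summed into one 32-bit lane — the "square and accumulate" step of a sum-of-squared-errors kernel. A minimal intrinsics sketch of that pattern, not the libvpx code itself; the function name and the 16-coefficient block size are illustrative:

    #include <emmintrin.h>  /* SSE2: _mm_madd_epi16 generates pmaddwd */
    #include <stdint.h>

    /* Hypothetical helper: sum of squared differences of two 16-coefficient
     * blocks of int16 values.  Differences are assumed to fit in 16 bits. */
    static int32_t block_sse_16(const int16_t *a, const int16_t *b)
    {
        __m128i acc = _mm_setzero_si128();
        for (int i = 0; i < 16; i += 8) {
            __m128i va = _mm_loadu_si128((const __m128i *)(a + i));
            __m128i vb = _mm_loadu_si128((const __m128i *)(b + i));
            __m128i d  = _mm_sub_epi16(va, vb);
            /* pmaddwd d, d: d0*d0 + d1*d1, d2*d2 + d3*d3, ... -> 4 x int32 */
            acc = _mm_add_epi32(acc, _mm_madd_epi16(d, d));
        }
        /* fold the four 32-bit partial sums into one scalar */
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 8));
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 4));
        return _mm_cvtsi128_si32(acc);
    }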
dct_mmx.asm
    94: pmaddwd mm1, MMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
    95: pmaddwd mm4, MMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
    97: pmaddwd mm3, MMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
    98: pmaddwd mm5, MMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
    179: pmaddwd mm1, MMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
    180: pmaddwd mm4, MMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
    182: pmaddwd mm3, MMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
    183: pmaddwd mm5, MMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
dct_sse2.asm
    94: pmaddwd xmm0, XMMWORD PTR[GLOBAL(_mult_add)] ;a1 + b1
    95: pmaddwd xmm1, XMMWORD PTR[GLOBAL(_mult_sub)] ;a1 - b1
    97: pmaddwd xmm3, XMMWORD PTR[GLOBAL(_5352_2217)] ;c1*2217 + d1*5352
    98: pmaddwd xmm4, XMMWORD PTR[GLOBAL(_2217_neg5352)];d1*2217 - c1*5352
    136: pmaddwd xmm0, XMMWORD PTR[GLOBAL(_mult_add)] ;a1 + b1
    137: pmaddwd xmm1, XMMWORD PTR[GLOBAL(_mult_sub)] ;a1 - b1
    149: pmaddwd xmm3, XMMWORD PTR[GLOBAL(_5352_2217)] ;c1*2217 + d1*5352
    150: pmaddwd xmm4, XMMWORD PTR[GLOBAL(_2217_neg5352)] ;d1*2217 - c1*5352
    246: pmaddwd xmm1, XMMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
    247: pmaddwd xmm4, XMMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*535 [all...]
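Note: in dct_mmx.asm and dct_sse2.asm the second operand is a constant word pair (_5352_2217, _2217_neg5352), so one pmaddwd produces the rotated butterfly outputs c1*2217 + d1*5352 and d1*2217 - c1*5352 quoted in the comments. A hedged intrinsics sketch of that trick — the constants are taken from the comments above, but the (c, d) interleaving and the omission of the rounding/shift stage are assumptions of this sketch, not the files' actual register layout:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Illustrative sketch of the pmaddwd "rotation" used in the forward DCT:
     * one pmaddwd against a (2217, 5352) word pair yields c*2217 + d*5352,
     * a second against (-5352, 2217) yields d*2217 - c*5352.  The constant
     * order matches this sketch's (c, d) interleaving; the .asm files may
     * interleave the other way round. */
    static void dct_rotate4(const int16_t c[4], const int16_t d[4],
                            int32_t sum[4], int32_t diff[4])
    {
        const __m128i k_sum  = _mm_setr_epi16(2217, 5352, 2217, 5352,
                                              2217, 5352, 2217, 5352);
        const __m128i k_diff = _mm_setr_epi16(-5352, 2217, -5352, 2217,
                                              -5352, 2217, -5352, 2217);

        __m128i vc = _mm_loadl_epi64((const __m128i *)c);  /* c0..c3 */
        __m128i vd = _mm_loadl_epi64((const __m128i *)d);  /* d0..d3 */
        __m128i cd = _mm_unpacklo_epi16(vc, vd);  /* c0 d0 c1 d1 c2 d2 c3 d3 */

        /* pmaddwd: (c*2217 + d*5352) and (d*2217 - c*5352) per 32-bit lane */
        _mm_storeu_si128((__m128i *)sum,  _mm_madd_epi16(cd, k_sum));
        _mm_storeu_si128((__m128i *)diff, _mm_madd_epi16(cd, k_diff));
    }

In the real transform these raw products are then rounded and shifted back down; that stage is omitted here.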
fwalsh_sse2.asm
    78: pmaddwd xmm0, [GLOBAL(c1)] ; d11 a11 d10 a10
    79: pmaddwd xmm2, [GLOBAL(cn1)] ; c11 b11 c10 b10
    81: pmaddwd xmm1, [GLOBAL(c1)] ; d12 a12 d13 a13
    82: pmaddwd xmm3, [GLOBAL(cn1)] ; c12 b12 c13 b13
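Note: fwalsh_sse2.asm uses pmaddwd as a pairwise add/subtract engine. Judging by the result comments (d11 a11 d10 a10 / c11 b11 c10 b10), c1 and cn1 presumably hold word pairs of +1/+1 and +1/-1, so one instruction forms the Walsh-Hadamard butterfly sums and another the differences, with no real multiplication involved. A sketch under that assumption; the input pairing is illustrative, not the file's register layout:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Sketch: with +1/+1 and +1/-1 word pairs as the second operand, pmaddwd
     * degenerates into pairwise add / subtract, e.g. sums like x0+x2 next to
     * differences like x0-x2 if the input words are pre-paired that way.    */
    static void wht_butterfly(const int16_t pairs[8],
                              int32_t sums[4], int32_t diffs[4])
    {
        const __m128i k_add = _mm_set1_epi16(1);                  /* +1 +1 ... */
        const __m128i k_sub = _mm_setr_epi16(1, -1, 1, -1, 1, -1, 1, -1);

        __m128i v = _mm_loadu_si128((const __m128i *)pairs);
        _mm_storeu_si128((__m128i *)sums,  _mm_madd_epi16(v, k_add)); /* pairs[0]+pairs[1], ... */
        _mm_storeu_si128((__m128i *)diffs, _mm_madd_epi16(v, k_sub)); /* pairs[0]-pairs[1], ... */
    }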
quantize_mmx.asm
    217: pmaddwd mm0, mm2
    219: pmaddwd mm1, mm3
    240: pmaddwd mm0, mm2
    242: pmaddwd mm1, mm3
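Note: quantize_mmx.asm is the one place above where pmaddwd gets two different registers, i.e. an ordinary packed multiply with pairwise 32-bit accumulation, and like most of the vp8 hits it runs at MMX (mm-register, 64-bit) width. A generic sketch of that form — the hits do not show what the quantizer actually multiplies at lines 217-242, so this is just a 4-element dot product:

    #include <mmintrin.h>   /* MMX: _mm_madd_pi16 generates the 64-bit pmaddwd */
    #include <stdint.h>

    /* Generic sketch: dot product of two 4-element int16 vectors using the
     * mm-register form of pmaddwd (two 32-bit partial sums, then combined). */
    static int32_t dot4_mmx(const int16_t a[4], const int16_t b[4])
    {
        __m64 va   = _mm_set_pi16(a[3], a[2], a[1], a[0]);
        __m64 vb   = _mm_set_pi16(b[3], b[2], b[1], b[0]);
        __m64 prod = _mm_madd_pi16(va, vb);     /* a0*b0+a1*b1, a2*b2+a3*b3 */
        /* add the high 32-bit half onto the low one */
        __m64 sum  = _mm_add_pi32(prod, _mm_unpackhi_pi32(prod, prod));
        int32_t r  = _mm_cvtsi64_si32(sum);
        _mm_empty();    /* clear MMX state so x87 code can run afterwards */
        return r;
    }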
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/x86/
encodeopt.asm
    36: pmaddwd xmm0, xmm0
    37: pmaddwd xmm2, xmm2
    89: pmaddwd mm5, mm5
    95: pmaddwd mm1, mm1
    106: pmaddwd mm5, mm5
    109: pmaddwd mm3, mm3
    160: pmaddwd mm5, mm5
    165: pmaddwd mm3, mm3
    177: pmaddwd mm5, mm5
    180: pmaddwd mm3, mm [all...]
dct_mmx.asm
    94: pmaddwd mm1, MMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
    95: pmaddwd mm4, MMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
    97: pmaddwd mm3, MMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
    98: pmaddwd mm5, MMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
    179: pmaddwd mm1, MMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
    180: pmaddwd mm4, MMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
    182: pmaddwd mm3, MMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
    183: pmaddwd mm5, MMWORD PTR[GLOBAL(_2217_neg5352)] ; d1*2217 - c1*5352
dct_sse2.asm
    94: pmaddwd xmm0, XMMWORD PTR[GLOBAL(_mult_add)] ;a1 + b1
    95: pmaddwd xmm1, XMMWORD PTR[GLOBAL(_mult_sub)] ;a1 - b1
    97: pmaddwd xmm3, XMMWORD PTR[GLOBAL(_5352_2217)] ;c1*2217 + d1*5352
    98: pmaddwd xmm4, XMMWORD PTR[GLOBAL(_2217_neg5352)];d1*2217 - c1*5352
    136: pmaddwd xmm0, XMMWORD PTR[GLOBAL(_mult_add)] ;a1 + b1
    137: pmaddwd xmm1, XMMWORD PTR[GLOBAL(_mult_sub)] ;a1 - b1
    149: pmaddwd xmm3, XMMWORD PTR[GLOBAL(_5352_2217)] ;c1*2217 + d1*5352
    150: pmaddwd xmm4, XMMWORD PTR[GLOBAL(_2217_neg5352)] ;d1*2217 - c1*5352
    246: pmaddwd xmm1, XMMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*5352
    247: pmaddwd xmm4, XMMWORD PTR[GLOBAL (_5352_2217)] ; c1*2217 + d1*535 [all...]
fwalsh_sse2.asm
    78: pmaddwd xmm0, [GLOBAL(c1)] ; d11 a11 d10 a10
    79: pmaddwd xmm2, [GLOBAL(cn1)] ; c11 b11 c10 b10
    81: pmaddwd xmm1, [GLOBAL(c1)] ; d12 a12 d13 a13
    82: pmaddwd xmm3, [GLOBAL(cn1)] ; c12 b12 c13 b13
/external/libvpx/libvpx/vp9/encoder/x86/
vp9_error_sse2.asm
    35: pmaddwd m0, m0
    36: pmaddwd m1, m1
    37: pmaddwd m2, m2
    38: pmaddwd m3, m3
vp9_variance_impl_mmx.asm
    35: pmaddwd mm0, mm0
    36: pmaddwd mm1, mm1
    37: pmaddwd mm2, mm2
    38: pmaddwd mm3, mm3
    112: pmaddwd mm0, mm0 ; square and accumulate
    113: pmaddwd mm2, mm2 ; square and accumulate
    136: pmaddwd mm0, mm0 ; square and accumulate
    137: pmaddwd mm2, mm2 ; square and accumulate
    159: pmaddwd mm0, mm0 ; square and accumulate
    160: pmaddwd mm2, mm2 ; square and accumulat [all...]
vp9_variance_impl_sse2.asm
    39: pmaddwd xmm0, xmm0
    40: pmaddwd xmm1, xmm1
    41: pmaddwd xmm2, xmm2
    42: pmaddwd xmm3, xmm3
    149: pmaddwd xmm1, xmm1
    152: pmaddwd xmm3, xmm3
    255: pmaddwd xmm1, xmm1
    266: pmaddwd xmm2, xmm2
    279: pmaddwd xmm2, xmm2
    294: pmaddwd xmm2, xmm [all...]
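Note: the variance kernels accumulate two things from the same widened pixel differences — a running sum (for the mean) and the pmaddwd "square and accumulate" sum of squares — then combine them as variance = sse - sum^2/N. A hedged sketch of that overall shape for an 8x8 block; the real .asm interleaves the work differently and may form the sum with paddw rather than pmaddwd against ones:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Hypothetical 8x8 variance sketch: src/ref are 8-bit, widened to 16-bit
     * differences, then pmaddwd does the "square and accumulate" step.      */
    static uint32_t variance8x8(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                int *sse_out)
    {
        const __m128i zero = _mm_setzero_si128();
        const __m128i ones = _mm_set1_epi16(1);
        __m128i vsum = _mm_setzero_si128();   /* 4 x int32 partial sums    */
        __m128i vsse = _mm_setzero_si128();   /* 4 x int32 partial squares */

        for (int row = 0; row < 8; ++row) {
            __m128i s = _mm_loadl_epi64((const __m128i *)(src + row * src_stride));
            __m128i r = _mm_loadl_epi64((const __m128i *)(ref + row * ref_stride));
            __m128i d = _mm_sub_epi16(_mm_unpacklo_epi8(s, zero),
                                      _mm_unpacklo_epi8(r, zero));
            vsum = _mm_add_epi32(vsum, _mm_madd_epi16(d, ones)); /* d0+d1, ...   */
            vsse = _mm_add_epi32(vsse, _mm_madd_epi16(d, d));    /* d0^2+d1^2, ... */
        }
        /* reduce the four 32-bit lanes of each accumulator */
        vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 8));
        vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 4));
        vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 8));
        vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 4));

        int sum = _mm_cvtsi128_si32(vsum);
        int sse = _mm_cvtsi128_si32(vsse);
        *sse_out = sse;
        return (uint32_t)(sse - ((int64_t)sum * sum >> 6));  /* 6 = log2(8*8) */
    }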
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_error_sse2.asm
    35: pmaddwd m0, m0
    36: pmaddwd m1, m1
    37: pmaddwd m2, m2
    38: pmaddwd m3, m3
vp9_variance_impl_mmx.asm
    35: pmaddwd mm0, mm0
    36: pmaddwd mm1, mm1
    37: pmaddwd mm2, mm2
    38: pmaddwd mm3, mm3
    112: pmaddwd mm0, mm0 ; square and accumulate
    113: pmaddwd mm2, mm2 ; square and accumulate
    136: pmaddwd mm0, mm0 ; square and accumulate
    137: pmaddwd mm2, mm2 ; square and accumulate
    159: pmaddwd mm0, mm0 ; square and accumulate
    160: pmaddwd mm2, mm2 ; square and accumulat [all...]
vp9_variance_impl_sse2.asm
    39: pmaddwd xmm0, xmm0
    40: pmaddwd xmm1, xmm1
    41: pmaddwd xmm2, xmm2
    42: pmaddwd xmm3, xmm3
    149: pmaddwd xmm1, xmm1
    152: pmaddwd xmm3, xmm3
    255: pmaddwd xmm1, xmm1
    266: pmaddwd xmm2, xmm2
    279: pmaddwd xmm2, xmm2
    294: pmaddwd xmm2, xmm [all...]
vp9_subpel_variance_impl_sse2.asm
    72: pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
    73: pmaddwd xmm4, xmm4
    179: pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
    180: pmaddwd xmm4, xmm4
    285: pmaddwd xmm5, xmm5 ; xmm5 *= xmm5
    286: pmaddwd xmm1, xmm1
/external/libvpx/libvpx/vp8/common/x86/
variance_impl_mmx.asm
    35: pmaddwd mm0, mm0
    36: pmaddwd mm1, mm1
    37: pmaddwd mm2, mm2
    38: pmaddwd mm3, mm3
    112: pmaddwd mm0, mm0 ; square and accumulate
    113: pmaddwd mm2, mm2 ; square and accumulate
    136: pmaddwd mm0, mm0 ; square and accumulate
    137: pmaddwd mm2, mm2 ; square and accumulate
    159: pmaddwd mm0, mm0 ; square and accumulate
    160: pmaddwd mm2, mm2 ; square and accumulat [all...]
variance_impl_sse2.asm
    41: pmaddwd xmm0, xmm0
    42: pmaddwd xmm1, xmm1
    43: pmaddwd xmm2, xmm2
    44: pmaddwd xmm3, xmm3
    151: pmaddwd xmm1, xmm1
    154: pmaddwd xmm3, xmm3
    257: pmaddwd xmm1, xmm1
    268: pmaddwd xmm2, xmm2
    281: pmaddwd xmm2, xmm2
    296: pmaddwd xmm2, xmm [all...]
variance_impl_ssse3.asm
    131: pmaddwd xmm2, xmm2
    132: pmaddwd xmm3, xmm3
    197: pmaddwd xmm1, xmm1
    198: pmaddwd xmm2, xmm2
    239: pmaddwd xmm1, xmm1
    240: pmaddwd xmm2, xmm2
    288: pmaddwd xmm1, xmm1
    289: pmaddwd xmm3, xmm3
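Note: one reason these kernels can keep their partial sums in plain 32-bit lanes is the headroom pmaddwd leaves when its inputs are pixel differences; a quick arithmetic check (the 64x64 figure is only an illustrative upper bound):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Worst case one pmaddwd adds to a 32-bit lane when its inputs are
         * pixel differences in [-255, 255]: two squared differences.        */
        const int64_t per_madd = 2 * 255LL * 255;          /* 130050 */
        /* Accumulations per lane before a signed 32-bit lane could overflow. */
        const int64_t safe = INT32_MAX / per_madd;         /* 16513  */
        /* Even a 64x64 region is only 64*64/8 = 512 pmaddwd operations,
         * i.e. at most 512 additions into each of the four lanes.           */
        printf("per-madd max %lld, safe accumulations per lane %lld\n",
               (long long)per_madd, (long long)safe);
        return 0;
    }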
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
variance_impl_mmx.asm
    35: pmaddwd mm0, mm0
    36: pmaddwd mm1, mm1
    37: pmaddwd mm2, mm2
    38: pmaddwd mm3, mm3
    112: pmaddwd mm0, mm0 ; square and accumulate
    113: pmaddwd mm2, mm2 ; square and accumulate
    136: pmaddwd mm0, mm0 ; square and accumulate
    137: pmaddwd mm2, mm2 ; square and accumulate
    159: pmaddwd mm0, mm0 ; square and accumulate
    160: pmaddwd mm2, mm2 ; square and accumulat [all...]
variance_impl_sse2.asm
    41: pmaddwd xmm0, xmm0
    42: pmaddwd xmm1, xmm1
    43: pmaddwd xmm2, xmm2
    44: pmaddwd xmm3, xmm3
    151: pmaddwd xmm1, xmm1
    154: pmaddwd xmm3, xmm3
    257: pmaddwd xmm1, xmm1
    268: pmaddwd xmm2, xmm2
    281: pmaddwd xmm2, xmm2
    296: pmaddwd xmm2, xmm [all...]
variance_impl_ssse3.asm
    131: pmaddwd xmm2, xmm2
    132: pmaddwd xmm3, xmm3
    197: pmaddwd xmm1, xmm1
    198: pmaddwd xmm2, xmm2
    239: pmaddwd xmm1, xmm1
    240: pmaddwd xmm2, xmm2
    288: pmaddwd xmm1, xmm1
    289: pmaddwd xmm3, xmm3
/development/perftests/panorama/feature_stab/db_vlvm/
db_utilities_linalg.h
    338: pmaddwd xmm0,[ecx]
    343: pmaddwd xmm2,[ecx+16]
    347: pmaddwd xmm1,[ecx+32]
    352: pmaddwd xmm0,[ecx+48]
    357: pmaddwd xmm2,[ecx+64]
    361: pmaddwd xmm1,[ecx+80]
    366: pmaddwd xmm0,[ecx+96]
    371: pmaddwd xmm2,[ecx+112]
    375: pmaddwd xmm1,[ecx+128]
    380: pmaddwd xmm0,[ecx+144 [all...]
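Note: the db_vlvm hits walk one pointer (ecx) through consecutive 16-byte blocks while rotating the destination between xmm0, xmm1 and xmm2, i.e. three independent dependency chains — a common way to hide pmaddwd latency in a long fixed-point multiply-accumulate. A sketch of the same idea using three accumulators; the actual routine in db_utilities_linalg.h, its operand layout and any scaling are not shown in the hits, so the names here are illustrative:

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Sketch: long int16 dot product with three independent accumulators,
     * mirroring the xmm0/xmm1/xmm2 rotation visible above.
     * n is assumed to be a multiple of 24 elements (3 x 8).              */
    static int64_t dot_long(const int16_t *a, const int16_t *b, size_t n)
    {
        __m128i acc0 = _mm_setzero_si128();
        __m128i acc1 = _mm_setzero_si128();
        __m128i acc2 = _mm_setzero_si128();

        for (size_t i = 0; i < n; i += 24) {
            acc0 = _mm_add_epi32(acc0, _mm_madd_epi16(
                       _mm_loadu_si128((const __m128i *)(a + i)),
                       _mm_loadu_si128((const __m128i *)(b + i))));
            acc1 = _mm_add_epi32(acc1, _mm_madd_epi16(
                       _mm_loadu_si128((const __m128i *)(a + i + 8)),
                       _mm_loadu_si128((const __m128i *)(b + i + 8))));
            acc2 = _mm_add_epi32(acc2, _mm_madd_epi16(
                       _mm_loadu_si128((const __m128i *)(a + i + 16)),
                       _mm_loadu_si128((const __m128i *)(b + i + 16))));
        }
        __m128i acc = _mm_add_epi32(_mm_add_epi32(acc0, acc1), acc2);
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 8));
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 4));
        return (int64_t)_mm_cvtsi128_si32(acc);  /* assumes the total fits in 32 bits */
    }

Splitting the accumulation this way lets the out-of-order core overlap several pmaddwd/paddd pairs instead of serializing them on a single register.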
/packages/apps/Camera/jni/feature_stab/db_vlvm/
db_utilities_linalg.h
    338: pmaddwd xmm0,[ecx]
    343: pmaddwd xmm2,[ecx+16]
    347: pmaddwd xmm1,[ecx+32]
    352: pmaddwd xmm0,[ecx+48]
    357: pmaddwd xmm2,[ecx+64]
    361: pmaddwd xmm1,[ecx+80]
    366: pmaddwd xmm0,[ecx+96]
    371: pmaddwd xmm2,[ecx+112]
    375: pmaddwd xmm1,[ecx+128]
    380: pmaddwd xmm0,[ecx+144 [all...]
/packages/apps/LegacyCamera/jni/feature_stab/db_vlvm/
db_utilities_linalg.h
    338: pmaddwd xmm0,[ecx]
    343: pmaddwd xmm2,[ecx+16]
    347: pmaddwd xmm1,[ecx+32]
    352: pmaddwd xmm0,[ecx+48]
    357: pmaddwd xmm2,[ecx+64]
    361: pmaddwd xmm1,[ecx+80]
    366: pmaddwd xmm0,[ecx+96]
    371: pmaddwd xmm2,[ecx+112]
    375: pmaddwd xmm1,[ecx+128]
    380: pmaddwd xmm0,[ecx+144 [all...]