/external/libvpx/vp8/encoder/x86/
  ssim_opt.asm
      17  movdqa xmm1, xmm3
      20  movdqa xmm2, xmm4
      29  movdqa xmm2,%1
      33  movdqa xmm2,%1
      41  movdqa xmm1, %1
      91  movdqa xmm3, xmm5
      92  movdqa xmm4, xmm6
      98  movdqa xmm3, xmm5
      99  movdqa xmm4, xmm6
     180  movdqa xmm3, xmm [all...]
  variance_impl_ssse3.asm
      69  movdqa xmm2, xmm0
      94  movdqa xmm3, xmm1
     107  movdqa xmm2, xmm0
     108  movdqa xmm0, xmm1
     109  movdqa xmm3, xmm2
     164  movdqa xmm0, xmm1
     174  movdqa xmm2, xmm1
     175  movdqa xmm0, xmm3
     202  movdqa xmm1, xmm0
     266  movdqa xmm3, xmm [all...]
  variance_impl_sse2.asm
      37  movdqa xmm0, [rax]
      38  movdqa xmm1, [rax+16]
      39  movdqa xmm2, [rax+32]
      40  movdqa xmm3, [rax+48]
      55  movdqa xmm3,xmm4
      58  movdqa xmm3,xmm4
     135  movdqa xmm3, xmm1
     136  movdqa xmm4, xmm2
     165  movdqa xmm1, xmm6
     177  movdqa xmm2, xmm [all...]
  sad_sse3.asm
     161  movdqa xmm0, XMMWORD PTR [%2]
     170  movdqa xmm0, XMMWORD PTR [%2]
     183  movdqa xmm0, XMMWORD PTR [%2+%4]
     255  movdqa xmm0, XMMWORD PTR [%2]
     266  movdqa xmm0, XMMWORD PTR [%2]
     283  movdqa xmm0, XMMWORD PTR [%2+%7]
     593  movdqa xmm0, XMMWORD PTR [src_ptr]
     595  movdqa xmm2, XMMWORD PTR [src_ptr+src_stride]
     601  movdqa xmm4, XMMWORD PTR [src_ptr]
     603  movdqa xmm6, XMMWORD PTR [src_ptr+src_stride [all...]
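The encoder kernels above (sad_sse3.asm and the variance/SSIM routines) use movdqa to pull 16-byte-aligned rows of pixels into xmm registers before the arithmetic. A minimal sketch of that load-and-accumulate pattern with SSE2 intrinsics, assuming 16-byte-aligned buffers and a 16x16 block; the function name and arguments are illustrative, not the libvpx API:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Sum of absolute differences over a 16x16 block.  Both buffers are
     * assumed 16-byte aligned, so the compiler can emit movdqa (aligned
     * loads); strides are in bytes.  Illustrative sketch, not libvpx. */
    static unsigned sad16x16_sse2(const uint8_t *src, int src_stride,
                                  const uint8_t *ref, int ref_stride) {
        __m128i acc = _mm_setzero_si128();
        for (int row = 0; row < 16; ++row) {
            __m128i s = _mm_load_si128((const __m128i *)(src + row * src_stride));
            __m128i r = _mm_load_si128((const __m128i *)(ref + row * ref_stride));
            acc = _mm_add_epi64(acc, _mm_sad_epu8(s, r)); /* psadbw: two 64-bit partial sums */
        }
        acc = _mm_add_epi64(acc, _mm_srli_si128(acc, 8)); /* fold high half into low half */
        return (unsigned)_mm_cvtsi128_si32(acc);
    }

With the alignment guaranteed, each _mm_load_si128 is free to compile to movdqa; the hand-written assembly in these files does the same thing explicitly.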
/external/libyuv/files/source/
  planar_functions.cc
      76  movdqa xmm0, [eax]
      77  movdqa xmm1, [eax + 16]
      79  movdqa xmm2, xmm0
      80  movdqa xmm3, xmm1
      84  movdqa [edx], xmm0
      89  movdqa [edi], xmm2
     107  "movdqa (%0),%%xmm0\n"
     108  "movdqa 0x10(%0),%%xmm1\n"
     110  "movdqa %%xmm0,%%xmm2\n"
     111  "movdqa %%xmm1,%%xmm3\n"
     525  movdqa [edx], xmm2
     675  movdqa [edx], xmm0
     742  movdqa [edx], xmm0
    1222  movdqa [edx], xmm0
    1223  movdqa [edx + 16], xmm1
    1245  movdqa [edx], xmm0
    1267  movdqa [edx], xmm0
    [all...]
  row_posix.cc
      38  "movdqa (%3),%%xmm7\n"
      39  "movdqa (%4),%%xmm6\n"
      40  "movdqa %%xmm6,%%xmm5\n"
      43  "movdqa (%0),%%xmm0\n"
      45  "movdqa 0x10(%0),%%xmm1\n"
      73  "movdqa (%3),%%xmm6\n"
      75  "movdqa (%0),%%xmm0\n"
      76  "movdqa 0x10(%0),%%xmm1\n"
      77  "movdqa 0x20(%0),%%xmm3\n"
      79  "movdqa %%xmm3,%%xmm2\n [all...]
  convert.cc
     376  ;movdqa xmm1, xmm6
     377  ;movdqa xmm2, xmm6
     378  ;movdqa xmm4, xmm6
     381  movdqa xmm1, xmm3
     387  movdqa xmm2, xmm5
     490  movdqa xmm1, xmm6
     491  movdqa xmm2, xmm6
     492  movdqa xmm4, xmm6
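The planar_functions.cc and row_posix.cc matches are the aligned row-copy and channel-split kernels: a pair of movdqa loads followed by a pair of movdqa stores each iteration. A minimal sketch of that 32-bytes-per-iteration pattern, assuming src and dst are 16-byte aligned and count is a multiple of 32; the function name is illustrative, not the libyuv API:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Aligned row copy: two 16-byte loads and two 16-byte stores per
     * iteration, mirroring the movdqa pairs visible above.  Sketch only. */
    static void copy_row_sse2(const uint8_t *src, uint8_t *dst, int count) {
        for (int i = 0; i < count; i += 32) {
            __m128i a = _mm_load_si128((const __m128i *)(src + i));
            __m128i b = _mm_load_si128((const __m128i *)(src + i + 16));
            _mm_store_si128((__m128i *)(dst + i), a);
            _mm_store_si128((__m128i *)(dst + i + 16), b);
        }
    }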
/external/llvm/test/CodeGen/X86/
  4char-promote.ll
       8  ; CHECK: movdqa
  pmulld.ll
       9  ; WIN64-NEXT: movdqa (%rcx), %xmm0
      20  ; WIN64-NEXT: movdqa (%rcx), %xmm0
  vec_compare.ll
      28  ; CHECK: movdqa
      37  ; CHECK: movdqa
  2009-02-05-CoalescerBug.ll
       4  ; RUN: grep movdqa %t | count 2
  dagcombine-buildvector.ll
      17  ; CHECK: movdqa
  pmul.ll
       5  ; The f() arguments in %xmm0 and %xmm1 cause an extra movdqa without -join-physregs.
  vec_shuffle-27.ll
      25  ; CHECK: movdqa
  widen_cast-2.ll
       5  ; CHECK: movdqa
  widen_arith-5.ll
       2  ; CHECK: movdqa
  sse3.ll
      23  ; X64: movdqa %xmm0, (%rdi)
      35  ; X64: movdqa (%rdi), %xmm0
     124  ; X64: movdqa %xmm0, (%rdi)
/external/libvpx/vp8/common/x86/
  postproc_sse2.asm
      39  movdqa xmm0, [GLOBAL(rd42)]
      41  movdqa [rsp], xmm0
      66  movdqa xmm1, xmm3 ; mm1 = p0..p3
      74  movdqa xmm7, xmm1 ; mm7 = r0 p0..p3
      85  movdqa xmm6, xmm1 ; mm6 = r0 p0..p3
      99  movdqa xmm6, xmm1 ; mm6 = r0 p0..p3
     111  movdqa xmm6, xmm1 ; mm6 = r0 p0..p3
     152  movdqa xmm3, xmm4
     155  movdqa xmm1, xmm3 ; mm1 = p0..p3
     159  movdqa xmm5, xmm [all...]
  postproc_mmx.c
     445  movdqa xmm1, xmm3 ;
     458  movdqa xmm7, xmm1 ;
     477  movdqa xmm6, xmm1 ;
     500  movdqa xmm6, xmm1 ;
     521  movdqa xmm6, xmm1 ;
     574  movdqa xmm3, xmm4
     578  movdqa xmm1, xmm3 ;
     583  movdqa xmm5, xmm4
     592  movdqa xmm7, xmm1 ;
     602  movdqa xmm5, xmm [all...]
/external/libffi/src/x86/
  darwin64.S
     190  movdqa 48(%r10), %xmm0
     191  movdqa 64(%r10), %xmm1
     192  movdqa 80(%r10), %xmm2
     193  movdqa 96(%r10), %xmm3
     194  movdqa 112(%r10), %xmm4
     195  movdqa 128(%r10), %xmm5
     196  movdqa 144(%r10), %xmm6
     197  movdqa 160(%r10), %xmm7
     307  movdqa %xmm0, 48(%rsp)
     308  movdqa %xmm1, 64(%rsp [all...]
  unix64.S
     194  movdqa 48(%r10), %xmm0
     195  movdqa 64(%r10), %xmm1
     196  movdqa 80(%r10), %xmm2
     197  movdqa 96(%r10), %xmm3
     198  movdqa 112(%r10), %xmm4
     199  movdqa 128(%r10), %xmm5
     200  movdqa 144(%r10), %xmm6
     201  movdqa 160(%r10), %xmm7
     318  movdqa %xmm0, 48(%rsp)
     319  movdqa %xmm1, 64(%rsp [all...]
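In darwin64.S and unix64.S the fixed offsets 48, 64, ..., 160 are the eight 16-byte SSE slots of the x86-64 register save area, which appear after six 8-byte integer-register slots; because the whole area is 16-byte aligned, the aligned movdqa form can be used to spill and reload xmm0..xmm7. A rough C model of that layout, with field names that are illustrative rather than libffi's own:

    #include <stdint.h>
    #include <stdalign.h>

    /* Rough model of the register save area addressed above: six 8-byte
     * general-purpose argument slots (offsets 0..47) followed by eight
     * 16-byte SSE slots (offsets 48..175).  The 16-byte alignment is what
     * makes movdqa, rather than movdqu, safe at these offsets. */
    struct reg_save_area {
        uint64_t gp[6];                  /* rdi, rsi, rdx, rcx, r8, r9        */
        alignas(16) uint8_t sse[8][16];  /* xmm0..xmm7 at 48, 64, ..., 160    */
    };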
/external/dropbear/libtomcrypt/src/encauth/gcm/
  gcm_mult_h.c
      30  asm("movdqa (%0),%%xmm0"::"r"(&gcm->PC[0][I[0]][0]));
      34  asm("movdqa %%xmm0,(%0)"::"r"(&T));
/external/v8/src/ia32/
  codegen-ia32.cc
     131  __ movdqa(xmm0, Operand(src, 0x00));
     132  __ movdqa(xmm1, Operand(src, 0x10));
     135  __ movdqa(Operand(dst, 0x00), xmm0);
     136  __ movdqa(Operand(dst, 0x10), xmm1);
     147  __ movdqa(xmm0, Operand(src, 0));
     149  __ movdqa(Operand(dst, 0), xmm0);
     181  __ movdqa(Operand(dst, 0x00), xmm0);
     182  __ movdqa(Operand(dst, 0x10), xmm1);
     195  __ movdqa(Operand(dst, 0), xmm0);
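The codegen-ia32.cc matches are a generated memcpy-style loop: 16-byte movdqa loads from src and stores to dst. movdqa faults on a misaligned address, so loops like this are normally guarded by an alignment check, with movdqu covering the unaligned case. A small sketch of that split in intrinsics form, assuming nothing about the V8 code generator itself:

    #include <emmintrin.h>
    #include <stdint.h>

    /* Copy one 16-byte block, choosing the aligned store (movdqa) only when
     * the destination is known to be 16-byte aligned.  Illustrative sketch. */
    static void copy16(uint8_t *dst, const uint8_t *src) {
        __m128i v = _mm_loadu_si128((const __m128i *)src);  /* unaligned-safe load */
        if (((uintptr_t)dst & 15) == 0) {
            _mm_store_si128((__m128i *)dst, v);              /* movdqa path */
        } else {
            _mm_storeu_si128((__m128i *)dst, v);             /* movdqu path */
        }
    }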
/external/libvpx/vpx_ports/
  x86_abi_support.asm
     263  movdqa XMMWORD PTR [rsp], xmm6
     264  movdqa XMMWORD PTR [rsp+16], xmm7
     267  movdqa xmm6, XMMWORD PTR [rsp]
     268  movdqa xmm7, XMMWORD PTR [rsp+16]
/packages/apps/Camera/jni/feature_stab/db_vlvm/
  db_utilities_linalg.h
     336  movdqa xmm0,[eax]
     341  movdqa xmm2,[eax+16]
     345  movdqa xmm1,[eax+32]
     350  movdqa xmm0,[eax+48]
     355  movdqa xmm2,[eax+64]
     359  movdqa xmm1,[eax+80]
     364  movdqa xmm0,[eax+96]
     369  movdqa xmm2,[eax+112]
     373  movdqa xmm1,[eax+128]
     378  movdqa xmm0,[eax+144 [all...]
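db_utilities_linalg.h walks a buffer in 16-byte steps ([eax], [eax+16], ..., [eax+144]) with aligned loads feeding a multiply-accumulate. A minimal intrinsics sketch of that access pattern, assuming double-precision data and 16-byte-aligned inputs (both assumptions; the element type is not visible in the matches above, and hand-written asm often uses movdqa even for floating-point data since it is just a raw 16-byte move):

    #include <emmintrin.h>

    /* Aligned multiply-accumulate over n doubles (n even, pointers 16-byte
     * aligned).  Illustrates the stride-16 access pattern only, not the
     * db_vlvm routine itself. */
    static double dot_aligned(const double *a, const double *b, int n) {
        __m128d acc = _mm_setzero_pd();
        for (int i = 0; i < n; i += 2) {
            __m128d va = _mm_load_pd(a + i);   /* 16-byte aligned load */
            __m128d vb = _mm_load_pd(b + i);
            acc = _mm_add_pd(acc, _mm_mul_pd(va, vb));
        }
        double lanes[2];
        _mm_storeu_pd(lanes, acc);             /* horizontal add of both lanes */
        return lanes[0] + lanes[1];
    }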