/prebuilts/go/darwin-x86/src/runtime/ |
duff_amd64.s | 8 MOVUPS X0,(DI) 9 MOVUPS X0,16(DI) 10 MOVUPS X0,32(DI) 11 MOVUPS X0,48(DI) 14 MOVUPS X0,(DI) 15 MOVUPS X0,16(DI) 16 MOVUPS X0,32(DI) 17 MOVUPS X0,48(DI) 20 MOVUPS X0,(DI) 21 MOVUPS X0,16(DI [all...] |
/prebuilts/go/linux-x86/src/runtime/ |
duff_amd64.s | 8 MOVUPS X0,(DI) 9 MOVUPS X0,16(DI) 10 MOVUPS X0,32(DI) 11 MOVUPS X0,48(DI) 14 MOVUPS X0,(DI) 15 MOVUPS X0,16(DI) 16 MOVUPS X0,32(DI) 17 MOVUPS X0,48(DI) 20 MOVUPS X0,(DI) 21 MOVUPS X0,16(DI [all...] |
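
The duff_amd64.s hits above are the unrolled 16-byte stores of the runtime's Duff's-device zeroing routine: X0 is pre-zeroed and MOVUPS writes it out in 16-byte steps, with MOVUPS (not MOVAPS) chosen because the destination is not guaranteed to be 16-byte aligned. A minimal C sketch of that store pattern, assuming GCC/Clang on x86-64; zero16_chunks is a hypothetical helper, not the runtime's duffzero, and it uses a plain loop rather than the runtime's jump-into-the-middle dispatch.

#include <stddef.h>

/* Sketch only: zero a buffer in unaligned 16-byte chunks with MOVUPS,
 * mirroring the repeated "MOVUPS X0, off(DI)" stores in duff_amd64.s.
 * Assumes nchunks > 0 and a length that is a multiple of 16. */
static void zero16_chunks(void *dst, size_t nchunks)
{
    __asm__ __volatile__(
        "pxor %%xmm0, %%xmm0\n\t"      /* xmm0 = 0 (the runtime's X0) */
        "1:\n\t"
        "movups %%xmm0, (%0)\n\t"      /* unaligned 16-byte store */
        "add $16, %0\n\t"
        "dec %1\n\t"
        "jnz 1b"
        : "+r"(dst), "+r"(nchunks)
        :
        : "xmm0", "memory", "cc");
}
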
/prebuilts/go/darwin-x86/src/runtime/cgo/ |
asm_amd64.s | 25 MOVUPS X6, 0x60(SP) 26 MOVUPS X7, 0x70(SP) 27 MOVUPS X8, 0x80(SP) 28 MOVUPS X9, 0x90(SP) 29 MOVUPS X10, 0xa0(SP) 30 MOVUPS X11, 0xb0(SP) 31 MOVUPS X12, 0xc0(SP) 32 MOVUPS X13, 0xd0(SP) 33 MOVUPS X14, 0xe0(SP) 34 MOVUPS X15, 0xf0(SP [all...] |
/prebuilts/go/linux-x86/src/runtime/cgo/ |
asm_amd64.s | 25 MOVUPS X6, 0x60(SP) 26 MOVUPS X7, 0x70(SP) 27 MOVUPS X8, 0x80(SP) 28 MOVUPS X9, 0x90(SP) 29 MOVUPS X10, 0xa0(SP) 30 MOVUPS X11, 0xb0(SP) 31 MOVUPS X12, 0xc0(SP) 32 MOVUPS X13, 0xd0(SP) 33 MOVUPS X14, 0xe0(SP) 34 MOVUPS X15, 0xf0(SP [all...] |
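
The cgo asm_amd64.s hits are the trampoline spilling X6 through X15 to the stack before calling foreign code and restoring them afterwards (those registers are callee-saved in some ABIs, notably Windows). A hedged sketch of that save/restore shape in GCC inline assembly, with a local byte buffer standing in for the stack slots used there:

/* Sketch only: save an XMM register to a possibly unaligned buffer with
 * MOVUPS, let "foreign code" clobber it, then restore it, the same shape
 * as the X6-X15 spills in asm_amd64.s. */
static void spill_restore_xmm6(void)
{
    unsigned char save[16];
    __asm__ __volatile__(
        "movups %%xmm6, (%0)\n\t"      /* save the live value */
        "pxor %%xmm6, %%xmm6\n\t"      /* stand-in for a clobbering call */
        "movups (%0), %%xmm6"          /* restore before returning */
        :
        : "r"(save)
        : "xmm6", "memory");
}
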
/prebuilts/go/darwin-x86/src/crypto/aes/ |
asm_amd64.s | 13 MOVUPS 0(AX), X1 14 MOVUPS 0(BX), X0 21 MOVUPS 0(AX), X1 23 MOVUPS 16(AX), X1 27 MOVUPS 0(AX), X1 29 MOVUPS 16(AX), X1 33 MOVUPS 0(AX), X1 35 MOVUPS 16(AX), X1 37 MOVUPS 32(AX), X1 39 MOVUPS 48(AX), X [all...] |
/prebuilts/go/linux-x86/src/crypto/aes/ |
asm_amd64.s | 13 MOVUPS 0(AX), X1 14 MOVUPS 0(BX), X0 21 MOVUPS 0(AX), X1 23 MOVUPS 16(AX), X1 27 MOVUPS 0(AX), X1 29 MOVUPS 16(AX), X1 33 MOVUPS 0(AX), X1 35 MOVUPS 16(AX), X1 37 MOVUPS 32(AX), X1 39 MOVUPS 48(AX), X [all...] |
/external/boringssl/win-x86/crypto/fipsmodule/ |
aesni-x86.asm | 24 movups xmm2,[eax] 27 movups xmm0,[edx] 28 movups xmm1,[16+edx] 34 movups xmm1,[edx] 40 movups [eax],xmm2 49 movups xmm2,[eax] 52 movups xmm0,[edx] 53 movups xmm1,[16+edx] 59 movups xmm1,[edx] 65 movups [eax],xmm [all...] |
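
Both the Go crypto/aes assembly and BoringSSL's aesni-* files show the same AES-NI shape: MOVUPS loads the 16-byte state and each round key (neither is guaranteed 16-byte aligned), AESENC/AESENCLAST perform the rounds, and MOVUPS stores the result. A hedged C sketch of a single AES-128 block under those assumptions; encrypt_block_aes128 is a hypothetical helper (not either library's entry point), it expects an already expanded 176-byte key schedule, and it requires a CPU with AES-NI.

#include <stdint.h>

/* Sketch only: one AES-128 block via AES-NI, with every 16-byte memory
 * access done through MOVUPS as in the files listed above. */
static void encrypt_block_aes128(uint8_t out[16], const uint8_t in[16],
                                 const uint8_t rk[11 * 16])
{
    __asm__ __volatile__(
        "movups (%1), %%xmm0\n\t"        /* state = plaintext block */
        "movups (%2), %%xmm1\n\t"
        "pxor %%xmm1, %%xmm0\n\t"        /* AddRoundKey, round 0 */
        "movups 16(%2), %%xmm1\n\t"  "aesenc %%xmm1, %%xmm0\n\t"
        "movups 32(%2), %%xmm1\n\t"  "aesenc %%xmm1, %%xmm0\n\t"
        "movups 48(%2), %%xmm1\n\t"  "aesenc %%xmm1, %%xmm0\n\t"
        "movups 64(%2), %%xmm1\n\t"  "aesenc %%xmm1, %%xmm0\n\t"
        "movups 80(%2), %%xmm1\n\t"  "aesenc %%xmm1, %%xmm0\n\t"
        "movups 96(%2), %%xmm1\n\t"  "aesenc %%xmm1, %%xmm0\n\t"
        "movups 112(%2), %%xmm1\n\t" "aesenc %%xmm1, %%xmm0\n\t"
        "movups 128(%2), %%xmm1\n\t" "aesenc %%xmm1, %%xmm0\n\t"
        "movups 144(%2), %%xmm1\n\t" "aesenc %%xmm1, %%xmm0\n\t"
        "movups 160(%2), %%xmm1\n\t" "aesenclast %%xmm1, %%xmm0\n\t"
        "movups %%xmm0, (%0)"            /* write ciphertext block */
        :
        : "r"(out), "r"(in), "r"(rk)
        : "xmm0", "xmm1", "memory");
}
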
/external/valgrind/memcheck/tests/amd64/ |
fxsave-amd64.c | 57 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm0"); 58 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm1"); 59 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm2"); 60 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm3"); 61 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm4"); 62 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm5"); 63 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm6"); 64 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm7"); 65 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm8"); 66 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm9") [all...] |
sh-mem-vec128.c | 12 "movups (%1), %%xmm7 ; movups %%xmm7, (%0)" |
xor-undef-amd64.c | 66 "movups 0(%0), %%xmm0\n\t" 67 "movups 16(%0), %%xmm8\n\t" 69 "movups %%xmm0, 32(%0)\n\t" 78 "movups 0(%0), %%xmm0\n\t" 79 "movups 16(%0), %%xmm8\n\t" 81 "movups %%xmm0, 32(%0)\n\t" 93 "movups 0(%0), %%xmm0\n\t" 94 "movups 16(%0), %%xmm8\n\t" 96 "movups %%xmm0, 32(%0)\n\t" 105 "movups 0(%0), %%xmm0\n\t [all...] |
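
These memcheck tests drive MOVUPS from C inline assembly so that definedness tracking is exercised across 128-bit loads and stores; sh-mem-vec128.c, for instance, copies 16 bytes through %xmm7. A self-contained sketch of that same copy, with copy_v128 as an illustrative name:

#include <stdio.h>

/* Sketch: route a 16-byte copy through an XMM register with MOVUPS,
 * the pattern sh-mem-vec128.c uses; neither pointer needs alignment. */
static void copy_v128(void *dst, const void *src)
{
    __asm__ __volatile__(
        "movups (%1), %%xmm7 ; movups %%xmm7, (%0)"
        :
        : "r"(dst), "r"(src)
        : "xmm7", "memory");
}

int main(void)
{
    char src[16] = "0123456789abcde";   /* 15 chars + NUL = 16 bytes */
    char dst[16];
    copy_v128(dst, src);
    printf("%s\n", dst);
    return 0;
}
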
/external/valgrind/memcheck/tests/amd64-solaris/ |
context_sse.c | 69 "movups %[y0], %%xmm0\n" 70 "movups %[d0], %%xmm1\n" 71 "movups %[d0], %%xmm2\n" 72 "movups %[y0], %%xmm3\n" 73 "movups %[y0], %%xmm4\n" 74 "movups %[d0], %%xmm5\n" 75 "movups %[d0], %%xmm6\n" 76 "movups %[y0], %%xmm7\n" 80 "movups %%xmm0, 0x00 + %[out]\n" 81 "movups %%xmm1, 0x10 + %[out]\n [all...] |
/external/valgrind/memcheck/tests/x86-solaris/ |
context_sse.c | 67 "movups %[y0], %%xmm0\n" 68 "movups %[d0], %%xmm1\n" 69 "movups %[d0], %%xmm2\n" 70 "movups %[y0], %%xmm3\n" 71 "movups %[y0], %%xmm4\n" 72 "movups %[d0], %%xmm5\n" 73 "movups %[d0], %%xmm6\n" 74 "movups %[y0], %%xmm7\n" 85 "movups %%xmm0, 0x00 + %[out]\n" 86 "movups %%xmm1, 0x10 + %[out]\n [all...] |
/external/valgrind/memcheck/tests/x86/ |
sh-mem-vec128.c | 12 "movups (%1), %%xmm7 ; movups %%xmm7, (%0)" |
xor-undef-x86.c | 68 "movups 0(%0), %%xmm0\n\t" 69 "movups 16(%0), %%xmm7\n\t" 71 "movups %%xmm0, 32(%0)\n\t" 82 "movups 0(%0), %%xmm0\n\t" 83 "movups 16(%0), %%xmm7\n\t" 85 "movups %%xmm0, 32(%0)\n\t" 99 "movups 0(%0), %%xmm0\n\t" 100 "movups 16(%0), %%xmm7\n\t" 102 "movups %%xmm0, 32(%0)\n\t" 113 "movups 0(%0), %%xmm0\n\t [all...] |
/external/boringssl/linux-x86/crypto/fipsmodule/ |
aesni-x86.S | 11 movups (%eax),%xmm2 14 movups (%edx),%xmm0 15 movups 16(%edx),%xmm1 21 movups (%edx),%xmm1 27 movups %xmm2,(%eax) 39 movups (%eax),%xmm2 42 movups (%edx),%xmm0 43 movups 16(%edx),%xmm1 49 movups (%edx),%xmm1 55 movups %xmm2,(%eax [all...] |
/external/boringssl/mac-x86/crypto/fipsmodule/ |
aesni-x86.S | 10 movups (%eax),%xmm2 13 movups (%edx),%xmm0 14 movups 16(%edx),%xmm1 20 movups (%edx),%xmm1 26 movups %xmm2,(%eax) 36 movups (%eax),%xmm2 39 movups (%edx),%xmm0 40 movups 16(%edx),%xmm1 46 movups (%edx),%xmm1 52 movups %xmm2,(%eax [all...] |
/external/llvm/test/CodeGen/X86/ |
2008-05-22-FoldUnalignedLoad.ll | 12 ; CHECK: movups 13 ; CHECK: movups 14 ; CHECK-NOT: movups |
small-byval-memcpy.ll | 18 ; NEHALEM: movups 19 ; NEHALEM-NEXT: movups 22 ; BTVER2: movups 23 ; BTVER2-NEXT: movups |
sse-align-10.ll | 1 ; RUN: llc < %s -march=x86-64 | grep movups | count 1 |
sse-align-8.ll | 1 ; RUN: llc < %s -march=x86-64 | grep movups | count 1 |
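
The LLVM CodeGen tests (and the copy under SwiftShader's bundled LLVM further down) check that the backend selects MOVUPS when a 16-byte vector access is not known to be aligned, and MOVAPS otherwise. A rough C equivalent of what the sse-align tests verify at the IR level, assuming GCC/Clang on x86-64; the vector typedef and function name are illustrative:

/* Sketch: a 16-byte vector load with no alignment guarantee. An SSE
 * x86-64 compiler typically lowers the memcpy below to a single MOVUPS,
 * which is what the sse-align tests grep for in the generated code. */
typedef float v4sf __attribute__((vector_size(16)));

v4sf load_possibly_unaligned(const void *p)
{
    v4sf v;
    __builtin_memcpy(&v, p, sizeof v);   /* alignment unknown -> movups */
    return v;
}
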
/external/boringssl/linux-x86_64/crypto/fipsmodule/ |
aesni-x86_64.S | 10 movups (%rdi),%xmm2 12 movups (%rdx),%xmm0 13 movups 16(%rdx),%xmm1 19 movups (%rdx),%xmm1 25 movups %xmm2,(%rsi) 35 movups (%rdi),%xmm2 37 movups (%rdx),%xmm0 38 movups 16(%rdx),%xmm1 44 movups (%rdx),%xmm1 50 movups %xmm2,(%rsi [all...] |
/external/boringssl/mac-x86_64/crypto/fipsmodule/ |
aesni-x86_64.S | 9 movups (%rdi),%xmm2 11 movups (%rdx),%xmm0 12 movups 16(%rdx),%xmm1 18 movups (%rdx),%xmm1 24 movups %xmm2,(%rsi) 34 movups (%rdi),%xmm2 36 movups (%rdx),%xmm0 37 movups 16(%rdx),%xmm1 43 movups (%rdx),%xmm1 49 movups %xmm2,(%rsi [all...] |
/external/boringssl/win-x86_64/crypto/fipsmodule/ |
aesni-x86_64.asm | 12 movups xmm2,XMMWORD[rcx] 14 movups xmm0,XMMWORD[r8] 15 movups xmm1,XMMWORD[16+r8] 21 movups xmm1,XMMWORD[r8] 27 movups XMMWORD[rdx],xmm2 36 movups xmm2,XMMWORD[rcx] 38 movups xmm0,XMMWORD[r8] 39 movups xmm1,XMMWORD[16+r8] 45 movups xmm1,XMMWORD[r8] 51 movups XMMWORD[rdx],xmm [all...] |
/external/valgrind/none/tests/x86-solaris/ |
coredump_single_thread_sse.c | 19 "movups (%%esp), %%xmm0\n" 24 "movups (%%esp), %%xmm1\n" 29 "movups (%%esp), %%xmm2\n" 34 "movups (%%esp), %%xmm3\n" 39 "movups (%%esp), %%xmm4\n" 44 "movups (%%esp), %%xmm5\n" 49 "movups (%%esp), %%xmm6\n" 54 "movups (%%esp), %%xmm7\n" |
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
sse-align-10.ll | 1 ; RUN: llc < %s -march=x86-64 | grep movups | count 1 |