/external/llvm/test/CodeGen/X86/ |
2008-05-22-FoldUnalignedLoad.ll |
  12 ; CHECK: movups
  13 ; CHECK: movups
  14 ; CHECK-NOT: movups
|
sse-align-10.ll | 1 ; RUN: llc < %s -march=x86-64 | grep movups | count 1
|
sse-align-8.ll | 1 ; RUN: llc < %s -march=x86-64 | grep movups | count 1
|
2009-11-16-UnfoldMemOpBug.ll |
  9 ; CHECK: movups L_str+12(%rip), %xmm0
  10 ; CHECK: movups L_str(%rip), %xmm1
  17 ; CHECK: movups %xmm0, 12(%rsp)
|
memset64-on-x86-32.ll | 1 ; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=nehalem | grep movups | count 5
|
misaligned-memset.ll | 6 ; CHECK: movups
|
sse-align-2.ll | 10 ; CHECK: movups
|
2012-11-28-merge-store-alias.ll |
  6 ; CHECK-NEXT: movups %xmm0
  35 ; CHECK-NOT: movups %xmm0
|
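The LLVM tests above all verify that the x86 backend emits (or, where marked CHECK-NOT, avoids) the unaligned movups form. A minimal C sketch of the pattern those tests exercise, assuming an SSE-enabled x86/x86-64 compiler at -O2; the function name copy16 is made up for illustration, and whether the compiler picks movups, movdqu, or scalar moves depends on the target and tuning flags:

    #include <string.h>

    /* 16-byte copy between pointers with no alignment guarantee.  SSE-enabled
     * x86 compilers commonly lower this to an unaligned vector load/store pair
     * (movups/movdqu) rather than the alignment-faulting movaps. */
    void copy16(void *dst, const void *src) {
        memcpy(dst, src, 16);
    }

|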
/external/chromium_org/third_party/openssl/openssl/crypto/aes/asm/ |
aesni-x86_64.S |
  6 movups (%rdi),%xmm2
  8 movups (%rdx),%xmm0
  9 movups 16(%rdx),%xmm1
  15 movups (%rdx),%xmm1
  19 movups %xmm2,(%rsi)
  27 movups (%rdi),%xmm2
  29 movups (%rdx),%xmm0
  30 movups 16(%rdx),%xmm1
  36 movups (%rdx),%xmm1
  40 movups %xmm2,(%rsi [all...]
|
aesni-x86.S |
  10 movups (%eax),%xmm2
  13 movups (%edx),%xmm0
  14 movups 16(%edx),%xmm1
  20 movups (%edx),%xmm1
  24 movups %xmm2,(%eax)
  34 movups (%eax),%xmm2
  37 movups (%edx),%xmm0
  38 movups 16(%edx),%xmm1
  44 movups (%edx),%xmm1
  48 movups %xmm2,(%eax [all...]
|
aesni-x86.pl |
  57 if ($PREFIX eq "aesni") { $movekey=*movups; }
  58 else { $movekey=*movups; }
  120 &movups ($rndkey0,&QWP(0,$key));
  168 &movups ($inout0,&QWP(0,"eax"));
  175 &movups (&QWP(0,"eax"),$inout0);
  184 &movups ($inout0,&QWP(0,"eax"));
  191 &movups (&QWP(0,"eax"),$inout0);
  389 &movups (&QWP(0,$out),$inout0);
  391 &movups (&QWP(0x10,$out),$inout1);
  393 &movups (&QWP(0x20,$out),$inout2) [all...]
|
aesni-x86_64.pl |
  178 $movkey = $PREFIX eq "aesni" ? "movups" : "movups";
  246 movups ($inp),$inout0 # load input
  251 movups $inout0,($out) # output
  259 movups ($inp),$inout0 # load input
  264 movups $inout0,($out) # output
  557 movups $inout0,($out)
  561 movups $inout1,0x10($out)
  563 movups $inout2,0x20($out)
  565 movups $inout3,0x30($out [all...]
|
aesni-sha1-x86_64.S |
  68 movups (%r15),%xmm13
  69 movups 16(%r15),%xmm14
  75 movups 0(%r12),%xmm12
  79 movups 32(%r15),%xmm15
  105 movups 48(%r15),%xmm14
  132 movups 64(%r15),%xmm15
  162 movups 80(%r15),%xmm14
  187 movups 96(%r15),%xmm15
  216 movups 112(%r15),%xmm14
  243 movups 128(%r15),%xmm1 [all...]
|
/external/openssl/crypto/aes/asm/ |
aesni-x86_64.S |
  6 movups (%rdi),%xmm2
  8 movups (%rdx),%xmm0
  9 movups 16(%rdx),%xmm1
  15 movups (%rdx),%xmm1
  19 movups %xmm2,(%rsi)
  27 movups (%rdi),%xmm2
  29 movups (%rdx),%xmm0
  30 movups 16(%rdx),%xmm1
  36 movups (%rdx),%xmm1
  40 movups %xmm2,(%rsi [all...]
|
aesni-x86.S |
  10 movups (%eax),%xmm2
  13 movups (%edx),%xmm0
  14 movups 16(%edx),%xmm1
  20 movups (%edx),%xmm1
  24 movups %xmm2,(%eax)
  34 movups (%eax),%xmm2
  37 movups (%edx),%xmm0
  38 movups 16(%edx),%xmm1
  44 movups (%edx),%xmm1
  48 movups %xmm2,(%eax [all...]
|
aesni-x86.pl |
  57 if ($PREFIX eq "aesni") { $movekey=*movups; }
  58 else { $movekey=*movups; }
  120 &movups ($rndkey0,&QWP(0,$key));
  168 &movups ($inout0,&QWP(0,"eax"));
  175 &movups (&QWP(0,"eax"),$inout0);
  184 &movups ($inout0,&QWP(0,"eax"));
  191 &movups (&QWP(0,"eax"),$inout0);
  389 &movups (&QWP(0,$out),$inout0);
  391 &movups (&QWP(0x10,$out),$inout1);
  393 &movups (&QWP(0x20,$out),$inout2) [all...]
|
aesni-x86_64.pl |
  178 $movkey = $PREFIX eq "aesni" ? "movups" : "movups";
  246 movups ($inp),$inout0 # load input
  251 movups $inout0,($out) # output
  259 movups ($inp),$inout0 # load input
  264 movups $inout0,($out) # output
  557 movups $inout0,($out)
  561 movups $inout1,0x10($out)
  563 movups $inout2,0x20($out)
  565 movups $inout3,0x30($out [all...]
|
aesni-sha1-x86_64.S |
  68 movups (%r15),%xmm13
  69 movups 16(%r15),%xmm14
  75 movups 0(%r12),%xmm12
  79 movups 32(%r15),%xmm15
  105 movups 48(%r15),%xmm14
  132 movups 64(%r15),%xmm15
  162 movups 80(%r15),%xmm14
  187 movups 96(%r15),%xmm15
  216 movups 112(%r15),%xmm14
  243 movups 128(%r15),%xmm1 [all...]
|
/external/valgrind/main/memcheck/tests/amd64/ |
fxsave-amd64.c |
  49 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm0");
  50 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm1");
  51 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm2");
  52 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm3");
  53 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm4");
  54 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm5");
  55 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm6");
  56 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm7");
  57 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm8");
  58 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm9") [all...]
|
xor-undef-amd64.c |
  66 "movups 0(%0), %%xmm0\n\t"
  67 "movups 16(%0), %%xmm8\n\t"
  69 "movups %%xmm0, 32(%0)\n\t"
  78 "movups 0(%0), %%xmm0\n\t"
  79 "movups 16(%0), %%xmm8\n\t"
  81 "movups %%xmm0, 32(%0)\n\t"
  93 "movups 0(%0), %%xmm0\n\t"
  94 "movups 16(%0), %%xmm8\n\t"
  96 "movups %%xmm0, 32(%0)\n\t"
  105 "movups 0(%0), %%xmm0\n\t [all...]
|
/external/valgrind/main/memcheck/tests/x86/ |
xor-undef-x86.c |
  68 "movups 0(%0), %%xmm0\n\t"
  69 "movups 16(%0), %%xmm7\n\t"
  71 "movups %%xmm0, 32(%0)\n\t"
  82 "movups 0(%0), %%xmm0\n\t"
  83 "movups 16(%0), %%xmm7\n\t"
  85 "movups %%xmm0, 32(%0)\n\t"
  99 "movups 0(%0), %%xmm0\n\t"
  100 "movups 16(%0), %%xmm7\n\t"
  102 "movups %%xmm0, 32(%0)\n\t"
  113 "movups 0(%0), %%xmm0\n\t [all...]
|
fxsave.c |
  38 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm0");
  39 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm1");
  40 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm2");
  41 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm3");
  42 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm4");
  43 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm5");
  44 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm6");
  45 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm7");
  62 asm __volatile__("movups " VG_SYM(vec0) ", %xmm0");
  63 asm __volatile__("movups " VG_SYM(vec1) ", %xmm1") [all...]
|
/external/valgrind/main/VEX/test/ |
fxsave.c |
  37 asm __volatile__("movups vecZ, %xmm0");
  38 asm __volatile__("movups vecZ, %xmm1");
  39 asm __volatile__("movups vecZ, %xmm2");
  40 asm __volatile__("movups vecZ, %xmm3");
  41 asm __volatile__("movups vecZ, %xmm4");
  42 asm __volatile__("movups vecZ, %xmm5");
  43 asm __volatile__("movups vecZ, %xmm6");
  44 asm __volatile__("movups vecZ, %xmm7");
  61 asm __volatile__("movups vec0, %xmm0");
  62 asm __volatile__("movups vec1, %xmm1") [all...]
|
/external/valgrind/main/none/tests/amd64/ |
bug137714-amd64.c |
  47 "movups (%0), %%xmm1\n\t"
  48 "movups (%1), %%xmm12\n\t"
|
/external/valgrind/main/none/tests/x86/ |
bug137714-x86.c |
  47 "movups (%0), %%xmm1\n\t"
  48 "movups (%1), %%xmm2\n\t"
|
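The valgrind tests listed above drive movups through GCC-style inline assembly. A self-contained sketch in the same spirit, assuming an SSE-capable x86-64 toolchain; the buffer names and contents here are made up for illustration and do not come from the tests themselves:

    #include <stdio.h>

    int main(void)
    {
        unsigned char src[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
        unsigned char dst[16];

        /* Unaligned 16-byte load into %xmm0, then an unaligned store back out,
           mirroring the "movups (%0), %%xmm0" pattern in the tests above. */
        __asm__ __volatile__(
            "movups (%0), %%xmm0\n\t"
            "movups %%xmm0, (%1)\n\t"
            : /* no outputs */
            : "r"(src), "r"(dst)
            : "xmm0", "memory");

        printf("%u %u\n", dst[0], dst[15]);
        return 0;
    }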