/external/llvm/test/MC/X86/ |
x86_64-avx-encoding.s |
    131  // CHECK: vmaxss %xmm10, %xmm14, %xmm12
    133  vmaxss %xmm10, %xmm14, %xmm12
    135  // CHECK: vmaxsd %xmm10, %xmm14, %xmm12
    137  vmaxsd %xmm10, %xmm14, %xmm12
    139  // CHECK: vminss %xmm10, %xmm14, %xmm12
    141  vminss %xmm10, %xmm14, %xmm12
    143  // CHECK: vminsd %xmm10, %xmm14, %xmm12
    145  vminsd %xmm10, %xmm14, %xmm12
    147  // CHECK: vmaxss -4(%rbx,%rcx,8), %xmm12, %xmm10
    149  vmaxss -4(%rbx,%rcx,8), %xmm12, %xmm1 [all...]
x86_64-avx-clmul-encoding.s |
    3   // CHECK: vpclmulqdq $17, %xmm12, %xmm10, %xmm11
    5   vpclmulhqhqdq %xmm12, %xmm10, %xmm11
    11  // CHECK: vpclmulqdq $1, %xmm12, %xmm10, %xmm11
    13  vpclmulhqlqdq %xmm12, %xmm10, %xmm11
    19  // CHECK: vpclmulqdq $16, %xmm12, %xmm10, %xmm11
    21  vpclmullqhqdq %xmm12, %xmm10, %xmm11
    27  // CHECK: vpclmulqdq $0, %xmm12, %xmm10, %xmm11
    29  vpclmullqlqdq %xmm12, %xmm10, %xmm11
    35  // CHECK: vpclmulqdq $17, %xmm12, %xmm10, %xmm11
    37  vpclmulqdq $17, %xmm12, %xmm10, %xmm1 [all...]
x86_64-fma3-encoding.s |
    3   // CHECK: vfmadd132pd %xmm12, %xmm10, %xmm11
    5   vfmadd132pd %xmm12, %xmm10, %xmm11
    11  // CHECK: vfmadd132ps %xmm12, %xmm10, %xmm11
    13  vfmadd132ps %xmm12, %xmm10, %xmm11
    19  // CHECK: vfmadd213pd %xmm12, %xmm10, %xmm11
    21  vfmadd213pd %xmm12, %xmm10, %xmm11
    27  // CHECK: vfmadd213ps %xmm12, %xmm10, %xmm11
    29  vfmadd213ps %xmm12, %xmm10, %xmm11
    35  // CHECK: vfmadd231pd %xmm12, %xmm10, %xmm11
    37  vfmadd231pd %xmm12, %xmm10, %xmm1 [all...]
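All three MC tests above follow the same pattern: the file is fed through llvm-mc with --show-encoding and FileCheck matches both the printed instruction and its VEX encoding bytes. A minimal sketch of that layout, assuming the standard RUN line these tests use (the encoding bytes below were worked out by hand and should be treated as illustrative, not copied from the file):

    // RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding %s | FileCheck %s

    // Each case pairs CHECK lines with the instruction under test.
    // CHECK: vmaxss %xmm10, %xmm14, %xmm12
    // CHECK: encoding: [0xc4,0x41,0x0a,0x5f,0xe2]
    vmaxss %xmm10, %xmm14, %xmm12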
/external/chromium_org/third_party/yasm/source/patched-yasm/modules/arch/x86/tests/ |
sse5-basic.asm |
    8   compd xmm12, xmm4, xmm14, 5     ; 0F 25 2D 346 C1 05
    9   compd xmm9, xmm12, [0], byte 5  ; 0F 25 2D 044 045 94 00 00 00 00 05
    10  compd xmm9, xmm12, [r8], byte 5 ; 0F 25 2D 040 95 05
|
aes.asm |
    5  aesenc xmm10, xmm12
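aesenc performs one middle round of AES on the state in its first operand (ShiftRows, SubBytes, MixColumns, then XOR with the round key in the second operand). A hedged NASM sketch of how it chains into a whole AES-128 block encryption, assuming the eleven expanded round keys already sit in xmm2..xmm12 and the plaintext block in xmm0 (the register assignment is illustrative):

    section .text
    global aes128_encrypt_block
    aes128_encrypt_block:
        pxor        xmm0, xmm2     ; round 0: whitening XOR with key 0
        aesenc      xmm0, xmm3     ; rounds 1..9: full middle rounds
        aesenc      xmm0, xmm4
        aesenc      xmm0, xmm5
        aesenc      xmm0, xmm6
        aesenc      xmm0, xmm7
        aesenc      xmm0, xmm8
        aesenc      xmm0, xmm9
        aesenc      xmm0, xmm10
        aesenc      xmm0, xmm11
        aesenclast  xmm0, xmm12    ; final round: no MixColumns
        ret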
|
/external/chromium_org/third_party/libvpx/source/libvpx/vp8/encoder/x86/ |
ssim_opt.asm |
    22   paddd xmm12, xmm2 ; sum_sq_r
    83   pxor xmm12,xmm12 ;sum_sq_r
    115  SUM_ACROSS_Q xmm12
    125  movd [rdi], xmm12;
    173  pxor xmm12,xmm12 ;sum_sq_r
    196  SUM_ACROSS_Q xmm12
    206  movd [rdi], xmm12;
|
/external/chromium_org/third_party/libvpx/source/libvpx/vp9/encoder/x86/ |
vp9_ssim_opt.asm |
    22   paddd xmm12, xmm2 ; sum_sq_r
    83   pxor xmm12,xmm12 ;sum_sq_r
    115  SUM_ACROSS_Q xmm12
    125  movd [rdi], xmm12;
    173  pxor xmm12,xmm12 ;sum_sq_r
    196  SUM_ACROSS_Q xmm12
    206  movd [rdi], xmm12;
|
/external/libvpx/libvpx/vp8/encoder/x86/ |
ssim_opt.asm |
    22   paddd xmm12, xmm2 ; sum_sq_r
    83   pxor xmm12,xmm12 ;sum_sq_r
    115  SUM_ACROSS_Q xmm12
    125  movd [rdi], xmm12;
    173  pxor xmm12,xmm12 ;sum_sq_r
    196  SUM_ACROSS_Q xmm12
    206  movd [rdi], xmm12;
|
/external/libvpx/libvpx/vp9/encoder/x86/ |
vp9_ssim_opt.asm |
    22   paddd xmm12, xmm2 ; sum_sq_r
    83   pxor xmm12,xmm12 ;sum_sq_r
    115  SUM_ACROSS_Q xmm12
    125  movd [rdi], xmm12;
    173  pxor xmm12,xmm12 ;sum_sq_r
    196  SUM_ACROSS_Q xmm12
    206  movd [rdi], xmm12;
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/x86/ |
ssim_opt.asm |
    22   paddd xmm12, xmm2 ; sum_sq_r
    83   pxor xmm12,xmm12 ;sum_sq_r
    115  SUM_ACROSS_Q xmm12
    125  movd [rdi], xmm12;
    173  pxor xmm12,xmm12 ;sum_sq_r
    196  SUM_ACROSS_Q xmm12
    206  movd [rdi], xmm12;
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/ |
vp9_ssim_opt.asm |
    22   paddd xmm12, xmm2 ; sum_sq_r
    83   pxor xmm12,xmm12 ;sum_sq_r
    115  SUM_ACROSS_Q xmm12
    125  movd [rdi], xmm12;
    173  pxor xmm12,xmm12 ;sum_sq_r
    196  SUM_ACROSS_Q xmm12
    206  movd [rdi], xmm12;
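The six hits above are all the same vendored libvpx source; in it xmm12 holds the running sum of squared reference samples (sum_sq_r) for the SSIM computation. The pattern, as a hedged NASM sketch (register roles and the SUM_ACROSS_Q macro name follow the snippet; the per-block math and loop control are elided):

        pxor    xmm12, xmm12    ; zero the sum_sq_r accumulator
    .next_block:
        ; ... block processing leaves squared reference samples in xmm2 ...
        paddd   xmm12, xmm2     ; accumulate four packed dwords
        ; ... advance pointers, decrement counter, jnz .next_block ...
        SUM_ACROSS_Q xmm12      ; macro from the file: fold four dwords into one
        movd    [rdi], xmm12    ; store the 32-bit total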
|
/external/chromium_org/third_party/openssl/openssl/crypto/aes/asm/ |
bsaes-x86_64.S |
    158  movdqa %xmm1,%xmm12
    165  pxor %xmm3,%xmm12
    179  pand %xmm11,%xmm12
    180  pxor %xmm12,%xmm10
    181  pxor %xmm12,%xmm9
    182  movdqa %xmm6,%xmm12
    184  pxor %xmm0,%xmm12
    186  movdqa %xmm12,%xmm8
    187  pand %xmm11,%xmm12
    189  pxor %xmm12,%xmm [all...]
aesni-x86_64.S |
    900   pxor %xmm12,%xmm12
    913   movdqa %xmm12,-40(%rsp)
    918   pshufd $192,%xmm12,%xmm2
    919   pshufd $128,%xmm12,%xmm3
    920   pshufd $64,%xmm12,%xmm4
    955   movdqa -40(%rsp),%xmm12
    986   paddd %xmm13,%xmm12
    990   movdqa %xmm12,-40(%rsp)
    1013  pshufd $192,%xmm12,%xmm [all...]
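In aesni-x86_64's CTR loop, xmm12 carries a block of counter values that is parked at -40(%rsp) between iterations and fanned out to per-block registers with pshufd. The pshufd immediate selects a source dword for each destination lane, two bits per lane with the low bits controlling lane 0. A hedged AT&T sketch of that fan-out and counter bump (surrounding loop context assumed):

        movdqa  -40(%rsp),%xmm12    # reload the saved counter block
        pshufd  $192,%xmm12,%xmm2   # 0xC0: dword 0 -> lanes 0-2, dword 3 -> lane 3
        pshufd  $128,%xmm12,%xmm3   # 0x80: dword 0 -> lanes 0-2, dword 2 -> lane 3
        pshufd  $64,%xmm12,%xmm4    # 0x40: dword 0 -> lanes 0-2, dword 1 -> lane 3
        paddd   %xmm13,%xmm12       # step the counters for the next batch
        movdqa  %xmm12,-40(%rsp)    # park them again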
/external/openssl/crypto/aes/asm/ |
bsaes-x86_64.S |
    158  movdqa %xmm1,%xmm12
    165  pxor %xmm3,%xmm12
    179  pand %xmm11,%xmm12
    180  pxor %xmm12,%xmm10
    181  pxor %xmm12,%xmm9
    182  movdqa %xmm6,%xmm12
    184  pxor %xmm0,%xmm12
    186  movdqa %xmm12,%xmm8
    187  pand %xmm11,%xmm12
    189  pxor %xmm12,%xmm [all...]
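bsaes-x86_64 is bit-sliced AES, and the movdqa/pxor/pand runs around xmm12 are its transpose butterflies: the classic xor-and-xor trick that swaps the masked bits of two registers through a scratch register. A hedged AT&T sketch of one butterfly (register choice is illustrative):

        movdqa  %xmm10,%xmm12   # t = a
        pxor    %xmm9,%xmm12    # t = a ^ b
        pand    %xmm11,%xmm12   # t = (a ^ b) & mask
        pxor    %xmm12,%xmm10   # a ^= t: a's masked bits now come from b
        pxor    %xmm12,%xmm9    # b ^= t: and vice versa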
/external/llvm/test/TableGen/ |
cast.td |
    58  def XMM12: Register<"xmm12">;
    66  XMM12, XMM13, XMM14, XMM15]>;
|
MultiPat.td |
    62  def XMM12: Register<"xmm12">;
    70  XMM12, XMM13, XMM14, XMM15]>;
|
Slice.td |
    52  def XMM12: Register<"xmm12">;
    60  XMM12, XMM13, XMM14, XMM15]>;
|
TargetInstrSpec.td |
    59  def XMM12: Register<"xmm12">;
    67  XMM12, XMM13, XMM14, XMM15]>;
|
/art/runtime/arch/x86_64/ |
jni_entrypoints_x86_64.S |
    42  movq %xmm12, 64(%rsp)
    59  movq 64(%rsp), %xmm12
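The ART JNI trampoline spills the low 64 bits of xmm12 to a fixed stack slot before a call that may clobber it, then reloads the slot on the way out. A hedged AT&T sketch of that pairing (the 64(%rsp) offset follows the snippet; the frame size is illustrative):

        subq    $72, %rsp           # open a spill area
        movq    %xmm12, 64(%rsp)    # save low 64 bits of xmm12
        # ... call that may clobber XMM registers ...
        movq    64(%rsp), %xmm12    # restore it
        addq    $72, %rsp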
|
registers_x86_64.h |
    64  XMM12 = 12,
|
quick_method_frame_info_x86_64.h |
    38  (1 << art::x86_64::XMM12) | (1 << art::x86_64::XMM13) |
|
/external/valgrind/main/none/tests/amd64/ |
bug137714-amd64.c |
    48  "movups (%1), %%xmm12\n\t"
    50  "maskmovdqu %%xmm12,%%xmm1\n\t"
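maskmovdqu is a byte-masked, non-temporal 16-byte store: each byte of the data register is written to the implicit destination (%rdi) only if bit 7 of the corresponding mask byte is set, which is the selective-write behavior this valgrind test pokes at. A hedged AT&T sketch (buffer symbols are assumptions):

        leaq    dst(%rip), %rdi        # implicit store destination
        movups  mask(%rip), %xmm12     # 16-byte mask; each byte's MSB decides
        maskmovdqu %xmm12, %xmm1       # AT&T order: mask first, data second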
|
/external/llvm/test/CodeGen/X86/ |
preserve_allcc64.ll |
    24   ;SSE-NEXT: movaps %xmm12
    68   call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
    97   %a20 = call <2 x double> asm sideeffect "", "={xmm12}"() nounwind
    102  call void asm sideeffect "", "{rax},{rcx},{rdx},{r8},{r9},{r10},{r11},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15, <2 x double> %a16, <2 x double> %a17, <2 x double> %a18, <2 x double> %a19, <2 x double> %a20, <2 x double> %a21, <2 x double> %a22, <2 x double> %a23)
|
preserve_mostcc64.ll |
    37  call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
    57  ;SSE: movaps %xmm12
    79  %a20 = call <2 x double> asm sideeffect "", "={xmm12}"() nounwind
    84  call void asm sideeffect "", "{rax},{rcx},{rdx},{r8},{r9},{r10},{r11},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15, <2 x double> %a16, <2 x double> %a17, <2 x double> %a18, <2 x double> %a19, <2 x double> %a20, <2 x double> %a21, <2 x double> %a22, <2 x double> %a23)
|
/external/valgrind/main/memcheck/tests/amd64/ |
fxsave-amd64.c |
    61   asm __volatile__("movups " VG_SYM(vecZ) ", %xmm12");
    78   asm __volatile__("movups " VG_SYM(vecZ) "(%rip), %xmm12");
    112  asm __volatile__("movaps %xmm1, %xmm12");
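fxsave dumps the complete x87/MMX/SSE state into a 512-byte, 16-byte-aligned memory image; the XMM registers start at byte offset 160, sixteen bytes apiece, so xmm12's slot sits at offset 160 + 12*16 = 352. A hedged AT&T sketch of taking such a snapshot (symbol names are assumptions):

        .bss
        .balign 16
    fxbuf:
        .zero 512                  # FXSAVE area: 512 bytes, 16-byte aligned

        .text
        .globl snapshot_fpu
    snapshot_fpu:
        pxor    %xmm12, %xmm12     # put a known value in xmm12
        fxsave  fxbuf(%rip)        # snapshot; xmm12's image lands at fxbuf+352
        ret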
|