    Searched full:xmm9 (Results 26 - 50 of 133)


  /external/valgrind/none/tests/amd64/
avx2-1.c 90 : /*TRASH*/"xmm0","xmm7","xmm8","xmm6","xmm9","r14","memory","cc" \
110 "xmm0","xmm8","xmm7","xmm9","r14","rax","memory","cc" \
654 "vextracti128 $0x0, %%ymm7, %%xmm9",
658 "vextracti128 $0x1, %%ymm7, %%xmm9",
662 "vinserti128 $0x0, %%xmm9, %%ymm7, %%ymm8",
666 "vinserti128 $0x1, %%xmm9, %%ymm7, %%ymm8",
695 "vbroadcastss %%xmm9, %%xmm7")
698 "vbroadcastss %%xmm9, %%ymm7")
701 "vbroadcastsd %%xmm9, %%ymm7")
907 "vpbroadcastb %%xmm9, %%xmm7"
    [all...]
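
A quick illustration of the lane operations the avx2-1.c test trashes xmm9 with: vextracti128/vinserti128 move a 128-bit half out of and into a 256-bit register, and vbroadcastss replicates element 0. A minimal C sketch using the equivalent AVX2 intrinsics (compile with -mavx2; values and variable names are invented for this example):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        __m256i ymm7 = _mm256_set_epi32(8, 7, 6, 5, 4, 3, 2, 1);

        /* vextracti128 $0x1, %ymm7, %xmm9 : grab the upper 128-bit lane */
        __m128i xmm9 = _mm256_extracti128_si256(ymm7, 1);

        /* vinserti128 $0x0, %xmm9, %ymm7, %ymm8 : put it back as the lower lane */
        __m256i ymm8 = _mm256_inserti128_si256(ymm7, xmm9, 0);

        /* vbroadcastss %xmm9, %ymm7 : replicate element 0 across all 8 lanes */
        __m256 b = _mm256_broadcastss_ps(_mm_castsi128_ps(xmm9));
        (void)b;

        int out[8];
        _mm256_storeu_si256((__m256i *)out, ymm8);
        printf("%d %d %d\n", out[0], out[1], out[7]); /* 5 6 8 */
        return 0;
    }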
  /external/boringssl/linux-x86_64/crypto/sha/
sha512-x86_64.S 1849 vpaddq -96(%rbp),%xmm1,%xmm9
1855 vmovdqa %xmm9,16(%rsp)
1856 vpaddq 32(%rbp),%xmm5,%xmm9
1863 vmovdqa %xmm9,80(%rsp)
1894 vpxor %xmm9,%xmm8,%xmm8
1919 vpxor %xmm9,%xmm11,%xmm11
1963 vpxor %xmm9,%xmm8,%xmm8
1988 vpxor %xmm9,%xmm11,%xmm11
2032 vpxor %xmm9,%xmm8,%xmm8
2057 vpxor %xmm9,%xmm11,%xmm1
    [all...]
  /external/boringssl/mac-x86_64/crypto/sha/
sha512-x86_64.S 1848 vpaddq -96(%rbp),%xmm1,%xmm9
1854 vmovdqa %xmm9,16(%rsp)
1855 vpaddq 32(%rbp),%xmm5,%xmm9
1862 vmovdqa %xmm9,80(%rsp)
1893 vpxor %xmm9,%xmm8,%xmm8
1918 vpxor %xmm9,%xmm11,%xmm11
1962 vpxor %xmm9,%xmm8,%xmm8
1987 vpxor %xmm9,%xmm11,%xmm11
2031 vpxor %xmm9,%xmm8,%xmm8
2056 vpxor %xmm9,%xmm11,%xmm1
    [all...]
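
Both sha512-x86_64.S copies above are the same perlasm output built once per platform, which is why the hits mirror each other at an offset of one line. The pattern itself: vpaddq pre-adds the round-constant table entry (indexed off %rbp) to a pair of message-schedule words in xmm9, and vmovdqa parks the sum on the stack for the scalar rounds to consume. A hedged C sketch of that idea (names invented):

    #include <emmintrin.h>
    #include <stdint.h>

    /* K points into the SHA-512 round-constant table, w holds two
     * message-schedule words, wk is the stack slot the rounds read. */
    static inline void schedule_add(const uint64_t *K, __m128i w, uint64_t *wk) {
        /* vpaddq -96(%rbp),%xmm1,%xmm9 : w[t] + K[t], two lanes at once */
        __m128i sum = _mm_add_epi64(w, _mm_loadu_si128((const __m128i *)K));
        /* vmovdqa %xmm9,16(%rsp) : spill for the scalar round code */
        _mm_storeu_si128((__m128i *)wk, sum);
    }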
  /external/llvm/test/CodeGen/X86/
vselect-minmax.ll     [all...]
preserve_allcc64.ll 27 ;SSE-NEXT: movaps %xmm9
68 call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
94 %a17 = call <2 x double> asm sideeffect "", "={xmm9}"() nounwind
102 call void asm sideeffect "", "{rax},{rcx},{rdx},{r8},{r9},{r10},{r11},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15, <2 x double> %a16, <2 x double> %a17, <2 x double> %a18, <2 x double> %a19, <2 x double> %a20, <2 x double> %a21, <2 x double> %a22, <2 x double> %a23)
preserve_mostcc64.ll 37 call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
54 ;SSE: movaps %xmm9
76 %a17 = call <2 x double> asm sideeffect "", "={xmm9}"() nounwind
84 call void asm sideeffect "", "{rax},{rcx},{rdx},{r8},{r9},{r10},{r11},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15, <2 x double> %a16, <2 x double> %a17, <2 x double> %a18, <2 x double> %a19, <2 x double> %a20, <2 x double> %a21, <2 x double> %a22, <2 x double> %a23)
win64_nonvol.ll 14 ; CHECK-DAG: movaps %xmm9,
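
The three CodeGen tests above pin down register-preservation contracts: preserve_allcc64.ll and preserve_mostcc64.ll verify (via the movaps %xmm9 spills and the large clobber lists) which registers the callee must save, and win64_nonvol.ll checks the Win64 non-volatile set. From C these conventions are requested with Clang attributes; a minimal sketch, assuming Clang and invented function names:

    /* Clang-specific: the callee saves essentially every register,
     * including %xmm9, so the hot caller keeps its live state for free. */
    __attribute__((preserve_all)) void rarely_taken_slowpath(void);

    void hot_loop_body(void) {
        /* XMM state live here survives the call without caller-side spills */
        rarely_taken_slowpath();
    }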
  /external/llvm/test/TableGen/
MultiPat.td 59 def XMM9: Register<"xmm9">;
69 XMM8, XMM9, XMM10, XMM11,
Slice.td 49 def XMM9: Register<"xmm9">;
59 XMM8, XMM9, XMM10, XMM11,
TargetInstrSpec.td 56 def XMM9: Register<"xmm9">;
66 XMM8, XMM9, XMM10, XMM11,
  /external/libvpx/libvpx/vpx_dsp/x86/
vpx_high_subpixel_bilinear_sse2.asm 128 movdqa xmm9, xmm0
130 punpckhwd xmm9, xmm1
135 pmaddwd xmm9, xmm7
140 paddd xmm9, xmm4 ;rounding
145 psrad xmm9, 7 ;shift
150 packssdw xmm0, xmm9 ;pack back to word
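
That bilinear snippet is the standard widen / multiply-accumulate / round / narrow pipeline: punpckhwd widens 16-bit pixels into 32-bit pairs, pmaddwd applies the two filter taps, the rounding constant is added, psrad shifts out the 7 fractional bits, and packssdw narrows back to words. An approximate C intrinsics rendering (assuming Q7 taps; names invented):

    #include <emmintrin.h>

    /* One output vector of a bilinear tap, 7-bit filter precision. */
    static __m128i bilinear_round(__m128i px_pairs, __m128i taps) {
        const __m128i rounding = _mm_set1_epi32(1 << 6);   /* 0.5 in Q7 */
        __m128i acc = _mm_madd_epi16(px_pairs, taps);      /* pmaddwd */
        acc = _mm_add_epi32(acc, rounding);                /* paddd (rounding) */
        acc = _mm_srai_epi32(acc, 7);                      /* psrad $7 (shift) */
        return _mm_packs_epi32(acc, acc);                  /* packssdw */
    }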
  /external/libvpx/libvpx/vp8/common/x86/
loopfilter_block_sse2_x86_64.asm 205 movdqa xmm9, i6 ; q2, will contain abs(p1-p0)
207 LF_FILTER_HEV_MASK xmm0, xmm1, xmm2, xmm3, xmm4, xmm8, xmm9, xmm10
227 LF_FILTER_HEV_MASK xmm3, xmm8, xmm0, xmm1, xmm2, xmm4, xmm10, xmm11, xmm9
245 movdqa xmm9, i14 ; q2, will contain abs(p1-p0)
247 LF_FILTER_HEV_MASK xmm4, xmm8, xmm0, xmm1, xmm2, xmm3, xmm9, xmm11, xmm10
494 movdqa xmm9, xmm7
496 punpckhqdq xmm9, i1
525 movdqa i3, xmm9
546 LF_FILTER_HEV_MASK xmm0, xmm12, xmm13, xmm9, xmm4, xmm10, xmm3, xmm11
551 movdqa xmm9, i
    [all...]
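
The loop-filter comments say what xmm9 is for: staging abs(p1 - p0) before LF_FILTER_HEV_MASK consumes it. SSE2 has no unsigned byte absolute-difference instruction, so the usual idiom (not necessarily the exact macro body here) is saturating subtraction in both directions followed by OR:

    #include <emmintrin.h>

    /* abs(a - b) per unsigned byte: (a -sat b) | (b -sat a). */
    static __m128i abs_diff_u8(__m128i a, __m128i b) {
        return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a));
    }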
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
loopfilter_block_sse2.asm 205 movdqa xmm9, i6 ; q2, will contain abs(p1-p0)
207 LF_FILTER_HEV_MASK xmm0, xmm1, xmm2, xmm3, xmm4, xmm8, xmm9, xmm10
227 LF_FILTER_HEV_MASK xmm3, xmm8, xmm0, xmm1, xmm2, xmm4, xmm10, xmm11, xmm9
245 movdqa xmm9, i14 ; q2, will contain abs(p1-p0)
247 LF_FILTER_HEV_MASK xmm4, xmm8, xmm0, xmm1, xmm2, xmm3, xmm9, xmm11, xmm10
494 movdqa xmm9, xmm7
496 punpckhqdq xmm9, i1
525 movdqa i3, xmm9
546 LF_FILTER_HEV_MASK xmm0, xmm12, xmm13, xmm9, xmm4, xmm10, xmm3, xmm11
551 movdqa xmm9, i
    [all...]
  /external/libvpx/libvpx/third_party/libyuv/source/
rotate_gcc.cc 254 "movdqa %%xmm8,%%xmm9 \n"
256 "palignr $0x8,%%xmm9,%%xmm9 \n"
303 "punpcklwd %%xmm11,%%xmm9 \n"
305 "movdqa %%xmm9,%%xmm11 \n"
350 "punpckldq %%xmm13,%%xmm9 \n"
353 "movdqa %%xmm9,%%xmm13 \n"
354 "movq %%xmm9,(%1) \n"
373 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
418 "movdqa %%xmm1,%%xmm9 \n
    [all...]
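
rotate_gcc.cc is libyuv's SSE2 transpose: each stage interleaves progressively wider units (palignr to stagger, punpcklwd on 16-bit lanes, then punpckldq on 32-bit lanes) until rows and columns have fully swapped. A toy sketch of one interleave stage (row variables invented):

    #include <emmintrin.h>

    /* One butterfly stage of an SSE2 transpose: the outputs hold
     * alternating 16-bit lanes taken from the two input rows. */
    static void interleave16(__m128i r0, __m128i r1, __m128i *lo, __m128i *hi) {
        *lo = _mm_unpacklo_epi16(r0, r1);  /* punpcklwd */
        *hi = _mm_unpackhi_epi16(r0, r1);  /* punpckhwd */
    }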
  /external/boringssl/src/crypto/aes/asm/
vpaes-x86_64.pl 75 ## %xmm9-%xmm15 as in _vpaes_preheat
89 movdqa %xmm9, %xmm1
94 pand %xmm9, %xmm0
134 movdqa %xmm9, %xmm1 # 1 : i
138 pand %xmm9, %xmm0 # 0 = k
178 movdqa %xmm9, %xmm1
185 pand %xmm9, %xmm0
240 movdqa %xmm9, %xmm1 # 1 : i
244 pand %xmm9, %xmm0 # 0 = k
522 movdqa %xmm9, %xmm
    [all...]
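
The perlasm comment at line 75 explains the role: %xmm9-%xmm15 hold constants set up by _vpaes_preheat, with %xmm9 being the 0x0F byte mask. Every pand %xmm9, %xmm0 therefore strips bytes to their low nibble so pshufb can perform 16-entry S-box table lookups. A hedged sketch of the nibble split (names invented):

    #include <emmintrin.h>

    /* Split each byte into low/high nibble indexes for pshufb lookups. */
    static void nibble_split(__m128i in, __m128i *lo, __m128i *hi) {
        const __m128i mask = _mm_set1_epi8(0x0F);         /* %xmm9 */
        *lo = _mm_and_si128(in, mask);                    /* pand %xmm9,%xmm0 */
        *hi = _mm_and_si128(_mm_srli_epi16(in, 4), mask); /* psrlw $4 + pand */
    }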
  /external/boringssl/linux-x86_64/crypto/aes/
vpaes-x86_64.S 25 movdqa %xmm9,%xmm1
30 pand %xmm9,%xmm0
70 movdqa %xmm9,%xmm1
74 pand %xmm9,%xmm0
114 movdqa %xmm9,%xmm1
121 pand %xmm9,%xmm0
176 movdqa %xmm9,%xmm1
180 pand %xmm9,%xmm0
458 movdqa %xmm9,%xmm1
461 pand %xmm9,%xmm
    [all...]
  /external/boringssl/mac-x86_64/crypto/aes/
vpaes-x86_64.S 25 movdqa %xmm9,%xmm1
30 pand %xmm9,%xmm0
70 movdqa %xmm9,%xmm1
74 pand %xmm9,%xmm0
114 movdqa %xmm9,%xmm1
121 pand %xmm9,%xmm0
176 movdqa %xmm9,%xmm1
180 pand %xmm9,%xmm0
458 movdqa %xmm9,%xmm1
461 pand %xmm9,%xmm
    [all...]
  /external/boringssl/win-x86_64/crypto/modes/
ghash-x86_64.asm 1013 movdqa xmm9,xmm8
1016 psrldq xmm9,8
1019 pxor xmm1,xmm9
1024 pxor xmm9,xmm0
1026 psllq xmm9,57
1027 movdqa xmm8,xmm9
1028 pslldq xmm9,8
1031 pxor xmm0,xmm9
1035 movdqa xmm9,xmm0
1045 pxor xmm1,xmm9
    [all...]
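
The ghash-x86_64 hits sit inside a carry-less (Karatsuba) multiply and its reduction modulo the GHASH polynomial: psrldq/pslldq split and realign the 64-bit halves held in xmm9, and psllq $57 is part of folding the high bits back per x^128 + x^7 + x^2 + x + 1. The primitive underneath is pclmulqdq; a minimal C illustration of one partial product (not the full reduction):

    #include <wmmintrin.h>   /* compile with -mpclmul */

    /* One 64x64 -> 128-bit carry-less product, the building block of GHASH. */
    static __m128i clmul_lo(__m128i a, __m128i b) {
        /* selector 0x00: multiply the low 64-bit halves of a and b */
        return _mm_clmulepi64_si128(a, b, 0x00);
    }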
  /external/valgrind/none/tests/amd64-solaris/
coredump_single_thread_sse.c 39 "movupd 144(%[input]), %%xmm9\n"
51 "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13",
  /art/runtime/arch/x86_64/
registers_x86_64.h 62 XMM9 = 9,
  /external/gemmlowp/internal/
kernel_SSE.h 245 // |xmm0 | | xmm8 | xmm9 | xmm10 | xmm11 |
246 // |xmm0 | (Iter2) | xmm8 | xmm9 | xmm10 | xmm11 |
247 // |xmm0 | | xmm8 | xmm9 | xmm10 | xmm11 |
248 // |xmm0 | | xmm8 | xmm9 | xmm10 | xmm11 |
271 "pxor %%xmm9 , %%xmm9 \n\t"
316 "paddd %%xmm3, %%xmm9 \n\t"
370 "paddd %%xmm3, %%xmm9 \n\t"
433 "paddd %%xmm3, %%xmm9 \n\t"
471 "paddd 0x10(%[dst_ptr], %%r12, 1) , %%xmm9 \n\t
    [all...]
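
The kernel_SSE.h comments spell out the register plan: xmm8-xmm11 hold one strip of 32-bit accumulators that are zeroed with pxor, fed by paddd after every multiply step, and finally combined with whatever is already in the destination. A stripped-down C intrinsics sketch of that accumulate pattern (buffer names invented):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Sum n product vectors into dst, gemmlowp-style. */
    static void accumulate(int32_t dst[4], const __m128i *products, int n) {
        __m128i acc = _mm_setzero_si128();          /* pxor %xmm9,%xmm9 */
        for (int i = 0; i < n; ++i)
            acc = _mm_add_epi32(acc, products[i]);  /* paddd %xmm3,%xmm9 */
        /* paddd 0x10(%[dst_ptr],%%r12,1),%xmm9 : fold in the old destination */
        __m128i prev = _mm_loadu_si128((const __m128i *)dst);
        _mm_storeu_si128((__m128i *)dst, _mm_add_epi32(acc, prev));
    }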
  /external/valgrind/memcheck/tests/amd64/
fxsave-amd64.c 66 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm9");
83 asm __volatile__("movups " VG_SYM(vecZ) "(%rip), %xmm9");
117 asm __volatile__("movaps %xmm2, %xmm9");
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/
x86-64-sha.d 15 [ ]*[a-f0-9]+: 44 0f 38 c8 48 12 sha1nexte 0x12\(%rax\),%xmm9
19 [ ]*[a-f0-9]+: 44 0f 38 c9 48 12 sha1msg1 0x12\(%rax\),%xmm9
23 [ ]*[a-f0-9]+: 44 0f 38 ca 48 12 sha1msg2 0x12\(%rax\),%xmm9
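
This gas test checks the disassembly of the SHA-NI message-schedule instructions with a memory source and %xmm9 as destination (the leading 44 byte is the REX.R prefix needed to reach xmm9). From C the same instructions are reachable through intrinsics; a minimal sketch of one SHA-1 schedule step, following the usual msg1 / xor / msg2 pattern (compile with -msha; names invented):

    #include <immintrin.h>

    /* Derive the next 4 schedule words from the previous 16 (w0..w3). */
    static __m128i sha1_next_w(__m128i w0, __m128i w1, __m128i w2, __m128i w3) {
        __m128i t = _mm_sha1msg1_epu32(w0, w1);  /* sha1msg1 */
        t = _mm_xor_si128(t, w2);                /* xor step the ISA leaves out */
        return _mm_sha1msg2_epu32(t, w3);        /* sha1msg2 */
    }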
  /external/libyuv/files/source/
rotate.cc 512 "movdqa %%xmm8,%%xmm9 \n"
514 "palignr $0x8,%%xmm9,%%xmm9 \n"
561 "punpcklwd %%xmm11,%%xmm9 \n"
563 "movdqa %%xmm9,%%xmm11 \n"
608 "punpckldq %%xmm13,%%xmm9 \n"
611 "movdqa %%xmm9,%%xmm13 \n"
612 "movq %%xmm9,(%1) \n"
631 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
678 "movdqa %%xmm1,%%xmm9 \n
    [all...]
  /external/boringssl/linux-x86_64/crypto/modes/
ghash-x86_64.S 972 movdqa %xmm8,%xmm9
975 psrldq $8,%xmm9
978 pxor %xmm9,%xmm1
983 pxor %xmm0,%xmm9
985 psllq $57,%xmm9
986 movdqa %xmm9,%xmm8
987 pslldq $8,%xmm9
990 pxor %xmm9,%xmm0
994 movdqa %xmm0,%xmm9
1004 pxor %xmm9,%xmm
    [all...]
