    Searched refs: xmm8 (Results 1 - 25 of 45)


  /external/llvm/test/MC/COFF/
seh.s 56 // CHECK-NEXT: 0x0E: SAVE_XMM128 reg=XMM8, offset=0x0
114 movups %xmm8, (%rsp)
115 .seh_savexmm %xmm8, 0
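The two LLVM test hits above pair the spill of XMM8 with the .seh_savexmm directive that records it in the Win64 unwind information. A minimal sketch of that prologue/epilogue pattern (function name and frame size are hypothetical; GNU as / LLVM MC syntax assumed):

        .text
        .globl  demo_fn
        .seh_proc demo_fn
demo_fn:
        subq    $40, %rsp               # hypothetical frame size
        .seh_stackalloc 40
        movups  %xmm8, (%rsp)           # spill the nonvolatile XMM8
        .seh_savexmm %xmm8, 0           # record the save at offset 0 in the unwind info
        .seh_endprologue
        # ... function body ...
        movups  (%rsp), %xmm8           # restore XMM8 before returning
        addq    $40, %rsp
        ret
        .seh_endproc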
  /external/libvpx/libvpx/vp8/common/x86/
loopfilter_block_sse2.asm 204 movdqa xmm8, i5
207 LF_FILTER_HEV_MASK xmm0, xmm1, xmm2, xmm3, xmm4, xmm8, xmm9, xmm10
212 movdqa xmm8, i5
213 LF_FILTER xmm1, xmm2, xmm3, xmm8, xmm0, xmm4
219 movdqa i5, xmm8
227 LF_FILTER_HEV_MASK xmm3, xmm8, xmm0, xmm1, xmm2, xmm4, xmm10, xmm11, xmm9
232 movdqa xmm8, i9
233 LF_FILTER xmm0, xmm1, xmm4, xmm8, xmm3, xmm2
239 movdqa i9, xmm8
247 LF_FILTER_HEV_MASK xmm4, xmm8, xmm0, xmm1, xmm2, xmm3, xmm9, xmm11, xmm1
    [all...]
  /external/chromium_org/third_party/openssl/openssl/crypto/aes/asm/
bsaes-x86_64.S 11 movdqa (%rax),%xmm8
14 pxor %xmm8,%xmm15
15 pxor %xmm8,%xmm0
17 pxor %xmm8,%xmm1
19 pxor %xmm8,%xmm2
21 pxor %xmm8,%xmm3
23 pxor %xmm8,%xmm4
25 pxor %xmm8,%xmm5
27 pxor %xmm8,%xmm6
32 movdqa 16(%r11),%xmm8
    [all...]
aesni-sha1-x86_64.S 81 movdqa %xmm3,%xmm8
88 psrldq $4,%xmm8
94 pxor %xmm2,%xmm8
99 pxor %xmm8,%xmm4
108 movdqa %xmm4,%xmm8
119 psrld $31,%xmm8
126 por %xmm8,%xmm4
172 movdqa %xmm5,%xmm8
178 pslldq $12,%xmm8
189 movdqa %xmm8,%xmm1
    [all...]
aesni-x86_64.S 318 pxor %xmm0,%xmm8
388 pxor %xmm0,%xmm8
462 movdqu 96(%rdi),%xmm8
483 movups %xmm8,96(%rsi)
484 movdqu 96(%rdi),%xmm8
504 movups %xmm8,96(%rsi)
526 movdqu 96(%rdi),%xmm8
534 movups %xmm8,96(%rsi)
605 movdqu 96(%rdi),%xmm8
626 movups %xmm8,96(%rsi)
    [all...]
vpaes-x86_64.pl 79 ## Preserves %xmm6 - %xmm8 so you get some local vectors
288 movdqa .Lk_rcon(%rip), %xmm8 # load rcon
486 ## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
498 # extract rcon from xmm8
500 palignr \$15, %xmm8, %xmm1
501 palignr \$15, %xmm8, %xmm8
681 movaps %xmm8,0x30(%rsp)
704 movaps 0x30(%rsp),%xmm8
    [all...]
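The vpaes-x86_64.pl hits above keep the AES round constants in %xmm8 and pull the current rcon out of its top byte with palignr. A hedged sketch of that step (the clearing pxor is an assumption, not quoted code):

        pxor    %xmm1, %xmm1            # clear the scratch register
        palignr $15, %xmm8, %xmm1       # low byte of xmm1 = top byte of xmm8 (the current rcon)
        palignr $15, %xmm8, %xmm8       # rotate xmm8 by one byte for the next round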
  /external/openssl/crypto/aes/asm/
bsaes-x86_64.S 11 movdqa (%rax),%xmm8
14 pxor %xmm8,%xmm15
15 pxor %xmm8,%xmm0
17 pxor %xmm8,%xmm1
19 pxor %xmm8,%xmm2
21 pxor %xmm8,%xmm3
23 pxor %xmm8,%xmm4
25 pxor %xmm8,%xmm5
27 pxor %xmm8,%xmm6
32 movdqa 16(%r11),%xmm8
    [all...]
aesni-sha1-x86_64.S 81 movdqa %xmm3,%xmm8
88 psrldq $4,%xmm8
94 pxor %xmm2,%xmm8
99 pxor %xmm8,%xmm4
108 movdqa %xmm4,%xmm8
119 psrld $31,%xmm8
126 por %xmm8,%xmm4
172 movdqa %xmm5,%xmm8
178 pslldq $12,%xmm8
189 movdqa %xmm8,%xmm1
    [all...]
aesni-x86_64.S 318 pxor %xmm0,%xmm8
388 pxor %xmm0,%xmm8
462 movdqu 96(%rdi),%xmm8
483 movups %xmm8,96(%rsi)
484 movdqu 96(%rdi),%xmm8
504 movups %xmm8,96(%rsi)
526 movdqu 96(%rdi),%xmm8
534 movups %xmm8,96(%rsi)
605 movdqu 96(%rdi),%xmm8
626 movups %xmm8,96(%rsi)
    [all...]
vpaes-x86_64.pl 79 ## Preserves %xmm6 - %xmm8 so you get some local vectors
288 movdqa .Lk_rcon(%rip), %xmm8 # load rcon
486 ## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
498 # extract rcon from xmm8
500 palignr \$15, %xmm8, %xmm1
501 palignr \$15, %xmm8, %xmm8
681 movaps %xmm8,0x30(%rsp)
704 movaps 0x30(%rsp),%xmm8
    [all...]
  /external/libvpx/libvpx/vp8/encoder/x86/
quantize_sse4.asm 127 pxor xmm8, xmm8
163 ZIGZAG_LOOP 8, 0, xmm3, xmm7, xmm8
168 ZIGZAG_LOOP 9, 1, xmm3, xmm7, xmm8
169 ZIGZAG_LOOP 12, 4, xmm3, xmm7, xmm8
170 ZIGZAG_LOOP 13, 5, xmm3, xmm7, xmm8
171 ZIGZAG_LOOP 10, 2, xmm3, xmm7, xmm8
173 ZIGZAG_LOOP 11, 3, xmm3, xmm7, xmm8
174 ZIGZAG_LOOP 14, 6, xmm3, xmm7, xmm8
175 ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8
    [all...]
  /external/chromium_org/third_party/yasm/source/patched-yasm/modules/arch/x86/tests/
sse5-basic.asm 7 compd xmm8, xmm11, xmm3, 5 ; 0F 25 2D 333 84 05
  /external/llvm/test/MC/AsmParser/
directive_seh.s 30 movups %xmm8, (%rsp)
31 .seh_savexmm %xmm8, 0
  /external/llvm/test/tools/llvm-objdump/
win64-unwind-data.s 18 // CHECK-NEXT: 0x0e: UOP_SaveXMM128 XMM8 [0x0000]
64 movups %xmm8, (%rsp)
65 .seh_savexmm %xmm8, 0
  /external/chromium_org/third_party/mesa/src/src/mesa/x86-64/
xform4.S 82 movups (%rdx), %xmm8 /* ox | oy | oz | ow */
85 pshufd $0x00, %xmm8, %xmm0 /* ox | ox | ox | ox */
87 pshufd $0x55, %xmm8, %xmm1 /* oy | oy | oy | oy */
89 pshufd $0xAA, %xmm8, %xmm2 /* oz | oz | oz | ox */
91 pshufd $0xFF, %xmm8, %xmm3 /* ow | ow | ow | ow */
168 movups (%rdx), %xmm8 /* ox | oy | oz | ow */
171 pshufd $0x00, %xmm8, %xmm0 /* ox | ox | ox | ox */
173 pshufd $0x55, %xmm8, %xmm1 /* oy | oy | oy | oy */
175 pshufd $0xAA, %xmm8, %xmm2 /* oz | oz | oz | ox */
177 pshufd $0xFF, %xmm8, %xmm3 /* ow | ow | ow | ow */
    [all...]
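Both copies of mesa's xform4.S load the input vertex once into %xmm8 and then use pshufd to broadcast each component before multiplying it against a matrix row. A rough sketch of one such accumulation step (the matrix pointer in %rdi and the mulps/addps pairing are assumptions, not quoted code):

        movups  (%rdx), %xmm8           # ox | oy | oz | ow
        pshufd  $0x00, %xmm8, %xmm0     # broadcast ox to all four lanes
        pshufd  $0x55, %xmm8, %xmm1     # broadcast oy
        mulps   (%rdi), %xmm0           # ox * matrix row 0 (assumes a 16-byte aligned matrix)
        mulps   16(%rdi), %xmm1         # oy * matrix row 1
        addps   %xmm1, %xmm0            # partial sum of the transformed vertex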
  /external/mesa3d/src/mesa/x86-64/
xform4.S 82 movups (%rdx), %xmm8 /* ox | oy | oz | ow */
85 pshufd $0x00, %xmm8, %xmm0 /* ox | ox | ox | ox */
87 pshufd $0x55, %xmm8, %xmm1 /* oy | oy | oy | oy */
89 pshufd $0xAA, %xmm8, %xmm2 /* oz | oz | oz | ox */
91 pshufd $0xFF, %xmm8, %xmm3 /* ow | ow | ow | ow */
168 movups (%rdx), %xmm8 /* ox | oy | oz | ow */
171 pshufd $0x00, %xmm8, %xmm0 /* ox | ox | ox | ox */
173 pshufd $0x55, %xmm8, %xmm1 /* oy | oy | oy | oy */
175 pshufd $0xAA, %xmm8, %xmm2 /* oz | oz | oz | ox */
177 pshufd $0xFF, %xmm8, %xmm3 /* ow | ow | ow | ow */
    [all...]
  /external/chromium_org/third_party/openssl/openssl/crypto/sha/asm/
sha1-x86_64.S 1340 movdqa %xmm3,%xmm8
1347 psrldq $4,%xmm8
1353 pxor %xmm2,%xmm8
1358 pxor %xmm8,%xmm4
1365 movdqa %xmm4,%xmm8
1376 psrld $31,%xmm8
1383 por %xmm8,%xmm4
1425 movdqa %xmm5,%xmm8
1431 pslldq $12,%xmm8
1440 movdqa %xmm8,%xmm1
    [all...]
  /external/openssl/crypto/sha/asm/
sha1-x86_64.S 1340 movdqa %xmm3,%xmm8
1347 psrldq $4,%xmm8
1353 pxor %xmm2,%xmm8
1358 pxor %xmm8,%xmm4
1365 movdqa %xmm4,%xmm8
1376 psrld $31,%xmm8
1383 por %xmm8,%xmm4
1425 movdqa %xmm5,%xmm8
1431 pslldq $12,%xmm8
1440 movdqa %xmm8,%xmm1
    [all...]
  /external/chromium_org/third_party/openssl/openssl/crypto/
x86_64cpuid.S 202 pxor %xmm8,%xmm8
x86_64cpuid.pl 225 pxor %xmm8,%xmm8
  /external/openssl/crypto/
x86_64cpuid.S 202 pxor %xmm8,%xmm8
x86_64cpuid.pl 225 pxor %xmm8,%xmm8
  /external/llvm/test/MC/X86/
x86_64-avx-encoding.s 3 // CHECK: vaddss %xmm8, %xmm9, %xmm10
5 vaddss %xmm8, %xmm9, %xmm10
7 // CHECK: vmulss %xmm8, %xmm9, %xmm10
9 vmulss %xmm8, %xmm9, %xmm10
11 // CHECK: vsubss %xmm8, %xmm9, %xmm10
13 vsubss %xmm8, %xmm9, %xmm10
15 // CHECK: vdivss %xmm8, %xmm9, %xmm10
17 vdivss %xmm8, %xmm9, %xmm10
19 // CHECK: vaddsd %xmm8, %xmm9, %xmm10
21 vaddsd %xmm8, %xmm9, %xmm10
    [all...]
  /external/zlib/src/contrib/amd64/
amd64-match.S 303 movdqu 48(%prev, %rdx), %xmm8
304 pcmpeqb %xmm8, %xmm7
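The zlib hit loads 16 bytes of the candidate match into %xmm8 and compares them byte-wise against the current string with pcmpeqb. A hedged sketch of how such a comparison is typically reduced to a match length (the pmovmskb/bsf step and the base registers are assumptions, not quoted from amd64-match.S):

        movdqu  48(%rsi), %xmm8         # 16 bytes of the candidate match
        movdqu  48(%rdi), %xmm7         # 16 bytes of the string being matched
        pcmpeqb %xmm8, %xmm7            # 0xFF in every lane where the bytes agree
        pmovmskb %xmm7, %eax            # collapse the lanes into a 16-bit mask
        notl    %eax                    # set bits now mark mismatches
        bsfl    %eax, %eax              # index of the first mismatch = bytes matched in this block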
  /external/chromium_org/v8/test/mjsunit/regress/
regress-crbug-173907.js 54 var xmm8 = v*v*v*v*v*v*v*v*v;
