HomeSort by relevance Sort by last modified time
    Searched refs:vpalignr (Results 1 - 25 of 81) sorted by null

1 2 3 4

  /external/llvm/test/MC/X86/
shuffle-comments.s 18 vpalignr $8, %xmm0, %xmm1, %xmm2 label
20 vpalignr $8, (%rax), %xmm1, %xmm2 label
23 vpalignr $16, %xmm0, %xmm1, %xmm2 label
25 vpalignr $16, (%rax), %xmm1, %xmm2 label
28 vpalignr $0, %xmm0, %xmm1, %xmm2 label
30 vpalignr $0, (%rax), %xmm1, %xmm2 label
33 vpalignr $8, %ymm0, %ymm1, %ymm2 label
35 vpalignr $8, (%rax), %ymm1, %ymm2 label
38 vpalignr $16, %ymm0, %ymm1, %ymm2 label
40 vpalignr $16, (%rax), %ymm1, %ymm2 label
43 vpalignr $0, %ymm0, %ymm1, %ymm2 label
45 vpalignr $0, (%rax), %ymm1, %ymm2 label
    [all...]
x86-64-avx512bw_vl.s     [all...]
x86-64-avx512bw.s     [all...]
  /external/boringssl/linux-x86_64/crypto/cipher_extra/
chacha20_poly1305_x86_64.S     [all...]
  /external/boringssl/mac-x86_64/crypto/cipher_extra/
chacha20_poly1305_x86_64.S     [all...]
  /external/boringssl/linux-x86_64/crypto/fipsmodule/
sha512-x86_64.S 1875 vpalignr $8,%xmm0,%xmm1,%xmm8
1878 vpalignr $8,%xmm4,%xmm5,%xmm11
1944 vpalignr $8,%xmm1,%xmm2,%xmm8
1947 vpalignr $8,%xmm5,%xmm6,%xmm11
2013 vpalignr $8,%xmm2,%xmm3,%xmm8
2016 vpalignr $8,%xmm6,%xmm7,%xmm11
2082 vpalignr $8,%xmm3,%xmm4,%xmm8
2085 vpalignr $8,%xmm7,%xmm0,%xmm11
    [all...]
aesni-gcm-x86_64.S 189 vpalignr $8,%xmm4,%xmm4,%xmm0
274 vpalignr $8,%xmm4,%xmm4,%xmm8
702 vpalignr $8,%xmm8,%xmm8,%xmm14
727 vpalignr $8,%xmm8,%xmm8,%xmm14
762 vpalignr $8,%xmm8,%xmm8,%xmm2
766 vpalignr $8,%xmm8,%xmm8,%xmm2
sha1-x86_64.S     [all...]
  /external/boringssl/mac-x86_64/crypto/fipsmodule/
sha512-x86_64.S 1874 vpalignr $8,%xmm0,%xmm1,%xmm8
1877 vpalignr $8,%xmm4,%xmm5,%xmm11
1943 vpalignr $8,%xmm1,%xmm2,%xmm8
1946 vpalignr $8,%xmm5,%xmm6,%xmm11
2012 vpalignr $8,%xmm2,%xmm3,%xmm8
2015 vpalignr $8,%xmm6,%xmm7,%xmm11
2081 vpalignr $8,%xmm3,%xmm4,%xmm8
2084 vpalignr $8,%xmm7,%xmm0,%xmm11
2150 vpalignr $8,%xmm4,%xmm5,%xmm8
    [all...]
aesni-gcm-x86_64.S 189 vpalignr $8,%xmm4,%xmm4,%xmm0
274 vpalignr $8,%xmm4,%xmm4,%xmm8
702 vpalignr $8,%xmm8,%xmm8,%xmm14
727 vpalignr $8,%xmm8,%xmm8,%xmm14
762 vpalignr $8,%xmm8,%xmm8,%xmm2
766 vpalignr $8,%xmm8,%xmm8,%xmm2
sha1-x86_64.S     [all...]
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/
avx512bw-wig.s 96 vpalignr $0xab, %zmm4, %zmm5, %zmm6 # AVX512BW
97 vpalignr $0xab, %zmm4, %zmm5, %zmm6{%k7} # AVX512BW
98 vpalignr $0xab, %zmm4, %zmm5, %zmm6{%k7}{z} # AVX512BW
99 vpalignr $123, %zmm4, %zmm5, %zmm6 # AVX512BW
100 vpalignr $123, (%ecx), %zmm5, %zmm6 # AVX512BW
101 vpalignr $123, -123456(%esp,%esi,8), %zmm5, %zmm6 # AVX512BW
102 vpalignr $123, 8128(%edx), %zmm5, %zmm6 # AVX512BW Disp8
103 vpalignr $123, 8192(%edx), %zmm5, %zmm6 # AVX512BW
104 vpalignr $123, -8192(%edx), %zmm5, %zmm6 # AVX512BW Disp8
105 vpalignr $123, -8256(%edx), %zmm5, %zmm6 # AVX512BW
    [all...]
x86-64-avx512bw-wig.s 96 vpalignr $0xab, %zmm28, %zmm29, %zmm30 # AVX512BW
97 vpalignr $0xab, %zmm28, %zmm29, %zmm30{%k7} # AVX512BW
98 vpalignr $0xab, %zmm28, %zmm29, %zmm30{%k7}{z} # AVX512BW
99 vpalignr $123, %zmm28, %zmm29, %zmm30 # AVX512BW
100 vpalignr $123, (%rcx), %zmm29, %zmm30 # AVX512BW
101 vpalignr $123, 0x123(%rax,%r14,8), %zmm29, %zmm30 # AVX512BW
102 vpalignr $123, 8128(%rdx), %zmm29, %zmm30 # AVX512BW Disp8
103 vpalignr $123, 8192(%rdx), %zmm29, %zmm30 # AVX512BW
104 vpalignr $123, -8192(%rdx), %zmm29, %zmm30 # AVX512BW Disp8
105 vpalignr $123, -8256(%rdx), %zmm29, %zmm30 # AVX512BW
    [all...]
x86-64-avx512bw-wig1.d 102 [ ]*[a-f0-9]+:[ ]*62 03 95 40 0f f4 ab[ ]*vpalignr \$0xab,%zmm28,%zmm29,%zmm30
103 [ ]*[a-f0-9]+:[ ]*62 03 95 47 0f f4 ab[ ]*vpalignr \$0xab,%zmm28,%zmm29,%zmm30\{%k7\}
104 [ ]*[a-f0-9]+:[ ]*62 03 95 c7 0f f4 ab[ ]*vpalignr \$0xab,%zmm28,%zmm29,%zmm30\{%k7\}\{z\}
105 [ ]*[a-f0-9]+:[ ]*62 03 95 40 0f f4 7b[ ]*vpalignr \$0x7b,%zmm28,%zmm29,%zmm30
106 [ ]*[a-f0-9]+:[ ]*62 63 95 40 0f 31 7b[ ]*vpalignr \$0x7b,\(%rcx\),%zmm29,%zmm30
107 [ ]*[a-f0-9]+:[ ]*62 23 95 40 0f b4 f0 23 01 00 00 7b[ ]*vpalignr \$0x7b,0x123\(%rax,%r14,8\),%zmm29,%zmm30
108 [ ]*[a-f0-9]+:[ ]*62 63 95 40 0f 72 7f 7b[ ]*vpalignr \$0x7b,0x1fc0\(%rdx\),%zmm29,%zmm30
109 [ ]*[a-f0-9]+:[ ]*62 63 95 40 0f b2 00 20 00 00 7b[ ]*vpalignr \$0x7b,0x2000\(%rdx\),%zmm29,%zmm30
110 [ ]*[a-f0-9]+:[ ]*62 63 95 40 0f 72 80 7b[ ]*vpalignr \$0x7b,-0x2000\(%rdx\),%zmm29,%zmm30
111 [ ]*[a-f0-9]+:[ ]*62 63 95 40 0f b2 c0 df ff ff 7b[ ]*vpalignr \$0x7b,-0x2040\(%rdx\),%zmm29,%zmm30
    [all...]
x86-64-avx512bw_vl-wig.s 186 vpalignr $0xab, %xmm28, %xmm29, %xmm30 # AVX512{BW,VL}
187 vpalignr $0xab, %xmm28, %xmm29, %xmm30{%k7} # AVX512{BW,VL}
188 vpalignr $0xab, %xmm28, %xmm29, %xmm30{%k7}{z} # AVX512{BW,VL}
189 vpalignr $123, %xmm28, %xmm29, %xmm30 # AVX512{BW,VL}
190 vpalignr $123, (%rcx), %xmm29, %xmm30 # AVX512{BW,VL}
191 vpalignr $123, 0x123(%rax,%r14,8), %xmm29, %xmm30 # AVX512{BW,VL}
192 vpalignr $123, 2032(%rdx), %xmm29, %xmm30 # AVX512{BW,VL} Disp8
193 vpalignr $123, 2048(%rdx), %xmm29, %xmm30 # AVX512{BW,VL}
194 vpalignr $123, -2048(%rdx), %xmm29, %xmm30 # AVX512{BW,VL} Disp8
195 vpalignr $123, -2064(%rdx), %xmm29, %xmm30 # AVX512{BW,VL}
    [all...]
avx256int.s 191 vpalignr $7,%ymm4,%ymm6,%ymm2
192 vpalignr $7,(%ecx),%ymm6,%ymm2
515 vpalignr ymm2,ymm6,ymm4,7
516 vpalignr ymm2,ymm6,YMMWORD PTR [ecx],7
517 vpalignr ymm2,ymm6,[ecx],7
avx512bw.s 124 vpalignr $0xab, %zmm4, %zmm5, %zmm6 # AVX512BW
125 vpalignr $0xab, %zmm4, %zmm5, %zmm6{%k7} # AVX512BW
126 vpalignr $0xab, %zmm4, %zmm5, %zmm6{%k7}{z} # AVX512BW
127 vpalignr $123, %zmm4, %zmm5, %zmm6 # AVX512BW
128 vpalignr $123, (%ecx), %zmm5, %zmm6 # AVX512BW
129 vpalignr $123, -123456(%esp,%esi,8), %zmm5, %zmm6 # AVX512BW
130 vpalignr $123, 8128(%edx), %zmm5, %zmm6 # AVX512BW Disp8
131 vpalignr $123, 8192(%edx), %zmm5, %zmm6 # AVX512BW
132 vpalignr $123, -8192(%edx), %zmm5, %zmm6 # AVX512BW Disp8
133 vpalignr $123, -8256(%edx), %zmm5, %zmm6 # AVX512BW
    [all...]
x86-64-avx256int.s 194 vpalignr $7,%ymm4,%ymm6,%ymm2
195 vpalignr $7,(%rcx),%ymm6,%ymm2
521 vpalignr ymm2,ymm6,ymm4,7
522 vpalignr ymm2,ymm6,YMMWORD PTR [rcx],7
523 vpalignr ymm2,ymm6,[rcx],7
x86-64-avx512bw.s 124 vpalignr $0xab, %zmm28, %zmm29, %zmm30 # AVX512BW
125 vpalignr $0xab, %zmm28, %zmm29, %zmm30{%k7} # AVX512BW
126 vpalignr $0xab, %zmm28, %zmm29, %zmm30{%k7}{z} # AVX512BW
127 vpalignr $123, %zmm28, %zmm29, %zmm30 # AVX512BW
128 vpalignr $123, (%rcx), %zmm29, %zmm30 # AVX512BW
129 vpalignr $123, 0x123(%rax,%r14,8), %zmm29, %zmm30 # AVX512BW
130 vpalignr $123, 8128(%rdx), %zmm29, %zmm30 # AVX512BW Disp8
131 vpalignr $123, 8192(%rdx), %zmm29, %zmm30 # AVX512BW
132 vpalignr $123, -8192(%rdx), %zmm29, %zmm30 # AVX512BW Disp8
133 vpalignr $123, -8256(%rdx), %zmm29, %zmm30 # AVX512BW
    [all...]
x86-64-avx512bw_vl-wig1-intel.d 192 [ ]*[a-f0-9]+:[ ]*62 03 95 00 0f f4 ab[ ]*vpalignr xmm30,xmm29,xmm28,0xab
193 [ ]*[a-f0-9]+:[ ]*62 03 95 07 0f f4 ab[ ]*vpalignr xmm30\{k7\},xmm29,xmm28,0xab
194 [ ]*[a-f0-9]+:[ ]*62 03 95 87 0f f4 ab[ ]*vpalignr xmm30\{k7\}\{z\},xmm29,xmm28,0xab
195 [ ]*[a-f0-9]+:[ ]*62 03 95 00 0f f4 7b[ ]*vpalignr xmm30,xmm29,xmm28,0x7b
196 [ ]*[a-f0-9]+:[ ]*62 63 95 00 0f 31 7b[ ]*vpalignr xmm30,xmm29,XMMWORD PTR \[rcx\],0x7b
197 [ ]*[a-f0-9]+:[ ]*62 23 95 00 0f b4 f0 23 01 00 00 7b[ ]*vpalignr xmm30,xmm29,XMMWORD PTR \[rax\+r14\*8\+0x123\],0x7b
198 [ ]*[a-f0-9]+:[ ]*62 63 95 00 0f 72 7f 7b[ ]*vpalignr xmm30,xmm29,XMMWORD PTR \[rdx\+0x7f0\],0x7b
199 [ ]*[a-f0-9]+:[ ]*62 63 95 00 0f b2 00 08 00 00 7b[ ]*vpalignr xmm30,xmm29,XMMWORD PTR \[rdx\+0x800\],0x7b
200 [ ]*[a-f0-9]+:[ ]*62 63 95 00 0f 72 80 7b[ ]*vpalignr xmm30,xmm29,XMMWORD PTR \[rdx-0x800\],0x7b
201 [ ]*[a-f0-9]+:[ ]*62 63 95 00 0f b2 f0 f7 ff ff 7b[ ]*vpalignr xmm30,xmm29,XMMWORD PTR \[rdx-0x810\],0x7b
    [all...]
x86-64-avx512bw_vl-wig1.d 192 [ ]*[a-f0-9]+:[ ]*62 03 95 00 0f f4 ab[ ]*vpalignr \$0xab,%xmm28,%xmm29,%xmm30
193 [ ]*[a-f0-9]+:[ ]*62 03 95 07 0f f4 ab[ ]*vpalignr \$0xab,%xmm28,%xmm29,%xmm30\{%k7\}
194 [ ]*[a-f0-9]+:[ ]*62 03 95 87 0f f4 ab[ ]*vpalignr \$0xab,%xmm28,%xmm29,%xmm30\{%k7\}\{z\}
195 [ ]*[a-f0-9]+:[ ]*62 03 95 00 0f f4 7b[ ]*vpalignr \$0x7b,%xmm28,%xmm29,%xmm30
196 [ ]*[a-f0-9]+:[ ]*62 63 95 00 0f 31 7b[ ]*vpalignr \$0x7b,\(%rcx\),%xmm29,%xmm30
197 [ ]*[a-f0-9]+:[ ]*62 23 95 00 0f b4 f0 23 01 00 00 7b[ ]*vpalignr \$0x7b,0x123\(%rax,%r14,8\),%xmm29,%xmm30
198 [ ]*[a-f0-9]+:[ ]*62 63 95 00 0f 72 7f 7b[ ]*vpalignr \$0x7b,0x7f0\(%rdx\),%xmm29,%xmm30
199 [ ]*[a-f0-9]+:[ ]*62 63 95 00 0f b2 00 08 00 00 7b[ ]*vpalignr \$0x7b,0x800\(%rdx\),%xmm29,%xmm30
200 [ ]*[a-f0-9]+:[ ]*62 63 95 00 0f 72 80 7b[ ]*vpalignr \$0x7b,-0x800\(%rdx\),%xmm29,%xmm30
201 [ ]*[a-f0-9]+:[ ]*62 63 95 00 0f b2 f0 f7 ff ff 7b[ ]*vpalignr \$0x7b,-0x810\(%rdx\),%xmm29,%xmm30
    [all...]
avx512bw_vl-wig.s 166 vpalignr $0xab, %xmm4, %xmm5, %xmm6{%k7} # AVX512{BW,VL}
167 vpalignr $0xab, %xmm4, %xmm5, %xmm6{%k7}{z} # AVX512{BW,VL}
168 vpalignr $123, %xmm4, %xmm5, %xmm6{%k7} # AVX512{BW,VL}
169 vpalignr $123, (%ecx), %xmm5, %xmm6{%k7} # AVX512{BW,VL}
170 vpalignr $123, -123456(%esp,%esi,8), %xmm5, %xmm6{%k7} # AVX512{BW,VL}
171 vpalignr $123, 2032(%edx), %xmm5, %xmm6{%k7} # AVX512{BW,VL} Disp8
172 vpalignr $123, 2048(%edx), %xmm5, %xmm6{%k7} # AVX512{BW,VL}
173 vpalignr $123, -2048(%edx), %xmm5, %xmm6{%k7} # AVX512{BW,VL} Disp8
174 vpalignr $123, -2064(%edx), %xmm5, %xmm6{%k7} # AVX512{BW,VL}
175 vpalignr $0xab, %ymm4, %ymm5, %ymm6{%k7} # AVX512{BW,VL}
    [all...]
  /external/boringssl/win-x86_64/crypto/fipsmodule/
sha512-x86_64.asm 1907 vpalignr xmm8,xmm1,xmm0,8
1910 vpalignr xmm11,xmm5,xmm4,8
1976 vpalignr xmm8,xmm2,xmm1,8
1979 vpalignr xmm11,xmm6,xmm5,8
2045 vpalignr xmm8,xmm3,xmm2,8
2048 vpalignr xmm11,xmm7,xmm6,8
2114 vpalignr xmm8,xmm4,xmm3,8
2117 vpalignr xmm11,xmm0,xmm7,8
2183 vpalignr xmm8,xmm5,xmm4,8
2186 vpalignr xmm11,xmm1,xmm0,8
    [all...]
  /external/boringssl/src/crypto/fipsmodule/modes/asm/
aesni-gcm-x86_64.pl 262 vpalignr \$8,$Z0,$Z0,$Ii # 1st phase
347 vpalignr \$8,$Z0,$Z0,$Xi # 2nd phase
833 vpalignr \$8,$Xi,$Xi,$inout5 # 1st phase
858 vpalignr \$8,$Xi,$Xi,$inout5 # 2nd phase
893 vpalignr \$8,$Xi,$Xi,$T2 # 1st phase
897 vpalignr \$8,$Xi,$Xi,$T2 # 2nd phase
  /external/boringssl/src/crypto/cipher_extra/asm/
aes128gcmsiv-x86_64.pl 333 vpalignr \$8, $T, $T, $T
346 vpalignr \$8, $T, $T, $T
380 vpalignr \$8, $T, $T, $T
384 vpalignr \$8, $T, $T, $T
    [all...]

Completed in 3812 milliseconds

1 2 3 4