    Searched refs: palignr (Results 1 - 18 of 18)

  /bionic/libc/arch-x86/string/
ssse3-memcpy5.S 451 palignr $1, %xmm4, %xmm5
452 palignr $1, %xmm3, %xmm4
454 palignr $1, %xmm2, %xmm3
456 palignr $1, %xmm1, %xmm2
471 palignr $1, %xmm2, %xmm3
472 palignr $1, %xmm1, %xmm2
494 palignr $1, %xmm2, %xmm3
495 palignr $1, %xmm1, %xmm2
505 palignr $1, %xmm2, %xmm3
506 palignr $1, %xmm4, %xmm
    [all...]
ssse3-memcmp3-new.S 279 palignr $1,(%esi), %xmm1
283 palignr $1,%xmm2, %xmm3
304 palignr $1,(%esi), %xmm0
308 palignr $1,16(%esi), %xmm3
318 palignr $1,48(%esi), %xmm3
321 palignr $1,32(%esi), %xmm0
362 palignr $2,(%esi), %xmm1
366 palignr $2,%xmm2, %xmm3
387 palignr $2,(%esi), %xmm0
391 palignr $2,16(%esi), %xmm
    [all...]
ssse3-strcmp-latest.S 349 palignr $1, %xmm3, %xmm2
373 palignr $1, %xmm3, %xmm2
455 palignr $2, %xmm3, %xmm2
479 palignr $2, %xmm3, %xmm2
562 palignr $3, %xmm3, %xmm2
586 palignr $3, %xmm3, %xmm2
668 palignr $4, %xmm3, %xmm2
693 palignr $4, %xmm3, %xmm2
777 palignr $5, %xmm3, %xmm2
801 palignr $5, %xmm3, %xmm
    [all...]
  /external/libvpx/libvpx/vp8/common/x86/
sad_ssse3.asm 62 palignr xmm5, xmm4, %2
65 palignr xmm6, xmm4, (%2+1)
67 palignr xmm7, xmm4, (%2+2)
78 palignr xmm1, xmm4, %2
81 palignr xmm2, xmm4, (%2+1)
83 palignr xmm3, xmm4, (%2+2)
98 palignr xmm1, xmm4, %2
101 palignr xmm2, xmm4, (%2+1)
103 palignr xmm3, xmm4, (%2+2)
  /external/libyuv/files/source/
rotate.cc 87 palignr xmm1, xmm1, 8
93 palignr xmm3, xmm3, 8
99 palignr xmm5, xmm5, 8
104 palignr xmm7, xmm7, 8
110 palignr xmm2, xmm2, 8
111 palignr xmm3, xmm3, 8
116 palignr xmm6, xmm6, 8
117 palignr xmm7, xmm7, 8
123 palignr xmm4, xmm4, 8
128 palignr xmm6, xmm6,
    [all...]
row_win.cc 254 palignr xmm2, xmm1, 8 // xmm2 = { xmm3[0:3] xmm1[8:15]}
257 palignr xmm1, xmm0, 12 // xmm1 = { xmm3[0:7] xmm0[12:15]}
264 palignr xmm3, xmm3, 4 // xmm3 = { xmm3[4:15]}
294 palignr xmm2, xmm1, 8 // xmm2 = { xmm3[0:3] xmm1[8:15]}
297 palignr xmm1, xmm0, 12 // xmm1 = { xmm3[0:7] xmm0[12:15]}
304 palignr xmm3, xmm3, 4 // xmm3 = { xmm3[4:15]}
    [all...]
scale.cc 571 palignr xmm1, xmm0, 8
    [all...]
  /external/llvm/test/MC/X86/
shuffle-comments.s 3 palignr $8, %xmm0, %xmm1
5 palignr $8, (%rax), %xmm1
8 palignr $16, %xmm0, %xmm1
10 palignr $16, (%rax), %xmm1
13 palignr $0, %xmm0, %xmm1
15 palignr $0, (%rax), %xmm1
  /external/openssl/crypto/aes/asm/
vpaes-x86.pl 327 &palignr("xmm5","xmm5",12);
445 &palignr("xmm0","xmm6",8);
577 &palignr("xmm1","xmm2",15);
578 &palignr("xmm2","xmm2",15);
583 &palignr("xmm0","xmm0",1);
vpaes-x86_64.pl 236 palignr \$12, %xmm5, %xmm5
362 palignr \$8,%xmm6,%xmm0
500 palignr \$15, %xmm8, %xmm1
501 palignr \$15, %xmm8, %xmm8
506 palignr \$1, %xmm0, %xmm0
aesni-sha1-x86_64.pl 262 &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]"
342 &palignr(@Tx[0],@X[-2&7],8); # compose "X[-6]"
  /external/openssl/crypto/perlasm/
x86asm.pl 110 sub ::palignr
115 { &::generic("palignr",@_); }
x86_64-xlate.pl 736 my $palignr = sub {
    [all...]
  /external/qemu/target-i386/
ops_sse_header.h 280 DEF_HELPER_3(glue(palignr, SUFFIX), void, Reg, Reg, s32)
translate.c     [all...]
  /external/openssl/crypto/sha/asm/
sha1-586.pl 533 &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]"
618 &palignr(@X[2],@X[-2&7],8); # compose "X[-6]"
    [all...]
sha1-x86_64.pl 380 &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]"
460 &palignr(@Tx[0],@X[-2&7],8); # compose "X[-6]"
  /external/libvpx/libvpx/third_party/libyuv/source/
scale.c 927 palignr xmm1, xmm0, 8
    [all...]
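
Note: every hit above is the SSSE3 palignr (packed align right) instruction, which concatenates two 128-bit registers, shifts the 256-bit intermediate right by the immediate number of bytes, and keeps the low 128 bits. The memcpy/memcmp/strcmp and libvpx/libyuv routines use it to build a shifted 16-byte window out of two aligned loads; the SHA-1 and vpaes code uses the same trick to compose rotated message/key words. Below is a minimal illustrative sketch of that idea using the matching C intrinsic _mm_alignr_epi8; it is not taken from any of the files above, and the function and buffer names (load_unaligned_via_palignr, buf, out) are hypothetical. Build with SSSE3 enabled (e.g. gcc/clang -mssse3).

    #include <stdint.h>
    #include <string.h>
    #include <tmmintrin.h>  /* SSSE3: _mm_alignr_epi8 compiles to palignr */

    /* Hypothetical sketch: produce the 16 bytes starting at an unaligned
     * offset of 1 by combining two aligned 16-byte loads with palignr,
     * the same idea the ssse3-memcpy/strcmp routines above rely on.
     * `base` must point to at least 32 readable, 16-byte-aligned bytes. */
    static __m128i load_unaligned_via_palignr(const uint8_t *base)
    {
        __m128i lo = _mm_load_si128((const __m128i *)base);        /* bytes 0..15  */
        __m128i hi = _mm_load_si128((const __m128i *)(base + 16)); /* bytes 16..31 */

        /* palignr $1, lo, hi: concatenate hi:lo, shift right one byte,
         * keep the low 128 bits -> bytes 1..16 of the buffer. */
        return _mm_alignr_epi8(hi, lo, 1);
    }

    int main(void)
    {
        /* 32 aligned input bytes 0, 1, 2, ..., 31. */
        _Alignas(16) uint8_t buf[32];
        for (int i = 0; i < 32; i++)
            buf[i] = (uint8_t)i;

        uint8_t out[16];
        __m128i v = load_unaligned_via_palignr(buf);
        memcpy(out, &v, 16);

        /* out now holds bytes 1..16: the unaligned 16-byte window at
         * offset 1, assembled from two aligned loads. */
        return (out[0] == 1 && out[15] == 16) ? 0 : 1;
    }

With a 1-byte shift this mirrors the palignr $1 pattern in ssse3-memcpy5.S above; the immediate varies with the source misalignment, which is why the strcmp/memcmp hits step through $1, $2, $3, and so on.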
