Home | Sort by relevance | Sort by last modified time
    Searched refs:palignr (Results 1 - 16 of 16) sorted by null

  /bionic/libc/arch-x86/string/
ssse3-memcpy5.S 451 palignr $1, %xmm4, %xmm5
452 palignr $1, %xmm3, %xmm4
454 palignr $1, %xmm2, %xmm3
456 palignr $1, %xmm1, %xmm2
471 palignr $1, %xmm2, %xmm3
472 palignr $1, %xmm1, %xmm2
494 palignr $1, %xmm2, %xmm3
495 palignr $1, %xmm1, %xmm2
505 palignr $1, %xmm2, %xmm3
506 palignr $1, %xmm4, %xmm
    [all...]
ssse3-memcmp3-new.S 279 palignr $1,(%esi), %xmm1
283 palignr $1,%xmm2, %xmm3
304 palignr $1,(%esi), %xmm0
308 palignr $1,16(%esi), %xmm3
318 palignr $1,48(%esi), %xmm3
321 palignr $1,32(%esi), %xmm0
362 palignr $2,(%esi), %xmm1
366 palignr $2,%xmm2, %xmm3
387 palignr $2,(%esi), %xmm0
391 palignr $2,16(%esi), %xmm
    [all...]
ssse3-strcmp-latest.S 349 palignr $1, %xmm3, %xmm2
373 palignr $1, %xmm3, %xmm2
455 palignr $2, %xmm3, %xmm2
479 palignr $2, %xmm3, %xmm2
562 palignr $3, %xmm3, %xmm2
586 palignr $3, %xmm3, %xmm2
668 palignr $4, %xmm3, %xmm2
693 palignr $4, %xmm3, %xmm2
777 palignr $5, %xmm3, %xmm2
801 palignr $5, %xmm3, %xmm
    [all...]
  /external/libvpx/vp8/encoder/x86/
sad_ssse3.asm 62 palignr xmm5, xmm4, %2
65 palignr xmm6, xmm4, (%2+1)
67 palignr xmm7, xmm4, (%2+2)
78 palignr xmm1, xmm4, %2
81 palignr xmm2, xmm4, (%2+1)
83 palignr xmm3, xmm4, (%2+2)
98 palignr xmm1, xmm4, %2
101 palignr xmm2, xmm4, (%2+1)
103 palignr xmm3, xmm4, (%2+2)
  /external/libyuv/files/source/
rotate.cc 92 palignr xmm1, xmm1, 8
98 palignr xmm3, xmm3, 8
104 palignr xmm5, xmm5, 8
109 palignr xmm7, xmm7, 8
115 palignr xmm2, xmm2, 8
116 palignr xmm3, xmm3, 8
121 palignr xmm6, xmm6, 8
122 palignr xmm7, xmm7, 8
128 palignr xmm4, xmm4, 8
133 palignr xmm6, xmm6,
    [all...]
row_win.cc 385 palignr xmm2, xmm1, 8 // xmm2 = { xmm3[0:3] xmm1[8:15]}
388 palignr xmm1, xmm0, 12 // xmm1 = { xmm3[0:7] xmm0[12:15]}
395 palignr xmm3, xmm3, 4 // xmm3 = { xmm3[4:15]}
424 palignr xmm2, xmm1, 8 // xmm2 = { xmm3[0:3] xmm1[8:15]}
427 palignr xmm1, xmm0, 12 // xmm1 = { xmm3[0:7] xmm0[12:15]}
434 palignr xmm3, xmm3, 4 // xmm3 = { xmm3[4:15]}
scale.cc 556 palignr xmm1, xmm0, 8
    [all...]
  /external/openssl/crypto/aes/asm/
vpaes-x86.pl 327 &palignr("xmm5","xmm5",12);
445 &palignr("xmm0","xmm6",8);
577 &palignr("xmm1","xmm2",15);
578 &palignr("xmm2","xmm2",15);
583 &palignr("xmm0","xmm0",1);
vpaes-x86_64.pl 235 palignr \$12, %xmm5, %xmm5
361 palignr \$8,%xmm6,%xmm0
499 palignr \$15, %xmm8, %xmm1
500 palignr \$15, %xmm8, %xmm8
505 palignr \$1, %xmm0, %xmm0
aesni-sha1-x86_64.pl 261 &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]"
341 &palignr(@Tx[0],@X[-2&7],8); # compose "X[-6]"
  /external/openssl/crypto/perlasm/
x86asm.pl 110 sub ::palignr
115 { &::generic("palignr",@_); }
x86_64-xlate.pl 736 my $palignr = sub {
    [all...]
  /external/qemu/target-i386/
ops_sse_header.h 280 DEF_HELPER_3(glue(palignr, SUFFIX), void, Reg, Reg, s32)
translate.c     [all...]
  /external/openssl/crypto/sha/asm/
sha1-586.pl 533 &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]"
618 &palignr(@X[2],@X[-2&7],8); # compose "X[-6]"
    [all...]
sha1-x86_64.pl 379 &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]"
459 &palignr(@Tx[0],@X[-2&7],8); # compose "X[-6]"

Completed in 235 milliseconds