/external/valgrind/none/tests/amd64/
insn_ssse3.def
  49  palignr imm8[0] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0xffeeddccbbaa9988]
  50  palignr imm8[1] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x34ffeeddccbbaa99]
  51  palignr imm8[2] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x1134ffeeddccbbaa]
  52  palignr imm8[3] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x221134ffeeddccbb]
  53  palignr imm8[4] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x33221134ffeeddcc]
  54  palignr imm8[5] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x4433221134ffeedd]
  55  palignr imm8[6] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x554433221134ffee]
  56  palignr imm8[7] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x66554433221134ff]
  57  palignr imm8[8] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x7766554433221134]
  58  palignr imm8[9] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x77665544332211 [all...]

/external/valgrind/none/tests/x86/
insn_ssse3.def
  49  palignr imm8[0] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0xffeeddccbbaa9988]
  50  palignr imm8[1] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x34ffeeddccbbaa99]
  51  palignr imm8[2] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x1134ffeeddccbbaa]
  52  palignr imm8[3] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x221134ffeeddccbb]
  53  palignr imm8[4] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x33221134ffeeddcc]
  54  palignr imm8[5] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x4433221134ffeedd]
  55  palignr imm8[6] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x554433221134ffee]
  56  palignr imm8[7] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x66554433221134ff]
  57  palignr imm8[8] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x7766554433221134]
  58  palignr imm8[9] mm.uq[0xFFEEDDCCBBAA9988] mm.uq[0x7766554433221134] => 2.uq[0x77665544332211 [all...]
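
Taken together, these expected values pin down the PALIGNR semantics the tests exercise: the destination operand (the second mm.uq listed) is concatenated above the source operand, the combined value is shifted right by imm8 bytes with zero fill, and the low half is kept (8 bytes for the MMX form tested here, 16 for the XMM form). A minimal, self-contained cross-check of the 128-bit form against a plain-C reference; this is a throwaway sketch, not part of the Valgrind harness (build with -mssse3):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <tmmintrin.h>   /* SSSE3: _mm_alignr_epi8 */

/* Plain-C model of PALIGNR: dst in the high 16 bytes, src in the low 16,
 * shift the 32-byte value right by imm bytes (zero fill), keep the low 16. */
static void reference_palignr(uint8_t out[16], const uint8_t dst[16],
                              const uint8_t src[16], int imm) {
    uint8_t cat[32];
    memcpy(cat, src, 16);
    memcpy(cat + 16, dst, 16);
    for (int i = 0; i < 16; i++)
        out[i] = (imm + i < 32) ? cat[imm + i] : 0;
}

int main(void) {
    uint8_t dst[16], src[16], ref[16], got[16];
    for (int i = 0; i < 16; i++) { dst[i] = (uint8_t)(0x10 + i); src[i] = (uint8_t)(0xA0 + i); }

    __m128i a = _mm_loadu_si128((const __m128i *)dst);
    __m128i b = _mm_loadu_si128((const __m128i *)src);
    __m128i r = _mm_alignr_epi8(a, b, 5);        /* imm8 = 5, must be a compile-time constant */
    _mm_storeu_si128((__m128i *)got, r);

    reference_palignr(ref, dst, src, 5);
    printf("palignr matches reference: %s\n", memcmp(ref, got, 16) == 0 ? "yes" : "no");
    return 0;
}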

/external/clang/test/CodeGen/
palignr.c
   7  // CHECK: palignr $15, %xmm1, %xmm0
  11  // CHECK-NOT: palignr
  21  // CHECK: palignr
  24  // CHECK: palignr
  27  // CHECK: palignr
  30  // CHECK: palignr
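
A plausible reading of the CHECK / CHECK-NOT mix above: _mm_alignr_epi8(a, b, n) only needs a real PALIGNR for 0 < n < 16. n == 0 is just b, n == 16 is just a, 16 < n < 32 reduces to a plain byte shift of a (PSRLDQ), and n >= 32 is all zeros, so some call sites can legitimately compile to no palignr at all. A small sketch under that assumption (function names are mine, not from the test; build with -mssse3):

#include <tmmintrin.h>   /* SSSE3 */

/* Expected to compile to a single palignr $15. */
__m128i needs_palignr(__m128i a, __m128i b) { return _mm_alignr_epi8(a, b, 15); }

/* Semantically (a:b) >> 20 bytes == a >> 4 bytes, so a compiler is free to
 * emit psrldq $4 here instead of palignr. */
__m128i no_palignr_needed(__m128i a, __m128i b) { return _mm_alignr_epi8(a, b, 20); }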

/external/libvpx/libvpx/vpx_dsp/x86/
intrapred_ssse3.asm
  241  palignr m5, m4, m0, 1
  242  palignr m6, m4, m0, 2
  259  palignr m3, m4, m5, 1
  264  palignr m5, m4, m3, 1
  269  palignr m3, m4, m5, 1
  274  palignr m5, m4, m3, 1
  423  palignr m6, m7, m0, 1
  424  palignr m5, m7, m0, 2
  436  palignr m3, m7, m0, 1
  437  palignr m5, m4, m2, [all...]

sad_ssse3.asm
   62  palignr xmm5, xmm4, %2
   65  palignr xmm6, xmm4, (%2+1)
   67  palignr xmm7, xmm4, (%2+2)
   78  palignr xmm1, xmm4, %2
   81  palignr xmm2, xmm4, (%2+1)
   83  palignr xmm3, xmm4, (%2+2)
   98  palignr xmm1, xmm4, %2
  101  palignr xmm2, xmm4, (%2+1)
  103  palignr xmm3, xmm4, (%2+2)

vpx_subpixel_8t_ssse3.asm
   75  palignr %2, %1, 1
   76  palignr m3, %1, 5
  151  palignr m4, m0, 1
  153  palignr m1, m0, 5
  156  palignr m7, m2, 1
  158  palignr m3, m2, 5
  234  palignr %2, %1, 1
  235  palignr %3, %1, 5
  236  palignr %4, %1, 9
  237  palignr %5, %1, 1 [all...]
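
In the SAD and subpixel-filter kernels above, palignr carves byte-shifted 16-byte windows out of a register pair: sad_ssse3.asm builds candidate windows at offsets %2, %2+1 and %2+2 in one pass, and the 8-tap filter pulls taps at offsets 1 and 5. A rough intrinsics sketch of the SAD pattern (names and offsets are mine, not libvpx code; the immediates must be compile-time constants, which is why the asm expands them from macro parameters):

#include <emmintrin.h>   /* SSE2: _mm_sad_epu8 */
#include <tmmintrin.h>   /* SSSE3: _mm_alignr_epi8; build with -mssse3 */

/* Sum the two 64-bit partial SADs produced by PSADBW. */
static inline unsigned sad16(__m128i a, __m128i b) {
    __m128i s = _mm_sad_epu8(a, b);
    return (unsigned)_mm_cvtsi128_si32(s) + (unsigned)_mm_extract_epi16(s, 4);
}

/* SAD of one 16-byte source block against the reference windows that start
 * 4, 5 and 6 bytes into the 32-byte pair {ref_lo, ref_hi}. */
void sad_three_offsets(__m128i src, __m128i ref_lo, __m128i ref_hi, unsigned out[3]) {
    out[0] = sad16(src, _mm_alignr_epi8(ref_hi, ref_lo, 4));
    out[1] = sad16(src, _mm_alignr_epi8(ref_hi, ref_lo, 5));
    out[2] = sad16(src, _mm_alignr_epi8(ref_hi, ref_lo, 6));
}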

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_intrapred_ssse3.asm
  241  palignr m5, m4, m0, 1
  242  palignr m6, m4, m0, 2
  259  palignr m3, m4, m5, 1
  264  palignr m5, m4, m3, 1
  269  palignr m3, m4, m5, 1
  274  palignr m5, m4, m3, 1
  423  palignr m6, m7, m0, 1
  424  palignr m5, m7, m0, 2
  436  palignr m3, m7, m0, 1
  437  palignr m5, m4, m2, [all...]

/bionic/libc/arch-x86/atom/string/
ssse3-memcpy-atom.S
  452  palignr $1, %xmm4, %xmm5
  453  palignr $1, %xmm3, %xmm4
  455  palignr $1, %xmm2, %xmm3
  457  palignr $1, %xmm1, %xmm2
  472  palignr $1, %xmm2, %xmm3
  473  palignr $1, %xmm1, %xmm2
  495  palignr $1, %xmm2, %xmm3
  496  palignr $1, %xmm1, %xmm2
  506  palignr $1, %xmm2, %xmm3
  507  palignr $1, %xmm4, %xmm [all...]

ssse3-strcpy-atom.S
  433  palignr $1, %xmm1, %xmm2
  450  palignr $1, %xmm1, %xmm2
  467  palignr $1, %xmm1, %xmm2
  482  palignr $1, %xmm3, %xmm2
  510  palignr $1, %xmm4, %xmm5
  511  palignr $1, %xmm3, %xmm4
  518  palignr $1, %xmm2, %xmm3
  520  palignr $1, %xmm1, %xmm2
  552  palignr $2, %xmm1, %xmm2
  569  palignr $2, %xmm1, %xmm [all...]

ssse3-wcscpy-atom.S
  271  palignr $4, %xmm1, %xmm2
  284  palignr $4, %xmm3, %xmm2
  297  palignr $4, %xmm1, %xmm2
  309  palignr $4, %xmm3, %xmm2
  335  palignr $4, %xmm4, %xmm5
  336  palignr $4, %xmm3, %xmm4
  340  palignr $4, %xmm2, %xmm3
  342  palignr $4, %xmm1, %xmm2
  382  palignr $8, %xmm1, %xmm2
  395  palignr $8, %xmm3, %xmm [all...]

ssse3-memcmp-atom.S
  323  palignr $1,(%esi), %xmm1
  327  palignr $1,%xmm2, %xmm3
  348  palignr $1,(%esi), %xmm0
  352  palignr $1,16(%esi), %xmm3
  362  palignr $1,48(%esi), %xmm3
  365  palignr $1,32(%esi), %xmm0
  409  palignr $2,(%esi), %xmm1
  413  palignr $2,%xmm2, %xmm3
  434  palignr $2,(%esi), %xmm0
  438  palignr $2,16(%esi), %xmm [all...]
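
The bionic string/memory routines use palignr to realign a misaligned source: each loop does aligned 16-byte loads and stitches consecutive chunks together with a fixed immediate, and there is one copy of the loop per misalignment (hence the separate $1, $2, $4, $8 blocks), because the shift must be an immediate. A stripped-down sketch of the idea for a shift of 1 (my own helper, not the bionic code; setup and tail handling omitted, build with -mssse3):

#include <stddef.h>
#include <stdint.h>
#include <tmmintrin.h>

/* Copy len16 * 16 bytes that start at src_aligned + 1 into the 16-byte
 * aligned buffer dst, using only aligned loads.  Assumes the aligned chunk
 * at src_aligned + 16 * len16 is readable (the real routines peel the tail). */
void copy_shift1(uint8_t *dst, const uint8_t *src_aligned, size_t len16) {
    __m128i prev = _mm_load_si128((const __m128i *)src_aligned);
    for (size_t i = 0; i < len16; i++) {
        __m128i next = _mm_load_si128((const __m128i *)(src_aligned + 16 * (i + 1)));
        /* bytes 1..15 of prev followed by byte 0 of next: the unaligned view */
        __m128i out = _mm_alignr_epi8(next, prev, 1);
        _mm_store_si128((__m128i *)(dst + 16 * i), out);
        prev = next;
    }
}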

/external/libvpx/libvpx/third_party/libyuv/source/
rotate_win.cc
  47  palignr xmm1, xmm1, 8
  53  palignr xmm3, xmm3, 8
  59  palignr xmm5, xmm5, 8
  64  palignr xmm7, xmm7, 8
  70  palignr xmm2, xmm2, 8
  71  palignr xmm3, xmm3, 8
  76  palignr xmm6, xmm6, 8
  77  palignr xmm7, xmm7, 8
  83  palignr xmm4, xmm4, 8
  88  palignr xmm6, xmm6, [all...]

rotate_gcc.cc
  37  "palignr $0x8,%%xmm1,%%xmm1 \n"
  43  "palignr $0x8,%%xmm3,%%xmm3 \n"
  49  "palignr $0x8,%%xmm5,%%xmm5 \n"
  56  "palignr $0x8,%%xmm7,%%xmm7 \n"
  63  "palignr $0x8,%%xmm2,%%xmm2 \n"
  64  "palignr $0x8,%%xmm3,%%xmm3 \n"
  69  "palignr $0x8,%%xmm6,%%xmm6 \n"
  70  "palignr $0x8,%%xmm7,%%xmm7 \n"
  76  "palignr $0x8,%%xmm4,%%xmm4 \n"
  82  "palignr $0x8,%%xmm6,%%xmm6 \n [all...]
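
Every palignr in these libyuv transpose kernels uses the same register for both operands with an immediate of 8, which rotates the 128-bit value by 8 bytes, i.e. swaps its two 64-bit halves so the high qword can be handled from the low position. In intrinsics (a sketch, not the libyuv source, build with -mssse3) that is:

#include <tmmintrin.h>

/* (x:x) >> 8 bytes: the high qword of x moves to the low half and vice
 * versa; equivalent to _mm_shuffle_epi32(x, _MM_SHUFFLE(1, 0, 3, 2)). */
static inline __m128i swap_qwords(__m128i x) {
    return _mm_alignr_epi8(x, x, 8);
}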

/toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/
ssse3.s
  53  palignr $0x2,(%ecx),%mm0
  54  palignr $0x2,%mm1,%mm0
  55  palignr $0x2,(%ecx),%xmm0
  56  palignr $0x2,%xmm1,%xmm0

x86-64-ssse3.s
  53  palignr $0x2,(%rcx),%mm0
  54  palignr $0x2,%mm1,%mm0
  55  palignr $0x2,(%rcx),%xmm0
  56  palignr $0x2,%xmm1,%xmm0

ssse3.d
  57  d8: 0f 3a 0f 01 02[ ]+palignr \$0x2,\(%ecx\),%mm0
  58  dd: 0f 3a 0f c1 02[ ]+palignr \$0x2,%mm1,%mm0
  59  e2: 66 0f 3a 0f 01 02[ ]+palignr \$0x2,\(%ecx\),%xmm0
  60  e8: 66 0f 3a 0f c1 02[ ]+palignr \$0x2,%xmm1,%xmm0

x86-64-ssse3.d
  57  d8: 0f 3a 0f 01 02[ ]+palignr \$0x2,\(%rcx\),%mm0
  58  dd: 0f 3a 0f c1 02[ ]+palignr \$0x2,%mm1,%mm0
  59  e2: 66 0f 3a 0f 01 02[ ]+palignr \$0x2,\(%rcx\),%xmm0
  60  e8: 66 0f 3a 0f c1 02[ ]+palignr \$0x2,%xmm1,%xmm0

sse-noavx.s
  27  palignr $0x2,%mm1,%mm0

x86-64-sse-noavx.s
  28  palignr $0x2,%mm1,%mm0

/art/runtime/arch/x86/
memcmp16_x86.S
  191  palignr $2,(%esi), %xmm1
  195  palignr $2,%xmm2, %xmm3
  216  palignr $2,(%esi), %xmm0
  220  palignr $2,16(%esi), %xmm3
  230  palignr $2,48(%esi), %xmm3
  233  palignr $2,32(%esi), %xmm0
  274  palignr $4,(%esi), %xmm1
  278  palignr $4,%xmm2, %xmm3
  299  palignr $4,(%esi), %xmm0
  303  palignr $4,16(%esi), %xmm [all...]

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
sad_ssse3.asm
   62  palignr xmm5, xmm4, %2
   65  palignr xmm6, xmm4, (%2+1)
   67  palignr xmm7, xmm4, (%2+2)
   78  palignr xmm1, xmm4, %2
   81  palignr xmm2, xmm4, (%2+1)
   83  palignr xmm3, xmm4, (%2+2)
   98  palignr xmm1, xmm4, %2
  101  palignr xmm2, xmm4, (%2+1)
  103  palignr xmm3, xmm4, (%2+2)

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_sad_ssse3.asm
   62  palignr xmm5, xmm4, %2
   65  palignr xmm6, xmm4, (%2+1)
   67  palignr xmm7, xmm4, (%2+2)
   78  palignr xmm1, xmm4, %2
   81  palignr xmm2, xmm4, (%2+1)
   83  palignr xmm3, xmm4, (%2+2)
   98  palignr xmm1, xmm4, %2
  101  palignr xmm2, xmm4, (%2+1)
  103  palignr xmm3, xmm4, (%2+2)

/external/libyuv/files/source/
rotate.cc
   87  palignr xmm1, xmm1, 8
   93  palignr xmm3, xmm3, 8
   99  palignr xmm5, xmm5, 8
  104  palignr xmm7, xmm7, 8
  110  palignr xmm2, xmm2, 8
  111  palignr xmm3, xmm3, 8
  116  palignr xmm6, xmm6, 8
  117  palignr xmm7, xmm7, 8
  123  palignr xmm4, xmm4, 8
  128  palignr xmm6, xmm6, [all...]

/external/llvm/test/CodeGen/X86/
palignr.ll
   22  ; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
   38  ; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
   55  ; CHECK-NEXT: palignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
   86  ; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
  103  ; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
  120  ; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
  134  ; Check that we don't do unary (circular on single operand) palignr incorrectly.
  136  ; incorrectly. In particular, one of the operands of the palignr node
  141  ; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
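
These IR tests check that byte-rotation shuffles are selected as a single palignr when SSSE3 is available. At the C level, a shuffle with the same pattern as the first CHECK line (elements 4..15 of one vector followed by 0..3 of the other) would typically lower the same way under -mssse3; a sketch using clang's vector extensions (the type and function name are mine, not from the test):

typedef unsigned char v16u8 __attribute__((vector_size(16)));

/* a[4..15] followed by b[0..3]: the byte-rotation pattern that the SSSE3
 * backend can implement with one palignr. */
v16u8 rotate_by_4(v16u8 a, v16u8 b) {
    return __builtin_shufflevector(a, b,
        4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19);
}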

/toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/ilp32/
x86-64-ssse3.d
  58  d8: 0f 3a 0f 01 02[ ]+palignr \$0x2,\(%rcx\),%mm0
  59  dd: 0f 3a 0f c1 02[ ]+palignr \$0x2,%mm1,%mm0
  60  e2: 66 0f 3a 0f 01 02[ ]+palignr \$0x2,\(%rcx\),%xmm0
  61  e8: 66 0f 3a 0f c1 02[ ]+palignr \$0x2,%xmm1,%xmm0