    Searched full:xmm2 (Results 226 - 250 of 733)


  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/
x86-64-avx2.s 57 vpsllvd %xmm4,%xmm6,%xmm2
59 vpsllvq %xmm4,%xmm6,%xmm2
61 vpsravd %xmm4,%xmm6,%xmm2
63 vpsrlvd %xmm4,%xmm6,%xmm2
65 vpsrlvq %xmm4,%xmm6,%xmm2
81 vpblendd $7,%xmm4,%xmm6,%xmm2
82 vpblendd $7,(%rcx),%xmm6,%xmm2
189 vpsllvd xmm2,xmm6,xmm4
192 vpsllvq xmm2,xmm6,xmm4
195 vpsravd xmm2,xmm6,xmm4
    [all...]
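
The x86-64-avx2.s entries exercise AVX2's per-lane variable shifts: vpsllvd/vpsllvq shift left, vpsravd shifts right arithmetically, and vpsrlvd/vpsrlvq shift right logically, each lane by the count held in the matching lane of the count operand. A minimal intrinsics sketch of the vpsllvd case (the wrapper name is illustrative, not from the testsuite):

    #include <immintrin.h>

    // vpsllvd (AVX2): each 32-bit lane of v shifted left by the count
    // in the matching lane of counts; counts >= 32 zero the lane
    // rather than being taken mod 32.
    __m128i shift_left_per_lane(__m128i v, __m128i counts) {
      return _mm_sllv_epi32(v, counts);
    }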
katmai.d 10 3: 0f 58 ca [ ]*addps %xmm2,%xmm1
11 6: f3 0f 58 13 [ ]*addss \(%ebx\),%xmm2
19 23: f3 0f c2 d2 04 [ ]*cmpneqss %xmm2,%xmm2
27 48: f3 0f c2 d2 00 [ ]*cmpeqss %xmm2,%xmm2
34 69: 0f c2 ca 02 [ ]*cmpleps %xmm2,%xmm1
35 6d: f3 0f c2 13 02 [ ]*cmpless \(%ebx\),%xmm2
43 92: f3 0f c2 d2 04 [ ]*cmpneqss %xmm2,%xmm2
    [all...]
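
katmai.d checks decoding of the SSE compare-predicate immediates: the trailing imm8 selects the relation, so 00 disassembles as cmpeqss, 02 as cmpless, and 04 as cmpneqss. The same predicate through the intrinsic interface (a sketch; lane 0 becomes all-ones on true, all-zeros on false):

    #include <xmmintrin.h>  // SSE

    // cmpneqss == cmpss with predicate imm8 = 4: compares only lane 0,
    // leaving lanes 1-3 of a unchanged.
    __m128 compare_neq_scalar(__m128 a, __m128 b) {
      return _mm_cmpneq_ss(a, b);
    }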
  /external/libvpx/libvpx/third_party/libyuv/source/
row_gcc.cc 279 "movdqa %%xmm3,%%xmm2 \n"
280 "palignr $0x8,%%xmm1,%%xmm2 \n"
281 "pshufb %%xmm4,%%xmm2 \n"
282 "por %%xmm5,%%xmm2 \n"
285 "movdqu %%xmm2," MEMACCESS2(0x20,1) " \n"
302 : "memory", "cc" , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
317 "movdqa %%xmm3,%%xmm2 \n"
318 "palignr $0x8,%%xmm1,%%xmm2 \n"
319 "pshufb %%xmm4,%%xmm2 \n"
320 "por %%xmm5,%%xmm2 \n
    [all...]
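
These libyuv rows repack 24-bit pixels into 32-bit slots: palignr stitches bytes across two loads, pshufb scatters each 3-byte pixel into a 4-byte lane, and por merges in the constant alpha held in xmm5. A hedged sketch of the shuffle step (this mask is illustrative, not libyuv's actual shuffle constant):

    #include <tmmintrin.h>  // SSSE3

    // pshufb: output byte i takes source byte mask[i]; a mask byte with
    // the high bit set (-128) writes zero, leaving room for the alpha
    // byte that por fills in afterwards.
    __m128i expand_rgb24_low(__m128i rgb, __m128i alpha) {
      const __m128i mask = _mm_setr_epi8(0, 1, 2, -128, 3, 4, 5, -128,
                                         6, 7, 8, -128, 9, 10, 11, -128);
      return _mm_or_si128(_mm_shuffle_epi8(rgb, mask), alpha);
    }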
rotate_gcc.cc 35 "movq (%0),%%xmm2 \n"
40 "punpcklbw %%xmm3,%%xmm2 \n"
41 "movdqa %%xmm2,%%xmm3 \n"
59 "punpcklwd %%xmm2,%%xmm0 \n"
61 "movdqa %%xmm0,%%xmm2 \n"
63 "palignr $0x8,%%xmm2,%%xmm2 \n"
79 "punpckldq %%xmm6,%%xmm2 \n"
80 "movdqa %%xmm2,%%xmm6 \n"
81 "movq %%xmm2,(%1) \n
    [all...]
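
rotate_gcc.cc assembles a byte transpose entirely from interleaves: punpcklbw pairs the bytes of adjacent rows, punpcklwd pairs the resulting words, punpckldq the dwords, and palignr with the same register in both slots rotates the upper half down. The first stage as an intrinsic (a sketch):

    #include <emmintrin.h>  // SSE2

    // punpcklbw: low 8 bytes of two rows interleaved,
    // a0 a1 a2 ... / b0 b1 b2 ...  ->  a0 b0 a1 b1 a2 b2 ...
    __m128i interleave_low_bytes(__m128i row_a, __m128i row_b) {
      return _mm_unpacklo_epi8(row_a, row_b);
    }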
  /external/llvm/test/CodeGen/X86/
vec_int_to_fp.ll 166 ; SSE-NEXT: cvtsi2sdq %rax, %xmm2
171 ; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm0[0]
179 ; SSE-NEXT: movapd %xmm2, %xmm0
187 ; AVX1-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2
190 ; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
192 ; AVX1-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2
196 ; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
204 ; AVX2-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2
207 ; AVX2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
    [all...]
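
vec_int_to_fp.ll documents that SSE2/AVX have no packed int64-to-double conversion, so the lowering bounces each lane through a general-purpose register and scalar cvtsi2sdq, then rebuilds the vector with unpcklpd. The equivalent in intrinsics (a sketch; x86-64 only, since _mm_cvtsi128_si64 needs 64-bit GPRs):

    #include <emmintrin.h>  // SSE2

    // Convert both signed 64-bit lanes to double the way the checked
    // codegen does: extract, scalar-convert, repack.
    __m128d sitofp_v2i64(__m128i v) {
      long long lo = _mm_cvtsi128_si64(v);
      long long hi = _mm_cvtsi128_si64(_mm_unpackhi_epi64(v, v));
      return _mm_set_pd((double)hi, (double)lo);  // high lane, low lane
    }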
avx512-build-vector.ll 17 ; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
18 ; CHECK-NEXT: vmovss %xmm0, %xmm2, %xmm0
19 ; CHECK-NEXT: vmovss %xmm1, %xmm2, %xmm1
vec_cmp_sint-128.ll 231 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
232 ; SSE2-NEXT: pxor %xmm2, %xmm0
233 ; SSE2-NEXT: pxor %xmm2, %xmm1
234 ; SSE2-NEXT: movdqa %xmm1, %xmm2
235 ; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
236 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
240 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
248 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
249 ; SSE41-NEXT: pxor %xmm2, %xmm0
250 ; SSE41-NEXT: pxor %xmm2, %xmm1
    [all...]
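
Pre-SSE4.2 there is no pcmpgtq, so these SSE2/SSE4.1 paths synthesize the 64-bit signed compare from 32-bit pieces: the pxor with [2147483648,0,...] flips the sign bit of each low dword so pcmpgtd orders the low halves as unsigned values, and the pshufd shuffles merge that with the signed ordering of the high halves. The sign-flip building block in isolation (a sketch):

    #include <emmintrin.h>  // SSE2

    // pcmpgtd is signed-only; biasing both operands by 0x80000000
    // turns it into an unsigned 32-bit greater-than.
    __m128i cmpgt_epu32(__m128i a, __m128i b) {
      const __m128i bias = _mm_set1_epi32((int)0x80000000);
      return _mm_cmpgt_epi32(_mm_xor_si128(a, bias),
                             _mm_xor_si128(b, bias));
    }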
fma_patterns.ll 14 ; FMA-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0
19 ; FMA4-NEXT: vfmaddss %xmm2, %xmm1, %xmm0, %xmm0
24 ; AVX512-NEXT: vfmadd213ss %xmm2, %xmm0, %xmm1
35 ; FMA-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0
40 ; FMA4-NEXT: vfmaddps %xmm2, %xmm1, %xmm0, %xmm0
45 ; AVX512-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0
75 ; FMA-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0
80 ; FMA4-NEXT: vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0
85 ; AVX512-NEXT: vfmadd213sd %xmm2, %xmm0, %xmm1
96 ; FMA-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0
    [all...]
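
fma_patterns.ll verifies that a separate multiply and add contract into one fused instruction. In the 213 form the digits name the operand roles, so vfmadd213ps %xmm2, %xmm1, %xmm0 computes xmm0 = xmm1*xmm0 + xmm2 with a single rounding. The source-level pattern, made explicit with the FMA intrinsic (a sketch):

    #include <immintrin.h>

    // a*b + c fused and rounded once; with FMA enabled this is the
    // vfmadd213ps the test expects.
    __m128 fmadd4(__m128 a, __m128 b, __m128 c) {
      return _mm_fmadd_ps(a, b, c);
    }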
  /external/compiler-rt/lib/builtins/i386/
ashrdi3.S 14 movd 12(%esp), %xmm2 // Load count
24 psrlq %xmm2, %xmm0 // unsigned shift input by count
33 pandn %xmm1, %xmm2 // 63 - count
35 psubq %xmm1, %xmm2 // 64 - count
36 psllq %xmm2, %xmm1 // -1 << (64 - count) = leading sign bits
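
The compiler-rt helper performs a 64-bit arithmetic shift right with only SSE2's logical shifts: psrlq shifts in zeros, then, per the comments, -1 << (64 - count) supplies the leading sign bits to combine back in. The identity in portable C (a sketch assuming 0 < count < 64; the real routine computes the mask branchlessly in xmm registers):

    // 64-bit arithmetic shift right built from a logical one, as in
    // ashrdi3.S: shift in zeros, then OR in "-1 << (64 - count)"
    // copies of the sign bit for negative inputs.
    long long ashrdi3_sketch(long long x, int count) {
      unsigned long long u = (unsigned long long)x >> count;
      if (x < 0)
        u |= ~0ULL << (64 - count);  // leading sign bits
      return (long long)u;
    }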
  /external/libyuv/files/source/
row_gcc.cc 228 "movdqa %%xmm3,%%xmm2 \n"
229 "palignr $0x8,%%xmm1,%%xmm2 \n"
230 "pshufb %%xmm4,%%xmm2 \n"
231 "por %%xmm5,%%xmm2 \n"
234 "movdqu %%xmm2," MEMACCESS2(0x20,1) " \n"
251 : "memory", "cc" , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
266 "movdqa %%xmm3,%%xmm2 \n"
267 "palignr $0x8,%%xmm1,%%xmm2 \n"
268 "pshufb %%xmm4,%%xmm2 \n"
269 "por %%xmm5,%%xmm2 \n
    [all...]
scale_gcc.cc 164 MEMOPREG(movdqu,0x00,0,3,1,xmm2) // movdqu (%0,%3,1),%%xmm2
169 "pmaddubsw %%xmm4,%%xmm2 \n"
171 "paddw %%xmm2,%%xmm0 \n"
187 "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
284 "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
332 MEMOPREG(movdqu,0x00,0,4,1,xmm2) // movdqu (%0,%4,1),%%xmm2
336 "pmaddubsw %%xmm4,%%xmm2 \n"
338 "paddw %%xmm2,%%xmm0 \n
    [all...]
  /external/valgrind/VEX/test/
fxsave.c 39 asm __volatile__("movups vecZ, %xmm2");
63 asm __volatile__("xorps %xmm2, %xmm2");
64 asm __volatile__("movaps %xmm2, %xmm3");
65 asm __volatile__("movaps %xmm2, %xmm4");
66 asm __volatile__("movaps %xmm2, %xmm5");
67 asm __volatile__("movaps %xmm2, %xmm6");
  /external/valgrind/memcheck/tests/x86/
fxsave.c 40 asm __volatile__("movups " VG_SYM(vecZ) ", %xmm2");
64 asm __volatile__("xorps %xmm2, %xmm2");
65 asm __volatile__("movaps %xmm2, %xmm3");
66 asm __volatile__("movaps %xmm2, %xmm4");
67 asm __volatile__("movaps %xmm2, %xmm5");
68 asm __volatile__("movaps %xmm2, %xmm6");
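
Both valgrind fxsave tests put the register file into a known state before snapshotting it: xorps %xmm2,%xmm2 is the canonical zeroing idiom (x ^ x == 0, and CPUs treat it as dependency-breaking), and movaps fans the zero out to xmm3-xmm6. The same effect from C (a trivial sketch):

    #include <xmmintrin.h>

    // _mm_setzero_ps typically compiles to the xorps zeroing idiom
    // used by the fxsave tests.
    __m128 zeroed(void) {
      return _mm_setzero_ps();
    }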
  /bionic/libc/arch-x86/silvermont/string/
sse2-strlen-slm.S 116 pxor %xmm2, %xmm2
128 pcmpeqb 48(%eax), %xmm2
129 pmovmskb %xmm2, %ecx
149 pcmpeqb 48(%eax), %xmm2
150 pmovmskb %xmm2, %ecx
170 pcmpeqb 48(%eax), %xmm2
171 pmovmskb %xmm2, %ecx
191 pcmpeqb 48(%eax), %xmm2
192 pmovmskb %xmm2, %ecx
    [all...]
  /bionic/libc/arch-x86_64/string/
sse2-strlen-slm.S 95 pxor %xmm2, %xmm2
107 pcmpeqb 48(%rax), %xmm2
108 pmovmskb %xmm2, %edx
128 pcmpeqb 48(%rax), %xmm2
129 pmovmskb %xmm2, %edx
149 pcmpeqb 48(%rax), %xmm2
150 pmovmskb %xmm2, %edx
170 pcmpeqb 48(%rax), %xmm2
171 pmovmskb %xmm2, %edx
    [all...]
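
Both strlen variants probe 64 bytes per iteration across four xmm registers: pcmpeqb against zero turns each NUL byte into 0xFF, pmovmskb collapses the 16 byte-results into a 16-bit GPR mask, and a nonzero mask ends the loop with a bit scan giving the terminator's offset. One 16-byte probe in intrinsics (a sketch; the real code also handles alignment and the 16/32/48-byte offsets shown above):

    #include <emmintrin.h>  // SSE2

    // Returns a mask with bit i set iff p[i] == '\0'; on a nonzero
    // result, the count of trailing zero bits is the NUL's offset.
    static int nul_mask16(const char* p) {
      __m128i chunk = _mm_loadu_si128((const __m128i*)p);
      __m128i hits = _mm_cmpeq_epi8(chunk, _mm_setzero_si128());
      return _mm_movemask_epi8(hits);
    }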
  /external/libvpx/libvpx/vpx_dsp/x86/
sad_ssse3.asm 27 lddqu xmm2, XMMWORD PTR [rdi+1]
31 psadbw xmm2, xmm0
35 paddw xmm6, xmm2
40 lddqu xmm2, XMMWORD PTR [rdi+rdx+1]
47 psadbw xmm2, xmm0
51 paddw xmm6, xmm2
80 movdqa xmm2, xmm3
81 palignr xmm2, xmm4, (%2+1)
86 psadbw xmm2, xmm0
90 paddw xmm6, xmm2
    [all...]
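
The SAD kernels are built on psadbw, which sums the absolute differences of the 8 byte pairs in each 64-bit half into that half's low word; lddqu plus palignr manufactures the +1-byte-offset reference rows without a second unaligned load per row. The accumulation step in intrinsics (a sketch):

    #include <emmintrin.h>  // SSE2

    // psadbw then paddw, as in the loop above: two partial SADs land
    // in the low 16 bits of each 64-bit half of the accumulator.
    __m128i sad_accumulate(__m128i acc, __m128i src, __m128i ref) {
      return _mm_add_epi16(acc, _mm_sad_epu8(src, ref));
    }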
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
sad_ssse3.asm 27 lddqu xmm2, XMMWORD PTR [rdi+1]
31 psadbw xmm2, xmm0
35 paddw xmm6, xmm2
40 lddqu xmm2, XMMWORD PTR [rdi+rdx+1]
47 psadbw xmm2, xmm0
51 paddw xmm6, xmm2
80 movdqa xmm2, xmm3
81 palignr xmm2, xmm4, (%2+1)
86 psadbw xmm2, xmm0
90 paddw xmm6, xmm2
    [all...]
subpixel_sse2.asm 184 movq xmm2, MMWORD PTR [rsi +14]
185 pslldq xmm2, 8
187 por xmm2, xmm1
244 movdqa xmm3, xmm2
245 movdqa xmm4, xmm2
247 movdqa xmm5, xmm2
248 movdqa xmm6, xmm2
250 movdqa xmm7, xmm2
273 psrldq xmm2, 5 ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03
277 punpcklbw xmm2, xmm0 ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
    [all...]
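
The subpixel filter copies one source row into xmm3-xmm7, byte-shifts each copy with psrldq/pslldq to line up successive filter taps, and widens with punpcklbw against zero so the multiply-accumulate arithmetic runs at 16-bit precision. One alignment-plus-widen step (a sketch, using the shift of 5 from line 273):

    #include <emmintrin.h>  // SSE2

    // psrldq xmm, 5 then punpcklbw with zero: bytes 5..12 of the row
    // become eight zero-extended 16-bit lanes.
    __m128i align_and_widen(__m128i row) {
      __m128i shifted = _mm_srli_si128(row, 5);
      return _mm_unpacklo_epi8(shifted, _mm_setzero_si128());
    }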
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_sad_ssse3.asm 27 lddqu xmm2, XMMWORD PTR [rdi+1]
31 psadbw xmm2, xmm0
35 paddw xmm6, xmm2
40 lddqu xmm2, XMMWORD PTR [rdi+rdx+1]
47 psadbw xmm2, xmm0
51 paddw xmm6, xmm2
80 movdqa xmm2, xmm3
81 palignr xmm2, xmm4, (%2+1)
86 psadbw xmm2, xmm0
90 paddw xmm6, xmm2
    [all...]
  /external/libyuv/files/util/
psnr.cc 127 movdqu xmm2, [eax + edx] local
130 psubusb xmm1, xmm2
131 psubusb xmm2, xmm3 local
132 por xmm1, xmm2
133 movdqu xmm2, xmm1 local
135 punpckhbw xmm2, xmm5 local
137 pmaddwd xmm2, xmm2 local
139 paddd xmm0, xmm2
163 "movdqu (%0,%1,1),%%xmm2 \n
    [all...]
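
psnr.cc accumulates a sum of squared differences: psubusb in both directions ORed together gives |a - b| for unsigned bytes (each saturating subtract zeroes the wrong-sign direction), punpcklbw/punpckhbw widen against zero, and pmaddwd of each widened half with itself squares and pairwise-adds into 32-bit lanes. One 16-byte step in intrinsics (a sketch):

    #include <emmintrin.h>  // SSE2

    __m128i ssd_step(__m128i a, __m128i b, __m128i acc) {
      __m128i d = _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a));
      __m128i zero = _mm_setzero_si128();
      __m128i lo = _mm_unpacklo_epi8(d, zero);  // low 8 diffs -> words
      __m128i hi = _mm_unpackhi_epi8(d, zero);  // high 8 diffs -> words
      acc = _mm_add_epi32(acc, _mm_madd_epi16(lo, lo));  // d^2 pairwise
      return _mm_add_epi32(acc, _mm_madd_epi16(hi, hi));
    }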
  /external/v8/test/cctest/
test-disasm-x64.cc 499 __ vmovss(xmm6, xmm14, xmm2);
503 __ vaddss(xmm0, xmm1, xmm2);
505 __ vmulss(xmm0, xmm1, xmm2);
507 __ vsubss(xmm0, xmm1, xmm2);
509 __ vdivss(xmm0, xmm1, xmm2);
511 __ vminss(xmm8, xmm1, xmm2);
513 __ vmaxss(xmm8, xmm1, xmm2);
527 __ vmovsd(xmm6, xmm14, xmm2);
531 __ vaddsd(xmm0, xmm1, xmm2);
533 __ vmulsd(xmm0, xmm1, xmm2);
    [all...]
test-assembler-ia32.cc 575 __ movaps(xmm2, xmm1);
576 __ addps(xmm2, xmm0);
577 __ mulps(xmm2, xmm1);
578 __ subps(xmm2, xmm0);
579 __ divps(xmm2, xmm1);
580 __ cvttss2si(eax, xmm2);
613 __ movsd(xmm2, Operand(esp, 5 * kPointerSize));
614 // argument in xmm0, xmm1 and xmm2
615 // xmm0 * xmm1 + xmm2
618 __ addsd(xmm3, xmm2); // Expected result in xmm3
650 __ movsd(Operand(esp, 0), xmm2); local
696 __ movsd(Operand(esp, 0), xmm2); local
745 __ movsd(Operand(esp, 0), xmm2); local
794 __ movsd(Operand(esp, 0), xmm2); local
879 __ movss(Operand(esp, 0), xmm2); local
925 __ movss(Operand(esp, 0), xmm2); local
974 __ movss(Operand(esp, 0), xmm2); local
1023 __ movss(Operand(esp, 0), xmm2); local
    [all...]
  /external/libvpx/libvpx/vp8/common/x86/
subpixel_sse2.asm 184 movq xmm2, MMWORD PTR [rsi +14]
185 pslldq xmm2, 8
187 por xmm2, xmm1
244 movdqa xmm3, xmm2
245 movdqa xmm4, xmm2
247 movdqa xmm5, xmm2
248 movdqa xmm6, xmm2
250 movdqa xmm7, xmm2
273 psrldq xmm2, 5 ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03
277 punpcklbw xmm2, xmm0 ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/third_party/libyuv/source/
scale.c 683 movdqa xmm2, [eax + esi] local
686 pavgb xmm0, xmm2 // average rows
689 movdqa xmm2, xmm0 // average columns (32 to 16 pixels) local
693 pand xmm2, xmm5 local
695 pavgw xmm0, xmm2
760 movdqa xmm2, [esi + ebx] local
762 pavgb xmm0, xmm2 // average rows
764 movdqa xmm2, [esi + ebx * 2] local
769 pavgb xmm2, xmm4 local
771 pavgb xmm0, xmm2
774 movdqa xmm2, xmm0 // average columns (32 to 16 pixels) local
778 pand xmm2, xmm7 local
784 movdqa xmm2, xmm0 // average columns (16 to 8 pixels) local
786 pand xmm2, xmm7 local
851 movdqa xmm2, [esi + ebx] local
855 movdqa xmm2, [esi + ebx * 2] local
861 pavgb xmm2, xmm4 local
866 movdqa xmm2, [ebp] local
870 pavgb xmm2, xmm4 local
878 pavgb xmm2, xmm4 local
926 movdqa xmm2, xmm1 local
930 pshufb xmm2, xmm5 local
968 movdqa xmm2, _shuf01 local
1025 movdqa xmm2, _shuf01 local
1127 movdqa xmm2, [esi + edx] local
1132 punpcklbw xmm2, xmm7 local
1136 movdqa xmm2, [esi + edx * 2] local
1139 punpcklbw xmm2, xmm7 local
1144 movdqa xmm2, xmm0 // 8 pixels -> 0,1,2 of xmm2 local
1146 paddusw xmm2, xmm0 local
1148 paddusw xmm2, xmm0 local
1149 pshufb xmm2, xmm4 local
1157 paddusw xmm2, xmm3 local
1159 pmulhuw xmm2, xmm6 // divide by 9,9,6, 9,9,6 local
1160 packuswb xmm2, xmm2 local
1163 pextrw eax, xmm2, 2 local
1190 movdqa xmm2, [esi] // average 2 rows into xmm2 local
1191 pavgb xmm2, [esi + edx] local
1199 pshufb xmm2, xmm6 local
1236 movdqa xmm2, [esi] local
1240 punpcklbw xmm2, xmm5 local
1250 paddusw xmm2, xmm0 // sum 16 words local
1299 movdqa xmm2, [esi + edx] local
1304 punpcklbw xmm2, xmm7 local
1309 pmullw xmm2, xmm6 // scale row 1 local
1343 movdqa xmm2, [esi + edx] local
1388 movdqa xmm2, [esi + edx] local
1425 movdqa xmm2, [esi + edx] local
1452 movdqa xmm2, _shuf01 local
    [all...]
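
The downscalers average vertically first, pavgb-ing two (or, via a second pavgb, four) rows, then average horizontally by masking alternating bytes apart with pand and pavgw-ing the halves; the 3/8 path instead widens, sums rows with paddusw, and divides by the 9,9,6 box weights with a pmulhuw fixed-point reciprocal. The vertical step in intrinsics (a sketch):

    #include <emmintrin.h>  // SSE2
    #include <stdint.h>

    // pavgb: per-byte rounded average, (a + b + 1) >> 1 -- the
    // vertical half of the 2x2 box filter above.
    __m128i average_rows(const uint8_t* row0, const uint8_t* row1) {
      __m128i r0 = _mm_loadu_si128((const __m128i*)row0);
      __m128i r1 = _mm_loadu_si128((const __m128i*)row1);
      return _mm_avg_epu8(r0, r1);
    }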
  /external/gemmlowp/internal/
pack_SSE.h 102 __m128i xmm2 = _mm_loadl_epi64( local
109 __m128i xmm5 = _mm_unpacklo_epi16(xmm1, xmm2);
139 xmm2 = _mm_madd_epi16(xmm1, one);
142 sums_of_each_slice_xmm = _mm_add_epi32(sums_of_each_slice_xmm, xmm2);
145 xmm2 = _mm_madd_epi16(xmm1, one);
146 sums_of_each_slice_xmm = _mm_add_epi32(sums_of_each_slice_xmm, xmm2);
149 xmm2 = _mm_madd_epi16(xmm1, one);
150 sums_of_each_slice_xmm = _mm_add_epi32(sums_of_each_slice_xmm, xmm2);
153 xmm2 = _mm_madd_epi16(xmm1, one);
154 sums_of_each_slice_xmm = _mm_add_epi32(sums_of_each_slice_xmm, xmm2);
    [all...]
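
While packing, gemmlowp keeps running column sums for the quantization offset: _mm_madd_epi16 against a vector of ones multiplies each 16-bit lane by 1 and adds adjacent pairs, so each madd/add pair above folds one slice into four 32-bit accumulators. The idiom in isolation (a sketch):

    #include <emmintrin.h>  // SSE2

    // madd with ones: eight 16-bit lanes pairwise-summed into four
    // 32-bit lanes, then accumulated -- the sums_of_each_slice update.
    __m128i add_pair_sums(__m128i acc, __m128i v16) {
      const __m128i one = _mm_set1_epi16(1);
      return _mm_add_epi32(acc, _mm_madd_epi16(v16, one));
    }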

