    Searched full:xmm2 (Results 201 - 225 of 733)


  /external/libvpx/libvpx/vp8/common/x86/
recon_sse2.asm 36 movdqu xmm2, [rsi+rax*2]
45 movdqa [rdi+rcx*2],xmm2
68 movdqu xmm2, [rsi+rax*2]
76 movdqa [rdi+rcx*2], xmm2
98 movdqu xmm2, [rsi+rax*2]
104 movdqa [rdi+rcx*2],xmm2
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/ilp32/
x86-64-sse3.d 11 4: 66 0f d0 ca [ ]*addsubpd %xmm2,%xmm1
12 8: f2 0f d0 13 [ ]*addsubps \(%rbx\),%xmm2
23 3b: f2 0f 7d d2 [ ]*hsubps %xmm2,%xmm2
31 5a: f3 0f 16 ca [ ]*movshdup %xmm2,%xmm1
32 5e: f3 0f 12 13 [ ]*movsldup \(%rbx\),%xmm2
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/i386/
sse3.d 10 4: 66 0f d0 ca [ ]*addsubpd %xmm2,%xmm1
11 8: f2 0f d0 13 [ ]*addsubps \(%ebx\),%xmm2
22 3b: f2 0f 7d d2 [ ]*hsubps %xmm2,%xmm2
30 5a: f3 0f 16 ca [ ]*movshdup %xmm2,%xmm1
31 5e: f3 0f 12 13 [ ]*movsldup \(%ebx\),%xmm2
x86-64-sse3.d 10 4: 66 0f d0 ca [ ]*addsubpd %xmm2,%xmm1
11 8: f2 0f d0 13 [ ]*addsubps \(%rbx\),%xmm2
22 3b: f2 0f 7d d2 [ ]*hsubps %xmm2,%xmm2
30 5a: f3 0f 16 ca [ ]*movshdup %xmm2,%xmm1
31 5e: f3 0f 12 13 [ ]*movsldup \(%rbx\),%xmm2
avx2.s 57 vpsllvd %xmm4,%xmm6,%xmm2
59 vpsllvq %xmm4,%xmm6,%xmm2
61 vpsravd %xmm4,%xmm6,%xmm2
63 vpsrlvd %xmm4,%xmm6,%xmm2
65 vpsrlvq %xmm4,%xmm6,%xmm2
81 vpblendd $7,%xmm4,%xmm6,%xmm2
82 vpblendd $7,(%ecx),%xmm6,%xmm2
189 vpsllvd xmm2,xmm6,xmm4
192 vpsllvq xmm2,xmm6,xmm4
195 vpsravd xmm2,xmm6,xmm
    [all...]
  /external/libyuv/files/source/
row_win.cc 57 xmm2 = _mm_loadu_si128(&xmm0); \
60 xmm2 = _mm_maddubs_epi16(xmm2, *(__m128i*)yuvconstants->kUVToR); \
63 xmm2 = _mm_sub_epi16(*(__m128i*)yuvconstants->kUVBiasR, xmm2); \
67 xmm2 = _mm_adds_epi16(xmm2, xmm4); \
70 xmm2 = _mm_srai_epi16(xmm2, 6); \
73 xmm2 = _mm_packus_epi16(xmm2, xmm2)
94 __m128i xmm0, xmm1, xmm2, xmm4; local
114 __m128i xmm0, xmm1, xmm2, xmm4, xmm5; local
    [all...]
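
The row_win.cc hits above are one channel of libyuv's SSE YUV-to-RGB path: multiply the packed U/V bytes by a coefficient table, subtract that from a bias, add the luma term, shift out the fraction bits, and pack back to bytes. A minimal C intrinsics sketch of that red-channel sequence follows; the argument names, and the idea that uv_to_r / uv_bias_r stand in for the kUVToR / kUVBiasR tables, are assumptions made for illustration, since the tables' contents are not shown in these hits.

    #include <emmintrin.h>   /* SSE2 */
    #include <tmmintrin.h>   /* SSSE3: _mm_maddubs_epi16 */

    /* Illustrative sketch only: mirrors the visible steps, not the full macro. */
    static inline __m128i yuv_to_r_sketch(__m128i uv, __m128i y,
                                          __m128i uv_to_r, __m128i uv_bias_r) {
      __m128i r = _mm_maddubs_epi16(uv, uv_to_r); /* UV * coefficients, pairwise add */
      r = _mm_sub_epi16(uv_bias_r, r);            /* bias minus UV contribution */
      r = _mm_adds_epi16(r, y);                   /* add luma term, saturating */
      r = _mm_srai_epi16(r, 6);                   /* drop 6 fraction bits */
      return _mm_packus_epi16(r, r);              /* clamp to 0..255 bytes */
    }
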
scale_win.cc 174 movdqu xmm2, [eax + esi] local
179 pmaddubsw xmm2, xmm4 local
181 paddw xmm0, xmm2 // vertical add
362 movdqu xmm2, [eax + esi] local
366 pmaddubsw xmm2, xmm4 local
368 paddw xmm0, xmm2 // vertical add rows 0, 1
370 movdqu xmm2, [eax + esi * 2] local
372 pmaddubsw xmm2, xmm4 local
374 paddw xmm0, xmm2 // add row 2
376 movdqu xmm2, [eax + edi local
379 pmaddubsw xmm2, xmm4 local
513 movdqa xmm2, xmm1 local
517 pshufb xmm2, xmm5 local
554 movdqa xmm2, xmmword ptr kShuf01 local
611 movdqa xmm2, xmmword ptr kShuf01 local
704 movdqa xmm2, xmmword ptr kShufAc local
769 movdqa xmm2, xmmword ptr kShufAb0 local
818 movdqa xmm2, xmm3 local
819 punpcklbw xmm2, xmm5 local
884 movd xmm2, [esp + 12 + 16] // x local
892 pextrw eax, xmm2, 1 // get x0 integer. preroll local
898 punpckldq xmm2, xmm0 // x0 x1 local
901 pextrw edx, xmm2, 3 // get x1 integer. preroll local
906 paddd xmm2, xmm3 // x += dx local
918 pextrw eax, xmm2, 1 // get x0 integer. next iteration. local
919 pextrw edx, xmm2, 3 // get x1 integer. next iteration. local
936 psrlw xmm2, 9 // 7 bit fractions. local
937 pshufb xmm2, xmm5 // 0011 local
939 pxor xmm2, xmm6 // 0..7f and 7f..0 local
940 paddusb xmm2, xmm7 // +1 so 0..7f and 80..1 local
941 pmaddubsw xmm2, xmm0 // 16 bit local
942 paddw xmm2, xmmword ptr kFadd40 // make pixels unsigned and round. local
943 psrlw xmm2, 7 // 8.7 fixed point to low 8 bits. local
944 packuswb xmm2, xmm2 // 8 bits local
1022 movdqa xmm2, xmm0 local
1024 shufps xmm2, xmm1, 0xdd // odd pixels local
1050 movdqu xmm2, [eax + esi] local
1055 movdqa xmm2, xmm0 // average columns (8 to 4 pixels) local
1057 shufps xmm2, xmm1, 0xdd // odd pixels local
1089 movd xmm2, [eax + ebx * 2] local
1092 punpckldq xmm2, xmm3 local
1130 movq xmm2, qword ptr [esi] // row1 4 pairs local
1131 movhps xmm2, qword ptr [esi + ebx] local
1137 movdqa xmm2, xmm0 // average columns (8 to 4 pixels) local
1139 shufps xmm2, xmm1, 0xdd // odd pixels local
1163 movd xmm2, [esp + 8 + 16] // x local
1166 pshufd xmm2, xmm2, 0 // x0 x0 x0 x0 local
1168 paddd xmm2, xmm0 local
1171 paddd xmm2, xmm0 // x3 x2 x1 x0 local
1175 pextrw eax, xmm2, 1 // get x0 integer. local
1176 pextrw edx, xmm2, 3 // get x1 integer. local
1187 pextrw eax, xmm2, 5 // get x2 integer. local
1188 pextrw edx, xmm2, 7 // get x3 integer. local
1189 paddd xmm2, xmm3 // x += dx local
1194 pextrw eax, xmm2, 1 // get x0 integer. next iteration. local
1195 pextrw edx, xmm2, 3 // get x1 integer. next iteration. local
1210 pextrw eax, xmm2, 5 // get x2 integer. local
1254 movd xmm2, [esp + 8 + 16] // x local
1260 pextrw eax, xmm2, 1 // get x0 integer. preroll local
1266 punpckldq xmm2, xmm0 // x0 x1 local
1269 pextrw edx, xmm2, 3 // get x1 integer. preroll local
1274 paddd xmm2, xmm3 // x += dx local
1282 pextrw eax, xmm2, 1 // get x0 integer. next iteration. local
1283 pextrw edx, xmm2, 3 // get x1 integer. next iteration. local
1299 pshufb xmm2, xmm5 // 00000000 local
1301 pxor xmm2, xmm6 // 0..7f and 7f..0 local
    [all...]
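
The scale_win.cc hits above implement box downscaling: pmaddubsw with a vector of 1s sums adjacent byte pairs horizontally, paddw adds the rows vertically, and a rounded shift divides by the box size. A hedged intrinsics sketch of the 2x2 case (function and pointer names are illustrative, not libyuv's API):

    #include <emmintrin.h>
    #include <tmmintrin.h>
    #include <stdint.h>

    /* Average 2x2 blocks of 16 input pixels per row down to 8 output pixels. */
    static inline void box2x2_sketch(const uint8_t* row0, const uint8_t* row1,
                                     uint8_t* dst) {
      const __m128i ones = _mm_set1_epi8(1);
      __m128i a = _mm_maddubs_epi16(_mm_loadu_si128((const __m128i*)row0), ones);
      __m128i b = _mm_maddubs_epi16(_mm_loadu_si128((const __m128i*)row1), ones);
      __m128i sum = _mm_add_epi16(a, b);                              /* vertical add */
      sum = _mm_srli_epi16(_mm_add_epi16(sum, _mm_set1_epi16(2)), 2); /* (sum + 2) / 4 */
      _mm_storel_epi64((__m128i*)dst, _mm_packus_epi16(sum, sum));    /* 8 bytes out */
    }
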
  /external/llvm/test/CodeGen/X86/
vec_uint_to_fp-fastmath.ll 54 ; AVX2-NEXT: vbroadcastss [[FPMASKCSTADDR]](%rip), %xmm2
55 ; AVX2-NEXT: vmulps %xmm2, %xmm1, %xmm1
56 ; AVX2-NEXT: vpbroadcastd [[MASKCSTADDR]](%rip), %xmm2
57 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
86 ; SSE-NEXT: movdqa %xmm0, %xmm2
87 ; SSE-NEXT: psrld $16, %xmm2
88 ; SSE-NEXT: cvtdq2ps %xmm2, %xmm2
90 ; SSE-NEXT: mulps %xmm3, %xmm2
94 ; SSE-NEXT: addps %xmm2, %xmm
    [all...]
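
These CHECK lines encode the standard unsigned-to-float split: SSE2 only has a signed 32-bit convert, so the value is split into its top and bottom 16 bits, each half is converted, and the high half is rescaled by 2^16 before the add. A small sketch of the same idea (function name is illustrative):

    #include <emmintrin.h>

    static inline __m128 u32_to_f32_sketch(__m128i v) {
      __m128i hi = _mm_srli_epi32(v, 16);                    /* top 16 bits */
      __m128i lo = _mm_and_si128(v, _mm_set1_epi32(0xFFFF)); /* low 16 bits */
      __m128 fhi = _mm_mul_ps(_mm_cvtepi32_ps(hi), _mm_set1_ps(65536.0f));
      return _mm_add_ps(fhi, _mm_cvtepi32_ps(lo));           /* exact halves, one add */
    }
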
vector-rotate-256.ll 14 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [64,64]
15 ; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm3
17 ; AVX1-NEXT: vpsubq %xmm4, %xmm2, %xmm2
28 ; AVX1-NEXT: vpsrlq %xmm2, %xmm5, %xmm4
29 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
30 ; AVX1-NEXT: vpsrlq %xmm2, %xmm5, %xmm2
31 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7
    [all...]
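
The AVX1 checks build a 64-bit rotate out of a left shift, a right shift by the difference from 64 (supplied by the [64,64] constant), and a blend of the two halves. The scalar equivalent is the usual two-shift idiom:

    #include <stdint.h>

    static inline uint64_t rotl64_sketch(uint64_t x, unsigned n) {
      n &= 63;                                    /* keep the shift count in range */
      return n ? (x << n) | (x >> (64 - n)) : x;  /* avoid the undefined shift by 64 */
    }
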
vec_fp_to_int.ll 96 ; SSE-NEXT: movd %rax, %xmm2
100 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
107 ; SSE-NEXT: movdqa %xmm2, %xmm0
115 ; AVX-NEXT: vmovq %rax, %xmm2
119 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
121 ; AVX-NEXT: vmovq %rax, %xmm2
125 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
136 ; SSE-NEXT: movd %rax, %xmm2
140 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0
    [all...]
vec_minmax_sint.ll 17 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
19 ; SSE2-NEXT: pxor %xmm2, %xmm3
20 ; SSE2-NEXT: pxor %xmm0, %xmm2
21 ; SSE2-NEXT: movdqa %xmm2, %xmm4
24 ; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
25 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
26 ; SSE2-NEXT: pand %xmm5, %xmm2
28 ; SSE2-NEXT: por %xmm2, %xmm3
36 ; SSE41-NEXT: movdqa %xmm0, %xmm2
    [all...]
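
SSE2 has no 64-bit signed compare, so vec_minmax_sint.ll expects it to be synthesized: the pxor with the [2147483648,0,...] constant flips sign bits so the 32-bit compare machinery can be reused, and the pcmpeqd/pshufd/pand/por lines stitch the per-dword results into a 64-bit mask. The underlying identity is the sign-bias trick, shown here in scalar form as an illustration (not the test's exact code):

    #include <stdint.h>

    /* Flipping the sign bit maps signed order onto unsigned order (and back). */
    static inline int slt64_via_bias(uint64_t a, uint64_t b) {
      const uint64_t bias = 0x8000000000000000ULL;
      return (a ^ bias) < (b ^ bias);   /* same result as (int64_t)a < (int64_t)b */
    }
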
avx-cvt-2.ll 15 ; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
16 ; CHECK-NEXT: vpshufb %xmm2, %xmm1, %xmm1
17 ; CHECK-NEXT: vpshufb %xmm2, %xmm0, %xmm0
32 ; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
33 ; CHECK-NEXT: vpshufb %xmm2, %xmm1, %xmm1
34 ; CHECK-NEXT: vpshufb %xmm2, %xmm0, %xmm0
49 ; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
50 ; CHECK-NEXT: vpshufb %xmm2, %xmm1, %xmm1
51 ; CHECK-NEXT: vpshufb %xmm2, %xmm0, %xmm0
67 ; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15
    [all...]
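
The repeated vmovdqa constant in avx-cvt-2.ll is a byte-shuffle mask that keeps bytes {0,1,4,5,8,9,12,13}, i.e. the low 16 bits of each 32-bit lane, so vpshufb performs a truncating i32-to-i16 pack. An intrinsics sketch of the same shuffle (only the low 8 result bytes are meaningful; the rest of the mask is filler):

    #include <tmmintrin.h>   /* SSSE3 */

    static inline __m128i trunc_i32_to_i16_sketch(__m128i v) {
      const __m128i m = _mm_setr_epi8(0, 1, 4, 5, 8, 9, 12, 13,
                                      8, 9, 12, 13, 12, 13, 14, 15);
      return _mm_shuffle_epi8(v, m);   /* low 8 bytes hold the four 16-bit values */
    }
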
  /external/libvpx/libvpx/third_party/libyuv/source/
scale_win.cc 140 movdqa xmm2, xmm0 // average columns (32 to 16 pixels) local
144 pand xmm2, xmm5 local
146 pavgw xmm0, xmm2
175 movdqu xmm2, [eax + esi] local
178 pavgb xmm0, xmm2 // average rows
181 movdqa xmm2, xmm0 // average columns (32 to 16 pixels) local
185 pand xmm2, xmm5 local
187 pavgw xmm0, xmm2
358 movdqu xmm2, [eax + esi] local
360 pavgb xmm0, xmm2
362 movdqu xmm2, [eax + esi * 2] local
367 pavgb xmm2, xmm4 local
372 movdqa xmm2, xmm0 // average columns (32 to 16 pixels) local
376 pand xmm2, xmm7 local
382 movdqa xmm2, xmm0 // average columns (16 to 8 pixels) local
384 pand xmm2, xmm7 local
510 movdqa xmm2, xmm1 local
514 pshufb xmm2, xmm5 local
551 movdqa xmm2, kShuf01 local
608 movdqa xmm2, kShuf01 local
701 movdqa xmm2, kShufAc local
766 movdqa xmm2, kShufAb0 local
815 movdqa xmm2, xmm3 local
816 punpcklbw xmm2, xmm5 local
871 movd xmm2, [esp + 12 + 16] // x local
877 pextrw eax, xmm2, 1 // get x0 integer. preroll local
883 punpckldq xmm2, xmm0 // x0 x1 local
886 pextrw edx, xmm2, 3 // get x1 integer. preroll local
891 paddd xmm2, xmm3 // x += dx local
901 pextrw eax, xmm2, 1 // get x0 integer. next iteration. local
902 pextrw edx, xmm2, 3 // get x1 integer. next iteration. local
919 psrlw xmm2, 9 // 7 bit fractions. local
920 pshufb xmm2, xmm5 // 0011 local
921 pxor xmm2, xmm6 // 0..7f and 7f..0 local
1002 movdqa xmm2, xmm0 local
1004 shufps xmm2, xmm1, 0xdd // odd pixels local
1030 movdqu xmm2, [eax + esi] local
1035 movdqa xmm2, xmm0 // average columns (8 to 4 pixels) local
1037 shufps xmm2, xmm1, 0xdd // odd pixels local
1069 movd xmm2, [eax + ebx * 2] local
1072 punpckldq xmm2, xmm3 local
1110 movq xmm2, qword ptr [esi] // row1 4 pairs local
1111 movhps xmm2, qword ptr [esi + ebx] local
1117 movdqa xmm2, xmm0 // average columns (8 to 4 pixels) local
1119 shufps xmm2, xmm1, 0xdd // odd pixels local
1143 movd xmm2, [esp + 8 + 16] // x local
1146 pshufd xmm2, xmm2, 0 // x0 x0 x0 x0 local
1148 paddd xmm2, xmm0 local
1151 paddd xmm2, xmm0 // x3 x2 x1 x0 local
1155 pextrw eax, xmm2, 1 // get x0 integer. local
1156 pextrw edx, xmm2, 3 // get x1 integer. local
1167 pextrw eax, xmm2, 5 // get x2 integer. local
1168 pextrw edx, xmm2, 7 // get x3 integer. local
1169 paddd xmm2, xmm3 // x += dx local
1174 pextrw eax, xmm2, 1 // get x0 integer. next iteration. local
1175 pextrw edx, xmm2, 3 // get x1 integer. next iteration. local
1190 pextrw eax, xmm2, 5 // get x2 integer. local
1234 movd xmm2, [esp + 8 + 16] // x local
1240 pextrw eax, xmm2, 1 // get x0 integer. preroll local
1246 punpckldq xmm2, xmm0 // x0 x1 local
1249 pextrw edx, xmm2, 3 // get x1 integer. preroll local
1254 paddd xmm2, xmm3 // x += dx local
1262 pextrw eax, xmm2, 1 // get x0 integer. next iteration. local
1263 pextrw edx, xmm2, 3 // get x1 integer. next iteration. local
1279 pshufb xmm2, xmm5 // 00000000 local
1281 pxor xmm2, xmm6 // 0..7f and 7f..0 local
    [all...]
  /external/libvpx/libvpx/vp9/common/x86/
vp9_postproc_sse2.asm 48 movd xmm2, dword ptr arg(6) ;flimit
49 punpcklwd xmm2, xmm2
50 punpckldq xmm2, xmm2
51 punpcklqdq xmm2, xmm2
78 pcmpgtw xmm7, xmm2
89 pcmpgtw xmm6, xmm2
103 pcmpgtw xmm6, xmm2
    [all...]
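
The three unpacks after the movd in vp9_postproc_sse2.asm replicate the 16-bit flimit value across all eight word lanes. The same broadcast written with intrinsics, as a sketch (equivalent in effect to _mm_set1_epi16):

    #include <emmintrin.h>

    static inline __m128i broadcast_w_sketch(int flimit) {
      __m128i v = _mm_cvtsi32_si128(flimit);   /* movd */
      v = _mm_unpacklo_epi16(v, v);            /* punpcklwd */
      v = _mm_unpacklo_epi32(v, v);            /* punpckldq */
      return _mm_unpacklo_epi64(v, v);         /* punpcklqdq */
    }
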
  /external/llvm/test/MC/Disassembler/X86/
marked-up.txt 5 # CHECK: xorps <reg:%xmm1>, <reg:%xmm2>
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_postproc_sse2.asm 48 movd xmm2, dword ptr arg(6) ;flimit
49 punpcklwd xmm2, xmm2
50 punpckldq xmm2, xmm2
51 punpcklqdq xmm2, xmm2
78 pcmpgtw xmm7, xmm2
89 pcmpgtw xmm6, xmm2
103 pcmpgtw xmm6, xmm2
    [all...]
  /external/compiler-rt/lib/builtins/i386/
floatdidf.S 30 movsd REL_ADDR(twop52), %xmm2 // 0x1.0p52
31 subsd %xmm2, %xmm1 // a_hi - 0x1p52 (no rounding occurs)
32 orpd %xmm2, %xmm0 // 0x1p52 + a_lo (no rounding occurs)
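
The comments in floatdidf.S describe the classic "magic constant" int64-to-double conversion: OR the low half into the mantissa of 2^52 so it can be recovered exactly by a subtraction, convert the high half separately, and combine with a single rounding. A scalar sketch of the same family of trick (illustrative; the real routine stages its constants differently):

    #include <stdint.h>

    static double floatdidf_sketch(int64_t a) {
      uint32_t lo = (uint32_t)a;
      int32_t  hi = (int32_t)((uint64_t)a >> 32);
      union { uint64_t u; double d; } t = { 0x4330000000000000ULL | lo };
      double d_lo = t.d - 0x1.0p52;          /* recovers lo exactly, no rounding */
      double d_hi = (double)hi * 0x1.0p32;   /* high word scaled by 2^32, exact */
      return d_hi + d_lo;                    /* one rounding at the final add */
    }
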
  /external/libvpx/libvpx/vp8/encoder/x86/
temporal_filter_apply_sse2.asm 94 movdqa xmm2, [rdx] ; predictor (frame2)
95 movdqa xmm3, xmm2
96 punpcklbw xmm2, xmm7 ; pred[ 0- 7]
100 psubw xmm0, xmm2 ; src - pred[ 0- 7]
122 movdqa xmm2, [GLOBAL(_const_16w)]
124 psubusw xmm2, xmm0
127 pmullw xmm2, [rsp + filter_weight]
134 paddw xmm4, xmm2
150 pmullw xmm0, xmm2
154 movdqa xmm2, xmm
    [all...]
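
The temporal_filter_apply hits show the per-pixel weighting step of the VP8 temporal filter: widen source and predictor to 16 bits, take the difference, turn it into a modifier that saturates at 16 (the psubusw against _const_16w), scale by filter_weight, then accumulate both the weight and the weighted predictor. A rough scalar rendering, hedged because the squaring, strength and rounding details are not visible in these hits:

    #include <stdint.h>

    static void temporal_filter_px_sketch(uint8_t src, uint8_t pred, int strength,
                                          int filter_weight,
                                          uint32_t* accumulator, uint16_t* count) {
      int diff = src - pred;
      int modifier = diff * diff * 3;      /* squared difference, scaled (approximation) */
      modifier >>= strength;               /* rounding term omitted for brevity */
      if (modifier > 16) modifier = 16;    /* the saturating "16 - x" seen above */
      modifier = 16 - modifier;
      modifier *= filter_weight;
      *count += (uint16_t)modifier;                /* paddw into the count lane */
      *accumulator += (uint32_t)(modifier * pred); /* pmullw + accumulate predictor */
    }
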
  /external/libvpx/libvpx/vp9/encoder/x86/
vp9_temporal_filter_apply_sse2.asm 99 movdqa xmm2, [rdx] ; predictor (frame2)
100 movdqa xmm3, xmm2
101 punpcklbw xmm2, xmm7 ; pred[ 0- 7]
105 psubw xmm0, xmm2 ; src - pred[ 0- 7]
127 movdqa xmm2, [GLOBAL(_const_16w)]
129 psubusw xmm2, xmm0
132 pmullw xmm2, [rsp + filter_weight]
139 paddw xmm4, xmm2
155 pmullw xmm0, xmm2
159 movdqa xmm2, xmm
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/x86/
temporal_filter_apply_sse2.asm 94 movdqa xmm2, [rdx] ; predictor (frame2)
95 movdqa xmm3, xmm2
96 punpcklbw xmm2, xmm7 ; pred[ 0- 7]
100 psubw xmm0, xmm2 ; src - pred[ 0- 7]
122 movdqa xmm2, [GLOBAL(_const_16w)]
124 psubusw xmm2, xmm0
127 pmullw xmm2, [rsp + filter_weight]
134 paddw xmm4, xmm2
150 pmullw xmm0, xmm2
154 movdqa xmm2, xmm
    [all...]
  /hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_temporal_filter_apply_sse2.asm 94 movdqa xmm2, [rdx] ; predictor (frame2)
95 movdqa xmm3, xmm2
96 punpcklbw xmm2, xmm7 ; pred[ 0- 7]
100 psubw xmm0, xmm2 ; src - pred[ 0- 7]
122 movdqa xmm2, [GLOBAL(_const_16w)]
124 psubusw xmm2, xmm0
127 pmullw xmm2, [rsp + filter_weight]
134 paddw xmm4, xmm2
150 pmullw xmm0, xmm2
154 movdqa xmm2, xmm
    [all...]
  /external/boringssl/linux-x86/crypto/aes/
vpaes-x86.S 77 movdqa (%ebp),%xmm2
83 pxor %xmm5,%xmm2
88 pxor %xmm2,%xmm0
101 movdqa 80(%ebp),%xmm2
105 pxor %xmm5,%xmm2
108 pxor %xmm2,%xmm0
129 movdqa %xmm7,%xmm2
133 pxor %xmm0,%xmm2
155 movdqa -64(%ebx),%xmm2
167 pxor %xmm5,%xmm2
    [all...]
  /external/boringssl/mac-x86/crypto/aes/
vpaes-x86.S 74 movdqa (%ebp),%xmm2
80 pxor %xmm5,%xmm2
85 pxor %xmm2,%xmm0
98 movdqa 80(%ebp),%xmm2
102 pxor %xmm5,%xmm2
105 pxor %xmm2,%xmm0
126 movdqa %xmm7,%xmm2
130 pxor %xmm0,%xmm2
150 movdqa -64(%ebx),%xmm2
162 pxor %xmm5,%xmm2
    [all...]
  /external/boringssl/win-x86/crypto/aes/
vpaes-x86.asm 85 movdqa xmm2,[ebp]
91 pxor xmm2,xmm5
96 pxor xmm0,xmm2
109 movdqa xmm2,[80+ebp]
113 pxor xmm2,xmm5
116 pxor xmm0,xmm2
137 movdqa xmm2,xmm7
141 pxor xmm2,xmm0
160 movdqa xmm2,[ebx-64]
172 pxor xmm2,xmm
    [all...]
  /bionic/libc/arch-x86/atom/string/
sse2-wcslen-atom.S 100 pxor %xmm2, %xmm2
105 pcmpeqd (%eax), %xmm2
106 pmovmskb %xmm2, %edx
130 pcmpeqd (%eax), %xmm2
131 pmovmskb %xmm2, %edx
154 pcmpeqd (%eax), %xmm2
155 pmovmskb %xmm2, %edx
178 pcmpeqd (%eax), %xmm2
179 pmovmskb %xmm2, %ed
    [all...]
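
The sse2-wcslen-atom.S hits show the zero-scan idiom: clear a register, pcmpeqd it against 16 bytes of the string (four 4-byte wchar_t lanes), and pmovmskb the result so a nonzero mask means a terminator was found in that chunk. A simplified intrinsics sketch of the same loop (illustrative: it uses unaligned loads and skips the alignment prologue the real routine needs to avoid reading past the terminator's page):

    #include <emmintrin.h>
    #include <stddef.h>
    #include <wchar.h>

    static size_t wcslen_sse2_sketch(const wchar_t* s) {  /* assumes 4-byte wchar_t */
      const wchar_t* p = s;
      const __m128i zero = _mm_setzero_si128();
      for (;;) {
        __m128i chunk = _mm_loadu_si128((const __m128i*)p);
        int mask = _mm_movemask_epi8(_mm_cmpeq_epi32(chunk, zero)); /* pcmpeqd + pmovmskb */
        if (mask)   /* GCC/Clang builtin: index of first matching lane */
          return (size_t)(p - s) + (size_t)(__builtin_ctz(mask) >> 2);
        p += 4;     /* next 16-byte chunk */
      }
    }
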
