    Searched full:ymm0 (Results 1 - 25 of 55)


  /external/llvm/test/MC/X86/
x86_64-fma4-encoding.s 56 // CHECK: vfmaddps (%rcx), %ymm1, %ymm0, %ymm0
58 vfmaddps (%rcx), %ymm1, %ymm0, %ymm0
60 // CHECK: vfmaddps %ymm1, (%rcx), %ymm0, %ymm0
62 vfmaddps %ymm1, (%rcx),%ymm0, %ymm0
64 // CHECK: vfmaddps %ymm2, %ymm1, %ymm0, %ymm0
    [all...]
shuffle-comments.s 33 vpalignr $8, %ymm0, %ymm1, %ymm2
34 # CHECK: ymm2 = ymm0[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm0[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
38 vpalignr $16, %ymm0, %ymm1, %ymm2
43 vpalignr $0, %ymm0, %ymm1, %ymm2
44 # CHECK: ymm2 = ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
58 vpshufd $27, %ymm0, %ymm1
59 # CHECK: ymm1 = ymm0[3,2,1,0,7,6,5,4]
73 vpunpcklbw %ymm0, %ymm1, %ymm2
74 # CHECK: ymm2 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6] (…)
    [all...]
  /external/llvm/test/CodeGen/X86/
avx2-vector-shifts.ll 12 ; CHECK-NOT: vpsllw $0, %ymm0, %ymm0
22 ; CHECK: vpaddw %ymm0, %ymm0, %ymm0
32 ; CHECK: vpsllw $15, %ymm0, %ymm0
42 ; CHECK-NOT: vpslld $0, %ymm0, %ymm0
52 ; CHECK: vpaddd %ymm0, %ymm0, %ymm
    [all...]
pr17764.ll 9 ; CHECK: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
avx1-logical-load-folding.ll 17 ; CHECK: vandps LCPI0_0(%rip), %ymm0, %ymm0
31 ; CHECK: vorps LCPI1_0(%rip), %ymm0, %ymm0
45 ; CHECK: vxorps LCPI2_0(%rip), %ymm0, %ymm0
59 ;CHECK: vandnps LCPI3_0(%rip), %ymm0, %ymm0
vec_shuf-insert.ll 16 ; CHECK: vinsertf128 $1, %xmm1, %ymm2, %ymm0
27 ; CHECK: vinsertf128 $1, %xmm2, %ymm0, %ymm0
avx-blend.ll 70 ;CHECK: vblendps $238, %ymm1, %ymm0, %ymm0
79 ;CHECK: vblendps $238, %ymm1, %ymm0, %ymm0
89 ; v1 is in ymm0 and ymm1.
91 ; result is in ymm0 and ymm1.
93 ;CHECK: vblendpd $14, %ymm2, %ymm0, %ymm0
103 ;CHECK: vblendpd $14, %ymm2, %ymm0, %ymm0
    [all...]
fma_patterns.ll 55 ; CHECK: vfmadd213ps %ymm2, %ymm1, %ymm0
58 ; CHECK_FMA4: vfmaddps %ymm2, %ymm1, %ymm0, %ymm0
67 ; CHECK: vfmsub213ps %ymm2, %ymm1, %ymm0
70 ; CHECK_FMA4: vfmsubps %ymm2, %ymm1, %ymm0, %ymm0
79 ; CHECK: vfnmadd213ps %ymm2, %ymm1, %ymm0
82 ; CHECK_FMA4: vfnmaddps %ymm2, %ymm1, %ymm0, %ymm0
91 ; CHECK: vfnmsub213ps %ymm2, %ymm1, %ymm0
    [all...]
2012-07-15-broadcastfold.ll 8 ;CHECK: vbroadcastss [[SPILLED]], %ymm0
avx-varargs-x86_64.ll 9 ; CHECK: vmovaps %ymm0, (%rsp)
avx-vextractf128.ll 22 ; CHECK-NOT: vextractf128 $1, %ymm0, %xmm0
24 ; CHECK: vextractf128 $1, %ymm0, (%rdi)
36 ; CHECK-NOT: vextractf128 $1, %ymm0, %xmm0
38 ; CHECK: vextractf128 $1, %ymm0, (%rdi)
50 ; CHECK-NOT: vextractf128 $1, %ymm0, %xmm0
52 ; CHECK: vextractf128 $1, %ymm0, (%rdi)
vec_cast2.ll 15 ;CHECK-WIDE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
16 ;CHECK-WIDE-NEXT: vcvtdq2ps %ymm0, %ymm0
psubus.ll 213 ; AVX2: vpsubusw LCPI6_0(%rip), %ymm0, %ymm0
237 ; AVX2: vpsubusw LCPI7_0(%rip), %ymm0, %ymm0
264 ; AVX2: vpsubusw %ymm0, %ymm1, %ymm1
289 ; AVX2: vpsubusb LCPI9_0(%rip), %ymm0, %ymm0
313 ; AVX2: vpsubusb LCPI10_0(%rip), %ymm0, %ymm0
339 ; AVX2: vpsubusb %ymm0, %ymm1, %ymm
    [all...]
avx-vinsertf128.ll 69 ; CHECK: vmovaps %ymm1, %ymm0
85 ; CHECK: vmovaps %ymm1, %ymm0
101 ; CHECK: vmovaps %ymm1, %ymm0
stackmap-liveness.ll 56 %a4 = call <4 x double> asm sideeffect "", "={ymm0}"() nounwind
86 ; LiveOut Entry 3: %YMM0 (32 bytes)
101 call void asm sideeffect "", "{r8},{ah},{ymm0},{ymm1}"(i64 %a2, i8 %a3, <4 x double> %a4, <4 x double> %a5) nounwind
combine-avx2-intrinsics.ll 13 ; CHECK: vpsrad $8, %ymm0, %ymm0
23 ; CHECK: vpsraw $8, %ymm0, %ymm0
  /external/chromium_org/third_party/libyuv/source/
compare_win.cc 69 vpxor ymm0, ymm0, ymm0 // sum local
86 vpaddd ymm0, ymm0, ymm1 local
87 vpaddd ymm0, ymm0, ymm2 local
90 vpshufd ymm1, ymm0, 0xee // 3, 2 + 1, 0 both lanes. local
91 vpaddd ymm0, ymm0, ymm local
92 vpshufd ymm1, ymm0, 0x01 // 1 + 0 both lanes. local
93 vpaddd ymm0, ymm0, ymm1 local
94 vpermq ymm1, ymm0, 0x02 // high + low lane. local
95 vpaddd ymm0, ymm0, ymm1 local
    [all...]
row_win.cc 708 vmovdqu ymm0, [eax] local
712 vpmaddubsw ymm0, ymm0, ymm4 local
717 vphaddw ymm0, ymm0, ymm1 // mutates. local
719 vpsrlw ymm0, ymm0, 7 local
721 vpackuswb ymm0, ymm0, ymm2 // mutates. local
722 vpermd ymm0, ymm6, ymm0 // For vphaddw + vpackuswb mutation local
723 vpaddb ymm0, ymm0, ymm5 local
748 vmovdqu ymm0, [eax] local
752 vpmaddubsw ymm0, ymm0, ymm4 local
757 vphaddw ymm0, ymm0, ymm1 // mutates. local
759 vpaddw ymm0, ymm0, ymm5 // Add .5 for rounding. local
761 vpsrlw ymm0, ymm0, 7 local
763 vpackuswb ymm0, ymm0, ymm2 // mutates. local
764 vpermd ymm0, ymm6, ymm0 // For vphaddw + vpackuswb mutation. local
1202 vmovdqu ymm0, [eax] local
1206 vpavgb ymm0, ymm0, [eax + esi] local
1211 vshufps ymm4, ymm0, ymm1, 0x88 local
1212 vshufps ymm0, ymm0, ymm1, 0xdd local
1213 vpavgb ymm0, ymm0, ymm4 // mutated by vshufps local
1221 vpmaddubsw ymm1, ymm0, ymm7 // U local
1223 vpmaddubsw ymm0, ymm0, ymm6 // V local
1226 vphaddw ymm0, ymm0, ymm2 local
1228 vpsraw ymm0, ymm0, 8 local
1229 vpacksswb ymm0, ymm1, ymm0 // mutates local
1230 vpermq ymm0, ymm0, 0xd8 // For vpacksswb local
1231 vpshufb ymm0, ymm0, kShufARGBToUV_AVX // For vshufps + vphaddw local
1232 vpaddb ymm0, ymm0, ymm5 // -> unsigned local
1236 vextractf128 [edx], ymm0, 0 // U local
1237 vextractf128 [edx + edi], ymm0, 1 // V local
2103 vpunpcklbw ymm0, ymm0, ymm1 // UV local
2104 vpermq ymm0, ymm0, 0xd8 local
2105 vpunpcklwd ymm0, ymm0, ymm0 // UVUV local
2106 vpmaddubsw ymm2, ymm0, kUVToB_AVX // scale B UV local
2107 vpmaddubsw ymm1, ymm0, kUVToG_AVX // scale G UV local
2108 vpmaddubsw ymm0, ymm0, kUVToR_AVX // scale R UV local
2111 vpsubw ymm0, ymm0, kUVBiasR_AVX local
2122 vpaddsw ymm0, ymm0, ymm3 // R += Y local
2125 vpsraw ymm0, ymm0, 6 local
2128 vpackuswb ymm0, ymm0, ymm0 // R local
2133 vpunpcklbw ymm0, ymm0, ymm5 // RA local
2134 vpermq ymm0, ymm0, 0xd8 local
3202 vmovdqu ymm0, [eax + ecx] local
3203 vpshufb ymm0, ymm0, ymm5 local
3204 vpermq ymm0, ymm0, 0x4e // swap high and low halfs local
3327 vpermd ymm0, ymm5, [eax + ecx * 4] // permute dword order local
3428 vmovdqu ymm0, [eax] local
3431 vpsrlw ymm2, ymm0, 8 // odd bytes local
3433 vpand ymm0, ymm0, ymm5 // even bytes local
3435 vpackuswb ymm0, ymm0, ymm1 local
3437 vpermq ymm0, ymm0, 0xd8 local
3528 vmovdqu ymm0, [eax] // read 32 U's local
3531 vpunpcklbw ymm2, ymm0, ymm1 // low 16 UV pairs. mutated qqword 0,2 local
3532 vpunpckhbw ymm0, ymm0, ymm1 // high 16 UV pairs. mutated qqword 1,3 local
3533 vperm2i128 ymm1, ymm2, ymm0, 0x20 // low 128 of ymm2 and low 128 of ymm0 local
3534 vperm2i128 ymm2, ymm2, ymm0, 0x31 // high 128 of ymm2 and high 128 of ymm0 local
3651 vpcmpeqb ymm0, ymm0, ymm0 local
3652 vpsrld ymm0, ymm0, 8 // generate mask 0x00ffffff local
3720 vpcmpeqb ymm0, ymm0, ymm0 local
3721 vpsrld ymm0, ymm0, 8 // generate mask 0x00ffffff local
3805 vmovdqu ymm0, [eax] local
3808 vpand ymm0, ymm0, ymm5 // even bytes are Y local
3810 vpackuswb ymm0, ymm0, ymm1 // mutates. local
3811 vpermq ymm0, ymm0, 0xd8 local
3838 vmovdqu ymm0, [eax] local
3840 vpavgb ymm0, ymm0, [eax + esi] local
3843 vpsrlw ymm0, ymm0, 8 // YUYV -> UVUV local
3845 vpackuswb ymm0, ymm0, ymm1 // mutates. local
3846 vpermq ymm0, ymm0, 0xd8 local
3847 vpand ymm1, ymm0, ymm5 // U local
3848 vpsrlw ymm0, ymm0, 8 // V local
3850 vpackuswb ymm0, ymm0, ymm0 // mutates. local
3852 vpermq ymm0, ymm0, 0xd8 local
3854 vextractf128 [edx + edi], ymm0, 0 // V local
3881 vmovdqu ymm0, [eax] local
3884 vpsrlw ymm0, ymm0, 8 // YUYV -> UVUV local
3886 vpackuswb ymm0, ymm0, ymm1 // mutates. local
3887 vpermq ymm0, ymm0, 0xd8 local
3888 vpand ymm1, ymm0, ymm5 // U local
3889 vpsrlw ymm0, ymm0, 8 // V local
3891 vpackuswb ymm0, ymm0, ymm0 // mutates. local
3893 vpermq ymm0, ymm0, 0xd8 local
3895 vextractf128 [edx + edi], ymm0, 0 // V local
3916 vmovdqu ymm0, [eax] local
3919 vpsrlw ymm0, ymm0, 8 // odd bytes are Y local
3921 vpackuswb ymm0, ymm0, ymm1 // mutates. local
3922 vpermq ymm0, ymm0, 0xd8 local
3949 vmovdqu ymm0, [eax] local
3951 vpavgb ymm0, ymm0, [eax + esi] local
3954 vpand ymm0, ymm0, ymm5 // UYVY -> UVUV local
3956 vpackuswb ymm0, ymm0, ymm1 // mutates. local
3957 vpermq ymm0, ymm0, 0xd8 local
3958 vpand ymm1, ymm0, ymm5 // U local
3959 vpsrlw ymm0, ymm0, 8 // V local
3961 vpackuswb ymm0, ymm0, ymm0 // mutates. local
3963 vpermq ymm0, ymm0, 0xd8 local
3965 vextractf128 [edx + edi], ymm0, 0 // V local
3992 vmovdqu ymm0, [eax] local
3995 vpand ymm0, ymm0, ymm5 // UYVY -> UVUV local
3997 vpackuswb ymm0, ymm0, ymm1 // mutates. local
3998 vpermq ymm0, ymm0, 0xd8 local
3999 vpand ymm1, ymm0, ymm5 // U local
4000 vpsrlw ymm0, ymm0, 8 // V local
4002 vpackuswb ymm0, ymm0, ymm0 // mutates. local
4004 vpermq ymm0, ymm0, 0xd8 local
4006 vextractf128 [edx + edi], ymm0, 0 // V local
4840 vpunpcklbw ymm0, ymm6, ymm6 // low 4 pixels. mutated. local
4842 vpshufb ymm2, ymm0, ymm4 // low 4 alphas local
4844 vpmulhuw ymm0, ymm0, ymm2 // rgb * a local
4847 vpsrlw ymm0, ymm0, 8 local
4849 vpackuswb ymm0, ymm0, ymm1 // unmutated. local
4850 vpor ymm0, ymm0, ymm6 // copy original alpha local
4936 vpunpcklbw ymm0, ymm6, ymm6 // low 4 pixels. mutated. local
4943 vpmulhuw ymm0, ymm0, ymm2 // rgb * ia local
4945 vpackuswb ymm0, ymm0, ymm1 // unmutated. local
4999 vpunpcklbw ymm0, ymm6, ymm6 // low 4 pixels. mutated. local
5005 vpmulhuw ymm0, ymm0, ymm2 // rgb * ia local
5007 vpackuswb ymm0, ymm0, ymm1 // unmutated. local
5423 vpunpcklbw ymm0, ymm1, ymm1 // low 4 local
5427 vpmulhuw ymm0, ymm0, ymm2 // src_argb0 * src_argb1 low 4 local
5429 vpackuswb ymm0, ymm0, ymm1 local
5456 vmovdqu ymm0, [eax] // read 8 pixels from src_argb0 local
5458 vpaddusb ymm0, ymm0, [esi] // add 8 pixels from src_argb1 local
5486 vmovdqu ymm0, [eax] // read 8 pixels from src_argb0 local
5488 vpsubusb ymm0, ymm0, [esi] // src_argb0 - src_argb1 local
6111 vpxor ymm0, ymm0, ymm0 local
6112 vpermd ymm5, ymm0, ymm5 local
6116 vmovdqu ymm0, [esi] local
6118 vpunpckhbw ymm1, ymm0, ymm2 // mutates local
6119 vpunpcklbw ymm0, ymm0, ymm2 // mutates local
6120 vpmaddubsw ymm0, ymm0, ymm5 local
6122 vpsrlw ymm0, ymm0, 7 local
6124 vpackuswb ymm0, ymm0, ymm1 // unmutates local
6134 vmovdqu ymm0, [esi] local
6135 vpavgb ymm0, ymm0, [esi + edx] local
6136 vpavgb ymm0, ymm0, [esi + edx] local
6146 vmovdqu ymm0, [esi] local
6147 vpavgb ymm0, ymm0, [esi + edx] local
6157 vmovdqu ymm0, [esi + edx] local
6158 vpavgb ymm0, ymm0, [esi] local
6159 vpavgb ymm0, ymm0, [esi] local
6656 vmovdqu ymm0, [eax] local
6657 vpavgb ymm0, ymm0, [eax + edx] local
6793 vmovdqu ymm0, [eax] local
6796 vpshufb ymm0, ymm0, ymm5 local
7090 vpmovzxbd ymm0, qword ptr [eax] // 2 BGRA pixels local
7092 vcvtdq2ps ymm0, ymm0 // X 8 floats local
7093 vmulps ymm2, ymm0, ymm0 // X * X local
7094 vmulps ymm3, ymm0, ymm7 // C3 * X local
7095 vfmadd132ps ymm0, ymm4, ymm5 // result = C0 + C1 * X local
7096 vfmadd231ps ymm0, ymm2, ymm6 // result += C2 * X * X local
7097 vfmadd231ps ymm0, ymm2, ymm3 // result += C3 * X * X * X local
7098 vcvttps2dq ymm0, ymm0 local
7099 vpackusdw ymm0, ymm0, ymm0 // b0g0r0a0_00000000_b0g0r0a0_00000000 local
7100 vpermq ymm0, ymm0, 0xd8 // b0g0r0a0_b0g0r0a0_00000000_00000000 local
    [all...]
row_x86.asm 124 vperm2i128 m1, m2, m0, 0x20 // low 128 of ymm2 and low 128 of ymm0
125 vperm2i128 m2, m2, m0, 0x31 // high 128 of ymm2 and high 128 of ymm0
  /external/llvm/test/MC/Disassembler/X86/
x86-32.txt 138 # CHECK: vaddpd %ymm5, %ymm1, %ymm0
141 # CHECK: vaddps %ymm3, %ymm1, %ymm0
144 # CHECK: vandpd %ymm5, %ymm1, %ymm0
147 # CHECK: vandps %ymm3, %ymm1, %ymm0
153 # CHECK: vcvtps2pd %xmm0, %ymm0
289 # CHECK: vmovups %ymm1, %ymm0
292 # CHECK: vmovups %ymm0, %ymm1
295 # CHECK: vmovaps %ymm1, %ymm0
298 # CHECK: vmovaps %ymm0, %ymm1
319 # CHECK: vmovdqa %ymm1, %ymm0
    [all...]
simple-tests.txt 102 # CHECK: vaddpd %ymm13, %ymm1, %ymm0
105 # CHECK: vaddps %ymm3, %ymm1, %ymm0
108 # CHECK: vandpd %ymm13, %ymm1, %ymm0
111 # CHECK: vandps %ymm3, %ymm1, %ymm0
117 # CHECK: vcvtps2pd %xmm0, %ymm0
260 # CHECK: vmovups %ymm1, %ymm0
263 # CHECK: vmovups %ymm0, %ymm1
266 # CHECK: vmovups %ymm0, %ymm1
269 # CHECK: vmovaps %ymm1, %ymm0
272 # CHECK: vmovaps %ymm0, %ymm
    [all...]
avx-512.txt 39 # CHECK: vgatherdpd (%rsi,%ymm0,4), %zmm1 {%k2}
  /external/llvm/include/llvm/CodeGen/
LivePhysRegs.h 22 // %YMM0<def> = ...
23 // %XMM0<def> = ... (Kills %XMM0, all %XMM0s sub-registers, and %YMM0)
25 // %YMM0<def> = ...
26 // %XMM0<def> = ..., %YMM0<imp-use> (%YMM0 and all its sub-registers are alive)
  /external/clang/test/CodeGen/
asm.c 221 __asm__ volatile("vmovaps %0, %%ymm0" :: "m" (*(__m256i*)p) : "ymm0");
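The asm.c hit above exercises a GCC-style inline-asm statement that loads 32 bytes into ymm0 and declares "ymm0" in the clobber list. A minimal standalone sketch of the same pattern is shown below; the function name and the alignment note are illustrative assumptions, not taken from the test itself.
/* Sketch only: mirrors the clobber pattern from asm.c above.
 * Assumes AVX support (compile with e.g. -mavx) and that p is
 * 32-byte aligned, since vmovaps requires aligned memory. */
#include <immintrin.h>

void load_ymm0(const void *p) {
    /* Load 32 bytes into ymm0 and tell the compiler ymm0 is clobbered. */
    __asm__ volatile("vmovaps %0, %%ymm0"
                     :
                     : "m" (*(const __m256i *)p)
                     : "ymm0");
}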
  /external/lldb/test/functionalities/register/
TestRegisters.py 192 self.vector_write_and_read(currentFrame, "ymm0", new_value)
194 self.expect("expr $ymm0", substrs = ['vector_type'])
196 self.runCmd("register read ymm0")
