; Lines matching full:tmp1: fragments of ctpop, ctlz, and @llvm.arm.neon.vcls tests
; ctpop (per-lane population count) on a 64-bit and a 128-bit vector:
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)

%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
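Each pair above is the interior of a complete test function; the enclosing define/ret lines simply did not match the search term. Below is a minimal sketch of the first fragment as a self-contained function, assuming a hypothetical name @vcnt8; the declare line is the standard declaration for the generic intrinsic.

define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {   ; hypothetical name
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)   ; per-lane popcount
  ret <8 x i8> %tmp2
}

declare <8 x i8> @llvm.ctpop.v8i8(<8 x i8>) nounwind readnone

On an ARM target with NEON, a per-byte vector ctpop like this one typically lowers to a single vcnt.8 instruction.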
; ctlz (count leading zeros) on 64-bit vectors:
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)

%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)

%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)

; ctlz on 128-bit vectors:
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)

%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %tmp1, i1 0)

%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %tmp1, i1 0)
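The trailing i1 0 operand is ctlz's is-zero-undef flag: 0 (false) requires a defined result, namely the element bit width, when an input lane is zero. A sketch of one of these fragments as a full function, assuming a hypothetical name @vclz32:

define <2 x i32> @vclz32(<2 x i32>* %A) nounwind {   ; hypothetical name
  %tmp1 = load <2 x i32>, <2 x i32>* %A
  %tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)   ; i1 0: a zero lane yields 32
  ret <2 x i32> %tmp2
}

declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) nounwind readnone

On NEON this generally maps onto the vclz instruction with the matching element size.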
; ARM NEON vcls (count leading sign bits) on 64-bit vectors:
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8> %tmp1)

%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16> %tmp1)

%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32> %tmp1)

; vcls on 128-bit vectors:
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)

%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16> %tmp1)

%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32> %tmp1)
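Unlike ctpop and ctlz, vcls (count the consecutive bits after the sign bit that match it) has no generic LLVM intrinsic, so these tests call the target-specific @llvm.arm.neon.vcls.* family directly. A sketch of one fragment as a full function, assuming a hypothetical name @vcls16:

define <4 x i16> @vcls16(<4 x i16>* %A) nounwind {   ; hypothetical name
  %tmp1 = load <4 x i16>, <4 x i16>* %A
  %tmp2 = call <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16> %tmp1)   ; leading sign bits per lane
  ret <4 x i16> %tmp2
}

declare <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16>) nounwind readnone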