Lines matching full:tmp1:
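
Each match below pairs a vector load from %B with a call to the corresponding @llvm.arm.neon.vst1.* intrinsic; the trailing i32 operand is the alignment of the store address, in bytes. Where %tmp0 appears as the address operand, it is defined on nearby lines that did not match the query, presumably as a bitcast of the destination pointer to i8*. The leading numbers are line numbers within the matched file, which appears to be an LLVM ARM NEON vst1 codegen test.
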
7 %tmp1 = load <8 x i8>, <8 x i8>* %B
8 call void @llvm.arm.neon.vst1.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, i32 16)
16 %tmp1 = load <4 x i16>, <4 x i16>* %B
17 call void @llvm.arm.neon.vst1.p0i8.v4i16(i8* %tmp0, <4 x i16> %tmp1, i32 1)
25 %tmp1 = load <2 x i32>, <2 x i32>* %B
26 call void @llvm.arm.neon.vst1.p0i8.v2i32(i8* %tmp0, <2 x i32> %tmp1, i32 1)
34 %tmp1 = load <2 x float>, <2 x float>* %B
35 call void @llvm.arm.neon.vst1.p0i8.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
45 %tmp1 = load <2 x float>, <2 x float>* %B
46 call void @llvm.arm.neon.vst1.p0i8.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
56 %tmp1 = load <1 x i64>, <1 x i64>* %B
57 call void @llvm.arm.neon.vst1.p0i8.v1i64(i8* %tmp0, <1 x i64> %tmp1, i32 1)
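
The matches up to this point store 64-bit (D-register) vectors. As a minimal sketch of what one of the surrounding test functions plausibly looks like (the function name @vst1i16_sketch is illustrative, not taken from the matched file):

    declare void @llvm.arm.neon.vst1.p0i8.v4i16(i8*, <4 x i16>, i32) nounwind

    define void @vst1i16_sketch(i16* %A, <4 x i16>* %B) nounwind {
      ; the intrinsic takes the store address as a plain i8*
      %tmp0 = bitcast i16* %A to i8*
      ; load the 64-bit vector operand
      %tmp1 = load <4 x i16>, <4 x i16>* %B
      ; final operand: alignment in bytes (1 = no alignment guarantee)
      call void @llvm.arm.neon.vst1.p0i8.v4i16(i8* %tmp0, <4 x i16> %tmp1, i32 1)
      ret void
    }
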
65 %tmp1 = load <16 x i8>, <16 x i8>* %B
66 call void @llvm.arm.neon.vst1.p0i8.v16i8(i8* %A, <16 x i8> %tmp1, i32 8)
75 %tmp1 = load <8 x i16>, <8 x i16>* %B
76 call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %tmp0, <8 x i16> %tmp1, i32 32)
86 %tmp1 = load <8 x i16>, <8 x i16>* %B
87 call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %tmp0, <8 x i16> %tmp1, i32 8)
97 %tmp1 = load <4 x i32>, <4 x i32>* %B
98 call void @llvm.arm.neon.vst1.p0i8.v4i32(i8* %tmp0, <4 x i32> %tmp1, i32 1)
106 %tmp1 = load <4 x float>, <4 x float>* %B
107 call void @llvm.arm.neon.vst1.p0i8.v4f32(i8* %tmp0, <4 x float> %tmp1, i32 1)
115 %tmp1 = load <2 x i64>, <2 x i64>* %B
116 call void @llvm.arm.neon.vst1.p0i8.v2i64(i8* %tmp0, <2 x i64> %tmp1, i32 1)
124 %tmp1 = load <2 x double>, <2 x double>* %B
125 call void @llvm.arm.neon.vst1.p0i8.v2f64(i8* %tmp0, <2 x double> %tmp1, i32 1)
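
The matches from line 65 onward store 128-bit (Q-register) vectors; the pattern is the same. Note that the v8i8 and v16i8 matches pass %A directly, with no %tmp0: when the element type is i8 the destination pointer is already an i8*, so no bitcast is needed. A sketch of the Q-register form, again with an illustrative function name:

    declare void @llvm.arm.neon.vst1.p0i8.v16i8(i8*, <16 x i8>, i32) nounwind

    define void @vst1Qi8_sketch(i8* %A, <16 x i8>* %B) nounwind {
      ; load the 128-bit vector operand
      %tmp1 = load <16 x i8>, <16 x i8>* %B
      ; i32 8 = an 8-byte (64-bit) alignment hint for the store
      call void @llvm.arm.neon.vst1.p0i8.v16i8(i8* %A, <16 x i8> %tmp1, i32 8)
      ret void
    }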