    Searched full: v2i32 (Results 1 - 25 of 213)

  /external/llvm/lib/Target/Hexagon/
HexagonInstrInfoVector.td 21 def V2I32: PatLeaf<(v2i32 DoubleRegs:$R)>;
73 defm : bitconvert_64<v2i32, i64>;
75 defm : bitconvert_64<v8i8, v2i32>;
76 defm : bitconvert_64<v4i16, v2i32>;
112 [(set (v2i32 DoubleRegs:$dst),
113 (Op (v2i32 DoubleRegs:$src1), u5ImmPred:$src2))]> {
151 def: VArith_pat <A2_vaddw, add, V2I32>;
154 def: VArith_pat <A2_vsubw, sub, V2I32>;
162 def: VArith_pat <A2_andp, and, V2I32>;
    [all...]
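The Hexagon patterns above select plain vector IR onto 64-bit DoubleRegs instructions. A minimal sketch (hypothetical function names, not taken from the .td file) of the v2i32 arithmetic that VArith_pat would match:

    ; Generic vector IR; the TableGen patterns map these nodes onto
    ; A2_vaddw and A2_andp when targeting Hexagon.
    define <2 x i32> @vaddw(<2 x i32> %a, <2 x i32> %b) {
      %sum = add <2 x i32> %a, %b
      ret <2 x i32> %sum
    }
    define <2 x i32> @vandp(<2 x i32> %a, <2 x i32> %b) {
      %m = and <2 x i32> %a, %b
      ret <2 x i32> %m
    }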
  /external/llvm/test/Transforms/InstCombine/
neon-intrinsics.ll 6 ; CHECK: vld4.v2i32.p0i8({{.*}}, i32 32)
7 ; CHECK: vst4.p0i8.v2i32({{.*}}, i32 16)
15 %tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32.p0i8(i8* bitcast ([8 x i32]* @x to i8*), i32 1)
20 call void @llvm.arm.neon.vst4.p0i8.v2i32(i8* bitcast ([8 x i32]* @y to i8*), <2 x i32> %tmp2, <2 x i32> %tmp3, <2 x i32> %tmp4, <2 x i32> %tmp5, i32 1)
24 declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32.p0i8(i8*, i32) nounwind readonly
25 declare void @llvm.arm.neon.vst4.p0i8.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
  /external/llvm/test/CodeGen/AArch64/
arm64-vaddlv.ll 9 %vaddlv.i = tail call i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32> %a1) nounwind
19 %vaddlv.i = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v2i32(<2 x i32> %a1) nounwind
23 declare i64 @llvm.aarch64.neon.uaddlv.i64.v2i32(<2 x i32>) nounwind readnone
25 declare i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32>) nounwind readnone
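As an aside on the reduction tested above: saddlv.i64.v2i32 is a widening horizontal add. A hedged sketch of the equivalent generic IR (hypothetical @saddlv_equiv, not part of the test):

    ; Sign-extend both lanes to i64 and sum them into a scalar.
    define i64 @saddlv_equiv(<2 x i32> %a) {
      %wide = sext <2 x i32> %a to <2 x i64>
      %lo = extractelement <2 x i64> %wide, i32 0
      %hi = extractelement <2 x i64> %wide, i32 1
      %sum = add i64 %lo, %hi
      ret i64 %sum
    }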
arm64-vcvt_n.ll 7 %vcvt_n1 = tail call <2 x float> @llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32> %a, i32 9)
15 %vcvt_n1 = tail call <2 x float> @llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> %a, i32 12)
46 declare <2 x float> @llvm.aarch64.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32>, i32) nounwind readnone
47 declare <2 x float> @llvm.aarch64.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32>, i32) nounwind readnone
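For the fixed-point conversions above, vcvtfxu2fp.v2f32.v2i32 with n fractional bits behaves like an unsigned int-to-float conversion followed by a scale of 2^-n. A rough sketch for n = 9 (hypothetical function, assuming default round-to-nearest):

    define <2 x float> @ucvtf_n9_equiv(<2 x i32> %a) {
      %f = uitofp <2 x i32> %a to <2 x float>
      ; dividing by 2^9 only adjusts the exponent, so it adds no rounding
      %scaled = fdiv <2 x float> %f, <float 512.0, float 512.0>
      ret <2 x float> %scaled
    }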
arm64-neon-add-pairwise.ll 40 declare <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32>, <2 x i32>)
44 %tmp1 = call <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
93 define i32 @test_vaddv.v2i32(<2 x i32> %a) {
94 ; CHECK-LABEL: test_vaddv.v2i32
96 %1 = tail call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> %a)
100 declare i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32>)
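The pairwise add tested above sums adjacent lanes of the concatenated operands; a hedged generic-IR equivalent (hypothetical @addp_equiv, not from the test) is:

    ; result[0] = lhs[0] + lhs[1], result[1] = rhs[0] + rhs[1]
    define <2 x i32> @addp_equiv(<2 x i32> %lhs, <2 x i32> %rhs) {
      %even = shufflevector <2 x i32> %lhs, <2 x i32> %rhs, <2 x i32> <i32 0, i32 2>
      %odd  = shufflevector <2 x i32> %lhs, <2 x i32> %rhs, <2 x i32> <i32 1, i32 3>
      %sum  = add <2 x i32> %even, %odd
      ret <2 x i32> %sum
    }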
  /external/llvm/test/CodeGen/ARM/
vld-vst-upgrade.ll 12 %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %ptr, i32 1)
16 declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*, i32) nounwind readonly
21 %tmp1 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32(i8* %ptr, i32 1)
25 declare %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32(i8*, i32) nounwind readonly
30 %tmp1 = call %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3.v2i32(i8* %ptr, i32 1)
34 declare %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3.v2i32(i8*, i32) nounwind readonly
39 %tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8* %ptr, i32 1)
43 declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8*, i32) nounwind readonly
50 %tmp1 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i8* %ptr, <2 x i32> %A, <2 x i32> %B, i32 1, i32 1)
54 declare %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
    [all...]
2012-05-10-PreferVMOVtoVDUP32.ll 10 tail call void @llvm.arm.neon.vst1.p0i8.v2i32(i8* %0, <2 x i32> %vecinit1.i, i32 4)
14 declare void @llvm.arm.neon.vst1.p0i8.v2i32(i8*, <2 x i32>, i32) nounwind
vcvt-v8.ll 14 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtas.v2i32.v2f32(<2 x float> %tmp1)
30 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtns.v2i32.v2f32(<2 x float> %tmp1)
46 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtps.v2i32.v2f32(<2 x float> %tmp1)
62 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtms.v2i32.v2f32(<2 x float> %tmp1)
78 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtau.v2i32.v2f32(<2 x float> %tmp1)
94 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtnu.v2i32.v2f32(<2 x float> %tmp1)
110 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtpu.v2i32.v2f32(<2 x float> %tmp1)
126 %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtmu.v2i32.v2f32(<2 x float> %tmp1)
131 declare <2 x i32> @llvm.arm.neon.vcvtas.v2i32.v2f32(<2 x float>) nounwind readnone
133 declare <2 x i32> @llvm.arm.neon.vcvtns.v2i32.v2f32(<2 x float>) nounwind readnone
    [all...]
2009-08-26-ScalarToVector.ll 11 declare <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
17 %2 = call <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32> undef, <2 x i32> %1) nounwind ; <<2 x i32>> [#uses=1]
vpadd.ll 26 %tmp3 = call <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
41 declare <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
57 %tmp2 = call <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16> %tmp1)
65 %tmp2 = call <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32> %tmp1)
81 %tmp2 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %tmp1)
89 %tmp2 = call <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32> %tmp1)
167 declare <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16>) nounwind readnone
168 declare <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32>) nounwind readnone
171 declare <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16>) nounwind readnone
172 declare <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32>) nounwind readnone
    [all...]
vqshrn.ll 23 %tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftns.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
47 %tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftnu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
71 %tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftnsu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
77 declare <2 x i32> @llvm.arm.neon.vqshiftns.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
81 declare <2 x i32> @llvm.arm.neon.vqshiftnu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
85 declare <2 x i32> @llvm.arm.neon.vqshiftnsu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
107 %tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftns.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
131 %tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftnu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
155 %tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftnsu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
161 declare <2 x i32> @llvm.arm.neon.vqrshiftns.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
    [all...]
  /external/clang/test/CodeGen/
arm-neon-vcvtX.c 7 // CHECK-LABEL: call <2 x i32> @llvm.arm.neon.vcvtas.v2i32.v2f32(<2 x float> %a)
13 // CHECK-LABEL: call <2 x i32> @llvm.arm.neon.vcvtau.v2i32.v2f32(<2 x float> %a)
31 // CHECK-LABEL: call <2 x i32> @llvm.arm.neon.vcvtns.v2i32.v2f32(<2 x float> %a)
37 // CHECK-LABEL: call <2 x i32> @llvm.arm.neon.vcvtnu.v2i32.v2f32(<2 x float> %a)
55 // CHECK-LABEL: call <2 x i32> @llvm.arm.neon.vcvtps.v2i32.v2f32(<2 x float> %a)
61 // CHECK-LABEL: call <2 x i32> @llvm.arm.neon.vcvtpu.v2i32.v2f32(<2 x float> %a)
79 // CHECK-LABEL: call <2 x i32> @llvm.arm.neon.vcvtms.v2i32.v2f32(<2 x float> %a)
85 // CHECK-LABEL: call <2 x i32> @llvm.arm.neon.vcvtmu.v2i32.v2f32(<2 x float> %a)
neon-immediate-ubsan.c 21 // CHECK-AARCH64: call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> {{.*}}, i32 1)
22 // CHECK-ARMV7: call <2 x i32> @llvm.arm.neon.vqrshiftns.v2i32(<2 x i64> {{.*}}, <2 x i64> <i64 -1, i64 -1>)
arm64_vMaxMin.c 68 // CHECK: call <2 x i32> @llvm.aarch64.neon.smax.v2i32(
74 // CHECK: call <2 x i32> @llvm.aarch64.neon.umin.v2i32(
90 // CHECK@ llvm.aarch64.neon.saddlv.i64.v2i32
114 // CHECK: call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(
123 // CHECK: call i32 @llvm.aarch64.neon.umaxv.i32.v2i32(
166 // CHECK@ llvm.aarch64.neon.saddlv.i64.v2i32
188 // CHECK: call i32 @llvm.aarch64.neon.sminv.i32.v2i32(
196 // CHECK: call i32 @llvm.aarch64.neon.uminv.i32.v2i32(
204 // CHECK@ llvm.aarch64.neon.saddlv.i64.v2i32
arm64_vadd.c 8 // CHECK: llvm.aarch64.neon.saddlv.i64.v2i32
15 // CHECK: llvm.aarch64.neon.uaddlv.i64.v2i32
36 // CHECK: llvm.aarch64.neon.saddv.i32.v2i32
57 // CHECK: llvm.aarch64.neon.uaddv.i32.v2i32
arm64_vcreate.c 20 // CHECK@ llvm.aarch64.neon.saddlv.i64.v2i32
  /external/llvm/test/CodeGen/Mips/
ctlz-v.ll 4 declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1)
16 %ret = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %x, i1 true)
  /external/llvm/test/Transforms/SLPVectorizer/AArch64/
mismatched-intrinsics.ll 8 ; CHECK: call i64 @llvm.arm64.neon.saddlv.i64.v2i32
11 %vaddlv_s32.i = tail call i64 @llvm.arm64.neon.saddlv.i64.v2i32(<2 x i32> %in2) #2
18 declare i64 @llvm.arm64.neon.saddlv.i64.v2i32(<2 x i32> %in1)
  /external/llvm/test/CodeGen/SystemZ/
vec-extract-01.ll 5 ; Test a memory copy of a v2i32 (via the constant pool).
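The file body is not shown here; a plausible shape for such a test, assuming the v2i32 constant is materialized from the constant pool before being stored, is:

    ; Hypothetical reduction, not the actual SystemZ test.
    define void @copy_v2i32(<2 x i32>* %dst) {
      store <2 x i32> <i32 100000, i32 200001>, <2 x i32>* %dst
      ret void
    }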
  /external/llvm/test/CodeGen/X86/
widen_cast-5.ll 5 ; bitcast a i64 to v2i32
widen_conv-1.ll 4 ; truncate v2i64 to v2i32
widen_conv-2.ll 6 ; sign extension v2i16 to v2i32
vec_ctbits.ll 70 declare <2 x i32> @llvm.cttz.v2i32(<2 x i32>, i1)
71 declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1)
72 declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
91 %c = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %a, i1 false)
115 %c = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false)
142 %c = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a)
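The conversions described by the widen_cast-5, widen_conv-1, and widen_conv-2 comments above look like this in IR (hypothetical function names, not taken from the tests):

    define <2 x i32> @cast_i64(i64 %x) {
      %v = bitcast i64 %x to <2 x i32>        ; widen_cast-5
      ret <2 x i32> %v
    }
    define <2 x i32> @trunc_v2i64(<2 x i64> %x) {
      %v = trunc <2 x i64> %x to <2 x i32>    ; widen_conv-1
      ret <2 x i32> %v
    }
    define <2 x i32> @sext_v2i16(<2 x i16> %x) {
      %v = sext <2 x i16> %x to <2 x i32>     ; widen_conv-2
      ret <2 x i32> %v
    }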
  /external/llvm/test/CodeGen/Hexagon/vect/
vect-anyextend.ll 2 ; Used to fail with "Cannot select: 0x17300f0: v2i32 = any_extend"
  /external/llvm/lib/Target/AArch64/
AArch64CallingConvention.td 26 CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
31 CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
70 CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
79 CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
86 CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
91 CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
106 CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
119 CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
151 CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
161 CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16]
    [all...]
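The CCBitConvertToType entries above work because v2f32 and v2i32 occupy the same 64-bit D register; the relationship is the same one a bitcast expresses in IR (illustrative sketch only, hypothetical function name):

    define <2 x i32> @as_v2i32(<2 x float> %v) {
      ; reinterprets the 64 bits in place; typically no instruction is emitted
      %bits = bitcast <2 x float> %v to <2 x i32>
      ret <2 x i32> %bits
    }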
