/external/llvm/test/CodeGen/X86/
vec_ctbits.ll
     3: declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>)
     4: declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>)
     5: declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
     8: %c = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a)
    12: %c = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a)
    16: %c = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
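Note: the matches above give this test's whole surface: three bit-counting intrinsics declared on <2 x i64> and called on a vector argument. A minimal self-contained .ll sketch in the same style (the function name and the summing of results are illustrative only; the single-operand cttz/ctlz signatures follow the declarations shown above):

    define <2 x i64> @count_bits(<2 x i64> %a) {
      ; count trailing zeros, leading zeros, and set bits per 64-bit lane
      %tz  = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a)
      %lz  = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a)
      %pop = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
      ; combine the three results so none is dead-code-eliminated
      %t = add <2 x i64> %tz, %lz
      %r = add <2 x i64> %t, %pop
      ret <2 x i64> %r
    }

    declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>)
    declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>)
    declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)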
widen_conv-1.ll
     5: ; truncate v2i64 to v2i32
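Note: the comment marks a test for narrowing vector conversions; the operation itself is ordinary IR. A hedged sketch of the shape being tested (function name illustrative):

    define <2 x i32> @trunc_v2i64(<2 x i64> %a) {
      ; each 64-bit lane is truncated to its low 32 bits
      %t = trunc <2 x i64> %a to <2 x i32>
      ret <2 x i32> %t
    }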
legalizedag_vec.ll
     6: ; v2i64 is a legal type but with mmx disabled, i64 is an illegal type. When
dagcombine-buildvector.ll
     4: ; with v2i64 build_vector i32, i32.
vec_i64.ll
     4: ; Used movq to load i64 into a v2i64 when the top i64 is 0.
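Note: the comment describes the pattern under test: a 64-bit scalar load placed in the low lane of a <2 x i64> whose high lane is zero, which x86 can do with a single movq. A hedged reduction of that shape (era-appropriate typed-pointer load syntax; names illustrative):

    define <2 x i64> @load_zext_v2i64(i64* %p) {
      %s = load i64* %p                                            ; scalar 64-bit load
      %v = insertelement <2 x i64> zeroinitializer, i64 %s, i32 0  ; low lane; high lane stays 0
      ret <2 x i64> %v
    }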
/external/llvm/lib/Target/CellSPU/
SPU64InstrInfo.td
    21: // 4. v2i64 setcc results are v4i32, which can be converted to a FSM mask (TODO)
    24: // 5. The code sequences for r64 and v2i64 are probably overly conservative,
    67: // v2i64 seteq (equality): the setcc result is v4i32
    71: def v2i64: CodeFrag<(i32 (COPY_TO_REGCLASS CEQv2i64compare.Fragment, R32C))>;
    83: def : Pat<(seteq (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)), I64EQv2i64.Fragment>;
   120: def v2i64: CodeFrag<CLGTv2i64compare.Fragment>;
   132: //def : Pat<(setugt (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)),
   154: def v2i64: CodeFrag<CLGEv2i64compare.Fragment>
   [all...]
SPUCallingConv.td
    24: CCIfType<[i8,i16,i32,i64,i128,f32,f64,v16i8,v8i16,v4i32,v2i64,v4f32,v2f64],
    41: v16i8, v8i16, v4i32, v4f32, v2i64, v2f64],
    55: CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
SPUInstrInfo.td
    63: def v2i64: LoadDFormVec<v2i64>;
    95: def v2i64: LoadAFormVec<v2i64>;
   127: def v2i64: LoadXFormVec<v2i64>;
   175: def v2i64: StoreDFormVec<v2i64>;
   205: def v2i64: StoreAFormVec<v2i64>;
   [all...]
/external/llvm/test/CodeGen/ARM/
2010-06-29-PartialRedefFastAlloc.ll
    19: %0 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %arg, i32 1)
    25: declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32) nounwind readonly
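Note: the two matches give both the call site and the declaration of the NEON vld1 intrinsic for <2 x i64>: an i8* base pointer plus an i32 alignment operand (1 here, i.e. unaligned). A hedged standalone sketch built from exactly that signature:

    define <2 x i64> @load_q_unaligned(i8* %p) {
      ; the second operand is the guaranteed alignment in bytes
      %v = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %p, i32 1)
      ret <2 x i64> %v
    }

    declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32) nounwind readonly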
vshll.ll
    23: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
    47: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
    73: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 32, i32 32 >)
    79: declare <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
    83: declare <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
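Note: these matches exercise the signed/unsigned long-shift intrinsics: each <2 x i32> lane is shifted left and widened to i64, with the shift amounts passed as a constant vector (31 for plain vshll; the full source width, 32, for the vshll #32 special case). A hedged sketch using the declared signature:

    define <2 x i64> @shift_long(<2 x i32> %a) {
      ; widen each 32-bit lane to 64 bits while shifting left by 31
      %s = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %a, <2 x i32> <i32 31, i32 31>)
      ret <2 x i64> %s
    }

    declare <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone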
vpadal.ll
    80: %tmp3 = call <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
   107: %tmp3 = call <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
   121: declare <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone
   125: declare <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone
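Note: vpadal pairwise-adds adjacent <4 x i32> lanes into 64-bit sums and accumulates them onto an existing <2 x i64>; the dotted suffix names both the result and source vector types. A hedged sketch from the declarations above:

    define <2 x i64> @acc_pairwise(<2 x i64> %acc, <4 x i32> %x) {
      ; %r[i] = %acc[i] + (%x[2i] + %x[2i+1]), signed widening
      %r = call <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64> %acc, <4 x i32> %x)
      ret <2 x i64> %r
    }

    declare <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone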
vqshl.ll
   107: %tmp3 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
   143: %tmp3 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
   271: %tmp2 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
   303: %tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
   335: %tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
   357: declare <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
   362: declare <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
   367: declare <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
   473: %tmp3 = call <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
   509: %tmp3 = call <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2
   [all...]
vpadd.ll
   113: %tmp2 = call <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32> %tmp1)
   137: %tmp2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %tmp1)
   165: declare <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32>) nounwind readnone
   169: declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone
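Note: vpaddl is the non-accumulating counterpart of vpadal above: it pairwise-adds adjacent <4 x i32> lanes into <2 x i64> sums. A hedged sketch from the declared unary signature:

    define <2 x i64> @pairwise_long(<4 x i32> %x) {
      ; %r[i] = %x[2i] + %x[2i+1], unsigned widening
      %r = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %x)
      ret <2 x i64> %r
    }

    declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone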
vqadd.ll
   107: %tmp3 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
   143: %tmp3 = call <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
   160: declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
   165: declare <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
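Note: the q prefix marks saturating arithmetic: sums that overflow an i64 lane clamp to the type's extreme value instead of wrapping. A hedged sketch with the signed variant; the same shape covers the vqsub matches below:

    define <2 x i64> @sat_add(<2 x i64> %a, <2 x i64> %b) {
      ; lanes clamp to INT64_MAX / INT64_MIN on overflow rather than wrapping
      %r = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %b)
      ret <2 x i64> %r
    }

    declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>) nounwind readnone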
vqdmul.ll
   175: %tmp3 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   193: %1 = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
   198: declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
   216: %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
   234: %1 = tail call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
   239: declare <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
   257: %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
   275: %1 = tail call <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
   280: declare <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
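Note: the three intrinsics form a family: vqdmull computes a saturating doubling widening multiply of <2 x i32> lanes into <2 x i64>, while vqdmlal/vqdmlsl add or subtract that product against a <2 x i64> accumulator. A hedged sketch of the accumulate form, taken from the declared signature:

    define <2 x i64> @dmul_acc(<2 x i64> %acc, <2 x i32> %a, <2 x i32> %b) {
      ; %acc[i] + saturate(2 * %a[i] * %b[i]), widened to 64 bits
      %r = call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %acc, <2 x i32> %a, <2 x i32> %b)
      ret <2 x i64> %r
    }

    declare <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone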
vqsub.ll
   107: %tmp3 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
   143: %tmp3 = call <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
   160: declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
   165: declare <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
vshl.ll
   107: %tmp3 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
   143: %tmp3 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
   210: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
   308: %tmp2 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
   340: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
   357: declare <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
   362: declare <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
   468: %tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
   504: %tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
   600: %tmp2 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >
   [all...]
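Note: the constant operands show the NEON convention these tests probe: the shift count is a signed per-lane vector, so 63 shifts left while -64 shifts right by 64 (the vrshift variants round). A hedged sketch of the right-shift-by-negative-count form:

    define <2 x i64> @shift_right_64(<2 x i64> %a) {
      ; a negative per-lane count selects a right shift; -64 shifts signed
      ; lanes all the way down to their sign bit
      %r = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %a, <2 x i64> <i64 -64, i64 -64>)
      ret <2 x i64> %r
    }

    declare <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone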
/external/llvm/test/CodeGen/CellSPU/useful-harnesses/
vecoperations.c
     7: typedef long long v2i64 __attribute__((ext_vector_type(2))); typedef
    58: void print_v2i64(const char *str, v2i64 v) {
   126: v2i64 v2i64_shuffle(v2i64 a) {
   127:     v2i64 c2 = a.yx;
   147:     v2i64 v3 = { 691043ll, 910301513ll };
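Note: the harness uses Clang's ext_vector_type extension, under which the element swizzle a.yx swaps the two lanes. At the IR level that swizzle is just a shufflevector; a hedged sketch of the equivalent (function name illustrative):

    define <2 x i64> @swap_lanes(<2 x i64> %a) {
      ; a.yx in the C harness: lane 1 first, then lane 0
      %r = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
      ret <2 x i64> %r
    }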
/external/llvm/lib/Target/ARM/
ARMCallingConv.td
    32: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    50: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    64: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    76: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
   120: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
   130: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
   145: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
   157: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
/external/llvm/test/TableGen/
TargetInstrSpec.td
    10: def v2i64 : ValueType<128, 22>; // 2 x i64 vector value
    59: def VR128 : RegisterClass<[v2i64, v2f64],
cast.td
     9: def v2i64 : ValueType<128, 22>; // 2 x i64 vector value
    58: def VR128 : RegisterClass<[v2i64, v2f64],
/external/llvm/lib/Target/X86/
X86InstrMMX.td
   182: (i64 (vector_extract (v2i64 VR128:$src),
   188: (v2i64 (scalar_to_vector
   412: [SDTCisVT<0, v2i64>, SDTCisVT<1, x86mmx>]>>;
   414: def : Pat<(v2i64 (MMX_X86movq2dq VR64:$src)),
   415: (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
   417: def : Pat<(v2i64 (MMX_X86movq2dq (load_mmx addr:$src))),
   418: (v2i64 (MOVQI2PQIrm addr:$src))>;
   420: def : Pat<(v2i64 (MMX_X86movq2dq
   422: (v2i64 (MOVDI2PDIrm addr:$src))>;
   426: [SDTCisVT<0, x86mmx>, SDTCisVT<1, v2i64>]>>;
   [all...]
X86CallingConv.td
    40: CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
   154: CCPromoteToType<v2i64>>>>,
   157: CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
   175: CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
   194: CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
   213: CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
   235: CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
   273: CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
   282: CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
X86InstrSSE.td
   441: def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
   446: def : Pat<(v2i64 (movddup VR128:$src, (undef))),
   447: (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
   [all...]
X86InstrFragmentsSIMD.td
   188: def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
   220: (v2i64 (alignedload node:$ptr))>;
   250: def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
   302: def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
   309: (bitconvert (v2i64 (X86vzmovl
   310: (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
   316: (bitconvert (v2i64 (X86vzload node:$src)))>;
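Note: these PatFrags name recurring DAG shapes so that instruction patterns can refer to them compactly: loadv2i64 and memopv2i64 wrap (possibly alignment-checked) loads, and bc_v2i64 wraps a bitconvert. A hedged IR-level illustration of the shapes they correspond to (pre-3.7 load syntax; names illustrative):

    define <2 x i64> @frag_shapes(<2 x i64>* %p, <4 x i32> %v) {
      %l = load <2 x i64>* %p                  ; shape behind loadv2i64/memopv2i64
      %b = bitcast <4 x i32> %v to <2 x i64>   ; shape behind bc_v2i64
      %r = add <2 x i64> %l, %b
      ret <2 x i64> %r
    }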