/external/llvm/test/CodeGen/ARM/
  arguments3.ll
     4: define i64 @f(i32 %a, i128 %b) {
     5:   %tmp = call i64 @g(i128 %b)
     6:   ret i64 %tmp
     9: declare i64 @g(i128)
  str_pre-2.ll
     6: @b = external global i64*
     8: define i64 @t(i64 %a) nounwind readonly {
    12:   %0 = load i64** @b, align 4
    13:   %1 = load i64* %0, align 4
    14:   %2 = mul i64 %1, %a
    15:   ret i64 %2
/external/llvm/test/CodeGen/Alpha/
  2010-08-01-mulreduce64.ll
     3: define fastcc i64 @getcount(i64 %s) {
     4:   %tmp431 = mul i64 %s, 12884901888
     5:   ret i64 %tmp431
/external/llvm/test/CodeGen/PowerPC/
  2004-11-30-shr-var-crash.ll
     5:   %shift.upgrd.1 = zext i8 %shamt to i64 ; <i64> [#uses=1]
     6:   %tr2 = ashr i64 1, %shift.upgrd.1 ; <i64> [#uses=0]
  addc.ll
     4: define i64 @add_ll(i64 %a, i64 %b) nounwind {
     6:   %tmp.2 = add i64 %b, %a ; <i64> [#uses=1]
     7:   ret i64 %tmp.2
    14: define i64 @add_l_5(i64 %a) nounwind {
    16:   %tmp.1 = add i64 %a, 5 ; <i64> [#uses=1 [all...]
  int-fp-conv-1.ll
     3: define i64 @__fixunstfdi(ppc_fp128 %a) nounwind {
     5:   %tmp1213 = uitofp i64 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
     8:   %tmp282930 = zext i32 %tmp2829 to i64 ; <i64> [#uses=1]
     9:   %tmp32 = add i64 %tmp282930, 0 ; <i64> [#uses=1]
    10:   ret i64 %tmp32
/external/llvm/test/CodeGen/SystemZ/
  01-RetImm.ll
    11: define i64 @foo1() {
    13:   ret i64 1
    16: define i64 @foo2() {
    18:   ret i64 65535
    21: define i64 @foo3() {
    23:   ret i64 131072
    26: define i64 @foo4() {
    28:   ret i64 8589934592
    31: define i64 @foo5() {
    33:   ret i64 56294995342131 [all...]
/external/llvm/test/CodeGen/X86/
  2008-07-16-CoalescerCrash.ll
     3: %struct.SV = type { i8*, i64, i64 }
     8: declare fastcc i64 @Perl_utf8n_to_uvuni(i8*, i64, i64*, i64) nounwind
    10: define fastcc i8* @Perl_pv_uni_display(%struct.SV* %dsv, i8* %spv, i64 %len, i64 %pvlim, i64 %flags) nounwind {
    15:   tail call fastcc i64 @Perl_utf8n_to_uvuni( i8* null, i64 13, i64* null, i64 255 ) nounwind ; <i64>:0 [#uses=1 [all...]
  2008-08-19-SubAndFetch.ll
     3: @var = external global i64 ; <i64*> [#uses=1]
    10:   atomicrmw sub i64* @var, i64 1 monotonic
  2011-06-03-x87chain.ll
     3: define float @chainfail1(i64* nocapture %a, i64* nocapture %b, i32 %x, i32 %y, float* nocapture %f) nounwind uwtable noinline ssp {
     5:   %tmp1 = load i64* %a, align 8
     8:   %conv = sitofp i64 %tmp1 to float
    13:   %conv5 = sext i32 %div to i64
    14:   store i64 %conv5, i64* %b, align 8
    18: define float @chainfail2(i64* nocapture %a, i64* nocapture %b, i32 %x, i32 %y, float* nocapture %f) nounwind uwtable noinline ssp {
    21:   store i64 0, i64* %b, align [all...]
  sse-align-10.ll
     3: define <2 x i64> @bar(<2 x i64>* %p) nounwind {
     4:   %t = load <2 x i64>* %p, align 8
     5:   ret <2 x i64> %t
  sse-align-5.ll
     3: define <2 x i64> @bar(<2 x i64>* %p) nounwind {
     4:   %t = load <2 x i64>* %p
     5:   ret <2 x i64> %t
  sse-align-7.ll
     5: define void @bar(<2 x i64>* %p, <2 x i64> %x) nounwind {
     6:   store <2 x i64> %x, <2 x i64>* %p
  sse-align-8.ll
     3: define void @bar(<2 x i64>* %p, <2 x i64> %x) nounwind {
     4:   store <2 x i64> %x, <2 x i64>* %p, align 8
  vec_set-A.ll
     3: define <2 x i64> @test1() nounwind {
     5:   ret <2 x i64> < i64 1, i64 0 >
  vsplit-and.ll
     3: define void @t0(<2 x i64>* %dst, <2 x i64> %src1, <2 x i64> %src2) nounwind readonly {
     7:   %cmp1 = icmp ne <2 x i64> %src1, zeroinitializer
     8:   %cmp2 = icmp ne <2 x i64> %src2, zeroinitializer
    10:   %t2 = sext <2 x i1> %t1 to <2 x i64>
    11:   store <2 x i64> %t2, <2 x i64>* %dst
    15: define void @t2(<3 x i64>* %dst, <3 x i64> %src1, <3 x i64> %src2) nounwind readonly [all...]
  2009-07-19-AsmExtraOperands.ll
     4: define i32 @atomic_cmpset_long(i64* %dst, i64 %exp, i64 %src) nounwind ssp noredzone noimplicitfloat {
     6:   %0 = call i8 asm sideeffect "\09lock ; \09\09\09cmpxchgq $2,$1 ;\09 sete\09$0 ;\09\091:\09\09\09\09# atomic_cmpset_long", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* undef, i64 undef, i64 undef, i64* undef) nounwind ; <i8> [#uses=0]
  2009-08-02-mmx-scalar-to-vector.ll
     5: define <1 x i64> @test(i64 %t) {
     7:   %t1 = insertelement <1 x i64> undef, i64 %t, i32 0
     8:   %t0 = bitcast <1 x i64> %t1 to x86_mmx
    10:   %t3 = bitcast x86_mmx %t2 to <1 x i64>
    11:   ret <1 x i64> %t3
  h-register-addressing-64.ll
     5: define double @foo8(double* nocapture inreg %p, i64 inreg %x) nounwind readonly {
     6:   %t0 = lshr i64 %x, 8
     7:   %t1 = and i64 %t0, 255
     8:   %t2 = getelementptr double* %p, i64 %t1
    12: define float @foo4(float* nocapture inreg %p, i64 inreg %x) nounwind readonly {
    13:   %t0 = lshr i64 %x, 8
    14:   %t1 = and i64 %t0, 255
    15:   %t2 = getelementptr float* %p, i64 %t1
    19: define i16 @foo2(i16* nocapture inreg %p, i64 inreg %x) nounwind readonly {
    20:   %t0 = lshr i64 %x, [all...]
  longlong-deadload.ll
     4: define void @test(i64* %P) nounwind {
     9:   %tmp1 = load i64* %P, align 8 ; <i64> [#uses=1]
    10:   %tmp2 = xor i64 %tmp1, 1 ; <i64> [#uses=1]
    11:   store i64 %tmp2, i64* %P, align 8
  vec_set-8.ll
     7: define <2 x i64> @test(i64 %i) nounwind {
     9:   %tmp10 = insertelement <2 x i64> undef, i64 %i, i32 0
    10:   %tmp11 = insertelement <2 x i64> %tmp10, i64 0, i32 1
    11:   ret <2 x i64> %tmp11
/external/llvm/test/Feature/
  fold-fpcast.ll
    11: define i64 @test3() {
    12:   ret i64 bitcast (double 0x400921FB4D12D84A to i64)
    16:   ret double bitcast (i64 42 to double)
/external/llvm/test/Transforms/InstCombine/
  2005-03-04-ShiftOverflow.ll
     4: define i1 @test(i64 %tmp.169) {
     5:   %tmp.1710 = lshr i64 %tmp.169, 1 ; <i64> [#uses=1]
     6:   %tmp.1912 = icmp ugt i64 %tmp.1710, 0 ; <i1> [#uses=1]
/external/llvm/test/CodeGen/Generic/
  multiple-return-values-cross-block-with-invoke.ll
     3: declare { i64, double } @wild()
     5: define void @foo(i64* %p, double* %q) nounwind {
     6:   %t = invoke { i64, double } @wild() to label %normal unwind label %handler
     9:   %mrv_gr = extractvalue { i64, double } %t, 0
    10:   store i64 %mrv_gr, i64* %p
    11:   %mrv_gr12681 = extractvalue { i64, double } %t, 1
/external/llvm/test/ExecutionEngine/
  test-shift.ll
    11:   %t2.s.upgrd.3 = shl i64 1, 4 ; <i64> [#uses=0]
    12:   %t2.upgrd.4 = shl i64 1, 5 ; <i64> [#uses=0]
    19:   %tr1.l = ashr i64 1, 4 ; <i64> [#uses=0]
    20:   %shift.upgrd.7 = zext i8 %shamt to i64 ; <i64> [#uses=1]
    21:   %tr2.l = ashr i64 1, %shift.upgrd.7 ; <i64> [#uses=0 [all...]