/external/llvm/test/CodeGen/Mips/
shift-parts.ll:
   3: define i64 @shl0(i64 %a, i32 %b) nounwind readnone {
   7:   %sh_prom = zext i32 %b to i64
   8:   %shl = shl i64 %a, %sh_prom
   9:   ret i64 %shl
  12: define i64 @shr1(i64 %a, i32 %b) nounwind readnone {
  16:   %sh_prom = zext i32 %b to i64
  17:   %shr = lshr i64 %a, %sh_prom
  18:   ret i64 %shr
  ...

2008-06-05-Carry.ll:
   3: define i64 @add64(i64 %u, i64 %v) nounwind {
   9:   %tmp2 = add i64 %u, %v
  10:   ret i64 %tmp2
  13: define i64 @sub64(i64 %u, i64 %v) nounwind {
  20:   %tmp2 = sub i64 %u, %v
  21:   ret i64 %tmp2
  ...

inlineasm64.ll:
   3: @gl2 = external global i64
   4: @gl1 = external global i64
   5: @gl0 = external global i64
  11:   %0 = load i64* @gl1, align 8
  12:   %1 = load i64* @gl0, align 8
  13:   %2 = tail call i64 asm "daddu $0, $1, $2", "=r,r,r"(i64 %0, i64 %1) nounwind
  14:   store i64 %2, i64* @gl2, align ...

mips64extins.ll:
   3: define i64 @dext(i64 %i) nounwind readnone {
   6:   %shr = lshr i64 %i, 5
   7:   %and = and i64 %shr, 1023
   8:   ret i64 %and
  11: define i64 @dextm(i64 %i) nounwind readnone {
  14:   %shr = lshr i64 %i, 5
  15:   %and = and i64 %shr, 17179869183
  16:   ret i64 %and
  ...

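Note: the masks above are 2^10 - 1 and 2^34 - 1, so each (lshr, and) pair is the usual bit-field extract idiom (x >> pos) & (2^width - 1) that the MIPS64 dext family of instructions implements. A purely illustrative sketch of the same idiom with a different position and width (function name hypothetical, not part of the file):

; Illustrative only: extract a 16-bit field starting at bit 12,
; i.e. (x >> 12) & 0xffff; a MIPS64 backend can select this as one dext.
define i64 @extract_field_sketch(i64 %x) nounwind readnone {
  %shr = lshr i64 %x, 12
  %and = and i64 %shr, 65535
  ret i64 %and
}
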
/external/llvm/test/CodeGen/X86/
hipe-cc64.ll:
   5: define void @zap(i64 %a, i64 %b) nounwind {
  13:   %0 = call cc 11 {i64, i64, i64} @addfour(i64 undef, i64 undef, i64 %a, i64 %b, i64 8, i64 9 ...

extmul64.ll:
   3: define i64 @i32_sext_i64(i32 %a, i32 %b) {
   4:   %aa = sext i32 %a to i64
   5:   %bb = sext i32 %b to i64
   6:   %cc = mul i64 %aa, %bb
   7:   ret i64 %cc
   9: define i64 @i32_zext_i64(i32 %a, i32 %b) {
  10:   %aa = zext i32 %a to i64
  11:   %bb = zext i32 %b to i64
  12:   %cc = mul i64 %aa, %bb
  13:   ret i64 %cc
  ...

insertelement-legalize.ll:
   4: define void @test(<2 x i64> %val, <2 x i64>* %dst, i64 %x) nounwind {
   6:   %tmp4 = insertelement <2 x i64> %val, i64 %x, i32 0 ; <<2 x i64>> [#uses=1]
   7:   %add = add <2 x i64> %tmp4, %val ; <<2 x i64>> [#uses=1]
   8:   store <2 x i64> %add, <2 x i64>* %dst
  ...

zext-inreg-1.ll:
   9: define i64 @l(i64 %d) nounwind {
  10:   %e = add i64 %d, 1
  11:   %retval = and i64 %e, 1099511627775
  12:   ret i64 %retval
  14: define i64 @m(i64 %d) nounwind {
  15:   %e = add i64 %d, 1
  16:   %retval = and i64 %e, 281474976710655
  17:   ret i64 %retval
  ...

2010-02-04-SchedulerBug.ll:
   4: %struct.a_t = type { i8*, i64*, i8*, i32, i32, i64*, i64*, i64* }
   5: %struct.b_t = type { i32, i32, i32, i32, i64, i64, i64, i64 }
   7: define void @t(i32 %cNum, i64 %max) nounwind optsize ssp noimplicitfloat {
  10:   %1 = getelementptr inbounds %struct.b_t* %0, i32 %cNum, i32 5 ; <i64*> [#uses=1]
  ...

2007-07-18-Vector-Extract.ll:
   5: define i64 @foo_0(<2 x i64>* %val) {
   7:   %val12 = getelementptr <2 x i64>* %val, i32 0, i32 0 ; <i64*> [#uses=1]
   8:   %tmp7 = load i64* %val12 ; <i64> [#uses=1]
   9:   ret i64 %tmp7
  12: define i64 @foo_1(<2 x i64>* %val) {
  14:   %tmp2.gep = getelementptr <2 x i64>* %val, i32 0, i32 1 ; <i64*> [#uses=1]
  ...

2009-01-26-WrongCheck.ll:
   8:   %t712 = zext i32 %t711 to i64 ; <i64> [#uses=1]
   9:   %t804 = select i1 %t801, i64 0, i64 %t712 ; <i64> [#uses=1]
  10:   store i64 %t804, i64* null
  12:   %t814 = sext i32 %t711 to i64 ; <i64> [#uses=1]
  13:   %t816 = select i1 %t815, i64 0, i64 %t814 ; <i64> [#uses=1]
  ...

adde-carry.ll:
   3: define void @a(i64* nocapture %s, i64* nocapture %t, i64 %a, i64 %b, i64 %c) nounwind {
   5:   %0 = zext i64 %a to i128
   6:   %1 = zext i64 %b to i128
   8:   %3 = zext i64 %c to i128
  12:   %7 = trunc i128 %6 to i64
  13:   store i64 %7, i64* %s, align ...

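Note: widening the i64 operands to i128 before adding is how this test makes a 64-bit add's carry observable, so the backend has to lower the pattern with add-with-carry instructions rather than a real 128-bit addition. A minimal sketch of the idea, not taken from the file (function name hypothetical):

; Illustrative only: the carry of a 64-bit add shows up as bit 64 of the
; widened sum, which x86-64 can produce with add + adc/setc.
define i64 @carry_of_add_sketch(i64 %a, i64 %b) nounwind {
  %wa = zext i64 %a to i128
  %wb = zext i64 %b to i128
  %sum = add i128 %wa, %wb
  %hi = lshr i128 %sum, 64
  %carry = trunc i128 %hi to i64 ; 0 or 1
  ret i64 %carry
}
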
fast-cc-pass-in-regs.ll:
   4: declare x86_fastcallcc i64 @callee(i64 inreg)
   6: define i64 @caller() {
   7:   %X = call x86_fastcallcc i64 @callee( i64 4294967299 ) ; <i64> [#uses=1]
   9:   ret i64 %X
  12: define x86_fastcallcc i64 @caller2(i64 inreg %X) {
  13:   ret i64 %X
  ...

/external/llvm/test/CodeGen/PowerPC/
frame-size.ll:
   2: target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
   4: define i64 @foo() nounwind {
  15:   %s1 = call i64 @bar(i8* %x1) nounwind
  16:   %s2 = call i64 @bar(i8* %x1) nounwind
  17:   %s3 = call i64 @bar(i8* %x1) nounwind
  18:   %s4 = call i64 @bar(i8* %x1) nounwind
  19:   %s5 = call i64 @bar(i8* %x1) nounwind
  20:   %s6 = call i64 @bar(i8* %x1) nounwind
  21:   %s7 = call i64 @bar(i8* %x1) nounwind
  22:   %s8 = call i64 @bar(i8* %x1) nounwind
  ...

2007-03-24-cntlzd.ll:
   3: define i32 @_ZNK4llvm5APInt17countLeadingZerosEv(i64 *%t) nounwind {
   4:   %tmp19 = load i64* %t
   5:   %tmp22 = tail call i64 @llvm.ctlz.i64( i64 %tmp19, i1 true ) ; <i64> [#uses=1]
   6:   %tmp23 = trunc i64 %tmp22 to i32
  12: declare i64 @llvm.ctlz.i64(i64, i1) ...

2008-02-09-LocalRegAllocAssert.ll:
   3: define i32 @bork(i64 %foo, i64 %bar) {
   5:   %tmp = load i64* null, align 8 ; <i64> [#uses=2]
   6:   %tmp2 = icmp ule i64 %tmp, 0 ; <i1> [#uses=1]
   7:   %min = select i1 %tmp2, i64 %tmp, i64 0 ; <i64> [#uses=1]
   8:   store i64 %min, i64* null, align ...

rotl-64.ll:
   5: define i64 @t1(i64 %A) {
   6:   %tmp1 = lshr i64 %A, 57
   7:   %tmp2 = shl i64 %A, 7
   8:   %tmp3 = or i64 %tmp1, %tmp2
   9:   ret i64 %tmp3
  12: define i64 @t2(i64 %A, i8 zeroext %Amt) {
  13:   %Amt1 = zext i8 %Amt to i64
  14:   %tmp1 = lshr i64 %A, %Amt...

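Note: in t1 the two shift amounts sum to 64 (7 + 57), which is exactly the i64 rotate-left-by-7 pattern the PowerPC backend is presumably expected to match as a single rotate; t2 exercises the variable-amount form. A minimal sketch of that general pattern, not copied from the file (name hypothetical, assumes 0 < %Amt < 64 so neither shift amount reaches the bit width):

; Illustrative only: rotate %A left by %Amt via two complementary shifts.
define i64 @rotl64_sketch(i64 %A, i64 %Amt) {
  %lo = shl i64 %A, %Amt
  %inv = sub i64 64, %Amt
  %hi = lshr i64 %A, %inv
  %rot = or i64 %lo, %hi
  ret i64 %rot
}
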
/external/llvm/test/Analysis/ScalarEvolution/
xor-and.ll:
   2: ; RUN: | grep "\--> (zext i4 (-8 + (trunc i64 (8 \* %x) to i4)) to i64)"
   5: ; --> (zext i4 (-1 + (-1 * (trunc i64 (8 * %x) to i4))) to i64)
   7: define i64 @foo(i64 %x) {
   8:   %a = shl i64 %x, 3
   9:   %t = and i64 %a, 8
  10:   %z = xor i64 %t, 8
  11:   ret i64 %z
  ...

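Note (a hedged reading of what the RUN line expects): %a is 8 * %x, so its low three bits are always clear and trunc i64 (8 * %x) to i4 is either 0 or 8. The and with 8 keeps exactly that value, and xor-ing a value whose only possible set bit is bit 3 with 8 is the same as adding 8, i.e. adding -8 modulo 16. That is why ScalarEvolution can print the and/xor pair as (zext i4 (-8 + (trunc i64 (8 * %x) to i4)) to i64).
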
/external/llvm/test/Transforms/InstCombine/
x86-crc32-demanded.ll:
   6: define i64 @test() nounwind {
   9: ; CHECK: tail call i64 @llvm.x86.sse42.crc32.64.64
  12:   %0 = tail call i64 @llvm.x86.sse42.crc32.64.64(i64 0, i64 4) nounwind
  13:   %1 = and i64 %0, 4294967295
  14:   ret i64 %1
  17: declare i64 @llvm.x86.sse42.crc32.64.64(i64, i64) nounwind readnone
  ...

/external/llvm/test/Transforms/ScalarRepl/
sroa-fca.ll:
   4: define i64 @test({i32, i32} %A) {
   5:   %X = alloca i64
   6:   %Y = bitcast i64* %X to {i32,i32}*
   9:   %Q = load i64* %X
  10:   ret i64 %Q
  13: define {i32,i32} @test2(i64 %A) {
  14:   %X = alloca i64
  15:   %Y = bitcast i64* %X to {i32,i32}*
  16:   store i64 %A, i64* %X
  ...

/external/llvm/test/Assembler/
ConstantExprNoFold.ll:
   9: @A = global i64 0
  10: @B = global i64 0
  16: ; CHECK: @C = global i1 icmp eq (i64* getelementptr inbounds (i64* @A, i64 1), i64* @B)
  17: @C = global i1 icmp eq (i64* getelementptr inbounds (i64* @A, i64 1), i64* @B)
  ...

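Note: the CHECK line insists that this compare stays symbolic, presumably because @B may be laid out immediately after @A, in which case the one-past-the-end address of @A really can equal the address of @B. For contrast, comparing the base addresses of two distinct globals is foldable; a purely illustrative sketch (globals hypothetical, not from the file):

; Illustrative only: @X and @Y are distinct non-weak globals, so their
; addresses are known to differ and this initializer can fold to false.
@X = global i64 0
@Y = global i64 0
@D = global i1 icmp eq (i64* @X, i64* @Y)
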
/external/llvm/test/MC/Mips/
mips64extins.ll:
   5: define i64 @dext(i64 %i) nounwind readnone {
   8:   %shr = lshr i64 %i, 5
   9:   %and = and i64 %shr, 1023
  10:   ret i64 %and
  13: define i64 @dextu(i64 %i) nounwind readnone {
  16:   %shr = lshr i64 %i, 34
  17:   %and = and i64 %shr, 63
  18:   ret i64 %and
  ...

/external/llvm/test/CodeGen/ARM/
arguments8.ll:
   4: define i64 @f(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i64 %b) {
   5:   %tmp = call i64 @g(i32 %a2, i32 %a3, i32 %a4, i32 %a5, i64 %b)
   6:   ret i64 %tmp
   9: declare i64 @g(i32, i32, i32, i32, i64)

carry.ll:
   3: define i64 @f1(i64 %a, i64 %b) {
   8:   %tmp = sub i64 %a, %b
   9:   ret i64 %tmp
  12: define i64 @f2(i64 %a, i64 %b) {
  18:   %tmp1 = shl i64 %a, 1
  19:   %tmp2 = sub i64 %tmp1, %...

longMAC.ll:
   4: define i64 @MACLongTest1(i32 %a, i32 %b, i64 %c) {
   7:   %conv = zext i32 %a to i64
   8:   %conv1 = zext i32 %b to i64
   9:   %mul = mul i64 %conv1, %conv
  10:   %add = add i64 %mul, %c
  11:   ret i64 %add
  14: define i64 @MACLongTest2(i32 %a, i32 %b, i64 %c) {
  17:   %conv = sext i32 %a to i64
  ...