; RUN: llc -mtriple=powerpc64-bgq-linux -mcpu=a2 < %s | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-bgq-linux"

%struct.BG_CoordinateMapping_t = type { [4 x i8] }

; Function Attrs: alwaysinline inlinehint nounwind
define zeroext i32 @Kernel_RanksToCoords(i64 %mapsize, %struct.BG_CoordinateMapping_t* %map, i64* %numentries) #0 {
entry:
  %mapsize.addr = alloca i64, align 8
  %map.addr = alloca %struct.BG_CoordinateMapping_t*, align 8
  %numentries.addr = alloca i64*, align 8
  %r0 = alloca i64, align 8
  %r3 = alloca i64, align 8
  %r4 = alloca i64, align 8
  %r5 = alloca i64, align 8
  %tmp = alloca i64, align 8
  store i64 %mapsize, i64* %mapsize.addr, align 8
  store %struct.BG_CoordinateMapping_t* %map, %struct.BG_CoordinateMapping_t** %map.addr, align 8
  store i64* %numentries, i64** %numentries.addr, align 8
  store i64 1055, i64* %r0, align 8
  %0 = load i64, i64* %mapsize.addr, align 8
  store i64 %0, i64* %r3, align 8
  %1 = load %struct.BG_CoordinateMapping_t*, %struct.BG_CoordinateMapping_t** %map.addr, align 8
  %2 = ptrtoint %struct.BG_CoordinateMapping_t* %1 to i64
  store i64 %2, i64* %r4, align 8
  %3 = load i64*, i64** %numentries.addr, align 8
  %4 = ptrtoint i64* %3 to i64
  store i64 %4, i64* %r5, align 8
  %5 = load i64, i64* %r0, align 8
  %6 = load i64, i64* %r3, align 8
  %7 = load i64, i64* %r4, align 8
  %8 = load i64, i64* %r5, align 8
  %9 = call { i64, i64, i64, i64 } asm sideeffect "sc", "={r0},={r3},={r4},={r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 %5, i64 %6, i64 %7, i64 %8) #1, !srcloc !0

; CHECK-LABEL: @Kernel_RanksToCoords

; These need to be 64-bit loads, not 32-bit loads (not lwz).
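; The asm constraints tie each i64 operand to a full 64-bit GPR
; ({r0},{r3},{r4},{r5}), so the values spilled to the stack slots above must
; reach the "sc" via ld; an lwz would only bring in 32 bits of each value.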
; CHECK-NOT: lwz

; CHECK: #APP
; CHECK: sc
; CHECK: #NO_APP

; CHECK: blr

  %asmresult = extractvalue { i64, i64, i64, i64 } %9, 0
  %asmresult1 = extractvalue { i64, i64, i64, i64 } %9, 1
  %asmresult2 = extractvalue { i64, i64, i64, i64 } %9, 2
  %asmresult3 = extractvalue { i64, i64, i64, i64 } %9, 3
  store i64 %asmresult, i64* %r0, align 8
  store i64 %asmresult1, i64* %r3, align 8
  store i64 %asmresult2, i64* %r4, align 8
  store i64 %asmresult3, i64* %r5, align 8
  %10 = load i64, i64* %r3, align 8
  store i64 %10, i64* %tmp
  %11 = load i64, i64* %tmp
  %conv = trunc i64 %11 to i32
  ret i32 %conv
}

declare void @mtrace()

define signext i32 @main(i32 signext %argc, i8** %argv) {
entry:
  %argc.addr = alloca i32, align 4
  store i32 %argc, i32* %argc.addr, align 4
  %0 = call { i64, i64 } asm sideeffect "sc", "={r0},={r3},{r0},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1076)
  %asmresult1.i = extractvalue { i64, i64 } %0, 1
  %conv.i = trunc i64 %asmresult1.i to i32
  %cmp = icmp eq i32 %conv.i, 0
  br i1 %cmp, label %if.then, label %if.end

; CHECK-LABEL: @main

; CHECK-DAG: mr [[REG:[0-9]+]], 3
; CHECK-DAG: li 0, 1076
; CHECK: stw [[REG]],

; CHECK: #APP
; CHECK: sc
; CHECK: #NO_APP

; CHECK: cmpwi {{[0-9]+}}, [[REG]], 1

; CHECK: blr

if.then:                                          ; preds = %entry
  call void @mtrace()
  %.pre = load i32, i32* %argc.addr, align 4
  br label %if.end

if.end:                                           ; preds = %if.then, %entry
  %1 = phi i32 [ %.pre, %if.then ], [ %argc, %entry ]
  %cmp1 = icmp slt i32 %1, 2
  br i1 %cmp1, label %usage, label %if.end40

usage:
  ret i32 8

if.end40:
  ret i32 0
}

attributes #0 = { alwaysinline inlinehint nounwind }
attributes #1 = { nounwind }

!0 = !{i32 -2146895770}