; RUN: llc -mcpu=pwr7 -O0 -disable-fp-elim -fast-isel=false < %s | FileCheck %s
;
; Tests 64-bit big-endian PowerPC (ppc64 ELF) passing of small aggregates
; by value: each byval struct of 1-7 bytes is passed in a single GPR
; (r3-r9), right-justified, and spilled/reloaded via the caller's and
; callee's stack frames at the offsets pinned by the CHECK lines below.
; The s* structs are naturally aligned; the t* structs are packed
; (align 1) to also exercise unaligned register-sized loads/stores.

target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

; Naturally aligned struct types (sizes 1-8 bytes).
%struct.s1 = type { i8 }
%struct.s2 = type { i16 }
%struct.s4 = type { i32 }
%struct.t1 = type { i8 }
%struct.t3 = type <{ i16, i8 }>
%struct.t5 = type <{ i32, i8 }>
%struct.t6 = type <{ i32, i16 }>
%struct.t7 = type <{ i32, i16, i8 }>
%struct.s3 = type { i16, i8 }
%struct.s5 = type { i32, i8 }
%struct.s6 = type { i32, i16 }
%struct.s7 = type { i32, i16, i8 }
%struct.t2 = type <{ i16 }>
%struct.t4 = type <{ i32 }>

; Initial values copied into the locals passed byval below.
@caller1.p1 = private unnamed_addr constant %struct.s1 { i8 1 }, align 1
@caller1.p2 = private unnamed_addr constant %struct.s2 { i16 2 }, align 2
@caller1.p3 = private unnamed_addr constant { i16, i8, i8 } { i16 4, i8 8, i8 undef }, align 2
@caller1.p4 = private unnamed_addr constant %struct.s4 { i32 16 }, align 4
@caller1.p5 = private unnamed_addr constant { i32, i8, [3 x i8] } { i32 32, i8 64, [3 x i8] undef }, align 4
@caller1.p6 = private unnamed_addr constant { i32, i16, [2 x i8] } { i32 128, i16 256, [2 x i8] undef }, align 4
@caller1.p7 = private unnamed_addr constant { i32, i16, i8, i8 } { i32 512, i16 1024, i8 -3, i8 undef }, align 4
@caller2.p1 = private unnamed_addr constant %struct.t1 { i8 1 }, align 1
@caller2.p2 = private unnamed_addr constant { i16 } { i16 2 }, align 1
@caller2.p3 = private unnamed_addr constant %struct.t3 <{ i16 4, i8 8 }>, align 1
@caller2.p4 = private unnamed_addr constant { i32 } { i32 16 }, align 1
@caller2.p5 = private unnamed_addr constant %struct.t5 <{ i32 32, i8 64 }>, align 1
@caller2.p6 = private unnamed_addr constant %struct.t6 <{ i32 128, i16 256 }>, align 1
@caller2.p7 = private unnamed_addr constant %struct.t7 <{ i32 512, i16 1024, i8 -3 }>, align 1

; Caller for the naturally-aligned structs: seven byval arguments of
; sizes 1, 2, 4, 4, 8, 8, 8 bytes are loaded from the local copies into
; r3-r9 with width-appropriate loads (lbz/lhz/lwz/ld).
define i32 @caller1() nounwind {
entry:
  %p1 = alloca %struct.s1, align 1
  %p2 = alloca %struct.s2, align 2
  %p3 = alloca %struct.s3, align 2
  %p4 = alloca %struct.s4, align 4
  %p5 = alloca %struct.s5, align 4
  %p6 = alloca %struct.s6, align 4
  %p7 = alloca %struct.s7, align 4
  %0 = bitcast %struct.s1* %p1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
  %1 = bitcast %struct.s2* %p2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast (%struct.s2* @caller1.p2 to i8*), i64 2, i32 2, i1 false)
  %2 = bitcast %struct.s3* %p3 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast ({ i16, i8, i8 }* @caller1.p3 to i8*), i64 4, i32 2, i1 false)
  %3 = bitcast %struct.s4* %p4 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast (%struct.s4* @caller1.p4 to i8*), i64 4, i32 4, i1 false)
  %4 = bitcast %struct.s5* %p5 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast ({ i32, i8, [3 x i8] }* @caller1.p5 to i8*), i64 8, i32 4, i1 false)
  %5 = bitcast %struct.s6* %p6 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i32 4, i1 false)
  %6 = bitcast %struct.s7* %p7 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i32 4, i1 false)
  %call = call i32 @callee1(%struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7)
  ret i32 %call

; CHECK: ld 9, 112(31)
; CHECK: ld 8, 120(31)
; CHECK: ld 7, 128(31)
; CHECK: lwz 6, 136(31)
; CHECK: lwz 5, 144(31)
; CHECK: lhz 4, 152(31)
; CHECK: lbz 3, 160(31)
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

; Callee for the naturally-aligned structs: the incoming GPRs are stored
; to the parameter save area (right-justified within each doubleword, so
; e.g. the 1-byte s1 in r3 lands at offset 55), then the first field of
; each struct is reloaded and summed.
define internal i32 @callee1(%struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind {
entry:
  %a = getelementptr inbounds %struct.s1* %v1, i32 0, i32 0
  %0 = load i8* %a, align 1
  %conv = zext i8 %0 to i32
  %a1 = getelementptr inbounds %struct.s2* %v2, i32 0, i32 0
  %1 = load i16* %a1, align 2
  %conv2 = sext i16 %1 to i32
  %add = add nsw i32 %conv, %conv2
  %a3 = getelementptr inbounds %struct.s3* %v3, i32 0, i32 0
  %2 = load i16* %a3, align 2
  %conv4 = sext i16 %2 to i32
  %add5 = add nsw i32 %add, %conv4
  %a6 = getelementptr inbounds %struct.s4* %v4, i32 0, i32 0
  %3 = load i32* %a6, align 4
  %add7 = add nsw i32 %add5, %3
  %a8 = getelementptr inbounds %struct.s5* %v5, i32 0, i32 0
  %4 = load i32* %a8, align 4
  %add9 = add nsw i32 %add7, %4
  %a10 = getelementptr inbounds %struct.s6* %v6, i32 0, i32 0
  %5 = load i32* %a10, align 4
  %add11 = add nsw i32 %add9, %5
  %a12 = getelementptr inbounds %struct.s7* %v7, i32 0, i32 0
  %6 = load i32* %a12, align 4
  %add13 = add nsw i32 %add11, %6
  ret i32 %add13

; CHECK: std 9, 96(1)
; CHECK: std 8, 88(1)
; CHECK: std 7, 80(1)
; CHECK: stw 6, 76(1)
; CHECK: stw 5, 68(1)
; CHECK: sth 4, 62(1)
; CHECK: stb 3, 55(1)
; CHECK: lha {{[0-9]+}}, 62(1)
; CHECK: lha {{[0-9]+}}, 68(1)
; CHECK: lbz {{[0-9]+}}, 55(1)
; CHECK: lwz {{[0-9]+}}, 76(1)
; CHECK: lwz {{[0-9]+}}, 80(1)
; CHECK: lwz {{[0-9]+}}, 88(1)
; CHECK: lwz {{[0-9]+}}, 96(1)
}

; Caller for the packed (align-1) structs: because the locals are only
; byte-aligned, sizes 3, 5, 6 and 7 cannot use a single register-width
; load; the pieces are first assembled on the stack with narrow stores
; (the stb/sth/stw CHECKs) and then loaded as doublewords.
define i32 @caller2() nounwind {
entry:
  %p1 = alloca %struct.t1, align 1
  %p2 = alloca %struct.t2, align 1
  %p3 = alloca %struct.t3, align 1
  %p4 = alloca %struct.t4, align 1
  %p5 = alloca %struct.t5, align 1
  %p6 = alloca %struct.t6, align 1
  %p7 = alloca %struct.t7, align 1
  %0 = bitcast %struct.t1* %p1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i32 1, i1 false)
  %1 = bitcast %struct.t2* %p2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ({ i16 }* @caller2.p2 to i8*), i64 2, i32 1, i1 false)
  %2 = bitcast %struct.t3* %p3 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* bitcast (%struct.t3* @caller2.p3 to i8*), i64 3, i32 1, i1 false)
  %3 = bitcast %struct.t4* %p4 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* bitcast ({ i32 }* @caller2.p4 to i8*), i64 4, i32 1, i1 false)
  %4 = bitcast %struct.t5* %p5 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* bitcast (%struct.t5* @caller2.p5 to i8*), i64 5, i32 1, i1 false)
  %5 = bitcast %struct.t6* %p6 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i32 1, i1 false)
  %6 = bitcast %struct.t7* %p7 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i32 1, i1 false)
  %call = call i32 @callee2(%struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7)
  ret i32 %call

; CHECK: stb {{[0-9]+}}, 71(1)
; CHECK: sth {{[0-9]+}}, 69(1)
; CHECK: stb {{[0-9]+}}, 87(1)
; CHECK: stw {{[0-9]+}}, 83(1)
; CHECK: sth {{[0-9]+}}, 94(1)
; CHECK: stw {{[0-9]+}}, 90(1)
; CHECK: stb {{[0-9]+}}, 103(1)
; CHECK: sth {{[0-9]+}}, 101(1)
; CHECK: stw {{[0-9]+}}, 97(1)
; CHECK: ld 9, 96(1)
; CHECK: ld 8, 88(1)
; CHECK: ld 7, 80(1)
; CHECK: lwz 6, 136(31)
; CHECK: ld 5, 64(1)
; CHECK: lhz 4, 152(31)
; CHECK: lbz 3, 160(31)
}

; Callee for the packed structs: same spill-then-reload pattern as
; @callee1, but the reloads of the packed fields use byte-granular
; offsets (e.g. 69, 83, 90, 97) since the fields are only 1-aligned.
define internal i32 @callee2(%struct.t1* byval %v1, %struct.t2* byval %v2, %struct.t3* byval %v3, %struct.t4* byval %v4, %struct.t5* byval %v5, %struct.t6* byval %v6, %struct.t7* byval %v7) nounwind {
entry:
  %a = getelementptr inbounds %struct.t1* %v1, i32 0, i32 0
  %0 = load i8* %a, align 1
  %conv = zext i8 %0 to i32
  %a1 = getelementptr inbounds %struct.t2* %v2, i32 0, i32 0
  %1 = load i16* %a1, align 1
  %conv2 = sext i16 %1 to i32
  %add = add nsw i32 %conv, %conv2
  %a3 = getelementptr inbounds %struct.t3* %v3, i32 0, i32 0
  %2 = load i16* %a3, align 1
  %conv4 = sext i16 %2 to i32
  %add5 = add nsw i32 %add, %conv4
  %a6 = getelementptr inbounds %struct.t4* %v4, i32 0, i32 0
  %3 = load i32* %a6, align 1
  %add7 = add nsw i32 %add5, %3
  %a8 = getelementptr inbounds %struct.t5* %v5, i32 0, i32 0
  %4 = load i32* %a8, align 1
  %add9 = add nsw i32 %add7, %4
  %a10 = getelementptr inbounds %struct.t6* %v6, i32 0, i32 0
  %5 = load i32* %a10, align 1
  %add11 = add nsw i32 %add9, %5
  %a12 = getelementptr inbounds %struct.t7* %v7, i32 0, i32 0
  %6 = load i32* %a12, align 1
  %add13 = add nsw i32 %add11, %6
  ret i32 %add13

; CHECK: std 9, 96(1)
; CHECK: std 8, 88(1)
; CHECK: std 7, 80(1)
; CHECK: stw 6, 76(1)
; CHECK: std 5, 64(1)
; CHECK: sth 4, 62(1)
; CHECK: stb 3, 55(1)
; CHECK: lha {{[0-9]+}}, 62(1)
; CHECK: lha {{[0-9]+}}, 69(1)
; CHECK: lbz {{[0-9]+}}, 55(1)
; CHECK: lwz {{[0-9]+}}, 76(1)
; CHECK: lwz {{[0-9]+}}, 83(1)
; CHECK: lwz {{[0-9]+}}, 90(1)
; CHECK: lwz {{[0-9]+}}, 97(1)
}