; Regression tests for the SROA pass's handling of alignment: splitting
; over-aligned allocas, propagating memcpy alignments onto the rewritten
; loads/stores, and preserving debug locations on rewritten allocas.
; RUN: opt < %s -sroa -S | FileCheck %s
; RUN: opt -debugify -sroa -S < %s | FileCheck %s -check-prefix DEBUGLOC

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"

declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)

define void @test1({ i8, i8 }* %a, { i8, i8 }* %b) {
; Test that when the align-16 alloca is removed, the copy it mediated is
; rewritten as per-element loads/stores carrying the alignment the alloca
; guaranteed: align 16 for the element at offset 0, align 1 at offset 1.
; CHECK-LABEL: @test1(
; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 0
; CHECK: %[[a0:.*]] = load i8, i8* %[[gep_a0]], align 16
; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 1
; CHECK: %[[a1:.*]] = load i8, i8* %[[gep_a1]], align 1
; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 0
; CHECK: store i8 %[[a0]], i8* %[[gep_b0]], align 16
; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 1
; CHECK: store i8 %[[a1]], i8* %[[gep_b1]], align 1
; CHECK: ret void

entry:
  %alloca = alloca { i8, i8 }, align 16
  %gep_a = getelementptr { i8, i8 }, { i8, i8 }* %a, i32 0, i32 0
  %gep_alloca = getelementptr { i8, i8 }, { i8, i8 }* %alloca, i32 0, i32 0
  %gep_b = getelementptr { i8, i8 }, { i8, i8 }* %b, i32 0, i32 0

  ; Dead store: fully overwritten by the first memcpy below (which covers
  ; both bytes of the alloca starting at this same pointer).
  store i8 420, i8* %gep_alloca, align 16

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %gep_alloca, i8* align 16 %gep_a, i32 2, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %gep_b, i8* align 16 %gep_alloca, i32 2, i1 false)
  ret void
}

define void @test2() {
; The volatile i16 store at offset 1 keeps bytes 1-2 of the alloca live as a
; separate partition; the byte at offset 2 is promoted around it.
; CHECK-LABEL: @test2(
; CHECK: alloca i16
; CHECK: load i8, i8* %{{.*}}
; CHECK: store i8 42, i8* %{{.*}}
; CHECK: ret void

; Check that when sroa rewrites the alloca partition
; it preserves the original DebugLocation.
; DEBUGLOC-LABEL: @test2(
; DEBUGLOC: {{.*}} = alloca {{.*}} !dbg ![[DbgLoc:[0-9]+]]
;
; DEBUGLOC: ![[DbgLoc]] = !DILocation(

entry:
  %a = alloca { i8, i8, i8, i8 }, align 2
  %gep1 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 1
  %cast1 = bitcast i8* %gep1 to i16*
  store volatile i16 0, i16* %cast1
  %gep2 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 2
  %result = load i8, i8* %gep2
  store i8 42, i8* %gep2
  ret void
}

define void @PR13920(<2 x i64>* %a, i16* %b) {
; Test that alignments on memcpy intrinsics get propagated to loads and stores.
; Here the memcpys only promise align 2, so the rewritten vector load/store
; must carry align 2 even though the alloca itself was align 16.
; CHECK-LABEL: @PR13920(
; CHECK: load <2 x i64>, <2 x i64>* %a, align 2
; CHECK: store <2 x i64> {{.*}}, <2 x i64>* {{.*}}, align 2
; CHECK: ret void

entry:
  %aa = alloca <2 x i64>, align 16
  %aptr = bitcast <2 x i64>* %a to i8*
  %aaptr = bitcast <2 x i64>* %aa to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 %aaptr, i8* align 2 %aptr, i32 16, i1 false)
  %bptr = bitcast i16* %b to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 %bptr, i8* align 2 %aaptr, i32 16, i1 false)
  ret void
}

define void @test3(i8* %x) {
; Test that when we promote an alloca to a type with lower ABI alignment, we
; provide the needed explicit alignment that code using the alloca may be
; expecting. However, also check that any offset within an alloca can in turn
; reduce the alignment.
; CHECK-LABEL: @test3(
; CHECK: alloca [22 x i8], align 8
; CHECK: alloca [18 x i8], align 2
; CHECK: ret void

entry:
  %a = alloca { i8*, i8*, i8* }
  %b = alloca { i8*, i8*, i8* }
  %a_raw = bitcast { i8*, i8*, i8* }* %a to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %a_raw, i8* align 8 %x, i32 22, i1 false)
  %b_raw = bitcast { i8*, i8*, i8* }* %b to i8*
  ; %b is only accessed starting at offset 6 with align 2, so the rewritten
  ; alloca need only be align 2.
  %b_gep = getelementptr i8, i8* %b_raw, i32 6
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 %b_gep, i8* align 2 %x, i32 18, i1 false)
  ret void
}

define void @test5() {
; Test that we preserve underaligned loads and stores when splitting. The use
; of volatile in this test case is just to force the loads and stores to not be
; split or promoted out of existence.
;
; CHECK-LABEL: @test5(
; CHECK: alloca [9 x i8]
; CHECK: alloca [9 x i8]
; CHECK: store volatile double 0.0{{.*}}, double* %{{.*}}, align 1
; CHECK: load volatile i16, i16* %{{.*}}, align 1
; CHECK: load double, double* %{{.*}}, align 1
; CHECK: store volatile double %{{.*}}, double* %{{.*}}, align 1
; CHECK: load volatile i16, i16* %{{.*}}, align 1
; CHECK: ret void

entry:
  %a = alloca [18 x i8]
  %raw1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  store volatile double 0.0, double* %ptr1, align 1
  ; Accesses at odd offsets (7 and 16) overlap the two 9-byte halves, so the
  ; split allocas cannot promise more than align 1 for them.
  %weird_gep1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 7
  %weird_cast1 = bitcast i8* %weird_gep1 to i16*
  %weird_load1 = load volatile i16, i16* %weird_cast1, align 1

  %raw2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 9
  %ptr2 = bitcast i8* %raw2 to double*
  %d1 = load double, double* %ptr1, align 1
  store volatile double %d1, double* %ptr2, align 1
  %weird_gep2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 16
  %weird_cast2 = bitcast i8* %weird_gep2 to i16*
  %weird_load2 = load volatile i16, i16* %weird_cast2, align 1

  ret void
}

define void @test6() {
; Test that we promote alignment when the underlying alloca switches to one
; that innately provides it.
; CHECK-LABEL: @test6(
; CHECK: alloca double
; CHECK: alloca double
; CHECK-NOT: align
; CHECK: ret void

entry:
  %a = alloca [16 x i8]
  %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  store volatile double 0.0, double* %ptr1, align 1
  %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
  %ptr2 = bitcast i8* %raw2 to double*
  %val = load double, double* %ptr1, align 1
  store volatile double %val, double* %ptr2, align 1

  ret void
}

define void @test7(i8* %out) {
; Test that we properly compute the destination alignment when rewriting
; memcpys as direct loads or stores. The unmarked memcpys promise only
; align 1, so the rewritten accesses must all carry align 1.
; CHECK-LABEL: @test7(
; CHECK-NOT: alloca

entry:
  %a = alloca [16 x i8]
  %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
  %ptr2 = bitcast i8* %raw2 to double*

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %raw1, i8* %out, i32 16, i1 false)
; CHECK: %[[val2:.*]] = load double, double* %{{.*}}, align 1
; CHECK: %[[val1:.*]] = load double, double* %{{.*}}, align 1

  %val1 = load double, double* %ptr2, align 1
  %val2 = load double, double* %ptr1, align 1

  store double %val1, double* %ptr1, align 1
  store double %val2, double* %ptr2, align 1

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %out, i8* %raw1, i32 16, i1 false)
; CHECK: store double %[[val1]], double* %{{.*}}, align 1
; CHECK: store double %[[val2]], double* %{{.*}}, align 1

  ret void
; CHECK: ret void
}