; RUN: opt < %s -sroa -S | FileCheck %s

target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"

define i8 @test1() {
; We fully promote these to the i24 load or store size, resulting in just masks
; and other operations that instcombine will fold, but no alloca. Note this is
; the same as test12 in basictest.ll, but here we assert big-endian byte
; ordering.
;
; CHECK-LABEL: @test1(

entry:
  %a = alloca [3 x i8]
  %b = alloca [3 x i8]
; CHECK-NOT: alloca

  %a0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
  store i8 0, i8* %a0ptr
  %a1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
  store i8 0, i8* %a1ptr
  %a2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
  store i8 0, i8* %a2ptr
  %aiptr = bitcast [3 x i8]* %a to i24*
  %ai = load i24, i24* %aiptr
; CHECK-NOT: store
; CHECK-NOT: load
; CHECK: %[[ext2:.*]] = zext i8 0 to i24
; CHECK-NEXT: %[[mask2:.*]] = and i24 undef, -256
; CHECK-NEXT: %[[insert2:.*]] = or i24 %[[mask2]], %[[ext2]]
; CHECK-NEXT: %[[ext1:.*]] = zext i8 0 to i24
; CHECK-NEXT: %[[shift1:.*]] = shl i24 %[[ext1]], 8
; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[insert2]], -65281
; CHECK-NEXT: %[[insert1:.*]] = or i24 %[[mask1]], %[[shift1]]
; CHECK-NEXT: %[[ext0:.*]] = zext i8 0 to i24
; CHECK-NEXT: %[[shift0:.*]] = shl i24 %[[ext0]], 16
; CHECK-NEXT: %[[mask0:.*]] = and i24 %[[insert1]], 65535
; CHECK-NEXT: %[[insert0:.*]] = or i24 %[[mask0]], %[[shift0]]

  %biptr = bitcast [3 x i8]* %b to i24*
  store i24 %ai, i24* %biptr
  %b0ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 0
  %b0 = load i8, i8* %b0ptr
  %b1ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 1
  %b1 = load i8, i8* %b1ptr
  %b2ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 2
  %b2 = load i8, i8* %b2ptr
; CHECK-NOT: store
; CHECK-NOT: load
; CHECK: %[[shift0:.*]] = lshr i24 %[[insert0]], 16
; CHECK-NEXT: %[[trunc0:.*]] = trunc i24 %[[shift0]] to i8
; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[insert0]], 8
; CHECK-NEXT: %[[trunc1:.*]] = trunc i24 %[[shift1]] to i8
; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[insert0]] to i8

  %bsum0 = add i8 %b0, %b1
  %bsum1 = add i8 %bsum0, %b2
  ret i8 %bsum1
; CHECK: %[[sum0:.*]] = add i8 %[[trunc0]], %[[trunc1]]
; CHECK-NEXT: %[[sum1:.*]] = add i8 %[[sum0]], %[[trunc2]]
; CHECK-NEXT: ret i8 %[[sum1]]
}

define i64 @test2() {
; Test for various mixed sizes of integer loads and stores all getting
; promoted.
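;
; Roughly, in this big-endian layout a store of w bytes at offset o within an
; N-byte slice should land at bits [8 * (N - o - w), 8 * (N - o)) of the
; slice's integer, which is where the shifts and masks in the checks below
; come from: the i8 store lands at offset 0 of the 5-byte [2, 7) slice, so
; bits [32, 40) (the or with 4294967296, i.e. 1 << 32); the i24 store lands at
; offset 1 of that slice, so bits [8, 32) (the or with 256); and the i16 store
; at offset 0 of the full 7-byte i56 is shifted left by 40 into its top bytes.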
;
; CHECK-LABEL: @test2(

entry:
  %a = alloca [7 x i8]
; CHECK-NOT: alloca

  %a0ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 0, i32 0
  %a1ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 0, i32 1
  %a2ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 0, i32 2
  %a3ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 0, i32 3

; CHECK-NOT: store
; CHECK-NOT: load

  %a0i16ptr = bitcast i8* %a0ptr to i16*
  store i16 1, i16* %a0i16ptr

  store i8 1, i8* %a2ptr
; CHECK: %[[mask1:.*]] = and i40 undef, 4294967295
; CHECK-NEXT: %[[insert1:.*]] = or i40 %[[mask1]], 4294967296

  %a3i24ptr = bitcast i8* %a3ptr to i24*
  store i24 1, i24* %a3i24ptr
; CHECK-NEXT: %[[mask2:.*]] = and i40 %[[insert1]], -4294967041
; CHECK-NEXT: %[[insert2:.*]] = or i40 %[[mask2]], 256

  %a2i40ptr = bitcast i8* %a2ptr to i40*
  store i40 1, i40* %a2i40ptr
; CHECK-NEXT: %[[ext3:.*]] = zext i40 1 to i56
; CHECK-NEXT: %[[mask3:.*]] = and i56 undef, -1099511627776
; CHECK-NEXT: %[[insert3:.*]] = or i56 %[[mask3]], %[[ext3]]

; CHECK-NOT: store
; CHECK-NOT: load

  %aiptr = bitcast [7 x i8]* %a to i56*
  %ai = load i56, i56* %aiptr
  %ret = zext i56 %ai to i64
  ret i64 %ret
; CHECK-NEXT: %[[ext4:.*]] = zext i16 1 to i56
; CHECK-NEXT: %[[shift4:.*]] = shl i56 %[[ext4]], 40
; CHECK-NEXT: %[[mask4:.*]] = and i56 %[[insert3]], 1099511627775
; CHECK-NEXT: %[[insert4:.*]] = or i56 %[[mask4]], %[[shift4]]
; CHECK-NEXT: %[[ret:.*]] = zext i56 %[[insert4]] to i64
; CHECK-NEXT: ret i64 %[[ret]]
}

define i64 @PR14132(i1 %flag) {
; CHECK-LABEL: @PR14132(
; Here we form a PHI-node by promoting the pointer alloca first, and then in
; order to promote the other two allocas, we speculate the load of the
; now-phi-node-pointer. In doing so we end up loading a 64-bit value from an i8
; alloca. While this is a bit dubious, we were asserting on trying to
; rewrite it. The trick is that the code using the value may carefully take
; steps to only use the not-undef bits, and so we need to at least loosely
; support this. This test is particularly interesting because how we handle
; a load of an i64 from an i8 alloca is dependent on endianness.
entry:
  %a = alloca i64, align 8
  %b = alloca i8, align 8
  %ptr = alloca i64*, align 8
; CHECK-NOT: alloca

  %ptr.cast = bitcast i64** %ptr to i8**
  store i64 0, i64* %a
  store i8 1, i8* %b
  store i64* %a, i64** %ptr
  br i1 %flag, label %if.then, label %if.end

if.then:
  store i8* %b, i8** %ptr.cast
  br label %if.end
; CHECK-NOT: store
; CHECK: %[[ext:.*]] = zext i8 1 to i64
; CHECK: %[[shift:.*]] = shl i64 %[[ext]], 56

if.end:
  %tmp = load i64*, i64** %ptr
  %result = load i64, i64* %tmp
; CHECK-NOT: load
; CHECK: %[[result:.*]] = phi i64 [ %[[shift]], %if.then ], [ 0, %entry ]

  ret i64 %result
; CHECK-NEXT: ret i64 %[[result]]
}

declare void @f(i64 %x, i32 %y)

define void @test3() {
; CHECK-LABEL: @test3(
;
; This is a test that specifically exercises the big-endian lowering because it
; ends up splitting a 64-bit integer into two smaller integers and has a number
; of tricky aspects (the i24 type) that make that hard. Historically, SROA
; would miscompile this by either dropping a most significant byte or least
; significant byte due to shrinking the [4,8) slice to an i24, or by failing to
; move the bytes around correctly.
;
; The magical number 34494054408 is used because it has bits set in various
; bytes so that it is clear if those bytes fail to be propagated.
;
; If you're debugging this, rather than using the direct magical numbers, run
; the IR through '-sroa -instcombine'. With '-instcombine' these will be
; constant folded, and if the i64 doesn't round-trip correctly, you've found
; a bug!
;
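; For reference, 34494054408 is 0x808018008: in this big-endian layout the
; first four bytes of the stored i64 hold the high half, 0x00000008 (8), while
; bytes [4,8) hold the low half, 0x08018008 (134316040). Those should be the
; two constants visible in the checks below.
;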
entry:
  %a = alloca { i32, i24 }, align 4
; CHECK-NOT: alloca

  %tmp0 = bitcast { i32, i24 }* %a to i64*
  store i64 34494054408, i64* %tmp0
  %tmp1 = load i64, i64* %tmp0, align 4
  %tmp2 = bitcast { i32, i24 }* %a to i32*
  %tmp3 = load i32, i32* %tmp2, align 4
; CHECK: %[[HI_EXT:.*]] = zext i32 134316040 to i64
; CHECK: %[[HI_INPUT:.*]] = and i64 undef, -4294967296
; CHECK: %[[HI_MERGE:.*]] = or i64 %[[HI_INPUT]], %[[HI_EXT]]
; CHECK: %[[LO_EXT:.*]] = zext i32 8 to i64
; CHECK: %[[LO_SHL:.*]] = shl i64 %[[LO_EXT]], 32
; CHECK: %[[LO_INPUT:.*]] = and i64 %[[HI_MERGE]], 4294967295
; CHECK: %[[LO_MERGE:.*]] = or i64 %[[LO_INPUT]], %[[LO_SHL]]

  call void @f(i64 %tmp1, i32 %tmp3)
; CHECK: call void @f(i64 %[[LO_MERGE]], i32 8)
  ret void
; CHECK: ret void
}

define void @test4() {
; CHECK-LABEL: @test4
;
; Much like @test3, this is specifically testing big-endian management of data.
; Also similarly, it uses constants with particular bits set to help track
; whether values are corrupted, and can be easily evaluated by running through
; -instcombine to see that the i64 round-trips.
;
entry:
  %a = alloca { i32, i24 }, align 4
  %a2 = alloca i64, align 4
; CHECK-NOT: alloca

  store i64 34494054408, i64* %a2
  %tmp0 = bitcast { i32, i24 }* %a to i8*
  %tmp1 = bitcast i64* %a2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp0, i8* %tmp1, i64 8, i32 4, i1 false)
; CHECK: %[[LO_SHR:.*]] = lshr i64 34494054408, 32
; CHECK: %[[LO_START:.*]] = trunc i64 %[[LO_SHR]] to i32
; CHECK: %[[HI_START:.*]] = trunc i64 34494054408 to i32

  %tmp2 = bitcast { i32, i24 }* %a to i64*
  %tmp3 = load i64, i64* %tmp2, align 4
  %tmp4 = bitcast { i32, i24 }* %a to i32*
  %tmp5 = load i32, i32* %tmp4, align 4
; CHECK: %[[HI_EXT:.*]] = zext i32 %[[HI_START]] to i64
; CHECK: %[[HI_INPUT:.*]] = and i64 undef, -4294967296
; CHECK: %[[HI_MERGE:.*]] = or i64 %[[HI_INPUT]], %[[HI_EXT]]
; CHECK: %[[LO_EXT:.*]] = zext i32 %[[LO_START]] to i64
; CHECK: %[[LO_SHL:.*]] = shl i64 %[[LO_EXT]], 32
; CHECK: %[[LO_INPUT:.*]] = and i64 %[[HI_MERGE]], 4294967295
; CHECK: %[[LO_MERGE:.*]] = or i64 %[[LO_INPUT]], %[[LO_SHL]]

  call void @f(i64 %tmp3, i32 %tmp5)
; CHECK: call void @f(i64 %[[LO_MERGE]], i32 %[[LO_START]])
  ret void
; CHECK: ret void
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
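
; In @test4 the memcpy'd i64 should get split the same way: big-endian places
; the high 32 bits of 34494054408 (8) in the [0, 4) slice, hence the lshr by
; 32 followed by a trunc, and the low 32 bits (134316040) in the [4, 8) slice
; via the plain trunc, mirroring the literal constants checked in @test3.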