; RUN: opt < %s -sroa -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"

declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
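; Note: this file uses the old (pre-LLVM 7) form of @llvm.memcpy, where the
; fourth argument is the guaranteed alignment of both the source and the
; destination and the fifth is the volatile flag; newer IR carries alignment
; as parameter attributes instead.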

define void @test1({ i8, i8 }* %a, { i8, i8 }* %b) {
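; Test that loads and stores rewritten out of the memcpys keep the memcpys'
; guaranteed alignment: the accesses at byte offset 0 of %a and %b get
; align 16, while those at the odd byte offset 1 can only claim align 1.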
; CHECK-LABEL: @test1(
; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 0
; CHECK: %[[a0:.*]] = load i8, i8* %[[gep_a0]], align 16
; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 1
; CHECK: %[[a1:.*]] = load i8, i8* %[[gep_a1]], align 1
; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 0
; CHECK: store i8 %[[a0]], i8* %[[gep_b0]], align 16
; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 1
; CHECK: store i8 %[[a1]], i8* %[[gep_b1]], align 1
; CHECK: ret void

entry:
  %alloca = alloca { i8, i8 }, align 16
  %gep_a = getelementptr { i8, i8 }, { i8, i8 }* %a, i32 0, i32 0
  %gep_alloca = getelementptr { i8, i8 }, { i8, i8 }* %alloca, i32 0, i32 0
  %gep_b = getelementptr { i8, i8 }, { i8, i8 }* %b, i32 0, i32 0

  store i8 420, i8* %gep_alloca, align 16

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_alloca, i8* %gep_a, i32 2, i32 16, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_b, i8* %gep_alloca, i32 2, i32 16, i1 false)
  ret void
}

define void @test2() {
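; Test splitting an alloca around a volatile i16 use at byte offset 1: the
; slice becomes its own i16 alloca, and the overlapping i8 load and store at
; byte offset 2 are preserved.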
; CHECK-LABEL: @test2(
; CHECK: alloca i16
; CHECK: load i8, i8* %{{.*}}
; CHECK: store i8 42, i8* %{{.*}}
; CHECK: ret void

entry:
  %a = alloca { i8, i8, i8, i8 }, align 2
  %gep1 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 1
  %cast1 = bitcast i8* %gep1 to i16*
  store volatile i16 0, i16* %cast1
  %gep2 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 2
  %result = load i8, i8* %gep2
  store i8 42, i8* %gep2
  ret void
}

define void @PR13920(<2 x i64>* %a, i16* %b) {
; Test that alignments on memcpy intrinsics get propagated to loads and stores.
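; The alloca itself is 16-byte aligned, but both memcpys only guarantee
; align 2, so the rewritten <2 x i64> load and store must use align 2, not 16.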
; CHECK-LABEL: @PR13920(
; CHECK: load <2 x i64>, <2 x i64>* %a, align 2
; CHECK: store <2 x i64> {{.*}}, <2 x i64>* {{.*}}, align 2
; CHECK: ret void

entry:
  %aa = alloca <2 x i64>, align 16
  %aptr = bitcast <2 x i64>* %a to i8*
  %aaptr = bitcast <2 x i64>* %aa to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %aaptr, i8* %aptr, i32 16, i32 2, i1 false)
  %bptr = bitcast i16* %b to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %bptr, i8* %aaptr, i32 16, i32 2, i1 false)
  ret void
}

define void @test3(i8* %x) {
; Test that when we promote an alloca to a type with lower ABI alignment, we
; provide the needed explicit alignment that code using the alloca may be
; expecting. However, also check that any offset within an alloca can in turn
; reduce the alignment.
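; (Here %a and %b have 8-byte ABI alignment from the 64-bit pointers in the
; datalayout, so the [22 x i8] replacement needs an explicit align 8, while
; the slice of %b starting at byte offset 6 can only claim align 2.)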
; CHECK-LABEL: @test3(
; CHECK: alloca [22 x i8], align 8
; CHECK: alloca [18 x i8], align 2
; CHECK: ret void

entry:
  %a = alloca { i8*, i8*, i8* }
  %b = alloca { i8*, i8*, i8* }
  %a_raw = bitcast { i8*, i8*, i8* }* %a to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a_raw, i8* %x, i32 22, i32 8, i1 false)
  %b_raw = bitcast { i8*, i8*, i8* }* %b to i8*
  %b_gep = getelementptr i8, i8* %b_raw, i32 6
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b_gep, i8* %x, i32 18, i32 2, i1 false)
  ret void
}

define void @test5() {
; Test that we preserve underaligned loads and stores when splitting. The use
; of volatile in this test case is just to force the loads and stores to not be
; split or promoted out of existence.
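; The accesses sit at byte offsets 0, 7, 9, and 16 of the [18 x i8] alloca,
; so after splitting into two [9 x i8] halves every access keeps its explicit
; align 1.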
;
; CHECK-LABEL: @test5(
; CHECK: alloca [9 x i8]
; CHECK: alloca [9 x i8]
; CHECK: store volatile double 0.0{{.*}}, double* %{{.*}}, align 1
; CHECK: load volatile i16, i16* %{{.*}}, align 1
; CHECK: load double, double* %{{.*}}, align 1
; CHECK: store volatile double %{{.*}}, double* %{{.*}}, align 1
; CHECK: load volatile i16, i16* %{{.*}}, align 1
; CHECK: ret void

entry:
  %a = alloca [18 x i8]
  %raw1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  store volatile double 0.0, double* %ptr1, align 1
  %weird_gep1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 7
  %weird_cast1 = bitcast i8* %weird_gep1 to i16*
  %weird_load1 = load volatile i16, i16* %weird_cast1, align 1

  %raw2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 9
  %ptr2 = bitcast i8* %raw2 to double*
  %d1 = load double, double* %ptr1, align 1
  store volatile double %d1, double* %ptr2, align 1
  %weird_gep2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 16
  %weird_cast2 = bitcast i8* %weird_gep2 to i16*
  %weird_load2 = load volatile i16, i16* %weird_cast2, align 1

  ret void
}

define void @test6() {
; Test that we promote alignment when the underlying alloca switches to one
; that innately provides it.
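; Each double slice becomes its own double alloca whose natural ABI alignment
; (f64:64:64 in the datalayout) already covers the accesses, so no explicit
; align is needed on the rewritten instructions.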
; CHECK-LABEL: @test6(
; CHECK: alloca double
; CHECK: alloca double
; CHECK-NOT: align
; CHECK: ret void

entry:
  %a = alloca [16 x i8]
  %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  store volatile double 0.0, double* %ptr1, align 1

  %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
  %ptr2 = bitcast i8* %raw2 to double*
  %val = load double, double* %ptr1, align 1
  store volatile double %val, double* %ptr2, align 1

  ret void
}

define void @test7(i8* %out) {
; Test that we properly compute the destination alignment when rewriting
; memcpys as direct loads or stores.
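; The memcpys pass align 0 (no alignment guarantee), so the rewritten loads
; and stores of each double may only assume align 1.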
; CHECK-LABEL: @test7(
; CHECK-NOT: alloca

entry:
  %a = alloca [16 x i8]
  %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
  %ptr1 = bitcast i8* %raw1 to double*
  %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
  %ptr2 = bitcast i8* %raw2 to double*

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %raw1, i8* %out, i32 16, i32 0, i1 false)
; CHECK: %[[val2:.*]] = load double, double* %{{.*}}, align 1
; CHECK: %[[val1:.*]] = load double, double* %{{.*}}, align 1

  %val1 = load double, double* %ptr2, align 1
  %val2 = load double, double* %ptr1, align 1

  store double %val1, double* %ptr1, align 1
  store double %val2, double* %ptr2, align 1

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %out, i8* %raw1, i32 16, i32 0, i1 false)
; CHECK: store double %[[val1]], double* %{{.*}}, align 1
; CHECK: store double %[[val2]], double* %{{.*}}, align 1

  ret void
; CHECK: ret void
}