; RUN: opt < %s -S -indvars -loop-unroll -verify-loop-info | FileCheck %s
;
; Unit tests for loop unrolling using ScalarEvolution to compute trip counts.
;
; Indvars is run first to generate an "old" SCEV result. Some unit
; tests may check that SCEV is properly invalidated between passes.

; Completely unroll loops without a canonical IV.
;
; CHECK-LABEL: @sansCanonical(
; CHECK-NOT: phi
; CHECK-NOT: icmp
; CHECK: ret
define i32 @sansCanonical(i32* %base) nounwind {
entry:
  br label %while.body

while.body:
  %iv = phi i64 [ 10, %entry ], [ %iv.next, %while.body ]
  %sum = phi i32 [ 0, %entry ], [ %sum.next, %while.body ]
  %iv.next = add i64 %iv, -1
  %adr = getelementptr inbounds i32, i32* %base, i64 %iv.next
  %tmp = load i32, i32* %adr, align 8
  %sum.next = add i32 %sum, %tmp
  %iv.narrow = trunc i64 %iv.next to i32
  %cmp.i65 = icmp sgt i32 %iv.narrow, 0
  br i1 %cmp.i65, label %while.body, label %exit

exit:
  ret i32 %sum
}

; SCEV unrolling properly handles loops with multiple exits. In this
; case, the computed trip count based on a canonical IV is *not* for a
; latch block. Canonical unrolling incorrectly unrolls it, but SCEV
; unrolling does not.
;
; CHECK-LABEL: @earlyLoopTest(
; CHECK: tail:
; CHECK-NOT: br
; CHECK: br i1 %cmp2, label %loop, label %exit2
define i64 @earlyLoopTest(i64* %base) nounwind {
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %inc, %tail ]
  %s = phi i64 [ 0, %entry ], [ %s.next, %tail ]
  %adr = getelementptr i64, i64* %base, i64 %iv
  %val = load i64, i64* %adr
  %s.next = add i64 %s, %val
  %inc = add i64 %iv, 1
  %cmp = icmp ne i64 %inc, 4
  br i1 %cmp, label %tail, label %exit1

tail:
  %cmp2 = icmp ne i64 %val, 0
  br i1 %cmp2, label %loop, label %exit2

exit1:
  ret i64 %s

exit2:
  ret i64 %s.next
}

; SCEV properly unrolls multi-exit loops.
;
; CHECK-LABEL: @multiExit(
; CHECK: getelementptr i32, i32* %base, i32 10
; CHECK-NEXT: load i32, i32*
; CHECK: br i1 false, label %l2.10, label %exit1
; CHECK: l2.10:
; CHECK-NOT: br
; CHECK: ret i32
define i32 @multiExit(i32* %base) nounwind {
entry:
  br label %l1
l1:
  %iv1 = phi i32 [ 0, %entry ], [ %inc1, %l2 ]
  %iv2 = phi i32 [ 0, %entry ], [ %inc2, %l2 ]
  %inc1 = add i32 %iv1, 1
  %inc2 = add i32 %iv2, 1
  %adr = getelementptr i32, i32* %base, i32 %iv1
  %val = load i32, i32* %adr
  %cmp1 = icmp slt i32 %iv1, 5
  br i1 %cmp1, label %l2, label %exit1
l2:
  %cmp2 = icmp slt i32 %iv2, 10
  br i1 %cmp2, label %l1, label %exit2
exit1:
  ret i32 1
exit2:
  ret i32 %val
}


; SCEV should not unroll a multi-exit loop unless the latch block has
; a known trip count, regardless of the early exit trip counts. The
; LoopUnroll utility uses this assumption to optimize the latch
; block's branch.
;
; CHECK-LABEL: @multiExitIncomplete(
; CHECK: l3:
; CHECK-NOT: br
; CHECK: br i1 %cmp3, label %l1, label %exit3
define i32 @multiExitIncomplete(i32* %base) nounwind {
entry:
  br label %l1
l1:
  %iv1 = phi i32 [ 0, %entry ], [ %inc1, %l3 ]
  %iv2 = phi i32 [ 0, %entry ], [ %inc2, %l3 ]
  %inc1 = add i32 %iv1, 1
  %inc2 = add i32 %iv2, 1
  %adr = getelementptr i32, i32* %base, i32 %iv1
  %val = load i32, i32* %adr
  %cmp1 = icmp slt i32 %iv1, 5
  br i1 %cmp1, label %l2, label %exit1
l2:
  %cmp2 = icmp slt i32 %iv2, 10
  br i1 %cmp2, label %l3, label %exit2
l3:
  %cmp3 = icmp ne i32 %val, 0
  br i1 %cmp3, label %l1, label %exit3

exit1:
  ret i32 1
exit2:
  ret i32 2
exit3:
  ret i32 3
}

; When loop unroll merges a loop exit with one of its parent loop's
; exits, SCEV must forget its ExitNotTaken info.
;
; CHECK-LABEL: @nestedUnroll(
; CHECK-NOT: br i1
; CHECK: for.body87:
define void @nestedUnroll() nounwind {
entry:
  br label %for.inc

for.inc:
  br i1 false, label %for.inc, label %for.body38.preheader

for.body38.preheader:
  br label %for.body38

for.body38:
  %i.113 = phi i32 [ %inc76, %for.inc74 ], [ 0, %for.body38.preheader ]
  %mul48 = mul nsw i32 %i.113, 6
  br label %for.body43

for.body43:
  %j.011 = phi i32 [ 0, %for.body38 ], [ %inc72, %for.body43 ]
  %add49 = add nsw i32 %j.011, %mul48
  %sh_prom50 = zext i32 %add49 to i64
  %inc72 = add nsw i32 %j.011, 1
  br i1 false, label %for.body43, label %for.inc74

for.inc74:
  %inc76 = add nsw i32 %i.113, 1
  br i1 false, label %for.body38, label %for.body87.preheader

for.body87.preheader:
  br label %for.body87

for.body87:
  br label %for.body87
}

; PR16130: clang produces incorrect code with loop/expression at -O2
; rdar:14036816 loop-unroll makes assumptions about undefined behavior
;
; The loop latch is assumed to exit after the first iteration because
; of the induction variable's NSW flag. However, the loop latch's
; equality test is skipped and the loop exits after the second
; iteration via the early exit. So loop unrolling cannot assume that
; the loop latch's exit count of zero is an upper bound on the number
; of iterations.
;
; CHECK-LABEL: @nsw_latch(
; CHECK: for.body:
; CHECK: %b.03 = phi i32 [ 0, %entry ], [ %add, %for.cond ]
; CHECK: return:
; CHECK: %b.03.lcssa = phi i32 [ %b.03, %for.body ], [ %b.03, %for.cond ]
define void @nsw_latch(i32* %a) nounwind {
entry:
  br label %for.body

for.body:                                         ; preds = %for.cond, %entry
  %b.03 = phi i32 [ 0, %entry ], [ %add, %for.cond ]
  %tobool = icmp eq i32 %b.03, 0
  %add = add nsw i32 %b.03, 8
  br i1 %tobool, label %for.cond, label %return

for.cond:                                         ; preds = %for.body
  %cmp = icmp eq i32 %add, 13
  br i1 %cmp, label %return, label %for.body

return:                                           ; preds = %for.body, %for.cond
  %b.03.lcssa = phi i32 [ %b.03, %for.body ], [ %b.03, %for.cond ]
  %retval.0 = phi i32 [ 1, %for.body ], [ 0, %for.cond ]
  store i32 %b.03.lcssa, i32* %a, align 4
  ret void
}
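
; Illustrative trace of @nsw_latch above (commentary only, not checked by
; FileCheck): on the first iteration %b.03 is 0, %tobool is true, %add is 8,
; and the latch %for.cond compares 8 against 13, which fails, so control
; returns to %for.body. On the second iteration %b.03 is 8, %tobool is false,
; and the loop exits directly to %return without re-testing the latch
; condition, so the latch's exit count of zero does not bound the real
; iteration count.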