; RUN: llc -mtriple=i686-linux < %s | FileCheck %s
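;
; This file exercises the machine block placement pass: probability-driven
; block layout, loop rotation and alignment, EH-related layout, and handling
; of unanalyzable branches.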

declare void @error(i32 %i, i32 %a, i32 %b)

define i32 @test_ifchains(i32 %i, i32* %a, i32 %b) {
; Test a chain of ifs, where the block guarded by the if is error handling code
; that is not expected to run.
; CHECK: test_ifchains:
; CHECK: %entry
; CHECK-NOT: .align
; CHECK: %else1
; CHECK-NOT: .align
; CHECK: %else2
; CHECK-NOT: .align
; CHECK: %else3
; CHECK-NOT: .align
; CHECK: %else4
; CHECK-NOT: .align
; CHECK: %exit
; CHECK: %then1
; CHECK: %then2
; CHECK: %then3
; CHECK: %then4
; CHECK: %then5

entry:
  %gep1 = getelementptr i32* %a, i32 1
  %val1 = load i32* %gep1
  %cond1 = icmp ugt i32 %val1, 1
  br i1 %cond1, label %then1, label %else1, !prof !0

then1:
  call void @error(i32 %i, i32 1, i32 %b)
  br label %else1

else1:
  %gep2 = getelementptr i32* %a, i32 2
  %val2 = load i32* %gep2
  %cond2 = icmp ugt i32 %val2, 2
  br i1 %cond2, label %then2, label %else2, !prof !0

then2:
  call void @error(i32 %i, i32 1, i32 %b)
  br label %else2

else2:
  %gep3 = getelementptr i32* %a, i32 3
  %val3 = load i32* %gep3
  %cond3 = icmp ugt i32 %val3, 3
  br i1 %cond3, label %then3, label %else3, !prof !0

then3:
  call void @error(i32 %i, i32 1, i32 %b)
  br label %else3

else3:
  %gep4 = getelementptr i32* %a, i32 4
  %val4 = load i32* %gep4
  %cond4 = icmp ugt i32 %val4, 4
  br i1 %cond4, label %then4, label %else4, !prof !0

then4:
  call void @error(i32 %i, i32 1, i32 %b)
  br label %else4

else4:
  %gep5 = getelementptr i32* %a, i32 3
  %val5 = load i32* %gep5
  %cond5 = icmp ugt i32 %val5, 3
  br i1 %cond5, label %then5, label %exit, !prof !0

then5:
  call void @error(i32 %i, i32 1, i32 %b)
  br label %exit

exit:
  ret i32 %b
}

define i32 @test_loop_cold_blocks(i32 %i, i32* %a) {
; Check that we sink cold loop blocks after the hot loop body.
; CHECK: test_loop_cold_blocks:
; CHECK: %entry
; CHECK-NOT: .align
; CHECK: %unlikely1
; CHECK-NOT: .align
; CHECK: %unlikely2
; CHECK: .align
; CHECK: %body1
; CHECK: %body2
; CHECK: %body3
; CHECK: %exit

entry:
  br label %body1

body1:
  %iv = phi i32 [ 0, %entry ], [ %next, %body3 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body3 ]
  %unlikelycond1 = icmp slt i32 %base, 42
  br i1 %unlikelycond1, label %unlikely1, label %body2, !prof !0

unlikely1:
  call void @error(i32 %i, i32 1, i32 %base)
  br label %body2

body2:
  %unlikelycond2 = icmp sgt i32 %base, 21
  br i1 %unlikelycond2, label %unlikely2, label %body3, !prof !0

unlikely2:
  call void @error(i32 %i, i32 2, i32 %base)
  br label %body3

body3:
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body1

exit:
  ret i32 %sum
}

!0 = metadata !{metadata !"branch_weights", i32 4, i32 64}
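; Branch weights are relative: 4 vs. 64 gives the first (error-handling)
; successor a probability of 4/68, roughly 6%, so those blocks are cold.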

define i32 @test_loop_early_exits(i32 %i, i32* %a) {
; Check that we sink early exit blocks out of loop bodies.
; CHECK: test_loop_early_exits:
; CHECK: %entry
; CHECK: %body1
; CHECK: %body2
; CHECK: %body3
; CHECK: %body4
; CHECK: %exit
; CHECK: %bail1
; CHECK: %bail2
; CHECK: %bail3

entry:
  br label %body1

body1:
  %iv = phi i32 [ 0, %entry ], [ %next, %body4 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body4 ]
  %bailcond1 = icmp eq i32 %base, 42
  br i1 %bailcond1, label %bail1, label %body2

bail1:
  ret i32 -1

body2:
  %bailcond2 = icmp eq i32 %base, 43
  br i1 %bailcond2, label %bail2, label %body3

bail2:
  ret i32 -2

body3:
  %bailcond3 = icmp eq i32 %base, 44
  br i1 %bailcond3, label %bail3, label %body4

bail3:
  ret i32 -3

body4:
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body1

exit:
  ret i32 %sum
}

define i32 @test_loop_rotate(i32 %i, i32* %a) {
; Check that we rotate conditional exits from the loop to the bottom of the
; loop, eliminating unconditional branches to the top.
; CHECK: test_loop_rotate:
; CHECK: %entry
; CHECK: %body1
; CHECK: %body0
; CHECK: %exit

entry:
  br label %body0

body0:
  %iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body1

body1:
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %bailcond1 = icmp eq i32 %sum, 42
  br label %body0

exit:
  ret i32 %base
}

define i32 @test_no_loop_rotate(i32 %i, i32* %a) {
; Check that we don't try to rotate a loop which is already laid out with
; fallthrough opportunities into the top and out of the bottom.
; CHECK: test_no_loop_rotate:
; CHECK: %entry
; CHECK: %body0
; CHECK: %body1
; CHECK: %exit

entry:
  br label %body0

body0:
  %iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %bailcond1 = icmp eq i32 %sum, 42
  br i1 %bailcond1, label %exit, label %body1

body1:
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body0

exit:
  ret i32 %base
}

define void @test_loop_rotate_reversed_blocks() {
; This test case (greatly reduced from an Olden benchmark) ensures that the loop
; rotate implementation doesn't assume that loops are laid out in a particular
; order. The first loop will get split into two basic blocks, with the loop
; header coming after the loop latch.
;
; CHECK: test_loop_rotate_reversed_blocks
; CHECK: %entry
; Look for a jump into the middle of the loop, and no branches mid-way.
; CHECK: jmp
; CHECK: %loop1
; CHECK-NOT: j{{\w*}} .LBB{{.*}}
; CHECK: %loop1
; CHECK: je

entry:
  %cond1 = load volatile i1* undef
  br i1 %cond1, label %loop2.preheader, label %loop1

loop1:
  call i32 @f()
  %cond2 = load volatile i1* undef
  br i1 %cond2, label %loop2.preheader, label %loop1

loop2.preheader:
  call i32 @f()
  %cond3 = load volatile i1* undef
  br i1 %cond3, label %exit, label %loop2

loop2:
  call i32 @f()
  %cond4 = load volatile i1* undef
  br i1 %cond4, label %exit, label %loop2

exit:
  ret void
}

define i32 @test_loop_align(i32 %i, i32* %a) {
; Check that we provide basic loop body alignment with the block placement
; pass.
; CHECK: test_loop_align:
; CHECK: %entry
; CHECK: .align [[ALIGN:[0-9]+]],
; CHECK-NEXT: %body
; CHECK: %exit
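; (FileCheck captures the emitted alignment amount as [[ALIGN]] above; the
; nested-loop test below reuses the captured value.)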

entry:
  br label %body

body:
  %iv = phi i32 [ 0, %entry ], [ %next, %body ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body ]
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body

exit:
  ret i32 %sum
}

define i32 @test_nested_loop_align(i32 %i, i32* %a, i32* %b) {
; Check that we provide nested loop body alignment.
; CHECK: test_nested_loop_align:
; CHECK: %entry
; CHECK: .align [[ALIGN]],
; CHECK-NEXT: %loop.body.1
; CHECK: .align [[ALIGN]],
; CHECK-NEXT: %inner.loop.body
; CHECK-NOT: .align
; CHECK: %exit

entry:
  br label %loop.body.1

loop.body.1:
  %iv = phi i32 [ 0, %entry ], [ %next, %loop.body.2 ]
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %bidx = load i32* %arrayidx
  br label %inner.loop.body

inner.loop.body:
  %inner.iv = phi i32 [ 0, %loop.body.1 ], [ %inner.next, %inner.loop.body ]
  %base = phi i32 [ 0, %loop.body.1 ], [ %sum, %inner.loop.body ]
  %scaled_idx = mul i32 %bidx, %iv
  %inner.arrayidx = getelementptr inbounds i32* %b, i32 %scaled_idx
  %0 = load i32* %inner.arrayidx
  %sum = add nsw i32 %0, %base
  %inner.next = add i32 %iv, 1
  %inner.exitcond = icmp eq i32 %inner.next, %i
  br i1 %inner.exitcond, label %loop.body.2, label %inner.loop.body

loop.body.2:
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %loop.body.1

exit:
  ret i32 %sum
}

define void @unnatural_cfg1() {
; Test that we can handle a loop with an inner unnatural loop at the end of
; a function. This is a gross CFG reduced from single-source GCC.
; CHECK: unnatural_cfg1
; CHECK: %entry
; CHECK: %loop.body1
; CHECK: %loop.body2
; CHECK: %loop.body3

entry:
  br label %loop.header

loop.header:
  br label %loop.body1

loop.body1:
  br i1 undef, label %loop.body3, label %loop.body2

loop.body2:
  %ptr = load i32** undef, align 4
  br label %loop.body3

loop.body3:
  %myptr = phi i32* [ %ptr2, %loop.body5 ], [ %ptr, %loop.body2 ], [ undef, %loop.body1 ]
  %bcmyptr = bitcast i32* %myptr to i32*
  %val = load i32* %bcmyptr, align 4
  %comp = icmp eq i32 %val, 48
  br i1 %comp, label %loop.body4, label %loop.body5

loop.body4:
  br i1 undef, label %loop.header, label %loop.body5

loop.body5:
  %ptr2 = load i32** undef, align 4
  br label %loop.body3
}

define void @unnatural_cfg2() {
; Test that we can handle a loop with a nested natural loop *and* an unnatural
; loop. This was reduced from a crash in block placement when run over
; single-source GCC.
; CHECK: unnatural_cfg2
; CHECK: %entry
; CHECK: %loop.body1
; CHECK: %loop.body2
; CHECK: %loop.body3
; CHECK: %loop.inner1.begin
; The end block is folded with %loop.body3...
; CHECK-NOT: %loop.inner1.end
; CHECK: %loop.body4
; CHECK: %loop.inner2.begin
; The loop.inner2.end block is folded away.
; CHECK: %loop.header
; CHECK: %bail

entry:
  br label %loop.header

loop.header:
  %comp0 = icmp eq i32* undef, null
  br i1 %comp0, label %bail, label %loop.body1

loop.body1:
  %val0 = load i32** undef, align 4
  br i1 undef, label %loop.body2, label %loop.inner1.begin

loop.body2:
  br i1 undef, label %loop.body4, label %loop.body3

loop.body3:
  %ptr1 = getelementptr inbounds i32* %val0, i32 0
  %castptr1 = bitcast i32* %ptr1 to i32**
  %val1 = load i32** %castptr1, align 4
  br label %loop.inner1.begin

loop.inner1.begin:
  %valphi = phi i32* [ %val2, %loop.inner1.end ], [ %val1, %loop.body3 ], [ %val0, %loop.body1 ]
  %castval = bitcast i32* %valphi to i32*
  %comp1 = icmp eq i32 undef, 48
  br i1 %comp1, label %loop.inner1.end, label %loop.body4

loop.inner1.end:
  %ptr2 = getelementptr inbounds i32* %valphi, i32 0
  %castptr2 = bitcast i32* %ptr2 to i32**
  %val2 = load i32** %castptr2, align 4
  br label %loop.inner1.begin

loop.body4.dead:
  br label %loop.body4

loop.body4:
  %comp2 = icmp ult i32 undef, 3
  br i1 %comp2, label %loop.inner2.begin, label %loop.end

loop.inner2.begin:
  br i1 false, label %loop.end, label %loop.inner2.end

loop.inner2.end:
  %comp3 = icmp eq i32 undef, 1769472
  br i1 %comp3, label %loop.end, label %loop.inner2.begin

loop.end:
  br label %loop.header

bail:
  unreachable
}

define i32 @problematic_switch() {
; This function's CFG caused an overflow in the machine branch probability
; calculation, triggering asserts. Make sure we don't crash on it.
; CHECK: problematic_switch

entry:
  switch i32 undef, label %exit [
    i32 879, label %bogus
    i32 877, label %step
    i32 876, label %step
    i32 875, label %step
    i32 874, label %step
    i32 873, label %step
    i32 872, label %step
    i32 868, label %step
    i32 867, label %step
    i32 866, label %step
    i32 861, label %step
    i32 860, label %step
    i32 856, label %step
    i32 855, label %step
    i32 854, label %step
    i32 831, label %step
    i32 830, label %step
    i32 829, label %step
    i32 828, label %step
    i32 815, label %step
    i32 814, label %step
    i32 811, label %step
    i32 806, label %step
    i32 805, label %step
    i32 804, label %step
    i32 803, label %step
    i32 802, label %step
    i32 801, label %step
    i32 800, label %step
    i32 799, label %step
    i32 798, label %step
    i32 797, label %step
    i32 796, label %step
    i32 795, label %step
  ]
bogus:
  unreachable
step:
  br label %exit
exit:
  %merge = phi i32 [ 3, %step ], [ 6, %entry ]
  ret i32 %merge
}

define void @fpcmp_unanalyzable_branch(i1 %cond) {
; This function's CFG contains an unanalyzable branch that is likely to be
; split due to having a different high-probability predecessor.
; CHECK: fpcmp_unanalyzable_branch
; CHECK: %entry
; CHECK: %exit
; CHECK-NOT: %if.then
; CHECK-NOT: %if.end
; CHECK-NOT: jne
; CHECK-NOT: jnp
; CHECK: jne
; CHECK-NEXT: jnp
; CHECK-NEXT: %if.then

entry:
; Note that this branch must be strongly biased toward
; 'entry.if.then_crit_edge' to ensure that we would try to form a chain for
; 'entry' -> 'entry.if.then_crit_edge' -> 'if.then'. It is the last edge in that
; chain which would violate the unanalyzable branch in 'exit', but we won't even
; try this trick unless 'if.then' is believed to almost always be reached from
; 'entry.if.then_crit_edge'.
  br i1 %cond, label %entry.if.then_crit_edge, label %lor.lhs.false, !prof !1

entry.if.then_crit_edge:
  %.pre14 = load i8* undef, align 1, !tbaa !0
  br label %if.then

lor.lhs.false:
  br i1 undef, label %if.end, label %exit

exit:
  %cmp.i = fcmp une double 0.000000e+00, undef
  br i1 %cmp.i, label %if.then, label %if.end

if.then:
  %0 = phi i8 [ %.pre14, %entry.if.then_crit_edge ], [ undef, %exit ]
  %1 = and i8 %0, 1
  store i8 %1, i8* undef, align 4, !tbaa !0
  br label %if.end

if.end:
  ret void
}

!1 = metadata !{metadata !"branch_weights", i32 1000, i32 1}
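; Weights of 1000 vs. 1 make the first successor roughly 99.9% likely.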

declare i32 @f()
declare i32 @g()
declare i32 @h(i32 %x)

define i32 @test_global_cfg_break_profitability() {
; Check that our metrics for the profitability of a CFG break are global rather
; than local. A successor may be very hot, but if the current block isn't, it
; doesn't matter. Within this test the 'then' block is slightly warmer than the
; 'else' block, but not nearly enough to merit merging it with the exit block
; even though the probability of 'then' branching to the 'exit' block is very
; high.
; CHECK: test_global_cfg_break_profitability
; CHECK: calll {{_?}}f
; CHECK: calll {{_?}}g
; CHECK: calll {{_?}}h
; CHECK: ret

entry:
  br i1 undef, label %then, label %else, !prof !2

then:
  %then.result = call i32 @f()
  br label %exit

else:
  %else.result = call i32 @g()
  br label %exit

exit:
  %result = phi i32 [ %then.result, %then ], [ %else.result, %else ]
  %result2 = call i32 @h(i32 %result)
  ret i32 %result
}

!2 = metadata !{metadata !"branch_weights", i32 3, i32 1}
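; Weights of 3 vs. 1 give 'then' a 75% probability: warmer than 'else', but
; not hot enough to merit breaking the CFG.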

declare i32 @__gxx_personality_v0(...)

define void @test_eh_lpad_successor() {
; Sometimes the landing pad ends up as the first successor of an invoke block.
; When this happens, a strange result used to fall out of updateTerminators: we
; didn't correctly locate the fallthrough successor, assuming blindly that the
; first one was the fallthrough successor. As a result, we would add an
; erroneous jump to the landing pad thinking *that* was the default successor.
; CHECK: test_eh_lpad_successor
; CHECK: %entry
; CHECK-NOT: jmp
; CHECK: %loop

entry:
  invoke i32 @f() to label %preheader unwind label %lpad

preheader:
  br label %loop

lpad:
  %lpad.val = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
          cleanup
  resume { i8*, i32 } %lpad.val

loop:
  br label %loop
}

declare void @fake_throw() noreturn

define void @test_eh_throw() {
; For blocks containing a 'throw' (or similar functionality), we have
; a no-return invoke. In this case, only EH successors will exist, and
; fallthrough simply won't occur. Make sure we don't crash trying to update
; terminators for such constructs.
;
; CHECK: test_eh_throw
; CHECK: %entry
; CHECK: %cleanup

entry:
  invoke void @fake_throw() to label %continue unwind label %cleanup

continue:
  unreachable

cleanup:
  %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
          cleanup
  unreachable
}

define void @test_unnatural_cfg_backwards_inner_loop() {
; Test that when we encounter an unnatural CFG structure after having formed
; a chain for an inner loop which happened to be laid out backwards, we don't
; attempt to merge onto the wrong end of the inner loop just because we find it
; first. This was reduced from a crasher in GCC's single source.
;
; CHECK: test_unnatural_cfg_backwards_inner_loop
; CHECK: %entry
; CHECK: [[BODY:# BB#[0-9]+]]:
; CHECK: %loop2b
; CHECK: %loop1
; CHECK: %loop2a

entry:
  br i1 undef, label %loop2a, label %body

body:
  br label %loop2a

loop1:
  %next.load = load i32** undef
  br i1 %comp.a, label %loop2a, label %loop2b

loop2a:
  %var = phi i32* [ null, %entry ], [ null, %body ], [ %next.phi, %loop1 ]
  %next.var = phi i32* [ null, %entry ], [ undef, %body ], [ %next.load, %loop1 ]
  %comp.a = icmp eq i32* %var, null
  br label %loop3

loop2b:
  %gep = getelementptr inbounds i32* %var.phi, i32 0
  %next.ptr = bitcast i32* %gep to i32**
  store i32* %next.phi, i32** %next.ptr
  br label %loop3

loop3:
  %var.phi = phi i32* [ %next.phi, %loop2b ], [ %var, %loop2a ]
  %next.phi = phi i32* [ %next.load, %loop2b ], [ %next.var, %loop2a ]
  br label %loop1
}

define void @unanalyzable_branch_to_loop_header() {
; Ensure that we can handle unanalyzable branches into loop headers. We
; pre-form chains for unanalyzable branches, and will find the tail end of that
; at the start of the loop. This function uses floating point comparison
; fallthrough because that happens to always produce unanalyzable branches on
; x86.
;
; CHECK: unanalyzable_branch_to_loop_header
; CHECK: %entry
; CHECK: %loop
; CHECK: %exit

entry:
  %cmp = fcmp une double 0.000000e+00, undef
  br i1 %cmp, label %loop, label %exit

loop:
  %cond = icmp eq i8 undef, 42
  br i1 %cond, label %exit, label %loop

exit:
  ret void
}

define void @unanalyzable_branch_to_best_succ(i1 %cond) {
; Ensure that we can handle unanalyzable branches where the destination block
; gets selected as the optimal successor to merge.
;
; CHECK: unanalyzable_branch_to_best_succ
; CHECK: %entry
; CHECK: %foo
; CHECK: %bar
; CHECK: %exit

entry:
  ; Bias this branch toward bar to ensure we form that chain.
  br i1 %cond, label %bar, label %foo, !prof !1

foo:
  %cmp = fcmp une double 0.000000e+00, undef
  br i1 %cmp, label %bar, label %exit

bar:
  call i32 @f()
  br label %exit

exit:
  ret void
}

define void @unanalyzable_branch_to_free_block(float %x) {
; Ensure that we can handle unanalyzable branches where the destination block
; gets selected as the best free block in the CFG.
;
; CHECK: unanalyzable_branch_to_free_block
; CHECK: %entry
; CHECK: %a
; CHECK: %b
; CHECK: %c
; CHECK: %exit

entry:
  br i1 undef, label %a, label %b

a:
  call i32 @f()
  br label %c

b:
  %cmp = fcmp une float %x, undef
  br i1 %cmp, label %c, label %exit

c:
  call i32 @g()
  br label %exit

exit:
  ret void
}

define void @many_unanalyzable_branches() {
; Ensure that we don't crash as we're building up many unanalyzable branches,
; blocks, and loops.
;
; CHECK: many_unanalyzable_branches
; CHECK: %entry
; CHECK: %exit

entry:
  br label %0

  %val0 = load volatile float* undef
  %cmp0 = fcmp une float %val0, undef
  br i1 %cmp0, label %1, label %0
  %val1 = load volatile float* undef
  %cmp1 = fcmp une float %val1, undef
  br i1 %cmp1, label %2, label %1
  %val2 = load volatile float* undef
  %cmp2 = fcmp une float %val2, undef
  br i1 %cmp2, label %3, label %2
  %val3 = load volatile float* undef
  %cmp3 = fcmp une float %val3, undef
  br i1 %cmp3, label %4, label %3
  %val4 = load volatile float* undef
  %cmp4 = fcmp une float %val4, undef
  br i1 %cmp4, label %5, label %4
  %val5 = load volatile float* undef
  %cmp5 = fcmp une float %val5, undef
  br i1 %cmp5, label %6, label %5
  %val6 = load volatile float* undef
  %cmp6 = fcmp une float %val6, undef
  br i1 %cmp6, label %7, label %6
  %val7 = load volatile float* undef
  %cmp7 = fcmp une float %val7, undef
  br i1 %cmp7, label %8, label %7
  %val8 = load volatile float* undef
  %cmp8 = fcmp une float %val8, undef
  br i1 %cmp8, label %9, label %8
  %val9 = load volatile float* undef
  %cmp9 = fcmp une float %val9, undef
  br i1 %cmp9, label %10, label %9
  %val10 = load volatile float* undef
  %cmp10 = fcmp une float %val10, undef
  br i1 %cmp10, label %11, label %10
  %val11 = load volatile float* undef
  %cmp11 = fcmp une float %val11, undef
  br i1 %cmp11, label %12, label %11
  %val12 = load volatile float* undef
  %cmp12 = fcmp une float %val12, undef
  br i1 %cmp12, label %13, label %12
  %val13 = load volatile float* undef
  %cmp13 = fcmp une float %val13, undef
  br i1 %cmp13, label %14, label %13
  %val14 = load volatile float* undef
  %cmp14 = fcmp une float %val14, undef
  br i1 %cmp14, label %15, label %14
  %val15 = load volatile float* undef
  %cmp15 = fcmp une float %val15, undef
  br i1 %cmp15, label %16, label %15
  %val16 = load volatile float* undef
  %cmp16 = fcmp une float %val16, undef
  br i1 %cmp16, label %17, label %16
  %val17 = load volatile float* undef
  %cmp17 = fcmp une float %val17, undef
  br i1 %cmp17, label %18, label %17
  %val18 = load volatile float* undef
  %cmp18 = fcmp une float %val18, undef
  br i1 %cmp18, label %19, label %18
  %val19 = load volatile float* undef
  %cmp19 = fcmp une float %val19, undef
  br i1 %cmp19, label %20, label %19
  %val20 = load volatile float* undef
  %cmp20 = fcmp une float %val20, undef
  br i1 %cmp20, label %21, label %20
  %val21 = load volatile float* undef
  %cmp21 = fcmp une float %val21, undef
  br i1 %cmp21, label %22, label %21
  %val22 = load volatile float* undef
  %cmp22 = fcmp une float %val22, undef
  br i1 %cmp22, label %23, label %22
  %val23 = load volatile float* undef
  %cmp23 = fcmp une float %val23, undef
  br i1 %cmp23, label %24, label %23
  %val24 = load volatile float* undef
  %cmp24 = fcmp une float %val24, undef
  br i1 %cmp24, label %25, label %24
  %val25 = load volatile float* undef
  %cmp25 = fcmp une float %val25, undef
  br i1 %cmp25, label %26, label %25
  %val26 = load volatile float* undef
  %cmp26 = fcmp une float %val26, undef
  br i1 %cmp26, label %27, label %26
  %val27 = load volatile float* undef
  %cmp27 = fcmp une float %val27, undef
  br i1 %cmp27, label %28, label %27
  %val28 = load volatile float* undef
  %cmp28 = fcmp une float %val28, undef
  br i1 %cmp28, label %29, label %28
  %val29 = load volatile float* undef
  %cmp29 = fcmp une float %val29, undef
  br i1 %cmp29, label %30, label %29
  %val30 = load volatile float* undef
  %cmp30 = fcmp une float %val30, undef
  br i1 %cmp30, label %31, label %30
  %val31 = load volatile float* undef
  %cmp31 = fcmp une float %val31, undef
  br i1 %cmp31, label %32, label %31
  %val32 = load volatile float* undef
  %cmp32 = fcmp une float %val32, undef
  br i1 %cmp32, label %33, label %32
  %val33 = load volatile float* undef
  %cmp33 = fcmp une float %val33, undef
  br i1 %cmp33, label %34, label %33
  %val34 = load volatile float* undef
  %cmp34 = fcmp une float %val34, undef
  br i1 %cmp34, label %35, label %34
  %val35 = load volatile float* undef
  %cmp35 = fcmp une float %val35, undef
  br i1 %cmp35, label %36, label %35
  %val36 = load volatile float* undef
  %cmp36 = fcmp une float %val36, undef
  br i1 %cmp36, label %37, label %36
  %val37 = load volatile float* undef
  %cmp37 = fcmp une float %val37, undef
  br i1 %cmp37, label %38, label %37
  %val38 = load volatile float* undef
  %cmp38 = fcmp une float %val38, undef
  br i1 %cmp38, label %39, label %38
  %val39 = load volatile float* undef
  %cmp39 = fcmp une float %val39, undef
  br i1 %cmp39, label %40, label %39
  %val40 = load volatile float* undef
  %cmp40 = fcmp une float %val40, undef
  br i1 %cmp40, label %41, label %40
  %val41 = load volatile float* undef
  %cmp41 = fcmp une float %val41, undef
  br i1 %cmp41, label %42, label %41
  %val42 = load volatile float* undef
  %cmp42 = fcmp une float %val42, undef
  br i1 %cmp42, label %43, label %42
  %val43 = load volatile float* undef
  %cmp43 = fcmp une float %val43, undef
  br i1 %cmp43, label %44, label %43
  %val44 = load volatile float* undef
  %cmp44 = fcmp une float %val44, undef
  br i1 %cmp44, label %45, label %44
  %val45 = load volatile float* undef
  %cmp45 = fcmp une float %val45, undef
  br i1 %cmp45, label %46, label %45
  %val46 = load volatile float* undef
  %cmp46 = fcmp une float %val46, undef
  br i1 %cmp46, label %47, label %46
  %val47 = load volatile float* undef
  %cmp47 = fcmp une float %val47, undef
  br i1 %cmp47, label %48, label %47
  %val48 = load volatile float* undef
  %cmp48 = fcmp une float %val48, undef
  br i1 %cmp48, label %49, label %48
  %val49 = load volatile float* undef
  %cmp49 = fcmp une float %val49, undef
  br i1 %cmp49, label %50, label %49
  %val50 = load volatile float* undef
  %cmp50 = fcmp une float %val50, undef
  br i1 %cmp50, label %51, label %50
  %val51 = load volatile float* undef
  %cmp51 = fcmp une float %val51, undef
  br i1 %cmp51, label %52, label %51
  %val52 = load volatile float* undef
  %cmp52 = fcmp une float %val52, undef
  br i1 %cmp52, label %53, label %52
  %val53 = load volatile float* undef
  %cmp53 = fcmp une float %val53, undef
  br i1 %cmp53, label %54, label %53
  %val54 = load volatile float* undef
  %cmp54 = fcmp une float %val54, undef
  br i1 %cmp54, label %55, label %54
  %val55 = load volatile float* undef
  %cmp55 = fcmp une float %val55, undef
  br i1 %cmp55, label %56, label %55
  %val56 = load volatile float* undef
  %cmp56 = fcmp une float %val56, undef
  br i1 %cmp56, label %57, label %56
  %val57 = load volatile float* undef
  %cmp57 = fcmp une float %val57, undef
  br i1 %cmp57, label %58, label %57
  %val58 = load volatile float* undef
  %cmp58 = fcmp une float %val58, undef
  br i1 %cmp58, label %59, label %58
  %val59 = load volatile float* undef
  %cmp59 = fcmp une float %val59, undef
  br i1 %cmp59, label %60, label %59
  %val60 = load volatile float* undef
  %cmp60 = fcmp une float %val60, undef
  br i1 %cmp60, label %61, label %60
  %val61 = load volatile float* undef
  %cmp61 = fcmp une float %val61, undef
  br i1 %cmp61, label %62, label %61
  %val62 = load volatile float* undef
  %cmp62 = fcmp une float %val62, undef
  br i1 %cmp62, label %63, label %62
  %val63 = load volatile float* undef
  %cmp63 = fcmp une float %val63, undef
  br i1 %cmp63, label %64, label %63
  %val64 = load volatile float* undef
  %cmp64 = fcmp une float %val64, undef
  br i1 %cmp64, label %65, label %64

  br label %exit
exit:
  ret void
}

define void @benchmark_heapsort(i32 %n, double* nocapture %ra) {
; This test case comes from the heapsort benchmark, and exemplifies several
; important aspects of block placement in the presence of loops:
; 1) Loop rotation needs to *ensure* that the desired exiting edge can be
;    a fallthrough.
; 2) The exiting edge from the loop which is rotated to be laid out at the
;    bottom of the loop needs to be exiting into the nearest enclosing loop (to
;    which there is an exit). Otherwise, we force that enclosing loop into
;    strange layouts that are significantly less efficient, often making it
;    discontiguous.
;
; CHECK: @benchmark_heapsort
; CHECK: %entry
; First rotated loop top.
; CHECK: .align
; CHECK: %while.end
; CHECK: %for.cond
; CHECK: %if.then
; CHECK: %if.else
; CHECK: %if.end10
; Second rotated loop top
; CHECK: .align
; CHECK: %if.then24
; CHECK: %while.cond.outer
; Third rotated loop top
; CHECK: .align
; CHECK: %while.cond
; CHECK: %while.body
; CHECK: %land.lhs.true
; CHECK: %if.then19
; CHECK: %if.end20
; CHECK: %if.then8
; CHECK: ret

entry:
  %shr = ashr i32 %n, 1
  %add = add nsw i32 %shr, 1
  %arrayidx3 = getelementptr inbounds double* %ra, i64 1
  br label %for.cond

for.cond:
  %ir.0 = phi i32 [ %n, %entry ], [ %ir.1, %while.end ]
  %l.0 = phi i32 [ %add, %entry ], [ %l.1, %while.end ]
  %cmp = icmp sgt i32 %l.0, 1
  br i1 %cmp, label %if.then, label %if.else

if.then:
  %dec = add nsw i32 %l.0, -1
  %idxprom = sext i32 %dec to i64
  %arrayidx = getelementptr inbounds double* %ra, i64 %idxprom
  %0 = load double* %arrayidx, align 8
  br label %if.end10

if.else:
  %idxprom1 = sext i32 %ir.0 to i64
  %arrayidx2 = getelementptr inbounds double* %ra, i64 %idxprom1
  %1 = load double* %arrayidx2, align 8
  %2 = load double* %arrayidx3, align 8
  store double %2, double* %arrayidx2, align 8
  %dec6 = add nsw i32 %ir.0, -1
  %cmp7 = icmp eq i32 %dec6, 1
  br i1 %cmp7, label %if.then8, label %if.end10

if.then8:
  store double %1, double* %arrayidx3, align 8
  ret void

if.end10:
  %ir.1 = phi i32 [ %ir.0, %if.then ], [ %dec6, %if.else ]
  %l.1 = phi i32 [ %dec, %if.then ], [ %l.0, %if.else ]
  %rra.0 = phi double [ %0, %if.then ], [ %1, %if.else ]
  %add31 = add nsw i32 %ir.1, 1
  br label %while.cond.outer

while.cond.outer:
  %j.0.ph.in = phi i32 [ %l.1, %if.end10 ], [ %j.1, %if.then24 ]
  %j.0.ph = shl i32 %j.0.ph.in, 1
  br label %while.cond

while.cond:
  %j.0 = phi i32 [ %add31, %if.end20 ], [ %j.0.ph, %while.cond.outer ]
  %cmp11 = icmp sgt i32 %j.0, %ir.1
  br i1 %cmp11, label %while.end, label %while.body

while.body:
  %cmp12 = icmp slt i32 %j.0, %ir.1
  br i1 %cmp12, label %land.lhs.true, label %if.end20

land.lhs.true:
  %idxprom13 = sext i32 %j.0 to i64
  %arrayidx14 = getelementptr inbounds double* %ra, i64 %idxprom13
  %3 = load double* %arrayidx14, align 8
  %add15 = add nsw i32 %j.0, 1
  %idxprom16 = sext i32 %add15 to i64
  %arrayidx17 = getelementptr inbounds double* %ra, i64 %idxprom16
  %4 = load double* %arrayidx17, align 8
  %cmp18 = fcmp olt double %3, %4
  br i1 %cmp18, label %if.then19, label %if.end20

if.then19:
  br label %if.end20

if.end20:
  %j.1 = phi i32 [ %add15, %if.then19 ], [ %j.0, %land.lhs.true ], [ %j.0, %while.body ]
  %idxprom21 = sext i32 %j.1 to i64
  %arrayidx22 = getelementptr inbounds double* %ra, i64 %idxprom21
  %5 = load double* %arrayidx22, align 8
  %cmp23 = fcmp olt double %rra.0, %5
  br i1 %cmp23, label %if.then24, label %while.cond

if.then24:
  %idxprom27 = sext i32 %j.0.ph.in to i64
  %arrayidx28 = getelementptr inbounds double* %ra, i64 %idxprom27
  store double %5, double* %arrayidx28, align 8
  br label %while.cond.outer

while.end:
  %idxprom33 = sext i32 %j.0.ph.in to i64
  %arrayidx34 = getelementptr inbounds double* %ra, i64 %idxprom33
  store double %rra.0, double* %arrayidx34, align 8
  br label %for.cond
}