; XFAIL: *
; RUN: llc < %s -march=x86-64 -O3 -asm-verbose=false | FileCheck %s
target datalayout = "e-p:64:64:64"
target triple = "x86_64-unknown-unknown"

; Full strength reduction reduces register pressure from 5 to 4 here.
; Instruction selection should use the FLAGS value from the dec for
; the branch. Scheduling should push the adds upwards.
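;
; A minimal sketch of the transformation (an assumed C analogue, not part of
; the test itself): the loop below computes three indexed addresses from one
; induction variable,
;
;   for (long i = 0; i < n; ++i)
;     A[i] = B[i] * C[i];
;
; and after full strength reduction it instead advances three pointers and
; counts n down to zero, so the dec sets FLAGS for the branch directly:
;
;   for (long k = n; k != 0; --k)
;     *A++ = *B++ * *C++;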

; CHECK: full_me_0:
; CHECK: movsd   (%rsi), %xmm0
; CHECK: mulsd   (%rdx), %xmm0
; CHECK: movsd   %xmm0, (%rdi)
; CHECK: addq    $8, %rsi
; CHECK: addq    $8, %rdx
; CHECK: addq    $8, %rdi
; CHECK: decq    %rcx
; CHECK: jne

define void @full_me_0(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; Mostly-full strength reduction means we do full strength reduction on all
; of the address computations except for the constant offsets.
;
; Given a choice between constant offsets -2048 and 2048, choose the negative
; value, because at boundary conditions it has a smaller encoding.
; TODO: That's an over-general heuristic. It would be better for the target
; to indicate what the encoding cost would be. Then using a 2048 offset
; would be better on x86-64, since the start value would be 0 instead of
; 2048.
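;
; A sketch of the choice being made (an assumed C analogue): the loop touches
; elements i and i+256, which are 2048 bytes apart, so LSR can base each
; strength-reduced pointer at either access and reach the other with a
; +/-2048-byte displacement:
;
;   for (long i = 0; i < n; ++i) {
;     A[i]       = B[i]       * C[i];       // at -2048(%reg) if based at i+256
;     A[i + 256] = B[i + 256] / C[i + 256]; // at (%reg) if based at i+256
;   }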

; CHECK: mostly_full_me_0:
; CHECK: movsd   -2048(%rsi), %xmm0
; CHECK: mulsd   -2048(%rdx), %xmm0
; CHECK: movsd   %xmm0, -2048(%rdi)
; CHECK: movsd   (%rsi), %xmm0
; CHECK: divsd   (%rdx), %xmm0
; CHECK: movsd   %xmm0, (%rdi)
; CHECK: addq    $8, %rsi
; CHECK: addq    $8, %rdx
; CHECK: addq    $8, %rdi
; CHECK: decq    %rcx
; CHECK: jne

define void @mostly_full_me_0(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %j = add i64 %i, 256
  %Aj = getelementptr inbounds double* %A, i64 %j
  %Bj = getelementptr inbounds double* %B, i64 %j
  %Cj = getelementptr inbounds double* %C, i64 %j
  %t3 = load double* %Bj
  %t4 = load double* %Cj
  %o = fdiv double %t3, %t4
  store double %o, double* %Aj
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; A minor variation on mostly_full_me_0.
; Prefer to start the indvar at 0.
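;
; Here (an assumed C analogue) the second access is at i - 256, so basing the
; pointers at element i gives displacements 0 and -2048 while letting the
; induction variable itself start at 0:
;
;   for (long i = 0; i < n; ++i) {
;     A[i]       = B[i]       * C[i];
;     A[i - 256] = B[i - 256] / C[i - 256];
;   }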

; CHECK: mostly_full_me_1:
; CHECK: movsd   (%rsi), %xmm0
; CHECK: mulsd   (%rdx), %xmm0
; CHECK: movsd   %xmm0, (%rdi)
; CHECK: movsd   -2048(%rsi), %xmm0
; CHECK: divsd   -2048(%rdx), %xmm0
; CHECK: movsd   %xmm0, -2048(%rdi)
; CHECK: addq    $8, %rsi
; CHECK: addq    $8, %rdx
; CHECK: addq    $8, %rdi
; CHECK: decq    %rcx
; CHECK: jne

define void @mostly_full_me_1(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %j = sub i64 %i, 256
  %Aj = getelementptr inbounds double* %A, i64 %j
  %Bj = getelementptr inbounds double* %B, i64 %j
  %Cj = getelementptr inbounds double* %C, i64 %j
  %t3 = load double* %Bj
  %t4 = load double* %Cj
  %o = fdiv double %t3, %t4
  store double %o, double* %Aj
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; A slightly less minor variation on mostly_full_me_0.

; CHECK: mostly_full_me_2:
; CHECK: movsd   (%rsi), %xmm0
; CHECK: mulsd   (%rdx), %xmm0
; CHECK: movsd   %xmm0, (%rdi)
; CHECK: movsd   -4096(%rsi), %xmm0
; CHECK: divsd   -4096(%rdx), %xmm0
; CHECK: movsd   %xmm0, -4096(%rdi)
; CHECK: addq    $8, %rsi
; CHECK: addq    $8, %rdx
; CHECK: addq    $8, %rdi
; CHECK: decq    %rcx
; CHECK: jne

define void @mostly_full_me_2(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %k = add i64 %i, 256
  %Ak = getelementptr inbounds double* %A, i64 %k
  %Bk = getelementptr inbounds double* %B, i64 %k
  %Ck = getelementptr inbounds double* %C, i64 %k
  %t1 = load double* %Bk
  %t2 = load double* %Ck
  %m = fmul double %t1, %t2
  store double %m, double* %Ak
  %j = sub i64 %i, 256
  %Aj = getelementptr inbounds double* %A, i64 %j
  %Bj = getelementptr inbounds double* %B, i64 %j
  %Cj = getelementptr inbounds double* %C, i64 %j
  %t3 = load double* %Bj
  %t4 = load double* %Cj
  %o = fdiv double %t3, %t4
  store double %o, double* %Aj
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; In this test, the counting IV exit value is used, so full strength reduction
; would not reduce register pressure. IndVarSimplify ought to simplify such
; cases away, but it's useful here to verify that LSR's register pressure
; heuristics are working as expected.
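;
; Roughly (an assumed C analogue): the returned value keeps the 0,+,1
; counter live after the loop, so replacing it with strength-reduced
; pointers would add registers rather than remove them:
;
;   long i = 0;
;   for (; i < n; ++i)
;     A[i] = B[i] * C[i];
;   return i;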

; CHECK: count_me_0:
; CHECK: movsd   (%rsi,%rax,8), %xmm0
; CHECK: mulsd   (%rdx,%rax,8), %xmm0
; CHECK: movsd   %xmm0, (%rdi,%rax,8)
; CHECK: incq    %rax
; CHECK: cmpq    %rax, %rcx
; CHECK: jne

define i64 @count_me_0(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  %q = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  ret i64 %q
}

; In this test, the trip count value is used, so full strength reduction
; would not reduce register pressure.
; (though it would reduce register pressure inside the loop...)
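;
; Equivalently (an assumed C analogue): %n stays live past the loop because
; it is returned, so a counter compared against it costs nothing extra:
;
;   for (long i = 0; i < n; ++i)
;     A[i] = B[i] * C[i];
;   return n;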

; CHECK: count_me_1:
; CHECK: movsd   (%rsi,%rax,8), %xmm0
; CHECK: mulsd   (%rdx,%rax,8), %xmm0
; CHECK: movsd   %xmm0, (%rdi,%rax,8)
; CHECK: incq    %rax
; CHECK: cmpq    %rax, %rcx
; CHECK: jne

define i64 @count_me_1(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  %q = phi i64 [ 0, %entry ], [ %n, %loop ]
  ret i64 %q
}

; Full strength reduction doesn't save any registers here because the
; loop tripcount is a constant.
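;
; In outline (an assumed C analogue): with the trip count fixed at 5000, one
; counter with scaled-index addressing covers both access streams, whereas
; full strength reduction would keep three pointers live instead:
;
;   for (long i = 0; i < 5000; ++i) {
;     C[i + 5]  = A[i + 5]  + B[i + 5];
;     C[i + 10] = A[i + 10] - B[i + 10];
;   }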

; CHECK: count_me_2:
; CHECK: movl    $10, %eax
; CHECK: align
; CHECK: BB6_1:
; CHECK: movsd   -40(%rdi,%rax,8), %xmm0
; CHECK: addsd   -40(%rsi,%rax,8), %xmm0
; CHECK: movsd   %xmm0, -40(%rdx,%rax,8)
; CHECK: movsd   (%rdi,%rax,8), %xmm0
; CHECK: subsd   (%rsi,%rax,8), %xmm0
; CHECK: movsd   %xmm0, (%rdx,%rax,8)
; CHECK: incq    %rax
; CHECK: cmpq    $5010, %rax
; CHECK: jne

define void @count_me_2(double* nocapture %A, double* nocapture %B, double* nocapture %C) nounwind {
entry:
  br label %loop

loop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %i5 = add i64 %i, 5
  %Ai = getelementptr double* %A, i64 %i5
  %t2 = load double* %Ai
  %Bi = getelementptr double* %B, i64 %i5
  %t4 = load double* %Bi
  %t5 = fadd double %t2, %t4
  %Ci = getelementptr double* %C, i64 %i5
  store double %t5, double* %Ci
  %i10 = add i64 %i, 10
  %Ai10 = getelementptr double* %A, i64 %i10
  %t9 = load double* %Ai10
  %Bi10 = getelementptr double* %B, i64 %i10
  %t11 = load double* %Bi10
  %t12 = fsub double %t9, %t11
  %Ci10 = getelementptr double* %C, i64 %i10
  store double %t12, double* %Ci10
  %i.next = add i64 %i, 1
  %exitcond = icmp eq i64 %i.next, 5000
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; This should be fully strength-reduced to reduce register pressure.
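;
; Same loop body as count_me_2 (an assumed C analogue), but with a runtime
; trip count; the two accesses per array are 5 elements (40 bytes) apart, so
; one strength-reduced pointer per array reaches both at offsets 0 and 40:
;
;   for (long i = 0; i < n; ++i) {
;     C[i + 5]  = A[i + 5]  + B[i + 5];
;     C[i + 10] = A[i + 10] - B[i + 10];
;   }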

; CHECK: full_me_1:
; CHECK: align
; CHECK: BB7_1:
; CHECK: movsd   (%rdi), %xmm0
; CHECK: addsd   (%rsi), %xmm0
; CHECK: movsd   %xmm0, (%rdx)
; CHECK: movsd   40(%rdi), %xmm0
; CHECK: subsd   40(%rsi), %xmm0
; CHECK: movsd   %xmm0, 40(%rdx)
; CHECK: addq    $8, %rdi
; CHECK: addq    $8, %rsi
; CHECK: addq    $8, %rdx
; CHECK: decq    %rcx
; CHECK: jne

define void @full_me_1(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  br label %loop

loop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %i5 = add i64 %i, 5
  %Ai = getelementptr double* %A, i64 %i5
  %t2 = load double* %Ai
  %Bi = getelementptr double* %B, i64 %i5
  %t4 = load double* %Bi
  %t5 = fadd double %t2, %t4
  %Ci = getelementptr double* %C, i64 %i5
  store double %t5, double* %Ci
  %i10 = add i64 %i, 10
  %Ai10 = getelementptr double* %A, i64 %i10
  %t9 = load double* %Ai10
  %Bi10 = getelementptr double* %B, i64 %i10
  %t11 = load double* %Bi10
  %t12 = fsub double %t9, %t11
  %Ci10 = getelementptr double* %C, i64 %i10
  store double %t12, double* %Ci10
  %i.next = add i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; This is a variation on full_me_0 in which the 0,+,1 induction variable
; has a non-address use, pinning that value in a register.
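;
; Sketch of the pinning (an assumed C analogue): the call consumes i itself
; rather than an address derived from it, so the plain counter must stay in
; a register and scaled-index addressing remains the cheapest form:
;
;   for (long i = 0; i < n; ++i) {
;     use(i);
;     A[i] = B[i] * C[i];
;   }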

; CHECK: count_me_3:
; CHECK: call
; CHECK: movsd   (%r{{[^,]*}},%r{{[^,]*}},8), %xmm0
; CHECK: mulsd   (%r{{[^,]*}},%r{{[^,]*}},8), %xmm0
; CHECK: movsd   %xmm0, (%r{{[^,]*}},%r{{[^,]*}},8)
; CHECK: incq    %r{{.*}}
; CHECK: cmpq    %r{{.*}}, %r{{.*}}
; CHECK: jne

declare void @use(i64)

define void @count_me_3(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  call void @use(i64 %i)
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; LSR should use only one indvar for the inner loop.
; rdar://7657764
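;
; The shape of the code (an assumed C analogue of the IR below): a single
; counter should drive the inner accumulation loop, serving as both the
; array index and the trip test:
;
;   struct anon { int count; int data[4200]; };
;   int s = 0;
;   for (long i = 0; i < n; ++i)
;     for (long j = 0; j < bars[i].count; ++j)
;       s += bars[i].data[j];
;   return s;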

; CHECK: asd:
; CHECK: BB9_4:
; CHECK-NEXT: addl  (%r{{[^,]*}},%rdi,4), %e
; CHECK-NEXT: incq  %rdi
; CHECK-NEXT: cmpq  %rdi, %r{{[^,]*}}
; CHECK-NEXT: jg

%struct.anon = type { i32, [4200 x i32] }

@bars = common global [123123 x %struct.anon] zeroinitializer, align 32 ; <[123123 x %struct.anon]*> [#uses=2]

define i32 @asd(i32 %n) nounwind readonly {
entry:
  %0 = icmp sgt i32 %n, 0                         ; <i1> [#uses=1]
  br i1 %0, label %bb.nph14, label %bb5

bb.nph14:                                         ; preds = %entry
  %tmp18 = zext i32 %n to i64                     ; <i64> [#uses=1]
  br label %bb

bb:                                               ; preds = %bb3, %bb.nph14
  %indvar16 = phi i64 [ 0, %bb.nph14 ], [ %indvar.next17, %bb3 ] ; <i64> [#uses=3]
  %s.113 = phi i32 [ 0, %bb.nph14 ], [ %s.0.lcssa, %bb3 ] ; <i32> [#uses=2]
  %scevgep2526 = getelementptr [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 0 ; <i32*> [#uses=1]
  %1 = load i32* %scevgep2526, align 4            ; <i32> [#uses=2]
  %2 = icmp sgt i32 %1, 0                         ; <i1> [#uses=1]
  br i1 %2, label %bb.nph, label %bb3

bb.nph:                                           ; preds = %bb
  %tmp23 = sext i32 %1 to i64                     ; <i64> [#uses=1]
  br label %bb1

bb1:                                              ; preds = %bb.nph, %bb1
  %indvar = phi i64 [ 0, %bb.nph ], [ %tmp19, %bb1 ] ; <i64> [#uses=2]
  %s.07 = phi i32 [ %s.113, %bb.nph ], [ %4, %bb1 ] ; <i32> [#uses=1]
  %c.08 = getelementptr [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 1, i64 %indvar ; <i32*> [#uses=1]
  %3 = load i32* %c.08, align 4                   ; <i32> [#uses=1]
  %4 = add nsw i32 %3, %s.07                      ; <i32> [#uses=2]
  %tmp19 = add i64 %indvar, 1                     ; <i64> [#uses=2]
  %5 = icmp sgt i64 %tmp23, %tmp19                ; <i1> [#uses=1]
  br i1 %5, label %bb1, label %bb3

bb3:                                              ; preds = %bb1, %bb
  %s.0.lcssa = phi i32 [ %s.113, %bb ], [ %4, %bb1 ] ; <i32> [#uses=2]
  %indvar.next17 = add i64 %indvar16, 1           ; <i64> [#uses=2]
  %exitcond = icmp eq i64 %indvar.next17, %tmp18  ; <i1> [#uses=1]
  br i1 %exitcond, label %bb5, label %bb

bb5:                                              ; preds = %bb3, %entry
  %s.1.lcssa = phi i32 [ 0, %entry ], [ %s.0.lcssa, %bb3 ] ; <i32> [#uses=1]
  ret i32 %s.1.lcssa
}

; Two loops here are of particular interest: the one at %bb21, where we
; don't want to leave extra induction variables around or use an lea to
; compute the exit condition inside the loop:

; CHECK: test:

; CHECK:      BB10_4:
; CHECK-NEXT:   movaps  %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT:   addss   %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT:   mulss   (%r{{[^,]*}}), %xmm{{.*}}
; CHECK-NEXT:   movss   %xmm{{.*}}, (%r{{[^,]*}})
; CHECK-NEXT:   addq    $4, %r{{.*}}
; CHECK-NEXT:   decq    %r{{.*}}
; CHECK-NEXT:   addq    $4, %r{{.*}}
; CHECK-NEXT:   movaps  %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT: BB10_2:
; CHECK-NEXT:   testq   %r{{.*}}, %r{{.*}}
; CHECK-NEXT:   jle
; CHECK-NEXT:   testb   $15, %r{{.*}}
; CHECK-NEXT:   jne

; And the one at %bb68, where we want to be sure to use superhero mode:

; CHECK:      BB10_7:
; CHECK-NEXT:   movaps  48(%r{{[^,]*}}), %xmm{{.*}}
; CHECK-NEXT:   mulps   %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT:   movaps  32(%r{{[^,]*}}), %xmm{{.*}}
; CHECK-NEXT:   mulps   %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT:   movaps  16(%r{{[^,]*}}), %xmm{{.*}}
; CHECK-NEXT:   mulps   %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT:   movaps  (%r{{[^,]*}}), %xmm{{.*}}
; CHECK-NEXT:   mulps   %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT:   movaps  %xmm{{.*}}, (%r{{[^,]*}})
; CHECK-NEXT:   movaps  %xmm{{.*}}, 16(%r{{[^,]*}})
; CHECK-NEXT:   movaps  %xmm{{.*}}, 32(%r{{[^,]*}})
; CHECK-NEXT:   movaps  %xmm{{.*}}, 48(%r{{[^,]*}})
; CHECK-NEXT:   addps   %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT:   addps   %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT:   addps   %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT:   addps   %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT:   addq    $64, %r{{.*}}
; CHECK-NEXT:   addq    $64, %r{{.*}}
; CHECK-NEXT:   addq    $-16, %r{{.*}}
; CHECK-NEXT:   cmpq    $15, %r{{.*}}
; CHECK-NEXT:   jg

define void @test(float* %arg, i64 %arg1, float* nocapture %arg2, float* nocapture %arg3, float* %arg4, i64 %arg5, i64 %arg6) nounwind {
bb:
  %t = alloca float, align 4                      ; <float*> [#uses=3]
  %t7 = alloca float, align 4                     ; <float*> [#uses=2]
  %t8 = load float* %arg3                         ; <float> [#uses=8]
  %t9 = ptrtoint float* %arg to i64               ; <i64> [#uses=1]
  %t10 = ptrtoint float* %arg4 to i64             ; <i64> [#uses=1]
  %t11 = xor i64 %t10, %t9                        ; <i64> [#uses=1]
  %t12 = and i64 %t11, 15                         ; <i64> [#uses=1]
  %t13 = icmp eq i64 %t12, 0                      ; <i1> [#uses=1]
  %t14 = xor i64 %arg1, 1                         ; <i64> [#uses=1]
  %t15 = xor i64 %arg5, 1                         ; <i64> [#uses=1]
  %t16 = or i64 %t15, %t14                        ; <i64> [#uses=1]
  %t17 = trunc i64 %t16 to i32                    ; <i32> [#uses=1]
  %t18 = icmp eq i32 %t17, 0                      ; <i1> [#uses=1]
  br i1 %t18, label %bb19, label %bb213

bb19:                                             ; preds = %bb
  %t20 = load float* %arg2                        ; <float> [#uses=1]
  br label %bb21

bb21:                                             ; preds = %bb32, %bb19
  %t22 = phi i64 [ %t36, %bb32 ], [ 0, %bb19 ]    ; <i64> [#uses=21]
  %t23 = phi float [ %t35, %bb32 ], [ %t20, %bb19 ] ; <float> [#uses=6]
  %t24 = sub i64 %arg6, %t22                      ; <i64> [#uses=4]
  %t25 = getelementptr float* %arg4, i64 %t22     ; <float*> [#uses=4]
  %t26 = getelementptr float* %arg, i64 %t22      ; <float*> [#uses=3]
  %t27 = icmp sgt i64 %t24, 0                     ; <i1> [#uses=1]
  br i1 %t27, label %bb28, label %bb37

bb28:                                             ; preds = %bb21
  %t29 = ptrtoint float* %t25 to i64              ; <i64> [#uses=1]
  %t30 = and i64 %t29, 15                         ; <i64> [#uses=1]
  %t31 = icmp eq i64 %t30, 0                      ; <i1> [#uses=1]
  br i1 %t31, label %bb37, label %bb32

bb32:                                             ; preds = %bb28
  %t33 = load float* %t26                         ; <float> [#uses=1]
  %t34 = fmul float %t23, %t33                    ; <float> [#uses=1]
  store float %t34, float* %t25
  %t35 = fadd float %t23, %t8                     ; <float> [#uses=1]
  %t36 = add i64 %t22, 1                          ; <i64> [#uses=1]
  br label %bb21

bb37:                                             ; preds = %bb28, %bb21
  %t38 = fmul float %t8, 4.000000e+00             ; <float> [#uses=1]
  store float %t38, float* %t
  %t39 = fmul float %t8, 1.600000e+01             ; <float> [#uses=1]
  store float %t39, float* %t7
  %t40 = fmul float %t8, 0.000000e+00             ; <float> [#uses=1]
  %t41 = fadd float %t23, %t40                    ; <float> [#uses=1]
  %t42 = insertelement <4 x float> undef, float %t41, i32 0 ; <<4 x float>> [#uses=1]
  %t43 = fadd float %t23, %t8                     ; <float> [#uses=1]
  %t44 = insertelement <4 x float> %t42, float %t43, i32 1 ; <<4 x float>> [#uses=1]
  %t45 = fmul float %t8, 2.000000e+00             ; <float> [#uses=1]
  %t46 = fadd float %t23, %t45                    ; <float> [#uses=1]
  %t47 = insertelement <4 x float> %t44, float %t46, i32 2 ; <<4 x float>> [#uses=1]
  %t48 = fmul float %t8, 3.000000e+00             ; <float> [#uses=1]
  %t49 = fadd float %t23, %t48                    ; <float> [#uses=1]
  %t50 = insertelement <4 x float> %t47, float %t49, i32 3 ; <<4 x float>> [#uses=5]
  %t51 = call <4 x float> asm "movss $1, $0\09\0Apshufd $$0, $0, $0", "=x,*m,~{dirflag},~{fpsr},~{flags}"(float* %t) nounwind ; <<4 x float>> [#uses=3]
  %t52 = fadd <4 x float> %t50, %t51              ; <<4 x float>> [#uses=3]
  %t53 = fadd <4 x float> %t52, %t51              ; <<4 x float>> [#uses=3]
  %t54 = fadd <4 x float> %t53, %t51              ; <<4 x float>> [#uses=2]
  %t55 = call <4 x float> asm "movss $1, $0\09\0Apshufd $$0, $0, $0", "=x,*m,~{dirflag},~{fpsr},~{flags}"(float* %t7) nounwind ; <<4 x float>> [#uses=8]
  %t56 = icmp sgt i64 %t24, 15                    ; <i1> [#uses=2]
  br i1 %t13, label %bb57, label %bb118

bb57:                                             ; preds = %bb37
  br i1 %t56, label %bb61, label %bb112

bb58:                                             ; preds = %bb68
  %t59 = getelementptr float* %arg, i64 %t78      ; <float*> [#uses=1]
  %t60 = getelementptr float* %arg4, i64 %t78     ; <float*> [#uses=1]
  br label %bb112

bb61:                                             ; preds = %bb57
  %t62 = add i64 %t22, 16                         ; <i64> [#uses=1]
  %t63 = add i64 %t22, 4                          ; <i64> [#uses=1]
  %t64 = add i64 %t22, 8                          ; <i64> [#uses=1]
  %t65 = add i64 %t22, 12                         ; <i64> [#uses=1]
  %t66 = add i64 %arg6, -16                       ; <i64> [#uses=1]
  %t67 = sub i64 %t66, %t22                       ; <i64> [#uses=1]
  br label %bb68

bb68:                                             ; preds = %bb68, %bb61
  %t69 = phi i64 [ 0, %bb61 ], [ %t111, %bb68 ]   ; <i64> [#uses=3]
  %t70 = phi <4 x float> [ %t54, %bb61 ], [ %t107, %bb68 ] ; <<4 x float>> [#uses=2]
  %t71 = phi <4 x float> [ %t50, %bb61 ], [ %t103, %bb68 ] ; <<4 x float>> [#uses=2]
  %t72 = phi <4 x float> [ %t53, %bb61 ], [ %t108, %bb68 ] ; <<4 x float>> [#uses=2]
  %t73 = phi <4 x float> [ %t52, %bb61 ], [ %t109, %bb68 ] ; <<4 x float>> [#uses=2]
  %t74 = shl i64 %t69, 4                          ; <i64> [#uses=5]
  %t75 = add i64 %t22, %t74                       ; <i64> [#uses=2]
  %t76 = getelementptr float* %arg, i64 %t75      ; <float*> [#uses=1]
  %t77 = bitcast float* %t76 to <4 x float>*      ; <<4 x float>*> [#uses=1]
  %t78 = add i64 %t62, %t74                       ; <i64> [#uses=2]
  %t79 = add i64 %t63, %t74                       ; <i64> [#uses=2]
  %t80 = getelementptr float* %arg, i64 %t79      ; <float*> [#uses=1]
  %t81 = bitcast float* %t80 to <4 x float>*      ; <<4 x float>*> [#uses=1]
  %t82 = add i64 %t64, %t74                       ; <i64> [#uses=2]
  %t83 = getelementptr float* %arg, i64 %t82      ; <float*> [#uses=1]
  %t84 = bitcast float* %t83 to <4 x float>*      ; <<4 x float>*> [#uses=1]
  %t85 = add i64 %t65, %t74                       ; <i64> [#uses=2]
  %t86 = getelementptr float* %arg, i64 %t85      ; <float*> [#uses=1]
  %t87 = bitcast float* %t86 to <4 x float>*      ; <<4 x float>*> [#uses=1]
  %t88 = getelementptr float* %arg4, i64 %t75     ; <float*> [#uses=1]
  %t89 = bitcast float* %t88 to <4 x float>*      ; <<4 x float>*> [#uses=1]
  %t90 = getelementptr float* %arg4, i64 %t79     ; <float*> [#uses=1]
  %t91 = bitcast float* %t90 to <4 x float>*      ; <<4 x float>*> [#uses=1]
  %t92 = getelementptr float* %arg4, i64 %t82     ; <float*> [#uses=1]
  %t93 = bitcast float* %t92 to <4 x float>*      ; <<4 x float>*> [#uses=1]
  %t94 = getelementptr float* %arg4, i64 %t85     ; <float*> [#uses=1]
  %t95 = bitcast float* %t94 to <4 x float>*      ; <<4 x float>*> [#uses=1]
  %t96 = mul i64 %t69, -16                        ; <i64> [#uses=1]
  %t97 = add i64 %t67, %t96                       ; <i64> [#uses=2]
  %t98 = load <4 x float>* %t77                   ; <<4 x float>> [#uses=1]
  %t99 = load <4 x float>* %t81                   ; <<4 x float>> [#uses=1]
  %t100 = load <4 x float>* %t84                  ; <<4 x float>> [#uses=1]
  %t101 = load <4 x float>* %t87                  ; <<4 x float>> [#uses=1]
  %t102 = fmul <4 x float> %t98, %t71             ; <<4 x float>> [#uses=1]
  %t103 = fadd <4 x float> %t71, %t55             ; <<4 x float>> [#uses=2]
  %t104 = fmul <4 x float> %t99, %t73             ; <<4 x float>> [#uses=1]
  %t105 = fmul <4 x float> %t100, %t72            ; <<4 x float>> [#uses=1]
  %t106 = fmul <4 x float> %t101, %t70            ; <<4 x float>> [#uses=1]
  store <4 x float> %t102, <4 x float>* %t89
  store <4 x float> %t104, <4 x float>* %t91
  store <4 x float> %t105, <4 x float>* %t93
  store <4 x float> %t106, <4 x float>* %t95
  %t107 = fadd <4 x float> %t70, %t55             ; <<4 x float>> [#uses=1]
  %t108 = fadd <4 x float> %t72, %t55             ; <<4 x float>> [#uses=1]
  %t109 = fadd <4 x float> %t73, %t55             ; <<4 x float>> [#uses=1]
  %t110 = icmp sgt i64 %t97, 15                   ; <i1> [#uses=1]
  %t111 = add i64 %t69, 1                         ; <i64> [#uses=1]
  br i1 %t110, label %bb68, label %bb58

bb112:                                            ; preds = %bb58, %bb57
  %t113 = phi float* [ %t59, %bb58 ], [ %t26, %bb57 ] ; <float*> [#uses=1]
  %t114 = phi float* [ %t60, %bb58 ], [ %t25, %bb57 ] ; <float*> [#uses=1]
  %t115 = phi <4 x float> [ %t103, %bb58 ], [ %t50, %bb57 ] ; <<4 x float>> [#uses=1]
  %t116 = phi i64 [ %t97, %bb58 ], [ %t24, %bb57 ] ; <i64> [#uses=1]
  %t117 = call <4 x float> asm "movss $1, $0\09\0Apshufd $$0, $0, $0", "=x,*m,~{dirflag},~{fpsr},~{flags}"(float* %t) nounwind ; <<4 x float>> [#uses=0]
  br label %bb194

bb118:                                            ; preds = %bb37
  br i1 %t56, label %bb122, label %bb194

bb119:                                            ; preds = %bb137
  %t120 = getelementptr float* %arg, i64 %t145    ; <float*> [#uses=1]
  %t121 = getelementptr float* %arg4, i64 %t145   ; <float*> [#uses=1]
  br label %bb194

bb122:                                            ; preds = %bb118
  %t123 = add i64 %t22, -1                        ; <i64> [#uses=1]
  %t124 = getelementptr inbounds float* %arg, i64 %t123 ; <float*> [#uses=1]
  %t125 = bitcast float* %t124 to <4 x float>*    ; <<4 x float>*> [#uses=1]
  %t126 = load <4 x float>* %t125                 ; <<4 x float>> [#uses=1]
  %t127 = add i64 %t22, 16                        ; <i64> [#uses=1]
  %t128 = add i64 %t22, 3                         ; <i64> [#uses=1]
  %t129 = add i64 %t22, 7                         ; <i64> [#uses=1]
  %t130 = add i64 %t22, 11                        ; <i64> [#uses=1]
  %t131 = add i64 %t22, 15                        ; <i64> [#uses=1]
  %t132 = add i64 %t22, 4                         ; <i64> [#uses=1]
  %t133 = add i64 %t22, 8                         ; <i64> [#uses=1]
  %t134 = add i64 %t22, 12                        ; <i64> [#uses=1]
  %t135 = add i64 %arg6, -16                      ; <i64> [#uses=1]
  %t136 = sub i64 %t135, %t22                     ; <i64> [#uses=1]
  br label %bb137

bb137:                                            ; preds = %bb137, %bb122
  %t138 = phi i64 [ 0, %bb122 ], [ %t193, %bb137 ] ; <i64> [#uses=3]
  %t139 = phi <4 x float> [ %t54, %bb122 ], [ %t189, %bb137 ] ; <<4 x float>> [#uses=2]
  %t140 = phi <4 x float> [ %t50, %bb122 ], [ %t185, %bb137 ] ; <<4 x float>> [#uses=2]
  %t141 = phi <4 x float> [ %t53, %bb122 ], [ %t190, %bb137 ] ; <<4 x float>> [#uses=2]
  %t142 = phi <4 x float> [ %t52, %bb122 ], [ %t191, %bb137 ] ; <<4 x float>> [#uses=2]
  %t143 = phi <4 x float> [ %t126, %bb122 ], [ %t175, %bb137 ] ; <<4 x float>> [#uses=1]
  %t144 = shl i64 %t138, 4                        ; <i64> [#uses=9]
  %t145 = add i64 %t127, %t144                    ; <i64> [#uses=2]
  %t146 = add i64 %t128, %t144                    ; <i64> [#uses=1]
  %t147 = getelementptr float* %arg, i64 %t146    ; <float*> [#uses=1]
  %t148 = bitcast float* %t147 to <4 x float>*    ; <<4 x float>*> [#uses=1]
  %t149 = add i64 %t129, %t144                    ; <i64> [#uses=1]
  %t150 = getelementptr float* %arg, i64 %t149    ; <float*> [#uses=1]
  %t151 = bitcast float* %t150 to <4 x float>*    ; <<4 x float>*> [#uses=1]
  %t152 = add i64 %t130, %t144                    ; <i64> [#uses=1]
  %t153 = getelementptr float* %arg, i64 %t152    ; <float*> [#uses=1]
  %t154 = bitcast float* %t153 to <4 x float>*    ; <<4 x float>*> [#uses=1]
  %t155 = add i64 %t131, %t144                    ; <i64> [#uses=1]
  %t156 = getelementptr float* %arg, i64 %t155    ; <float*> [#uses=1]
  %t157 = bitcast float* %t156 to <4 x float>*    ; <<4 x float>*> [#uses=1]
  %t158 = add i64 %t22, %t144                     ; <i64> [#uses=1]
  %t159 = getelementptr float* %arg4, i64 %t158   ; <float*> [#uses=1]
  %t160 = bitcast float* %t159 to <4 x float>*    ; <<4 x float>*> [#uses=1]
  %t161 = add i64 %t132, %t144                    ; <i64> [#uses=1]
  %t162 = getelementptr float* %arg4, i64 %t161   ; <float*> [#uses=1]
  %t163 = bitcast float* %t162 to <4 x float>*    ; <<4 x float>*> [#uses=1]
  %t164 = add i64 %t133, %t144                    ; <i64> [#uses=1]
  %t165 = getelementptr float* %arg4, i64 %t164   ; <float*> [#uses=1]
  %t166 = bitcast float* %t165 to <4 x float>*    ; <<4 x float>*> [#uses=1]
  %t167 = add i64 %t134, %t144                    ; <i64> [#uses=1]
  %t168 = getelementptr float* %arg4, i64 %t167   ; <float*> [#uses=1]
  %t169 = bitcast float* %t168 to <4 x float>*    ; <<4 x float>*> [#uses=1]
  %t170 = mul i64 %t138, -16                      ; <i64> [#uses=1]
  %t171 = add i64 %t136, %t170                    ; <i64> [#uses=2]
  %t172 = load <4 x float>* %t148                 ; <<4 x float>> [#uses=2]
  %t173 = load <4 x float>* %t151                 ; <<4 x float>> [#uses=2]
  %t174 = load <4 x float>* %t154                 ; <<4 x float>> [#uses=2]
  %t175 = load <4 x float>* %t157                 ; <<4 x float>> [#uses=2]
  %t176 = shufflevector <4 x float> %t143, <4 x float> %t172, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
  %t177 = shufflevector <4 x float> %t176, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
  %t178 = shufflevector <4 x float> %t172, <4 x float> %t173, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
  %t179 = shufflevector <4 x float> %t178, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
  %t180 = shufflevector <4 x float> %t173, <4 x float> %t174, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
  %t181 = shufflevector <4 x float> %t180, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
  %t182 = shufflevector <4 x float> %t174, <4 x float> %t175, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
  %t183 = shufflevector <4 x float> %t182, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
  %t184 = fmul <4 x float> %t177, %t140           ; <<4 x float>> [#uses=1]
  %t185 = fadd <4 x float> %t140, %t55            ; <<4 x float>> [#uses=2]
  %t186 = fmul <4 x float> %t179, %t142           ; <<4 x float>> [#uses=1]
  %t187 = fmul <4 x float> %t181, %t141           ; <<4 x float>> [#uses=1]
  %t188 = fmul <4 x float> %t183, %t139           ; <<4 x float>> [#uses=1]
  store <4 x float> %t184, <4 x float>* %t160
  store <4 x float> %t186, <4 x float>* %t163
  store <4 x float> %t187, <4 x float>* %t166
  store <4 x float> %t188, <4 x float>* %t169
  %t189 = fadd <4 x float> %t139, %t55            ; <<4 x float>> [#uses=1]
  %t190 = fadd <4 x float> %t141, %t55            ; <<4 x float>> [#uses=1]
  %t191 = fadd <4 x float> %t142, %t55            ; <<4 x float>> [#uses=1]
  %t192 = icmp sgt i64 %t171, 15                  ; <i1> [#uses=1]
  %t193 = add i64 %t138, 1                        ; <i64> [#uses=1]
  br i1 %t192, label %bb137, label %bb119

bb194:                                            ; preds = %bb119, %bb118, %bb112
  %t195 = phi i64 [ %t116, %bb112 ], [ %t171, %bb119 ], [ %t24, %bb118 ] ; <i64> [#uses=2]
  %t196 = phi <4 x float> [ %t115, %bb112 ], [ %t185, %bb119 ], [ %t50, %bb118 ] ; <<4 x float>> [#uses=1]
  %t197 = phi float* [ %t114, %bb112 ], [ %t121, %bb119 ], [ %t25, %bb118 ] ; <float*> [#uses=1]
  %t198 = phi float* [ %t113, %bb112 ], [ %t120, %bb119 ], [ %t26, %bb118 ] ; <float*> [#uses=1]
  %t199 = extractelement <4 x float> %t196, i32 0 ; <float> [#uses=2]
  %t200 = icmp sgt i64 %t195, 0                   ; <i1> [#uses=1]
  br i1 %t200, label %bb201, label %bb211

bb201:                                            ; preds = %bb201, %bb194
  %t202 = phi i64 [ %t209, %bb201 ], [ 0, %bb194 ] ; <i64> [#uses=3]
  %t203 = phi float [ %t208, %bb201 ], [ %t199, %bb194 ] ; <float> [#uses=2]
  %t204 = getelementptr float* %t198, i64 %t202   ; <float*> [#uses=1]
  %t205 = getelementptr float* %t197, i64 %t202   ; <float*> [#uses=1]
  %t206 = load float* %t204                       ; <float> [#uses=1]
  %t207 = fmul float %t203, %t206                 ; <float> [#uses=1]
  store float %t207, float* %t205
  %t208 = fadd float %t203, %t8                   ; <float> [#uses=2]
  %t209 = add i64 %t202, 1                        ; <i64> [#uses=2]
  %t210 = icmp eq i64 %t209, %t195                ; <i1> [#uses=1]
  br i1 %t210, label %bb211, label %bb201

bb211:                                            ; preds = %bb201, %bb194
  %t212 = phi float [ %t199, %bb194 ], [ %t208, %bb201 ] ; <float> [#uses=1]
  store float %t212, float* %arg2
  ret void

bb213:                                            ; preds = %bb
  ret void
}