; LLVM test: Transforms/LoopStrengthReduce
      1 ; RUN: opt < %s -loop-reduce -S | FileCheck %s
      2 ; CHECK: bb1:
      3 ; CHECK: load double, double addrspace(1)* [[IV:%[^,]+]]
      4 ; CHECK: store double {{.*}}, double addrspace(1)* [[IV]]
      5 
      6 ; CHECK-NOT: cast
      7 ; Make sure the GEP has the right index type
      8 ; CHECK: getelementptr double, double addrspace(1)* [[IV]], i16 1
      9 ; CHECK: br {{.*}} label %bb1
     10 
     11 ; Make sure the GEP has the right index type
     12 ; CHECK: getelementptr double, double addrspace(1)* {{.*}}, i16
     13 
     14 
     15 ; This test tests several things. The load and store should use the
     16 ; same address instead of having it computed twice, and SCEVExpander should
     17 ; be able to reconstruct the full getelementptr, despite it having a few
     18 ; obstacles set in its way.
     19 ; We only check that the inner loop (bb1-bb2) is "reduced" because LSR
     20 ; currently only operates on inner loops.
     21 
     22 target datalayout = "e-p:64:64:64-p1:16:16:16-n16:32:64"
     23 
define void @foo(i64 %n, i64 %m, i64 %o, i64 %q, double addrspace(1)* nocapture %p) nounwind {
entry:
	; Skip everything if the outer trip count %n is non-positive.
	%tmp = icmp sgt i64 %n, 0		; <i1> [#uses=1]
	br i1 %tmp, label %bb.nph3, label %return

bb.nph:		; preds = %bb2.preheader
	; Per-outer-iteration base offsets for the load and store streams.
	; %tmp16 and %tmp19 are built from identical expressions in bb.nph3,
	; so %tmp1 == %tmp2 here; the redundancy is deliberate (see bb1).
	%tmp1 = mul i64 %tmp16, %i.02		; <i64> [#uses=1]
	%tmp2 = mul i64 %tmp19, %i.02		; <i64> [#uses=1]
	br label %bb1

bb1:		; preds = %bb2, %bb.nph
	; Inner loop body: p[base + j + 5203] = p[base + j + 5203] / 2.1.
	; The load address (%tmp5) and store address (%tmp8) are computed
	; through structurally distinct but value-equal index chains; LSR is
	; expected to fold both into one induction-variable pointer, and —
	; because addrspace(1) pointers are 16-bit per the datalayout — to
	; step it with an i16 GEP index (see the CHECK lines above).
	%j.01 = phi i64 [ %tmp9, %bb2 ], [ 0, %bb.nph ]		; <i64> [#uses=3]
	%tmp3 = add i64 %j.01, %tmp1		; <i64> [#uses=1]
	%tmp4 = add i64 %j.01, %tmp2		; <i64> [#uses=1]
        %z0 = add i64 %tmp3, 5203
	%tmp5 = getelementptr double, double addrspace(1)* %p, i64 %z0		; <double addrspace(1)*> [#uses=1]
	%tmp6 = load double, double addrspace(1)* %tmp5, align 8		; <double> [#uses=1]
	%tmp7 = fdiv double %tmp6, 2.100000e+00		; <double> [#uses=1]
        %z1 = add i64 %tmp4, 5203
	%tmp8 = getelementptr double, double addrspace(1)* %p, i64 %z1		; <double addrspace(1)*> [#uses=1]
	store double %tmp7, double addrspace(1)* %tmp8, align 8
	%tmp9 = add i64 %j.01, 1		; <i64> [#uses=2]
	br label %bb2

bb2:		; preds = %bb1
	; Inner-loop latch: iterate while j+1 < %m.
	%tmp10 = icmp slt i64 %tmp9, %m		; <i1> [#uses=1]
	br i1 %tmp10, label %bb1, label %bb2.bb3_crit_edge

bb2.bb3_crit_edge:		; preds = %bb2
	br label %bb3

bb3:		; preds = %bb2.preheader, %bb2.bb3_crit_edge
	; Outer-loop increment.
	%tmp11 = add i64 %i.02, 1		; <i64> [#uses=2]
	br label %bb4

bb4:		; preds = %bb3
	; Outer-loop latch: iterate while i+1 < %n.
	%tmp12 = icmp slt i64 %tmp11, %n		; <i1> [#uses=1]
	br i1 %tmp12, label %bb2.preheader, label %bb4.return_crit_edge

bb4.return_crit_edge:		; preds = %bb4
	br label %bb4.return_crit_edge.split

bb4.return_crit_edge.split:		; preds = %bb.nph3, %bb4.return_crit_edge
	br label %return

bb.nph3:		; preds = %entry
	; Outer-loop preheader. n*37*o*q is computed twice on purpose
	; (%tmp16 and %tmp19) to give SCEVExpander two syntactically
	; different but equal strides to reconcile.
	%tmp13 = icmp sgt i64 %m, 0		; <i1> [#uses=1]
	%tmp14 = mul i64 %n, 37		; <i64> [#uses=1]
	%tmp15 = mul i64 %tmp14, %o		; <i64> [#uses=1]
	%tmp16 = mul i64 %tmp15, %q		; <i64> [#uses=1]
	%tmp17 = mul i64 %n, 37		; <i64> [#uses=1]
	%tmp18 = mul i64 %tmp17, %o		; <i64> [#uses=1]
	%tmp19 = mul i64 %tmp18, %q		; <i64> [#uses=1]
	br i1 %tmp13, label %bb.nph3.split, label %bb4.return_crit_edge.split

bb.nph3.split:		; preds = %bb.nph3
	br label %bb2.preheader

bb2.preheader:		; preds = %bb.nph3.split, %bb4
	; Outer induction variable; the "br i1 true" always takes the inner
	; loop (kept conditional so bb3 retains this block as a predecessor).
	%i.02 = phi i64 [ %tmp11, %bb4 ], [ 0, %bb.nph3.split ]		; <i64> [#uses=3]
	br i1 true, label %bb.nph, label %bb3

return:		; preds = %bb4.return_crit_edge.split, %entry
	ret void
}
     89