; LLVM IR regression test (LoopVectorize/version-mem-access): symbolic-stride
; memory-access versioning.
      1 ; RUN: opt -basicaa -loop-vectorize -enable-mem-access-versioning -force-vector-width=2 -force-vector-interleave=1 < %s -S | FileCheck %s
      2 
      3 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
      4 
      5 ; Check that we version this loop with speculating the value 1 for symbolic
      6 ; strides.  This also checks that the symbolic stride information is correctly
      7 ; propagated to the memcheck generation.  Without this the loop wouldn't
      8 ; vectorize because we couldn't determine the array bounds for the required
      9 ; memchecks.
     10 
     11 ; CHECK-LABEL: test
      12 define void @test(i32*  %A, i64 %AStride,
      13                   i32*  %B, i32 %BStride,
      14                   i32*  %C, i64 %CStride, i32 %N) {
      15 entry:
; Early exit when the trip count %N is zero.
      16   %cmp13 = icmp eq i32 %N, 0
      17   br i1 %cmp13, label %for.end, label %for.body.preheader
      18 
; The vectorizer must emit one stride==1 runtime guard per symbolic stride
; (A, C are i64 strides; B is an i32 stride), OR the results together, and
; branch between the vectorized and scalar loops.
      19 ; CHECK-DAG: icmp ne i64 %AStride, 1
      20 ; CHECK-DAG: icmp ne i32 %BStride, 1
      21 ; CHECK-DAG: icmp ne i64 %CStride, 1
      22 ; CHECK: or
      23 ; CHECK: or
      24 ; CHECK: br
      25 
; With the strides speculated to 1, a vector body with VF=2 loads must exist
; (forced by -force-vector-width=2 on the RUN line).
      26 ; CHECK: vector.body
      27 ; CHECK: load <2 x i32>
      28 
      29 for.body.preheader:
      30   br label %for.body
      31 
; Loop body: A[i*AStride] = B[i*BStride] * C[i*CStride], i = 0..N-1.
; All three strides are loop-invariant function arguments (symbolic strides).
      32 for.body:
      33   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
; The B index is computed in 32 bits (i32 stride) and zero-extended to i64,
; unlike the A/C indices which are multiplied directly in i64.
      34   %iv.trunc = trunc i64 %indvars.iv to i32
      35   %mul = mul i32 %iv.trunc, %BStride
      36   %mul64 = zext i32 %mul to i64
      37   %arrayidx = getelementptr inbounds i32, i32* %B, i64 %mul64
      38   %0 = load i32, i32* %arrayidx, align 4
      39   %mul2 = mul nsw i64 %indvars.iv, %CStride
      40   %arrayidx3 = getelementptr inbounds i32, i32* %C, i64 %mul2
      41   %1 = load i32, i32* %arrayidx3, align 4
      42   %mul4 = mul nsw i32 %1, %0
      43   %mul3 = mul nsw i64 %indvars.iv, %AStride
      44   %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %mul3
      45   store i32 %mul4, i32* %arrayidx7, align 4
      46   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; Latch: compare the truncated next IV against the 32-bit trip count %N.
      47   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
      48   %exitcond = icmp eq i32 %lftr.wideiv, %N
      49   br i1 %exitcond, label %for.end.loopexit, label %for.body
      50 
      51 for.end.loopexit:
      52   br label %for.end
      53 
      54 for.end:
      55   ret void
      56 }
     57 
     58 ; We used to crash on this function because we removed the fptosi cast when
     59 ; replacing the symbolic stride '%conv'.
     60 ; PR18480
     61 
     62 ; CHECK-LABEL: fn1
     63 ; CHECK: load <2 x double>
     64 
      65 define void @fn1(double* noalias %x, double* noalias %c, double %a) {
      66 entry:
; %conv serves both as the symbolic stride of the %x accesses and as the
; trip count; it is produced by an fptosi cast, which the stride-replacement
; logic must not drop (PR18480 crash reproducer).
      67   %conv = fptosi double %a to i32
      68   %cmp8 = icmp sgt i32 %conv, 0
      69   br i1 %cmp8, label %for.body.preheader, label %for.end
      70 
      71 for.body.preheader:
      72   br label %for.body
      73 
; Loop body: c[i] = x[i*conv], i = 0..conv-1.
      74 for.body:
      75   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
; %x index: 32-bit multiply by the symbolic stride, then sign-extend to i64.
      76   %0 = trunc i64 %indvars.iv to i32
      77   %mul = mul nsw i32 %0, %conv
      78   %idxprom = sext i32 %mul to i64
      79   %arrayidx = getelementptr inbounds double, double* %x, i64 %idxprom
      80   %1 = load double, double* %arrayidx, align 8
; %c is accessed with unit stride (plain IV index).
      81   %arrayidx3 = getelementptr inbounds double, double* %c, i64 %indvars.iv
      82   store double %1, double* %arrayidx3, align 8
      83   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; Latch: exit once the truncated next IV reaches the trip count %conv.
      84   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
      85   %exitcond = icmp eq i32 %lftr.wideiv, %conv
      86   br i1 %exitcond, label %for.end.loopexit, label %for.body
      87 
      88 for.end.loopexit:
      89   br label %for.end
      90 
      91 for.end:
      92   ret void
      93 }
     94