; RUN: opt < %s  -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -debug-only=loop-vectorize -stats -S 2>&1 | FileCheck %s
; REQUIRES: asserts

; CHECK: LV: Loop hints: force=enabled
; CHECK: LV: Loop hints: force=?
; No more loops in the module
; CHECK-NOT: LV: Loop hints: force=
; CHECK: 2 loop-vectorize               - Number of loops analyzed for vectorization
; CHECK: 1 loop-vectorize               - Number of loops vectorized

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

;
; The source code for the test:
;
; #include <math.h>
; void foo(float* restrict A, float* restrict B, int size)
; {
;   for (int i = 0; i < size; ++i) A[i] = sinf(B[i]);
; }
;

;
; This loop will be vectorized even though the scalar cost is lower than any of
; the vector costs, because vectorization is explicitly forced by the loop
; metadata (see the sketch below).
;
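; As an illustrative sketch only (not part of the test), the same hint can be
; produced at the C level with Clang's loop pragma, which lowers to the
; llvm.loop.vectorize.enable metadata used by this test:
;
;   #pragma clang loop vectorize(enable)
;   for (int i = 0; i < size; ++i) A[i] = sinf(B[i]);
;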

define void @vectorized(float* noalias nocapture %A, float* noalias nocapture %B, i32 %size) {
entry:
  %cmp6 = icmp sgt i32 %size, 0
  br i1 %cmp6, label %for.body.preheader, label %for.end

for.body.preheader:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
  %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
  %call = tail call float @llvm.sin.f32(float %0)
  %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
  store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %size
  br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !1

for.end.loopexit:
  br label %for.end

for.end:
  ret void
}

!1 = metadata !{metadata !1, metadata !2}
!2 = metadata !{metadata !"llvm.loop.vectorize.enable", i1 true}
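; Here !1 is the loop's self-referential identification metadata and !2 is the
; hint that forces the vectorizer to vectorize the loop above.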

;
; This loop will not be vectorized, as the scalar cost is lower than any of the
; vector costs (see the note on the loop metadata below).
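;
; Note: the loop metadata !3 attached to this loop (defined at the end of the
; file) is just a self-referential loop ID with no llvm.loop.vectorize.enable
; hint, so the vectorizer's cost-model decision is not overridden.
;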

define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture %B, i32 %size) {
entry:
  %cmp6 = icmp sgt i32 %size, 0
  br i1 %cmp6, label %for.body.preheader, label %for.end

for.body.preheader:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
  %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
  %call = tail call float @llvm.sin.f32(float %0)
  %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
  store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %size
  br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !3

for.end.loopexit:
  br label %for.end

for.end:
  ret void
}

declare float @llvm.sin.f32(float) nounwind readnone

; Dummy metadata
!3 = metadata !{metadata !3}