; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -debug-only=loop-vectorize -stats -S 2>&1 | FileCheck %s
; REQUIRES: asserts

; CHECK: LV: Loop hints: force=enabled
; CHECK: LV: Loop hints: force=?
; No more loops in the module
; CHECK-NOT: LV: Loop hints: force=
; CHECK: 2 loop-vectorize               - Number of loops analyzed for vectorization
; CHECK: 1 loop-vectorize               - Number of loops vectorized
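; The two "Loop hints" lines above correspond to @vectorized (force=enabled,
; from metadata !1/!2) and @not_vectorized (force=?, no hint in !3); the
; -stats counters confirm that both loops are analyzed but only the forced
; one is vectorized.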

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

;
; The source code for the test:
;
; #include <math.h>
; void foo(float* restrict A, float * restrict B)
; {
;   for (int i = 0; i < 1000; i+=2) A[i] = sinf(B[i]);
; }
;
;
; This loop will be vectorized even though the scalar cost is lower than any
; of the vector costs, because vectorization is explicitly forced by the loop
; metadata (!1/!2 below).
;

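; At the source level, forcing is typically requested with a pragma. A sketch
; of what the loop might look like (an illustration, not the original test
; source; Clang lowers this pragma to the llvm.loop.vectorize.enable metadata
; used below):
;
;   #pragma clang loop vectorize(enable)
;   for (int i = 0; i < 1000; i+=2) A[i] = sinf(B[i]);
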
define void @vectorized(float* noalias nocapture %A, float* noalias nocapture %B) {
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
  %call = tail call float @llvm.sin.f32(float %0)
  %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
  store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 1000
  br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !1

for.end.loopexit:
  br label %for.end

for.end:
  ret void
}

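; The loop ID !1 is self-referential (its first operand points back to !1), as
; llvm.loop metadata requires; !2 is the hint that forces vectorization.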
!1 = !{!1, !2}
!2 = !{!"llvm.loop.vectorize.enable", i1 true}

;
; This function will not be vectorized: the scalar cost is lower than any of
; the vector costs, and nothing forces vectorization.
;

define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture %B) {
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
  %call = tail call float @llvm.sin.f32(float %0)
  %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
  store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 1000
  br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !3

for.end.loopexit:
  br label %for.end

for.end:
  ret void
}

declare float @llvm.sin.f32(float) nounwind readnone

; Dummy metadata: a bare self-referential loop ID with no vectorization hints
; (force=?), leaving the decision to the cost model.
!3 = !{!3}