; RUN: opt -loop-vectorize -mtriple=x86_64-apple-macosx -S -mcpu=corei7-avx < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

@kernel = global [512 x float] zeroinitializer, align 16
@kernel2 = global [512 x float] zeroinitializer, align 16
@kernel3 = global [512 x float] zeroinitializer, align 16
@kernel4 = global [512 x float] zeroinitializer, align 16
@src_data = global [1536 x float] zeroinitializer, align 16
@r_ = global i8 0, align 1
@g_ = global i8 0, align 1
@b_ = global i8 0, align 1

; Loops containing gathers are usually not worth vectorizing because the
; gathers are expensive. This function sits right at the point where
; vectorization starts to become beneficial, so make sure the cost model
; stays conservative and does not vectorize it.
; The substring below occurs in any float vector type (e.g. <8 x float>),
; so this asserts that no vectorized float code is emitted at all.
; CHECK-NOT: x float>

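; For reference, a rough C equivalent of the function below. This is an
; illustrative reconstruction from the IR, not the original source: the
; variable names are guesses, -ffast-math is inferred from the "fast" flags
; on the fp ops (which also permit the reassociated form shown here), and
; @_Z4testmm demangles to test(unsigned long, unsigned long).
;
;   #include <stdint.h>
;
;   extern float kernel[512], kernel2[512], kernel3[512], kernel4[512];
;   extern float src_data[1536];
;   extern uint8_t r_, g_, b_;
;
;   void test(uint64_t size, uint64_t offset) {
;     float r = 0.0f, g = 0.0f, b = 0.0f;
;     for (uint64_t v = 0; v < size; ++v) {
;       uint64_t i = 3 * (v + offset);
;       float k = kernel[v] * kernel2[v] * kernel3[v] * kernel4[v];
;       r += src_data[i]     * k;
;       g += src_data[i + 1] * k;
;       b += src_data[i + 2] * k;
;     }
;     r_ = (uint8_t)r;
;     g_ = (uint8_t)g;
;     b_ = (uint8_t)b;
;   }
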
define void @_Z4testmm(i64 %size, i64 %offset) {
entry:
  %cmp53 = icmp eq i64 %size, 0
  br i1 %cmp53, label %for.end, label %for.body.lr.ph

for.body.lr.ph:
  br label %for.body

for.body:
  %r.057 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add10, %for.body ]
  %g.056 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add20, %for.body ]
  %v.055 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %b.054 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add30, %for.body ]
  %add = add i64 %v.055, %offset
  %mul = mul i64 %add, 3
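  ; The stride-3 index %mul makes the @src_data loads below non-consecutive;
  ; vectorizing them would require gathers (or wide loads plus shuffles),
  ; which is exactly the cost this test exercises.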
  %arrayidx = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %mul
  %0 = load float* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds [512 x float]* @kernel, i64 0, i64 %v.055
  %1 = load float* %arrayidx2, align 4
  %mul3 = fmul fast float %0, %1
  %arrayidx4 = getelementptr inbounds [512 x float]* @kernel2, i64 0, i64 %v.055
  %2 = load float* %arrayidx4, align 4
  %mul5 = fmul fast float %mul3, %2
  %arrayidx6 = getelementptr inbounds [512 x float]* @kernel3, i64 0, i64 %v.055
  %3 = load float* %arrayidx6, align 4
  %mul7 = fmul fast float %mul5, %3
  %arrayidx8 = getelementptr inbounds [512 x float]* @kernel4, i64 0, i64 %v.055
  %4 = load float* %arrayidx8, align 4
  %mul9 = fmul fast float %mul7, %4
  %add10 = fadd fast float %r.057, %mul9
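  ; Second channel (g): element %mul + 1 of the same stride-3 triple.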
  %arrayidx.sum = add i64 %mul, 1
  %arrayidx11 = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum
  %5 = load float* %arrayidx11, align 4
  %mul13 = fmul fast float %1, %5
  %mul15 = fmul fast float %2, %mul13
  %mul17 = fmul fast float %3, %mul15
  %mul19 = fmul fast float %4, %mul17
  %add20 = fadd fast float %g.056, %mul19
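  ; Third channel (b): element %mul + 2.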
  %arrayidx.sum52 = add i64 %mul, 2
  %arrayidx21 = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum52
  %6 = load float* %arrayidx21, align 4
  %mul23 = fmul fast float %1, %6
  %mul25 = fmul fast float %2, %mul23
  %mul27 = fmul fast float %3, %mul25
  %mul29 = fmul fast float %4, %mul27
  %add30 = fadd fast float %b.054, %mul29
  %inc = add i64 %v.055, 1
  %exitcond = icmp ne i64 %inc, %size
  br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:
  %add30.lcssa = phi float [ %add30, %for.body ]
  %add20.lcssa = phi float [ %add20, %for.body ]
  %add10.lcssa = phi float [ %add10, %for.body ]
  %phitmp = fptoui float %add10.lcssa to i8
  %phitmp60 = fptoui float %add20.lcssa to i8
  %phitmp61 = fptoui float %add30.lcssa to i8
  br label %for.end

for.end:
  %r.0.lcssa = phi i8 [ %phitmp, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  %g.0.lcssa = phi i8 [ %phitmp60, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  %b.0.lcssa = phi i8 [ %phitmp61, %for.cond.for.end_crit_edge ], [ 0, %entry ]
  store i8 %r.0.lcssa, i8* @r_, align 1
  store i8 %g.0.lcssa, i8* @g_, align 1
  store i8 %b.0.lcssa, i8* @b_, align 1
  ret void
}