; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

; We keep trying to vectorize the basic block even if it already contains a vectorized store.
; CHECK: test1
; CHECK: store <2 x double>
; CHECK: ret
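;
; A rough sketch (not part of the FileCheck patterns; the value names below are
; hypothetical) of the <2 x double> form the SLP vectorizer is expected to
; produce for the scalar double part of @test1:
;   %pa = bitcast double* %a to <2 x double>*
;   %va = load <2 x double>* %pa, align 8
;   %pb = bitcast double* %b to <2 x double>*
;   %vb = load <2 x double>* %pb, align 8
;   %vmul = fmul <2 x double> %va, %vb
;   %pc = bitcast double* %c to <2 x double>*
;   store <2 x double> %vmul, <2 x double>* %pc, align 8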
define void @test1(double* %a, double* %b, double* %c, double* %d) {
entry:
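  ; Scalar double part: the SLP vectorizer should pair these loads, fmuls and
  ; stores into the <2 x double> store checked for above.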
  %i0 = load double* %a, align 8
  %i1 = load double* %b, align 8
  %mul = fmul double %i0, %i1
  %arrayidx3 = getelementptr inbounds double* %a, i64 1
  %i3 = load double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double* %b, i64 1
  %i4 = load double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  store double %mul, double* %c, align 8
  %arrayidx5 = getelementptr inbounds double* %c, i64 1
  store double %mul5, double* %arrayidx5, align 8
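  ; This part is already written with <4 x i32> vectors; its presence must not
  ; stop the vectorizer from also handling the scalar part above.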
  %0 = bitcast double* %a to <4 x i32>*
  %1 = load <4 x i32>* %0, align 8
  %2 = bitcast double* %b to <4 x i32>*
  %3 = load <4 x i32>* %2, align 8
  %4 = mul <4 x i32> %1, %3
  %5 = bitcast double* %d to <4 x i32>*
  store <4 x i32> %4, <4 x i32>* %5, align 8
  ret void
}