; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

; Simple 3-pair chain with loads and stores
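; The scalar loads, fmuls and stores below should be combined into <2 x double>
; operations. The vectorized store targets element 1 of the 16-byte aligned
; alloca (byte offset 8), so the CHECK lines only expect 8-byte alignment.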
; CHECK-LABEL: @test1
define void @test1(double* %a, double* %b, double* %c) {
entry:
  %agg.tmp.i.i.sroa.0 = alloca [3 x double], align 16
; CHECK: %[[V0:[0-9]+]] = load <2 x double>, <2 x double>* %[[V2:[0-9]+]], align 8
  %i0 = load double, double* %a
  %i1 = load double, double* %b
  %mul = fmul double %i0, %i1
  %store1 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
  %store2 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
; CHECK: store <2 x double> %[[V1:[0-9]+]], <2 x double>* %[[V2:[0-9]+]], align 8
  store double %mul, double* %store1
  store double %mul5, double* %store2, align 16
; CHECK: ret
  ret void
}

; Float has 4-byte ABI alignment on x86_64. We must use the alignment of the
; value being loaded/stored, not the alignment of the pointer type.
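; Every scalar access here uses that 4-byte ABI alignment, so the vectorized
; <4 x float> load and store must keep align 4; the CHECK-NOT below guards
; against an over-aligned access being emitted instead.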

; CHECK-LABEL: @test2
; CHECK-NOT: align 8
; CHECK: load <4 x float>{{.*}}, align 4
; CHECK: store <4 x float>{{.*}}, align 4
; CHECK: ret

define void @test2(float * %a, float * %b) {
entry:
  %l0 = load float, float* %a
  %a1 = getelementptr inbounds float, float* %a, i64 1
  %l1 = load float, float* %a1
  %a2 = getelementptr inbounds float, float* %a, i64 2
  %l2 = load float, float* %a2
  %a3 = getelementptr inbounds float, float* %a, i64 3
  %l3 = load float, float* %a3
  store float %l0, float* %b
  %b1 = getelementptr inbounds float, float* %b, i64 1
  store float %l1, float* %b1
  %b2 = getelementptr inbounds float, float* %b, i64 2
  store float %l2, float* %b2
  %b3 = getelementptr inbounds float, float* %b, i64 3
  store float %l3, float* %b3
  ret void
}