; RUN: opt < %s  -loop-vectorize -mattr=avx,+slow-unaligned-mem-32 -S | FileCheck %s --check-prefix=SLOWMEM32 --check-prefix=CHECK
; RUN: opt < %s  -loop-vectorize -mattr=avx,-slow-unaligned-mem-32 -S | FileCheck %s --check-prefix=FASTMEM32 --check-prefix=CHECK
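; Both loops below should be vectorized. The two RUN lines differ only in
; whether unaligned 32-byte memory accesses are treated as slow, which
; affects the vector width chosen for the i64 loop in @read_mod_i64.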

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

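; Multiply every float in %a by 3.0. This loop is expected to be vectorized
; with <8 x float> loads regardless of the unaligned-memory tuning.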
; CHECK-LABEL: @read_mod_write_single_ptr(
; CHECK: load <8 x float>
; CHECK: ret i32
define i32 @read_mod_write_single_ptr(float* nocapture %a, i32 %n) nounwind uwtable ssp {
  %1 = icmp sgt i32 %n, 0
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
  %2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
  %3 = load float, float* %2, align 4
  %4 = fmul float %3, 3.000000e+00
  store float %4, float* %2, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}
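; Add 3 to every i64 in %a. The accesses are only 4-byte aligned, so with
; +slow-unaligned-mem-32 the vectorizer is expected to use 16-byte <2 x i64>
; accesses, while with -slow-unaligned-mem-32 it can use full 32-byte
; <4 x i64> accesses.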
; CHECK-LABEL: @read_mod_i64(
; SLOWMEM32: load <2 x i64>
; FASTMEM32: load <4 x i64>
; CHECK: ret i32
define i32 @read_mod_i64(i64* nocapture %a, i32 %n) nounwind uwtable ssp {
  %1 = icmp sgt i32 %n, 0
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
  %2 = getelementptr inbounds i64, i64* %a, i64 %indvars.iv
  %3 = load i64, i64* %2, align 4
  %4 = add i64 %3, 3
  store i64 %4, i64* %2, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}