; RUN: opt -S -mcpu=swift -mtriple=thumbv7-apple-ios -basicaa -slp-vectorizer < %s | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"

%class.Complex = type { double, double }

; Code like this is the result of SROA. Make sure we don't vectorize this:
; in the scalar version the shl/or are folded away by the backend and
; disappear, whereas the vectorized code would stay.

; CHECK-LABEL: SROAed
; CHECK-NOT: shl <2 x i64>
; CHECK-NOT: or <2 x i64>

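; @SROAed takes two Complex values coerced to [4 x i32], reconstructs their real and
; imaginary doubles, adds them component-wise, and writes the sum through the sret pointer.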
define void @SROAed(%class.Complex* noalias nocapture sret %agg.result, [4 x i32] %a.coerce, [4 x i32] %b.coerce) {
entry:
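  ; Rebuild the two double fields of %a from its [4 x i32] coerced representation:
  ; each pair of i32s is zero-extended, shifted, or'ed into an i64, then bitcast to double.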
  %a.coerce.fca.0.extract = extractvalue [4 x i32] %a.coerce, 0
  %a.sroa.0.0.insert.ext = zext i32 %a.coerce.fca.0.extract to i64
  %a.coerce.fca.1.extract = extractvalue [4 x i32] %a.coerce, 1
  %a.sroa.0.4.insert.ext = zext i32 %a.coerce.fca.1.extract to i64
  %a.sroa.0.4.insert.shift = shl nuw i64 %a.sroa.0.4.insert.ext, 32
  %a.sroa.0.4.insert.insert = or i64 %a.sroa.0.4.insert.shift, %a.sroa.0.0.insert.ext
  %0 = bitcast i64 %a.sroa.0.4.insert.insert to double
  %a.coerce.fca.2.extract = extractvalue [4 x i32] %a.coerce, 2
  %a.sroa.3.8.insert.ext = zext i32 %a.coerce.fca.2.extract to i64
  %a.coerce.fca.3.extract = extractvalue [4 x i32] %a.coerce, 3
  %a.sroa.3.12.insert.ext = zext i32 %a.coerce.fca.3.extract to i64
  %a.sroa.3.12.insert.shift = shl nuw i64 %a.sroa.3.12.insert.ext, 32
  %a.sroa.3.12.insert.insert = or i64 %a.sroa.3.12.insert.shift, %a.sroa.3.8.insert.ext
  %1 = bitcast i64 %a.sroa.3.12.insert.insert to double
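  ; Likewise rebuild the two double fields of %b.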
  %b.coerce.fca.0.extract = extractvalue [4 x i32] %b.coerce, 0
  %b.sroa.0.0.insert.ext = zext i32 %b.coerce.fca.0.extract to i64
  %b.coerce.fca.1.extract = extractvalue [4 x i32] %b.coerce, 1
  %b.sroa.0.4.insert.ext = zext i32 %b.coerce.fca.1.extract to i64
  %b.sroa.0.4.insert.shift = shl nuw i64 %b.sroa.0.4.insert.ext, 32
  %b.sroa.0.4.insert.insert = or i64 %b.sroa.0.4.insert.shift, %b.sroa.0.0.insert.ext
  %2 = bitcast i64 %b.sroa.0.4.insert.insert to double
  %b.coerce.fca.2.extract = extractvalue [4 x i32] %b.coerce, 2
  %b.sroa.3.8.insert.ext = zext i32 %b.coerce.fca.2.extract to i64
  %b.coerce.fca.3.extract = extractvalue [4 x i32] %b.coerce, 3
  %b.sroa.3.12.insert.ext = zext i32 %b.coerce.fca.3.extract to i64
  %b.sroa.3.12.insert.shift = shl nuw i64 %b.sroa.3.12.insert.ext, 32
  %b.sroa.3.12.insert.insert = or i64 %b.sroa.3.12.insert.shift, %b.sroa.3.8.insert.ext
  %3 = bitcast i64 %b.sroa.3.12.insert.insert to double
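  ; Complex addition: add the real and imaginary parts and store them into the sret result.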
  %add = fadd double %0, %2
  %add3 = fadd double %1, %3
  %re.i.i = getelementptr inbounds %class.Complex* %agg.result, i32 0, i32 0
  store double %add, double* %re.i.i, align 4
  %im.i.i = getelementptr inbounds %class.Complex* %agg.result, i32 0, i32 1
  store double %add3, double* %im.i.i, align 4
  ret void
}