; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=generic | FileCheck %s

; Function Attrs: nounwind readnone
declare i64 @llvm.experimental.vector.reduce.add.i64.v2i64(<2 x i64>)
declare i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32>)
declare i16 @llvm.experimental.vector.reduce.add.i16.v8i16(<8 x i16>)
declare i8 @llvm.experimental.vector.reduce.add.i8.v16i8(<16 x i8>)

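; Reducing a full 128-bit vector of bytes should produce a single
; across-lanes addv.16b, with no scalar add chain.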
define i8 @add_B(<16 x i8>* %arr) {
; CHECK-LABEL: add_B
; CHECK: addv {{b[0-9]+}}, {{v[0-9]+}}.16b
  %bin.rdx = load <16 x i8>, <16 x i8>* %arr
  %r = call i8 @llvm.experimental.vector.reduce.add.i8.v16i8(<16 x i8> %bin.rdx)
  ret i8 %r
}

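; The same reduction over halfwords selects the .8h form.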
define i16 @add_H(<8 x i16>* %arr) {
; CHECK-LABEL: add_H
; CHECK: addv {{h[0-9]+}}, {{v[0-9]+}}.8h
  %bin.rdx = load <8 x i16>, <8 x i16>* %arr
  %r = call i16 @llvm.experimental.vector.reduce.add.i16.v8i16(<8 x i16> %bin.rdx)
  ret i16 %r
}

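; And over words, the .4s form.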
define i32 @add_S(<4 x i32>* %arr) {
; CHECK-LABEL: add_S
; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
  %bin.rdx = load <4 x i32>, <4 x i32>* %arr
  %r = call i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32> %bin.rdx)
  ret i32 %r
}

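; addv has no 64-bit element form, so a <2 x i64> reduction must not use it;
; the sum is formed with a pairwise addp instead.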
define i64 @add_D(<2 x i64>* %arr) {
; CHECK-LABEL: add_D
; CHECK-NOT: addv
  %bin.rdx = load <2 x i64>, <2 x i64>* %arr
  %r = call i64 @llvm.experimental.vector.reduce.add.i64.v2i64(<2 x i64> %bin.rdx)
  ret i64 %r
}

declare i32 @llvm.experimental.vector.reduce.add.i32.v8i32(<8 x i32>)

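; A 256-bit reduction input does not fit in one NEON register: the two
; 128-bit halves are summed first, so only one final addv.4s should remain.
; The IR below feeds the reduction an absolute difference of zero-extended
; bytes, |zext(a) - zext(b)|.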
define i32 @oversized_ADDV_256(i8* noalias nocapture readonly %arg1, i8* noalias nocapture readonly %arg2) {
; CHECK-LABEL: oversized_ADDV_256
; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
entry:
  ; Load eight bytes from each argument and widen them to i32 lanes.
  %0 = bitcast i8* %arg1 to <8 x i8>*
  %1 = load <8 x i8>, <8 x i8>* %0, align 1
  %2 = zext <8 x i8> %1 to <8 x i32>
  %3 = bitcast i8* %arg2 to <8 x i8>*
  %4 = load <8 x i8>, <8 x i8>* %3, align 1
  %5 = zext <8 x i8> %4 to <8 x i32>
  ; Absolute difference: negate each lane of the difference that is negative.
  %6 = sub nsw <8 x i32> %2, %5
  %7 = icmp slt <8 x i32> %6, zeroinitializer
  %8 = sub nsw <8 x i32> zeroinitializer, %6
  %9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6
  %r = call i32 @llvm.experimental.vector.reduce.add.i32.v8i32(<8 x i32> %9)
  ret i32 %r
}

declare i32 @llvm.experimental.vector.reduce.add.i32.v16i32(<16 x i32>)

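; Likewise for a 512-bit input: the vector is halved with plain vector adds
; until one 128-bit register remains, then finished with a single addv.4s.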
define i32 @oversized_ADDV_512(<16 x i32>* %arr) {
; CHECK-LABEL: oversized_ADDV_512
; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
  %bin.rdx = load <16 x i32>, <16 x i32>* %arr
  %r = call i32 @llvm.experimental.vector.reduce.add.i32.v16i32(<16 x i32> %bin.rdx)
  ret i32 %r
}