; NOTE: LLVM IR regression test, originally from llvm/test/CodeGen/AArch64.
      1 ; RUN: llc -mtriple=aarch64-linux-gnu -mcpu=cortex-a57 -verify-machineinstrs < %s | FileCheck %s
      2 
; This file checks for a bug in the MachineCopyPropagation pass. The last COPY
; would be incorrectly removed if the machine instructions are as follows:
      5 ;   %Q5_Q6<def> = COPY %Q2_Q3
      6 ;   %D5<def> =
      7 ;   %D3<def> =
      8 ;   %D3<def> = COPY %D6
      9 ; This is caused by a bug in function SourceNoLongerAvailable(), which fails to
     10 ; remove the relationship of D6 and "%Q5_Q6<def> = COPY %Q2_Q3".
     11 
     12 @failed = internal unnamed_addr global i1 false
     13 
     14 ; CHECK-LABEL: foo:
     15 ; CHECK: ld2
     16 ; CHECK-NOT: // kill: D{{[0-9]+}}<def> D{{[0-9]+}}<kill>
     17 define void @foo(<2 x i32> %shuffle251, <8 x i8> %vtbl1.i, i8* %t2, <2 x i32> %vrsubhn_v2.i1364) {
     18 entry:
     19   %val0 = alloca [2 x i64], align 8
     20   %val1 = alloca <2 x i64>, align 16
     21   %vmull = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> <i32 -1, i32 -1>, <2 x i32> %shuffle251)
     22   %vgetq_lane = extractelement <2 x i64> %vmull, i32 0
     23   %cmp = icmp eq i64 %vgetq_lane, 1
     24   br i1 %cmp, label %if.end, label %if.then
     25 
     26 if.then:                                          ; preds = %entry
     27   store i1 true, i1* @failed, align 1
     28   br label %if.end
     29 
     30 if.end:                                           ; preds = %if.then, %entry
     31   tail call void @f2()
     32   %sqdmull = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> <i16 1, i16 0, i16 0, i16 0>, <4 x i16> <i16 2, i16 0, i16 0, i16 0>)
     33   %sqadd = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> zeroinitializer, <4 x i32> %sqdmull)
     34   %shuffle = shufflevector <4 x i32> %sqadd, <4 x i32> undef, <2 x i32> zeroinitializer
     35   %0 = mul <2 x i32> %shuffle, <i32 -1, i32 0>
     36   %sub = add <2 x i32> %0, <i32 1, i32 0>
     37   %sext = sext <2 x i32> %sub to <2 x i64>
     38   %vset_lane603 = shufflevector <2 x i64> %sext, <2 x i64> undef, <1 x i32> zeroinitializer
     39   %t1 = bitcast [2 x i64]* %val0 to i8*
     40   call void @llvm.aarch64.neon.st2lane.v2i64.p0i8(<2 x i64> zeroinitializer, <2 x i64> zeroinitializer, i64 1, i8* %t1)
     41   call void @llvm.aarch64.neon.st2lane.v1i64.p0i8(<1 x i64> <i64 4096>, <1 x i64> <i64 -1>, i64 0, i8* %t2)
     42   %vld2_lane = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i8(<1 x i64> <i64 11>, <1 x i64> <i64 11>, i64 0, i8* %t2)
     43   %vld2_lane.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane, 0
     44   %vld2_lane.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane, 1
     45   %vld2_lane1 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i8(<1 x i64> %vld2_lane.0.extract, <1 x i64> %vld2_lane.1.extract, i64 0, i8* %t1)
     46   %vld2_lane1.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane1, 0
     47   %vld2_lane1.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane1, 1
     48   %t3 = bitcast <2 x i64>* %val1 to i8*
     49   call void @llvm.aarch64.neon.st2.v1i64.p0i8(<1 x i64> %vld2_lane1.0.extract, <1 x i64> %vld2_lane1.1.extract, i8* %t3)
     50   %t4 = load <2 x i64>, <2 x i64>* %val1, align 16
     51   %vsubhn = sub <2 x i64> <i64 11, i64 0>, %t4
     52   %vsubhn1 = lshr <2 x i64> %vsubhn, <i64 32, i64 32>
     53   %vsubhn2 = trunc <2 x i64> %vsubhn1 to <2 x i32>
     54   %neg = xor <2 x i32> %vsubhn2, <i32 -1, i32 -1>
     55   %sqadd1 = call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> <i64 -1>, <1 x i64> <i64 1>)
     56   %sqadd2 = call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> %vset_lane603, <1 x i64> %sqadd1)
     57   %sqadd3 = call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> <i64 1>, <1 x i64> %sqadd2)
     58   %shuffle.i = shufflevector <2 x i32> <i32 undef, i32 0>, <2 x i32> %vrsubhn_v2.i1364, <2 x i32> <i32 1, i32 3>
     59   %cmp.i = icmp uge <2 x i32> %shuffle.i, %neg
     60   %sext.i = sext <2 x i1> %cmp.i to <2 x i32>
     61   %vpadal = call <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32> %sext.i)
     62   %t5 = sub <1 x i64> %vpadal, %sqadd3
     63   %vget_lane1 = extractelement <1 x i64> %t5, i32 0
     64   %cmp2 = icmp eq i64 %vget_lane1, 15
     65   br i1 %cmp2, label %if.end2, label %if.then2
     66 
     67 if.then2:                                       ; preds = %if.end
     68   store i1 true, i1* @failed, align 1
     69   br label %if.end2
     70 
     71 if.end2:                                        ; preds = %if.then682, %if.end
     72   call void @f2()
     73   %vext = shufflevector <8 x i8> <i8 undef, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> %vtbl1.i, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
     74   %t6 = bitcast <8 x i8> %vext to <2 x i32>
     75   call void @f0(<2 x i32> %t6)
     76   ret void
     77 }
     78 
     79 declare void @f0(<2 x i32>)
     80 
     81 declare <8 x i8> @f1()
     82 
     83 declare void @f2()
     84 
     85 declare <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16>, <4 x i16>)
     86 
     87 declare void @llvm.aarch64.neon.st2lane.v2i64.p0i8(<2 x i64>, <2 x i64>, i64, i8* nocapture)
     88 
     89 declare void @llvm.aarch64.neon.st2lane.v1i64.p0i8(<1 x i64>, <1 x i64>, i64, i8* nocapture)
     90 
     91 declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i8(<1 x i64>, <1 x i64>, i64, i8*)
     92 
     93 declare void @llvm.aarch64.neon.st2.v1i64.p0i8(<1 x i64>, <1 x i64>, i8* nocapture)
     94 
     95 declare <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64>, <1 x i64>)
     96 
     97 declare <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32>)
     98 
     99 declare <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32>, <4 x i32>)
    100 
    101 declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>)
    102