; REQUIRES: asserts
; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a53 -pre-RA-sched=source -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a53 -pre-RA-sched=source -enable-misched -verify-misched -debug-only=misched -o - -misched-limit=2 2>&1 > /dev/null | FileCheck %s
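; Note on the RUN lines: the -debug-only=misched trace goes to stderr while the
; generated assembly on stdout is discarded, so FileCheck matches the
; MachineScheduler debug output rather than the final code. The second RUN line
; adds -misched-limit=2, a debugging knob that restricts how many instructions
; the scheduler may consider at a time; the same patterns are expected to match
; with and without that limit.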
;
; The Cortex-A53 machine model will cause the MADD instruction to be scheduled
; much higher than the ADD instructions in order to hide latency. When not
; specifying a subtarget, the MADD will remain near the end of the block.
;
; CHECK: ********** MI Scheduling **********
; CHECK: main
; CHECK: *** Final schedule for BB#2 ***
; CHECK: MADDWrrr
; CHECK: ADDWri
; CHECK: ********** INTERVALS **********
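; FileCheck matches plain CHECK directives in order, so the MADDWrrr/ADDWri
; pair above requires the MADD to appear before an ADDWri in the printed final
; schedule for BB#2, i.e. the multiply-add really was raised.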
@main.x = private unnamed_addr constant [8 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1], align 4
@main.y = private unnamed_addr constant [8 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2], align 4

; Function Attrs: nounwind
define i32 @main() #0 {
entry:
  %retval = alloca i32, align 4
  %x = alloca [8 x i32], align 4
  %y = alloca [8 x i32], align 4
  %i = alloca i32, align 4
  %xx = alloca i32, align 4
  %yy = alloca i32, align 4
  store i32 0, i32* %retval
  %0 = bitcast [8 x i32]* %x to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast ([8 x i32]* @main.x to i8*), i64 32, i32 4, i1 false)
  %1 = bitcast [8 x i32]* %y to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ([8 x i32]* @main.y to i8*), i64 32, i32 4, i1 false)
  store i32 0, i32* %xx, align 4
  store i32 0, i32* %yy, align 4
  store i32 0, i32* %i, align 4
  br label %for.cond

for.cond:                                         ; preds = %for.inc, %entry
  %2 = load i32, i32* %i, align 4
  %cmp = icmp slt i32 %2, 8
  br i1 %cmp, label %for.body, label %for.end

for.body:                                         ; preds = %for.cond
  %3 = load i32, i32* %i, align 4
  %idxprom = sext i32 %3 to i64
  %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %x, i32 0, i64 %idxprom
  %4 = load i32, i32* %arrayidx, align 4
  %add = add nsw i32 %4, 1
  store i32 %add, i32* %xx, align 4
  %5 = load i32, i32* %xx, align 4
  %add1 = add nsw i32 %5, 12
  store i32 %add1, i32* %xx, align 4
  %6 = load i32, i32* %xx, align 4
  %add2 = add nsw i32 %6, 23
  store i32 %add2, i32* %xx, align 4
  %7 = load i32, i32* %xx, align 4
  %add3 = add nsw i32 %7, 34
  store i32 %add3, i32* %xx, align 4
  %8 = load i32, i32* %i, align 4
  %idxprom4 = sext i32 %8 to i64
  %arrayidx5 = getelementptr inbounds [8 x i32], [8 x i32]* %y, i32 0, i64 %idxprom4
  %9 = load i32, i32* %arrayidx5, align 4
  %10 = load i32, i32* %yy, align 4
  %mul = mul nsw i32 %10, %9
  store i32 %mul, i32* %yy, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %11 = load i32, i32* %i, align 4
  %inc = add nsw i32 %11, 1
  store i32 %inc, i32* %i, align 4
  br label %for.cond

for.end:                                          ; preds = %for.cond
  %12 = load i32, i32* %xx, align 4
  %13 = load i32, i32* %yy, align 4
  %add6 = add nsw i32 %12, %13
  ret i32 %add6
}

; The Cortex-A53 machine model will cause the FDIVv4f32 to be raised to
; hide latency. Whereas normally there would be only a single FADDv4f32
; after it, this test checks that more than one follows it.
;
; CHECK: ********** MI Scheduling **********
; CHECK: neon4xfloat:BB#0
; CHECK: *** Final schedule for BB#0 ***
; CHECK: FDIVv4f32
; CHECK: FADDv4f32
; CHECK: FADDv4f32
; CHECK: ********** INTERVALS **********
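; In @neon4xfloat below, the eight fadd instructions form one dependency chain
; (each uses the previous result), while the fdiv depends only on the function
; arguments, so the scheduler is free to raise it; the two FADDv4f32 lines
; above therefore require at least two of the adds to land after the divide.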
define <4 x float> @neon4xfloat(<4 x float> %A, <4 x float> %B) {
  %tmp1 = fadd <4 x float> %A, %B
  %tmp2 = fadd <4 x float> %A, %tmp1
  %tmp3 = fadd <4 x float> %A, %tmp2
  %tmp4 = fadd <4 x float> %A, %tmp3
  %tmp5 = fadd <4 x float> %A, %tmp4
  %tmp6 = fadd <4 x float> %A, %tmp5
  %tmp7 = fadd <4 x float> %A, %tmp6
  %tmp8 = fadd <4 x float> %A, %tmp7
  %tmp9 = fdiv <4 x float> %A, %B
  %tmp10 = fadd <4 x float> %tmp8, %tmp9

  ret <4 x float> %tmp10
}

; Function Attrs: nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1

attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }


; Regression Test for PR19761
;   [ARM64] Cortex-a53 schedule mode can't handle NEON post-increment load
;
; Nothing explicit to check other than llc not crashing.
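; The ld2 intrinsic together with the pointer increment in the block below is
; expected to be selected as a post-increment LD2, the kind of instruction the
; PR title refers to.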
define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2(i8* %A, i8** %ptr) {
  %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
  %tmp = getelementptr i8, i8* %A, i32 32
  store i8* %tmp, i8** %ptr
  ret { <16 x i8>, <16 x i8> } %ld2
}

declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*)

; Regression Test for PR20057.
;
; Cortex-A53 machine model stalls on A53UnitFPMDS contention. Instructions that
; are otherwise ready are jammed in the pending queue.
; CHECK: ********** MI Scheduling **********
; CHECK: testResourceConflict
; CHECK: *** Final schedule for BB#0 ***
; CHECK: BRK
; CHECK: ********** INTERVALS **********
define void @testResourceConflict(float* %ptr) {
entry:
  %add1 = fadd float undef, undef
  %mul2 = fmul float undef, undef
  %add3 = fadd float %mul2, undef
  %mul4 = fmul float undef, %add3
  %add5 = fadd float %mul4, undef
  %sub6 = fsub float 0.000000e+00, undef
  %sub7 = fsub float %add5, undef
  %div8 = fdiv float 1.000000e+00, undef
  %mul9 = fmul float %div8, %sub7
  %mul14 = fmul float %sub6, %div8
  %mul10 = fsub float -0.000000e+00, %mul14
  %mul15 = fmul float undef, %div8
  %mul11 = fsub float -0.000000e+00, %mul15
  %mul12 = fmul float 0.000000e+00, %div8
  %mul13 = fmul float %add1, %mul9
  %mul21 = fmul float %add5, %mul11
  %add22 = fadd float %mul13, %mul21
  store float %add22, float* %ptr, align 4
  %mul28 = fmul float %add1, %mul10
  %mul33 = fmul float %add5, %mul12
  %add34 = fadd float %mul33, %mul28
  store float %add34, float* %ptr, align 4
  %mul240 = fmul float undef, %mul9
  %add246 = fadd float %mul240, undef
  store float %add246, float* %ptr, align 4
  %mul52 = fmul float undef, %mul10
  %mul57 = fmul float undef, %mul12
  %add58 = fadd float %mul57, %mul52
  store float %add58, float* %ptr, align 4
  %mul27 = fmul float 0.000000e+00, %mul9
  %mul81 = fmul float undef, %mul10
  %add82 = fadd float %mul27, %mul81
  store float %add82, float* %ptr, align 4
  call void @llvm.trap()
  unreachable
}

declare void @llvm.trap()

; Regression test for PR20057: "permanent hazard".
; Resource contention on LDST.
; CHECK: ********** MI Scheduling **********
; CHECK: testLdStConflict
; CHECK: *** Final schedule for BB#1 ***
; CHECK: LD4Fourv2d
; CHECK: STRQui
; CHECK: ********** INTERVALS **********
define void @testLdStConflict() {
entry:
  br label %loop

loop:
  %0 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i8(i8* null)
  %ptr = bitcast i8* undef to <2 x i64>*
  store <2 x i64> zeroinitializer, <2 x i64>* %ptr, align 4
  %ptr1 = bitcast i8* undef to <2 x i64>*
  store <2 x i64> zeroinitializer, <2 x i64>* %ptr1, align 4
  %ptr2 = bitcast i8* undef to <2 x i64>*
  store <2 x i64> zeroinitializer, <2 x i64>* %ptr2, align 4
  %ptr3 = bitcast i8* undef to <2 x i64>*
  store <2 x i64> zeroinitializer, <2 x i64>* %ptr3, align 4
  %ptr4 = bitcast i8* undef to <2 x i64>*
  store <2 x i64> zeroinitializer, <2 x i64>* %ptr4, align 4
  br label %loop
}

declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i8(i8*)