# RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-vgpr-index-mode -run-pass=greedy -stress-regalloc=16 -o - %s | FileCheck -check-prefixes=GCN %s

# An interval for a register that was only partially defined was split,
# creating a new use (a COPY) that was reached by an undef point. In
# particular, a subrange of the new register was reached by an "undef"
# point. When the code in extendSegmentsToUses verified value numbers
# between the new and the old live ranges, it did not account for this
# kind of situation and asserted, expecting the old value to exist. For a
# PHI node it is legal to have a missing predecessor value as long as the
# end of the predecessor is jointly dominated by the undefs.
#
# A simplified form of this can be illustrated as
#
# bb.1:
#   %0:vreg_64 = IMPLICIT_DEF
#   ...
#   S_CBRANCH_SCC1 %bb.2, implicit $vcc
#   S_BRANCH %bb.3
#
# bb.2:
# ; predecessors: %bb.1, %bb.4
#   dead %1:vreg_64 = COPY %0:vreg_64 ; This is the point of the inserted split
#   ...
#   S_BRANCH %bb.5
#
# bb.3:
# ; predecessors: %bb.1
#   undef %0.sub0:vreg_64 = COPY %123:sreg_32 ; undef point for %0.sub1
#   ...
#   S_BRANCH %bb.4
#
# bb.4:
# ; predecessors: %bb.3
#   ...
#   S_BRANCH %bb.2
#
# This test exposes the scenario that previously caused an assert.

---
name:            _amdgpu_ps_main
tracksRegLiveness: true
liveins:
  - { reg: '$vgpr2', virtual-reg: '%0' }
  - { reg: '$vgpr3', virtual-reg: '%1' }
  - { reg: '$vgpr4', virtual-reg: '%2' }
body: |
  bb.0:
    successors: %bb.1(0x40000000), %bb.2(0x40000000)
    liveins: $vgpr2, $vgpr3, $vgpr4
    %2:vgpr_32 = COPY $vgpr4
    %1:vgpr_32 = COPY $vgpr3
    %0:vgpr_32 = COPY $vgpr2
    S_CBRANCH_SCC0 %bb.2, implicit undef $scc

  bb.1:
    successors: %bb.5(0x80000000)
    undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    %3.sub1:vreg_128 = COPY %3.sub0
    %3.sub2:vreg_128 = COPY %3.sub0
    S_BRANCH %bb.5

  bb.2:
    successors: %bb.3(0x40000000), %bb.4(0x40000000)
    S_CBRANCH_SCC0 %bb.4, implicit undef $scc

  bb.3:
    successors: %bb.5(0x80000000)
    undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    %3.sub1:vreg_128 = COPY %3.sub0
    S_BRANCH %bb.5

  bb.4:
    successors: %bb.5(0x80000000)
    %3:vreg_128 = IMPLICIT_DEF

  bb.5:
    successors: %bb.6(0x40000000), %bb.22(0x40000000)
    %4:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    S_CBRANCH_SCC1 %bb.22, implicit undef $scc
    S_BRANCH %bb.6

  bb.6:
    successors: %bb.8(0x40000000), %bb.11(0x40000000)
    %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    dead %6:vgpr_32 = V_MUL_F32_e32 0, undef %7:vgpr_32, implicit $exec
    dead %8:vgpr_32 = V_MUL_F32_e32 0, %2, implicit $exec
    undef %9.sub1:vreg_64 = V_MUL_F32_e32 0, %1, implicit $exec
    undef %10.sub0:vreg_128 = V_MUL_F32_e32 0, %0, implicit $exec
    undef %11.sub0:sreg_256 = S_MOV_B32 0
    %11.sub1:sreg_256 = COPY %11.sub0
    %11.sub2:sreg_256 = COPY %11.sub0
    %11.sub3:sreg_256 = COPY %11.sub0
    %11.sub4:sreg_256 = COPY %11.sub0
    %11.sub5:sreg_256 = COPY %11.sub0
    %11.sub6:sreg_256 = COPY %11.sub0
    %11.sub7:sreg_256 = COPY %11.sub0
    %12:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %9, %11, undef %13:sreg_128, 15, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
    %14:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec
    %15:vreg_128 = IMPLICIT_DEF
    S_CBRANCH_SCC1 %bb.8, implicit undef $scc
    S_BRANCH %bb.11

  bb.7:
    successors: %bb.13(0x80000000)
    undef %15.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    %15.sub1:vreg_128 = COPY %15.sub0
    %15.sub2:vreg_128 = COPY %15.sub0
    %5:vgpr_32 = IMPLICIT_DEF
    S_BRANCH %bb.13

  bb.8:
    successors: %bb.9(0x40000000), %bb.10(0x40000000)
    S_CBRANCH_SCC0 %bb.10, implicit undef $scc

  bb.9:
    successors: %bb.12(0x80000000)
    undef %15.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    %15.sub1:vreg_128 = COPY %15.sub0
    %15.sub2:vreg_128 = COPY %15.sub0
    S_BRANCH %bb.12

  bb.10:
    successors: %bb.12(0x80000000)
    undef %15.sub0:vreg_128 = V_MOV_B32_e32 2143289344, implicit $exec
    %15.sub1:vreg_128 = COPY %15.sub0
    %15.sub2:vreg_128 = COPY %15.sub0
    S_BRANCH %bb.12

  bb.11:
    successors: %bb.7(0x40000000), %bb.13(0x40000000)
    %16:sreg_64 = V_CMP_NE_U32_e64 0, %14, implicit $exec
    %17:sreg_64 = S_AND_B64 $exec, %16, implicit-def dead $scc
    $vcc = COPY %17
    S_CBRANCH_VCCNZ %bb.7, implicit $vcc
    S_BRANCH %bb.13

  bb.12:
    successors: %bb.11(0x80000000)
    %14:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    %5:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec
    S_BRANCH %bb.11

  bb.13:
    successors: %bb.15(0x40000000), %bb.14(0x40000000)

    ; In reality we are checking that this code doesn't assert when splitting
    ; and inserting a spill. Here we just check that, at the point where the
    ; error used to occur, we see a correctly generated spill.
    ; GCN-LABEL: bb.13:
    ; GCN: SI_SPILL_V128_SAVE %{{[0-9]+}}, %stack.1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec

    %18:vgpr_32 = V_MAD_F32 0, %10.sub0, 0, target-flags(amdgpu-gotprel) 1073741824, 0, -1082130432, 0, 0, implicit $exec
    %19:vgpr_32 = V_MAD_F32 0, %12.sub0, 0, target-flags(amdgpu-gotprel) 0, 0, 0, 0, 0, implicit $exec
    %20:sreg_128 = S_BUFFER_LOAD_DWORDX4_IMM undef %21:sreg_128, 1040, 0 :: (dereferenceable invariant load 16)
    %22:vgpr_32 = V_ADD_F32_e32 0, %19, implicit $exec
    %23:vgpr_32 = V_MAD_F32 0, %18, 0, 0, 0, 0, 0, 0, implicit $exec
    %24:vgpr_32 = COPY %20.sub3
    %25:vgpr_32 = V_MUL_F32_e64 0, target-flags(amdgpu-gotprel32-lo) 0, 0, %20.sub1, 0, 0, implicit $exec
    %26:sreg_128 = S_BUFFER_LOAD_DWORDX4_IMM undef %27:sreg_128, 1056, 0 :: (dereferenceable invariant load 16)
    %28:vgpr_32 = V_MAD_F32 0, %18, 0, %26.sub0, 0, 0, 0, 0, implicit $exec
    %29:vgpr_32 = V_ADD_F32_e32 %28, %19, implicit $exec
    %30:vgpr_32 = V_RCP_F32_e32 %29, implicit $exec
    %25:vgpr_32 = V_MAC_F32_e32 0, %18, %25, implicit $exec
    %31:vgpr_32 = V_MAD_F32 0, target-flags(amdgpu-gotprel) 0, 0, %12.sub0, 0, %24, 0, 0, implicit $exec
    %32:vgpr_32 = V_ADD_F32_e32 %25, %31, implicit $exec
    %33:vgpr_32 = V_MUL_F32_e32 %22, %30, implicit $exec
    %34:vgpr_32 = V_MUL_F32_e32 %23, %30, implicit $exec
    %35:vgpr_32 = V_MUL_F32_e32 %32, %30, implicit $exec
    %36:vgpr_32 = V_MUL_F32_e32 0, %34, implicit $exec
    %36:vgpr_32 = V_MAC_F32_e32 0, %33, %36, implicit $exec
    %37:vgpr_32 = V_MAD_F32 0, %35, 0, 0, 0, 0, 0, 0, implicit $exec
    %38:sreg_64_xexec = V_CMP_NE_U32_e64 0, %5, implicit $exec
    %39:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %38, implicit $exec
    V_CMP_NE_U32_e32 1, %39, implicit-def $vcc, implicit $exec
    $vcc = S_AND_B64 $exec, $vcc, implicit-def dead $scc
    %40:vgpr_32 = V_ADD_F32_e32 %36, %37, implicit $exec
    S_CBRANCH_VCCZ %bb.15, implicit $vcc

  bb.14:
    successors: %bb.17(0x80000000)
    S_BRANCH %bb.17

  bb.15:
    successors: %bb.16(0x40000000), %bb.18(0x40000000)
    %41:vgpr_32 = V_MAD_F32 0, %40, 0, 0, 0, 0, 0, 0, implicit $exec
    %42:sreg_64 = V_CMP_LE_F32_e64 0, 0, 0, %41, 0, implicit $exec
    %43:sreg_64 = V_CMP_GE_F32_e64 0, 1065353216, 0, %41, 0, implicit $exec
    %44:sreg_64 = S_AND_B64 %43, %43, implicit-def dead $scc
    %45:sreg_64 = S_AND_B64 %42, %42, implicit-def dead $scc
    %46:sreg_64 = S_AND_B64 %45, %44, implicit-def dead $scc
    %47:sreg_64 = COPY $exec, implicit-def $exec
    %48:sreg_64 = S_AND_B64 %47, %46, implicit-def dead $scc
    $exec = S_MOV_B64_term %48
    SI_MASK_BRANCH %bb.18, implicit $exec
    S_BRANCH %bb.16

  bb.16:
    successors: %bb.18(0x80000000)
    S_BRANCH %bb.18

  bb.17:
    successors: %bb.21(0x40000000), %bb.23(0x40000000)
    %49:sreg_64 = V_CMP_NE_U32_e64 0, %5, implicit $exec
    %50:sreg_64 = S_AND_B64 $exec, %49, implicit-def dead $scc
    %51:vreg_128 = IMPLICIT_DEF
    $vcc = COPY %50
    S_CBRANCH_VCCNZ %bb.21, implicit $vcc
    S_BRANCH %bb.23

  bb.18:
    successors: %bb.20(0x40000000), %bb.19(0x40000000)
    $exec = S_OR_B64 $exec, %47, implicit-def $scc
    %52:vgpr_32 = V_MAD_F32 0, %3.sub1, 0, target-flags(amdgpu-gotprel32-lo) 0, 1, %3.sub0, 0, 0, implicit $exec
    %53:vgpr_32 = V_MUL_F32_e32 -2147483648, %3.sub1, implicit $exec
    %53:vgpr_32 = V_MAC_F32_e32 target-flags(amdgpu-gotprel32-hi) 1065353216, %3.sub2, %53, implicit $exec
    %54:vgpr_32 = V_MUL_F32_e32 %53, %53, implicit $exec
    %54:vgpr_32 = V_MAC_F32_e32 %52, %52, %54, implicit $exec
    %55:vgpr_32 = V_SQRT_F32_e32 %54, implicit $exec
    %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    %56:vgpr_32 = V_MOV_B32_e32 981668463, implicit $exec
    %57:sreg_64 = V_CMP_NGT_F32_e64 0, %55, 0, %56, 0, implicit $exec
    %58:sreg_64 = S_AND_B64 $exec, %57, implicit-def dead $scc
    $vcc = COPY %58
    S_CBRANCH_VCCZ %bb.20, implicit $vcc

  bb.19:
    successors: %bb.17(0x80000000)
    S_BRANCH %bb.17

  bb.20:
    successors: %bb.17(0x80000000)
    S_BRANCH %bb.17

  bb.21:
    successors: %bb.23(0x80000000)
    %59:sreg_32 = S_MOV_B32 0
    undef %51.sub0:vreg_128 = COPY %59
    S_BRANCH %bb.23

  bb.22:
    successors: %bb.24(0x80000000)
    S_BRANCH %bb.24

  bb.23:
    successors: %bb.22(0x80000000)
    undef %60.sub1:vreg_64 = V_CVT_I32_F32_e32 %1, implicit $exec
    %60.sub0:vreg_64 = V_CVT_I32_F32_e32 %0, implicit $exec
    undef %61.sub0:sreg_256 = S_MOV_B32 0
    %61.sub1:sreg_256 = COPY %61.sub0
    %61.sub2:sreg_256 = COPY %61.sub0
    %61.sub3:sreg_256 = COPY %61.sub0
    %61.sub4:sreg_256 = COPY %61.sub0
    %61.sub5:sreg_256 = COPY %61.sub0
    %61.sub6:sreg_256 = COPY %61.sub0
    %61.sub7:sreg_256 = COPY %61.sub0
    %62:vgpr_32 = V_MOV_B32_e32 1033100696, implicit $exec
    %63:vgpr_32 = V_MUL_F32_e32 1060575065, %15.sub1, implicit $exec
    %63:vgpr_32 = V_MAC_F32_e32 1046066128, %15.sub0, %63, implicit $exec
    %64:vgpr_32 = IMAGE_LOAD_V1_V2 %60, %61, 1, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
    %64:vgpr_32 = V_MAC_F32_e32 target-flags(amdgpu-gotprel) 0, %51.sub0, %64, implicit $exec
    %65:vgpr_32 = V_MUL_F32_e32 0, %64, implicit $exec
    %66:vgpr_32 = V_MUL_F32_e32 0, %65, implicit $exec
    %67:vgpr_32 = V_MAD_F32 0, %66, 0, %62, 0, 0, 0, 0, implicit $exec
    %63:vgpr_32 = V_MAC_F32_e32 %15.sub2, %62, %63, implicit $exec
    %4:vgpr_32 = V_ADD_F32_e32 %63, %67, implicit $exec
    S_BRANCH %bb.22

  bb.24:
    %68:vgpr_32 = V_MUL_F32_e32 0, %4, implicit $exec
    %69:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, undef %70:vgpr_32, 0, %68, 0, implicit $exec
    EXP 0, undef %71:vgpr_32, %69, undef %72:vgpr_32, undef %73:vgpr_32, -1, -1, 15, implicit $exec
    S_ENDPGM
...