; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -amdgpu-spill-sgpr-to-smem=0 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -amdgpu-spill-sgpr-to-smem=0 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -amdgpu-spill-sgpr-to-smem=1 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOSMEM -check-prefix=GCN %s
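
; The RUN lines above cover the three SGPR spill strategies for m0:
; spilling to a VGPR lane (TOVGPR), spilling through a VGPR to scratch
; memory (TOVMEM), and spilling via scalar memory (TOSMEM).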

; XXX - Why does it like to use vcc?

; GCN-LABEL: {{^}}spill_m0:
; TOSMEM: s_mov_b32 s[[LO:[0-9]+]], SCRATCH_RSRC_DWORD0
; TOSMEM: s_mov_b32 s[[HI:[0-9]+]], 0xe80000

; GCN-DAG: s_cmp_lg_u32

; TOVGPR-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOVGPR: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]], 0

; TOVMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOVMEM-DAG: v_mov_b32_e32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]]
; TOVMEM: buffer_store_dword [[SPILL_VREG]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4 ; 4-byte Folded Spill

; TOSMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOSMEM: s_add_u32 m0, s3, 0x100{{$}}
; TOSMEM-NOT: [[M0_COPY]]
; TOSMEM: s_buffer_store_dword [[M0_COPY]], s{{\[}}[[LO]]:[[HI]]], m0 ; 4-byte Folded Spill

; GCN: s_cbranch_scc1 [[ENDIF:BB[0-9]+_[0-9]+]]

; GCN: [[ENDIF]]:
; TOVGPR: v_readlane_b32 [[M0_RESTORE:s[0-9]+]], [[SPILL_VREG]], 0
; TOVGPR: s_mov_b32 m0, [[M0_RESTORE]]

; TOVMEM: buffer_load_dword [[RELOAD_VREG:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:4 ; 4-byte Folded Reload
; TOVMEM: s_waitcnt vmcnt(0)
; TOVMEM: v_readfirstlane_b32 [[M0_RESTORE:s[0-9]+]], [[RELOAD_VREG]]
; TOVMEM: s_mov_b32 m0, [[M0_RESTORE]]

; TOSMEM: s_add_u32 m0, s3, 0x100{{$}}
; TOSMEM: s_buffer_load_dword [[M0_RESTORE:s[0-9]+]], s{{\[}}[[LO]]:[[HI]]], m0 ; 4-byte Folded Reload
; TOSMEM-NOT: [[M0_RESTORE]]
; TOSMEM: s_mov_b32 m0, [[M0_RESTORE]]

; GCN: s_add_i32 s{{[0-9]+}}, m0, 1
define amdgpu_kernel void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
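  ; %m0 is live across the branch below, so it must be spilled in the entry
  ; block and moved back into m0 in %endif (see the checks above).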
  %cmp0 = icmp eq i32 %cond, 0
  br i1 %cmp0, label %if, label %endif

if:
  call void asm sideeffect "v_nop", ""() #0
  br label %endif

endif:
  %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{M0}"(i32 %m0) #0
  store i32 %foo, i32 addrspace(1)* %out
  ret void
}

@lds = internal addrspace(3) global [64 x float] undef

; m0 is killed, so it isn't necessary to preserve it during the entry block spill
; GCN-LABEL: {{^}}spill_kill_m0_lds:
; GCN: s_mov_b32 m0, s6
; GCN: v_interp_mov_f32

; TOSMEM-NOT: s_m0
; TOSMEM: s_add_u32 m0, s7, 0x100
; TOSMEM-NEXT: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
; FIXME: RegScavenger::isRegUsed() always returns true if m0 is reserved, so we have to save and restore it
; FIXME-TOSMEM-NOT: m0

; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s7, 0x300
; TOSMEM: s_buffer_store_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Spill
; FIXME-TOSMEM-NOT: m0

; TOSMEM: s_mov_b64 exec,
; TOSMEM: s_cbranch_execz
; TOSMEM: s_branch

; TOSMEM: BB{{[0-9]+_[0-9]+}}:
; TOSMEM: s_add_u32 m0, s7, 0x400
; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload

; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
define amdgpu_ps void @spill_kill_m0_lds(<16 x i8> addrspace(4)* inreg %arg, <16 x i8> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %m0) #0 {
main_body:
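  ; v_interp_mov_f32 reads m0, which is set up from the inreg %m0 argument
  ; (s_mov_b32 m0, s6 in the checks above).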
  %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  %lds_ptr = getelementptr [64 x float], [64 x float] addrspace(3)* @lds, i32 0, i32 0
  %lds_data_ = load float, float addrspace(3)* %lds_ptr
  %lds_data = call float @llvm.amdgcn.wqm.f32(float %lds_data_)
  br label %endif

else:                                             ; preds = %main_body
  %interp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0)
  br label %endif

endif:                                            ; preds = %else, %if
  %export = phi float [ %lds_data, %if ], [ %interp, %else ]
  %tmp4 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %export, float %export)
  call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp4, <2 x half> %tmp4, i1 true, i1 true) #0
  ret void
}

; Force save and restore of m0 during SMEM spill
; GCN-LABEL: {{^}}m0_unavailable_spill:

; GCN: ; def m0, 1

; GCN: s_mov_b32 m0, s2
; GCN: v_interp_mov_f32

; GCN: ; clobber m0

; TOSMEM: s_mov_b32 s2, m0
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM-NEXT: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
; TOSMEM: s_mov_b32 m0, s2

; TOSMEM: s_mov_b64 exec,
; TOSMEM: s_cbranch_execz
; TOSMEM: s_branch

; TOSMEM: BB{{[0-9]+_[0-9]+}}:
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload

; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
define amdgpu_kernel void @m0_unavailable_spill(i32 %m0.arg) #0 {
main_body:
  %m0 = call i32 asm sideeffect "; def $0, 1", "={M0}"() #0
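  ; m0 holds a live asm-defined value here, so the TOSMEM spill cannot simply
  ; reuse m0 as its offset register: the checks above expect it to be copied
  ; to s2 around the s_buffer_store/s_buffer_load.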
  %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0.arg)
  call void asm sideeffect "; clobber $0", "~{M0}"() #0
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  store volatile i32 8, i32 addrspace(1)* undef
  br label %endif

else:                                             ; preds = %main_body
  store volatile i32 11, i32 addrspace(1)* undef
  br label %endif

endif:
  ret void
}

; GCN-LABEL: {{^}}restore_m0_lds:
; TOSMEM: s_load_dwordx2 [[REG:s\[[0-9]+:[0-9]+\]]]
; TOSMEM: s_cmp_eq_u32
; FIXME: RegScavenger::isRegUsed() always returns true if m0 is reserved, so we have to save and restore it
; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM: s_buffer_store_dwordx2 [[REG]], s[88:91], m0 ; 8-byte Folded Spill
; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s3, 0x300
; TOSMEM: s_buffer_store_dword s{{[0-9]+}}, s[88:91], m0 ; 4-byte Folded Spill
; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_cbranch_scc1

; TOSMEM: s_mov_b32 m0, -1

; TOSMEM: s_mov_b32 s0, m0
; TOSMEM: s_add_u32 m0, s3, 0x100
; TOSMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[88:91], m0 ; 8-byte Folded Reload
; TOSMEM: s_mov_b32 m0, s0
; TOSMEM: s_waitcnt lgkmcnt(0)

; TOSMEM: ds_write_b64

; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s3, 0x300
; TOSMEM: s_buffer_load_dword s0, s[88:91], m0 ; 4-byte Folded Reload
; FIXME-TOSMEM-NOT: m0
; TOSMEM: s_waitcnt lgkmcnt(0)
; TOSMEM-NOT: m0
; TOSMEM: s_mov_b32 m0, s0
; TOSMEM: ; use m0

; TOSMEM: s_dcache_wb
; TOSMEM: s_endpgm
define amdgpu_kernel void @restore_m0_lds(i32 %arg) {
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
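  ; The LDS store in %bb needs m0 reprogrammed for DS access (the checks
  ; above expect s_mov_b32 m0, -1), so the asm-defined m0 value must be
  ; saved and restored around the ds_write before the final "use".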
  %sval = load volatile i64, i64 addrspace(4)* undef
  %cmp = icmp eq i32 %arg, 0
  br i1 %cmp, label %ret, label %bb

bb:
  store volatile i64 %sval, i64 addrspace(3)* undef
  call void asm sideeffect "; use $0", "{M0}"(i32 %m0) #0
  br label %ret

ret:
  ret void
}

declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
declare float @llvm.amdgcn.wqm.f32(float) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }