; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=SI -mattr=-promote-alloca < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=SI -mattr=+promote-alloca < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s

declare i32 @llvm.SI.tid() nounwind readnone
declare void @llvm.AMDGPU.barrier.local() nounwind convergent

; The pointer calculation required for the alloca'd array actually requires
; an add and will not be folded into the addressing mode, which fails with a
; 64-bit pointer add. This should work, since private pointers should
; be 32 bits.
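;
; A rough sketch of why the pointer width matters, assuming the usual GCN
; lowering: a 64-bit pointer add would need a v_add_i32 / v_addc_u32 carry
; pair, while a 32-bit private (scratch) offset needs only the single
; v_add_i32 matched by the SI-ALLOCA checks below.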

; SI-LABEL: {{^}}test_private_array_ptr_calc:

; FIXME: We end up with a zero argument for the ADD, because
; SIRegisterInfo::eliminateFrameIndex() blindly replaces the frame index
; with the appropriate offset.  We should fold this into the store.
; SI-ALLOCA: v_add_i32_e32 [[PTRREG:v[0-9]+]], vcc, 0, v{{[0-9]+}}
; SI-ALLOCA: buffer_store_dword {{v[0-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}]
;
; FIXME: The AMDGPUPromoteAlloca pass should be able to convert this
; alloca to a vector.  It currently fails because it does not know how
; to interpret:
; getelementptr [4 x i32], [4 x i32]* %alloca, i32 1, i32 %b
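;
; A rough sketch of the address arithmetic this GEP implies, assuming the
; standard [4 x i32] layout: the alloca reserves 4 x [4 x i32] = 64 bytes,
; the outer index of 1 skips one 16-byte [4 x i32] element, and the inner
; index adds 4 * %b bytes, so the address works out to %alloca + 16 + 4 * %b.
; The constant 16 is what the SI-PROMOTE add check below matches.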

; SI-PROMOTE: v_add_i32_e32 [[PTRREG:v[0-9]+]], vcc, 16
; SI-PROMOTE: ds_write_b32 [[PTRREG]]
define void @test_private_array_ptr_calc(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
  %alloca = alloca [4 x i32], i32 4, align 16
  %tid = call i32 @llvm.SI.tid() readnone
  %a_ptr = getelementptr i32, i32 addrspace(1)* %inA, i32 %tid
  %b_ptr = getelementptr i32, i32 addrspace(1)* %inB, i32 %tid
  %a = load i32, i32 addrspace(1)* %a_ptr
  %b = load i32, i32 addrspace(1)* %b_ptr
  %result = add i32 %a, %b
  %alloca_ptr = getelementptr [4 x i32], [4 x i32]* %alloca, i32 1, i32 %b
  store i32 %result, i32* %alloca_ptr, align 4
  ; Dummy call acting as a barrier between the store and the reload
  call void @llvm.AMDGPU.barrier.local() nounwind convergent
  %reload = load i32, i32* %alloca_ptr, align 4
  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  store i32 %reload, i32 addrspace(1)* %out_ptr, align 4
  ret void
}