; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck --check-prefix=CHECK %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck --check-prefix=CHECK %s

; This test is for a bug in the machine scheduler where stores without
; an underlying object would be moved across the barrier. In this
; test, the <2 x i8> store will be split into two i8 stores, so they
; won't have an underlying object.

; CHECK-LABEL: {{^}}test:
; CHECK: ds_write_b8
; CHECK: ds_write_b8
; CHECK: s_barrier
; CHECK: s_endpgm
; Function Attrs: nounwind
define void @test(<2 x i8> addrspace(3)* nocapture %arg, <2 x i8> addrspace(1)* nocapture readonly %arg1, i32 addrspace(1)* nocapture readonly %arg2, <2 x i8> addrspace(1)* nocapture %arg3, i32 %arg4, i64 %tmp9) {
bb:
  %tmp10 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp9
  %tmp13 = load i32, i32 addrspace(1)* %tmp10, align 2
  %tmp14 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %arg, i32 %tmp13
  %tmp15 = load <2 x i8>, <2 x i8> addrspace(3)* %tmp14, align 2
  %tmp16 = add i32 %tmp13, 1
  %tmp17 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %arg, i32 %tmp16
  store <2 x i8> %tmp15, <2 x i8> addrspace(3)* %tmp17, align 2
  tail call void @llvm.AMDGPU.barrier.local() #2
  %tmp25 = load i32, i32 addrspace(1)* %tmp10, align 4
  %tmp26 = sext i32 %tmp25 to i64
  %tmp27 = sext i32 %arg4 to i64
  %tmp28 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %arg, i32 %tmp25, i32 %arg4
  %tmp29 = load i8, i8 addrspace(3)* %tmp28, align 1
  %tmp30 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %arg3, i64 %tmp26, i64 %tmp27
  store i8 %tmp29, i8 addrspace(1)* %tmp30, align 1
  %tmp32 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %arg, i32 %tmp25, i32 0
  %tmp33 = load i8, i8 addrspace(3)* %tmp32, align 1
  %tmp35 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %arg3, i64 %tmp26, i64 0
  store i8 %tmp33, i8 addrspace(1)* %tmp35, align 1
  ret void
}

; Function Attrs: convergent nounwind
declare void @llvm.AMDGPU.barrier.local() #2

attributes #2 = { convergent nounwind }