; RUN: llc -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

declare i32 @llvm.amdgcn.workitem.id.x() #0

; The inline asm carries the convergent attribute (#1), so the CHECK lines
; require the v_cmp to be emitted in the entry block (%bb.0), ahead of the
; mask branch, rather than sunk into the conditionally-executed block.
; GCN-LABEL: {{^}}convergent_inlineasm:
; GCN: %bb.0:
; GCN: v_cmp_ne_u32_e64
; GCN: ; mask branch
; GCN: BB{{[0-9]+_[0-9]+}}:
define amdgpu_kernel void @convergent_inlineasm(i64 addrspace(1)* nocapture %arg) {
bb:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  ; Convergent inline asm: the result is only used inside the divergent
  ; branch, which would otherwise make it a sinking candidate.
  %tmp1 = tail call i64 asm "v_cmp_ne_u32_e64 $0, 0, $1", "=s,v"(i32 1) #1
  %tmp2 = icmp eq i32 %tmp, 8
  br i1 %tmp2, label %bb3, label %bb5

bb3:                                              ; preds = %bb
  %tmp4 = getelementptr i64, i64 addrspace(1)* %arg, i32 %tmp
  store i64 %tmp1, i64 addrspace(1)* %arg, align 8
  br label %bb5

bb5:                                              ; preds = %bb3, %bb
  ret void
}

; Identical IR except the inline asm is NOT convergent: here the CHECK lines
; accept the v_cmp appearing after the mask branch, i.e. sunk into the
; conditionally-executed block.
; GCN-LABEL: {{^}}nonconvergent_inlineasm:
; GCN: ; mask branch

; GCN: BB{{[0-9]+_[0-9]+}}:
; GCN: v_cmp_ne_u32_e64

; GCN: BB{{[0-9]+_[0-9]+}}:

define amdgpu_kernel void @nonconvergent_inlineasm(i64 addrspace(1)* nocapture %arg) {
bb:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  ; Same asm as above, but without attribute group #1 (no convergent).
  %tmp1 = tail call i64 asm "v_cmp_ne_u32_e64 $0, 0, $1", "=s,v"(i32 1)
  %tmp2 = icmp eq i32 %tmp, 8
  br i1 %tmp2, label %bb3, label %bb5

bb3:                                              ; preds = %bb
  %tmp4 = getelementptr i64, i64 addrspace(1)* %arg, i32 %tmp
  store i64 %tmp1, i64 addrspace(1)* %arg, align 8
  br label %bb5

bb5:                                              ; preds = %bb3, %bb
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { convergent nounwind readnone }