;RUN: opt -mtriple=amdgcn-mesa-mesa3d -analyze -divergence %s | FileCheck %s

; Verify that the AMDGPU divergence analysis marks the result of every
; llvm.amdgcn.buffer.atomic.* intrinsic as divergent: the returned original
; memory value differs per lane even when all operands are uniform (inreg).

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.swap(
define float @buffer_atomic_swap(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.swap(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.add(
define float @buffer_atomic_add(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.add(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.sub(
define float @buffer_atomic_sub(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.sub(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(
define float @buffer_atomic_smin(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.smin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.umin(
define float @buffer_atomic_umin(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.umin(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.smax(
define float @buffer_atomic_smax(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.smax(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.umax(
define float @buffer_atomic_umax(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.umax(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.and(
define float @buffer_atomic_and(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.and(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.or(
define float @buffer_atomic_or(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.or(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.xor(
define float @buffer_atomic_xor(<4 x i32> inreg %rsrc, i32 inreg %data) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.xor(i32 %data, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

;CHECK: DIVERGENT: %orig = call i32 @llvm.amdgcn.buffer.atomic.cmpswap(
define float @buffer_atomic_cmpswap(<4 x i32> inreg %rsrc, i32 inreg %data, i32 inreg %cmp) #0 {
main_body:
  %orig = call i32 @llvm.amdgcn.buffer.atomic.cmpswap(i32 %data, i32 %cmp, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  %r = bitcast i32 %orig to float
  ret float %r
}

declare i32 @llvm.amdgcn.buffer.atomic.swap(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.add(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.sub(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.smin(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.umin(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.smax(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.umax(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.and(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.or(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.xor(i32, <4 x i32>, i32, i32, i1) #0
declare i32 @llvm.amdgcn.buffer.atomic.cmpswap(i32, i32, <4 x i32>, i32, i32, i1) #0

attributes #0 = { nounwind }