; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -enable-no-nans-fp-math -enable-unsafe-fp-math -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI-NONAN -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s

; FIXME: Should replace unsafe-fp-math with no signed zeros.

declare i32 @llvm.r600.read.tidig.x() #1

; FUNC-LABEL: @test_fmin_legacy_f32
; EG: MIN *
; SI-SAFE: v_min_legacy_f32_e32
; SI-NONAN: v_min_f32_e32
define void @test_fmin_legacy_f32(<4 x float> addrspace(1)* %out, <4 x float> inreg %reg0) #0 {
  %r0 = extractelement <4 x float> %reg0, i32 0
  %r1 = extractelement <4 x float> %reg0, i32 1
  %r2 = fcmp uge float %r0, %r1
  %r3 = select i1 %r2, float %r1, float %r0
  %vec = insertelement <4 x float> undef, float %r3, i32 0
  store <4 x float> %vec, <4 x float> addrspace(1)* %out, align 16
  ret void
}

; FUNC-LABEL: @test_fmin_legacy_ule_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define void @test_fmin_legacy_ule_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load float, float addrspace(1)* %gep.0, align 4
  %b = load float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp ule float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @test_fmin_legacy_ole_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define void @test_fmin_legacy_ole_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load float, float addrspace(1)* %gep.0, align 4
  %b = load float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp ole float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @test_fmin_legacy_olt_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define void @test_fmin_legacy_olt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load float, float addrspace(1)* %gep.0, align 4
  %b = load float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp olt float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @test_fmin_legacy_ult_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define void @test_fmin_legacy_ult_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load float, float addrspace(1)* %gep.0, align 4
  %b = load float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp ult float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @test_fmin_legacy_ole_f32_multi_use
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-NOT: v_min
; SI: v_cmp_le_f32
; SI-NEXT: v_cndmask_b32
; SI-NOT: v_min
; SI: s_endpgm
define void @test_fmin_legacy_ole_f32_multi_use(float addrspace(1)* %out0, i1 addrspace(1)* %out1, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load float, float addrspace(1)* %gep.0, align 4
  %b = load float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp ole float %a, %b
  %val0 = select i1 %cmp, float %a, float %b
  store float %val0, float addrspace(1)* %out0, align 4
  store i1 %cmp, i1 addrspace(1)* %out1
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }