; RUN: llc -march=amdgcn -mcpu=SI -enable-misched < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=bonaire -enable-misched < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -enable-misched < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s

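; frem has no hardware instruction, so it is expanded inline as
; x - trunc(x / y) * y.  With default (precise) fp math the f32 divide is not a
; bare reciprocal: the checks below expect a v_cmp and extra multiplies around
; the v_rcp before the final trunc/mad.  The offset:16 load comes from the
; getelementptr with index 4 (4 x 4 bytes).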
; FUNC-LABEL: {{^}}frem_f32:
; GCN-DAG: buffer_load_dword [[X:v[0-9]+]], {{.*$}}
; GCN-DAG: buffer_load_dword [[Y:v[0-9]+]], {{.*}} offset:16
; GCN-DAG: v_cmp
; GCN-DAG: v_mul_f32
; GCN: v_rcp_f32_e32
; GCN: v_mul_f32_e32
; GCN: v_mul_f32_e32
; GCN: v_trunc_f32_e32
; GCN: v_mad_f32
; GCN: s_endpgm
define void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
                      float addrspace(1)* %in2) #0 {
   %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
   %r0 = load float, float addrspace(1)* %in1, align 4
   %r1 = load float, float addrspace(1)* %gep2, align 4
   %r2 = frem float %r0, %r1
   store float %r2, float addrspace(1)* %out, align 4
   ret void
}

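; With "unsafe-fp-math"="true" the divide should collapse to a plain rcp + mul,
; so the exact register operands of the whole rcp/mul/trunc/mad chain can be
; matched here.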
; FUNC-LABEL: {{^}}unsafe_frem_f32:
; GCN: buffer_load_dword [[Y:v[0-9]+]], {{.*}} offset:16
; GCN: buffer_load_dword [[X:v[0-9]+]], {{.*}}
; GCN: v_rcp_f32_e32 [[INVY:v[0-9]+]], [[Y]]
; GCN: v_mul_f32_e32 [[DIV:v[0-9]+]], [[INVY]], [[X]]
; GCN: v_trunc_f32_e32 [[TRUNC:v[0-9]+]], [[DIV]]
; GCN: v_mad_f32 [[RESULT:v[0-9]+]], -[[TRUNC]], [[Y]], [[X]]
; GCN: buffer_store_dword [[RESULT]]
; GCN: s_endpgm
define void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
                             float addrspace(1)* %in2) #1 {
   %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
   %r0 = load float, float addrspace(1)* %in1, align 4
   %r1 = load float, float addrspace(1)* %gep2, align 4
   %r2 = frem float %r0, %r1
   store float %r2, float addrspace(1)* %out, align 4
   ret void
}

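; f64 frem with precise fp math: the divide uses the v_div_scale/v_div_fmas
; sequence.  Only CI and newer have v_trunc_f64, hence the CI-prefixed checks
; for the truncation step.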
; FUNC-LABEL: {{^}}frem_f64:
; GCN: buffer_load_dwordx2 [[Y:v\[[0-9]+:[0-9]+\]]], {{.*}}, 0
; GCN: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]], {{.*}}, 0
; GCN-DAG: v_div_fmas_f64
; GCN-DAG: v_div_scale_f64
; GCN-DAG: v_mul_f64
; CI: v_trunc_f64_e32
; CI: v_mul_f64
; GCN: v_add_f64
; GCN: buffer_store_dwordx2
; GCN: s_endpgm
define void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                      double addrspace(1)* %in2) #0 {
   %r0 = load double, double addrspace(1)* %in1, align 8
   %r1 = load double, double addrspace(1)* %in2, align 8
   %r2 = frem double %r0, %r1
   store double %r2, double addrspace(1)* %out, align 8
   ret void
}

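; Unsafe f64 frem: the divide becomes rcp + mul.  SI lacks v_trunc_f64, so the
; truncation is expanded with bit operations (the v_bfe_u32 check), while CI
; can use v_trunc_f64_e32 directly.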
; FUNC-LABEL: {{^}}unsafe_frem_f64:
; GCN: v_rcp_f64_e32
; GCN: v_mul_f64
; SI: v_bfe_u32
; CI: v_trunc_f64_e32
; GCN: v_fma_f64
; GCN: s_endpgm
define void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                             double addrspace(1)* %in2) #1 {
   %r0 = load double, double addrspace(1)* %in1, align 8
   %r1 = load double, double addrspace(1)* %in2, align 8
   %r2 = frem double %r0, %r1
   store double %r2, double addrspace(1)* %out, align 8
   ret void
}

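; The vector variants below carry no CHECK lines; they only verify that frem on
; vector types makes it through codegen without error.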
define void @frem_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
                        <2 x float> addrspace(1)* %in2) #0 {
   %gep2 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in2, i32 4
   %r0 = load <2 x float>, <2 x float> addrspace(1)* %in1, align 8
   %r1 = load <2 x float>, <2 x float> addrspace(1)* %gep2, align 8
   %r2 = frem <2 x float> %r0, %r1
   store <2 x float> %r2, <2 x float> addrspace(1)* %out, align 8
   ret void
}

define void @frem_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
                        <4 x float> addrspace(1)* %in2) #0 {
   %gep2 = getelementptr <4 x float>, <4 x float> addrspace(1)* %in2, i32 4
   %r0 = load <4 x float>, <4 x float> addrspace(1)* %in1, align 16
   %r1 = load <4 x float>, <4 x float> addrspace(1)* %gep2, align 16
   %r2 = frem <4 x float> %r0, %r1
   store <4 x float> %r2, <4 x float> addrspace(1)* %out, align 16
   ret void
}

define void @frem_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
                        <2 x double> addrspace(1)* %in2) #0 {
   %gep2 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in2, i32 4
   %r0 = load <2 x double>, <2 x double> addrspace(1)* %in1, align 16
   %r1 = load <2 x double>, <2 x double> addrspace(1)* %gep2, align 16
   %r2 = frem <2 x double> %r0, %r1
   store <2 x double> %r2, <2 x double> addrspace(1)* %out, align 16
   ret void
}

attributes #0 = { nounwind "unsafe-fp-math"="false" }
attributes #1 = { nounwind "unsafe-fp-math"="true" }