; Home | History | Annotate | Download | only in AMDGPU
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
      4 
      5 
; FUNC-LABEL: {{^}}v_fsub_f32:
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}

; Scalar f32 subtraction where both operands are loaded from global memory
; (so both live in VGPRs); on GCN this is expected to select v_subrev_f32_e32.
define void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1   ; b = in[1]
  %a = load float, float addrspace(1)* %in, align 4              ; a = in[0]
  %b = load float, float addrspace(1)* %b_ptr, align 4
  %result = fsub float %a, %b
  store float %result, float addrspace(1)* %out, align 4
  ret void
}
     16 
; FUNC-LABEL: {{^}}s_fsub_f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, -KC0[2].W

; SI: v_sub_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}

; f32 subtraction where both operands arrive as kernel arguments; the SI
; check expects one operand to be read directly from an SGPR by v_sub_f32_e32.
define void @s_fsub_f32(float addrspace(1)* %out, float %a, float %b) {
  %sub = fsub float %a, %b
  store float %sub, float addrspace(1)* %out, align 4
  ret void
}
     26 
; NOTE(review): leftover declarations of legacy R600/AMDGPU intrinsics with no
; remaining call sites in this test — candidates for removal; kept as-is here.
declare float @llvm.R600.load.input(i32) readnone

declare void @llvm.AMDGPU.store.output(float, i32)
     30 
; FUNC-LABEL: {{^}}fsub_v2f32:
; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y

; FIXME: Should be using SGPR directly for first operand
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}

; <2 x float> subtraction of kernel arguments; the vector is scalarized to
; two per-lane f32 subtracts (one SI check per element).
define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
  %sub = fsub <2 x float> %a, %b
  store <2 x float> %sub, <2 x float> addrspace(1)* %out, align 8
  ret void
}
     43 
; FUNC-LABEL: {{^}}v_fsub_v4f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}

; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}

; <4 x float> subtraction of two adjacent vectors loaded from global memory
; (all operands in VGPRs); scalarized to four f32 subtracts.
define void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1   ; b = in[1]
  %a = load <4 x float>, <4 x float> addrspace(1)* %in, align 16             ; a = in[0]
  %b = load <4 x float>, <4 x float> addrspace(1)* %b_ptr, align 16
  %result = fsub <4 x float> %a, %b
  store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
  ret void
}
     62 
; FIXME: Should be using SGPR directly for first operand

; FUNC-LABEL: {{^}}s_fsub_v4f32:
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: s_endpgm

; <4 x float> subtraction where both vectors arrive as kernel arguments;
; scalarized to four f32 subtracts, with the program-end check anchoring
; all four matches inside this kernel.
define void @s_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) {
  %result = fsub <4 x float> %a, %b
  store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
  ret void
}
     76