; llvm/test/CodeGen/AMDGPU — multi-level loop-break annotation test
      1 ; RUN: opt -S -mtriple=amdgcn-- -structurizecfg -si-annotate-control-flow < %s | FileCheck -check-prefix=OPT %s
      2 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
      3 
      4 ; OPT-LABEL: {{^}}define amdgpu_vs void @multi_else_break(
      5 ; OPT: main_body:
      6 ; OPT: LOOP.outer:
      7 ; OPT: LOOP:
      8 ; OPT:     [[if:%[0-9]+]] = call { i1, i64 } @llvm.amdgcn.if(
      9 ; OPT:     [[if_exec:%[0-9]+]] = extractvalue { i1, i64 } [[if]], 1
     10 ;
     11 ; OPT: Flow:
     12 ;
     13 ; Ensure two else.break calls, for both the inner and outer loops
     14 
     15 ; OPT:        call i64 @llvm.amdgcn.else.break(i64 [[if_exec]],
     16 ; OPT-NEXT:   call i64 @llvm.amdgcn.else.break(i64 [[if_exec]],
     17 ; OPT-NEXT:   call void @llvm.amdgcn.end.cf
     18 ;
     19 ; OPT: Flow1:
     20 
     21 ; GCN-LABEL: {{^}}multi_else_break:
     22 
     23 ; GCN: [[OUTER_LOOP:BB[0-9]+_[0-9]+]]: ; %LOOP.outer{{$}}
     24 
     25 ; GCN: [[INNER_LOOP:BB[0-9]+_[0-9]+]]: ; %LOOP{{$}}
     26 ; GCN: s_and_saveexec_b64 [[SAVE_BREAK:s\[[0-9]+:[0-9]+\]]], vcc
     27 
     28 ; GCN: BB{{[0-9]+}}_{{[0-9]+}}: ; %Flow{{$}}
     29 ; GCN-NEXT: ; in Loop: Header=[[INNER_LOOP]] Depth=2
     30 
      31 ; Ensure the extra s_or is eliminated
     32 ; GCN-NEXT: s_or_b64 exec, exec, [[SAVE_BREAK]]
     33 ; GCN-NEXT: s_mov_b64
     34 ; GCN-NEXT: s_and_b64 [[MASKED_SAVE_BREAK:s\[[0-9]+:[0-9]+\]]], exec, [[SAVE_BREAK]]
     35 ; GCN-NEXT: s_or_b64 [[OR_BREAK:s\[[0-9]+:[0-9]+\]]], [[MASKED_SAVE_BREAK]], s{{\[[0-9]+:[0-9]+\]}}
     36 ; GCN-NEXT: s_andn2_b64 exec, exec, [[OR_BREAK]]
     37 ; GCN-NEXT: s_cbranch_execnz [[INNER_LOOP]]
     38 
     39 ; GCN: ; %bb.{{[0-9]+}}: ; %Flow2{{$}}
     40 ; GCN-NEXT: ; in Loop: Header=[[OUTER_LOOP]] Depth=1
     41 
     42 ; Ensure copy is eliminated
     43 ; GCN-NEXT: s_or_b64 exec, exec, [[OR_BREAK]]
     44 ; GCN-NEXT: s_and_b64 [[MASKED2_SAVE_BREAK:s\[[0-9]+:[0-9]+\]]], exec, [[SAVE_BREAK]]
     45 ; GCN-NEXT: s_or_b64 [[OUTER_OR_BREAK:s\[[0-9]+:[0-9]+\]]], [[MASKED2_SAVE_BREAK]], s{{\[[0-9]+:[0-9]+\]}}
     46 ; GCN-NEXT: s_andn2_b64 exec, exec, [[OUTER_OR_BREAK]]
     47 ; GCN-NEXT: s_cbranch_execnz [[OUTER_LOOP]]
; Nested loop pair: the inner LOOP can be left either by returning (IF) or by
; taking the back-edge of the outer loop (ENDIF -> LOOP.outer), so the
; structurizer must emit a break for both the inner and the outer loop
; (the two llvm.amdgcn.else.break calls checked above).
; NOTE: block/value names here are matched by the CHECK lines — do not rename.
      48 define amdgpu_vs void @multi_else_break(<4 x float> %vec, i32 %ub, i32 %cont) {
      49 main_body:
      50   br label %LOOP.outer
      51 
      52 LOOP.outer:                                       ; preds = %ENDIF, %main_body
      53   %tmp43 = phi i32 [ 0, %main_body ], [ %tmp47, %ENDIF ]
      54   br label %LOOP
      55 
      56 LOOP:                                             ; preds = %ENDIF, %LOOP.outer
      57   %tmp45 = phi i32 [ %tmp43, %LOOP.outer ], [ %tmp47, %ENDIF ]
      58   %tmp47 = add i32 %tmp45, 1                      ; counter, carried by both loop phis
      59   %tmp48 = icmp slt i32 %tmp45, %ub
      60   br i1 %tmp48, label %ENDIF, label %IF           ; counter >= %ub exits the function
      61 
      62 IF:                                               ; preds = %LOOP
      63   ret void
      64 
      65 ENDIF:                                            ; preds = %LOOP
      66   %tmp51 = icmp eq i32 %tmp47, %cont
      67   br i1 %tmp51, label %LOOP, label %LOOP.outer    ; eq: inner latch; ne: outer latch
      68 }
     69 
     70 ; OPT-LABEL: define amdgpu_kernel void @multi_if_break_loop(
     71 ; OPT: llvm.amdgcn.break
     72 ; OPT: llvm.amdgcn.loop
     73 ; OPT: llvm.amdgcn.if.break
     74 ; OPT: llvm.amdgcn.if.break
     75 ; OPT: llvm.amdgcn.end.cf
     76 
     77 ; GCN-LABEL: {{^}}multi_if_break_loop:
     78 ; GCN: s_mov_b64 [[BREAK_REG:s\[[0-9]+:[0-9]+\]]], 0{{$}}
     79 
     80 ; GCN: [[LOOP:BB[0-9]+_[0-9]+]]: ; %bb1{{$}}
     81 
      82 ; Uses a copy instead of an or
     83 ; GCN: s_mov_b64 [[COPY:s\[[0-9]+:[0-9]+\]]], [[BREAK_REG]]
     84 ; GCN: s_or_b64 [[BREAK_REG]], exec, [[BREAK_REG]]
; Single loop whose body is a switch; each case conditionally breaks out of
; the loop (br to %bb9), so annotation must emit one llvm.amdgcn.if.break per
; case feeding a single llvm.amdgcn.loop (checked above).
; NOTE: block/value names here are matched by the CHECK lines — do not rename.
      85 define amdgpu_kernel void @multi_if_break_loop(i32 %arg) #0 {
      86 bb:
      87   %id = call i32 @llvm.amdgcn.workitem.id.x()
      88   %tmp = sub i32 %id, %arg
      89   br label %bb1
      90 
      91 bb1:
      92   %lsr.iv = phi i32 [ undef, %bb ], [ %lsr.iv.next, %case0 ], [ %lsr.iv.next, %case1 ]
      93   %lsr.iv.next = add i32 %lsr.iv, 1
      94   %cmp0 = icmp slt i32 %lsr.iv.next, 0            ; NOTE(review): unused; kept to preserve CHECKed codegen
      95   %load0 = load volatile i32, i32 addrspace(1)* undef, align 4
      96   switch i32 %load0, label %bb9 [
      97     i32 0, label %case0
      98     i32 1, label %case1
      99   ]
     100 
     101 case0:
     102   %load1 = load volatile i32, i32 addrspace(1)* undef, align 4
     103   %cmp1 = icmp slt i32 %tmp, %load1
     104   br i1 %cmp1, label %bb1, label %bb9             ; divergent break #1
     105 
     106 case1:
     107   %load2 = load volatile i32, i32 addrspace(1)* undef, align 4
     108   %cmp2 = icmp slt i32 %tmp, %load2
     109   br i1 %cmp2, label %bb1, label %bb9             ; divergent break #2
     110 
     111 bb9:
     112   ret void
     113 }
    114 
    115 declare i32 @llvm.amdgcn.workitem.id.x() #1
    116 
    117 attributes #0 = { nounwind }
    118 attributes #1 = { nounwind readnone }
    119