; RUN: llc -march=hexagon -O2 < %s | FileCheck %s

; We do not pipeline sigma yet, but the non-pipelined version
; with good scheduling is pretty fast. The compiler generates
; 19 packets, while the assembly version is 16 packets.
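; The CHECK lines below count the packets in the hardware loop body:
; 18 closing braces followed by a final packet whose closing brace
; carries the :endloop marker, for 19 packets in total.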

; CHECK:  loop0(.LBB0_[[LOOP:.]],
; CHECK: .LBB0_[[LOOP]]:
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK: }
; CHECK-NOT: }
; CHECK: }{{[ \t]*}}:endloop

@g0 = external constant [10 x i16], align 128

declare i32 @llvm.hexagon.S2.vsplatrb(i32) #0
declare <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32>) #0
declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #0
declare <16 x i32> @llvm.hexagon.V6.vd0() #0
declare <32 x i32> @llvm.hexagon.V6.vsububh(<16 x i32>, <16 x i32>) #0
declare <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32>, <16 x i32>) #0
declare <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>, <16 x i32>) #0
declare <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>, <16 x i32>, <16 x i32>) #0
declare <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>, <16 x i32>, <16 x i32>) #0
declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #0
declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #0
declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #0
declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #0
declare <32 x i32> @llvm.hexagon.V6.vlutvwh(<16 x i32>, <16 x i32>, i32) #0
declare <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x i32>, <16 x i32>) #0
declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #0
declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #0
declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #0

define void @f0(i8* nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i8 zeroext %a4, i8* nocapture %a5) #1 {
b0:
  %v0 = add nsw i32 %a3, -1
  %v1 = icmp sgt i32 %v0, 1
  br i1 %v1, label %b1, label %b8

b1:                                               ; preds = %b0
  %v2 = mul i32 %a1, 2
  %v3 = load <16 x i32>, <16 x i32>* bitcast ([10 x i16]* @g0 to <16 x i32>*), align 128
  %v4 = tail call <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32> %v3) #2
  %v5 = zext i8 %a4 to i32
  %v6 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v5) #2
  %v7 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v6) #2
  %v8 = tail call <16 x i32> @llvm.hexagon.V6.vd0() #2
  %v9 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 16843009) #2
  %v10 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 33686018) #2
  %v11 = icmp sgt i32 %a2, 64
  %v12 = add i32 %a1, 64
  %v13 = add i32 %v12, %a1
  %v14 = icmp sgt i32 %a2, 0
  %v15 = add i32 %a3, -2
  %v16 = bitcast i8* %a0 to <16 x i32>*
  %v17 = load <16 x i32>, <16 x i32>* %v16, align 64
  br label %b2

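; Outer loop over rows: %v21 runs from 1 to %a3-2, and the input and
; output row pointers advance by the stride %a1 on each iteration.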
b2:                                               ; preds = %b7, %b1
  %v18 = phi <16 x i32> [ %v17, %b1 ], [ %v28, %b7 ]
  %v19 = phi i8* [ %a0, %b1 ], [ %v23, %b7 ]
  %v20 = phi i8* [ %a5, %b1 ], [ %v22, %b7 ]
  %v21 = phi i32 [ 1, %b1 ], [ %v118, %b7 ]
  %v22 = getelementptr inbounds i8, i8* %v20, i32 %a1
  %v23 = getelementptr inbounds i8, i8* %v19, i32 %a1
  %v24 = bitcast i8* %v23 to <16 x i32>*
  %v25 = getelementptr inbounds i8, i8* %v19, i32 %v2
  %v26 = bitcast i8* %v25 to <16 x i32>*
  %v27 = bitcast i8* %v22 to <16 x i32>*
  %v28 = load <16 x i32>, <16 x i32>* %v24, align 64
  %v29 = load <16 x i32>, <16 x i32>* %v26, align 64
  br i1 %v11, label %b3, label %b4

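; The row is wider than one 64-byte vector: step the three row
; pointers past the first vector before entering the inner loop.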
b3:                                               ; preds = %b2
  %v30 = getelementptr inbounds i8, i8* %v19, i32 64
  %v31 = getelementptr inbounds i8, i8* %v19, i32 %v12
  %v32 = bitcast i8* %v31 to <16 x i32>*
  %v33 = getelementptr inbounds i8, i8* %v19, i32 %v13
  %v34 = bitcast i8* %v33 to <16 x i32>*
  br label %b5

b4:                                               ; preds = %b2
  br i1 %v14, label %b5, label %b7

b5:                                               ; preds = %b4, %b3
  %v35 = phi <16 x i32>* [ %v26, %b4 ], [ %v34, %b3 ]
  %v36 = phi <16 x i32>* [ %v24, %b4 ], [ %v32, %b3 ]
  %v37 = phi i8* [ %v19, %b4 ], [ %v30, %b3 ]
  %v38 = bitcast i8* %v37 to <16 x i32>*
  br label %b6

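; Inner loop: each iteration loads one 64-byte HVX vector from each of
; the three rows, produces one output vector, and decrements the
; remaining byte count %v43 by 64.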
b6:                                               ; preds = %b6, %b5
  %v39 = phi <16 x i32>* [ %v108, %b6 ], [ %v27, %b5 ]
  %v40 = phi <16 x i32>* [ %v115, %b6 ], [ %v35, %b5 ]
  %v41 = phi <16 x i32>* [ %v114, %b6 ], [ %v36, %b5 ]
  %v42 = phi <16 x i32>* [ %v113, %b6 ], [ %v38, %b5 ]
  %v43 = phi i32 [ %v116, %b6 ], [ %a2, %b5 ]
  %v44 = phi <16 x i32> [ %v45, %b6 ], [ %v8, %b5 ]
  %v45 = phi <16 x i32> [ %v50, %b6 ], [ %v18, %b5 ]
  %v46 = phi <16 x i32> [ %v47, %b6 ], [ %v8, %b5 ]
  %v47 = phi <16 x i32> [ %v51, %b6 ], [ %v28, %b5 ]
  %v48 = phi <16 x i32> [ %v49, %b6 ], [ %v8, %b5 ]
  %v49 = phi <16 x i32> [ %v52, %b6 ], [ %v29, %b5 ]
  %v50 = load <16 x i32>, <16 x i32>* %v42, align 64
  %v51 = load <16 x i32>, <16 x i32>* %v41, align 64
  %v52 = load <16 x i32>, <16 x i32>* %v40, align 64
  %v53 = tail call <32 x i32> @llvm.hexagon.V6.vsububh(<16 x i32> %v8, <16 x i32> %v47) #2
  %v54 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v45, <16 x i32> %v47) #2
  %v55 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v49, <16 x i32> %v47) #2
  %v56 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v54, <16 x i32> %v7) #2
  %v57 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v55, <16 x i32> %v7) #2
  %v58 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v56, <16 x i32> %v9, <16 x i32> %v10) #2
  %v59 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v57, <16 x i32> %v58, <16 x i32> %v9) #2
  %v60 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v56, <16 x i32> %v8, <16 x i32> %v45) #2
  %v61 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v57, <16 x i32> %v8, <16 x i32> %v49) #2
  %v62 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v61, <16 x i32> %v60) #2
  %v63 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v53, <32 x i32> %v62, i32 -1) #2
  %v64 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v45, <16 x i32> %v44, i32 1) #2
  %v65 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v49, <16 x i32> %v48, i32 1) #2
  %v66 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v64, <16 x i32> %v47) #2
  %v67 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v65, <16 x i32> %v47) #2
  %v68 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v66, <16 x i32> %v7) #2
  %v69 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v67, <16 x i32> %v7) #2
  %v70 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v68, <16 x i32> %v59, <16 x i32> %v9) #2
  %v71 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v69, <16 x i32> %v70, <16 x i32> %v9) #2
  %v72 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v68, <16 x i32> %v8, <16 x i32> %v64) #2
  %v73 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v69, <16 x i32> %v8, <16 x i32> %v65) #2
  %v74 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v73, <16 x i32> %v72) #2
  %v75 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v63, <32 x i32> %v74, i32 -1) #2
  %v76 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v50, <16 x i32> %v45, i32 1) #2
  %v77 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v52, <16 x i32> %v49, i32 1) #2
  %v78 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v76, <16 x i32> %v47) #2
  %v79 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v77, <16 x i32> %v47) #2
  %v80 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v78, <16 x i32> %v7) #2
  %v81 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v79, <16 x i32> %v7) #2
  %v82 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v80, <16 x i32> %v71, <16 x i32> %v9) #2
  %v83 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v81, <16 x i32> %v82, <16 x i32> %v9) #2
  %v84 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v80, <16 x i32> %v8, <16 x i32> %v76) #2
  %v85 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v81, <16 x i32> %v8, <16 x i32> %v77) #2
  %v86 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v85, <16 x i32> %v84) #2
  %v87 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v75, <32 x i32> %v86, i32 -1) #2
  %v88 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v47, <16 x i32> %v46, i32 1) #2
  %v89 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v51, <16 x i32> %v47, i32 1) #2
  %v90 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v88, <16 x i32> %v47) #2
  %v91 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v89, <16 x i32> %v47) #2
  %v92 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v90, <16 x i32> %v7) #2
  %v93 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v91, <16 x i32> %v7) #2
  %v94 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v92, <16 x i32> %v83, <16 x i32> %v9) #2
  %v95 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v93, <16 x i32> %v94, <16 x i32> %v9) #2
  %v96 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v92, <16 x i32> %v8, <16 x i32> %v88) #2
  %v97 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v93, <16 x i32> %v8, <16 x i32> %v89) #2
  %v98 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v97, <16 x i32> %v96) #2
  %v99 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v87, <32 x i32> %v98, i32 -1) #2
  %v100 = tail call <32 x i32> @llvm.hexagon.V6.vlutvwh(<16 x i32> %v95, <16 x i32> %v4, i32 0) #2
  %v101 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v99) #2
  %v102 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v100) #2
  %v103 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x i32> %v101, <16 x i32> %v102) #2
  %v104 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v99) #2
  %v105 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v100) #2
  %v106 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x i32> %v104, <16 x i32> %v105) #2
  %v107 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v106, <16 x i32> %v103) #2
  %v108 = getelementptr inbounds <16 x i32>, <16 x i32>* %v39, i32 1
  store <16 x i32> %v107, <16 x i32>* %v39, align 64
  %v109 = icmp sgt i32 %v43, 128
  %v110 = getelementptr inbounds <16 x i32>, <16 x i32>* %v42, i32 1
  %v111 = getelementptr inbounds <16 x i32>, <16 x i32>* %v41, i32 1
  %v112 = getelementptr inbounds <16 x i32>, <16 x i32>* %v40, i32 1
  %v113 = select i1 %v109, <16 x i32>* %v110, <16 x i32>* %v42
  %v114 = select i1 %v109, <16 x i32>* %v111, <16 x i32>* %v41
  %v115 = select i1 %v109, <16 x i32>* %v112, <16 x i32>* %v40
  %v116 = add nsw i32 %v43, -64
  %v117 = icmp sgt i32 %v43, 64
  br i1 %v117, label %b6, label %b7

b7:                                               ; preds = %b6, %b4
  %v118 = add nuw nsw i32 %v21, 1
  %v119 = icmp eq i32 %v21, %v15
  br i1 %v119, label %b8, label %b2

b8:                                               ; preds = %b7, %b0
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
attributes #2 = { nounwind }