Home | History | Annotate | Download | only in Hexagon
      1 //===- HexagonInstrInfoVector.td - Hexagon Vector Patterns -*- tablegen -*-===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file describes the Hexagon Vector instructions in TableGen format.
     11 //
     12 //===----------------------------------------------------------------------===//
     13 
// PatLeaf wrappers binding each short-vector value type to its natural
// register class: predicate vectors (viN1) live in PredRegs, 32-bit
// vectors in IntRegs, 64-bit vectors in DoubleRegs.
def V2I1:  PatLeaf<(v2i1  PredRegs:$R)>;
def V4I1:  PatLeaf<(v4i1  PredRegs:$R)>;
def V8I1:  PatLeaf<(v8i1  PredRegs:$R)>;
def V4I8:  PatLeaf<(v4i8  IntRegs:$R)>;
def V2I16: PatLeaf<(v2i16 IntRegs:$R)>;
def V8I8:  PatLeaf<(v8i8  DoubleRegs:$R)>;
def V4I16: PatLeaf<(v4i16 DoubleRegs:$R)>;
def V2I32: PatLeaf<(v2i32 DoubleRegs:$R)>;
     22 
     23 
// Bitcasts between two 32-bit types kept in IntRegs are no-ops: emit
// patterns mapping the bitconvert in both directions to the source
// register itself.
multiclass bitconvert_32<ValueType a, ValueType b> {
  def : Pat <(b (bitconvert (a IntRegs:$src))),
             (b IntRegs:$src)>;
  def : Pat <(a (bitconvert (b IntRegs:$src))),
             (a IntRegs:$src)>;
}
     30 
// Same as bitconvert_32, but for 64-bit types kept in DoubleRegs.
multiclass bitconvert_64<ValueType a, ValueType b> {
  def : Pat <(b (bitconvert (a DoubleRegs:$src))),
             (b DoubleRegs:$src)>;
  def : Pat <(a (bitconvert (b DoubleRegs:$src))),
             (a DoubleRegs:$src)>;
}
     37 
// Bit convert vector types to integers: every 32-bit vector is freely
// convertible to/from i32, and every 64-bit vector to/from i64.
defm : bitconvert_32<v4i8,  i32>;
defm : bitconvert_32<v2i16, i32>;
defm : bitconvert_64<v8i8,  i64>;
defm : bitconvert_64<v4i16, i64>;
defm : bitconvert_64<v2i32, i64>;
     44 
// Vector shift support. Vector shifting in Hexagon is rather different
// from internal representation of LLVM.
// LLVM assumes all shifts (in vector case) will have the form
// <VT> = SHL/SRA/SRL <VT> by <VT>
// while Hexagon has the following format:
// <VT> = SHL/SRA/SRL <VT> by <IT/i32>
// As a result, special care is needed to guarantee correctness and
// performance.

// Shift all four halfwords of a 64-bit register by the same unsigned
// 4-bit immediate. The immediate operand is encoded into Inst bits 11-8.
class vshift_v4i16<SDNode Op, string Str, bits<3>MajOp, bits<3>MinOp>
  : S_2OpInstImm<Str, MajOp, MinOp, u4Imm,
      [(set (v4i16 DoubleRegs:$dst),
            (Op (v4i16 DoubleRegs:$src1), u4ImmPred:$src2))]> {
  bits<4> src2;
  let Inst{11-8} = src2;
}
     60 
// Shift both words of a 64-bit register by the same unsigned 5-bit
// immediate. The immediate operand is encoded into Inst bits 12-8.
class vshift_v2i32<SDNode Op, string Str, bits<3>MajOp, bits<3>MinOp>
  : S_2OpInstImm<Str, MajOp, MinOp, u5Imm,
      [(set (v2i32 DoubleRegs:$dst),
            (Op (v2i32 DoubleRegs:$src1), u5ImmPred:$src2))]> {
  bits<5> src2;
  let Inst{12-8} = src2;
}
     68 
// Add/subtract two v2i16 vectors held in 32-bit registers, using the
// single-register halfword add/sub instructions.
def : Pat<(v2i16 (add (v2i16 IntRegs:$src1), (v2i16 IntRegs:$src2))),
          (A2_svaddh IntRegs:$src1, IntRegs:$src2)>;

def : Pat<(v2i16 (sub (v2i16 IntRegs:$src1), (v2i16 IntRegs:$src2))),
          (A2_svsubh IntRegs:$src1, IntRegs:$src2)>;

// Immediate shifts of v2i32 (word) vectors.
def S2_asr_i_vw : vshift_v2i32<sra, "vasrw", 0b010, 0b000>;
def S2_lsr_i_vw : vshift_v2i32<srl, "vlsrw", 0b010, 0b001>;
def S2_asl_i_vw : vshift_v2i32<shl, "vaslw", 0b010, 0b010>;

// Immediate shifts of v4i16 (halfword) vectors.
def S2_asr_i_vh : vshift_v4i16<sra, "vasrh", 0b100, 0b000>;
def S2_lsr_i_vh : vshift_v4i16<srl, "vlsrh", 0b100, 0b001>;
def S2_asl_i_vh : vshift_v4i16<shl, "vaslh", 0b100, 0b010>;
     82 
     83 
// Target nodes for byte/halfword splats, created during ISel lowering.
def HexagonVSPLATB: SDNode<"HexagonISD::VSPLATB", SDTUnaryOp>;
def HexagonVSPLATH: SDNode<"HexagonISD::VSPLATH", SDTUnaryOp>;

// Replicate the low 8-bits from 32-bits input register into each of the
// four bytes of 32-bits destination register.
def: Pat<(v4i8  (HexagonVSPLATB I32:$Rs)), (S2_vsplatrb I32:$Rs)>;

// Replicate the low 16-bits from 32-bits input register into each of the
// four halfwords of 64-bits destination register.
def: Pat<(v4i16 (HexagonVSPLATH I32:$Rs)), (S2_vsplatrh I32:$Rs)>;
     94 
     95 
// Map a binary SDNode on a given vector PatLeaf type to a two-operand
// vector arithmetic/logic instruction.
class VArith_pat <InstHexagon MI, SDNode Op, PatFrag Type>
  : Pat <(Op Type:$Rss, Type:$Rtt),
         (MI Type:$Rss, Type:$Rtt)>;

// Elementwise add/sub on 64-bit vectors.
def: VArith_pat <A2_vaddub, add, V8I8>;
def: VArith_pat <A2_vaddh,  add, V4I16>;
def: VArith_pat <A2_vaddw,  add, V2I32>;
def: VArith_pat <A2_vsubub, sub, V8I8>;
def: VArith_pat <A2_vsubh,  sub, V4I16>;
def: VArith_pat <A2_vsubw,  sub, V2I32>;

// Bitwise logic is lane-agnostic: use the scalar 32-bit ops for v2i16.
def: VArith_pat <A2_and,    and, V2I16>;
def: VArith_pat <A2_xor,    xor, V2I16>;
def: VArith_pat <A2_or,     or,  V2I16>;

// ... and the 64-bit register-pair ops for all 64-bit vector types.
def: VArith_pat <A2_andp,   and, V8I8>;
def: VArith_pat <A2_andp,   and, V4I16>;
def: VArith_pat <A2_andp,   and, V2I32>;
def: VArith_pat <A2_orp,    or,  V8I8>;
def: VArith_pat <A2_orp,    or,  V4I16>;
def: VArith_pat <A2_orp,    or,  V2I32>;
def: VArith_pat <A2_xorp,   xor, V8I8>;
def: VArith_pat <A2_xorp,   xor, V4I16>;
def: VArith_pat <A2_xorp,   xor, V2I32>;
    120 
// Match LLVM's vector-by-vector shift form when the shift-amount vector
// is a splat of the same immediate in every lane (built via COMBINE for
// v2i32, or VSPLATH for v4i16), and select the immediate vector shifts.
def: Pat<(v2i32 (sra V2I32:$b, (i64 (HexagonCOMBINE (i32 u5ImmPred:$c),
                                                    (i32 u5ImmPred:$c))))),
         (S2_asr_i_vw V2I32:$b, imm:$c)>;
def: Pat<(v2i32 (srl V2I32:$b, (i64 (HexagonCOMBINE (i32 u5ImmPred:$c),
                                                    (i32 u5ImmPred:$c))))),
         (S2_lsr_i_vw V2I32:$b, imm:$c)>;
def: Pat<(v2i32 (shl V2I32:$b, (i64 (HexagonCOMBINE (i32 u5ImmPred:$c),
                                                    (i32 u5ImmPred:$c))))),
         (S2_asl_i_vw V2I32:$b, imm:$c)>;

def: Pat<(v4i16 (sra V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4ImmPred:$c)))))),
         (S2_asr_i_vh V4I16:$b, imm:$c)>;
def: Pat<(v4i16 (srl V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4ImmPred:$c)))))),
         (S2_lsr_i_vh V4I16:$b, imm:$c)>;
def: Pat<(v4i16 (shl V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4ImmPred:$c)))))),
         (S2_asl_i_vh V4I16:$b, imm:$c)>;
    137 
    138 
// Type profiles for the Hexagon vector-shift-by-scalar nodes:
// result and first operand are the same vector type, second operand is
// an integer shift amount (scalar, applied to all lanes).
def SDTHexagon_v2i32_v2i32_i32 : SDTypeProfile<1, 2,
  [SDTCisSameAs<0, 1>, SDTCisVT<0, v2i32>, SDTCisInt<2>]>;
def SDTHexagon_v4i16_v4i16_i32 : SDTypeProfile<1, 2,
  [SDTCisSameAs<0, 1>, SDTCisVT<0, v4i16>, SDTCisInt<2>]>;

def HexagonVSRAW: SDNode<"HexagonISD::VSRAW", SDTHexagon_v2i32_v2i32_i32>;
def HexagonVSRAH: SDNode<"HexagonISD::VSRAH", SDTHexagon_v4i16_v4i16_i32>;
def HexagonVSRLW: SDNode<"HexagonISD::VSRLW", SDTHexagon_v2i32_v2i32_i32>;
def HexagonVSRLH: SDNode<"HexagonISD::VSRLH", SDTHexagon_v4i16_v4i16_i32>;
def HexagonVSHLW: SDNode<"HexagonISD::VSHLW", SDTHexagon_v2i32_v2i32_i32>;
def HexagonVSHLH: SDNode<"HexagonISD::VSHLH", SDTHexagon_v4i16_v4i16_i32>;

// Shift-by-immediate forms of the nodes above.
def: Pat<(v2i32 (HexagonVSRAW V2I32:$Rs, u5ImmPred:$u5)),
         (S2_asr_i_vw V2I32:$Rs, imm:$u5)>;
def: Pat<(v4i16 (HexagonVSRAH V4I16:$Rs, u4ImmPred:$u4)),
         (S2_asr_i_vh V4I16:$Rs, imm:$u4)>;
def: Pat<(v2i32 (HexagonVSRLW V2I32:$Rs, u5ImmPred:$u5)),
         (S2_lsr_i_vw V2I32:$Rs, imm:$u5)>;
def: Pat<(v4i16 (HexagonVSRLH V4I16:$Rs, u4ImmPred:$u4)),
         (S2_lsr_i_vh V4I16:$Rs, imm:$u4)>;
def: Pat<(v2i32 (HexagonVSHLW V2I32:$Rs, u5ImmPred:$u5)),
         (S2_asl_i_vw V2I32:$Rs, imm:$u5)>;
def: Pat<(v4i16 (HexagonVSHLH V4I16:$Rs, u4ImmPred:$u4)),
         (S2_asl_i_vh V4I16:$Rs, imm:$u4)>;
    163 
// Vector shift words by register
def S2_asr_r_vw : T_S3op_shiftVect < "vasrw", 0b00, 0b00>;
def S2_lsr_r_vw : T_S3op_shiftVect < "vlsrw", 0b00, 0b01>;
def S2_asl_r_vw : T_S3op_shiftVect < "vaslw", 0b00, 0b10>;
def S2_lsl_r_vw : T_S3op_shiftVect < "vlslw", 0b00, 0b11>;

// Vector shift halfwords by register
def S2_asr_r_vh : T_S3op_shiftVect < "vasrh", 0b01, 0b00>;
def S2_lsr_r_vh : T_S3op_shiftVect < "vlsrh", 0b01, 0b01>;
def S2_asl_r_vh : T_S3op_shiftVect < "vaslh", 0b01, 0b10>;
def S2_lsl_r_vh : T_S3op_shiftVect < "vlslh", 0b01, 0b11>;

// Map a Hexagon vector-shift node with a 32-bit register shift amount
// to the corresponding register-shift instruction.
class vshift_rr_pat<InstHexagon MI, SDNode Op, PatFrag Value>
  : Pat <(Op Value:$Rs, I32:$Rt),
         (MI Value:$Rs, I32:$Rt)>;

def: vshift_rr_pat <S2_asr_r_vw, HexagonVSRAW, V2I32>;
def: vshift_rr_pat <S2_asr_r_vh, HexagonVSRAH, V4I16>;
def: vshift_rr_pat <S2_lsr_r_vw, HexagonVSRLW, V2I32>;
def: vshift_rr_pat <S2_lsr_r_vh, HexagonVSRLH, V4I16>;
def: vshift_rr_pat <S2_asl_r_vw, HexagonVSHLW, V2I32>;
def: vshift_rr_pat <S2_asl_r_vh, HexagonVSHLH, V4I16>;
    186 
    187 
// Type profiles for the Hexagon vector-compare nodes: two identical
// vector operands, producing a single i1 result (the predicate register
// is viewed as a scalar here, not as a predicate vector).
def SDTHexagonVecCompare_v8i8 : SDTypeProfile<1, 2,
  [SDTCisSameAs<1, 2>, SDTCisVT<0, i1>, SDTCisVT<1, v8i8>]>;
def SDTHexagonVecCompare_v4i16 : SDTypeProfile<1, 2,
  [SDTCisSameAs<1, 2>, SDTCisVT<0, i1>, SDTCisVT<1, v4i16>]>;
def SDTHexagonVecCompare_v2i32 : SDTypeProfile<1, 2,
  [SDTCisSameAs<1, 2>, SDTCisVT<0, i1>, SDTCisVT<1, v2i32>]>;

def HexagonVCMPBEQ:  SDNode<"HexagonISD::VCMPBEQ",  SDTHexagonVecCompare_v8i8>;
def HexagonVCMPBGT:  SDNode<"HexagonISD::VCMPBGT",  SDTHexagonVecCompare_v8i8>;
def HexagonVCMPBGTU: SDNode<"HexagonISD::VCMPBGTU", SDTHexagonVecCompare_v8i8>;
def HexagonVCMPHEQ:  SDNode<"HexagonISD::VCMPHEQ",  SDTHexagonVecCompare_v4i16>;
def HexagonVCMPHGT:  SDNode<"HexagonISD::VCMPHGT",  SDTHexagonVecCompare_v4i16>;
def HexagonVCMPHGTU: SDNode<"HexagonISD::VCMPHGTU", SDTHexagonVecCompare_v4i16>;
def HexagonVCMPWEQ:  SDNode<"HexagonISD::VCMPWEQ",  SDTHexagonVecCompare_v2i32>;
def HexagonVCMPWGT:  SDNode<"HexagonISD::VCMPWGT",  SDTHexagonVecCompare_v2i32>;
def HexagonVCMPWGTU: SDNode<"HexagonISD::VCMPWGTU", SDTHexagonVecCompare_v2i32>;


// Map an i1-producing vector-compare node to its instruction.
class vcmp_i1_pat<InstHexagon MI, SDNode Op, PatFrag Value>
  : Pat <(i1 (Op Value:$Rs, Value:$Rt)),
         (MI Value:$Rs, Value:$Rt)>;

def: vcmp_i1_pat<A2_vcmpbeq,  HexagonVCMPBEQ,  V8I8>;
def: vcmp_i1_pat<A4_vcmpbgt,  HexagonVCMPBGT,  V8I8>;
def: vcmp_i1_pat<A2_vcmpbgtu, HexagonVCMPBGTU, V8I8>;

def: vcmp_i1_pat<A2_vcmpheq,  HexagonVCMPHEQ,  V4I16>;
def: vcmp_i1_pat<A2_vcmphgt,  HexagonVCMPHGT,  V4I16>;
def: vcmp_i1_pat<A2_vcmphgtu, HexagonVCMPHGTU, V4I16>;

def: vcmp_i1_pat<A2_vcmpweq,  HexagonVCMPWEQ,  V2I32>;
def: vcmp_i1_pat<A2_vcmpwgt,  HexagonVCMPWGT,  V2I32>;
def: vcmp_i1_pat<A2_vcmpwgtu, HexagonVCMPWGTU, V2I32>;
    221 
    222 
// Map a generic setcc on vectors, producing a predicate-vector result
// (OutTy, e.g. v2i1/v4i1), to the corresponding vector-compare
// instruction.
class vcmp_vi1_pat<InstHexagon MI, PatFrag Op, PatFrag InVal, ValueType OutTy>
  : Pat <(OutTy (Op InVal:$Rs, InVal:$Rt)),
         (MI InVal:$Rs, InVal:$Rt)>;

def: vcmp_vi1_pat<A2_vcmpweq,  seteq,  V2I32, v2i1>;
def: vcmp_vi1_pat<A2_vcmpwgt,  setgt,  V2I32, v2i1>;
def: vcmp_vi1_pat<A2_vcmpwgtu, setugt, V2I32, v2i1>;

def: vcmp_vi1_pat<A2_vcmpheq,  seteq,  V4I16, v4i1>;
def: vcmp_vi1_pat<A2_vcmphgt,  setgt,  V4I16, v4i1>;
def: vcmp_vi1_pat<A2_vcmphgtu, setugt, V4I16, v4i1>;
    234 
    235 
// Hexagon doesn't have a vector multiply with C semantics.
// Instead, generate a pseudo instruction that gets expanded into two
// scalar MPYI instructions.
// This is expanded by ExpandPostRAPseudos.
let isPseudo = 1 in
def VMULW : PseudoM<(outs DoubleRegs:$Rd),
      (ins DoubleRegs:$Rs, DoubleRegs:$Rt),
      ".error \"Should never try to emit VMULW\"",
      [(set V2I32:$Rd, (mul V2I32:$Rs, V2I32:$Rt))]>;

// Multiply-accumulate variant: $Rd = $Rx + $Rs * $Rt (per word lane).
// The "$Rd = $Rx" constraint ties the accumulator to the destination.
let isPseudo = 1 in
def VMULW_ACC : PseudoM<(outs DoubleRegs:$Rd),
      (ins DoubleRegs:$Rx, DoubleRegs:$Rs, DoubleRegs:$Rt),
      ".error \"Should never try to emit VMULW_ACC\"",
      [(set V2I32:$Rd, (add V2I32:$Rx, (mul V2I32:$Rs, V2I32:$Rt)))],
      "$Rd = $Rx">;
    252 
// Adds two v4i8: Hexagon does not have an insn for this one, so we
// use the double add v8i8, and use only the low part of the result.
def: Pat<(v4i8 (add (v4i8 IntRegs:$Rs), (v4i8 IntRegs:$Rt))),
         (LoReg (A2_vaddub (Zext64 $Rs), (Zext64 $Rt)))>;

// Subtract two v4i8: Hexagon does not have an insn for this one, so we
// use the double sub v8i8, and use only the low part of the result.
def: Pat<(v4i8 (sub (v4i8 IntRegs:$Rs), (v4i8 IntRegs:$Rt))),
         (LoReg (A2_vsubub (Zext64 $Rs), (Zext64 $Rt)))>;

//
// No 32 bit vector mux: widen both operands to 64 bits, use the 64-bit
// vector mux, and keep the low 32 bits of the result.
//
def: Pat<(v4i8 (select I1:$Pu, V4I8:$Rs, V4I8:$Rt)),
         (LoReg (C2_vmux I1:$Pu, (Zext64 $Rs), (Zext64 $Rt)))>;
def: Pat<(v2i16 (select I1:$Pu, V2I16:$Rs, V2I16:$Rt)),
         (LoReg (C2_vmux I1:$Pu, (Zext64 $Rs), (Zext64 $Rt)))>;
    270 
//
// 64-bit vector mux.
//
def: Pat<(v8i8 (vselect V8I1:$Pu, V8I8:$Rs, V8I8:$Rt)),
         (C2_vmux V8I1:$Pu, V8I8:$Rs, V8I8:$Rt)>;
def: Pat<(v4i16 (vselect V4I1:$Pu, V4I16:$Rs, V4I16:$Rt)),
         (C2_vmux V4I1:$Pu, V4I16:$Rs, V4I16:$Rt)>;
def: Pat<(v2i32 (vselect V2I1:$Pu, V2I32:$Rs, V2I32:$Rt)),
         (C2_vmux V2I1:$Pu, V2I32:$Rs, V2I32:$Rt)>;

//
// No 32 bit vector compare: zero-extend both operands to 64 bits and
// use the 64-bit vector compares. Zero-filled upper lanes compare equal
// (and not-greater-than), so they do not perturb the i1 result.
//
def: Pat<(i1 (seteq V4I8:$Rs, V4I8:$Rt)),
         (A2_vcmpbeq (Zext64 $Rs), (Zext64 $Rt))>;
def: Pat<(i1 (setgt V4I8:$Rs, V4I8:$Rt)),
         (A4_vcmpbgt (Zext64 $Rs), (Zext64 $Rt))>;
def: Pat<(i1 (setugt V4I8:$Rs, V4I8:$Rt)),
         (A2_vcmpbgtu (Zext64 $Rs), (Zext64 $Rt))>;

def: Pat<(i1 (seteq V2I16:$Rs, V2I16:$Rt)),
         (A2_vcmpheq (Zext64 $Rs), (Zext64 $Rt))>;
def: Pat<(i1 (setgt V2I16:$Rs, V2I16:$Rt)),
         (A2_vcmphgt (Zext64 $Rs), (Zext64 $Rt))>;
def: Pat<(i1 (setugt V2I16:$Rs, V2I16:$Rt)),
         (A2_vcmphgtu (Zext64 $Rs), (Zext64 $Rt))>;
    297 
    298 
// Select a "greater-than" instruction for a "less-than" compare by
// swapping the operand order.
class InvertCmp_pat<InstHexagon InvMI, PatFrag CmpOp, PatFrag Value,
                    ValueType CmpTy>
  : Pat<(CmpTy (CmpOp Value:$Rs, Value:$Rt)),
        (InvMI Value:$Rt, Value:$Rs)>;

// Map from a compare operation to the corresponding instruction with the
// order of operands reversed, e.g.  x > y --> cmp.lt(y,x).
// Each compare is mapped for both the scalar-i1 and the
// predicate-vector result type.
def: InvertCmp_pat<A4_vcmpbgt,  setlt,  V8I8,  i1>;
def: InvertCmp_pat<A4_vcmpbgt,  setlt,  V8I8,  v8i1>;
def: InvertCmp_pat<A2_vcmphgt,  setlt,  V4I16, i1>;
def: InvertCmp_pat<A2_vcmphgt,  setlt,  V4I16, v4i1>;
def: InvertCmp_pat<A2_vcmpwgt,  setlt,  V2I32, i1>;
def: InvertCmp_pat<A2_vcmpwgt,  setlt,  V2I32, v2i1>;

def: InvertCmp_pat<A2_vcmpbgtu, setult, V8I8,  i1>;
def: InvertCmp_pat<A2_vcmpbgtu, setult, V8I8,  v8i1>;
def: InvertCmp_pat<A2_vcmphgtu, setult, V4I16, i1>;
def: InvertCmp_pat<A2_vcmphgtu, setult, V4I16, v4i1>;
def: InvertCmp_pat<A2_vcmpwgtu, setult, V2I32, i1>;
def: InvertCmp_pat<A2_vcmpwgtu, setult, V2I32, v2i1>;
    319 
    320 // Map from vcmpne(Rss) -> !vcmpew(Rss).
    321 // rs != rt -> !(rs == rt).
    322 def: Pat<(v2i1 (setne V2I32:$Rs, V2I32:$Rt)),
    323          (C2_not (v2i1 (A2_vcmpbeq V2I32:$Rs, V2I32:$Rt)))>;
    324 
    325 
// Truncate: from vector B copy all 'E'ven 'B'yte elements:
// A[0] = B[0];  A[1] = B[2];  A[2] = B[4];  A[3] = B[6];
def: Pat<(v4i8 (trunc V4I16:$Rs)),
         (S2_vtrunehb V4I16:$Rs)>;

// Truncate: from vector B copy all 'O'dd 'B'yte elements:
// A[0] = B[1];  A[1] = B[3];  A[2] = B[5];  A[3] = B[7];
// S2_vtrunohb

// Truncate: from vectors B and C copy all 'E'ven 'H'alf-word elements:
// A[0] = B[0];  A[1] = B[2];  A[2] = C[0];  A[3] = C[2];
// S2_vtruneh

// Truncate v2i32 -> v2i16 by packing the low halfwords of both words
// into the low word of the packhl result.
def: Pat<(v2i16 (trunc V2I32:$Rs)),
         (LoReg (S2_packhl (HiReg $Rs), (LoReg $Rs)))>;
    341 
    342 
// Sign-extension nodes: bytes->halfwords and halfwords->words.
def HexagonVSXTBH : SDNode<"HexagonISD::VSXTBH", SDTUnaryOp>;
def HexagonVSXTBW : SDNode<"HexagonISD::VSXTBW", SDTUnaryOp>;

def: Pat<(i64 (HexagonVSXTBH I32:$Rs)), (S2_vsxtbh I32:$Rs)>;
def: Pat<(i64 (HexagonVSXTBW I32:$Rs)), (S2_vsxthw I32:$Rs)>;

// Widening extensions of 32-bit vectors into 64-bit vectors. anyext is
// free to produce any upper bits, so it reuses the zero-extend insns.
def: Pat<(v4i16 (zext   V4I8:$Rs)),  (S2_vzxtbh V4I8:$Rs)>;
def: Pat<(v2i32 (zext   V2I16:$Rs)), (S2_vzxthw V2I16:$Rs)>;
def: Pat<(v4i16 (anyext V4I8:$Rs)),  (S2_vzxtbh V4I8:$Rs)>;
def: Pat<(v2i32 (anyext V2I16:$Rs)), (S2_vzxthw V2I16:$Rs)>;
def: Pat<(v4i16 (sext   V4I8:$Rs)),  (S2_vsxtbh V4I8:$Rs)>;
def: Pat<(v2i32 (sext   V2I16:$Rs)), (S2_vsxthw V2I16:$Rs)>;

// Sign extends a v2i8 into a v2i32: sign-extend the low byte of each
// word independently and recombine.
def: Pat<(v2i32 (sext_inreg V2I32:$Rs, v2i8)),
         (A2_combinew (A2_sxtb (HiReg $Rs)), (A2_sxtb (LoReg $Rs)))>;

// Sign extends a v2i16 into a v2i32.
def: Pat<(v2i32 (sext_inreg V2I32:$Rs, v2i16)),
         (A2_combinew (A2_sxth (HiReg $Rs)), (A2_sxth (LoReg $Rs)))>;
    363 
    364 
// Multiplies two v2i16 and returns a v2i32.  We are using here the
// saturating multiply, as hexagon does not provide a non saturating
// vector multiply, and saturation does not impact the result that is
// in double precision of the operands.

// Multiplies two v2i16 vectors: as Hexagon does not have a multiply
// with the C semantics for this one, this pattern uses the half word
// multiply vmpyh that takes two v2i16 and returns a v2i32.  This is
// then truncated to fit this back into a v2i16 and to simulate the
// wrap around semantics for unsigned in C.
def vmpyh: OutPatFrag<(ops node:$Rs, node:$Rt),
                      (M2_vmpy2s_s0 (i32 $Rs), (i32 $Rt))>;

// v2i16 multiply: widen via vmpyh, then take the even (low) halfwords.
// The high input to vtrunewh is a zero pair; only its low half is kept.
def: Pat<(v2i16 (mul V2I16:$Rs, V2I16:$Rt)),
         (LoReg (S2_vtrunewh (v2i32 (A2_combineii 0, 0)),
                             (v2i32 (vmpyh V2I16:$Rs, V2I16:$Rt))))>;

// Multiplies two v4i16 vectors.
def: Pat<(v4i16 (mul V4I16:$Rs, V4I16:$Rt)),
         (S2_vtrunewh (vmpyh (HiReg $Rs), (HiReg $Rt)),
                      (vmpyh (LoReg $Rs), (LoReg $Rt)))>;

// Pre-V5 byte multiply: sign-extend bytes to halfwords, multiply the
// halfwords, then truncate back.
def VMPYB_no_V5: OutPatFrag<(ops node:$Rs, node:$Rt),
  (S2_vtrunewh (vmpyh (HiReg (S2_vsxtbh $Rs)), (HiReg (S2_vsxtbh $Rt))),
               (vmpyh (LoReg (S2_vsxtbh $Rs)), (LoReg (S2_vsxtbh $Rt))))>;

// Multiplies two v4i8 vectors.
// Preferred V5 form (M5_vmpybsu); the unguarded pattern below is the
// fallback selected on pre-V5 targets.
def: Pat<(v4i8 (mul V4I8:$Rs, V4I8:$Rt)),
         (S2_vtrunehb (M5_vmpybsu V4I8:$Rs, V4I8:$Rt))>,
     Requires<[HasV5T]>;

def: Pat<(v4i8 (mul V4I8:$Rs, V4I8:$Rt)),
         (S2_vtrunehb (VMPYB_no_V5 V4I8:$Rs, V4I8:$Rt))>;

// Multiplies two v8i8 vectors.
def: Pat<(v8i8 (mul V8I8:$Rs, V8I8:$Rt)),
         (A2_combinew (S2_vtrunehb (M5_vmpybsu (HiReg $Rs), (HiReg $Rt))),
                      (S2_vtrunehb (M5_vmpybsu (LoReg $Rs), (LoReg $Rt))))>,
     Requires<[HasV5T]>;

def: Pat<(v8i8 (mul V8I8:$Rs, V8I8:$Rt)),
         (A2_combinew (S2_vtrunehb (VMPYB_no_V5 (HiReg $Rs), (HiReg $Rt))),
                      (S2_vtrunehb (VMPYB_no_V5 (LoReg $Rs), (LoReg $Rt))))>;
    408 
    409 
// Generic two-operand 64-bit shuffle instruction shape, issued in the
// S-unit slots 2/3.
class shuffler<SDNode Op, string Str>
  : SInst<(outs DoubleRegs:$a), (ins DoubleRegs:$b, DoubleRegs:$c),
      "$a = " # Str # "($b, $c)",
      [(set (i64 DoubleRegs:$a),
            (i64 (Op (i64 DoubleRegs:$b), (i64 DoubleRegs:$c))))],
      "", S_3op_tc_1_SLOT23>;

// i64 x i64 -> i64 binary-operator profile for the shuffle nodes.
def SDTHexagonBinOp64 : SDTypeProfile<1, 2,
  [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>]>;

def HexagonSHUFFEB: SDNode<"HexagonISD::SHUFFEB", SDTHexagonBinOp64>;
def HexagonSHUFFEH: SDNode<"HexagonISD::SHUFFEH", SDTHexagonBinOp64>;
def HexagonSHUFFOB: SDNode<"HexagonISD::SHUFFOB", SDTHexagonBinOp64>;
def HexagonSHUFFOH: SDNode<"HexagonISD::SHUFFOH", SDTHexagonBinOp64>;

// Map a shuffle node to the corresponding shuffle instruction.
class ShufflePat<InstHexagon MI, SDNode Op>
  : Pat<(i64 (Op DoubleRegs:$src1, DoubleRegs:$src2)),
        (i64 (MI DoubleRegs:$src1, DoubleRegs:$src2))>;

// Shuffles even bytes for i=0..3: A[2*i].b = C[2*i].b; A[2*i+1].b = B[2*i].b
def: ShufflePat<S2_shuffeb, HexagonSHUFFEB>;

// Shuffles odd bytes for i=0..3: A[2*i].b = C[2*i+1].b; A[2*i+1].b = B[2*i+1].b
def: ShufflePat<S2_shuffob, HexagonSHUFFOB>;

// Shuffles even half for i=0,1: A[2*i].h = C[2*i].h; A[2*i+1].h = B[2*i].h
def: ShufflePat<S2_shuffeh, HexagonSHUFFEH>;

// Shuffles odd half for i=0,1: A[2*i].h = C[2*i+1].h; A[2*i+1].h = B[2*i+1].h
def: ShufflePat<S2_shuffoh, HexagonSHUFFOH>;
    440 
    441 
// Truncated store from v4i16 to v4i8.
def truncstorev4i8: PatFrag<(ops node:$val, node:$ptr),
                            (truncstore node:$val, node:$ptr),
    [{ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4i8; }]>;

// Truncated store from v2i32 to v2i16.
def truncstorev2i16: PatFrag<(ops node:$val, node:$ptr),
                             (truncstore node:$val, node:$ptr),
    [{ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v2i16; }]>;

// Narrow the vector in registers first (packhl / vtrunehb), then store
// the low 32 bits with a plain word store at offset 0.
def: Pat<(truncstorev2i16 V2I32:$Rs, I32:$Rt),
         (S2_storeri_io I32:$Rt, 0, (LoReg (S2_packhl (HiReg $Rs),
                                                      (LoReg $Rs))))>;

def: Pat<(truncstorev4i8 V4I16:$Rs, I32:$Rt),
         (S2_storeri_io I32:$Rt, 0, (S2_vtrunehb V4I16:$Rs))>;
    458 
    459 
// Zero and sign extended load from v2i8 into v2i16.
def zextloadv2i8: PatFrag<(ops node:$ptr), (zextload node:$ptr),
    [{ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v2i8; }]>;

def sextloadv2i8: PatFrag<(ops node:$ptr), (sextload node:$ptr),
    [{ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v2i8; }]>;

// Load the two bytes as one halfword, widen bytes->halfwords with the
// matching-signedness vzxtbh/vsxtbh, and keep the low 32 bits.
def: Pat<(v2i16 (zextloadv2i8 I32:$Rs)),
         (LoReg (v4i16 (S2_vzxtbh (L2_loadruh_io I32:$Rs, 0))))>;

def: Pat<(v2i16 (sextloadv2i8 I32:$Rs)),
         (LoReg (v4i16 (S2_vsxtbh (L2_loadrh_io I32:$Rs, 0))))>;

// v2i8 -> v2i32: extend bytes to halfwords as above, then halfwords to
// words.
def: Pat<(v2i32 (zextloadv2i8 I32:$Rs)),
         (S2_vzxthw (LoReg (v4i16 (S2_vzxtbh (L2_loadruh_io I32:$Rs, 0)))))>;

def: Pat<(v2i32 (sextloadv2i8 I32:$Rs)),
         (S2_vsxthw (LoReg (v4i16 (S2_vsxtbh (L2_loadrh_io I32:$Rs, 0)))))>;
    478