// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add64  x y) -> (ADD  x y)
(AddPtr x y) -> (ADD  x y)
(Add32  x y) -> (ADD x y)
(Add16  x y) -> (ADD x y)
(Add8   x y) -> (ADD x y)
(Add64F x y) -> (FADD x y)
(Add32F x y) -> (FADDS x y)

(Sub64  x y) -> (SUB  x y)
(SubPtr x y) -> (SUB  x y)
(Sub32  x y) -> (SUB x y)
(Sub16  x y) -> (SUB x y)
(Sub8   x y) -> (SUB x y)
(Sub32F x y) -> (FSUBS x y)
(Sub64F x y) -> (FSUB x y)

(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
(Mod64 x y) -> (SUB x (MULLD y (DIVD x y)))
(Mod64u x y) -> (SUB x (MULLD y (DIVDU x y)))
(Mod32 x y) -> (SUB x (MULLW y (DIVW x y)))
(Mod32u x y) -> (SUB x (MULLW y (DIVWU x y)))
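// There is no integer remainder instruction at the ISA level targeted here, so
// x % y is computed as x - y*(x/y) using the matching signed or unsigned divide.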

// (x + y) / 2 with x>=y -> (x - y) / 2 + y
(Avg64u <t> x y) -> (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
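// Why this is safe: x and y are unsigned and x >= y, so x-y cannot wrap, and
// (x-y)/2 + y == (x+y)/2 without ever forming the possibly-overflowing x+y.
// A sketch of the equivalent Go (valid only under the x >= y precondition):
//	avg := (x-y)>>1 + y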

(Mul64  x y) -> (MULLD  x y)
(Mul32  x y) -> (MULLW  x y)
(Mul16  x y) -> (MULLW x y)
(Mul8   x y) -> (MULLW x y)

(Div64  x y) -> (DIVD  x y)
(Div64u x y) -> (DIVDU x y)
(Div32  x y) -> (DIVW  x y)
(Div32u x y) -> (DIVWU x y)
(Div16  x y) -> (DIVW  (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) -> (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8   x y) -> (DIVW  (SignExt8to32 x) (SignExt8to32 y))
(Div8u  x y) -> (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))

(Hmul64  x y) -> (MULHD  x y)
(Hmul64u  x y) -> (MULHDU x y)
(Hmul32  x y) -> (MULHW  x y)
(Hmul32u  x y) -> (MULHWU x y)

(Mul32F x y) -> (FMULS x y)
(Mul64F x y) -> (FMUL x y)

(Div32F x y) -> (FDIVS x y)
(Div64F x y) -> (FDIV x y)

// Lowering float <-> int
(Cvt32to32F x) -> (FCFIDS (MTVSRD (SignExt32to64 x)))
(Cvt32to64F x) -> (FCFID (MTVSRD (SignExt32to64 x)))
(Cvt64to32F x) -> (FCFIDS (MTVSRD x))
(Cvt64to64F x) -> (FCFID (MTVSRD x))

(Cvt32Fto32 x) -> (MFVSRD (FCTIWZ x))
(Cvt32Fto64 x) -> (MFVSRD (FCTIDZ x))
(Cvt64Fto32 x) -> (MFVSRD (FCTIWZ x))
(Cvt64Fto64 x) -> (MFVSRD (FCTIDZ x))
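// MTVSRD/MFVSRD move a doubleword directly between a GPR and a floating-point
// register, and FCFID(S)/FCTIDZ/FCTIWZ convert between integer and floating-point
// values inside the FP registers, so these conversions never go through memory.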

(Cvt32Fto64F x) -> x // Note x will have the wrong type for patterns dependent on Float32/Float64
(Cvt64Fto32F x) -> (FRSP x)

(Round32F x) -> (LoweredRound32F x)
(Round64F x) -> (LoweredRound64F x)

(Sqrt x) -> (FSQRT x)
(Floor x) -> (FFLOOR x)
(Ceil x) -> (FCEIL x)
(Trunc x) -> (FTRUNC x)
(Copysign x y) -> (FCPSGN y x)
(Abs x) -> (FABS x)

// Lowering constants
(Const8   [val]) -> (MOVDconst [val])
(Const16  [val]) -> (MOVDconst [val])
(Const32  [val]) -> (MOVDconst [val])
(Const64  [val]) -> (MOVDconst [val])
(Const32F [val]) -> (FMOVSconst [val])
(Const64F [val]) -> (FMOVDconst [val])
(ConstNil) -> (MOVDconst [0])
(ConstBool [b]) -> (MOVDconst [b])

// Constant folding
(FABS (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Abs(i2f(x)))])
(FSQRT (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Sqrt(i2f(x)))])
(FFLOOR (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Floor(i2f(x)))])
(FCEIL (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Ceil(i2f(x)))])
(FTRUNC (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Trunc(i2f(x)))])
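// f2i and i2f only reinterpret bits (math.Float64bits / math.Float64frombits);
// the float constant travels in the integer AuxInt field while the folding
// itself happens in ordinary float arithmetic.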

// Rotate generation with const shift
(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)

(ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
(XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
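// For example, for a 64-bit x the Go expression x<<13 | x>>51 matches the
// SLDconst/SRDconst pair above with c=13, d=51 and becomes (ROTLconst [13] x).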

// Rotate generation with non-const shift
// these match patterns from math/bits/RotateLeft[32|64], but there could be others
(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)

(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
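// A sketch of the Go source shape these match (cf. math/bits.RotateLeft64):
//	s := uint(k) & 63
//	return x<<s | x>>(64-s)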

(Lsh64x64  x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64  x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64  x (Const64 [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64  x (Const64 [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64  x (Const64 [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64  x (Const64 [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64   x (Const64 [c])) && uint64(c) < 8  -> (SLWconst x [c])
(Rsh8x64   x (Const64 [c])) && uint64(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux64  x (Const64 [c])) && uint64(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])

(Lsh64x32  x (Const64 [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32  x (Const64 [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (Const64 [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32  x (Const64 [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32  x (Const64 [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (Const64 [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32  x (Const64 [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32  x (Const64 [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (Const64 [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32   x (Const64 [c])) && uint32(c) < 8  -> (SLWconst x [c])
(Rsh8x32   x (Const64 [c])) && uint32(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux32  x (Const64 [c])) && uint32(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])

// large constant shifts
(Lsh64x64  _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Rsh64Ux64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Lsh32x64  _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Lsh16x64  _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Lsh8x64   _ (Const64 [c])) && uint64(c) >= 8  -> (MOVDconst [0])
(Rsh8Ux64  _ (Const64 [c])) && uint64(c) >= 8  -> (MOVDconst [0])

// large constant signed right shift: we keep just the sign bit
(Rsh64x64 x (Const64 [c])) && uint64(c) >= 64 -> (SRADconst x [63])
(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
(Rsh8x64  x (Const64 [c])) && uint64(c) >= 8  -> (SRAWconst (SignExt8to32  x) [63])
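// e.g. for negative x, Go requires x >> 1000 to still be -1, so the shift
// amount is clamped to 63, which replicates the sign bit across the result.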

// constant shifts
(Lsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64  x (MOVDconst [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64  x (MOVDconst [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64  x (MOVDconst [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64  x (MOVDconst [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64   x (MOVDconst [c])) && uint64(c) < 8  -> (SLWconst x [c])
(Rsh8x64   x (MOVDconst [c])) && uint64(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux64  x (MOVDconst [c])) && uint64(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])

(Lsh64x32  x (MOVDconst [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32  x (MOVDconst [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32  x (MOVDconst [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32  x (MOVDconst [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32  x (MOVDconst [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32  x (MOVDconst [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32   x (MOVDconst [c])) && uint32(c) < 8  -> (SLWconst x [c])
(Rsh8x32   x (MOVDconst [c])) && uint32(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux32  x (MOVDconst [c])) && uint32(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])

// non-constant rotates
// These are subexpressions found in statements that can become rotates.
// In these cases the shift count is known to be < 64, so the more complicated
// expressions with Mask & Carry are not needed.
(Lsh64x64 x (AND y (MOVDconst [63]))) -> (SLD x (ANDconst <typ.Int64> [63] y))
(Lsh64x64 x (ANDconst <typ.Int64> [63] y)) -> (SLD x (ANDconst <typ.Int64> [63] y))
(Rsh64Ux64 x (AND y (MOVDconst [63]))) -> (SRD x (ANDconst <typ.Int64> [63] y))
(Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) -> (SRD x (ANDconst <typ.UInt> [63] y))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (AND y (MOVDconst [63]))) -> (SRAD x (ANDconst <typ.Int64> [63] y))
(Rsh64x64 x (ANDconst <typ.UInt> [63] y)) -> (SRAD x (ANDconst <typ.UInt> [63] y))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))

(Rsh64x64 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Rsh64Ux64 x y) -> (SRD  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Lsh64x64 x y)  -> (SLD  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
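// How the mask works, taking the 64-bit case: (ADDconstForCarry [-64] y)
// produces a carry exactly when y >= 64. For y < 64 the carry is clear,
// MaskIfNotCarry yields -1, and (ORN y -1) = y | ^(-1) = y, so the hardware
// shift sees y unchanged. For y >= 64 the mask is 0 and (ORN y 0) = -1, an
// out-of-range amount for which SLD/SRD produce 0 and SRAD produces all sign
// bits, matching Go's shift semantics.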

(Lsh32x64 x (AND y (MOVDconst [31]))) -> (SLW x (ANDconst <typ.Int32> [31] y))
(Lsh32x64 x (ANDconst <typ.Int32> [31] y)) -> (SLW x (ANDconst <typ.Int32> [31] y))

(Rsh32Ux64 x (AND y (MOVDconst [31]))) -> (SRW x (ANDconst <typ.Int32> [31] y))
(Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) -> (SRW x (ANDconst <typ.UInt> [31] y))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))

(Rsh32x64 x (AND y (MOVDconst [31]))) -> (SRAW x (ANDconst <typ.Int32> [31] y))
(Rsh32x64 x (ANDconst <typ.UInt> [31] y)) -> (SRAW x (ANDconst <typ.UInt> [31] y))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))

(Rsh32x64 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Rsh32Ux64 x y) -> (SRW  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Lsh32x64 x y)  -> (SLW  x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))

(Rsh16x64 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Rsh16Ux64 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Lsh16x64 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))

(Rsh8x64 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Rsh8Ux64 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Lsh8x64 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))

(Rsh64x32 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Rsh64Ux32 x y) -> (SRD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Lsh64x32 x y)  -> (SLD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))

(Rsh32x32 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Rsh32Ux32 x y) -> (SRW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Lsh32x32 x y)  -> (SLW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))

(Rsh16x32 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Rsh16Ux32 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Lsh16x32 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))

(Rsh8x32 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Rsh8Ux32 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Lsh8x32 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))


(Rsh64x16 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Rsh64Ux16 x y) -> (SRD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Lsh64x16 x y)  -> (SLD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))

(Rsh32x16 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Rsh32Ux16 x y) -> (SRW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Lsh32x16 x y)  -> (SLW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))

(Rsh16x16 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Rsh16Ux16 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Lsh16x16 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))

(Rsh8x16 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Rsh8Ux16 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Lsh8x16 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))


(Rsh64x8 x y)  -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Rsh64Ux8 x y) -> (SRD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Lsh64x8 x y)  -> (SLD x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))

(Rsh32x8 x y)  -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Rsh32Ux8 x y) -> (SRW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Lsh32x8 x y)  -> (SLW x  (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))

(Rsh16x8 x y)  -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Rsh16Ux8 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Lsh16x8 x y)  -> (SLW  x                 (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))

(Rsh8x8 x y)  -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Rsh8Ux8 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Lsh8x8 x y)  -> (SLW  x                (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))

// Cleaning up shift ops when input is masked
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
(ORN x (MOVDconst [-1])) -> x
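// e.g. with c = -64 and d = 63 (a shift amount already masked by &63) the
// carry can never be set, so the mask folds to the constant -1 and the ORN
// rule above then removes the masking entirely, leaving the bare shift.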

// Potentially useful optimizing rewrites.
// (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet
// (ADDconstForCarry [k] c), k < 0 && (c >= 0 && k+c < 0) -> CarryClear
// (MaskIfNotCarry CarrySet) -> 0
// (MaskIfNotCarry CarryClear) -> -1

(Addr {sym} base) -> (MOVDaddr {sym} base)
(OffPtr [off] ptr) -> (ADD (MOVDconst <typ.Int64> [off]) ptr)

(Ctz64 x) -> (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
(Ctz32 x) -> (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
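// The ANDN trick: (x-1) &^ x turns the trailing zeros of x into ones and
// clears every other bit, so its population count is the trailing zero count;
// e.g. x = 0b10100 -> (x-1)&^x = 0b00011 -> Ctz = 2.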

(BitLen64 x) -> (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x))
(BitLen32 x) -> (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x))

(PopCount64 x) -> (POPCNTD x)
(PopCount32 x) -> (POPCNTW (MOVWZreg x))
(PopCount16 x) -> (POPCNTW (MOVHZreg x))
(PopCount8 x) -> (POPCNTB (MOVBreg x))

(And64 x y) -> (AND x y)
(And32 x y) -> (AND x y)
(And16 x y) -> (AND x y)
(And8  x y) -> (AND x y)

(Or64 x y) -> (OR x y)
(Or32 x y) -> (OR x y)
(Or16 x y) -> (OR x y)
(Or8  x y) -> (OR x y)

(Xor64 x y) -> (XOR x y)
(Xor32 x y) -> (XOR x y)
(Xor16 x y) -> (XOR x y)
(Xor8  x y) -> (XOR x y)

(Neg64F x) -> (FNEG x)
(Neg32F x) -> (FNEG x)
(Neg64  x) -> (NEG x)
(Neg32  x) -> (NEG x)
(Neg16  x) -> (NEG x)
(Neg8   x) -> (NEG x)

(Com64 x) -> (NOR x x)
(Com32 x) -> (NOR x x)
(Com16 x) -> (NOR x x)
(Com8  x) -> (NOR x x)

// Lowering boolean ops
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(Not x) -> (XORconst [1] x)

// Use ANDN for AND x NOT y
(AND x (NOR y y)) -> (ANDN x y)

// Lowering comparisons
(EqB x y)  -> (ANDconst [1] (EQV x y))
// Sign extension dependence on operand sign sets up for sign/zero-extension elision later
(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Eq8 x y)  -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (Equal (CMPW x y))
(Eq64 x y) -> (Equal (CMP x y))
(Eq32F x y) -> (Equal (FCMPU x y))
(Eq64F x y) -> (Equal (FCMPU x y))
(EqPtr x y) -> (Equal (CMP x y))

(NeqB x y)  -> (XOR x y)
// Like Eq8 and Eq16, prefer sign extension likely to enable later elision.
(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Neq8 x y)  -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) -> (NotEqual (CMPW x y))
(Neq64 x y) -> (NotEqual (CMP x y))
(Neq32F x y) -> (NotEqual (FCMPU x y))
(Neq64F x y) -> (NotEqual (FCMPU x y))
(NeqPtr x y) -> (NotEqual (CMP x y))

(Less8 x y)  -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Less32 x y) -> (LessThan (CMPW x y))
(Less64 x y) -> (LessThan (CMP x y))
(Less32F x y) -> (FLessThan (FCMPU x y))
(Less64F x y) -> (FLessThan (FCMPU x y))

(Less8U x y)  -> (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Less16U x y) -> (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) -> (LessThan (CMPWU x y))
(Less64U x y) -> (LessThan (CMPU x y))

(Leq8 x y)  -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) -> (LessEqual (CMPW x y))
(Leq64 x y) -> (LessEqual (CMP x y))
(Leq32F x y) -> (FLessEqual (FCMPU x y))
(Leq64F x y) -> (FLessEqual (FCMPU x y))

(Leq8U x y)  -> (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) -> (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) -> (LessEqual (CMPWU x y))
(Leq64U x y) -> (LessEqual (CMPU x y))

(Greater8 x y)  -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Greater32 x y) -> (GreaterThan (CMPW x y))
(Greater64 x y) -> (GreaterThan (CMP x y))
(Greater32F x y) -> (FGreaterThan (FCMPU x y))
(Greater64F x y) -> (FGreaterThan (FCMPU x y))

(Greater8U x y)  -> (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Greater16U x y) -> (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Greater32U x y) -> (GreaterThan (CMPWU x y))
(Greater64U x y) -> (GreaterThan (CMPU x y))

(Geq8 x y)  -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Geq32 x y) -> (GreaterEqual (CMPW x y))
(Geq64 x y) -> (GreaterEqual (CMP x y))
(Geq32F x y) -> (FGreaterEqual (FCMPU x y))
(Geq64F x y) -> (FGreaterEqual (FCMPU x y))

(Geq8U x y)  -> (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Geq16U x y) -> (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Geq32U x y) -> (GreaterEqual (CMPWU x y))
(Geq64U x y) -> (GreaterEqual (CMPU x y))

// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) -> (EQ cc yes no)
(If (NotEqual cc) yes no) -> (NE cc yes no)
(If (LessThan cc) yes no) -> (LT cc yes no)
(If (LessEqual cc) yes no) -> (LE cc yes no)
(If (GreaterThan cc) yes no) -> (GT cc yes no)
(If (GreaterEqual cc) yes no) -> (GE cc yes no)
(If (FLessThan cc) yes no) -> (FLT cc yes no)
(If (FLessEqual cc) yes no) -> (FLE cc yes no)
(If (FGreaterThan cc) yes no) -> (FGT cc yes no)
(If (FGreaterEqual cc) yes no) -> (FGE cc yes no)

(If cond yes no) -> (NE (CMPWconst [0] cond) yes no)

// Absorb boolean tests into block
(NE (CMPWconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
(NE (CMPWconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
(NE (CMPWconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
(NE (CMPWconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
(NE (CMPWconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
(NE (CMPWconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
(NE (CMPWconst [0] (FLessThan cc)) yes no) -> (FLT cc yes no)
(NE (CMPWconst [0] (FLessEqual cc)) yes no) -> (FLE cc yes no)
(NE (CMPWconst [0] (FGreaterThan cc)) yes no) -> (FGT cc yes no)
(NE (CMPWconst [0] (FGreaterEqual cc)) yes no) -> (FGE cc yes no)

// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
(EQ (CMPconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
(NE (CMPconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
(EQ (CMPWconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
(NE (CMPWconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)

// absorb flag constants into branches
(EQ (FlagEQ) yes no) -> (First nil yes no)
(EQ (FlagLT) yes no) -> (First nil no yes)
(EQ (FlagGT) yes no) -> (First nil no yes)

(NE (FlagEQ) yes no) -> (First nil no yes)
(NE (FlagLT) yes no) -> (First nil yes no)
(NE (FlagGT) yes no) -> (First nil yes no)

(LT (FlagEQ) yes no) -> (First nil no yes)
(LT (FlagLT) yes no) -> (First nil yes no)
(LT (FlagGT) yes no) -> (First nil no yes)

(LE (FlagEQ) yes no) -> (First nil yes no)
(LE (FlagLT) yes no) -> (First nil yes no)
(LE (FlagGT) yes no) -> (First nil no yes)

(GT (FlagEQ) yes no) -> (First nil no yes)
(GT (FlagLT) yes no) -> (First nil no yes)
(GT (FlagGT) yes no) -> (First nil yes no)

(GE (FlagEQ) yes no) -> (First nil yes no)
(GE (FlagLT) yes no) -> (First nil no yes)
(GE (FlagGT) yes no) -> (First nil yes no)

// absorb InvertFlags into branches
(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)

// constant comparisons
(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y)  -> (FlagLT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y)  -> (FlagGT)

(CMPconst (MOVDconst [x]) [y]) && int64(x)==int64(y) -> (FlagEQ)
(CMPconst (MOVDconst [x]) [y]) && int64(x)<int64(y)  -> (FlagLT)
(CMPconst (MOVDconst [x]) [y]) && int64(x)>int64(y)  -> (FlagGT)

(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y)  -> (FlagEQ)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)

(CMPUconst (MOVDconst [x]) [y]) && int64(x)==int64(y)  -> (FlagEQ)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)

// other known comparisons
//(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT)
//(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT)
//(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
//(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) -> (FlagLT)

// absorb flag constants into boolean values
(Equal (FlagEQ)) -> (MOVDconst [1])
(Equal (FlagLT)) -> (MOVDconst [0])
(Equal (FlagGT)) -> (MOVDconst [0])

(NotEqual (FlagEQ)) -> (MOVDconst [0])
(NotEqual (FlagLT)) -> (MOVDconst [1])
(NotEqual (FlagGT)) -> (MOVDconst [1])

(LessThan (FlagEQ)) -> (MOVDconst [0])
(LessThan (FlagLT)) -> (MOVDconst [1])
(LessThan (FlagGT)) -> (MOVDconst [0])

(LessEqual (FlagEQ)) -> (MOVDconst [1])
(LessEqual (FlagLT)) -> (MOVDconst [1])
(LessEqual (FlagGT)) -> (MOVDconst [0])

(GreaterThan (FlagEQ)) -> (MOVDconst [0])
(GreaterThan (FlagLT)) -> (MOVDconst [0])
(GreaterThan (FlagGT)) -> (MOVDconst [1])

(GreaterEqual (FlagEQ)) -> (MOVDconst [1])
(GreaterEqual (FlagLT)) -> (MOVDconst [0])
(GreaterEqual (FlagGT)) -> (MOVDconst [1])

// absorb InvertFlags into boolean values
(Equal (InvertFlags x)) -> (Equal x)
(NotEqual (InvertFlags x)) -> (NotEqual x)
(LessThan (InvertFlags x)) -> (GreaterThan x)
(GreaterThan (InvertFlags x)) -> (LessThan x)
(LessEqual (InvertFlags x)) -> (GreaterEqual x)
(GreaterEqual (InvertFlags x)) -> (LessEqual x)

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
(Load <t> ptr mem) && t.IsBoolean() -> (MOVBZload ptr mem)
(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) -> (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) -> (MOVBZload ptr mem)

(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)

(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)

// Using Zero instead of LoweredZero allows the
// target address to be folded where possible.
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (MOVBstorezero destptr mem)
(Zero [2] destptr mem) ->
	(MOVHstorezero destptr mem)
(Zero [3] destptr mem) ->
	(MOVBstorezero [2] destptr
		(MOVHstorezero destptr mem))
(Zero [4] destptr mem) ->
	(MOVWstorezero destptr mem)
(Zero [5] destptr mem) ->
	(MOVBstorezero [4] destptr
		(MOVWstorezero destptr mem))
(Zero [6] destptr mem) ->
	(MOVHstorezero [4] destptr
		(MOVWstorezero destptr mem))
(Zero [7] destptr mem) ->
	(MOVBstorezero [6] destptr
		(MOVHstorezero [4] destptr
			(MOVWstorezero destptr mem)))

// MOVD for store with DS form must have offsets that are a multiple of 4
(Zero [8] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVDstorezero destptr mem)
(Zero [8] destptr mem) ->
	(MOVWstorezero [4] destptr
		(MOVWstorezero [0] destptr mem))
// Handle these cases only if aligned properly, otherwise use the general case below
(Zero [12] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstorezero [8] destptr
		(MOVDstorezero [0] destptr mem))
(Zero [16] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVDstorezero [8] destptr
		(MOVDstorezero [0] destptr mem))
(Zero [24] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVDstorezero [16] destptr
		(MOVDstorezero [8] destptr
			(MOVDstorezero [0] destptr mem)))
(Zero [32] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVDstorezero [24] destptr
		(MOVDstorezero [16] destptr
			(MOVDstorezero [8] destptr
				(MOVDstorezero [0] destptr mem))))

// Handle cases not handled above
(Zero [s] ptr mem) -> (LoweredZero [s] ptr mem)

// moves
// Only the MOVD and MOVW instructions require 4 byte
// alignment in the offset field.  The other MOVx instructions
// allow any alignment.
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBZload src mem) mem)
(Move [2] dst src mem) ->
	(MOVHstore dst (MOVHZload src mem) mem)
(Move [4] dst src mem) ->
	(MOVWstore dst (MOVWZload src mem) mem)
// MOVD for load and store must have offsets that are a multiple of 4
(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVDstore dst (MOVDload src mem) mem)
(Move [8] dst src mem) ->
	(MOVWstore [4] dst (MOVWZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [3] dst src mem) ->
	(MOVBstore [2] dst (MOVBZload [2] src mem)
		(MOVHstore dst (MOVHload src mem) mem))
(Move [5] dst src mem) ->
	(MOVBstore [4] dst (MOVBZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [6] dst src mem) ->
	(MOVHstore [4] dst (MOVHZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [7] dst src mem) ->
	(MOVBstore [6] dst (MOVBZload [6] src mem)
		(MOVHstore [4] dst (MOVHZload [4] src mem)
			(MOVWstore dst (MOVWZload src mem) mem)))

// Large move uses a loop. Since the address is computed and the
// offset is zero, any alignment can be used.
(Move [s] dst src mem) && s > 8 ->
	(LoweredMove [s] dst src mem)

// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// Miscellaneous
(Convert <t> x mem) -> (MOVDconvert <t> x mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(GetCallerSP) -> (LoweredGetCallerSP)
(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
(IsInBounds idx len) -> (LessThan (CMPU idx len))
(IsSliceInBounds idx len) -> (LessEqual (CMPU idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)

// Optimizations
// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
// so ORconst, XORconst easily expand into a pair.
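// For instance, ORconst [0x12345678] could be emitted roughly as (a sketch of
// the assembler's expansion, using illustrative register names):
//	oris rT, rS, 0x1234
//	ori  rT, rT, 0x5678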

// Include very-large constants in the const-const case.
(AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
(OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
(XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])

// Discover consts
(AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)
(XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x)
(OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x)

// Simplify consts
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
(ANDconst [-1] x) -> x
(ANDconst [0] _) -> (MOVDconst [0])
(XORconst [0] x) -> x
(ORconst [-1] _) -> (MOVDconst [-1])
(ORconst [0] x) -> x

// zero-extend of small and -> small and
(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF -> y
(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y
(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF -> y
(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF -> y

// sign extend of small-positive and -> small-positive-and
(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F -> y
(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF -> y
(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y // 0xFFFF is the largest immediate constant, and regarded as 32-bit it is > 0
(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF -> y

// small and of zero-extend -> either zero-extend or small and
  // degenerate-and
(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF -> y
(ANDconst [c] y:(MOVHZreg _))  && c&0xFFFF == 0xFFFF -> y
(ANDconst [c] y:(MOVWZreg _))  && c&0xFFFFFFFF == 0xFFFFFFFF -> y
  // normal case
(ANDconst [c] (MOVBZreg x)) -> (ANDconst [c&0xFF] x)
(ANDconst [c] (MOVHZreg x)) -> (ANDconst [c&0xFFFF] x)
(ANDconst [c] (MOVWZreg x)) -> (ANDconst [c&0xFFFFFFFF] x)

// Various redundant zero/sign extension combinations.
(MOVBZreg y:(MOVBZreg _)) -> y  // repeat
(MOVBreg y:(MOVBreg _)) -> y // repeat
(MOVBreg (MOVBZreg x)) -> (MOVBreg x)
(MOVBZreg (MOVBreg x)) -> (MOVBZreg x)

// H - there are more combinations than these

(MOVHZreg y:(MOVHZreg _)) -> y // repeat
(MOVHZreg y:(MOVBZreg _)) -> y // wide of narrow

(MOVHreg y:(MOVHreg _)) -> y // repeat
(MOVHreg y:(MOVBreg _)) -> y // wide of narrow

(MOVHreg y:(MOVHZreg x)) -> (MOVHreg x)
(MOVHZreg y:(MOVHreg x)) -> (MOVHZreg x)

// W - there are more combinations than these

(MOVWZreg y:(MOVWZreg _)) -> y // repeat
(MOVWZreg y:(MOVHZreg _)) -> y // wide of narrow
(MOVWZreg y:(MOVBZreg _)) -> y // wide of narrow

(MOVWreg y:(MOVWreg _)) -> y // repeat
(MOVWreg y:(MOVHreg _)) -> y // wide of narrow
(MOVWreg y:(MOVBreg _)) -> y // wide of narrow

(MOVWreg y:(MOVWZreg x)) -> (MOVWreg x)
(MOVWZreg y:(MOVWreg x)) -> (MOVWZreg x)

// Arithmetic constant ops

(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
(ADDconst [0] x) -> x
(SUB x (MOVDconst [c])) && is32Bit(-c) -> (ADDconst [-c] x)
// TODO deal with subtract-from-const

(ADDconst [c] (MOVDaddr [d] {sym} x)) -> (MOVDaddr [c+d] {sym} x)

// Use register moves instead of stores and loads to move int<->float values
// Common with math Float64bits, Float64frombits
(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) -> (MFVSRD x)
(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) -> (MTVSRD x)
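// e.g. math.Float64bits(f) would otherwise store f and reload it as an
// integer; these rules and the ones below collapse the round trip into a
// single direct move (MFVSRD for float->int, MTVSRD for int->float).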

(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) -> (MOVDstore [off] {sym} ptr x mem)
(MOVDstore [off] {sym} ptr (MFVSRD x) mem) -> (FMOVDstore [off] {sym} ptr x mem)

(MTVSRD (MOVDconst [c])) -> (FMOVDconst [c])
(MFVSRD (FMOVDconst [c])) -> (MOVDconst [c])

(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (FMOVDload [off] {sym} ptr mem)
(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVDload [off] {sym} ptr mem)

// Fold offsets for stores.
(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} x val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} x val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} x val mem)
(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} x val mem)

(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem)

// Fold address into load/store.
// The assembler needs to generate several instructions and use a
// temp register for accessing a global, and each time it will reload
// the temp register. So don't fold the address of a global unless there
// is only one use.
(MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

(FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

(MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& (ptr.Op != OpSB || p.Uses == 1) ->
	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// Fold offsets for loads.
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)

(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVDload [off1+off2] {sym} x mem)
(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWload [off1+off2] {sym} x mem)
(MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} x mem)
(MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHload [off1+off2] {sym} x mem)
(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} x mem)
(MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} x mem)

// Store of zero -> storezero
(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVDstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVWstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVHstorezero [off] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVBstorezero [off] {sym} ptr mem)

// Fold offsets for storezero
(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
	(MOVDstorezero [off1+off2] {sym} x mem)
(MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
	(MOVWstorezero [off1+off2] {sym} x mem)
(MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
	(MOVHstorezero [off1+off2] {sym} x mem)
(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
	(MOVBstorezero [off1+off2] {sym} x mem)

// Fold symbols into storezero
(MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
	&& (x.Op != OpSB || p.Uses == 1) ->
	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
	&& (x.Op != OpSB || p.Uses == 1) ->
	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
	&& (x.Op != OpSB || p.Uses == 1) ->
	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
	&& (x.Op != OpSB || p.Uses == 1) ->
	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)

// atomic intrinsics
(AtomicLoad32  ptr mem) -> (LoweredAtomicLoad32 ptr mem)
(AtomicLoad64  ptr mem) -> (LoweredAtomicLoad64 ptr mem)
(AtomicLoadPtr ptr mem) -> (LoweredAtomicLoadPtr ptr mem)

(AtomicStore32      ptr val mem) -> (LoweredAtomicStore32 ptr val mem)
(AtomicStore64      ptr val mem) -> (LoweredAtomicStore64 ptr val mem)
//(AtomicStorePtrNoWB ptr val mem) -> (STLR  ptr val mem)

(AtomicExchange32 ptr val mem) -> (LoweredAtomicExchange32 ptr val mem)
(AtomicExchange64 ptr val mem) -> (LoweredAtomicExchange64 ptr val mem)

(AtomicAdd32 ptr val mem) -> (LoweredAtomicAdd32 ptr val mem)
(AtomicAdd64 ptr val mem) -> (LoweredAtomicAdd64 ptr val mem)

(AtomicCompareAndSwap32 ptr old new_ mem) -> (LoweredAtomicCas32 ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) -> (LoweredAtomicCas64 ptr old new_ mem)

(AtomicAnd8 ptr val mem) -> (LoweredAtomicAnd8 ptr val mem)
(AtomicOr8  ptr val mem) -> (LoweredAtomicOr8  ptr val mem)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16  x) -> (MOVBreg x)
(SignExt8to32  x) -> (MOVBreg x)
(SignExt8to64  x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)
(SignExt16to64 x) -> (MOVHreg x)
(SignExt32to64 x) -> (MOVWreg x)

(ZeroExt8to16  x) -> (MOVBZreg x)
(ZeroExt8to32  x) -> (MOVBZreg x)
(ZeroExt8to64  x) -> (MOVBZreg x)
(ZeroExt16to32 x) -> (MOVHZreg x)
(ZeroExt16to64 x) -> (MOVHZreg x)
(ZeroExt32to64 x) -> (MOVWZreg x)

(Trunc16to8  x) -> (MOVBreg x)
(Trunc32to8  x) -> (MOVBreg x)
(Trunc32to16 x) -> (MOVHreg x)
(Trunc64to8  x) -> (MOVBreg x)
(Trunc64to16 x) -> (MOVHreg x)
(Trunc64to32 x) -> (MOVWreg x)

(Slicemask <t> x) -> (SRADconst (NEG <t> x) [63])
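// Slicemask wants -1 for x > 0 and 0 for x == 0: NEG makes the sign bit 1 for
// any nonzero x, and the arithmetic shift by 63 smears that bit across the word.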

// Note that MOV??reg returns a 64-bit int; x is not necessarily that wide.
// This may interact with other patterns in the future. (Compare with arm64)
(MOVBZreg x:(MOVBZload _ _))  -> x
(MOVHZreg x:(MOVHZload _ _))  -> x
(MOVHreg x:(MOVHload _ _))  -> x

(MOVBZreg (MOVDconst [c]))  -> (MOVDconst [int64(uint8(c))])
(MOVBreg (MOVDconst [c]))  -> (MOVDconst [int64(int8(c))])
(MOVHZreg (MOVDconst [c]))  -> (MOVDconst [int64(uint16(c))])
(MOVHreg (MOVDconst [c]))  -> (MOVDconst [int64(int16(c))])

// Lose widening ops fed to stores
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)

// Lose W-widening ops fed to compare-W
(CMPW x (MOVWreg y)) -> (CMPW x y)
(CMPW (MOVWreg x) y) -> (CMPW x y)
(CMPWU x (MOVWZreg y)) -> (CMPWU x y)
(CMPWU (MOVWZreg x) y) -> (CMPWU x y)

(CMP x (MOVDconst [c])) && is16Bit(c) -> (CMPconst x [c])
(CMP (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPconst y [c]))
(CMPW x (MOVDconst [c])) && is16Bit(c) -> (CMPWconst x [c])
(CMPW (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPWconst y [c]))

(CMPU x (MOVDconst [c])) && isU16Bit(c) -> (CMPUconst x [c])
(CMPU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPUconst y [c]))
(CMPWU x (MOVDconst [c])) && isU16Bit(c) -> (CMPWUconst x [c])
(CMPWU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPWUconst y [c]))

// A particular pattern seen in cgo code:
(AND (MOVDconst [c]) x:(MOVBZload _ _)) -> (ANDconst [c&0xFF] x)
(AND x:(MOVBZload _ _) (MOVDconst [c])) -> (ANDconst [c&0xFF] x)

// floating point negative abs
(FNEG (FABS x)) -> (FNABS x)
(FNEG (FNABS x)) -> (FABS x)

// floating-point fused multiply-add/sub
(FADD (FMUL x y) z) -> (FMADD x y z)
(FSUB (FMUL x y) z) -> (FMSUB x y z)
(FADDS (FMULS x y) z) -> (FMADDS x y z)
(FSUBS (FMULS x y) z) -> (FMSUBS x y z)


// The following rules match the multi-byte load and store sequences found in
// the encoding/binary functions UintXX (load) and PutUintXX (store), and
// convert them from multiple single-byte loads or stores into the single
// largest possible load or store. For now only little-endian loads and stores
// on little-endian machines are implemented. Longer rules make use of the
// match with shorter rules where possible.
// TODO implement big endian loads and stores for little endian machines (using byte reverse
// loads and stores).
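// A sketch of the little-endian load shape being matched, essentially the body
// of encoding/binary's Uint32:
//	func Uint32(b []byte) uint32 {
//		return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
//	}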
// b[0] | b[1]<<8 -> load 16-bit Little endian
(OR <t> x0:(MOVBZload [i0] {s} p mem)
	o1:(SLWconst x1:(MOVBZload [i1] {s} p mem) [8]))
	&& !config.BigEndian
	&& i1 == i0+1
	&& x0.Uses == 1 && x1.Uses == 1
	&& o1.Uses == 1
	&& mergePoint(b, x0, x1) != nil
	&& clobber(x0) && clobber(x1) && clobber(o1)
	 -> @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)

// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit Little endian
(OR <t> s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24])
	o0:(OR <t> s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem)))
	&& !config.BigEndian
	&& i2 == i0+2
	&& i3 == i0+3
	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
	&& o0.Uses == 1
	&& s0.Uses == 1 && s1.Uses == 1
	&& mergePoint(b, x0, x1, x2) != nil
	&& clobber(x0) && clobber(x1) && clobber(x2)
	&& clobber(s0) && clobber(s1)
	&& clobber(o0)
	 -> @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)

// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit Little endian
// Can't build on shorter rules because they use SLW instead of SLD
// Offset must be multiple of 4 for MOVD
(OR <t> s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])
	o5:(OR <t> s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])
	o4:(OR <t> s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])
	o3:(OR <t> s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])
	o2:(OR <t> s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])
	o1:(OR <t> s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])
	o0:(OR <t> s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))))))))
	&& !config.BigEndian
	&& i0%4 == 0
	&& i1 == i0+1
	&& i2 == i0+2
	&& i3 == i0+3
	&& i4 == i0+4
	&& i5 == i0+5
	&& i6 == i0+6
	&& i7 == i0+7
	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
	&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
	&& mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil
	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7)
	&& clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6)
	&& clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)
	  -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)

// 2 byte store Little endian as in:
//	b[0] = byte(v)
//	b[1] = byte(v >> 8)
(MOVBstore [i1] {s} p (SRWconst (MOVHZreg w) [8])
	x0:(MOVBstore [i0] {s} p w mem))
	&& !config.BigEndian
	&& x0.Uses == 1
	&& i1 == i0+1
	&& clobber(x0)
	  -> (MOVHstore [i0] {s} p w mem)

// 4 byte store Little endian as in:
//	b[0] = byte(v)
//	b[1] = byte(v >> 8)
//	b[2] = byte(v >> 16)
//	b[3] = byte(v >> 24)
(MOVBstore [i3] {s} p (SRWconst w [24])
	x0:(MOVBstore [i2] {s} p (SRWconst w [16])
	x1:(MOVBstore [i1] {s} p (SRWconst w [8])
	x2:(MOVBstore [i0] {s} p w mem))))
	&& !config.BigEndian
	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
	&& i1 == i0+1 && i2 == i0+2 && i3 == i0+3
	&& clobber(x0) && clobber(x1) && clobber(x2)
	  -> (MOVWstore [i0] {s} p w mem)

// 8 byte store Little endian as in:
//	b[0] = byte(v)
//	b[1] = byte(v >> 8)
//	b[2] = byte(v >> 16)
//	b[3] = byte(v >> 24)
//	b[4] = byte(v >> 32)
//	b[5] = byte(v >> 40)
//	b[6] = byte(v >> 48)
//	b[7] = byte(v >> 56)
// Offset must be multiple of 4 for MOVDstore
// Can't build on previous rules for 2 or 4 bytes because they use SRW not SRD
(MOVBstore [i7] {s} p (SRDconst w [56])
	x0:(MOVBstore [i6] {s} p (SRDconst w [48])
	x1:(MOVBstore [i5] {s} p (SRDconst w [40])
	x2:(MOVBstore [i4] {s} p (SRDconst w [32])
	x3:(MOVBstore [i3] {s} p (SRDconst w [24])
	x4:(MOVBstore [i2] {s} p (SRDconst w [16])
	x5:(MOVBstore [i1] {s} p (SRDconst w [8])
	x6:(MOVBstore [i0] {s} p w mem))))))))
	&& !config.BigEndian
	&& i0%4 == 0
	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1
	&& i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	  -> (MOVDstore [i0] {s} p w mem)
   1062