// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add64  x y) -> (ADD  x y)
(AddPtr x y) -> (ADD  x y)
(Add32  x y) -> (ADD x y)
(Add16  x y) -> (ADD x y)
(Add8   x y) -> (ADD x y)
(Add64F x y) -> (FADD x y)
(Add32F x y) -> (FADDS x y)

(Sub64  x y) -> (SUB  x y)
(SubPtr x y) -> (SUB  x y)
(Sub32  x y) -> (SUB x y)
(Sub16  x y) -> (SUB x y)
(Sub8   x y) -> (SUB x y)
(Sub32F x y) -> (FSUBS x y)
(Sub64F x y) -> (FSUB x y)

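// There is no integer remainder instruction here (hardware modulo only
// arrives with ISA 3.0), so Mod is computed as x - (x/y)*y. The 8- and
// 16-bit cases widen to 32 bits first so they can share the 32-bit expansion.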
(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
(Mod64 x y) -> (SUB x (MULLD y (DIVD x y)))
(Mod64u x y) -> (SUB x (MULLD y (DIVDU x y)))
(Mod32 x y) -> (SUB x (MULLW y (DIVW x y)))
(Mod32u x y) -> (SUB x (MULLW y (DIVWU x y)))

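// Avg64u is expanded using the overflow-safe identity
//   (x+y)/2 == x/2 + y/2 + (x&y&1)
// for unsigned x and y; the final term restores the bit lost to truncation
// when both low bits are set.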
(Avg64u <t> x y) -> (ADD (ADD <t> (SRD <t> x (MOVDconst <t> [1])) (SRD <t> y (MOVDconst <t> [1]))) (ANDconst <t> (AND <t> x y) [1]))

(Mul64  x y) -> (MULLD  x y)
(Mul32  x y) -> (MULLW  x y)
(Mul16  x y) -> (MULLW x y)
(Mul8   x y) -> (MULLW x y)

(Div64  x y) -> (DIVD  x y)
(Div64u x y) -> (DIVDU x y)
(Div32  x y) -> (DIVW  x y)
(Div32u x y) -> (DIVWU x y)
(Div16  x y) -> (DIVW  (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) -> (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8   x y) -> (DIVW  (SignExt8to32 x) (SignExt8to32 y))
(Div8u  x y) -> (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))

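// There is no high-multiply instruction for 16- and 8-bit operands, so the
// widened operands are multiplied in full and the upper half of the product
// is extracted with a shift; e.g. Hmul16 is bits 16..31 of the 32-bit
// product of the sign-extended inputs.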
(Hmul64  x y) -> (MULHD  x y)
(Hmul64u  x y) -> (MULHDU x y)
(Hmul32  x y) -> (MULHW  x y)
(Hmul32u  x y) -> (MULHWU x y)
(Hmul16 x y) -> (SRAWconst (MULLW <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
(Hmul16u x y) -> (SRWconst (MULLW <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
(Hmul8 x y) -> (SRAWconst (MULLW <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
(Hmul8u x y) -> (SRWconst (MULLW <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])

(Mul32F x y) -> (FMULS x y)
(Mul64F x y) -> (FMUL x y)

(Div32F x y) -> (FDIVS x y)
(Div64F x y) -> (FDIV x y)

// Lowering float <-> int
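// FCFID converts a 64-bit integer held in a floating-point register to
// float64, and FCTIDZ/FCTIWZ convert back, truncating toward zero; the
// Xi2f64/Xf2i64 pseudo-ops move the raw bits between the register files,
// and FRSP rounds to float32 precision where needed.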
(Cvt32to32F x) -> (FRSP (FCFID (Xi2f64 (SignExt32to64 x))))
(Cvt32to64F x) -> (FCFID (Xi2f64 (SignExt32to64 x)))
(Cvt64to32F x) -> (FRSP (FCFID (Xi2f64 x)))
(Cvt64to64F x) -> (FCFID (Xi2f64 x))

(Cvt32Fto32 x) -> (Xf2i64 (FCTIWZ x))
(Cvt32Fto64 x) -> (Xf2i64 (FCTIDZ x))
(Cvt64Fto32 x) -> (Xf2i64 (FCTIWZ x))
(Cvt64Fto64 x) -> (Xf2i64 (FCTIDZ x))

(Cvt32Fto64F x) -> x // Note x will have the wrong type for patterns dependent on Float32/Float64
(Cvt64Fto32F x) -> (FRSP x)

(Sqrt x) -> (FSQRT x)

// Lowering constants
(Const8   [val]) -> (MOVDconst [val])
(Const16  [val]) -> (MOVDconst [val])
(Const32  [val]) -> (MOVDconst [val])
(Const64  [val]) -> (MOVDconst [val])
(Const32F [val]) -> (FMOVSconst [val])
(Const64F [val]) -> (FMOVDconst [val])
(ConstNil) -> (MOVDconst [0])
(ConstBool [b]) -> (MOVDconst [b])

(Lsh64x64  x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64  x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64  x (Const64 [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64  x (Const64 [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64  x (Const64 [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64  x (Const64 [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64   x (Const64 [c])) && uint64(c) < 8  -> (SLWconst x [c])
(Rsh8x64   x (Const64 [c])) && uint64(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux64  x (Const64 [c])) && uint64(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])

(Lsh64x32  x (Const64 [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32  x (Const64 [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (Const64 [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32  x (Const64 [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32  x (Const64 [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (Const64 [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32  x (Const64 [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32  x (Const64 [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (Const64 [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32   x (Const64 [c])) && uint32(c) < 8  -> (SLWconst x [c])
(Rsh8x32   x (Const64 [c])) && uint32(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux32  x (Const64 [c])) && uint32(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])

// large constant shifts
(Lsh64x64  _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Rsh64Ux64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Lsh32x64  _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Lsh16x64  _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Lsh8x64   _ (Const64 [c])) && uint64(c) >= 8  -> (MOVDconst [0])
(Rsh8Ux64  _ (Const64 [c])) && uint64(c) >= 8  -> (MOVDconst [0])

// large constant signed right shift: only copies of the sign bit remain, so shift the (sign-extended) value by 63
(Rsh64x64 x (Const64 [c])) && uint64(c) >= 64 -> (SRADconst x [63])
(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
(Rsh8x64  x (Const64 [c])) && uint64(c) >= 8  -> (SRAWconst (SignExt8to32  x) [63])

// constant shifts
(Lsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64  x (MOVDconst [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64  x (MOVDconst [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64  x (MOVDconst [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64  x (MOVDconst [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64  x (MOVDconst [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64   x (MOVDconst [c])) && uint64(c) < 8  -> (SLWconst x [c])
(Rsh8x64   x (MOVDconst [c])) && uint64(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux64  x (MOVDconst [c])) && uint64(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])

(Lsh64x32  x (MOVDconst [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32  x (MOVDconst [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32  x (MOVDconst [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32  x (MOVDconst [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32  x (MOVDconst [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32  x (MOVDconst [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32   x (MOVDconst [c])) && uint32(c) < 8  -> (SLWconst x [c])
(Rsh8x32   x (MOVDconst [c])) && uint32(c) < 8  -> (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux32  x (MOVDconst [c])) && uint32(c) < 8  -> (SRWconst (ZeroExt8to32  x) [c])

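// Shifts with a non-constant count must yield 0 (or all sign bits, for
// signed right shifts) once the count reaches the operand width, so the
// count is clamped: ADDconstForCarry [-width] y sets carry exactly when the
// unsigned count is >= width, MaskIfNotCarry is then 0 (carry set) or -1
// (carry clear), and ORN either passes y through unchanged or forces the
// count to -1, a value the shift instructions treat as "shift everything out".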
(Rsh64x64 x y)  -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Rsh64Ux64 x y) -> (SRD  x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Lsh64x64 x y)  -> (SLD  x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))

(Rsh32x64 x y)  -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Rsh32Ux64 x y) -> (SRW  x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Lsh32x64 x y)  -> (SLW  x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))

(Rsh16x64 x y)  -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Rsh16Ux64 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Lsh16x64 x y)  -> (SLW  x                 (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))

(Rsh8x64 x y)  -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Rsh8Ux64 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Lsh8x64 x y)  -> (SLW  x                (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))


(Rsh64x32 x y)  -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Rsh64Ux32 x y) -> (SRD x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Lsh64x32 x y)  -> (SLD x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))

(Rsh32x32 x y)  -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Rsh32Ux32 x y) -> (SRW x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Lsh32x32 x y)  -> (SLW x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))

(Rsh16x32 x y)  -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Rsh16Ux32 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Lsh16x32 x y)  -> (SLW  x                 (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))

(Rsh8x32 x y)  -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Rsh8Ux32 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Lsh8x32 x y)  -> (SLW  x                (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))


(Rsh64x16 x y)  -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Rsh64Ux16 x y) -> (SRD x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Lsh64x16 x y)  -> (SLD x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))

(Rsh32x16 x y)  -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Rsh32Ux16 x y) -> (SRW x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Lsh32x16 x y)  -> (SLW x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))

(Rsh16x16 x y)  -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Rsh16Ux16 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Lsh16x16 x y)  -> (SLW  x                 (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))

(Rsh8x16 x y)  -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Rsh8Ux16 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Lsh8x16 x y)  -> (SLW  x                (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))


(Rsh64x8 x y)  -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Rsh64Ux8 x y) -> (SRD x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Lsh64x8 x y)  -> (SLD x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))

(Rsh32x8 x y)  -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Rsh32Ux8 x y) -> (SRW x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Lsh32x8 x y)  -> (SLW x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))

(Rsh16x8 x y)  -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Rsh16Ux8 x y) -> (SRW  (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Lsh16x8 x y)  -> (SLW  x                 (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))

(Rsh8x8 x y)  -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Rsh8Ux8 x y) -> (SRW  (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Lsh8x8 x y)  -> (SLW  x                (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))

// Cleaning up shift ops when input is masked
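// If the count has already been masked to fewer bits than the shift width,
// the carry can never be set, the mask folds to -1, and the ORN disappears,
// leaving a plain variable shift.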
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
(ORN x (MOVDconst [-1])) -> x

// Potentially useful optimizing rewrites.
// (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet
// (ADDconstForCarry [k] c), k < 0 && (c >= 0 && k+c < 0) -> CarryClear
// (MaskIfNotCarry CarrySet) -> 0
// (MaskIfNotCarry CarryClear) -> -1

(Addr {sym} base) -> (MOVDaddr {sym} base)
// (Addr {sym} base) -> (ADDconst {sym} base)
(OffPtr [off] ptr) -> (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr)

(And64 x y) -> (AND x y)
(And32 x y) -> (AND x y)
(And16 x y) -> (AND x y)
(And8  x y) -> (AND x y)

(Or64 x y) -> (OR x y)
(Or32 x y) -> (OR x y)
(Or16 x y) -> (OR x y)
(Or8  x y) -> (OR x y)

(Xor64 x y) -> (XOR x y)
(Xor32 x y) -> (XOR x y)
(Xor16 x y) -> (XOR x y)
(Xor8  x y) -> (XOR x y)

(Neg64F x) -> (FNEG x)
(Neg32F x) -> (FNEG x)
(Neg64  x) -> (NEG x)
(Neg32  x) -> (NEG x)
(Neg16  x) -> (NEG x)
(Neg8   x) -> (NEG x)

(Com64 x) -> (XORconst [-1] x)
(Com32 x) -> (XORconst [-1] x)
(Com16 x) -> (XORconst [-1] x)
(Com8  x) -> (XORconst [-1] x)

// Lowering boolean ops
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(Not x) -> (XORconst [1] x)

// Use ANDN for AND x NOT y
(AND x (XORconst [-1] y)) -> (ANDN x y)

// Lowering comparisons
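// EQV computes ^(x^y), whose low bit is 1 exactly when the two booleans
// are equal; ANDconst [1] extracts that bit.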
(EqB x y)  -> (ANDconst [1] (EQV x y))
// Extend according to the operands' signedness; this sets up later elision of the sign/zero extensions.
(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Eq8 x y)  -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (Equal (CMPW x y))
(Eq64 x y) -> (Equal (CMP x y))
(Eq32F x y) -> (Equal (FCMPU x y))
(Eq64F x y) -> (Equal (FCMPU x y))
(EqPtr x y) -> (Equal (CMP x y))

(NeqB x y)  -> (XOR x y)
// As with Eq8 and Eq16, prefer sign extension when it is likely to enable later elision.
(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Neq8 x y)  -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) -> (NotEqual (CMPW x y))
(Neq64 x y) -> (NotEqual (CMP x y))
(Neq32F x y) -> (NotEqual (FCMPU x y))
(Neq64F x y) -> (NotEqual (FCMPU x y))
(NeqPtr x y) -> (NotEqual (CMP x y))

(Less8 x y)  -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Less32 x y) -> (LessThan (CMPW x y))
(Less64 x y) -> (LessThan (CMP x y))
(Less32F x y) -> (FLessThan (FCMPU x y))
(Less64F x y) -> (FLessThan (FCMPU x y))
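// Note: float comparisons use distinct F-variant pseudo-ops because FCMPU
// may report "unordered" for NaN operands; conditions such as FLessEqual
// must test lt/eq directly rather than negate gt, so that comparisons
// involving NaN come out false, as Go requires.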

(Less8U x y)  -> (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Less16U x y) -> (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) -> (LessThan (CMPWU x y))
(Less64U x y) -> (LessThan (CMPU x y))

(Leq8 x y)  -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) -> (LessEqual (CMPW x y))
(Leq64 x y) -> (LessEqual (CMP x y))
(Leq32F x y) -> (FLessEqual (FCMPU x y))
(Leq64F x y) -> (FLessEqual (FCMPU x y))

(Leq8U x y)  -> (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) -> (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) -> (LessEqual (CMPWU x y))
(Leq64U x y) -> (LessEqual (CMPU x y))

(Greater8 x y)  -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Greater32 x y) -> (GreaterThan (CMPW x y))
(Greater64 x y) -> (GreaterThan (CMP x y))
(Greater32F x y) -> (FGreaterThan (FCMPU x y))
(Greater64F x y) -> (FGreaterThan (FCMPU x y))

(Greater8U x y)  -> (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Greater16U x y) -> (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Greater32U x y) -> (GreaterThan (CMPWU x y))
(Greater64U x y) -> (GreaterThan (CMPU x y))

(Geq8 x y)  -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Geq32 x y) -> (GreaterEqual (CMPW x y))
(Geq64 x y) -> (GreaterEqual (CMP x y))
(Geq32F x y) -> (FGreaterEqual (FCMPU x y))
(Geq64F x y) -> (FGreaterEqual (FCMPU x y))

(Geq8U x y)  -> (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Geq16U x y) -> (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Geq32U x y) -> (GreaterEqual (CMPWU x y))
(Geq64U x y) -> (GreaterEqual (CMPU x y))

// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) -> (EQ cc yes no)
(If (NotEqual cc) yes no) -> (NE cc yes no)
(If (LessThan cc) yes no) -> (LT cc yes no)
(If (LessEqual cc) yes no) -> (LE cc yes no)
(If (GreaterThan cc) yes no) -> (GT cc yes no)
(If (GreaterEqual cc) yes no) -> (GE cc yes no)
(If (FLessThan cc) yes no) -> (FLT cc yes no)
(If (FLessEqual cc) yes no) -> (FLE cc yes no)
(If (FGreaterThan cc) yes no) -> (FGT cc yes no)
(If (FGreaterEqual cc) yes no) -> (FGE cc yes no)

(If cond yes no) -> (NE (CMPWconst [0] cond) yes no)
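// Any other boolean condition is materialized and compared against zero;
// the rules below then collapse that compare when the boolean itself came
// from a comparison pseudo-op.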

// Absorb boolean tests into block
(NE (CMPWconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
(NE (CMPWconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
(NE (CMPWconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
(NE (CMPWconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
(NE (CMPWconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
(NE (CMPWconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
(NE (CMPWconst [0] (FLessThan cc)) yes no) -> (FLT cc yes no)
(NE (CMPWconst [0] (FLessEqual cc)) yes no) -> (FLE cc yes no)
(NE (CMPWconst [0] (FGreaterThan cc)) yes no) -> (FGT cc yes no)
(NE (CMPWconst [0] (FGreaterEqual cc)) yes no) -> (FGE cc yes no)

// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
(EQ (CMPconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
(NE (CMPconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
(EQ (CMPWconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
(NE (CMPWconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)

// absorb flag constants into branches
(EQ (FlagEQ) yes no) -> (First nil yes no)
(EQ (FlagLT) yes no) -> (First nil no yes)
(EQ (FlagGT) yes no) -> (First nil no yes)

(NE (FlagEQ) yes no) -> (First nil no yes)
(NE (FlagLT) yes no) -> (First nil yes no)
(NE (FlagGT) yes no) -> (First nil yes no)

(LT (FlagEQ) yes no) -> (First nil no yes)
(LT (FlagLT) yes no) -> (First nil yes no)
(LT (FlagGT) yes no) -> (First nil no yes)

(LE (FlagEQ) yes no) -> (First nil yes no)
(LE (FlagLT) yes no) -> (First nil yes no)
(LE (FlagGT) yes no) -> (First nil no yes)

(GT (FlagEQ) yes no) -> (First nil no yes)
(GT (FlagLT) yes no) -> (First nil no yes)
(GT (FlagGT) yes no) -> (First nil yes no)

(GE (FlagEQ) yes no) -> (First nil yes no)
(GE (FlagLT) yes no) -> (First nil no yes)
(GE (FlagGT) yes no) -> (First nil yes no)

// absorb InvertFlags into branches
(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)

// constant comparisons
(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y)  -> (FlagLT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y)  -> (FlagGT)

(CMPconst (MOVDconst [x]) [y]) && int64(x)==int64(y) -> (FlagEQ)
(CMPconst (MOVDconst [x]) [y]) && int64(x)<int64(y)  -> (FlagLT)
(CMPconst (MOVDconst [x]) [y]) && int64(x)>int64(y)  -> (FlagGT)

(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y)  -> (FlagEQ)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)

(CMPUconst (MOVDconst [x]) [y]) && int64(x)==int64(y)  -> (FlagEQ)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)

// other known comparisons
//(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT)
//(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT)
//(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
//(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) -> (FlagLT)

// absorb flag constants into boolean values
(Equal (FlagEQ)) -> (MOVDconst [1])
(Equal (FlagLT)) -> (MOVDconst [0])
(Equal (FlagGT)) -> (MOVDconst [0])

(NotEqual (FlagEQ)) -> (MOVDconst [0])
(NotEqual (FlagLT)) -> (MOVDconst [1])
(NotEqual (FlagGT)) -> (MOVDconst [1])

(LessThan (FlagEQ)) -> (MOVDconst [0])
(LessThan (FlagLT)) -> (MOVDconst [1])
(LessThan (FlagGT)) -> (MOVDconst [0])

(LessEqual (FlagEQ)) -> (MOVDconst [1])
(LessEqual (FlagLT)) -> (MOVDconst [1])
(LessEqual (FlagGT)) -> (MOVDconst [0])

(GreaterThan (FlagEQ)) -> (MOVDconst [0])
(GreaterThan (FlagLT)) -> (MOVDconst [0])
(GreaterThan (FlagGT)) -> (MOVDconst [1])

(GreaterEqual (FlagEQ)) -> (MOVDconst [1])
(GreaterEqual (FlagLT)) -> (MOVDconst [0])
(GreaterEqual (FlagGT)) -> (MOVDconst [1])

// absorb InvertFlags into boolean values
(Equal (InvertFlags x)) -> (Equal x)
(NotEqual (InvertFlags x)) -> (NotEqual x)
(LessThan (InvertFlags x)) -> (GreaterThan x)
(GreaterThan (InvertFlags x)) -> (LessThan x)
(LessEqual (InvertFlags x)) -> (GreaterEqual x)
(GreaterEqual (InvertFlags x)) -> (LessEqual x)

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
(Load <t> ptr mem) && t.IsBoolean() -> (MOVBZload ptr mem)
(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) -> (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) -> (MOVBZload ptr mem)

(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)

(Store [8] ptr val mem) && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
(Store [8] ptr val mem) && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
(Store [4] ptr val mem) && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
(Store [8] ptr val mem) && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
(Store [4] ptr val mem) && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
(Store [2] ptr val mem) -> (MOVHstore ptr val mem)
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)

(Zero [s] _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstorezero destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstorezero destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 2 ->
	(MOVBstorezero [1] destptr
		(MOVBstorezero [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
	(MOVWstorezero destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstorezero [2] destptr
		(MOVHstorezero [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 4 ->
	(MOVBstorezero [3] destptr
		(MOVBstorezero [2] destptr
			(MOVBstorezero [1] destptr
				(MOVBstorezero [0] destptr mem))))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0 ->
	(MOVDstorezero [0] destptr mem)
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0 ->
	(MOVWstorezero [4] destptr
		(MOVWstorezero [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstorezero [6] destptr
		(MOVHstorezero [4] destptr
			(MOVHstorezero [2] destptr
				(MOVHstorezero [0] destptr mem))))

(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 3 ->
	(MOVBstorezero [2] destptr
		(MOVBstorezero [1] destptr
			(MOVBstorezero [0] destptr mem)))

// Zero small numbers of words directly.
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0 ->
	(MOVDstorezero [8] destptr
		(MOVDstorezero [0] destptr mem))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0 ->
	(MOVDstorezero [16] destptr
		(MOVDstorezero [8] destptr
			(MOVDstorezero [0] destptr mem)))
(Zero [s] destptr mem) && SizeAndAlign(s).Size() == 32 && SizeAndAlign(s).Align()%8 == 0 ->
	(MOVDstorezero [24] destptr
		(MOVDstorezero [16] destptr
			(MOVDstorezero [8] destptr
				(MOVDstorezero [0] destptr mem))))

// Large zeroing uses a loop
(Zero [s] ptr mem)
	&& (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0 ->
	(LoweredZero [SizeAndAlign(s).Align()]
		ptr
		(ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])
		mem)
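// The address operand built with ADDconst is the address of the last
// element to zero: moveSize presumably picks the widest store width the
// alignment permits, and the generated loop runs until it reaches that
// address. LoweredMove below is shaped the same way.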

// moves
(Move [s] _ _ mem) && SizeAndAlign(s).Size() == 0 -> mem
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 1 -> (MOVBstore dst (MOVBZload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstore dst (MOVHZload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 2 ->
	(MOVBstore [1] dst (MOVBZload [1] src mem)
		(MOVBstore dst (MOVBZload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 ->
	(MOVWstore dst (MOVWload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstore [2] dst (MOVHZload [2] src mem)
		(MOVHstore dst (MOVHZload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 4 ->
	(MOVBstore [3] dst (MOVBZload [3] src mem)
		(MOVBstore [2] dst (MOVBZload [2] src mem)
			(MOVBstore [1] dst (MOVBZload [1] src mem)
				(MOVBstore dst (MOVBZload src mem) mem))))

(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0 ->
	(MOVDstore dst (MOVDload src mem) mem)
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0 ->
	(MOVWstore [4] dst (MOVWZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [s] dst src mem) && SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0 ->
	(MOVHstore [6] dst (MOVHZload [6] src mem)
		(MOVHstore [4] dst (MOVHZload [4] src mem)
			(MOVHstore [2] dst (MOVHZload [2] src mem)
				(MOVHstore dst (MOVHZload src mem) mem))))

(Move [s] dst src mem) && SizeAndAlign(s).Size() == 3 ->
	(MOVBstore [2] dst (MOVBZload [2] src mem)
		(MOVBstore [1] dst (MOVBZload [1] src mem)
			(MOVBstore dst (MOVBZload src mem) mem)))

// Large move uses a loop
(Move [s] dst src mem)
	&& (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0 ->
	(LoweredMove [SizeAndAlign(s).Align()]
		dst
		src
		(ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])
		mem)

// Calls
// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(DeferCall [argwid] mem) -> (CALLdefer [argwid] mem)
(GoCall [argwid] mem) -> (CALLgo [argwid] mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// Miscellaneous
(Convert <t> x mem) -> (MOVDconvert <t> x mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
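// Bounds checks need only one unsigned comparison: a negative index,
// reinterpreted as unsigned, is larger than any valid length, so
// 0 <= idx < len reduces to CMPU idx len.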
(IsInBounds idx len) -> (LessThan (CMPU idx len))
(IsSliceInBounds idx len) -> (LessEqual (CMPU idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)

// Optimizations
// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
// so ORconst, XORconst easily expand into a pair.
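// For example, ORconst [0x12345678] can be assembled as oris rx,rx,0x1234
// followed by ori rx,rx,0x5678, since successive ORs accumulate bits.
// Successive ANDs intersect masks instead, which is why ANDconst below is
// restricted to the 16-bit isU16Bit form.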

// Include very-large constants in the const-const case.
(AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
(OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
(XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])

// Discover consts
(AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)
(XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x)
(OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x)
(AND (MOVDconst [c]) x) && isU16Bit(c) -> (ANDconst [c] x)
(XOR (MOVDconst [c]) x) && isU32Bit(c) -> (XORconst [c] x)
(OR (MOVDconst [c]) x) && isU32Bit(c) -> (ORconst [c] x)

// Simplify consts
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
(ANDconst [-1] x) -> x
(ANDconst [0] _) -> (MOVDconst [0])
(XORconst [0] x) -> x
(ORconst [-1] _) -> (MOVDconst [-1])
(ORconst [0] x) -> x

// zero-extend of small and -> small and
(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF -> y
(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y
(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF -> y
(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF -> y

// sign extend of small-positive and -> small-positive-and
(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F -> y
(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF -> y
(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y // 0xFFFF is the largest immediate constant; regarded as a 32-bit value it is > 0
(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF -> y

// small and of zero-extend -> either zero-extend or small and
  // degenerate-and
(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF -> y
(ANDconst [c] y:(MOVHZreg _))  && c&0xFFFF == 0xFFFF -> y
(ANDconst [c] y:(MOVWZreg _))  && c&0xFFFFFFFF == 0xFFFFFFFF -> y
  // normal case
(ANDconst [c] (MOVBZreg x)) -> (ANDconst [c&0xFF] x)
(ANDconst [c] (MOVHZreg x)) -> (ANDconst [c&0xFFFF] x)
(ANDconst [c] (MOVWZreg x)) -> (ANDconst [c&0xFFFFFFFF] x)

// Various redundant zero/sign extension combinations.
(MOVBZreg y:(MOVBZreg _)) -> y  // repeat
(MOVBreg y:(MOVBreg _)) -> y // repeat
(MOVBreg (MOVBZreg x)) -> (MOVBreg x)
(MOVBZreg (MOVBreg x)) -> (MOVBZreg x)

// H - there are more combinations than these

(MOVHZreg y:(MOVHZreg _)) -> y // repeat
(MOVHZreg y:(MOVBZreg _)) -> y // wide of narrow

(MOVHreg y:(MOVHreg _)) -> y // repeat
(MOVHreg y:(MOVBreg _)) -> y // wide of narrow

(MOVHreg y:(MOVHZreg x)) -> (MOVHreg x)
(MOVHZreg y:(MOVHreg x)) -> (MOVHZreg x)

// W - there are more combinations than these

(MOVWZreg y:(MOVWZreg _)) -> y // repeat
(MOVWZreg y:(MOVHZreg _)) -> y // wide of narrow
(MOVWZreg y:(MOVBZreg _)) -> y // wide of narrow

(MOVWreg y:(MOVWreg _)) -> y // repeat
(MOVWreg y:(MOVHreg _)) -> y // wide of narrow
(MOVWreg y:(MOVBreg _)) -> y // wide of narrow

(MOVWreg y:(MOVWZreg x)) -> (MOVWreg x)
(MOVWZreg y:(MOVWreg x)) -> (MOVWZreg x)

// Arithmetic constant ops

(ADD (MOVDconst [c]) x) && is32Bit(c) -> (ADDconst [c] x)
(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
(ADDconst [0] x) -> x
(SUB x (MOVDconst [c])) && is32Bit(-c) -> (ADDconst [-c] x)
// TODO deal with subtract-from-const

(ADDconst [c] (MOVDaddr [d] {sym} x)) -> (MOVDaddr [c+d] {sym} x)

// Fold offsets for stores.
(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} x val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} x val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} x val mem)
(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} x val mem)

(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem)

(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
        (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
        (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
        (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
        (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
        (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
        (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
        (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
        (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
        (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
        (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
        (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
        (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
        (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
        (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// Fold offsets for loads.
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)

(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVDload [off1+off2] {sym} x mem)
(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWload [off1+off2] {sym} x mem)
(MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} x mem)
(MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHload [off1+off2] {sym} x mem)
(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} x mem)
(MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} x mem)

// Store of zero -> storezero
(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVDstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVWstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVHstorezero [off] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVBstorezero [off] {sym} ptr mem)

// Fold offsets for storezero
(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVDstorezero [off1+off2] {sym} x mem)
(MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVWstorezero [off1+off2] {sym} x mem)
(MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVHstorezero [off1+off2] {sym} x mem)
(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVBstorezero [off1+off2] {sym} x mem)

// Fold symbols into storezero
(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) ->
    (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) ->
    (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) ->
    (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) ->
    (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16  x) -> (MOVBreg x)
(SignExt8to32  x) -> (MOVBreg x)
(SignExt8to64  x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)
(SignExt16to64 x) -> (MOVHreg x)
(SignExt32to64 x) -> (MOVWreg x)

(ZeroExt8to16  x) -> (MOVBZreg x)
(ZeroExt8to32  x) -> (MOVBZreg x)
(ZeroExt8to64  x) -> (MOVBZreg x)
(ZeroExt16to32 x) -> (MOVHZreg x)
(ZeroExt16to64 x) -> (MOVHZreg x)
(ZeroExt32to64 x) -> (MOVWZreg x)

(Trunc16to8  x) -> (MOVBreg x)
(Trunc32to8  x) -> (MOVBreg x)
(Trunc32to16 x) -> (MOVHreg x)
(Trunc64to8  x) -> (MOVBreg x)
(Trunc64to16 x) -> (MOVHreg x)
(Trunc64to32 x) -> (MOVWreg x)

(Slicemask <t> x) -> (XORconst [-1] (SRADconst <t> (ADDconst <t> x [-1]) [63]))
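// Slicemask is 0 when x == 0 and -1 when x > 0: (x-1)>>63 (arithmetic) is
// -1 only for x == 0, and the final XOR with -1 inverts that mask.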

// Note that MOV??reg returns a 64-bit int; x is not necessarily that wide.
// This may interact with other patterns in the future. (Compare with arm64.)
(MOVBZreg x:(MOVBZload _ _))  -> x
(MOVHZreg x:(MOVHZload _ _))  -> x
(MOVHreg x:(MOVHload _ _))  -> x

(MOVBZreg (MOVDconst [c]))  -> (MOVDconst [int64(uint8(c))])
(MOVBreg (MOVDconst [c]))  -> (MOVDconst [int64(int8(c))])
(MOVHZreg (MOVDconst [c]))  -> (MOVDconst [int64(uint16(c))])
(MOVHreg (MOVDconst [c]))  -> (MOVDconst [int64(int16(c))])

// Lose widening ops fed to stores
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)

// Lose W-widening ops fed to compare-W
(CMPW x (MOVWreg y)) -> (CMPW x y)
(CMPW (MOVWreg x) y) -> (CMPW x y)
(CMPWU x (MOVWZreg y)) -> (CMPWU x y)
(CMPWU (MOVWZreg x) y) -> (CMPWU x y)

(CMP x (MOVDconst [c])) && is16Bit(c) -> (CMPconst x [c])
(CMP (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPconst y [c]))
(CMPW x (MOVDconst [c])) && is16Bit(c) -> (CMPWconst x [c])
(CMPW (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPWconst y [c]))

(CMPU x (MOVDconst [c])) && isU16Bit(c) -> (CMPUconst x [c])
(CMPU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPUconst y [c]))
(CMPWU x (MOVDconst [c])) && isU16Bit(c) -> (CMPWUconst x [c])
(CMPWU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPWUconst y [c]))

// A particular pattern seen in cgo code:
(AND (MOVDconst [c]) x:(MOVBZload _ _)) -> (ANDconst [c&0xFF] x)
(AND x:(MOVBZload _ _) (MOVDconst [c])) -> (ANDconst [c&0xFF] x)