// autogenerated from gen/AMD64.rules: do not edit!
// generated with: cd gen; go run *.go

package ssa

import "math"

var _ = math.MinInt8 // in case not otherwise used
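// rewriteValueAMD64 dispatches v to the rewrite rules for v.Op and
// reports whether any rule rewrote v in place.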
func rewriteValueAMD64(v *Value, config *Config) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v, config)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v, config)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v, config)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v, config)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v, config)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v, config)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v, config)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v, config)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v, config)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v, config)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v, config)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v, config)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v, config)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v, config)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v, config)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v, config)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v, config)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v, config)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v, config)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v, config)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v, config)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v, config)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v, config)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v, config)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v, config)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v, config)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v, config)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v, config)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v, config)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v, config)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v, config)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v, config)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v, config)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v, config)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v, config)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v, config)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v, config)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v, config)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v, config)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v, config)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v, config)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v, config)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v, config)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v, config)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v, config)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v, config)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v, config)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v, config)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v, config)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v, config)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v, config)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v, config)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v, config)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v, config)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v, config)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v, config)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v, config)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v, config)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v, config)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v, config)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v, config)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v, config)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v, config)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v, config)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v, config)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v, config)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v, config)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v, config)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v, config)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16(v, config)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32(v, config)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F(v, config)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64(v, config)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F(v, config)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8(v, config)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr(v, config)
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v, config)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16(v, config)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32(v, config)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64(v, config)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8(v, config)
	case OpAndB:
		return rewriteValueAMD64_OpAndB(v, config)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v, config)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v, config)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8(v, config)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v, config)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v, config)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v, config)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v, config)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v, config)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v, config)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v, config)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8(v, config)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v, config)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v, config)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v, config)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u(v, config)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32(v, config)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64(v, config)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall(v, config)
	case OpCom16:
		return rewriteValueAMD64_OpCom16(v, config)
	case OpCom32:
		return rewriteValueAMD64_OpCom32(v, config)
	case OpCom64:
		return rewriteValueAMD64_OpCom64(v, config)
	case OpCom8:
		return rewriteValueAMD64_OpCom8(v, config)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v, config)
	case OpConst32:
		return rewriteValueAMD64_OpConst32(v, config)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F(v, config)
	case OpConst64:
		return rewriteValueAMD64_OpConst64(v, config)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F(v, config)
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v, config)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v, config)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v, config)
	case OpConvert:
		return rewriteValueAMD64_OpConvert(v, config)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v, config)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v, config)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32(v, config)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64(v, config)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F(v, config)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F(v, config)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F(v, config)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32(v, config)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F(v, config)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64(v, config)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F(v, config)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F(v, config)
	case OpDeferCall:
		return rewriteValueAMD64_OpDeferCall(v, config)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u(v, config)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v, config)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v, config)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v, config)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F(v, config)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v, config)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v, config)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F(v, config)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v, config)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v, config)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v, config)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v, config)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v, config)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v, config)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v, config)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v, config)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v, config)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v, config)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v, config)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16(v, config)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U(v, config)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32(v, config)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F(v, config)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U(v, config)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64(v, config)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F(v, config)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U(v, config)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8(v, config)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U(v, config)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr(v, config)
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v, config)
	case OpGoCall:
		return rewriteValueAMD64_OpGoCall(v, config)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16(v, config)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U(v, config)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32(v, config)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F(v, config)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U(v, config)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64(v, config)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F(v, config)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U(v, config)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8(v, config)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U(v, config)
	case OpHmul16:
		return rewriteValueAMD64_OpHmul16(v, config)
	case OpHmul16u:
		return rewriteValueAMD64_OpHmul16u(v, config)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32(v, config)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u(v, config)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64(v, config)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u(v, config)
	case OpHmul8:
		return rewriteValueAMD64_OpHmul8(v, config)
	case OpHmul8u:
		return rewriteValueAMD64_OpHmul8u(v, config)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi(v, config)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall(v, config)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v, config)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v, config)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v, config)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v, config)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v, config)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v, config)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v, config)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v, config)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v, config)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v, config)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v, config)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v, config)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v, config)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v, config)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v, config)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v, config)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v, config)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v, config)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v, config)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v, config)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v, config)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v, config)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v, config)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v, config)
	case OpLrot16:
		return rewriteValueAMD64_OpLrot16(v, config)
	case OpLrot32:
		return rewriteValueAMD64_OpLrot32(v, config)
	case OpLrot64:
		return rewriteValueAMD64_OpLrot64(v, config)
	case OpLrot8:
		return rewriteValueAMD64_OpLrot8(v, config)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v, config)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v, config)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v, config)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v, config)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v, config)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v, config)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v, config)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v, config)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v, config)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v, config)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v, config)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v, config)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v, config)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v, config)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v, config)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v, config)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v, config)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v, config)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v, config)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v, config)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v, config)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v, config)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v, config)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v, config)
	case OpMove:
		return rewriteValueAMD64_OpMove(v, config)
	case OpMul16:
		return rewriteValueAMD64_OpMul16(v, config)
	case OpMul32:
		return rewriteValueAMD64_OpMul32(v, config)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F(v, config)
	case OpMul64:
		return rewriteValueAMD64_OpMul64(v, config)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F(v, config)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo(v, config)
	case OpMul8:
		return rewriteValueAMD64_OpMul8(v, config)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16(v, config)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32(v, config)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v, config)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64(v, config)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v, config)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8(v, config)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v, config)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v, config)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v, config)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v, config)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v, config)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v, config)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v, config)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v, config)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck(v, config)
	case OpNot:
		return rewriteValueAMD64_OpNot(v, config)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v, config)
	case OpOr16:
		return rewriteValueAMD64_OpOr16(v, config)
	case OpOr32:
		return rewriteValueAMD64_OpOr32(v, config)
	case OpOr64:
		return rewriteValueAMD64_OpOr64(v, config)
	case OpOr8:
		return rewriteValueAMD64_OpOr8(v, config)
	case OpOrB:
		return rewriteValueAMD64_OpOrB(v, config)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v, config)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v, config)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v, config)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v, config)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v, config)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v, config)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v, config)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v, config)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v, config)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v, config)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v, config)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v, config)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v, config)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v, config)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v, config)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v, config)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v, config)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v, config)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v, config)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v, config)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v, config)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v, config)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v, config)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v, config)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v, config)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v, config)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v, config)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v, config)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v, config)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v, config)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v, config)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v, config)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v, config)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v, config)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32(v, config)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64(v, config)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64(v, config)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16(v, config)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32(v, config)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64(v, config)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v, config)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt(v, config)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall(v, config)
	case OpStore:
		return rewriteValueAMD64_OpStore(v, config)
	case OpSub16:
		return rewriteValueAMD64_OpSub16(v, config)
	case OpSub32:
		return rewriteValueAMD64_OpSub32(v, config)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F(v, config)
	case OpSub64:
		return rewriteValueAMD64_OpSub64(v, config)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F(v, config)
	case OpSub8:
		return rewriteValueAMD64_OpSub8(v, config)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr(v, config)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8(v, config)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16(v, config)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8(v, config)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16(v, config)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32(v, config)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8(v, config)
	case OpXor16:
		return rewriteValueAMD64_OpXor16(v, config)
	case OpXor32:
		return rewriteValueAMD64_OpXor32(v, config)
	case OpXor64:
		return rewriteValueAMD64_OpXor64(v, config)
	case OpXor8:
		return rewriteValueAMD64_OpXor8(v, config)
	case OpZero:
		return rewriteValueAMD64_OpZero(v, config)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32(v, config)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64(v, config)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64(v, config)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16(v, config)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32(v, config)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64(v, config)
	}
	return false
}
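// Each rewriteValueAMD64_Op* function below applies the rules for one
// opcode, in order. A rule's match/cond/result comments restate it from
// gen/AMD64.rules: the loop body checks the operand shape (match) and the
// side condition (cond), then resets v to the result op and returns true;
// if no rule applies, the function returns false. For example, the first
// ADDL rule folds a constant operand:
// (ADDL x (MOVLconst [c])) -> (ADDLconst [c] x).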
func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQconst [c] y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} x) y)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPB x (MOVLconst [c]))
	// cond:
	// result: (CMPBconst x [int64(int8(c))])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v0.AuxInt = int64(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==int8(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) == int8(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < int8(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int8(m) && int8(m) < int8(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (ANDL x y) [0])
	// cond:
	// result: (TESTB x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPBconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTBconst [int64(int8(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// cond:
	// result: (TESTB x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (CMPL x (MOVLconst [c]))
	// cond:
	// result: (CMPLconst x [c])
	for {
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
   1833 	}
   1834 	// match: (CMPL (MOVLconst [c]) x)
   1835 	// cond:
   1836 	// result: (InvertFlags (CMPLconst x [c]))
   1837 	for {
   1838 		v_0 := v.Args[0]
   1839 		if v_0.Op != OpAMD64MOVLconst {
   1840 			break
   1841 		}
   1842 		c := v_0.AuxInt
   1843 		x := v.Args[1]
   1844 		v.reset(OpAMD64InvertFlags)
   1845 		v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
   1846 		v0.AuxInt = c
   1847 		v0.AddArg(x)
   1848 		v.AddArg(v0)
   1849 		return true
   1850 	}
   1851 	return false
   1852 }
   1853 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
   1854 	b := v.Block
   1855 	_ = b
   1856 	// match: (CMPLconst (MOVLconst [x]) [y])
   1857 	// cond: int32(x)==int32(y)
   1858 	// result: (FlagEQ)
   1859 	for {
   1860 		y := v.AuxInt
   1861 		v_0 := v.Args[0]
   1862 		if v_0.Op != OpAMD64MOVLconst {
   1863 			break
   1864 		}
   1865 		x := v_0.AuxInt
   1866 		if !(int32(x) == int32(y)) {
   1867 			break
   1868 		}
   1869 		v.reset(OpAMD64FlagEQ)
   1870 		return true
   1871 	}
   1872 	// match: (CMPLconst (MOVLconst [x]) [y])
   1873 	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
   1874 	// result: (FlagLT_ULT)
   1875 	for {
   1876 		y := v.AuxInt
   1877 		v_0 := v.Args[0]
   1878 		if v_0.Op != OpAMD64MOVLconst {
   1879 			break
   1880 		}
   1881 		x := v_0.AuxInt
   1882 		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
   1883 			break
   1884 		}
   1885 		v.reset(OpAMD64FlagLT_ULT)
   1886 		return true
   1887 	}
   1888 	// match: (CMPLconst (MOVLconst [x]) [y])
   1889 	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
   1890 	// result: (FlagLT_UGT)
   1891 	for {
   1892 		y := v.AuxInt
   1893 		v_0 := v.Args[0]
   1894 		if v_0.Op != OpAMD64MOVLconst {
   1895 			break
   1896 		}
   1897 		x := v_0.AuxInt
   1898 		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
   1899 			break
   1900 		}
   1901 		v.reset(OpAMD64FlagLT_UGT)
   1902 		return true
   1903 	}
   1904 	// match: (CMPLconst (MOVLconst [x]) [y])
   1905 	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
   1906 	// result: (FlagGT_ULT)
   1907 	for {
   1908 		y := v.AuxInt
   1909 		v_0 := v.Args[0]
   1910 		if v_0.Op != OpAMD64MOVLconst {
   1911 			break
   1912 		}
   1913 		x := v_0.AuxInt
   1914 		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
   1915 			break
   1916 		}
   1917 		v.reset(OpAMD64FlagGT_ULT)
   1918 		return true
   1919 	}
   1920 	// match: (CMPLconst (MOVLconst [x]) [y])
   1921 	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
   1922 	// result: (FlagGT_UGT)
   1923 	for {
   1924 		y := v.AuxInt
   1925 		v_0 := v.Args[0]
   1926 		if v_0.Op != OpAMD64MOVLconst {
   1927 			break
   1928 		}
   1929 		x := v_0.AuxInt
   1930 		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
   1931 			break
   1932 		}
   1933 		v.reset(OpAMD64FlagGT_UGT)
   1934 		return true
   1935 	}
   1936 	// match: (CMPLconst (SHRLconst _ [c]) [n])
   1937 	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
   1938 	// result: (FlagLT_ULT)
   1939 	for {
   1940 		n := v.AuxInt
   1941 		v_0 := v.Args[0]
   1942 		if v_0.Op != OpAMD64SHRLconst {
   1943 			break
   1944 		}
   1945 		c := v_0.AuxInt
   1946 		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
   1947 			break
   1948 		}
   1949 		v.reset(OpAMD64FlagLT_ULT)
   1950 		return true
   1951 	}
   1952 	// match: (CMPLconst (ANDLconst _ [m]) [n])
   1953 	// cond: 0 <= int32(m) && int32(m) < int32(n)
   1954 	// result: (FlagLT_ULT)
   1955 	for {
   1956 		n := v.AuxInt
   1957 		v_0 := v.Args[0]
   1958 		if v_0.Op != OpAMD64ANDLconst {
   1959 			break
   1960 		}
   1961 		m := v_0.AuxInt
   1962 		if !(0 <= int32(m) && int32(m) < int32(n)) {
   1963 			break
   1964 		}
   1965 		v.reset(OpAMD64FlagLT_ULT)
   1966 		return true
   1967 	}
   1968 	// match: (CMPLconst (ANDL x y) [0])
   1969 	// cond:
   1970 	// result: (TESTL x y)
   1971 	for {
   1972 		if v.AuxInt != 0 {
   1973 			break
   1974 		}
   1975 		v_0 := v.Args[0]
   1976 		if v_0.Op != OpAMD64ANDL {
   1977 			break
   1978 		}
   1979 		x := v_0.Args[0]
   1980 		y := v_0.Args[1]
   1981 		v.reset(OpAMD64TESTL)
   1982 		v.AddArg(x)
   1983 		v.AddArg(y)
   1984 		return true
   1985 	}
   1986 	// match: (CMPLconst (ANDLconst [c] x) [0])
   1987 	// cond:
   1988 	// result: (TESTLconst [c] x)
   1989 	for {
   1990 		if v.AuxInt != 0 {
   1991 			break
   1992 		}
   1993 		v_0 := v.Args[0]
   1994 		if v_0.Op != OpAMD64ANDLconst {
   1995 			break
   1996 		}
   1997 		c := v_0.AuxInt
   1998 		x := v_0.Args[0]
   1999 		v.reset(OpAMD64TESTLconst)
   2000 		v.AuxInt = c
   2001 		v.AddArg(x)
   2002 		return true
   2003 	}
   2004 	// match: (CMPLconst x [0])
   2005 	// cond:
   2006 	// result: (TESTL x x)
   2007 	for {
   2008 		if v.AuxInt != 0 {
   2009 			break
   2010 		}
   2011 		x := v.Args[0]
   2012 		v.reset(OpAMD64TESTL)
   2013 		v.AddArg(x)
   2014 		v.AddArg(x)
   2015 		return true
   2016 	}
   2017 	return false
   2018 }
   2019 func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
   2020 	b := v.Block
   2021 	_ = b
   2022 	// match: (CMPQ x (MOVQconst [c]))
   2023 	// cond: is32Bit(c)
   2024 	// result: (CMPQconst x [c])
   2025 	for {
   2026 		x := v.Args[0]
   2027 		v_1 := v.Args[1]
   2028 		if v_1.Op != OpAMD64MOVQconst {
   2029 			break
   2030 		}
   2031 		c := v_1.AuxInt
   2032 		if !(is32Bit(c)) {
   2033 			break
   2034 		}
   2035 		v.reset(OpAMD64CMPQconst)
   2036 		v.AuxInt = c
   2037 		v.AddArg(x)
   2038 		return true
   2039 	}
   2040 	// match: (CMPQ (MOVQconst [c]) x)
   2041 	// cond: is32Bit(c)
   2042 	// result: (InvertFlags (CMPQconst x [c]))
   2043 	for {
   2044 		v_0 := v.Args[0]
   2045 		if v_0.Op != OpAMD64MOVQconst {
   2046 			break
   2047 		}
   2048 		c := v_0.AuxInt
   2049 		x := v.Args[1]
   2050 		if !(is32Bit(c)) {
   2051 			break
   2052 		}
   2053 		v.reset(OpAMD64InvertFlags)
   2054 		v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
   2055 		v0.AuxInt = c
   2056 		v0.AddArg(x)
   2057 		v.AddArg(v0)
   2058 		return true
   2059 	}
   2060 	return false
   2061 }
   2062 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
   2063 	b := v.Block
   2064 	_ = b
   2065 	// match: (CMPQconst (MOVQconst [x]) [y])
   2066 	// cond: x==y
   2067 	// result: (FlagEQ)
   2068 	for {
   2069 		y := v.AuxInt
   2070 		v_0 := v.Args[0]
   2071 		if v_0.Op != OpAMD64MOVQconst {
   2072 			break
   2073 		}
   2074 		x := v_0.AuxInt
   2075 		if !(x == y) {
   2076 			break
   2077 		}
   2078 		v.reset(OpAMD64FlagEQ)
   2079 		return true
   2080 	}
   2081 	// match: (CMPQconst (MOVQconst [x]) [y])
   2082 	// cond: x<y && uint64(x)<uint64(y)
   2083 	// result: (FlagLT_ULT)
   2084 	for {
   2085 		y := v.AuxInt
   2086 		v_0 := v.Args[0]
   2087 		if v_0.Op != OpAMD64MOVQconst {
   2088 			break
   2089 		}
   2090 		x := v_0.AuxInt
   2091 		if !(x < y && uint64(x) < uint64(y)) {
   2092 			break
   2093 		}
   2094 		v.reset(OpAMD64FlagLT_ULT)
   2095 		return true
   2096 	}
   2097 	// match: (CMPQconst (MOVQconst [x]) [y])
   2098 	// cond: x<y && uint64(x)>uint64(y)
   2099 	// result: (FlagLT_UGT)
   2100 	for {
   2101 		y := v.AuxInt
   2102 		v_0 := v.Args[0]
   2103 		if v_0.Op != OpAMD64MOVQconst {
   2104 			break
   2105 		}
   2106 		x := v_0.AuxInt
   2107 		if !(x < y && uint64(x) > uint64(y)) {
   2108 			break
   2109 		}
   2110 		v.reset(OpAMD64FlagLT_UGT)
   2111 		return true
   2112 	}
   2113 	// match: (CMPQconst (MOVQconst [x]) [y])
   2114 	// cond: x>y && uint64(x)<uint64(y)
   2115 	// result: (FlagGT_ULT)
   2116 	for {
   2117 		y := v.AuxInt
   2118 		v_0 := v.Args[0]
   2119 		if v_0.Op != OpAMD64MOVQconst {
   2120 			break
   2121 		}
   2122 		x := v_0.AuxInt
   2123 		if !(x > y && uint64(x) < uint64(y)) {
   2124 			break
   2125 		}
   2126 		v.reset(OpAMD64FlagGT_ULT)
   2127 		return true
   2128 	}
   2129 	// match: (CMPQconst (MOVQconst [x]) [y])
   2130 	// cond: x>y && uint64(x)>uint64(y)
   2131 	// result: (FlagGT_UGT)
   2132 	for {
   2133 		y := v.AuxInt
   2134 		v_0 := v.Args[0]
   2135 		if v_0.Op != OpAMD64MOVQconst {
   2136 			break
   2137 		}
   2138 		x := v_0.AuxInt
   2139 		if !(x > y && uint64(x) > uint64(y)) {
   2140 			break
   2141 		}
   2142 		v.reset(OpAMD64FlagGT_UGT)
   2143 		return true
   2144 	}
   2145 	// match: (CMPQconst (MOVBQZX _) [c])
   2146 	// cond: 0xFF < c
   2147 	// result: (FlagLT_ULT)
   2148 	for {
   2149 		c := v.AuxInt
   2150 		v_0 := v.Args[0]
   2151 		if v_0.Op != OpAMD64MOVBQZX {
   2152 			break
   2153 		}
   2154 		if !(0xFF < c) {
   2155 			break
   2156 		}
   2157 		v.reset(OpAMD64FlagLT_ULT)
   2158 		return true
   2159 	}
   2160 	// match: (CMPQconst (MOVWQZX _) [c])
   2161 	// cond: 0xFFFF < c
   2162 	// result: (FlagLT_ULT)
   2163 	for {
   2164 		c := v.AuxInt
   2165 		v_0 := v.Args[0]
   2166 		if v_0.Op != OpAMD64MOVWQZX {
   2167 			break
   2168 		}
   2169 		if !(0xFFFF < c) {
   2170 			break
   2171 		}
   2172 		v.reset(OpAMD64FlagLT_ULT)
   2173 		return true
   2174 	}
   2175 	// match: (CMPQconst (MOVLQZX _) [c])
   2176 	// cond: 0xFFFFFFFF < c
   2177 	// result: (FlagLT_ULT)
   2178 	for {
   2179 		c := v.AuxInt
   2180 		v_0 := v.Args[0]
   2181 		if v_0.Op != OpAMD64MOVLQZX {
   2182 			break
   2183 		}
   2184 		if !(0xFFFFFFFF < c) {
   2185 			break
   2186 		}
   2187 		v.reset(OpAMD64FlagLT_ULT)
   2188 		return true
   2189 	}
   2190 	// match: (CMPQconst (SHRQconst _ [c]) [n])
   2191 	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
   2192 	// result: (FlagLT_ULT)
   2193 	for {
   2194 		n := v.AuxInt
   2195 		v_0 := v.Args[0]
   2196 		if v_0.Op != OpAMD64SHRQconst {
   2197 			break
   2198 		}
   2199 		c := v_0.AuxInt
   2200 		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
   2201 			break
   2202 		}
   2203 		v.reset(OpAMD64FlagLT_ULT)
   2204 		return true
   2205 	}
   2206 	// match: (CMPQconst (ANDQconst _ [m]) [n])
   2207 	// cond: 0 <= m && m < n
   2208 	// result: (FlagLT_ULT)
   2209 	for {
   2210 		n := v.AuxInt
   2211 		v_0 := v.Args[0]
   2212 		if v_0.Op != OpAMD64ANDQconst {
   2213 			break
   2214 		}
   2215 		m := v_0.AuxInt
   2216 		if !(0 <= m && m < n) {
   2217 			break
   2218 		}
   2219 		v.reset(OpAMD64FlagLT_ULT)
   2220 		return true
   2221 	}
   2222 	// match: (CMPQconst (ANDQ x y) [0])
   2223 	// cond:
   2224 	// result: (TESTQ x y)
   2225 	for {
   2226 		if v.AuxInt != 0 {
   2227 			break
   2228 		}
   2229 		v_0 := v.Args[0]
   2230 		if v_0.Op != OpAMD64ANDQ {
   2231 			break
   2232 		}
   2233 		x := v_0.Args[0]
   2234 		y := v_0.Args[1]
   2235 		v.reset(OpAMD64TESTQ)
   2236 		v.AddArg(x)
   2237 		v.AddArg(y)
   2238 		return true
   2239 	}
   2240 	// match: (CMPQconst (ANDQconst [c] x) [0])
   2241 	// cond:
   2242 	// result: (TESTQconst [c] x)
   2243 	for {
   2244 		if v.AuxInt != 0 {
   2245 			break
   2246 		}
   2247 		v_0 := v.Args[0]
   2248 		if v_0.Op != OpAMD64ANDQconst {
   2249 			break
   2250 		}
   2251 		c := v_0.AuxInt
   2252 		x := v_0.Args[0]
   2253 		v.reset(OpAMD64TESTQconst)
   2254 		v.AuxInt = c
   2255 		v.AddArg(x)
   2256 		return true
   2257 	}
   2258 	// match: (CMPQconst x [0])
   2259 	// cond:
   2260 	// result: (TESTQ x x)
   2261 	for {
   2262 		if v.AuxInt != 0 {
   2263 			break
   2264 		}
   2265 		x := v.Args[0]
   2266 		v.reset(OpAMD64TESTQ)
   2267 		v.AddArg(x)
   2268 		v.AddArg(x)
   2269 		return true
   2270 	}
   2271 	return false
   2272 }
   2273 func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
   2274 	b := v.Block
   2275 	_ = b
   2276 	// match: (CMPW x (MOVLconst [c]))
   2277 	// cond:
   2278 	// result: (CMPWconst x [int64(int16(c))])
   2279 	for {
   2280 		x := v.Args[0]
   2281 		v_1 := v.Args[1]
   2282 		if v_1.Op != OpAMD64MOVLconst {
   2283 			break
   2284 		}
   2285 		c := v_1.AuxInt
   2286 		v.reset(OpAMD64CMPWconst)
   2287 		v.AuxInt = int64(int16(c))
   2288 		v.AddArg(x)
   2289 		return true
   2290 	}
   2291 	// match: (CMPW (MOVLconst [c]) x)
   2292 	// cond:
   2293 	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
   2294 	for {
   2295 		v_0 := v.Args[0]
   2296 		if v_0.Op != OpAMD64MOVLconst {
   2297 			break
   2298 		}
   2299 		c := v_0.AuxInt
   2300 		x := v.Args[1]
   2301 		v.reset(OpAMD64InvertFlags)
   2302 		v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
   2303 		v0.AuxInt = int64(int16(c))
   2304 		v0.AddArg(x)
   2305 		v.AddArg(v0)
   2306 		return true
   2307 	}
   2308 	return false
   2309 }
   2310 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
   2311 	b := v.Block
   2312 	_ = b
   2313 	// match: (CMPWconst (MOVLconst [x]) [y])
   2314 	// cond: int16(x)==int16(y)
   2315 	// result: (FlagEQ)
   2316 	for {
   2317 		y := v.AuxInt
   2318 		v_0 := v.Args[0]
   2319 		if v_0.Op != OpAMD64MOVLconst {
   2320 			break
   2321 		}
   2322 		x := v_0.AuxInt
   2323 		if !(int16(x) == int16(y)) {
   2324 			break
   2325 		}
   2326 		v.reset(OpAMD64FlagEQ)
   2327 		return true
   2328 	}
   2329 	// match: (CMPWconst (MOVLconst [x]) [y])
   2330 	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
   2331 	// result: (FlagLT_ULT)
   2332 	for {
   2333 		y := v.AuxInt
   2334 		v_0 := v.Args[0]
   2335 		if v_0.Op != OpAMD64MOVLconst {
   2336 			break
   2337 		}
   2338 		x := v_0.AuxInt
   2339 		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
   2340 			break
   2341 		}
   2342 		v.reset(OpAMD64FlagLT_ULT)
   2343 		return true
   2344 	}
   2345 	// match: (CMPWconst (MOVLconst [x]) [y])
   2346 	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
   2347 	// result: (FlagLT_UGT)
   2348 	for {
   2349 		y := v.AuxInt
   2350 		v_0 := v.Args[0]
   2351 		if v_0.Op != OpAMD64MOVLconst {
   2352 			break
   2353 		}
   2354 		x := v_0.AuxInt
   2355 		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
   2356 			break
   2357 		}
   2358 		v.reset(OpAMD64FlagLT_UGT)
   2359 		return true
   2360 	}
   2361 	// match: (CMPWconst (MOVLconst [x]) [y])
   2362 	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
   2363 	// result: (FlagGT_ULT)
   2364 	for {
   2365 		y := v.AuxInt
   2366 		v_0 := v.Args[0]
   2367 		if v_0.Op != OpAMD64MOVLconst {
   2368 			break
   2369 		}
   2370 		x := v_0.AuxInt
   2371 		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
   2372 			break
   2373 		}
   2374 		v.reset(OpAMD64FlagGT_ULT)
   2375 		return true
   2376 	}
   2377 	// match: (CMPWconst (MOVLconst [x]) [y])
   2378 	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
   2379 	// result: (FlagGT_UGT)
   2380 	for {
   2381 		y := v.AuxInt
   2382 		v_0 := v.Args[0]
   2383 		if v_0.Op != OpAMD64MOVLconst {
   2384 			break
   2385 		}
   2386 		x := v_0.AuxInt
   2387 		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
   2388 			break
   2389 		}
   2390 		v.reset(OpAMD64FlagGT_UGT)
   2391 		return true
   2392 	}
   2393 	// match: (CMPWconst (ANDLconst _ [m]) [n])
   2394 	// cond: 0 <= int16(m) && int16(m) < int16(n)
   2395 	// result: (FlagLT_ULT)
   2396 	for {
   2397 		n := v.AuxInt
   2398 		v_0 := v.Args[0]
   2399 		if v_0.Op != OpAMD64ANDLconst {
   2400 			break
   2401 		}
   2402 		m := v_0.AuxInt
   2403 		if !(0 <= int16(m) && int16(m) < int16(n)) {
   2404 			break
   2405 		}
   2406 		v.reset(OpAMD64FlagLT_ULT)
   2407 		return true
   2408 	}
   2409 	// match: (CMPWconst (ANDL x y) [0])
   2410 	// cond:
   2411 	// result: (TESTW x y)
   2412 	for {
   2413 		if v.AuxInt != 0 {
   2414 			break
   2415 		}
   2416 		v_0 := v.Args[0]
   2417 		if v_0.Op != OpAMD64ANDL {
   2418 			break
   2419 		}
   2420 		x := v_0.Args[0]
   2421 		y := v_0.Args[1]
   2422 		v.reset(OpAMD64TESTW)
   2423 		v.AddArg(x)
   2424 		v.AddArg(y)
   2425 		return true
   2426 	}
   2427 	// match: (CMPWconst (ANDLconst [c] x) [0])
   2428 	// cond:
   2429 	// result: (TESTWconst [int64(int16(c))] x)
   2430 	for {
   2431 		if v.AuxInt != 0 {
   2432 			break
   2433 		}
   2434 		v_0 := v.Args[0]
   2435 		if v_0.Op != OpAMD64ANDLconst {
   2436 			break
   2437 		}
   2438 		c := v_0.AuxInt
   2439 		x := v_0.Args[0]
   2440 		v.reset(OpAMD64TESTWconst)
   2441 		v.AuxInt = int64(int16(c))
   2442 		v.AddArg(x)
   2443 		return true
   2444 	}
   2445 	// match: (CMPWconst x [0])
   2446 	// cond:
   2447 	// result: (TESTW x x)
   2448 	for {
   2449 		if v.AuxInt != 0 {
   2450 			break
   2451 		}
   2452 		x := v.Args[0]
   2453 		v.reset(OpAMD64TESTW)
   2454 		v.AddArg(x)
   2455 		v.AddArg(x)
   2456 		return true
   2457 	}
   2458 	return false
   2459 }
   2460 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value, config *Config) bool {
   2461 	b := v.Block
   2462 	_ = b
   2463 	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
   2464 	// cond: is32Bit(off1+off2)
   2465 	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
   2466 	for {
   2467 		off1 := v.AuxInt
   2468 		sym := v.Aux
   2469 		v_0 := v.Args[0]
   2470 		if v_0.Op != OpAMD64ADDQconst {
   2471 			break
   2472 		}
   2473 		off2 := v_0.AuxInt
   2474 		ptr := v_0.Args[0]
   2475 		old := v.Args[1]
   2476 		new_ := v.Args[2]
   2477 		mem := v.Args[3]
   2478 		if !(is32Bit(off1 + off2)) {
   2479 			break
   2480 		}
   2481 		v.reset(OpAMD64CMPXCHGLlock)
   2482 		v.AuxInt = off1 + off2
   2483 		v.Aux = sym
   2484 		v.AddArg(ptr)
   2485 		v.AddArg(old)
   2486 		v.AddArg(new_)
   2487 		v.AddArg(mem)
   2488 		return true
   2489 	}
   2490 	return false
   2491 }
   2492 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value, config *Config) bool {
   2493 	b := v.Block
   2494 	_ = b
   2495 	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
   2496 	// cond: is32Bit(off1+off2)
   2497 	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
   2498 	for {
   2499 		off1 := v.AuxInt
   2500 		sym := v.Aux
   2501 		v_0 := v.Args[0]
   2502 		if v_0.Op != OpAMD64ADDQconst {
   2503 			break
   2504 		}
   2505 		off2 := v_0.AuxInt
   2506 		ptr := v_0.Args[0]
   2507 		old := v.Args[1]
   2508 		new_ := v.Args[2]
   2509 		mem := v.Args[3]
   2510 		if !(is32Bit(off1 + off2)) {
   2511 			break
   2512 		}
   2513 		v.reset(OpAMD64CMPXCHGQlock)
   2514 		v.AuxInt = off1 + off2
   2515 		v.Aux = sym
   2516 		v.AddArg(ptr)
   2517 		v.AddArg(old)
   2518 		v.AddArg(new_)
   2519 		v.AddArg(mem)
   2520 		return true
   2521 	}
   2522 	return false
   2523 }
   2524 func rewriteValueAMD64_OpAMD64LEAL(v *Value, config *Config) bool {
   2525 	b := v.Block
   2526 	_ = b
   2527 	// match: (LEAL [c] {s} (ADDLconst [d] x))
   2528 	// cond: is32Bit(c+d)
   2529 	// result: (LEAL [c+d] {s} x)
   2530 	for {
   2531 		c := v.AuxInt
   2532 		s := v.Aux
   2533 		v_0 := v.Args[0]
   2534 		if v_0.Op != OpAMD64ADDLconst {
   2535 			break
   2536 		}
   2537 		d := v_0.AuxInt
   2538 		x := v_0.Args[0]
   2539 		if !(is32Bit(c + d)) {
   2540 			break
   2541 		}
   2542 		v.reset(OpAMD64LEAL)
   2543 		v.AuxInt = c + d
   2544 		v.Aux = s
   2545 		v.AddArg(x)
   2546 		return true
   2547 	}
   2548 	return false
   2549 }
   2550 func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
   2551 	b := v.Block
   2552 	_ = b
   2553 	// match: (LEAQ [c] {s} (ADDQconst [d] x))
   2554 	// cond: is32Bit(c+d)
   2555 	// result: (LEAQ [c+d] {s} x)
   2556 	for {
   2557 		c := v.AuxInt
   2558 		s := v.Aux
   2559 		v_0 := v.Args[0]
   2560 		if v_0.Op != OpAMD64ADDQconst {
   2561 			break
   2562 		}
   2563 		d := v_0.AuxInt
   2564 		x := v_0.Args[0]
   2565 		if !(is32Bit(c + d)) {
   2566 			break
   2567 		}
   2568 		v.reset(OpAMD64LEAQ)
   2569 		v.AuxInt = c + d
   2570 		v.Aux = s
   2571 		v.AddArg(x)
   2572 		return true
   2573 	}
   2574 	// match: (LEAQ [c] {s} (ADDQ x y))
   2575 	// cond: x.Op != OpSB && y.Op != OpSB
   2576 	// result: (LEAQ1 [c] {s} x y)
   2577 	for {
   2578 		c := v.AuxInt
   2579 		s := v.Aux
   2580 		v_0 := v.Args[0]
   2581 		if v_0.Op != OpAMD64ADDQ {
   2582 			break
   2583 		}
   2584 		x := v_0.Args[0]
   2585 		y := v_0.Args[1]
   2586 		if !(x.Op != OpSB && y.Op != OpSB) {
   2587 			break
   2588 		}
   2589 		v.reset(OpAMD64LEAQ1)
   2590 		v.AuxInt = c
   2591 		v.Aux = s
   2592 		v.AddArg(x)
   2593 		v.AddArg(y)
   2594 		return true
   2595 	}
   2596 	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
   2597 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   2598 	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
   2599 	for {
   2600 		off1 := v.AuxInt
   2601 		sym1 := v.Aux
   2602 		v_0 := v.Args[0]
   2603 		if v_0.Op != OpAMD64LEAQ {
   2604 			break
   2605 		}
   2606 		off2 := v_0.AuxInt
   2607 		sym2 := v_0.Aux
   2608 		x := v_0.Args[0]
   2609 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   2610 			break
   2611 		}
   2612 		v.reset(OpAMD64LEAQ)
   2613 		v.AuxInt = off1 + off2
   2614 		v.Aux = mergeSym(sym1, sym2)
   2615 		v.AddArg(x)
   2616 		return true
   2617 	}
   2618 	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
   2619 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   2620 	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
   2621 	for {
   2622 		off1 := v.AuxInt
   2623 		sym1 := v.Aux
   2624 		v_0 := v.Args[0]
   2625 		if v_0.Op != OpAMD64LEAQ1 {
   2626 			break
   2627 		}
   2628 		off2 := v_0.AuxInt
   2629 		sym2 := v_0.Aux
   2630 		x := v_0.Args[0]
   2631 		y := v_0.Args[1]
   2632 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   2633 			break
   2634 		}
   2635 		v.reset(OpAMD64LEAQ1)
   2636 		v.AuxInt = off1 + off2
   2637 		v.Aux = mergeSym(sym1, sym2)
   2638 		v.AddArg(x)
   2639 		v.AddArg(y)
   2640 		return true
   2641 	}
   2642 	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
   2643 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   2644 	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
   2645 	for {
   2646 		off1 := v.AuxInt
   2647 		sym1 := v.Aux
   2648 		v_0 := v.Args[0]
   2649 		if v_0.Op != OpAMD64LEAQ2 {
   2650 			break
   2651 		}
   2652 		off2 := v_0.AuxInt
   2653 		sym2 := v_0.Aux
   2654 		x := v_0.Args[0]
   2655 		y := v_0.Args[1]
   2656 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   2657 			break
   2658 		}
   2659 		v.reset(OpAMD64LEAQ2)
   2660 		v.AuxInt = off1 + off2
   2661 		v.Aux = mergeSym(sym1, sym2)
   2662 		v.AddArg(x)
   2663 		v.AddArg(y)
   2664 		return true
   2665 	}
   2666 	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
   2667 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   2668 	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
   2669 	for {
   2670 		off1 := v.AuxInt
   2671 		sym1 := v.Aux
   2672 		v_0 := v.Args[0]
   2673 		if v_0.Op != OpAMD64LEAQ4 {
   2674 			break
   2675 		}
   2676 		off2 := v_0.AuxInt
   2677 		sym2 := v_0.Aux
   2678 		x := v_0.Args[0]
   2679 		y := v_0.Args[1]
   2680 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   2681 			break
   2682 		}
   2683 		v.reset(OpAMD64LEAQ4)
   2684 		v.AuxInt = off1 + off2
   2685 		v.Aux = mergeSym(sym1, sym2)
   2686 		v.AddArg(x)
   2687 		v.AddArg(y)
   2688 		return true
   2689 	}
   2690 	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
   2691 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   2692 	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
   2693 	for {
   2694 		off1 := v.AuxInt
   2695 		sym1 := v.Aux
   2696 		v_0 := v.Args[0]
   2697 		if v_0.Op != OpAMD64LEAQ8 {
   2698 			break
   2699 		}
   2700 		off2 := v_0.AuxInt
   2701 		sym2 := v_0.Aux
   2702 		x := v_0.Args[0]
   2703 		y := v_0.Args[1]
   2704 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   2705 			break
   2706 		}
   2707 		v.reset(OpAMD64LEAQ8)
   2708 		v.AuxInt = off1 + off2
   2709 		v.Aux = mergeSym(sym1, sym2)
   2710 		v.AddArg(x)
   2711 		v.AddArg(y)
   2712 		return true
   2713 	}
   2714 	return false
   2715 }
   2716 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
   2717 	b := v.Block
   2718 	_ = b
   2719 	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
   2720 	// cond: is32Bit(c+d)   && x.Op != OpSB
   2721 	// result: (LEAQ1 [c+d] {s} x y)
   2722 	for {
   2723 		c := v.AuxInt
   2724 		s := v.Aux
   2725 		v_0 := v.Args[0]
   2726 		if v_0.Op != OpAMD64ADDQconst {
   2727 			break
   2728 		}
   2729 		d := v_0.AuxInt
   2730 		x := v_0.Args[0]
   2731 		y := v.Args[1]
   2732 		if !(is32Bit(c+d) && x.Op != OpSB) {
   2733 			break
   2734 		}
   2735 		v.reset(OpAMD64LEAQ1)
   2736 		v.AuxInt = c + d
   2737 		v.Aux = s
   2738 		v.AddArg(x)
   2739 		v.AddArg(y)
   2740 		return true
   2741 	}
   2742 	// match: (LEAQ1 [c] {s} x (ADDQconst [d] y))
   2743 	// cond: is32Bit(c+d)   && y.Op != OpSB
   2744 	// result: (LEAQ1 [c+d] {s} x y)
   2745 	for {
   2746 		c := v.AuxInt
   2747 		s := v.Aux
   2748 		x := v.Args[0]
   2749 		v_1 := v.Args[1]
   2750 		if v_1.Op != OpAMD64ADDQconst {
   2751 			break
   2752 		}
   2753 		d := v_1.AuxInt
   2754 		y := v_1.Args[0]
   2755 		if !(is32Bit(c+d) && y.Op != OpSB) {
   2756 			break
   2757 		}
   2758 		v.reset(OpAMD64LEAQ1)
   2759 		v.AuxInt = c + d
   2760 		v.Aux = s
   2761 		v.AddArg(x)
   2762 		v.AddArg(y)
   2763 		return true
   2764 	}
   2765 	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
   2766 	// cond:
   2767 	// result: (LEAQ2 [c] {s} x y)
   2768 	for {
   2769 		c := v.AuxInt
   2770 		s := v.Aux
   2771 		x := v.Args[0]
   2772 		v_1 := v.Args[1]
   2773 		if v_1.Op != OpAMD64SHLQconst {
   2774 			break
   2775 		}
   2776 		if v_1.AuxInt != 1 {
   2777 			break
   2778 		}
   2779 		y := v_1.Args[0]
   2780 		v.reset(OpAMD64LEAQ2)
   2781 		v.AuxInt = c
   2782 		v.Aux = s
   2783 		v.AddArg(x)
   2784 		v.AddArg(y)
   2785 		return true
   2786 	}
   2787 	// match: (LEAQ1 [c] {s} (SHLQconst [1] x) y)
   2788 	// cond:
   2789 	// result: (LEAQ2 [c] {s} y x)
   2790 	for {
   2791 		c := v.AuxInt
   2792 		s := v.Aux
   2793 		v_0 := v.Args[0]
   2794 		if v_0.Op != OpAMD64SHLQconst {
   2795 			break
   2796 		}
   2797 		if v_0.AuxInt != 1 {
   2798 			break
   2799 		}
   2800 		x := v_0.Args[0]
   2801 		y := v.Args[1]
   2802 		v.reset(OpAMD64LEAQ2)
   2803 		v.AuxInt = c
   2804 		v.Aux = s
   2805 		v.AddArg(y)
   2806 		v.AddArg(x)
   2807 		return true
   2808 	}
   2809 	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
   2810 	// cond:
   2811 	// result: (LEAQ4 [c] {s} x y)
   2812 	for {
   2813 		c := v.AuxInt
   2814 		s := v.Aux
   2815 		x := v.Args[0]
   2816 		v_1 := v.Args[1]
   2817 		if v_1.Op != OpAMD64SHLQconst {
   2818 			break
   2819 		}
   2820 		if v_1.AuxInt != 2 {
   2821 			break
   2822 		}
   2823 		y := v_1.Args[0]
   2824 		v.reset(OpAMD64LEAQ4)
   2825 		v.AuxInt = c
   2826 		v.Aux = s
   2827 		v.AddArg(x)
   2828 		v.AddArg(y)
   2829 		return true
   2830 	}
   2831 	// match: (LEAQ1 [c] {s} (SHLQconst [2] x) y)
   2832 	// cond:
   2833 	// result: (LEAQ4 [c] {s} y x)
   2834 	for {
   2835 		c := v.AuxInt
   2836 		s := v.Aux
   2837 		v_0 := v.Args[0]
   2838 		if v_0.Op != OpAMD64SHLQconst {
   2839 			break
   2840 		}
   2841 		if v_0.AuxInt != 2 {
   2842 			break
   2843 		}
   2844 		x := v_0.Args[0]
   2845 		y := v.Args[1]
   2846 		v.reset(OpAMD64LEAQ4)
   2847 		v.AuxInt = c
   2848 		v.Aux = s
   2849 		v.AddArg(y)
   2850 		v.AddArg(x)
   2851 		return true
   2852 	}
   2853 	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
   2854 	// cond:
   2855 	// result: (LEAQ8 [c] {s} x y)
   2856 	for {
   2857 		c := v.AuxInt
   2858 		s := v.Aux
   2859 		x := v.Args[0]
   2860 		v_1 := v.Args[1]
   2861 		if v_1.Op != OpAMD64SHLQconst {
   2862 			break
   2863 		}
   2864 		if v_1.AuxInt != 3 {
   2865 			break
   2866 		}
   2867 		y := v_1.Args[0]
   2868 		v.reset(OpAMD64LEAQ8)
   2869 		v.AuxInt = c
   2870 		v.Aux = s
   2871 		v.AddArg(x)
   2872 		v.AddArg(y)
   2873 		return true
   2874 	}
   2875 	// match: (LEAQ1 [c] {s} (SHLQconst [3] x) y)
   2876 	// cond:
   2877 	// result: (LEAQ8 [c] {s} y x)
   2878 	for {
   2879 		c := v.AuxInt
   2880 		s := v.Aux
   2881 		v_0 := v.Args[0]
   2882 		if v_0.Op != OpAMD64SHLQconst {
   2883 			break
   2884 		}
   2885 		if v_0.AuxInt != 3 {
   2886 			break
   2887 		}
   2888 		x := v_0.Args[0]
   2889 		y := v.Args[1]
   2890 		v.reset(OpAMD64LEAQ8)
   2891 		v.AuxInt = c
   2892 		v.Aux = s
   2893 		v.AddArg(y)
   2894 		v.AddArg(x)
   2895 		return true
   2896 	}
   2897 	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
   2898 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
   2899 	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
   2900 	for {
   2901 		off1 := v.AuxInt
   2902 		sym1 := v.Aux
   2903 		v_0 := v.Args[0]
   2904 		if v_0.Op != OpAMD64LEAQ {
   2905 			break
   2906 		}
   2907 		off2 := v_0.AuxInt
   2908 		sym2 := v_0.Aux
   2909 		x := v_0.Args[0]
   2910 		y := v.Args[1]
   2911 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
   2912 			break
   2913 		}
   2914 		v.reset(OpAMD64LEAQ1)
   2915 		v.AuxInt = off1 + off2
   2916 		v.Aux = mergeSym(sym1, sym2)
   2917 		v.AddArg(x)
   2918 		v.AddArg(y)
   2919 		return true
   2920 	}
   2921 	// match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y))
   2922 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
   2923 	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
   2924 	for {
   2925 		off1 := v.AuxInt
   2926 		sym1 := v.Aux
   2927 		x := v.Args[0]
   2928 		v_1 := v.Args[1]
   2929 		if v_1.Op != OpAMD64LEAQ {
   2930 			break
   2931 		}
   2932 		off2 := v_1.AuxInt
   2933 		sym2 := v_1.Aux
   2934 		y := v_1.Args[0]
   2935 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
   2936 			break
   2937 		}
   2938 		v.reset(OpAMD64LEAQ1)
   2939 		v.AuxInt = off1 + off2
   2940 		v.Aux = mergeSym(sym1, sym2)
   2941 		v.AddArg(x)
   2942 		v.AddArg(y)
   2943 		return true
   2944 	}
   2945 	return false
   2946 }
   2947 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
   2948 	b := v.Block
   2949 	_ = b
   2950 	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
   2951 	// cond: is32Bit(c+d)   && x.Op != OpSB
   2952 	// result: (LEAQ2 [c+d] {s} x y)
   2953 	for {
   2954 		c := v.AuxInt
   2955 		s := v.Aux
   2956 		v_0 := v.Args[0]
   2957 		if v_0.Op != OpAMD64ADDQconst {
   2958 			break
   2959 		}
   2960 		d := v_0.AuxInt
   2961 		x := v_0.Args[0]
   2962 		y := v.Args[1]
   2963 		if !(is32Bit(c+d) && x.Op != OpSB) {
   2964 			break
   2965 		}
   2966 		v.reset(OpAMD64LEAQ2)
   2967 		v.AuxInt = c + d
   2968 		v.Aux = s
   2969 		v.AddArg(x)
   2970 		v.AddArg(y)
   2971 		return true
   2972 	}
   2973 	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
   2974 	// cond: is32Bit(c+2*d) && y.Op != OpSB
   2975 	// result: (LEAQ2 [c+2*d] {s} x y)
   2976 	for {
   2977 		c := v.AuxInt
   2978 		s := v.Aux
   2979 		x := v.Args[0]
   2980 		v_1 := v.Args[1]
   2981 		if v_1.Op != OpAMD64ADDQconst {
   2982 			break
   2983 		}
   2984 		d := v_1.AuxInt
   2985 		y := v_1.Args[0]
   2986 		if !(is32Bit(c+2*d) && y.Op != OpSB) {
   2987 			break
   2988 		}
   2989 		v.reset(OpAMD64LEAQ2)
   2990 		v.AuxInt = c + 2*d
   2991 		v.Aux = s
   2992 		v.AddArg(x)
   2993 		v.AddArg(y)
   2994 		return true
   2995 	}
   2996 	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
   2997 	// cond:
   2998 	// result: (LEAQ4 [c] {s} x y)
   2999 	for {
   3000 		c := v.AuxInt
   3001 		s := v.Aux
   3002 		x := v.Args[0]
   3003 		v_1 := v.Args[1]
   3004 		if v_1.Op != OpAMD64SHLQconst {
   3005 			break
   3006 		}
   3007 		if v_1.AuxInt != 1 {
   3008 			break
   3009 		}
   3010 		y := v_1.Args[0]
   3011 		v.reset(OpAMD64LEAQ4)
   3012 		v.AuxInt = c
   3013 		v.Aux = s
   3014 		v.AddArg(x)
   3015 		v.AddArg(y)
   3016 		return true
   3017 	}
   3018 	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
   3019 	// cond:
   3020 	// result: (LEAQ8 [c] {s} x y)
   3021 	for {
   3022 		c := v.AuxInt
   3023 		s := v.Aux
   3024 		x := v.Args[0]
   3025 		v_1 := v.Args[1]
   3026 		if v_1.Op != OpAMD64SHLQconst {
   3027 			break
   3028 		}
   3029 		if v_1.AuxInt != 2 {
   3030 			break
   3031 		}
   3032 		y := v_1.Args[0]
   3033 		v.reset(OpAMD64LEAQ8)
   3034 		v.AuxInt = c
   3035 		v.Aux = s
   3036 		v.AddArg(x)
   3037 		v.AddArg(y)
   3038 		return true
   3039 	}
   3040 	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
   3041 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
   3042 	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
   3043 	for {
   3044 		off1 := v.AuxInt
   3045 		sym1 := v.Aux
   3046 		v_0 := v.Args[0]
   3047 		if v_0.Op != OpAMD64LEAQ {
   3048 			break
   3049 		}
   3050 		off2 := v_0.AuxInt
   3051 		sym2 := v_0.Aux
   3052 		x := v_0.Args[0]
   3053 		y := v.Args[1]
   3054 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
   3055 			break
   3056 		}
   3057 		v.reset(OpAMD64LEAQ2)
   3058 		v.AuxInt = off1 + off2
   3059 		v.Aux = mergeSym(sym1, sym2)
   3060 		v.AddArg(x)
   3061 		v.AddArg(y)
   3062 		return true
   3063 	}
   3064 	return false
   3065 }
   3066 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
   3067 	b := v.Block
   3068 	_ = b
   3069 	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
   3070 	// cond: is32Bit(c+d)   && x.Op != OpSB
   3071 	// result: (LEAQ4 [c+d] {s} x y)
   3072 	for {
   3073 		c := v.AuxInt
   3074 		s := v.Aux
   3075 		v_0 := v.Args[0]
   3076 		if v_0.Op != OpAMD64ADDQconst {
   3077 			break
   3078 		}
   3079 		d := v_0.AuxInt
   3080 		x := v_0.Args[0]
   3081 		y := v.Args[1]
   3082 		if !(is32Bit(c+d) && x.Op != OpSB) {
   3083 			break
   3084 		}
   3085 		v.reset(OpAMD64LEAQ4)
   3086 		v.AuxInt = c + d
   3087 		v.Aux = s
   3088 		v.AddArg(x)
   3089 		v.AddArg(y)
   3090 		return true
   3091 	}
   3092 	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
   3093 	// cond: is32Bit(c+4*d) && y.Op != OpSB
   3094 	// result: (LEAQ4 [c+4*d] {s} x y)
   3095 	for {
   3096 		c := v.AuxInt
   3097 		s := v.Aux
   3098 		x := v.Args[0]
   3099 		v_1 := v.Args[1]
   3100 		if v_1.Op != OpAMD64ADDQconst {
   3101 			break
   3102 		}
   3103 		d := v_1.AuxInt
   3104 		y := v_1.Args[0]
   3105 		if !(is32Bit(c+4*d) && y.Op != OpSB) {
   3106 			break
   3107 		}
   3108 		v.reset(OpAMD64LEAQ4)
   3109 		v.AuxInt = c + 4*d
   3110 		v.Aux = s
   3111 		v.AddArg(x)
   3112 		v.AddArg(y)
   3113 		return true
   3114 	}
   3115 	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
   3116 	// cond:
   3117 	// result: (LEAQ8 [c] {s} x y)
   3118 	for {
   3119 		c := v.AuxInt
   3120 		s := v.Aux
   3121 		x := v.Args[0]
   3122 		v_1 := v.Args[1]
   3123 		if v_1.Op != OpAMD64SHLQconst {
   3124 			break
   3125 		}
   3126 		if v_1.AuxInt != 1 {
   3127 			break
   3128 		}
   3129 		y := v_1.Args[0]
   3130 		v.reset(OpAMD64LEAQ8)
   3131 		v.AuxInt = c
   3132 		v.Aux = s
   3133 		v.AddArg(x)
   3134 		v.AddArg(y)
   3135 		return true
   3136 	}
   3137 	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
   3138 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
   3139 	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
   3140 	for {
   3141 		off1 := v.AuxInt
   3142 		sym1 := v.Aux
   3143 		v_0 := v.Args[0]
   3144 		if v_0.Op != OpAMD64LEAQ {
   3145 			break
   3146 		}
   3147 		off2 := v_0.AuxInt
   3148 		sym2 := v_0.Aux
   3149 		x := v_0.Args[0]
   3150 		y := v.Args[1]
   3151 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
   3152 			break
   3153 		}
   3154 		v.reset(OpAMD64LEAQ4)
   3155 		v.AuxInt = off1 + off2
   3156 		v.Aux = mergeSym(sym1, sym2)
   3157 		v.AddArg(x)
   3158 		v.AddArg(y)
   3159 		return true
   3160 	}
   3161 	return false
   3162 }
   3163 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
   3164 	b := v.Block
   3165 	_ = b
   3166 	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
   3167 	// cond: is32Bit(c+d)   && x.Op != OpSB
   3168 	// result: (LEAQ8 [c+d] {s} x y)
   3169 	for {
   3170 		c := v.AuxInt
   3171 		s := v.Aux
   3172 		v_0 := v.Args[0]
   3173 		if v_0.Op != OpAMD64ADDQconst {
   3174 			break
   3175 		}
   3176 		d := v_0.AuxInt
   3177 		x := v_0.Args[0]
   3178 		y := v.Args[1]
   3179 		if !(is32Bit(c+d) && x.Op != OpSB) {
   3180 			break
   3181 		}
   3182 		v.reset(OpAMD64LEAQ8)
   3183 		v.AuxInt = c + d
   3184 		v.Aux = s
   3185 		v.AddArg(x)
   3186 		v.AddArg(y)
   3187 		return true
   3188 	}
   3189 	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
   3190 	// cond: is32Bit(c+8*d) && y.Op != OpSB
   3191 	// result: (LEAQ8 [c+8*d] {s} x y)
   3192 	for {
   3193 		c := v.AuxInt
   3194 		s := v.Aux
   3195 		x := v.Args[0]
   3196 		v_1 := v.Args[1]
   3197 		if v_1.Op != OpAMD64ADDQconst {
   3198 			break
   3199 		}
   3200 		d := v_1.AuxInt
   3201 		y := v_1.Args[0]
   3202 		if !(is32Bit(c+8*d) && y.Op != OpSB) {
   3203 			break
   3204 		}
   3205 		v.reset(OpAMD64LEAQ8)
   3206 		v.AuxInt = c + 8*d
   3207 		v.Aux = s
   3208 		v.AddArg(x)
   3209 		v.AddArg(y)
   3210 		return true
   3211 	}
   3212 	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
   3213 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
   3214 	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
   3215 	for {
   3216 		off1 := v.AuxInt
   3217 		sym1 := v.Aux
   3218 		v_0 := v.Args[0]
   3219 		if v_0.Op != OpAMD64LEAQ {
   3220 			break
   3221 		}
   3222 		off2 := v_0.AuxInt
   3223 		sym2 := v_0.Aux
   3224 		x := v_0.Args[0]
   3225 		y := v.Args[1]
   3226 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
   3227 			break
   3228 		}
   3229 		v.reset(OpAMD64LEAQ8)
   3230 		v.AuxInt = off1 + off2
   3231 		v.Aux = mergeSym(sym1, sym2)
   3232 		v.AddArg(x)
   3233 		v.AddArg(y)
   3234 		return true
   3235 	}
   3236 	return false
   3237 }
   3238 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
   3239 	b := v.Block
   3240 	_ = b
   3241 	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
   3242 	// cond: x.Uses == 1 && clobber(x)
   3243 	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   3244 	for {
   3245 		x := v.Args[0]
   3246 		if x.Op != OpAMD64MOVBload {
   3247 			break
   3248 		}
   3249 		off := x.AuxInt
   3250 		sym := x.Aux
   3251 		ptr := x.Args[0]
   3252 		mem := x.Args[1]
   3253 		if !(x.Uses == 1 && clobber(x)) {
   3254 			break
   3255 		}
   3256 		b = x.Block
   3257 		v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type)
   3258 		v.reset(OpCopy)
   3259 		v.AddArg(v0)
   3260 		v0.AuxInt = off
   3261 		v0.Aux = sym
   3262 		v0.AddArg(ptr)
   3263 		v0.AddArg(mem)
   3264 		return true
   3265 	}
   3266 	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
   3267 	// cond: x.Uses == 1 && clobber(x)
   3268 	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   3269 	for {
   3270 		x := v.Args[0]
   3271 		if x.Op != OpAMD64MOVWload {
   3272 			break
   3273 		}
   3274 		off := x.AuxInt
   3275 		sym := x.Aux
   3276 		ptr := x.Args[0]
   3277 		mem := x.Args[1]
   3278 		if !(x.Uses == 1 && clobber(x)) {
   3279 			break
   3280 		}
   3281 		b = x.Block
   3282 		v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type)
   3283 		v.reset(OpCopy)
   3284 		v.AddArg(v0)
   3285 		v0.AuxInt = off
   3286 		v0.Aux = sym
   3287 		v0.AddArg(ptr)
   3288 		v0.AddArg(mem)
   3289 		return true
   3290 	}
   3291 	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
   3292 	// cond: x.Uses == 1 && clobber(x)
   3293 	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   3294 	for {
   3295 		x := v.Args[0]
   3296 		if x.Op != OpAMD64MOVLload {
   3297 			break
   3298 		}
   3299 		off := x.AuxInt
   3300 		sym := x.Aux
   3301 		ptr := x.Args[0]
   3302 		mem := x.Args[1]
   3303 		if !(x.Uses == 1 && clobber(x)) {
   3304 			break
   3305 		}
   3306 		b = x.Block
   3307 		v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type)
   3308 		v.reset(OpCopy)
   3309 		v.AddArg(v0)
   3310 		v0.AuxInt = off
   3311 		v0.Aux = sym
   3312 		v0.AddArg(ptr)
   3313 		v0.AddArg(mem)
   3314 		return true
   3315 	}
   3316 	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
   3317 	// cond: x.Uses == 1 && clobber(x)
   3318 	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
   3319 	for {
   3320 		x := v.Args[0]
   3321 		if x.Op != OpAMD64MOVQload {
   3322 			break
   3323 		}
   3324 		off := x.AuxInt
   3325 		sym := x.Aux
   3326 		ptr := x.Args[0]
   3327 		mem := x.Args[1]
   3328 		if !(x.Uses == 1 && clobber(x)) {
   3329 			break
   3330 		}
   3331 		b = x.Block
   3332 		v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type)
   3333 		v.reset(OpCopy)
   3334 		v.AddArg(v0)
   3335 		v0.AuxInt = off
   3336 		v0.Aux = sym
   3337 		v0.AddArg(ptr)
   3338 		v0.AddArg(mem)
   3339 		return true
   3340 	}
   3341 	// match: (MOVBQSX (ANDLconst [c] x))
   3342 	// cond: c & 0x80 == 0
   3343 	// result: (ANDLconst [c & 0x7f] x)
   3344 	for {
   3345 		v_0 := v.Args[0]
   3346 		if v_0.Op != OpAMD64ANDLconst {
   3347 			break
   3348 		}
   3349 		c := v_0.AuxInt
   3350 		x := v_0.Args[0]
   3351 		if !(c&0x80 == 0) {
   3352 			break
   3353 		}
   3354 		v.reset(OpAMD64ANDLconst)
   3355 		v.AuxInt = c & 0x7f
   3356 		v.AddArg(x)
   3357 		return true
   3358 	}
   3359 	return false
   3360 }
   3361 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool {
   3362 	b := v.Block
   3363 	_ = b
   3364 	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
   3365 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   3366 	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   3367 	for {
   3368 		off1 := v.AuxInt
   3369 		sym1 := v.Aux
   3370 		v_0 := v.Args[0]
   3371 		if v_0.Op != OpAMD64LEAQ {
   3372 			break
   3373 		}
   3374 		off2 := v_0.AuxInt
   3375 		sym2 := v_0.Aux
   3376 		base := v_0.Args[0]
   3377 		mem := v.Args[1]
   3378 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   3379 			break
   3380 		}
   3381 		v.reset(OpAMD64MOVBQSXload)
   3382 		v.AuxInt = off1 + off2
   3383 		v.Aux = mergeSym(sym1, sym2)
   3384 		v.AddArg(base)
   3385 		v.AddArg(mem)
   3386 		return true
   3387 	}
   3388 	return false
   3389 }
   3390 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
   3391 	b := v.Block
   3392 	_ = b
   3393 	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
   3394 	// cond: x.Uses == 1 && clobber(x)
   3395 	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   3396 	for {
   3397 		x := v.Args[0]
   3398 		if x.Op != OpAMD64MOVBload {
   3399 			break
   3400 		}
   3401 		off := x.AuxInt
   3402 		sym := x.Aux
   3403 		ptr := x.Args[0]
   3404 		mem := x.Args[1]
   3405 		if !(x.Uses == 1 && clobber(x)) {
   3406 			break
   3407 		}
   3408 		b = x.Block
   3409 		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type)
   3410 		v.reset(OpCopy)
   3411 		v.AddArg(v0)
   3412 		v0.AuxInt = off
   3413 		v0.Aux = sym
   3414 		v0.AddArg(ptr)
   3415 		v0.AddArg(mem)
   3416 		return true
   3417 	}
   3418 	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
   3419 	// cond: x.Uses == 1 && clobber(x)
   3420 	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   3421 	for {
   3422 		x := v.Args[0]
   3423 		if x.Op != OpAMD64MOVWload {
   3424 			break
   3425 		}
   3426 		off := x.AuxInt
   3427 		sym := x.Aux
   3428 		ptr := x.Args[0]
   3429 		mem := x.Args[1]
   3430 		if !(x.Uses == 1 && clobber(x)) {
   3431 			break
   3432 		}
   3433 		b = x.Block
   3434 		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type)
   3435 		v.reset(OpCopy)
   3436 		v.AddArg(v0)
   3437 		v0.AuxInt = off
   3438 		v0.Aux = sym
   3439 		v0.AddArg(ptr)
   3440 		v0.AddArg(mem)
   3441 		return true
   3442 	}
   3443 	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
   3444 	// cond: x.Uses == 1 && clobber(x)
   3445 	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   3446 	for {
   3447 		x := v.Args[0]
   3448 		if x.Op != OpAMD64MOVLload {
   3449 			break
   3450 		}
   3451 		off := x.AuxInt
   3452 		sym := x.Aux
   3453 		ptr := x.Args[0]
   3454 		mem := x.Args[1]
   3455 		if !(x.Uses == 1 && clobber(x)) {
   3456 			break
   3457 		}
   3458 		b = x.Block
   3459 		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type)
   3460 		v.reset(OpCopy)
   3461 		v.AddArg(v0)
   3462 		v0.AuxInt = off
   3463 		v0.Aux = sym
   3464 		v0.AddArg(ptr)
   3465 		v0.AddArg(mem)
   3466 		return true
   3467 	}
   3468 	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
   3469 	// cond: x.Uses == 1 && clobber(x)
   3470 	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
   3471 	for {
   3472 		x := v.Args[0]
   3473 		if x.Op != OpAMD64MOVQload {
   3474 			break
   3475 		}
   3476 		off := x.AuxInt
   3477 		sym := x.Aux
   3478 		ptr := x.Args[0]
   3479 		mem := x.Args[1]
   3480 		if !(x.Uses == 1 && clobber(x)) {
   3481 			break
   3482 		}
   3483 		b = x.Block
   3484 		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type)
   3485 		v.reset(OpCopy)
   3486 		v.AddArg(v0)
   3487 		v0.AuxInt = off
   3488 		v0.Aux = sym
   3489 		v0.AddArg(ptr)
   3490 		v0.AddArg(mem)
   3491 		return true
   3492 	}
   3493 	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
   3494 	// cond: x.Uses == 1 && clobber(x)
   3495 	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
   3496 	for {
   3497 		x := v.Args[0]
   3498 		if x.Op != OpAMD64MOVBloadidx1 {
   3499 			break
   3500 		}
   3501 		off := x.AuxInt
   3502 		sym := x.Aux
   3503 		ptr := x.Args[0]
   3504 		idx := x.Args[1]
   3505 		mem := x.Args[2]
   3506 		if !(x.Uses == 1 && clobber(x)) {
   3507 			break
   3508 		}
   3509 		b = x.Block
   3510 		v0 := b.NewValue0(v.Line, OpAMD64MOVBloadidx1, v.Type)
   3511 		v.reset(OpCopy)
   3512 		v.AddArg(v0)
   3513 		v0.AuxInt = off
   3514 		v0.Aux = sym
   3515 		v0.AddArg(ptr)
   3516 		v0.AddArg(idx)
   3517 		v0.AddArg(mem)
   3518 		return true
   3519 	}
   3520 	// match: (MOVBQZX (ANDLconst [c] x))
   3521 	// cond:
   3522 	// result: (ANDLconst [c & 0xff] x)
   3523 	for {
   3524 		v_0 := v.Args[0]
   3525 		if v_0.Op != OpAMD64ANDLconst {
   3526 			break
   3527 		}
   3528 		c := v_0.AuxInt
   3529 		x := v_0.Args[0]
   3530 		v.reset(OpAMD64ANDLconst)
   3531 		v.AuxInt = c & 0xff
   3532 		v.AddArg(x)
   3533 		return true
   3534 	}
   3535 	return false
   3536 }
   3537 func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool {
   3538 	b := v.Block
   3539 	_ = b
   3540 	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
   3541 	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
   3542 	// result: x
   3543 	for {
   3544 		off := v.AuxInt
   3545 		sym := v.Aux
   3546 		ptr := v.Args[0]
   3547 		v_1 := v.Args[1]
   3548 		if v_1.Op != OpAMD64MOVBstore {
   3549 			break
   3550 		}
   3551 		off2 := v_1.AuxInt
   3552 		sym2 := v_1.Aux
   3553 		ptr2 := v_1.Args[0]
   3554 		x := v_1.Args[1]
   3555 		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
   3556 			break
   3557 		}
   3558 		v.reset(OpCopy)
   3559 		v.Type = x.Type
   3560 		v.AddArg(x)
   3561 		return true
   3562 	}
   3563 	// match: (MOVBload  [off1] {sym} (ADDQconst [off2] ptr) mem)
   3564 	// cond: is32Bit(off1+off2)
   3565 	// result: (MOVBload  [off1+off2] {sym} ptr mem)
   3566 	for {
   3567 		off1 := v.AuxInt
   3568 		sym := v.Aux
   3569 		v_0 := v.Args[0]
   3570 		if v_0.Op != OpAMD64ADDQconst {
   3571 			break
   3572 		}
   3573 		off2 := v_0.AuxInt
   3574 		ptr := v_0.Args[0]
   3575 		mem := v.Args[1]
   3576 		if !(is32Bit(off1 + off2)) {
   3577 			break
   3578 		}
   3579 		v.reset(OpAMD64MOVBload)
   3580 		v.AuxInt = off1 + off2
   3581 		v.Aux = sym
   3582 		v.AddArg(ptr)
   3583 		v.AddArg(mem)
   3584 		return true
   3585 	}
   3586 	// match: (MOVBload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
   3587 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   3588 	// result: (MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   3589 	for {
   3590 		off1 := v.AuxInt
   3591 		sym1 := v.Aux
   3592 		v_0 := v.Args[0]
   3593 		if v_0.Op != OpAMD64LEAQ {
   3594 			break
   3595 		}
   3596 		off2 := v_0.AuxInt
   3597 		sym2 := v_0.Aux
   3598 		base := v_0.Args[0]
   3599 		mem := v.Args[1]
   3600 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   3601 			break
   3602 		}
   3603 		v.reset(OpAMD64MOVBload)
   3604 		v.AuxInt = off1 + off2
   3605 		v.Aux = mergeSym(sym1, sym2)
   3606 		v.AddArg(base)
   3607 		v.AddArg(mem)
   3608 		return true
   3609 	}
   3610 	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
   3611 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   3612 	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   3613 	for {
   3614 		off1 := v.AuxInt
   3615 		sym1 := v.Aux
   3616 		v_0 := v.Args[0]
   3617 		if v_0.Op != OpAMD64LEAQ1 {
   3618 			break
   3619 		}
   3620 		off2 := v_0.AuxInt
   3621 		sym2 := v_0.Aux
   3622 		ptr := v_0.Args[0]
   3623 		idx := v_0.Args[1]
   3624 		mem := v.Args[1]
   3625 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   3626 			break
   3627 		}
   3628 		v.reset(OpAMD64MOVBloadidx1)
   3629 		v.AuxInt = off1 + off2
   3630 		v.Aux = mergeSym(sym1, sym2)
   3631 		v.AddArg(ptr)
   3632 		v.AddArg(idx)
   3633 		v.AddArg(mem)
   3634 		return true
   3635 	}
   3636 	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
   3637 	// cond: ptr.Op != OpSB
   3638 	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
   3639 	for {
   3640 		off := v.AuxInt
   3641 		sym := v.Aux
   3642 		v_0 := v.Args[0]
   3643 		if v_0.Op != OpAMD64ADDQ {
   3644 			break
   3645 		}
   3646 		ptr := v_0.Args[0]
   3647 		idx := v_0.Args[1]
   3648 		mem := v.Args[1]
   3649 		if !(ptr.Op != OpSB) {
   3650 			break
   3651 		}
   3652 		v.reset(OpAMD64MOVBloadidx1)
   3653 		v.AuxInt = off
   3654 		v.Aux = sym
   3655 		v.AddArg(ptr)
   3656 		v.AddArg(idx)
   3657 		v.AddArg(mem)
   3658 		return true
   3659 	}
   3660 	// match: (MOVBload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
   3661 	// cond: canMergeSym(sym1, sym2)
   3662 	// result: (MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   3663 	for {
   3664 		off1 := v.AuxInt
   3665 		sym1 := v.Aux
   3666 		v_0 := v.Args[0]
   3667 		if v_0.Op != OpAMD64LEAL {
   3668 			break
   3669 		}
   3670 		off2 := v_0.AuxInt
   3671 		sym2 := v_0.Aux
   3672 		base := v_0.Args[0]
   3673 		mem := v.Args[1]
   3674 		if !(canMergeSym(sym1, sym2)) {
   3675 			break
   3676 		}
   3677 		v.reset(OpAMD64MOVBload)
   3678 		v.AuxInt = off1 + off2
   3679 		v.Aux = mergeSym(sym1, sym2)
   3680 		v.AddArg(base)
   3681 		v.AddArg(mem)
   3682 		return true
   3683 	}
   3684 	// match: (MOVBload  [off1] {sym} (ADDLconst [off2] ptr) mem)
   3685 	// cond: is32Bit(off1+off2)
   3686 	// result: (MOVBload  [off1+off2] {sym} ptr mem)
   3687 	for {
   3688 		off1 := v.AuxInt
   3689 		sym := v.Aux
   3690 		v_0 := v.Args[0]
   3691 		if v_0.Op != OpAMD64ADDLconst {
   3692 			break
   3693 		}
   3694 		off2 := v_0.AuxInt
   3695 		ptr := v_0.Args[0]
   3696 		mem := v.Args[1]
   3697 		if !(is32Bit(off1 + off2)) {
   3698 			break
   3699 		}
   3700 		v.reset(OpAMD64MOVBload)
   3701 		v.AuxInt = off1 + off2
   3702 		v.Aux = sym
   3703 		v.AddArg(ptr)
   3704 		v.AddArg(mem)
   3705 		return true
   3706 	}
   3707 	return false
   3708 }
   3709 func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool {
   3710 	b := v.Block
   3711 	_ = b
   3712 	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
   3713 	// cond:
   3714 	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
   3715 	for {
   3716 		c := v.AuxInt
   3717 		sym := v.Aux
   3718 		v_0 := v.Args[0]
   3719 		if v_0.Op != OpAMD64ADDQconst {
   3720 			break
   3721 		}
   3722 		d := v_0.AuxInt
   3723 		ptr := v_0.Args[0]
   3724 		idx := v.Args[1]
   3725 		mem := v.Args[2]
   3726 		v.reset(OpAMD64MOVBloadidx1)
   3727 		v.AuxInt = c + d
   3728 		v.Aux = sym
   3729 		v.AddArg(ptr)
   3730 		v.AddArg(idx)
   3731 		v.AddArg(mem)
   3732 		return true
   3733 	}
   3734 	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
   3735 	// cond:
   3736 	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
   3737 	for {
   3738 		c := v.AuxInt
   3739 		sym := v.Aux
   3740 		ptr := v.Args[0]
   3741 		v_1 := v.Args[1]
   3742 		if v_1.Op != OpAMD64ADDQconst {
   3743 			break
   3744 		}
   3745 		d := v_1.AuxInt
   3746 		idx := v_1.Args[0]
   3747 		mem := v.Args[2]
   3748 		v.reset(OpAMD64MOVBloadidx1)
   3749 		v.AuxInt = c + d
   3750 		v.Aux = sym
   3751 		v.AddArg(ptr)
   3752 		v.AddArg(idx)
   3753 		v.AddArg(mem)
   3754 		return true
   3755 	}
   3756 	return false
   3757 }
   3758 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
   3759 	b := v.Block
   3760 	_ = b
   3761 	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
   3762 	// cond:
   3763 	// result: (MOVBstore [off] {sym} ptr x mem)
   3764 	for {
   3765 		off := v.AuxInt
   3766 		sym := v.Aux
   3767 		ptr := v.Args[0]
   3768 		v_1 := v.Args[1]
   3769 		if v_1.Op != OpAMD64MOVBQSX {
   3770 			break
   3771 		}
   3772 		x := v_1.Args[0]
   3773 		mem := v.Args[2]
   3774 		v.reset(OpAMD64MOVBstore)
   3775 		v.AuxInt = off
   3776 		v.Aux = sym
   3777 		v.AddArg(ptr)
   3778 		v.AddArg(x)
   3779 		v.AddArg(mem)
   3780 		return true
   3781 	}
   3782 	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
   3783 	// cond:
   3784 	// result: (MOVBstore [off] {sym} ptr x mem)
   3785 	for {
   3786 		off := v.AuxInt
   3787 		sym := v.Aux
   3788 		ptr := v.Args[0]
   3789 		v_1 := v.Args[1]
   3790 		if v_1.Op != OpAMD64MOVBQZX {
   3791 			break
   3792 		}
   3793 		x := v_1.Args[0]
   3794 		mem := v.Args[2]
   3795 		v.reset(OpAMD64MOVBstore)
   3796 		v.AuxInt = off
   3797 		v.Aux = sym
   3798 		v.AddArg(ptr)
   3799 		v.AddArg(x)
   3800 		v.AddArg(mem)
   3801 		return true
   3802 	}
   3803 	// match: (MOVBstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
   3804 	// cond: is32Bit(off1+off2)
   3805 	// result: (MOVBstore  [off1+off2] {sym} ptr val mem)
   3806 	for {
   3807 		off1 := v.AuxInt
   3808 		sym := v.Aux
   3809 		v_0 := v.Args[0]
   3810 		if v_0.Op != OpAMD64ADDQconst {
   3811 			break
   3812 		}
   3813 		off2 := v_0.AuxInt
   3814 		ptr := v_0.Args[0]
   3815 		val := v.Args[1]
   3816 		mem := v.Args[2]
   3817 		if !(is32Bit(off1 + off2)) {
   3818 			break
   3819 		}
   3820 		v.reset(OpAMD64MOVBstore)
   3821 		v.AuxInt = off1 + off2
   3822 		v.Aux = sym
   3823 		v.AddArg(ptr)
   3824 		v.AddArg(val)
   3825 		v.AddArg(mem)
   3826 		return true
   3827 	}
   3828 	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
   3829 	// cond: validOff(off)
   3830 	// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
   3831 	for {
   3832 		off := v.AuxInt
   3833 		sym := v.Aux
   3834 		ptr := v.Args[0]
   3835 		v_1 := v.Args[1]
   3836 		if v_1.Op != OpAMD64MOVLconst {
   3837 			break
   3838 		}
   3839 		c := v_1.AuxInt
   3840 		mem := v.Args[2]
   3841 		if !(validOff(off)) {
   3842 			break
   3843 		}
   3844 		v.reset(OpAMD64MOVBstoreconst)
   3845 		v.AuxInt = makeValAndOff(int64(int8(c)), off)
   3846 		v.Aux = sym
   3847 		v.AddArg(ptr)
   3848 		v.AddArg(mem)
   3849 		return true
   3850 	}
   3851 	// match: (MOVBstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
   3852 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   3853 	// result: (MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   3854 	for {
   3855 		off1 := v.AuxInt
   3856 		sym1 := v.Aux
   3857 		v_0 := v.Args[0]
   3858 		if v_0.Op != OpAMD64LEAQ {
   3859 			break
   3860 		}
   3861 		off2 := v_0.AuxInt
   3862 		sym2 := v_0.Aux
   3863 		base := v_0.Args[0]
   3864 		val := v.Args[1]
   3865 		mem := v.Args[2]
   3866 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   3867 			break
   3868 		}
   3869 		v.reset(OpAMD64MOVBstore)
   3870 		v.AuxInt = off1 + off2
   3871 		v.Aux = mergeSym(sym1, sym2)
   3872 		v.AddArg(base)
   3873 		v.AddArg(val)
   3874 		v.AddArg(mem)
   3875 		return true
   3876 	}
   3877 	// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
   3878 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   3879 	// result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   3880 	for {
   3881 		off1 := v.AuxInt
   3882 		sym1 := v.Aux
   3883 		v_0 := v.Args[0]
   3884 		if v_0.Op != OpAMD64LEAQ1 {
   3885 			break
   3886 		}
   3887 		off2 := v_0.AuxInt
   3888 		sym2 := v_0.Aux
   3889 		ptr := v_0.Args[0]
   3890 		idx := v_0.Args[1]
   3891 		val := v.Args[1]
   3892 		mem := v.Args[2]
   3893 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   3894 			break
   3895 		}
   3896 		v.reset(OpAMD64MOVBstoreidx1)
   3897 		v.AuxInt = off1 + off2
   3898 		v.Aux = mergeSym(sym1, sym2)
   3899 		v.AddArg(ptr)
   3900 		v.AddArg(idx)
   3901 		v.AddArg(val)
   3902 		v.AddArg(mem)
   3903 		return true
   3904 	}
   3905 	// match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
   3906 	// cond: ptr.Op != OpSB
   3907 	// result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
   3908 	for {
   3909 		off := v.AuxInt
   3910 		sym := v.Aux
   3911 		v_0 := v.Args[0]
   3912 		if v_0.Op != OpAMD64ADDQ {
   3913 			break
   3914 		}
   3915 		ptr := v_0.Args[0]
   3916 		idx := v_0.Args[1]
   3917 		val := v.Args[1]
   3918 		mem := v.Args[2]
   3919 		if !(ptr.Op != OpSB) {
   3920 			break
   3921 		}
   3922 		v.reset(OpAMD64MOVBstoreidx1)
   3923 		v.AuxInt = off
   3924 		v.Aux = sym
   3925 		v.AddArg(ptr)
   3926 		v.AddArg(idx)
   3927 		v.AddArg(val)
   3928 		v.AddArg(mem)
   3929 		return true
   3930 	}
   3931 	// match: (MOVBstore [i] {s} p w   x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w)   x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w)   x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
   3932 	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && clobber(x0)   && clobber(x1)   && clobber(x2)
   3933 	// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
   3934 	for {
   3935 		i := v.AuxInt
   3936 		s := v.Aux
   3937 		p := v.Args[0]
   3938 		w := v.Args[1]
   3939 		x2 := v.Args[2]
   3940 		if x2.Op != OpAMD64MOVBstore {
   3941 			break
   3942 		}
   3943 		if x2.AuxInt != i-1 {
   3944 			break
   3945 		}
   3946 		if x2.Aux != s {
   3947 			break
   3948 		}
   3949 		if p != x2.Args[0] {
   3950 			break
   3951 		}
   3952 		x2_1 := x2.Args[1]
   3953 		if x2_1.Op != OpAMD64SHRLconst {
   3954 			break
   3955 		}
   3956 		if x2_1.AuxInt != 8 {
   3957 			break
   3958 		}
   3959 		if w != x2_1.Args[0] {
   3960 			break
   3961 		}
   3962 		x1 := x2.Args[2]
   3963 		if x1.Op != OpAMD64MOVBstore {
   3964 			break
   3965 		}
   3966 		if x1.AuxInt != i-2 {
   3967 			break
   3968 		}
   3969 		if x1.Aux != s {
   3970 			break
   3971 		}
   3972 		if p != x1.Args[0] {
   3973 			break
   3974 		}
   3975 		x1_1 := x1.Args[1]
   3976 		if x1_1.Op != OpAMD64SHRLconst {
   3977 			break
   3978 		}
   3979 		if x1_1.AuxInt != 16 {
   3980 			break
   3981 		}
   3982 		if w != x1_1.Args[0] {
   3983 			break
   3984 		}
   3985 		x0 := x1.Args[2]
   3986 		if x0.Op != OpAMD64MOVBstore {
   3987 			break
   3988 		}
   3989 		if x0.AuxInt != i-3 {
   3990 			break
   3991 		}
   3992 		if x0.Aux != s {
   3993 			break
   3994 		}
   3995 		if p != x0.Args[0] {
   3996 			break
   3997 		}
   3998 		x0_1 := x0.Args[1]
   3999 		if x0_1.Op != OpAMD64SHRLconst {
   4000 			break
   4001 		}
   4002 		if x0_1.AuxInt != 24 {
   4003 			break
   4004 		}
   4005 		if w != x0_1.Args[0] {
   4006 			break
   4007 		}
   4008 		mem := x0.Args[2]
   4009 		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
   4010 			break
   4011 		}
   4012 		v.reset(OpAMD64MOVLstore)
   4013 		v.AuxInt = i - 3
   4014 		v.Aux = s
   4015 		v.AddArg(p)
   4016 		v0 := b.NewValue0(v.Line, OpAMD64BSWAPL, w.Type)
   4017 		v0.AddArg(w)
   4018 		v.AddArg(v0)
   4019 		v.AddArg(mem)
   4020 		return true
   4021 	}
   4022 	// match: (MOVBstore [i] {s} p w   x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w)   x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w)   x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w)   x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w)   x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w)   x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w)   x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
   4023 	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)
   4024 	// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
   4025 	for {
   4026 		i := v.AuxInt
   4027 		s := v.Aux
   4028 		p := v.Args[0]
   4029 		w := v.Args[1]
   4030 		x6 := v.Args[2]
   4031 		if x6.Op != OpAMD64MOVBstore {
   4032 			break
   4033 		}
   4034 		if x6.AuxInt != i-1 {
   4035 			break
   4036 		}
   4037 		if x6.Aux != s {
   4038 			break
   4039 		}
   4040 		if p != x6.Args[0] {
   4041 			break
   4042 		}
   4043 		x6_1 := x6.Args[1]
   4044 		if x6_1.Op != OpAMD64SHRQconst {
   4045 			break
   4046 		}
   4047 		if x6_1.AuxInt != 8 {
   4048 			break
   4049 		}
   4050 		if w != x6_1.Args[0] {
   4051 			break
   4052 		}
   4053 		x5 := x6.Args[2]
   4054 		if x5.Op != OpAMD64MOVBstore {
   4055 			break
   4056 		}
   4057 		if x5.AuxInt != i-2 {
   4058 			break
   4059 		}
   4060 		if x5.Aux != s {
   4061 			break
   4062 		}
   4063 		if p != x5.Args[0] {
   4064 			break
   4065 		}
   4066 		x5_1 := x5.Args[1]
   4067 		if x5_1.Op != OpAMD64SHRQconst {
   4068 			break
   4069 		}
   4070 		if x5_1.AuxInt != 16 {
   4071 			break
   4072 		}
   4073 		if w != x5_1.Args[0] {
   4074 			break
   4075 		}
   4076 		x4 := x5.Args[2]
   4077 		if x4.Op != OpAMD64MOVBstore {
   4078 			break
   4079 		}
   4080 		if x4.AuxInt != i-3 {
   4081 			break
   4082 		}
   4083 		if x4.Aux != s {
   4084 			break
   4085 		}
   4086 		if p != x4.Args[0] {
   4087 			break
   4088 		}
   4089 		x4_1 := x4.Args[1]
   4090 		if x4_1.Op != OpAMD64SHRQconst {
   4091 			break
   4092 		}
   4093 		if x4_1.AuxInt != 24 {
   4094 			break
   4095 		}
   4096 		if w != x4_1.Args[0] {
   4097 			break
   4098 		}
   4099 		x3 := x4.Args[2]
   4100 		if x3.Op != OpAMD64MOVBstore {
   4101 			break
   4102 		}
   4103 		if x3.AuxInt != i-4 {
   4104 			break
   4105 		}
   4106 		if x3.Aux != s {
   4107 			break
   4108 		}
   4109 		if p != x3.Args[0] {
   4110 			break
   4111 		}
   4112 		x3_1 := x3.Args[1]
   4113 		if x3_1.Op != OpAMD64SHRQconst {
   4114 			break
   4115 		}
   4116 		if x3_1.AuxInt != 32 {
   4117 			break
   4118 		}
   4119 		if w != x3_1.Args[0] {
   4120 			break
   4121 		}
   4122 		x2 := x3.Args[2]
   4123 		if x2.Op != OpAMD64MOVBstore {
   4124 			break
   4125 		}
   4126 		if x2.AuxInt != i-5 {
   4127 			break
   4128 		}
   4129 		if x2.Aux != s {
   4130 			break
   4131 		}
   4132 		if p != x2.Args[0] {
   4133 			break
   4134 		}
   4135 		x2_1 := x2.Args[1]
   4136 		if x2_1.Op != OpAMD64SHRQconst {
   4137 			break
   4138 		}
   4139 		if x2_1.AuxInt != 40 {
   4140 			break
   4141 		}
   4142 		if w != x2_1.Args[0] {
   4143 			break
   4144 		}
   4145 		x1 := x2.Args[2]
   4146 		if x1.Op != OpAMD64MOVBstore {
   4147 			break
   4148 		}
   4149 		if x1.AuxInt != i-6 {
   4150 			break
   4151 		}
   4152 		if x1.Aux != s {
   4153 			break
   4154 		}
   4155 		if p != x1.Args[0] {
   4156 			break
   4157 		}
   4158 		x1_1 := x1.Args[1]
   4159 		if x1_1.Op != OpAMD64SHRQconst {
   4160 			break
   4161 		}
   4162 		if x1_1.AuxInt != 48 {
   4163 			break
   4164 		}
   4165 		if w != x1_1.Args[0] {
   4166 			break
   4167 		}
   4168 		x0 := x1.Args[2]
   4169 		if x0.Op != OpAMD64MOVBstore {
   4170 			break
   4171 		}
   4172 		if x0.AuxInt != i-7 {
   4173 			break
   4174 		}
   4175 		if x0.Aux != s {
   4176 			break
   4177 		}
   4178 		if p != x0.Args[0] {
   4179 			break
   4180 		}
   4181 		x0_1 := x0.Args[1]
   4182 		if x0_1.Op != OpAMD64SHRQconst {
   4183 			break
   4184 		}
   4185 		if x0_1.AuxInt != 56 {
   4186 			break
   4187 		}
   4188 		if w != x0_1.Args[0] {
   4189 			break
   4190 		}
   4191 		mem := x0.Args[2]
   4192 		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
   4193 			break
   4194 		}
   4195 		v.reset(OpAMD64MOVQstore)
   4196 		v.AuxInt = i - 7
   4197 		v.Aux = s
   4198 		v.AddArg(p)
   4199 		v0 := b.NewValue0(v.Line, OpAMD64BSWAPQ, w.Type)
   4200 		v0.AddArg(w)
   4201 		v.AddArg(v0)
   4202 		v.AddArg(mem)
   4203 		return true
   4204 	}
   4205 	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
   4206 	// cond: x.Uses == 1   && clobber(x)
   4207 	// result: (MOVWstore [i-1] {s} p w mem)
   4208 	for {
   4209 		i := v.AuxInt
   4210 		s := v.Aux
   4211 		p := v.Args[0]
   4212 		v_1 := v.Args[1]
   4213 		if v_1.Op != OpAMD64SHRQconst {
   4214 			break
   4215 		}
   4216 		if v_1.AuxInt != 8 {
   4217 			break
   4218 		}
   4219 		w := v_1.Args[0]
   4220 		x := v.Args[2]
   4221 		if x.Op != OpAMD64MOVBstore {
   4222 			break
   4223 		}
   4224 		if x.AuxInt != i-1 {
   4225 			break
   4226 		}
   4227 		if x.Aux != s {
   4228 			break
   4229 		}
   4230 		if p != x.Args[0] {
   4231 			break
   4232 		}
   4233 		if w != x.Args[1] {
   4234 			break
   4235 		}
   4236 		mem := x.Args[2]
   4237 		if !(x.Uses == 1 && clobber(x)) {
   4238 			break
   4239 		}
   4240 		v.reset(OpAMD64MOVWstore)
   4241 		v.AuxInt = i - 1
   4242 		v.Aux = s
   4243 		v.AddArg(p)
   4244 		v.AddArg(w)
   4245 		v.AddArg(mem)
   4246 		return true
   4247 	}
   4248 	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
   4249 	// cond: x.Uses == 1   && clobber(x)
   4250 	// result: (MOVWstore [i-1] {s} p w0 mem)
   4251 	for {
   4252 		i := v.AuxInt
   4253 		s := v.Aux
   4254 		p := v.Args[0]
   4255 		v_1 := v.Args[1]
   4256 		if v_1.Op != OpAMD64SHRQconst {
   4257 			break
   4258 		}
   4259 		j := v_1.AuxInt
   4260 		w := v_1.Args[0]
   4261 		x := v.Args[2]
   4262 		if x.Op != OpAMD64MOVBstore {
   4263 			break
   4264 		}
   4265 		if x.AuxInt != i-1 {
   4266 			break
   4267 		}
   4268 		if x.Aux != s {
   4269 			break
   4270 		}
   4271 		if p != x.Args[0] {
   4272 			break
   4273 		}
   4274 		w0 := x.Args[1]
   4275 		if w0.Op != OpAMD64SHRQconst {
   4276 			break
   4277 		}
   4278 		if w0.AuxInt != j-8 {
   4279 			break
   4280 		}
   4281 		if w != w0.Args[0] {
   4282 			break
   4283 		}
   4284 		mem := x.Args[2]
   4285 		if !(x.Uses == 1 && clobber(x)) {
   4286 			break
   4287 		}
   4288 		v.reset(OpAMD64MOVWstore)
   4289 		v.AuxInt = i - 1
   4290 		v.Aux = s
   4291 		v.AddArg(p)
   4292 		v.AddArg(w0)
   4293 		v.AddArg(mem)
   4294 		return true
   4295 	}
   4296 	// match: (MOVBstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
   4297 	// cond: canMergeSym(sym1, sym2)
   4298 	// result: (MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   4299 	for {
   4300 		off1 := v.AuxInt
   4301 		sym1 := v.Aux
   4302 		v_0 := v.Args[0]
   4303 		if v_0.Op != OpAMD64LEAL {
   4304 			break
   4305 		}
   4306 		off2 := v_0.AuxInt
   4307 		sym2 := v_0.Aux
   4308 		base := v_0.Args[0]
   4309 		val := v.Args[1]
   4310 		mem := v.Args[2]
   4311 		if !(canMergeSym(sym1, sym2)) {
   4312 			break
   4313 		}
   4314 		v.reset(OpAMD64MOVBstore)
   4315 		v.AuxInt = off1 + off2
   4316 		v.Aux = mergeSym(sym1, sym2)
   4317 		v.AddArg(base)
   4318 		v.AddArg(val)
   4319 		v.AddArg(mem)
   4320 		return true
   4321 	}
   4322 	// match: (MOVBstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
   4323 	// cond: is32Bit(off1+off2)
   4324 	// result: (MOVBstore  [off1+off2] {sym} ptr val mem)
   4325 	for {
   4326 		off1 := v.AuxInt
   4327 		sym := v.Aux
   4328 		v_0 := v.Args[0]
   4329 		if v_0.Op != OpAMD64ADDLconst {
   4330 			break
   4331 		}
   4332 		off2 := v_0.AuxInt
   4333 		ptr := v_0.Args[0]
   4334 		val := v.Args[1]
   4335 		mem := v.Args[2]
   4336 		if !(is32Bit(off1 + off2)) {
   4337 			break
   4338 		}
   4339 		v.reset(OpAMD64MOVBstore)
   4340 		v.AuxInt = off1 + off2
   4341 		v.Aux = sym
   4342 		v.AddArg(ptr)
   4343 		v.AddArg(val)
   4344 		v.AddArg(mem)
   4345 		return true
   4346 	}
   4347 	return false
   4348 }
   4349 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool {
   4350 	b := v.Block
   4351 	_ = b
   4352 	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
   4353 	// cond: ValAndOff(sc).canAdd(off)
   4354 	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   4355 	for {
   4356 		sc := v.AuxInt
   4357 		s := v.Aux
   4358 		v_0 := v.Args[0]
   4359 		if v_0.Op != OpAMD64ADDQconst {
   4360 			break
   4361 		}
   4362 		off := v_0.AuxInt
   4363 		ptr := v_0.Args[0]
   4364 		mem := v.Args[1]
   4365 		if !(ValAndOff(sc).canAdd(off)) {
   4366 			break
   4367 		}
   4368 		v.reset(OpAMD64MOVBstoreconst)
   4369 		v.AuxInt = ValAndOff(sc).add(off)
   4370 		v.Aux = s
   4371 		v.AddArg(ptr)
   4372 		v.AddArg(mem)
   4373 		return true
   4374 	}
   4375 	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
   4376 	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   4377 	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   4378 	for {
   4379 		sc := v.AuxInt
   4380 		sym1 := v.Aux
   4381 		v_0 := v.Args[0]
   4382 		if v_0.Op != OpAMD64LEAQ {
   4383 			break
   4384 		}
   4385 		off := v_0.AuxInt
   4386 		sym2 := v_0.Aux
   4387 		ptr := v_0.Args[0]
   4388 		mem := v.Args[1]
   4389 		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
   4390 			break
   4391 		}
   4392 		v.reset(OpAMD64MOVBstoreconst)
   4393 		v.AuxInt = ValAndOff(sc).add(off)
   4394 		v.Aux = mergeSym(sym1, sym2)
   4395 		v.AddArg(ptr)
   4396 		v.AddArg(mem)
   4397 		return true
   4398 	}
   4399 	// match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
   4400 	// cond: canMergeSym(sym1, sym2)
   4401 	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   4402 	for {
   4403 		x := v.AuxInt
   4404 		sym1 := v.Aux
   4405 		v_0 := v.Args[0]
   4406 		if v_0.Op != OpAMD64LEAQ1 {
   4407 			break
   4408 		}
   4409 		off := v_0.AuxInt
   4410 		sym2 := v_0.Aux
   4411 		ptr := v_0.Args[0]
   4412 		idx := v_0.Args[1]
   4413 		mem := v.Args[1]
   4414 		if !(canMergeSym(sym1, sym2)) {
   4415 			break
   4416 		}
   4417 		v.reset(OpAMD64MOVBstoreconstidx1)
   4418 		v.AuxInt = ValAndOff(x).add(off)
   4419 		v.Aux = mergeSym(sym1, sym2)
   4420 		v.AddArg(ptr)
   4421 		v.AddArg(idx)
   4422 		v.AddArg(mem)
   4423 		return true
   4424 	}
   4425 	// match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
   4426 	// cond:
   4427 	// result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
   4428 	for {
   4429 		x := v.AuxInt
   4430 		sym := v.Aux
   4431 		v_0 := v.Args[0]
   4432 		if v_0.Op != OpAMD64ADDQ {
   4433 			break
   4434 		}
   4435 		ptr := v_0.Args[0]
   4436 		idx := v_0.Args[1]
   4437 		mem := v.Args[1]
   4438 		v.reset(OpAMD64MOVBstoreconstidx1)
   4439 		v.AuxInt = x
   4440 		v.Aux = sym
   4441 		v.AddArg(ptr)
   4442 		v.AddArg(idx)
   4443 		v.AddArg(mem)
   4444 		return true
   4445 	}
   4446 	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
   4447 	// cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
   4448 	// result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
   4449 	for {
   4450 		c := v.AuxInt
   4451 		s := v.Aux
   4452 		p := v.Args[0]
   4453 		x := v.Args[1]
   4454 		if x.Op != OpAMD64MOVBstoreconst {
   4455 			break
   4456 		}
   4457 		a := x.AuxInt
   4458 		if x.Aux != s {
   4459 			break
   4460 		}
   4461 		if p != x.Args[0] {
   4462 			break
   4463 		}
   4464 		mem := x.Args[1]
   4465 		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
   4466 			break
   4467 		}
   4468 		v.reset(OpAMD64MOVWstoreconst)
   4469 		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
   4470 		v.Aux = s
   4471 		v.AddArg(p)
   4472 		v.AddArg(mem)
   4473 		return true
   4474 	}
   4475 	// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
   4476 	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   4477 	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   4478 	for {
   4479 		sc := v.AuxInt
   4480 		sym1 := v.Aux
   4481 		v_0 := v.Args[0]
   4482 		if v_0.Op != OpAMD64LEAL {
   4483 			break
   4484 		}
   4485 		off := v_0.AuxInt
   4486 		sym2 := v_0.Aux
   4487 		ptr := v_0.Args[0]
   4488 		mem := v.Args[1]
   4489 		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
   4490 			break
   4491 		}
   4492 		v.reset(OpAMD64MOVBstoreconst)
   4493 		v.AuxInt = ValAndOff(sc).add(off)
   4494 		v.Aux = mergeSym(sym1, sym2)
   4495 		v.AddArg(ptr)
   4496 		v.AddArg(mem)
   4497 		return true
   4498 	}
   4499 	// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
   4500 	// cond: ValAndOff(sc).canAdd(off)
   4501 	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   4502 	for {
   4503 		sc := v.AuxInt
   4504 		s := v.Aux
   4505 		v_0 := v.Args[0]
   4506 		if v_0.Op != OpAMD64ADDLconst {
   4507 			break
   4508 		}
   4509 		off := v_0.AuxInt
   4510 		ptr := v_0.Args[0]
   4511 		mem := v.Args[1]
   4512 		if !(ValAndOff(sc).canAdd(off)) {
   4513 			break
   4514 		}
   4515 		v.reset(OpAMD64MOVBstoreconst)
   4516 		v.AuxInt = ValAndOff(sc).add(off)
   4517 		v.Aux = s
   4518 		v.AddArg(ptr)
   4519 		v.AddArg(mem)
   4520 		return true
   4521 	}
   4522 	return false
   4523 }
   4524 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool {
   4525 	b := v.Block
   4526 	_ = b
   4527 	// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
   4528 	// cond:
   4529 	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   4530 	for {
   4531 		x := v.AuxInt
   4532 		sym := v.Aux
   4533 		v_0 := v.Args[0]
   4534 		if v_0.Op != OpAMD64ADDQconst {
   4535 			break
   4536 		}
   4537 		c := v_0.AuxInt
   4538 		ptr := v_0.Args[0]
   4539 		idx := v.Args[1]
   4540 		mem := v.Args[2]
   4541 		v.reset(OpAMD64MOVBstoreconstidx1)
   4542 		v.AuxInt = ValAndOff(x).add(c)
   4543 		v.Aux = sym
   4544 		v.AddArg(ptr)
   4545 		v.AddArg(idx)
   4546 		v.AddArg(mem)
   4547 		return true
   4548 	}
   4549 	// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
   4550 	// cond:
   4551 	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   4552 	for {
   4553 		x := v.AuxInt
   4554 		sym := v.Aux
   4555 		ptr := v.Args[0]
   4556 		v_1 := v.Args[1]
   4557 		if v_1.Op != OpAMD64ADDQconst {
   4558 			break
   4559 		}
   4560 		c := v_1.AuxInt
   4561 		idx := v_1.Args[0]
   4562 		mem := v.Args[2]
   4563 		v.reset(OpAMD64MOVBstoreconstidx1)
   4564 		v.AuxInt = ValAndOff(x).add(c)
   4565 		v.Aux = sym
   4566 		v.AddArg(ptr)
   4567 		v.AddArg(idx)
   4568 		v.AddArg(mem)
   4569 		return true
   4570 	}
   4571 	// match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
   4572 	// cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
   4573 	// result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
   4574 	for {
   4575 		c := v.AuxInt
   4576 		s := v.Aux
   4577 		p := v.Args[0]
   4578 		i := v.Args[1]
   4579 		x := v.Args[2]
   4580 		if x.Op != OpAMD64MOVBstoreconstidx1 {
   4581 			break
   4582 		}
   4583 		a := x.AuxInt
   4584 		if x.Aux != s {
   4585 			break
   4586 		}
   4587 		if p != x.Args[0] {
   4588 			break
   4589 		}
   4590 		if i != x.Args[1] {
   4591 			break
   4592 		}
   4593 		mem := x.Args[2]
   4594 		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
   4595 			break
   4596 		}
   4597 		v.reset(OpAMD64MOVWstoreconstidx1)
   4598 		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
   4599 		v.Aux = s
   4600 		v.AddArg(p)
   4601 		v.AddArg(i)
   4602 		v.AddArg(mem)
   4603 		return true
   4604 	}
   4605 	return false
   4606 }
   4607 func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool {
   4608 	b := v.Block
   4609 	_ = b
   4610 	// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
   4611 	// cond:
   4612 	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
   4613 	for {
   4614 		c := v.AuxInt
   4615 		sym := v.Aux
   4616 		v_0 := v.Args[0]
   4617 		if v_0.Op != OpAMD64ADDQconst {
   4618 			break
   4619 		}
   4620 		d := v_0.AuxInt
   4621 		ptr := v_0.Args[0]
   4622 		idx := v.Args[1]
   4623 		val := v.Args[2]
   4624 		mem := v.Args[3]
   4625 		v.reset(OpAMD64MOVBstoreidx1)
   4626 		v.AuxInt = c + d
   4627 		v.Aux = sym
   4628 		v.AddArg(ptr)
   4629 		v.AddArg(idx)
   4630 		v.AddArg(val)
   4631 		v.AddArg(mem)
   4632 		return true
   4633 	}
   4634 	// match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
   4635 	// cond:
   4636 	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
   4637 	for {
   4638 		c := v.AuxInt
   4639 		sym := v.Aux
   4640 		ptr := v.Args[0]
   4641 		v_1 := v.Args[1]
   4642 		if v_1.Op != OpAMD64ADDQconst {
   4643 			break
   4644 		}
   4645 		d := v_1.AuxInt
   4646 		idx := v_1.Args[0]
   4647 		val := v.Args[2]
   4648 		mem := v.Args[3]
   4649 		v.reset(OpAMD64MOVBstoreidx1)
   4650 		v.AuxInt = c + d
   4651 		v.Aux = sym
   4652 		v.AddArg(ptr)
   4653 		v.AddArg(idx)
   4654 		v.AddArg(val)
   4655 		v.AddArg(mem)
   4656 		return true
   4657 	}
   4658 	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
   4659 	// cond: x.Uses == 1   && clobber(x)
   4660 	// result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
   4661 	for {
   4662 		i := v.AuxInt
   4663 		s := v.Aux
   4664 		p := v.Args[0]
   4665 		idx := v.Args[1]
   4666 		v_2 := v.Args[2]
   4667 		if v_2.Op != OpAMD64SHRQconst {
   4668 			break
   4669 		}
   4670 		if v_2.AuxInt != 8 {
   4671 			break
   4672 		}
   4673 		w := v_2.Args[0]
   4674 		x := v.Args[3]
   4675 		if x.Op != OpAMD64MOVBstoreidx1 {
   4676 			break
   4677 		}
   4678 		if x.AuxInt != i-1 {
   4679 			break
   4680 		}
   4681 		if x.Aux != s {
   4682 			break
   4683 		}
   4684 		if p != x.Args[0] {
   4685 			break
   4686 		}
   4687 		if idx != x.Args[1] {
   4688 			break
   4689 		}
   4690 		if w != x.Args[2] {
   4691 			break
   4692 		}
   4693 		mem := x.Args[3]
   4694 		if !(x.Uses == 1 && clobber(x)) {
   4695 			break
   4696 		}
   4697 		v.reset(OpAMD64MOVWstoreidx1)
   4698 		v.AuxInt = i - 1
   4699 		v.Aux = s
   4700 		v.AddArg(p)
   4701 		v.AddArg(idx)
   4702 		v.AddArg(w)
   4703 		v.AddArg(mem)
   4704 		return true
   4705 	}
   4706 	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
   4707 	// cond: x.Uses == 1   && clobber(x)
   4708 	// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
   4709 	for {
   4710 		i := v.AuxInt
   4711 		s := v.Aux
   4712 		p := v.Args[0]
   4713 		idx := v.Args[1]
   4714 		v_2 := v.Args[2]
   4715 		if v_2.Op != OpAMD64SHRQconst {
   4716 			break
   4717 		}
   4718 		j := v_2.AuxInt
   4719 		w := v_2.Args[0]
   4720 		x := v.Args[3]
   4721 		if x.Op != OpAMD64MOVBstoreidx1 {
   4722 			break
   4723 		}
   4724 		if x.AuxInt != i-1 {
   4725 			break
   4726 		}
   4727 		if x.Aux != s {
   4728 			break
   4729 		}
   4730 		if p != x.Args[0] {
   4731 			break
   4732 		}
   4733 		if idx != x.Args[1] {
   4734 			break
   4735 		}
   4736 		w0 := x.Args[2]
   4737 		if w0.Op != OpAMD64SHRQconst {
   4738 			break
   4739 		}
   4740 		if w0.AuxInt != j-8 {
   4741 			break
   4742 		}
   4743 		if w != w0.Args[0] {
   4744 			break
   4745 		}
   4746 		mem := x.Args[3]
   4747 		if !(x.Uses == 1 && clobber(x)) {
   4748 			break
   4749 		}
   4750 		v.reset(OpAMD64MOVWstoreidx1)
   4751 		v.AuxInt = i - 1
   4752 		v.Aux = s
   4753 		v.AddArg(p)
   4754 		v.AddArg(idx)
   4755 		v.AddArg(w0)
   4756 		v.AddArg(mem)
   4757 		return true
   4758 	}
   4759 	return false
   4760 }
   4761 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool {
   4762 	b := v.Block
   4763 	_ = b
   4764 	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
   4765 	// cond: x.Uses == 1 && clobber(x)
   4766 	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
   4767 	for {
   4768 		x := v.Args[0]
   4769 		if x.Op != OpAMD64MOVLload {
   4770 			break
   4771 		}
   4772 		off := x.AuxInt
   4773 		sym := x.Aux
   4774 		ptr := x.Args[0]
   4775 		mem := x.Args[1]
   4776 		if !(x.Uses == 1 && clobber(x)) {
   4777 			break
   4778 		}
   4779 		b = x.Block
   4780 		v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type)
   4781 		v.reset(OpCopy)
   4782 		v.AddArg(v0)
   4783 		v0.AuxInt = off
   4784 		v0.Aux = sym
   4785 		v0.AddArg(ptr)
   4786 		v0.AddArg(mem)
   4787 		return true
   4788 	}
   4789 	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
   4790 	// cond: x.Uses == 1 && clobber(x)
   4791 	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
   4792 	for {
   4793 		x := v.Args[0]
   4794 		if x.Op != OpAMD64MOVQload {
   4795 			break
   4796 		}
   4797 		off := x.AuxInt
   4798 		sym := x.Aux
   4799 		ptr := x.Args[0]
   4800 		mem := x.Args[1]
   4801 		if !(x.Uses == 1 && clobber(x)) {
   4802 			break
   4803 		}
   4804 		b = x.Block
   4805 		v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type)
   4806 		v.reset(OpCopy)
   4807 		v.AddArg(v0)
   4808 		v0.AuxInt = off
   4809 		v0.Aux = sym
   4810 		v0.AddArg(ptr)
   4811 		v0.AddArg(mem)
   4812 		return true
   4813 	}
   4814 	// match: (MOVLQSX (ANDLconst [c] x))
   4815 	// cond: c & 0x80000000 == 0
   4816 	// result: (ANDLconst [c & 0x7fffffff] x)
   4817 	for {
   4818 		v_0 := v.Args[0]
   4819 		if v_0.Op != OpAMD64ANDLconst {
   4820 			break
   4821 		}
   4822 		c := v_0.AuxInt
   4823 		x := v_0.Args[0]
   4824 		if !(c&0x80000000 == 0) {
   4825 			break
   4826 		}
   4827 		v.reset(OpAMD64ANDLconst)
   4828 		v.AuxInt = c & 0x7fffffff
   4829 		v.AddArg(x)
   4830 		return true
   4831 	}
   4832 	return false
   4833 }
   4834 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool {
   4835 	b := v.Block
   4836 	_ = b
   4837 	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
   4838 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   4839 	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   4840 	for {
   4841 		off1 := v.AuxInt
   4842 		sym1 := v.Aux
   4843 		v_0 := v.Args[0]
   4844 		if v_0.Op != OpAMD64LEAQ {
   4845 			break
   4846 		}
   4847 		off2 := v_0.AuxInt
   4848 		sym2 := v_0.Aux
   4849 		base := v_0.Args[0]
   4850 		mem := v.Args[1]
   4851 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   4852 			break
   4853 		}
   4854 		v.reset(OpAMD64MOVLQSXload)
   4855 		v.AuxInt = off1 + off2
   4856 		v.Aux = mergeSym(sym1, sym2)
   4857 		v.AddArg(base)
   4858 		v.AddArg(mem)
   4859 		return true
   4860 	}
   4861 	return false
   4862 }
   4863 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool {
   4864 	b := v.Block
   4865 	_ = b
   4866 	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
   4867 	// cond: x.Uses == 1 && clobber(x)
   4868 	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
   4869 	for {
   4870 		x := v.Args[0]
   4871 		if x.Op != OpAMD64MOVLload {
   4872 			break
   4873 		}
   4874 		off := x.AuxInt
   4875 		sym := x.Aux
   4876 		ptr := x.Args[0]
   4877 		mem := x.Args[1]
   4878 		if !(x.Uses == 1 && clobber(x)) {
   4879 			break
   4880 		}
   4881 		b = x.Block
   4882 		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type)
   4883 		v.reset(OpCopy)
   4884 		v.AddArg(v0)
   4885 		v0.AuxInt = off
   4886 		v0.Aux = sym
   4887 		v0.AddArg(ptr)
   4888 		v0.AddArg(mem)
   4889 		return true
   4890 	}
   4891 	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
   4892 	// cond: x.Uses == 1 && clobber(x)
   4893 	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
   4894 	for {
   4895 		x := v.Args[0]
   4896 		if x.Op != OpAMD64MOVQload {
   4897 			break
   4898 		}
   4899 		off := x.AuxInt
   4900 		sym := x.Aux
   4901 		ptr := x.Args[0]
   4902 		mem := x.Args[1]
   4903 		if !(x.Uses == 1 && clobber(x)) {
   4904 			break
   4905 		}
   4906 		b = x.Block
   4907 		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type)
   4908 		v.reset(OpCopy)
   4909 		v.AddArg(v0)
   4910 		v0.AuxInt = off
   4911 		v0.Aux = sym
   4912 		v0.AddArg(ptr)
   4913 		v0.AddArg(mem)
   4914 		return true
   4915 	}
   4916 	// match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
   4917 	// cond: x.Uses == 1 && clobber(x)
   4918 	// result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
   4919 	for {
   4920 		x := v.Args[0]
   4921 		if x.Op != OpAMD64MOVLloadidx1 {
   4922 			break
   4923 		}
   4924 		off := x.AuxInt
   4925 		sym := x.Aux
   4926 		ptr := x.Args[0]
   4927 		idx := x.Args[1]
   4928 		mem := x.Args[2]
   4929 		if !(x.Uses == 1 && clobber(x)) {
   4930 			break
   4931 		}
   4932 		b = x.Block
   4933 		v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
   4934 		v.reset(OpCopy)
   4935 		v.AddArg(v0)
   4936 		v0.AuxInt = off
   4937 		v0.Aux = sym
   4938 		v0.AddArg(ptr)
   4939 		v0.AddArg(idx)
   4940 		v0.AddArg(mem)
   4941 		return true
   4942 	}
   4943 	// match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
   4944 	// cond: x.Uses == 1 && clobber(x)
   4945 	// result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
   4946 	for {
   4947 		x := v.Args[0]
   4948 		if x.Op != OpAMD64MOVLloadidx4 {
   4949 			break
   4950 		}
   4951 		off := x.AuxInt
   4952 		sym := x.Aux
   4953 		ptr := x.Args[0]
   4954 		idx := x.Args[1]
   4955 		mem := x.Args[2]
   4956 		if !(x.Uses == 1 && clobber(x)) {
   4957 			break
   4958 		}
   4959 		b = x.Block
   4960 		v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx4, v.Type)
   4961 		v.reset(OpCopy)
   4962 		v.AddArg(v0)
   4963 		v0.AuxInt = off
   4964 		v0.Aux = sym
   4965 		v0.AddArg(ptr)
   4966 		v0.AddArg(idx)
   4967 		v0.AddArg(mem)
   4968 		return true
   4969 	}
   4970 	// match: (MOVLQZX (ANDLconst [c] x))
   4971 	// cond:
   4972 	// result: (ANDLconst [c] x)
   4973 	for {
   4974 		v_0 := v.Args[0]
   4975 		if v_0.Op != OpAMD64ANDLconst {
   4976 			break
   4977 		}
   4978 		c := v_0.AuxInt
   4979 		x := v_0.Args[0]
   4980 		v.reset(OpAMD64ANDLconst)
   4981 		v.AuxInt = c
   4982 		v.AddArg(x)
   4983 		return true
   4984 	}
   4985 	return false
   4986 }
   4987 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value, config *Config) bool {
   4988 	b := v.Block
   4989 	_ = b
   4990 	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
   4991 	// cond: is32Bit(off1+off2)
   4992 	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
   4993 	for {
   4994 		off1 := v.AuxInt
   4995 		sym := v.Aux
   4996 		v_0 := v.Args[0]
   4997 		if v_0.Op != OpAMD64ADDQconst {
   4998 			break
   4999 		}
   5000 		off2 := v_0.AuxInt
   5001 		ptr := v_0.Args[0]
   5002 		mem := v.Args[1]
   5003 		if !(is32Bit(off1 + off2)) {
   5004 			break
   5005 		}
   5006 		v.reset(OpAMD64MOVLatomicload)
   5007 		v.AuxInt = off1 + off2
   5008 		v.Aux = sym
   5009 		v.AddArg(ptr)
   5010 		v.AddArg(mem)
   5011 		return true
   5012 	}
   5013 	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
   5014 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   5015 	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   5016 	for {
   5017 		off1 := v.AuxInt
   5018 		sym1 := v.Aux
   5019 		v_0 := v.Args[0]
   5020 		if v_0.Op != OpAMD64LEAQ {
   5021 			break
   5022 		}
   5023 		off2 := v_0.AuxInt
   5024 		sym2 := v_0.Aux
   5025 		ptr := v_0.Args[0]
   5026 		mem := v.Args[1]
   5027 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   5028 			break
   5029 		}
   5030 		v.reset(OpAMD64MOVLatomicload)
   5031 		v.AuxInt = off1 + off2
   5032 		v.Aux = mergeSym(sym1, sym2)
   5033 		v.AddArg(ptr)
   5034 		v.AddArg(mem)
   5035 		return true
   5036 	}
   5037 	return false
   5038 }
   5039 func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
   5040 	b := v.Block
   5041 	_ = b
   5042 	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
   5043 	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
   5044 	// result: x
   5045 	for {
   5046 		off := v.AuxInt
   5047 		sym := v.Aux
   5048 		ptr := v.Args[0]
   5049 		v_1 := v.Args[1]
   5050 		if v_1.Op != OpAMD64MOVLstore {
   5051 			break
   5052 		}
   5053 		off2 := v_1.AuxInt
   5054 		sym2 := v_1.Aux
   5055 		ptr2 := v_1.Args[0]
   5056 		x := v_1.Args[1]
   5057 		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
   5058 			break
   5059 		}
   5060 		v.reset(OpCopy)
   5061 		v.Type = x.Type
   5062 		v.AddArg(x)
   5063 		return true
   5064 	}
   5065 	// match: (MOVLload  [off1] {sym} (ADDQconst [off2] ptr) mem)
   5066 	// cond: is32Bit(off1+off2)
   5067 	// result: (MOVLload  [off1+off2] {sym} ptr mem)
   5068 	for {
   5069 		off1 := v.AuxInt
   5070 		sym := v.Aux
   5071 		v_0 := v.Args[0]
   5072 		if v_0.Op != OpAMD64ADDQconst {
   5073 			break
   5074 		}
   5075 		off2 := v_0.AuxInt
   5076 		ptr := v_0.Args[0]
   5077 		mem := v.Args[1]
   5078 		if !(is32Bit(off1 + off2)) {
   5079 			break
   5080 		}
   5081 		v.reset(OpAMD64MOVLload)
   5082 		v.AuxInt = off1 + off2
   5083 		v.Aux = sym
   5084 		v.AddArg(ptr)
   5085 		v.AddArg(mem)
   5086 		return true
   5087 	}
   5088 	// match: (MOVLload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
   5089 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   5090 	// result: (MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   5091 	for {
   5092 		off1 := v.AuxInt
   5093 		sym1 := v.Aux
   5094 		v_0 := v.Args[0]
   5095 		if v_0.Op != OpAMD64LEAQ {
   5096 			break
   5097 		}
   5098 		off2 := v_0.AuxInt
   5099 		sym2 := v_0.Aux
   5100 		base := v_0.Args[0]
   5101 		mem := v.Args[1]
   5102 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   5103 			break
   5104 		}
   5105 		v.reset(OpAMD64MOVLload)
   5106 		v.AuxInt = off1 + off2
   5107 		v.Aux = mergeSym(sym1, sym2)
   5108 		v.AddArg(base)
   5109 		v.AddArg(mem)
   5110 		return true
   5111 	}
   5112 	// match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
   5113 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   5114 	// result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   5115 	for {
   5116 		off1 := v.AuxInt
   5117 		sym1 := v.Aux
   5118 		v_0 := v.Args[0]
   5119 		if v_0.Op != OpAMD64LEAQ1 {
   5120 			break
   5121 		}
   5122 		off2 := v_0.AuxInt
   5123 		sym2 := v_0.Aux
   5124 		ptr := v_0.Args[0]
   5125 		idx := v_0.Args[1]
   5126 		mem := v.Args[1]
   5127 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   5128 			break
   5129 		}
   5130 		v.reset(OpAMD64MOVLloadidx1)
   5131 		v.AuxInt = off1 + off2
   5132 		v.Aux = mergeSym(sym1, sym2)
   5133 		v.AddArg(ptr)
   5134 		v.AddArg(idx)
   5135 		v.AddArg(mem)
   5136 		return true
   5137 	}
   5138 	// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
   5139 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   5140 	// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   5141 	for {
   5142 		off1 := v.AuxInt
   5143 		sym1 := v.Aux
   5144 		v_0 := v.Args[0]
   5145 		if v_0.Op != OpAMD64LEAQ4 {
   5146 			break
   5147 		}
   5148 		off2 := v_0.AuxInt
   5149 		sym2 := v_0.Aux
   5150 		ptr := v_0.Args[0]
   5151 		idx := v_0.Args[1]
   5152 		mem := v.Args[1]
   5153 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   5154 			break
   5155 		}
   5156 		v.reset(OpAMD64MOVLloadidx4)
   5157 		v.AuxInt = off1 + off2
   5158 		v.Aux = mergeSym(sym1, sym2)
   5159 		v.AddArg(ptr)
   5160 		v.AddArg(idx)
   5161 		v.AddArg(mem)
   5162 		return true
   5163 	}
   5164 	// match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
   5165 	// cond: ptr.Op != OpSB
   5166 	// result: (MOVLloadidx1 [off] {sym} ptr idx mem)
   5167 	for {
   5168 		off := v.AuxInt
   5169 		sym := v.Aux
   5170 		v_0 := v.Args[0]
   5171 		if v_0.Op != OpAMD64ADDQ {
   5172 			break
   5173 		}
   5174 		ptr := v_0.Args[0]
   5175 		idx := v_0.Args[1]
   5176 		mem := v.Args[1]
   5177 		if !(ptr.Op != OpSB) {
   5178 			break
   5179 		}
   5180 		v.reset(OpAMD64MOVLloadidx1)
   5181 		v.AuxInt = off
   5182 		v.Aux = sym
   5183 		v.AddArg(ptr)
   5184 		v.AddArg(idx)
   5185 		v.AddArg(mem)
   5186 		return true
   5187 	}
   5188 	// match: (MOVLload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
   5189 	// cond: canMergeSym(sym1, sym2)
   5190 	// result: (MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   5191 	for {
   5192 		off1 := v.AuxInt
   5193 		sym1 := v.Aux
   5194 		v_0 := v.Args[0]
   5195 		if v_0.Op != OpAMD64LEAL {
   5196 			break
   5197 		}
   5198 		off2 := v_0.AuxInt
   5199 		sym2 := v_0.Aux
   5200 		base := v_0.Args[0]
   5201 		mem := v.Args[1]
   5202 		if !(canMergeSym(sym1, sym2)) {
   5203 			break
   5204 		}
   5205 		v.reset(OpAMD64MOVLload)
   5206 		v.AuxInt = off1 + off2
   5207 		v.Aux = mergeSym(sym1, sym2)
   5208 		v.AddArg(base)
   5209 		v.AddArg(mem)
   5210 		return true
   5211 	}
   5212 	// match: (MOVLload  [off1] {sym} (ADDLconst [off2] ptr) mem)
   5213 	// cond: is32Bit(off1+off2)
   5214 	// result: (MOVLload  [off1+off2] {sym} ptr mem)
   5215 	for {
   5216 		off1 := v.AuxInt
   5217 		sym := v.Aux
   5218 		v_0 := v.Args[0]
   5219 		if v_0.Op != OpAMD64ADDLconst {
   5220 			break
   5221 		}
   5222 		off2 := v_0.AuxInt
   5223 		ptr := v_0.Args[0]
   5224 		mem := v.Args[1]
   5225 		if !(is32Bit(off1 + off2)) {
   5226 			break
   5227 		}
   5228 		v.reset(OpAMD64MOVLload)
   5229 		v.AuxInt = off1 + off2
   5230 		v.Aux = sym
   5231 		v.AddArg(ptr)
   5232 		v.AddArg(mem)
   5233 		return true
   5234 	}
   5235 	return false
   5236 }
   5237 func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool {
   5238 	b := v.Block
   5239 	_ = b
   5240 	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
   5241 	// cond:
   5242 	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
   5243 	for {
   5244 		c := v.AuxInt
   5245 		sym := v.Aux
   5246 		ptr := v.Args[0]
   5247 		v_1 := v.Args[1]
   5248 		if v_1.Op != OpAMD64SHLQconst {
   5249 			break
   5250 		}
   5251 		if v_1.AuxInt != 2 {
   5252 			break
   5253 		}
   5254 		idx := v_1.Args[0]
   5255 		mem := v.Args[2]
   5256 		v.reset(OpAMD64MOVLloadidx4)
   5257 		v.AuxInt = c
   5258 		v.Aux = sym
   5259 		v.AddArg(ptr)
   5260 		v.AddArg(idx)
   5261 		v.AddArg(mem)
   5262 		return true
   5263 	}
   5264 	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
   5265 	// cond:
   5266 	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
   5267 	for {
   5268 		c := v.AuxInt
   5269 		sym := v.Aux
   5270 		v_0 := v.Args[0]
   5271 		if v_0.Op != OpAMD64ADDQconst {
   5272 			break
   5273 		}
   5274 		d := v_0.AuxInt
   5275 		ptr := v_0.Args[0]
   5276 		idx := v.Args[1]
   5277 		mem := v.Args[2]
   5278 		v.reset(OpAMD64MOVLloadidx1)
   5279 		v.AuxInt = c + d
   5280 		v.Aux = sym
   5281 		v.AddArg(ptr)
   5282 		v.AddArg(idx)
   5283 		v.AddArg(mem)
   5284 		return true
   5285 	}
   5286 	// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
   5287 	// cond:
   5288 	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
   5289 	for {
   5290 		c := v.AuxInt
   5291 		sym := v.Aux
   5292 		ptr := v.Args[0]
   5293 		v_1 := v.Args[1]
   5294 		if v_1.Op != OpAMD64ADDQconst {
   5295 			break
   5296 		}
   5297 		d := v_1.AuxInt
   5298 		idx := v_1.Args[0]
   5299 		mem := v.Args[2]
   5300 		v.reset(OpAMD64MOVLloadidx1)
   5301 		v.AuxInt = c + d
   5302 		v.Aux = sym
   5303 		v.AddArg(ptr)
   5304 		v.AddArg(idx)
   5305 		v.AddArg(mem)
   5306 		return true
   5307 	}
   5308 	return false
   5309 }
   5310 func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool {
   5311 	b := v.Block
   5312 	_ = b
   5313 	// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
   5314 	// cond:
   5315 	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
   5316 	for {
   5317 		c := v.AuxInt
   5318 		sym := v.Aux
   5319 		v_0 := v.Args[0]
   5320 		if v_0.Op != OpAMD64ADDQconst {
   5321 			break
   5322 		}
   5323 		d := v_0.AuxInt
   5324 		ptr := v_0.Args[0]
   5325 		idx := v.Args[1]
   5326 		mem := v.Args[2]
   5327 		v.reset(OpAMD64MOVLloadidx4)
   5328 		v.AuxInt = c + d
   5329 		v.Aux = sym
   5330 		v.AddArg(ptr)
   5331 		v.AddArg(idx)
   5332 		v.AddArg(mem)
   5333 		return true
   5334 	}
   5335 	// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
   5336 	// cond:
   5337 	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
   5338 	for {
   5339 		c := v.AuxInt
   5340 		sym := v.Aux
   5341 		ptr := v.Args[0]
   5342 		v_1 := v.Args[1]
   5343 		if v_1.Op != OpAMD64ADDQconst {
   5344 			break
   5345 		}
   5346 		d := v_1.AuxInt
   5347 		idx := v_1.Args[0]
   5348 		mem := v.Args[2]
   5349 		v.reset(OpAMD64MOVLloadidx4)
   5350 		v.AuxInt = c + 4*d
   5351 		v.Aux = sym
   5352 		v.AddArg(ptr)
   5353 		v.AddArg(idx)
   5354 		v.AddArg(mem)
   5355 		return true
   5356 	}
   5357 	return false
   5358 }
   5359 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
   5360 	b := v.Block
   5361 	_ = b
   5362 	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
   5363 	// cond:
   5364 	// result: (MOVLstore [off] {sym} ptr x mem)
   5365 	for {
   5366 		off := v.AuxInt
   5367 		sym := v.Aux
   5368 		ptr := v.Args[0]
   5369 		v_1 := v.Args[1]
   5370 		if v_1.Op != OpAMD64MOVLQSX {
   5371 			break
   5372 		}
   5373 		x := v_1.Args[0]
   5374 		mem := v.Args[2]
   5375 		v.reset(OpAMD64MOVLstore)
   5376 		v.AuxInt = off
   5377 		v.Aux = sym
   5378 		v.AddArg(ptr)
   5379 		v.AddArg(x)
   5380 		v.AddArg(mem)
   5381 		return true
   5382 	}
   5383 	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
   5384 	// cond:
   5385 	// result: (MOVLstore [off] {sym} ptr x mem)
   5386 	for {
   5387 		off := v.AuxInt
   5388 		sym := v.Aux
   5389 		ptr := v.Args[0]
   5390 		v_1 := v.Args[1]
   5391 		if v_1.Op != OpAMD64MOVLQZX {
   5392 			break
   5393 		}
   5394 		x := v_1.Args[0]
   5395 		mem := v.Args[2]
   5396 		v.reset(OpAMD64MOVLstore)
   5397 		v.AuxInt = off
   5398 		v.Aux = sym
   5399 		v.AddArg(ptr)
   5400 		v.AddArg(x)
   5401 		v.AddArg(mem)
   5402 		return true
   5403 	}
   5404 	// match: (MOVLstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
   5405 	// cond: is32Bit(off1+off2)
   5406 	// result: (MOVLstore  [off1+off2] {sym} ptr val mem)
   5407 	for {
   5408 		off1 := v.AuxInt
   5409 		sym := v.Aux
   5410 		v_0 := v.Args[0]
   5411 		if v_0.Op != OpAMD64ADDQconst {
   5412 			break
   5413 		}
   5414 		off2 := v_0.AuxInt
   5415 		ptr := v_0.Args[0]
   5416 		val := v.Args[1]
   5417 		mem := v.Args[2]
   5418 		if !(is32Bit(off1 + off2)) {
   5419 			break
   5420 		}
   5421 		v.reset(OpAMD64MOVLstore)
   5422 		v.AuxInt = off1 + off2
   5423 		v.Aux = sym
   5424 		v.AddArg(ptr)
   5425 		v.AddArg(val)
   5426 		v.AddArg(mem)
   5427 		return true
   5428 	}
   5429 	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
   5430 	// cond: validOff(off)
   5431 	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
   5432 	for {
   5433 		off := v.AuxInt
   5434 		sym := v.Aux
   5435 		ptr := v.Args[0]
   5436 		v_1 := v.Args[1]
   5437 		if v_1.Op != OpAMD64MOVLconst {
   5438 			break
   5439 		}
   5440 		c := v_1.AuxInt
   5441 		mem := v.Args[2]
   5442 		if !(validOff(off)) {
   5443 			break
   5444 		}
   5445 		v.reset(OpAMD64MOVLstoreconst)
   5446 		v.AuxInt = makeValAndOff(int64(int32(c)), off)
   5447 		v.Aux = sym
   5448 		v.AddArg(ptr)
   5449 		v.AddArg(mem)
   5450 		return true
   5451 	}
   5452 	// match: (MOVLstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
   5453 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   5454 	// result: (MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   5455 	for {
   5456 		off1 := v.AuxInt
   5457 		sym1 := v.Aux
   5458 		v_0 := v.Args[0]
   5459 		if v_0.Op != OpAMD64LEAQ {
   5460 			break
   5461 		}
   5462 		off2 := v_0.AuxInt
   5463 		sym2 := v_0.Aux
   5464 		base := v_0.Args[0]
   5465 		val := v.Args[1]
   5466 		mem := v.Args[2]
   5467 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   5468 			break
   5469 		}
   5470 		v.reset(OpAMD64MOVLstore)
   5471 		v.AuxInt = off1 + off2
   5472 		v.Aux = mergeSym(sym1, sym2)
   5473 		v.AddArg(base)
   5474 		v.AddArg(val)
   5475 		v.AddArg(mem)
   5476 		return true
   5477 	}
   5478 	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
   5479 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   5480 	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   5481 	for {
   5482 		off1 := v.AuxInt
   5483 		sym1 := v.Aux
   5484 		v_0 := v.Args[0]
   5485 		if v_0.Op != OpAMD64LEAQ1 {
   5486 			break
   5487 		}
   5488 		off2 := v_0.AuxInt
   5489 		sym2 := v_0.Aux
   5490 		ptr := v_0.Args[0]
   5491 		idx := v_0.Args[1]
   5492 		val := v.Args[1]
   5493 		mem := v.Args[2]
   5494 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   5495 			break
   5496 		}
   5497 		v.reset(OpAMD64MOVLstoreidx1)
   5498 		v.AuxInt = off1 + off2
   5499 		v.Aux = mergeSym(sym1, sym2)
   5500 		v.AddArg(ptr)
   5501 		v.AddArg(idx)
   5502 		v.AddArg(val)
   5503 		v.AddArg(mem)
   5504 		return true
   5505 	}
   5506 	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
   5507 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   5508 	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   5509 	for {
   5510 		off1 := v.AuxInt
   5511 		sym1 := v.Aux
   5512 		v_0 := v.Args[0]
   5513 		if v_0.Op != OpAMD64LEAQ4 {
   5514 			break
   5515 		}
   5516 		off2 := v_0.AuxInt
   5517 		sym2 := v_0.Aux
   5518 		ptr := v_0.Args[0]
   5519 		idx := v_0.Args[1]
   5520 		val := v.Args[1]
   5521 		mem := v.Args[2]
   5522 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   5523 			break
   5524 		}
   5525 		v.reset(OpAMD64MOVLstoreidx4)
   5526 		v.AuxInt = off1 + off2
   5527 		v.Aux = mergeSym(sym1, sym2)
   5528 		v.AddArg(ptr)
   5529 		v.AddArg(idx)
   5530 		v.AddArg(val)
   5531 		v.AddArg(mem)
   5532 		return true
   5533 	}
   5534 	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
   5535 	// cond: ptr.Op != OpSB
   5536 	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
   5537 	for {
   5538 		off := v.AuxInt
   5539 		sym := v.Aux
   5540 		v_0 := v.Args[0]
   5541 		if v_0.Op != OpAMD64ADDQ {
   5542 			break
   5543 		}
   5544 		ptr := v_0.Args[0]
   5545 		idx := v_0.Args[1]
   5546 		val := v.Args[1]
   5547 		mem := v.Args[2]
   5548 		if !(ptr.Op != OpSB) {
   5549 			break
   5550 		}
   5551 		v.reset(OpAMD64MOVLstoreidx1)
   5552 		v.AuxInt = off
   5553 		v.Aux = sym
   5554 		v.AddArg(ptr)
   5555 		v.AddArg(idx)
   5556 		v.AddArg(val)
   5557 		v.AddArg(mem)
   5558 		return true
   5559 	}
   5560 	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
   5561 	// cond: x.Uses == 1   && clobber(x)
   5562 	// result: (MOVQstore [i-4] {s} p w mem)
   5563 	for {
   5564 		i := v.AuxInt
   5565 		s := v.Aux
   5566 		p := v.Args[0]
   5567 		v_1 := v.Args[1]
   5568 		if v_1.Op != OpAMD64SHRQconst {
   5569 			break
   5570 		}
   5571 		if v_1.AuxInt != 32 {
   5572 			break
   5573 		}
   5574 		w := v_1.Args[0]
   5575 		x := v.Args[2]
   5576 		if x.Op != OpAMD64MOVLstore {
   5577 			break
   5578 		}
   5579 		if x.AuxInt != i-4 {
   5580 			break
   5581 		}
   5582 		if x.Aux != s {
   5583 			break
   5584 		}
   5585 		if p != x.Args[0] {
   5586 			break
   5587 		}
   5588 		if w != x.Args[1] {
   5589 			break
   5590 		}
   5591 		mem := x.Args[2]
   5592 		if !(x.Uses == 1 && clobber(x)) {
   5593 			break
   5594 		}
   5595 		v.reset(OpAMD64MOVQstore)
   5596 		v.AuxInt = i - 4
   5597 		v.Aux = s
   5598 		v.AddArg(p)
   5599 		v.AddArg(w)
   5600 		v.AddArg(mem)
   5601 		return true
   5602 	}
   5603 	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
   5604 	// cond: x.Uses == 1   && clobber(x)
   5605 	// result: (MOVQstore [i-4] {s} p w0 mem)
   5606 	for {
   5607 		i := v.AuxInt
   5608 		s := v.Aux
   5609 		p := v.Args[0]
   5610 		v_1 := v.Args[1]
   5611 		if v_1.Op != OpAMD64SHRQconst {
   5612 			break
   5613 		}
   5614 		j := v_1.AuxInt
   5615 		w := v_1.Args[0]
   5616 		x := v.Args[2]
   5617 		if x.Op != OpAMD64MOVLstore {
   5618 			break
   5619 		}
   5620 		if x.AuxInt != i-4 {
   5621 			break
   5622 		}
   5623 		if x.Aux != s {
   5624 			break
   5625 		}
   5626 		if p != x.Args[0] {
   5627 			break
   5628 		}
   5629 		w0 := x.Args[1]
   5630 		if w0.Op != OpAMD64SHRQconst {
   5631 			break
   5632 		}
   5633 		if w0.AuxInt != j-32 {
   5634 			break
   5635 		}
   5636 		if w != w0.Args[0] {
   5637 			break
   5638 		}
   5639 		mem := x.Args[2]
   5640 		if !(x.Uses == 1 && clobber(x)) {
   5641 			break
   5642 		}
   5643 		v.reset(OpAMD64MOVQstore)
   5644 		v.AuxInt = i - 4
   5645 		v.Aux = s
   5646 		v.AddArg(p)
   5647 		v.AddArg(w0)
   5648 		v.AddArg(mem)
   5649 		return true
   5650 	}
   5651 	// match: (MOVLstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
   5652 	// cond: canMergeSym(sym1, sym2)
   5653 	// result: (MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   5654 	for {
   5655 		off1 := v.AuxInt
   5656 		sym1 := v.Aux
   5657 		v_0 := v.Args[0]
   5658 		if v_0.Op != OpAMD64LEAL {
   5659 			break
   5660 		}
   5661 		off2 := v_0.AuxInt
   5662 		sym2 := v_0.Aux
   5663 		base := v_0.Args[0]
   5664 		val := v.Args[1]
   5665 		mem := v.Args[2]
   5666 		if !(canMergeSym(sym1, sym2)) {
   5667 			break
   5668 		}
   5669 		v.reset(OpAMD64MOVLstore)
   5670 		v.AuxInt = off1 + off2
   5671 		v.Aux = mergeSym(sym1, sym2)
   5672 		v.AddArg(base)
   5673 		v.AddArg(val)
   5674 		v.AddArg(mem)
   5675 		return true
   5676 	}
   5677 	// match: (MOVLstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
   5678 	// cond: is32Bit(off1+off2)
   5679 	// result: (MOVLstore  [off1+off2] {sym} ptr val mem)
   5680 	for {
   5681 		off1 := v.AuxInt
   5682 		sym := v.Aux
   5683 		v_0 := v.Args[0]
   5684 		if v_0.Op != OpAMD64ADDLconst {
   5685 			break
   5686 		}
   5687 		off2 := v_0.AuxInt
   5688 		ptr := v_0.Args[0]
   5689 		val := v.Args[1]
   5690 		mem := v.Args[2]
   5691 		if !(is32Bit(off1 + off2)) {
   5692 			break
   5693 		}
   5694 		v.reset(OpAMD64MOVLstore)
   5695 		v.AuxInt = off1 + off2
   5696 		v.Aux = sym
   5697 		v.AddArg(ptr)
   5698 		v.AddArg(val)
   5699 		v.AddArg(mem)
   5700 		return true
   5701 	}
   5702 	return false
   5703 }
   5704 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool {
   5705 	b := v.Block
   5706 	_ = b
   5707 	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
   5708 	// cond: ValAndOff(sc).canAdd(off)
   5709 	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   5710 	for {
   5711 		sc := v.AuxInt
   5712 		s := v.Aux
   5713 		v_0 := v.Args[0]
   5714 		if v_0.Op != OpAMD64ADDQconst {
   5715 			break
   5716 		}
   5717 		off := v_0.AuxInt
   5718 		ptr := v_0.Args[0]
   5719 		mem := v.Args[1]
   5720 		if !(ValAndOff(sc).canAdd(off)) {
   5721 			break
   5722 		}
   5723 		v.reset(OpAMD64MOVLstoreconst)
   5724 		v.AuxInt = ValAndOff(sc).add(off)
   5725 		v.Aux = s
   5726 		v.AddArg(ptr)
   5727 		v.AddArg(mem)
   5728 		return true
   5729 	}
   5730 	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
   5731 	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   5732 	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   5733 	for {
   5734 		sc := v.AuxInt
   5735 		sym1 := v.Aux
   5736 		v_0 := v.Args[0]
   5737 		if v_0.Op != OpAMD64LEAQ {
   5738 			break
   5739 		}
   5740 		off := v_0.AuxInt
   5741 		sym2 := v_0.Aux
   5742 		ptr := v_0.Args[0]
   5743 		mem := v.Args[1]
   5744 		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
   5745 			break
   5746 		}
   5747 		v.reset(OpAMD64MOVLstoreconst)
   5748 		v.AuxInt = ValAndOff(sc).add(off)
   5749 		v.Aux = mergeSym(sym1, sym2)
   5750 		v.AddArg(ptr)
   5751 		v.AddArg(mem)
   5752 		return true
   5753 	}
   5754 	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
   5755 	// cond: canMergeSym(sym1, sym2)
   5756 	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   5757 	for {
   5758 		x := v.AuxInt
   5759 		sym1 := v.Aux
   5760 		v_0 := v.Args[0]
   5761 		if v_0.Op != OpAMD64LEAQ1 {
   5762 			break
   5763 		}
   5764 		off := v_0.AuxInt
   5765 		sym2 := v_0.Aux
   5766 		ptr := v_0.Args[0]
   5767 		idx := v_0.Args[1]
   5768 		mem := v.Args[1]
   5769 		if !(canMergeSym(sym1, sym2)) {
   5770 			break
   5771 		}
   5772 		v.reset(OpAMD64MOVLstoreconstidx1)
   5773 		v.AuxInt = ValAndOff(x).add(off)
   5774 		v.Aux = mergeSym(sym1, sym2)
   5775 		v.AddArg(ptr)
   5776 		v.AddArg(idx)
   5777 		v.AddArg(mem)
   5778 		return true
   5779 	}
   5780 	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
   5781 	// cond: canMergeSym(sym1, sym2)
   5782 	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   5783 	for {
   5784 		x := v.AuxInt
   5785 		sym1 := v.Aux
   5786 		v_0 := v.Args[0]
   5787 		if v_0.Op != OpAMD64LEAQ4 {
   5788 			break
   5789 		}
   5790 		off := v_0.AuxInt
   5791 		sym2 := v_0.Aux
   5792 		ptr := v_0.Args[0]
   5793 		idx := v_0.Args[1]
   5794 		mem := v.Args[1]
   5795 		if !(canMergeSym(sym1, sym2)) {
   5796 			break
   5797 		}
   5798 		v.reset(OpAMD64MOVLstoreconstidx4)
   5799 		v.AuxInt = ValAndOff(x).add(off)
   5800 		v.Aux = mergeSym(sym1, sym2)
   5801 		v.AddArg(ptr)
   5802 		v.AddArg(idx)
   5803 		v.AddArg(mem)
   5804 		return true
   5805 	}
   5806 	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
   5807 	// cond:
   5808 	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
   5809 	for {
   5810 		x := v.AuxInt
   5811 		sym := v.Aux
   5812 		v_0 := v.Args[0]
   5813 		if v_0.Op != OpAMD64ADDQ {
   5814 			break
   5815 		}
   5816 		ptr := v_0.Args[0]
   5817 		idx := v_0.Args[1]
   5818 		mem := v.Args[1]
   5819 		v.reset(OpAMD64MOVLstoreconstidx1)
   5820 		v.AuxInt = x
   5821 		v.Aux = sym
   5822 		v.AddArg(ptr)
   5823 		v.AddArg(idx)
   5824 		v.AddArg(mem)
   5825 		return true
   5826 	}
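	// The next rule merges two adjacent 4-byte constant stores into one
	// 8-byte store. On little-endian AMD64 the value at the lower offset
	// becomes the low half of the combined constant, so storing
	// 0x01020304 at off and 0x05060708 at off+4 becomes a single
	// MOVQstore of 0x0506070801020304 at off.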
   5827 	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
   5828 	// cond: x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)
   5829 	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
   5830 	for {
   5831 		c := v.AuxInt
   5832 		s := v.Aux
   5833 		p := v.Args[0]
   5834 		x := v.Args[1]
   5835 		if x.Op != OpAMD64MOVLstoreconst {
   5836 			break
   5837 		}
   5838 		a := x.AuxInt
   5839 		if x.Aux != s {
   5840 			break
   5841 		}
   5842 		if p != x.Args[0] {
   5843 			break
   5844 		}
   5845 		mem := x.Args[1]
   5846 		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
   5847 			break
   5848 		}
   5849 		v.reset(OpAMD64MOVQstore)
   5850 		v.AuxInt = ValAndOff(a).Off()
   5851 		v.Aux = s
   5852 		v.AddArg(p)
   5853 		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
   5854 		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
   5855 		v.AddArg(v0)
   5856 		v.AddArg(mem)
   5857 		return true
   5858 	}
   5859 	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
   5860 	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   5861 	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   5862 	for {
   5863 		sc := v.AuxInt
   5864 		sym1 := v.Aux
   5865 		v_0 := v.Args[0]
   5866 		if v_0.Op != OpAMD64LEAL {
   5867 			break
   5868 		}
   5869 		off := v_0.AuxInt
   5870 		sym2 := v_0.Aux
   5871 		ptr := v_0.Args[0]
   5872 		mem := v.Args[1]
   5873 		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
   5874 			break
   5875 		}
   5876 		v.reset(OpAMD64MOVLstoreconst)
   5877 		v.AuxInt = ValAndOff(sc).add(off)
   5878 		v.Aux = mergeSym(sym1, sym2)
   5879 		v.AddArg(ptr)
   5880 		v.AddArg(mem)
   5881 		return true
   5882 	}
   5883 	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
   5884 	// cond: ValAndOff(sc).canAdd(off)
   5885 	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   5886 	for {
   5887 		sc := v.AuxInt
   5888 		s := v.Aux
   5889 		v_0 := v.Args[0]
   5890 		if v_0.Op != OpAMD64ADDLconst {
   5891 			break
   5892 		}
   5893 		off := v_0.AuxInt
   5894 		ptr := v_0.Args[0]
   5895 		mem := v.Args[1]
   5896 		if !(ValAndOff(sc).canAdd(off)) {
   5897 			break
   5898 		}
   5899 		v.reset(OpAMD64MOVLstoreconst)
   5900 		v.AuxInt = ValAndOff(sc).add(off)
   5901 		v.Aux = s
   5902 		v.AddArg(ptr)
   5903 		v.AddArg(mem)
   5904 		return true
   5905 	}
   5906 	return false
   5907 }
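// rewriteValueAMD64_OpAMD64MOVLstoreconstidx1 normalizes the unscaled
// indexed constant store: an index shifted left by 2 upgrades it to the
// 4-scaled form, ADDQconst on pointer or index folds into the ValAndOff
// offset, and adjacent constant pairs merge into MOVQstoreidx1.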
   5908 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool {
   5909 	b := v.Block
   5910 	_ = b
   5911 	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
   5912 	// cond:
   5913 	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
   5914 	for {
   5915 		c := v.AuxInt
   5916 		sym := v.Aux
   5917 		ptr := v.Args[0]
   5918 		v_1 := v.Args[1]
   5919 		if v_1.Op != OpAMD64SHLQconst {
   5920 			break
   5921 		}
   5922 		if v_1.AuxInt != 2 {
   5923 			break
   5924 		}
   5925 		idx := v_1.Args[0]
   5926 		mem := v.Args[2]
   5927 		v.reset(OpAMD64MOVLstoreconstidx4)
   5928 		v.AuxInt = c
   5929 		v.Aux = sym
   5930 		v.AddArg(ptr)
   5931 		v.AddArg(idx)
   5932 		v.AddArg(mem)
   5933 		return true
   5934 	}
   5935 	// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
   5936 	// cond:
   5937 	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   5938 	for {
   5939 		x := v.AuxInt
   5940 		sym := v.Aux
   5941 		v_0 := v.Args[0]
   5942 		if v_0.Op != OpAMD64ADDQconst {
   5943 			break
   5944 		}
   5945 		c := v_0.AuxInt
   5946 		ptr := v_0.Args[0]
   5947 		idx := v.Args[1]
   5948 		mem := v.Args[2]
   5949 		v.reset(OpAMD64MOVLstoreconstidx1)
   5950 		v.AuxInt = ValAndOff(x).add(c)
   5951 		v.Aux = sym
   5952 		v.AddArg(ptr)
   5953 		v.AddArg(idx)
   5954 		v.AddArg(mem)
   5955 		return true
   5956 	}
   5957 	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
   5958 	// cond:
   5959 	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   5960 	for {
   5961 		x := v.AuxInt
   5962 		sym := v.Aux
   5963 		ptr := v.Args[0]
   5964 		v_1 := v.Args[1]
   5965 		if v_1.Op != OpAMD64ADDQconst {
   5966 			break
   5967 		}
   5968 		c := v_1.AuxInt
   5969 		idx := v_1.Args[0]
   5970 		mem := v.Args[2]
   5971 		v.reset(OpAMD64MOVLstoreconstidx1)
   5972 		v.AuxInt = ValAndOff(x).add(c)
   5973 		v.Aux = sym
   5974 		v.AddArg(ptr)
   5975 		v.AddArg(idx)
   5976 		v.AddArg(mem)
   5977 		return true
   5978 	}
   5979 	// match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
   5980 	// cond: x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)
   5981 	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
   5982 	for {
   5983 		c := v.AuxInt
   5984 		s := v.Aux
   5985 		p := v.Args[0]
   5986 		i := v.Args[1]
   5987 		x := v.Args[2]
   5988 		if x.Op != OpAMD64MOVLstoreconstidx1 {
   5989 			break
   5990 		}
   5991 		a := x.AuxInt
   5992 		if x.Aux != s {
   5993 			break
   5994 		}
   5995 		if p != x.Args[0] {
   5996 			break
   5997 		}
   5998 		if i != x.Args[1] {
   5999 			break
   6000 		}
   6001 		mem := x.Args[2]
   6002 		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
   6003 			break
   6004 		}
   6005 		v.reset(OpAMD64MOVQstoreidx1)
   6006 		v.AuxInt = ValAndOff(a).Off()
   6007 		v.Aux = s
   6008 		v.AddArg(p)
   6009 		v.AddArg(i)
   6010 		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
   6011 		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
   6012 		v.AddArg(v0)
   6013 		v.AddArg(mem)
   6014 		return true
   6015 	}
   6016 	return false
   6017 }
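// rewriteValueAMD64_OpAMD64MOVLstoreconstidx4 handles the 4-scaled form:
// a constant added to the pointer contributes c bytes, but one added to
// the index contributes 4*c. The pair merge rebuilds a byte index with
// SHLQconst [2] because the resulting MOVQstoreidx1 is unscaled.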
   6018 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool {
   6019 	b := v.Block
   6020 	_ = b
   6021 	// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
   6022 	// cond:
   6023 	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   6024 	for {
   6025 		x := v.AuxInt
   6026 		sym := v.Aux
   6027 		v_0 := v.Args[0]
   6028 		if v_0.Op != OpAMD64ADDQconst {
   6029 			break
   6030 		}
   6031 		c := v_0.AuxInt
   6032 		ptr := v_0.Args[0]
   6033 		idx := v.Args[1]
   6034 		mem := v.Args[2]
   6035 		v.reset(OpAMD64MOVLstoreconstidx4)
   6036 		v.AuxInt = ValAndOff(x).add(c)
   6037 		v.Aux = sym
   6038 		v.AddArg(ptr)
   6039 		v.AddArg(idx)
   6040 		v.AddArg(mem)
   6041 		return true
   6042 	}
   6043 	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
   6044 	// cond:
   6045 	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
   6046 	for {
   6047 		x := v.AuxInt
   6048 		sym := v.Aux
   6049 		ptr := v.Args[0]
   6050 		v_1 := v.Args[1]
   6051 		if v_1.Op != OpAMD64ADDQconst {
   6052 			break
   6053 		}
   6054 		c := v_1.AuxInt
   6055 		idx := v_1.Args[0]
   6056 		mem := v.Args[2]
   6057 		v.reset(OpAMD64MOVLstoreconstidx4)
   6058 		v.AuxInt = ValAndOff(x).add(4 * c)
   6059 		v.Aux = sym
   6060 		v.AddArg(ptr)
   6061 		v.AddArg(idx)
   6062 		v.AddArg(mem)
   6063 		return true
   6064 	}
   6065 	// match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
   6066 	// cond: x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)
   6067 	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
   6068 	for {
   6069 		c := v.AuxInt
   6070 		s := v.Aux
   6071 		p := v.Args[0]
   6072 		i := v.Args[1]
   6073 		x := v.Args[2]
   6074 		if x.Op != OpAMD64MOVLstoreconstidx4 {
   6075 			break
   6076 		}
   6077 		a := x.AuxInt
   6078 		if x.Aux != s {
   6079 			break
   6080 		}
   6081 		if p != x.Args[0] {
   6082 			break
   6083 		}
   6084 		if i != x.Args[1] {
   6085 			break
   6086 		}
   6087 		mem := x.Args[2]
   6088 		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
   6089 			break
   6090 		}
   6091 		v.reset(OpAMD64MOVQstoreidx1)
   6092 		v.AuxInt = ValAndOff(a).Off()
   6093 		v.Aux = s
   6094 		v.AddArg(p)
   6095 		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type)
   6096 		v0.AuxInt = 2
   6097 		v0.AddArg(i)
   6098 		v.AddArg(v0)
   6099 		v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
   6100 		v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
   6101 		v.AddArg(v1)
   6102 		v.AddArg(mem)
   6103 		return true
   6104 	}
   6105 	return false
   6106 }
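// rewriteValueAMD64_OpAMD64MOVLstoreidx1 rewrites unscaled indexed
// 32-bit stores: a SHLQconst [2] index upgrades to MOVLstoreidx4,
// ADDQconst folds into the displacement, and stores of the two halves
// of a 64-bit value at adjacent offsets merge into MOVQstoreidx1.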
   6107 func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool {
   6108 	b := v.Block
   6109 	_ = b
   6110 	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
   6111 	// cond:
   6112 	// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
   6113 	for {
   6114 		c := v.AuxInt
   6115 		sym := v.Aux
   6116 		ptr := v.Args[0]
   6117 		v_1 := v.Args[1]
   6118 		if v_1.Op != OpAMD64SHLQconst {
   6119 			break
   6120 		}
   6121 		if v_1.AuxInt != 2 {
   6122 			break
   6123 		}
   6124 		idx := v_1.Args[0]
   6125 		val := v.Args[2]
   6126 		mem := v.Args[3]
   6127 		v.reset(OpAMD64MOVLstoreidx4)
   6128 		v.AuxInt = c
   6129 		v.Aux = sym
   6130 		v.AddArg(ptr)
   6131 		v.AddArg(idx)
   6132 		v.AddArg(val)
   6133 		v.AddArg(mem)
   6134 		return true
   6135 	}
   6136 	// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
   6137 	// cond:
   6138 	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
   6139 	for {
   6140 		c := v.AuxInt
   6141 		sym := v.Aux
   6142 		v_0 := v.Args[0]
   6143 		if v_0.Op != OpAMD64ADDQconst {
   6144 			break
   6145 		}
   6146 		d := v_0.AuxInt
   6147 		ptr := v_0.Args[0]
   6148 		idx := v.Args[1]
   6149 		val := v.Args[2]
   6150 		mem := v.Args[3]
   6151 		v.reset(OpAMD64MOVLstoreidx1)
   6152 		v.AuxInt = c + d
   6153 		v.Aux = sym
   6154 		v.AddArg(ptr)
   6155 		v.AddArg(idx)
   6156 		v.AddArg(val)
   6157 		v.AddArg(mem)
   6158 		return true
   6159 	}
   6160 	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
   6161 	// cond:
   6162 	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
   6163 	for {
   6164 		c := v.AuxInt
   6165 		sym := v.Aux
   6166 		ptr := v.Args[0]
   6167 		v_1 := v.Args[1]
   6168 		if v_1.Op != OpAMD64ADDQconst {
   6169 			break
   6170 		}
   6171 		d := v_1.AuxInt
   6172 		idx := v_1.Args[0]
   6173 		val := v.Args[2]
   6174 		mem := v.Args[3]
   6175 		v.reset(OpAMD64MOVLstoreidx1)
   6176 		v.AuxInt = c + d
   6177 		v.Aux = sym
   6178 		v.AddArg(ptr)
   6179 		v.AddArg(idx)
   6180 		v.AddArg(val)
   6181 		v.AddArg(mem)
   6182 		return true
   6183 	}
   6184 	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
   6185 	// cond: x.Uses == 1 && clobber(x)
   6186 	// result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
   6187 	for {
   6188 		i := v.AuxInt
   6189 		s := v.Aux
   6190 		p := v.Args[0]
   6191 		idx := v.Args[1]
   6192 		v_2 := v.Args[2]
   6193 		if v_2.Op != OpAMD64SHRQconst {
   6194 			break
   6195 		}
   6196 		if v_2.AuxInt != 32 {
   6197 			break
   6198 		}
   6199 		w := v_2.Args[0]
   6200 		x := v.Args[3]
   6201 		if x.Op != OpAMD64MOVLstoreidx1 {
   6202 			break
   6203 		}
   6204 		if x.AuxInt != i-4 {
   6205 			break
   6206 		}
   6207 		if x.Aux != s {
   6208 			break
   6209 		}
   6210 		if p != x.Args[0] {
   6211 			break
   6212 		}
   6213 		if idx != x.Args[1] {
   6214 			break
   6215 		}
   6216 		if w != x.Args[2] {
   6217 			break
   6218 		}
   6219 		mem := x.Args[3]
   6220 		if !(x.Uses == 1 && clobber(x)) {
   6221 			break
   6222 		}
   6223 		v.reset(OpAMD64MOVQstoreidx1)
   6224 		v.AuxInt = i - 4
   6225 		v.Aux = s
   6226 		v.AddArg(p)
   6227 		v.AddArg(idx)
   6228 		v.AddArg(w)
   6229 		v.AddArg(mem)
   6230 		return true
   6231 	}
   6232 	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
   6233 	// cond: x.Uses == 1 && clobber(x)
   6234 	// result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
   6235 	for {
   6236 		i := v.AuxInt
   6237 		s := v.Aux
   6238 		p := v.Args[0]
   6239 		idx := v.Args[1]
   6240 		v_2 := v.Args[2]
   6241 		if v_2.Op != OpAMD64SHRQconst {
   6242 			break
   6243 		}
   6244 		j := v_2.AuxInt
   6245 		w := v_2.Args[0]
   6246 		x := v.Args[3]
   6247 		if x.Op != OpAMD64MOVLstoreidx1 {
   6248 			break
   6249 		}
   6250 		if x.AuxInt != i-4 {
   6251 			break
   6252 		}
   6253 		if x.Aux != s {
   6254 			break
   6255 		}
   6256 		if p != x.Args[0] {
   6257 			break
   6258 		}
   6259 		if idx != x.Args[1] {
   6260 			break
   6261 		}
   6262 		w0 := x.Args[2]
   6263 		if w0.Op != OpAMD64SHRQconst {
   6264 			break
   6265 		}
   6266 		if w0.AuxInt != j-32 {
   6267 			break
   6268 		}
   6269 		if w != w0.Args[0] {
   6270 			break
   6271 		}
   6272 		mem := x.Args[3]
   6273 		if !(x.Uses == 1 && clobber(x)) {
   6274 			break
   6275 		}
   6276 		v.reset(OpAMD64MOVQstoreidx1)
   6277 		v.AuxInt = i - 4
   6278 		v.Aux = s
   6279 		v.AddArg(p)
   6280 		v.AddArg(idx)
   6281 		v.AddArg(w0)
   6282 		v.AddArg(mem)
   6283 		return true
   6284 	}
   6285 	return false
   6286 }
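// rewriteValueAMD64_OpAMD64MOVLstoreidx4 mirrors the unscaled case for
// 4-scaled indexes (displacements grow by d from the pointer, 4*d from
// the index); merged 64-bit stores fall back to MOVQstoreidx1 with an
// explicitly shifted index.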
   6287 func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool {
   6288 	b := v.Block
   6289 	_ = b
   6290 	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
   6291 	// cond:
   6292 	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
   6293 	for {
   6294 		c := v.AuxInt
   6295 		sym := v.Aux
   6296 		v_0 := v.Args[0]
   6297 		if v_0.Op != OpAMD64ADDQconst {
   6298 			break
   6299 		}
   6300 		d := v_0.AuxInt
   6301 		ptr := v_0.Args[0]
   6302 		idx := v.Args[1]
   6303 		val := v.Args[2]
   6304 		mem := v.Args[3]
   6305 		v.reset(OpAMD64MOVLstoreidx4)
   6306 		v.AuxInt = c + d
   6307 		v.Aux = sym
   6308 		v.AddArg(ptr)
   6309 		v.AddArg(idx)
   6310 		v.AddArg(val)
   6311 		v.AddArg(mem)
   6312 		return true
   6313 	}
   6314 	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
   6315 	// cond:
   6316 	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
   6317 	for {
   6318 		c := v.AuxInt
   6319 		sym := v.Aux
   6320 		ptr := v.Args[0]
   6321 		v_1 := v.Args[1]
   6322 		if v_1.Op != OpAMD64ADDQconst {
   6323 			break
   6324 		}
   6325 		d := v_1.AuxInt
   6326 		idx := v_1.Args[0]
   6327 		val := v.Args[2]
   6328 		mem := v.Args[3]
   6329 		v.reset(OpAMD64MOVLstoreidx4)
   6330 		v.AuxInt = c + 4*d
   6331 		v.Aux = sym
   6332 		v.AddArg(ptr)
   6333 		v.AddArg(idx)
   6334 		v.AddArg(val)
   6335 		v.AddArg(mem)
   6336 		return true
   6337 	}
   6338 	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
   6339 	// cond: x.Uses == 1 && clobber(x)
   6340 	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
   6341 	for {
   6342 		i := v.AuxInt
   6343 		s := v.Aux
   6344 		p := v.Args[0]
   6345 		idx := v.Args[1]
   6346 		v_2 := v.Args[2]
   6347 		if v_2.Op != OpAMD64SHRQconst {
   6348 			break
   6349 		}
   6350 		if v_2.AuxInt != 32 {
   6351 			break
   6352 		}
   6353 		w := v_2.Args[0]
   6354 		x := v.Args[3]
   6355 		if x.Op != OpAMD64MOVLstoreidx4 {
   6356 			break
   6357 		}
   6358 		if x.AuxInt != i-4 {
   6359 			break
   6360 		}
   6361 		if x.Aux != s {
   6362 			break
   6363 		}
   6364 		if p != x.Args[0] {
   6365 			break
   6366 		}
   6367 		if idx != x.Args[1] {
   6368 			break
   6369 		}
   6370 		if w != x.Args[2] {
   6371 			break
   6372 		}
   6373 		mem := x.Args[3]
   6374 		if !(x.Uses == 1 && clobber(x)) {
   6375 			break
   6376 		}
   6377 		v.reset(OpAMD64MOVQstoreidx1)
   6378 		v.AuxInt = i - 4
   6379 		v.Aux = s
   6380 		v.AddArg(p)
   6381 		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
   6382 		v0.AuxInt = 2
   6383 		v0.AddArg(idx)
   6384 		v.AddArg(v0)
   6385 		v.AddArg(w)
   6386 		v.AddArg(mem)
   6387 		return true
   6388 	}
   6389 	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
   6390 	// cond: x.Uses == 1 && clobber(x)
   6391 	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
   6392 	for {
   6393 		i := v.AuxInt
   6394 		s := v.Aux
   6395 		p := v.Args[0]
   6396 		idx := v.Args[1]
   6397 		v_2 := v.Args[2]
   6398 		if v_2.Op != OpAMD64SHRQconst {
   6399 			break
   6400 		}
   6401 		j := v_2.AuxInt
   6402 		w := v_2.Args[0]
   6403 		x := v.Args[3]
   6404 		if x.Op != OpAMD64MOVLstoreidx4 {
   6405 			break
   6406 		}
   6407 		if x.AuxInt != i-4 {
   6408 			break
   6409 		}
   6410 		if x.Aux != s {
   6411 			break
   6412 		}
   6413 		if p != x.Args[0] {
   6414 			break
   6415 		}
   6416 		if idx != x.Args[1] {
   6417 			break
   6418 		}
   6419 		w0 := x.Args[2]
   6420 		if w0.Op != OpAMD64SHRQconst {
   6421 			break
   6422 		}
   6423 		if w0.AuxInt != j-32 {
   6424 			break
   6425 		}
   6426 		if w != w0.Args[0] {
   6427 			break
   6428 		}
   6429 		mem := x.Args[3]
   6430 		if !(x.Uses == 1 && clobber(x)) {
   6431 			break
   6432 		}
   6433 		v.reset(OpAMD64MOVQstoreidx1)
   6434 		v.AuxInt = i - 4
   6435 		v.Aux = s
   6436 		v.AddArg(p)
   6437 		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
   6438 		v0.AuxInt = 2
   6439 		v0.AddArg(idx)
   6440 		v.AddArg(v0)
   6441 		v.AddArg(w0)
   6442 		v.AddArg(mem)
   6443 		return true
   6444 	}
   6445 	return false
   6446 }
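// rewriteValueAMD64_OpAMD64MOVOload folds constant offsets (ADDQconst)
// and symbol references (LEAQ) into the 16-byte load, guarded by
// is32Bit and canMergeSym.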
   6447 func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool {
   6448 	b := v.Block
   6449 	_ = b
   6450 	// match: (MOVOload  [off1] {sym} (ADDQconst [off2] ptr) mem)
   6451 	// cond: is32Bit(off1+off2)
   6452 	// result: (MOVOload  [off1+off2] {sym} ptr mem)
   6453 	for {
   6454 		off1 := v.AuxInt
   6455 		sym := v.Aux
   6456 		v_0 := v.Args[0]
   6457 		if v_0.Op != OpAMD64ADDQconst {
   6458 			break
   6459 		}
   6460 		off2 := v_0.AuxInt
   6461 		ptr := v_0.Args[0]
   6462 		mem := v.Args[1]
   6463 		if !(is32Bit(off1 + off2)) {
   6464 			break
   6465 		}
   6466 		v.reset(OpAMD64MOVOload)
   6467 		v.AuxInt = off1 + off2
   6468 		v.Aux = sym
   6469 		v.AddArg(ptr)
   6470 		v.AddArg(mem)
   6471 		return true
   6472 	}
   6473 	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
   6474 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   6475 	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   6476 	for {
   6477 		off1 := v.AuxInt
   6478 		sym1 := v.Aux
   6479 		v_0 := v.Args[0]
   6480 		if v_0.Op != OpAMD64LEAQ {
   6481 			break
   6482 		}
   6483 		off2 := v_0.AuxInt
   6484 		sym2 := v_0.Aux
   6485 		base := v_0.Args[0]
   6486 		mem := v.Args[1]
   6487 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   6488 			break
   6489 		}
   6490 		v.reset(OpAMD64MOVOload)
   6491 		v.AuxInt = off1 + off2
   6492 		v.Aux = mergeSym(sym1, sym2)
   6493 		v.AddArg(base)
   6494 		v.AddArg(mem)
   6495 		return true
   6496 	}
   6497 	return false
   6498 }
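// rewriteValueAMD64_OpAMD64MOVOstore applies the same two addressing
// folds to the 16-byte store.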
   6499 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool {
   6500 	b := v.Block
   6501 	_ = b
   6502 	// match: (MOVOstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
   6503 	// cond: is32Bit(off1+off2)
   6504 	// result: (MOVOstore  [off1+off2] {sym} ptr val mem)
   6505 	for {
   6506 		off1 := v.AuxInt
   6507 		sym := v.Aux
   6508 		v_0 := v.Args[0]
   6509 		if v_0.Op != OpAMD64ADDQconst {
   6510 			break
   6511 		}
   6512 		off2 := v_0.AuxInt
   6513 		ptr := v_0.Args[0]
   6514 		val := v.Args[1]
   6515 		mem := v.Args[2]
   6516 		if !(is32Bit(off1 + off2)) {
   6517 			break
   6518 		}
   6519 		v.reset(OpAMD64MOVOstore)
   6520 		v.AuxInt = off1 + off2
   6521 		v.Aux = sym
   6522 		v.AddArg(ptr)
   6523 		v.AddArg(val)
   6524 		v.AddArg(mem)
   6525 		return true
   6526 	}
   6527 	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
   6528 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   6529 	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   6530 	for {
   6531 		off1 := v.AuxInt
   6532 		sym1 := v.Aux
   6533 		v_0 := v.Args[0]
   6534 		if v_0.Op != OpAMD64LEAQ {
   6535 			break
   6536 		}
   6537 		off2 := v_0.AuxInt
   6538 		sym2 := v_0.Aux
   6539 		base := v_0.Args[0]
   6540 		val := v.Args[1]
   6541 		mem := v.Args[2]
   6542 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   6543 			break
   6544 		}
   6545 		v.reset(OpAMD64MOVOstore)
   6546 		v.AuxInt = off1 + off2
   6547 		v.Aux = mergeSym(sym1, sym2)
   6548 		v.AddArg(base)
   6549 		v.AddArg(val)
   6550 		v.AddArg(mem)
   6551 		return true
   6552 	}
   6553 	return false
   6554 }
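// rewriteValueAMD64_OpAMD64MOVQatomicload folds addressing only:
// offsets and symbols move into the aux fields, while the load itself
// is left intact so it remains a single atomic operation.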
   6555 func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value, config *Config) bool {
   6556 	b := v.Block
   6557 	_ = b
   6558 	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
   6559 	// cond: is32Bit(off1+off2)
   6560 	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
   6561 	for {
   6562 		off1 := v.AuxInt
   6563 		sym := v.Aux
   6564 		v_0 := v.Args[0]
   6565 		if v_0.Op != OpAMD64ADDQconst {
   6566 			break
   6567 		}
   6568 		off2 := v_0.AuxInt
   6569 		ptr := v_0.Args[0]
   6570 		mem := v.Args[1]
   6571 		if !(is32Bit(off1 + off2)) {
   6572 			break
   6573 		}
   6574 		v.reset(OpAMD64MOVQatomicload)
   6575 		v.AuxInt = off1 + off2
   6576 		v.Aux = sym
   6577 		v.AddArg(ptr)
   6578 		v.AddArg(mem)
   6579 		return true
   6580 	}
   6581 	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
   6582 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   6583 	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   6584 	for {
   6585 		off1 := v.AuxInt
   6586 		sym1 := v.Aux
   6587 		v_0 := v.Args[0]
   6588 		if v_0.Op != OpAMD64LEAQ {
   6589 			break
   6590 		}
   6591 		off2 := v_0.AuxInt
   6592 		sym2 := v_0.Aux
   6593 		ptr := v_0.Args[0]
   6594 		mem := v.Args[1]
   6595 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   6596 			break
   6597 		}
   6598 		v.reset(OpAMD64MOVQatomicload)
   6599 		v.AuxInt = off1 + off2
   6600 		v.Aux = mergeSym(sym1, sym2)
   6601 		v.AddArg(ptr)
   6602 		v.AddArg(mem)
   6603 		return true
   6604 	}
   6605 	return false
   6606 }
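// rewriteValueAMD64_OpAMD64MOVQload performs store-to-load forwarding
// (a load whose memory argument is a store to the same address yields
// the stored value directly) and then the usual addressing folds,
// including selection of the idx1/idx8 indexed forms.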
   6607 func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
   6608 	b := v.Block
   6609 	_ = b
   6610 	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
   6611 	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
   6612 	// result: x
   6613 	for {
   6614 		off := v.AuxInt
   6615 		sym := v.Aux
   6616 		ptr := v.Args[0]
   6617 		v_1 := v.Args[1]
   6618 		if v_1.Op != OpAMD64MOVQstore {
   6619 			break
   6620 		}
   6621 		off2 := v_1.AuxInt
   6622 		sym2 := v_1.Aux
   6623 		ptr2 := v_1.Args[0]
   6624 		x := v_1.Args[1]
   6625 		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
   6626 			break
   6627 		}
   6628 		v.reset(OpCopy)
   6629 		v.Type = x.Type
   6630 		v.AddArg(x)
   6631 		return true
   6632 	}
   6633 	// match: (MOVQload  [off1] {sym} (ADDQconst [off2] ptr) mem)
   6634 	// cond: is32Bit(off1+off2)
   6635 	// result: (MOVQload  [off1+off2] {sym} ptr mem)
   6636 	for {
   6637 		off1 := v.AuxInt
   6638 		sym := v.Aux
   6639 		v_0 := v.Args[0]
   6640 		if v_0.Op != OpAMD64ADDQconst {
   6641 			break
   6642 		}
   6643 		off2 := v_0.AuxInt
   6644 		ptr := v_0.Args[0]
   6645 		mem := v.Args[1]
   6646 		if !(is32Bit(off1 + off2)) {
   6647 			break
   6648 		}
   6649 		v.reset(OpAMD64MOVQload)
   6650 		v.AuxInt = off1 + off2
   6651 		v.Aux = sym
   6652 		v.AddArg(ptr)
   6653 		v.AddArg(mem)
   6654 		return true
   6655 	}
   6656 	// match: (MOVQload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
   6657 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   6658 	// result: (MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   6659 	for {
   6660 		off1 := v.AuxInt
   6661 		sym1 := v.Aux
   6662 		v_0 := v.Args[0]
   6663 		if v_0.Op != OpAMD64LEAQ {
   6664 			break
   6665 		}
   6666 		off2 := v_0.AuxInt
   6667 		sym2 := v_0.Aux
   6668 		base := v_0.Args[0]
   6669 		mem := v.Args[1]
   6670 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   6671 			break
   6672 		}
   6673 		v.reset(OpAMD64MOVQload)
   6674 		v.AuxInt = off1 + off2
   6675 		v.Aux = mergeSym(sym1, sym2)
   6676 		v.AddArg(base)
   6677 		v.AddArg(mem)
   6678 		return true
   6679 	}
   6680 	// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
   6681 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   6682 	// result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   6683 	for {
   6684 		off1 := v.AuxInt
   6685 		sym1 := v.Aux
   6686 		v_0 := v.Args[0]
   6687 		if v_0.Op != OpAMD64LEAQ1 {
   6688 			break
   6689 		}
   6690 		off2 := v_0.AuxInt
   6691 		sym2 := v_0.Aux
   6692 		ptr := v_0.Args[0]
   6693 		idx := v_0.Args[1]
   6694 		mem := v.Args[1]
   6695 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   6696 			break
   6697 		}
   6698 		v.reset(OpAMD64MOVQloadidx1)
   6699 		v.AuxInt = off1 + off2
   6700 		v.Aux = mergeSym(sym1, sym2)
   6701 		v.AddArg(ptr)
   6702 		v.AddArg(idx)
   6703 		v.AddArg(mem)
   6704 		return true
   6705 	}
   6706 	// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
   6707 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   6708 	// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   6709 	for {
   6710 		off1 := v.AuxInt
   6711 		sym1 := v.Aux
   6712 		v_0 := v.Args[0]
   6713 		if v_0.Op != OpAMD64LEAQ8 {
   6714 			break
   6715 		}
   6716 		off2 := v_0.AuxInt
   6717 		sym2 := v_0.Aux
   6718 		ptr := v_0.Args[0]
   6719 		idx := v_0.Args[1]
   6720 		mem := v.Args[1]
   6721 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   6722 			break
   6723 		}
   6724 		v.reset(OpAMD64MOVQloadidx8)
   6725 		v.AuxInt = off1 + off2
   6726 		v.Aux = mergeSym(sym1, sym2)
   6727 		v.AddArg(ptr)
   6728 		v.AddArg(idx)
   6729 		v.AddArg(mem)
   6730 		return true
   6731 	}
   6732 	// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
   6733 	// cond: ptr.Op != OpSB
   6734 	// result: (MOVQloadidx1 [off] {sym} ptr idx mem)
   6735 	for {
   6736 		off := v.AuxInt
   6737 		sym := v.Aux
   6738 		v_0 := v.Args[0]
   6739 		if v_0.Op != OpAMD64ADDQ {
   6740 			break
   6741 		}
   6742 		ptr := v_0.Args[0]
   6743 		idx := v_0.Args[1]
   6744 		mem := v.Args[1]
   6745 		if !(ptr.Op != OpSB) {
   6746 			break
   6747 		}
   6748 		v.reset(OpAMD64MOVQloadidx1)
   6749 		v.AuxInt = off
   6750 		v.Aux = sym
   6751 		v.AddArg(ptr)
   6752 		v.AddArg(idx)
   6753 		v.AddArg(mem)
   6754 		return true
   6755 	}
   6756 	// match: (MOVQload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
   6757 	// cond: canMergeSym(sym1, sym2)
   6758 	// result: (MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   6759 	for {
   6760 		off1 := v.AuxInt
   6761 		sym1 := v.Aux
   6762 		v_0 := v.Args[0]
   6763 		if v_0.Op != OpAMD64LEAL {
   6764 			break
   6765 		}
   6766 		off2 := v_0.AuxInt
   6767 		sym2 := v_0.Aux
   6768 		base := v_0.Args[0]
   6769 		mem := v.Args[1]
   6770 		if !(canMergeSym(sym1, sym2)) {
   6771 			break
   6772 		}
   6773 		v.reset(OpAMD64MOVQload)
   6774 		v.AuxInt = off1 + off2
   6775 		v.Aux = mergeSym(sym1, sym2)
   6776 		v.AddArg(base)
   6777 		v.AddArg(mem)
   6778 		return true
   6779 	}
   6780 	// match: (MOVQload  [off1] {sym} (ADDLconst [off2] ptr) mem)
   6781 	// cond: is32Bit(off1+off2)
   6782 	// result: (MOVQload  [off1+off2] {sym} ptr mem)
   6783 	for {
   6784 		off1 := v.AuxInt
   6785 		sym := v.Aux
   6786 		v_0 := v.Args[0]
   6787 		if v_0.Op != OpAMD64ADDLconst {
   6788 			break
   6789 		}
   6790 		off2 := v_0.AuxInt
   6791 		ptr := v_0.Args[0]
   6792 		mem := v.Args[1]
   6793 		if !(is32Bit(off1 + off2)) {
   6794 			break
   6795 		}
   6796 		v.reset(OpAMD64MOVQload)
   6797 		v.AuxInt = off1 + off2
   6798 		v.Aux = sym
   6799 		v.AddArg(ptr)
   6800 		v.AddArg(mem)
   6801 		return true
   6802 	}
   6803 	return false
   6804 }
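// rewriteValueAMD64_OpAMD64MOVQloadidx1 upgrades an index shifted left
// by 3 to the 8-scaled load and folds ADDQconst on either operand into
// the displacement.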
   6805 func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool {
   6806 	b := v.Block
   6807 	_ = b
   6808 	// match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
   6809 	// cond:
   6810 	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
   6811 	for {
   6812 		c := v.AuxInt
   6813 		sym := v.Aux
   6814 		ptr := v.Args[0]
   6815 		v_1 := v.Args[1]
   6816 		if v_1.Op != OpAMD64SHLQconst {
   6817 			break
   6818 		}
   6819 		if v_1.AuxInt != 3 {
   6820 			break
   6821 		}
   6822 		idx := v_1.Args[0]
   6823 		mem := v.Args[2]
   6824 		v.reset(OpAMD64MOVQloadidx8)
   6825 		v.AuxInt = c
   6826 		v.Aux = sym
   6827 		v.AddArg(ptr)
   6828 		v.AddArg(idx)
   6829 		v.AddArg(mem)
   6830 		return true
   6831 	}
   6832 	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
   6833 	// cond:
   6834 	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
   6835 	for {
   6836 		c := v.AuxInt
   6837 		sym := v.Aux
   6838 		v_0 := v.Args[0]
   6839 		if v_0.Op != OpAMD64ADDQconst {
   6840 			break
   6841 		}
   6842 		d := v_0.AuxInt
   6843 		ptr := v_0.Args[0]
   6844 		idx := v.Args[1]
   6845 		mem := v.Args[2]
   6846 		v.reset(OpAMD64MOVQloadidx1)
   6847 		v.AuxInt = c + d
   6848 		v.Aux = sym
   6849 		v.AddArg(ptr)
   6850 		v.AddArg(idx)
   6851 		v.AddArg(mem)
   6852 		return true
   6853 	}
   6854 	// match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
   6855 	// cond:
   6856 	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
   6857 	for {
   6858 		c := v.AuxInt
   6859 		sym := v.Aux
   6860 		ptr := v.Args[0]
   6861 		v_1 := v.Args[1]
   6862 		if v_1.Op != OpAMD64ADDQconst {
   6863 			break
   6864 		}
   6865 		d := v_1.AuxInt
   6866 		idx := v_1.Args[0]
   6867 		mem := v.Args[2]
   6868 		v.reset(OpAMD64MOVQloadidx1)
   6869 		v.AuxInt = c + d
   6870 		v.Aux = sym
   6871 		v.AddArg(ptr)
   6872 		v.AddArg(idx)
   6873 		v.AddArg(mem)
   6874 		return true
   6875 	}
   6876 	return false
   6877 }
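// rewriteValueAMD64_OpAMD64MOVQloadidx8 folds constants into the
// 8-scaled load: d bytes when added to the pointer, 8*d when added to
// the index.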
   6878 func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool {
   6879 	b := v.Block
   6880 	_ = b
   6881 	// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
   6882 	// cond:
   6883 	// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
   6884 	for {
   6885 		c := v.AuxInt
   6886 		sym := v.Aux
   6887 		v_0 := v.Args[0]
   6888 		if v_0.Op != OpAMD64ADDQconst {
   6889 			break
   6890 		}
   6891 		d := v_0.AuxInt
   6892 		ptr := v_0.Args[0]
   6893 		idx := v.Args[1]
   6894 		mem := v.Args[2]
   6895 		v.reset(OpAMD64MOVQloadidx8)
   6896 		v.AuxInt = c + d
   6897 		v.Aux = sym
   6898 		v.AddArg(ptr)
   6899 		v.AddArg(idx)
   6900 		v.AddArg(mem)
   6901 		return true
   6902 	}
   6903 	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
   6904 	// cond:
   6905 	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
   6906 	for {
   6907 		c := v.AuxInt
   6908 		sym := v.Aux
   6909 		ptr := v.Args[0]
   6910 		v_1 := v.Args[1]
   6911 		if v_1.Op != OpAMD64ADDQconst {
   6912 			break
   6913 		}
   6914 		d := v_1.AuxInt
   6915 		idx := v_1.Args[0]
   6916 		mem := v.Args[2]
   6917 		v.reset(OpAMD64MOVQloadidx8)
   6918 		v.AuxInt = c + 8*d
   6919 		v.Aux = sym
   6920 		v.AddArg(ptr)
   6921 		v.AddArg(idx)
   6922 		v.AddArg(mem)
   6923 		return true
   6924 	}
   6925 	return false
   6926 }
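// rewriteValueAMD64_OpAMD64MOVQstore folds addressing into 64-bit
// stores and, when the stored value is a MOVQconst that satisfies
// validValAndOff, converts the store to MOVQstoreconst via
// makeValAndOff.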
   6927 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
   6928 	b := v.Block
   6929 	_ = b
   6930 	// match: (MOVQstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
   6931 	// cond: is32Bit(off1+off2)
   6932 	// result: (MOVQstore  [off1+off2] {sym} ptr val mem)
   6933 	for {
   6934 		off1 := v.AuxInt
   6935 		sym := v.Aux
   6936 		v_0 := v.Args[0]
   6937 		if v_0.Op != OpAMD64ADDQconst {
   6938 			break
   6939 		}
   6940 		off2 := v_0.AuxInt
   6941 		ptr := v_0.Args[0]
   6942 		val := v.Args[1]
   6943 		mem := v.Args[2]
   6944 		if !(is32Bit(off1 + off2)) {
   6945 			break
   6946 		}
   6947 		v.reset(OpAMD64MOVQstore)
   6948 		v.AuxInt = off1 + off2
   6949 		v.Aux = sym
   6950 		v.AddArg(ptr)
   6951 		v.AddArg(val)
   6952 		v.AddArg(mem)
   6953 		return true
   6954 	}
   6955 	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
   6956 	// cond: validValAndOff(c,off)
   6957 	// result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
   6958 	for {
   6959 		off := v.AuxInt
   6960 		sym := v.Aux
   6961 		ptr := v.Args[0]
   6962 		v_1 := v.Args[1]
   6963 		if v_1.Op != OpAMD64MOVQconst {
   6964 			break
   6965 		}
   6966 		c := v_1.AuxInt
   6967 		mem := v.Args[2]
   6968 		if !(validValAndOff(c, off)) {
   6969 			break
   6970 		}
   6971 		v.reset(OpAMD64MOVQstoreconst)
   6972 		v.AuxInt = makeValAndOff(c, off)
   6973 		v.Aux = sym
   6974 		v.AddArg(ptr)
   6975 		v.AddArg(mem)
   6976 		return true
   6977 	}
   6978 	// match: (MOVQstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
   6979 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   6980 	// result: (MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   6981 	for {
   6982 		off1 := v.AuxInt
   6983 		sym1 := v.Aux
   6984 		v_0 := v.Args[0]
   6985 		if v_0.Op != OpAMD64LEAQ {
   6986 			break
   6987 		}
   6988 		off2 := v_0.AuxInt
   6989 		sym2 := v_0.Aux
   6990 		base := v_0.Args[0]
   6991 		val := v.Args[1]
   6992 		mem := v.Args[2]
   6993 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   6994 			break
   6995 		}
   6996 		v.reset(OpAMD64MOVQstore)
   6997 		v.AuxInt = off1 + off2
   6998 		v.Aux = mergeSym(sym1, sym2)
   6999 		v.AddArg(base)
   7000 		v.AddArg(val)
   7001 		v.AddArg(mem)
   7002 		return true
   7003 	}
   7004 	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
   7005 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   7006 	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   7007 	for {
   7008 		off1 := v.AuxInt
   7009 		sym1 := v.Aux
   7010 		v_0 := v.Args[0]
   7011 		if v_0.Op != OpAMD64LEAQ1 {
   7012 			break
   7013 		}
   7014 		off2 := v_0.AuxInt
   7015 		sym2 := v_0.Aux
   7016 		ptr := v_0.Args[0]
   7017 		idx := v_0.Args[1]
   7018 		val := v.Args[1]
   7019 		mem := v.Args[2]
   7020 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   7021 			break
   7022 		}
   7023 		v.reset(OpAMD64MOVQstoreidx1)
   7024 		v.AuxInt = off1 + off2
   7025 		v.Aux = mergeSym(sym1, sym2)
   7026 		v.AddArg(ptr)
   7027 		v.AddArg(idx)
   7028 		v.AddArg(val)
   7029 		v.AddArg(mem)
   7030 		return true
   7031 	}
   7032 	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
   7033 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   7034 	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   7035 	for {
   7036 		off1 := v.AuxInt
   7037 		sym1 := v.Aux
   7038 		v_0 := v.Args[0]
   7039 		if v_0.Op != OpAMD64LEAQ8 {
   7040 			break
   7041 		}
   7042 		off2 := v_0.AuxInt
   7043 		sym2 := v_0.Aux
   7044 		ptr := v_0.Args[0]
   7045 		idx := v_0.Args[1]
   7046 		val := v.Args[1]
   7047 		mem := v.Args[2]
   7048 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   7049 			break
   7050 		}
   7051 		v.reset(OpAMD64MOVQstoreidx8)
   7052 		v.AuxInt = off1 + off2
   7053 		v.Aux = mergeSym(sym1, sym2)
   7054 		v.AddArg(ptr)
   7055 		v.AddArg(idx)
   7056 		v.AddArg(val)
   7057 		v.AddArg(mem)
   7058 		return true
   7059 	}
   7060 	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
   7061 	// cond: ptr.Op != OpSB
   7062 	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
   7063 	for {
   7064 		off := v.AuxInt
   7065 		sym := v.Aux
   7066 		v_0 := v.Args[0]
   7067 		if v_0.Op != OpAMD64ADDQ {
   7068 			break
   7069 		}
   7070 		ptr := v_0.Args[0]
   7071 		idx := v_0.Args[1]
   7072 		val := v.Args[1]
   7073 		mem := v.Args[2]
   7074 		if !(ptr.Op != OpSB) {
   7075 			break
   7076 		}
   7077 		v.reset(OpAMD64MOVQstoreidx1)
   7078 		v.AuxInt = off
   7079 		v.Aux = sym
   7080 		v.AddArg(ptr)
   7081 		v.AddArg(idx)
   7082 		v.AddArg(val)
   7083 		v.AddArg(mem)
   7084 		return true
   7085 	}
   7086 	// match: (MOVQstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
   7087 	// cond: canMergeSym(sym1, sym2)
   7088 	// result: (MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   7089 	for {
   7090 		off1 := v.AuxInt
   7091 		sym1 := v.Aux
   7092 		v_0 := v.Args[0]
   7093 		if v_0.Op != OpAMD64LEAL {
   7094 			break
   7095 		}
   7096 		off2 := v_0.AuxInt
   7097 		sym2 := v_0.Aux
   7098 		base := v_0.Args[0]
   7099 		val := v.Args[1]
   7100 		mem := v.Args[2]
   7101 		if !(canMergeSym(sym1, sym2)) {
   7102 			break
   7103 		}
   7104 		v.reset(OpAMD64MOVQstore)
   7105 		v.AuxInt = off1 + off2
   7106 		v.Aux = mergeSym(sym1, sym2)
   7107 		v.AddArg(base)
   7108 		v.AddArg(val)
   7109 		v.AddArg(mem)
   7110 		return true
   7111 	}
   7112 	// match: (MOVQstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
   7113 	// cond: is32Bit(off1+off2)
   7114 	// result: (MOVQstore  [off1+off2] {sym} ptr val mem)
   7115 	for {
   7116 		off1 := v.AuxInt
   7117 		sym := v.Aux
   7118 		v_0 := v.Args[0]
   7119 		if v_0.Op != OpAMD64ADDLconst {
   7120 			break
   7121 		}
   7122 		off2 := v_0.AuxInt
   7123 		ptr := v_0.Args[0]
   7124 		val := v.Args[1]
   7125 		mem := v.Args[2]
   7126 		if !(is32Bit(off1 + off2)) {
   7127 			break
   7128 		}
   7129 		v.reset(OpAMD64MOVQstore)
   7130 		v.AuxInt = off1 + off2
   7131 		v.Aux = sym
   7132 		v.AddArg(ptr)
   7133 		v.AddArg(val)
   7134 		v.AddArg(mem)
   7135 		return true
   7136 	}
   7137 	return false
   7138 }
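// rewriteValueAMD64_OpAMD64MOVQstoreconst absorbs offsets and symbols
// into the 64-bit constant store and lowers LEAQ1/LEAQ8/ADDQ addresses
// to the indexed forms.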
   7139 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool {
   7140 	b := v.Block
   7141 	_ = b
   7142 	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
   7143 	// cond: ValAndOff(sc).canAdd(off)
   7144 	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   7145 	for {
   7146 		sc := v.AuxInt
   7147 		s := v.Aux
   7148 		v_0 := v.Args[0]
   7149 		if v_0.Op != OpAMD64ADDQconst {
   7150 			break
   7151 		}
   7152 		off := v_0.AuxInt
   7153 		ptr := v_0.Args[0]
   7154 		mem := v.Args[1]
   7155 		if !(ValAndOff(sc).canAdd(off)) {
   7156 			break
   7157 		}
   7158 		v.reset(OpAMD64MOVQstoreconst)
   7159 		v.AuxInt = ValAndOff(sc).add(off)
   7160 		v.Aux = s
   7161 		v.AddArg(ptr)
   7162 		v.AddArg(mem)
   7163 		return true
   7164 	}
   7165 	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
   7166 	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   7167 	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   7168 	for {
   7169 		sc := v.AuxInt
   7170 		sym1 := v.Aux
   7171 		v_0 := v.Args[0]
   7172 		if v_0.Op != OpAMD64LEAQ {
   7173 			break
   7174 		}
   7175 		off := v_0.AuxInt
   7176 		sym2 := v_0.Aux
   7177 		ptr := v_0.Args[0]
   7178 		mem := v.Args[1]
   7179 		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
   7180 			break
   7181 		}
   7182 		v.reset(OpAMD64MOVQstoreconst)
   7183 		v.AuxInt = ValAndOff(sc).add(off)
   7184 		v.Aux = mergeSym(sym1, sym2)
   7185 		v.AddArg(ptr)
   7186 		v.AddArg(mem)
   7187 		return true
   7188 	}
   7189 	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
   7190 	// cond: canMergeSym(sym1, sym2)
   7191 	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   7192 	for {
   7193 		x := v.AuxInt
   7194 		sym1 := v.Aux
   7195 		v_0 := v.Args[0]
   7196 		if v_0.Op != OpAMD64LEAQ1 {
   7197 			break
   7198 		}
   7199 		off := v_0.AuxInt
   7200 		sym2 := v_0.Aux
   7201 		ptr := v_0.Args[0]
   7202 		idx := v_0.Args[1]
   7203 		mem := v.Args[1]
   7204 		if !(canMergeSym(sym1, sym2)) {
   7205 			break
   7206 		}
   7207 		v.reset(OpAMD64MOVQstoreconstidx1)
   7208 		v.AuxInt = ValAndOff(x).add(off)
   7209 		v.Aux = mergeSym(sym1, sym2)
   7210 		v.AddArg(ptr)
   7211 		v.AddArg(idx)
   7212 		v.AddArg(mem)
   7213 		return true
   7214 	}
   7215 	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
   7216 	// cond: canMergeSym(sym1, sym2)
   7217 	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   7218 	for {
   7219 		x := v.AuxInt
   7220 		sym1 := v.Aux
   7221 		v_0 := v.Args[0]
   7222 		if v_0.Op != OpAMD64LEAQ8 {
   7223 			break
   7224 		}
   7225 		off := v_0.AuxInt
   7226 		sym2 := v_0.Aux
   7227 		ptr := v_0.Args[0]
   7228 		idx := v_0.Args[1]
   7229 		mem := v.Args[1]
   7230 		if !(canMergeSym(sym1, sym2)) {
   7231 			break
   7232 		}
   7233 		v.reset(OpAMD64MOVQstoreconstidx8)
   7234 		v.AuxInt = ValAndOff(x).add(off)
   7235 		v.Aux = mergeSym(sym1, sym2)
   7236 		v.AddArg(ptr)
   7237 		v.AddArg(idx)
   7238 		v.AddArg(mem)
   7239 		return true
   7240 	}
   7241 	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
   7242 	// cond:
   7243 	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
   7244 	for {
   7245 		x := v.AuxInt
   7246 		sym := v.Aux
   7247 		v_0 := v.Args[0]
   7248 		if v_0.Op != OpAMD64ADDQ {
   7249 			break
   7250 		}
   7251 		ptr := v_0.Args[0]
   7252 		idx := v_0.Args[1]
   7253 		mem := v.Args[1]
   7254 		v.reset(OpAMD64MOVQstoreconstidx1)
   7255 		v.AuxInt = x
   7256 		v.Aux = sym
   7257 		v.AddArg(ptr)
   7258 		v.AddArg(idx)
   7259 		v.AddArg(mem)
   7260 		return true
   7261 	}
   7262 	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
   7263 	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   7264 	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   7265 	for {
   7266 		sc := v.AuxInt
   7267 		sym1 := v.Aux
   7268 		v_0 := v.Args[0]
   7269 		if v_0.Op != OpAMD64LEAL {
   7270 			break
   7271 		}
   7272 		off := v_0.AuxInt
   7273 		sym2 := v_0.Aux
   7274 		ptr := v_0.Args[0]
   7275 		mem := v.Args[1]
   7276 		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
   7277 			break
   7278 		}
   7279 		v.reset(OpAMD64MOVQstoreconst)
   7280 		v.AuxInt = ValAndOff(sc).add(off)
   7281 		v.Aux = mergeSym(sym1, sym2)
   7282 		v.AddArg(ptr)
   7283 		v.AddArg(mem)
   7284 		return true
   7285 	}
   7286 	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
   7287 	// cond: ValAndOff(sc).canAdd(off)
   7288 	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   7289 	for {
   7290 		sc := v.AuxInt
   7291 		s := v.Aux
   7292 		v_0 := v.Args[0]
   7293 		if v_0.Op != OpAMD64ADDLconst {
   7294 			break
   7295 		}
   7296 		off := v_0.AuxInt
   7297 		ptr := v_0.Args[0]
   7298 		mem := v.Args[1]
   7299 		if !(ValAndOff(sc).canAdd(off)) {
   7300 			break
   7301 		}
   7302 		v.reset(OpAMD64MOVQstoreconst)
   7303 		v.AuxInt = ValAndOff(sc).add(off)
   7304 		v.Aux = s
   7305 		v.AddArg(ptr)
   7306 		v.AddArg(mem)
   7307 		return true
   7308 	}
   7309 	return false
   7310 }
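// rewriteValueAMD64_OpAMD64MOVQstoreconstidx1 upgrades SHLQconst [3]
// indexes to the 8-scaled form and folds ADDQconst into the ValAndOff
// offset.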
   7311 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool {
   7312 	b := v.Block
   7313 	_ = b
   7314 	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
   7315 	// cond:
   7316 	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
   7317 	for {
   7318 		c := v.AuxInt
   7319 		sym := v.Aux
   7320 		ptr := v.Args[0]
   7321 		v_1 := v.Args[1]
   7322 		if v_1.Op != OpAMD64SHLQconst {
   7323 			break
   7324 		}
   7325 		if v_1.AuxInt != 3 {
   7326 			break
   7327 		}
   7328 		idx := v_1.Args[0]
   7329 		mem := v.Args[2]
   7330 		v.reset(OpAMD64MOVQstoreconstidx8)
   7331 		v.AuxInt = c
   7332 		v.Aux = sym
   7333 		v.AddArg(ptr)
   7334 		v.AddArg(idx)
   7335 		v.AddArg(mem)
   7336 		return true
   7337 	}
   7338 	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
   7339 	// cond:
   7340 	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   7341 	for {
   7342 		x := v.AuxInt
   7343 		sym := v.Aux
   7344 		v_0 := v.Args[0]
   7345 		if v_0.Op != OpAMD64ADDQconst {
   7346 			break
   7347 		}
   7348 		c := v_0.AuxInt
   7349 		ptr := v_0.Args[0]
   7350 		idx := v.Args[1]
   7351 		mem := v.Args[2]
   7352 		v.reset(OpAMD64MOVQstoreconstidx1)
   7353 		v.AuxInt = ValAndOff(x).add(c)
   7354 		v.Aux = sym
   7355 		v.AddArg(ptr)
   7356 		v.AddArg(idx)
   7357 		v.AddArg(mem)
   7358 		return true
   7359 	}
   7360 	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
   7361 	// cond:
   7362 	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   7363 	for {
   7364 		x := v.AuxInt
   7365 		sym := v.Aux
   7366 		ptr := v.Args[0]
   7367 		v_1 := v.Args[1]
   7368 		if v_1.Op != OpAMD64ADDQconst {
   7369 			break
   7370 		}
   7371 		c := v_1.AuxInt
   7372 		idx := v_1.Args[0]
   7373 		mem := v.Args[2]
   7374 		v.reset(OpAMD64MOVQstoreconstidx1)
   7375 		v.AuxInt = ValAndOff(x).add(c)
   7376 		v.Aux = sym
   7377 		v.AddArg(ptr)
   7378 		v.AddArg(idx)
   7379 		v.AddArg(mem)
   7380 		return true
   7381 	}
   7382 	return false
   7383 }
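// rewriteValueAMD64_OpAMD64MOVQstoreconstidx8 folds ADDQconst into the
// 8-scaled constant store: c from the pointer, 8*c from the index.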
   7384 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool {
   7385 	b := v.Block
   7386 	_ = b
   7387 	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
   7388 	// cond:
   7389 	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   7390 	for {
   7391 		x := v.AuxInt
   7392 		sym := v.Aux
   7393 		v_0 := v.Args[0]
   7394 		if v_0.Op != OpAMD64ADDQconst {
   7395 			break
   7396 		}
   7397 		c := v_0.AuxInt
   7398 		ptr := v_0.Args[0]
   7399 		idx := v.Args[1]
   7400 		mem := v.Args[2]
   7401 		v.reset(OpAMD64MOVQstoreconstidx8)
   7402 		v.AuxInt = ValAndOff(x).add(c)
   7403 		v.Aux = sym
   7404 		v.AddArg(ptr)
   7405 		v.AddArg(idx)
   7406 		v.AddArg(mem)
   7407 		return true
   7408 	}
   7409 	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
   7410 	// cond:
   7411 	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
   7412 	for {
   7413 		x := v.AuxInt
   7414 		sym := v.Aux
   7415 		ptr := v.Args[0]
   7416 		v_1 := v.Args[1]
   7417 		if v_1.Op != OpAMD64ADDQconst {
   7418 			break
   7419 		}
   7420 		c := v_1.AuxInt
   7421 		idx := v_1.Args[0]
   7422 		mem := v.Args[2]
   7423 		v.reset(OpAMD64MOVQstoreconstidx8)
   7424 		v.AuxInt = ValAndOff(x).add(8 * c)
   7425 		v.Aux = sym
   7426 		v.AddArg(ptr)
   7427 		v.AddArg(idx)
   7428 		v.AddArg(mem)
   7429 		return true
   7430 	}
   7431 	return false
   7432 }
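// rewriteValueAMD64_OpAMD64MOVQstoreidx1 upgrades SHLQconst [3] indexes
// to MOVQstoreidx8 and folds ADDQconst displacements from either
// operand.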
   7433 func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool {
   7434 	b := v.Block
   7435 	_ = b
   7436 	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
   7437 	// cond:
   7438 	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
   7439 	for {
   7440 		c := v.AuxInt
   7441 		sym := v.Aux
   7442 		ptr := v.Args[0]
   7443 		v_1 := v.Args[1]
   7444 		if v_1.Op != OpAMD64SHLQconst {
   7445 			break
   7446 		}
   7447 		if v_1.AuxInt != 3 {
   7448 			break
   7449 		}
   7450 		idx := v_1.Args[0]
   7451 		val := v.Args[2]
   7452 		mem := v.Args[3]
   7453 		v.reset(OpAMD64MOVQstoreidx8)
   7454 		v.AuxInt = c
   7455 		v.Aux = sym
   7456 		v.AddArg(ptr)
   7457 		v.AddArg(idx)
   7458 		v.AddArg(val)
   7459 		v.AddArg(mem)
   7460 		return true
   7461 	}
   7462 	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
   7463 	// cond:
   7464 	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
   7465 	for {
   7466 		c := v.AuxInt
   7467 		sym := v.Aux
   7468 		v_0 := v.Args[0]
   7469 		if v_0.Op != OpAMD64ADDQconst {
   7470 			break
   7471 		}
   7472 		d := v_0.AuxInt
   7473 		ptr := v_0.Args[0]
   7474 		idx := v.Args[1]
   7475 		val := v.Args[2]
   7476 		mem := v.Args[3]
   7477 		v.reset(OpAMD64MOVQstoreidx1)
   7478 		v.AuxInt = c + d
   7479 		v.Aux = sym
   7480 		v.AddArg(ptr)
   7481 		v.AddArg(idx)
   7482 		v.AddArg(val)
   7483 		v.AddArg(mem)
   7484 		return true
   7485 	}
   7486 	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
   7487 	// cond:
   7488 	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
   7489 	for {
   7490 		c := v.AuxInt
   7491 		sym := v.Aux
   7492 		ptr := v.Args[0]
   7493 		v_1 := v.Args[1]
   7494 		if v_1.Op != OpAMD64ADDQconst {
   7495 			break
   7496 		}
   7497 		d := v_1.AuxInt
   7498 		idx := v_1.Args[0]
   7499 		val := v.Args[2]
   7500 		mem := v.Args[3]
   7501 		v.reset(OpAMD64MOVQstoreidx1)
   7502 		v.AuxInt = c + d
   7503 		v.Aux = sym
   7504 		v.AddArg(ptr)
   7505 		v.AddArg(idx)
   7506 		v.AddArg(val)
   7507 		v.AddArg(mem)
   7508 		return true
   7509 	}
   7510 	return false
   7511 }
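// rewriteValueAMD64_OpAMD64MOVQstoreidx8 folds ADDQconst displacements
// into the 8-scaled store (d from the pointer, 8*d from the index).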
   7512 func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool {
   7513 	b := v.Block
   7514 	_ = b
   7515 	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
   7516 	// cond:
   7517 	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
   7518 	for {
   7519 		c := v.AuxInt
   7520 		sym := v.Aux
   7521 		v_0 := v.Args[0]
   7522 		if v_0.Op != OpAMD64ADDQconst {
   7523 			break
   7524 		}
   7525 		d := v_0.AuxInt
   7526 		ptr := v_0.Args[0]
   7527 		idx := v.Args[1]
   7528 		val := v.Args[2]
   7529 		mem := v.Args[3]
   7530 		v.reset(OpAMD64MOVQstoreidx8)
   7531 		v.AuxInt = c + d
   7532 		v.Aux = sym
   7533 		v.AddArg(ptr)
   7534 		v.AddArg(idx)
   7535 		v.AddArg(val)
   7536 		v.AddArg(mem)
   7537 		return true
   7538 	}
   7539 	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
   7540 	// cond:
   7541 	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
   7542 	for {
   7543 		c := v.AuxInt
   7544 		sym := v.Aux
   7545 		ptr := v.Args[0]
   7546 		v_1 := v.Args[1]
   7547 		if v_1.Op != OpAMD64ADDQconst {
   7548 			break
   7549 		}
   7550 		d := v_1.AuxInt
   7551 		idx := v_1.Args[0]
   7552 		val := v.Args[2]
   7553 		mem := v.Args[3]
   7554 		v.reset(OpAMD64MOVQstoreidx8)
   7555 		v.AuxInt = c + 8*d
   7556 		v.Aux = sym
   7557 		v.AddArg(ptr)
   7558 		v.AddArg(idx)
   7559 		v.AddArg(val)
   7560 		v.AddArg(mem)
   7561 		return true
   7562 	}
   7563 	return false
   7564 }
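// rewriteValueAMD64_OpAMD64MOVSDload applies the same addressing folds
// to the 64-bit floating-point load, selecting the idx1/idx8 forms
// where the address is a LEAQ1/LEAQ8/ADDQ.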
   7565 func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool {
   7566 	b := v.Block
   7567 	_ = b
   7568 	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
   7569 	// cond: is32Bit(off1+off2)
   7570 	// result: (MOVSDload [off1+off2] {sym} ptr mem)
   7571 	for {
   7572 		off1 := v.AuxInt
   7573 		sym := v.Aux
   7574 		v_0 := v.Args[0]
   7575 		if v_0.Op != OpAMD64ADDQconst {
   7576 			break
   7577 		}
   7578 		off2 := v_0.AuxInt
   7579 		ptr := v_0.Args[0]
   7580 		mem := v.Args[1]
   7581 		if !(is32Bit(off1 + off2)) {
   7582 			break
   7583 		}
   7584 		v.reset(OpAMD64MOVSDload)
   7585 		v.AuxInt = off1 + off2
   7586 		v.Aux = sym
   7587 		v.AddArg(ptr)
   7588 		v.AddArg(mem)
   7589 		return true
   7590 	}
   7591 	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
   7592 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   7593 	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   7594 	for {
   7595 		off1 := v.AuxInt
   7596 		sym1 := v.Aux
   7597 		v_0 := v.Args[0]
   7598 		if v_0.Op != OpAMD64LEAQ {
   7599 			break
   7600 		}
   7601 		off2 := v_0.AuxInt
   7602 		sym2 := v_0.Aux
   7603 		base := v_0.Args[0]
   7604 		mem := v.Args[1]
   7605 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   7606 			break
   7607 		}
   7608 		v.reset(OpAMD64MOVSDload)
   7609 		v.AuxInt = off1 + off2
   7610 		v.Aux = mergeSym(sym1, sym2)
   7611 		v.AddArg(base)
   7612 		v.AddArg(mem)
   7613 		return true
   7614 	}
   7615 	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
   7616 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   7617 	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   7618 	for {
   7619 		off1 := v.AuxInt
   7620 		sym1 := v.Aux
   7621 		v_0 := v.Args[0]
   7622 		if v_0.Op != OpAMD64LEAQ1 {
   7623 			break
   7624 		}
   7625 		off2 := v_0.AuxInt
   7626 		sym2 := v_0.Aux
   7627 		ptr := v_0.Args[0]
   7628 		idx := v_0.Args[1]
   7629 		mem := v.Args[1]
   7630 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   7631 			break
   7632 		}
   7633 		v.reset(OpAMD64MOVSDloadidx1)
   7634 		v.AuxInt = off1 + off2
   7635 		v.Aux = mergeSym(sym1, sym2)
   7636 		v.AddArg(ptr)
   7637 		v.AddArg(idx)
   7638 		v.AddArg(mem)
   7639 		return true
   7640 	}
   7641 	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
   7642 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   7643 	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   7644 	for {
   7645 		off1 := v.AuxInt
   7646 		sym1 := v.Aux
   7647 		v_0 := v.Args[0]
   7648 		if v_0.Op != OpAMD64LEAQ8 {
   7649 			break
   7650 		}
   7651 		off2 := v_0.AuxInt
   7652 		sym2 := v_0.Aux
   7653 		ptr := v_0.Args[0]
   7654 		idx := v_0.Args[1]
   7655 		mem := v.Args[1]
   7656 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   7657 			break
   7658 		}
   7659 		v.reset(OpAMD64MOVSDloadidx8)
   7660 		v.AuxInt = off1 + off2
   7661 		v.Aux = mergeSym(sym1, sym2)
   7662 		v.AddArg(ptr)
   7663 		v.AddArg(idx)
   7664 		v.AddArg(mem)
   7665 		return true
   7666 	}
   7667 	// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
   7668 	// cond: ptr.Op != OpSB
   7669 	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
   7670 	for {
   7671 		off := v.AuxInt
   7672 		sym := v.Aux
   7673 		v_0 := v.Args[0]
   7674 		if v_0.Op != OpAMD64ADDQ {
   7675 			break
   7676 		}
   7677 		ptr := v_0.Args[0]
   7678 		idx := v_0.Args[1]
   7679 		mem := v.Args[1]
   7680 		if !(ptr.Op != OpSB) {
   7681 			break
   7682 		}
   7683 		v.reset(OpAMD64MOVSDloadidx1)
   7684 		v.AuxInt = off
   7685 		v.Aux = sym
   7686 		v.AddArg(ptr)
   7687 		v.AddArg(idx)
   7688 		v.AddArg(mem)
   7689 		return true
   7690 	}
   7691 	return false
   7692 }
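// MOVSDloadidx1 addresses ptr + idx + c. An index of (SHLQconst [3] idx)
// makes that ptr + 8*idx + c, which is exactly the scaled MOVSDloadidx8
// form, so the shift is absorbed into the addressing mode; the other two
// rules fold an ADDQconst into the displacement from either operand.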
   7693 func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool {
   7694 	b := v.Block
   7695 	_ = b
   7696 	// match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
   7697 	// cond:
   7698 	// result: (MOVSDloadidx8 [c] {sym} ptr idx mem)
   7699 	for {
   7700 		c := v.AuxInt
   7701 		sym := v.Aux
   7702 		ptr := v.Args[0]
   7703 		v_1 := v.Args[1]
   7704 		if v_1.Op != OpAMD64SHLQconst {
   7705 			break
   7706 		}
   7707 		if v_1.AuxInt != 3 {
   7708 			break
   7709 		}
   7710 		idx := v_1.Args[0]
   7711 		mem := v.Args[2]
   7712 		v.reset(OpAMD64MOVSDloadidx8)
   7713 		v.AuxInt = c
   7714 		v.Aux = sym
   7715 		v.AddArg(ptr)
   7716 		v.AddArg(idx)
   7717 		v.AddArg(mem)
   7718 		return true
   7719 	}
   7720 	// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
   7721 	// cond:
   7722 	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
   7723 	for {
   7724 		c := v.AuxInt
   7725 		sym := v.Aux
   7726 		v_0 := v.Args[0]
   7727 		if v_0.Op != OpAMD64ADDQconst {
   7728 			break
   7729 		}
   7730 		d := v_0.AuxInt
   7731 		ptr := v_0.Args[0]
   7732 		idx := v.Args[1]
   7733 		mem := v.Args[2]
   7734 		v.reset(OpAMD64MOVSDloadidx1)
   7735 		v.AuxInt = c + d
   7736 		v.Aux = sym
   7737 		v.AddArg(ptr)
   7738 		v.AddArg(idx)
   7739 		v.AddArg(mem)
   7740 		return true
   7741 	}
   7742 	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
   7743 	// cond:
   7744 	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
   7745 	for {
   7746 		c := v.AuxInt
   7747 		sym := v.Aux
   7748 		ptr := v.Args[0]
   7749 		v_1 := v.Args[1]
   7750 		if v_1.Op != OpAMD64ADDQconst {
   7751 			break
   7752 		}
   7753 		d := v_1.AuxInt
   7754 		idx := v_1.Args[0]
   7755 		mem := v.Args[2]
   7756 		v.reset(OpAMD64MOVSDloadidx1)
   7757 		v.AuxInt = c + d
   7758 		v.Aux = sym
   7759 		v.AddArg(ptr)
   7760 		v.AddArg(idx)
   7761 		v.AddArg(mem)
   7762 		return true
   7763 	}
   7764 	return false
   7765 }
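// In the scaled form the index is multiplied by 8, so a constant folded in
// from the index side scales as well: ptr + 8*(idx+d) + c is
// ptr + 8*idx + (c+8*d), giving [c+8*d] in the second rule, versus [c+d]
// when the constant comes from the unscaled ptr operand.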
   7766 func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool {
   7767 	b := v.Block
   7768 	_ = b
   7769 	// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
   7770 	// cond:
   7771 	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
   7772 	for {
   7773 		c := v.AuxInt
   7774 		sym := v.Aux
   7775 		v_0 := v.Args[0]
   7776 		if v_0.Op != OpAMD64ADDQconst {
   7777 			break
   7778 		}
   7779 		d := v_0.AuxInt
   7780 		ptr := v_0.Args[0]
   7781 		idx := v.Args[1]
   7782 		mem := v.Args[2]
   7783 		v.reset(OpAMD64MOVSDloadidx8)
   7784 		v.AuxInt = c + d
   7785 		v.Aux = sym
   7786 		v.AddArg(ptr)
   7787 		v.AddArg(idx)
   7788 		v.AddArg(mem)
   7789 		return true
   7790 	}
   7791 	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
   7792 	// cond:
   7793 	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
   7794 	for {
   7795 		c := v.AuxInt
   7796 		sym := v.Aux
   7797 		ptr := v.Args[0]
   7798 		v_1 := v.Args[1]
   7799 		if v_1.Op != OpAMD64ADDQconst {
   7800 			break
   7801 		}
   7802 		d := v_1.AuxInt
   7803 		idx := v_1.Args[0]
   7804 		mem := v.Args[2]
   7805 		v.reset(OpAMD64MOVSDloadidx8)
   7806 		v.AuxInt = c + 8*d
   7807 		v.Aux = sym
   7808 		v.AddArg(ptr)
   7809 		v.AddArg(idx)
   7810 		v.AddArg(mem)
   7811 		return true
   7812 	}
   7813 	return false
   7814 }
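// The MOVSDstore rules mirror the MOVSDload rules above with a val operand
// threaded through: constant offsets and LEAQ/LEAQ1/LEAQ8 addresses fold
// into the displacement and addressing mode, and (ADDQ ptr idx) selects the
// idx1 form provided ptr is not OpSB, the static-base pseudo-register.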
   7815 func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
   7816 	b := v.Block
   7817 	_ = b
   7818 	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
   7819 	// cond: is32Bit(off1+off2)
   7820 	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
   7821 	for {
   7822 		off1 := v.AuxInt
   7823 		sym := v.Aux
   7824 		v_0 := v.Args[0]
   7825 		if v_0.Op != OpAMD64ADDQconst {
   7826 			break
   7827 		}
   7828 		off2 := v_0.AuxInt
   7829 		ptr := v_0.Args[0]
   7830 		val := v.Args[1]
   7831 		mem := v.Args[2]
   7832 		if !(is32Bit(off1 + off2)) {
   7833 			break
   7834 		}
   7835 		v.reset(OpAMD64MOVSDstore)
   7836 		v.AuxInt = off1 + off2
   7837 		v.Aux = sym
   7838 		v.AddArg(ptr)
   7839 		v.AddArg(val)
   7840 		v.AddArg(mem)
   7841 		return true
   7842 	}
   7843 	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
   7844 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   7845 	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   7846 	for {
   7847 		off1 := v.AuxInt
   7848 		sym1 := v.Aux
   7849 		v_0 := v.Args[0]
   7850 		if v_0.Op != OpAMD64LEAQ {
   7851 			break
   7852 		}
   7853 		off2 := v_0.AuxInt
   7854 		sym2 := v_0.Aux
   7855 		base := v_0.Args[0]
   7856 		val := v.Args[1]
   7857 		mem := v.Args[2]
   7858 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   7859 			break
   7860 		}
   7861 		v.reset(OpAMD64MOVSDstore)
   7862 		v.AuxInt = off1 + off2
   7863 		v.Aux = mergeSym(sym1, sym2)
   7864 		v.AddArg(base)
   7865 		v.AddArg(val)
   7866 		v.AddArg(mem)
   7867 		return true
   7868 	}
   7869 	// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
   7870 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   7871 	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   7872 	for {
   7873 		off1 := v.AuxInt
   7874 		sym1 := v.Aux
   7875 		v_0 := v.Args[0]
   7876 		if v_0.Op != OpAMD64LEAQ1 {
   7877 			break
   7878 		}
   7879 		off2 := v_0.AuxInt
   7880 		sym2 := v_0.Aux
   7881 		ptr := v_0.Args[0]
   7882 		idx := v_0.Args[1]
   7883 		val := v.Args[1]
   7884 		mem := v.Args[2]
   7885 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   7886 			break
   7887 		}
   7888 		v.reset(OpAMD64MOVSDstoreidx1)
   7889 		v.AuxInt = off1 + off2
   7890 		v.Aux = mergeSym(sym1, sym2)
   7891 		v.AddArg(ptr)
   7892 		v.AddArg(idx)
   7893 		v.AddArg(val)
   7894 		v.AddArg(mem)
   7895 		return true
   7896 	}
   7897 	// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
   7898 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   7899 	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   7900 	for {
   7901 		off1 := v.AuxInt
   7902 		sym1 := v.Aux
   7903 		v_0 := v.Args[0]
   7904 		if v_0.Op != OpAMD64LEAQ8 {
   7905 			break
   7906 		}
   7907 		off2 := v_0.AuxInt
   7908 		sym2 := v_0.Aux
   7909 		ptr := v_0.Args[0]
   7910 		idx := v_0.Args[1]
   7911 		val := v.Args[1]
   7912 		mem := v.Args[2]
   7913 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   7914 			break
   7915 		}
   7916 		v.reset(OpAMD64MOVSDstoreidx8)
   7917 		v.AuxInt = off1 + off2
   7918 		v.Aux = mergeSym(sym1, sym2)
   7919 		v.AddArg(ptr)
   7920 		v.AddArg(idx)
   7921 		v.AddArg(val)
   7922 		v.AddArg(mem)
   7923 		return true
   7924 	}
   7925 	// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
   7926 	// cond: ptr.Op != OpSB
   7927 	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
   7928 	for {
   7929 		off := v.AuxInt
   7930 		sym := v.Aux
   7931 		v_0 := v.Args[0]
   7932 		if v_0.Op != OpAMD64ADDQ {
   7933 			break
   7934 		}
   7935 		ptr := v_0.Args[0]
   7936 		idx := v_0.Args[1]
   7937 		val := v.Args[1]
   7938 		mem := v.Args[2]
   7939 		if !(ptr.Op != OpSB) {
   7940 			break
   7941 		}
   7942 		v.reset(OpAMD64MOVSDstoreidx1)
   7943 		v.AuxInt = off
   7944 		v.Aux = sym
   7945 		v.AddArg(ptr)
   7946 		v.AddArg(idx)
   7947 		v.AddArg(val)
   7948 		v.AddArg(mem)
   7949 		return true
   7950 	}
   7951 	return false
   7952 }
   7953 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool {
   7954 	b := v.Block
   7955 	_ = b
   7956 	// match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
   7957 	// cond:
   7958 	// result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
   7959 	for {
   7960 		c := v.AuxInt
   7961 		sym := v.Aux
   7962 		ptr := v.Args[0]
   7963 		v_1 := v.Args[1]
   7964 		if v_1.Op != OpAMD64SHLQconst {
   7965 			break
   7966 		}
   7967 		if v_1.AuxInt != 3 {
   7968 			break
   7969 		}
   7970 		idx := v_1.Args[0]
   7971 		val := v.Args[2]
   7972 		mem := v.Args[3]
   7973 		v.reset(OpAMD64MOVSDstoreidx8)
   7974 		v.AuxInt = c
   7975 		v.Aux = sym
   7976 		v.AddArg(ptr)
   7977 		v.AddArg(idx)
   7978 		v.AddArg(val)
   7979 		v.AddArg(mem)
   7980 		return true
   7981 	}
   7982 	// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
   7983 	// cond:
   7984 	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
   7985 	for {
   7986 		c := v.AuxInt
   7987 		sym := v.Aux
   7988 		v_0 := v.Args[0]
   7989 		if v_0.Op != OpAMD64ADDQconst {
   7990 			break
   7991 		}
   7992 		d := v_0.AuxInt
   7993 		ptr := v_0.Args[0]
   7994 		idx := v.Args[1]
   7995 		val := v.Args[2]
   7996 		mem := v.Args[3]
   7997 		v.reset(OpAMD64MOVSDstoreidx1)
   7998 		v.AuxInt = c + d
   7999 		v.Aux = sym
   8000 		v.AddArg(ptr)
   8001 		v.AddArg(idx)
   8002 		v.AddArg(val)
   8003 		v.AddArg(mem)
   8004 		return true
   8005 	}
   8006 	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
   8007 	// cond:
   8008 	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
   8009 	for {
   8010 		c := v.AuxInt
   8011 		sym := v.Aux
   8012 		ptr := v.Args[0]
   8013 		v_1 := v.Args[1]
   8014 		if v_1.Op != OpAMD64ADDQconst {
   8015 			break
   8016 		}
   8017 		d := v_1.AuxInt
   8018 		idx := v_1.Args[0]
   8019 		val := v.Args[2]
   8020 		mem := v.Args[3]
   8021 		v.reset(OpAMD64MOVSDstoreidx1)
   8022 		v.AuxInt = c + d
   8023 		v.Aux = sym
   8024 		v.AddArg(ptr)
   8025 		v.AddArg(idx)
   8026 		v.AddArg(val)
   8027 		v.AddArg(mem)
   8028 		return true
   8029 	}
   8030 	return false
   8031 }
   8032 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool {
   8033 	b := v.Block
   8034 	_ = b
   8035 	// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
   8036 	// cond:
   8037 	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
   8038 	for {
   8039 		c := v.AuxInt
   8040 		sym := v.Aux
   8041 		v_0 := v.Args[0]
   8042 		if v_0.Op != OpAMD64ADDQconst {
   8043 			break
   8044 		}
   8045 		d := v_0.AuxInt
   8046 		ptr := v_0.Args[0]
   8047 		idx := v.Args[1]
   8048 		val := v.Args[2]
   8049 		mem := v.Args[3]
   8050 		v.reset(OpAMD64MOVSDstoreidx8)
   8051 		v.AuxInt = c + d
   8052 		v.Aux = sym
   8053 		v.AddArg(ptr)
   8054 		v.AddArg(idx)
   8055 		v.AddArg(val)
   8056 		v.AddArg(mem)
   8057 		return true
   8058 	}
   8059 	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
   8060 	// cond:
   8061 	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
   8062 	for {
   8063 		c := v.AuxInt
   8064 		sym := v.Aux
   8065 		ptr := v.Args[0]
   8066 		v_1 := v.Args[1]
   8067 		if v_1.Op != OpAMD64ADDQconst {
   8068 			break
   8069 		}
   8070 		d := v_1.AuxInt
   8071 		idx := v_1.Args[0]
   8072 		val := v.Args[2]
   8073 		mem := v.Args[3]
   8074 		v.reset(OpAMD64MOVSDstoreidx8)
   8075 		v.AuxInt = c + 8*d
   8076 		v.Aux = sym
   8077 		v.AddArg(ptr)
   8078 		v.AddArg(idx)
   8079 		v.AddArg(val)
   8080 		v.AddArg(mem)
   8081 		return true
   8082 	}
   8083 	return false
   8084 }
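// MOVSS moves a 4-byte float, so its natural scale is 4: LEAQ4 and a
// (SHLQconst [2] idx) index map to the idx4 forms, and a constant folded in
// from the index side contributes 4*d rather than the 8*d seen for MOVSD.
// Otherwise the MOVSSload/MOVSSstore rules below repeat the same pattern.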
   8085 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool {
   8086 	b := v.Block
   8087 	_ = b
   8088 	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
   8089 	// cond: is32Bit(off1+off2)
   8090 	// result: (MOVSSload [off1+off2] {sym} ptr mem)
   8091 	for {
   8092 		off1 := v.AuxInt
   8093 		sym := v.Aux
   8094 		v_0 := v.Args[0]
   8095 		if v_0.Op != OpAMD64ADDQconst {
   8096 			break
   8097 		}
   8098 		off2 := v_0.AuxInt
   8099 		ptr := v_0.Args[0]
   8100 		mem := v.Args[1]
   8101 		if !(is32Bit(off1 + off2)) {
   8102 			break
   8103 		}
   8104 		v.reset(OpAMD64MOVSSload)
   8105 		v.AuxInt = off1 + off2
   8106 		v.Aux = sym
   8107 		v.AddArg(ptr)
   8108 		v.AddArg(mem)
   8109 		return true
   8110 	}
   8111 	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
   8112 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   8113 	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   8114 	for {
   8115 		off1 := v.AuxInt
   8116 		sym1 := v.Aux
   8117 		v_0 := v.Args[0]
   8118 		if v_0.Op != OpAMD64LEAQ {
   8119 			break
   8120 		}
   8121 		off2 := v_0.AuxInt
   8122 		sym2 := v_0.Aux
   8123 		base := v_0.Args[0]
   8124 		mem := v.Args[1]
   8125 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   8126 			break
   8127 		}
   8128 		v.reset(OpAMD64MOVSSload)
   8129 		v.AuxInt = off1 + off2
   8130 		v.Aux = mergeSym(sym1, sym2)
   8131 		v.AddArg(base)
   8132 		v.AddArg(mem)
   8133 		return true
   8134 	}
   8135 	// match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
   8136 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   8137 	// result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   8138 	for {
   8139 		off1 := v.AuxInt
   8140 		sym1 := v.Aux
   8141 		v_0 := v.Args[0]
   8142 		if v_0.Op != OpAMD64LEAQ1 {
   8143 			break
   8144 		}
   8145 		off2 := v_0.AuxInt
   8146 		sym2 := v_0.Aux
   8147 		ptr := v_0.Args[0]
   8148 		idx := v_0.Args[1]
   8149 		mem := v.Args[1]
   8150 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   8151 			break
   8152 		}
   8153 		v.reset(OpAMD64MOVSSloadidx1)
   8154 		v.AuxInt = off1 + off2
   8155 		v.Aux = mergeSym(sym1, sym2)
   8156 		v.AddArg(ptr)
   8157 		v.AddArg(idx)
   8158 		v.AddArg(mem)
   8159 		return true
   8160 	}
   8161 	// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
   8162 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   8163 	// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   8164 	for {
   8165 		off1 := v.AuxInt
   8166 		sym1 := v.Aux
   8167 		v_0 := v.Args[0]
   8168 		if v_0.Op != OpAMD64LEAQ4 {
   8169 			break
   8170 		}
   8171 		off2 := v_0.AuxInt
   8172 		sym2 := v_0.Aux
   8173 		ptr := v_0.Args[0]
   8174 		idx := v_0.Args[1]
   8175 		mem := v.Args[1]
   8176 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   8177 			break
   8178 		}
   8179 		v.reset(OpAMD64MOVSSloadidx4)
   8180 		v.AuxInt = off1 + off2
   8181 		v.Aux = mergeSym(sym1, sym2)
   8182 		v.AddArg(ptr)
   8183 		v.AddArg(idx)
   8184 		v.AddArg(mem)
   8185 		return true
   8186 	}
   8187 	// match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
   8188 	// cond: ptr.Op != OpSB
   8189 	// result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
   8190 	for {
   8191 		off := v.AuxInt
   8192 		sym := v.Aux
   8193 		v_0 := v.Args[0]
   8194 		if v_0.Op != OpAMD64ADDQ {
   8195 			break
   8196 		}
   8197 		ptr := v_0.Args[0]
   8198 		idx := v_0.Args[1]
   8199 		mem := v.Args[1]
   8200 		if !(ptr.Op != OpSB) {
   8201 			break
   8202 		}
   8203 		v.reset(OpAMD64MOVSSloadidx1)
   8204 		v.AuxInt = off
   8205 		v.Aux = sym
   8206 		v.AddArg(ptr)
   8207 		v.AddArg(idx)
   8208 		v.AddArg(mem)
   8209 		return true
   8210 	}
   8211 	return false
   8212 }
   8213 func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool {
   8214 	b := v.Block
   8215 	_ = b
   8216 	// match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
   8217 	// cond:
   8218 	// result: (MOVSSloadidx4 [c] {sym} ptr idx mem)
   8219 	for {
   8220 		c := v.AuxInt
   8221 		sym := v.Aux
   8222 		ptr := v.Args[0]
   8223 		v_1 := v.Args[1]
   8224 		if v_1.Op != OpAMD64SHLQconst {
   8225 			break
   8226 		}
   8227 		if v_1.AuxInt != 2 {
   8228 			break
   8229 		}
   8230 		idx := v_1.Args[0]
   8231 		mem := v.Args[2]
   8232 		v.reset(OpAMD64MOVSSloadidx4)
   8233 		v.AuxInt = c
   8234 		v.Aux = sym
   8235 		v.AddArg(ptr)
   8236 		v.AddArg(idx)
   8237 		v.AddArg(mem)
   8238 		return true
   8239 	}
   8240 	// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
   8241 	// cond:
   8242 	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
   8243 	for {
   8244 		c := v.AuxInt
   8245 		sym := v.Aux
   8246 		v_0 := v.Args[0]
   8247 		if v_0.Op != OpAMD64ADDQconst {
   8248 			break
   8249 		}
   8250 		d := v_0.AuxInt
   8251 		ptr := v_0.Args[0]
   8252 		idx := v.Args[1]
   8253 		mem := v.Args[2]
   8254 		v.reset(OpAMD64MOVSSloadidx1)
   8255 		v.AuxInt = c + d
   8256 		v.Aux = sym
   8257 		v.AddArg(ptr)
   8258 		v.AddArg(idx)
   8259 		v.AddArg(mem)
   8260 		return true
   8261 	}
   8262 	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
   8263 	// cond:
   8264 	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
   8265 	for {
   8266 		c := v.AuxInt
   8267 		sym := v.Aux
   8268 		ptr := v.Args[0]
   8269 		v_1 := v.Args[1]
   8270 		if v_1.Op != OpAMD64ADDQconst {
   8271 			break
   8272 		}
   8273 		d := v_1.AuxInt
   8274 		idx := v_1.Args[0]
   8275 		mem := v.Args[2]
   8276 		v.reset(OpAMD64MOVSSloadidx1)
   8277 		v.AuxInt = c + d
   8278 		v.Aux = sym
   8279 		v.AddArg(ptr)
   8280 		v.AddArg(idx)
   8281 		v.AddArg(mem)
   8282 		return true
   8283 	}
   8284 	return false
   8285 }
   8286 func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool {
   8287 	b := v.Block
   8288 	_ = b
   8289 	// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
   8290 	// cond:
   8291 	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
   8292 	for {
   8293 		c := v.AuxInt
   8294 		sym := v.Aux
   8295 		v_0 := v.Args[0]
   8296 		if v_0.Op != OpAMD64ADDQconst {
   8297 			break
   8298 		}
   8299 		d := v_0.AuxInt
   8300 		ptr := v_0.Args[0]
   8301 		idx := v.Args[1]
   8302 		mem := v.Args[2]
   8303 		v.reset(OpAMD64MOVSSloadidx4)
   8304 		v.AuxInt = c + d
   8305 		v.Aux = sym
   8306 		v.AddArg(ptr)
   8307 		v.AddArg(idx)
   8308 		v.AddArg(mem)
   8309 		return true
   8310 	}
   8311 	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
   8312 	// cond:
   8313 	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
   8314 	for {
   8315 		c := v.AuxInt
   8316 		sym := v.Aux
   8317 		ptr := v.Args[0]
   8318 		v_1 := v.Args[1]
   8319 		if v_1.Op != OpAMD64ADDQconst {
   8320 			break
   8321 		}
   8322 		d := v_1.AuxInt
   8323 		idx := v_1.Args[0]
   8324 		mem := v.Args[2]
   8325 		v.reset(OpAMD64MOVSSloadidx4)
   8326 		v.AuxInt = c + 4*d
   8327 		v.Aux = sym
   8328 		v.AddArg(ptr)
   8329 		v.AddArg(idx)
   8330 		v.AddArg(mem)
   8331 		return true
   8332 	}
   8333 	return false
   8334 }
   8335 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
   8336 	b := v.Block
   8337 	_ = b
   8338 	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
   8339 	// cond: is32Bit(off1+off2)
   8340 	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
   8341 	for {
   8342 		off1 := v.AuxInt
   8343 		sym := v.Aux
   8344 		v_0 := v.Args[0]
   8345 		if v_0.Op != OpAMD64ADDQconst {
   8346 			break
   8347 		}
   8348 		off2 := v_0.AuxInt
   8349 		ptr := v_0.Args[0]
   8350 		val := v.Args[1]
   8351 		mem := v.Args[2]
   8352 		if !(is32Bit(off1 + off2)) {
   8353 			break
   8354 		}
   8355 		v.reset(OpAMD64MOVSSstore)
   8356 		v.AuxInt = off1 + off2
   8357 		v.Aux = sym
   8358 		v.AddArg(ptr)
   8359 		v.AddArg(val)
   8360 		v.AddArg(mem)
   8361 		return true
   8362 	}
   8363 	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
   8364 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   8365 	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   8366 	for {
   8367 		off1 := v.AuxInt
   8368 		sym1 := v.Aux
   8369 		v_0 := v.Args[0]
   8370 		if v_0.Op != OpAMD64LEAQ {
   8371 			break
   8372 		}
   8373 		off2 := v_0.AuxInt
   8374 		sym2 := v_0.Aux
   8375 		base := v_0.Args[0]
   8376 		val := v.Args[1]
   8377 		mem := v.Args[2]
   8378 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   8379 			break
   8380 		}
   8381 		v.reset(OpAMD64MOVSSstore)
   8382 		v.AuxInt = off1 + off2
   8383 		v.Aux = mergeSym(sym1, sym2)
   8384 		v.AddArg(base)
   8385 		v.AddArg(val)
   8386 		v.AddArg(mem)
   8387 		return true
   8388 	}
   8389 	// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
   8390 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   8391 	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   8392 	for {
   8393 		off1 := v.AuxInt
   8394 		sym1 := v.Aux
   8395 		v_0 := v.Args[0]
   8396 		if v_0.Op != OpAMD64LEAQ1 {
   8397 			break
   8398 		}
   8399 		off2 := v_0.AuxInt
   8400 		sym2 := v_0.Aux
   8401 		ptr := v_0.Args[0]
   8402 		idx := v_0.Args[1]
   8403 		val := v.Args[1]
   8404 		mem := v.Args[2]
   8405 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   8406 			break
   8407 		}
   8408 		v.reset(OpAMD64MOVSSstoreidx1)
   8409 		v.AuxInt = off1 + off2
   8410 		v.Aux = mergeSym(sym1, sym2)
   8411 		v.AddArg(ptr)
   8412 		v.AddArg(idx)
   8413 		v.AddArg(val)
   8414 		v.AddArg(mem)
   8415 		return true
   8416 	}
   8417 	// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
   8418 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   8419 	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   8420 	for {
   8421 		off1 := v.AuxInt
   8422 		sym1 := v.Aux
   8423 		v_0 := v.Args[0]
   8424 		if v_0.Op != OpAMD64LEAQ4 {
   8425 			break
   8426 		}
   8427 		off2 := v_0.AuxInt
   8428 		sym2 := v_0.Aux
   8429 		ptr := v_0.Args[0]
   8430 		idx := v_0.Args[1]
   8431 		val := v.Args[1]
   8432 		mem := v.Args[2]
   8433 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   8434 			break
   8435 		}
   8436 		v.reset(OpAMD64MOVSSstoreidx4)
   8437 		v.AuxInt = off1 + off2
   8438 		v.Aux = mergeSym(sym1, sym2)
   8439 		v.AddArg(ptr)
   8440 		v.AddArg(idx)
   8441 		v.AddArg(val)
   8442 		v.AddArg(mem)
   8443 		return true
   8444 	}
   8445 	// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
   8446 	// cond: ptr.Op != OpSB
   8447 	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
   8448 	for {
   8449 		off := v.AuxInt
   8450 		sym := v.Aux
   8451 		v_0 := v.Args[0]
   8452 		if v_0.Op != OpAMD64ADDQ {
   8453 			break
   8454 		}
   8455 		ptr := v_0.Args[0]
   8456 		idx := v_0.Args[1]
   8457 		val := v.Args[1]
   8458 		mem := v.Args[2]
   8459 		if !(ptr.Op != OpSB) {
   8460 			break
   8461 		}
   8462 		v.reset(OpAMD64MOVSSstoreidx1)
   8463 		v.AuxInt = off
   8464 		v.Aux = sym
   8465 		v.AddArg(ptr)
   8466 		v.AddArg(idx)
   8467 		v.AddArg(val)
   8468 		v.AddArg(mem)
   8469 		return true
   8470 	}
   8471 	return false
   8472 }
   8473 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool {
   8474 	b := v.Block
   8475 	_ = b
   8476 	// match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
   8477 	// cond:
   8478 	// result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
   8479 	for {
   8480 		c := v.AuxInt
   8481 		sym := v.Aux
   8482 		ptr := v.Args[0]
   8483 		v_1 := v.Args[1]
   8484 		if v_1.Op != OpAMD64SHLQconst {
   8485 			break
   8486 		}
   8487 		if v_1.AuxInt != 2 {
   8488 			break
   8489 		}
   8490 		idx := v_1.Args[0]
   8491 		val := v.Args[2]
   8492 		mem := v.Args[3]
   8493 		v.reset(OpAMD64MOVSSstoreidx4)
   8494 		v.AuxInt = c
   8495 		v.Aux = sym
   8496 		v.AddArg(ptr)
   8497 		v.AddArg(idx)
   8498 		v.AddArg(val)
   8499 		v.AddArg(mem)
   8500 		return true
   8501 	}
   8502 	// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
   8503 	// cond:
   8504 	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
   8505 	for {
   8506 		c := v.AuxInt
   8507 		sym := v.Aux
   8508 		v_0 := v.Args[0]
   8509 		if v_0.Op != OpAMD64ADDQconst {
   8510 			break
   8511 		}
   8512 		d := v_0.AuxInt
   8513 		ptr := v_0.Args[0]
   8514 		idx := v.Args[1]
   8515 		val := v.Args[2]
   8516 		mem := v.Args[3]
   8517 		v.reset(OpAMD64MOVSSstoreidx1)
   8518 		v.AuxInt = c + d
   8519 		v.Aux = sym
   8520 		v.AddArg(ptr)
   8521 		v.AddArg(idx)
   8522 		v.AddArg(val)
   8523 		v.AddArg(mem)
   8524 		return true
   8525 	}
   8526 	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
   8527 	// cond:
   8528 	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
   8529 	for {
   8530 		c := v.AuxInt
   8531 		sym := v.Aux
   8532 		ptr := v.Args[0]
   8533 		v_1 := v.Args[1]
   8534 		if v_1.Op != OpAMD64ADDQconst {
   8535 			break
   8536 		}
   8537 		d := v_1.AuxInt
   8538 		idx := v_1.Args[0]
   8539 		val := v.Args[2]
   8540 		mem := v.Args[3]
   8541 		v.reset(OpAMD64MOVSSstoreidx1)
   8542 		v.AuxInt = c + d
   8543 		v.Aux = sym
   8544 		v.AddArg(ptr)
   8545 		v.AddArg(idx)
   8546 		v.AddArg(val)
   8547 		v.AddArg(mem)
   8548 		return true
   8549 	}
   8550 	return false
   8551 }
   8552 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool {
   8553 	b := v.Block
   8554 	_ = b
   8555 	// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
   8556 	// cond:
   8557 	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
   8558 	for {
   8559 		c := v.AuxInt
   8560 		sym := v.Aux
   8561 		v_0 := v.Args[0]
   8562 		if v_0.Op != OpAMD64ADDQconst {
   8563 			break
   8564 		}
   8565 		d := v_0.AuxInt
   8566 		ptr := v_0.Args[0]
   8567 		idx := v.Args[1]
   8568 		val := v.Args[2]
   8569 		mem := v.Args[3]
   8570 		v.reset(OpAMD64MOVSSstoreidx4)
   8571 		v.AuxInt = c + d
   8572 		v.Aux = sym
   8573 		v.AddArg(ptr)
   8574 		v.AddArg(idx)
   8575 		v.AddArg(val)
   8576 		v.AddArg(mem)
   8577 		return true
   8578 	}
   8579 	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
   8580 	// cond:
   8581 	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
   8582 	for {
   8583 		c := v.AuxInt
   8584 		sym := v.Aux
   8585 		ptr := v.Args[0]
   8586 		v_1 := v.Args[1]
   8587 		if v_1.Op != OpAMD64ADDQconst {
   8588 			break
   8589 		}
   8590 		d := v_1.AuxInt
   8591 		idx := v_1.Args[0]
   8592 		val := v.Args[2]
   8593 		mem := v.Args[3]
   8594 		v.reset(OpAMD64MOVSSstoreidx4)
   8595 		v.AuxInt = c + 4*d
   8596 		v.Aux = sym
   8597 		v.AddArg(ptr)
   8598 		v.AddArg(idx)
   8599 		v.AddArg(val)
   8600 		v.AddArg(mem)
   8601 		return true
   8602 	}
   8603 	return false
   8604 }
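// MOVWQSX sign-extends 16 bits to 64. When its operand is a narrow load
// with a single use (x.Uses == 1 plus clobber(x)), the extension merges
// into the load as MOVWQSXload, constructed in the load's own block
// (@x.Block). The last rule exploits masking: if (ANDLconst [c] x) has bit
// 15 clear (c & 0x8000 == 0), the result is provably non-negative, so
// sign-extension is the identity and ANDLconst [c & 0x7fff] suffices.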
   8605 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool {
   8606 	b := v.Block
   8607 	_ = b
   8608 	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
   8609 	// cond: x.Uses == 1 && clobber(x)
   8610 	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   8611 	for {
   8612 		x := v.Args[0]
   8613 		if x.Op != OpAMD64MOVWload {
   8614 			break
   8615 		}
   8616 		off := x.AuxInt
   8617 		sym := x.Aux
   8618 		ptr := x.Args[0]
   8619 		mem := x.Args[1]
   8620 		if !(x.Uses == 1 && clobber(x)) {
   8621 			break
   8622 		}
   8623 		b = x.Block
   8624 		v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type)
   8625 		v.reset(OpCopy)
   8626 		v.AddArg(v0)
   8627 		v0.AuxInt = off
   8628 		v0.Aux = sym
   8629 		v0.AddArg(ptr)
   8630 		v0.AddArg(mem)
   8631 		return true
   8632 	}
   8633 	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
   8634 	// cond: x.Uses == 1 && clobber(x)
   8635 	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   8636 	for {
   8637 		x := v.Args[0]
   8638 		if x.Op != OpAMD64MOVLload {
   8639 			break
   8640 		}
   8641 		off := x.AuxInt
   8642 		sym := x.Aux
   8643 		ptr := x.Args[0]
   8644 		mem := x.Args[1]
   8645 		if !(x.Uses == 1 && clobber(x)) {
   8646 			break
   8647 		}
   8648 		b = x.Block
   8649 		v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type)
   8650 		v.reset(OpCopy)
   8651 		v.AddArg(v0)
   8652 		v0.AuxInt = off
   8653 		v0.Aux = sym
   8654 		v0.AddArg(ptr)
   8655 		v0.AddArg(mem)
   8656 		return true
   8657 	}
   8658 	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
   8659 	// cond: x.Uses == 1 && clobber(x)
   8660 	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
   8661 	for {
   8662 		x := v.Args[0]
   8663 		if x.Op != OpAMD64MOVQload {
   8664 			break
   8665 		}
   8666 		off := x.AuxInt
   8667 		sym := x.Aux
   8668 		ptr := x.Args[0]
   8669 		mem := x.Args[1]
   8670 		if !(x.Uses == 1 && clobber(x)) {
   8671 			break
   8672 		}
   8673 		b = x.Block
   8674 		v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type)
   8675 		v.reset(OpCopy)
   8676 		v.AddArg(v0)
   8677 		v0.AuxInt = off
   8678 		v0.Aux = sym
   8679 		v0.AddArg(ptr)
   8680 		v0.AddArg(mem)
   8681 		return true
   8682 	}
   8683 	// match: (MOVWQSX (ANDLconst [c] x))
   8684 	// cond: c & 0x8000 == 0
   8685 	// result: (ANDLconst [c & 0x7fff] x)
   8686 	for {
   8687 		v_0 := v.Args[0]
   8688 		if v_0.Op != OpAMD64ANDLconst {
   8689 			break
   8690 		}
   8691 		c := v_0.AuxInt
   8692 		x := v_0.Args[0]
   8693 		if !(c&0x8000 == 0) {
   8694 			break
   8695 		}
   8696 		v.reset(OpAMD64ANDLconst)
   8697 		v.AuxInt = c & 0x7fff
   8698 		v.AddArg(x)
   8699 		return true
   8700 	}
   8701 	return false
   8702 }
   8703 func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool {
   8704 	b := v.Block
   8705 	_ = b
   8706 	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
   8707 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   8708 	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   8709 	for {
   8710 		off1 := v.AuxInt
   8711 		sym1 := v.Aux
   8712 		v_0 := v.Args[0]
   8713 		if v_0.Op != OpAMD64LEAQ {
   8714 			break
   8715 		}
   8716 		off2 := v_0.AuxInt
   8717 		sym2 := v_0.Aux
   8718 		base := v_0.Args[0]
   8719 		mem := v.Args[1]
   8720 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   8721 			break
   8722 		}
   8723 		v.reset(OpAMD64MOVWQSXload)
   8724 		v.AuxInt = off1 + off2
   8725 		v.Aux = mergeSym(sym1, sym2)
   8726 		v.AddArg(base)
   8727 		v.AddArg(mem)
   8728 		return true
   8729 	}
   8730 	return false
   8731 }
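// MOVWQZX is the zero-extending counterpart: single-use narrow loads,
// including the indexed idx1/idx2 forms, become a plain MOVWload, which
// already zeroes the upper bits. The ANDLconst rule needs no condition at
// all, since c & 0xffff both applies the original mask and clears every bit
// above 15, subsuming the zero-extension.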
   8732 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool {
   8733 	b := v.Block
   8734 	_ = b
   8735 	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
   8736 	// cond: x.Uses == 1 && clobber(x)
   8737 	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   8738 	for {
   8739 		x := v.Args[0]
   8740 		if x.Op != OpAMD64MOVWload {
   8741 			break
   8742 		}
   8743 		off := x.AuxInt
   8744 		sym := x.Aux
   8745 		ptr := x.Args[0]
   8746 		mem := x.Args[1]
   8747 		if !(x.Uses == 1 && clobber(x)) {
   8748 			break
   8749 		}
   8750 		b = x.Block
   8751 		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type)
   8752 		v.reset(OpCopy)
   8753 		v.AddArg(v0)
   8754 		v0.AuxInt = off
   8755 		v0.Aux = sym
   8756 		v0.AddArg(ptr)
   8757 		v0.AddArg(mem)
   8758 		return true
   8759 	}
   8760 	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
   8761 	// cond: x.Uses == 1 && clobber(x)
   8762 	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   8763 	for {
   8764 		x := v.Args[0]
   8765 		if x.Op != OpAMD64MOVLload {
   8766 			break
   8767 		}
   8768 		off := x.AuxInt
   8769 		sym := x.Aux
   8770 		ptr := x.Args[0]
   8771 		mem := x.Args[1]
   8772 		if !(x.Uses == 1 && clobber(x)) {
   8773 			break
   8774 		}
   8775 		b = x.Block
   8776 		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type)
   8777 		v.reset(OpCopy)
   8778 		v.AddArg(v0)
   8779 		v0.AuxInt = off
   8780 		v0.Aux = sym
   8781 		v0.AddArg(ptr)
   8782 		v0.AddArg(mem)
   8783 		return true
   8784 	}
   8785 	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
   8786 	// cond: x.Uses == 1 && clobber(x)
   8787 	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
   8788 	for {
   8789 		x := v.Args[0]
   8790 		if x.Op != OpAMD64MOVQload {
   8791 			break
   8792 		}
   8793 		off := x.AuxInt
   8794 		sym := x.Aux
   8795 		ptr := x.Args[0]
   8796 		mem := x.Args[1]
   8797 		if !(x.Uses == 1 && clobber(x)) {
   8798 			break
   8799 		}
   8800 		b = x.Block
   8801 		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type)
   8802 		v.reset(OpCopy)
   8803 		v.AddArg(v0)
   8804 		v0.AuxInt = off
   8805 		v0.Aux = sym
   8806 		v0.AddArg(ptr)
   8807 		v0.AddArg(mem)
   8808 		return true
   8809 	}
   8810 	// match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
   8811 	// cond: x.Uses == 1 && clobber(x)
   8812 	// result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
   8813 	for {
   8814 		x := v.Args[0]
   8815 		if x.Op != OpAMD64MOVWloadidx1 {
   8816 			break
   8817 		}
   8818 		off := x.AuxInt
   8819 		sym := x.Aux
   8820 		ptr := x.Args[0]
   8821 		idx := x.Args[1]
   8822 		mem := x.Args[2]
   8823 		if !(x.Uses == 1 && clobber(x)) {
   8824 			break
   8825 		}
   8826 		b = x.Block
   8827 		v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
   8828 		v.reset(OpCopy)
   8829 		v.AddArg(v0)
   8830 		v0.AuxInt = off
   8831 		v0.Aux = sym
   8832 		v0.AddArg(ptr)
   8833 		v0.AddArg(idx)
   8834 		v0.AddArg(mem)
   8835 		return true
   8836 	}
   8837 	// match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
   8838 	// cond: x.Uses == 1 && clobber(x)
   8839 	// result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
   8840 	for {
   8841 		x := v.Args[0]
   8842 		if x.Op != OpAMD64MOVWloadidx2 {
   8843 			break
   8844 		}
   8845 		off := x.AuxInt
   8846 		sym := x.Aux
   8847 		ptr := x.Args[0]
   8848 		idx := x.Args[1]
   8849 		mem := x.Args[2]
   8850 		if !(x.Uses == 1 && clobber(x)) {
   8851 			break
   8852 		}
   8853 		b = x.Block
   8854 		v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx2, v.Type)
   8855 		v.reset(OpCopy)
   8856 		v.AddArg(v0)
   8857 		v0.AuxInt = off
   8858 		v0.Aux = sym
   8859 		v0.AddArg(ptr)
   8860 		v0.AddArg(idx)
   8861 		v0.AddArg(mem)
   8862 		return true
   8863 	}
   8864 	// match: (MOVWQZX (ANDLconst [c] x))
   8865 	// cond:
   8866 	// result: (ANDLconst [c & 0xffff] x)
   8867 	for {
   8868 		v_0 := v.Args[0]
   8869 		if v_0.Op != OpAMD64ANDLconst {
   8870 			break
   8871 		}
   8872 		c := v_0.AuxInt
   8873 		x := v_0.Args[0]
   8874 		v.reset(OpAMD64ANDLconst)
   8875 		v.AuxInt = c & 0xffff
   8876 		v.AddArg(x)
   8877 		return true
   8878 	}
   8879 	return false
   8880 }
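// The first MOVWload rule is store-to-load forwarding: a load that reads
// back exactly what a MOVWstore wrote (same sym, same offset, isSamePtr,
// with the store supplying the load's memory argument) is replaced by the
// stored value x, skipping memory entirely. The remaining rules are the
// usual offset and addressing-mode folding, including 32-bit LEAL and
// ADDLconst variants of the base-pointer cases.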
   8881 func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
   8882 	b := v.Block
   8883 	_ = b
   8884 	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
   8885 	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
   8886 	// result: x
   8887 	for {
   8888 		off := v.AuxInt
   8889 		sym := v.Aux
   8890 		ptr := v.Args[0]
   8891 		v_1 := v.Args[1]
   8892 		if v_1.Op != OpAMD64MOVWstore {
   8893 			break
   8894 		}
   8895 		off2 := v_1.AuxInt
   8896 		sym2 := v_1.Aux
   8897 		ptr2 := v_1.Args[0]
   8898 		x := v_1.Args[1]
   8899 		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
   8900 			break
   8901 		}
   8902 		v.reset(OpCopy)
   8903 		v.Type = x.Type
   8904 		v.AddArg(x)
   8905 		return true
   8906 	}
   8907 	// match: (MOVWload  [off1] {sym} (ADDQconst [off2] ptr) mem)
   8908 	// cond: is32Bit(off1+off2)
   8909 	// result: (MOVWload  [off1+off2] {sym} ptr mem)
   8910 	for {
   8911 		off1 := v.AuxInt
   8912 		sym := v.Aux
   8913 		v_0 := v.Args[0]
   8914 		if v_0.Op != OpAMD64ADDQconst {
   8915 			break
   8916 		}
   8917 		off2 := v_0.AuxInt
   8918 		ptr := v_0.Args[0]
   8919 		mem := v.Args[1]
   8920 		if !(is32Bit(off1 + off2)) {
   8921 			break
   8922 		}
   8923 		v.reset(OpAMD64MOVWload)
   8924 		v.AuxInt = off1 + off2
   8925 		v.Aux = sym
   8926 		v.AddArg(ptr)
   8927 		v.AddArg(mem)
   8928 		return true
   8929 	}
   8930 	// match: (MOVWload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
   8931 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   8932 	// result: (MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   8933 	for {
   8934 		off1 := v.AuxInt
   8935 		sym1 := v.Aux
   8936 		v_0 := v.Args[0]
   8937 		if v_0.Op != OpAMD64LEAQ {
   8938 			break
   8939 		}
   8940 		off2 := v_0.AuxInt
   8941 		sym2 := v_0.Aux
   8942 		base := v_0.Args[0]
   8943 		mem := v.Args[1]
   8944 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   8945 			break
   8946 		}
   8947 		v.reset(OpAMD64MOVWload)
   8948 		v.AuxInt = off1 + off2
   8949 		v.Aux = mergeSym(sym1, sym2)
   8950 		v.AddArg(base)
   8951 		v.AddArg(mem)
   8952 		return true
   8953 	}
   8954 	// match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
   8955 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   8956 	// result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   8957 	for {
   8958 		off1 := v.AuxInt
   8959 		sym1 := v.Aux
   8960 		v_0 := v.Args[0]
   8961 		if v_0.Op != OpAMD64LEAQ1 {
   8962 			break
   8963 		}
   8964 		off2 := v_0.AuxInt
   8965 		sym2 := v_0.Aux
   8966 		ptr := v_0.Args[0]
   8967 		idx := v_0.Args[1]
   8968 		mem := v.Args[1]
   8969 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   8970 			break
   8971 		}
   8972 		v.reset(OpAMD64MOVWloadidx1)
   8973 		v.AuxInt = off1 + off2
   8974 		v.Aux = mergeSym(sym1, sym2)
   8975 		v.AddArg(ptr)
   8976 		v.AddArg(idx)
   8977 		v.AddArg(mem)
   8978 		return true
   8979 	}
   8980 	// match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
   8981 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   8982 	// result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
   8983 	for {
   8984 		off1 := v.AuxInt
   8985 		sym1 := v.Aux
   8986 		v_0 := v.Args[0]
   8987 		if v_0.Op != OpAMD64LEAQ2 {
   8988 			break
   8989 		}
   8990 		off2 := v_0.AuxInt
   8991 		sym2 := v_0.Aux
   8992 		ptr := v_0.Args[0]
   8993 		idx := v_0.Args[1]
   8994 		mem := v.Args[1]
   8995 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   8996 			break
   8997 		}
   8998 		v.reset(OpAMD64MOVWloadidx2)
   8999 		v.AuxInt = off1 + off2
   9000 		v.Aux = mergeSym(sym1, sym2)
   9001 		v.AddArg(ptr)
   9002 		v.AddArg(idx)
   9003 		v.AddArg(mem)
   9004 		return true
   9005 	}
   9006 	// match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
   9007 	// cond: ptr.Op != OpSB
   9008 	// result: (MOVWloadidx1 [off] {sym} ptr idx mem)
   9009 	for {
   9010 		off := v.AuxInt
   9011 		sym := v.Aux
   9012 		v_0 := v.Args[0]
   9013 		if v_0.Op != OpAMD64ADDQ {
   9014 			break
   9015 		}
   9016 		ptr := v_0.Args[0]
   9017 		idx := v_0.Args[1]
   9018 		mem := v.Args[1]
   9019 		if !(ptr.Op != OpSB) {
   9020 			break
   9021 		}
   9022 		v.reset(OpAMD64MOVWloadidx1)
   9023 		v.AuxInt = off
   9024 		v.Aux = sym
   9025 		v.AddArg(ptr)
   9026 		v.AddArg(idx)
   9027 		v.AddArg(mem)
   9028 		return true
   9029 	}
   9030 	// match: (MOVWload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
   9031 	// cond: canMergeSym(sym1, sym2)
   9032 	// result: (MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   9033 	for {
   9034 		off1 := v.AuxInt
   9035 		sym1 := v.Aux
   9036 		v_0 := v.Args[0]
   9037 		if v_0.Op != OpAMD64LEAL {
   9038 			break
   9039 		}
   9040 		off2 := v_0.AuxInt
   9041 		sym2 := v_0.Aux
   9042 		base := v_0.Args[0]
   9043 		mem := v.Args[1]
   9044 		if !(canMergeSym(sym1, sym2)) {
   9045 			break
   9046 		}
   9047 		v.reset(OpAMD64MOVWload)
   9048 		v.AuxInt = off1 + off2
   9049 		v.Aux = mergeSym(sym1, sym2)
   9050 		v.AddArg(base)
   9051 		v.AddArg(mem)
   9052 		return true
   9053 	}
   9054 	// match: (MOVWload  [off1] {sym} (ADDLconst [off2] ptr) mem)
   9055 	// cond: is32Bit(off1+off2)
   9056 	// result: (MOVWload  [off1+off2] {sym} ptr mem)
   9057 	for {
   9058 		off1 := v.AuxInt
   9059 		sym := v.Aux
   9060 		v_0 := v.Args[0]
   9061 		if v_0.Op != OpAMD64ADDLconst {
   9062 			break
   9063 		}
   9064 		off2 := v_0.AuxInt
   9065 		ptr := v_0.Args[0]
   9066 		mem := v.Args[1]
   9067 		if !(is32Bit(off1 + off2)) {
   9068 			break
   9069 		}
   9070 		v.reset(OpAMD64MOVWload)
   9071 		v.AuxInt = off1 + off2
   9072 		v.Aux = sym
   9073 		v.AddArg(ptr)
   9074 		v.AddArg(mem)
   9075 		return true
   9076 	}
   9077 	return false
   9078 }
   9079 func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool {
   9080 	b := v.Block
   9081 	_ = b
   9082 	// match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
   9083 	// cond:
   9084 	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
   9085 	for {
   9086 		c := v.AuxInt
   9087 		sym := v.Aux
   9088 		ptr := v.Args[0]
   9089 		v_1 := v.Args[1]
   9090 		if v_1.Op != OpAMD64SHLQconst {
   9091 			break
   9092 		}
   9093 		if v_1.AuxInt != 1 {
   9094 			break
   9095 		}
   9096 		idx := v_1.Args[0]
   9097 		mem := v.Args[2]
   9098 		v.reset(OpAMD64MOVWloadidx2)
   9099 		v.AuxInt = c
   9100 		v.Aux = sym
   9101 		v.AddArg(ptr)
   9102 		v.AddArg(idx)
   9103 		v.AddArg(mem)
   9104 		return true
   9105 	}
   9106 	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
   9107 	// cond:
   9108 	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
   9109 	for {
   9110 		c := v.AuxInt
   9111 		sym := v.Aux
   9112 		v_0 := v.Args[0]
   9113 		if v_0.Op != OpAMD64ADDQconst {
   9114 			break
   9115 		}
   9116 		d := v_0.AuxInt
   9117 		ptr := v_0.Args[0]
   9118 		idx := v.Args[1]
   9119 		mem := v.Args[2]
   9120 		v.reset(OpAMD64MOVWloadidx1)
   9121 		v.AuxInt = c + d
   9122 		v.Aux = sym
   9123 		v.AddArg(ptr)
   9124 		v.AddArg(idx)
   9125 		v.AddArg(mem)
   9126 		return true
   9127 	}
   9128 	// match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
   9129 	// cond:
   9130 	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
   9131 	for {
   9132 		c := v.AuxInt
   9133 		sym := v.Aux
   9134 		ptr := v.Args[0]
   9135 		v_1 := v.Args[1]
   9136 		if v_1.Op != OpAMD64ADDQconst {
   9137 			break
   9138 		}
   9139 		d := v_1.AuxInt
   9140 		idx := v_1.Args[0]
   9141 		mem := v.Args[2]
   9142 		v.reset(OpAMD64MOVWloadidx1)
   9143 		v.AuxInt = c + d
   9144 		v.Aux = sym
   9145 		v.AddArg(ptr)
   9146 		v.AddArg(idx)
   9147 		v.AddArg(mem)
   9148 		return true
   9149 	}
   9150 	return false
   9151 }
   9152 func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool {
   9153 	b := v.Block
   9154 	_ = b
   9155 	// match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
   9156 	// cond:
   9157 	// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
   9158 	for {
   9159 		c := v.AuxInt
   9160 		sym := v.Aux
   9161 		v_0 := v.Args[0]
   9162 		if v_0.Op != OpAMD64ADDQconst {
   9163 			break
   9164 		}
   9165 		d := v_0.AuxInt
   9166 		ptr := v_0.Args[0]
   9167 		idx := v.Args[1]
   9168 		mem := v.Args[2]
   9169 		v.reset(OpAMD64MOVWloadidx2)
   9170 		v.AuxInt = c + d
   9171 		v.Aux = sym
   9172 		v.AddArg(ptr)
   9173 		v.AddArg(idx)
   9174 		v.AddArg(mem)
   9175 		return true
   9176 	}
   9177 	// match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
   9178 	// cond:
   9179 	// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
   9180 	for {
   9181 		c := v.AuxInt
   9182 		sym := v.Aux
   9183 		ptr := v.Args[0]
   9184 		v_1 := v.Args[1]
   9185 		if v_1.Op != OpAMD64ADDQconst {
   9186 			break
   9187 		}
   9188 		d := v_1.AuxInt
   9189 		idx := v_1.Args[0]
   9190 		mem := v.Args[2]
   9191 		v.reset(OpAMD64MOVWloadidx2)
   9192 		v.AuxInt = c + 2*d
   9193 		v.Aux = sym
   9194 		v.AddArg(ptr)
   9195 		v.AddArg(idx)
   9196 		v.AddArg(mem)
   9197 		return true
   9198 	}
   9199 	return false
   9200 }
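// Besides dropping redundant MOVWQSX/MOVWQZX on the stored value (a 16-bit
// store only keeps the low 16 bits anyway) and the usual address folding,
// MOVWstore combines adjacent narrow stores. A sketch of the pattern:
//
//	(MOVWstore [i] {s} p (SHRQconst [16] w)
//	    x:(MOVWstore [i-2] {s} p w mem))
//
// rewrites to (MOVLstore [i-2] {s} p w mem): little-endian x86 places w's
// low half at i-2 and bits 16-31 at i, exactly what the two 16-bit stores
// did, and the single-use inner store can be clobbered.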
   9201 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
   9202 	b := v.Block
   9203 	_ = b
   9204 	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
   9205 	// cond:
   9206 	// result: (MOVWstore [off] {sym} ptr x mem)
   9207 	for {
   9208 		off := v.AuxInt
   9209 		sym := v.Aux
   9210 		ptr := v.Args[0]
   9211 		v_1 := v.Args[1]
   9212 		if v_1.Op != OpAMD64MOVWQSX {
   9213 			break
   9214 		}
   9215 		x := v_1.Args[0]
   9216 		mem := v.Args[2]
   9217 		v.reset(OpAMD64MOVWstore)
   9218 		v.AuxInt = off
   9219 		v.Aux = sym
   9220 		v.AddArg(ptr)
   9221 		v.AddArg(x)
   9222 		v.AddArg(mem)
   9223 		return true
   9224 	}
   9225 	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
   9226 	// cond:
   9227 	// result: (MOVWstore [off] {sym} ptr x mem)
   9228 	for {
   9229 		off := v.AuxInt
   9230 		sym := v.Aux
   9231 		ptr := v.Args[0]
   9232 		v_1 := v.Args[1]
   9233 		if v_1.Op != OpAMD64MOVWQZX {
   9234 			break
   9235 		}
   9236 		x := v_1.Args[0]
   9237 		mem := v.Args[2]
   9238 		v.reset(OpAMD64MOVWstore)
   9239 		v.AuxInt = off
   9240 		v.Aux = sym
   9241 		v.AddArg(ptr)
   9242 		v.AddArg(x)
   9243 		v.AddArg(mem)
   9244 		return true
   9245 	}
   9246 	// match: (MOVWstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
   9247 	// cond: is32Bit(off1+off2)
   9248 	// result: (MOVWstore  [off1+off2] {sym} ptr val mem)
   9249 	for {
   9250 		off1 := v.AuxInt
   9251 		sym := v.Aux
   9252 		v_0 := v.Args[0]
   9253 		if v_0.Op != OpAMD64ADDQconst {
   9254 			break
   9255 		}
   9256 		off2 := v_0.AuxInt
   9257 		ptr := v_0.Args[0]
   9258 		val := v.Args[1]
   9259 		mem := v.Args[2]
   9260 		if !(is32Bit(off1 + off2)) {
   9261 			break
   9262 		}
   9263 		v.reset(OpAMD64MOVWstore)
   9264 		v.AuxInt = off1 + off2
   9265 		v.Aux = sym
   9266 		v.AddArg(ptr)
   9267 		v.AddArg(val)
   9268 		v.AddArg(mem)
   9269 		return true
   9270 	}
   9271 	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
   9272 	// cond: validOff(off)
   9273 	// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
   9274 	for {
   9275 		off := v.AuxInt
   9276 		sym := v.Aux
   9277 		ptr := v.Args[0]
   9278 		v_1 := v.Args[1]
   9279 		if v_1.Op != OpAMD64MOVLconst {
   9280 			break
   9281 		}
   9282 		c := v_1.AuxInt
   9283 		mem := v.Args[2]
   9284 		if !(validOff(off)) {
   9285 			break
   9286 		}
   9287 		v.reset(OpAMD64MOVWstoreconst)
   9288 		v.AuxInt = makeValAndOff(int64(int16(c)), off)
   9289 		v.Aux = sym
   9290 		v.AddArg(ptr)
   9291 		v.AddArg(mem)
   9292 		return true
   9293 	}
   9294 	// match: (MOVWstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
   9295 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   9296 	// result: (MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   9297 	for {
   9298 		off1 := v.AuxInt
   9299 		sym1 := v.Aux
   9300 		v_0 := v.Args[0]
   9301 		if v_0.Op != OpAMD64LEAQ {
   9302 			break
   9303 		}
   9304 		off2 := v_0.AuxInt
   9305 		sym2 := v_0.Aux
   9306 		base := v_0.Args[0]
   9307 		val := v.Args[1]
   9308 		mem := v.Args[2]
   9309 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   9310 			break
   9311 		}
   9312 		v.reset(OpAMD64MOVWstore)
   9313 		v.AuxInt = off1 + off2
   9314 		v.Aux = mergeSym(sym1, sym2)
   9315 		v.AddArg(base)
   9316 		v.AddArg(val)
   9317 		v.AddArg(mem)
   9318 		return true
   9319 	}
   9320 	// match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
   9321 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   9322 	// result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   9323 	for {
   9324 		off1 := v.AuxInt
   9325 		sym1 := v.Aux
   9326 		v_0 := v.Args[0]
   9327 		if v_0.Op != OpAMD64LEAQ1 {
   9328 			break
   9329 		}
   9330 		off2 := v_0.AuxInt
   9331 		sym2 := v_0.Aux
   9332 		ptr := v_0.Args[0]
   9333 		idx := v_0.Args[1]
   9334 		val := v.Args[1]
   9335 		mem := v.Args[2]
   9336 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   9337 			break
   9338 		}
   9339 		v.reset(OpAMD64MOVWstoreidx1)
   9340 		v.AuxInt = off1 + off2
   9341 		v.Aux = mergeSym(sym1, sym2)
   9342 		v.AddArg(ptr)
   9343 		v.AddArg(idx)
   9344 		v.AddArg(val)
   9345 		v.AddArg(mem)
   9346 		return true
   9347 	}
   9348 	// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
   9349 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
   9350 	// result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
   9351 	for {
   9352 		off1 := v.AuxInt
   9353 		sym1 := v.Aux
   9354 		v_0 := v.Args[0]
   9355 		if v_0.Op != OpAMD64LEAQ2 {
   9356 			break
   9357 		}
   9358 		off2 := v_0.AuxInt
   9359 		sym2 := v_0.Aux
   9360 		ptr := v_0.Args[0]
   9361 		idx := v_0.Args[1]
   9362 		val := v.Args[1]
   9363 		mem := v.Args[2]
   9364 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
   9365 			break
   9366 		}
   9367 		v.reset(OpAMD64MOVWstoreidx2)
   9368 		v.AuxInt = off1 + off2
   9369 		v.Aux = mergeSym(sym1, sym2)
   9370 		v.AddArg(ptr)
   9371 		v.AddArg(idx)
   9372 		v.AddArg(val)
   9373 		v.AddArg(mem)
   9374 		return true
   9375 	}
   9376 	// match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
   9377 	// cond: ptr.Op != OpSB
   9378 	// result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
   9379 	for {
   9380 		off := v.AuxInt
   9381 		sym := v.Aux
   9382 		v_0 := v.Args[0]
   9383 		if v_0.Op != OpAMD64ADDQ {
   9384 			break
   9385 		}
   9386 		ptr := v_0.Args[0]
   9387 		idx := v_0.Args[1]
   9388 		val := v.Args[1]
   9389 		mem := v.Args[2]
   9390 		if !(ptr.Op != OpSB) {
   9391 			break
   9392 		}
   9393 		v.reset(OpAMD64MOVWstoreidx1)
   9394 		v.AuxInt = off
   9395 		v.Aux = sym
   9396 		v.AddArg(ptr)
   9397 		v.AddArg(idx)
   9398 		v.AddArg(val)
   9399 		v.AddArg(mem)
   9400 		return true
   9401 	}
   9402 	// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
    9403 	// cond: x.Uses == 1 && clobber(x)
   9404 	// result: (MOVLstore [i-2] {s} p w mem)
   9405 	for {
   9406 		i := v.AuxInt
   9407 		s := v.Aux
   9408 		p := v.Args[0]
   9409 		v_1 := v.Args[1]
   9410 		if v_1.Op != OpAMD64SHRQconst {
   9411 			break
   9412 		}
   9413 		if v_1.AuxInt != 16 {
   9414 			break
   9415 		}
   9416 		w := v_1.Args[0]
   9417 		x := v.Args[2]
   9418 		if x.Op != OpAMD64MOVWstore {
   9419 			break
   9420 		}
   9421 		if x.AuxInt != i-2 {
   9422 			break
   9423 		}
   9424 		if x.Aux != s {
   9425 			break
   9426 		}
   9427 		if p != x.Args[0] {
   9428 			break
   9429 		}
   9430 		if w != x.Args[1] {
   9431 			break
   9432 		}
   9433 		mem := x.Args[2]
   9434 		if !(x.Uses == 1 && clobber(x)) {
   9435 			break
   9436 		}
   9437 		v.reset(OpAMD64MOVLstore)
   9438 		v.AuxInt = i - 2
   9439 		v.Aux = s
   9440 		v.AddArg(p)
   9441 		v.AddArg(w)
   9442 		v.AddArg(mem)
   9443 		return true
   9444 	}
   9445 	// match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
    9446 	// cond: x.Uses == 1 && clobber(x)
   9447 	// result: (MOVLstore [i-2] {s} p w0 mem)
   9448 	for {
   9449 		i := v.AuxInt
   9450 		s := v.Aux
   9451 		p := v.Args[0]
   9452 		v_1 := v.Args[1]
   9453 		if v_1.Op != OpAMD64SHRQconst {
   9454 			break
   9455 		}
   9456 		j := v_1.AuxInt
   9457 		w := v_1.Args[0]
   9458 		x := v.Args[2]
   9459 		if x.Op != OpAMD64MOVWstore {
   9460 			break
   9461 		}
   9462 		if x.AuxInt != i-2 {
   9463 			break
   9464 		}
   9465 		if x.Aux != s {
   9466 			break
   9467 		}
   9468 		if p != x.Args[0] {
   9469 			break
   9470 		}
   9471 		w0 := x.Args[1]
   9472 		if w0.Op != OpAMD64SHRQconst {
   9473 			break
   9474 		}
   9475 		if w0.AuxInt != j-16 {
   9476 			break
   9477 		}
   9478 		if w != w0.Args[0] {
   9479 			break
   9480 		}
   9481 		mem := x.Args[2]
   9482 		if !(x.Uses == 1 && clobber(x)) {
   9483 			break
   9484 		}
   9485 		v.reset(OpAMD64MOVLstore)
   9486 		v.AuxInt = i - 2
   9487 		v.Aux = s
   9488 		v.AddArg(p)
   9489 		v.AddArg(w0)
   9490 		v.AddArg(mem)
   9491 		return true
   9492 	}
   9493 	// match: (MOVWstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
   9494 	// cond: canMergeSym(sym1, sym2)
   9495 	// result: (MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   9496 	for {
   9497 		off1 := v.AuxInt
   9498 		sym1 := v.Aux
   9499 		v_0 := v.Args[0]
   9500 		if v_0.Op != OpAMD64LEAL {
   9501 			break
   9502 		}
   9503 		off2 := v_0.AuxInt
   9504 		sym2 := v_0.Aux
   9505 		base := v_0.Args[0]
   9506 		val := v.Args[1]
   9507 		mem := v.Args[2]
   9508 		if !(canMergeSym(sym1, sym2)) {
   9509 			break
   9510 		}
   9511 		v.reset(OpAMD64MOVWstore)
   9512 		v.AuxInt = off1 + off2
   9513 		v.Aux = mergeSym(sym1, sym2)
   9514 		v.AddArg(base)
   9515 		v.AddArg(val)
   9516 		v.AddArg(mem)
   9517 		return true
   9518 	}
   9519 	// match: (MOVWstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
   9520 	// cond: is32Bit(off1+off2)
   9521 	// result: (MOVWstore  [off1+off2] {sym} ptr val mem)
   9522 	for {
   9523 		off1 := v.AuxInt
   9524 		sym := v.Aux
   9525 		v_0 := v.Args[0]
   9526 		if v_0.Op != OpAMD64ADDLconst {
   9527 			break
   9528 		}
   9529 		off2 := v_0.AuxInt
   9530 		ptr := v_0.Args[0]
   9531 		val := v.Args[1]
   9532 		mem := v.Args[2]
   9533 		if !(is32Bit(off1 + off2)) {
   9534 			break
   9535 		}
   9536 		v.reset(OpAMD64MOVWstore)
   9537 		v.AuxInt = off1 + off2
   9538 		v.Aux = sym
   9539 		v.AddArg(ptr)
   9540 		v.AddArg(val)
   9541 		v.AddArg(mem)
   9542 		return true
   9543 	}
   9544 	return false
   9545 }
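// MOVWstoreconst packs both the 16-bit constant and the byte offset into
// its AuxInt as a ValAndOff, so offset folding goes through
// ValAndOff(sc).canAdd(off) and .add(off) rather than plain addition.
// Adjacent constant stores combine as above: when the offsets differ by 2,
// the two immediates are packed little-endian as Val(a)&0xffff |
// Val(c)<<16 into one MOVLstoreconst at the lower offset.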
   9546 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool {
   9547 	b := v.Block
   9548 	_ = b
   9549 	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
   9550 	// cond: ValAndOff(sc).canAdd(off)
   9551 	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   9552 	for {
   9553 		sc := v.AuxInt
   9554 		s := v.Aux
   9555 		v_0 := v.Args[0]
   9556 		if v_0.Op != OpAMD64ADDQconst {
   9557 			break
   9558 		}
   9559 		off := v_0.AuxInt
   9560 		ptr := v_0.Args[0]
   9561 		mem := v.Args[1]
   9562 		if !(ValAndOff(sc).canAdd(off)) {
   9563 			break
   9564 		}
   9565 		v.reset(OpAMD64MOVWstoreconst)
   9566 		v.AuxInt = ValAndOff(sc).add(off)
   9567 		v.Aux = s
   9568 		v.AddArg(ptr)
   9569 		v.AddArg(mem)
   9570 		return true
   9571 	}
   9572 	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
   9573 	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   9574 	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   9575 	for {
   9576 		sc := v.AuxInt
   9577 		sym1 := v.Aux
   9578 		v_0 := v.Args[0]
   9579 		if v_0.Op != OpAMD64LEAQ {
   9580 			break
   9581 		}
   9582 		off := v_0.AuxInt
   9583 		sym2 := v_0.Aux
   9584 		ptr := v_0.Args[0]
   9585 		mem := v.Args[1]
   9586 		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
   9587 			break
   9588 		}
   9589 		v.reset(OpAMD64MOVWstoreconst)
   9590 		v.AuxInt = ValAndOff(sc).add(off)
   9591 		v.Aux = mergeSym(sym1, sym2)
   9592 		v.AddArg(ptr)
   9593 		v.AddArg(mem)
   9594 		return true
   9595 	}
   9596 	// match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
   9597 	// cond: canMergeSym(sym1, sym2)
   9598 	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   9599 	for {
   9600 		x := v.AuxInt
   9601 		sym1 := v.Aux
   9602 		v_0 := v.Args[0]
   9603 		if v_0.Op != OpAMD64LEAQ1 {
   9604 			break
   9605 		}
   9606 		off := v_0.AuxInt
   9607 		sym2 := v_0.Aux
   9608 		ptr := v_0.Args[0]
   9609 		idx := v_0.Args[1]
   9610 		mem := v.Args[1]
   9611 		if !(canMergeSym(sym1, sym2)) {
   9612 			break
   9613 		}
   9614 		v.reset(OpAMD64MOVWstoreconstidx1)
   9615 		v.AuxInt = ValAndOff(x).add(off)
   9616 		v.Aux = mergeSym(sym1, sym2)
   9617 		v.AddArg(ptr)
   9618 		v.AddArg(idx)
   9619 		v.AddArg(mem)
   9620 		return true
   9621 	}
   9622 	// match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
   9623 	// cond: canMergeSym(sym1, sym2)
   9624 	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
   9625 	for {
   9626 		x := v.AuxInt
   9627 		sym1 := v.Aux
   9628 		v_0 := v.Args[0]
   9629 		if v_0.Op != OpAMD64LEAQ2 {
   9630 			break
   9631 		}
   9632 		off := v_0.AuxInt
   9633 		sym2 := v_0.Aux
   9634 		ptr := v_0.Args[0]
   9635 		idx := v_0.Args[1]
   9636 		mem := v.Args[1]
   9637 		if !(canMergeSym(sym1, sym2)) {
   9638 			break
   9639 		}
   9640 		v.reset(OpAMD64MOVWstoreconstidx2)
   9641 		v.AuxInt = ValAndOff(x).add(off)
   9642 		v.Aux = mergeSym(sym1, sym2)
   9643 		v.AddArg(ptr)
   9644 		v.AddArg(idx)
   9645 		v.AddArg(mem)
   9646 		return true
   9647 	}
   9648 	// match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
   9649 	// cond:
   9650 	// result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
   9651 	for {
   9652 		x := v.AuxInt
   9653 		sym := v.Aux
   9654 		v_0 := v.Args[0]
   9655 		if v_0.Op != OpAMD64ADDQ {
   9656 			break
   9657 		}
   9658 		ptr := v_0.Args[0]
   9659 		idx := v_0.Args[1]
   9660 		mem := v.Args[1]
   9661 		v.reset(OpAMD64MOVWstoreconstidx1)
   9662 		v.AuxInt = x
   9663 		v.Aux = sym
   9664 		v.AddArg(ptr)
   9665 		v.AddArg(idx)
   9666 		v.AddArg(mem)
   9667 		return true
   9668 	}
   9669 	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
   9670 	// cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
   9671 	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
   9672 	for {
   9673 		c := v.AuxInt
   9674 		s := v.Aux
   9675 		p := v.Args[0]
   9676 		x := v.Args[1]
   9677 		if x.Op != OpAMD64MOVWstoreconst {
   9678 			break
   9679 		}
   9680 		a := x.AuxInt
   9681 		if x.Aux != s {
   9682 			break
   9683 		}
   9684 		if p != x.Args[0] {
   9685 			break
   9686 		}
   9687 		mem := x.Args[1]
   9688 		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
   9689 			break
   9690 		}
   9691 		v.reset(OpAMD64MOVLstoreconst)
   9692 		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
   9693 		v.Aux = s
   9694 		v.AddArg(p)
   9695 		v.AddArg(mem)
   9696 		return true
   9697 	}
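	// Note: ValAndOff packs a 32-bit constant value and a 32-bit offset into a
	// single AuxInt. The rule above fuses two adjacent 2-byte constant stores
	// into one 4-byte store; because AMD64 is little-endian, the store at the
	// lower offset supplies the low 16 bits. For example, writing 0x1122 at
	// offset 0 and 0x3344 at offset 2 becomes a single MOVLstoreconst of
	// 0x33441122 at offset 0.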
   9698 	// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
   9699 	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
   9700 	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
   9701 	for {
   9702 		sc := v.AuxInt
   9703 		sym1 := v.Aux
   9704 		v_0 := v.Args[0]
   9705 		if v_0.Op != OpAMD64LEAL {
   9706 			break
   9707 		}
   9708 		off := v_0.AuxInt
   9709 		sym2 := v_0.Aux
   9710 		ptr := v_0.Args[0]
   9711 		mem := v.Args[1]
   9712 		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
   9713 			break
   9714 		}
   9715 		v.reset(OpAMD64MOVWstoreconst)
   9716 		v.AuxInt = ValAndOff(sc).add(off)
   9717 		v.Aux = mergeSym(sym1, sym2)
   9718 		v.AddArg(ptr)
   9719 		v.AddArg(mem)
   9720 		return true
   9721 	}
   9722 	// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
   9723 	// cond: ValAndOff(sc).canAdd(off)
   9724 	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
   9725 	for {
   9726 		sc := v.AuxInt
   9727 		s := v.Aux
   9728 		v_0 := v.Args[0]
   9729 		if v_0.Op != OpAMD64ADDLconst {
   9730 			break
   9731 		}
   9732 		off := v_0.AuxInt
   9733 		ptr := v_0.Args[0]
   9734 		mem := v.Args[1]
   9735 		if !(ValAndOff(sc).canAdd(off)) {
   9736 			break
   9737 		}
   9738 		v.reset(OpAMD64MOVWstoreconst)
   9739 		v.AuxInt = ValAndOff(sc).add(off)
   9740 		v.Aux = s
   9741 		v.AddArg(ptr)
   9742 		v.AddArg(mem)
   9743 		return true
   9744 	}
   9745 	return false
   9746 }
   9747 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool {
   9748 	b := v.Block
   9749 	_ = b
   9750 	// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
   9751 	// cond:
   9752 	// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
   9753 	for {
   9754 		c := v.AuxInt
   9755 		sym := v.Aux
   9756 		ptr := v.Args[0]
   9757 		v_1 := v.Args[1]
   9758 		if v_1.Op != OpAMD64SHLQconst {
   9759 			break
   9760 		}
   9761 		if v_1.AuxInt != 1 {
   9762 			break
   9763 		}
   9764 		idx := v_1.Args[0]
   9765 		mem := v.Args[2]
   9766 		v.reset(OpAMD64MOVWstoreconstidx2)
   9767 		v.AuxInt = c
   9768 		v.Aux = sym
   9769 		v.AddArg(ptr)
   9770 		v.AddArg(idx)
   9771 		v.AddArg(mem)
   9772 		return true
   9773 	}
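	// ptr+(idx<<1) is identical to ptr+2*idx, so an explicit doubling of the
	// index folds into the scale factor of the scaled-index form.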
   9774 	// match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
   9775 	// cond:
   9776 	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   9777 	for {
   9778 		x := v.AuxInt
   9779 		sym := v.Aux
   9780 		v_0 := v.Args[0]
   9781 		if v_0.Op != OpAMD64ADDQconst {
   9782 			break
   9783 		}
   9784 		c := v_0.AuxInt
   9785 		ptr := v_0.Args[0]
   9786 		idx := v.Args[1]
   9787 		mem := v.Args[2]
   9788 		v.reset(OpAMD64MOVWstoreconstidx1)
   9789 		v.AuxInt = ValAndOff(x).add(c)
   9790 		v.Aux = sym
   9791 		v.AddArg(ptr)
   9792 		v.AddArg(idx)
   9793 		v.AddArg(mem)
   9794 		return true
   9795 	}
   9796 	// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
   9797 	// cond:
   9798 	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   9799 	for {
   9800 		x := v.AuxInt
   9801 		sym := v.Aux
   9802 		ptr := v.Args[0]
   9803 		v_1 := v.Args[1]
   9804 		if v_1.Op != OpAMD64ADDQconst {
   9805 			break
   9806 		}
   9807 		c := v_1.AuxInt
   9808 		idx := v_1.Args[0]
   9809 		mem := v.Args[2]
   9810 		v.reset(OpAMD64MOVWstoreconstidx1)
   9811 		v.AuxInt = ValAndOff(x).add(c)
   9812 		v.Aux = sym
   9813 		v.AddArg(ptr)
   9814 		v.AddArg(idx)
   9815 		v.AddArg(mem)
   9816 		return true
   9817 	}
   9818 	// match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
   9819 	// cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
   9820 	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
   9821 	for {
   9822 		c := v.AuxInt
   9823 		s := v.Aux
   9824 		p := v.Args[0]
   9825 		i := v.Args[1]
   9826 		x := v.Args[2]
   9827 		if x.Op != OpAMD64MOVWstoreconstidx1 {
   9828 			break
   9829 		}
   9830 		a := x.AuxInt
   9831 		if x.Aux != s {
   9832 			break
   9833 		}
   9834 		if p != x.Args[0] {
   9835 			break
   9836 		}
   9837 		if i != x.Args[1] {
   9838 			break
   9839 		}
   9840 		mem := x.Args[2]
   9841 		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
   9842 			break
   9843 		}
   9844 		v.reset(OpAMD64MOVLstoreconstidx1)
   9845 		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
   9846 		v.Aux = s
   9847 		v.AddArg(p)
   9848 		v.AddArg(i)
   9849 		v.AddArg(mem)
   9850 		return true
   9851 	}
   9852 	return false
   9853 }
   9854 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool {
   9855 	b := v.Block
   9856 	_ = b
   9857 	// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
   9858 	// cond:
   9859 	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
   9860 	for {
   9861 		x := v.AuxInt
   9862 		sym := v.Aux
   9863 		v_0 := v.Args[0]
   9864 		if v_0.Op != OpAMD64ADDQconst {
   9865 			break
   9866 		}
   9867 		c := v_0.AuxInt
   9868 		ptr := v_0.Args[0]
   9869 		idx := v.Args[1]
   9870 		mem := v.Args[2]
   9871 		v.reset(OpAMD64MOVWstoreconstidx2)
   9872 		v.AuxInt = ValAndOff(x).add(c)
   9873 		v.Aux = sym
   9874 		v.AddArg(ptr)
   9875 		v.AddArg(idx)
   9876 		v.AddArg(mem)
   9877 		return true
   9878 	}
   9879 	// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
   9880 	// cond:
   9881 	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
   9882 	for {
   9883 		x := v.AuxInt
   9884 		sym := v.Aux
   9885 		ptr := v.Args[0]
   9886 		v_1 := v.Args[1]
   9887 		if v_1.Op != OpAMD64ADDQconst {
   9888 			break
   9889 		}
   9890 		c := v_1.AuxInt
   9891 		idx := v_1.Args[0]
   9892 		mem := v.Args[2]
   9893 		v.reset(OpAMD64MOVWstoreconstidx2)
   9894 		v.AuxInt = ValAndOff(x).add(2 * c)
   9895 		v.Aux = sym
   9896 		v.AddArg(ptr)
   9897 		v.AddArg(idx)
   9898 		v.AddArg(mem)
   9899 		return true
   9900 	}
   9901 	// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
   9902 	// cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
   9903 	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
   9904 	for {
   9905 		c := v.AuxInt
   9906 		s := v.Aux
   9907 		p := v.Args[0]
   9908 		i := v.Args[1]
   9909 		x := v.Args[2]
   9910 		if x.Op != OpAMD64MOVWstoreconstidx2 {
   9911 			break
   9912 		}
   9913 		a := x.AuxInt
   9914 		if x.Aux != s {
   9915 			break
   9916 		}
   9917 		if p != x.Args[0] {
   9918 			break
   9919 		}
   9920 		if i != x.Args[1] {
   9921 			break
   9922 		}
   9923 		mem := x.Args[2]
   9924 		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
   9925 			break
   9926 		}
   9927 		v.reset(OpAMD64MOVLstoreconstidx1)
   9928 		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
   9929 		v.Aux = s
   9930 		v.AddArg(p)
   9931 		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type)
   9932 		v0.AuxInt = 1
   9933 		v0.AddArg(i)
   9934 		v.AddArg(v0)
   9935 		v.AddArg(mem)
   9936 		return true
   9937 	}
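	// The merged 4-byte store above is emitted as a scale-1 op with the index
	// doubled by an explicit SHLQconst: 2*idx at scale 1 addresses the same
	// byte as idx at scale 2. Presumably this detour is taken because no
	// scale-2 variant of the 4-byte constant store exists.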
   9938 	return false
   9939 }
   9940 func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool {
   9941 	b := v.Block
   9942 	_ = b
   9943 	// match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
   9944 	// cond:
   9945 	// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
   9946 	for {
   9947 		c := v.AuxInt
   9948 		sym := v.Aux
   9949 		ptr := v.Args[0]
   9950 		v_1 := v.Args[1]
   9951 		if v_1.Op != OpAMD64SHLQconst {
   9952 			break
   9953 		}
   9954 		if v_1.AuxInt != 1 {
   9955 			break
   9956 		}
   9957 		idx := v_1.Args[0]
   9958 		val := v.Args[2]
   9959 		mem := v.Args[3]
   9960 		v.reset(OpAMD64MOVWstoreidx2)
   9961 		v.AuxInt = c
   9962 		v.Aux = sym
   9963 		v.AddArg(ptr)
   9964 		v.AddArg(idx)
   9965 		v.AddArg(val)
   9966 		v.AddArg(mem)
   9967 		return true
   9968 	}
   9969 	// match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
   9970 	// cond:
   9971 	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
   9972 	for {
   9973 		c := v.AuxInt
   9974 		sym := v.Aux
   9975 		v_0 := v.Args[0]
   9976 		if v_0.Op != OpAMD64ADDQconst {
   9977 			break
   9978 		}
   9979 		d := v_0.AuxInt
   9980 		ptr := v_0.Args[0]
   9981 		idx := v.Args[1]
   9982 		val := v.Args[2]
   9983 		mem := v.Args[3]
   9984 		v.reset(OpAMD64MOVWstoreidx1)
   9985 		v.AuxInt = c + d
   9986 		v.Aux = sym
   9987 		v.AddArg(ptr)
   9988 		v.AddArg(idx)
   9989 		v.AddArg(val)
   9990 		v.AddArg(mem)
   9991 		return true
   9992 	}
   9993 	// match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
   9994 	// cond:
   9995 	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
   9996 	for {
   9997 		c := v.AuxInt
   9998 		sym := v.Aux
   9999 		ptr := v.Args[0]
   10000 		v_1 := v.Args[1]
   10001 		if v_1.Op != OpAMD64ADDQconst {
   10002 			break
   10003 		}
   10004 		d := v_1.AuxInt
   10005 		idx := v_1.Args[0]
   10006 		val := v.Args[2]
   10007 		mem := v.Args[3]
   10008 		v.reset(OpAMD64MOVWstoreidx1)
   10009 		v.AuxInt = c + d
   10010 		v.Aux = sym
   10011 		v.AddArg(ptr)
   10012 		v.AddArg(idx)
   10013 		v.AddArg(val)
   10014 		v.AddArg(mem)
   10015 		return true
   10016 	}
   10017 	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
   10018 	// cond: x.Uses == 1   && clobber(x)
   10019 	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
   10020 	for {
   10021 		i := v.AuxInt
   10022 		s := v.Aux
   10023 		p := v.Args[0]
   10024 		idx := v.Args[1]
   10025 		v_2 := v.Args[2]
   10026 		if v_2.Op != OpAMD64SHRQconst {
   10027 			break
   10028 		}
   10029 		if v_2.AuxInt != 16 {
   10030 			break
   10031 		}
   10032 		w := v_2.Args[0]
   10033 		x := v.Args[3]
   10034 		if x.Op != OpAMD64MOVWstoreidx1 {
   10035 			break
   10036 		}
   10037 		if x.AuxInt != i-2 {
   10038 			break
   10039 		}
   10040 		if x.Aux != s {
   10041 			break
   10042 		}
   10043 		if p != x.Args[0] {
   10044 			break
   10045 		}
   10046 		if idx != x.Args[1] {
   10047 			break
   10048 		}
   10049 		if w != x.Args[2] {
   10050 			break
   10051 		}
   10052 		mem := x.Args[3]
   10053 		if !(x.Uses == 1 && clobber(x)) {
   10054 			break
   10055 		}
   10056 		v.reset(OpAMD64MOVLstoreidx1)
   10057 		v.AuxInt = i - 2
   10058 		v.Aux = s
   10059 		v.AddArg(p)
   10060 		v.AddArg(idx)
   10061 		v.AddArg(w)
   10062 		v.AddArg(mem)
   10063 		return true
   10064 	}
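	// The rule above fuses two adjacent 2-byte stores of the same source word:
	// w stored at i-2 and w>>16 stored at i together hold the low 32 bits of w,
	// so on little-endian AMD64 a single 4-byte store of w at i-2 is equivalent.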
   10065 	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
   10066 	// cond: x.Uses == 1   && clobber(x)
   10067 	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
   10068 	for {
   10069 		i := v.AuxInt
   10070 		s := v.Aux
   10071 		p := v.Args[0]
   10072 		idx := v.Args[1]
   10073 		v_2 := v.Args[2]
   10074 		if v_2.Op != OpAMD64SHRQconst {
   10075 			break
   10076 		}
   10077 		j := v_2.AuxInt
   10078 		w := v_2.Args[0]
   10079 		x := v.Args[3]
   10080 		if x.Op != OpAMD64MOVWstoreidx1 {
   10081 			break
   10082 		}
   10083 		if x.AuxInt != i-2 {
   10084 			break
   10085 		}
   10086 		if x.Aux != s {
   10087 			break
   10088 		}
   10089 		if p != x.Args[0] {
   10090 			break
   10091 		}
   10092 		if idx != x.Args[1] {
   10093 			break
   10094 		}
   10095 		w0 := x.Args[2]
   10096 		if w0.Op != OpAMD64SHRQconst {
   10097 			break
   10098 		}
   10099 		if w0.AuxInt != j-16 {
   10100 			break
   10101 		}
   10102 		if w != w0.Args[0] {
   10103 			break
   10104 		}
   10105 		mem := x.Args[3]
   10106 		if !(x.Uses == 1 && clobber(x)) {
   10107 			break
   10108 		}
   10109 		v.reset(OpAMD64MOVLstoreidx1)
   10110 		v.AuxInt = i - 2
   10111 		v.Aux = s
   10112 		v.AddArg(p)
   10113 		v.AddArg(idx)
   10114 		v.AddArg(w0)
   10115 		v.AddArg(mem)
   10116 		return true
   10117 	}
   10118 	return false
   10119 }
   10120 func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool {
   10121 	b := v.Block
   10122 	_ = b
   10123 	// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
   10124 	// cond:
   10125 	// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
   10126 	for {
   10127 		c := v.AuxInt
   10128 		sym := v.Aux
   10129 		v_0 := v.Args[0]
   10130 		if v_0.Op != OpAMD64ADDQconst {
   10131 			break
   10132 		}
   10133 		d := v_0.AuxInt
   10134 		ptr := v_0.Args[0]
   10135 		idx := v.Args[1]
   10136 		val := v.Args[2]
   10137 		mem := v.Args[3]
   10138 		v.reset(OpAMD64MOVWstoreidx2)
   10139 		v.AuxInt = c + d
   10140 		v.Aux = sym
   10141 		v.AddArg(ptr)
   10142 		v.AddArg(idx)
   10143 		v.AddArg(val)
   10144 		v.AddArg(mem)
   10145 		return true
   10146 	}
   10147 	// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
   10148 	// cond:
   10149 	// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
   10150 	for {
   10151 		c := v.AuxInt
   10152 		sym := v.Aux
   10153 		ptr := v.Args[0]
   10154 		v_1 := v.Args[1]
   10155 		if v_1.Op != OpAMD64ADDQconst {
   10156 			break
   10157 		}
   10158 		d := v_1.AuxInt
   10159 		idx := v_1.Args[0]
   10160 		val := v.Args[2]
   10161 		mem := v.Args[3]
   10162 		v.reset(OpAMD64MOVWstoreidx2)
   10163 		v.AuxInt = c + 2*d
   10164 		v.Aux = sym
   10165 		v.AddArg(ptr)
   10166 		v.AddArg(idx)
   10167 		v.AddArg(val)
   10168 		v.AddArg(mem)
   10169 		return true
   10170 	}
   10171 	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
   10172 	// cond: x.Uses == 1   && clobber(x)
   10173 	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
   10174 	for {
   10175 		i := v.AuxInt
   10176 		s := v.Aux
   10177 		p := v.Args[0]
   10178 		idx := v.Args[1]
   10179 		v_2 := v.Args[2]
   10180 		if v_2.Op != OpAMD64SHRQconst {
   10181 			break
   10182 		}
   10183 		if v_2.AuxInt != 16 {
   10184 			break
   10185 		}
   10186 		w := v_2.Args[0]
   10187 		x := v.Args[3]
   10188 		if x.Op != OpAMD64MOVWstoreidx2 {
   10189 			break
   10190 		}
   10191 		if x.AuxInt != i-2 {
   10192 			break
   10193 		}
   10194 		if x.Aux != s {
   10195 			break
   10196 		}
   10197 		if p != x.Args[0] {
   10198 			break
   10199 		}
   10200 		if idx != x.Args[1] {
   10201 			break
   10202 		}
   10203 		if w != x.Args[2] {
   10204 			break
   10205 		}
   10206 		mem := x.Args[3]
   10207 		if !(x.Uses == 1 && clobber(x)) {
   10208 			break
   10209 		}
   10210 		v.reset(OpAMD64MOVLstoreidx1)
   10211 		v.AuxInt = i - 2
   10212 		v.Aux = s
   10213 		v.AddArg(p)
   10214 		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
   10215 		v0.AuxInt = 1
   10216 		v0.AddArg(idx)
   10217 		v.AddArg(v0)
   10218 		v.AddArg(w)
   10219 		v.AddArg(mem)
   10220 		return true
   10221 	}
   10222 	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
   10223 	// cond: x.Uses == 1   && clobber(x)
   10224 	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
   10225 	for {
   10226 		i := v.AuxInt
   10227 		s := v.Aux
   10228 		p := v.Args[0]
   10229 		idx := v.Args[1]
   10230 		v_2 := v.Args[2]
   10231 		if v_2.Op != OpAMD64SHRQconst {
   10232 			break
   10233 		}
   10234 		j := v_2.AuxInt
   10235 		w := v_2.Args[0]
   10236 		x := v.Args[3]
   10237 		if x.Op != OpAMD64MOVWstoreidx2 {
   10238 			break
   10239 		}
   10240 		if x.AuxInt != i-2 {
   10241 			break
   10242 		}
   10243 		if x.Aux != s {
   10244 			break
   10245 		}
   10246 		if p != x.Args[0] {
   10247 			break
   10248 		}
   10249 		if idx != x.Args[1] {
   10250 			break
   10251 		}
   10252 		w0 := x.Args[2]
   10253 		if w0.Op != OpAMD64SHRQconst {
   10254 			break
   10255 		}
   10256 		if w0.AuxInt != j-16 {
   10257 			break
   10258 		}
   10259 		if w != w0.Args[0] {
   10260 			break
   10261 		}
   10262 		mem := x.Args[3]
   10263 		if !(x.Uses == 1 && clobber(x)) {
   10264 			break
   10265 		}
   10266 		v.reset(OpAMD64MOVLstoreidx1)
   10267 		v.AuxInt = i - 2
   10268 		v.Aux = s
   10269 		v.AddArg(p)
   10270 		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
   10271 		v0.AuxInt = 1
   10272 		v0.AddArg(idx)
   10273 		v.AddArg(v0)
   10274 		v.AddArg(w0)
   10275 		v.AddArg(mem)
   10276 		return true
   10277 	}
   10278 	return false
   10279 }
   10280 func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool {
   10281 	b := v.Block
   10282 	_ = b
   10283 	// match: (MULL x (MOVLconst [c]))
   10284 	// cond:
   10285 	// result: (MULLconst [c] x)
   10286 	for {
   10287 		x := v.Args[0]
   10288 		v_1 := v.Args[1]
   10289 		if v_1.Op != OpAMD64MOVLconst {
   10290 			break
   10291 		}
   10292 		c := v_1.AuxInt
   10293 		v.reset(OpAMD64MULLconst)
   10294 		v.AuxInt = c
   10295 		v.AddArg(x)
   10296 		return true
   10297 	}
   10298 	// match: (MULL (MOVLconst [c]) x)
   10299 	// cond:
   10300 	// result: (MULLconst [c] x)
   10301 	for {
   10302 		v_0 := v.Args[0]
   10303 		if v_0.Op != OpAMD64MOVLconst {
   10304 			break
   10305 		}
   10306 		c := v_0.AuxInt
   10307 		x := v.Args[1]
   10308 		v.reset(OpAMD64MULLconst)
   10309 		v.AuxInt = c
   10310 		v.AddArg(x)
   10311 		return true
   10312 	}
   10313 	return false
   10314 }
   10315 func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool {
   10316 	b := v.Block
   10317 	_ = b
   10318 	// match: (MULLconst [c] (MULLconst [d] x))
   10319 	// cond:
   10320 	// result: (MULLconst [int64(int32(c * d))] x)
   10321 	for {
   10322 		c := v.AuxInt
   10323 		v_0 := v.Args[0]
   10324 		if v_0.Op != OpAMD64MULLconst {
   10325 			break
   10326 		}
   10327 		d := v_0.AuxInt
   10328 		x := v_0.Args[0]
   10329 		v.reset(OpAMD64MULLconst)
   10330 		v.AuxInt = int64(int32(c * d))
   10331 		v.AddArg(x)
   10332 		return true
   10333 	}
   10334 	// match: (MULLconst [c] (MOVLconst [d]))
   10335 	// cond:
   10336 	// result: (MOVLconst [int64(int32(c*d))])
   10337 	for {
   10338 		c := v.AuxInt
   10339 		v_0 := v.Args[0]
   10340 		if v_0.Op != OpAMD64MOVLconst {
   10341 			break
   10342 		}
   10343 		d := v_0.AuxInt
   10344 		v.reset(OpAMD64MOVLconst)
   10345 		v.AuxInt = int64(int32(c * d))
   10346 		return true
   10347 	}
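	// Constant folding here wraps at 32 bits: int64(int32(c*d)) truncates the
	// product and sign-extends it back, matching MULL semantics. For example,
	// c=0x7fffffff and d=2 fold to int64(int32(0xfffffffe)) = -2.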
   10348 	return false
   10349 }
   10350 func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool {
   10351 	b := v.Block
   10352 	_ = b
   10353 	// match: (MULQ x (MOVQconst [c]))
   10354 	// cond: is32Bit(c)
   10355 	// result: (MULQconst [c] x)
   10356 	for {
   10357 		x := v.Args[0]
   10358 		v_1 := v.Args[1]
   10359 		if v_1.Op != OpAMD64MOVQconst {
   10360 			break
   10361 		}
   10362 		c := v_1.AuxInt
   10363 		if !(is32Bit(c)) {
   10364 			break
   10365 		}
   10366 		v.reset(OpAMD64MULQconst)
   10367 		v.AuxInt = c
   10368 		v.AddArg(x)
   10369 		return true
   10370 	}
   10371 	// match: (MULQ (MOVQconst [c]) x)
   10372 	// cond: is32Bit(c)
   10373 	// result: (MULQconst [c] x)
   10374 	for {
   10375 		v_0 := v.Args[0]
   10376 		if v_0.Op != OpAMD64MOVQconst {
   10377 			break
   10378 		}
   10379 		c := v_0.AuxInt
   10380 		x := v.Args[1]
   10381 		if !(is32Bit(c)) {
   10382 			break
   10383 		}
   10384 		v.reset(OpAMD64MULQconst)
   10385 		v.AuxInt = c
   10386 		v.AddArg(x)
   10387 		return true
   10388 	}
   10389 	return false
   10390 }
   10391 func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
   10392 	b := v.Block
   10393 	_ = b
   10394 	// match: (MULQconst [c] (MULQconst [d] x))
   10395 	// cond: is32Bit(c*d)
   10396 	// result: (MULQconst [c * d] x)
   10397 	for {
   10398 		c := v.AuxInt
   10399 		v_0 := v.Args[0]
   10400 		if v_0.Op != OpAMD64MULQconst {
   10401 			break
   10402 		}
   10403 		d := v_0.AuxInt
   10404 		x := v_0.Args[0]
   10405 		if !(is32Bit(c * d)) {
   10406 			break
   10407 		}
   10408 		v.reset(OpAMD64MULQconst)
   10409 		v.AuxInt = c * d
   10410 		v.AddArg(x)
   10411 		return true
   10412 	}
   10413 	// match: (MULQconst [-1] x)
   10414 	// cond:
   10415 	// result: (NEGQ x)
   10416 	for {
   10417 		if v.AuxInt != -1 {
   10418 			break
   10419 		}
   10420 		x := v.Args[0]
   10421 		v.reset(OpAMD64NEGQ)
   10422 		v.AddArg(x)
   10423 		return true
   10424 	}
   10425 	// match: (MULQconst [0] _)
   10426 	// cond:
   10427 	// result: (MOVQconst [0])
   10428 	for {
   10429 		if v.AuxInt != 0 {
   10430 			break
   10431 		}
   10432 		v.reset(OpAMD64MOVQconst)
   10433 		v.AuxInt = 0
   10434 		return true
   10435 	}
   10436 	// match: (MULQconst [1] x)
   10437 	// cond:
   10438 	// result: x
   10439 	for {
   10440 		if v.AuxInt != 1 {
   10441 			break
   10442 		}
   10443 		x := v.Args[0]
   10444 		v.reset(OpCopy)
   10445 		v.Type = x.Type
   10446 		v.AddArg(x)
   10447 		return true
   10448 	}
   10449 	// match: (MULQconst [3] x)
   10450 	// cond:
   10451 	// result: (LEAQ2 x x)
   10452 	for {
   10453 		if v.AuxInt != 3 {
   10454 			break
   10455 		}
   10456 		x := v.Args[0]
   10457 		v.reset(OpAMD64LEAQ2)
   10458 		v.AddArg(x)
   10459 		v.AddArg(x)
   10460 		return true
   10461 	}
   10462 	// match: (MULQconst [5] x)
   10463 	// cond:
   10464 	// result: (LEAQ4 x x)
   10465 	for {
   10466 		if v.AuxInt != 5 {
   10467 			break
   10468 		}
   10469 		x := v.Args[0]
   10470 		v.reset(OpAMD64LEAQ4)
   10471 		v.AddArg(x)
   10472 		v.AddArg(x)
   10473 		return true
   10474 	}
   10475 	// match: (MULQconst [7] x)
   10476 	// cond:
   10477 	// result: (LEAQ8 (NEGQ <v.Type> x) x)
   10478 	for {
   10479 		if v.AuxInt != 7 {
   10480 			break
   10481 		}
   10482 		x := v.Args[0]
   10483 		v.reset(OpAMD64LEAQ8)
   10484 		v0 := b.NewValue0(v.Line, OpAMD64NEGQ, v.Type)
   10485 		v0.AddArg(x)
   10486 		v.AddArg(v0)
   10487 		v.AddArg(x)
   10488 		return true
   10489 	}
   10490 	// match: (MULQconst [9] x)
   10491 	// cond:
   10492 	// result: (LEAQ8 x x)
   10493 	for {
   10494 		if v.AuxInt != 9 {
   10495 			break
   10496 		}
   10497 		x := v.Args[0]
   10498 		v.reset(OpAMD64LEAQ8)
   10499 		v.AddArg(x)
   10500 		v.AddArg(x)
   10501 		return true
   10502 	}
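	// The small-constant cases use LEA identities: LEAQ2 x x = x+2*x = 3*x,
	// LEAQ4 x x = 5*x, LEAQ8 x x = 9*x, and LEAQ8 (NEGQ x) x = -x+8*x = 7*x.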
   10503 	// match: (MULQconst [11] x)
   10504 	// cond:
   10505 	// result: (LEAQ2 x (LEAQ4 <v.Type> x x))
   10506 	for {
   10507 		if v.AuxInt != 11 {
   10508 			break
   10509 		}
   10510 		x := v.Args[0]
   10511 		v.reset(OpAMD64LEAQ2)
   10512 		v.AddArg(x)
   10513 		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
   10514 		v0.AddArg(x)
   10515 		v0.AddArg(x)
   10516 		v.AddArg(v0)
   10517 		return true
   10518 	}
   10519 	// match: (MULQconst [13] x)
   10520 	// cond:
   10521 	// result: (LEAQ4 x (LEAQ2 <v.Type> x x))
   10522 	for {
   10523 		if v.AuxInt != 13 {
   10524 			break
   10525 		}
   10526 		x := v.Args[0]
   10527 		v.reset(OpAMD64LEAQ4)
   10528 		v.AddArg(x)
   10529 		v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
   10530 		v0.AddArg(x)
   10531 		v0.AddArg(x)
   10532 		v.AddArg(v0)
   10533 		return true
   10534 	}
   10535 	// match: (MULQconst [21] x)
   10536 	// cond:
   10537 	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
   10538 	for {
   10539 		if v.AuxInt != 21 {
   10540 			break
   10541 		}
   10542 		x := v.Args[0]
   10543 		v.reset(OpAMD64LEAQ4)
   10544 		v.AddArg(x)
   10545 		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
   10546 		v0.AddArg(x)
   10547 		v0.AddArg(x)
   10548 		v.AddArg(v0)
   10549 		return true
   10550 	}
   10551 	// match: (MULQconst [25] x)
   10552 	// cond:
   10553 	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
   10554 	for {
   10555 		if v.AuxInt != 25 {
   10556 			break
   10557 		}
   10558 		x := v.Args[0]
   10559 		v.reset(OpAMD64LEAQ8)
   10560 		v.AddArg(x)
   10561 		v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
   10562 		v0.AddArg(x)
   10563 		v0.AddArg(x)
   10564 		v.AddArg(v0)
   10565 		return true
   10566 	}
   10567 	// match: (MULQconst [37] x)
   10568 	// cond:
   10569 	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
   10570 	for {
   10571 		if v.AuxInt != 37 {
   10572 			break
   10573 		}
   10574 		x := v.Args[0]
   10575 		v.reset(OpAMD64LEAQ4)
   10576 		v.AddArg(x)
   10577 		v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
   10578 		v0.AddArg(x)
   10579 		v0.AddArg(x)
   10580 		v.AddArg(v0)
   10581 		return true
   10582 	}
   10583 	// match: (MULQconst [41] x)
   10584 	// cond:
   10585 	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
   10586 	for {
   10587 		if v.AuxInt != 41 {
   10588 			break
   10589 		}
   10590 		x := v.Args[0]
   10591 		v.reset(OpAMD64LEAQ8)
   10592 		v.AddArg(x)
   10593 		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
   10594 		v0.AddArg(x)
   10595 		v0.AddArg(x)
   10596 		v.AddArg(v0)
   10597 		return true
   10598 	}
   10599 	// match: (MULQconst [73] x)
   10600 	// cond:
   10601 	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
   10602 	for {
   10603 		if v.AuxInt != 73 {
   10604 			break
   10605 		}
   10606 		x := v.Args[0]
   10607 		v.reset(OpAMD64LEAQ8)
   10608 		v.AddArg(x)
   10609 		v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
   10610 		v0.AddArg(x)
   10611 		v0.AddArg(x)
   10612 		v.AddArg(v0)
   10613 		return true
   10614 	}
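	// Larger odd constants compose two LEAs. For example, 11*x is computed as
	// LEAQ2 x (LEAQ4 x x) = x + 2*(x + 4*x) = x + 2*5*x, and 73*x as
	// LEAQ8 x (LEAQ8 x x) = x + 8*9*x.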
   10615 	// match: (MULQconst [c] x)
   10616 	// cond: isPowerOfTwo(c)
   10617 	// result: (SHLQconst [log2(c)] x)
   10618 	for {
   10619 		c := v.AuxInt
   10620 		x := v.Args[0]
   10621 		if !(isPowerOfTwo(c)) {
   10622 			break
   10623 		}
   10624 		v.reset(OpAMD64SHLQconst)
   10625 		v.AuxInt = log2(c)
   10626 		v.AddArg(x)
   10627 		return true
   10628 	}
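	// Power-of-two multipliers become shifts: for c=16, log2(c)=4, so
	// MULQconst [16] x is rewritten to SHLQconst [4] x.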
   10629 	// match: (MULQconst [c] x)
   10630 	// cond: isPowerOfTwo(c+1) && c >= 15
   10631 	// result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
   10632 	for {
   10633 		c := v.AuxInt
   10634 		x := v.Args[0]
   10635 		if !(isPowerOfTwo(c+1) && c >= 15) {
   10636 			break
   10637 		}
   10638 		v.reset(OpAMD64SUBQ)
   10639 		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
   10640 		v0.AuxInt = log2(c + 1)
   10641 		v0.AddArg(x)
   10642 		v.AddArg(v0)
   10643 		v.AddArg(x)
   10644 		return true
   10645 	}
   10646 	// match: (MULQconst [c] x)
   10647 	// cond: isPowerOfTwo(c-1) && c >= 17
   10648 	// result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
   10649 	for {
   10650 		c := v.AuxInt
   10651 		x := v.Args[0]
   10652 		if !(isPowerOfTwo(c-1) && c >= 17) {
   10653 			break
   10654 		}
   10655 		v.reset(OpAMD64LEAQ1)
   10656 		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
   10657 		v0.AuxInt = log2(c - 1)
   10658 		v0.AddArg(x)
   10659 		v.AddArg(v0)
   10660 		v.AddArg(x)
   10661 		return true
   10662 	}
   10663 	// match: (MULQconst [c] x)
   10664 	// cond: isPowerOfTwo(c-2) && c >= 34
   10665 	// result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
   10666 	for {
   10667 		c := v.AuxInt
   10668 		x := v.Args[0]
   10669 		if !(isPowerOfTwo(c-2) && c >= 34) {
   10670 			break
   10671 		}
   10672 		v.reset(OpAMD64LEAQ2)
   10673 		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
   10674 		v0.AuxInt = log2(c - 2)
   10675 		v0.AddArg(x)
   10676 		v.AddArg(v0)
   10677 		v.AddArg(x)
   10678 		return true
   10679 	}
   10680 	// match: (MULQconst [c] x)
   10681 	// cond: isPowerOfTwo(c-4) && c >= 68
   10682 	// result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
   10683 	for {
   10684 		c := v.AuxInt
   10685 		x := v.Args[0]
   10686 		if !(isPowerOfTwo(c-4) && c >= 68) {
   10687 			break
   10688 		}
   10689 		v.reset(OpAMD64LEAQ4)
   10690 		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
   10691 		v0.AuxInt = log2(c - 4)
   10692 		v0.AddArg(x)
   10693 		v.AddArg(v0)
   10694 		v.AddArg(x)
   10695 		return true
   10696 	}
   10697 	// match: (MULQconst [c] x)
   10698 	// cond: isPowerOfTwo(c-8) && c >= 136
   10699 	// result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
   10700 	for {
   10701 		c := v.AuxInt
   10702 		x := v.Args[0]
   10703 		if !(isPowerOfTwo(c-8) && c >= 136) {
   10704 			break
   10705 		}
   10706 		v.reset(OpAMD64LEAQ8)
   10707 		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
   10708 		v0.AuxInt = log2(c - 8)
   10709 		v0.AddArg(x)
   10710 		v.AddArg(v0)
   10711 		v.AddArg(x)
   10712 		return true
   10713 	}
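	// These rules handle constants one LEA step away from a power of two:
	// c=15 gives SUBQ (SHLQconst [4] x) x = 16*x - x, and c=136 gives
	// LEAQ8 (SHLQconst [7] x) x = 128*x + 8*x. The c >= 15/17/34/68/136
	// thresholds presumably exclude values that other, cheaper rewrites in
	// this function already cover.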
   10714 	// match: (MULQconst [c] x)
   10715 	// cond: c%3 == 0 && isPowerOfTwo(c/3)
   10716 	// result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
   10717 	for {
   10718 		c := v.AuxInt
   10719 		x := v.Args[0]
   10720 		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
   10721 			break
   10722 		}
   10723 		v.reset(OpAMD64SHLQconst)
   10724 		v.AuxInt = log2(c / 3)
   10725 		v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
   10726 		v0.AddArg(x)
   10727 		v0.AddArg(x)
   10728 		v.AddArg(v0)
   10729 		return true
   10730 	}
   10731 	// match: (MULQconst [c] x)
   10732 	// cond: c%5 == 0 && isPowerOfTwo(c/5)
   10733 	// result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
   10734 	for {
   10735 		c := v.AuxInt
   10736 		x := v.Args[0]
   10737 		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
   10738 			break
   10739 		}
   10740 		v.reset(OpAMD64SHLQconst)
   10741 		v.AuxInt = log2(c / 5)
   10742 		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
   10743 		v0.AddArg(x)
   10744 		v0.AddArg(x)
   10745 		v.AddArg(v0)
   10746 		return true
   10747 	}
   10748 	// match: (MULQconst [c] x)
   10749 	// cond: c%9 == 0 && isPowerOfTwo(c/9)
   10750 	// result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
   10751 	for {
   10752 		c := v.AuxInt
   10753 		x := v.Args[0]
   10754 		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
   10755 			break
   10756 		}
   10757 		v.reset(OpAMD64SHLQconst)
   10758 		v.AuxInt = log2(c / 9)
   10759 		v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
   10760 		v0.AddArg(x)
   10761 		v0.AddArg(x)
   10762 		v.AddArg(v0)
   10763 		return true
   10764 	}
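	// Multiples of 3, 5, or 9 times a power of two combine one LEA with a
	// shift: for c=24, c%3==0 and c/3=8, so 24*x becomes
	// SHLQconst [3] (LEAQ2 x x) = (x + 2*x) << 3.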
   10765 	// match: (MULQconst [c] (MOVQconst [d]))
   10766 	// cond:
   10767 	// result: (MOVQconst [c*d])
   10768 	for {
   10769 		c := v.AuxInt
   10770 		v_0 := v.Args[0]
   10771 		if v_0.Op != OpAMD64MOVQconst {
   10772 			break
   10773 		}
   10774 		d := v_0.AuxInt
   10775 		v.reset(OpAMD64MOVQconst)
   10776 		v.AuxInt = c * d
   10777 		return true
   10778 	}
   10779 	return false
   10780 }
   10781 func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
   10782 	b := v.Block
   10783 	_ = b
   10784 	// match: (NEGL (MOVLconst [c]))
   10785 	// cond:
   10786 	// result: (MOVLconst [int64(int32(-c))])
   10787 	for {
   10788 		v_0 := v.Args[0]
   10789 		if v_0.Op != OpAMD64MOVLconst {
   10790 			break
   10791 		}
   10792 		c := v_0.AuxInt
   10793 		v.reset(OpAMD64MOVLconst)
   10794 		v.AuxInt = int64(int32(-c))
   10795 		return true
   10796 	}
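	// The int64(int32(-c)) conversion keeps the fold faithful to 32-bit
	// wraparound: negating math.MinInt32 overflows and yields math.MinInt32
	// again, just as NEGL would at run time.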
   10797 	return false
   10798 }
   10799 func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool {
   10800 	b := v.Block
   10801 	_ = b
   10802 	// match: (NEGQ (MOVQconst [c]))
   10803 	// cond:
   10804 	// result: (MOVQconst [-c])
   10805 	for {
   10806 		v_0 := v.Args[0]
   10807 		if v_0.Op != OpAMD64MOVQconst {
   10808 			break
   10809 		}
   10810 		c := v_0.AuxInt
   10811 		v.reset(OpAMD64MOVQconst)
   10812 		v.AuxInt = -c
   10813 		return true
   10814 	}
   10815 	return false
   10816 }
   10817 func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool {
   10818 	b := v.Block
   10819 	_ = b
   10820 	// match: (NOTL (MOVLconst [c]))
   10821 	// cond:
   10822 	// result: (MOVLconst [^c])
   10823 	for {
   10824 		v_0 := v.Args[0]
   10825 		if v_0.Op != OpAMD64MOVLconst {
   10826 			break
   10827 		}
   10828 		c := v_0.AuxInt
   10829 		v.reset(OpAMD64MOVLconst)
   10830 		v.AuxInt = ^c
   10831 		return true
   10832 	}
   10833 	return false
   10834 }
   10835 func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool {
   10836 	b := v.Block
   10837 	_ = b
   10838 	// match: (NOTQ (MOVQconst [c]))
   10839 	// cond:
   10840 	// result: (MOVQconst [^c])
   10841 	for {
   10842 		v_0 := v.Args[0]
   10843 		if v_0.Op != OpAMD64MOVQconst {
   10844 			break
   10845 		}
   10846 		c := v_0.AuxInt
   10847 		v.reset(OpAMD64MOVQconst)
   10848 		v.AuxInt = ^c
   10849 		return true
   10850 	}
   10851 	return false
   10852 }
   10853 func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
   10854 	b := v.Block
   10855 	_ = b
   10856 	// match: (ORL x (MOVLconst [c]))
   10857 	// cond:
   10858 	// result: (ORLconst [c] x)
   10859 	for {
   10860 		x := v.Args[0]
   10861 		v_1 := v.Args[1]
   10862 		if v_1.Op != OpAMD64MOVLconst {
   10863 			break
   10864 		}
   10865 		c := v_1.AuxInt
   10866 		v.reset(OpAMD64ORLconst)
   10867 		v.AuxInt = c
   10868 		v.AddArg(x)
   10869 		return true
   10870 	}
   10871 	// match: (ORL (MOVLconst [c]) x)
   10872 	// cond:
   10873 	// result: (ORLconst [c] x)
   10874 	for {
   10875 		v_0 := v.Args[0]
   10876 		if v_0.Op != OpAMD64MOVLconst {
   10877 			break
   10878 		}
   10879 		c := v_0.AuxInt
   10880 		x := v.Args[1]
   10881 		v.reset(OpAMD64ORLconst)
   10882 		v.AuxInt = c
   10883 		v.AddArg(x)
   10884 		return true
   10885 	}
   10886 	// match: (ORL x x)
   10887 	// cond:
   10888 	// result: x
   10889 	for {
   10890 		x := v.Args[0]
   10891 		if x != v.Args[1] {
   10892 			break
   10893 		}
   10894 		v.reset(OpCopy)
   10895 		v.Type = x.Type
   10896 		v.AddArg(x)
   10897 		return true
   10898 	}
   10899 	// match: (ORL                  x0:(MOVBload [i]   {s} p mem)     s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
   10900 	// cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
   10901 	// result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
   10902 	for {
   10903 		x0 := v.Args[0]
   10904 		if x0.Op != OpAMD64MOVBload {
   10905 			break
   10906 		}
   10907 		i := x0.AuxInt
   10908 		s := x0.Aux
   10909 		p := x0.Args[0]
   10910 		mem := x0.Args[1]
   10911 		s0 := v.Args[1]
   10912 		if s0.Op != OpAMD64SHLLconst {
   10913 			break
   10914 		}
   10915 		if s0.AuxInt != 8 {
   10916 			break
   10917 		}
   10918 		x1 := s0.Args[0]
   10919 		if x1.Op != OpAMD64MOVBload {
   10920 			break
   10921 		}
   10922 		if x1.AuxInt != i+1 {
   10923 			break
   10924 		}
   10925 		if x1.Aux != s {
   10926 			break
   10927 		}
   10928 		if p != x1.Args[0] {
   10929 			break
   10930 		}
   10931 		if mem != x1.Args[1] {
   10932 			break
   10933 		}
   10934 		if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
   10935 			break
   10936 		}
   10937 		b = mergePoint(b, x0, x1)
   10938 		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
   10939 		v.reset(OpCopy)
   10940 		v.AddArg(v0)
   10941 		v0.AuxInt = i
   10942 		v0.Aux = s
   10943 		v0.AddArg(p)
   10944 		v0.AddArg(mem)
   10945 		return true
   10946 	}
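	// This is a little-endian load merge: the byte at i supplies bits 0-7 and
	// the byte at i+1, shifted left 8, supplies bits 8-15, which is exactly a
	// 16-bit load at i. mergePoint locates a block in which the combined load
	// can legally live (nil means no such block exists), and the Uses == 1
	// conditions ensure no other consumers remain before clobber marks the old
	// values dead.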
   10947 	// match: (ORL o0:(ORL                        x0:(MOVWload [i]   {s} p mem)     s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem)))     s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem)))
   10948 	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && o0.Uses == 1   && mergePoint(b,x0,x1,x2) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(s0)   && clobber(s1)   && clobber(o0)
   10949 	// result: @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem)
   10950 	for {
   10951 		o0 := v.Args[0]
   10952 		if o0.Op != OpAMD64ORL {
   10953 			break
   10954 		}
   10955 		x0 := o0.Args[0]
   10956 		if x0.Op != OpAMD64MOVWload {
   10957 			break
   10958 		}
   10959 		i := x0.AuxInt
   10960 		s := x0.Aux
   10961 		p := x0.Args[0]
   10962 		mem := x0.Args[1]
   10963 		s0 := o0.Args[1]
   10964 		if s0.Op != OpAMD64SHLLconst {
   10965 			break
   10966 		}
   10967 		if s0.AuxInt != 16 {
   10968 			break
   10969 		}
   10970 		x1 := s0.Args[0]
   10971 		if x1.Op != OpAMD64MOVBload {
   10972 			break
   10973 		}
   10974 		if x1.AuxInt != i+2 {
   10975 			break
   10976 		}
   10977 		if x1.Aux != s {
   10978 			break
   10979 		}
   10980 		if p != x1.Args[0] {
   10981 			break
   10982 		}
   10983 		if mem != x1.Args[1] {
   10984 			break
   10985 		}
   10986 		s1 := v.Args[1]
   10987 		if s1.Op != OpAMD64SHLLconst {
   10988 			break
   10989 		}
   10990 		if s1.AuxInt != 24 {
   10991 			break
   10992 		}
   10993 		x2 := s1.Args[0]
   10994 		if x2.Op != OpAMD64MOVBload {
   10995 			break
   10996 		}
   10997 		if x2.AuxInt != i+3 {
   10998 			break
   10999 		}
   11000 		if x2.Aux != s {
   11001 			break
   11002 		}
   11003 		if p != x2.Args[0] {
   11004 			break
   11005 		}
   11006 		if mem != x2.Args[1] {
   11007 			break
   11008 		}
   11009 		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) {
   11010 			break
   11011 		}
   11012 		b = mergePoint(b, x0, x1, x2)
   11013 		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
   11014 		v.reset(OpCopy)
   11015 		v.AddArg(v0)
   11016 		v0.AuxInt = i
   11017 		v0.Aux = s
   11018 		v0.AddArg(p)
   11019 		v0.AddArg(mem)
   11020 		return true
   11021 	}
   11022 	// match: (ORL                  x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
   11023 	// cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
   11024 	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
   11025 	for {
   11026 		x0 := v.Args[0]
   11027 		if x0.Op != OpAMD64MOVBloadidx1 {
   11028 			break
   11029 		}
   11030 		i := x0.AuxInt
   11031 		s := x0.Aux
   11032 		p := x0.Args[0]
   11033 		idx := x0.Args[1]
   11034 		mem := x0.Args[2]
   11035 		s0 := v.Args[1]
   11036 		if s0.Op != OpAMD64SHLLconst {
   11037 			break
   11038 		}
   11039 		if s0.AuxInt != 8 {
   11040 			break
   11041 		}
   11042 		x1 := s0.Args[0]
   11043 		if x1.Op != OpAMD64MOVBloadidx1 {
   11044 			break
   11045 		}
   11046 		if x1.AuxInt != i+1 {
   11047 			break
   11048 		}
   11049 		if x1.Aux != s {
   11050 			break
   11051 		}
   11052 		if p != x1.Args[0] {
   11053 			break
   11054 		}
   11055 		if idx != x1.Args[1] {
   11056 			break
   11057 		}
   11058 		if mem != x1.Args[2] {
   11059 			break
   11060 		}
   11061 		if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
   11062 			break
   11063 		}
   11064 		b = mergePoint(b, x0, x1)
   11065 		v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
   11066 		v.reset(OpCopy)
   11067 		v.AddArg(v0)
   11068 		v0.AuxInt = i
   11069 		v0.Aux = s
   11070 		v0.AddArg(p)
   11071 		v0.AddArg(idx)
   11072 		v0.AddArg(mem)
   11073 		return true
   11074 	}
   11075 	// match: (ORL o0:(ORL                        x0:(MOVWloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem)))     s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem)))
   11076 	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && o0.Uses == 1   && mergePoint(b,x0,x1,x2) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(s0)   && clobber(s1)   && clobber(o0)
   11077 	// result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
   11078 	for {
   11079 		o0 := v.Args[0]
   11080 		if o0.Op != OpAMD64ORL {
   11081 			break
   11082 		}
   11083 		x0 := o0.Args[0]
   11084 		if x0.Op != OpAMD64MOVWloadidx1 {
   11085 			break
   11086 		}
   11087 		i := x0.AuxInt
   11088 		s := x0.Aux
   11089 		p := x0.Args[0]
   11090 		idx := x0.Args[1]
   11091 		mem := x0.Args[2]
   11092 		s0 := o0.Args[1]
   11093 		if s0.Op != OpAMD64SHLLconst {
   11094 			break
   11095 		}
   11096 		if s0.AuxInt != 16 {
   11097 			break
   11098 		}
   11099 		x1 := s0.Args[0]
   11100 		if x1.Op != OpAMD64MOVBloadidx1 {
   11101 			break
   11102 		}
   11103 		if x1.AuxInt != i+2 {
   11104 			break
   11105 		}
   11106 		if x1.Aux != s {
   11107 			break
   11108 		}
   11109 		if p != x1.Args[0] {
   11110 			break
   11111 		}
   11112 		if idx != x1.Args[1] {
   11113 			break
   11114 		}
   11115 		if mem != x1.Args[2] {
   11116 			break
   11117 		}
   11118 		s1 := v.Args[1]
   11119 		if s1.Op != OpAMD64SHLLconst {
   11120 			break
   11121 		}
   11122 		if s1.AuxInt != 24 {
   11123 			break
   11124 		}
   11125 		x2 := s1.Args[0]
   11126 		if x2.Op != OpAMD64MOVBloadidx1 {
   11127 			break
   11128 		}
   11129 		if x2.AuxInt != i+3 {
   11130 			break
   11131 		}
   11132 		if x2.Aux != s {
   11133 			break
   11134 		}
   11135 		if p != x2.Args[0] {
   11136 			break
   11137 		}
   11138 		if idx != x2.Args[1] {
   11139 			break
   11140 		}
   11141 		if mem != x2.Args[2] {
   11142 			break
   11143 		}
   11144 		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) {
   11145 			break
   11146 		}
   11147 		b = mergePoint(b, x0, x1, x2)
   11148 		v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
   11149 		v.reset(OpCopy)
   11150 		v.AddArg(v0)
   11151 		v0.AuxInt = i
   11152 		v0.Aux = s
   11153 		v0.AddArg(p)
   11154 		v0.AddArg(idx)
   11155 		v0.AddArg(mem)
   11156 		return true
   11157 	}
   11158 	// match: (ORL o1:(ORL o0:(ORL                        x0:(MOVBload [i] {s} p mem)     s0:(SHLLconst [8]  x1:(MOVBload [i-1] {s} p mem)))     s1:(SHLLconst [16] x2:(MOVBload [i-2] {s} p mem)))     s2:(SHLLconst [24] x3:(MOVBload [i-3] {s} p mem)))
   11159 	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && mergePoint(b,x0,x1,x2,x3) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(o0)   && clobber(o1)
   11160 	// result: @mergePoint(b,x0,x1,x2,x3) (BSWAPL <v.Type> (MOVLload [i-3] {s} p mem))
   11161 	for {
   11162 		o1 := v.Args[0]
   11163 		if o1.Op != OpAMD64ORL {
   11164 			break
   11165 		}
   11166 		o0 := o1.Args[0]
   11167 		if o0.Op != OpAMD64ORL {
   11168 			break
   11169 		}
   11170 		x0 := o0.Args[0]
   11171 		if x0.Op != OpAMD64MOVBload {
   11172 			break
   11173 		}
   11174 		i := x0.AuxInt
   11175 		s := x0.Aux
   11176 		p := x0.Args[0]
   11177 		mem := x0.Args[1]
   11178 		s0 := o0.Args[1]
   11179 		if s0.Op != OpAMD64SHLLconst {
   11180 			break
   11181 		}
   11182 		if s0.AuxInt != 8 {
   11183 			break
   11184 		}
   11185 		x1 := s0.Args[0]
   11186 		if x1.Op != OpAMD64MOVBload {
   11187 			break
   11188 		}
   11189 		if x1.AuxInt != i-1 {
   11190 			break
   11191 		}
   11192 		if x1.Aux != s {
   11193 			break
   11194 		}
   11195 		if p != x1.Args[0] {
   11196 			break
   11197 		}
   11198 		if mem != x1.Args[1] {
   11199 			break
   11200 		}
   11201 		s1 := o1.Args[1]
   11202 		if s1.Op != OpAMD64SHLLconst {
   11203 			break
   11204 		}
   11205 		if s1.AuxInt != 16 {
   11206 			break
   11207 		}
   11208 		x2 := s1.Args[0]
   11209 		if x2.Op != OpAMD64MOVBload {
   11210 			break
   11211 		}
   11212 		if x2.AuxInt != i-2 {
   11213 			break
   11214 		}
   11215 		if x2.Aux != s {
   11216 			break
   11217 		}
   11218 		if p != x2.Args[0] {
   11219 			break
   11220 		}
   11221 		if mem != x2.Args[1] {
   11222 			break
   11223 		}
   11224 		s2 := v.Args[1]
   11225 		if s2.Op != OpAMD64SHLLconst {
   11226 			break
   11227 		}
   11228 		if s2.AuxInt != 24 {
   11229 			break
   11230 		}
   11231 		x3 := s2.Args[0]
   11232 		if x3.Op != OpAMD64MOVBload {
   11233 			break
   11234 		}
   11235 		if x3.AuxInt != i-3 {
   11236 			break
   11237 		}
   11238 		if x3.Aux != s {
   11239 			break
   11240 		}
   11241 		if p != x3.Args[0] {
   11242 			break
   11243 		}
   11244 		if mem != x3.Args[1] {
   11245 			break
   11246 		}
   11247 		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
   11248 			break
   11249 		}
   11250 		b = mergePoint(b, x0, x1, x2, x3)
   11251 		v0 := b.NewValue0(v.Line, OpAMD64BSWAPL, v.Type)
   11252 		v.reset(OpCopy)
   11253 		v.AddArg(v0)
   11254 		v1 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
   11255 		v1.AuxInt = i - 3
   11256 		v1.Aux = s
   11257 		v1.AddArg(p)
   11258 		v1.AddArg(mem)
   11259 		v0.AddArg(v1)
   11260 		return true
   11261 	}
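	// Here the shift amounts grow as the offsets shrink, i.e. the bytes are
	// combined in big-endian order, so the rewrite loads all four bytes at
	// once from i-3 and corrects the byte order with BSWAPL.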
   11262 	// match: (ORL o1:(ORL o0:(ORL                        x0:(MOVBloadidx1 [i] {s} p idx mem)     s0:(SHLLconst [8]  x1:(MOVBloadidx1 [i-1] {s} p idx mem)))     s1:(SHLLconst [16] x2:(MOVBloadidx1 [i-2] {s} p idx mem)))     s2:(SHLLconst [24] x3:(MOVBloadidx1 [i-3] {s} p idx mem)))
   11263 	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && mergePoint(b,x0,x1,x2,x3) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(o0)   && clobber(o1)
   11264 	// result: @mergePoint(b,x0,x1,x2,x3) (BSWAPL <v.Type> (MOVLloadidx1 <v.Type> [i-3] {s} p idx mem))
   11265 	for {
   11266 		o1 := v.Args[0]
   11267 		if o1.Op != OpAMD64ORL {
   11268 			break
   11269 		}
   11270 		o0 := o1.Args[0]
   11271 		if o0.Op != OpAMD64ORL {
   11272 			break
   11273 		}
   11274 		x0 := o0.Args[0]
   11275 		if x0.Op != OpAMD64MOVBloadidx1 {
   11276 			break
   11277 		}
   11278 		i := x0.AuxInt
   11279 		s := x0.Aux
   11280 		p := x0.Args[0]
   11281 		idx := x0.Args[1]
   11282 		mem := x0.Args[2]
   11283 		s0 := o0.Args[1]
   11284 		if s0.Op != OpAMD64SHLLconst {
   11285 			break
   11286 		}
   11287 		if s0.AuxInt != 8 {
   11288 			break
   11289 		}
   11290 		x1 := s0.Args[0]
   11291 		if x1.Op != OpAMD64MOVBloadidx1 {
   11292 			break
   11293 		}
   11294 		if x1.AuxInt != i-1 {
   11295 			break
   11296 		}
   11297 		if x1.Aux != s {
   11298 			break
   11299 		}
   11300 		if p != x1.Args[0] {
   11301 			break
   11302 		}
   11303 		if idx != x1.Args[1] {
   11304 			break
   11305 		}
   11306 		if mem != x1.Args[2] {
   11307 			break
   11308 		}
   11309 		s1 := o1.Args[1]
   11310 		if s1.Op != OpAMD64SHLLconst {
   11311 			break
   11312 		}
   11313 		if s1.AuxInt != 16 {
   11314 			break
   11315 		}
   11316 		x2 := s1.Args[0]
   11317 		if x2.Op != OpAMD64MOVBloadidx1 {
   11318 			break
   11319 		}
   11320 		if x2.AuxInt != i-2 {
   11321 			break
   11322 		}
   11323 		if x2.Aux != s {
   11324 			break
   11325 		}
   11326 		if p != x2.Args[0] {
   11327 			break
   11328 		}
   11329 		if idx != x2.Args[1] {
   11330 			break
   11331 		}
   11332 		if mem != x2.Args[2] {
   11333 			break
   11334 		}
   11335 		s2 := v.Args[1]
   11336 		if s2.Op != OpAMD64SHLLconst {
   11337 			break
   11338 		}
   11339 		if s2.AuxInt != 24 {
   11340 			break
   11341 		}
   11342 		x3 := s2.Args[0]
   11343 		if x3.Op != OpAMD64MOVBloadidx1 {
   11344 			break
   11345 		}
   11346 		if x3.AuxInt != i-3 {
   11347 			break
   11348 		}
   11349 		if x3.Aux != s {
   11350 			break
   11351 		}
   11352 		if p != x3.Args[0] {
   11353 			break
   11354 		}
   11355 		if idx != x3.Args[1] {
   11356 			break
   11357 		}
   11358 		if mem != x3.Args[2] {
   11359 			break
   11360 		}
   11361 		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
   11362 			break
   11363 		}
   11364 		b = mergePoint(b, x0, x1, x2, x3)
   11365 		v0 := b.NewValue0(v.Line, OpAMD64BSWAPL, v.Type)
   11366 		v.reset(OpCopy)
   11367 		v.AddArg(v0)
   11368 		v1 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
   11369 		v1.AuxInt = i - 3
   11370 		v1.Aux = s
   11371 		v1.AddArg(p)
   11372 		v1.AddArg(idx)
   11373 		v1.AddArg(mem)
   11374 		v0.AddArg(v1)
   11375 		return true
   11376 	}
   11377 	return false
   11378 }
   11379 func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool {
   11380 	b := v.Block
   11381 	_ = b
   11382 	// match: (ORLconst [c] x)
   11383 	// cond: int32(c)==0
   11384 	// result: x
   11385 	for {
   11386 		c := v.AuxInt
   11387 		x := v.Args[0]
   11388 		if !(int32(c) == 0) {
   11389 			break
   11390 		}
   11391 		v.reset(OpCopy)
   11392 		v.Type = x.Type
   11393 		v.AddArg(x)
   11394 		return true
   11395 	}
   11396 	// match: (ORLconst [c] _)
   11397 	// cond: int32(c)==-1
   11398 	// result: (MOVLconst [-1])
   11399 	for {
   11400 		c := v.AuxInt
   11401 		if !(int32(c) == -1) {
   11402 			break
   11403 		}
   11404 		v.reset(OpAMD64MOVLconst)
   11405 		v.AuxInt = -1
   11406 		return true
   11407 	}
   11408 	// match: (ORLconst [c] (MOVLconst [d]))
   11409 	// cond:
   11410 	// result: (MOVLconst [c|d])
   11411 	for {
   11412 		c := v.AuxInt
   11413 		v_0 := v.Args[0]
   11414 		if v_0.Op != OpAMD64MOVLconst {
   11415 			break
   11416 		}
   11417 		d := v_0.AuxInt
   11418 		v.reset(OpAMD64MOVLconst)
   11419 		v.AuxInt = c | d
   11420 		return true
   11421 	}
   11422 	return false
   11423 }
   11424 func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool {
   11425 	b := v.Block
   11426 	_ = b
   11427 	// match: (ORQ x (MOVQconst [c]))
   11428 	// cond: is32Bit(c)
   11429 	// result: (ORQconst [c] x)
   11430 	for {
   11431 		x := v.Args[0]
   11432 		v_1 := v.Args[1]
   11433 		if v_1.Op != OpAMD64MOVQconst {
   11434 			break
   11435 		}
   11436 		c := v_1.AuxInt
   11437 		if !(is32Bit(c)) {
   11438 			break
   11439 		}
   11440 		v.reset(OpAMD64ORQconst)
   11441 		v.AuxInt = c
   11442 		v.AddArg(x)
   11443 		return true
   11444 	}
   11445 	// match: (ORQ (MOVQconst [c]) x)
   11446 	// cond: is32Bit(c)
   11447 	// result: (ORQconst [c] x)
   11448 	for {
   11449 		v_0 := v.Args[0]
   11450 		if v_0.Op != OpAMD64MOVQconst {
   11451 			break
   11452 		}
   11453 		c := v_0.AuxInt
   11454 		x := v.Args[1]
   11455 		if !(is32Bit(c)) {
   11456 			break
   11457 		}
   11458 		v.reset(OpAMD64ORQconst)
   11459 		v.AuxInt = c
   11460 		v.AddArg(x)
   11461 		return true
   11462 	}
   11463 	// match: (ORQ x x)
   11464 	// cond:
   11465 	// result: x
   11466 	for {
   11467 		x := v.Args[0]
   11468 		if x != v.Args[1] {
   11469 			break
   11470 		}
   11471 		v.reset(OpCopy)
   11472 		v.Type = x.Type
   11473 		v.AddArg(x)
   11474 		return true
   11475 	}
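	// The next rule applies the same load-merging idea at 8-byte width:
	// seven nested ORQs over eight adjacent byte loads collapse into a
	// single MOVQload.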
   11476 	// match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ                        x0:(MOVBload [i]   {s} p mem)     s0:(SHLQconst [8]  x1:(MOVBload [i+1] {s} p mem)))     s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem)))     s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem)))     s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem)))     s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem)))     s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem)))     s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem)))
   11477 	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
   11478 	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem)
   11479 	for {
   11480 		o0 := v.Args[0]
   11481 		if o0.Op != OpAMD64ORQ {
   11482 			break
   11483 		}
   11484 		o1 := o0.Args[0]
   11485 		if o1.Op != OpAMD64ORQ {
   11486 			break
   11487 		}
   11488 		o2 := o1.Args[0]
   11489 		if o2.Op != OpAMD64ORQ {
   11490 			break
   11491 		}
   11492 		o3 := o2.Args[0]
   11493 		if o3.Op != OpAMD64ORQ {
   11494 			break
   11495 		}
   11496 		o4 := o3.Args[0]
   11497 		if o4.Op != OpAMD64ORQ {
   11498 			break
   11499 		}
   11500 		o5 := o4.Args[0]
   11501 		if o5.Op != OpAMD64ORQ {
   11502 			break
   11503 		}
   11504 		x0 := o5.Args[0]
   11505 		if x0.Op != OpAMD64MOVBload {
   11506 			break
   11507 		}
   11508 		i := x0.AuxInt
   11509 		s := x0.Aux
   11510 		p := x0.Args[0]
   11511 		mem := x0.Args[1]
   11512 		s0 := o5.Args[1]
   11513 		if s0.Op != OpAMD64SHLQconst {
   11514 			break
   11515 		}
   11516 		if s0.AuxInt != 8 {
   11517 			break
   11518 		}
   11519 		x1 := s0.Args[0]
   11520 		if x1.Op != OpAMD64MOVBload {
   11521 			break
   11522 		}
   11523 		if x1.AuxInt != i+1 {
   11524 			break
   11525 		}
   11526 		if x1.Aux != s {
   11527 			break
   11528 		}
   11529 		if p != x1.Args[0] {
   11530 			break
   11531 		}
   11532 		if mem != x1.Args[1] {
   11533 			break
   11534 		}
   11535 		s1 := o4.Args[1]
   11536 		if s1.Op != OpAMD64SHLQconst {
   11537 			break
   11538 		}
   11539 		if s1.AuxInt != 16 {
   11540 			break
   11541 		}
   11542 		x2 := s1.Args[0]
   11543 		if x2.Op != OpAMD64MOVBload {
   11544 			break
   11545 		}
   11546 		if x2.AuxInt != i+2 {
   11547 			break
   11548 		}
   11549 		if x2.Aux != s {
   11550 			break
   11551 		}
   11552 		if p != x2.Args[0] {
   11553 			break
   11554 		}
   11555 		if mem != x2.Args[1] {
   11556 			break
   11557 		}
   11558 		s2 := o3.Args[1]
   11559 		if s2.Op != OpAMD64SHLQconst {
   11560 			break
   11561 		}
   11562 		if s2.AuxInt != 24 {
   11563 			break
   11564 		}
   11565 		x3 := s2.Args[0]
   11566 		if x3.Op != OpAMD64MOVBload {
   11567 			break
   11568 		}
   11569 		if x3.AuxInt != i+3 {
   11570 			break
   11571 		}
   11572 		if x3.Aux != s {
   11573 			break
   11574 		}
   11575 		if p != x3.Args[0] {
   11576 			break
   11577 		}
   11578 		if mem != x3.Args[1] {
   11579 			break
   11580 		}
   11581 		s3 := o2.Args[1]
   11582 		if s3.Op != OpAMD64SHLQconst {
   11583 			break
   11584 		}
   11585 		if s3.AuxInt != 32 {
   11586 			break
   11587 		}
   11588 		x4 := s3.Args[0]
   11589 		if x4.Op != OpAMD64MOVBload {
   11590 			break
   11591 		}
   11592 		if x4.AuxInt != i+4 {
   11593 			break
   11594 		}
   11595 		if x4.Aux != s {
   11596 			break
   11597 		}
   11598 		if p != x4.Args[0] {
   11599 			break
   11600 		}
   11601 		if mem != x4.Args[1] {
   11602 			break
   11603 		}
   11604 		s4 := o1.Args[1]
   11605 		if s4.Op != OpAMD64SHLQconst {
   11606 			break
   11607 		}
   11608 		if s4.AuxInt != 40 {
   11609 			break
   11610 		}
   11611 		x5 := s4.Args[0]
   11612 		if x5.Op != OpAMD64MOVBload {
   11613 			break
   11614 		}
   11615 		if x5.AuxInt != i+5 {
   11616 			break
   11617 		}
   11618 		if x5.Aux != s {
   11619 			break
   11620 		}
   11621 		if p != x5.Args[0] {
   11622 			break
   11623 		}
   11624 		if mem != x5.Args[1] {
   11625 			break
   11626 		}
   11627 		s5 := o0.Args[1]
   11628 		if s5.Op != OpAMD64SHLQconst {
   11629 			break
   11630 		}
   11631 		if s5.AuxInt != 48 {
   11632 			break
   11633 		}
   11634 		x6 := s5.Args[0]
   11635 		if x6.Op != OpAMD64MOVBload {
   11636 			break
   11637 		}
   11638 		if x6.AuxInt != i+6 {
   11639 			break
   11640 		}
   11641 		if x6.Aux != s {
   11642 			break
   11643 		}
   11644 		if p != x6.Args[0] {
   11645 			break
   11646 		}
   11647 		if mem != x6.Args[1] {
   11648 			break
   11649 		}
   11650 		s6 := v.Args[1]
   11651 		if s6.Op != OpAMD64SHLQconst {
   11652 			break
   11653 		}
   11654 		if s6.AuxInt != 56 {
   11655 			break
   11656 		}
   11657 		x7 := s6.Args[0]
   11658 		if x7.Op != OpAMD64MOVBload {
   11659 			break
   11660 		}
   11661 		if x7.AuxInt != i+7 {
   11662 			break
   11663 		}
   11664 		if x7.Aux != s {
   11665 			break
   11666 		}
   11667 		if p != x7.Args[0] {
   11668 			break
   11669 		}
   11670 		if mem != x7.Args[1] {
   11671 			break
   11672 		}
   11673 		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
   11674 			break
   11675 		}
   11676 		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
   11677 		v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
   11678 		v.reset(OpCopy)
   11679 		v.AddArg(v0)
   11680 		v0.AuxInt = i
   11681 		v0.Aux = s
   11682 		v0.AddArg(p)
   11683 		v0.AddArg(mem)
   11684 		return true
   11685 	}
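	// Same eight-byte, little-endian merge as above, but for indexed loads
	// (MOVBloadidx1, addressing p+idx+const); the result is one MOVQloadidx1.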
    11686 	// match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem)))
    11687 	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)
   11688 	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem)
   11689 	for {
   11690 		o0 := v.Args[0]
   11691 		if o0.Op != OpAMD64ORQ {
   11692 			break
   11693 		}
   11694 		o1 := o0.Args[0]
   11695 		if o1.Op != OpAMD64ORQ {
   11696 			break
   11697 		}
   11698 		o2 := o1.Args[0]
   11699 		if o2.Op != OpAMD64ORQ {
   11700 			break
   11701 		}
   11702 		o3 := o2.Args[0]
   11703 		if o3.Op != OpAMD64ORQ {
   11704 			break
   11705 		}
   11706 		o4 := o3.Args[0]
   11707 		if o4.Op != OpAMD64ORQ {
   11708 			break
   11709 		}
   11710 		o5 := o4.Args[0]
   11711 		if o5.Op != OpAMD64ORQ {
   11712 			break
   11713 		}
   11714 		x0 := o5.Args[0]
   11715 		if x0.Op != OpAMD64MOVBloadidx1 {
   11716 			break
   11717 		}
   11718 		i := x0.AuxInt
   11719 		s := x0.Aux
   11720 		p := x0.Args[0]
   11721 		idx := x0.Args[1]
   11722 		mem := x0.Args[2]
   11723 		s0 := o5.Args[1]
   11724 		if s0.Op != OpAMD64SHLQconst {
   11725 			break
   11726 		}
   11727 		if s0.AuxInt != 8 {
   11728 			break
   11729 		}
   11730 		x1 := s0.Args[0]
   11731 		if x1.Op != OpAMD64MOVBloadidx1 {
   11732 			break
   11733 		}
   11734 		if x1.AuxInt != i+1 {
   11735 			break
   11736 		}
   11737 		if x1.Aux != s {
   11738 			break
   11739 		}
   11740 		if p != x1.Args[0] {
   11741 			break
   11742 		}
   11743 		if idx != x1.Args[1] {
   11744 			break
   11745 		}
   11746 		if mem != x1.Args[2] {
   11747 			break
   11748 		}
   11749 		s1 := o4.Args[1]
   11750 		if s1.Op != OpAMD64SHLQconst {
   11751 			break
   11752 		}
   11753 		if s1.AuxInt != 16 {
   11754 			break
   11755 		}
   11756 		x2 := s1.Args[0]
   11757 		if x2.Op != OpAMD64MOVBloadidx1 {
   11758 			break
   11759 		}
   11760 		if x2.AuxInt != i+2 {
   11761 			break
   11762 		}
   11763 		if x2.Aux != s {
   11764 			break
   11765 		}
   11766 		if p != x2.Args[0] {
   11767 			break
   11768 		}
   11769 		if idx != x2.Args[1] {
   11770 			break
   11771 		}
   11772 		if mem != x2.Args[2] {
   11773 			break
   11774 		}
   11775 		s2 := o3.Args[1]
   11776 		if s2.Op != OpAMD64SHLQconst {
   11777 			break
   11778 		}
   11779 		if s2.AuxInt != 24 {
   11780 			break
   11781 		}
   11782 		x3 := s2.Args[0]
   11783 		if x3.Op != OpAMD64MOVBloadidx1 {
   11784 			break
   11785 		}
   11786 		if x3.AuxInt != i+3 {
   11787 			break
   11788 		}
   11789 		if x3.Aux != s {
   11790 			break
   11791 		}
   11792 		if p != x3.Args[0] {
   11793 			break
   11794 		}
   11795 		if idx != x3.Args[1] {
   11796 			break
   11797 		}
   11798 		if mem != x3.Args[2] {
   11799 			break
   11800 		}
   11801 		s3 := o2.Args[1]
   11802 		if s3.Op != OpAMD64SHLQconst {
   11803 			break
   11804 		}
   11805 		if s3.AuxInt != 32 {
   11806 			break
   11807 		}
   11808 		x4 := s3.Args[0]
   11809 		if x4.Op != OpAMD64MOVBloadidx1 {
   11810 			break
   11811 		}
   11812 		if x4.AuxInt != i+4 {
   11813 			break
   11814 		}
   11815 		if x4.Aux != s {
   11816 			break
   11817 		}
   11818 		if p != x4.Args[0] {
   11819 			break
   11820 		}
   11821 		if idx != x4.Args[1] {
   11822 			break
   11823 		}
   11824 		if mem != x4.Args[2] {
   11825 			break
   11826 		}
   11827 		s4 := o1.Args[1]
   11828 		if s4.Op != OpAMD64SHLQconst {
   11829 			break
   11830 		}
   11831 		if s4.AuxInt != 40 {
   11832 			break
   11833 		}
   11834 		x5 := s4.Args[0]
   11835 		if x5.Op != OpAMD64MOVBloadidx1 {
   11836 			break
   11837 		}
   11838 		if x5.AuxInt != i+5 {
   11839 			break
   11840 		}
   11841 		if x5.Aux != s {
   11842 			break
   11843 		}
   11844 		if p != x5.Args[0] {
   11845 			break
   11846 		}
   11847 		if idx != x5.Args[1] {
   11848 			break
   11849 		}
   11850 		if mem != x5.Args[2] {
   11851 			break
   11852 		}
   11853 		s5 := o0.Args[1]
   11854 		if s5.Op != OpAMD64SHLQconst {
   11855 			break
   11856 		}
   11857 		if s5.AuxInt != 48 {
   11858 			break
   11859 		}
   11860 		x6 := s5.Args[0]
   11861 		if x6.Op != OpAMD64MOVBloadidx1 {
   11862 			break
   11863 		}
   11864 		if x6.AuxInt != i+6 {
   11865 			break
   11866 		}
   11867 		if x6.Aux != s {
   11868 			break
   11869 		}
   11870 		if p != x6.Args[0] {
   11871 			break
   11872 		}
   11873 		if idx != x6.Args[1] {
   11874 			break
   11875 		}
   11876 		if mem != x6.Args[2] {
   11877 			break
   11878 		}
   11879 		s6 := v.Args[1]
   11880 		if s6.Op != OpAMD64SHLQconst {
   11881 			break
   11882 		}
   11883 		if s6.AuxInt != 56 {
   11884 			break
   11885 		}
   11886 		x7 := s6.Args[0]
   11887 		if x7.Op != OpAMD64MOVBloadidx1 {
   11888 			break
   11889 		}
   11890 		if x7.AuxInt != i+7 {
   11891 			break
   11892 		}
   11893 		if x7.Aux != s {
   11894 			break
   11895 		}
   11896 		if p != x7.Args[0] {
   11897 			break
   11898 		}
   11899 		if idx != x7.Args[1] {
   11900 			break
   11901 		}
   11902 		if mem != x7.Args[2] {
   11903 			break
   11904 		}
   11905 		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
   11906 			break
   11907 		}
   11908 		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
   11909 		v0 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type)
   11910 		v.reset(OpCopy)
   11911 		v.AddArg(v0)
   11912 		v0.AuxInt = i
   11913 		v0.Aux = s
   11914 		v0.AddArg(p)
   11915 		v0.AddArg(idx)
   11916 		v0.AddArg(mem)
   11917 		return true
   11918 	}
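	// In the next rule the byte shifted highest comes from the lowest address:
	// offsets run i, i-1, ..., i-7, i.e. the bytes are combined in big-endian
	// order. The tree is replaced by one MOVQload at i-7 followed by a BSWAPQ,
	// the shape produced by a hand-written (or binary.BigEndian-style) decode.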
    11919 	// match: (ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i-1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i-2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i-3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i-4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i-5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i-6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i-7] {s} p mem)))
    11920 	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)
   11921 	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ <v.Type> (MOVQload [i-7] {s} p mem))
   11922 	for {
   11923 		o5 := v.Args[0]
   11924 		if o5.Op != OpAMD64ORQ {
   11925 			break
   11926 		}
   11927 		o4 := o5.Args[0]
   11928 		if o4.Op != OpAMD64ORQ {
   11929 			break
   11930 		}
   11931 		o3 := o4.Args[0]
   11932 		if o3.Op != OpAMD64ORQ {
   11933 			break
   11934 		}
   11935 		o2 := o3.Args[0]
   11936 		if o2.Op != OpAMD64ORQ {
   11937 			break
   11938 		}
   11939 		o1 := o2.Args[0]
   11940 		if o1.Op != OpAMD64ORQ {
   11941 			break
   11942 		}
   11943 		o0 := o1.Args[0]
   11944 		if o0.Op != OpAMD64ORQ {
   11945 			break
   11946 		}
   11947 		x0 := o0.Args[0]
   11948 		if x0.Op != OpAMD64MOVBload {
   11949 			break
   11950 		}
   11951 		i := x0.AuxInt
   11952 		s := x0.Aux
   11953 		p := x0.Args[0]
   11954 		mem := x0.Args[1]
   11955 		s0 := o0.Args[1]
   11956 		if s0.Op != OpAMD64SHLQconst {
   11957 			break
   11958 		}
   11959 		if s0.AuxInt != 8 {
   11960 			break
   11961 		}
   11962 		x1 := s0.Args[0]
   11963 		if x1.Op != OpAMD64MOVBload {
   11964 			break
   11965 		}
   11966 		if x1.AuxInt != i-1 {
   11967 			break
   11968 		}
   11969 		if x1.Aux != s {
   11970 			break
   11971 		}
   11972 		if p != x1.Args[0] {
   11973 			break
   11974 		}
   11975 		if mem != x1.Args[1] {
   11976 			break
   11977 		}
   11978 		s1 := o1.Args[1]
   11979 		if s1.Op != OpAMD64SHLQconst {
   11980 			break
   11981 		}
   11982 		if s1.AuxInt != 16 {
   11983 			break
   11984 		}
   11985 		x2 := s1.Args[0]
   11986 		if x2.Op != OpAMD64MOVBload {
   11987 			break
   11988 		}
   11989 		if x2.AuxInt != i-2 {
   11990 			break
   11991 		}
   11992 		if x2.Aux != s {
   11993 			break
   11994 		}
   11995 		if p != x2.Args[0] {
   11996 			break
   11997 		}
   11998 		if mem != x2.Args[1] {
   11999 			break
   12000 		}
   12001 		s2 := o2.Args[1]
   12002 		if s2.Op != OpAMD64SHLQconst {
   12003 			break
   12004 		}
   12005 		if s2.AuxInt != 24 {
   12006 			break
   12007 		}
   12008 		x3 := s2.Args[0]
   12009 		if x3.Op != OpAMD64MOVBload {
   12010 			break
   12011 		}
   12012 		if x3.AuxInt != i-3 {
   12013 			break
   12014 		}
   12015 		if x3.Aux != s {
   12016 			break
   12017 		}
   12018 		if p != x3.Args[0] {
   12019 			break
   12020 		}
   12021 		if mem != x3.Args[1] {
   12022 			break
   12023 		}
   12024 		s3 := o3.Args[1]
   12025 		if s3.Op != OpAMD64SHLQconst {
   12026 			break
   12027 		}
   12028 		if s3.AuxInt != 32 {
   12029 			break
   12030 		}
   12031 		x4 := s3.Args[0]
   12032 		if x4.Op != OpAMD64MOVBload {
   12033 			break
   12034 		}
   12035 		if x4.AuxInt != i-4 {
   12036 			break
   12037 		}
   12038 		if x4.Aux != s {
   12039 			break
   12040 		}
   12041 		if p != x4.Args[0] {
   12042 			break
   12043 		}
   12044 		if mem != x4.Args[1] {
   12045 			break
   12046 		}
   12047 		s4 := o4.Args[1]
   12048 		if s4.Op != OpAMD64SHLQconst {
   12049 			break
   12050 		}
   12051 		if s4.AuxInt != 40 {
   12052 			break
   12053 		}
   12054 		x5 := s4.Args[0]
   12055 		if x5.Op != OpAMD64MOVBload {
   12056 			break
   12057 		}
   12058 		if x5.AuxInt != i-5 {
   12059 			break
   12060 		}
   12061 		if x5.Aux != s {
   12062 			break
   12063 		}
   12064 		if p != x5.Args[0] {
   12065 			break
   12066 		}
   12067 		if mem != x5.Args[1] {
   12068 			break
   12069 		}
   12070 		s5 := o5.Args[1]
   12071 		if s5.Op != OpAMD64SHLQconst {
   12072 			break
   12073 		}
   12074 		if s5.AuxInt != 48 {
   12075 			break
   12076 		}
   12077 		x6 := s5.Args[0]
   12078 		if x6.Op != OpAMD64MOVBload {
   12079 			break
   12080 		}
   12081 		if x6.AuxInt != i-6 {
   12082 			break
   12083 		}
   12084 		if x6.Aux != s {
   12085 			break
   12086 		}
   12087 		if p != x6.Args[0] {
   12088 			break
   12089 		}
   12090 		if mem != x6.Args[1] {
   12091 			break
   12092 		}
   12093 		s6 := v.Args[1]
   12094 		if s6.Op != OpAMD64SHLQconst {
   12095 			break
   12096 		}
   12097 		if s6.AuxInt != 56 {
   12098 			break
   12099 		}
   12100 		x7 := s6.Args[0]
   12101 		if x7.Op != OpAMD64MOVBload {
   12102 			break
   12103 		}
   12104 		if x7.AuxInt != i-7 {
   12105 			break
   12106 		}
   12107 		if x7.Aux != s {
   12108 			break
   12109 		}
   12110 		if p != x7.Args[0] {
   12111 			break
   12112 		}
   12113 		if mem != x7.Args[1] {
   12114 			break
   12115 		}
   12116 		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
   12117 			break
   12118 		}
   12119 		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
   12120 		v0 := b.NewValue0(v.Line, OpAMD64BSWAPQ, v.Type)
   12121 		v.reset(OpCopy)
   12122 		v.AddArg(v0)
   12123 		v1 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
   12124 		v1.AuxInt = i - 7
   12125 		v1.Aux = s
   12126 		v1.AddArg(p)
   12127 		v1.AddArg(mem)
   12128 		v0.AddArg(v1)
   12129 		return true
   12130 	}
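	// Indexed-load variant of the big-endian merge: eight MOVBloadidx1 at
	// descending offsets become (BSWAPQ (MOVQloadidx1 [i-7] {s} p idx mem)).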
    12131 	// match: (ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i-1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i-2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i-3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i-4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i-5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i-6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i-7] {s} p idx mem)))
    12132 	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)
   12133 	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ <v.Type> (MOVQloadidx1 <v.Type> [i-7] {s} p idx mem))
   12134 	for {
   12135 		o5 := v.Args[0]
   12136 		if o5.Op != OpAMD64ORQ {
   12137 			break
   12138 		}
   12139 		o4 := o5.Args[0]
   12140 		if o4.Op != OpAMD64ORQ {
   12141 			break
   12142 		}
   12143 		o3 := o4.Args[0]
   12144 		if o3.Op != OpAMD64ORQ {
   12145 			break
   12146 		}
   12147 		o2 := o3.Args[0]
   12148 		if o2.Op != OpAMD64ORQ {
   12149 			break
   12150 		}
   12151 		o1 := o2.Args[0]
   12152 		if o1.Op != OpAMD64ORQ {
   12153 			break
   12154 		}
   12155 		o0 := o1.Args[0]
   12156 		if o0.Op != OpAMD64ORQ {
   12157 			break
   12158 		}
   12159 		x0 := o0.Args[0]
   12160 		if x0.Op != OpAMD64MOVBloadidx1 {
   12161 			break
   12162 		}
   12163 		i := x0.AuxInt
   12164 		s := x0.Aux
   12165 		p := x0.Args[0]
   12166 		idx := x0.Args[1]
   12167 		mem := x0.Args[2]
   12168 		s0 := o0.Args[1]
   12169 		if s0.Op != OpAMD64SHLQconst {
   12170 			break
   12171 		}
   12172 		if s0.AuxInt != 8 {
   12173 			break
   12174 		}
   12175 		x1 := s0.Args[0]
   12176 		if x1.Op != OpAMD64MOVBloadidx1 {
   12177 			break
   12178 		}
   12179 		if x1.AuxInt != i-1 {
   12180 			break
   12181 		}
   12182 		if x1.Aux != s {
   12183 			break
   12184 		}
   12185 		if p != x1.Args[0] {
   12186 			break
   12187 		}
   12188 		if idx != x1.Args[1] {
   12189 			break
   12190 		}
   12191 		if mem != x1.Args[2] {
   12192 			break
   12193 		}
   12194 		s1 := o1.Args[1]
   12195 		if s1.Op != OpAMD64SHLQconst {
   12196 			break
   12197 		}
   12198 		if s1.AuxInt != 16 {
   12199 			break
   12200 		}
   12201 		x2 := s1.Args[0]
   12202 		if x2.Op != OpAMD64MOVBloadidx1 {
   12203 			break
   12204 		}
   12205 		if x2.AuxInt != i-2 {
   12206 			break
   12207 		}
   12208 		if x2.Aux != s {
   12209 			break
   12210 		}
   12211 		if p != x2.Args[0] {
   12212 			break
   12213 		}
   12214 		if idx != x2.Args[1] {
   12215 			break
   12216 		}
   12217 		if mem != x2.Args[2] {
   12218 			break
   12219 		}
   12220 		s2 := o2.Args[1]
   12221 		if s2.Op != OpAMD64SHLQconst {
   12222 			break
   12223 		}
   12224 		if s2.AuxInt != 24 {
   12225 			break
   12226 		}
   12227 		x3 := s2.Args[0]
   12228 		if x3.Op != OpAMD64MOVBloadidx1 {
   12229 			break
   12230 		}
   12231 		if x3.AuxInt != i-3 {
   12232 			break
   12233 		}
   12234 		if x3.Aux != s {
   12235 			break
   12236 		}
   12237 		if p != x3.Args[0] {
   12238 			break
   12239 		}
   12240 		if idx != x3.Args[1] {
   12241 			break
   12242 		}
   12243 		if mem != x3.Args[2] {
   12244 			break
   12245 		}
   12246 		s3 := o3.Args[1]
   12247 		if s3.Op != OpAMD64SHLQconst {
   12248 			break
   12249 		}
   12250 		if s3.AuxInt != 32 {
   12251 			break
   12252 		}
   12253 		x4 := s3.Args[0]
   12254 		if x4.Op != OpAMD64MOVBloadidx1 {
   12255 			break
   12256 		}
   12257 		if x4.AuxInt != i-4 {
   12258 			break
   12259 		}
   12260 		if x4.Aux != s {
   12261 			break
   12262 		}
   12263 		if p != x4.Args[0] {
   12264 			break
   12265 		}
   12266 		if idx != x4.Args[1] {
   12267 			break
   12268 		}
   12269 		if mem != x4.Args[2] {
   12270 			break
   12271 		}
   12272 		s4 := o4.Args[1]
   12273 		if s4.Op != OpAMD64SHLQconst {
   12274 			break
   12275 		}
   12276 		if s4.AuxInt != 40 {
   12277 			break
   12278 		}
   12279 		x5 := s4.Args[0]
   12280 		if x5.Op != OpAMD64MOVBloadidx1 {
   12281 			break
   12282 		}
   12283 		if x5.AuxInt != i-5 {
   12284 			break
   12285 		}
   12286 		if x5.Aux != s {
   12287 			break
   12288 		}
   12289 		if p != x5.Args[0] {
   12290 			break
   12291 		}
   12292 		if idx != x5.Args[1] {
   12293 			break
   12294 		}
   12295 		if mem != x5.Args[2] {
   12296 			break
   12297 		}
   12298 		s5 := o5.Args[1]
   12299 		if s5.Op != OpAMD64SHLQconst {
   12300 			break
   12301 		}
   12302 		if s5.AuxInt != 48 {
   12303 			break
   12304 		}
   12305 		x6 := s5.Args[0]
   12306 		if x6.Op != OpAMD64MOVBloadidx1 {
   12307 			break
   12308 		}
   12309 		if x6.AuxInt != i-6 {
   12310 			break
   12311 		}
   12312 		if x6.Aux != s {
   12313 			break
   12314 		}
   12315 		if p != x6.Args[0] {
   12316 			break
   12317 		}
   12318 		if idx != x6.Args[1] {
   12319 			break
   12320 		}
   12321 		if mem != x6.Args[2] {
   12322 			break
   12323 		}
   12324 		s6 := v.Args[1]
   12325 		if s6.Op != OpAMD64SHLQconst {
   12326 			break
   12327 		}
   12328 		if s6.AuxInt != 56 {
   12329 			break
   12330 		}
   12331 		x7 := s6.Args[0]
   12332 		if x7.Op != OpAMD64MOVBloadidx1 {
   12333 			break
   12334 		}
   12335 		if x7.AuxInt != i-7 {
   12336 			break
   12337 		}
   12338 		if x7.Aux != s {
   12339 			break
   12340 		}
   12341 		if p != x7.Args[0] {
   12342 			break
   12343 		}
   12344 		if idx != x7.Args[1] {
   12345 			break
   12346 		}
   12347 		if mem != x7.Args[2] {
   12348 			break
   12349 		}
   12350 		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
   12351 			break
   12352 		}
   12353 		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
   12354 		v0 := b.NewValue0(v.Line, OpAMD64BSWAPQ, v.Type)
   12355 		v.reset(OpCopy)
   12356 		v.AddArg(v0)
   12357 		v1 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type)
   12358 		v1.AuxInt = i - 7
   12359 		v1.Aux = s
   12360 		v1.AddArg(p)
   12361 		v1.AddArg(idx)
   12362 		v1.AddArg(mem)
   12363 		v0.AddArg(v1)
   12364 		return true
   12365 	}
   12366 	return false
   12367 }
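// ORQconst identities: x|0 = x, x|-1 = -1, and OR of two constants folds at
// compile time.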
   12368 func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool {
   12369 	b := v.Block
   12370 	_ = b
   12371 	// match: (ORQconst [0] x)
   12372 	// cond:
   12373 	// result: x
   12374 	for {
   12375 		if v.AuxInt != 0 {
   12376 			break
   12377 		}
   12378 		x := v.Args[0]
   12379 		v.reset(OpCopy)
   12380 		v.Type = x.Type
   12381 		v.AddArg(x)
   12382 		return true
   12383 	}
   12384 	// match: (ORQconst [-1] _)
   12385 	// cond:
   12386 	// result: (MOVQconst [-1])
   12387 	for {
   12388 		if v.AuxInt != -1 {
   12389 			break
   12390 		}
   12391 		v.reset(OpAMD64MOVQconst)
   12392 		v.AuxInt = -1
   12393 		return true
   12394 	}
   12395 	// match: (ORQconst [c] (MOVQconst [d]))
   12396 	// cond:
   12397 	// result: (MOVQconst [c|d])
   12398 	for {
   12399 		c := v.AuxInt
   12400 		v_0 := v.Args[0]
   12401 		if v_0.Op != OpAMD64MOVQconst {
   12402 			break
   12403 		}
   12404 		d := v_0.AuxInt
   12405 		v.reset(OpAMD64MOVQconst)
   12406 		v.AuxInt = c | d
   12407 		return true
   12408 	}
   12409 	return false
   12410 }
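// Constant rotates compose by adding their counts modulo the operand width
// (mask with width-1), and a rotate by 0 is a no-op. For the 8-bit case:
// rotating by 5 and then by 6 is the same as rotating by (5+6)&7 = 3.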
   12411 func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool {
   12412 	b := v.Block
   12413 	_ = b
   12414 	// match: (ROLBconst [c] (ROLBconst [d] x))
   12415 	// cond:
    12416 	// result: (ROLBconst [(c+d)&7] x)
   12417 	for {
   12418 		c := v.AuxInt
   12419 		v_0 := v.Args[0]
   12420 		if v_0.Op != OpAMD64ROLBconst {
   12421 			break
   12422 		}
   12423 		d := v_0.AuxInt
   12424 		x := v_0.Args[0]
   12425 		v.reset(OpAMD64ROLBconst)
   12426 		v.AuxInt = (c + d) & 7
   12427 		v.AddArg(x)
   12428 		return true
   12429 	}
   12430 	// match: (ROLBconst [0] x)
   12431 	// cond:
   12432 	// result: x
   12433 	for {
   12434 		if v.AuxInt != 0 {
   12435 			break
   12436 		}
   12437 		x := v.Args[0]
   12438 		v.reset(OpCopy)
   12439 		v.Type = x.Type
   12440 		v.AddArg(x)
   12441 		return true
   12442 	}
   12443 	return false
   12444 }
   12445 func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool {
   12446 	b := v.Block
   12447 	_ = b
   12448 	// match: (ROLLconst [c] (ROLLconst [d] x))
   12449 	// cond:
   12450 	// result: (ROLLconst [(c+d)&31] x)
   12451 	for {
   12452 		c := v.AuxInt
   12453 		v_0 := v.Args[0]
   12454 		if v_0.Op != OpAMD64ROLLconst {
   12455 			break
   12456 		}
   12457 		d := v_0.AuxInt
   12458 		x := v_0.Args[0]
   12459 		v.reset(OpAMD64ROLLconst)
   12460 		v.AuxInt = (c + d) & 31
   12461 		v.AddArg(x)
   12462 		return true
   12463 	}
   12464 	// match: (ROLLconst [0] x)
   12465 	// cond:
   12466 	// result: x
   12467 	for {
   12468 		if v.AuxInt != 0 {
   12469 			break
   12470 		}
   12471 		x := v.Args[0]
   12472 		v.reset(OpCopy)
   12473 		v.Type = x.Type
   12474 		v.AddArg(x)
   12475 		return true
   12476 	}
   12477 	return false
   12478 }
   12479 func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool {
   12480 	b := v.Block
   12481 	_ = b
   12482 	// match: (ROLQconst [c] (ROLQconst [d] x))
   12483 	// cond:
   12484 	// result: (ROLQconst [(c+d)&63] x)
   12485 	for {
   12486 		c := v.AuxInt
   12487 		v_0 := v.Args[0]
   12488 		if v_0.Op != OpAMD64ROLQconst {
   12489 			break
   12490 		}
   12491 		d := v_0.AuxInt
   12492 		x := v_0.Args[0]
   12493 		v.reset(OpAMD64ROLQconst)
   12494 		v.AuxInt = (c + d) & 63
   12495 		v.AddArg(x)
   12496 		return true
   12497 	}
   12498 	// match: (ROLQconst [0] x)
   12499 	// cond:
   12500 	// result: x
   12501 	for {
   12502 		if v.AuxInt != 0 {
   12503 			break
   12504 		}
   12505 		x := v.Args[0]
   12506 		v.reset(OpCopy)
   12507 		v.Type = x.Type
   12508 		v.AddArg(x)
   12509 		return true
   12510 	}
   12511 	return false
   12512 }
   12513 func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool {
   12514 	b := v.Block
   12515 	_ = b
   12516 	// match: (ROLWconst [c] (ROLWconst [d] x))
   12517 	// cond:
   12518 	// result: (ROLWconst [(c+d)&15] x)
   12519 	for {
   12520 		c := v.AuxInt
   12521 		v_0 := v.Args[0]
   12522 		if v_0.Op != OpAMD64ROLWconst {
   12523 			break
   12524 		}
   12525 		d := v_0.AuxInt
   12526 		x := v_0.Args[0]
   12527 		v.reset(OpAMD64ROLWconst)
   12528 		v.AuxInt = (c + d) & 15
   12529 		v.AddArg(x)
   12530 		return true
   12531 	}
   12532 	// match: (ROLWconst [0] x)
   12533 	// cond:
   12534 	// result: x
   12535 	for {
   12536 		if v.AuxInt != 0 {
   12537 			break
   12538 		}
   12539 		x := v.Args[0]
   12540 		v.reset(OpCopy)
   12541 		v.Type = x.Type
   12542 		v.AddArg(x)
   12543 		return true
   12544 	}
   12545 	return false
   12546 }
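// Arithmetic right shifts. A constant count is folded into the *const form of
// the op, masked the same way the processor masks the CL count (&31 for
// 8/16/32-bit shifts, &63 for 64-bit). That hardware masking is also what
// lets an explicit (ANDLconst [31] y) or (ANDQconst [63] y) on a variable
// count be dropped in the SARL and SARQ rules below.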
   12547 func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool {
   12548 	b := v.Block
   12549 	_ = b
   12550 	// match: (SARB x (MOVQconst [c]))
   12551 	// cond:
   12552 	// result: (SARBconst [c&31] x)
   12553 	for {
   12554 		x := v.Args[0]
   12555 		v_1 := v.Args[1]
   12556 		if v_1.Op != OpAMD64MOVQconst {
   12557 			break
   12558 		}
   12559 		c := v_1.AuxInt
   12560 		v.reset(OpAMD64SARBconst)
   12561 		v.AuxInt = c & 31
   12562 		v.AddArg(x)
   12563 		return true
   12564 	}
   12565 	// match: (SARB x (MOVLconst [c]))
   12566 	// cond:
   12567 	// result: (SARBconst [c&31] x)
   12568 	for {
   12569 		x := v.Args[0]
   12570 		v_1 := v.Args[1]
   12571 		if v_1.Op != OpAMD64MOVLconst {
   12572 			break
   12573 		}
   12574 		c := v_1.AuxInt
   12575 		v.reset(OpAMD64SARBconst)
   12576 		v.AuxInt = c & 31
   12577 		v.AddArg(x)
   12578 		return true
   12579 	}
   12580 	return false
   12581 }
   12582 func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool {
   12583 	b := v.Block
   12584 	_ = b
   12585 	// match: (SARBconst [c] (MOVQconst [d]))
   12586 	// cond:
   12587 	// result: (MOVQconst [d>>uint64(c)])
   12588 	for {
   12589 		c := v.AuxInt
   12590 		v_0 := v.Args[0]
   12591 		if v_0.Op != OpAMD64MOVQconst {
   12592 			break
   12593 		}
   12594 		d := v_0.AuxInt
   12595 		v.reset(OpAMD64MOVQconst)
   12596 		v.AuxInt = d >> uint64(c)
   12597 		return true
   12598 	}
   12599 	return false
   12600 }
   12601 func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool {
   12602 	b := v.Block
   12603 	_ = b
   12604 	// match: (SARL x (MOVQconst [c]))
   12605 	// cond:
   12606 	// result: (SARLconst [c&31] x)
   12607 	for {
   12608 		x := v.Args[0]
   12609 		v_1 := v.Args[1]
   12610 		if v_1.Op != OpAMD64MOVQconst {
   12611 			break
   12612 		}
   12613 		c := v_1.AuxInt
   12614 		v.reset(OpAMD64SARLconst)
   12615 		v.AuxInt = c & 31
   12616 		v.AddArg(x)
   12617 		return true
   12618 	}
   12619 	// match: (SARL x (MOVLconst [c]))
   12620 	// cond:
   12621 	// result: (SARLconst [c&31] x)
   12622 	for {
   12623 		x := v.Args[0]
   12624 		v_1 := v.Args[1]
   12625 		if v_1.Op != OpAMD64MOVLconst {
   12626 			break
   12627 		}
   12628 		c := v_1.AuxInt
   12629 		v.reset(OpAMD64SARLconst)
   12630 		v.AuxInt = c & 31
   12631 		v.AddArg(x)
   12632 		return true
   12633 	}
   12634 	// match: (SARL x (ANDLconst [31] y))
   12635 	// cond:
   12636 	// result: (SARL x y)
   12637 	for {
   12638 		x := v.Args[0]
   12639 		v_1 := v.Args[1]
   12640 		if v_1.Op != OpAMD64ANDLconst {
   12641 			break
   12642 		}
   12643 		if v_1.AuxInt != 31 {
   12644 			break
   12645 		}
   12646 		y := v_1.Args[0]
   12647 		v.reset(OpAMD64SARL)
   12648 		v.AddArg(x)
   12649 		v.AddArg(y)
   12650 		return true
   12651 	}
   12652 	return false
   12653 }
   12654 func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool {
   12655 	b := v.Block
   12656 	_ = b
   12657 	// match: (SARLconst [c] (MOVQconst [d]))
   12658 	// cond:
   12659 	// result: (MOVQconst [d>>uint64(c)])
   12660 	for {
   12661 		c := v.AuxInt
   12662 		v_0 := v.Args[0]
   12663 		if v_0.Op != OpAMD64MOVQconst {
   12664 			break
   12665 		}
   12666 		d := v_0.AuxInt
   12667 		v.reset(OpAMD64MOVQconst)
   12668 		v.AuxInt = d >> uint64(c)
   12669 		return true
   12670 	}
   12671 	return false
   12672 }
   12673 func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool {
   12674 	b := v.Block
   12675 	_ = b
   12676 	// match: (SARQ x (MOVQconst [c]))
   12677 	// cond:
   12678 	// result: (SARQconst [c&63] x)
   12679 	for {
   12680 		x := v.Args[0]
   12681 		v_1 := v.Args[1]
   12682 		if v_1.Op != OpAMD64MOVQconst {
   12683 			break
   12684 		}
   12685 		c := v_1.AuxInt
   12686 		v.reset(OpAMD64SARQconst)
   12687 		v.AuxInt = c & 63
   12688 		v.AddArg(x)
   12689 		return true
   12690 	}
   12691 	// match: (SARQ x (MOVLconst [c]))
   12692 	// cond:
   12693 	// result: (SARQconst [c&63] x)
   12694 	for {
   12695 		x := v.Args[0]
   12696 		v_1 := v.Args[1]
   12697 		if v_1.Op != OpAMD64MOVLconst {
   12698 			break
   12699 		}
   12700 		c := v_1.AuxInt
   12701 		v.reset(OpAMD64SARQconst)
   12702 		v.AuxInt = c & 63
   12703 		v.AddArg(x)
   12704 		return true
   12705 	}
   12706 	// match: (SARQ x (ANDQconst [63] y))
   12707 	// cond:
   12708 	// result: (SARQ x y)
   12709 	for {
   12710 		x := v.Args[0]
   12711 		v_1 := v.Args[1]
   12712 		if v_1.Op != OpAMD64ANDQconst {
   12713 			break
   12714 		}
   12715 		if v_1.AuxInt != 63 {
   12716 			break
   12717 		}
   12718 		y := v_1.Args[0]
   12719 		v.reset(OpAMD64SARQ)
   12720 		v.AddArg(x)
   12721 		v.AddArg(y)
   12722 		return true
   12723 	}
   12724 	return false
   12725 }
   12726 func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool {
   12727 	b := v.Block
   12728 	_ = b
   12729 	// match: (SARQconst [c] (MOVQconst [d]))
   12730 	// cond:
   12731 	// result: (MOVQconst [d>>uint64(c)])
   12732 	for {
   12733 		c := v.AuxInt
   12734 		v_0 := v.Args[0]
   12735 		if v_0.Op != OpAMD64MOVQconst {
   12736 			break
   12737 		}
   12738 		d := v_0.AuxInt
   12739 		v.reset(OpAMD64MOVQconst)
   12740 		v.AuxInt = d >> uint64(c)
   12741 		return true
   12742 	}
   12743 	return false
   12744 }
   12745 func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool {
   12746 	b := v.Block
   12747 	_ = b
   12748 	// match: (SARW x (MOVQconst [c]))
   12749 	// cond:
   12750 	// result: (SARWconst [c&31] x)
   12751 	for {
   12752 		x := v.Args[0]
   12753 		v_1 := v.Args[1]
   12754 		if v_1.Op != OpAMD64MOVQconst {
   12755 			break
   12756 		}
   12757 		c := v_1.AuxInt
   12758 		v.reset(OpAMD64SARWconst)
   12759 		v.AuxInt = c & 31
   12760 		v.AddArg(x)
   12761 		return true
   12762 	}
   12763 	// match: (SARW x (MOVLconst [c]))
   12764 	// cond:
   12765 	// result: (SARWconst [c&31] x)
   12766 	for {
   12767 		x := v.Args[0]
   12768 		v_1 := v.Args[1]
   12769 		if v_1.Op != OpAMD64MOVLconst {
   12770 			break
   12771 		}
   12772 		c := v_1.AuxInt
   12773 		v.reset(OpAMD64SARWconst)
   12774 		v.AuxInt = c & 31
   12775 		v.AddArg(x)
   12776 		return true
   12777 	}
   12778 	return false
   12779 }
   12780 func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool {
   12781 	b := v.Block
   12782 	_ = b
   12783 	// match: (SARWconst [c] (MOVQconst [d]))
   12784 	// cond:
   12785 	// result: (MOVQconst [d>>uint64(c)])
   12786 	for {
   12787 		c := v.AuxInt
   12788 		v_0 := v.Args[0]
   12789 		if v_0.Op != OpAMD64MOVQconst {
   12790 			break
   12791 		}
   12792 		d := v_0.AuxInt
   12793 		v.reset(OpAMD64MOVQconst)
   12794 		v.AuxInt = d >> uint64(c)
   12795 		return true
   12796 	}
   12797 	return false
   12798 }
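// SBBLcarrymask is SBB of a register with itself: it yields -1 when the carry
// flag is set and 0 when it is clear. Given a known flags value, the mask
// folds to that constant; the ULT/UGT suffix of the flag op records the
// unsigned ordering, which is what determines the carry.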
   12799 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool {
   12800 	b := v.Block
   12801 	_ = b
   12802 	// match: (SBBLcarrymask (FlagEQ))
   12803 	// cond:
   12804 	// result: (MOVLconst [0])
   12805 	for {
   12806 		v_0 := v.Args[0]
   12807 		if v_0.Op != OpAMD64FlagEQ {
   12808 			break
   12809 		}
   12810 		v.reset(OpAMD64MOVLconst)
   12811 		v.AuxInt = 0
   12812 		return true
   12813 	}
   12814 	// match: (SBBLcarrymask (FlagLT_ULT))
   12815 	// cond:
   12816 	// result: (MOVLconst [-1])
   12817 	for {
   12818 		v_0 := v.Args[0]
   12819 		if v_0.Op != OpAMD64FlagLT_ULT {
   12820 			break
   12821 		}
   12822 		v.reset(OpAMD64MOVLconst)
   12823 		v.AuxInt = -1
   12824 		return true
   12825 	}
   12826 	// match: (SBBLcarrymask (FlagLT_UGT))
   12827 	// cond:
   12828 	// result: (MOVLconst [0])
   12829 	for {
   12830 		v_0 := v.Args[0]
   12831 		if v_0.Op != OpAMD64FlagLT_UGT {
   12832 			break
   12833 		}
   12834 		v.reset(OpAMD64MOVLconst)
   12835 		v.AuxInt = 0
   12836 		return true
   12837 	}
   12838 	// match: (SBBLcarrymask (FlagGT_ULT))
   12839 	// cond:
   12840 	// result: (MOVLconst [-1])
   12841 	for {
   12842 		v_0 := v.Args[0]
   12843 		if v_0.Op != OpAMD64FlagGT_ULT {
   12844 			break
   12845 		}
   12846 		v.reset(OpAMD64MOVLconst)
   12847 		v.AuxInt = -1
   12848 		return true
   12849 	}
   12850 	// match: (SBBLcarrymask (FlagGT_UGT))
   12851 	// cond:
   12852 	// result: (MOVLconst [0])
   12853 	for {
   12854 		v_0 := v.Args[0]
   12855 		if v_0.Op != OpAMD64FlagGT_UGT {
   12856 			break
   12857 		}
   12858 		v.reset(OpAMD64MOVLconst)
   12859 		v.AuxInt = 0
   12860 		return true
   12861 	}
   12862 	return false
   12863 }
   12864 func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool {
   12865 	b := v.Block
   12866 	_ = b
   12867 	// match: (SBBQcarrymask (FlagEQ))
   12868 	// cond:
   12869 	// result: (MOVQconst [0])
   12870 	for {
   12871 		v_0 := v.Args[0]
   12872 		if v_0.Op != OpAMD64FlagEQ {
   12873 			break
   12874 		}
   12875 		v.reset(OpAMD64MOVQconst)
   12876 		v.AuxInt = 0
   12877 		return true
   12878 	}
   12879 	// match: (SBBQcarrymask (FlagLT_ULT))
   12880 	// cond:
   12881 	// result: (MOVQconst [-1])
   12882 	for {
   12883 		v_0 := v.Args[0]
   12884 		if v_0.Op != OpAMD64FlagLT_ULT {
   12885 			break
   12886 		}
   12887 		v.reset(OpAMD64MOVQconst)
   12888 		v.AuxInt = -1
   12889 		return true
   12890 	}
   12891 	// match: (SBBQcarrymask (FlagLT_UGT))
   12892 	// cond:
   12893 	// result: (MOVQconst [0])
   12894 	for {
   12895 		v_0 := v.Args[0]
   12896 		if v_0.Op != OpAMD64FlagLT_UGT {
   12897 			break
   12898 		}
   12899 		v.reset(OpAMD64MOVQconst)
   12900 		v.AuxInt = 0
   12901 		return true
   12902 	}
   12903 	// match: (SBBQcarrymask (FlagGT_ULT))
   12904 	// cond:
   12905 	// result: (MOVQconst [-1])
   12906 	for {
   12907 		v_0 := v.Args[0]
   12908 		if v_0.Op != OpAMD64FlagGT_ULT {
   12909 			break
   12910 		}
   12911 		v.reset(OpAMD64MOVQconst)
   12912 		v.AuxInt = -1
   12913 		return true
   12914 	}
   12915 	// match: (SBBQcarrymask (FlagGT_UGT))
   12916 	// cond:
   12917 	// result: (MOVQconst [0])
   12918 	for {
   12919 		v_0 := v.Args[0]
   12920 		if v_0.Op != OpAMD64FlagGT_UGT {
   12921 			break
   12922 		}
   12923 		v.reset(OpAMD64MOVQconst)
   12924 		v.AuxInt = 0
   12925 		return true
   12926 	}
   12927 	return false
   12928 }
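// The SETcc rules all follow one template: SETcc of (InvertFlags x), flags
// produced by a comparison whose operands were swapped, becomes the dual
// condition (above <-> below, greater <-> less), and SETcc of a known flags
// value folds to (MOVLconst [0]) or (MOVLconst [1]).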
   12929 func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool {
   12930 	b := v.Block
   12931 	_ = b
   12932 	// match: (SETA (InvertFlags x))
   12933 	// cond:
   12934 	// result: (SETB x)
   12935 	for {
   12936 		v_0 := v.Args[0]
   12937 		if v_0.Op != OpAMD64InvertFlags {
   12938 			break
   12939 		}
   12940 		x := v_0.Args[0]
   12941 		v.reset(OpAMD64SETB)
   12942 		v.AddArg(x)
   12943 		return true
   12944 	}
   12945 	// match: (SETA (FlagEQ))
   12946 	// cond:
   12947 	// result: (MOVLconst [0])
   12948 	for {
   12949 		v_0 := v.Args[0]
   12950 		if v_0.Op != OpAMD64FlagEQ {
   12951 			break
   12952 		}
   12953 		v.reset(OpAMD64MOVLconst)
   12954 		v.AuxInt = 0
   12955 		return true
   12956 	}
   12957 	// match: (SETA (FlagLT_ULT))
   12958 	// cond:
   12959 	// result: (MOVLconst [0])
   12960 	for {
   12961 		v_0 := v.Args[0]
   12962 		if v_0.Op != OpAMD64FlagLT_ULT {
   12963 			break
   12964 		}
   12965 		v.reset(OpAMD64MOVLconst)
   12966 		v.AuxInt = 0
   12967 		return true
   12968 	}
   12969 	// match: (SETA (FlagLT_UGT))
   12970 	// cond:
   12971 	// result: (MOVLconst [1])
   12972 	for {
   12973 		v_0 := v.Args[0]
   12974 		if v_0.Op != OpAMD64FlagLT_UGT {
   12975 			break
   12976 		}
   12977 		v.reset(OpAMD64MOVLconst)
   12978 		v.AuxInt = 1
   12979 		return true
   12980 	}
   12981 	// match: (SETA (FlagGT_ULT))
   12982 	// cond:
   12983 	// result: (MOVLconst [0])
   12984 	for {
   12985 		v_0 := v.Args[0]
   12986 		if v_0.Op != OpAMD64FlagGT_ULT {
   12987 			break
   12988 		}
   12989 		v.reset(OpAMD64MOVLconst)
   12990 		v.AuxInt = 0
   12991 		return true
   12992 	}
   12993 	// match: (SETA (FlagGT_UGT))
   12994 	// cond:
   12995 	// result: (MOVLconst [1])
   12996 	for {
   12997 		v_0 := v.Args[0]
   12998 		if v_0.Op != OpAMD64FlagGT_UGT {
   12999 			break
   13000 		}
   13001 		v.reset(OpAMD64MOVLconst)
   13002 		v.AuxInt = 1
   13003 		return true
   13004 	}
   13005 	return false
   13006 }
   13007 func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool {
   13008 	b := v.Block
   13009 	_ = b
   13010 	// match: (SETAE (InvertFlags x))
   13011 	// cond:
   13012 	// result: (SETBE x)
   13013 	for {
   13014 		v_0 := v.Args[0]
   13015 		if v_0.Op != OpAMD64InvertFlags {
   13016 			break
   13017 		}
   13018 		x := v_0.Args[0]
   13019 		v.reset(OpAMD64SETBE)
   13020 		v.AddArg(x)
   13021 		return true
   13022 	}
   13023 	// match: (SETAE (FlagEQ))
   13024 	// cond:
   13025 	// result: (MOVLconst [1])
   13026 	for {
   13027 		v_0 := v.Args[0]
   13028 		if v_0.Op != OpAMD64FlagEQ {
   13029 			break
   13030 		}
   13031 		v.reset(OpAMD64MOVLconst)
   13032 		v.AuxInt = 1
   13033 		return true
   13034 	}
   13035 	// match: (SETAE (FlagLT_ULT))
   13036 	// cond:
   13037 	// result: (MOVLconst [0])
   13038 	for {
   13039 		v_0 := v.Args[0]
   13040 		if v_0.Op != OpAMD64FlagLT_ULT {
   13041 			break
   13042 		}
   13043 		v.reset(OpAMD64MOVLconst)
   13044 		v.AuxInt = 0
   13045 		return true
   13046 	}
   13047 	// match: (SETAE (FlagLT_UGT))
   13048 	// cond:
   13049 	// result: (MOVLconst [1])
   13050 	for {
   13051 		v_0 := v.Args[0]
   13052 		if v_0.Op != OpAMD64FlagLT_UGT {
   13053 			break
   13054 		}
   13055 		v.reset(OpAMD64MOVLconst)
   13056 		v.AuxInt = 1
   13057 		return true
   13058 	}
   13059 	// match: (SETAE (FlagGT_ULT))
   13060 	// cond:
   13061 	// result: (MOVLconst [0])
   13062 	for {
   13063 		v_0 := v.Args[0]
   13064 		if v_0.Op != OpAMD64FlagGT_ULT {
   13065 			break
   13066 		}
   13067 		v.reset(OpAMD64MOVLconst)
   13068 		v.AuxInt = 0
   13069 		return true
   13070 	}
   13071 	// match: (SETAE (FlagGT_UGT))
   13072 	// cond:
   13073 	// result: (MOVLconst [1])
   13074 	for {
   13075 		v_0 := v.Args[0]
   13076 		if v_0.Op != OpAMD64FlagGT_UGT {
   13077 			break
   13078 		}
   13079 		v.reset(OpAMD64MOVLconst)
   13080 		v.AuxInt = 1
   13081 		return true
   13082 	}
   13083 	return false
   13084 }
   13085 func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool {
   13086 	b := v.Block
   13087 	_ = b
   13088 	// match: (SETB (InvertFlags x))
   13089 	// cond:
   13090 	// result: (SETA x)
   13091 	for {
   13092 		v_0 := v.Args[0]
   13093 		if v_0.Op != OpAMD64InvertFlags {
   13094 			break
   13095 		}
   13096 		x := v_0.Args[0]
   13097 		v.reset(OpAMD64SETA)
   13098 		v.AddArg(x)
   13099 		return true
   13100 	}
   13101 	// match: (SETB (FlagEQ))
   13102 	// cond:
   13103 	// result: (MOVLconst [0])
   13104 	for {
   13105 		v_0 := v.Args[0]
   13106 		if v_0.Op != OpAMD64FlagEQ {
   13107 			break
   13108 		}
   13109 		v.reset(OpAMD64MOVLconst)
   13110 		v.AuxInt = 0
   13111 		return true
   13112 	}
   13113 	// match: (SETB (FlagLT_ULT))
   13114 	// cond:
   13115 	// result: (MOVLconst [1])
   13116 	for {
   13117 		v_0 := v.Args[0]
   13118 		if v_0.Op != OpAMD64FlagLT_ULT {
   13119 			break
   13120 		}
   13121 		v.reset(OpAMD64MOVLconst)
   13122 		v.AuxInt = 1
   13123 		return true
   13124 	}
   13125 	// match: (SETB (FlagLT_UGT))
   13126 	// cond:
   13127 	// result: (MOVLconst [0])
   13128 	for {
   13129 		v_0 := v.Args[0]
   13130 		if v_0.Op != OpAMD64FlagLT_UGT {
   13131 			break
   13132 		}
   13133 		v.reset(OpAMD64MOVLconst)
   13134 		v.AuxInt = 0
   13135 		return true
   13136 	}
   13137 	// match: (SETB (FlagGT_ULT))
   13138 	// cond:
   13139 	// result: (MOVLconst [1])
   13140 	for {
   13141 		v_0 := v.Args[0]
   13142 		if v_0.Op != OpAMD64FlagGT_ULT {
   13143 			break
   13144 		}
   13145 		v.reset(OpAMD64MOVLconst)
   13146 		v.AuxInt = 1
   13147 		return true
   13148 	}
   13149 	// match: (SETB (FlagGT_UGT))
   13150 	// cond:
   13151 	// result: (MOVLconst [0])
   13152 	for {
   13153 		v_0 := v.Args[0]
   13154 		if v_0.Op != OpAMD64FlagGT_UGT {
   13155 			break
   13156 		}
   13157 		v.reset(OpAMD64MOVLconst)
   13158 		v.AuxInt = 0
   13159 		return true
   13160 	}
   13161 	return false
   13162 }
   13163 func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool {
   13164 	b := v.Block
   13165 	_ = b
   13166 	// match: (SETBE (InvertFlags x))
   13167 	// cond:
   13168 	// result: (SETAE x)
   13169 	for {
   13170 		v_0 := v.Args[0]
   13171 		if v_0.Op != OpAMD64InvertFlags {
   13172 			break
   13173 		}
   13174 		x := v_0.Args[0]
   13175 		v.reset(OpAMD64SETAE)
   13176 		v.AddArg(x)
   13177 		return true
   13178 	}
   13179 	// match: (SETBE (FlagEQ))
   13180 	// cond:
   13181 	// result: (MOVLconst [1])
   13182 	for {
   13183 		v_0 := v.Args[0]
   13184 		if v_0.Op != OpAMD64FlagEQ {
   13185 			break
   13186 		}
   13187 		v.reset(OpAMD64MOVLconst)
   13188 		v.AuxInt = 1
   13189 		return true
   13190 	}
   13191 	// match: (SETBE (FlagLT_ULT))
   13192 	// cond:
   13193 	// result: (MOVLconst [1])
   13194 	for {
   13195 		v_0 := v.Args[0]
   13196 		if v_0.Op != OpAMD64FlagLT_ULT {
   13197 			break
   13198 		}
   13199 		v.reset(OpAMD64MOVLconst)
   13200 		v.AuxInt = 1
   13201 		return true
   13202 	}
   13203 	// match: (SETBE (FlagLT_UGT))
   13204 	// cond:
   13205 	// result: (MOVLconst [0])
   13206 	for {
   13207 		v_0 := v.Args[0]
   13208 		if v_0.Op != OpAMD64FlagLT_UGT {
   13209 			break
   13210 		}
   13211 		v.reset(OpAMD64MOVLconst)
   13212 		v.AuxInt = 0
   13213 		return true
   13214 	}
   13215 	// match: (SETBE (FlagGT_ULT))
   13216 	// cond:
   13217 	// result: (MOVLconst [1])
   13218 	for {
   13219 		v_0 := v.Args[0]
   13220 		if v_0.Op != OpAMD64FlagGT_ULT {
   13221 			break
   13222 		}
   13223 		v.reset(OpAMD64MOVLconst)
   13224 		v.AuxInt = 1
   13225 		return true
   13226 	}
   13227 	// match: (SETBE (FlagGT_UGT))
   13228 	// cond:
   13229 	// result: (MOVLconst [0])
   13230 	for {
   13231 		v_0 := v.Args[0]
   13232 		if v_0.Op != OpAMD64FlagGT_UGT {
   13233 			break
   13234 		}
   13235 		v.reset(OpAMD64MOVLconst)
   13236 		v.AuxInt = 0
   13237 		return true
   13238 	}
   13239 	return false
   13240 }
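// Equality is symmetric, so SETEQ is unchanged by InvertFlags; only the
// known-flags cases fold to a constant.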
   13241 func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool {
   13242 	b := v.Block
   13243 	_ = b
   13244 	// match: (SETEQ (InvertFlags x))
   13245 	// cond:
   13246 	// result: (SETEQ x)
   13247 	for {
   13248 		v_0 := v.Args[0]
   13249 		if v_0.Op != OpAMD64InvertFlags {
   13250 			break
   13251 		}
   13252 		x := v_0.Args[0]
   13253 		v.reset(OpAMD64SETEQ)
   13254 		v.AddArg(x)
   13255 		return true
   13256 	}
   13257 	// match: (SETEQ (FlagEQ))
   13258 	// cond:
   13259 	// result: (MOVLconst [1])
   13260 	for {
   13261 		v_0 := v.Args[0]
   13262 		if v_0.Op != OpAMD64FlagEQ {
   13263 			break
   13264 		}
   13265 		v.reset(OpAMD64MOVLconst)
   13266 		v.AuxInt = 1
   13267 		return true
   13268 	}
   13269 	// match: (SETEQ (FlagLT_ULT))
   13270 	// cond:
   13271 	// result: (MOVLconst [0])
   13272 	for {
   13273 		v_0 := v.Args[0]
   13274 		if v_0.Op != OpAMD64FlagLT_ULT {
   13275 			break
   13276 		}
   13277 		v.reset(OpAMD64MOVLconst)
   13278 		v.AuxInt = 0
   13279 		return true
   13280 	}
   13281 	// match: (SETEQ (FlagLT_UGT))
   13282 	// cond:
   13283 	// result: (MOVLconst [0])
   13284 	for {
   13285 		v_0 := v.Args[0]
   13286 		if v_0.Op != OpAMD64FlagLT_UGT {
   13287 			break
   13288 		}
   13289 		v.reset(OpAMD64MOVLconst)
   13290 		v.AuxInt = 0
   13291 		return true
   13292 	}
   13293 	// match: (SETEQ (FlagGT_ULT))
   13294 	// cond:
   13295 	// result: (MOVLconst [0])
   13296 	for {
   13297 		v_0 := v.Args[0]
   13298 		if v_0.Op != OpAMD64FlagGT_ULT {
   13299 			break
   13300 		}
   13301 		v.reset(OpAMD64MOVLconst)
   13302 		v.AuxInt = 0
   13303 		return true
   13304 	}
   13305 	// match: (SETEQ (FlagGT_UGT))
   13306 	// cond:
   13307 	// result: (MOVLconst [0])
   13308 	for {
   13309 		v_0 := v.Args[0]
   13310 		if v_0.Op != OpAMD64FlagGT_UGT {
   13311 			break
   13312 		}
   13313 		v.reset(OpAMD64MOVLconst)
   13314 		v.AuxInt = 0
   13315 		return true
   13316 	}
   13317 	return false
   13318 }
   13319 func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool {
   13320 	b := v.Block
   13321 	_ = b
   13322 	// match: (SETG (InvertFlags x))
   13323 	// cond:
   13324 	// result: (SETL x)
   13325 	for {
   13326 		v_0 := v.Args[0]
   13327 		if v_0.Op != OpAMD64InvertFlags {
   13328 			break
   13329 		}
   13330 		x := v_0.Args[0]
   13331 		v.reset(OpAMD64SETL)
   13332 		v.AddArg(x)
   13333 		return true
   13334 	}
   13335 	// match: (SETG (FlagEQ))
   13336 	// cond:
   13337 	// result: (MOVLconst [0])
   13338 	for {
   13339 		v_0 := v.Args[0]
   13340 		if v_0.Op != OpAMD64FlagEQ {
   13341 			break
   13342 		}
   13343 		v.reset(OpAMD64MOVLconst)
   13344 		v.AuxInt = 0
   13345 		return true
   13346 	}
   13347 	// match: (SETG (FlagLT_ULT))
   13348 	// cond:
   13349 	// result: (MOVLconst [0])
   13350 	for {
   13351 		v_0 := v.Args[0]
   13352 		if v_0.Op != OpAMD64FlagLT_ULT {
   13353 			break
   13354 		}
   13355 		v.reset(OpAMD64MOVLconst)
   13356 		v.AuxInt = 0
   13357 		return true
   13358 	}
   13359 	// match: (SETG (FlagLT_UGT))
   13360 	// cond:
   13361 	// result: (MOVLconst [0])
   13362 	for {
   13363 		v_0 := v.Args[0]
   13364 		if v_0.Op != OpAMD64FlagLT_UGT {
   13365 			break
   13366 		}
   13367 		v.reset(OpAMD64MOVLconst)
   13368 		v.AuxInt = 0
   13369 		return true
   13370 	}
   13371 	// match: (SETG (FlagGT_ULT))
   13372 	// cond:
   13373 	// result: (MOVLconst [1])
   13374 	for {
   13375 		v_0 := v.Args[0]
   13376 		if v_0.Op != OpAMD64FlagGT_ULT {
   13377 			break
   13378 		}
   13379 		v.reset(OpAMD64MOVLconst)
   13380 		v.AuxInt = 1
   13381 		return true
   13382 	}
   13383 	// match: (SETG (FlagGT_UGT))
   13384 	// cond:
   13385 	// result: (MOVLconst [1])
   13386 	for {
   13387 		v_0 := v.Args[0]
   13388 		if v_0.Op != OpAMD64FlagGT_UGT {
   13389 			break
   13390 		}
   13391 		v.reset(OpAMD64MOVLconst)
   13392 		v.AuxInt = 1
   13393 		return true
   13394 	}
   13395 	return false
   13396 }
   13397 func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool {
   13398 	b := v.Block
   13399 	_ = b
   13400 	// match: (SETGE (InvertFlags x))
   13401 	// cond:
   13402 	// result: (SETLE x)
   13403 	for {
   13404 		v_0 := v.Args[0]
   13405 		if v_0.Op != OpAMD64InvertFlags {
   13406 			break
   13407 		}
   13408 		x := v_0.Args[0]
   13409 		v.reset(OpAMD64SETLE)
   13410 		v.AddArg(x)
   13411 		return true
   13412 	}
   13413 	// match: (SETGE (FlagEQ))
   13414 	// cond:
   13415 	// result: (MOVLconst [1])
   13416 	for {
   13417 		v_0 := v.Args[0]
   13418 		if v_0.Op != OpAMD64FlagEQ {
   13419 			break
   13420 		}
   13421 		v.reset(OpAMD64MOVLconst)
   13422 		v.AuxInt = 1
   13423 		return true
   13424 	}
   13425 	// match: (SETGE (FlagLT_ULT))
   13426 	// cond:
   13427 	// result: (MOVLconst [0])
   13428 	for {
   13429 		v_0 := v.Args[0]
   13430 		if v_0.Op != OpAMD64FlagLT_ULT {
   13431 			break
   13432 		}
   13433 		v.reset(OpAMD64MOVLconst)
   13434 		v.AuxInt = 0
   13435 		return true
   13436 	}
   13437 	// match: (SETGE (FlagLT_UGT))
   13438 	// cond:
   13439 	// result: (MOVLconst [0])
   13440 	for {
   13441 		v_0 := v.Args[0]
   13442 		if v_0.Op != OpAMD64FlagLT_UGT {
   13443 			break
   13444 		}
   13445 		v.reset(OpAMD64MOVLconst)
   13446 		v.AuxInt = 0
   13447 		return true
   13448 	}
   13449 	// match: (SETGE (FlagGT_ULT))
   13450 	// cond:
   13451 	// result: (MOVLconst [1])
   13452 	for {
   13453 		v_0 := v.Args[0]
   13454 		if v_0.Op != OpAMD64FlagGT_ULT {
   13455 			break
   13456 		}
   13457 		v.reset(OpAMD64MOVLconst)
   13458 		v.AuxInt = 1
   13459 		return true
   13460 	}
   13461 	// match: (SETGE (FlagGT_UGT))
   13462 	// cond:
   13463 	// result: (MOVLconst [1])
   13464 	for {
   13465 		v_0 := v.Args[0]
   13466 		if v_0.Op != OpAMD64FlagGT_UGT {
   13467 			break
   13468 		}
   13469 		v.reset(OpAMD64MOVLconst)
   13470 		v.AuxInt = 1
   13471 		return true
   13472 	}
   13473 	return false
   13474 }
   13475 func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool {
   13476 	b := v.Block
   13477 	_ = b
   13478 	// match: (SETL (InvertFlags x))
   13479 	// cond:
   13480 	// result: (SETG x)
   13481 	for {
   13482 		v_0 := v.Args[0]
   13483 		if v_0.Op != OpAMD64InvertFlags {
   13484 			break
   13485 		}
   13486 		x := v_0.Args[0]
   13487 		v.reset(OpAMD64SETG)
   13488 		v.AddArg(x)
   13489 		return true
   13490 	}
   13491 	// match: (SETL (FlagEQ))
   13492 	// cond:
   13493 	// result: (MOVLconst [0])
   13494 	for {
   13495 		v_0 := v.Args[0]
   13496 		if v_0.Op != OpAMD64FlagEQ {
   13497 			break
   13498 		}
   13499 		v.reset(OpAMD64MOVLconst)
   13500 		v.AuxInt = 0
   13501 		return true
   13502 	}
   13503 	// match: (SETL (FlagLT_ULT))
   13504 	// cond:
   13505 	// result: (MOVLconst [1])
   13506 	for {
   13507 		v_0 := v.Args[0]
   13508 		if v_0.Op != OpAMD64FlagLT_ULT {
   13509 			break
   13510 		}
   13511 		v.reset(OpAMD64MOVLconst)
   13512 		v.AuxInt = 1
   13513 		return true
   13514 	}
   13515 	// match: (SETL (FlagLT_UGT))
   13516 	// cond:
   13517 	// result: (MOVLconst [1])
   13518 	for {
   13519 		v_0 := v.Args[0]
   13520 		if v_0.Op != OpAMD64FlagLT_UGT {
   13521 			break
   13522 		}
   13523 		v.reset(OpAMD64MOVLconst)
   13524 		v.AuxInt = 1
   13525 		return true
   13526 	}
   13527 	// match: (SETL (FlagGT_ULT))
   13528 	// cond:
   13529 	// result: (MOVLconst [0])
   13530 	for {
   13531 		v_0 := v.Args[0]
   13532 		if v_0.Op != OpAMD64FlagGT_ULT {
   13533 			break
   13534 		}
   13535 		v.reset(OpAMD64MOVLconst)
   13536 		v.AuxInt = 0
   13537 		return true
   13538 	}
   13539 	// match: (SETL (FlagGT_UGT))
   13540 	// cond:
   13541 	// result: (MOVLconst [0])
   13542 	for {
   13543 		v_0 := v.Args[0]
   13544 		if v_0.Op != OpAMD64FlagGT_UGT {
   13545 			break
   13546 		}
   13547 		v.reset(OpAMD64MOVLconst)
   13548 		v.AuxInt = 0
   13549 		return true
   13550 	}
   13551 	return false
   13552 }
   13553 func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool {
   13554 	b := v.Block
   13555 	_ = b
   13556 	// match: (SETLE (InvertFlags x))
   13557 	// cond:
   13558 	// result: (SETGE x)
   13559 	for {
   13560 		v_0 := v.Args[0]
   13561 		if v_0.Op != OpAMD64InvertFlags {
   13562 			break
   13563 		}
   13564 		x := v_0.Args[0]
   13565 		v.reset(OpAMD64SETGE)
   13566 		v.AddArg(x)
   13567 		return true
   13568 	}
   13569 	// match: (SETLE (FlagEQ))
   13570 	// cond:
   13571 	// result: (MOVLconst [1])
   13572 	for {
   13573 		v_0 := v.Args[0]
   13574 		if v_0.Op != OpAMD64FlagEQ {
   13575 			break
   13576 		}
   13577 		v.reset(OpAMD64MOVLconst)
   13578 		v.AuxInt = 1
   13579 		return true
   13580 	}
   13581 	// match: (SETLE (FlagLT_ULT))
   13582 	// cond:
   13583 	// result: (MOVLconst [1])
   13584 	for {
   13585 		v_0 := v.Args[0]
   13586 		if v_0.Op != OpAMD64FlagLT_ULT {
   13587 			break
   13588 		}
   13589 		v.reset(OpAMD64MOVLconst)
   13590 		v.AuxInt = 1
   13591 		return true
   13592 	}
   13593 	// match: (SETLE (FlagLT_UGT))
   13594 	// cond:
   13595 	// result: (MOVLconst [1])
   13596 	for {
   13597 		v_0 := v.Args[0]
   13598 		if v_0.Op != OpAMD64FlagLT_UGT {
   13599 			break
   13600 		}
   13601 		v.reset(OpAMD64MOVLconst)
   13602 		v.AuxInt = 1
   13603 		return true
   13604 	}
   13605 	// match: (SETLE (FlagGT_ULT))
   13606 	// cond:
   13607 	// result: (MOVLconst [0])
   13608 	for {
   13609 		v_0 := v.Args[0]
   13610 		if v_0.Op != OpAMD64FlagGT_ULT {
   13611 			break
   13612 		}
   13613 		v.reset(OpAMD64MOVLconst)
   13614 		v.AuxInt = 0
   13615 		return true
   13616 	}
   13617 	// match: (SETLE (FlagGT_UGT))
   13618 	// cond:
   13619 	// result: (MOVLconst [0])
   13620 	for {
   13621 		v_0 := v.Args[0]
   13622 		if v_0.Op != OpAMD64FlagGT_UGT {
   13623 			break
   13624 		}
   13625 		v.reset(OpAMD64MOVLconst)
   13626 		v.AuxInt = 0
   13627 		return true
   13628 	}
   13629 	return false
   13630 }
   13631 func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool {
   13632 	b := v.Block
   13633 	_ = b
   13634 	// match: (SETNE (InvertFlags x))
   13635 	// cond:
   13636 	// result: (SETNE x)
   13637 	for {
   13638 		v_0 := v.Args[0]
   13639 		if v_0.Op != OpAMD64InvertFlags {
   13640 			break
   13641 		}
   13642 		x := v_0.Args[0]
   13643 		v.reset(OpAMD64SETNE)
   13644 		v.AddArg(x)
   13645 		return true
   13646 	}
   13647 	// match: (SETNE (FlagEQ))
   13648 	// cond:
   13649 	// result: (MOVLconst [0])
   13650 	for {
   13651 		v_0 := v.Args[0]
   13652 		if v_0.Op != OpAMD64FlagEQ {
   13653 			break
   13654 		}
   13655 		v.reset(OpAMD64MOVLconst)
   13656 		v.AuxInt = 0
   13657 		return true
   13658 	}
   13659 	// match: (SETNE (FlagLT_ULT))
   13660 	// cond:
   13661 	// result: (MOVLconst [1])
   13662 	for {
   13663 		v_0 := v.Args[0]
   13664 		if v_0.Op != OpAMD64FlagLT_ULT {
   13665 			break
   13666 		}
   13667 		v.reset(OpAMD64MOVLconst)
   13668 		v.AuxInt = 1
   13669 		return true
   13670 	}
   13671 	// match: (SETNE (FlagLT_UGT))
   13672 	// cond:
   13673 	// result: (MOVLconst [1])
   13674 	for {
   13675 		v_0 := v.Args[0]
   13676 		if v_0.Op != OpAMD64FlagLT_UGT {
   13677 			break
   13678 		}
   13679 		v.reset(OpAMD64MOVLconst)
   13680 		v.AuxInt = 1
   13681 		return true
   13682 	}
   13683 	// match: (SETNE (FlagGT_ULT))
   13684 	// cond:
   13685 	// result: (MOVLconst [1])
   13686 	for {
   13687 		v_0 := v.Args[0]
   13688 		if v_0.Op != OpAMD64FlagGT_ULT {
   13689 			break
   13690 		}
   13691 		v.reset(OpAMD64MOVLconst)
   13692 		v.AuxInt = 1
   13693 		return true
   13694 	}
   13695 	// match: (SETNE (FlagGT_UGT))
   13696 	// cond:
   13697 	// result: (MOVLconst [1])
   13698 	for {
   13699 		v_0 := v.Args[0]
   13700 		if v_0.Op != OpAMD64FlagGT_UGT {
   13701 			break
   13702 		}
   13703 		v.reset(OpAMD64MOVLconst)
   13704 		v.AuxInt = 1
   13705 		return true
   13706 	}
   13707 	return false
   13708 }
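// The variable-shift rules below lean on x86 shift semantics: the hardware
// masks a shift count in CL to 5 bits for 8/16/32-bit operands and to 6 bits
// for 64-bit operands. A constant count can therefore be folded with &31 (or
// &63 for the Q forms) without changing behavior, and an explicit AND of the
// count with that same mask is redundant and is simply dropped. For example,
// for the 32-bit SHLL op:
//
//	x SHLL 35  ==  x SHLL (35 & 31)  ==  x SHLL 3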
   13709 func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
   13710 	b := v.Block
   13711 	_ = b
   13712 	// match: (SHLL x (MOVQconst [c]))
   13713 	// cond:
   13714 	// result: (SHLLconst [c&31] x)
   13715 	for {
   13716 		x := v.Args[0]
   13717 		v_1 := v.Args[1]
   13718 		if v_1.Op != OpAMD64MOVQconst {
   13719 			break
   13720 		}
   13721 		c := v_1.AuxInt
   13722 		v.reset(OpAMD64SHLLconst)
   13723 		v.AuxInt = c & 31
   13724 		v.AddArg(x)
   13725 		return true
   13726 	}
   13727 	// match: (SHLL x (MOVLconst [c]))
   13728 	// cond:
   13729 	// result: (SHLLconst [c&31] x)
   13730 	for {
   13731 		x := v.Args[0]
   13732 		v_1 := v.Args[1]
   13733 		if v_1.Op != OpAMD64MOVLconst {
   13734 			break
   13735 		}
   13736 		c := v_1.AuxInt
   13737 		v.reset(OpAMD64SHLLconst)
   13738 		v.AuxInt = c & 31
   13739 		v.AddArg(x)
   13740 		return true
   13741 	}
   13742 	// match: (SHLL x (ANDLconst [31] y))
   13743 	// cond:
   13744 	// result: (SHLL x y)
   13745 	for {
   13746 		x := v.Args[0]
   13747 		v_1 := v.Args[1]
   13748 		if v_1.Op != OpAMD64ANDLconst {
   13749 			break
   13750 		}
   13751 		if v_1.AuxInt != 31 {
   13752 			break
   13753 		}
   13754 		y := v_1.Args[0]
   13755 		v.reset(OpAMD64SHLL)
   13756 		v.AddArg(x)
   13757 		v.AddArg(y)
   13758 		return true
   13759 	}
   13760 	return false
   13761 }
   13762 func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool {
   13763 	b := v.Block
   13764 	_ = b
   13765 	// match: (SHLQ x (MOVQconst [c]))
   13766 	// cond:
   13767 	// result: (SHLQconst [c&63] x)
   13768 	for {
   13769 		x := v.Args[0]
   13770 		v_1 := v.Args[1]
   13771 		if v_1.Op != OpAMD64MOVQconst {
   13772 			break
   13773 		}
   13774 		c := v_1.AuxInt
   13775 		v.reset(OpAMD64SHLQconst)
   13776 		v.AuxInt = c & 63
   13777 		v.AddArg(x)
   13778 		return true
   13779 	}
   13780 	// match: (SHLQ x (MOVLconst [c]))
   13781 	// cond:
   13782 	// result: (SHLQconst [c&63] x)
   13783 	for {
   13784 		x := v.Args[0]
   13785 		v_1 := v.Args[1]
   13786 		if v_1.Op != OpAMD64MOVLconst {
   13787 			break
   13788 		}
   13789 		c := v_1.AuxInt
   13790 		v.reset(OpAMD64SHLQconst)
   13791 		v.AuxInt = c & 63
   13792 		v.AddArg(x)
   13793 		return true
   13794 	}
   13795 	// match: (SHLQ x (ANDQconst [63] y))
   13796 	// cond:
   13797 	// result: (SHLQ x y)
   13798 	for {
   13799 		x := v.Args[0]
   13800 		v_1 := v.Args[1]
   13801 		if v_1.Op != OpAMD64ANDQconst {
   13802 			break
   13803 		}
   13804 		if v_1.AuxInt != 63 {
   13805 			break
   13806 		}
   13807 		y := v_1.Args[0]
   13808 		v.reset(OpAMD64SHLQ)
   13809 		v.AddArg(x)
   13810 		v.AddArg(y)
   13811 		return true
   13812 	}
   13813 	return false
   13814 }
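// Note that SHRB and SHRW below still mask a constant count with &31 rather
// than &7 or &15: the hardware masks the count modulo 32 for every operand
// size narrower than 64 bits, so a byte shifted right by, say, 20 is well
// defined (it yields 0) and must not be reduced modulo the operand width.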
   13815 func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool {
   13816 	b := v.Block
   13817 	_ = b
   13818 	// match: (SHRB x (MOVQconst [c]))
   13819 	// cond:
   13820 	// result: (SHRBconst [c&31] x)
   13821 	for {
   13822 		x := v.Args[0]
   13823 		v_1 := v.Args[1]
   13824 		if v_1.Op != OpAMD64MOVQconst {
   13825 			break
   13826 		}
   13827 		c := v_1.AuxInt
   13828 		v.reset(OpAMD64SHRBconst)
   13829 		v.AuxInt = c & 31
   13830 		v.AddArg(x)
   13831 		return true
   13832 	}
   13833 	// match: (SHRB x (MOVLconst [c]))
   13834 	// cond:
   13835 	// result: (SHRBconst [c&31] x)
   13836 	for {
   13837 		x := v.Args[0]
   13838 		v_1 := v.Args[1]
   13839 		if v_1.Op != OpAMD64MOVLconst {
   13840 			break
   13841 		}
   13842 		c := v_1.AuxInt
   13843 		v.reset(OpAMD64SHRBconst)
   13844 		v.AuxInt = c & 31
   13845 		v.AddArg(x)
   13846 		return true
   13847 	}
   13848 	return false
   13849 }
   13850 func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool {
   13851 	b := v.Block
   13852 	_ = b
   13853 	// match: (SHRL x (MOVQconst [c]))
   13854 	// cond:
   13855 	// result: (SHRLconst [c&31] x)
   13856 	for {
   13857 		x := v.Args[0]
   13858 		v_1 := v.Args[1]
   13859 		if v_1.Op != OpAMD64MOVQconst {
   13860 			break
   13861 		}
   13862 		c := v_1.AuxInt
   13863 		v.reset(OpAMD64SHRLconst)
   13864 		v.AuxInt = c & 31
   13865 		v.AddArg(x)
   13866 		return true
   13867 	}
   13868 	// match: (SHRL x (MOVLconst [c]))
   13869 	// cond:
   13870 	// result: (SHRLconst [c&31] x)
   13871 	for {
   13872 		x := v.Args[0]
   13873 		v_1 := v.Args[1]
   13874 		if v_1.Op != OpAMD64MOVLconst {
   13875 			break
   13876 		}
   13877 		c := v_1.AuxInt
   13878 		v.reset(OpAMD64SHRLconst)
   13879 		v.AuxInt = c & 31
   13880 		v.AddArg(x)
   13881 		return true
   13882 	}
   13883 	// match: (SHRL x (ANDLconst [31] y))
   13884 	// cond:
   13885 	// result: (SHRL x y)
   13886 	for {
   13887 		x := v.Args[0]
   13888 		v_1 := v.Args[1]
   13889 		if v_1.Op != OpAMD64ANDLconst {
   13890 			break
   13891 		}
   13892 		if v_1.AuxInt != 31 {
   13893 			break
   13894 		}
   13895 		y := v_1.Args[0]
   13896 		v.reset(OpAMD64SHRL)
   13897 		v.AddArg(x)
   13898 		v.AddArg(y)
   13899 		return true
   13900 	}
   13901 	return false
   13902 }
   13903 func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool {
   13904 	b := v.Block
   13905 	_ = b
   13906 	// match: (SHRQ x (MOVQconst [c]))
   13907 	// cond:
   13908 	// result: (SHRQconst [c&63] x)
   13909 	for {
   13910 		x := v.Args[0]
   13911 		v_1 := v.Args[1]
   13912 		if v_1.Op != OpAMD64MOVQconst {
   13913 			break
   13914 		}
   13915 		c := v_1.AuxInt
   13916 		v.reset(OpAMD64SHRQconst)
   13917 		v.AuxInt = c & 63
   13918 		v.AddArg(x)
   13919 		return true
   13920 	}
   13921 	// match: (SHRQ x (MOVLconst [c]))
   13922 	// cond:
   13923 	// result: (SHRQconst [c&63] x)
   13924 	for {
   13925 		x := v.Args[0]
   13926 		v_1 := v.Args[1]
   13927 		if v_1.Op != OpAMD64MOVLconst {
   13928 			break
   13929 		}
   13930 		c := v_1.AuxInt
   13931 		v.reset(OpAMD64SHRQconst)
   13932 		v.AuxInt = c & 63
   13933 		v.AddArg(x)
   13934 		return true
   13935 	}
   13936 	// match: (SHRQ x (ANDQconst [63] y))
   13937 	// cond:
   13938 	// result: (SHRQ x y)
   13939 	for {
   13940 		x := v.Args[0]
   13941 		v_1 := v.Args[1]
   13942 		if v_1.Op != OpAMD64ANDQconst {
   13943 			break
   13944 		}
   13945 		if v_1.AuxInt != 63 {
   13946 			break
   13947 		}
   13948 		y := v_1.Args[0]
   13949 		v.reset(OpAMD64SHRQ)
   13950 		v.AddArg(x)
   13951 		v.AddArg(y)
   13952 		return true
   13953 	}
   13954 	return false
   13955 }
   13956 func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
   13957 	b := v.Block
   13958 	_ = b
   13959 	// match: (SHRW x (MOVQconst [c]))
   13960 	// cond:
   13961 	// result: (SHRWconst [c&31] x)
   13962 	for {
   13963 		x := v.Args[0]
   13964 		v_1 := v.Args[1]
   13965 		if v_1.Op != OpAMD64MOVQconst {
   13966 			break
   13967 		}
   13968 		c := v_1.AuxInt
   13969 		v.reset(OpAMD64SHRWconst)
   13970 		v.AuxInt = c & 31
   13971 		v.AddArg(x)
   13972 		return true
   13973 	}
   13974 	// match: (SHRW x (MOVLconst [c]))
   13975 	// cond:
   13976 	// result: (SHRWconst [c&31] x)
   13977 	for {
   13978 		x := v.Args[0]
   13979 		v_1 := v.Args[1]
   13980 		if v_1.Op != OpAMD64MOVLconst {
   13981 			break
   13982 		}
   13983 		c := v_1.AuxInt
   13984 		v.reset(OpAMD64SHRWconst)
   13985 		v.AuxInt = c & 31
   13986 		v.AddArg(x)
   13987 		return true
   13988 	}
   13989 	return false
   13990 }
   13991 func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool {
   13992 	b := v.Block
   13993 	_ = b
   13994 	// match: (SUBL x (MOVLconst [c]))
   13995 	// cond:
   13996 	// result: (SUBLconst x [c])
   13997 	for {
   13998 		x := v.Args[0]
   13999 		v_1 := v.Args[1]
   14000 		if v_1.Op != OpAMD64MOVLconst {
   14001 			break
   14002 		}
   14003 		c := v_1.AuxInt
   14004 		v.reset(OpAMD64SUBLconst)
   14005 		v.AuxInt = c
   14006 		v.AddArg(x)
   14007 		return true
   14008 	}
   14009 	// match: (SUBL (MOVLconst [c]) x)
   14010 	// cond:
   14011 	// result: (NEGL (SUBLconst <v.Type> x [c]))
   14012 	for {
   14013 		v_0 := v.Args[0]
   14014 		if v_0.Op != OpAMD64MOVLconst {
   14015 			break
   14016 		}
   14017 		c := v_0.AuxInt
   14018 		x := v.Args[1]
   14019 		v.reset(OpAMD64NEGL)
   14020 		v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type)
   14021 		v0.AuxInt = c
   14022 		v0.AddArg(x)
   14023 		v.AddArg(v0)
   14024 		return true
   14025 	}
   14026 	// match: (SUBL x x)
   14027 	// cond:
   14028 	// result: (MOVLconst [0])
   14029 	for {
   14030 		x := v.Args[0]
   14031 		if x != v.Args[1] {
   14032 			break
   14033 		}
   14034 		v.reset(OpAMD64MOVLconst)
   14035 		v.AuxInt = 0
   14036 		return true
   14037 	}
   14038 	return false
   14039 }
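// Subtraction is not commutative, so a constant left operand cannot simply be
// swapped into SUBLconst the way ADDL handles it. The second SUBL rule above
// uses the identity c - x == -(x - c) instead: 5 - x, for example, becomes
// NEGL (SUBLconst [5] x).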
   14040 func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool {
   14041 	b := v.Block
   14042 	_ = b
   14043 	// match: (SUBLconst [c] x)
   14044 	// cond: int32(c) == 0
   14045 	// result: x
   14046 	for {
   14047 		c := v.AuxInt
   14048 		x := v.Args[0]
   14049 		if !(int32(c) == 0) {
   14050 			break
   14051 		}
   14052 		v.reset(OpCopy)
   14053 		v.Type = x.Type
   14054 		v.AddArg(x)
   14055 		return true
   14056 	}
   14057 	// match: (SUBLconst [c] x)
   14058 	// cond:
   14059 	// result: (ADDLconst [int64(int32(-c))] x)
   14060 	for {
   14061 		c := v.AuxInt
   14062 		x := v.Args[0]
   14063 		v.reset(OpAMD64ADDLconst)
   14064 		v.AuxInt = int64(int32(-c))
   14065 		v.AddArg(x)
   14066 		return true
   14067 	}
   14068 }
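// SUBLconst needs no range guard to become ADDLconst: the constant is negated
// in 32-bit arithmetic (int64(int32(-c))), and even -1<<31, the one int32
// with no int32 negation, wraps back to itself, which is still correct since
// adding and subtracting 1<<31 agree modulo 2^32. The 64-bit variant further
// down has no such luck and must exclude that value explicitly.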
   14069 func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool {
   14070 	b := v.Block
   14071 	_ = b
   14072 	// match: (SUBQ x (MOVQconst [c]))
   14073 	// cond: is32Bit(c)
   14074 	// result: (SUBQconst x [c])
   14075 	for {
   14076 		x := v.Args[0]
   14077 		v_1 := v.Args[1]
   14078 		if v_1.Op != OpAMD64MOVQconst {
   14079 			break
   14080 		}
   14081 		c := v_1.AuxInt
   14082 		if !(is32Bit(c)) {
   14083 			break
   14084 		}
   14085 		v.reset(OpAMD64SUBQconst)
   14086 		v.AuxInt = c
   14087 		v.AddArg(x)
   14088 		return true
   14089 	}
   14090 	// match: (SUBQ (MOVQconst [c]) x)
   14091 	// cond: is32Bit(c)
   14092 	// result: (NEGQ (SUBQconst <v.Type> x [c]))
   14093 	for {
   14094 		v_0 := v.Args[0]
   14095 		if v_0.Op != OpAMD64MOVQconst {
   14096 			break
   14097 		}
   14098 		c := v_0.AuxInt
   14099 		x := v.Args[1]
   14100 		if !(is32Bit(c)) {
   14101 			break
   14102 		}
   14103 		v.reset(OpAMD64NEGQ)
   14104 		v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type)
   14105 		v0.AuxInt = c
   14106 		v0.AddArg(x)
   14107 		v.AddArg(v0)
   14108 		return true
   14109 	}
   14110 	// match: (SUBQ x x)
   14111 	// cond:
   14112 	// result: (MOVQconst [0])
   14113 	for {
   14114 		x := v.Args[0]
   14115 		if x != v.Args[1] {
   14116 			break
   14117 		}
   14118 		v.reset(OpAMD64MOVQconst)
   14119 		v.AuxInt = 0
   14120 		return true
   14121 	}
   14122 	return false
   14123 }
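// The is32Bit guards here reflect the instruction encoding: AMD64 ALU
// immediates are at most 32 bits and are sign-extended to 64, so a MOVQconst
// operand can be folded into SUBQconst only when it fits in an int32.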
   14124 func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
   14125 	b := v.Block
   14126 	_ = b
   14127 	// match: (SUBQconst [0] x)
   14128 	// cond:
   14129 	// result: x
   14130 	for {
   14131 		if v.AuxInt != 0 {
   14132 			break
   14133 		}
   14134 		x := v.Args[0]
   14135 		v.reset(OpCopy)
   14136 		v.Type = x.Type
   14137 		v.AddArg(x)
   14138 		return true
   14139 	}
   14140 	// match: (SUBQconst [c] x)
   14141 	// cond: c != -(1<<31)
   14142 	// result: (ADDQconst [-c] x)
   14143 	for {
   14144 		c := v.AuxInt
   14145 		x := v.Args[0]
   14146 		if !(c != -(1 << 31)) {
   14147 			break
   14148 		}
   14149 		v.reset(OpAMD64ADDQconst)
   14150 		v.AuxInt = -c
   14151 		v.AddArg(x)
   14152 		return true
   14153 	}
   14154 	// match: (SUBQconst (MOVQconst [d]) [c])
   14155 	// cond:
   14156 	// result: (MOVQconst [d-c])
   14157 	for {
   14158 		c := v.AuxInt
   14159 		v_0 := v.Args[0]
   14160 		if v_0.Op != OpAMD64MOVQconst {
   14161 			break
   14162 		}
   14163 		d := v_0.AuxInt
   14164 		v.reset(OpAMD64MOVQconst)
   14165 		v.AuxInt = d - c
   14166 		return true
   14167 	}
   14168 	// match: (SUBQconst (SUBQconst x [d]) [c])
   14169 	// cond: is32Bit(-c-d)
   14170 	// result: (ADDQconst [-c-d] x)
   14171 	for {
   14172 		c := v.AuxInt
   14173 		v_0 := v.Args[0]
   14174 		if v_0.Op != OpAMD64SUBQconst {
   14175 			break
   14176 		}
   14177 		d := v_0.AuxInt
   14178 		x := v_0.Args[0]
   14179 		if !(is32Bit(-c - d)) {
   14180 			break
   14181 		}
   14182 		v.reset(OpAMD64ADDQconst)
   14183 		v.AuxInt = -c - d
   14184 		v.AddArg(x)
   14185 		return true
   14186 	}
   14187 	return false
   14188 }
   14189 func rewriteValueAMD64_OpAMD64XADDLlock(v *Value, config *Config) bool {
   14190 	b := v.Block
   14191 	_ = b
   14192 	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
   14193 	// cond: is32Bit(off1+off2)
   14194 	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
   14195 	for {
   14196 		off1 := v.AuxInt
   14197 		sym := v.Aux
   14198 		val := v.Args[0]
   14199 		v_1 := v.Args[1]
   14200 		if v_1.Op != OpAMD64ADDQconst {
   14201 			break
   14202 		}
   14203 		off2 := v_1.AuxInt
   14204 		ptr := v_1.Args[0]
   14205 		mem := v.Args[2]
   14206 		if !(is32Bit(off1 + off2)) {
   14207 			break
   14208 		}
   14209 		v.reset(OpAMD64XADDLlock)
   14210 		v.AuxInt = off1 + off2
   14211 		v.Aux = sym
   14212 		v.AddArg(val)
   14213 		v.AddArg(ptr)
   14214 		v.AddArg(mem)
   14215 		return true
   14216 	}
   14217 	return false
   14218 }
   14219 func rewriteValueAMD64_OpAMD64XADDQlock(v *Value, config *Config) bool {
   14220 	b := v.Block
   14221 	_ = b
   14222 	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
   14223 	// cond: is32Bit(off1+off2)
   14224 	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
   14225 	for {
   14226 		off1 := v.AuxInt
   14227 		sym := v.Aux
   14228 		val := v.Args[0]
   14229 		v_1 := v.Args[1]
   14230 		if v_1.Op != OpAMD64ADDQconst {
   14231 			break
   14232 		}
   14233 		off2 := v_1.AuxInt
   14234 		ptr := v_1.Args[0]
   14235 		mem := v.Args[2]
   14236 		if !(is32Bit(off1 + off2)) {
   14237 			break
   14238 		}
   14239 		v.reset(OpAMD64XADDQlock)
   14240 		v.AuxInt = off1 + off2
   14241 		v.Aux = sym
   14242 		v.AddArg(val)
   14243 		v.AddArg(ptr)
   14244 		v.AddArg(mem)
   14245 		return true
   14246 	}
   14247 	return false
   14248 }
   14249 func rewriteValueAMD64_OpAMD64XCHGL(v *Value, config *Config) bool {
   14250 	b := v.Block
   14251 	_ = b
   14252 	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
   14253 	// cond: is32Bit(off1+off2)
   14254 	// result: (XCHGL [off1+off2] {sym} val ptr mem)
   14255 	for {
   14256 		off1 := v.AuxInt
   14257 		sym := v.Aux
   14258 		val := v.Args[0]
   14259 		v_1 := v.Args[1]
   14260 		if v_1.Op != OpAMD64ADDQconst {
   14261 			break
   14262 		}
   14263 		off2 := v_1.AuxInt
   14264 		ptr := v_1.Args[0]
   14265 		mem := v.Args[2]
   14266 		if !(is32Bit(off1 + off2)) {
   14267 			break
   14268 		}
   14269 		v.reset(OpAMD64XCHGL)
   14270 		v.AuxInt = off1 + off2
   14271 		v.Aux = sym
   14272 		v.AddArg(val)
   14273 		v.AddArg(ptr)
   14274 		v.AddArg(mem)
   14275 		return true
   14276 	}
   14277 	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
   14278 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
   14279 	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
   14280 	for {
   14281 		off1 := v.AuxInt
   14282 		sym1 := v.Aux
   14283 		val := v.Args[0]
   14284 		v_1 := v.Args[1]
   14285 		if v_1.Op != OpAMD64LEAQ {
   14286 			break
   14287 		}
   14288 		off2 := v_1.AuxInt
   14289 		sym2 := v_1.Aux
   14290 		ptr := v_1.Args[0]
   14291 		mem := v.Args[2]
   14292 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
   14293 			break
   14294 		}
   14295 		v.reset(OpAMD64XCHGL)
   14296 		v.AuxInt = off1 + off2
   14297 		v.Aux = mergeSym(sym1, sym2)
   14298 		v.AddArg(val)
   14299 		v.AddArg(ptr)
   14300 		v.AddArg(mem)
   14301 		return true
   14302 	}
   14303 	return false
   14304 }
   14305 func rewriteValueAMD64_OpAMD64XCHGQ(v *Value, config *Config) bool {
   14306 	b := v.Block
   14307 	_ = b
   14308 	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
   14309 	// cond: is32Bit(off1+off2)
   14310 	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
   14311 	for {
   14312 		off1 := v.AuxInt
   14313 		sym := v.Aux
   14314 		val := v.Args[0]
   14315 		v_1 := v.Args[1]
   14316 		if v_1.Op != OpAMD64ADDQconst {
   14317 			break
   14318 		}
   14319 		off2 := v_1.AuxInt
   14320 		ptr := v_1.Args[0]
   14321 		mem := v.Args[2]
   14322 		if !(is32Bit(off1 + off2)) {
   14323 			break
   14324 		}
   14325 		v.reset(OpAMD64XCHGQ)
   14326 		v.AuxInt = off1 + off2
   14327 		v.Aux = sym
   14328 		v.AddArg(val)
   14329 		v.AddArg(ptr)
   14330 		v.AddArg(mem)
   14331 		return true
   14332 	}
   14333 	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
   14334 	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
   14335 	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
   14336 	for {
   14337 		off1 := v.AuxInt
   14338 		sym1 := v.Aux
   14339 		val := v.Args[0]
   14340 		v_1 := v.Args[1]
   14341 		if v_1.Op != OpAMD64LEAQ {
   14342 			break
   14343 		}
   14344 		off2 := v_1.AuxInt
   14345 		sym2 := v_1.Aux
   14346 		ptr := v_1.Args[0]
   14347 		mem := v.Args[2]
   14348 		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
   14349 			break
   14350 		}
   14351 		v.reset(OpAMD64XCHGQ)
   14352 		v.AuxInt = off1 + off2
   14353 		v.Aux = mergeSym(sym1, sym2)
   14354 		v.AddArg(val)
   14355 		v.AddArg(ptr)
   14356 		v.AddArg(mem)
   14357 		return true
   14358 	}
   14359 	return false
   14360 }
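// The lock-prefixed XADD ops and the XCHG ops above fold address arithmetic
// into their addressing mode the same way ordinary memory ops do: an
// ADDQconst base folds whenever the combined displacement still fits in a
// signed 32 bits, and a LEAQ base additionally merges its symbol via
// mergeSym. The ptr.Op != OpSB guard simply refuses to fold when the inner
// base is the static base pseudo-register.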
   14361 func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
   14362 	b := v.Block
   14363 	_ = b
   14364 	// match: (XORL x (MOVLconst [c]))
   14365 	// cond:
   14366 	// result: (XORLconst [c] x)
   14367 	for {
   14368 		x := v.Args[0]
   14369 		v_1 := v.Args[1]
   14370 		if v_1.Op != OpAMD64MOVLconst {
   14371 			break
   14372 		}
   14373 		c := v_1.AuxInt
   14374 		v.reset(OpAMD64XORLconst)
   14375 		v.AuxInt = c
   14376 		v.AddArg(x)
   14377 		return true
   14378 	}
   14379 	// match: (XORL (MOVLconst [c]) x)
   14380 	// cond:
   14381 	// result: (XORLconst [c] x)
   14382 	for {
   14383 		v_0 := v.Args[0]
   14384 		if v_0.Op != OpAMD64MOVLconst {
   14385 			break
   14386 		}
   14387 		c := v_0.AuxInt
   14388 		x := v.Args[1]
   14389 		v.reset(OpAMD64XORLconst)
   14390 		v.AuxInt = c
   14391 		v.AddArg(x)
   14392 		return true
   14393 	}
   14394 	// match: (XORL x x)
   14395 	// cond:
   14396 	// result: (MOVLconst [0])
   14397 	for {
   14398 		x := v.Args[0]
   14399 		if x != v.Args[1] {
   14400 			break
   14401 		}
   14402 		v.reset(OpAMD64MOVLconst)
   14403 		v.AuxInt = 0
   14404 		return true
   14405 	}
   14406 	return false
   14407 }
   14408 func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
   14409 	b := v.Block
   14410 	_ = b
   14411 	// match: (XORLconst [c] (XORLconst [d] x))
   14412 	// cond:
   14413 	// result: (XORLconst [c ^ d] x)
   14414 	for {
   14415 		c := v.AuxInt
   14416 		v_0 := v.Args[0]
   14417 		if v_0.Op != OpAMD64XORLconst {
   14418 			break
   14419 		}
   14420 		d := v_0.AuxInt
   14421 		x := v_0.Args[0]
   14422 		v.reset(OpAMD64XORLconst)
   14423 		v.AuxInt = c ^ d
   14424 		v.AddArg(x)
   14425 		return true
   14426 	}
   14427 	// match: (XORLconst [c] x)
   14428 	// cond: int32(c)==0
   14429 	// result: x
   14430 	for {
   14431 		c := v.AuxInt
   14432 		x := v.Args[0]
   14433 		if !(int32(c) == 0) {
   14434 			break
   14435 		}
   14436 		v.reset(OpCopy)
   14437 		v.Type = x.Type
   14438 		v.AddArg(x)
   14439 		return true
   14440 	}
   14441 	// match: (XORLconst [c] (MOVLconst [d]))
   14442 	// cond:
   14443 	// result: (MOVLconst [c^d])
   14444 	for {
   14445 		c := v.AuxInt
   14446 		v_0 := v.Args[0]
   14447 		if v_0.Op != OpAMD64MOVLconst {
   14448 			break
   14449 		}
   14450 		d := v_0.AuxInt
   14451 		v.reset(OpAMD64MOVLconst)
   14452 		v.AuxInt = c ^ d
   14453 		return true
   14454 	}
   14455 	return false
   14456 }
   14457 func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
   14458 	b := v.Block
   14459 	_ = b
   14460 	// match: (XORQ x (MOVQconst [c]))
   14461 	// cond: is32Bit(c)
   14462 	// result: (XORQconst [c] x)
   14463 	for {
   14464 		x := v.Args[0]
   14465 		v_1 := v.Args[1]
   14466 		if v_1.Op != OpAMD64MOVQconst {
   14467 			break
   14468 		}
   14469 		c := v_1.AuxInt
   14470 		if !(is32Bit(c)) {
   14471 			break
   14472 		}
   14473 		v.reset(OpAMD64XORQconst)
   14474 		v.AuxInt = c
   14475 		v.AddArg(x)
   14476 		return true
   14477 	}
   14478 	// match: (XORQ (MOVQconst [c]) x)
   14479 	// cond: is32Bit(c)
   14480 	// result: (XORQconst [c] x)
   14481 	for {
   14482 		v_0 := v.Args[0]
   14483 		if v_0.Op != OpAMD64MOVQconst {
   14484 			break
   14485 		}
   14486 		c := v_0.AuxInt
   14487 		x := v.Args[1]
   14488 		if !(is32Bit(c)) {
   14489 			break
   14490 		}
   14491 		v.reset(OpAMD64XORQconst)
   14492 		v.AuxInt = c
   14493 		v.AddArg(x)
   14494 		return true
   14495 	}
   14496 	// match: (XORQ x x)
   14497 	// cond:
   14498 	// result: (MOVQconst [0])
   14499 	for {
   14500 		x := v.Args[0]
   14501 		if x != v.Args[1] {
   14502 			break
   14503 		}
   14504 		v.reset(OpAMD64MOVQconst)
   14505 		v.AuxInt = 0
   14506 		return true
   14507 	}
   14508 	return false
   14509 }
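// XOR of a value with itself is always zero, so (XORL x x) and (XORQ x x)
// are rewritten to constants rather than left as instructions; exposing the
// zero as a constant presumably lets the remaining constant-folding rules
// keep simplifying. The const-on-either-side rules above exist because XOR
// is commutative.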
   14510 func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
   14511 	b := v.Block
   14512 	_ = b
   14513 	// match: (XORQconst [c] (XORQconst [d] x))
   14514 	// cond:
   14515 	// result: (XORQconst [c ^ d] x)
   14516 	for {
   14517 		c := v.AuxInt
   14518 		v_0 := v.Args[0]
   14519 		if v_0.Op != OpAMD64XORQconst {
   14520 			break
   14521 		}
   14522 		d := v_0.AuxInt
   14523 		x := v_0.Args[0]
   14524 		v.reset(OpAMD64XORQconst)
   14525 		v.AuxInt = c ^ d
   14526 		v.AddArg(x)
   14527 		return true
   14528 	}
   14529 	// match: (XORQconst [0] x)
   14530 	// cond:
   14531 	// result: x
   14532 	for {
   14533 		if v.AuxInt != 0 {
   14534 			break
   14535 		}
   14536 		x := v.Args[0]
   14537 		v.reset(OpCopy)
   14538 		v.Type = x.Type
   14539 		v.AddArg(x)
   14540 		return true
   14541 	}
   14542 	// match: (XORQconst [c] (MOVQconst [d]))
   14543 	// cond:
   14544 	// result: (MOVQconst [c^d])
   14545 	for {
   14546 		c := v.AuxInt
   14547 		v_0 := v.Args[0]
   14548 		if v_0.Op != OpAMD64MOVQconst {
   14549 			break
   14550 		}
   14551 		d := v_0.AuxInt
   14552 		v.reset(OpAMD64MOVQconst)
   14553 		v.AuxInt = c ^ d
   14554 		return true
   14555 	}
   14556 	return false
   14557 }
   14558 func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
   14559 	b := v.Block
   14560 	_ = b
   14561 	// match: (Add16  x y)
   14562 	// cond:
   14563 	// result: (ADDL  x y)
   14564 	for {
   14565 		x := v.Args[0]
   14566 		y := v.Args[1]
   14567 		v.reset(OpAMD64ADDL)
   14568 		v.AddArg(x)
   14569 		v.AddArg(y)
   14570 		return true
   14571 	}
   14572 }
   14573 func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
   14574 	b := v.Block
   14575 	_ = b
   14576 	// match: (Add32  x y)
   14577 	// cond:
   14578 	// result: (ADDL  x y)
   14579 	for {
   14580 		x := v.Args[0]
   14581 		y := v.Args[1]
   14582 		v.reset(OpAMD64ADDL)
   14583 		v.AddArg(x)
   14584 		v.AddArg(y)
   14585 		return true
   14586 	}
   14587 }
   14588 func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
   14589 	b := v.Block
   14590 	_ = b
   14591 	// match: (Add32F x y)
   14592 	// cond:
   14593 	// result: (ADDSS x y)
   14594 	for {
   14595 		x := v.Args[0]
   14596 		y := v.Args[1]
   14597 		v.reset(OpAMD64ADDSS)
   14598 		v.AddArg(x)
   14599 		v.AddArg(y)
   14600 		return true
   14601 	}
   14602 }
   14603 func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
   14604 	b := v.Block
   14605 	_ = b
   14606 	// match: (Add64  x y)
   14607 	// cond:
   14608 	// result: (ADDQ  x y)
   14609 	for {
   14610 		x := v.Args[0]
   14611 		y := v.Args[1]
   14612 		v.reset(OpAMD64ADDQ)
   14613 		v.AddArg(x)
   14614 		v.AddArg(y)
   14615 		return true
   14616 	}
   14617 }
   14618 func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
   14619 	b := v.Block
   14620 	_ = b
   14621 	// match: (Add64F x y)
   14622 	// cond:
   14623 	// result: (ADDSD x y)
   14624 	for {
   14625 		x := v.Args[0]
   14626 		y := v.Args[1]
   14627 		v.reset(OpAMD64ADDSD)
   14628 		v.AddArg(x)
   14629 		v.AddArg(y)
   14630 		return true
   14631 	}
   14632 }
   14633 func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
   14634 	b := v.Block
   14635 	_ = b
   14636 	// match: (Add8   x y)
   14637 	// cond:
   14638 	// result: (ADDL  x y)
   14639 	for {
   14640 		x := v.Args[0]
   14641 		y := v.Args[1]
   14642 		v.reset(OpAMD64ADDL)
   14643 		v.AddArg(x)
   14644 		v.AddArg(y)
   14645 		return true
   14646 	}
   14647 }
   14648 func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
   14649 	b := v.Block
   14650 	_ = b
   14651 	// match: (AddPtr x y)
   14652 	// cond: config.PtrSize == 8
   14653 	// result: (ADDQ x y)
   14654 	for {
   14655 		x := v.Args[0]
   14656 		y := v.Args[1]
   14657 		if !(config.PtrSize == 8) {
   14658 			break
   14659 		}
   14660 		v.reset(OpAMD64ADDQ)
   14661 		v.AddArg(x)
   14662 		v.AddArg(y)
   14663 		return true
   14664 	}
   14665 	// match: (AddPtr x y)
   14666 	// cond: config.PtrSize == 4
   14667 	// result: (ADDL x y)
   14668 	for {
   14669 		x := v.Args[0]
   14670 		y := v.Args[1]
   14671 		if !(config.PtrSize == 4) {
   14672 			break
   14673 		}
   14674 		v.reset(OpAMD64ADDL)
   14675 		v.AddArg(x)
   14676 		v.AddArg(y)
   14677 		return true
   14678 	}
   14679 	return false
   14680 }
   14681 func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
   14682 	b := v.Block
   14683 	_ = b
   14684 	// match: (Addr {sym} base)
   14685 	// cond: config.PtrSize == 8
   14686 	// result: (LEAQ {sym} base)
   14687 	for {
   14688 		sym := v.Aux
   14689 		base := v.Args[0]
   14690 		if !(config.PtrSize == 8) {
   14691 			break
   14692 		}
   14693 		v.reset(OpAMD64LEAQ)
   14694 		v.Aux = sym
   14695 		v.AddArg(base)
   14696 		return true
   14697 	}
   14698 	// match: (Addr {sym} base)
   14699 	// cond: config.PtrSize == 4
   14700 	// result: (LEAL {sym} base)
   14701 	for {
   14702 		sym := v.Aux
   14703 		base := v.Args[0]
   14704 		if !(config.PtrSize == 4) {
   14705 			break
   14706 		}
   14707 		v.reset(OpAMD64LEAL)
   14708 		v.Aux = sym
   14709 		v.AddArg(base)
   14710 		return true
   14711 	}
   14712 	return false
   14713 }
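// From this point on the file lowers generic SSA ops to AMD64 ones.
// Pointer-sized ops such as AddPtr and Addr choose between the Q and L forms
// based on config.PtrSize, which is presumably 8 for ordinary amd64 and 4
// for the ILP32 flavor of the port (amd64p32).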
   14714 func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
   14715 	b := v.Block
   14716 	_ = b
   14717 	// match: (And16 x y)
   14718 	// cond:
   14719 	// result: (ANDL x y)
   14720 	for {
   14721 		x := v.Args[0]
   14722 		y := v.Args[1]
   14723 		v.reset(OpAMD64ANDL)
   14724 		v.AddArg(x)
   14725 		v.AddArg(y)
   14726 		return true
   14727 	}
   14728 }
   14729 func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
   14730 	b := v.Block
   14731 	_ = b
   14732 	// match: (And32 x y)
   14733 	// cond:
   14734 	// result: (ANDL x y)
   14735 	for {
   14736 		x := v.Args[0]
   14737 		y := v.Args[1]
   14738 		v.reset(OpAMD64ANDL)
   14739 		v.AddArg(x)
   14740 		v.AddArg(y)
   14741 		return true
   14742 	}
   14743 }
   14744 func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
   14745 	b := v.Block
   14746 	_ = b
   14747 	// match: (And64 x y)
   14748 	// cond:
   14749 	// result: (ANDQ x y)
   14750 	for {
   14751 		x := v.Args[0]
   14752 		y := v.Args[1]
   14753 		v.reset(OpAMD64ANDQ)
   14754 		v.AddArg(x)
   14755 		v.AddArg(y)
   14756 		return true
   14757 	}
   14758 }
   14759 func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
   14760 	b := v.Block
   14761 	_ = b
   14762 	// match: (And8  x y)
   14763 	// cond:
   14764 	// result: (ANDL x y)
   14765 	for {
   14766 		x := v.Args[0]
   14767 		y := v.Args[1]
   14768 		v.reset(OpAMD64ANDL)
   14769 		v.AddArg(x)
   14770 		v.AddArg(y)
   14771 		return true
   14772 	}
   14773 }
   14774 func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
   14775 	b := v.Block
   14776 	_ = b
   14777 	// match: (AndB x y)
   14778 	// cond:
   14779 	// result: (ANDL x y)
   14780 	for {
   14781 		x := v.Args[0]
   14782 		y := v.Args[1]
   14783 		v.reset(OpAMD64ANDL)
   14784 		v.AddArg(x)
   14785 		v.AddArg(y)
   14786 		return true
   14787 	}
   14788 }
   14789 func rewriteValueAMD64_OpAtomicAdd32(v *Value, config *Config) bool {
   14790 	b := v.Block
   14791 	_ = b
   14792 	// match: (AtomicAdd32 ptr val mem)
   14793 	// cond:
   14794 	// result: (AddTupleFirst32 (XADDLlock val ptr mem) val)
   14795 	for {
   14796 		ptr := v.Args[0]
   14797 		val := v.Args[1]
   14798 		mem := v.Args[2]
   14799 		v.reset(OpAMD64AddTupleFirst32)
   14800 		v0 := b.NewValue0(v.Line, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem))
   14801 		v0.AddArg(val)
   14802 		v0.AddArg(ptr)
   14803 		v0.AddArg(mem)
   14804 		v.AddArg(v0)
   14805 		v.AddArg(val)
   14806 		return true
   14807 	}
   14808 }
   14809 func rewriteValueAMD64_OpAtomicAdd64(v *Value, config *Config) bool {
   14810 	b := v.Block
   14811 	_ = b
   14812 	// match: (AtomicAdd64 ptr val mem)
   14813 	// cond:
   14814 	// result: (AddTupleFirst64 (XADDQlock val ptr mem) val)
   14815 	for {
   14816 		ptr := v.Args[0]
   14817 		val := v.Args[1]
   14818 		mem := v.Args[2]
   14819 		v.reset(OpAMD64AddTupleFirst64)
   14820 		v0 := b.NewValue0(v.Line, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem))
   14821 		v0.AddArg(val)
   14822 		v0.AddArg(ptr)
   14823 		v0.AddArg(mem)
   14824 		v.AddArg(v0)
   14825 		v.AddArg(val)
   14826 		return true
   14827 	}
   14828 }
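// XADD leaves the old memory value in its register operand, while Go's
// atomic add must return the new value. The AddTupleFirst32/64 wrappers used
// above stand for adding val to the first (value) component of the
// (old value, memory) tuple produced by the locked XADD, yielding old+val.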
   14829 func rewriteValueAMD64_OpAtomicAnd8(v *Value, config *Config) bool {
   14830 	b := v.Block
   14831 	_ = b
   14832 	// match: (AtomicAnd8 ptr val mem)
   14833 	// cond:
   14834 	// result: (ANDBlock ptr val mem)
   14835 	for {
   14836 		ptr := v.Args[0]
   14837 		val := v.Args[1]
   14838 		mem := v.Args[2]
   14839 		v.reset(OpAMD64ANDBlock)
   14840 		v.AddArg(ptr)
   14841 		v.AddArg(val)
   14842 		v.AddArg(mem)
   14843 		return true
   14844 	}
   14845 }
   14846 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value, config *Config) bool {
   14847 	b := v.Block
   14848 	_ = b
   14849 	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
   14850 	// cond:
   14851 	// result: (CMPXCHGLlock ptr old new_ mem)
   14852 	for {
   14853 		ptr := v.Args[0]
   14854 		old := v.Args[1]
   14855 		new_ := v.Args[2]
   14856 		mem := v.Args[3]
   14857 		v.reset(OpAMD64CMPXCHGLlock)
   14858 		v.AddArg(ptr)
   14859 		v.AddArg(old)
   14860 		v.AddArg(new_)
   14861 		v.AddArg(mem)
   14862 		return true
   14863 	}
   14864 }
   14865 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value, config *Config) bool {
   14866 	b := v.Block
   14867 	_ = b
   14868 	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
   14869 	// cond:
   14870 	// result: (CMPXCHGQlock ptr old new_ mem)
   14871 	for {
   14872 		ptr := v.Args[0]
   14873 		old := v.Args[1]
   14874 		new_ := v.Args[2]
   14875 		mem := v.Args[3]
   14876 		v.reset(OpAMD64CMPXCHGQlock)
   14877 		v.AddArg(ptr)
   14878 		v.AddArg(old)
   14879 		v.AddArg(new_)
   14880 		v.AddArg(mem)
   14881 		return true
   14882 	}
   14883 }
   14884 func rewriteValueAMD64_OpAtomicExchange32(v *Value, config *Config) bool {
   14885 	b := v.Block
   14886 	_ = b
   14887 	// match: (AtomicExchange32 ptr val mem)
   14888 	// cond:
   14889 	// result: (XCHGL val ptr mem)
   14890 	for {
   14891 		ptr := v.Args[0]
   14892 		val := v.Args[1]
   14893 		mem := v.Args[2]
   14894 		v.reset(OpAMD64XCHGL)
   14895 		v.AddArg(val)
   14896 		v.AddArg(ptr)
   14897 		v.AddArg(mem)
   14898 		return true
   14899 	}
   14900 }
   14901 func rewriteValueAMD64_OpAtomicExchange64(v *Value, config *Config) bool {
   14902 	b := v.Block
   14903 	_ = b
   14904 	// match: (AtomicExchange64 ptr val mem)
   14905 	// cond:
   14906 	// result: (XCHGQ val ptr mem)
   14907 	for {
   14908 		ptr := v.Args[0]
   14909 		val := v.Args[1]
   14910 		mem := v.Args[2]
   14911 		v.reset(OpAMD64XCHGQ)
   14912 		v.AddArg(val)
   14913 		v.AddArg(ptr)
   14914 		v.AddArg(mem)
   14915 		return true
   14916 	}
   14917 }
   14918 func rewriteValueAMD64_OpAtomicLoad32(v *Value, config *Config) bool {
   14919 	b := v.Block
   14920 	_ = b
   14921 	// match: (AtomicLoad32 ptr mem)
   14922 	// cond:
   14923 	// result: (MOVLatomicload ptr mem)
   14924 	for {
   14925 		ptr := v.Args[0]
   14926 		mem := v.Args[1]
   14927 		v.reset(OpAMD64MOVLatomicload)
   14928 		v.AddArg(ptr)
   14929 		v.AddArg(mem)
   14930 		return true
   14931 	}
   14932 }
   14933 func rewriteValueAMD64_OpAtomicLoad64(v *Value, config *Config) bool {
   14934 	b := v.Block
   14935 	_ = b
   14936 	// match: (AtomicLoad64 ptr mem)
   14937 	// cond:
   14938 	// result: (MOVQatomicload ptr mem)
   14939 	for {
   14940 		ptr := v.Args[0]
   14941 		mem := v.Args[1]
   14942 		v.reset(OpAMD64MOVQatomicload)
   14943 		v.AddArg(ptr)
   14944 		v.AddArg(mem)
   14945 		return true
   14946 	}
   14947 }
   14948 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value, config *Config) bool {
   14949 	b := v.Block
   14950 	_ = b
   14951 	// match: (AtomicLoadPtr ptr mem)
   14952 	// cond: config.PtrSize == 8
   14953 	// result: (MOVQatomicload ptr mem)
   14954 	for {
   14955 		ptr := v.Args[0]
   14956 		mem := v.Args[1]
   14957 		if !(config.PtrSize == 8) {
   14958 			break
   14959 		}
   14960 		v.reset(OpAMD64MOVQatomicload)
   14961 		v.AddArg(ptr)
   14962 		v.AddArg(mem)
   14963 		return true
   14964 	}
   14965 	// match: (AtomicLoadPtr ptr mem)
   14966 	// cond: config.PtrSize == 4
   14967 	// result: (MOVLatomicload ptr mem)
   14968 	for {
   14969 		ptr := v.Args[0]
   14970 		mem := v.Args[1]
   14971 		if !(config.PtrSize == 4) {
   14972 			break
   14973 		}
   14974 		v.reset(OpAMD64MOVLatomicload)
   14975 		v.AddArg(ptr)
   14976 		v.AddArg(mem)
   14977 		return true
   14978 	}
   14979 	return false
   14980 }
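// Plain loads suffice for the atomic loads: naturally aligned loads are
// atomic on AMD64, and the architecture's strong memory ordering supplies
// the acquire behavior, so MOVLatomicload and MOVQatomicload need no lock
// prefix.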
   14981 func rewriteValueAMD64_OpAtomicOr8(v *Value, config *Config) bool {
   14982 	b := v.Block
   14983 	_ = b
   14984 	// match: (AtomicOr8 ptr val mem)
   14985 	// cond:
   14986 	// result: (ORBlock ptr val mem)
   14987 	for {
   14988 		ptr := v.Args[0]
   14989 		val := v.Args[1]
   14990 		mem := v.Args[2]
   14991 		v.reset(OpAMD64ORBlock)
   14992 		v.AddArg(ptr)
   14993 		v.AddArg(val)
   14994 		v.AddArg(mem)
   14995 		return true
   14996 	}
   14997 }
   14998 func rewriteValueAMD64_OpAtomicStore32(v *Value, config *Config) bool {
   14999 	b := v.Block
   15000 	_ = b
   15001 	// match: (AtomicStore32 ptr val mem)
   15002 	// cond:
   15003 	// result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeUInt32(),TypeMem)> val ptr mem))
   15004 	for {
   15005 		ptr := v.Args[0]
   15006 		val := v.Args[1]
   15007 		mem := v.Args[2]
   15008 		v.reset(OpSelect1)
   15009 		v0 := b.NewValue0(v.Line, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeUInt32(), TypeMem))
   15010 		v0.AddArg(val)
   15011 		v0.AddArg(ptr)
   15012 		v0.AddArg(mem)
   15013 		v.AddArg(v0)
   15014 		return true
   15015 	}
   15016 }
   15017 func rewriteValueAMD64_OpAtomicStore64(v *Value, config *Config) bool {
   15018 	b := v.Block
   15019 	_ = b
   15020 	// match: (AtomicStore64 ptr val mem)
   15021 	// cond:
   15022 	// result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeUInt64(),TypeMem)> val ptr mem))
   15023 	for {
   15024 		ptr := v.Args[0]
   15025 		val := v.Args[1]
   15026 		mem := v.Args[2]
   15027 		v.reset(OpSelect1)
   15028 		v0 := b.NewValue0(v.Line, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeUInt64(), TypeMem))
   15029 		v0.AddArg(val)
   15030 		v0.AddArg(ptr)
   15031 		v0.AddArg(mem)
   15032 		v.AddArg(v0)
   15033 		return true
   15034 	}
   15035 }
   15036 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value, config *Config) bool {
   15037 	b := v.Block
   15038 	_ = b
   15039 	// match: (AtomicStorePtrNoWB ptr val mem)
   15040 	// cond: config.PtrSize == 8
   15041 	// result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
   15042 	for {
   15043 		ptr := v.Args[0]
   15044 		val := v.Args[1]
   15045 		mem := v.Args[2]
   15046 		if !(config.PtrSize == 8) {
   15047 			break
   15048 		}
   15049 		v.reset(OpSelect1)
   15050 		v0 := b.NewValue0(v.Line, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem))
   15051 		v0.AddArg(val)
   15052 		v0.AddArg(ptr)
   15053 		v0.AddArg(mem)
   15054 		v.AddArg(v0)
   15055 		return true
   15056 	}
   15057 	// match: (AtomicStorePtrNoWB ptr val mem)
   15058 	// cond: config.PtrSize == 4
   15059 	// result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
   15060 	for {
   15061 		ptr := v.Args[0]
   15062 		val := v.Args[1]
   15063 		mem := v.Args[2]
   15064 		if !(config.PtrSize == 4) {
   15065 			break
   15066 		}
   15067 		v.reset(OpSelect1)
   15068 		v0 := b.NewValue0(v.Line, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem))
   15069 		v0.AddArg(val)
   15070 		v0.AddArg(ptr)
   15071 		v0.AddArg(mem)
   15072 		v.AddArg(v0)
   15073 		return true
   15074 	}
   15075 	return false
   15076 }
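// Atomic stores lower to XCHG rather than MOV: an XCHG with a memory operand
// is implicitly locked and acts as a full barrier, which gives the store the
// required ordering. Select1 then extracts only the memory from the
// (old value, memory) tuple; the exchanged old value is discarded.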
   15077 func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
   15078 	b := v.Block
   15079 	_ = b
   15080 	// match: (Avg64u x y)
   15081 	// cond:
   15082 	// result: (AVGQU x y)
   15083 	for {
   15084 		x := v.Args[0]
   15085 		y := v.Args[1]
   15086 		v.reset(OpAMD64AVGQU)
   15087 		v.AddArg(x)
   15088 		v.AddArg(y)
   15089 		return true
   15090 	}
   15091 }
   15092 func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
   15093 	b := v.Block
   15094 	_ = b
   15095 	// match: (Bswap32 x)
   15096 	// cond:
   15097 	// result: (BSWAPL x)
   15098 	for {
   15099 		x := v.Args[0]
   15100 		v.reset(OpAMD64BSWAPL)
   15101 		v.AddArg(x)
   15102 		return true
   15103 	}
   15104 }
   15105 func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
   15106 	b := v.Block
   15107 	_ = b
   15108 	// match: (Bswap64 x)
   15109 	// cond:
   15110 	// result: (BSWAPQ x)
   15111 	for {
   15112 		x := v.Args[0]
   15113 		v.reset(OpAMD64BSWAPQ)
   15114 		v.AddArg(x)
   15115 		return true
   15116 	}
   15117 }
   15118 func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
   15119 	b := v.Block
   15120 	_ = b
   15121 	// match: (ClosureCall [argwid] entry closure mem)
   15122 	// cond:
   15123 	// result: (CALLclosure [argwid] entry closure mem)
   15124 	for {
   15125 		argwid := v.AuxInt
   15126 		entry := v.Args[0]
   15127 		closure := v.Args[1]
   15128 		mem := v.Args[2]
   15129 		v.reset(OpAMD64CALLclosure)
   15130 		v.AuxInt = argwid
   15131 		v.AddArg(entry)
   15132 		v.AddArg(closure)
   15133 		v.AddArg(mem)
   15134 		return true
   15135 	}
   15136 }
   15137 func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
   15138 	b := v.Block
   15139 	_ = b
   15140 	// match: (Com16 x)
   15141 	// cond:
   15142 	// result: (NOTL x)
   15143 	for {
   15144 		x := v.Args[0]
   15145 		v.reset(OpAMD64NOTL)
   15146 		v.AddArg(x)
   15147 		return true
   15148 	}
   15149 }
   15150 func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
   15151 	b := v.Block
   15152 	_ = b
   15153 	// match: (Com32 x)
   15154 	// cond:
   15155 	// result: (NOTL x)
   15156 	for {
   15157 		x := v.Args[0]
   15158 		v.reset(OpAMD64NOTL)
   15159 		v.AddArg(x)
   15160 		return true
   15161 	}
   15162 }
   15163 func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
   15164 	b := v.Block
   15165 	_ = b
   15166 	// match: (Com64 x)
   15167 	// cond:
   15168 	// result: (NOTQ x)
   15169 	for {
   15170 		x := v.Args[0]
   15171 		v.reset(OpAMD64NOTQ)
   15172 		v.AddArg(x)
   15173 		return true
   15174 	}
   15175 }
   15176 func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
   15177 	b := v.Block
   15178 	_ = b
   15179 	// match: (Com8  x)
   15180 	// cond:
   15181 	// result: (NOTL x)
   15182 	for {
   15183 		x := v.Args[0]
   15184 		v.reset(OpAMD64NOTL)
   15185 		v.AddArg(x)
   15186 		return true
   15187 	}
   15188 }
   15189 func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
   15190 	b := v.Block
   15191 	_ = b
   15192 	// match: (Const16  [val])
   15193 	// cond:
   15194 	// result: (MOVLconst [val])
   15195 	for {
   15196 		val := v.AuxInt
   15197 		v.reset(OpAMD64MOVLconst)
   15198 		v.AuxInt = val
   15199 		return true
   15200 	}
   15201 }
   15202 func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
   15203 	b := v.Block
   15204 	_ = b
   15205 	// match: (Const32  [val])
   15206 	// cond:
   15207 	// result: (MOVLconst [val])
   15208 	for {
   15209 		val := v.AuxInt
   15210 		v.reset(OpAMD64MOVLconst)
   15211 		v.AuxInt = val
   15212 		return true
   15213 	}
   15214 }
   15215 func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
   15216 	b := v.Block
   15217 	_ = b
   15218 	// match: (Const32F [val])
   15219 	// cond:
   15220 	// result: (MOVSSconst [val])
   15221 	for {
   15222 		val := v.AuxInt
   15223 		v.reset(OpAMD64MOVSSconst)
   15224 		v.AuxInt = val
   15225 		return true
   15226 	}
   15227 }
   15228 func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
   15229 	b := v.Block
   15230 	_ = b
   15231 	// match: (Const64  [val])
   15232 	// cond:
   15233 	// result: (MOVQconst [val])
   15234 	for {
   15235 		val := v.AuxInt
   15236 		v.reset(OpAMD64MOVQconst)
   15237 		v.AuxInt = val
   15238 		return true
   15239 	}
   15240 }
   15241 func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
   15242 	b := v.Block
   15243 	_ = b
   15244 	// match: (Const64F [val])
   15245 	// cond:
   15246 	// result: (MOVSDconst [val])
   15247 	for {
   15248 		val := v.AuxInt
   15249 		v.reset(OpAMD64MOVSDconst)
   15250 		v.AuxInt = val
   15251 		return true
   15252 	}
   15253 }
   15254 func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
   15255 	b := v.Block
   15256 	_ = b
   15257 	// match: (Const8   [val])
   15258 	// cond:
   15259 	// result: (MOVLconst [val])
   15260 	for {
   15261 		val := v.AuxInt
   15262 		v.reset(OpAMD64MOVLconst)
   15263 		v.AuxInt = val
   15264 		return true
   15265 	}
   15266 }
   15267 func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
   15268 	b := v.Block
   15269 	_ = b
   15270 	// match: (ConstBool [b])
   15271 	// cond:
   15272 	// result: (MOVLconst [b])
   15273 	for {
   15274 		b := v.AuxInt
   15275 		v.reset(OpAMD64MOVLconst)
   15276 		v.AuxInt = b
   15277 		return true
   15278 	}
   15279 }
   15280 func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
   15281 	b := v.Block
   15282 	_ = b
   15283 	// match: (ConstNil)
   15284 	// cond: config.PtrSize == 8
   15285 	// result: (MOVQconst [0])
   15286 	for {
   15287 		if !(config.PtrSize == 8) {
   15288 			break
   15289 		}
   15290 		v.reset(OpAMD64MOVQconst)
   15291 		v.AuxInt = 0
   15292 		return true
   15293 	}
   15294 	// match: (ConstNil)
   15295 	// cond: config.PtrSize == 4
   15296 	// result: (MOVLconst [0])
   15297 	for {
   15298 		if !(config.PtrSize == 4) {
   15299 			break
   15300 		}
   15301 		v.reset(OpAMD64MOVLconst)
   15302 		v.AuxInt = 0
   15303 		return true
   15304 	}
   15305 	return false
   15306 }
   15307 func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
   15308 	b := v.Block
   15309 	_ = b
   15310 	// match: (Convert <t> x mem)
   15311 	// cond: config.PtrSize == 8
   15312 	// result: (MOVQconvert <t> x mem)
   15313 	for {
   15314 		t := v.Type
   15315 		x := v.Args[0]
   15316 		mem := v.Args[1]
   15317 		if !(config.PtrSize == 8) {
   15318 			break
   15319 		}
   15320 		v.reset(OpAMD64MOVQconvert)
   15321 		v.Type = t
   15322 		v.AddArg(x)
   15323 		v.AddArg(mem)
   15324 		return true
   15325 	}
   15326 	// match: (Convert <t> x mem)
   15327 	// cond: config.PtrSize == 4
   15328 	// result: (MOVLconvert <t> x mem)
   15329 	for {
   15330 		t := v.Type
   15331 		x := v.Args[0]
   15332 		mem := v.Args[1]
   15333 		if !(config.PtrSize == 4) {
   15334 			break
   15335 		}
   15336 		v.reset(OpAMD64MOVLconvert)
   15337 		v.Type = t
   15338 		v.AddArg(x)
   15339 		v.AddArg(mem)
   15340 		return true
   15341 	}
   15342 	return false
   15343 }
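// Convert marks a value that is reinterpreted between pointer and integer
// (around unsafe arithmetic, for instance) so that the pointer remains
// visible to the garbage collector; MOVQconvert and MOVLconvert presumably
// lower to nothing more than a register-to-register move.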
   15344 func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
   15345 	b := v.Block
   15346 	_ = b
   15347 	// match: (Ctz32 <t> x)
   15348 	// cond:
   15349 	// result: (CMOVLEQ (Select0 <t> (BSFL x)) (MOVLconst <t> [32]) (Select1 <TypeFlags> (BSFL x)))
   15350 	for {
   15351 		t := v.Type
   15352 		x := v.Args[0]
   15353 		v.reset(OpAMD64CMOVLEQ)
   15354 		v0 := b.NewValue0(v.Line, OpSelect0, t)
   15355 		v1 := b.NewValue0(v.Line, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
   15356 		v1.AddArg(x)
   15357 		v0.AddArg(v1)
   15358 		v.AddArg(v0)
   15359 		v2 := b.NewValue0(v.Line, OpAMD64MOVLconst, t)
   15360 		v2.AuxInt = 32
   15361 		v.AddArg(v2)
   15362 		v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags)
   15363 		v4 := b.NewValue0(v.Line, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
   15364 		v4.AddArg(x)
   15365 		v3.AddArg(v4)
   15366 		v.AddArg(v3)
   15367 		return true
   15368 	}
   15369 }
   15370 func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
   15371 	b := v.Block
   15372 	_ = b
   15373 	// match: (Ctz64 <t> x)
   15374 	// cond:
   15375 	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))
   15376 	for {
   15377 		t := v.Type
   15378 		x := v.Args[0]
   15379 		v.reset(OpAMD64CMOVQEQ)
   15380 		v0 := b.NewValue0(v.Line, OpSelect0, t)
   15381 		v1 := b.NewValue0(v.Line, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags))
   15382 		v1.AddArg(x)
   15383 		v0.AddArg(v1)
   15384 		v.AddArg(v0)
   15385 		v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, t)
   15386 		v2.AuxInt = 64
   15387 		v.AddArg(v2)
   15388 		v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags)
   15389 		v4 := b.NewValue0(v.Line, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags))
   15390 		v4.AddArg(x)
   15391 		v3.AddArg(v4)
   15392 		v.AddArg(v3)
   15393 		return true
   15394 	}
   15395 }
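// BSF sets ZF and leaves its destination undefined when its source is zero,
// so the Ctz lowerings above materialize BSF twice, once for the value and
// once for the flags (the duplicates should later be merged by CSE), and use
// CMOVLEQ/CMOVQEQ to substitute 32 or 64 when the input was zero, matching
// the convention that the trailing-zero count of 0 is the full bit width.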
   15396 func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
   15397 	b := v.Block
   15398 	_ = b
   15399 	// match: (Cvt32Fto32 x)
   15400 	// cond:
   15401 	// result: (CVTTSS2SL x)
   15402 	for {
   15403 		x := v.Args[0]
   15404 		v.reset(OpAMD64CVTTSS2SL)
   15405 		v.AddArg(x)
   15406 		return true
   15407 	}
   15408 }
   15409 func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
   15410 	b := v.Block
   15411 	_ = b
   15412 	// match: (Cvt32Fto64 x)
   15413 	// cond:
   15414 	// result: (CVTTSS2SQ x)
   15415 	for {
   15416 		x := v.Args[0]
   15417 		v.reset(OpAMD64CVTTSS2SQ)
   15418 		v.AddArg(x)
   15419 		return true
   15420 	}
   15421 }
   15422 func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
   15423 	b := v.Block
   15424 	_ = b
   15425 	// match: (Cvt32Fto64F x)
   15426 	// cond:
   15427 	// result: (CVTSS2SD x)
   15428 	for {
   15429 		x := v.Args[0]
   15430 		v.reset(OpAMD64CVTSS2SD)
   15431 		v.AddArg(x)
   15432 		return true
   15433 	}
   15434 }
   15435 func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
   15436 	b := v.Block
   15437 	_ = b
   15438 	// match: (Cvt32to32F x)
   15439 	// cond:
   15440 	// result: (CVTSL2SS x)
   15441 	for {
   15442 		x := v.Args[0]
   15443 		v.reset(OpAMD64CVTSL2SS)
   15444 		v.AddArg(x)
   15445 		return true
   15446 	}
   15447 }
   15448 func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
   15449 	b := v.Block
   15450 	_ = b
   15451 	// match: (Cvt32to64F x)
   15452 	// cond:
   15453 	// result: (CVTSL2SD x)
   15454 	for {
   15455 		x := v.Args[0]
   15456 		v.reset(OpAMD64CVTSL2SD)
   15457 		v.AddArg(x)
   15458 		return true
   15459 	}
   15460 }
   15461 func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
   15462 	b := v.Block
   15463 	_ = b
   15464 	// match: (Cvt64Fto32 x)
   15465 	// cond:
   15466 	// result: (CVTTSD2SL x)
   15467 	for {
   15468 		x := v.Args[0]
   15469 		v.reset(OpAMD64CVTTSD2SL)
   15470 		v.AddArg(x)
   15471 		return true
   15472 	}
   15473 }
   15474 func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
   15475 	b := v.Block
   15476 	_ = b
   15477 	// match: (Cvt64Fto32F x)
   15478 	// cond:
   15479 	// result: (CVTSD2SS x)
   15480 	for {
   15481 		x := v.Args[0]
   15482 		v.reset(OpAMD64CVTSD2SS)
   15483 		v.AddArg(x)
   15484 		return true
   15485 	}
   15486 }
   15487 func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
   15488 	b := v.Block
   15489 	_ = b
   15490 	// match: (Cvt64Fto64 x)
   15491 	// cond:
   15492 	// result: (CVTTSD2SQ x)
   15493 	for {
   15494 		x := v.Args[0]
   15495 		v.reset(OpAMD64CVTTSD2SQ)
   15496 		v.AddArg(x)
   15497 		return true
   15498 	}
   15499 }
   15500 func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
   15501 	b := v.Block
   15502 	_ = b
   15503 	// match: (Cvt64to32F x)
   15504 	// cond:
   15505 	// result: (CVTSQ2SS x)
   15506 	for {
   15507 		x := v.Args[0]
   15508 		v.reset(OpAMD64CVTSQ2SS)
   15509 		v.AddArg(x)
   15510 		return true
   15511 	}
   15512 }
   15513 func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
   15514 	b := v.Block
   15515 	_ = b
   15516 	// match: (Cvt64to64F x)
   15517 	// cond:
   15518 	// result: (CVTSQ2SD x)
   15519 	for {
   15520 		x := v.Args[0]
   15521 		v.reset(OpAMD64CVTSQ2SD)
   15522 		v.AddArg(x)
   15523 		return true
   15524 	}
   15525 }
func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (DeferCall [argwid] mem)
	// cond:
	// result: (CALLdefer [argwid] mem)
	for {
		argwid := v.AuxInt
		mem := v.Args[0]
		v.reset(OpAMD64CALLdefer)
		v.AuxInt = argwid
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpDiv128u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div128u xhi xlo y)
	// cond:
	// result: (DIVQU2 xhi xlo y)
	for {
		xhi := v.Args[0]
		xlo := v.Args[1]
		y := v.Args[2]
		v.reset(OpAMD64DIVQU2)
		v.AddArg(xhi)
		v.AddArg(xlo)
		v.AddArg(y)
		return true
	}
}
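// Div128u maps directly onto DIVQU2, which appears to model x86's
// 128-bit-by-64-bit unsigned DIV (RDX:RAX divided by the operand).
// Presumably it is only fed operands for which the quotient fits in
// 64 bits, since the hardware instruction faults on overflow.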
func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div16  x y)
	// cond:
	// result: (Select0 (DIVW  x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div16u x y)
	// cond:
	// result: (Select0 (DIVWU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div32  x y)
	// cond:
	// result: (Select0 (DIVL  x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div32F x y)
	// cond:
	// result: (DIVSS x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div32u x y)
	// cond:
	// result: (Select0 (DIVLU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div64  x y)
	// cond:
	// result: (Select0 (DIVQ  x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div64F x y)
	// cond:
	// result: (DIVSD x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div64u x y)
	// cond:
	// result: (Select0 (DIVQU x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div8   x y)
	// cond:
	// result: (Select0 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
		v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Div8u  x y)
	// cond:
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
		v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
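// The integer Div* rules lower to the tuple-valued DIV* machine ops,
// which model x86's DIV/IDIV producing quotient and remainder at
// once; Select0 extracts the quotient (the Mod* rules later in this
// file use Select1 for the remainder). The 8-bit cases first widen
// both operands to 16 bits and divide with DIVW, presumably so the
// results land in AX and DX as for the wider sizes rather than in AL
// and AH as the one-byte divide would leave them.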
func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq16  x y)
	// cond:
	// result: (SETEQ (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq32  x y)
	// cond:
	// result: (SETEQ (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq32F x y)
	// cond:
	// result: (SETEQF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq64  x y)
	// cond:
	// result: (SETEQ (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq64F x y)
	// cond:
	// result: (SETEQF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Eq8   x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (EqB   x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
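// Integer equality lowers uniformly to a width-matched CMP feeding
// SETEQ. The float cases use SETEQF over UCOMISS/UCOMISD instead:
// UCOMIS* signals an unordered comparison (a NaN operand) through the
// parity flag, and SETEQF is a pseudo-op that folds the parity check
// in so that comparing against NaN correctly yields false.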
func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (EqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETEQ (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (EqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETEQ (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
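// EqPtr is the first rule in this stretch guarded by config.PtrSize:
// pointer comparisons use CMPQ when pointers are 8 bytes wide and CMPL
// when they are 4 bytes wide (as in the amd64p32 configuration).
// IsNonNil and Load below make the same split.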
func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq16  x y)
	// cond:
	// result: (SETGE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq16U x y)
	// cond:
	// result: (SETAE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq32  x y)
	// cond:
	// result: (SETGE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq32U x y)
	// cond:
	// result: (SETAE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq64  x y)
	// cond:
	// result: (SETGE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq64U x y)
	// cond:
	// result: (SETAE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq8   x y)
	// cond:
	// result: (SETGE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Geq8U  x y)
	// cond:
	// result: (SETAE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
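// The ordered comparisons all follow one scheme: a width-matched CMP
// produces flags and a SETcc materializes the boolean. Signed
// comparisons use the signed condition codes (SETGE here, and SETG,
// SETLE, SETL below), unsigned comparisons use the carry-based codes
// (SETAE, SETA, SETBE, SETB), and the float variants use the SETGEF
// and SETGF pseudo-ops over UCOMISS/UCOMISD so that comparisons
// involving NaN come out false.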
func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (GetClosurePtr)
	// cond:
	// result: (LoweredGetClosurePtr)
	for {
		v.reset(OpAMD64LoweredGetClosurePtr)
		return true
	}
}
func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (GetG mem)
	// cond:
	// result: (LoweredGetG mem)
	for {
		mem := v.Args[0]
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (GoCall [argwid] mem)
	// cond:
	// result: (CALLgo [argwid] mem)
	for {
		argwid := v.AuxInt
		mem := v.Args[0]
		v.reset(OpAMD64CALLgo)
		v.AuxInt = argwid
		v.AddArg(mem)
		return true
	}
}
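// The call-related ops are near-trivial renamings: DeferCall, GoCall,
// and InterCall become CALLdefer, CALLgo, and CALLinter with the
// argument width carried through in AuxInt, while GetClosurePtr and
// GetG become Lowered* placeholders that later compiler phases expand
// (on amd64 the closure pointer arrives in DX and g is reached via
// thread-local storage).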
func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater16  x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater16U x y)
	// cond:
	// result: (SETA (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater32  x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater32F x y)
	// cond:
	// result: (SETGF (UCOMISS x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater32U x y)
	// cond:
	// result: (SETA (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater64  x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater64F x y)
	// cond:
	// result: (SETGF (UCOMISD x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater64U x y)
	// cond:
	// result: (SETA (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater8   x y)
	// cond:
	// result: (SETG (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Greater8U  x y)
	// cond:
	// result: (SETA (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul16  x y)
	// cond:
	// result: (HMULW  x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul16u x y)
	// cond:
	// result: (HMULWU x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULWU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul32  x y)
	// cond:
	// result: (HMULL  x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul32u x y)
	// cond:
	// result: (HMULLU x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULLU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul64  x y)
	// cond:
	// result: (HMULQ  x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul64u x y)
	// cond:
	// result: (HMULQU x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul8   x y)
	// cond:
	// result: (HMULB  x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Hmul8u  x y)
	// cond:
	// result: (HMULBU x y)
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULBU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
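// Hmul* computes the high half of a double-width product, which is
// exactly what x86's one-operand MUL/IMUL leaves in DX/EDX/RDX, so the
// HMUL* ops are thin wrappers over those instructions with the usual
// signed/unsigned split.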
func rewriteValueAMD64_OpInt64Hi(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Int64Hi x)
	// cond:
	// result: (SHRQconst [32] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = 32
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (InterCall [argwid] entry mem)
	// cond:
	// result: (CALLinter [argwid] entry mem)
	for {
		argwid := v.AuxInt
		entry := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64CALLinter)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (IsInBounds idx len)
	// cond:
	// result: (SETB (CMPQ idx len))
	for {
		idx := v.Args[0]
		len := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (IsNonNil p)
	// cond: config.PtrSize == 8
	// result: (SETNE (TESTQ p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	// match: (IsNonNil p)
	// cond: config.PtrSize == 4
	// result: (SETNE (TESTL p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Line, OpAMD64TESTL, TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	return false
}
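// The nil check is lowered without loading a zero constant: TEST ANDs
// its operands and sets ZF on a zero result, so TESTQ p p (or TESTL
// under 4-byte pointers) sets ZF exactly when p is nil, and SETNE then
// produces 1 for any non-nil pointer.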
func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (IsSliceInBounds idx len)
	// cond:
	// result: (SETBE (CMPQ idx len))
	for {
		idx := v.Args[0]
		len := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
}
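// Both bounds checks use an unsigned comparison to perform two tests
// at once: reinterpreted as unsigned, a negative index becomes a huge
// value, so the single idx < len test (SETB) also rejects idx < 0.
// Slice bounds additionally permit idx == len, hence SETBE above.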
func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq16  x y)
	// cond:
	// result: (SETLE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq16U x y)
	// cond:
	// result: (SETBE (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq32  x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq32U x y)
	// cond:
	// result: (SETBE (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq64  x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq64U x y)
	// cond:
	// result: (SETBE (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq8   x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Leq8U  x y)
	// cond:
	// result: (SETBE (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less16  x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less16U x y)
	// cond:
	// result: (SETB (CMPW x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less32  x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less32F x y)
	// cond:
	// result: (SETGF (UCOMISS y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less32U x y)
	// cond:
	// result: (SETB (CMPL x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less64  x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less64F x y)
	// cond:
	// result: (SETGF (UCOMISD y x))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less64U x y)
	// cond:
	// result: (SETB (CMPQ x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less8   x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Less8U  x y)
	// cond:
	// result: (SETB (CMPB x y))
	for {
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
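// Note the operand swap in the float Less*/Leq* rules: x < y is
// lowered as (SETGF (UCOMISS y x)) rather than with a SETLF-style op.
// SETGF and SETGEF are built on the carry-flag conditions, which
// UCOMIS* also sets for unordered results, so with the operands
// reversed any comparison involving a NaN evaluates to false, matching
// Go semantics without an extra parity-flag check.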
func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
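// Load dispatches purely on the loaded type: MOVQload for 64-bit
// integers (and 8-byte pointers), MOVLload for 32-bit integers (and
// 4-byte pointers), then MOVWload, MOVBload (which also covers
// booleans), and MOVSSload/MOVSDload for the two float widths. The
// conditions are mutually exclusive, so exactly one case fires for
// any scalar type that reaches this function.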
func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lrot16 <t> x [c])
	// cond:
	// result: (ROLWconst <t> [c&15] x)
	for {
		t := v.Type
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.Type = t
		v.AuxInt = c & 15
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lrot32 <t> x [c])
	// cond:
	// result: (ROLLconst <t> [c&31] x)
	for {
		t := v.Type
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.Type = t
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lrot64 <t> x [c])
	// cond:
	// result: (ROLQconst <t> [c&63] x)
	for {
		t := v.Type
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.Type = t
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lrot8  <t> x [c])
	// cond:
	// result: (ROLBconst <t> [c&7] x)
	for {
		t := v.Type
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.Type = t
		v.AuxInt = c & 7
		v.AddArg(x)
		return true
	}
}
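// Lrot* (rotate left) appears here only with a constant count, which
// is masked to the operand width (c&15, c&31, c&63, c&7) before being
// stored in AuxInt, matching the modular behavior of the ROL
// instructions and keeping the constant in canonical range.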
func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x8  <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x8  <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x16 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x32 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x64 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x8  <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x8  <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
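// All sixteen Lsh* rules above instantiate a single masking pattern.
// The hardware shifts reduce their count modulo 32 or 64, but Go
// defines x << s as 0 once s reaches the operand's bit width, so the
// raw shift is ANDed with a mask:
//
//	(ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
//
// CMP*const y [k] sets the carry flag exactly when y < k as an
// unsigned value, and SBB*carrymask materializes 0 - CF: all ones when
// the carry is set, zero otherwise. For y < 32 the AND therefore keeps
// the shifted value, and for y >= 32 it forces the result to 0. The 8-
// and 16-bit shifts compare against 32 rather than their own width
// because they are performed with the 32-bit SHLL; a count between the
// type width and 31 already pushes every interesting bit past the
// portion of the register that the narrow result type retains.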
   17374 func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
   17375 	b := v.Block
   17376 	_ = b
   17377 	// match: (Mod16  x y)
   17378 	// cond:
   17379 	// result: (Select1 (DIVW  x y))
   17380 	for {
   17381 		x := v.Args[0]
   17382 		y := v.Args[1]
   17383 		v.reset(OpSelect1)
   17384 		v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
   17385 		v0.AddArg(x)
   17386 		v0.AddArg(y)
   17387 		v.AddArg(v0)
   17388 		return true
   17389 	}
   17390 }
   17391 func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
   17392 	b := v.Block
   17393 	_ = b
   17394 	// match: (Mod16u x y)
   17395 	// cond:
   17396 	// result: (Select1 (DIVWU x y))
   17397 	for {
   17398 		x := v.Args[0]
   17399 		y := v.Args[1]
   17400 		v.reset(OpSelect1)
   17401 		v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
   17402 		v0.AddArg(x)
   17403 		v0.AddArg(y)
   17404 		v.AddArg(v0)
   17405 		return true
   17406 	}
   17407 }
   17408 func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
   17409 	b := v.Block
   17410 	_ = b
   17411 	// match: (Mod32  x y)
   17412 	// cond:
   17413 	// result: (Select1 (DIVL  x y))
   17414 	for {
   17415 		x := v.Args[0]
   17416 		y := v.Args[1]
   17417 		v.reset(OpSelect1)
   17418 		v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
   17419 		v0.AddArg(x)
   17420 		v0.AddArg(y)
   17421 		v.AddArg(v0)
   17422 		return true
   17423 	}
   17424 }
   17425 func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
   17426 	b := v.Block
   17427 	_ = b
   17428 	// match: (Mod32u x y)
   17429 	// cond:
   17430 	// result: (Select1 (DIVLU x y))
   17431 	for {
   17432 		x := v.Args[0]
   17433 		y := v.Args[1]
   17434 		v.reset(OpSelect1)
   17435 		v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
   17436 		v0.AddArg(x)
   17437 		v0.AddArg(y)
   17438 		v.AddArg(v0)
   17439 		return true
   17440 	}
   17441 }
   17442 func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
   17443 	b := v.Block
   17444 	_ = b
   17445 	// match: (Mod64  x y)
   17446 	// cond:
   17447 	// result: (Select1 (DIVQ  x y))
   17448 	for {
   17449 		x := v.Args[0]
   17450 		y := v.Args[1]
   17451 		v.reset(OpSelect1)
   17452 		v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
   17453 		v0.AddArg(x)
   17454 		v0.AddArg(y)
   17455 		v.AddArg(v0)
   17456 		return true
   17457 	}
   17458 }
   17459 func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
   17460 	b := v.Block
   17461 	_ = b
   17462 	// match: (Mod64u x y)
   17463 	// cond:
   17464 	// result: (Select1 (DIVQU x y))
   17465 	for {
   17466 		x := v.Args[0]
   17467 		y := v.Args[1]
   17468 		v.reset(OpSelect1)
   17469 		v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
   17470 		v0.AddArg(x)
   17471 		v0.AddArg(y)
   17472 		v.AddArg(v0)
   17473 		return true
   17474 	}
   17475 }
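// There is no 8-bit case of the tuple DIV: the x86 8-bit divide leaves its
// remainder in AH, which the SSA backend does not model as a separately
// allocatable register. Mod8 and Mod8u therefore widen both operands to
// 16 bits (sign- or zero-extending to match signedness) and reuse the
// 16-bit rule.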
   17476 func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
   17477 	b := v.Block
   17478 	_ = b
   17479 	// match: (Mod8   x y)
   17480 	// cond:
   17481 	// result: (Select1 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
   17482 	for {
   17483 		x := v.Args[0]
   17484 		y := v.Args[1]
   17485 		v.reset(OpSelect1)
   17486 		v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
   17487 		v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
   17488 		v1.AddArg(x)
   17489 		v0.AddArg(v1)
   17490 		v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
   17491 		v2.AddArg(y)
   17492 		v0.AddArg(v2)
   17493 		v.AddArg(v0)
   17494 		return true
   17495 	}
   17496 }
   17497 func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
   17498 	b := v.Block
   17499 	_ = b
   17500 	// match: (Mod8u  x y)
   17501 	// cond:
   17502 	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
   17503 	for {
   17504 		x := v.Args[0]
   17505 		y := v.Args[1]
   17506 		v.reset(OpSelect1)
   17507 		v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
   17508 		v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
   17509 		v1.AddArg(x)
   17510 		v0.AddArg(v1)
   17511 		v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
   17512 		v2.AddArg(y)
   17513 		v0.AddArg(v2)
   17514 		v.AddArg(v0)
   17515 		return true
   17516 	}
   17517 }
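// Move is lowered purely by size, walking the rules below in order:
//   - size 0: no copy, just the incoming memory state;
//   - sizes 1, 2, 4, 8, 16: one load/store pair of matching width;
//   - sizes 3, 5, 6, 7 and 9..15: two load/store pairs, the second
//     possibly overlapping the first (e.g. size 7 copies bytes [3,7)
//     and then [0,4));
//   - larger sizes not a multiple of 16: copy the first 8 or 16 bytes to
//     cover the size%16 remainder, then recurse via OpMove on the
//     remaining multiple of 16 at offset size%16;
//   - multiples of 16 from 32 to 16*64: DUFFCOPY, whose AuxInt
//     14*(64-size/16) is the entry offset into the runtime's Duff's
//     device of 64 copy blocks (each block presumably occupying 14 bytes
//     of code and moving 16 bytes of data);
//   - otherwise, when size%8 == 0: REP MOVSQ, with the quadword count
//     materialized by a MOVQconst.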
   17518 func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
   17519 	b := v.Block
   17520 	_ = b
   17521 	// match: (Move [s] _ _ mem)
   17522 	// cond: SizeAndAlign(s).Size() == 0
   17523 	// result: mem
   17524 	for {
   17525 		s := v.AuxInt
   17526 		mem := v.Args[2]
   17527 		if !(SizeAndAlign(s).Size() == 0) {
   17528 			break
   17529 		}
   17530 		v.reset(OpCopy)
   17531 		v.Type = mem.Type
   17532 		v.AddArg(mem)
   17533 		return true
   17534 	}
   17535 	// match: (Move [s] dst src mem)
   17536 	// cond: SizeAndAlign(s).Size() == 1
   17537 	// result: (MOVBstore dst (MOVBload src mem) mem)
   17538 	for {
   17539 		s := v.AuxInt
   17540 		dst := v.Args[0]
   17541 		src := v.Args[1]
   17542 		mem := v.Args[2]
   17543 		if !(SizeAndAlign(s).Size() == 1) {
   17544 			break
   17545 		}
   17546 		v.reset(OpAMD64MOVBstore)
   17547 		v.AddArg(dst)
   17548 		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
   17549 		v0.AddArg(src)
   17550 		v0.AddArg(mem)
   17551 		v.AddArg(v0)
   17552 		v.AddArg(mem)
   17553 		return true
   17554 	}
   17555 	// match: (Move [s] dst src mem)
   17556 	// cond: SizeAndAlign(s).Size() == 2
   17557 	// result: (MOVWstore dst (MOVWload src mem) mem)
   17558 	for {
   17559 		s := v.AuxInt
   17560 		dst := v.Args[0]
   17561 		src := v.Args[1]
   17562 		mem := v.Args[2]
   17563 		if !(SizeAndAlign(s).Size() == 2) {
   17564 			break
   17565 		}
   17566 		v.reset(OpAMD64MOVWstore)
   17567 		v.AddArg(dst)
   17568 		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
   17569 		v0.AddArg(src)
   17570 		v0.AddArg(mem)
   17571 		v.AddArg(v0)
   17572 		v.AddArg(mem)
   17573 		return true
   17574 	}
   17575 	// match: (Move [s] dst src mem)
   17576 	// cond: SizeAndAlign(s).Size() == 4
   17577 	// result: (MOVLstore dst (MOVLload src mem) mem)
   17578 	for {
   17579 		s := v.AuxInt
   17580 		dst := v.Args[0]
   17581 		src := v.Args[1]
   17582 		mem := v.Args[2]
   17583 		if !(SizeAndAlign(s).Size() == 4) {
   17584 			break
   17585 		}
   17586 		v.reset(OpAMD64MOVLstore)
   17587 		v.AddArg(dst)
   17588 		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
   17589 		v0.AddArg(src)
   17590 		v0.AddArg(mem)
   17591 		v.AddArg(v0)
   17592 		v.AddArg(mem)
   17593 		return true
   17594 	}
   17595 	// match: (Move [s] dst src mem)
   17596 	// cond: SizeAndAlign(s).Size() == 8
   17597 	// result: (MOVQstore dst (MOVQload src mem) mem)
   17598 	for {
   17599 		s := v.AuxInt
   17600 		dst := v.Args[0]
   17601 		src := v.Args[1]
   17602 		mem := v.Args[2]
   17603 		if !(SizeAndAlign(s).Size() == 8) {
   17604 			break
   17605 		}
   17606 		v.reset(OpAMD64MOVQstore)
   17607 		v.AddArg(dst)
   17608 		v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
   17609 		v0.AddArg(src)
   17610 		v0.AddArg(mem)
   17611 		v.AddArg(v0)
   17612 		v.AddArg(mem)
   17613 		return true
   17614 	}
   17615 	// match: (Move [s] dst src mem)
   17616 	// cond: SizeAndAlign(s).Size() == 16
   17617 	// result: (MOVOstore dst (MOVOload src mem) mem)
   17618 	for {
   17619 		s := v.AuxInt
   17620 		dst := v.Args[0]
   17621 		src := v.Args[1]
   17622 		mem := v.Args[2]
   17623 		if !(SizeAndAlign(s).Size() == 16) {
   17624 			break
   17625 		}
   17626 		v.reset(OpAMD64MOVOstore)
   17627 		v.AddArg(dst)
   17628 		v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
   17629 		v0.AddArg(src)
   17630 		v0.AddArg(mem)
   17631 		v.AddArg(v0)
   17632 		v.AddArg(mem)
   17633 		return true
   17634 	}
   17635 	// match: (Move [s] dst src mem)
   17636 	// cond: SizeAndAlign(s).Size() == 3
   17637 	// result: (MOVBstore [2] dst (MOVBload [2] src mem) 		(MOVWstore dst (MOVWload src mem) mem))
   17638 	for {
   17639 		s := v.AuxInt
   17640 		dst := v.Args[0]
   17641 		src := v.Args[1]
   17642 		mem := v.Args[2]
   17643 		if !(SizeAndAlign(s).Size() == 3) {
   17644 			break
   17645 		}
   17646 		v.reset(OpAMD64MOVBstore)
   17647 		v.AuxInt = 2
   17648 		v.AddArg(dst)
   17649 		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
   17650 		v0.AuxInt = 2
   17651 		v0.AddArg(src)
   17652 		v0.AddArg(mem)
   17653 		v.AddArg(v0)
   17654 		v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem)
   17655 		v1.AddArg(dst)
   17656 		v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
   17657 		v2.AddArg(src)
   17658 		v2.AddArg(mem)
   17659 		v1.AddArg(v2)
   17660 		v1.AddArg(mem)
   17661 		v.AddArg(v1)
   17662 		return true
   17663 	}
   17664 	// match: (Move [s] dst src mem)
   17665 	// cond: SizeAndAlign(s).Size() == 5
   17666 	// result: (MOVBstore [4] dst (MOVBload [4] src mem) 		(MOVLstore dst (MOVLload src mem) mem))
   17667 	for {
   17668 		s := v.AuxInt
   17669 		dst := v.Args[0]
   17670 		src := v.Args[1]
   17671 		mem := v.Args[2]
   17672 		if !(SizeAndAlign(s).Size() == 5) {
   17673 			break
   17674 		}
   17675 		v.reset(OpAMD64MOVBstore)
   17676 		v.AuxInt = 4
   17677 		v.AddArg(dst)
   17678 		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
   17679 		v0.AuxInt = 4
   17680 		v0.AddArg(src)
   17681 		v0.AddArg(mem)
   17682 		v.AddArg(v0)
   17683 		v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
   17684 		v1.AddArg(dst)
   17685 		v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
   17686 		v2.AddArg(src)
   17687 		v2.AddArg(mem)
   17688 		v1.AddArg(v2)
   17689 		v1.AddArg(mem)
   17690 		v.AddArg(v1)
   17691 		return true
   17692 	}
   17693 	// match: (Move [s] dst src mem)
   17694 	// cond: SizeAndAlign(s).Size() == 6
   17695 	// result: (MOVWstore [4] dst (MOVWload [4] src mem) 		(MOVLstore dst (MOVLload src mem) mem))
   17696 	for {
   17697 		s := v.AuxInt
   17698 		dst := v.Args[0]
   17699 		src := v.Args[1]
   17700 		mem := v.Args[2]
   17701 		if !(SizeAndAlign(s).Size() == 6) {
   17702 			break
   17703 		}
   17704 		v.reset(OpAMD64MOVWstore)
   17705 		v.AuxInt = 4
   17706 		v.AddArg(dst)
   17707 		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
   17708 		v0.AuxInt = 4
   17709 		v0.AddArg(src)
   17710 		v0.AddArg(mem)
   17711 		v.AddArg(v0)
   17712 		v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
   17713 		v1.AddArg(dst)
   17714 		v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
   17715 		v2.AddArg(src)
   17716 		v2.AddArg(mem)
   17717 		v1.AddArg(v2)
   17718 		v1.AddArg(mem)
   17719 		v.AddArg(v1)
   17720 		return true
   17721 	}
   17722 	// match: (Move [s] dst src mem)
   17723 	// cond: SizeAndAlign(s).Size() == 7
   17724 	// result: (MOVLstore [3] dst (MOVLload [3] src mem) 		(MOVLstore dst (MOVLload src mem) mem))
   17725 	for {
   17726 		s := v.AuxInt
   17727 		dst := v.Args[0]
   17728 		src := v.Args[1]
   17729 		mem := v.Args[2]
   17730 		if !(SizeAndAlign(s).Size() == 7) {
   17731 			break
   17732 		}
   17733 		v.reset(OpAMD64MOVLstore)
   17734 		v.AuxInt = 3
   17735 		v.AddArg(dst)
   17736 		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
   17737 		v0.AuxInt = 3
   17738 		v0.AddArg(src)
   17739 		v0.AddArg(mem)
   17740 		v.AddArg(v0)
   17741 		v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
   17742 		v1.AddArg(dst)
   17743 		v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
   17744 		v2.AddArg(src)
   17745 		v2.AddArg(mem)
   17746 		v1.AddArg(v2)
   17747 		v1.AddArg(mem)
   17748 		v.AddArg(v1)
   17749 		return true
   17750 	}
   17751 	// match: (Move [s] dst src mem)
   17752 	// cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16
   17753 	// result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) 		(MOVQstore dst (MOVQload src mem) mem))
   17754 	for {
   17755 		s := v.AuxInt
   17756 		dst := v.Args[0]
   17757 		src := v.Args[1]
   17758 		mem := v.Args[2]
   17759 		if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) {
   17760 			break
   17761 		}
   17762 		v.reset(OpAMD64MOVQstore)
   17763 		v.AuxInt = SizeAndAlign(s).Size() - 8
   17764 		v.AddArg(dst)
   17765 		v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
   17766 		v0.AuxInt = SizeAndAlign(s).Size() - 8
   17767 		v0.AddArg(src)
   17768 		v0.AddArg(mem)
   17769 		v.AddArg(v0)
   17770 		v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
   17771 		v1.AddArg(dst)
   17772 		v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
   17773 		v2.AddArg(src)
   17774 		v2.AddArg(mem)
   17775 		v1.AddArg(v2)
   17776 		v1.AddArg(mem)
   17777 		v.AddArg(v1)
   17778 		return true
   17779 	}
   17780 	// match: (Move [s] dst src mem)
   17781 	// cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8
   17782 	// result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] 		(OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) 		(OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) 		(MOVQstore dst (MOVQload src mem) mem))
   17783 	for {
   17784 		s := v.AuxInt
   17785 		dst := v.Args[0]
   17786 		src := v.Args[1]
   17787 		mem := v.Args[2]
   17788 		if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) {
   17789 			break
   17790 		}
   17791 		v.reset(OpMove)
   17792 		v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
   17793 		v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type)
   17794 		v0.AuxInt = SizeAndAlign(s).Size() % 16
   17795 		v0.AddArg(dst)
   17796 		v.AddArg(v0)
   17797 		v1 := b.NewValue0(v.Line, OpOffPtr, src.Type)
   17798 		v1.AuxInt = SizeAndAlign(s).Size() % 16
   17799 		v1.AddArg(src)
   17800 		v.AddArg(v1)
   17801 		v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
   17802 		v2.AddArg(dst)
   17803 		v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
   17804 		v3.AddArg(src)
   17805 		v3.AddArg(mem)
   17806 		v2.AddArg(v3)
   17807 		v2.AddArg(mem)
   17808 		v.AddArg(v2)
   17809 		return true
   17810 	}
   17811 	// match: (Move [s] dst src mem)
   17812 	// cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8
   17813 	// result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] 		(OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) 		(OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) 		(MOVOstore dst (MOVOload src mem) mem))
   17814 	for {
   17815 		s := v.AuxInt
   17816 		dst := v.Args[0]
   17817 		src := v.Args[1]
   17818 		mem := v.Args[2]
   17819 		if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) {
   17820 			break
   17821 		}
   17822 		v.reset(OpMove)
   17823 		v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
   17824 		v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type)
   17825 		v0.AuxInt = SizeAndAlign(s).Size() % 16
   17826 		v0.AddArg(dst)
   17827 		v.AddArg(v0)
   17828 		v1 := b.NewValue0(v.Line, OpOffPtr, src.Type)
   17829 		v1.AuxInt = SizeAndAlign(s).Size() % 16
   17830 		v1.AddArg(src)
   17831 		v.AddArg(v1)
   17832 		v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem)
   17833 		v2.AddArg(dst)
   17834 		v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
   17835 		v3.AddArg(src)
   17836 		v3.AddArg(mem)
   17837 		v2.AddArg(v3)
   17838 		v2.AddArg(mem)
   17839 		v.AddArg(v2)
   17840 		return true
   17841 	}
   17842 	// match: (Move [s] dst src mem)
   17843 	// cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 	&& !config.noDuffDevice
   17844 	// result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
   17845 	for {
   17846 		s := v.AuxInt
   17847 		dst := v.Args[0]
   17848 		src := v.Args[1]
   17849 		mem := v.Args[2]
   17850 		if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
   17851 			break
   17852 		}
   17853 		v.reset(OpAMD64DUFFCOPY)
   17854 		v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16)
   17855 		v.AddArg(dst)
   17856 		v.AddArg(src)
   17857 		v.AddArg(mem)
   17858 		return true
   17859 	}
   17860 	// match: (Move [s] dst src mem)
   17861 	// cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0
   17862 	// result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)
   17863 	for {
   17864 		s := v.AuxInt
   17865 		dst := v.Args[0]
   17866 		src := v.Args[1]
   17867 		mem := v.Args[2]
   17868 		if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) {
   17869 			break
   17870 		}
   17871 		v.reset(OpAMD64REPMOVSQ)
   17872 		v.AddArg(dst)
   17873 		v.AddArg(src)
   17874 		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
   17875 		v0.AuxInt = SizeAndAlign(s).Size() / 8
   17876 		v.AddArg(v0)
   17877 		v.AddArg(mem)
   17878 		return true
   17879 	}
   17880 	return false
   17881 }
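// All of Mul8/Mul16/Mul32 lower to MULL: the low 32 bits of a product do
// not depend on whether the inputs are treated as 8, 16, or 32 bits wide,
// so one 32-bit multiply serves every narrow width (truncation happens at
// the use). Mul64 uses MULQ, and Mul64uhilo uses MULQU2 to obtain the
// full 128-bit (hi, lo) result.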
   17882 func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
   17883 	b := v.Block
   17884 	_ = b
   17885 	// match: (Mul16  x y)
   17886 	// cond:
   17887 	// result: (MULL  x y)
   17888 	for {
   17889 		x := v.Args[0]
   17890 		y := v.Args[1]
   17891 		v.reset(OpAMD64MULL)
   17892 		v.AddArg(x)
   17893 		v.AddArg(y)
   17894 		return true
   17895 	}
   17896 }
   17897 func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
   17898 	b := v.Block
   17899 	_ = b
   17900 	// match: (Mul32  x y)
   17901 	// cond:
   17902 	// result: (MULL  x y)
   17903 	for {
   17904 		x := v.Args[0]
   17905 		y := v.Args[1]
   17906 		v.reset(OpAMD64MULL)
   17907 		v.AddArg(x)
   17908 		v.AddArg(y)
   17909 		return true
   17910 	}
   17911 }
   17912 func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
   17913 	b := v.Block
   17914 	_ = b
   17915 	// match: (Mul32F x y)
   17916 	// cond:
   17917 	// result: (MULSS x y)
   17918 	for {
   17919 		x := v.Args[0]
   17920 		y := v.Args[1]
   17921 		v.reset(OpAMD64MULSS)
   17922 		v.AddArg(x)
   17923 		v.AddArg(y)
   17924 		return true
   17925 	}
   17926 }
   17927 func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
   17928 	b := v.Block
   17929 	_ = b
   17930 	// match: (Mul64  x y)
   17931 	// cond:
   17932 	// result: (MULQ  x y)
   17933 	for {
   17934 		x := v.Args[0]
   17935 		y := v.Args[1]
   17936 		v.reset(OpAMD64MULQ)
   17937 		v.AddArg(x)
   17938 		v.AddArg(y)
   17939 		return true
   17940 	}
   17941 }
   17942 func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
   17943 	b := v.Block
   17944 	_ = b
   17945 	// match: (Mul64F x y)
   17946 	// cond:
   17947 	// result: (MULSD x y)
   17948 	for {
   17949 		x := v.Args[0]
   17950 		y := v.Args[1]
   17951 		v.reset(OpAMD64MULSD)
   17952 		v.AddArg(x)
   17953 		v.AddArg(y)
   17954 		return true
   17955 	}
   17956 }
   17957 func rewriteValueAMD64_OpMul64uhilo(v *Value, config *Config) bool {
   17958 	b := v.Block
   17959 	_ = b
   17960 	// match: (Mul64uhilo x y)
   17961 	// cond:
   17962 	// result: (MULQU2 x y)
   17963 	for {
   17964 		x := v.Args[0]
   17965 		y := v.Args[1]
   17966 		v.reset(OpAMD64MULQU2)
   17967 		v.AddArg(x)
   17968 		v.AddArg(y)
   17969 		return true
   17970 	}
   17971 }
   17972 func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
   17973 	b := v.Block
   17974 	_ = b
   17975 	// match: (Mul8   x y)
   17976 	// cond:
   17977 	// result: (MULL  x y)
   17978 	for {
   17979 		x := v.Args[0]
   17980 		y := v.Args[1]
   17981 		v.reset(OpAMD64MULL)
   17982 		v.AddArg(x)
   17983 		v.AddArg(y)
   17984 		return true
   17985 	}
   17986 }
   17987 func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
   17988 	b := v.Block
   17989 	_ = b
   17990 	// match: (Neg16  x)
   17991 	// cond:
   17992 	// result: (NEGL x)
   17993 	for {
   17994 		x := v.Args[0]
   17995 		v.reset(OpAMD64NEGL)
   17996 		v.AddArg(x)
   17997 		return true
   17998 	}
   17999 }
   18000 func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
   18001 	b := v.Block
   18002 	_ = b
   18003 	// match: (Neg32  x)
   18004 	// cond:
   18005 	// result: (NEGL x)
   18006 	for {
   18007 		x := v.Args[0]
   18008 		v.reset(OpAMD64NEGL)
   18009 		v.AddArg(x)
   18010 		return true
   18011 	}
   18012 }
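// Float negation is lowered to an XOR of the sign bit rather than a
// subtraction from zero: f2i(math.Copysign(0, -1)) is the bit pattern of
// -0.0, i.e. just the sign bit, so the PXOR flips the sign while leaving
// NaNs and zeros correct (0 - 0 would produce +0, but Neg(+0) must be -0).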
   18013 func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
   18014 	b := v.Block
   18015 	_ = b
   18016 	// match: (Neg32F x)
   18017 	// cond:
   18018 	// result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
   18019 	for {
   18020 		x := v.Args[0]
   18021 		v.reset(OpAMD64PXOR)
   18022 		v.AddArg(x)
   18023 		v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
   18024 		v0.AuxInt = f2i(math.Copysign(0, -1))
   18025 		v.AddArg(v0)
   18026 		return true
   18027 	}
   18028 }
   18029 func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
   18030 	b := v.Block
   18031 	_ = b
   18032 	// match: (Neg64  x)
   18033 	// cond:
   18034 	// result: (NEGQ x)
   18035 	for {
   18036 		x := v.Args[0]
   18037 		v.reset(OpAMD64NEGQ)
   18038 		v.AddArg(x)
   18039 		return true
   18040 	}
   18041 }
   18042 func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
   18043 	b := v.Block
   18044 	_ = b
   18045 	// match: (Neg64F x)
   18046 	// cond:
   18047 	// result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
   18048 	for {
   18049 		x := v.Args[0]
   18050 		v.reset(OpAMD64PXOR)
   18051 		v.AddArg(x)
   18052 		v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
   18053 		v0.AuxInt = f2i(math.Copysign(0, -1))
   18054 		v.AddArg(v0)
   18055 		return true
   18056 	}
   18057 }
   18058 func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
   18059 	b := v.Block
   18060 	_ = b
   18061 	// match: (Neg8   x)
   18062 	// cond:
   18063 	// result: (NEGL x)
   18064 	for {
   18065 		x := v.Args[0]
   18066 		v.reset(OpAMD64NEGL)
   18067 		v.AddArg(x)
   18068 		return true
   18069 	}
   18070 }
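// Neq* lowers to SETNE over the width-appropriate CMP. The float versions
// use SETNEF over UCOMISS/UCOMISD instead: UCOMIS* reports NaN operands as
// unordered via the parity flag, and SETNEF folds that in, so a comparison
// involving NaN is correctly not-equal.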
   18071 func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
   18072 	b := v.Block
   18073 	_ = b
   18074 	// match: (Neq16  x y)
   18075 	// cond:
   18076 	// result: (SETNE (CMPW x y))
   18077 	for {
   18078 		x := v.Args[0]
   18079 		y := v.Args[1]
   18080 		v.reset(OpAMD64SETNE)
   18081 		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
   18082 		v0.AddArg(x)
   18083 		v0.AddArg(y)
   18084 		v.AddArg(v0)
   18085 		return true
   18086 	}
   18087 }
   18088 func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
   18089 	b := v.Block
   18090 	_ = b
   18091 	// match: (Neq32  x y)
   18092 	// cond:
   18093 	// result: (SETNE (CMPL x y))
   18094 	for {
   18095 		x := v.Args[0]
   18096 		y := v.Args[1]
   18097 		v.reset(OpAMD64SETNE)
   18098 		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
   18099 		v0.AddArg(x)
   18100 		v0.AddArg(y)
   18101 		v.AddArg(v0)
   18102 		return true
   18103 	}
   18104 }
   18105 func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
   18106 	b := v.Block
   18107 	_ = b
   18108 	// match: (Neq32F x y)
   18109 	// cond:
   18110 	// result: (SETNEF (UCOMISS x y))
   18111 	for {
   18112 		x := v.Args[0]
   18113 		y := v.Args[1]
   18114 		v.reset(OpAMD64SETNEF)
   18115 		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
   18116 		v0.AddArg(x)
   18117 		v0.AddArg(y)
   18118 		v.AddArg(v0)
   18119 		return true
   18120 	}
   18121 }
   18122 func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
   18123 	b := v.Block
   18124 	_ = b
   18125 	// match: (Neq64  x y)
   18126 	// cond:
   18127 	// result: (SETNE (CMPQ x y))
   18128 	for {
   18129 		x := v.Args[0]
   18130 		y := v.Args[1]
   18131 		v.reset(OpAMD64SETNE)
   18132 		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
   18133 		v0.AddArg(x)
   18134 		v0.AddArg(y)
   18135 		v.AddArg(v0)
   18136 		return true
   18137 	}
   18138 }
   18139 func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
   18140 	b := v.Block
   18141 	_ = b
   18142 	// match: (Neq64F x y)
   18143 	// cond:
   18144 	// result: (SETNEF (UCOMISD x y))
   18145 	for {
   18146 		x := v.Args[0]
   18147 		y := v.Args[1]
   18148 		v.reset(OpAMD64SETNEF)
   18149 		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
   18150 		v0.AddArg(x)
   18151 		v0.AddArg(y)
   18152 		v.AddArg(v0)
   18153 		return true
   18154 	}
   18155 }
   18156 func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
   18157 	b := v.Block
   18158 	_ = b
   18159 	// match: (Neq8   x y)
   18160 	// cond:
   18161 	// result: (SETNE (CMPB x y))
   18162 	for {
   18163 		x := v.Args[0]
   18164 		y := v.Args[1]
   18165 		v.reset(OpAMD64SETNE)
   18166 		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
   18167 		v0.AddArg(x)
   18168 		v0.AddArg(y)
   18169 		v.AddArg(v0)
   18170 		return true
   18171 	}
   18172 }
   18173 func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
   18174 	b := v.Block
   18175 	_ = b
   18176 	// match: (NeqB   x y)
   18177 	// cond:
   18178 	// result: (SETNE (CMPB x y))
   18179 	for {
   18180 		x := v.Args[0]
   18181 		y := v.Args[1]
   18182 		v.reset(OpAMD64SETNE)
   18183 		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
   18184 		v0.AddArg(x)
   18185 		v0.AddArg(y)
   18186 		v.AddArg(v0)
   18187 		return true
   18188 	}
   18189 }
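// NeqPtr (like EqPtr) dispatches on config.PtrSize so the same rules serve
// both amd64 proper (8-byte pointers, CMPQ) and the 4-byte-pointer variant
// of the port (e.g. amd64p32), which compares with CMPL.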
   18190 func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
   18191 	b := v.Block
   18192 	_ = b
   18193 	// match: (NeqPtr x y)
   18194 	// cond: config.PtrSize == 8
   18195 	// result: (SETNE (CMPQ x y))
   18196 	for {
   18197 		x := v.Args[0]
   18198 		y := v.Args[1]
   18199 		if !(config.PtrSize == 8) {
   18200 			break
   18201 		}
   18202 		v.reset(OpAMD64SETNE)
   18203 		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
   18204 		v0.AddArg(x)
   18205 		v0.AddArg(y)
   18206 		v.AddArg(v0)
   18207 		return true
   18208 	}
   18209 	// match: (NeqPtr x y)
   18210 	// cond: config.PtrSize == 4
   18211 	// result: (SETNE (CMPL x y))
   18212 	for {
   18213 		x := v.Args[0]
   18214 		y := v.Args[1]
   18215 		if !(config.PtrSize == 4) {
   18216 			break
   18217 		}
   18218 		v.reset(OpAMD64SETNE)
   18219 		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
   18220 		v0.AddArg(x)
   18221 		v0.AddArg(y)
   18222 		v.AddArg(v0)
   18223 		return true
   18224 	}
   18225 	return false
   18226 }
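// NilCheck becomes LoweredNilCheck, a pseudo-op the backend turns into a
// cheap faulting memory access through the pointer (a test of the first
// byte), so that dereferencing nil traps at a well-defined instruction.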
   18227 func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
   18228 	b := v.Block
   18229 	_ = b
   18230 	// match: (NilCheck ptr mem)
   18231 	// cond:
   18232 	// result: (LoweredNilCheck ptr mem)
   18233 	for {
   18234 		ptr := v.Args[0]
   18235 		mem := v.Args[1]
   18236 		v.reset(OpAMD64LoweredNilCheck)
   18237 		v.AddArg(ptr)
   18238 		v.AddArg(mem)
   18239 		return true
   18240 	}
   18241 }
   18242 func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
   18243 	b := v.Block
   18244 	_ = b
   18245 	// match: (Not x)
   18246 	// cond:
   18247 	// result: (XORLconst [1] x)
   18248 	for {
   18249 		x := v.Args[0]
   18250 		v.reset(OpAMD64XORLconst)
   18251 		v.AuxInt = 1
   18252 		v.AddArg(x)
   18253 		return true
   18254 	}
   18255 }
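// OffPtr picks the cheapest pointer addition available: ADDQconst when the
// 8-byte offset fits in a signed 32-bit immediate (is32Bit), a MOVQconst
// materialized into a register plus ADDQ when it does not, and ADDLconst
// for 4-byte pointers.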
   18256 func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
   18257 	b := v.Block
   18258 	_ = b
   18259 	// match: (OffPtr [off] ptr)
   18260 	// cond: config.PtrSize == 8 && is32Bit(off)
   18261 	// result: (ADDQconst [off] ptr)
   18262 	for {
   18263 		off := v.AuxInt
   18264 		ptr := v.Args[0]
   18265 		if !(config.PtrSize == 8 && is32Bit(off)) {
   18266 			break
   18267 		}
   18268 		v.reset(OpAMD64ADDQconst)
   18269 		v.AuxInt = off
   18270 		v.AddArg(ptr)
   18271 		return true
   18272 	}
   18273 	// match: (OffPtr [off] ptr)
   18274 	// cond: config.PtrSize == 8
   18275 	// result: (ADDQ (MOVQconst [off]) ptr)
   18276 	for {
   18277 		off := v.AuxInt
   18278 		ptr := v.Args[0]
   18279 		if !(config.PtrSize == 8) {
   18280 			break
   18281 		}
   18282 		v.reset(OpAMD64ADDQ)
   18283 		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
   18284 		v0.AuxInt = off
   18285 		v.AddArg(v0)
   18286 		v.AddArg(ptr)
   18287 		return true
   18288 	}
   18289 	// match: (OffPtr [off] ptr)
   18290 	// cond: config.PtrSize == 4
   18291 	// result: (ADDLconst [off] ptr)
   18292 	for {
   18293 		off := v.AuxInt
   18294 		ptr := v.Args[0]
   18295 		if !(config.PtrSize == 4) {
   18296 			break
   18297 		}
   18298 		v.reset(OpAMD64ADDLconst)
   18299 		v.AuxInt = off
   18300 		v.AddArg(ptr)
   18301 		return true
   18302 	}
   18303 	return false
   18304 }
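// As with the other bitwise ops, every OR of 32 bits or narrower
// (including booleans) lowers to ORL: bits above the nominal width may be
// junk, but narrow results are only consumed through truncating or
// extending ops, so computing at 32-bit width is safe.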
   18305 func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
   18306 	b := v.Block
   18307 	_ = b
   18308 	// match: (Or16 x y)
   18309 	// cond:
   18310 	// result: (ORL x y)
   18311 	for {
   18312 		x := v.Args[0]
   18313 		y := v.Args[1]
   18314 		v.reset(OpAMD64ORL)
   18315 		v.AddArg(x)
   18316 		v.AddArg(y)
   18317 		return true
   18318 	}
   18319 }
   18320 func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
   18321 	b := v.Block
   18322 	_ = b
   18323 	// match: (Or32 x y)
   18324 	// cond:
   18325 	// result: (ORL x y)
   18326 	for {
   18327 		x := v.Args[0]
   18328 		y := v.Args[1]
   18329 		v.reset(OpAMD64ORL)
   18330 		v.AddArg(x)
   18331 		v.AddArg(y)
   18332 		return true
   18333 	}
   18334 }
   18335 func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
   18336 	b := v.Block
   18337 	_ = b
   18338 	// match: (Or64 x y)
   18339 	// cond:
   18340 	// result: (ORQ x y)
   18341 	for {
   18342 		x := v.Args[0]
   18343 		y := v.Args[1]
   18344 		v.reset(OpAMD64ORQ)
   18345 		v.AddArg(x)
   18346 		v.AddArg(y)
   18347 		return true
   18348 	}
   18349 }
   18350 func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
   18351 	b := v.Block
   18352 	_ = b
   18353 	// match: (Or8  x y)
   18354 	// cond:
   18355 	// result: (ORL x y)
   18356 	for {
   18357 		x := v.Args[0]
   18358 		y := v.Args[1]
   18359 		v.reset(OpAMD64ORL)
   18360 		v.AddArg(x)
   18361 		v.AddArg(y)
   18362 		return true
   18363 	}
   18364 }
   18365 func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
   18366 	b := v.Block
   18367 	_ = b
   18368 	// match: (OrB x y)
   18369 	// cond:
   18370 	// result: (ORL x y)
   18371 	for {
   18372 		x := v.Args[0]
   18373 		y := v.Args[1]
   18374 		v.reset(OpAMD64ORL)
   18375 		v.AddArg(x)
   18376 		v.AddArg(y)
   18377 		return true
   18378 	}
   18379 }
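// The unsigned right shifts mirror the left-shift pattern above, except
// that the shift runs at the operand's own width (SHRW, SHRL, SHRB) and
// the SBBLcarrymask compare is against that width (16, 32, or 8), so any
// count >= the width produces 0 regardless of the hardware's count
// masking.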
   18380 func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
   18381 	b := v.Block
   18382 	_ = b
   18383 	// match: (Rsh16Ux16 <t> x y)
   18384 	// cond:
   18385 	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
   18386 	for {
   18387 		t := v.Type
   18388 		x := v.Args[0]
   18389 		y := v.Args[1]
   18390 		v.reset(OpAMD64ANDL)
   18391 		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
   18392 		v0.AddArg(x)
   18393 		v0.AddArg(y)
   18394 		v.AddArg(v0)
   18395 		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
   18396 		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
   18397 		v2.AuxInt = 16
   18398 		v2.AddArg(y)
   18399 		v1.AddArg(v2)
   18400 		v.AddArg(v1)
   18401 		return true
   18402 	}
   18403 }
   18404 func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
   18405 	b := v.Block
   18406 	_ = b
   18407 	// match: (Rsh16Ux32 <t> x y)
   18408 	// cond:
   18409 	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
   18410 	for {
   18411 		t := v.Type
   18412 		x := v.Args[0]
   18413 		y := v.Args[1]
   18414 		v.reset(OpAMD64ANDL)
   18415 		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
   18416 		v0.AddArg(x)
   18417 		v0.AddArg(y)
   18418 		v.AddArg(v0)
   18419 		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
   18420 		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
   18421 		v2.AuxInt = 16
   18422 		v2.AddArg(y)
   18423 		v1.AddArg(v2)
   18424 		v.AddArg(v1)
   18425 		return true
   18426 	}
   18427 }
   18428 func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
   18429 	b := v.Block
   18430 	_ = b
   18431 	// match: (Rsh16Ux64 <t> x y)
   18432 	// cond:
   18433 	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
   18434 	for {
   18435 		t := v.Type
   18436 		x := v.Args[0]
   18437 		y := v.Args[1]
   18438 		v.reset(OpAMD64ANDL)
   18439 		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
   18440 		v0.AddArg(x)
   18441 		v0.AddArg(y)
   18442 		v.AddArg(v0)
   18443 		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
   18444 		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
   18445 		v2.AuxInt = 16
   18446 		v2.AddArg(y)
   18447 		v1.AddArg(v2)
   18448 		v.AddArg(v1)
   18449 		return true
   18450 	}
   18451 }
   18452 func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
   18453 	b := v.Block
   18454 	_ = b
   18455 	// match: (Rsh16Ux8  <t> x y)
   18456 	// cond:
   18457 	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
   18458 	for {
   18459 		t := v.Type
   18460 		x := v.Args[0]
   18461 		y := v.Args[1]
   18462 		v.reset(OpAMD64ANDL)
   18463 		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
   18464 		v0.AddArg(x)
   18465 		v0.AddArg(y)
   18466 		v.AddArg(v0)
   18467 		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
   18468 		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
   18469 		v2.AuxInt = 16
   18470 		v2.AddArg(y)
   18471 		v1.AddArg(v2)
   18472 		v.AddArg(v1)
   18473 		return true
   18474 	}
   18475 }
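// Signed right shifts cannot simply mask the result to zero: Go defines a
// shift by >= the width as filling with the sign bit. Instead the count is
// saturated. (SBBLcarrymask (CMPconst y [width])) is all ones for in-range
// counts, so its NOT is 0 and the OR leaves y unchanged; for out-of-range
// counts the NOT is all ones, the OR yields an all-ones count, and after
// hardware masking the SAR shifts by 31 (or 63), replicating the sign bit
// into every result bit.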
   18476 func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
   18477 	b := v.Block
   18478 	_ = b
   18479 	// match: (Rsh16x16 <t> x y)
   18480 	// cond:
   18481 	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
   18482 	for {
   18483 		t := v.Type
   18484 		x := v.Args[0]
   18485 		y := v.Args[1]
   18486 		v.reset(OpAMD64SARW)
   18487 		v.Type = t
   18488 		v.AddArg(x)
   18489 		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
   18490 		v0.AddArg(y)
   18491 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
   18492 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
   18493 		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
   18494 		v3.AuxInt = 16
   18495 		v3.AddArg(y)
   18496 		v2.AddArg(v3)
   18497 		v1.AddArg(v2)
   18498 		v0.AddArg(v1)
   18499 		v.AddArg(v0)
   18500 		return true
   18501 	}
   18502 }
   18503 func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
   18504 	b := v.Block
   18505 	_ = b
   18506 	// match: (Rsh16x32 <t> x y)
   18507 	// cond:
   18508 	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
   18509 	for {
   18510 		t := v.Type
   18511 		x := v.Args[0]
   18512 		y := v.Args[1]
   18513 		v.reset(OpAMD64SARW)
   18514 		v.Type = t
   18515 		v.AddArg(x)
   18516 		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
   18517 		v0.AddArg(y)
   18518 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
   18519 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
   18520 		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
   18521 		v3.AuxInt = 16
   18522 		v3.AddArg(y)
   18523 		v2.AddArg(v3)
   18524 		v1.AddArg(v2)
   18525 		v0.AddArg(v1)
   18526 		v.AddArg(v0)
   18527 		return true
   18528 	}
   18529 }
   18530 func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
   18531 	b := v.Block
   18532 	_ = b
   18533 	// match: (Rsh16x64 <t> x y)
   18534 	// cond:
   18535 	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
   18536 	for {
   18537 		t := v.Type
   18538 		x := v.Args[0]
   18539 		y := v.Args[1]
   18540 		v.reset(OpAMD64SARW)
   18541 		v.Type = t
   18542 		v.AddArg(x)
   18543 		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
   18544 		v0.AddArg(y)
   18545 		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
   18546 		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
   18547 		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
   18548 		v3.AuxInt = 16
   18549 		v3.AddArg(y)
   18550 		v2.AddArg(v3)
   18551 		v1.AddArg(v2)
   18552 		v0.AddArg(v1)
   18553 		v.AddArg(v0)
   18554 		return true
   18555 	}
   18556 }
   18557 func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
   18558 	b := v.Block
   18559 	_ = b
   18560 	// match: (Rsh16x8  <t> x y)
   18561 	// cond:
   18562 	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
   18563 	for {
   18564 		t := v.Type
   18565 		x := v.Args[0]
   18566 		y := v.Args[1]
   18567 		v.reset(OpAMD64SARW)
   18568 		v.Type = t
   18569 		v.AddArg(x)
   18570 		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
   18571 		v0.AddArg(y)
   18572 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
   18573 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
   18574 		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
   18575 		v3.AuxInt = 16
   18576 		v3.AddArg(y)
   18577 		v2.AddArg(v3)
   18578 		v1.AddArg(v2)
   18579 		v0.AddArg(v1)
   18580 		v.AddArg(v0)
   18581 		return true
   18582 	}
   18583 }
   18584 func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
   18585 	b := v.Block
   18586 	_ = b
   18587 	// match: (Rsh32Ux16 <t> x y)
   18588 	// cond:
   18589 	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
   18590 	for {
   18591 		t := v.Type
   18592 		x := v.Args[0]
   18593 		y := v.Args[1]
   18594 		v.reset(OpAMD64ANDL)
   18595 		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
   18596 		v0.AddArg(x)
   18597 		v0.AddArg(y)
   18598 		v.AddArg(v0)
   18599 		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
   18600 		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
   18601 		v2.AuxInt = 32
   18602 		v2.AddArg(y)
   18603 		v1.AddArg(v2)
   18604 		v.AddArg(v1)
   18605 		return true
   18606 	}
   18607 }
   18608 func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
   18609 	b := v.Block
   18610 	_ = b
   18611 	// match: (Rsh32Ux32 <t> x y)
   18612 	// cond:
   18613 	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
   18614 	for {
   18615 		t := v.Type
   18616 		x := v.Args[0]
   18617 		y := v.Args[1]
   18618 		v.reset(OpAMD64ANDL)
   18619 		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
   18620 		v0.AddArg(x)
   18621 		v0.AddArg(y)
   18622 		v.AddArg(v0)
   18623 		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
   18624 		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
   18625 		v2.AuxInt = 32
   18626 		v2.AddArg(y)
   18627 		v1.AddArg(v2)
   18628 		v.AddArg(v1)
   18629 		return true
   18630 	}
   18631 }
   18632 func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
   18633 	b := v.Block
   18634 	_ = b
   18635 	// match: (Rsh32Ux64 <t> x y)
   18636 	// cond:
   18637 	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
   18638 	for {
   18639 		t := v.Type
   18640 		x := v.Args[0]
   18641 		y := v.Args[1]
   18642 		v.reset(OpAMD64ANDL)
   18643 		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
   18644 		v0.AddArg(x)
   18645 		v0.AddArg(y)
   18646 		v.AddArg(v0)
   18647 		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
   18648 		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
   18649 		v2.AuxInt = 32
   18650 		v2.AddArg(y)
   18651 		v1.AddArg(v2)
   18652 		v.AddArg(v1)
   18653 		return true
   18654 	}
   18655 }
   18656 func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
   18657 	b := v.Block
   18658 	_ = b
   18659 	// match: (Rsh32Ux8  <t> x y)
   18660 	// cond:
   18661 	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
   18662 	for {
   18663 		t := v.Type
   18664 		x := v.Args[0]
   18665 		y := v.Args[1]
   18666 		v.reset(OpAMD64ANDL)
   18667 		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
   18668 		v0.AddArg(x)
   18669 		v0.AddArg(y)
   18670 		v.AddArg(v0)
   18671 		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
   18672 		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
   18673 		v2.AuxInt = 32
   18674 		v2.AddArg(y)
   18675 		v1.AddArg(v2)
   18676 		v.AddArg(v1)
   18677 		return true
   18678 	}
   18679 }
   18680 func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
   18681 	b := v.Block
   18682 	_ = b
   18683 	// match: (Rsh32x16 <t> x y)
   18684 	// cond:
   18685 	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
   18686 	for {
   18687 		t := v.Type
   18688 		x := v.Args[0]
   18689 		y := v.Args[1]
   18690 		v.reset(OpAMD64SARL)
   18691 		v.Type = t
   18692 		v.AddArg(x)
   18693 		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
   18694 		v0.AddArg(y)
   18695 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
   18696 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
   18697 		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
   18698 		v3.AuxInt = 32
   18699 		v3.AddArg(y)
   18700 		v2.AddArg(v3)
   18701 		v1.AddArg(v2)
   18702 		v0.AddArg(v1)
   18703 		v.AddArg(v0)
   18704 		return true
   18705 	}
   18706 }
   18707 func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
   18708 	b := v.Block
   18709 	_ = b
   18710 	// match: (Rsh32x32 <t> x y)
   18711 	// cond:
   18712 	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
   18713 	for {
   18714 		t := v.Type
   18715 		x := v.Args[0]
   18716 		y := v.Args[1]
   18717 		v.reset(OpAMD64SARL)
   18718 		v.Type = t
   18719 		v.AddArg(x)
   18720 		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
   18721 		v0.AddArg(y)
   18722 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
   18723 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
   18724 		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
   18725 		v3.AuxInt = 32
   18726 		v3.AddArg(y)
   18727 		v2.AddArg(v3)
   18728 		v1.AddArg(v2)
   18729 		v0.AddArg(v1)
   18730 		v.AddArg(v0)
   18731 		return true
   18732 	}
   18733 }
   18734 func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
   18735 	b := v.Block
   18736 	_ = b
   18737 	// match: (Rsh32x64 <t> x y)
   18738 	// cond:
   18739 	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
   18740 	for {
   18741 		t := v.Type
   18742 		x := v.Args[0]
   18743 		y := v.Args[1]
   18744 		v.reset(OpAMD64SARL)
   18745 		v.Type = t
   18746 		v.AddArg(x)
   18747 		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
   18748 		v0.AddArg(y)
   18749 		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
   18750 		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
   18751 		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
   18752 		v3.AuxInt = 32
   18753 		v3.AddArg(y)
   18754 		v2.AddArg(v3)
   18755 		v1.AddArg(v2)
   18756 		v0.AddArg(v1)
   18757 		v.AddArg(v0)
   18758 		return true
   18759 	}
   18760 }
   18761 func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
   18762 	b := v.Block
   18763 	_ = b
   18764 	// match: (Rsh32x8  <t> x y)
   18765 	// cond:
   18766 	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
   18767 	for {
   18768 		t := v.Type
   18769 		x := v.Args[0]
   18770 		y := v.Args[1]
   18771 		v.reset(OpAMD64SARL)
   18772 		v.Type = t
   18773 		v.AddArg(x)
   18774 		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
   18775 		v0.AddArg(y)
   18776 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
   18777 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
   18778 		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
   18779 		v3.AuxInt = 32
   18780 		v3.AddArg(y)
   18781 		v2.AddArg(v3)
   18782 		v1.AddArg(v2)
   18783 		v0.AddArg(v1)
   18784 		v.AddArg(v0)
   18785 		return true
   18786 	}
   18787 }
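// The 64-bit shifts switch to the Q forms throughout (ANDQ, SHRQ/SARQ,
// SBBQcarrymask): the mask must cover all 64 result bits, and
// SBBQcarrymask produces a 64-bit all-ones/all-zeros value from the same
// carry-flag trick.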
   18788 func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool {
   18789 	b := v.Block
   18790 	_ = b
   18791 	// match: (Rsh64Ux16 <t> x y)
   18792 	// cond:
   18793 	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
   18794 	for {
   18795 		t := v.Type
   18796 		x := v.Args[0]
   18797 		y := v.Args[1]
   18798 		v.reset(OpAMD64ANDQ)
   18799 		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
   18800 		v0.AddArg(x)
   18801 		v0.AddArg(y)
   18802 		v.AddArg(v0)
   18803 		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
   18804 		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
   18805 		v2.AuxInt = 64
   18806 		v2.AddArg(y)
   18807 		v1.AddArg(v2)
   18808 		v.AddArg(v1)
   18809 		return true
   18810 	}
   18811 }
   18812 func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
   18813 	b := v.Block
   18814 	_ = b
   18815 	// match: (Rsh64Ux32 <t> x y)
   18816 	// cond:
   18817 	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
   18818 	for {
   18819 		t := v.Type
   18820 		x := v.Args[0]
   18821 		y := v.Args[1]
   18822 		v.reset(OpAMD64ANDQ)
   18823 		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
   18824 		v0.AddArg(x)
   18825 		v0.AddArg(y)
   18826 		v.AddArg(v0)
   18827 		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
   18828 		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
   18829 		v2.AuxInt = 64
   18830 		v2.AddArg(y)
   18831 		v1.AddArg(v2)
   18832 		v.AddArg(v1)
   18833 		return true
   18834 	}
   18835 }
   18836 func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
   18837 	b := v.Block
   18838 	_ = b
   18839 	// match: (Rsh64Ux64 <t> x y)
   18840 	// cond:
   18841 	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
   18842 	for {
   18843 		t := v.Type
   18844 		x := v.Args[0]
   18845 		y := v.Args[1]
   18846 		v.reset(OpAMD64ANDQ)
   18847 		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
   18848 		v0.AddArg(x)
   18849 		v0.AddArg(y)
   18850 		v.AddArg(v0)
   18851 		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
   18852 		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
   18853 		v2.AuxInt = 64
   18854 		v2.AddArg(y)
   18855 		v1.AddArg(v2)
   18856 		v.AddArg(v1)
   18857 		return true
   18858 	}
   18859 }
   18860 func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
   18861 	b := v.Block
   18862 	_ = b
   18863 	// match: (Rsh64Ux8  <t> x y)
   18864 	// cond:
   18865 	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
   18866 	for {
   18867 		t := v.Type
   18868 		x := v.Args[0]
   18869 		y := v.Args[1]
   18870 		v.reset(OpAMD64ANDQ)
   18871 		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
   18872 		v0.AddArg(x)
   18873 		v0.AddArg(y)
   18874 		v.AddArg(v0)
   18875 		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
   18876 		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
   18877 		v2.AuxInt = 64
   18878 		v2.AddArg(y)
   18879 		v1.AddArg(v2)
   18880 		v.AddArg(v1)
   18881 		return true
   18882 	}
   18883 }
   18884 func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
   18885 	b := v.Block
   18886 	_ = b
   18887 	// match: (Rsh64x16 <t> x y)
   18888 	// cond:
   18889 	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
   18890 	for {
   18891 		t := v.Type
   18892 		x := v.Args[0]
   18893 		y := v.Args[1]
   18894 		v.reset(OpAMD64SARQ)
   18895 		v.Type = t
   18896 		v.AddArg(x)
   18897 		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
   18898 		v0.AddArg(y)
   18899 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
   18900 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
   18901 		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
   18902 		v3.AuxInt = 64
   18903 		v3.AddArg(y)
   18904 		v2.AddArg(v3)
   18905 		v1.AddArg(v2)
   18906 		v0.AddArg(v1)
   18907 		v.AddArg(v0)
   18908 		return true
   18909 	}
   18910 }
   18911 func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
   18912 	b := v.Block
   18913 	_ = b
   18914 	// match: (Rsh64x32 <t> x y)
   18915 	// cond:
   18916 	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
   18917 	for {
   18918 		t := v.Type
   18919 		x := v.Args[0]
   18920 		y := v.Args[1]
   18921 		v.reset(OpAMD64SARQ)
   18922 		v.Type = t
   18923 		v.AddArg(x)
   18924 		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
   18925 		v0.AddArg(y)
   18926 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
   18927 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
   18928 		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
   18929 		v3.AuxInt = 64
   18930 		v3.AddArg(y)
   18931 		v2.AddArg(v3)
   18932 		v1.AddArg(v2)
   18933 		v0.AddArg(v1)
   18934 		v.AddArg(v0)
   18935 		return true
   18936 	}
   18937 }
   18938 func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
   18939 	b := v.Block
   18940 	_ = b
   18941 	// match: (Rsh64x64 <t> x y)
   18942 	// cond:
   18943 	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
   18944 	for {
   18945 		t := v.Type
   18946 		x := v.Args[0]
   18947 		y := v.Args[1]
   18948 		v.reset(OpAMD64SARQ)
   18949 		v.Type = t
   18950 		v.AddArg(x)
   18951 		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
   18952 		v0.AddArg(y)
   18953 		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
   18954 		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
   18955 		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
   18956 		v3.AuxInt = 64
   18957 		v3.AddArg(y)
   18958 		v2.AddArg(v3)
   18959 		v1.AddArg(v2)
   18960 		v0.AddArg(v1)
   18961 		v.AddArg(v0)
   18962 		return true
   18963 	}
   18964 }
   18965 func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
   18966 	b := v.Block
   18967 	_ = b
   18968 	// match: (Rsh64x8  <t> x y)
   18969 	// cond:
   18970 	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
   18971 	for {
   18972 		t := v.Type
   18973 		x := v.Args[0]
   18974 		y := v.Args[1]
   18975 		v.reset(OpAMD64SARQ)
   18976 		v.Type = t
   18977 		v.AddArg(x)
   18978 		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
   18979 		v0.AddArg(y)
   18980 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
   18981 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
   18982 		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
   18983 		v3.AuxInt = 64
   18984 		v3.AddArg(y)
   18985 		v2.AddArg(v3)
   18986 		v1.AddArg(v2)
   18987 		v0.AddArg(v1)
   18988 		v.AddArg(v0)
   18989 		return true
   18990 	}
   18991 }
   18992 func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
   18993 	b := v.Block
   18994 	_ = b
   18995 	// match: (Rsh8Ux16 <t> x y)
   18996 	// cond:
   18997 	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
   18998 	for {
   18999 		t := v.Type
   19000 		x := v.Args[0]
   19001 		y := v.Args[1]
   19002 		v.reset(OpAMD64ANDL)
   19003 		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
   19004 		v0.AddArg(x)
   19005 		v0.AddArg(y)
   19006 		v.AddArg(v0)
   19007 		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
   19008 		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
   19009 		v2.AuxInt = 8
   19010 		v2.AddArg(y)
   19011 		v1.AddArg(v2)
   19012 		v.AddArg(v1)
   19013 		return true
   19014 	}
   19015 }
   19016 func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
   19017 	b := v.Block
   19018 	_ = b
   19019 	// match: (Rsh8Ux32 <t> x y)
   19020 	// cond:
   19021 	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
   19022 	for {
   19023 		t := v.Type
   19024 		x := v.Args[0]
   19025 		y := v.Args[1]
   19026 		v.reset(OpAMD64ANDL)
   19027 		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
   19028 		v0.AddArg(x)
   19029 		v0.AddArg(y)
   19030 		v.AddArg(v0)
   19031 		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
   19032 		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
   19033 		v2.AuxInt = 8
   19034 		v2.AddArg(y)
   19035 		v1.AddArg(v2)
   19036 		v.AddArg(v1)
   19037 		return true
   19038 	}
   19039 }
   19040 func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
   19041 	b := v.Block
   19042 	_ = b
   19043 	// match: (Rsh8Ux64 <t> x y)
   19044 	// cond:
   19045 	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
   19046 	for {
   19047 		t := v.Type
   19048 		x := v.Args[0]
   19049 		y := v.Args[1]
   19050 		v.reset(OpAMD64ANDL)
   19051 		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
   19052 		v0.AddArg(x)
   19053 		v0.AddArg(y)
   19054 		v.AddArg(v0)
   19055 		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
   19056 		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
   19057 		v2.AuxInt = 8
   19058 		v2.AddArg(y)
   19059 		v1.AddArg(v2)
   19060 		v.AddArg(v1)
   19061 		return true
   19062 	}
   19063 }
   19064 func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
   19065 	b := v.Block
   19066 	_ = b
   19067 	// match: (Rsh8Ux8  <t> x y)
   19068 	// cond:
   19069 	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
   19070 	for {
   19071 		t := v.Type
   19072 		x := v.Args[0]
   19073 		y := v.Args[1]
   19074 		v.reset(OpAMD64ANDL)
   19075 		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
   19076 		v0.AddArg(x)
   19077 		v0.AddArg(y)
   19078 		v.AddArg(v0)
   19079 		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
   19080 		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
   19081 		v2.AuxInt = 8
   19082 		v2.AddArg(y)
   19083 		v1.AddArg(v2)
   19084 		v.AddArg(v1)
   19085 		return true
   19086 	}
   19087 }
   19088 func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
   19089 	b := v.Block
   19090 	_ = b
   19091 	// match: (Rsh8x16 <t> x y)
   19092 	// cond:
   19093 	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
   19094 	for {
   19095 		t := v.Type
   19096 		x := v.Args[0]
   19097 		y := v.Args[1]
   19098 		v.reset(OpAMD64SARB)
   19099 		v.Type = t
   19100 		v.AddArg(x)
   19101 		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
   19102 		v0.AddArg(y)
   19103 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
   19104 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
   19105 		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
   19106 		v3.AuxInt = 8
   19107 		v3.AddArg(y)
   19108 		v2.AddArg(v3)
   19109 		v1.AddArg(v2)
   19110 		v0.AddArg(v1)
   19111 		v.AddArg(v0)
   19112 		return true
   19113 	}
   19114 }
   19115 func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
   19116 	b := v.Block
   19117 	_ = b
   19118 	// match: (Rsh8x32 <t> x y)
   19119 	// cond:
   19120 	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
   19121 	for {
   19122 		t := v.Type
   19123 		x := v.Args[0]
   19124 		y := v.Args[1]
   19125 		v.reset(OpAMD64SARB)
   19126 		v.Type = t
   19127 		v.AddArg(x)
   19128 		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
   19129 		v0.AddArg(y)
   19130 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
   19131 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
   19132 		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
   19133 		v3.AuxInt = 8
   19134 		v3.AddArg(y)
   19135 		v2.AddArg(v3)
   19136 		v1.AddArg(v2)
   19137 		v0.AddArg(v1)
   19138 		v.AddArg(v0)
   19139 		return true
   19140 	}
   19141 }
   19142 func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
   19143 	b := v.Block
   19144 	_ = b
   19145 	// match: (Rsh8x64 <t> x y)
   19146 	// cond:
   19147 	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
   19148 	for {
   19149 		t := v.Type
   19150 		x := v.Args[0]
   19151 		y := v.Args[1]
   19152 		v.reset(OpAMD64SARB)
   19153 		v.Type = t
   19154 		v.AddArg(x)
   19155 		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
   19156 		v0.AddArg(y)
   19157 		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
   19158 		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
   19159 		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
   19160 		v3.AuxInt = 8
   19161 		v3.AddArg(y)
   19162 		v2.AddArg(v3)
   19163 		v1.AddArg(v2)
   19164 		v0.AddArg(v1)
   19165 		v.AddArg(v0)
   19166 		return true
   19167 	}
   19168 }
   19169 func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
   19170 	b := v.Block
   19171 	_ = b
   19172 	// match: (Rsh8x8  <t> x y)
   19173 	// cond:
   19174 	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
   19175 	for {
   19176 		t := v.Type
   19177 		x := v.Args[0]
   19178 		y := v.Args[1]
   19179 		v.reset(OpAMD64SARB)
   19180 		v.Type = t
   19181 		v.AddArg(x)
   19182 		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
   19183 		v0.AddArg(y)
   19184 		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
   19185 		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
   19186 		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
   19187 		v3.AuxInt = 8
   19188 		v3.AddArg(y)
   19189 		v2.AddArg(v3)
   19190 		v1.AddArg(v2)
   19191 		v0.AddArg(v1)
   19192 		v.AddArg(v0)
   19193 		return true
   19194 	}
   19195 }
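// AddTupleFirst32/64 is a pseudo-op meaning "the wrapped tuple, with val
// added to its first element"; it is produced when lowering atomic adds,
// since the XADD instruction returns the old value while the Go operation
// must return old+val. Select0 materializes that addition, while Select1
// can drop it entirely: the tuple's second element (the memory state) is
// untouched.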
   19196 func rewriteValueAMD64_OpSelect0(v *Value, config *Config) bool {
   19197 	b := v.Block
   19198 	_ = b
   19199 	// match: (Select0 <t> (AddTupleFirst32 tuple val))
   19200 	// cond:
   19201 	// result: (ADDL val (Select0 <t> tuple))
   19202 	for {
   19203 		t := v.Type
   19204 		v_0 := v.Args[0]
   19205 		if v_0.Op != OpAMD64AddTupleFirst32 {
   19206 			break
   19207 		}
   19208 		tuple := v_0.Args[0]
   19209 		val := v_0.Args[1]
   19210 		v.reset(OpAMD64ADDL)
   19211 		v.AddArg(val)
   19212 		v0 := b.NewValue0(v.Line, OpSelect0, t)
   19213 		v0.AddArg(tuple)
   19214 		v.AddArg(v0)
   19215 		return true
   19216 	}
   19217 	// match: (Select0 <t> (AddTupleFirst64 tuple val))
   19218 	// cond:
   19219 	// result: (ADDQ val (Select0 <t> tuple))
   19220 	for {
   19221 		t := v.Type
   19222 		v_0 := v.Args[0]
   19223 		if v_0.Op != OpAMD64AddTupleFirst64 {
   19224 			break
   19225 		}
   19226 		tuple := v_0.Args[0]
   19227 		val := v_0.Args[1]
   19228 		v.reset(OpAMD64ADDQ)
   19229 		v.AddArg(val)
   19230 		v0 := b.NewValue0(v.Line, OpSelect0, t)
   19231 		v0.AddArg(tuple)
   19232 		v.AddArg(v0)
   19233 		return true
   19234 	}
   19235 	return false
   19236 }
   19237 func rewriteValueAMD64_OpSelect1(v *Value, config *Config) bool {
   19238 	b := v.Block
   19239 	_ = b
   19240 	// match: (Select1     (AddTupleFirst32 tuple _  ))
   19241 	// cond:
   19242 	// result: (Select1 tuple)
   19243 	for {
   19244 		v_0 := v.Args[0]
   19245 		if v_0.Op != OpAMD64AddTupleFirst32 {
   19246 			break
   19247 		}
   19248 		tuple := v_0.Args[0]
   19249 		v.reset(OpSelect1)
   19250 		v.AddArg(tuple)
   19251 		return true
   19252 	}
   19253 	// match: (Select1     (AddTupleFirst64 tuple _  ))
   19254 	// cond:
   19255 	// result: (Select1 tuple)
   19256 	for {
   19257 		v_0 := v.Args[0]
   19258 		if v_0.Op != OpAMD64AddTupleFirst64 {
   19259 			break
   19260 		}
   19261 		tuple := v_0.Args[0]
   19262 		v.reset(OpSelect1)
   19263 		v.AddArg(tuple)
   19264 		return true
   19265 	}
   19266 	return false
   19267 }
   19268 func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool {
   19269 	b := v.Block
   19270 	_ = b
   19271 	// match: (SignExt16to32 x)
   19272 	// cond:
   19273 	// result: (MOVWQSX x)
   19274 	for {
   19275 		x := v.Args[0]
   19276 		v.reset(OpAMD64MOVWQSX)
   19277 		v.AddArg(x)
   19278 		return true
   19279 	}
   19280 }
   19281 func rewriteValueAMD64_OpSignExt16to64(v *Value, config *Config) bool {
   19282 	b := v.Block
   19283 	_ = b
   19284 	// match: (SignExt16to64 x)
   19285 	// cond:
   19286 	// result: (MOVWQSX x)
   19287 	for {
   19288 		x := v.Args[0]
   19289 		v.reset(OpAMD64MOVWQSX)
   19290 		v.AddArg(x)
   19291 		return true
   19292 	}
   19293 }
   19294 func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool {
   19295 	b := v.Block
   19296 	_ = b
   19297 	// match: (SignExt32to64 x)
   19298 	// cond:
   19299 	// result: (MOVLQSX x)
   19300 	for {
   19301 		x := v.Args[0]
   19302 		v.reset(OpAMD64MOVLQSX)
   19303 		v.AddArg(x)
   19304 		return true
   19305 	}
   19306 }
   19307 func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool {
   19308 	b := v.Block
   19309 	_ = b
   19310 	// match: (SignExt8to16  x)
   19311 	// cond:
   19312 	// result: (MOVBQSX x)
   19313 	for {
   19314 		x := v.Args[0]
   19315 		v.reset(OpAMD64MOVBQSX)
   19316 		v.AddArg(x)
   19317 		return true
   19318 	}
   19319 }
   19320 func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool {
   19321 	b := v.Block
   19322 	_ = b
   19323 	// match: (SignExt8to32  x)
   19324 	// cond:
   19325 	// result: (MOVBQSX x)
   19326 	for {
   19327 		x := v.Args[0]
   19328 		v.reset(OpAMD64MOVBQSX)
   19329 		v.AddArg(x)
   19330 		return true
   19331 	}
   19332 }
   19333 func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool {
   19334 	b := v.Block
   19335 	_ = b
   19336 	// match: (SignExt8to64  x)
   19337 	// cond:
   19338 	// result: (MOVBQSX x)
   19339 	for {
   19340 		x := v.Args[0]
   19341 		v.reset(OpAMD64MOVBQSX)
   19342 		v.AddArg(x)
   19343 		return true
   19344 	}
   19345 }
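// Slicemask yields 0 when its argument is zero and all ones when it is
// positive. The lowering below is the branch-free form ^((x-1) >> 63):
// x-1 is negative exactly when x == 0, so the 63-bit arithmetic shift gives
// -1 in that case and 0 otherwise, and the final XOR with -1 inverts it.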
   19346 func rewriteValueAMD64_OpSlicemask(v *Value, config *Config) bool {
   19347 	b := v.Block
   19348 	_ = b
   19349 	// match: (Slicemask <t> x)
   19350 	// cond:
   19351 	// result: (XORQconst [-1] (SARQconst <t> (SUBQconst <t> x [1]) [63]))
   19352 	for {
   19353 		t := v.Type
   19354 		x := v.Args[0]
   19355 		v.reset(OpAMD64XORQconst)
   19356 		v.AuxInt = -1
   19357 		v0 := b.NewValue0(v.Line, OpAMD64SARQconst, t)
   19358 		v0.AuxInt = 63
   19359 		v1 := b.NewValue0(v.Line, OpAMD64SUBQconst, t)
   19360 		v1.AuxInt = 1
   19361 		v1.AddArg(x)
   19362 		v0.AddArg(v1)
   19363 		v.AddArg(v0)
   19364 		return true
   19365 	}
   19366 }
   19367 func rewriteValueAMD64_OpSqrt(v *Value, config *Config) bool {
   19368 	b := v.Block
   19369 	_ = b
   19370 	// match: (Sqrt x)
   19371 	// cond:
   19372 	// result: (SQRTSD x)
   19373 	for {
   19374 		x := v.Args[0]
   19375 		v.reset(OpAMD64SQRTSD)
   19376 		v.AddArg(x)
   19377 		return true
   19378 	}
   19379 }
   19380 func rewriteValueAMD64_OpStaticCall(v *Value, config *Config) bool {
   19381 	b := v.Block
   19382 	_ = b
   19383 	// match: (StaticCall [argwid] {target} mem)
   19384 	// cond:
   19385 	// result: (CALLstatic [argwid] {target} mem)
   19386 	for {
   19387 		argwid := v.AuxInt
   19388 		target := v.Aux
   19389 		mem := v.Args[0]
   19390 		v.reset(OpAMD64CALLstatic)
   19391 		v.AuxInt = argwid
   19392 		v.Aux = target
   19393 		v.AddArg(mem)
   19394 		return true
   19395 	}
   19396 }
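// Store dispatches on the store width in AuxInt. Rule order matters: the two
// floating-point rules run first so that 4- and 8-byte float values get SSE
// stores (MOVSSstore/MOVSDstore); the unconditional integer rules for the
// same widths apply only after those conditions fail.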
   19397 func rewriteValueAMD64_OpStore(v *Value, config *Config) bool {
   19398 	b := v.Block
   19399 	_ = b
   19400 	// match: (Store [8] ptr val mem)
   19401 	// cond: is64BitFloat(val.Type)
   19402 	// result: (MOVSDstore ptr val mem)
   19403 	for {
   19404 		if v.AuxInt != 8 {
   19405 			break
   19406 		}
   19407 		ptr := v.Args[0]
   19408 		val := v.Args[1]
   19409 		mem := v.Args[2]
   19410 		if !(is64BitFloat(val.Type)) {
   19411 			break
   19412 		}
   19413 		v.reset(OpAMD64MOVSDstore)
   19414 		v.AddArg(ptr)
   19415 		v.AddArg(val)
   19416 		v.AddArg(mem)
   19417 		return true
   19418 	}
   19419 	// match: (Store [4] ptr val mem)
   19420 	// cond: is32BitFloat(val.Type)
   19421 	// result: (MOVSSstore ptr val mem)
   19422 	for {
   19423 		if v.AuxInt != 4 {
   19424 			break
   19425 		}
   19426 		ptr := v.Args[0]
   19427 		val := v.Args[1]
   19428 		mem := v.Args[2]
   19429 		if !(is32BitFloat(val.Type)) {
   19430 			break
   19431 		}
   19432 		v.reset(OpAMD64MOVSSstore)
   19433 		v.AddArg(ptr)
   19434 		v.AddArg(val)
   19435 		v.AddArg(mem)
   19436 		return true
   19437 	}
   19438 	// match: (Store [8] ptr val mem)
   19439 	// cond:
   19440 	// result: (MOVQstore ptr val mem)
   19441 	for {
   19442 		if v.AuxInt != 8 {
   19443 			break
   19444 		}
   19445 		ptr := v.Args[0]
   19446 		val := v.Args[1]
   19447 		mem := v.Args[2]
   19448 		v.reset(OpAMD64MOVQstore)
   19449 		v.AddArg(ptr)
   19450 		v.AddArg(val)
   19451 		v.AddArg(mem)
   19452 		return true
   19453 	}
   19454 	// match: (Store [4] ptr val mem)
   19455 	// cond:
   19456 	// result: (MOVLstore ptr val mem)
   19457 	for {
   19458 		if v.AuxInt != 4 {
   19459 			break
   19460 		}
   19461 		ptr := v.Args[0]
   19462 		val := v.Args[1]
   19463 		mem := v.Args[2]
   19464 		v.reset(OpAMD64MOVLstore)
   19465 		v.AddArg(ptr)
   19466 		v.AddArg(val)
   19467 		v.AddArg(mem)
   19468 		return true
   19469 	}
   19470 	// match: (Store [2] ptr val mem)
   19471 	// cond:
   19472 	// result: (MOVWstore ptr val mem)
   19473 	for {
   19474 		if v.AuxInt != 2 {
   19475 			break
   19476 		}
   19477 		ptr := v.Args[0]
   19478 		val := v.Args[1]
   19479 		mem := v.Args[2]
   19480 		v.reset(OpAMD64MOVWstore)
   19481 		v.AddArg(ptr)
   19482 		v.AddArg(val)
   19483 		v.AddArg(mem)
   19484 		return true
   19485 	}
   19486 	// match: (Store [1] ptr val mem)
   19487 	// cond:
   19488 	// result: (MOVBstore ptr val mem)
   19489 	for {
   19490 		if v.AuxInt != 1 {
   19491 			break
   19492 		}
   19493 		ptr := v.Args[0]
   19494 		val := v.Args[1]
   19495 		mem := v.Args[2]
   19496 		v.reset(OpAMD64MOVBstore)
   19497 		v.AddArg(ptr)
   19498 		v.AddArg(val)
   19499 		v.AddArg(mem)
   19500 		return true
   19501 	}
   19502 	return false
   19503 }
   19504 func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool {
   19505 	b := v.Block
   19506 	_ = b
   19507 	// match: (Sub16  x y)
   19508 	// cond:
   19509 	// result: (SUBL  x y)
   19510 	for {
   19511 		x := v.Args[0]
   19512 		y := v.Args[1]
   19513 		v.reset(OpAMD64SUBL)
   19514 		v.AddArg(x)
   19515 		v.AddArg(y)
   19516 		return true
   19517 	}
   19518 }
   19519 func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool {
   19520 	b := v.Block
   19521 	_ = b
   19522 	// match: (Sub32  x y)
   19523 	// cond:
   19524 	// result: (SUBL  x y)
   19525 	for {
   19526 		x := v.Args[0]
   19527 		y := v.Args[1]
   19528 		v.reset(OpAMD64SUBL)
   19529 		v.AddArg(x)
   19530 		v.AddArg(y)
   19531 		return true
   19532 	}
   19533 }
   19534 func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool {
   19535 	b := v.Block
   19536 	_ = b
   19537 	// match: (Sub32F x y)
   19538 	// cond:
   19539 	// result: (SUBSS x y)
   19540 	for {
   19541 		x := v.Args[0]
   19542 		y := v.Args[1]
   19543 		v.reset(OpAMD64SUBSS)
   19544 		v.AddArg(x)
   19545 		v.AddArg(y)
   19546 		return true
   19547 	}
   19548 }
   19549 func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool {
   19550 	b := v.Block
   19551 	_ = b
   19552 	// match: (Sub64  x y)
   19553 	// cond:
   19554 	// result: (SUBQ  x y)
   19555 	for {
   19556 		x := v.Args[0]
   19557 		y := v.Args[1]
   19558 		v.reset(OpAMD64SUBQ)
   19559 		v.AddArg(x)
   19560 		v.AddArg(y)
   19561 		return true
   19562 	}
   19563 }
   19564 func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool {
   19565 	b := v.Block
   19566 	_ = b
   19567 	// match: (Sub64F x y)
   19568 	// cond:
   19569 	// result: (SUBSD x y)
   19570 	for {
   19571 		x := v.Args[0]
   19572 		y := v.Args[1]
   19573 		v.reset(OpAMD64SUBSD)
   19574 		v.AddArg(x)
   19575 		v.AddArg(y)
   19576 		return true
   19577 	}
   19578 }
   19579 func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool {
   19580 	b := v.Block
   19581 	_ = b
   19582 	// match: (Sub8   x y)
   19583 	// cond:
   19584 	// result: (SUBL  x y)
   19585 	for {
   19586 		x := v.Args[0]
   19587 		y := v.Args[1]
   19588 		v.reset(OpAMD64SUBL)
   19589 		v.AddArg(x)
   19590 		v.AddArg(y)
   19591 		return true
   19592 	}
   19593 }
   19594 func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool {
   19595 	b := v.Block
   19596 	_ = b
   19597 	// match: (SubPtr x y)
   19598 	// cond: config.PtrSize == 8
   19599 	// result: (SUBQ x y)
   19600 	for {
   19601 		x := v.Args[0]
   19602 		y := v.Args[1]
   19603 		if !(config.PtrSize == 8) {
   19604 			break
   19605 		}
   19606 		v.reset(OpAMD64SUBQ)
   19607 		v.AddArg(x)
   19608 		v.AddArg(y)
   19609 		return true
   19610 	}
   19611 	// match: (SubPtr x y)
   19612 	// cond: config.PtrSize == 4
   19613 	// result: (SUBL x y)
   19614 	for {
   19615 		x := v.Args[0]
   19616 		y := v.Args[1]
   19617 		if !(config.PtrSize == 4) {
   19618 			break
   19619 		}
   19620 		v.reset(OpAMD64SUBL)
   19621 		v.AddArg(x)
   19622 		v.AddArg(y)
   19623 		return true
   19624 	}
   19625 	return false
   19626 }
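// The TruncNNtoMM rules are all no-ops on AMD64: narrow values already live
// in the low bits of a full-width register, so each truncation is rewritten
// to a plain copy of its argument.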
   19627 func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool {
   19628 	b := v.Block
   19629 	_ = b
   19630 	// match: (Trunc16to8  x)
   19631 	// cond:
   19632 	// result: x
   19633 	for {
   19634 		x := v.Args[0]
   19635 		v.reset(OpCopy)
   19636 		v.Type = x.Type
   19637 		v.AddArg(x)
   19638 		return true
   19639 	}
   19640 }
   19641 func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool {
   19642 	b := v.Block
   19643 	_ = b
   19644 	// match: (Trunc32to16 x)
   19645 	// cond:
   19646 	// result: x
   19647 	for {
   19648 		x := v.Args[0]
   19649 		v.reset(OpCopy)
   19650 		v.Type = x.Type
   19651 		v.AddArg(x)
   19652 		return true
   19653 	}
   19654 }
   19655 func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool {
   19656 	b := v.Block
   19657 	_ = b
   19658 	// match: (Trunc32to8  x)
   19659 	// cond:
   19660 	// result: x
   19661 	for {
   19662 		x := v.Args[0]
   19663 		v.reset(OpCopy)
   19664 		v.Type = x.Type
   19665 		v.AddArg(x)
   19666 		return true
   19667 	}
   19668 }
   19669 func rewriteValueAMD64_OpTrunc64to16(v *Value, config *Config) bool {
   19670 	b := v.Block
   19671 	_ = b
   19672 	// match: (Trunc64to16 x)
   19673 	// cond:
   19674 	// result: x
   19675 	for {
   19676 		x := v.Args[0]
   19677 		v.reset(OpCopy)
   19678 		v.Type = x.Type
   19679 		v.AddArg(x)
   19680 		return true
   19681 	}
   19682 }
   19683 func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool {
   19684 	b := v.Block
   19685 	_ = b
   19686 	// match: (Trunc64to32 x)
   19687 	// cond:
   19688 	// result: x
   19689 	for {
   19690 		x := v.Args[0]
   19691 		v.reset(OpCopy)
   19692 		v.Type = x.Type
   19693 		v.AddArg(x)
   19694 		return true
   19695 	}
   19696 }
   19697 func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool {
   19698 	b := v.Block
   19699 	_ = b
   19700 	// match: (Trunc64to8  x)
   19701 	// cond:
   19702 	// result: x
   19703 	for {
   19704 		x := v.Args[0]
   19705 		v.reset(OpCopy)
   19706 		v.Type = x.Type
   19707 		v.AddArg(x)
   19708 		return true
   19709 	}
   19710 }
   19711 func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool {
   19712 	b := v.Block
   19713 	_ = b
   19714 	// match: (Xor16 x y)
   19715 	// cond:
   19716 	// result: (XORL x y)
   19717 	for {
   19718 		x := v.Args[0]
   19719 		y := v.Args[1]
   19720 		v.reset(OpAMD64XORL)
   19721 		v.AddArg(x)
   19722 		v.AddArg(y)
   19723 		return true
   19724 	}
   19725 }
   19726 func rewriteValueAMD64_OpXor32(v *Value, config *Config) bool {
   19727 	b := v.Block
   19728 	_ = b
   19729 	// match: (Xor32 x y)
   19730 	// cond:
   19731 	// result: (XORL x y)
   19732 	for {
   19733 		x := v.Args[0]
   19734 		y := v.Args[1]
   19735 		v.reset(OpAMD64XORL)
   19736 		v.AddArg(x)
   19737 		v.AddArg(y)
   19738 		return true
   19739 	}
   19740 }
   19741 func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool {
   19742 	b := v.Block
   19743 	_ = b
   19744 	// match: (Xor64 x y)
   19745 	// cond:
   19746 	// result: (XORQ x y)
   19747 	for {
   19748 		x := v.Args[0]
   19749 		y := v.Args[1]
   19750 		v.reset(OpAMD64XORQ)
   19751 		v.AddArg(x)
   19752 		v.AddArg(y)
   19753 		return true
   19754 	}
   19755 }
   19756 func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool {
   19757 	b := v.Block
   19758 	_ = b
   19759 	// match: (Xor8  x y)
   19760 	// cond:
   19761 	// result: (XORL x y)
   19762 	for {
   19763 		x := v.Args[0]
   19764 		y := v.Args[1]
   19765 		v.reset(OpAMD64XORL)
   19766 		v.AddArg(x)
   19767 		v.AddArg(y)
   19768 		return true
   19769 	}
   19770 }
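// Zero picks a strategy by size, roughly: sizes up to 8, the odd sizes 3, 5,
// 6, 7, and the multiples 16/24/32 become short chains of store-constants at
// successive offsets; other sizes that are not a multiple of 8 peel off the
// ragged head and recurse; multiples of 8 up to 1024 go through DUFFZERO
// (peeling 8 bytes first when the size is not a multiple of 16); anything
// larger, or any multiple of 8 with Duff's device disabled, falls back to
// REP STOSQ with count size/8.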
   19771 func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
   19772 	b := v.Block
   19773 	_ = b
   19774 	// match: (Zero [s] _ mem)
   19775 	// cond: SizeAndAlign(s).Size() == 0
   19776 	// result: mem
   19777 	for {
   19778 		s := v.AuxInt
   19779 		mem := v.Args[1]
   19780 		if !(SizeAndAlign(s).Size() == 0) {
   19781 			break
   19782 		}
   19783 		v.reset(OpCopy)
   19784 		v.Type = mem.Type
   19785 		v.AddArg(mem)
   19786 		return true
   19787 	}
   19788 	// match: (Zero [s] destptr mem)
   19789 	// cond: SizeAndAlign(s).Size() == 1
   19790 	// result: (MOVBstoreconst [0] destptr mem)
   19791 	for {
   19792 		s := v.AuxInt
   19793 		destptr := v.Args[0]
   19794 		mem := v.Args[1]
   19795 		if !(SizeAndAlign(s).Size() == 1) {
   19796 			break
   19797 		}
   19798 		v.reset(OpAMD64MOVBstoreconst)
   19799 		v.AuxInt = 0
   19800 		v.AddArg(destptr)
   19801 		v.AddArg(mem)
   19802 		return true
   19803 	}
   19804 	// match: (Zero [s] destptr mem)
   19805 	// cond: SizeAndAlign(s).Size() == 2
   19806 	// result: (MOVWstoreconst [0] destptr mem)
   19807 	for {
   19808 		s := v.AuxInt
   19809 		destptr := v.Args[0]
   19810 		mem := v.Args[1]
   19811 		if !(SizeAndAlign(s).Size() == 2) {
   19812 			break
   19813 		}
   19814 		v.reset(OpAMD64MOVWstoreconst)
   19815 		v.AuxInt = 0
   19816 		v.AddArg(destptr)
   19817 		v.AddArg(mem)
   19818 		return true
   19819 	}
   19820 	// match: (Zero [s] destptr mem)
   19821 	// cond: SizeAndAlign(s).Size() == 4
   19822 	// result: (MOVLstoreconst [0] destptr mem)
   19823 	for {
   19824 		s := v.AuxInt
   19825 		destptr := v.Args[0]
   19826 		mem := v.Args[1]
   19827 		if !(SizeAndAlign(s).Size() == 4) {
   19828 			break
   19829 		}
   19830 		v.reset(OpAMD64MOVLstoreconst)
   19831 		v.AuxInt = 0
   19832 		v.AddArg(destptr)
   19833 		v.AddArg(mem)
   19834 		return true
   19835 	}
   19836 	// match: (Zero [s] destptr mem)
   19837 	// cond: SizeAndAlign(s).Size() == 8
   19838 	// result: (MOVQstoreconst [0] destptr mem)
   19839 	for {
   19840 		s := v.AuxInt
   19841 		destptr := v.Args[0]
   19842 		mem := v.Args[1]
   19843 		if !(SizeAndAlign(s).Size() == 8) {
   19844 			break
   19845 		}
   19846 		v.reset(OpAMD64MOVQstoreconst)
   19847 		v.AuxInt = 0
   19848 		v.AddArg(destptr)
   19849 		v.AddArg(mem)
   19850 		return true
   19851 	}
   19852 	// match: (Zero [s] destptr mem)
   19853 	// cond: SizeAndAlign(s).Size() == 3
   19854 	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
   19855 	for {
   19856 		s := v.AuxInt
   19857 		destptr := v.Args[0]
   19858 		mem := v.Args[1]
   19859 		if !(SizeAndAlign(s).Size() == 3) {
   19860 			break
   19861 		}
   19862 		v.reset(OpAMD64MOVBstoreconst)
   19863 		v.AuxInt = makeValAndOff(0, 2)
   19864 		v.AddArg(destptr)
   19865 		v0 := b.NewValue0(v.Line, OpAMD64MOVWstoreconst, TypeMem)
   19866 		v0.AuxInt = 0
   19867 		v0.AddArg(destptr)
   19868 		v0.AddArg(mem)
   19869 		v.AddArg(v0)
   19870 		return true
   19871 	}
   19872 	// match: (Zero [s] destptr mem)
   19873 	// cond: SizeAndAlign(s).Size() == 5
   19874 	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
   19875 	for {
   19876 		s := v.AuxInt
   19877 		destptr := v.Args[0]
   19878 		mem := v.Args[1]
   19879 		if !(SizeAndAlign(s).Size() == 5) {
   19880 			break
   19881 		}
   19882 		v.reset(OpAMD64MOVBstoreconst)
   19883 		v.AuxInt = makeValAndOff(0, 4)
   19884 		v.AddArg(destptr)
   19885 		v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem)
   19886 		v0.AuxInt = 0
   19887 		v0.AddArg(destptr)
   19888 		v0.AddArg(mem)
   19889 		v.AddArg(v0)
   19890 		return true
   19891 	}
   19892 	// match: (Zero [s] destptr mem)
   19893 	// cond: SizeAndAlign(s).Size() == 6
   19894 	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
   19895 	for {
   19896 		s := v.AuxInt
   19897 		destptr := v.Args[0]
   19898 		mem := v.Args[1]
   19899 		if !(SizeAndAlign(s).Size() == 6) {
   19900 			break
   19901 		}
   19902 		v.reset(OpAMD64MOVWstoreconst)
   19903 		v.AuxInt = makeValAndOff(0, 4)
   19904 		v.AddArg(destptr)
   19905 		v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem)
   19906 		v0.AuxInt = 0
   19907 		v0.AddArg(destptr)
   19908 		v0.AddArg(mem)
   19909 		v.AddArg(v0)
   19910 		return true
   19911 	}
   19912 	// match: (Zero [s] destptr mem)
   19913 	// cond: SizeAndAlign(s).Size() == 7
   19914 	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
   19915 	for {
   19916 		s := v.AuxInt
   19917 		destptr := v.Args[0]
   19918 		mem := v.Args[1]
   19919 		if !(SizeAndAlign(s).Size() == 7) {
   19920 			break
   19921 		}
   19922 		v.reset(OpAMD64MOVLstoreconst)
   19923 		v.AuxInt = makeValAndOff(0, 3)
   19924 		v.AddArg(destptr)
   19925 		v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem)
   19926 		v0.AuxInt = 0
   19927 		v0.AddArg(destptr)
   19928 		v0.AddArg(mem)
   19929 		v.AddArg(v0)
   19930 		return true
   19931 	}
   19932 	// match: (Zero [s] destptr mem)
   19933 	// cond: SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8
   19934 	// result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8]) (MOVQstoreconst [0] destptr mem))
   19935 	for {
   19936 		s := v.AuxInt
   19937 		destptr := v.Args[0]
   19938 		mem := v.Args[1]
   19939 		if !(SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8) {
   19940 			break
   19941 		}
   19942 		v.reset(OpZero)
   19943 		v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8
   19944 		v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type)
   19945 		v0.AuxInt = SizeAndAlign(s).Size() % 8
   19946 		v0.AddArg(destptr)
   19947 		v.AddArg(v0)
   19948 		v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
   19949 		v1.AuxInt = 0
   19950 		v1.AddArg(destptr)
   19951 		v1.AddArg(mem)
   19952 		v.AddArg(v1)
   19953 		return true
   19954 	}
   19955 	// match: (Zero [s] destptr mem)
   19956 	// cond: SizeAndAlign(s).Size() == 16
   19957 	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
   19958 	for {
   19959 		s := v.AuxInt
   19960 		destptr := v.Args[0]
   19961 		mem := v.Args[1]
   19962 		if !(SizeAndAlign(s).Size() == 16) {
   19963 			break
   19964 		}
   19965 		v.reset(OpAMD64MOVQstoreconst)
   19966 		v.AuxInt = makeValAndOff(0, 8)
   19967 		v.AddArg(destptr)
   19968 		v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
   19969 		v0.AuxInt = 0
   19970 		v0.AddArg(destptr)
   19971 		v0.AddArg(mem)
   19972 		v.AddArg(v0)
   19973 		return true
   19974 	}
   19975 	// match: (Zero [s] destptr mem)
   19976 	// cond: SizeAndAlign(s).Size() == 24
   19977 	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
   19978 	for {
   19979 		s := v.AuxInt
   19980 		destptr := v.Args[0]
   19981 		mem := v.Args[1]
   19982 		if !(SizeAndAlign(s).Size() == 24) {
   19983 			break
   19984 		}
   19985 		v.reset(OpAMD64MOVQstoreconst)
   19986 		v.AuxInt = makeValAndOff(0, 16)
   19987 		v.AddArg(destptr)
   19988 		v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
   19989 		v0.AuxInt = makeValAndOff(0, 8)
   19990 		v0.AddArg(destptr)
   19991 		v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
   19992 		v1.AuxInt = 0
   19993 		v1.AddArg(destptr)
   19994 		v1.AddArg(mem)
   19995 		v0.AddArg(v1)
   19996 		v.AddArg(v0)
   19997 		return true
   19998 	}
   19999 	// match: (Zero [s] destptr mem)
   20000 	// cond: SizeAndAlign(s).Size() == 32
   20001 	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
   20002 	for {
   20003 		s := v.AuxInt
   20004 		destptr := v.Args[0]
   20005 		mem := v.Args[1]
   20006 		if !(SizeAndAlign(s).Size() == 32) {
   20007 			break
   20008 		}
   20009 		v.reset(OpAMD64MOVQstoreconst)
   20010 		v.AuxInt = makeValAndOff(0, 24)
   20011 		v.AddArg(destptr)
   20012 		v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
   20013 		v0.AuxInt = makeValAndOff(0, 16)
   20014 		v0.AddArg(destptr)
   20015 		v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
   20016 		v1.AuxInt = makeValAndOff(0, 8)
   20017 		v1.AddArg(destptr)
   20018 		v2 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
   20019 		v2.AuxInt = 0
   20020 		v2.AddArg(destptr)
   20021 		v2.AddArg(mem)
   20022 		v1.AddArg(v2)
   20023 		v0.AddArg(v1)
   20024 		v.AddArg(v0)
   20025 		return true
   20026 	}
   20027 	// match: (Zero [s] destptr mem)
   20028 	// cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice
   20029 	// result: (Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
   20030 	for {
   20031 		s := v.AuxInt
   20032 		destptr := v.Args[0]
   20033 		mem := v.Args[1]
   20034 		if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice) {
   20035 			break
   20036 		}
   20037 		v.reset(OpZero)
   20038 		v.AuxInt = SizeAndAlign(s).Size() - 8
   20039 		v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type)
   20040 		v0.AuxInt = 8
   20041 		v0.AddArg(destptr)
   20042 		v.AddArg(v0)
   20043 		v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
   20044 		v1.AddArg(destptr)
   20045 		v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
   20046 		v2.AuxInt = 0
   20047 		v1.AddArg(v2)
   20048 		v1.AddArg(mem)
   20049 		v.AddArg(v1)
   20050 		return true
   20051 	}
   20052 	// match: (Zero [s] destptr mem)
   20053 	// cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice
   20054 	// result: (DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem)
   20055 	for {
   20056 		s := v.AuxInt
   20057 		destptr := v.Args[0]
   20058 		mem := v.Args[1]
   20059 		if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
   20060 			break
   20061 		}
   20062 		v.reset(OpAMD64DUFFZERO)
   20063 		v.AuxInt = SizeAndAlign(s).Size()
   20064 		v.AddArg(destptr)
   20065 		v0 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInt128)
   20066 		v0.AuxInt = 0
   20067 		v.AddArg(v0)
   20068 		v.AddArg(mem)
   20069 		return true
   20070 	}
   20071 	// match: (Zero [s] destptr mem)
   20072 	// cond: (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0
   20073 	// result: (REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem)
   20074 	for {
   20075 		s := v.AuxInt
   20076 		destptr := v.Args[0]
   20077 		mem := v.Args[1]
   20078 		if !((SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0) {
   20079 			break
   20080 		}
   20081 		v.reset(OpAMD64REPSTOSQ)
   20082 		v.AddArg(destptr)
   20083 		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
   20084 		v0.AuxInt = SizeAndAlign(s).Size() / 8
   20085 		v.AddArg(v0)
   20086 		v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
   20087 		v1.AuxInt = 0
   20088 		v.AddArg(v1)
   20089 		v.AddArg(mem)
   20090 		return true
   20091 	}
   20092 	return false
   20093 }
   20094 func rewriteValueAMD64_OpZeroExt16to32(v *Value, config *Config) bool {
   20095 	b := v.Block
   20096 	_ = b
   20097 	// match: (ZeroExt16to32 x)
   20098 	// cond:
   20099 	// result: (MOVWQZX x)
   20100 	for {
   20101 		x := v.Args[0]
   20102 		v.reset(OpAMD64MOVWQZX)
   20103 		v.AddArg(x)
   20104 		return true
   20105 	}
   20106 }
   20107 func rewriteValueAMD64_OpZeroExt16to64(v *Value, config *Config) bool {
   20108 	b := v.Block
   20109 	_ = b
   20110 	// match: (ZeroExt16to64 x)
   20111 	// cond:
   20112 	// result: (MOVWQZX x)
   20113 	for {
   20114 		x := v.Args[0]
   20115 		v.reset(OpAMD64MOVWQZX)
   20116 		v.AddArg(x)
   20117 		return true
   20118 	}
   20119 }
   20120 func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool {
   20121 	b := v.Block
   20122 	_ = b
   20123 	// match: (ZeroExt32to64 x)
   20124 	// cond:
   20125 	// result: (MOVLQZX x)
   20126 	for {
   20127 		x := v.Args[0]
   20128 		v.reset(OpAMD64MOVLQZX)
   20129 		v.AddArg(x)
   20130 		return true
   20131 	}
   20132 }
   20133 func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool {
   20134 	b := v.Block
   20135 	_ = b
   20136 	// match: (ZeroExt8to16  x)
   20137 	// cond:
   20138 	// result: (MOVBQZX x)
   20139 	for {
   20140 		x := v.Args[0]
   20141 		v.reset(OpAMD64MOVBQZX)
   20142 		v.AddArg(x)
   20143 		return true
   20144 	}
   20145 }
   20146 func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool {
   20147 	b := v.Block
   20148 	_ = b
   20149 	// match: (ZeroExt8to32  x)
   20150 	// cond:
   20151 	// result: (MOVBQZX x)
   20152 	for {
   20153 		x := v.Args[0]
   20154 		v.reset(OpAMD64MOVBQZX)
   20155 		v.AddArg(x)
   20156 		return true
   20157 	}
   20158 }
   20159 func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool {
   20160 	b := v.Block
   20161 	_ = b
   20162 	// match: (ZeroExt8to64  x)
   20163 	// cond:
   20164 	// result: (MOVBQZX x)
   20165 	for {
   20166 		x := v.Args[0]
   20167 		v.reset(OpAMD64MOVBQZX)
   20168 		v.AddArg(x)
   20169 		return true
   20170 	}
   20171 }
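// rewriteBlockAMD64 does for control flow what rewriteValueAMD64 does for
// values. Two patterns recur throughout the switch: a branch whose flags are
// known constants (FlagEQ, FlagLT_ULT, ...) collapses to an unconditional
// BlockFirst, with swapSuccessors when the "no" edge is the one taken; and a
// branch on InvertFlags re-targets to the swapped condition (EQ and NE map to
// themselves, GE to LE, GT to LT, and so on).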
   20172 func rewriteBlockAMD64(b *Block, config *Config) bool {
   20173 	switch b.Kind {
   20174 	case BlockAMD64EQ:
   20175 		// match: (EQ (InvertFlags cmp) yes no)
   20176 		// cond:
   20177 		// result: (EQ cmp yes no)
   20178 		for {
   20179 			v := b.Control
   20180 			if v.Op != OpAMD64InvertFlags {
   20181 				break
   20182 			}
   20183 			cmp := v.Args[0]
   20184 			yes := b.Succs[0]
   20185 			no := b.Succs[1]
   20186 			b.Kind = BlockAMD64EQ
   20187 			b.SetControl(cmp)
   20188 			_ = yes
   20189 			_ = no
   20190 			return true
   20191 		}
   20192 		// match: (EQ (FlagEQ) yes no)
   20193 		// cond:
   20194 		// result: (First nil yes no)
   20195 		for {
   20196 			v := b.Control
   20197 			if v.Op != OpAMD64FlagEQ {
   20198 				break
   20199 			}
   20200 			yes := b.Succs[0]
   20201 			no := b.Succs[1]
   20202 			b.Kind = BlockFirst
   20203 			b.SetControl(nil)
   20204 			_ = yes
   20205 			_ = no
   20206 			return true
   20207 		}
   20208 		// match: (EQ (FlagLT_ULT) yes no)
   20209 		// cond:
   20210 		// result: (First nil no yes)
   20211 		for {
   20212 			v := b.Control
   20213 			if v.Op != OpAMD64FlagLT_ULT {
   20214 				break
   20215 			}
   20216 			yes := b.Succs[0]
   20217 			no := b.Succs[1]
   20218 			b.Kind = BlockFirst
   20219 			b.SetControl(nil)
   20220 			b.swapSuccessors()
   20221 			_ = no
   20222 			_ = yes
   20223 			return true
   20224 		}
   20225 		// match: (EQ (FlagLT_UGT) yes no)
   20226 		// cond:
   20227 		// result: (First nil no yes)
   20228 		for {
   20229 			v := b.Control
   20230 			if v.Op != OpAMD64FlagLT_UGT {
   20231 				break
   20232 			}
   20233 			yes := b.Succs[0]
   20234 			no := b.Succs[1]
   20235 			b.Kind = BlockFirst
   20236 			b.SetControl(nil)
   20237 			b.swapSuccessors()
   20238 			_ = no
   20239 			_ = yes
   20240 			return true
   20241 		}
   20242 		// match: (EQ (FlagGT_ULT) yes no)
   20243 		// cond:
   20244 		// result: (First nil no yes)
   20245 		for {
   20246 			v := b.Control
   20247 			if v.Op != OpAMD64FlagGT_ULT {
   20248 				break
   20249 			}
   20250 			yes := b.Succs[0]
   20251 			no := b.Succs[1]
   20252 			b.Kind = BlockFirst
   20253 			b.SetControl(nil)
   20254 			b.swapSuccessors()
   20255 			_ = no
   20256 			_ = yes
   20257 			return true
   20258 		}
   20259 		// match: (EQ (FlagGT_UGT) yes no)
   20260 		// cond:
   20261 		// result: (First nil no yes)
   20262 		for {
   20263 			v := b.Control
   20264 			if v.Op != OpAMD64FlagGT_UGT {
   20265 				break
   20266 			}
   20267 			yes := b.Succs[0]
   20268 			no := b.Succs[1]
   20269 			b.Kind = BlockFirst
   20270 			b.SetControl(nil)
   20271 			b.swapSuccessors()
   20272 			_ = no
   20273 			_ = yes
   20274 			return true
   20275 		}
   20276 	case BlockAMD64GE:
   20277 		// match: (GE (InvertFlags cmp) yes no)
   20278 		// cond:
   20279 		// result: (LE cmp yes no)
   20280 		for {
   20281 			v := b.Control
   20282 			if v.Op != OpAMD64InvertFlags {
   20283 				break
   20284 			}
   20285 			cmp := v.Args[0]
   20286 			yes := b.Succs[0]
   20287 			no := b.Succs[1]
   20288 			b.Kind = BlockAMD64LE
   20289 			b.SetControl(cmp)
   20290 			_ = yes
   20291 			_ = no
   20292 			return true
   20293 		}
   20294 		// match: (GE (FlagEQ) yes no)
   20295 		// cond:
   20296 		// result: (First nil yes no)
   20297 		for {
   20298 			v := b.Control
   20299 			if v.Op != OpAMD64FlagEQ {
   20300 				break
   20301 			}
   20302 			yes := b.Succs[0]
   20303 			no := b.Succs[1]
   20304 			b.Kind = BlockFirst
   20305 			b.SetControl(nil)
   20306 			_ = yes
   20307 			_ = no
   20308 			return true
   20309 		}
   20310 		// match: (GE (FlagLT_ULT) yes no)
   20311 		// cond:
   20312 		// result: (First nil no yes)
   20313 		for {
   20314 			v := b.Control
   20315 			if v.Op != OpAMD64FlagLT_ULT {
   20316 				break
   20317 			}
   20318 			yes := b.Succs[0]
   20319 			no := b.Succs[1]
   20320 			b.Kind = BlockFirst
   20321 			b.SetControl(nil)
   20322 			b.swapSuccessors()
   20323 			_ = no
   20324 			_ = yes
   20325 			return true
   20326 		}
   20327 		// match: (GE (FlagLT_UGT) yes no)
   20328 		// cond:
   20329 		// result: (First nil no yes)
   20330 		for {
   20331 			v := b.Control
   20332 			if v.Op != OpAMD64FlagLT_UGT {
   20333 				break
   20334 			}
   20335 			yes := b.Succs[0]
   20336 			no := b.Succs[1]
   20337 			b.Kind = BlockFirst
   20338 			b.SetControl(nil)
   20339 			b.swapSuccessors()
   20340 			_ = no
   20341 			_ = yes
   20342 			return true
   20343 		}
   20344 		// match: (GE (FlagGT_ULT) yes no)
   20345 		// cond:
   20346 		// result: (First nil yes no)
   20347 		for {
   20348 			v := b.Control
   20349 			if v.Op != OpAMD64FlagGT_ULT {
   20350 				break
   20351 			}
   20352 			yes := b.Succs[0]
   20353 			no := b.Succs[1]
   20354 			b.Kind = BlockFirst
   20355 			b.SetControl(nil)
   20356 			_ = yes
   20357 			_ = no
   20358 			return true
   20359 		}
   20360 		// match: (GE (FlagGT_UGT) yes no)
   20361 		// cond:
   20362 		// result: (First nil yes no)
   20363 		for {
   20364 			v := b.Control
   20365 			if v.Op != OpAMD64FlagGT_UGT {
   20366 				break
   20367 			}
   20368 			yes := b.Succs[0]
   20369 			no := b.Succs[1]
   20370 			b.Kind = BlockFirst
   20371 			b.SetControl(nil)
   20372 			_ = yes
   20373 			_ = no
   20374 			return true
   20375 		}
   20376 	case BlockAMD64GT:
   20377 		// match: (GT (InvertFlags cmp) yes no)
   20378 		// cond:
   20379 		// result: (LT cmp yes no)
   20380 		for {
   20381 			v := b.Control
   20382 			if v.Op != OpAMD64InvertFlags {
   20383 				break
   20384 			}
   20385 			cmp := v.Args[0]
   20386 			yes := b.Succs[0]
   20387 			no := b.Succs[1]
   20388 			b.Kind = BlockAMD64LT
   20389 			b.SetControl(cmp)
   20390 			_ = yes
   20391 			_ = no
   20392 			return true
   20393 		}
   20394 		// match: (GT (FlagEQ) yes no)
   20395 		// cond:
   20396 		// result: (First nil no yes)
   20397 		for {
   20398 			v := b.Control
   20399 			if v.Op != OpAMD64FlagEQ {
   20400 				break
   20401 			}
   20402 			yes := b.Succs[0]
   20403 			no := b.Succs[1]
   20404 			b.Kind = BlockFirst
   20405 			b.SetControl(nil)
   20406 			b.swapSuccessors()
   20407 			_ = no
   20408 			_ = yes
   20409 			return true
   20410 		}
   20411 		// match: (GT (FlagLT_ULT) yes no)
   20412 		// cond:
   20413 		// result: (First nil no yes)
   20414 		for {
   20415 			v := b.Control
   20416 			if v.Op != OpAMD64FlagLT_ULT {
   20417 				break
   20418 			}
   20419 			yes := b.Succs[0]
   20420 			no := b.Succs[1]
   20421 			b.Kind = BlockFirst
   20422 			b.SetControl(nil)
   20423 			b.swapSuccessors()
   20424 			_ = no
   20425 			_ = yes
   20426 			return true
   20427 		}
   20428 		// match: (GT (FlagLT_UGT) yes no)
   20429 		// cond:
   20430 		// result: (First nil no yes)
   20431 		for {
   20432 			v := b.Control
   20433 			if v.Op != OpAMD64FlagLT_UGT {
   20434 				break
   20435 			}
   20436 			yes := b.Succs[0]
   20437 			no := b.Succs[1]
   20438 			b.Kind = BlockFirst
   20439 			b.SetControl(nil)
   20440 			b.swapSuccessors()
   20441 			_ = no
   20442 			_ = yes
   20443 			return true
   20444 		}
   20445 		// match: (GT (FlagGT_ULT) yes no)
   20446 		// cond:
   20447 		// result: (First nil yes no)
   20448 		for {
   20449 			v := b.Control
   20450 			if v.Op != OpAMD64FlagGT_ULT {
   20451 				break
   20452 			}
   20453 			yes := b.Succs[0]
   20454 			no := b.Succs[1]
   20455 			b.Kind = BlockFirst
   20456 			b.SetControl(nil)
   20457 			_ = yes
   20458 			_ = no
   20459 			return true
   20460 		}
   20461 		// match: (GT (FlagGT_UGT) yes no)
   20462 		// cond:
   20463 		// result: (First nil yes no)
   20464 		for {
   20465 			v := b.Control
   20466 			if v.Op != OpAMD64FlagGT_UGT {
   20467 				break
   20468 			}
   20469 			yes := b.Succs[0]
   20470 			no := b.Succs[1]
   20471 			b.Kind = BlockFirst
   20472 			b.SetControl(nil)
   20473 			_ = yes
   20474 			_ = no
   20475 			return true
   20476 		}
   20477 	case BlockIf:
   20478 		// match: (If (SETL  cmp) yes no)
   20479 		// cond:
   20480 		// result: (LT  cmp yes no)
   20481 		for {
   20482 			v := b.Control
   20483 			if v.Op != OpAMD64SETL {
   20484 				break
   20485 			}
   20486 			cmp := v.Args[0]
   20487 			yes := b.Succs[0]
   20488 			no := b.Succs[1]
   20489 			b.Kind = BlockAMD64LT
   20490 			b.SetControl(cmp)
   20491 			_ = yes
   20492 			_ = no
   20493 			return true
   20494 		}
   20495 		// match: (If (SETLE cmp) yes no)
   20496 		// cond:
   20497 		// result: (LE  cmp yes no)
   20498 		for {
   20499 			v := b.Control
   20500 			if v.Op != OpAMD64SETLE {
   20501 				break
   20502 			}
   20503 			cmp := v.Args[0]
   20504 			yes := b.Succs[0]
   20505 			no := b.Succs[1]
   20506 			b.Kind = BlockAMD64LE
   20507 			b.SetControl(cmp)
   20508 			_ = yes
   20509 			_ = no
   20510 			return true
   20511 		}
   20512 		// match: (If (SETG  cmp) yes no)
   20513 		// cond:
   20514 		// result: (GT  cmp yes no)
   20515 		for {
   20516 			v := b.Control
   20517 			if v.Op != OpAMD64SETG {
   20518 				break
   20519 			}
   20520 			cmp := v.Args[0]
   20521 			yes := b.Succs[0]
   20522 			no := b.Succs[1]
   20523 			b.Kind = BlockAMD64GT
   20524 			b.SetControl(cmp)
   20525 			_ = yes
   20526 			_ = no
   20527 			return true
   20528 		}
   20529 		// match: (If (SETGE cmp) yes no)
   20530 		// cond:
   20531 		// result: (GE  cmp yes no)
   20532 		for {
   20533 			v := b.Control
   20534 			if v.Op != OpAMD64SETGE {
   20535 				break
   20536 			}
   20537 			cmp := v.Args[0]
   20538 			yes := b.Succs[0]
   20539 			no := b.Succs[1]
   20540 			b.Kind = BlockAMD64GE
   20541 			b.SetControl(cmp)
   20542 			_ = yes
   20543 			_ = no
   20544 			return true
   20545 		}
   20546 		// match: (If (SETEQ cmp) yes no)
   20547 		// cond:
   20548 		// result: (EQ  cmp yes no)
   20549 		for {
   20550 			v := b.Control
   20551 			if v.Op != OpAMD64SETEQ {
   20552 				break
   20553 			}
   20554 			cmp := v.Args[0]
   20555 			yes := b.Succs[0]
   20556 			no := b.Succs[1]
   20557 			b.Kind = BlockAMD64EQ
   20558 			b.SetControl(cmp)
   20559 			_ = yes
   20560 			_ = no
   20561 			return true
   20562 		}
   20563 		// match: (If (SETNE cmp) yes no)
   20564 		// cond:
   20565 		// result: (NE  cmp yes no)
   20566 		for {
   20567 			v := b.Control
   20568 			if v.Op != OpAMD64SETNE {
   20569 				break
   20570 			}
   20571 			cmp := v.Args[0]
   20572 			yes := b.Succs[0]
   20573 			no := b.Succs[1]
   20574 			b.Kind = BlockAMD64NE
   20575 			b.SetControl(cmp)
   20576 			_ = yes
   20577 			_ = no
   20578 			return true
   20579 		}
   20580 		// match: (If (SETB  cmp) yes no)
   20581 		// cond:
   20582 		// result: (ULT cmp yes no)
   20583 		for {
   20584 			v := b.Control
   20585 			if v.Op != OpAMD64SETB {
   20586 				break
   20587 			}
   20588 			cmp := v.Args[0]
   20589 			yes := b.Succs[0]
   20590 			no := b.Succs[1]
   20591 			b.Kind = BlockAMD64ULT
   20592 			b.SetControl(cmp)
   20593 			_ = yes
   20594 			_ = no
   20595 			return true
   20596 		}
   20597 		// match: (If (SETBE cmp) yes no)
   20598 		// cond:
   20599 		// result: (ULE cmp yes no)
   20600 		for {
   20601 			v := b.Control
   20602 			if v.Op != OpAMD64SETBE {
   20603 				break
   20604 			}
   20605 			cmp := v.Args[0]
   20606 			yes := b.Succs[0]
   20607 			no := b.Succs[1]
   20608 			b.Kind = BlockAMD64ULE
   20609 			b.SetControl(cmp)
   20610 			_ = yes
   20611 			_ = no
   20612 			return true
   20613 		}
   20614 		// match: (If (SETA  cmp) yes no)
   20615 		// cond:
   20616 		// result: (UGT cmp yes no)
   20617 		for {
   20618 			v := b.Control
   20619 			if v.Op != OpAMD64SETA {
   20620 				break
   20621 			}
   20622 			cmp := v.Args[0]
   20623 			yes := b.Succs[0]
   20624 			no := b.Succs[1]
   20625 			b.Kind = BlockAMD64UGT
   20626 			b.SetControl(cmp)
   20627 			_ = yes
   20628 			_ = no
   20629 			return true
   20630 		}
   20631 		// match: (If (SETAE cmp) yes no)
   20632 		// cond:
   20633 		// result: (UGE cmp yes no)
   20634 		for {
   20635 			v := b.Control
   20636 			if v.Op != OpAMD64SETAE {
   20637 				break
   20638 			}
   20639 			cmp := v.Args[0]
   20640 			yes := b.Succs[0]
   20641 			no := b.Succs[1]
   20642 			b.Kind = BlockAMD64UGE
   20643 			b.SetControl(cmp)
   20644 			_ = yes
   20645 			_ = no
   20646 			return true
   20647 		}
   20648 		// match: (If (SETGF  cmp) yes no)
   20649 		// cond:
   20650 		// result: (UGT  cmp yes no)
   20651 		for {
   20652 			v := b.Control
   20653 			if v.Op != OpAMD64SETGF {
   20654 				break
   20655 			}
   20656 			cmp := v.Args[0]
   20657 			yes := b.Succs[0]
   20658 			no := b.Succs[1]
   20659 			b.Kind = BlockAMD64UGT
   20660 			b.SetControl(cmp)
   20661 			_ = yes
   20662 			_ = no
   20663 			return true
   20664 		}
   20665 		// match: (If (SETGEF cmp) yes no)
   20666 		// cond:
   20667 		// result: (UGE  cmp yes no)
   20668 		for {
   20669 			v := b.Control
   20670 			if v.Op != OpAMD64SETGEF {
   20671 				break
   20672 			}
   20673 			cmp := v.Args[0]
   20674 			yes := b.Succs[0]
   20675 			no := b.Succs[1]
   20676 			b.Kind = BlockAMD64UGE
   20677 			b.SetControl(cmp)
   20678 			_ = yes
   20679 			_ = no
   20680 			return true
   20681 		}
   20682 		// match: (If (SETEQF cmp) yes no)
   20683 		// cond:
   20684 		// result: (EQF  cmp yes no)
   20685 		for {
   20686 			v := b.Control
   20687 			if v.Op != OpAMD64SETEQF {
   20688 				break
   20689 			}
   20690 			cmp := v.Args[0]
   20691 			yes := b.Succs[0]
   20692 			no := b.Succs[1]
   20693 			b.Kind = BlockAMD64EQF
   20694 			b.SetControl(cmp)
   20695 			_ = yes
   20696 			_ = no
   20697 			return true
   20698 		}
   20699 		// match: (If (SETNEF cmp) yes no)
   20700 		// cond:
   20701 		// result: (NEF  cmp yes no)
   20702 		for {
   20703 			v := b.Control
   20704 			if v.Op != OpAMD64SETNEF {
   20705 				break
   20706 			}
   20707 			cmp := v.Args[0]
   20708 			yes := b.Succs[0]
   20709 			no := b.Succs[1]
   20710 			b.Kind = BlockAMD64NEF
   20711 			b.SetControl(cmp)
   20712 			_ = yes
   20713 			_ = no
   20714 			return true
   20715 		}
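		// Generic fallback: an If on a plain boolean becomes an NE branch on
		// (TESTB cond cond). TESTB ANDs the boolean byte with itself, setting
		// ZF exactly when the bool is false, so NE takes the "yes" edge when
		// the bool is true.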
   20716 		// match: (If cond yes no)
   20717 		// cond:
   20718 		// result: (NE (TESTB cond cond) yes no)
   20719 		for {
   20720 			v := b.Control
   20721 			_ = v
   20722 			cond := b.Control
   20723 			yes := b.Succs[0]
   20724 			no := b.Succs[1]
   20725 			b.Kind = BlockAMD64NE
   20726 			v0 := b.NewValue0(v.Line, OpAMD64TESTB, TypeFlags)
   20727 			v0.AddArg(cond)
   20728 			v0.AddArg(cond)
   20729 			b.SetControl(v0)
   20730 			_ = yes
   20731 			_ = no
   20732 			return true
   20733 		}
   20734 	case BlockAMD64LE:
   20735 		// match: (LE (InvertFlags cmp) yes no)
   20736 		// cond:
   20737 		// result: (GE cmp yes no)
   20738 		for {
   20739 			v := b.Control
   20740 			if v.Op != OpAMD64InvertFlags {
   20741 				break
   20742 			}
   20743 			cmp := v.Args[0]
   20744 			yes := b.Succs[0]
   20745 			no := b.Succs[1]
   20746 			b.Kind = BlockAMD64GE
   20747 			b.SetControl(cmp)
   20748 			_ = yes
   20749 			_ = no
   20750 			return true
   20751 		}
   20752 		// match: (LE (FlagEQ) yes no)
   20753 		// cond:
   20754 		// result: (First nil yes no)
   20755 		for {
   20756 			v := b.Control
   20757 			if v.Op != OpAMD64FlagEQ {
   20758 				break
   20759 			}
   20760 			yes := b.Succs[0]
   20761 			no := b.Succs[1]
   20762 			b.Kind = BlockFirst
   20763 			b.SetControl(nil)
   20764 			_ = yes
   20765 			_ = no
   20766 			return true
   20767 		}
   20768 		// match: (LE (FlagLT_ULT) yes no)
   20769 		// cond:
   20770 		// result: (First nil yes no)
   20771 		for {
   20772 			v := b.Control
   20773 			if v.Op != OpAMD64FlagLT_ULT {
   20774 				break
   20775 			}
   20776 			yes := b.Succs[0]
   20777 			no := b.Succs[1]
   20778 			b.Kind = BlockFirst
   20779 			b.SetControl(nil)
   20780 			_ = yes
   20781 			_ = no
   20782 			return true
   20783 		}
   20784 		// match: (LE (FlagLT_UGT) yes no)
   20785 		// cond:
   20786 		// result: (First nil yes no)
   20787 		for {
   20788 			v := b.Control
   20789 			if v.Op != OpAMD64FlagLT_UGT {
   20790 				break
   20791 			}
   20792 			yes := b.Succs[0]
   20793 			no := b.Succs[1]
   20794 			b.Kind = BlockFirst
   20795 			b.SetControl(nil)
   20796 			_ = yes
   20797 			_ = no
   20798 			return true
   20799 		}
   20800 		// match: (LE (FlagGT_ULT) yes no)
   20801 		// cond:
   20802 		// result: (First nil no yes)
   20803 		for {
   20804 			v := b.Control
   20805 			if v.Op != OpAMD64FlagGT_ULT {
   20806 				break
   20807 			}
   20808 			yes := b.Succs[0]
   20809 			no := b.Succs[1]
   20810 			b.Kind = BlockFirst
   20811 			b.SetControl(nil)
   20812 			b.swapSuccessors()
   20813 			_ = no
   20814 			_ = yes
   20815 			return true
   20816 		}
   20817 		// match: (LE (FlagGT_UGT) yes no)
   20818 		// cond:
   20819 		// result: (First nil no yes)
   20820 		for {
   20821 			v := b.Control
   20822 			if v.Op != OpAMD64FlagGT_UGT {
   20823 				break
   20824 			}
   20825 			yes := b.Succs[0]
   20826 			no := b.Succs[1]
   20827 			b.Kind = BlockFirst
   20828 			b.SetControl(nil)
   20829 			b.swapSuccessors()
   20830 			_ = no
   20831 			_ = yes
   20832 			return true
   20833 		}
   20834 	case BlockAMD64LT:
   20835 		// match: (LT (InvertFlags cmp) yes no)
   20836 		// cond:
   20837 		// result: (GT cmp yes no)
   20838 		for {
   20839 			v := b.Control
   20840 			if v.Op != OpAMD64InvertFlags {
   20841 				break
   20842 			}
   20843 			cmp := v.Args[0]
   20844 			yes := b.Succs[0]
   20845 			no := b.Succs[1]
   20846 			b.Kind = BlockAMD64GT
   20847 			b.SetControl(cmp)
   20848 			_ = yes
   20849 			_ = no
   20850 			return true
   20851 		}
   20852 		// match: (LT (FlagEQ) yes no)
   20853 		// cond:
   20854 		// result: (First nil no yes)
   20855 		for {
   20856 			v := b.Control
   20857 			if v.Op != OpAMD64FlagEQ {
   20858 				break
   20859 			}
   20860 			yes := b.Succs[0]
   20861 			no := b.Succs[1]
   20862 			b.Kind = BlockFirst
   20863 			b.SetControl(nil)
   20864 			b.swapSuccessors()
   20865 			_ = no
   20866 			_ = yes
   20867 			return true
   20868 		}
   20869 		// match: (LT (FlagLT_ULT) yes no)
   20870 		// cond:
   20871 		// result: (First nil yes no)
   20872 		for {
   20873 			v := b.Control
   20874 			if v.Op != OpAMD64FlagLT_ULT {
   20875 				break
   20876 			}
   20877 			yes := b.Succs[0]
   20878 			no := b.Succs[1]
   20879 			b.Kind = BlockFirst
   20880 			b.SetControl(nil)
   20881 			_ = yes
   20882 			_ = no
   20883 			return true
   20884 		}
   20885 		// match: (LT (FlagLT_UGT) yes no)
   20886 		// cond:
   20887 		// result: (First nil yes no)
   20888 		for {
   20889 			v := b.Control
   20890 			if v.Op != OpAMD64FlagLT_UGT {
   20891 				break
   20892 			}
   20893 			yes := b.Succs[0]
   20894 			no := b.Succs[1]
   20895 			b.Kind = BlockFirst
   20896 			b.SetControl(nil)
   20897 			_ = yes
   20898 			_ = no
   20899 			return true
   20900 		}
   20901 		// match: (LT (FlagGT_ULT) yes no)
   20902 		// cond:
   20903 		// result: (First nil no yes)
   20904 		for {
   20905 			v := b.Control
   20906 			if v.Op != OpAMD64FlagGT_ULT {
   20907 				break
   20908 			}
   20909 			yes := b.Succs[0]
   20910 			no := b.Succs[1]
   20911 			b.Kind = BlockFirst
   20912 			b.SetControl(nil)
   20913 			b.swapSuccessors()
   20914 			_ = no
   20915 			_ = yes
   20916 			return true
   20917 		}
   20918 		// match: (LT (FlagGT_UGT) yes no)
   20919 		// cond:
   20920 		// result: (First nil no yes)
   20921 		for {
   20922 			v := b.Control
   20923 			if v.Op != OpAMD64FlagGT_UGT {
   20924 				break
   20925 			}
   20926 			yes := b.Succs[0]
   20927 			no := b.Succs[1]
   20928 			b.Kind = BlockFirst
   20929 			b.SetControl(nil)
   20930 			b.swapSuccessors()
   20931 			_ = no
   20932 			_ = yes
   20933 			return true
   20934 		}
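	// The NE (TESTB (SETcc cmp) (SETcc cmp)) rules below undo the generic If
	// fallback above: when the tested boolean is itself a SETcc of some
	// flags, the TESTB and SETcc pair is dropped and the block branches on
	// the original flags directly.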
   20935 	case BlockAMD64NE:
   20936 		// match: (NE (TESTB (SETL  cmp) (SETL  cmp)) yes no)
   20937 		// cond:
   20938 		// result: (LT  cmp yes no)
   20939 		for {
   20940 			v := b.Control
   20941 			if v.Op != OpAMD64TESTB {
   20942 				break
   20943 			}
   20944 			v_0 := v.Args[0]
   20945 			if v_0.Op != OpAMD64SETL {
   20946 				break
   20947 			}
   20948 			cmp := v_0.Args[0]
   20949 			v_1 := v.Args[1]
   20950 			if v_1.Op != OpAMD64SETL {
   20951 				break
   20952 			}
   20953 			if cmp != v_1.Args[0] {
   20954 				break
   20955 			}
   20956 			yes := b.Succs[0]
   20957 			no := b.Succs[1]
   20958 			b.Kind = BlockAMD64LT
   20959 			b.SetControl(cmp)
   20960 			_ = yes
   20961 			_ = no
   20962 			return true
   20963 		}
   20964 		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
   20965 		// cond:
   20966 		// result: (LE  cmp yes no)
   20967 		for {
   20968 			v := b.Control
   20969 			if v.Op != OpAMD64TESTB {
   20970 				break
   20971 			}
   20972 			v_0 := v.Args[0]
   20973 			if v_0.Op != OpAMD64SETLE {
   20974 				break
   20975 			}
   20976 			cmp := v_0.Args[0]
   20977 			v_1 := v.Args[1]
   20978 			if v_1.Op != OpAMD64SETLE {
   20979 				break
   20980 			}
   20981 			if cmp != v_1.Args[0] {
   20982 				break
   20983 			}
   20984 			yes := b.Succs[0]
   20985 			no := b.Succs[1]
   20986 			b.Kind = BlockAMD64LE
   20987 			b.SetControl(cmp)
   20988 			_ = yes
   20989 			_ = no
   20990 			return true
   20991 		}
   20992 		// match: (NE (TESTB (SETG  cmp) (SETG  cmp)) yes no)
   20993 		// cond:
   20994 		// result: (GT  cmp yes no)
   20995 		for {
   20996 			v := b.Control
   20997 			if v.Op != OpAMD64TESTB {
   20998 				break
   20999 			}
   21000 			v_0 := v.Args[0]
   21001 			if v_0.Op != OpAMD64SETG {
   21002 				break
   21003 			}
   21004 			cmp := v_0.Args[0]
   21005 			v_1 := v.Args[1]
   21006 			if v_1.Op != OpAMD64SETG {
   21007 				break
   21008 			}
   21009 			if cmp != v_1.Args[0] {
   21010 				break
   21011 			}
   21012 			yes := b.Succs[0]
   21013 			no := b.Succs[1]
   21014 			b.Kind = BlockAMD64GT
   21015 			b.SetControl(cmp)
   21016 			_ = yes
   21017 			_ = no
   21018 			return true
   21019 		}
   21020 		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
   21021 		// cond:
   21022 		// result: (GE  cmp yes no)
   21023 		for {
   21024 			v := b.Control
   21025 			if v.Op != OpAMD64TESTB {
   21026 				break
   21027 			}
   21028 			v_0 := v.Args[0]
   21029 			if v_0.Op != OpAMD64SETGE {
   21030 				break
   21031 			}
   21032 			cmp := v_0.Args[0]
   21033 			v_1 := v.Args[1]
   21034 			if v_1.Op != OpAMD64SETGE {
   21035 				break
   21036 			}
   21037 			if cmp != v_1.Args[0] {
   21038 				break
   21039 			}
   21040 			yes := b.Succs[0]
   21041 			no := b.Succs[1]
   21042 			b.Kind = BlockAMD64GE
   21043 			b.SetControl(cmp)
   21044 			_ = yes
   21045 			_ = no
   21046 			return true
   21047 		}
   21048 		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
   21049 		// cond:
   21050 		// result: (EQ  cmp yes no)
   21051 		for {
   21052 			v := b.Control
   21053 			if v.Op != OpAMD64TESTB {
   21054 				break
   21055 			}
   21056 			v_0 := v.Args[0]
   21057 			if v_0.Op != OpAMD64SETEQ {
   21058 				break
   21059 			}
   21060 			cmp := v_0.Args[0]
   21061 			v_1 := v.Args[1]
   21062 			if v_1.Op != OpAMD64SETEQ {
   21063 				break
   21064 			}
   21065 			if cmp != v_1.Args[0] {
   21066 				break
   21067 			}
   21068 			yes := b.Succs[0]
   21069 			no := b.Succs[1]
   21070 			b.Kind = BlockAMD64EQ
   21071 			b.SetControl(cmp)
   21072 			_ = yes
   21073 			_ = no
   21074 			return true
   21075 		}
   21076 		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
   21077 		// cond:
   21078 		// result: (NE  cmp yes no)
   21079 		for {
   21080 			v := b.Control
   21081 			if v.Op != OpAMD64TESTB {
   21082 				break
   21083 			}
   21084 			v_0 := v.Args[0]
   21085 			if v_0.Op != OpAMD64SETNE {
   21086 				break
   21087 			}
   21088 			cmp := v_0.Args[0]
   21089 			v_1 := v.Args[1]
   21090 			if v_1.Op != OpAMD64SETNE {
   21091 				break
   21092 			}
   21093 			if cmp != v_1.Args[0] {
   21094 				break
   21095 			}
   21096 			yes := b.Succs[0]
   21097 			no := b.Succs[1]
   21098 			b.Kind = BlockAMD64NE
   21099 			b.SetControl(cmp)
   21100 			_ = yes
   21101 			_ = no
   21102 			return true
   21103 		}
   21104 		// match: (NE (TESTB (SETB  cmp) (SETB  cmp)) yes no)
   21105 		// cond:
   21106 		// result: (ULT cmp yes no)
   21107 		for {
   21108 			v := b.Control
   21109 			if v.Op != OpAMD64TESTB {
   21110 				break
   21111 			}
   21112 			v_0 := v.Args[0]
   21113 			if v_0.Op != OpAMD64SETB {
   21114 				break
   21115 			}
   21116 			cmp := v_0.Args[0]
   21117 			v_1 := v.Args[1]
   21118 			if v_1.Op != OpAMD64SETB {
   21119 				break
   21120 			}
   21121 			if cmp != v_1.Args[0] {
   21122 				break
   21123 			}
   21124 			yes := b.Succs[0]
   21125 			no := b.Succs[1]
   21126 			b.Kind = BlockAMD64ULT
   21127 			b.SetControl(cmp)
   21128 			_ = yes
   21129 			_ = no
   21130 			return true
   21131 		}
   21132 		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
   21133 		// cond:
   21134 		// result: (ULE cmp yes no)
   21135 		for {
   21136 			v := b.Control
   21137 			if v.Op != OpAMD64TESTB {
   21138 				break
   21139 			}
   21140 			v_0 := v.Args[0]
   21141 			if v_0.Op != OpAMD64SETBE {
   21142 				break
   21143 			}
   21144 			cmp := v_0.Args[0]
   21145 			v_1 := v.Args[1]
   21146 			if v_1.Op != OpAMD64SETBE {
   21147 				break
   21148 			}
   21149 			if cmp != v_1.Args[0] {
   21150 				break
   21151 			}
   21152 			yes := b.Succs[0]
   21153 			no := b.Succs[1]
   21154 			b.Kind = BlockAMD64ULE
   21155 			b.SetControl(cmp)
   21156 			_ = yes
   21157 			_ = no
   21158 			return true
   21159 		}
   21160 		// match: (NE (TESTB (SETA  cmp) (SETA  cmp)) yes no)
   21161 		// cond:
   21162 		// result: (UGT cmp yes no)
   21163 		for {
   21164 			v := b.Control
   21165 			if v.Op != OpAMD64TESTB {
   21166 				break
   21167 			}
   21168 			v_0 := v.Args[0]
   21169 			if v_0.Op != OpAMD64SETA {
   21170 				break
   21171 			}
   21172 			cmp := v_0.Args[0]
   21173 			v_1 := v.Args[1]
   21174 			if v_1.Op != OpAMD64SETA {
   21175 				break
   21176 			}
   21177 			if cmp != v_1.Args[0] {
   21178 				break
   21179 			}
   21180 			yes := b.Succs[0]
   21181 			no := b.Succs[1]
   21182 			b.Kind = BlockAMD64UGT
   21183 			b.SetControl(cmp)
   21184 			_ = yes
   21185 			_ = no
   21186 			return true
   21187 		}
   21188 		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
   21189 		// cond:
   21190 		// result: (UGE cmp yes no)
   21191 		for {
   21192 			v := b.Control
   21193 			if v.Op != OpAMD64TESTB {
   21194 				break
   21195 			}
   21196 			v_0 := v.Args[0]
   21197 			if v_0.Op != OpAMD64SETAE {
   21198 				break
   21199 			}
   21200 			cmp := v_0.Args[0]
   21201 			v_1 := v.Args[1]
   21202 			if v_1.Op != OpAMD64SETAE {
   21203 				break
   21204 			}
   21205 			if cmp != v_1.Args[0] {
   21206 				break
   21207 			}
   21208 			yes := b.Succs[0]
   21209 			no := b.Succs[1]
   21210 			b.Kind = BlockAMD64UGE
   21211 			b.SetControl(cmp)
   21212 			_ = yes
   21213 			_ = no
   21214 			return true
   21215 		}
		// match: (NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no)
		// cond:
		// result: (UGT  cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE  cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF  cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF  cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
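		// InvertFlags stands for the same flags computed with the
		// comparison operands swapped. Equality is symmetric in its
		// operands, so NE is preserved unchanged here; the ordered
		// unsigned blocks below (UGE, UGT, ULE, ULT) instead flip to
		// their operand-reversed counterparts.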
		// match: (NE (InvertFlags cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
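		// A Flag* control records a comparison whose outcome was
		// determined at compile time, so the branch can be resolved
		// statically: the block becomes a BlockFirst, which always
		// takes Succs[0]. When the known flags falsify the condition,
		// swapSuccessors first routes the old no edge into that slot.
		// For example, (NE (FlagEQ)) can never take the yes edge, hence
		// the swap in the first rule below; the four FlagLT_*/FlagGT_*
		// states all represent unequal operands and so satisfy NE,
		// which is why those rules branch to yes directly.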
		// match: (NE (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
	case BlockAMD64UGE:
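		// The unsigned-comparison blocks (UGE, UGT, ULE, ULT) below all
		// apply the same two folds seen in the NE case: an InvertFlags
		// control rewrites the block to the operand-swapped condition
		// (UGE <-> ULE, UGT <-> ULT), and a constant Flag* control
		// resolves the branch statically via BlockFirst, picking yes or
		// no according to the recorded unsigned ordering (the _ULT/_UGT
		// suffix of the flag state).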
		// match: (UGE (InvertFlags cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			_ = yes
			_ = no
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			_ = yes
			_ = no
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			yes := b.Succs[0]
			no := b.Succs[1]
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.swapSuccessors()
			_ = no
			_ = yes
			return true
		}
	}
	return false
}
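// A minimal sketch of how these block rewrites are presumably driven,
// assuming a fixpoint loop like the generic rewrite pass in this
// package (the wrapper below is illustrative only; rewriteBlocksAMD64
// is a hypothetical name, not part of this file):
//
//	func rewriteBlocksAMD64(f *Func, config *Config) {
//		for changed := true; changed; {
//			changed = false
//			for _, b := range f.Blocks {
//				if rewriteBlockAMD64(b, config) {
//					changed = true
//				}
//			}
//		}
//	}
//
// Each rule that fires mutates b in place (Kind, Control, successor
// order) and returns true, so rewriting repeats until no rule applies.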