// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm

import (
	"fmt"
	"math"

	"cmd/compile/internal/gc"
	"cmd/compile/internal/ssa"
	"cmd/internal/obj"
	"cmd/internal/obj/arm"
)

// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm.AMOVF
		case 8:
			return arm.AMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			if t.IsSigned() {
				return arm.AMOVB
			} else {
				return arm.AMOVBU
			}
		case 2:
			if t.IsSigned() {
				return arm.AMOVH
			} else {
				return arm.AMOVHU
			}
		case 4:
			return arm.AMOVW
		}
	}
	panic("bad load type")
}

// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type) obj.As {
	if t.IsFloat() {
		switch t.Size() {
		case 4:
			return arm.AMOVF
		case 8:
			return arm.AMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			return arm.AMOVB
		case 2:
			return arm.AMOVH
		case 4:
			return arm.AMOVW
		}
	}
	panic("bad store type")
}

// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands
type shift int64

// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
func (v shift) String() string {
	op := "<<>>->@>"[((v>>5)&3)<<1:]
	if v&(1<<4) != 0 {
		// register shift
		return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
	} else {
		// constant shift
		return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
	}
}

// makeshift encodes a register shifted by a constant
func makeshift(reg int16, typ int64, s int64) shift {
	return shift(int64(reg&0xf) | typ | (s&31)<<7)
}

// genshift generates a Prog for r = r0 op (r1 shifted by s)
func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
	p := gc.Prog(as)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(makeshift(r1, typ, s))
	p.Reg = r0
	if r != 0 {
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	}
	return p
}

// makeregshift encodes a register shifted by a register
func makeregshift(r1 int16, typ int64, r2 int16) shift {
	return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
}

// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
func genregshift(as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
	p := gc.Prog(as)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(makeregshift(r1, typ, r2))
	p.Reg = r0
	if r != 0 {
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	}
	return p
}
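
// The TYPE_SHIFT encoding packs an ARM shifted-register operand into a
// single int64. A sketch of the layout, as implied by makeshift,
// makeregshift and shift.String above:
//
//	bits 0-3:  the register being shifted
//	bit 4:     0 for a constant shift, 1 for a register shift
//	bits 5-6:  shift type (LL "<<", LR ">>", AR "->", RR "@>")
//	bits 7-11: shift amount, for a constant shift
//	bits 8-11: shift-amount register, for a register shift
//
// For example, makeshift(arm.REG_R1, arm.SHIFT_LL, 2) encodes the operand
// that prints as "R1<<2".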

func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
	s.SetLineno(v.Line)
	switch v.Op {
	case ssa.OpInitMem:
		// memory arg needs no code
	case ssa.OpArg:
		// input args need no code
	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
		// nothing to do
	case ssa.OpCopy, ssa.OpARMMOVWconvert, ssa.OpARMMOVWreg:
		if v.Type.IsMemory() {
			return
		}
		x := v.Args[0].Reg()
		y := v.Reg()
		if x == y {
			return
		}
		as := arm.AMOVW
		if v.Type.IsFloat() {
			switch v.Type.Size() {
			case 4:
				as = arm.AMOVF
			case 8:
				as = arm.AMOVD
			default:
				panic("bad float size")
			}
		}
		p := gc.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x
		p.To.Type = obj.TYPE_REG
		p.To.Reg = y
	case ssa.OpARMMOVWnop:
		if v.Reg() != v.Args[0].Reg() {
			v.Fatalf("input[0] and output not in same register %s", v.LongString())
		}
		// nothing to do
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		p := gc.Prog(loadByType(v.Type))
		gc.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpPhi:
		gc.CheckLoweredPhi(v)
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		p := gc.Prog(storeByType(v.Type))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		gc.AddrAuto(&p.To, v)
	case ssa.OpARMUDIVrtcall:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = obj.Linklookup(gc.Ctxt, "udiv", 0)
	case ssa.OpARMADD,
		ssa.OpARMADC,
		ssa.OpARMSUB,
		ssa.OpARMSBC,
		ssa.OpARMRSB,
		ssa.OpARMAND,
		ssa.OpARMOR,
		ssa.OpARMXOR,
		ssa.OpARMBIC,
		ssa.OpARMMUL,
		ssa.OpARMADDF,
		ssa.OpARMADDD,
		ssa.OpARMSUBF,
		ssa.OpARMSUBD,
		ssa.OpARMMULF,
		ssa.OpARMMULD,
		ssa.OpARMDIVF,
		ssa.OpARMDIVD:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMADDS,
		ssa.OpARMSUBS:
		r := v.Reg0()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := gc.Prog(v.Op.Asm())
		p.Scond = arm.C_SBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMSLL,
		ssa.OpARMSRL,
		ssa.OpARMSRA:
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMSRAcond:
		// ARM shift instructions use only the low-order byte of the shift amount,
		// so generate conditional instructions to deal with large shifts.
		// The flag is already set:
		// SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit
		// SRA.LO Rarg1, Rarg0, Rdst
		r := v.Reg()
		r1 := v.Args[0].Reg()
		r2 := v.Args[1].Reg()
		p := gc.Prog(arm.ASRA)
		p.Scond = arm.C_SCOND_HS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 31
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
		p = gc.Prog(arm.ASRA)
		p.Scond = arm.C_SCOND_LO
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
	case ssa.OpARMADDconst,
		ssa.OpARMADCconst,
		ssa.OpARMSUBconst,
		ssa.OpARMSBCconst,
		ssa.OpARMRSBconst,
		ssa.OpARMRSCconst,
		ssa.OpARMANDconst,
		ssa.OpARMORconst,
		ssa.OpARMXORconst,
		ssa.OpARMBICconst,
		ssa.OpARMSLLconst,
		ssa.OpARMSRLconst,
		ssa.OpARMSRAconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMADDSconst,
		ssa.OpARMSUBSconst,
		ssa.OpARMRSBSconst:
		p := gc.Prog(v.Op.Asm())
		p.Scond = arm.C_SBIT
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()
	case ssa.OpARMSRRconst:
		genshift(arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
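	// The shifted-operand ops below fold a constant shift of the second
	// operand into the instruction itself; e.g. ADDshiftLL with AuxInt 3
	// assembles to something like (a sketch)
	//	ADD	R1<<3, R2, R3	// R3 = R2 + R1<<3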
	case ssa.OpARMADDshiftLL,
		ssa.OpARMADCshiftLL,
		ssa.OpARMSUBshiftLL,
		ssa.OpARMSBCshiftLL,
		ssa.OpARMRSBshiftLL,
		ssa.OpARMRSCshiftLL,
		ssa.OpARMANDshiftLL,
		ssa.OpARMORshiftLL,
		ssa.OpARMXORshiftLL,
		ssa.OpARMBICshiftLL:
		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMADDSshiftLL,
		ssa.OpARMSUBSshiftLL,
		ssa.OpARMRSBSshiftLL:
		p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRL,
		ssa.OpARMADCshiftRL,
		ssa.OpARMSUBshiftRL,
		ssa.OpARMSBCshiftRL,
		ssa.OpARMRSBshiftRL,
		ssa.OpARMRSCshiftRL,
		ssa.OpARMANDshiftRL,
		ssa.OpARMORshiftRL,
		ssa.OpARMXORshiftRL,
		ssa.OpARMBICshiftRL:
		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMADDSshiftRL,
		ssa.OpARMSUBSshiftRL,
		ssa.OpARMRSBSshiftRL:
		p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRA,
		ssa.OpARMADCshiftRA,
		ssa.OpARMSUBshiftRA,
		ssa.OpARMSBCshiftRA,
		ssa.OpARMRSBshiftRA,
		ssa.OpARMRSCshiftRA,
		ssa.OpARMANDshiftRA,
		ssa.OpARMORshiftRA,
		ssa.OpARMXORshiftRA,
		ssa.OpARMBICshiftRA:
		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMADDSshiftRA,
		ssa.OpARMSUBSshiftRA,
		ssa.OpARMRSBSshiftRA:
		p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
		p.Scond = arm.C_SBIT
	case ssa.OpARMXORshiftRR:
		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
	case ssa.OpARMMVNshiftLL:
		genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMMVNshiftRL:
		genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMMVNshiftRA:
		genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMMVNshiftLLreg:
		genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
	case ssa.OpARMMVNshiftRLreg:
		genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
	case ssa.OpARMMVNshiftRAreg:
		genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
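	// In the reg-shifted variants below the shift amount comes from a
	// third register rather than a constant, e.g. (a sketch)
	//	ADD	R1<<R2, R3, R4	// R4 = R3 + R1<<R2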
	case ssa.OpARMADDshiftLLreg,
		ssa.OpARMADCshiftLLreg,
		ssa.OpARMSUBshiftLLreg,
		ssa.OpARMSBCshiftLLreg,
		ssa.OpARMRSBshiftLLreg,
		ssa.OpARMRSCshiftLLreg,
		ssa.OpARMANDshiftLLreg,
		ssa.OpARMORshiftLLreg,
		ssa.OpARMXORshiftLLreg,
		ssa.OpARMBICshiftLLreg:
		genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
	case ssa.OpARMADDSshiftLLreg,
		ssa.OpARMSUBSshiftLLreg,
		ssa.OpARMRSBSshiftLLreg:
		p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRLreg,
		ssa.OpARMADCshiftRLreg,
		ssa.OpARMSUBshiftRLreg,
		ssa.OpARMSBCshiftRLreg,
		ssa.OpARMRSBshiftRLreg,
		ssa.OpARMRSCshiftRLreg,
		ssa.OpARMANDshiftRLreg,
		ssa.OpARMORshiftRLreg,
		ssa.OpARMXORshiftRLreg,
		ssa.OpARMBICshiftRLreg:
		genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
	case ssa.OpARMADDSshiftRLreg,
		ssa.OpARMSUBSshiftRLreg,
		ssa.OpARMRSBSshiftRLreg:
		p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
		p.Scond = arm.C_SBIT
	case ssa.OpARMADDshiftRAreg,
		ssa.OpARMADCshiftRAreg,
		ssa.OpARMSUBshiftRAreg,
		ssa.OpARMSBCshiftRAreg,
		ssa.OpARMRSBshiftRAreg,
		ssa.OpARMRSCshiftRAreg,
		ssa.OpARMANDshiftRAreg,
		ssa.OpARMORshiftRAreg,
		ssa.OpARMXORshiftRAreg,
		ssa.OpARMBICshiftRAreg:
		genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
	case ssa.OpARMADDSshiftRAreg,
		ssa.OpARMSUBSshiftRAreg,
		ssa.OpARMRSBSshiftRAreg:
		p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
		p.Scond = arm.C_SBIT
	case ssa.OpARMHMUL,
		ssa.OpARMHMULU:
		// 32-bit high multiplication
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = v.Reg()
		p.To.Offset = arm.REGTMP // throw away the low 32 bits into the tmp register
	case ssa.OpARMMULLU:
		// 32-bit multiplication with a 64-bit result: high 32 bits in out0, low 32 bits in out1
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = v.Reg0()           // high 32 bits
		p.To.Offset = int64(v.Reg1()) // low 32 bits
	case ssa.OpARMMULA:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REGREG2
		p.To.Reg = v.Reg()                   // result
		p.To.Offset = int64(v.Args[2].Reg()) // addend
	case ssa.OpARMMOVWconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVFconst,
		ssa.OpARMMOVDconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
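	// The comparison ops below only set the condition flags; they have no
	// destination register, so only From and Reg are filled in.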
	case ssa.OpARMCMP,
		ssa.OpARMCMN,
		ssa.OpARMTST,
		ssa.OpARMTEQ,
		ssa.OpARMCMPF,
		ssa.OpARMCMPD:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		// Special layout in ARM assembly:
		// compared to x86, the operands of ARM's CMP are reversed.
		p.From.Reg = v.Args[1].Reg()
		p.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPconst,
		ssa.OpARMCMNconst,
		ssa.OpARMTSTconst,
		ssa.OpARMTEQconst:
		// Special layout in ARM assembly
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPF0,
		ssa.OpARMCMPD0:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMCMPshiftLL:
		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
	case ssa.OpARMCMPshiftRL:
		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
	case ssa.OpARMCMPshiftRA:
		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
	case ssa.OpARMCMPshiftLLreg:
		genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
	case ssa.OpARMCMPshiftRLreg:
		genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
	case ssa.OpARMCMPshiftRAreg:
		genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
	case ssa.OpARMMOVWaddr:
		p := gc.Prog(arm.AMOVW)
		p.From.Type = obj.TYPE_ADDR
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

		var wantreg string
		// MOVW $sym+off(base), R
		// the assembler expands it as follows:
		// - base is SP: add the constant offset to SP (R13);
		//   when the constant is large, the tmp register (R11) may be used
		// - base is SB: load the external address from the constant pool (use relocation)
		switch v.Aux.(type) {
		default:
			v.Fatalf("aux is of unknown type %T", v.Aux)
		case *ssa.ExternSymbol:
			wantreg = "SB"
			gc.AddAux(&p.From, v)
		case *ssa.ArgSymbol, *ssa.AutoSymbol:
			wantreg = "SP"
			gc.AddAux(&p.From, v)
		case nil:
			// No sym, just MOVW $off(SP), R
			wantreg = "SP"
			p.From.Reg = arm.REGSP
			p.From.Offset = v.AuxInt
		}
		if reg := v.Args[0].RegName(); reg != wantreg {
			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
		}

	case ssa.OpARMMOVBload,
		ssa.OpARMMOVBUload,
		ssa.OpARMMOVHload,
		ssa.OpARMMOVHUload,
		ssa.OpARMMOVWload,
		ssa.OpARMMOVFload,
		ssa.OpARMMOVDload:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVBstore,
		ssa.OpARMMOVHstore,
		ssa.OpARMMOVWstore,
		ssa.OpARMMOVFstore,
		ssa.OpARMMOVDstore:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		gc.AddAux(&p.To, v)
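	// The indexed and shifted-index loads/stores below reuse the shift
	// encoding: the (possibly shifted) index register goes in the
	// TYPE_SHIFT operand and the base register in that operand's Reg
	// field, giving an effective address of base + index<<s (or >>s).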
	case ssa.OpARMMOVWloadidx:
		// this is just a shift of 0 bits
		fallthrough
	case ssa.OpARMMOVWloadshiftLL:
		p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWloadshiftRL:
		p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWloadshiftRA:
		p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
		p.From.Reg = v.Args[0].Reg()
	case ssa.OpARMMOVWstoreidx:
		// this is just a shift of 0 bits
		fallthrough
	case ssa.OpARMMOVWstoreshiftLL:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
	case ssa.OpARMMOVWstoreshiftRL:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
	case ssa.OpARMMOVWstoreshiftRA:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_SHIFT
		p.To.Reg = v.Args[0].Reg()
		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt))
	case ssa.OpARMMOVBreg,
		ssa.OpARMMOVBUreg,
		ssa.OpARMMOVHreg,
		ssa.OpARMMOVHUreg:
		a := v.Args[0]
		for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
			a = a.Args[0]
		}
		if a.Op == ssa.OpLoadReg {
			t := a.Type
			switch {
			case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
				v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
				v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
				v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
				// arg is a proper-typed load, already zero/sign-extended, don't extend again
				if v.Reg() == v.Args[0].Reg() {
					return
				}
				p := gc.Prog(arm.AMOVW)
				p.From.Type = obj.TYPE_REG
				p.From.Reg = v.Args[0].Reg()
				p.To.Type = obj.TYPE_REG
				p.To.Reg = v.Reg()
				return
			default:
			}
		}
		fallthrough
	case ssa.OpARMMVN,
		ssa.OpARMCLZ,
		ssa.OpARMSQRTD,
		ssa.OpARMNEGF,
		ssa.OpARMNEGD,
		ssa.OpARMMOVWF,
		ssa.OpARMMOVWD,
		ssa.OpARMMOVFW,
		ssa.OpARMMOVDW,
		ssa.OpARMMOVFD,
		ssa.OpARMMOVDF:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMMOVWUF,
		ssa.OpARMMOVWUD,
		ssa.OpARMMOVFWU,
		ssa.OpARMMOVDWU:
		p := gc.Prog(v.Op.Asm())
		p.Scond = arm.C_UBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMOVWHSconst:
		p := gc.Prog(arm.AMOVW)
		p.Scond = arm.C_SCOND_HS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpARMCMOVWLSconst:
		p := gc.Prog(arm.AMOVW)
		p.Scond = arm.C_SCOND_LS
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
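	// For each of the CALL ops below, AuxInt is the size of the call's
	// outgoing argument area; gc.Maxarg records the largest such area
	// seen, which is used when sizing the stack frame.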
	case ssa.OpARMCALLstatic:
		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
			// Deferred calls will appear to be returning to
			// the CALL deferreturn(SB) that we are about to emit.
			// However, the stack trace code will show the line
			// of the instruction byte before the return PC.
			// To avoid that being an unrelated instruction,
			// insert an actual hardware NOP that will have the right line number.
			// This is different from obj.ANOP, which is a virtual no-op
			// that doesn't make it into the instruction stream.
			ginsnop()
		}
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMCALLclosure:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 0
		p.To.Reg = v.Args[0].Reg()
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMCALLdefer:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMCALLgo:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMCALLinter:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 0
		p.To.Reg = v.Args[0].Reg()
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARMDUFFZERO:
		p := gc.Prog(obj.ADUFFZERO)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = v.AuxInt
	case ssa.OpARMDUFFCOPY:
		p := gc.Prog(obj.ADUFFCOPY)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
		p.To.Offset = v.AuxInt
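	// DUFFZERO/DUFFCOPY above jump into the middle of the runtime's
	// duffzero/duffcopy routines (Duff's device); AuxInt is the offset
	// of the entry point, which determines how much gets zeroed/copied.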
	case ssa.OpARMLoweredNilCheck:
		// Issue a load which will fault if arg is nil.
		p := gc.Prog(arm.AMOVB)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm.REGTMP
		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
			gc.Warnl(v.Line, "generated nil check")
		}
	case ssa.OpARMLoweredZero:
		// MOVW.P	Rarg2, 4(R1)
		// CMP	Rarg1, R1
		// BLE	-2(PC)
		// arg1 is the address of the last element to zero
		// arg2 is known to be zero
		// auxint is alignment
		var sz int64
		var mov obj.As
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = arm.AMOVW
		case v.AuxInt%2 == 0:
			sz = 2
			mov = arm.AMOVH
		default:
			sz = 1
			mov = arm.AMOVB
		}
		p := gc.Prog(mov)
		p.Scond = arm.C_PBIT
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = arm.REG_R1
		p.To.Offset = sz
		p2 := gc.Prog(arm.ACMP)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = v.Args[1].Reg()
		p2.Reg = arm.REG_R1
		p3 := gc.Prog(arm.ABLE)
		p3.To.Type = obj.TYPE_BRANCH
		gc.Patch(p3, p)
	case ssa.OpARMLoweredMove:
		// MOVW.P	4(R1), Rtmp
		// MOVW.P	Rtmp, 4(R2)
		// CMP	Rarg2, R1
		// BLE	-3(PC)
		// arg2 is the address of the last element of src
		// auxint is alignment
		var sz int64
		var mov obj.As
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = arm.AMOVW
		case v.AuxInt%2 == 0:
			sz = 2
			mov = arm.AMOVH
		default:
			sz = 1
			mov = arm.AMOVB
		}
		p := gc.Prog(mov)
		p.Scond = arm.C_PBIT
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = arm.REG_R1
		p.From.Offset = sz
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm.REGTMP
		p2 := gc.Prog(mov)
		p2.Scond = arm.C_PBIT
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = arm.REGTMP
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = arm.REG_R2
		p2.To.Offset = sz
		p3 := gc.Prog(arm.ACMP)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = v.Args[2].Reg()
		p3.Reg = arm.REG_R1
		p4 := gc.Prog(arm.ABLE)
		p4.To.Type = obj.TYPE_BRANCH
		gc.Patch(p4, p)
	case ssa.OpVarDef:
		gc.Gvardef(v.Aux.(*gc.Node))
	case ssa.OpVarKill:
		gc.Gvarkill(v.Aux.(*gc.Node))
	case ssa.OpVarLive:
		gc.Gvarlive(v.Aux.(*gc.Node))
	case ssa.OpKeepAlive:
		gc.KeepAlive(v)
	case ssa.OpARMEqual,
		ssa.OpARMNotEqual,
		ssa.OpARMLessThan,
		ssa.OpARMLessEqual,
		ssa.OpARMGreaterThan,
		ssa.OpARMGreaterEqual,
		ssa.OpARMLessThanU,
		ssa.OpARMLessEqualU,
		ssa.OpARMGreaterThanU,
		ssa.OpARMGreaterEqualU:
		// generate boolean values
		// use conditional move
		p := gc.Prog(arm.AMOVW)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
		p = gc.Prog(arm.AMOVW)
		p.Scond = condBits[v.Op]
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpSelect0, ssa.OpSelect1:
		// nothing to do
	case ssa.OpARMLoweredGetClosurePtr:
		// Closure pointer is R7 (arm.REGCTXT).
		gc.CheckLoweredGetClosurePtr(v)
	case ssa.OpARMFlagEQ,
		ssa.OpARMFlagLT_ULT,
		ssa.OpARMFlagLT_UGT,
		ssa.OpARMFlagGT_ULT,
		ssa.OpARMFlagGT_UGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
	case ssa.OpARMInvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	default:
		v.Fatalf("genValue not implemented: %s", v.LongString())
	}
}

var condBits = map[ssa.Op]uint8{
	ssa.OpARMEqual:         arm.C_SCOND_EQ,
	ssa.OpARMNotEqual:      arm.C_SCOND_NE,
	ssa.OpARMLessThan:      arm.C_SCOND_LT,
	ssa.OpARMLessThanU:     arm.C_SCOND_LO,
	ssa.OpARMLessEqual:     arm.C_SCOND_LE,
	ssa.OpARMLessEqualU:    arm.C_SCOND_LS,
	ssa.OpARMGreaterThan:   arm.C_SCOND_GT,
	ssa.OpARMGreaterThanU:  arm.C_SCOND_HI,
	ssa.OpARMGreaterEqual:  arm.C_SCOND_GE,
	ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
}

var blockJump = map[ssa.BlockKind]struct {
	asm, invasm obj.As
}{
	ssa.BlockARMEQ:  {arm.ABEQ, arm.ABNE},
	ssa.BlockARMNE:  {arm.ABNE, arm.ABEQ},
	ssa.BlockARMLT:  {arm.ABLT, arm.ABGE},
	ssa.BlockARMGE:  {arm.ABGE, arm.ABLT},
	ssa.BlockARMLE:  {arm.ABLE, arm.ABGT},
	ssa.BlockARMGT:  {arm.ABGT, arm.ABLE},
	ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
	ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
	ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
	ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
}
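
// For a conditional block, asm is the branch taken to Succs[0] when the
// condition holds, and invasm is its inverse, branching to Succs[1];
// ssaGenBlock picks whichever one lets the block laid out next fall
// through, emitting an extra JMP only when neither successor is next.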
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
	s.SetLineno(b.Line)

	switch b.Kind {
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := gc.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}

	case ssa.BlockDefer:
		// defer returns in R0:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := gc.Prog(arm.ACMP)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.Reg = arm.REG_R0
		p = gc.Prog(arm.ABNE)
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := gc.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		}

	case ssa.BlockExit:
		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here

	case ssa.BlockRet:
		gc.Prog(obj.ARET)

	case ssa.BlockRetJmp:
		p := gc.Prog(obj.ARET)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))

	case ssa.BlockARMEQ, ssa.BlockARMNE,
		ssa.BlockARMLT, ssa.BlockARMGE,
		ssa.BlockARMLE, ssa.BlockARMGT,
		ssa.BlockARMULT, ssa.BlockARMUGT,
		ssa.BlockARMULE, ssa.BlockARMUGE:
		jmp := blockJump[b.Kind]
		var p *obj.Prog
		switch next {
		case b.Succs[0].Block():
			p = gc.Prog(jmp.invasm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
		case b.Succs[1].Block():
			p = gc.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
		default:
			p = gc.Prog(jmp.asm)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
			q := gc.Prog(obj.AJMP)
			q.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
		}

	default:
		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
	}
}