// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/arm"
)

func defframe(ptxt *obj.Prog) {
	var n *gc.Node

	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to zero ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt

	hi := int64(0)
	lo := hi
	r0 := uint32(0)
	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
		n = l.N
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatal("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}
		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
			// merge with range we already have
			lo = gc.Rnd(n.Xoffset, int64(gc.Widthptr))

			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi, &r0)

		// set new range
		hi = n.Xoffset + n.Type.Width

		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi, &r0)
}

func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if *r0 == 0 {
		p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
		*r0 = 1
	}

	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
		}
	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
		p.Reg = arm.REG_R1
		p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
		p1 := p
		p.Scond |= arm.C_PBIT
		p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
		p.Reg = arm.REG_R2
		p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}
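
// appendpp splices a new instruction after p, filling in the opcode and
// the from/to operands, and returns the new Prog so calls can be chained.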
func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int, treg int, toffset int32) *obj.Prog {
	q := gc.Ctxt.NewProg()
	gc.Clearp(q)
	q.As = int16(as)
	q.Lineno = p.Lineno
	q.From.Type = int16(ftype)
	q.From.Reg = int16(freg)
	q.From.Offset = int64(foffset)
	q.To.Type = int16(ttype)
	q.To.Reg = int16(treg)
	q.To.Offset = int64(toffset)
	q.Link = p.Link
	p.Link = q
	return q
}

/*
 * generate high multiply
 * res = (nl * nr) >> wordsize
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nl, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gc.Cgen(nr, &n2)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)

	case gc.TUINT8,
		gc.TUINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)

	// perform a long multiplication.
	case gc.TINT32,
		gc.TUINT32:
		var p *obj.Prog
		if gc.Issigned[t.Etype] {
			p = gins(arm.AMULL, &n2, nil)
		} else {
			p = gins(arm.AMULLU, &n2, nil)
		}

		// n2 * n1 -> (n1 n2)
		p.Reg = n1.Reg

		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = n1.Reg
		p.To.Offset = int64(n2.Reg)

	default:
		gc.Fatal("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
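
// Note on the long-multiply case above: with To.Type = obj.TYPE_REGREG,
// the Prog carries a register pair for the 64-bit product. As the
// "(n1 n2)" comment indicates, n1 (To.Reg) receives the high word and
// n2 (To.Offset) the low word, which is why the high-multiply result is
// then read back out of n1.
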
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Type.Width > 4 {
		gc.Fatal("cgen_shift %v", nl.Type)
	}

	w := int(nl.Type.Width * 8)

	if op == gc.OLROT {
		v := nr.Int()
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		if w == 32 {
			gc.Cgen(nl, &n1)
			gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
		} else {
			var n2 gc.Node
			gc.Regalloc(&n2, nl.Type, nil)
			gc.Cgen(nl, &n2)
			gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
			gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
			gc.Regfree(&n2)

			// Ensure sign/zero-extended result.
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}

		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc == 0 {
			// nothing to do
		} else if sc >= uint64(nl.Type.Width*8) {
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
			} else {
				gins(arm.AEOR, &n1, &n1)
			}
		} else {
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
			} else if op == gc.ORSH {
				gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
			} else { // OLSH
				gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
			}
		}

		// Left-shift of smaller word must be sign/zero-extended.
		if w < 32 && op == gc.OLSH {
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	tr := nr.Type
	var t gc.Node
	var n1 gc.Node
	var n2 gc.Node
	var n3 gc.Node
	if tr.Width > 4 {
		var nt gc.Node
		gc.Tempname(&nt, nr.Type)
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Cgen(nr, &nt)
			n1 = nt
		} else {
			gc.Cgen(nr, &nt)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}

		// A 64-bit shift count only matters through its low word:
		// if the high word is non-zero the count is >= 2**32, so the
		// shift saturates; force the 32-bit count to w in that case.
		var hi gc.Node
		var lo gc.Node
		split64(&nt, &lo, &hi)
		gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
		gmove(&lo, &n1)
		gmove(&hi, &n3)
		splitclean()
		gins(arm.ATST, &n3, nil)
		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
		p1 := gins(arm.AMOVW, &t, &n1)
		p1.Scond = arm.C_SCOND_NE
		tr = gc.Types[gc.TUINT32]
		gc.Regfree(&n3)
	} else {
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
		} else {
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}
	}

	// test for shift being 0
	gins(arm.ATST, &n1, nil)

	p3 := gc.Gbranch(arm.ABEQ, nil, -1)

	// test and fix up large shifts
	// TODO: if(!bounded), don't emit some of this.
	gc.Regalloc(&n3, tr, nil)

	gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
	gmove(&t, &n3)
	gins(arm.ACMP, &n1, &n3)

	// HS: count >= w, produce the saturated result;
	// LO: count < w, perform the register-counted shift.
	if op == gc.ORSH {
		var p1 *obj.Prog
		var p2 *obj.Prog
		if gc.Issigned[nl.Type.Etype] {
			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
		} else {
			p1 = gins(arm.AEOR, &n2, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
		}

		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	} else {
		p1 := gins(arm.AEOR, &n2, &n2)
		p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	}

	gc.Regfree(&n3)

	gc.Patch(p3, gc.Pc)

	// Left-shift of smaller word must be sign/zero-extended.
	if w < 32 && op == gc.OLSH {
		gins(optoas(gc.OAS, nl.Type), &n2, &n2)
	}
	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
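
// clearfat zeroes a fat (multiword) value. After trying componentgen,
// it zeroes q 4-byte words and then c trailing bytes: large objects
// (q > 128) get an inline store loop, mid-sized ones use DUFFZERO, and
// small ones are unrolled into post-incrementing MOVW/MOVB stores.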
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // quads

	var r0 gc.Node
	r0.Op = gc.OREGISTER

	r0.Reg = arm.REG_R0
	var r1 gc.Node
	r1.Op = gc.OREGISTER
	r1.Reg = arm.REG_R1
	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
	gc.Agen(nl, &dst)
	var nc gc.Node
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	var nz gc.Node
	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	gc.Cgen(&nc, &nz)

	if q > 128 {
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)

		gc.Regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		f := gc.Sysfunc("duffzero")
		p := gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		var p *obj.Prog
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT

			//print("1. %v\n", p);
			q--
		}
	}

	var p *obj.Prog
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT

		//print("2. %v\n", p);
		c--
	}

	gc.Regfree(&dst)
	gc.Regfree(&nz)
}

// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var reg int
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p.Lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatal("invalid nil check %v", p)
		}
		reg = int(p.From.Reg)

		// check is
		//	CMP arg, $0
		//	MOV.EQ arg, 0(arg)
		p1 = gc.Ctxt.NewProg()

		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p1.As = arm.AMOVW
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = int16(reg)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = int16(reg)
		p1.To.Offset = 0
		p1.Scond = arm.C_SCOND_EQ
		p.As = arm.ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Reg = 0
		p.From.Offset = 0
		p.Reg = int16(reg)
	}
}

// ginsnop emits a no-op: AND.EQ R0, R0.
func ginsnop() {
	var r gc.Node
	gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
	p := gins(arm.AAND, &r, &r)
	p.Scond = arm.C_SCOND_EQ
}

/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n *gc.Node) {
	var n1 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	gmove(&n1, &n2)
	gins(as, &n2, n)
	gc.Regfree(&n2)
}
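
// ginscmp generates a comparison of n1 and n2 for operator op on type t
// and returns the conditional branch Prog. A constant-zero operand is
// kept on the right (reversing op if needed) so it can be compared as
// an immediate.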
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}
	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
		gins(arm.ACMP, &r1, n2)
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}

// addr += index*width if possible.
// It reports whether the addition was emitted.
func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
	switch width {
	case 2:
		gshift(arm.AADD, index, arm.SHIFT_LL, 1, addr)
		return true
	case 4:
		gshift(arm.AADD, index, arm.SHIFT_LL, 2, addr)
		return true
	case 8:
		gshift(arm.AADD, index, arm.SHIFT_LL, 3, addr)
		return true
	}
	return false
}

// res = runtime.getg()
func getg(res *gc.Node) {
	var n1 gc.Node
	gc.Nodreg(&n1, res.Type, arm.REGG)
	gmove(&n1, res)
}