// Derived from Inferno utils/5c/txt.c
// http://code.google.com/p/inferno-os/source/browse/utils/5c/txt.c
//
// Copyright 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright 1997-1999 Vita Nuova Limited
// Portions Copyright 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright 2004,2006 Bruce Ellis
// Portions Copyright 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package arm

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/arm"
	"fmt"
)

var resvd = []int{
	arm.REG_R9,  // formerly reserved for m; might be okay to reuse now; not sure about NaCl
	arm.REG_R10, // reserved for g
}

/*
 * return constant i node.
 * overwritten by next call, but useful in calls to gins.
 */
var ncon_n gc.Node

func ncon(i uint32) *gc.Node {
	if ncon_n.Type == nil {
		gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
	}
	ncon_n.SetInt(int64(i))
	return &ncon_n
}

var sclean [10]gc.Node

var nsclean int
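
// sclean and nsclean form a small stack of temporaries: each call to
// split64 pushes one entry (OEMPTY when no register was allocated) and
// each matching splitclean pops it and frees any register it holds.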

/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatal("split64 %v", n.Type)
	}

	if nsclean >= len(sclean) {
		gc.Fatal("split64 clean")
	}
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				gc.Igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}

			n = &n1

		case gc.ONAME:
			if n.Class == gc.PPARAMREF {
				var n1 gc.Node
				gc.Cgen(n.Name.Heapaddr, &n1)
				sclean[nsclean-1] = n1
				n = &n1
			}

			// nothing
		case gc.OINDREG:
			break
		}

		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		var n1 gc.Node
		n.Convconst(&n1, n.Type)
		i := n1.Int()
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}

func splitclean() {
	if nsclean <= 0 {
		gc.Fatal("splitclean")
	}
	nsclean--
	if sclean[nsclean].Op != gc.OEMPTY {
		gc.Regfree(&sclean[nsclean])
	}
}

func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", f, t)
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands;
	// except 64-bit, which always copies via registers anyway.
	var a int
	var r1 gc.Node
	if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT16,
			gc.TINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT32])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm.AMOVW, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT16,
			gc.TUINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT32])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm.AMOVW, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = gc.Simsimtype(con.Type)

		// constants can't move directly to memory
		if gc.Ismem(t) && !gc.Is64(t.Type) {
			goto hard
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
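	// The switch keys below pack the source and destination simple types
	// into a single value as ft<<16 | tt, the Go translation of the
	// CASE(ft, tt) macro still visible in the commented-out cases.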

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		// should not happen
		gc.Fatal("gmove %v -> %v", f, t)
		return

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8: // same size
		if !gc.Ismem(f) {
			a = arm.AMOVB
			break
		}
		fallthrough

	case gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8:
		a = arm.AMOVBS

	case gc.TUINT8<<16 | gc.TUINT8:
		if !gc.Ismem(f) {
			a = arm.AMOVB
			break
		}
		fallthrough

	case gc.TINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8:
		a = arm.AMOVBU

	case gc.TINT64<<16 | gc.TINT8, // truncate low word
		gc.TUINT64<<16 | gc.TINT8:
		a = arm.AMOVBS

		goto trunc64

	case gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = arm.AMOVBU
		goto trunc64

	case gc.TINT16<<16 | gc.TINT16: // same size
		if !gc.Ismem(f) {
			a = arm.AMOVH
			break
		}
		fallthrough

	case gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16:
		a = arm.AMOVHS

	case gc.TUINT16<<16 | gc.TUINT16:
		if !gc.Ismem(f) {
			a = arm.AMOVH
			break
		}
		fallthrough

	case gc.TINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16:
		a = arm.AMOVHU

	case gc.TINT64<<16 | gc.TINT16, // truncate low word
		gc.TUINT64<<16 | gc.TINT16:
		a = arm.AMOVHS

		goto trunc64

	case gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = arm.AMOVHU
		goto trunc64

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TUINT32:
		a = arm.AMOVW

	case gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		var flo gc.Node
		var fhi gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Regalloc(&r1, t.Type, nil)
		gins(arm.AMOVW, &flo, &r1)
		gins(arm.AMOVW, &r1, t)
		gc.Regfree(&r1)
		splitclean()
		return

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		var fhi gc.Node
		var flo gc.Node
		split64(f, &flo, &fhi)

		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)
		var r1 gc.Node
		gc.Regalloc(&r1, flo.Type, nil)
		var r2 gc.Node
		gc.Regalloc(&r2, fhi.Type, nil)
		gins(arm.AMOVW, &flo, &r1)
		gins(arm.AMOVW, &fhi, &r2)
		gins(arm.AMOVW, &r1, &tlo)
		gins(arm.AMOVW, &r2, &thi)
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		splitclean()
		splitclean()
		return

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32:
		a = arm.AMOVBS

		goto rdst

	case gc.TINT8<<16 | gc.TINT64, // convert via int32
		gc.TINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32:
		a = arm.AMOVBU

		goto rdst

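	// Widening into a 64-bit integer goes through a 32-bit intermediate:
	// cvt plus the hard path below produce a (u)int32, and the 32-bit to
	// 64-bit cases further down split the destination and fill both words.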
	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
		gc.TUINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]

		goto hard

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32:
		a = arm.AMOVHS

		goto rdst

	case gc.TINT16<<16 | gc.TINT64, // convert via int32
		gc.TINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32:
		a = arm.AMOVHU

		goto rdst

	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
		gc.TUINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]

		goto hard

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)

		var r1 gc.Node
		gc.Regalloc(&r1, tlo.Type, nil)
		var r2 gc.Node
		gc.Regalloc(&r2, thi.Type, nil)
		gmove(f, &r1)
		p1 := gins(arm.AMOVW, &r1, &r2)
		p1.From.Type = obj.TYPE_SHIFT
		p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Reg)&15 // r1->31
		p1.From.Reg = 0

		//print("gmove: %v\n", p1);
		gins(arm.AMOVW, &r1, &tlo)

		gins(arm.AMOVW, &r2, &thi)
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		splitclean()
		return

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		var thi gc.Node
		var tlo gc.Node
		split64(t, &tlo, &thi)

		gmove(f, &tlo)
		var r1 gc.Node
		gc.Regalloc(&r1, thi.Type, nil)
		gins(arm.AMOVW, ncon(0), &r1)
		gins(arm.AMOVW, &r1, &thi)
		gc.Regfree(&r1)
		splitclean()
		return

		// case CASE(TFLOAT64, TUINT64):
		/*
		 * float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TUINT32,

		// case CASE(TFLOAT32, TUINT64):

		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		fa := arm.AMOVF

		a := arm.AMOVFW
		if ft == gc.TFLOAT64 {
			fa = arm.AMOVD
			a = arm.AMOVDW
		}

		ta := arm.AMOVW
		switch tt {
		case gc.TINT8:
			ta = arm.AMOVBS

		case gc.TUINT8:
			ta = arm.AMOVBU

		case gc.TINT16:
			ta = arm.AMOVHS

		case gc.TUINT16:
			ta = arm.AMOVHU
		}

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		gins(fa, f, &r1)        // load to fpu
		p1 := gins(a, &r1, &r1) // convert to w
		switch tt {
		case gc.TUINT8,
			gc.TUINT16,
			gc.TUINT32:
			p1.Scond |= arm.C_UBIT
		}

		gins(arm.AMOVW, &r1, &r2) // copy to cpu
		gins(ta, &r2, t)          // store
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		return

		/*
		 * integer to float
		 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT64:
		fa := arm.AMOVW

		switch ft {
		case gc.TINT8:
			fa = arm.AMOVBS

		case gc.TUINT8:
			fa = arm.AMOVBU

		case gc.TINT16:
			fa = arm.AMOVHS

		case gc.TUINT16:
			fa = arm.AMOVHU
		}

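		// The integer operand is loaded with a width-appropriate move (fa),
		// copied into a floating-point register, and converted in place;
		// C_UBIT below marks the conversion as unsigned.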
		a := arm.AMOVWF
		ta := arm.AMOVF
		if tt == gc.TFLOAT64 {
			a = arm.AMOVWD
			ta = arm.AMOVD
		}

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		gins(fa, f, &r1)          // load to cpu
		gins(arm.AMOVW, &r1, &r2) // copy to fpu
		p1 := gins(a, &r2, &r2)   // convert
		switch ft {
		case gc.TUINT8,
			gc.TUINT16,
			gc.TUINT32:
			p1.Scond |= arm.C_UBIT
		}

		gins(ta, &r2, t) // store
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		return

	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		gc.Fatal("gmove UINT64, TFLOAT not implemented")
		return

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = arm.AMOVF

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = arm.AMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
		gins(arm.AMOVF, f, &r1)
		gins(arm.AMOVFD, &r1, &r1)
		gins(arm.AMOVD, &r1, t)
		gc.Regfree(&r1)
		return

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
		gins(arm.AMOVD, f, &r1)
		gins(arm.AMOVDF, &r1, &r1)
		gins(arm.AMOVF, &r1, t)
		gc.Regfree(&r1)
		return
	}

	gins(a, f, t)
	return

	// TODO(kaib): we almost always require a register dest anyway, this can probably be
	// removed.
	// requires register destination
rdst:
	{
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return

	// truncate 64 bit integer
trunc64:
	var fhi gc.Node
	var flo gc.Node
	split64(f, &flo, &fhi)

	gc.Regalloc(&r1, t.Type, nil)
	gins(a, &flo, &r1)
	gins(a, &r1, t)
	gc.Regfree(&r1)
	splitclean()
	return
}

func samaddr(f *gc.Node, t *gc.Node) bool {
	if f.Op != t.Op {
		return false
	}

	switch f.Op {
	case gc.OREGISTER:
		if f.Reg != t.Reg {
			break
		}
		return true
	}

	return false
}

/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	//	Node nod;
	//	int32 v;

	if f != nil && f.Op == gc.OINDEX {
		gc.Fatal("gins OINDEX not implemented")
	}

	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(f->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	if t != nil && t.Op == gc.OINDEX {
		gc.Fatal("gins OINDEX not implemented")
	}

	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(t->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	case arm.ABL:
		if p.To.Type == obj.TYPE_REG {
			p.To.Type = obj.TYPE_MEM
		}

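	// Comparisons are rewritten below: the t operand becomes p.From, p.To
	// is cleared, and f is installed in p.Reg via raddr, so the emitted
	// CMP carries f as its register operand.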
	case arm.ACMP, arm.ACMPF, arm.ACMPD:
		if t != nil {
			if f.Op != gc.OREGISTER {
				/* generate a comparison
				TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
				*/
				gc.Fatal("bad operands to gcmp")
			}
			p.From = p.To
			p.To = obj.Addr{}
			raddr(f, p)
		}

	case arm.AMULU:
		if f != nil && f.Op != gc.OREGISTER {
			gc.Fatal("bad operands to mul")
		}

	case arm.AMOVW:
		if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR || p.From.Type == obj.TYPE_CONST) && (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) {
			gc.Fatal("gins double memory")
		}

	case arm.AADD:
		if p.To.Type == obj.TYPE_MEM {
			gc.Fatal("gins arith to mem")
		}

	case arm.ARSB:
		if p.From.Type == obj.TYPE_NONE {
			gc.Fatal("rsb with no from")
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}
	return p
}

/*
 * insert n into reg slot of p
 */
func raddr(n *gc.Node, p *obj.Prog) {
	var a obj.Addr
	gc.Naddr(&a, n)
	if a.Type != obj.TYPE_REG {
		if n != nil {
			gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
		} else {
			gc.Fatal("bad in raddr: <null>")
		}
		p.Reg = 0
	} else {
		p.Reg = a.Reg
	}
}

/* generate a constant shift
 * arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
 */
func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
	if sval <= 0 || sval > 32 {
		gc.Fatal("bad shift value: %d", sval)
	}

	sval = sval & 0x1f

	p := gins(as, nil, rhs)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Reg)&15
	return p
}

/* generate a register shift
 */
func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
	p := gins(as, nil, rhs)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(stype) | (int64(reg.Reg)&15)<<8 | 1<<4 | int64(lhs.Reg)&15
	return p
}

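// In the TYPE_SHIFT operands built by gshift and gregshift above, the low
// 4 bits hold the register being shifted, bits 5-6 hold the shift type
// (stype), and the shift amount lives either in bits 7-11 (immediate) or,
// with bit 4 set, in the register named by bits 8-11, mirroring the ARM
// data-processing shifted-operand encoding; see also the 2<<5 | 31<<7
// operand constructed in gmove.
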
/*
 * return Axxx for Oxxx on type t.
 */
func optoas(op int, t *gc.Type) int {
	if t == nil {
		gc.Fatal("optoas: t is nil")
	}

	a := obj.AXXX
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatal("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]])

		/* case CASE(OADDR, TPTR32):
			a = ALEAL;
			break;

		case CASE(OADDR, TPTR64):
			a = ALEAQ;
			break;
		*/
		// TODO(kaib): make sure the conditional branches work on all edge cases
	case gc.OEQ<<16 | gc.TBOOL,
		gc.OEQ<<16 | gc.TINT8,
		gc.OEQ<<16 | gc.TUINT8,
		gc.OEQ<<16 | gc.TINT16,
		gc.OEQ<<16 | gc.TUINT16,
		gc.OEQ<<16 | gc.TINT32,
		gc.OEQ<<16 | gc.TUINT32,
		gc.OEQ<<16 | gc.TINT64,
		gc.OEQ<<16 | gc.TUINT64,
		gc.OEQ<<16 | gc.TPTR32,
		gc.OEQ<<16 | gc.TPTR64,
		gc.OEQ<<16 | gc.TFLOAT32,
		gc.OEQ<<16 | gc.TFLOAT64:
		a = arm.ABEQ

	case gc.ONE<<16 | gc.TBOOL,
		gc.ONE<<16 | gc.TINT8,
		gc.ONE<<16 | gc.TUINT8,
		gc.ONE<<16 | gc.TINT16,
		gc.ONE<<16 | gc.TUINT16,
		gc.ONE<<16 | gc.TINT32,
		gc.ONE<<16 | gc.TUINT32,
		gc.ONE<<16 | gc.TINT64,
		gc.ONE<<16 | gc.TUINT64,
		gc.ONE<<16 | gc.TPTR32,
		gc.ONE<<16 | gc.TPTR64,
		gc.ONE<<16 | gc.TFLOAT32,
		gc.ONE<<16 | gc.TFLOAT64:
		a = arm.ABNE

	case gc.OLT<<16 | gc.TINT8,
		gc.OLT<<16 | gc.TINT16,
		gc.OLT<<16 | gc.TINT32,
		gc.OLT<<16 | gc.TINT64,
		gc.OLT<<16 | gc.TFLOAT32,
		gc.OLT<<16 | gc.TFLOAT64:
		a = arm.ABLT

	case gc.OLT<<16 | gc.TUINT8,
		gc.OLT<<16 | gc.TUINT16,
		gc.OLT<<16 | gc.TUINT32,
		gc.OLT<<16 | gc.TUINT64:
		a = arm.ABLO

	case gc.OLE<<16 | gc.TINT8,
		gc.OLE<<16 | gc.TINT16,
		gc.OLE<<16 | gc.TINT32,
		gc.OLE<<16 | gc.TINT64,
		gc.OLE<<16 | gc.TFLOAT32,
		gc.OLE<<16 | gc.TFLOAT64:
		a = arm.ABLE

	case gc.OLE<<16 | gc.TUINT8,
		gc.OLE<<16 | gc.TUINT16,
		gc.OLE<<16 | gc.TUINT32,
		gc.OLE<<16 | gc.TUINT64:
		a = arm.ABLS

	case gc.OGT<<16 | gc.TINT8,
		gc.OGT<<16 | gc.TINT16,
		gc.OGT<<16 | gc.TINT32,
		gc.OGT<<16 | gc.TINT64,
		gc.OGT<<16 | gc.TFLOAT32,
		gc.OGT<<16 | gc.TFLOAT64:
		a = arm.ABGT

	case gc.OGT<<16 | gc.TUINT8,
		gc.OGT<<16 | gc.TUINT16,
		gc.OGT<<16 | gc.TUINT32,
		gc.OGT<<16 | gc.TUINT64:
		a = arm.ABHI

	case gc.OGE<<16 | gc.TINT8,
		gc.OGE<<16 | gc.TINT16,
		gc.OGE<<16 | gc.TINT32,
		gc.OGE<<16 | gc.TINT64,
		gc.OGE<<16 | gc.TFLOAT32,
		gc.OGE<<16 | gc.TFLOAT64:
		a = arm.ABGE

	case gc.OGE<<16 | gc.TUINT8,
		gc.OGE<<16 | gc.TUINT16,
		gc.OGE<<16 | gc.TUINT32,
		gc.OGE<<16 | gc.TUINT64:
		a = arm.ABHS

	case gc.OCMP<<16 | gc.TBOOL,
		gc.OCMP<<16 | gc.TINT8,
		gc.OCMP<<16 | gc.TUINT8,
		gc.OCMP<<16 | gc.TINT16,
		gc.OCMP<<16 | gc.TUINT16,
		gc.OCMP<<16 | gc.TINT32,
		gc.OCMP<<16 | gc.TUINT32,
		gc.OCMP<<16 | gc.TPTR32:
		a = arm.ACMP

	case gc.OCMP<<16 | gc.TFLOAT32:
		a = arm.ACMPF

	case gc.OCMP<<16 | gc.TFLOAT64:
		a = arm.ACMPD

	case gc.OPS<<16 | gc.TFLOAT32,
		gc.OPS<<16 | gc.TFLOAT64:
		a = arm.ABVS

	case gc.OAS<<16 | gc.TBOOL:
		a = arm.AMOVB

	case gc.OAS<<16 | gc.TINT8:
		a = arm.AMOVBS

	case gc.OAS<<16 | gc.TUINT8:
		a = arm.AMOVBU

	case gc.OAS<<16 | gc.TINT16:
		a = arm.AMOVHS

	case gc.OAS<<16 | gc.TUINT16:
		a = arm.AMOVHU

	case gc.OAS<<16 | gc.TINT32,
		gc.OAS<<16 | gc.TUINT32,
		gc.OAS<<16 | gc.TPTR32:
		a = arm.AMOVW

	case gc.OAS<<16 | gc.TFLOAT32:
		a = arm.AMOVF

	case gc.OAS<<16 | gc.TFLOAT64:
		a = arm.AMOVD

	case gc.OADD<<16 | gc.TINT8,
		gc.OADD<<16 | gc.TUINT8,
		gc.OADD<<16 | gc.TINT16,
		gc.OADD<<16 | gc.TUINT16,
		gc.OADD<<16 | gc.TINT32,
		gc.OADD<<16 | gc.TUINT32,
		gc.OADD<<16 | gc.TPTR32:
		a = arm.AADD

	case gc.OADD<<16 | gc.TFLOAT32:
		a = arm.AADDF

	case gc.OADD<<16 | gc.TFLOAT64:
		a = arm.AADDD

	case gc.OSUB<<16 | gc.TINT8,
		gc.OSUB<<16 | gc.TUINT8,
		gc.OSUB<<16 | gc.TINT16,
		gc.OSUB<<16 | gc.TUINT16,
		gc.OSUB<<16 | gc.TINT32,
		gc.OSUB<<16 | gc.TUINT32,
		gc.OSUB<<16 | gc.TPTR32:
		a = arm.ASUB

	case gc.OSUB<<16 | gc.TFLOAT32:
		a = arm.ASUBF

	case gc.OSUB<<16 | gc.TFLOAT64:
		a = arm.ASUBD

	case gc.OMINUS<<16 | gc.TINT8,
		gc.OMINUS<<16 | gc.TUINT8,
		gc.OMINUS<<16 | gc.TINT16,
		gc.OMINUS<<16 | gc.TUINT16,
		gc.OMINUS<<16 | gc.TINT32,
		gc.OMINUS<<16 | gc.TUINT32,
		gc.OMINUS<<16 | gc.TPTR32:
		a = arm.ARSB

	case gc.OAND<<16 | gc.TINT8,
		gc.OAND<<16 | gc.TUINT8,
		gc.OAND<<16 | gc.TINT16,
		gc.OAND<<16 | gc.TUINT16,
		gc.OAND<<16 | gc.TINT32,
		gc.OAND<<16 | gc.TUINT32,
		gc.OAND<<16 | gc.TPTR32:
		a = arm.AAND

	case gc.OOR<<16 | gc.TINT8,
		gc.OOR<<16 | gc.TUINT8,
		gc.OOR<<16 | gc.TINT16,
		gc.OOR<<16 | gc.TUINT16,
		gc.OOR<<16 | gc.TINT32,
		gc.OOR<<16 | gc.TUINT32,
		gc.OOR<<16 | gc.TPTR32:
		a = arm.AORR

	case gc.OXOR<<16 | gc.TINT8,
		gc.OXOR<<16 | gc.TUINT8,
		gc.OXOR<<16 | gc.TINT16,
		gc.OXOR<<16 | gc.TUINT16,
		gc.OXOR<<16 | gc.TINT32,
		gc.OXOR<<16 | gc.TUINT32,
		gc.OXOR<<16 | gc.TPTR32:
		a = arm.AEOR

	case gc.OLSH<<16 | gc.TINT8,
		gc.OLSH<<16 | gc.TUINT8,
		gc.OLSH<<16 | gc.TINT16,
		gc.OLSH<<16 | gc.TUINT16,
		gc.OLSH<<16 | gc.TINT32,
		gc.OLSH<<16 | gc.TUINT32,
		gc.OLSH<<16 | gc.TPTR32:
		a = arm.ASLL

	case gc.ORSH<<16 | gc.TUINT8,
		gc.ORSH<<16 | gc.TUINT16,
		gc.ORSH<<16 | gc.TUINT32,
		gc.ORSH<<16 | gc.TPTR32:
		a = arm.ASRL

	case gc.ORSH<<16 | gc.TINT8,
		gc.ORSH<<16 | gc.TINT16,
		gc.ORSH<<16 | gc.TINT32:
		a = arm.ASRA

	case gc.OMUL<<16 | gc.TUINT8,
		gc.OMUL<<16 | gc.TUINT16,
		gc.OMUL<<16 | gc.TUINT32,
		gc.OMUL<<16 | gc.TPTR32:
		a = arm.AMULU

	case gc.OMUL<<16 | gc.TINT8,
		gc.OMUL<<16 | gc.TINT16,
		gc.OMUL<<16 | gc.TINT32:
		a = arm.AMUL

	case gc.OMUL<<16 | gc.TFLOAT32:
		a = arm.AMULF

	case gc.OMUL<<16 | gc.TFLOAT64:
		a = arm.AMULD

	case gc.ODIV<<16 | gc.TUINT8,
		gc.ODIV<<16 | gc.TUINT16,
		gc.ODIV<<16 | gc.TUINT32,
		gc.ODIV<<16 | gc.TPTR32:
		a = arm.ADIVU

	case gc.ODIV<<16 | gc.TINT8,
		gc.ODIV<<16 | gc.TINT16,
		gc.ODIV<<16 | gc.TINT32:
		a = arm.ADIV

	case gc.OMOD<<16 | gc.TUINT8,
		gc.OMOD<<16 | gc.TUINT16,
		gc.OMOD<<16 | gc.TUINT32,
		gc.OMOD<<16 | gc.TPTR32:
		a = arm.AMODU

	case gc.OMOD<<16 | gc.TINT8,
		gc.OMOD<<16 | gc.TINT16,
		gc.OMOD<<16 | gc.TINT32:
		a = arm.AMOD

		// case CASE(OEXTEND, TINT16):
		//	a = ACWD;
		//	break;

		// case CASE(OEXTEND, TINT32):
		//	a = ACDQ;
		//	break;

		// case CASE(OEXTEND, TINT64):
		//	a = ACQO;
		//	break;

	case gc.ODIV<<16 | gc.TFLOAT32:
		a = arm.ADIVF

	case gc.ODIV<<16 | gc.TFLOAT64:
		a = arm.ADIVD

	case gc.OSQRT<<16 | gc.TFLOAT64:
		a = arm.ASQRTD
	}

	return a
}
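
// The sudoaddable/sudoclean pair below follows the same pattern as
// split64/splitclean above: registers allocated while forming an address
// are recorded in clean (two slots per call) and released by sudoclean.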

const (
	ODynam = 1 << 0
	OPtrto = 1 << 1
)

var clean [20]gc.Node

var cleani int = 0

func sudoclean() {
	if clean[cleani-1].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-1])
	}
	if clean[cleani-2].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-2])
	}
	cleani -= 2
}

func dotaddable(n *gc.Node, n1 *gc.Node) bool {
	if n.Op != gc.ODOT {
		return false
	}

	var oary [10]int64
	var nn *gc.Node
	o := gc.Dotoffset(n, oary[:], &nn)
	if nn != nil && nn.Addable && o == 1 && oary[0] >= 0 {
		*n1 = *nn
		n1.Type = n.Type
		n1.Xoffset += oary[0]
		return true
	}

	return false
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return false on failure, true on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		v := n.Int()
		if v >= 32000 || v <= -32000 {
			break
		}
		switch as {
		default:
			return false

		case arm.AADD,
			arm.ASUB,
			arm.AAND,
			arm.AORR,
			arm.AEOR,
			arm.AMOVB,
			arm.AMOVBS,
			arm.AMOVBU,
			arm.AMOVH,
			arm.AMOVHS,
			arm.AMOVHU,
			arm.AMOVW:
			break
		}

		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatal("can't happen")
			}
			gins(arm.AMOVW, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Name = obj.NAME_NONE
		n1.Type = n.Type
		gc.Naddr(a, &n1)
		return true

	case gc.OINDEX:
		return false
	}

	return false
}