1 // Do not edit. Bootstrap copy of /Volumes/Android/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm/gsubr.go 2 3 //line /Volumes/Android/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm/gsubr.go:1 4 // Derived from Inferno utils/5c/txt.c 5 // http://code.google.com/p/inferno-os/source/browse/utils/5c/txt.c 6 // 7 // Copyright 1994-1999 Lucent Technologies Inc. All rights reserved. 8 // Portions Copyright 1995-1997 C H Forsyth (forsyth (a] terzarima.net) 9 // Portions Copyright 1997-1999 Vita Nuova Limited 10 // Portions Copyright 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) 11 // Portions Copyright 2004,2006 Bruce Ellis 12 // Portions Copyright 2005-2007 C H Forsyth (forsyth (a] terzarima.net) 13 // Revisions Copyright 2000-2007 Lucent Technologies Inc. and others 14 // Portions Copyright 2009 The Go Authors. All rights reserved. 15 // 16 // Permission is hereby granted, free of charge, to any person obtaining a copy 17 // of this software and associated documentation files (the "Software"), to deal 18 // in the Software without restriction, including without limitation the rights 19 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 20 // copies of the Software, and to permit persons to whom the Software is 21 // furnished to do so, subject to the following conditions: 22 // 23 // The above copyright notice and this permission notice shall be included in 24 // all copies or substantial portions of the Software. 25 // 26 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 27 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 28 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
// IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package arm

import (
	"bootstrap/compile/internal/gc"
	"bootstrap/internal/obj"
	"bootstrap/internal/obj/arm"
	"fmt"
)

// resvd lists ARM registers withheld from the register allocator.
var resvd = []int{
	arm.REG_R9,  // formerly reserved for m; might be okay to reuse now; not sure about NaCl
	arm.REG_R10, // reserved for g
}

/*
 * return constant i node.
 * overwritten by next call, but useful in calls to gins.
 */

// ncon_n is the single shared node that ncon reuses across calls.
var ncon_n gc.Node

// ncon returns a TUINT32 constant node holding i. The returned node is a
// pointer to shared package state: the next call to ncon overwrites it, so
// callers must consume it (e.g. pass straight to gins) before calling again.
func ncon(i uint32) *gc.Node {
	// Lazily initialize the shared node on first use.
	if ncon_n.Type == nil {
		gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
	}
	ncon_n.SetInt(int64(i))
	return &ncon_n
}

// sclean/nsclean form a small stack of temporaries created by split64;
// splitclean pops and frees one entry per call, so split64/splitclean
// calls must be strictly paired (LIFO).
var sclean [10]gc.Node

var nsclean int

/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
// split64 decomposes the 64-bit value n into two 32-bit views: *lo refers to
// the low word and *hi to the high word (Xoffset+4). For non-literals it may
// materialize an addressable temporary, which is pushed on the sclean stack;
// the caller must balance every split64 with a splitclean.
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatal("split64 %v", n.Type)
	}

	if nsclean >= len(sclean) {
		gc.Fatal("split64 clean")
	}
	// Reserve a cleanup slot; OEMPTY marks "nothing to free" unless a
	// temporary is recorded below.
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		// Non-literal: make n addressable first, then take word views.
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				// Not a directly addressable field chain:
				// generate the address into a temporary and
				// remember it for splitclean to release.
				gc.Igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}

			n = &n1

		case gc.ONAME:
			if n.Class == gc.PPARAMREF {
				// Heap-allocated parameter: load its address.
				var n1 gc.Node
				gc.Cgen(n.Name.Heapaddr, &n1)
				sclean[nsclean-1] = n1
				n = &n1
			}

		// nothing
		case gc.OINDREG:
			break
		}

		// Both halves alias n; lo is the word at Xoffset, hi the word
		// at Xoffset+4 (little-endian layout). Only the high word of a
		// signed 64-bit value is itself signed.
		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		// Literal: split the constant value itself into two
		// 32-bit constant nodes, no temporaries needed.
		var n1 gc.Node
		n.Convconst(&n1, n.Type)
		i := n1.Int()
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}

// splitclean pops the most recent split64 entry and frees its temporary
// register, if one was allocated. Calls must mirror split64 calls in LIFO
// order.
func splitclean() {
	if nsclean <= 0 {
		gc.Fatal("splitclean")
	}
	nsclean--
	if sclean[nsclean].Op != gc.OEMPTY {
		gc.Regfree(&sclean[nsclean])
	}
}

// gmove generates code to move (and convert) the value of node f into node t.
// It dispatches on the (source type, destination type) pair, emitting either a
// single instruction, a register-intermediate sequence, or a 64-bit
// half-by-half copy via split64.
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", f, t)
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	// Complex values are moved component-wise elsewhere.
	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands;
	// except 64-bit, which always copies via registers anyway.
	var a int
	var r1 gc.Node
	if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT16,
			gc.TINT8:
			// Small signed targets: convert via int32 in a register,
			// then let a recursive gmove do the final truncation.
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT32])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm.AMOVW, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT16,
			gc.TUINT8:
			// Small unsigned targets: same as above but via uint32.
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT32])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm.AMOVW, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = gc.Simsimtype(con.Type)

		// constants can't move directly to memory
		if gc.Ismem(t) && !gc.Is64(t.Type) {
			goto hard
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		// should not happen
		gc.Fatal("gmove %v -> %v", f, t)
		return

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8: // same size
		if !gc.Ismem(f) {
			a = arm.AMOVB
			break
		}
		fallthrough

	case gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8:
		a = arm.AMOVBS

	case gc.TUINT8<<16 | gc.TUINT8:
		if !gc.Ismem(f) {
			a = arm.AMOVB
			break
		}
		fallthrough

	case gc.TINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8:
		a = arm.AMOVBU

	case gc.TINT64<<16 | gc.TINT8, // truncate low word
		gc.TUINT64<<16 | gc.TINT8:
		a = arm.AMOVBS

		goto trunc64

	case gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = arm.AMOVBU
		goto trunc64

	case gc.TINT16<<16 | gc.TINT16: // same size
		if !gc.Ismem(f) {
			a = arm.AMOVH
			break
		}
		fallthrough

	case gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16:
		a = arm.AMOVHS

	case gc.TUINT16<<16 | gc.TUINT16:
		if !gc.Ismem(f) {
			a = arm.AMOVH
			break
		}
		fallthrough

	case gc.TINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16:
		a = arm.AMOVHU

	case gc.TINT64<<16 | gc.TINT16, // truncate low word
		gc.TUINT64<<16 | gc.TINT16:
		a = arm.AMOVHS

		goto trunc64

	case gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = arm.AMOVHU
		goto trunc64

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TUINT32:
		a = arm.AMOVW

	case gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		// 64->32: keep only the low word, moved via a register.
		var flo gc.Node
		var fhi gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Regalloc(&r1, t.Type, nil)
		gins(arm.AMOVW, &flo, &r1)
		gins(arm.AMOVW, &r1, t)
		gc.Regfree(&r1)
		splitclean()
		return

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		// 64->64: copy both halves through registers.
		var fhi gc.Node
		var flo gc.Node
		split64(f, &flo, &fhi)

		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)
		var r1 gc.Node
		gc.Regalloc(&r1, flo.Type, nil)
		var r2 gc.Node
		gc.Regalloc(&r2, fhi.Type, nil)
		gins(arm.AMOVW, &flo, &r1)
		gins(arm.AMOVW, &fhi, &r2)
		gins(arm.AMOVW, &r1, &tlo)
		gins(arm.AMOVW, &r2, &thi)
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		splitclean()
		splitclean()
		return

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32:
		a = arm.AMOVBS

		goto rdst

	case gc.TINT8<<16 | gc.TINT64, // convert via int32
		gc.TINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32:
		a = arm.AMOVBU

		goto rdst

	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
		gc.TUINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]

		goto hard

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32:
		a = arm.AMOVHS

		goto rdst

	case gc.TINT16<<16 | gc.TINT64, // convert via int32
		gc.TINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32:
		a = arm.AMOVHU

		goto rdst

	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
		gc.TUINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]

		goto hard

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)

		var r1 gc.Node
		gc.Regalloc(&r1, tlo.Type, nil)
		var r2 gc.Node
		gc.Regalloc(&r2, thi.Type, nil)
		gmove(f, &r1)
		// High word = low word arithmetically shifted right by 31
		// (replicates the sign bit); encoded as a shift operand.
		p1 := gins(arm.AMOVW, &r1, &r2)
		p1.From.Type = obj.TYPE_SHIFT
		p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Reg)&15 // r1->31
		p1.From.Reg = 0

		//print("gmove: %v\n", p1);
		gins(arm.AMOVW, &r1, &tlo)

		gins(arm.AMOVW, &r2, &thi)
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		splitclean()
		return

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		var thi gc.Node
		var tlo gc.Node
		split64(t, &tlo, &thi)

		gmove(f, &tlo)
		// High word is simply zero.
		var r1 gc.Node
		gc.Regalloc(&r1, thi.Type, nil)
		gins(arm.AMOVW, ncon(0), &r1)
		gins(arm.AMOVW, &r1, &thi)
		gc.Regfree(&r1)
		splitclean()
		return

		//	case CASE(TFLOAT64, TUINT64):
		/*
		 * float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TUINT32,

		//	case CASE(TFLOAT32, TUINT64):

		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		// fa loads the float, a converts float->word, ta stores the
		// (possibly narrowed) integer result.
		fa := arm.AMOVF

		a := arm.AMOVFW
		if ft == gc.TFLOAT64 {
			fa = arm.AMOVD
			a = arm.AMOVDW
		}

		ta := arm.AMOVW
		switch tt {
		case gc.TINT8:
			ta = arm.AMOVBS

		case gc.TUINT8:
			ta = arm.AMOVBU

		case gc.TINT16:
			ta = arm.AMOVHS

		case gc.TUINT16:
			ta = arm.AMOVHU
		}

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		gins(fa, f, &r1)        // load to fpu
		p1 := gins(a, &r1, &r1) // convert to w
		switch tt {
		case gc.TUINT8,
			gc.TUINT16,
			gc.TUINT32:
			// Unsigned conversion variant.
			p1.Scond |= arm.C_UBIT
		}

		gins(arm.AMOVW, &r1, &r2) // copy to cpu
		gins(ta, &r2, t)          // store
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		return

		/*
		 * integer to float
		 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT64:
		// fa loads/extends the integer, a converts word->float,
		// ta stores the float result.
		fa := arm.AMOVW

		switch ft {
		case gc.TINT8:
			fa = arm.AMOVBS

		case gc.TUINT8:
			fa = arm.AMOVBU

		case gc.TINT16:
			fa = arm.AMOVHS

		case gc.TUINT16:
			fa = arm.AMOVHU
		}

		a := arm.AMOVWF
		ta := arm.AMOVF
		if tt == gc.TFLOAT64 {
			a = arm.AMOVWD
			ta = arm.AMOVD
		}

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		gins(fa, f, &r1)          // load to cpu
		gins(arm.AMOVW, &r1, &r2) // copy to fpu
		p1 := gins(a, &r2, &r2)   // convert
		switch ft {
		case gc.TUINT8,
			gc.TUINT16,
			gc.TUINT32:
			// Unsigned conversion variant.
			p1.Scond |= arm.C_UBIT
		}

		gins(ta, &r2, t) // store
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		return

	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		gc.Fatal("gmove UINT64, TFLOAT not implemented")
		return

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = arm.AMOVF

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = arm.AMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		// Widen via an FPU register.
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
		gins(arm.AMOVF, f, &r1)
		gins(arm.AMOVFD, &r1, &r1)
		gins(arm.AMOVD, &r1, t)
		gc.Regfree(&r1)
		return

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		// Narrow via an FPU register.
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
		gins(arm.AMOVD, f, &r1)
		gins(arm.AMOVDF, &r1, &r1)
		gins(arm.AMOVF, &r1, t)
		gc.Regfree(&r1)
		return
	}

	// Single-instruction move selected by the switch above.
	gins(a, f, t)
	return

	// TODO(kaib): we almost always require a register dest anyway, this can probably be
	// removed.
	// requires register destination
rdst:
	{
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return

	// truncate 64 bit integer
trunc64:
	var fhi gc.Node
	var flo gc.Node
	split64(f, &flo, &fhi)

	// Only the low word survives; a (set above) narrows it on the way.
	gc.Regalloc(&r1, t.Type, nil)
	gins(a, &flo, &r1)
	gins(a, &r1, t)
	gc.Regfree(&r1)
	splitclean()
	return
}

// samaddr reports whether f and t denote the same location; currently it
// only recognizes the same-register case.
func samaddr(f *gc.Node, t *gc.Node) bool {
	if f.Op != t.Op {
		return false
	}

	switch f.Op {
	case gc.OREGISTER:
		if f.Reg != t.Reg {
			break
		}
		return true
	}

	return false
}

/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// Node nod;
	// int32 v;

	if f != nil && f.Op == gc.OINDEX {
		gc.Fatal("gins OINDEX not implemented")
	}

	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(f->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	if t != nil && t.Op == gc.OINDEX {
		gc.Fatal("gins OINDEX not implemented")
	}

	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(t->right, &nod);
	//		constnode.vconst =
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);

	// Emit the instruction and fill in both operands.
	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	// Per-opcode fixups and sanity checks.
	switch as {
	case arm.ABL:
		// Calls through a register operand use register-indirect
		// (memory) addressing.
		if p.To.Type == obj.TYPE_REG {
			p.To.Type = obj.TYPE_MEM
		}

	case arm.ACMP, arm.ACMPF, arm.ACMPD:
		// Comparisons take both operands on the From side: the second
		// operand moves into the instruction's register slot.
		if t != nil {
			if f.Op != gc.OREGISTER {
				/* generate a comparison
				TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
				*/
				gc.Fatal("bad operands to gcmp")
			}
			p.From = p.To
			p.To = obj.Addr{}
			raddr(f, p)
		}

	case arm.AMULU:
		if f != nil && f.Op != gc.OREGISTER {
			gc.Fatal("bad operands to mul")
		}

	case arm.AMOVW:
		// Catch illegal memory-to-memory moves that should have gone
		// through a register (see gmove).
		if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR || p.From.Type == obj.TYPE_CONST) && (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) {
			gc.Fatal("gins double memory")
		}

	case arm.AADD:
		if p.To.Type == obj.TYPE_MEM {
			gc.Fatal("gins arith to mem")
		}

	case arm.ARSB:
		if p.From.Type == obj.TYPE_NONE {
			gc.Fatal("rsb with no from")
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}
	return p
}

/*
 * insert n into reg slot of p
 */
// raddr requires n to resolve to a register operand; anything else is fatal.
func raddr(n *gc.Node, p *obj.Prog) {
	var a obj.Addr
	gc.Naddr(&a, n)
	if a.Type != obj.TYPE_REG {
		if n != nil {
			gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
		} else {
			gc.Fatal("bad in raddr: <null>")
		}
		p.Reg = 0
	} else {
		p.Reg = a.Reg
	}
}

/* generate a constant shift
 * arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
 */
// gshift emits "as lhs<<stype(sval), rhs" as a shifted-operand instruction.
// sval must be in (0, 32]; 32 is encoded as 0 per the ARM shifted-register
// operand format (hence the &0x1f mask after the range check).
func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
	if sval <= 0 || sval > 32 {
		gc.Fatal("bad shift value: %d", sval)
	}

	sval = sval & 0x1f

	p := gins(as, nil, rhs)
	p.From.Type = obj.TYPE_SHIFT
	// shift type | immediate shift amount (bits 7..11) | source register.
	p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Reg)&15
	return p
}

/* generate a register shift
 */
// gregshift emits "as lhs<<stype(reg), rhs" where the shift amount comes
// from a register; bit 4 selects register-shift encoding.
func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
	p := gins(as, nil, rhs)
	p.From.Type = obj.TYPE_SHIFT
	p.From.Offset = int64(stype) | (int64(reg.Reg)&15)<<8 | 1<<4 | int64(lhs.Reg)&15
	return p
}

/*
 * return Axxx for Oxxx on type t.
 */
// optoas maps a gc operation and operand type to the ARM opcode that
// implements it; unknown combinations are fatal.
func optoas(op int, t *gc.Type) int {
	if t == nil {
		gc.Fatal("optoas: t is nil")
	}

	a := obj.AXXX
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatal("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]])

		/* case CASE(OADDR, TPTR32):
			a = ALEAL;
			break;

		case CASE(OADDR, TPTR64):
			a = ALEAQ;
			break;
		*/
		// TODO(kaib): make sure the conditional branches work on all edge cases
	case gc.OEQ<<16 | gc.TBOOL,
		gc.OEQ<<16 | gc.TINT8,
		gc.OEQ<<16 | gc.TUINT8,
		gc.OEQ<<16 | gc.TINT16,
		gc.OEQ<<16 | gc.TUINT16,
		gc.OEQ<<16 | gc.TINT32,
		gc.OEQ<<16 | gc.TUINT32,
		gc.OEQ<<16 | gc.TINT64,
		gc.OEQ<<16 | gc.TUINT64,
		gc.OEQ<<16 | gc.TPTR32,
		gc.OEQ<<16 | gc.TPTR64,
		gc.OEQ<<16 | gc.TFLOAT32,
		gc.OEQ<<16 | gc.TFLOAT64:
		a = arm.ABEQ

	case gc.ONE<<16 | gc.TBOOL,
		gc.ONE<<16 | gc.TINT8,
		gc.ONE<<16 | gc.TUINT8,
		gc.ONE<<16 | gc.TINT16,
		gc.ONE<<16 | gc.TUINT16,
		gc.ONE<<16 | gc.TINT32,
		gc.ONE<<16 | gc.TUINT32,
		gc.ONE<<16 | gc.TINT64,
		gc.ONE<<16 | gc.TUINT64,
		gc.ONE<<16 | gc.TPTR32,
		gc.ONE<<16 | gc.TPTR64,
		gc.ONE<<16 | gc.TFLOAT32,
		gc.ONE<<16 | gc.TFLOAT64:
		a = arm.ABNE

	// Signed (and float) comparisons use the signed condition codes;
	// unsigned comparisons use LO/LS/HI/HS.
	case gc.OLT<<16 | gc.TINT8,
		gc.OLT<<16 | gc.TINT16,
		gc.OLT<<16 | gc.TINT32,
		gc.OLT<<16 | gc.TINT64,
		gc.OLT<<16 | gc.TFLOAT32,
		gc.OLT<<16 | gc.TFLOAT64:
		a = arm.ABLT

	case gc.OLT<<16 | gc.TUINT8,
		gc.OLT<<16 | gc.TUINT16,
		gc.OLT<<16 | gc.TUINT32,
		gc.OLT<<16 | gc.TUINT64:
		a = arm.ABLO

	case gc.OLE<<16 | gc.TINT8,
		gc.OLE<<16 | gc.TINT16,
		gc.OLE<<16 | gc.TINT32,
		gc.OLE<<16 | gc.TINT64,
		gc.OLE<<16 | gc.TFLOAT32,
		gc.OLE<<16 | gc.TFLOAT64:
		a = arm.ABLE

	case gc.OLE<<16 | gc.TUINT8,
		gc.OLE<<16 | gc.TUINT16,
		gc.OLE<<16 | gc.TUINT32,
		gc.OLE<<16 | gc.TUINT64:
		a = arm.ABLS

	case gc.OGT<<16 | gc.TINT8,
		gc.OGT<<16 | gc.TINT16,
		gc.OGT<<16 | gc.TINT32,
		gc.OGT<<16 | gc.TINT64,
		gc.OGT<<16 | gc.TFLOAT32,
		gc.OGT<<16 | gc.TFLOAT64:
		a = arm.ABGT

	case gc.OGT<<16 | gc.TUINT8,
		gc.OGT<<16 | gc.TUINT16,
		gc.OGT<<16 | gc.TUINT32,
		gc.OGT<<16 | gc.TUINT64:
		a = arm.ABHI

	case gc.OGE<<16 | gc.TINT8,
		gc.OGE<<16 | gc.TINT16,
		gc.OGE<<16 | gc.TINT32,
		gc.OGE<<16 | gc.TINT64,
		gc.OGE<<16 | gc.TFLOAT32,
		gc.OGE<<16 | gc.TFLOAT64:
		a = arm.ABGE

	case gc.OGE<<16 | gc.TUINT8,
		gc.OGE<<16 | gc.TUINT16,
		gc.OGE<<16 | gc.TUINT32,
		gc.OGE<<16 | gc.TUINT64:
		a = arm.ABHS

	case gc.OCMP<<16 | gc.TBOOL,
		gc.OCMP<<16 | gc.TINT8,
		gc.OCMP<<16 | gc.TUINT8,
		gc.OCMP<<16 | gc.TINT16,
		gc.OCMP<<16 | gc.TUINT16,
		gc.OCMP<<16 | gc.TINT32,
		gc.OCMP<<16 | gc.TUINT32,
		gc.OCMP<<16 | gc.TPTR32:
		a = arm.ACMP

	case gc.OCMP<<16 | gc.TFLOAT32:
		a = arm.ACMPF

	case gc.OCMP<<16 | gc.TFLOAT64:
		a = arm.ACMPD

	case gc.OPS<<16 | gc.TFLOAT32,
		gc.OPS<<16 | gc.TFLOAT64:
		a = arm.ABVS

	case gc.OAS<<16 | gc.TBOOL:
		a = arm.AMOVB

	case gc.OAS<<16 | gc.TINT8:
		a = arm.AMOVBS

	case gc.OAS<<16 | gc.TUINT8:
		a = arm.AMOVBU

	case gc.OAS<<16 | gc.TINT16:
		a = arm.AMOVHS

	case gc.OAS<<16 | gc.TUINT16:
		a = arm.AMOVHU

	case gc.OAS<<16 | gc.TINT32,
		gc.OAS<<16 | gc.TUINT32,
		gc.OAS<<16 | gc.TPTR32:
		a = arm.AMOVW

	case gc.OAS<<16 | gc.TFLOAT32:
		a = arm.AMOVF

	case gc.OAS<<16 | gc.TFLOAT64:
		a = arm.AMOVD

	case gc.OADD<<16 | gc.TINT8,
		gc.OADD<<16 | gc.TUINT8,
		gc.OADD<<16 | gc.TINT16,
		gc.OADD<<16 | gc.TUINT16,
		gc.OADD<<16 | gc.TINT32,
		gc.OADD<<16 | gc.TUINT32,
		gc.OADD<<16 | gc.TPTR32:
		a = arm.AADD

	case gc.OADD<<16 | gc.TFLOAT32:
		a = arm.AADDF

	case gc.OADD<<16 | gc.TFLOAT64:
		a = arm.AADDD

	case gc.OSUB<<16 | gc.TINT8,
		gc.OSUB<<16 | gc.TUINT8,
		gc.OSUB<<16 | gc.TINT16,
		gc.OSUB<<16 | gc.TUINT16,
		gc.OSUB<<16 | gc.TINT32,
		gc.OSUB<<16 | gc.TUINT32,
		gc.OSUB<<16 | gc.TPTR32:
		a = arm.ASUB

	case gc.OSUB<<16 | gc.TFLOAT32:
		a = arm.ASUBF

	case gc.OSUB<<16 | gc.TFLOAT64:
		a = arm.ASUBD

	// Unary minus is reverse-subtract (0 - x) on ARM.
	case gc.OMINUS<<16 | gc.TINT8,
		gc.OMINUS<<16 | gc.TUINT8,
		gc.OMINUS<<16 | gc.TINT16,
		gc.OMINUS<<16 | gc.TUINT16,
		gc.OMINUS<<16 | gc.TINT32,
		gc.OMINUS<<16 | gc.TUINT32,
		gc.OMINUS<<16 | gc.TPTR32:
		a = arm.ARSB

	case gc.OAND<<16 | gc.TINT8,
		gc.OAND<<16 | gc.TUINT8,
		gc.OAND<<16 | gc.TINT16,
		gc.OAND<<16 | gc.TUINT16,
		gc.OAND<<16 | gc.TINT32,
		gc.OAND<<16 | gc.TUINT32,
		gc.OAND<<16 | gc.TPTR32:
		a = arm.AAND

	case gc.OOR<<16 | gc.TINT8,
		gc.OOR<<16 | gc.TUINT8,
		gc.OOR<<16 | gc.TINT16,
		gc.OOR<<16 | gc.TUINT16,
		gc.OOR<<16 | gc.TINT32,
		gc.OOR<<16 | gc.TUINT32,
		gc.OOR<<16 | gc.TPTR32:
		a = arm.AORR

	case gc.OXOR<<16 | gc.TINT8,
		gc.OXOR<<16 | gc.TUINT8,
		gc.OXOR<<16 | gc.TINT16,
		gc.OXOR<<16 | gc.TUINT16,
		gc.OXOR<<16 | gc.TINT32,
		gc.OXOR<<16 | gc.TUINT32,
		gc.OXOR<<16 | gc.TPTR32:
		a = arm.AEOR

	case gc.OLSH<<16 | gc.TINT8,
		gc.OLSH<<16 | gc.TUINT8,
		gc.OLSH<<16 | gc.TINT16,
		gc.OLSH<<16 | gc.TUINT16,
		gc.OLSH<<16 | gc.TINT32,
		gc.OLSH<<16 | gc.TUINT32,
		gc.OLSH<<16 | gc.TPTR32:
		a = arm.ASLL

	// Right shift: logical for unsigned, arithmetic for signed.
	case gc.ORSH<<16 | gc.TUINT8,
		gc.ORSH<<16 | gc.TUINT16,
		gc.ORSH<<16 | gc.TUINT32,
		gc.ORSH<<16 | gc.TPTR32:
		a = arm.ASRL

	case gc.ORSH<<16 | gc.TINT8,
		gc.ORSH<<16 | gc.TINT16,
		gc.ORSH<<16 | gc.TINT32:
		a = arm.ASRA

	case gc.OMUL<<16 | gc.TUINT8,
		gc.OMUL<<16 | gc.TUINT16,
		gc.OMUL<<16 | gc.TUINT32,
		gc.OMUL<<16 | gc.TPTR32:
		a = arm.AMULU

	case gc.OMUL<<16 | gc.TINT8,
		gc.OMUL<<16 | gc.TINT16,
		gc.OMUL<<16 | gc.TINT32:
		a = arm.AMUL

	case gc.OMUL<<16 | gc.TFLOAT32:
		a = arm.AMULF

	case gc.OMUL<<16 | gc.TFLOAT64:
		a = arm.AMULD

	case gc.ODIV<<16 | gc.TUINT8,
		gc.ODIV<<16 | gc.TUINT16,
		gc.ODIV<<16 | gc.TUINT32,
		gc.ODIV<<16 | gc.TPTR32:
		a = arm.ADIVU

	case gc.ODIV<<16 | gc.TINT8,
		gc.ODIV<<16 | gc.TINT16,
		gc.ODIV<<16 | gc.TINT32:
		a = arm.ADIV

	case gc.OMOD<<16 | gc.TUINT8,
		gc.OMOD<<16 | gc.TUINT16,
		gc.OMOD<<16 | gc.TUINT32,
		gc.OMOD<<16 | gc.TPTR32:
		a = arm.AMODU

	case gc.OMOD<<16 | gc.TINT8,
		gc.OMOD<<16 | gc.TINT16,
		gc.OMOD<<16 | gc.TINT32:
		a = arm.AMOD

		//	case CASE(OEXTEND, TINT16):
		//		a = ACWD;
		//		break;

		//	case CASE(OEXTEND, TINT32):
		//		a = ACDQ;
		//		break;

		//	case CASE(OEXTEND, TINT64):
		//		a = ACQO;
		//		break;

	case gc.ODIV<<16 | gc.TFLOAT32:
		a = arm.ADIVF

	case gc.ODIV<<16 | gc.TFLOAT64:
		a = arm.ADIVD

	case gc.OSQRT<<16 | gc.TFLOAT64:
		a = arm.ASQRTD
	}

	return a
}

const (
	ODynam = 1 << 0
	OPtrto = 1 << 1
)

// clean/cleani hold registers allocated by sudoaddable, two slots per call;
// sudoclean pops and frees one pair.
var clean [20]gc.Node

var cleani int = 0

// sudoclean releases the pair of cleanup slots pushed by the most recent
// successful sudoaddable (OEMPTY slots hold no register and are skipped).
func sudoclean() {
	if clean[cleani-1].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-1])
	}
	if clean[cleani-2].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-2])
	}
	cleani -= 2
}

// dotaddable reports whether the ODOT chain n is directly addressable
// (a single non-negative offset from an addressable base); if so it fills
// n1 with an equivalent node at the combined offset.
func dotaddable(n *gc.Node, n1 *gc.Node) bool {
	if n.Op != gc.ODOT {
		return false
	}

	var oary [10]int64
	var nn *gc.Node
	o := gc.Dotoffset(n, oary[:], &nn)
	if nn != nil && nn.Addable && o == 1 && oary[0] >= 0 {
		*n1 = *nn
		n1.Type = n.Type
		n1.Xoffset += oary[0]
		return true
	}

	return false
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return 0 on failure, 1 on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		// Small integer constants can be used as immediate operands
		// for the opcodes listed below.
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		v := n.Int()
		if v >= 32000 || v <= -32000 {
			break
		}
		switch as {
		default:
			return false

		case arm.AADD,
			arm.ASUB,
			arm.AAND,
			arm.AORR,
			arm.AEOR,
			arm.AMOVB,
			arm.AMOVBS,
			arm.AMOVBU,
			arm.AMOVH,
			arm.AMOVHS,
			arm.AMOVHU,
			arm.AMOVW:
			break
		}

		// Push an (empty) cleanup pair so the caller's sudoclean
		// stays balanced.
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		// Otherwise chase the pointer chain: negative entries in oary
		// encode an indirection with offset -(oary[i]+1).
		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatal("can't happen")
			}
			// Load the next pointer and nil-check it.
			gins(arm.AMOVW, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Name = obj.NAME_NONE
		n1.Type = n.Type
		gc.Naddr(a, &n1)
		return true

	case gc.OINDEX:
		return false
	}

	return false
}