// Derived from Inferno utils/6c/txt.c
// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
//
// Copyright 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright 1997-1999 Vita Nuova Limited
// Portions Copyright 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright 2004,2006 Bruce Ellis
// Portions Copyright 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package ppc64

import (
	"cmd/compile/internal/big"
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/ppc64"
	"fmt"
)

var resvd = []int{
	ppc64.REGZERO,
	ppc64.REGSP, // reserved for SP
	// We need to preserve the C ABI TLS pointer because sigtramp
	// may happen during C code and needs to access the g. C
	// clobbers REGG, so if Go were to clobber REGTLS, sigtramp
	// wouldn't know which convention to use. By preserving REGTLS,
	// we can just retrieve g from TLS when we aren't sure.
	ppc64.REGTLS,

	// TODO(austin): Consolidate REGTLS and REGG?
	ppc64.REGG,
	ppc64.REGTMP, // REGTMP
	ppc64.FREGCVI,
	ppc64.FREGZERO,
	ppc64.FREGHALF,
	ppc64.FREGONE,
	ppc64.FREGTWO,
}

/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) || n2.Op != gc.OREGISTER || as == ppc64.AMULLD {
		// cannot have more than a 16-bit immediate in ADD, etc.
		// instead, MOV into a register first.
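		// For example (register names illustrative only): an add such as
		//	ADD $0x12345, R3
		// carries a constant that does not fit in 16 bits, so it is emitted as
		//	MOVD $0x12345, Rtmp
		//	ADD  Rtmp, R3
		// where Rtmp stands for whatever register Regalloc hands back below.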
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		rawgins(ppc64.AMOVD, &n1, &ntmp)
		rawgins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	rawgins(as, &n1, n2)
}

/*
 * generate
 *	as n, $c (CMP/CMPU)
 */
func ginscon2(as int, n2 *gc.Node, c int64) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	switch as {
	default:
		gc.Fatal("ginscon2")

	case ppc64.ACMP:
		if -ppc64.BIG <= c && c <= ppc64.BIG {
			rawgins(as, n2, &n1)
			return
		}

	case ppc64.ACMPU:
		if 0 <= c && c <= 2*ppc64.BIG {
			rawgins(as, n2, &n1)
			return
		}
	}

	// MOV n1 into register first
	var ntmp gc.Node
	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

	rawgins(ppc64.AMOVD, &n1, &ntmp)
	rawgins(as, n2, &ntmp)
	gc.Regfree(&ntmp)
}

func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		rawgins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}

// set up nodes representing 2^63
var (
	bigi         gc.Node
	bigf         gc.Node
	bignodes_did bool
)

func bignodes() {
	if bignodes_did {
		return
	}
	bignodes_did = true

	var i big.Int
	i.SetInt64(1)
	i.Lsh(&i, 63)

	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
	bigi.SetBigInt(&i)

	bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
}

/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := (*gc.Type)(t.Type)

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var r2 gc.Node
	var r1 gc.Node
	var a int
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT32,
			gc.TINT16,
			gc.TINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(ppc64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT32,
			gc.TUINT16,
			gc.TUINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(ppc64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = tt // so big switch will choose a simple mov

		// constants can't move directly to memory.
		if gc.Ismem(t) {
			goto hard
		}
	}

	// float constants come from memory.
	//if(isfloat[tt])
	//	goto hard;

	// 64-bit immediates are also from memory.
	//if(isint[tt])
	//	goto hard;
	//// 64-bit immediates are really 32-bit sign-extended
	//// unless moving into a register.
	//if(isint[tt]) {
	//	if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
	//		goto hard;
	//	if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
	//		goto hard;
	//}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))

	/*
	 * integer copy and truncate
	 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8,
		// truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8:
		a = ppc64.AMOVB

	case gc.TINT8<<16 | gc.TUINT8, // same size
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8,
		// truncate
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = ppc64.AMOVBZ

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16,
		// truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16:
		a = ppc64.AMOVH

	case gc.TINT16<<16 | gc.TUINT16, // same size
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16,
		// truncate
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = ppc64.AMOVHZ

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TUINT32<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TINT32,
		// truncate
		gc.TUINT64<<16 | gc.TINT32:
		a = ppc64.AMOVW

	case gc.TINT32<<16 | gc.TUINT32, // same size
		gc.TUINT32<<16 | gc.TUINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = ppc64.AMOVWZ

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = ppc64.AMOVD

	/*
	 * integer up-conversions
	 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32,
		gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = ppc64.AMOVB

		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32,
		gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = ppc64.AMOVBZ

		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32,
		gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = ppc64.AMOVH

		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32,
		gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = ppc64.AMOVHZ

		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = ppc64.AMOVW

		goto rdst

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = ppc64.AMOVWZ

		goto rdst

	//warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
	//return;
	// algorithm is:
	//	if small enough, use native float64 -> int64 conversion.
	//	otherwise, subtract 2^63, convert, and add it back.
	/*
	 * float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT64,
		gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8,
		gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32,
		gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		bignodes()

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		gmove(f, &r1)
		if tt == gc.TUINT64 {
			gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
			gmove(&bigf, &r2)
			gins(ppc64.AFCMPU, &r1, &r2)
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1))
			gins(ppc64.AFSUB, &r2, &r1)
			gc.Patch(p1, gc.Pc)
			gc.Regfree(&r2)
		}

		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
		var r3 gc.Node
		gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
		gins(ppc64.AFCTIDZ, &r1, &r2)
		p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil))
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = ppc64.REGSP
		p1.To.Offset = -8
		p1 = gins(ppc64.AMOVD, nil, &r3)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = ppc64.REGSP
		p1.From.Offset = -8
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		if tt == gc.TUINT64 {
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
			gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
			gins(ppc64.AMOVD, &bigi, &r1)
			gins(ppc64.AADD, &r1, &r3)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r3, t)
		gc.Regfree(&r3)
		return

	//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
	//return;
	// algorithm is:
	//	if small enough, use native int64 -> float64 conversion.
	//	otherwise, halve (rounding to odd?), convert, and double.
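	// For example (editor's note on the sequence below): a uint64 value with
	// the high bit set is shifted right by one (SRD $1), converted with
	// FCFID, and then doubled via FREGTWO. Dropping the low bit this way can
	// differ from a correctly rounded conversion in rare halfway cases,
	// which is what the "(rounding to odd?)" remark above alludes to.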
	/*
	 * integer to float
	 */
	case gc.TINT32<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64,
		gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		bignodes()

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
		gmove(f, &r1)
		if ft == gc.TUINT64 {
			gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
			gmove(&bigi, &r2)
			gins(ppc64.ACMPU, &r1, &r2)
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1))
			p2 := (*obj.Prog)(gins(ppc64.ASRD, nil, &r1))
			p2.From.Type = obj.TYPE_CONST
			p2.From.Offset = 1
			gc.Patch(p1, gc.Pc)
		}

		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
		p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil))
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = ppc64.REGSP
		p1.To.Offset = -8
		p1 = gins(ppc64.AFMOVD, nil, &r2)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = ppc64.REGSP
		p1.From.Offset = -8
		gins(ppc64.AFCFID, &r2, &r2)
		gc.Regfree(&r1)
		if ft == gc.TUINT64 {
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again
			gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
			gins(ppc64.AFMUL, &r1, &r2)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r2, t)
		gc.Regfree(&r2)
		return

	/*
	 * float to float
	 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = ppc64.AFMOVS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = ppc64.AFMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = ppc64.AFMOVS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = ppc64.AFRSP
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	{
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}

func intLiteral(n *gc.Node) (x int64, ok bool) {
	switch {
	case n == nil:
		return
	case gc.Isconst(n, gc.CTINT):
		return n.Int(), true
	case gc.Isconst(n, gc.CTBOOL):
		return int64(obj.Bool2int(n.Bool())), true
	}
	return
}

// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
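// For example: a machine instruction (as >= obj.A_ARCHSPECIFIC) whose source
// operand is an integer or boolean literal is routed through ginscon, and a
// CMP/CMPU whose second operand is such a literal is routed through ginscon2;
// either path may expand into a MOVD into a temporary register followed by
// the real instruction, and in both cases gins returns nil, so the caller
// must not rely on the returned *obj.Prog.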
func gins(as int, f, t *gc.Node) *obj.Prog {
	if as >= obj.A_ARCHSPECIFIC {
		if x, ok := intLiteral(f); ok {
			ginscon(as, x, t)
			return nil // caller must not use
		}
	}
	if as == ppc64.ACMP || as == ppc64.ACMPU {
		if x, ok := intLiteral(t); ok {
			ginscon2(as, f, x)
			return nil // caller must not use
		}
	}
	return rawgins(as, f, t)
}

/*
 * generate one instruction:
 *	as f, t
 */
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// TODO(austin): Add self-move test like in 6g (but be careful
	// of truncation moves)

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	case obj.ACALL:
		if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
			// Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
			pp := gc.Prog(as)
			pp.From = p.From
			pp.To.Type = obj.TYPE_REG
			pp.To.Reg = ppc64.REG_CTR

			p.As = ppc64.AMOVD
			p.From = p.To
			p.To.Type = obj.TYPE_REG
			p.To.Reg = ppc64.REG_CTR

			if gc.Debug['g'] != 0 {
				fmt.Printf("%v\n", p)
				fmt.Printf("%v\n", pp)
			}

			return pp
		}

	// Bad things the front end has done to us. Crash to find call stack.
	case ppc64.AAND, ppc64.AMULLD:
		if p.From.Type == obj.TYPE_CONST {
			gc.Debug['h'] = 1
			gc.Fatal("bad inst: %v", p)
		}
	case ppc64.ACMP, ppc64.ACMPU:
		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
			gc.Debug['h'] = 1
			gc.Fatal("bad inst: %v", p)
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case ppc64.AMOVB,
		ppc64.AMOVBU,
		ppc64.AMOVBZ,
		ppc64.AMOVBZU:
		w = 1

	case ppc64.AMOVH,
		ppc64.AMOVHU,
		ppc64.AMOVHZ,
		ppc64.AMOVHZU:
		w = 2

	case ppc64.AMOVW,
		ppc64.AMOVWU,
		ppc64.AMOVWZ,
		ppc64.AMOVWZU:
		w = 4

	case ppc64.AMOVD,
		ppc64.AMOVDU:
		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	return p
}

/*
 * return Axxx for Oxxx on type t.
 */
func optoas(op int, t *gc.Type) int {
	if t == nil {
		gc.Fatal("optoas: t is nil")
	}

	a := int(obj.AXXX)
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)

	case gc.OEQ<<16 | gc.TBOOL,
		gc.OEQ<<16 | gc.TINT8,
		gc.OEQ<<16 | gc.TUINT8,
		gc.OEQ<<16 | gc.TINT16,
		gc.OEQ<<16 | gc.TUINT16,
		gc.OEQ<<16 | gc.TINT32,
		gc.OEQ<<16 | gc.TUINT32,
		gc.OEQ<<16 | gc.TINT64,
		gc.OEQ<<16 | gc.TUINT64,
		gc.OEQ<<16 | gc.TPTR32,
		gc.OEQ<<16 | gc.TPTR64,
		gc.OEQ<<16 | gc.TFLOAT32,
		gc.OEQ<<16 | gc.TFLOAT64:
		a = ppc64.ABEQ

	case gc.ONE<<16 | gc.TBOOL,
		gc.ONE<<16 | gc.TINT8,
		gc.ONE<<16 | gc.TUINT8,
		gc.ONE<<16 | gc.TINT16,
		gc.ONE<<16 | gc.TUINT16,
		gc.ONE<<16 | gc.TINT32,
		gc.ONE<<16 | gc.TUINT32,
		gc.ONE<<16 | gc.TINT64,
		gc.ONE<<16 | gc.TUINT64,
		gc.ONE<<16 | gc.TPTR32,
		gc.ONE<<16 | gc.TPTR64,
		gc.ONE<<16 | gc.TFLOAT32,
		gc.ONE<<16 | gc.TFLOAT64:
		a = ppc64.ABNE

	case gc.OLT<<16 | gc.TINT8, // ACMP
		gc.OLT<<16 | gc.TINT16,
		gc.OLT<<16 | gc.TINT32,
		gc.OLT<<16 | gc.TINT64,
		gc.OLT<<16 | gc.TUINT8,
		// ACMPU
		gc.OLT<<16 | gc.TUINT16,
		gc.OLT<<16 | gc.TUINT32,
		gc.OLT<<16 | gc.TUINT64,
		gc.OLT<<16 | gc.TFLOAT32,
		// AFCMPU
		gc.OLT<<16 | gc.TFLOAT64:
		a = ppc64.ABLT

	case gc.OLE<<16 | gc.TINT8, // ACMP
		gc.OLE<<16 | gc.TINT16,
		gc.OLE<<16 | gc.TINT32,
		gc.OLE<<16 | gc.TINT64,
		gc.OLE<<16 | gc.TUINT8,
		// ACMPU
		gc.OLE<<16 | gc.TUINT16,
		gc.OLE<<16 | gc.TUINT32,
		gc.OLE<<16 | gc.TUINT64:
		// No OLE for floats, because it mishandles NaN.
		// Front end must reverse comparison or use OLT and OEQ together.
		a = ppc64.ABLE

	case gc.OGT<<16 | gc.TINT8,
		gc.OGT<<16 | gc.TINT16,
		gc.OGT<<16 | gc.TINT32,
		gc.OGT<<16 | gc.TINT64,
		gc.OGT<<16 | gc.TUINT8,
		gc.OGT<<16 | gc.TUINT16,
		gc.OGT<<16 | gc.TUINT32,
		gc.OGT<<16 | gc.TUINT64,
		gc.OGT<<16 | gc.TFLOAT32,
		gc.OGT<<16 | gc.TFLOAT64:
		a = ppc64.ABGT

	case gc.OGE<<16 | gc.TINT8,
		gc.OGE<<16 | gc.TINT16,
		gc.OGE<<16 | gc.TINT32,
		gc.OGE<<16 | gc.TINT64,
		gc.OGE<<16 | gc.TUINT8,
		gc.OGE<<16 | gc.TUINT16,
		gc.OGE<<16 | gc.TUINT32,
		gc.OGE<<16 | gc.TUINT64:
		// No OGE for floats, because it mishandles NaN.
		// Front end must reverse comparison or use OLT and OEQ together.
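		// (For example, when either operand is NaN both x < y and x >= y
		// are false, so lowering x >= y as "branch if not less than" would
		// give the wrong answer.)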
		a = ppc64.ABGE

	case gc.OCMP<<16 | gc.TBOOL,
		gc.OCMP<<16 | gc.TINT8,
		gc.OCMP<<16 | gc.TINT16,
		gc.OCMP<<16 | gc.TINT32,
		gc.OCMP<<16 | gc.TPTR32,
		gc.OCMP<<16 | gc.TINT64:
		a = ppc64.ACMP

	case gc.OCMP<<16 | gc.TUINT8,
		gc.OCMP<<16 | gc.TUINT16,
		gc.OCMP<<16 | gc.TUINT32,
		gc.OCMP<<16 | gc.TUINT64,
		gc.OCMP<<16 | gc.TPTR64:
		a = ppc64.ACMPU

	case gc.OCMP<<16 | gc.TFLOAT32,
		gc.OCMP<<16 | gc.TFLOAT64:
		a = ppc64.AFCMPU

	case gc.OAS<<16 | gc.TBOOL,
		gc.OAS<<16 | gc.TINT8:
		a = ppc64.AMOVB

	case gc.OAS<<16 | gc.TUINT8:
		a = ppc64.AMOVBZ

	case gc.OAS<<16 | gc.TINT16:
		a = ppc64.AMOVH

	case gc.OAS<<16 | gc.TUINT16:
		a = ppc64.AMOVHZ

	case gc.OAS<<16 | gc.TINT32:
		a = ppc64.AMOVW

	case gc.OAS<<16 | gc.TUINT32,
		gc.OAS<<16 | gc.TPTR32:
		a = ppc64.AMOVWZ

	case gc.OAS<<16 | gc.TINT64,
		gc.OAS<<16 | gc.TUINT64,
		gc.OAS<<16 | gc.TPTR64:
		a = ppc64.AMOVD

	case gc.OAS<<16 | gc.TFLOAT32:
		a = ppc64.AFMOVS

	case gc.OAS<<16 | gc.TFLOAT64:
		a = ppc64.AFMOVD

	case gc.OADD<<16 | gc.TINT8,
		gc.OADD<<16 | gc.TUINT8,
		gc.OADD<<16 | gc.TINT16,
		gc.OADD<<16 | gc.TUINT16,
		gc.OADD<<16 | gc.TINT32,
		gc.OADD<<16 | gc.TUINT32,
		gc.OADD<<16 | gc.TPTR32,
		gc.OADD<<16 | gc.TINT64,
		gc.OADD<<16 | gc.TUINT64,
		gc.OADD<<16 | gc.TPTR64:
		a = ppc64.AADD

	case gc.OADD<<16 | gc.TFLOAT32:
		a = ppc64.AFADDS

	case gc.OADD<<16 | gc.TFLOAT64:
		a = ppc64.AFADD

	case gc.OSUB<<16 | gc.TINT8,
		gc.OSUB<<16 | gc.TUINT8,
		gc.OSUB<<16 | gc.TINT16,
		gc.OSUB<<16 | gc.TUINT16,
		gc.OSUB<<16 | gc.TINT32,
		gc.OSUB<<16 | gc.TUINT32,
		gc.OSUB<<16 | gc.TPTR32,
		gc.OSUB<<16 | gc.TINT64,
		gc.OSUB<<16 | gc.TUINT64,
		gc.OSUB<<16 | gc.TPTR64:
		a = ppc64.ASUB

	case gc.OSUB<<16 | gc.TFLOAT32:
		a = ppc64.AFSUBS

	case gc.OSUB<<16 | gc.TFLOAT64:
		a = ppc64.AFSUB

	case gc.OMINUS<<16 | gc.TINT8,
		gc.OMINUS<<16 | gc.TUINT8,
		gc.OMINUS<<16 | gc.TINT16,
		gc.OMINUS<<16 | gc.TUINT16,
		gc.OMINUS<<16 | gc.TINT32,
		gc.OMINUS<<16 | gc.TUINT32,
		gc.OMINUS<<16 | gc.TPTR32,
		gc.OMINUS<<16 | gc.TINT64,
		gc.OMINUS<<16 | gc.TUINT64,
		gc.OMINUS<<16 | gc.TPTR64:
		a = ppc64.ANEG

	case gc.OAND<<16 | gc.TINT8,
		gc.OAND<<16 | gc.TUINT8,
		gc.OAND<<16 | gc.TINT16,
		gc.OAND<<16 | gc.TUINT16,
		gc.OAND<<16 | gc.TINT32,
		gc.OAND<<16 | gc.TUINT32,
		gc.OAND<<16 | gc.TPTR32,
		gc.OAND<<16 | gc.TINT64,
		gc.OAND<<16 | gc.TUINT64,
		gc.OAND<<16 | gc.TPTR64:
		a = ppc64.AAND

	case gc.OOR<<16 | gc.TINT8,
		gc.OOR<<16 | gc.TUINT8,
		gc.OOR<<16 | gc.TINT16,
		gc.OOR<<16 | gc.TUINT16,
		gc.OOR<<16 | gc.TINT32,
		gc.OOR<<16 | gc.TUINT32,
		gc.OOR<<16 | gc.TPTR32,
		gc.OOR<<16 | gc.TINT64,
		gc.OOR<<16 | gc.TUINT64,
		gc.OOR<<16 | gc.TPTR64:
		a = ppc64.AOR

	case gc.OXOR<<16 | gc.TINT8,
		gc.OXOR<<16 | gc.TUINT8,
		gc.OXOR<<16 | gc.TINT16,
		gc.OXOR<<16 | gc.TUINT16,
		gc.OXOR<<16 | gc.TINT32,
		gc.OXOR<<16 | gc.TUINT32,
		gc.OXOR<<16 | gc.TPTR32,
		gc.OXOR<<16 | gc.TINT64,
		gc.OXOR<<16 | gc.TUINT64,
		gc.OXOR<<16 | gc.TPTR64:
		a = ppc64.AXOR

	// TODO(minux): handle rotates
	//case CASE(OLROT, TINT8):
	//case CASE(OLROT, TUINT8):
	//case CASE(OLROT, TINT16):
	//case CASE(OLROT, TUINT16):
	//case CASE(OLROT, TINT32):
	//case CASE(OLROT, TUINT32):
	//case CASE(OLROT, TPTR32):
	//case CASE(OLROT, TINT64):
	//case CASE(OLROT, TUINT64):
	//case CASE(OLROT, TPTR64):
	//	a = 0//???; RLDC?
	//	break;

	case gc.OLSH<<16 | gc.TINT8,
		gc.OLSH<<16 | gc.TUINT8,
		gc.OLSH<<16 | gc.TINT16,
		gc.OLSH<<16 | gc.TUINT16,
		gc.OLSH<<16 | gc.TINT32,
		gc.OLSH<<16 | gc.TUINT32,
		gc.OLSH<<16 | gc.TPTR32,
		gc.OLSH<<16 | gc.TINT64,
		gc.OLSH<<16 | gc.TUINT64,
		gc.OLSH<<16 | gc.TPTR64:
		a = ppc64.ASLD

	case gc.ORSH<<16 | gc.TUINT8,
		gc.ORSH<<16 | gc.TUINT16,
		gc.ORSH<<16 | gc.TUINT32,
		gc.ORSH<<16 | gc.TPTR32,
		gc.ORSH<<16 | gc.TUINT64,
		gc.ORSH<<16 | gc.TPTR64:
		a = ppc64.ASRD

	case gc.ORSH<<16 | gc.TINT8,
		gc.ORSH<<16 | gc.TINT16,
		gc.ORSH<<16 | gc.TINT32,
		gc.ORSH<<16 | gc.TINT64:
		a = ppc64.ASRAD

	// TODO(minux): handle rotates
	//case CASE(ORROTC, TINT8):
	//case CASE(ORROTC, TUINT8):
	//case CASE(ORROTC, TINT16):
	//case CASE(ORROTC, TUINT16):
	//case CASE(ORROTC, TINT32):
	//case CASE(ORROTC, TUINT32):
	//case CASE(ORROTC, TINT64):
	//case CASE(ORROTC, TUINT64):
	//	a = 0//??? RLDC??
	//	break;

	case gc.OHMUL<<16 | gc.TINT64:
		a = ppc64.AMULHD

	case gc.OHMUL<<16 | gc.TUINT64,
		gc.OHMUL<<16 | gc.TPTR64:
		a = ppc64.AMULHDU

	case gc.OMUL<<16 | gc.TINT8,
		gc.OMUL<<16 | gc.TINT16,
		gc.OMUL<<16 | gc.TINT32,
		gc.OMUL<<16 | gc.TINT64:
		a = ppc64.AMULLD

	case gc.OMUL<<16 | gc.TUINT8,
		gc.OMUL<<16 | gc.TUINT16,
		gc.OMUL<<16 | gc.TUINT32,
		gc.OMUL<<16 | gc.TPTR32,
		// don't use word multiply, the high 32 bits are undefined.
		gc.OMUL<<16 | gc.TUINT64,
		gc.OMUL<<16 | gc.TPTR64:
		// for 64-bit multiplies, signedness doesn't matter.
		a = ppc64.AMULLD

	case gc.OMUL<<16 | gc.TFLOAT32:
		a = ppc64.AFMULS

	case gc.OMUL<<16 | gc.TFLOAT64:
		a = ppc64.AFMUL

	case gc.ODIV<<16 | gc.TINT8,
		gc.ODIV<<16 | gc.TINT16,
		gc.ODIV<<16 | gc.TINT32,
		gc.ODIV<<16 | gc.TINT64:
		a = ppc64.ADIVD

	case gc.ODIV<<16 | gc.TUINT8,
		gc.ODIV<<16 | gc.TUINT16,
		gc.ODIV<<16 | gc.TUINT32,
		gc.ODIV<<16 | gc.TPTR32,
		gc.ODIV<<16 | gc.TUINT64,
		gc.ODIV<<16 | gc.TPTR64:
		a = ppc64.ADIVDU

	case gc.ODIV<<16 | gc.TFLOAT32:
		a = ppc64.AFDIVS

	case gc.ODIV<<16 | gc.TFLOAT64:
		a = ppc64.AFDIV
	}

	return a
}

const (
	ODynam   = 1 << 0
	OAddable = 1 << 1
)

func xgen(n *gc.Node, a *gc.Node, o int) bool {
	// TODO(minux)

	return -1 != 0 /*TypeKind(100016)*/
}

func sudoclean() {
	return
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return false on failure, true on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	// TODO(minux)

	*a = obj.Addr{}
	return false
}