// Do not edit. Bootstrap copy of /Volumes/Android/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ppc64/ggen.go

//line /Volumes/Android/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ppc64/ggen.go:1
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ppc64

import (
	"bootstrap/compile/internal/gc"
	"bootstrap/internal/obj"
	"bootstrap/internal/obj/ppc64"
	"fmt"
)

func defframe(ptxt *obj.Prog) {
	var n *gc.Node

	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to zero ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt

	hi := int64(0)
	lo := hi

	// iterate through declarations - they are sorted in decreasing xoffset order.
	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
		n = l.N
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatal("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}

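		// A variable whose end lies within two register widths of the
		// pending range's low bound is folded into that range; presumably
		// zeroing the small gap is cheaper than emitting a separate
		// zeroing sequence for it.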
		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
			// merge with range we already have
			lo = n.Xoffset

			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi)

		// set new range
		hi = n.Xoffset + n.Type.Width

		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi)
}

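// zerorange zeroes the stack words in [lo, hi) within the frame. It picks one
// of three strategies by size: unrolled MOVD stores for ranges smaller than
// four pointer words, a DUFFZERO call for medium ranges (currently disabled;
// see the issue noted below), and an explicit MOVDU store loop otherwise.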
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
		}
		// TODO(dfc): https://golang.org/issue/12108
		// If DUFFZERO is used inside a tail call (see genwrapper) it will
		// overwrite the link register.
	} else if false && cnt <= int64(128*gc.Widthptr) {
		p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p.Reg = ppc64.REGRT1
		p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
		p1 := p
		p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}

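// appendpp allocates a new Prog on the global context, fills in the opcode
// and both operands, and splices it into the instruction list immediately
// after p, returning the new instruction so that calls chain.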
func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
	q := gc.Ctxt.NewProg()
	gc.Clearp(q)
	q.As = int16(as)
	q.Lineno = p.Lineno
	q.From.Type = int16(ftype)
	q.From.Reg = int16(freg)
	q.From.Offset = foffset
	q.To.Type = int16(ttype)
	q.To.Reg = int16(treg)
	q.To.Offset = toffset
	q.Link = p.Link
	p.Link = q
	return q
}

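// ginsnop emits OR R0, R0, which rewrites R0 with itself and changes no
// architectural state, serving as a no-op instruction.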
func ginsnop() {
	var reg gc.Node
	gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
	gins(ppc64.AOR, &reg, &reg)
}

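// panicdiv caches the Node for the runtime's panicdivide function; dodiv
// resolves it lazily on first use.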
var panicdiv *gc.Node

/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
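// The overflow case guarded against below: for a signed 64-bit divide,
// MinInt64 / -1 would be 2^63, which does not fit, and the hardware result
// is undefined, so the generated code tests the divisor against -1 and
// substitutes -a for ODIV and 0 for OMOD. Narrower operands are widened to
// 64 bits first, after which the quotient always fits and no check is needed.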
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// Have to be careful about handling
	// the most negative int divided by -1 correctly.
	// The hardware will generate an undefined result.
	// Also need to explicitly trap on division by zero:
	// the hardware will silently generate an undefined result.
	// DIVW would leave an unpredictable result in the upper 32 bits,
	// so always use DIVD/DIVDU.
	t := nl.Type

	t0 := t
	check := 0
	if gc.Issigned[t.Etype] {
		check = 1
		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
			check = 0
		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
			check = 0
		}
	}

	if t.Width < 8 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT64]
		} else {
			t = gc.Types[gc.TUINT64]
		}
		check = 0
	}

	a := optoas(gc.ODIV, t)

	var tl gc.Node
	gc.Regalloc(&tl, t0, nil)
	var tr gc.Node
	gc.Regalloc(&tr, t0, nil)
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &tl)
		gc.Cgen(nr, &tr)
	} else {
		gc.Cgen(nr, &tr)
		gc.Cgen(nl, &tl)
	}

	if t != t0 {
		// Convert
		tl2 := tl

		tr2 := tr
		tl.Type = t
		tr.Type = t
		gmove(&tl2, &tl)
		gmove(&tr2, &tr)
	}

	// Handle divide-by-zero panic.
	p1 := gins(optoas(gc.OCMP, t), &tr, nil)

	p1.To.Type = obj.TYPE_REG
	p1.To.Reg = ppc64.REGZERO
	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
	if panicdiv == nil {
		panicdiv = gc.Sysfunc("panicdivide")
	}
	gc.Ginscall(panicdiv, -1)
	gc.Patch(p1, gc.Pc)

	var p2 *obj.Prog
	if check != 0 {
		var nm1 gc.Node
		gc.Nodconst(&nm1, t, -1)
		gins(optoas(gc.OCMP, t), &tr, &nm1)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, &tl)

			gmove(&tl, res)
		} else {
			// a % (-1) is 0.
			var nz gc.Node
			gc.Nodconst(&nz, t, 0)

			gmove(&nz, res)
		}

		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
	}

	p1 = gins(a, &tr, &tl)
	if op == gc.ODIV {
		gc.Regfree(&tr)
		gmove(&tl, res)
	} else {
		// A%B = A-(A/B*B)
		var tm gc.Node
		gc.Regalloc(&tm, t, nil)

		// patch div to use the 3 register form
		// TODO(minux): add gins3?
		p1.Reg = p1.To.Reg

		p1.To.Reg = tm.Reg
		gins(optoas(gc.OMUL, t), &tr, &tm)
		gc.Regfree(&tr)
		gins(optoas(gc.OSUB, t), &tm, &tl)
		gc.Regfree(&tm)
		gmove(&tl, res)
	}

	gc.Regfree(&tl)
	if check != 0 {
		gc.Patch(p2, gc.Pc)
	}
}

/*
 * generate high multiply:
 *   res = (nl*nr) >> width
 */
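// For operands narrower than 64 bits there is no dedicated high-multiply
// instruction: the code below does a full multiply and then shifts the
// product right by the operand width (arithmetically for signed types,
// logically for unsigned). 64-bit operands use MULHD/MULHDU directly.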
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16,
		gc.TINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(ppc64.ASRAD, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(w)

	case gc.TUINT8,
		gc.TUINT16,
		gc.TUINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(ppc64.ASRD, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(w)

	case gc.TINT64,
		gc.TUINT64:
		if gc.Issigned[t.Etype] {
			gins(ppc64.AMULHD, &n2, &n1)
		} else {
			gins(ppc64.AMULHDU, &n2, &n1)
		}

	default:
		gc.Fatal("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}

/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	a := optoas(op, nl.Type)

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
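			// (a single shift by >= width is not well defined on the
			// hardware; two shifts by width-1 yield 0, or the sign fill
			// for a signed right shift, matching Go's semantics).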
			var n3 gc.Node
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)

			gins(a, &n3, &n1)
			gins(a, &n3, &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nl.Ullman >= gc.UINF {
		var n4 gc.Node
		gc.Tempname(&n4, nl.Type)
		gc.Cgen(nl, &n4)
		nl = &n4
	}

	if nr.Ullman >= gc.UINF {
		var n5 gc.Node
		gc.Tempname(&n5, nr.Type)
		gc.Cgen(nr, &n5)
		nr = &n5
	}

	// Allow either uint32 or uint64 as shift type,
	// to avoid unnecessary conversion from uint32 to uint64
	// just to do the comparison.
	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]

	if tcount.Etype < gc.TUINT32 {
		tcount = gc.Types[gc.TUINT32]
	}

	var n1 gc.Node
	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift count
	var n3 gc.Node
	gc.Regalloc(&n3, tcount, &n1) // to clear the high bits of the shift count

	var n2 gc.Node
	gc.Regalloc(&n2, nl.Type, res)

	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
	} else {
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
		gc.Cgen(nl, &n2)
	}

	gc.Regfree(&n3)

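	// Go defines out-of-range shifts: a count >= width yields 0, except for
	// a signed right shift, which fills with the sign bit. The hardware does
	// not guarantee this for variable counts, so unless the front end proved
	// the count is in range (bounded), compare and patch the result here.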
	// test and fix up large shifts
	if !bounded {
		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
		gins(optoas(gc.OCMP, tcount), &n1, &n3)
		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n2)
		} else {
			gc.Nodconst(&n3, nl.Type, 0)
			gmove(&n3, &n2)
		}

		gc.Patch(p1, gc.Pc)
	}

	gins(a, &n1, &n2)

	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}

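// clearfat zeroes the fat object nl in place. Strategy, by size: objects of
// more than 128 dwords get a MOVDU store loop; medium objects would use
// DUFFZERO (currently disabled, see the issue noted below); small ones get
// unrolled MOVD stores; trailing bytes are cleared one MOVB at a time.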
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
	}

	w := uint64(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 8 // bytes
	q := w / 8 // dwords

	if gc.Reginuse(ppc64.REGRT1) {
		gc.Fatal("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
	}

	var r0 gc.Node
	gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
	gc.Regrealloc(&dst)
	gc.Agen(nl, &dst)

	var boff uint64
	if q > 128 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8

		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p = gins(ppc64.AMOVD, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q * 8)

		p = gins(ppc64.AMOVDU, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 8
		pl := p

		p = gins(ppc64.ACMP, &dst, &end)
		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)

		gc.Regfree(&end)

		// The loop leaves R3 on the last zeroed dword
		boff = 8
		// TODO(dfc): https://golang.org/issue/12108
		// If DUFFZERO is used inside a tail call (see genwrapper) it will
		// overwrite the link register.
	} else if false && q >= 4 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8
		f := gc.Sysfunc("duffzero")
		p = gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
		p.To.Offset = int64(4 * (128 - q))

		// duffzero leaves R3 on the last zeroed dword
		boff = 8
	} else {
		var p *obj.Prog
		for t := uint64(0); t < q; t++ {
			p = gins(ppc64.AMOVD, &r0, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(8 * t)
		}

		boff = 8 * q
	}

	var p *obj.Prog
	for t := uint64(0); t < c; t++ {
		p = gins(ppc64.AMOVB, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(t + boff)
	}

	gc.Regfree(&dst)
}

// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog
	var p2 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatal("invalid nil check %v\n", p)
		}

		/*
			// check is
			//	TD $4, R0, arg (R0 is always zero)
			// eqv. to:
			// 	tdeq r0, arg
			// NOTE: this needs special runtime support to make SIGTRAP recoverable.
			reg = p->from.reg;
			p->as = ATD;
			p->from = p->to = p->from3 = zprog.from;
			p->from.type = TYPE_CONST;
			p->from.offset = 4;
			p->from.reg = 0;
			p->reg = REGZERO;
			p->to.type = TYPE_REG;
			p->to.reg = reg;
		*/
		// check is
		//	CMP arg, R0
		//	BNE 2(PC) [likely]
		//	MOVD R0, 0(R0)
		p1 = gc.Ctxt.NewProg()

		p2 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		gc.Clearp(p2)
		p1.Link = p2
		p2.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p2.Lineno = p.Lineno
		p1.Pc = 9999
		p2.Pc = 9999
		p.As = ppc64.ACMP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = ppc64.REGZERO
		p1.As = ppc64.ABNE

		//p1->from.type = TYPE_CONST;
		//p1->from.offset = 1; // likely
		p1.To.Type = obj.TYPE_BRANCH

		p1.To.Val = p2.Link

		// crash by write to memory address 0.
		p2.As = ppc64.AMOVD

		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = ppc64.REGZERO
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = ppc64.REGZERO
		p2.To.Offset = 0
	}
}

// res = runtime.getg()
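// On ppc64 the current goroutine's g pointer lives in a dedicated register
// (REGG), so the read is a plain register move.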
func getg(res *gc.Node) {
	var n1 gc.Node
	gc.Nodreg(&n1, res.Type, ppc64.REGG)
	gmove(&n1, res)
}
    568