// Do not edit. Bootstrap copy of /Volumes/Android/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm/ggen.go

//line /Volumes/Android/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm/ggen.go:1
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm

import (
	"bootstrap/compile/internal/gc"
	"bootstrap/internal/obj"
	"bootstrap/internal/obj/arm"
)

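// defframe fills in the argument size and frame size of the function's
// TEXT instruction, then zeroes the ambiguously live stack ranges flagged
// by Needzero, so the garbage collector only ever scans initialized values.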
func defframe(ptxt *obj.Prog) {
	var n *gc.Node

	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE
	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to contain ambiguously live variables
	// so that the garbage collector only sees initialized values
	// when it looks for pointers.
	p := ptxt

	hi := int64(0)
	lo := hi
	r0 := uint32(0)
	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
		n = l.N
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatal("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}
		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
			// merge with range we already have
			lo = gc.Rnd(n.Xoffset, int64(gc.Widthptr))
			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi, &r0)

		// set new range
		hi = n.Xoffset + n.Type.Width
		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi, &r0)
}

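// zerorange emits code after p to zero the stack range [lo, hi) in a frame
// of the given size. *r0 records whether R0 already holds zero, so the
// constant is materialized at most once per frame. Small ranges get
// individual stores, mid-sized ranges call duffzero, and large ranges get a
// loop of roughly this shape (R1 = start, R2 = end):
//	MOVW.P	R0, 4(R1)
//	CMP	R1, R2
//	BNE	loop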
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if *r0 == 0 {
		p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
		*r0 = 1
	}

	if cnt < int64(4*gc.Widthptr) {
		// Small range: emit individual MOVW stores.
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
		}
	} else if !gc.Nacl && cnt <= int64(128*gc.Widthptr) {
		// Mid-sized range: jump into duffzero at the right offset.
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		// Large range: explicit store loop with post-increment.
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
		p.Reg = arm.REG_R1
		p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
		p1 := p
		p.Scond |= arm.C_PBIT
		p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
		p.Reg = arm.REG_R2
		p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}

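// appendpp allocates a new instruction with the given opcode and operands
// and links it into the instruction list immediately after p, returning it.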
func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int, treg int, toffset int32) *obj.Prog {
	q := gc.Ctxt.NewProg()
	gc.Clearp(q)
	q.As = int16(as)
	q.Lineno = p.Lineno
	q.From.Type = int16(ftype)
	q.From.Reg = int16(freg)
	q.From.Offset = int64(foffset)
	q.To.Type = int16(ttype)
	q.To.Reg = int16(treg)
	q.To.Offset = int64(toffset)
	q.Link = p.Link
	p.Link = q
	return q
}

/*
 * generate high multiply
 *  res = (nl * nr) >> wordsize
 */
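// For 8- and 16-bit operands the full product fits in a register, so the
// high part is just a shift of the product; 32-bit operands need MULL/MULLU.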
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nl, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gc.Cgen(nr, &n2)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)

	case gc.TUINT8,
		gc.TUINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)

	case gc.TINT32,
		gc.TUINT32:
		// perform a long multiplication.
		var p *obj.Prog
		if gc.Issigned[t.Etype] {
			p = gins(arm.AMULL, &n2, nil)
		} else {
			p = gins(arm.AMULLU, &n2, nil)
		}

		// n2 * n1 -> (n1 n2)
		p.Reg = n1.Reg
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = n1.Reg
		p.To.Offset = int64(n2.Reg)

	default:
		gc.Fatal("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}

/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
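// Constant shift counts are handled inline; variable counts are compared
// against the operand width and fixed up so that counts >= width yield 0
// (or copies of the sign bit for signed right shifts), matching Go's shift
// semantics.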
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Type.Width > 4 {
		gc.Fatal("cgen_shift %v", nl.Type)
	}

	w := int(nl.Type.Width * 8)

	if op == gc.OLROT {
		v := nr.Int()
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		if w == 32 {
			gc.Cgen(nl, &n1)
			gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
		} else {
			var n2 gc.Node
			gc.Regalloc(&n2, nl.Type, nil)
			gc.Cgen(nl, &n2)
			gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
			gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
			gc.Regfree(&n2)

			// Ensure sign/zero-extended result.
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}

		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc == 0 {
			// nothing to do
		} else if sc >= uint64(nl.Type.Width*8) {
			// Shift count is at least the operand width: the result
			// is all zeros, or all copies of the sign bit for a
			// signed right shift.
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
			} else {
				gins(arm.AEOR, &n1, &n1)
			}
		} else {
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
			} else if op == gc.ORSH {
				gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
			} else { // OLSH
				gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
			}
		}

		if w < 32 && op == gc.OLSH {
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	tr := nr.Type
	var t gc.Node
	var n1 gc.Node
	var n2 gc.Node
	var n3 gc.Node
	if tr.Width > 4 {
		// 64-bit shift count: shift by the low word, but force the
		// count to w (overflow) if the high word is nonzero.
		var nt gc.Node
		gc.Tempname(&nt, nr.Type)
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Cgen(nr, &nt)
			n1 = nt
		} else {
			gc.Cgen(nr, &nt)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}

		var hi gc.Node
		var lo gc.Node
		split64(&nt, &lo, &hi)
		gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
		gmove(&lo, &n1)
		gmove(&hi, &n3)
		splitclean()
		gins(arm.ATST, &n3, nil)
		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
		p1 := gins(arm.AMOVW, &t, &n1)
		p1.Scond = arm.C_SCOND_NE
		tr = gc.Types[gc.TUINT32]
		gc.Regfree(&n3)
	} else {
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
		} else {
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}
	}

	// test for shift being 0
	gins(arm.ATST, &n1, nil)

	p3 := gc.Gbranch(arm.ABEQ, nil, -1)

	// test and fix up large shifts
	// TODO: if(!bounded), don't emit some of this.
	gc.Regalloc(&n3, tr, nil)

	gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
	gmove(&t, &n3)
	gins(arm.ACMP, &n1, &n3)
	if op == gc.ORSH {
		var p1 *obj.Prog
		var p2 *obj.Prog
		if gc.Issigned[nl.Type.Etype] {
			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
		} else {
			p1 = gins(arm.AEOR, &n2, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
		}

		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	} else {
		p1 := gins(arm.AEOR, &n2, &n2)
		p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	}

	gc.Regfree(&n3)

	gc.Patch(p3, gc.Pc)

	// Left-shift of smaller word must be sign/zero-extended.
	if w < 32 && op == gc.OLSH {
		gins(optoas(gc.OAS, nl.Type), &n2, &n2)
	}
	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}

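// clearfat zeroes a fat (multiword) object. Simple enough objects are
// handled by Componentgen; otherwise the object is zeroed four bytes at a
// time via duffzero, a store loop, or unrolled stores, with any leftover
// bytes zeroed one at a time.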
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // quads

	var r0 gc.Node
	r0.Op = gc.OREGISTER
	r0.Reg = arm.REG_R0
	var r1 gc.Node
	r1.Op = gc.OREGISTER
	r1.Reg = arm.REG_R1
	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
	gc.Agen(nl, &dst)
	var nc gc.Node
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	var nz gc.Node
	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	gc.Cgen(&nc, &nz)

	if q > 128 {
		// Large object: explicit store loop with post-increment.
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)

		gc.Regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		f := gc.Sysfunc("duffzero")
		p := gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		// Small object: unrolled word stores.
		var p *obj.Prog
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT
			q--
		}
	}

	// Zero any trailing bytes one at a time.
	var p *obj.Prog
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT
		c--
	}

	gc.Regfree(&dst)
	gc.Regfree(&nz)
}

// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var reg int
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p.Lineno == 1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatal("invalid nil check %v", p)
		}
		reg = int(p.From.Reg)

		// check is
		//	CMP arg, $0
		//	MOV.EQ arg, 0(arg)
		p1 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p1.As = arm.AMOVW
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = int16(reg)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = int16(reg)
		p1.To.Offset = 0
		p1.Scond = arm.C_SCOND_EQ
		p.As = arm.ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Reg = 0
		p.From.Offset = 0
		p.Reg = int16(reg)
	}
}

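// ginsnop generates a no-op: a conditional (EQ) AND of R0 with itself.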
func ginsnop() {
	var r gc.Node
	gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
	p := gins(arm.AAND, &r, &r)
	p.Scond = arm.C_SCOND_EQ
}

/*
 * generate
 *	as $c, n
 */
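// The constant is always loaded into a scratch register first, since ARM
// data-processing immediates are limited to rotated 8-bit values and c may
// not be encodable directly.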
func ginscon(as int, c int64, n *gc.Node) {
	var n1 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	gmove(&n1, &n2)
	gins(as, &n2, n)
	gc.Regfree(&n2)
}

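// ginscmp generates a comparison of n1 and n2 for type t and returns the
// conditional branch for op. A zero literal on the left is moved to the
// right (with op reversed) so it can be emitted as a CMP immediate.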
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}
	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
		gins(arm.ACMP, &r1, n2)
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}

// addr += index*width if possible.
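// Only widths 2, 4, and 8 map onto ARM's shifted-register operand
// (LSL #1, #2, #3); any other width reports false and emits nothing.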
func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
	switch width {
	case 2:
		gshift(arm.AADD, index, arm.SHIFT_LL, 1, addr)
		return true
	case 4:
		gshift(arm.AADD, index, arm.SHIFT_LL, 2, addr)
		return true
	case 8:
		gshift(arm.AADD, index, arm.SHIFT_LL, 3, addr)
		return true
	}
	return false
}

// res = runtime.getg()
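// On arm the current g is kept in a dedicated register (REGG), so this is
// a plain register-to-register move.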
func getg(res *gc.Node) {
	var n1 gc.Node
	gc.Nodreg(&n1, res.Type, arm.REGG)
	gmove(&n1, res)
}
    533