// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*ptrSize) - 1
	poisonStack = uintptrMask & 0x6868686868686868

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
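// For example, if _FixedStack is 2 KB (a typical value; it varies by OS),
// order 0 holds 2 KB stacks, order 1 holds 4 KB stacks, order 2 holds
// 8 KB stacks, and so on, doubling with each order up to _NumStackOrders.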
var stackpool [_NumStackOrders]mspan
var stackpoolmu mutex

// List of stack spans to be freed at the end of GC. Protected by
// stackpoolmu.
var stackFreeQueue mspan

// Cached value of haveexperiment("framepointer")
var framepointer_enabled bool

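// stackinit initializes the global stack pool and the queue of stack
// spans to be freed at the end of GC.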
func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		mSpanList_Init(&stackpool[i])
	}
	mSpanList_Init(&stackFreeQueue)
}

// Allocates a stack from the free pool.  Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.next
	if s == list {
		// no free stacks.  Allocate another span worth.
		s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.ref != 0 {
			throw("bad ref")
		}
		if s.freelist.ptr() != nil {
			throw("bad freelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
			x.ptr().next = s.freelist
			s.freelist = x
		}
		mSpanList_Insert(list, s)
	}
	x := s.freelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.freelist = x.ptr().next
	s.ref++
	if s.freelist.ptr() == nil {
		// all stacks in s are allocated.
		mSpanList_Remove(s)
	}
	return x
}

// Adds stack x to the free pool.  Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.freelist.ptr() == nil {
		// s will now have a free stack
		mSpanList_Insert(&stackpool[order], s)
	}
	x.ptr().next = s.freelist
	s.freelist = x
	s.ref--
	if gcphase == _GCoff && s.ref == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		mSpanList_Remove(s)
		s.freelist = 0
		mHeap_FreeStack(&mheap_, s)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

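// stackcacherelease returns stacks from c's local cache to the global
// pool until the cache holds at most half of _StackCacheSize.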
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

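// stackcache_clear returns every stack cached in c to the global pool
// and empties the per-order lists.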
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

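// stackalloc allocates an n-byte stack and the stack barrier array
// stored at its top. n must be a power of 2.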
func stackalloc(n uint32) (stack, []stkbar) {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		stkbarSlice := slice{add(v, top), 0, maxstkbar}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = (unsafe.Pointer)(x)
	} else {
		s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
		if s == nil {
			throw("out of memory")
		}
		v = (unsafe.Pointer)(s.start << _PageShift)
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	top := uintptr(n) - nstkbar
	stkbarSlice := slice{add(v, top), 0, maxstkbar}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}

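// stackfree frees an n-byte stack allocated by stackalloc, returning it
// to the per-M cache, the global pool, or the heap depending on its size
// and the current GC phase.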
func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := (unsafe.Pointer)(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mHeap_Lookup(&mheap_, v)
		if s.state != _MSpanStack {
			println(hex(s.start<<_PageShift), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mHeap_FreeStack(&mheap_, s)
		} else {
			// Otherwise, add it to a list of stack spans
			// to be freed at the end of GC.
			//
			// TODO(austin): Make it possible to re-use
			// these spans as stacks, like we do for small
			// stack spans. (See issue #11466.)
			lock(&stackpoolmu)
			mSpanList_Insert(&stackFreeQueue, s)
			unlock(&stackpoolmu)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

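// adjustinfo records the bounds of the old stack and the offset to the
// new one; the adjust* routines below use it to relocate pointers that
// point into the old stack.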
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*unsafe.Pointer)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", p, "\n")
	}
	if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
		*pp = add(p, adjinfo.delta)
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

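// gobv converts a compiler bitvector into a gobitvector whose bytedata
// is an ordinary byte slice, for convenient indexing.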
func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

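// ptrbit returns bit i of bv: 1 if the corresponding word is a pointer,
// 0 if it is a scalar.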
func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*ptrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*ptrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*ptrSize))
			p := *pp
			if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid stack pointer")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", funcname(f), "\n")
				}
				*pp = p + delta
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine
		// that does a systemstack call. We allow it to be copied even
		// though we don't have full GC info for it (because it is
		// written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch thechar {
	case '6', '8':
		minsize = 0
	case '7':
		minsize = spAlign
	default:
		minsize = ptrSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * ptrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/ptrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if thechar == '6' && frame.argp-frame.varp == 2*regSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

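// adjustctxt relocates the scheduling context pointer saved in gp.sched
// if it points into the old stack.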
func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, (unsafe.Pointer)(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, (unsafe.Pointer)(&d.fn))
		adjustpointer(adjinfo, (unsafe.Pointer)(&d.sp))
		adjustpointer(adjinfo, (unsafe.Pointer)(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, (unsafe.Pointer)(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, (unsafe.Pointer)(&s.elem))
		adjustpointer(adjinfo, (unsafe.Pointer)(&s.selectdone))
	}
}

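// adjuststkbar relocates the saved LR pointers in gp's remaining
// (not yet hit) stack barriers.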
func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, (unsafe.Pointer)(&gp.stkbar[i].savedLRPtr))
	}
}

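// fillstack writes the byte b over the entire stack stk. It is used by
// the stackPoisonCopy debug mode to detect reads of stale stack memory.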
func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// adjust pointers in the to-be-copied frames
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// adjust other miscellaneous things that have pointers into stacks.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjustsudogs(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)

	// copy the stack to the new location
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfb)
	}
	memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}

// round x up to a power of 2.
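// For example, round2(5) == 8 and round2(8) == 8.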
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", thisg.m.morebuf.g, "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0
	rewindmorestack(&gp.sched)

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomicloaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "stack growth"

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if thechar == '6' || thechar == '8' {
		// The call to morestack cost a word.
		sp -= ptrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				scanstack(gp)
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	casgstatus(gp, _Gwaiting, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize))
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

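// nilfunc deliberately faults by writing through a nil pointer. It is
// used as the call target when starting a call to a nil function value
// (see gostartcallfn below).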
//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = (unsafe.Pointer)(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, (unsafe.Pointer)(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
func shrinkstack(gp *g) {
	if readgstatus(gp) == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if goos_windows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	oldstatus := casgcopystack(gp)
	copystack(gp, newsize)
	casgstatus(gp, _Gcopystack, oldstatus)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.next; s != list; {
			next := s.next
			if s.ref == 0 {
				mSpanList_Remove(s)
				s.freelist = 0
				mHeap_FreeStack(&mheap_, s)
			}
			s = next
		}
	}

	// Free queued stack spans.
	for stackFreeQueue.next != &stackFreeQueue {
		s := stackFreeQueue.next
		mSpanList_Remove(s)
		mHeap_FreeStack(&mheap_, s)
	}

	unlock(&stackpoolmu)
}

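// morestackc reports (and aborts on) an attempt to grow the stack while
// executing C code on a Go stack, which is not allowed.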
//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}