      1 // Copyright 2013 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 package runtime
      6 
      7 import (
      8 	"runtime/internal/atomic"
      9 	"runtime/internal/sys"
     10 	"unsafe"
     11 )
     12 
     13 /*
     14 Stack layout parameters.
     15 Included both by the runtime (compiled via 6c) and by the linkers (compiled via gcc).
     16 
     17 The per-goroutine g->stackguard is set to point StackGuard bytes
     18 above the bottom of the stack.  Each function compares its stack
     19 pointer against g->stackguard to check for overflow.  To cut one
     20 instruction from the check sequence for functions with tiny frames,
     21 the stack is allowed to protrude StackSmall bytes below the stack
     22 guard.  Functions with large frames don't bother with the check and
     23 always call morestack.  The sequences are (for amd64, others are
     24 similar):
     25 
     26 	guard = g->stackguard
     27 	frame = function's stack frame size
     28 	argsize = size of function arguments (call + return)
     29 
     30 	stack frame size <= StackSmall:
     31 		CMPQ guard, SP
     32 		JHI 3(PC)
     33 		MOVQ m->morearg, $(argsize << 32)
     34 		CALL morestack(SB)
     35 
     36 	stack frame size > StackSmall but < StackBig:
     37 		LEAQ (frame-StackSmall)(SP), R0
     38 		CMPQ guard, R0
     39 		JHI 3(PC)
     40 		MOVQ m->morearg, $(argsize << 32)
     41 		CALL morestack(SB)
     42 
     43 	stack frame size >= StackBig:
     44 		MOVQ m->morearg, $((argsize << 32) | frame)
     45 		CALL morestack(SB)
     46 
     47 The bottom StackGuard - StackSmall bytes are important: there has
     48 to be enough room to execute functions that refuse to check for
     49 stack overflow, either because they need to be adjacent to the
     50 actual caller's frame (deferproc) or because they handle the imminent
     51 stack overflow (morestack).
     52 
     53 For example, deferproc might call malloc, which does one of the
     54 above checks (without allocating a full frame), which might trigger
     55 a call to morestack.  This sequence needs to fit in the bottom
     56 section of the stack.  On amd64, morestack's frame is 40 bytes, and
     57 deferproc's frame is 56 bytes.  That fits well within the
     58 StackGuard - StackSmall bytes at the bottom.
     59 The linkers explore all possible call traces involving non-splitting
     60 functions to make sure that this limit cannot be violated.
     61 */
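        // A quick worked example of the arithmetic above, using the constants
        // defined below and assuming a typical linux/amd64 build
        // (StackGuardMultiplier = 1, _StackSystem = 0). This is only an
        // illustrative sketch, not part of the runtime proper:
        //
        //	const (
        //		stackGuard = 880                     // _StackGuard = 880*1 + 0
        //		stackSmall = 128                     // _StackSmall
        //		stackLimit = stackGuard - stackSmall // 752 bytes for NOSPLIT chains
        //	)
        //
        // The morestack (40-byte) and deferproc (56-byte) frames mentioned above
        // total 96 bytes, comfortably within the 752-byte budget that the
        // linkers verify.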
     62 
     63 const (
     64 	// StackSystem is the number of additional bytes to add
     65 	// to each stack below the usual guard area for OS-specific
     66 	// purposes like signal handling. Used on Windows, Plan 9,
     67 	// and Darwin/ARM because they do not use a separate signal stack.
     68 	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024
     69 
     70 	// The minimum size of stack used by Go code
     71 	_StackMin = 2048
     72 
     73 	// The minimum stack size to allocate.
     74 	// The hackery here rounds FixedStack0 up to a power of 2.
     75 	_FixedStack0 = _StackMin + _StackSystem
     76 	_FixedStack1 = _FixedStack0 - 1
     77 	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
     78 	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
     79 	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
     80 	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
     81 	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
     82 	_FixedStack  = _FixedStack6 + 1
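        	// As an illustrative aside (not part of the runtime), the bit-smearing
        	// above is the usual branch-free round-up-to-a-power-of-two; written as
        	// ordinary Go, mirroring the same six shift steps (enough for stack
        	// sizes far below 2^32), it would be:
        	//
        	//	func roundPow2(x uintptr) uintptr {
        	//		x--
        	//		x |= x >> 1
        	//		x |= x >> 2
        	//		x |= x >> 4
        	//		x |= x >> 8
        	//		x |= x >> 16
        	//		return x + 1
        	//	}
        	//
        	// For example, on windows/amd64 _StackSystem is 512*8 = 4096 bytes, so
        	// _FixedStack0 = 2048+4096 = 6144 and roundPow2(6144) = 8192; on
        	// linux/amd64, _FixedStack0 = 2048 is already a power of two and comes
        	// back unchanged.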
     83 
     84 	// Functions that need frames bigger than this use an extra
     85 	// instruction to do the stack split check, to avoid overflow
     86 	// in case SP - framesize wraps below zero.
     87 	// This value can be no bigger than the size of the unmapped
     88 	// space at zero.
     89 	_StackBig = 4096
     90 
     91 	// The stack guard is a pointer this many bytes above the
     92 	// bottom of the stack.
     93 	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem
     94 
     95 	// After a stack split check the SP is allowed to be this
     96 	// many bytes below the stack guard. This saves an instruction
     97 	// in the checking sequence for tiny frames.
     98 	_StackSmall = 128
     99 
    100 	// The maximum number of bytes that a chain of NOSPLIT
    101 	// functions can use.
    102 	_StackLimit = _StackGuard - _StackSystem - _StackSmall
    103 )
    104 
    105 // Goroutine preemption request.
    106 // Stored into g->stackguard0 to cause split stack check failure.
    107 // Must be greater than any real sp.
    108 // 0xfffffade in hex.
    109 const (
    110 	_StackPreempt = uintptrMask & -1314
    111 	_StackFork    = uintptrMask & -1234
    112 )
    113 
    114 const (
    115 	// stackDebug == 0: no logging
    116 	//            == 1: logging of per-stack operations
    117 	//            == 2: logging of per-frame operations
    118 	//            == 3: logging of per-word updates
    119 	//            == 4: logging of per-word reads
    120 	stackDebug       = 0
    121 	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
    122 	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
    123 	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
    124 
    125 	stackCache = 1
    126 
    127 	// check the BP links during traceback.
    128 	debugCheckBP = false
    129 )
    130 
    131 const (
    132 	uintptrMask = 1<<(8*sys.PtrSize) - 1
    133 
    134 	// Goroutine preemption request.
    135 	// Stored into g->stackguard0 to cause split stack check failure.
    136 	// Must be greater than any real sp.
    137 	// 0xfffffade in hex.
    138 	stackPreempt = uintptrMask & -1314
    139 
    140 	// Thread is forking.
    141 	// Stored into g->stackguard0 to cause split stack check failure.
    142 	// Must be greater than any real sp.
    143 	stackFork = uintptrMask & -1234
    144 )
    145 
    146 // Global pool of spans that have free stacks.
    147 // Stacks are assigned an order according to size.
    148 //     order = log_2(size/FixedStack)
    149 // There is a free list for each order.
    150 // TODO: one lock per order?
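        // As a concrete illustration, assuming _FixedStack = 2048 and the usual
        // _NumStackOrders = 4 of linux/amd64, the fixed-size classes map to
        // orders as:
        //
        //	2 KB -> order 0, 4 KB -> order 1, 8 KB -> order 2, 16 KB -> order 3
        //
        // i.e. order = stacklog2(size/_FixedStack) using stacklog2 below.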
    151 var stackpool [_NumStackOrders]mSpanList
    152 var stackpoolmu mutex
    153 
    154 // Global pool of large stack spans.
    155 var stackLarge struct {
    156 	lock mutex
    157 	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
    158 }
    159 
    160 func stackinit() {
    161 	if _StackCacheSize&_PageMask != 0 {
    162 		throw("cache size must be a multiple of page size")
    163 	}
    164 	for i := range stackpool {
    165 		stackpool[i].init()
    166 	}
    167 	for i := range stackLarge.free {
    168 		stackLarge.free[i].init()
    169 	}
    170 }
    171 
    172 // stacklog2 returns log_2(n).
    173 func stacklog2(n uintptr) int {
    174 	log2 := 0
    175 	for n > 1 {
    176 		n >>= 1
    177 		log2++
    178 	}
    179 	return log2
    180 }
    181 
    182 // Allocates a stack from the free pool. Must be called with
    183 // stackpoolmu held.
    184 func stackpoolalloc(order uint8) gclinkptr {
    185 	list := &stackpool[order]
    186 	s := list.first
    187 	if s == nil {
    188 		// No free stacks. Allocate another span's worth.
    189 		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
    190 		if s == nil {
    191 			throw("out of memory")
    192 		}
    193 		if s.allocCount != 0 {
    194 			throw("bad allocCount")
    195 		}
    196 		if s.stackfreelist.ptr() != nil {
    197 			throw("bad stackfreelist")
    198 		}
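        		// Carve the fresh span into stacks of this order and push each
        		// one onto the span's free list (LIFO). As an illustrative
        		// example, assuming the usual 32 KB _StackCacheSize and
        		// _FixedStack = 2048, an order-0 span yields 32768/2048 = 16
        		// stacks and an order-2 span yields 32768/8192 = 4 stacks.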
    199 		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
    200 			x := gclinkptr(s.base() + i)
    201 			x.ptr().next = s.stackfreelist
    202 			s.stackfreelist = x
    203 		}
    204 		list.insert(s)
    205 	}
    206 	x := s.stackfreelist
    207 	if x.ptr() == nil {
    208 		throw("span has no free stacks")
    209 	}
    210 	s.stackfreelist = x.ptr().next
    211 	s.allocCount++
    212 	if s.stackfreelist.ptr() == nil {
    213 		// all stacks in s are allocated.
    214 		list.remove(s)
    215 	}
    216 	return x
    217 }
    218 
    219 // Adds stack x to the free pool. Must be called with stackpoolmu held.
    220 func stackpoolfree(x gclinkptr, order uint8) {
    221 	s := mheap_.lookup(unsafe.Pointer(x))
    222 	if s.state != _MSpanStack {
    223 		throw("freeing stack not in a stack span")
    224 	}
    225 	if s.stackfreelist.ptr() == nil {
    226 		// s will now have a free stack
    227 		stackpool[order].insert(s)
    228 	}
    229 	x.ptr().next = s.stackfreelist
    230 	s.stackfreelist = x
    231 	s.allocCount--
    232 	if gcphase == _GCoff && s.allocCount == 0 {
    233 		// Span is completely free. Return it to the heap
    234 		// immediately if we're sweeping.
    235 		//
    236 		// If GC is active, we delay the free until the end of
    237 		// GC to avoid the following type of situation:
    238 		//
    239 		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
    240 		// 2) The stack that pointer points to is copied
    241 		// 3) The old stack is freed
    242 		// 4) The containing span is marked free
    243 		// 5) GC attempts to mark the SudoG.elem pointer. The
    244 		//    marking fails because the pointer looks like a
    245 		//    pointer into a free span.
    246 		//
    247 		// By not freeing, we prevent step #4 until GC is done.
    248 		stackpool[order].remove(s)
    249 		s.stackfreelist = 0
    250 		mheap_.freeStack(s)
    251 	}
    252 }
    253 
    254 // stackcacherefill/stackcacherelease implement a global pool of stack segments.
    255 // The pool is required to prevent unlimited growth of per-thread caches.
    256 //
    257 //go:systemstack
    258 func stackcacherefill(c *mcache, order uint8) {
    259 	if stackDebug >= 1 {
    260 		print("stackcacherefill order=", order, "\n")
    261 	}
    262 
    263 	// Grab some stacks from the global cache.
    264 	// Grab half of the allowed capacity (to prevent thrashing).
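        	// As an illustrative example, assuming the usual 32 KB _StackCacheSize:
        	// a refill for order 0 stops once it holds 16 KB, i.e. 8 stacks of
        	// 2 KB. stackcacherelease below trims an over-full cache back to the
        	// same 16 KB mark, so the per-P cache is refilled to the halfway point
        	// when it runs dry and trimmed to the halfway point when it fills up,
        	// rather than hitting the global pool on every allocation and free.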
    265 	var list gclinkptr
    266 	var size uintptr
    267 	lock(&stackpoolmu)
    268 	for size < _StackCacheSize/2 {
    269 		x := stackpoolalloc(order)
    270 		x.ptr().next = list
    271 		list = x
    272 		size += _FixedStack << order
    273 	}
    274 	unlock(&stackpoolmu)
    275 	c.stackcache[order].list = list
    276 	c.stackcache[order].size = size
    277 }
    278 
    279 //go:systemstack
    280 func stackcacherelease(c *mcache, order uint8) {
    281 	if stackDebug >= 1 {
    282 		print("stackcacherelease order=", order, "\n")
    283 	}
    284 	x := c.stackcache[order].list
    285 	size := c.stackcache[order].size
    286 	lock(&stackpoolmu)
    287 	for size > _StackCacheSize/2 {
    288 		y := x.ptr().next
    289 		stackpoolfree(x, order)
    290 		x = y
    291 		size -= _FixedStack << order
    292 	}
    293 	unlock(&stackpoolmu)
    294 	c.stackcache[order].list = x
    295 	c.stackcache[order].size = size
    296 }
    297 
    298 //go:systemstack
    299 func stackcache_clear(c *mcache) {
    300 	if stackDebug >= 1 {
    301 		print("stackcache clear\n")
    302 	}
    303 	lock(&stackpoolmu)
    304 	for order := uint8(0); order < _NumStackOrders; order++ {
    305 		x := c.stackcache[order].list
    306 		for x.ptr() != nil {
    307 			y := x.ptr().next
    308 			stackpoolfree(x, order)
    309 			x = y
    310 		}
    311 		c.stackcache[order].list = 0
    312 		c.stackcache[order].size = 0
    313 	}
    314 	unlock(&stackpoolmu)
    315 }
    316 
    317 // stackalloc allocates an n byte stack.
    318 //
    319 // stackalloc must run on the system stack because it uses per-P
    320 // resources and must not split the stack.
    321 //
    322 //go:systemstack
    323 func stackalloc(n uint32) (stack, []stkbar) {
    324 	// Stackalloc must be called on the scheduler stack, so that we
    325 	// never try to grow the stack while stackalloc itself is running.
    326 	// Doing so would cause a deadlock (issue 1547).
    327 	thisg := getg()
    328 	if thisg != thisg.m.g0 {
    329 		throw("stackalloc not on scheduler stack")
    330 	}
    331 	if n&(n-1) != 0 {
    332 		throw("stack size not a power of 2")
    333 	}
    334 	if stackDebug >= 1 {
    335 		print("stackalloc ", n, "\n")
    336 	}
    337 
    338 	// Compute the size of stack barrier array.
    339 	maxstkbar := gcMaxStackBarriers(int(n))
    340 	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)
    341 	var stkbarSlice slice
    342 
    343 	if debug.efence != 0 || stackFromSystem != 0 {
    344 		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
    345 		if v == nil {
    346 			throw("out of memory (stackalloc)")
    347 		}
    348 		top := uintptr(n) - nstkbar
    349 		if maxstkbar != 0 {
    350 			stkbarSlice = slice{add(v, top), 0, maxstkbar}
    351 		}
    352 		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
    353 	}
    354 
    355 	// Small stacks are allocated with a fixed-size free-list allocator.
    356 	// If we need a stack of a bigger size, we fall back on allocating
    357 	// a dedicated span.
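        	// As an illustrative example, assuming _FixedStack = 2048, the usual
        	// _NumStackOrders = 4, and a 32 KB _StackCacheSize: requests of 2 KB,
        	// 4 KB, 8 KB, and 16 KB are served from the per-P cache or the
        	// fixed-size pools (orders 0 through 3), while a request of 32 KB or
        	// more takes the dedicated-span path below, keyed by log_2 of its
        	// page count.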
    358 	var v unsafe.Pointer
    359 	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
    360 		order := uint8(0)
    361 		n2 := n
    362 		for n2 > _FixedStack {
    363 			order++
    364 			n2 >>= 1
    365 		}
    366 		var x gclinkptr
    367 		c := thisg.m.mcache
    368 		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
    369 			// c == nil can happen in the guts of exitsyscall or
    370 			// procresize. Just get a stack from the global pool.
    371 			// Also don't touch stackcache during gc
    372 			// as it's flushed concurrently.
    373 			lock(&stackpoolmu)
    374 			x = stackpoolalloc(order)
    375 			unlock(&stackpoolmu)
    376 		} else {
    377 			x = c.stackcache[order].list
    378 			if x.ptr() == nil {
    379 				stackcacherefill(c, order)
    380 				x = c.stackcache[order].list
    381 			}
    382 			c.stackcache[order].list = x.ptr().next
    383 			c.stackcache[order].size -= uintptr(n)
    384 		}
    385 		v = unsafe.Pointer(x)
    386 	} else {
    387 		var s *mspan
    388 		npage := uintptr(n) >> _PageShift
    389 		log2npage := stacklog2(npage)
    390 
    391 		// Try to get a stack from the large stack cache.
    392 		lock(&stackLarge.lock)
    393 		if !stackLarge.free[log2npage].isEmpty() {
    394 			s = stackLarge.free[log2npage].first
    395 			stackLarge.free[log2npage].remove(s)
    396 		}
    397 		unlock(&stackLarge.lock)
    398 
    399 		if s == nil {
    400 			// Allocate a new stack from the heap.
    401 			s = mheap_.allocStack(npage)
    402 			if s == nil {
    403 				throw("out of memory")
    404 			}
    405 		}
    406 		v = unsafe.Pointer(s.base())
    407 	}
    408 
    409 	if raceenabled {
    410 		racemalloc(v, uintptr(n))
    411 	}
    412 	if msanenabled {
    413 		msanmalloc(v, uintptr(n))
    414 	}
    415 	if stackDebug >= 1 {
    416 		print("  allocated ", v, "\n")
    417 	}
    418 	top := uintptr(n) - nstkbar
    419 	if maxstkbar != 0 {
    420 		stkbarSlice = slice{add(v, top), 0, maxstkbar}
    421 	}
    422 	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
    423 }
    424 
    425 // stackfree frees an n byte stack allocation at stk.
    426 //
    427 // stackfree must run on the system stack because it uses per-P
    428 // resources and must not split the stack.
    429 //
    430 //go:systemstack
    431 func stackfree(stk stack, n uintptr) {
    432 	gp := getg()
    433 	v := unsafe.Pointer(stk.lo)
    434 	if n&(n-1) != 0 {
    435 		throw("stack not a power of 2")
    436 	}
    437 	if stk.lo+n < stk.hi {
    438 		throw("bad stack size")
    439 	}
    440 	if stackDebug >= 1 {
    441 		println("stackfree", v, n)
    442 		memclrNoHeapPointers(v, n) // for testing, clobber stack data
    443 	}
    444 	if debug.efence != 0 || stackFromSystem != 0 {
    445 		if debug.efence != 0 || stackFaultOnFree != 0 {
    446 			sysFault(v, n)
    447 		} else {
    448 			sysFree(v, n, &memstats.stacks_sys)
    449 		}
    450 		return
    451 	}
    452 	if msanenabled {
    453 		msanfree(v, n)
    454 	}
    455 	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
    456 		order := uint8(0)
    457 		n2 := n
    458 		for n2 > _FixedStack {
    459 			order++
    460 			n2 >>= 1
    461 		}
    462 		x := gclinkptr(v)
    463 		c := gp.m.mcache
    464 		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
    465 			lock(&stackpoolmu)
    466 			stackpoolfree(x, order)
    467 			unlock(&stackpoolmu)
    468 		} else {
    469 			if c.stackcache[order].size >= _StackCacheSize {
    470 				stackcacherelease(c, order)
    471 			}
    472 			x.ptr().next = c.stackcache[order].list
    473 			c.stackcache[order].list = x
    474 			c.stackcache[order].size += n
    475 		}
    476 	} else {
    477 		s := mheap_.lookup(v)
    478 		if s.state != _MSpanStack {
    479 			println(hex(s.base()), v)
    480 			throw("bad span state")
    481 		}
    482 		if gcphase == _GCoff {
    483 			// Free the stack immediately if we're
    484 			// sweeping.
    485 			mheap_.freeStack(s)
    486 		} else {
    487 			// If the GC is running, we can't return a
    488 			// stack span to the heap because it could be
    489 			// reused as a heap span, and this state
    490 			// change would race with GC. Add it to the
    491 			// large stack cache instead.
    492 			log2npage := stacklog2(s.npages)
    493 			lock(&stackLarge.lock)
    494 			stackLarge.free[log2npage].insert(s)
    495 			unlock(&stackLarge.lock)
    496 		}
    497 	}
    498 }
    499 
    500 var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
    501 
    502 var ptrnames = []string{
    503 	0: "scalar",
    504 	1: "ptr",
    505 }
    506 
    507 // Stack frame layout
    508 //
    509 // (x86)
    510 // +------------------+
    511 // | args from caller |
    512 // +------------------+ <- frame->argp
    513 // |  return address  |
    514 // +------------------+
    515 // |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
    516 // +------------------+ <- frame->varp
    517 // |     locals       |
    518 // +------------------+
    519 // |  args to callee  |
    520 // +------------------+ <- frame->sp
    521 //
    522 // (arm)
    523 // +------------------+
    524 // | args from caller |
    525 // +------------------+ <- frame->argp
    526 // | caller's retaddr |
    527 // +------------------+ <- frame->varp
    528 // |     locals       |
    529 // +------------------+
    530 // |  args to callee  |
    531 // +------------------+
    532 // |  return address  |
    533 // +------------------+ <- frame->sp
    534 
    535 type adjustinfo struct {
    536 	old   stack
    537 	delta uintptr // ptr distance from old to new stack (new.hi - old.hi)
    538 	cache pcvalueCache
    539 
    540 	// sghi is the highest sudog.elem on the stack.
    541 	sghi uintptr
    542 }
    543 
    544 // Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
    545 // If so, it rewrites *vpp to point into the new stack.
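        // As an illustrative example with made-up addresses: if the old stack is
        // [0xc420030000, 0xc420032000) and the new stack is
        // [0xc420070000, 0xc420072000), then delta = 0x40000 and a slot holding
        // 0xc420031ab8 is rewritten to 0xc420071ab8, while values outside the old
        // range are left untouched.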
    546 func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
    547 	pp := (*uintptr)(vpp)
    548 	p := *pp
    549 	if stackDebug >= 4 {
    550 		print("        ", pp, ":", hex(p), "\n")
    551 	}
    552 	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
    553 		*pp = p + adjinfo.delta
    554 		if stackDebug >= 3 {
    555 			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
    556 		}
    557 	}
    558 }
    559 
    560 // Information from the compiler about the layout of stack frames.
    561 type bitvector struct {
    562 	n        int32 // # of bits
    563 	bytedata *uint8
    564 }
    565 
    566 type gobitvector struct {
    567 	n        uintptr
    568 	bytedata []uint8
    569 }
    570 
    571 func gobv(bv bitvector) gobitvector {
    572 	return gobitvector{
    573 		uintptr(bv.n),
    574 		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
    575 	}
    576 }
    577 
    578 func ptrbit(bv *gobitvector, i uintptr) uint8 {
    579 	return (bv.bytedata[i/8] >> (i % 8)) & 1
    580 }
    581 
    582 // bv describes the memory starting at address scanp.
    583 // Adjust any pointers contained therein.
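        // As an illustrative example: if bv.bytedata[0] is 0x05 (binary 00000101),
        // ptrbit reports bits 0 and 2 as set, so the first and third pointer-sized
        // words at scanp are treated as pointers and the second as a scalar
        // (see ptrnames above).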
    584 func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
    585 	bv := gobv(*cbv)
    586 	minp := adjinfo.old.lo
    587 	maxp := adjinfo.old.hi
    588 	delta := adjinfo.delta
    589 	num := bv.n
    590 	// If this frame might contain channel receive slots, use CAS
    591 	// to adjust pointers. If the slot hasn't been received into
    592 	// yet, it may contain stack pointers and a concurrent send
    593 	// could race with adjusting those pointers. (The sent value
    594 	// itself can never contain stack pointers.)
    595 	useCAS := uintptr(scanp) < adjinfo.sghi
    596 	for i := uintptr(0); i < num; i++ {
    597 		if stackDebug >= 4 {
    598 			print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
    599 		}
    600 		if ptrbit(&bv, i) == 1 {
    601 			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
    602 		retry:
    603 			p := *pp
    604 			if f != nil && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
    605 				// Looks like a junk value in a pointer slot.
    606 				// Live analysis wrong?
    607 				getg().m.traceback = 2
    608 				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
    609 				throw("invalid pointer found on stack")
    610 			}
    611 			if minp <= p && p < maxp {
    612 				if stackDebug >= 3 {
    613 					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
    614 				}
    615 				if useCAS {
    616 					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
    617 					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
    618 						goto retry
    619 					}
    620 				} else {
    621 					*pp = p + delta
    622 				}
    623 			}
    624 		}
    625 	}
    626 }
    627 
    628 // Note: the argument/return area is adjusted by the callee.
    629 func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
    630 	adjinfo := (*adjustinfo)(arg)
    631 	targetpc := frame.continpc
    632 	if targetpc == 0 {
    633 		// Frame is dead.
    634 		return true
    635 	}
    636 	f := frame.fn
    637 	if stackDebug >= 2 {
    638 		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
    639 	}
    640 	if f.entry == systemstack_switchPC {
    641 		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
    642 		// We will allow it to be copied even though we don't
    643 		// have full GC info for it (because it is written in asm).
    644 		return true
    645 	}
    646 	if targetpc != f.entry {
    647 		targetpc--
    648 	}
    649 	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
    650 	if pcdata == -1 {
    651 		pcdata = 0 // in prologue
    652 	}
    653 
    654 	// Adjust local variables if stack frame has been allocated.
    655 	size := frame.varp - frame.sp
    656 	var minsize uintptr
    657 	switch sys.ArchFamily {
    658 	case sys.ARM64:
    659 		minsize = sys.SpAlign
    660 	default:
    661 		minsize = sys.MinFrameSize
    662 	}
    663 	if size > minsize {
    664 		var bv bitvector
    665 		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
    666 		if stackmap == nil || stackmap.n <= 0 {
    667 			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
    668 			throw("missing stackmap")
    669 		}
    670 		// Locals bitmap information, scan just the pointers in locals.
    671 		if pcdata < 0 || pcdata >= stackmap.n {
    672 			// don't know where we are
    673 			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
    674 			throw("bad symbol table")
    675 		}
    676 		bv = stackmapdata(stackmap, pcdata)
    677 		size = uintptr(bv.n) * sys.PtrSize
    678 		if stackDebug >= 3 {
    679 			print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
    680 		}
    681 		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
    682 	}
    683 
    684 	// Adjust saved base pointer if there is one.
    685 	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
    686 		if !framepointer_enabled {
    687 			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
    688 			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
    689 			throw("bad frame layout")
    690 		}
    691 		if stackDebug >= 3 {
    692 			print("      saved bp\n")
    693 		}
    694 		if debugCheckBP {
    695 			// Frame pointers should always point to the next higher frame on
    696 			// the Go stack (or be nil, for the top frame on the stack).
    697 			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
    698 			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
    699 				println("runtime: found invalid frame pointer")
    700 				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
    701 				throw("bad frame pointer")
    702 			}
    703 		}
    704 		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
    705 	}
    706 
    707 	// Adjust arguments.
    708 	if frame.arglen > 0 {
    709 		var bv bitvector
    710 		if frame.argmap != nil {
    711 			bv = *frame.argmap
    712 		} else {
    713 			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
    714 			if stackmap == nil || stackmap.n <= 0 {
    715 				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
    716 				throw("missing stackmap")
    717 			}
    718 			if pcdata < 0 || pcdata >= stackmap.n {
    719 				// don't know where we are
    720 				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
    721 				throw("bad symbol table")
    722 			}
    723 			bv = stackmapdata(stackmap, pcdata)
    724 		}
    725 		if stackDebug >= 3 {
    726 			print("      args\n")
    727 		}
    728 		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
    729 	}
    730 	return true
    731 }
    732 
    733 func adjustctxt(gp *g, adjinfo *adjustinfo) {
    734 	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
    735 	if !framepointer_enabled {
    736 		return
    737 	}
    738 	if debugCheckBP {
    739 		bp := gp.sched.bp
    740 		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
    741 			println("runtime: found invalid top frame pointer")
    742 			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
    743 			throw("bad top frame pointer")
    744 		}
    745 	}
    746 	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
    747 }
    748 
    749 func adjustdefers(gp *g, adjinfo *adjustinfo) {
    750 	// Adjust defer argument blocks the same way we adjust active stack frames.
    751 	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
    752 
    753 	// Adjust pointers in the Defer structs.
    754 	// Defer structs themselves are never on the stack.
    755 	for d := gp._defer; d != nil; d = d.link {
    756 		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
    757 		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
    758 		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
    759 	}
    760 }
    761 
    762 func adjustpanics(gp *g, adjinfo *adjustinfo) {
    763 	// Panics are on the stack and already adjusted.
    764 	// Update pointer to head of list in G.
    765 	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
    766 }
    767 
    768 func adjustsudogs(gp *g, adjinfo *adjustinfo) {
    769 	// The data elements pointed to by a SudoG structure
    770 	// might be on the stack.
    771 	for s := gp.waiting; s != nil; s = s.waitlink {
    772 		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
    773 		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
    774 	}
    775 }
    776 
    777 func adjuststkbar(gp *g, adjinfo *adjustinfo) {
    778 	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
    779 		adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
    780 	}
    781 }
    782 
    783 func fillstack(stk stack, b byte) {
    784 	for p := stk.lo; p < stk.hi; p++ {
    785 		*(*byte)(unsafe.Pointer(p)) = b
    786 	}
    787 }
    788 
    789 func findsghi(gp *g, stk stack) uintptr {
    790 	var sghi uintptr
    791 	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
    792 		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
    793 		if stk.lo <= p && p < stk.hi && p > sghi {
    794 			sghi = p
    795 		}
    796 		p = uintptr(unsafe.Pointer(sg.selectdone)) + unsafe.Sizeof(sg.selectdone)
    797 		if stk.lo <= p && p < stk.hi && p > sghi {
    798 			sghi = p
    799 		}
    800 	}
    801 	return sghi
    802 }
    803 
    804 // syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
    805 // stack they refer to while synchronizing with concurrent channel
    806 // operations. It returns the number of bytes of stack copied.
    807 func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
    808 	if gp.waiting == nil {
    809 		return 0
    810 	}
    811 
    812 	// Lock channels to prevent concurrent send/receive.
    813 	// It's important that we *only* do this for async
    814 	// copystack; otherwise, gp may be in the middle of
    815 	// putting itself on wait queues and this would
    816 	// self-deadlock.
    817 	var lastc *hchan
    818 	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
    819 		if sg.c != lastc {
    820 			lock(&sg.c.lock)
    821 		}
    822 		lastc = sg.c
    823 	}
    824 
    825 	// Adjust sudogs.
    826 	adjustsudogs(gp, adjinfo)
    827 
    828 	// Copy the part of the stack the sudogs point into
    829 	// while holding the lock to prevent races on
    830 	// send/receive slots.
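        	// As an illustrative example: oldBot below is the old SP
        	// (old.hi - used). If the highest sudog-referenced byte ends 256 bytes
        	// above it, sgsize is 256 and only those 256 bytes are copied here,
        	// under the channel locks; copystack then copies the remaining
        	// used-256 bytes without holding them.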
    831 	var sgsize uintptr
    832 	if adjinfo.sghi != 0 {
    833 		oldBot := adjinfo.old.hi - used
    834 		newBot := oldBot + adjinfo.delta
    835 		sgsize = adjinfo.sghi - oldBot
    836 		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
    837 	}
    838 
    839 	// Unlock channels.
    840 	lastc = nil
    841 	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
    842 		if sg.c != lastc {
    843 			unlock(&sg.c.lock)
    844 		}
    845 		lastc = sg.c
    846 	}
    847 
    848 	return sgsize
    849 }
    850 
    851 // Copies gp's stack to a new stack of a different size.
    852 // Caller must have changed gp status to Gcopystack.
    853 //
    854 // If sync is true, this is a self-triggered stack growth and, in
    855 // particular, no other G may be writing to gp's stack (e.g., via a
    856 // channel operation). If sync is false, copystack protects against
    857 // concurrent channel operations.
    858 func copystack(gp *g, newsize uintptr, sync bool) {
    859 	if gp.syscallsp != 0 {
    860 		throw("stack growth not allowed in system call")
    861 	}
    862 	old := gp.stack
    863 	if old.lo == 0 {
    864 		throw("nil stackbase")
    865 	}
    866 	used := old.hi - gp.sched.sp
    867 
    868 	// allocate new stack
    869 	new, newstkbar := stackalloc(uint32(newsize))
    870 	if stackPoisonCopy != 0 {
    871 		fillstack(new, 0xfd)
    872 	}
    873 	if stackDebug >= 1 {
    874 		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
    875 	}
    876 
    877 	// Compute adjustment.
    878 	var adjinfo adjustinfo
    879 	adjinfo.old = old
    880 	adjinfo.delta = new.hi - old.hi
    881 
    882 	// Adjust sudogs, synchronizing with channel ops if necessary.
    883 	ncopy := used
    884 	if sync {
    885 		adjustsudogs(gp, &adjinfo)
    886 	} else {
    887 		// sudogs can point into the stack. During concurrent
    888 		// shrinking, these areas may be written to. Find the
    889 		// highest such pointer so we can handle everything
    890 		// there and below carefully. (This shouldn't be far
    891 		// from the bottom of the stack, so there's little
    892 		// cost in handling everything below it carefully.)
    893 		adjinfo.sghi = findsghi(gp, old)
    894 
    895 		// Synchronize with channel ops and copy the part of
    896 		// the stack they may interact with.
    897 		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
    898 	}
    899 
    900 	// Copy the stack (or the rest of it) to the new location
    901 	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)
    902 
    903 	// Disallow sigprof scans of this stack and block if there's
    904 	// one in progress.
    905 	gcLockStackBarriers(gp)
    906 
    907 	// Adjust remaining structures that have pointers into stacks.
    908 	// We have to do most of these before we traceback the new
    909 	// stack because gentraceback uses them.
    910 	adjustctxt(gp, &adjinfo)
    911 	adjustdefers(gp, &adjinfo)
    912 	adjustpanics(gp, &adjinfo)
    913 	adjuststkbar(gp, &adjinfo)
    914 	if adjinfo.sghi != 0 {
    915 		adjinfo.sghi += adjinfo.delta
    916 	}
    917 
    918 	// copy old stack barriers to new stack barrier array
    919 	newstkbar = newstkbar[:len(gp.stkbar)]
    920 	copy(newstkbar, gp.stkbar)
    921 
    922 	// Swap out old stack for new one
    923 	gp.stack = new
    924 	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
    925 	gp.sched.sp = new.hi - used
    926 	oldsize := gp.stackAlloc
    927 	gp.stackAlloc = newsize
    928 	gp.stkbar = newstkbar
    929 	gp.stktopsp += adjinfo.delta
    930 
    931 	// Adjust pointers in the new stack.
    932 	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
    933 
    934 	gcUnlockStackBarriers(gp)
    935 
    936 	// free old stack
    937 	if stackPoisonCopy != 0 {
    938 		fillstack(old, 0xfc)
    939 	}
    940 	stackfree(old, oldsize)
    941 }
    942 
    943 // round x up to a power of 2.
    944 func round2(x int32) int32 {
    945 	s := uint(0)
    946 	for 1<<s < x {
    947 		s++
    948 	}
    949 	return 1 << s
    950 }
    951 
    952 // Called from runtime·morestack when more stack is needed.
    953 // Allocate larger stack and relocate to new stack.
    954 // Stack growth is multiplicative, for constant amortized cost.
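        // As an illustrative example of the amortized cost: doubling from a 2 KB
        // stack up to 1 MB copies at most 2 KB + 4 KB + ... + 512 KB, i.e. less
        // than 1 MB of data in total across all of the growth steps.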
    955 //
    956 // g->atomicstatus will be Grunning or Gscanrunning upon entry.
    957 // If the GC is trying to stop this g then it will set preemptscan to true.
    958 //
    959 // ctxt is the value of the context register on morestack. newstack
    960 // will write it to g.sched.ctxt.
    961 func newstack(ctxt unsafe.Pointer) {
    962 	thisg := getg()
    963 	// TODO: double check all gp. shouldn't be getg().
    964 	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
    965 		throw("stack growth after fork")
    966 	}
    967 	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
    968 		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
    969 		morebuf := thisg.m.morebuf
    970 		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
    971 		throw("runtime: wrong goroutine in newstack")
    972 	}
    973 
    974 	gp := thisg.m.curg
    975 	// Write ctxt to gp.sched. We do this here instead of in
    976 	// morestack so it has the necessary write barrier.
    977 	gp.sched.ctxt = ctxt
    978 
    979 	if thisg.m.curg.throwsplit {
    980 		// Update syscallsp, syscallpc in case traceback uses them.
    981 		morebuf := thisg.m.morebuf
    982 		gp.syscallsp = morebuf.sp
    983 		gp.syscallpc = morebuf.pc
    984 		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
    985 			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
    986 			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
    987 
    988 		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
    989 		throw("runtime: stack split at bad time")
    990 	}
    991 
    992 	morebuf := thisg.m.morebuf
    993 	thisg.m.morebuf.pc = 0
    994 	thisg.m.morebuf.lr = 0
    995 	thisg.m.morebuf.sp = 0
    996 	thisg.m.morebuf.g = 0
    997 
    998 	// NOTE: stackguard0 may change underfoot, if another thread
    999 	// is about to try to preempt gp. Read it just once and use that same
   1000 	// value now and below.
   1001 	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt
   1002 
   1003 	// Be conservative about where we preempt.
   1004 	// We are interested in preempting user Go code, not runtime code.
   1005 	// If we're holding locks, mallocing, or preemption is disabled, don't
   1006 	// preempt.
   1007 	// This check is very early in newstack so that even the status change
   1008 	// from Grunning to Gwaiting and back doesn't happen in this case.
   1009 	// That status change by itself can be viewed as a small preemption,
   1010 	// because the GC might change Gwaiting to Gscanwaiting, and then
   1011 	// this goroutine has to wait for the GC to finish before continuing.
   1012 	// If the GC is in some way dependent on this goroutine (for example,
   1013 	// it needs a lock held by the goroutine), that small preemption turns
   1014 	// into a real deadlock.
   1015 	if preempt {
   1016 		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
   1017 			// Let the goroutine keep running for now.
   1018 			// gp->preempt is set, so it will be preempted next time.
   1019 			gp.stackguard0 = gp.stack.lo + _StackGuard
   1020 			gogo(&gp.sched) // never return
   1021 		}
   1022 	}
   1023 
   1024 	if gp.stack.lo == 0 {
   1025 		throw("missing stack in newstack")
   1026 	}
   1027 	sp := gp.sched.sp
   1028 	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
   1029 		// The call to morestack cost a word.
   1030 		sp -= sys.PtrSize
   1031 	}
   1032 	if stackDebug >= 1 || sp < gp.stack.lo {
   1033 		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
   1034 			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
   1035 			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
   1036 	}
   1037 	if sp < gp.stack.lo {
   1038 		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
   1039 		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
   1040 		throw("runtime: split stack overflow")
   1041 	}
   1042 
   1043 	if preempt {
   1044 		if gp == thisg.m.g0 {
   1045 			throw("runtime: preempt g0")
   1046 		}
   1047 		if thisg.m.p == 0 && thisg.m.locks == 0 {
   1048 			throw("runtime: g is running but p is not")
   1049 		}
   1050 		// Synchronize with scang.
   1051 		casgstatus(gp, _Grunning, _Gwaiting)
   1052 		if gp.preemptscan {
   1053 			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
   1054 				// Likely to be racing with the GC as
   1055 				// it sees a _Gwaiting and does the
   1056 				// stack scan. If so, gcworkdone will
   1057 				// be set and gcphasework will simply
   1058 				// return.
   1059 			}
   1060 			if !gp.gcscandone {
   1061 				// gcw is safe because we're on the
   1062 				// system stack.
   1063 				gcw := &gp.m.p.ptr().gcw
   1064 				scanstack(gp, gcw)
   1065 				if gcBlackenPromptly {
   1066 					gcw.dispose()
   1067 				}
   1068 				gp.gcscandone = true
   1069 			}
   1070 			gp.preemptscan = false
   1071 			gp.preempt = false
   1072 			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
   1073 			// This clears gcscanvalid.
   1074 			casgstatus(gp, _Gwaiting, _Grunning)
   1075 			gp.stackguard0 = gp.stack.lo + _StackGuard
   1076 			gogo(&gp.sched) // never return
   1077 		}
   1078 
   1079 		// Act like goroutine called runtime.Gosched.
   1080 		casgstatus(gp, _Gwaiting, _Grunning)
   1081 		gopreempt_m(gp) // never return
   1082 	}
   1083 
   1084 	// Allocate a bigger segment and move the stack.
   1085 	oldsize := int(gp.stackAlloc)
   1086 	newsize := oldsize * 2
   1087 	if uintptr(newsize) > maxstacksize {
   1088 		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
   1089 		throw("stack overflow")
   1090 	}
   1091 
   1092 	// The goroutine must be executing in order to call newstack,
   1093 	// so it must be Grunning (or Gscanrunning).
   1094 	casgstatus(gp, _Grunning, _Gcopystack)
   1095 
   1096 	// The concurrent GC will not scan the stack while we are doing the copy since
   1097 	// the gp is in a Gcopystack status.
   1098 	copystack(gp, uintptr(newsize), true)
   1099 	if stackDebug >= 1 {
   1100 		print("stack grow done\n")
   1101 	}
   1102 	casgstatus(gp, _Gcopystack, _Grunning)
   1103 	gogo(&gp.sched)
   1104 }
   1105 
   1106 //go:nosplit
   1107 func nilfunc() {
   1108 	*(*uint8)(nil) = 0
   1109 }
   1110 
   1111 // adjust Gobuf as if it executed a call to fn
   1112 // and then did an immediate gosave.
   1113 func gostartcallfn(gobuf *gobuf, fv *funcval) {
   1114 	var fn unsafe.Pointer
   1115 	if fv != nil {
   1116 		fn = unsafe.Pointer(fv.fn)
   1117 	} else {
   1118 		fn = unsafe.Pointer(funcPC(nilfunc))
   1119 	}
   1120 	gostartcall(gobuf, fn, unsafe.Pointer(fv))
   1121 }
   1122 
   1123 // Maybe shrink the stack being used by gp.
   1124 // Called at garbage collection time.
   1125 // gp must be stopped, but the world need not be.
   1126 func shrinkstack(gp *g) {
   1127 	gstatus := readgstatus(gp)
   1128 	if gstatus&^_Gscan == _Gdead {
   1129 		if gp.stack.lo != 0 {
   1130 			// Free whole stack - it will get reallocated
   1131 			// if G is used again.
   1132 			stackfree(gp.stack, gp.stackAlloc)
   1133 			gp.stack.lo = 0
   1134 			gp.stack.hi = 0
   1135 			gp.stkbar = nil
   1136 			gp.stkbarPos = 0
   1137 		}
   1138 		return
   1139 	}
   1140 	if gp.stack.lo == 0 {
   1141 		throw("missing stack in shrinkstack")
   1142 	}
   1143 	if gstatus&_Gscan == 0 {
   1144 		throw("bad status in shrinkstack")
   1145 	}
   1146 
   1147 	if debug.gcshrinkstackoff > 0 {
   1148 		return
   1149 	}
   1150 	if gp.startpc == gcBgMarkWorkerPC {
   1151 		// We're not allowed to shrink the gcBgMarkWorker
   1152 		// stack (see gcBgMarkWorker for explanation).
   1153 		return
   1154 	}
   1155 
   1156 	oldsize := gp.stackAlloc
   1157 	newsize := oldsize / 2
   1158 	// Don't shrink the allocation below the minimum-sized stack
   1159 	// allocation.
   1160 	if newsize < _FixedStack {
   1161 		return
   1162 	}
   1163 	// Compute how much of the stack is currently in use and only
   1164 	// shrink the stack if gp is using less than a quarter of its
   1165 	// current stack. The currently used stack includes everything
   1166 	// down to the SP plus the stack guard space that ensures
   1167 	// there's room for nosplit functions.
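        	// As an illustrative example, with _StackLimit = 752 (the typical
        	// linux/amd64 value): on a 16 KB stack with the SP 1 KB below
        	// stack.hi, used = 1024+752 = 1776 < 16384/4 = 4096, so the stack is
        	// shrunk to 8 KB; with the SP 4 KB below stack.hi, used = 4848 >= 4096
        	// and the stack is left as is.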
   1168 	avail := gp.stack.hi - gp.stack.lo
   1169 	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
   1170 		return
   1171 	}
   1172 
   1173 	// We can't copy the stack if we're in a syscall.
   1174 	// The syscall might have pointers into the stack.
   1175 	if gp.syscallsp != 0 {
   1176 		return
   1177 	}
   1178 	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
   1179 		return
   1180 	}
   1181 
   1182 	if stackDebug > 0 {
   1183 		print("shrinking stack ", oldsize, "->", newsize, "\n")
   1184 	}
   1185 
   1186 	copystack(gp, newsize, false)
   1187 }
   1188 
   1189 // freeStackSpans frees unused stack spans at the end of GC.
   1190 func freeStackSpans() {
   1191 	lock(&stackpoolmu)
   1192 
   1193 	// Scan stack pools for empty stack spans.
   1194 	for order := range stackpool {
   1195 		list := &stackpool[order]
   1196 		for s := list.first; s != nil; {
   1197 			next := s.next
   1198 			if s.allocCount == 0 {
   1199 				list.remove(s)
   1200 				s.stackfreelist = 0
   1201 				mheap_.freeStack(s)
   1202 			}
   1203 			s = next
   1204 		}
   1205 	}
   1206 
   1207 	unlock(&stackpoolmu)
   1208 
   1209 	// Free large stack spans.
   1210 	lock(&stackLarge.lock)
   1211 	for i := range stackLarge.free {
   1212 		for s := stackLarge.free[i].first; s != nil; {
   1213 			next := s.next
   1214 			stackLarge.free[i].remove(s)
   1215 			mheap_.freeStack(s)
   1216 			s = next
   1217 		}
   1218 	}
   1219 	unlock(&stackLarge.lock)
   1220 }
   1221 
   1222 //go:nosplit
   1223 func morestackc() {
   1224 	systemstack(func() {
   1225 		throw("attempt to execute C code on Go stack")
   1226 	})
   1227 }
   1228